Example #1
def recorrect_s2xy(
        data,
        old_map_file='s2_xy_XENON1T_17Feb2017.json',
        new_map_file=pax_config['WaveformSimulator']['s2_light_yield_map']):
    """Recompute the (x,y) correction for a different map
    :param data: dataframe (Basics and Extended minitrees required)
    :param old_map_file: Map filename that was used to process the dataframe. Defaults to the map used for 6.4.2
    :param new_map_file: Map filename that you want to use for the correction. Defaults to the pax config default.
    :return: dataframe with altered value in cS2 (and few added columns for uncorrected position)

    TODO: This could be rewritten to use the extended minitrees, so the old map no longer needs to be specified.
    """
    data = data.copy()
    add_uncorrected_position(data)

    old_map = InterpolatingMap(data_file_name(old_map_file))
    new_map = InterpolatingMap(data_file_name(new_map_file))

    # Correction is a *division* factor (map contains light yield), so to un-correct we first multiply
    recorrection = np.zeros(len(data))
    x = data._u_x.values
    y = data._u_y.values
    for i in tqdm(range(len(data))):
        recorrection[i] = old_map.get_value(x[i], y[i]) / new_map.get_value(
            x[i], y[i])

    data['cs2'] *= recorrection

    return data
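
A minimal usage sketch (assuming hax is initialized and the dataframe carries the required minitrees; the run name and new map filename are purely illustrative):

    df = hax.minitrees.load('170204_1410', treemakers=['Basics', 'Extended'])
    df = recorrect_s2xy(df, new_map_file='s2_xy_alternative_map.json')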
Example #2
    def load_nn(self):
        """For loading NN files"""
        from keras.models import model_from_json

        # If we already loaded it up then skip
        if ((self.tfnn_weights == self.corrections_handler.get_misc_correction(
                "tfnn_weights", self.run_number)) and
            (self.tfnn_model == self.corrections_handler.get_misc_correction(
                "tfnn_model", self.run_number))):
            return

        self.tfnn_weights = self.corrections_handler.get_misc_correction(
            "tfnn_weights", self.run_number)
        self.tfnn_model = self.corrections_handler.get_misc_correction(
            "tfnn_model", self.run_number)

        # Load the model architecture from the model JSON file
        with open(utils.data_file_name(self.tfnn_model), 'r') as json_file_nn:
            loaded_model_json = json_file_nn.read()
        self.loaded_nn = model_from_json(loaded_model_json)

        # The same JSON file also carries the list of bad PMTs to exclude
        with open(utils.data_file_name(self.tfnn_model), 'r') as json_file_nn:
            loaded_model_json_dict = json.load(json_file_nn)
        self.list_bad_pmts = loaded_model_json_dict['badPMTList']

        weights_file = utils.data_file_name(self.tfnn_weights)
        self.loaded_nn.load_weights(weights_file)
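
Once load_nn() has run, the Keras model can be evaluated on a hit pattern. A sketch, assuming the net takes the top-array hit pattern (with the bad PMTs removed) as a flat input vector -- hitpattern_top here is hypothetical:

    pattern = np.delete(hitpattern_top, self.list_bad_pmts)
    predicted_position = self.loaded_nn.predict(pattern.reshape(1, -1))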
Example #3
    def __init__(self):

        hax.minitrees.TreeMaker.__init__(self)
        self.extra_metadata = hax.config['corrections_definitions']
        self.corrections_handler = CorrectionsHandler()

        # We need to pull some stuff from the pax config
        self.pax_config = load_configuration("XENON1T")
        self.tpc_channels = self.pax_config['DEFAULT']['channels_in_detector']['tpc']
        self.confused_s1_channels = []
        self.s1_statistic = (
            self.pax_config['BuildInteractions.BasicInteractionProperties']['s1_pattern_statistic']
        )
        qes = np.array(self.pax_config['DEFAULT']['quantum_efficiencies'])

        self.s1_pattern_fitter = PatternFitter(
            filename=utils.data_file_name(self.pax_config['WaveformSimulator']['s1_patterns_file']),
            zoom_factor=self.pax_config['WaveformSimulator'].get('s1_patterns_zoom_factor', 1),
            adjust_to_qe=qes[self.tpc_channels],
            default_errors=(self.pax_config['DEFAULT']['relative_qe_error'] +
                            self.pax_config['DEFAULT']['relative_gain_error'])
        )

        self.top_channels = self.pax_config['DEFAULT']['channels_top']
        self.ntop_pmts = len(self.top_channels)

        # Declare nn stuff
        self.tfnn_weights = None
        self.tfnn_model = None
        self.loaded_nn = None

        # Run doc
        self.loaded_run_doc = None
        self.run_doc = None
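
The fitter and statistic set up here would later be used to score an S1 hit pattern against the expected one. A sketch, assuming pax's PatternFitter exposes compute_gof with this signature (the coordinates and per-channel areas are illustrative):

    gof = self.s1_pattern_fitter.compute_gof(
        coordinates=(x, y, z),
        areas_observed=s1_area_per_channel,
        statistic=self.s1_statistic)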
Example #4
    def startup(self):
        """ Initialize the neural net.
        """
        if self.config['pmt_0_is_fake']:
            self.input_channels = self.pmts[1:]
        else:
            self.input_channels = self.pmts
        self.nn_output_unit = self.config['nn_output_unit']

        # Possibly scale the input of the activation function by a supplied value (float)
        activation_scale = self.config['activation_function_scale']

        # Apply the activation function to the output layer (bool)
        output_layer_function = self.config['output_layer_function']

        # Load the file defining the structure (number of nodes per layer)
        # as well as the weights and biases of the neural network
        data = np.load(utils.data_file_name(self.config['neural_net_file']))

        self.nn = NeuralNet(structure=data['structure'],
                            weights=data['weights'],
                            biases=data['biases'],
                            activation_scale=activation_scale,
                            output_layer_function=output_layer_function)

        data.close()
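
The neural_net_file is a NumPy .npz archive holding three arrays. A minimal sketch of writing a compatible file (shapes and values are placeholders; the exact layout NeuralNet expects is an assumption):

    import numpy as np

    structure = np.array([127, 30, 2])  # nodes per layer (placeholder)
    weights = np.array([np.zeros((127, 30)), np.zeros((30, 2))], dtype=object)
    biases = np.array([np.zeros(30), np.zeros(2)], dtype=object)
    # Note: loading object arrays back requires allow_pickle=True in recent NumPy
    np.savez('dummy_nn.npz', structure=structure, weights=weights, biases=biases)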
Example #5
def recorrect_rz(data, new_map_file=None):
    """Recompute the (r,z)(r,z) field distortion correction
    Be sure to redo the S1(x,y,z) correction after this as well, whether or not the S1(x,y,z) map changed!

    :param data: input dataframe
    :param new_map_file: file with (r,z)(r,z) correction map to use. Defaults to map currently in pax config.
    :return: dataframe with altered values in x, y, z (and few added columns for uncorrected position)
    """
    if new_map_file is None:
        # Specified here, since older pax versions do not have this map defined
        new_map_file = pax_config['WaveformSimulator']['rz_position_distortion_map']

    data = data.copy()
    add_uncorrected_position(data)

    # Compute correction for new map
    new_map = InterpolatingMap(data_file_name(new_map_file))

    r_corr = np.zeros(len(data))
    z_corr = np.zeros(len(data))
    _u_r = data._u_r.values
    _u_z = data._u_z.values
    for i in tqdm(range(len(data)), desc="Redoing (r,z) correction"):
        r_corr[i] = new_map.get_value(_u_r[i], _u_z[i], map_name='to_true_r')
        z_corr[i] = new_map.get_value(_u_r[i], _u_z[i], map_name='to_true_z')

    data['r'] = data._u_r + r_corr
    data['x'] = data.r * np.cos(data.theta)
    data['y'] = data.r * np.sin(data.theta)
    data['z'] = data._u_z + z_corr
    return data
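
Per the docstring, the S1(x,y,z) correction must be redone after any (r,z) recorrection. A sketch of the intended order (the map filename is illustrative):

    df = recorrect_rz(df, new_map_file='rz_distortion_map_new.json')
    df = recorrect_s1xyz(df)  # uses the pax config default S1 light yield map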
Example #7
    def startup(self):
        self.raw_data_files = []
        self.current_file_number = None
        self.current_filename = None
        self.current_first_event = None
        self.current_last_event = None

        input_name = utils.data_file_name(self.config['input_name'])
        if not os.path.exists(input_name):
            raise ValueError("Can't read from %s: it does not exist!" %
                             input_name)

        if not os.path.isdir(input_name):
            if not input_name.endswith('.' + self.file_extension):
                self.log.error("input_name %s does not end "
                               "with the expected file extension %s" %
                               (input_name, self.file_extension))
                return
            self.log.debug("InputFromFolder: Single file mode")
            self.init_file(input_name)

        else:
            self.log.debug("InputFromFolder: Directory mode")
            file_names = glob.glob(
                os.path.join(input_name, "*." + self.file_extension))
            # Remove the pax_info.json file from the file list-- the JSON I/O will thank us
            file_names = [
                fn for fn in file_names if not fn.endswith('pax_info.json')
            ]
            file_names.sort()
            self.log.debug("InputFromFolder: Found these files: %s",
                           str(file_names))
            if len(file_names) == 0:
                raise ValueError(
                    "InputFromFolder: No %s files found in input directory %s!"
                    % (self.file_extension, input_name))
            for fn in file_names:
                if 'trigger_monitor_data.' in fn:
                    continue
                if 'temp.' in fn:
                    self.log.warning(
                        "Temporary raw data file found in directory: this data is still being built "
                        "or has crashed while building!")
                    continue
                self.init_file(fn)

        # Sort the files by first event number, so events are read in order.
        # File names sort lexically, which in some cases does not reflect the event order (see issue #345).
        self.raw_data_files = sorted(self.raw_data_files,
                                     key=itemgetter('first_event'))

        # Select the first file
        self.select_file(0)

        # Set the number of total events
        self.number_of_events = sum(
            [fr['n_events'] for fr in self.raw_data_files])
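
For reference, a sketch of a directory this startup routine would accept (file names and the extension are illustrative):

    run_0001/
        000000-000999.zip         read; ordered by first event number
        001000-001999.zip         read
        pax_info.json             metadata; removed from the file list
        trigger_monitor_data.zip  skipped
        temp.002000-002999.zip    skipped with a warning (still being built, or crashed while building)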
Example #8
    def startup(self):
        if not have_root:
            raise RuntimeError("Can't read MC ROOT files if you do not have root!")

        self.config.setdefault('add_to_z', 0)
        self.log.warning('This plugin is completely untested and will probably crash!')
        filename = self.config['input_name']

        self.f = ROOT.TFile(utils.data_file_name(filename))
        self.t = self.f.Get("events/events")  # new MC structure, 160622
        WaveformSimulator.startup(self)
        self.number_of_events = self.t.GetEntries() * self.config['event_repetitions']
Example #9
    def startup(self):
        # Call original startup function
        PosRecTopPatternFit.startup(self)

        # Get the Fax config
        c = self.processor.simulator.config
        qes = np.array(c['quantum_efficiencies'])

        # Change the pattern fitter instance so it uses TPFF
        self.pf = PatternFitter(
            filename=utils.data_file_name(c['s2_fitted_patterns_file']),
            zoom_factor=c.get('s2_fitted_patterns_zoom_factor', 1),
            adjust_to_qe=qes[c['channels_top']],
            default_errors=c['relative_qe_error'] + c['relative_gain_error'])
Example #11
    def __init__(self, zoom_multiplier=1):
        # Get some settings from the XENON1T detector configuration
        config = load_configuration('XENON1T')

        # The per-PMT S2 LCE maps (and zoom factor which is a technical detail)
        lce_maps = config['WaveformSimulator']['s2_patterns_file']
        lce_map_zoom = config['WaveformSimulator']['s2_patterns_zoom_factor']

        # Simulate the right PMT response
        qes = np.array(config['DEFAULT']['quantum_efficiencies'])
        top_pmts = config['DEFAULT']['channels_top']
        errors = config['DEFAULT']['relative_qe_error'] + config['DEFAULT'][
            'relative_gain_error']

        # Set up the PatternFitter which samples the LCE maps
        self.pf = PatternFitter(filename=utils.data_file_name(lce_maps),
                                zoom_factor=lce_map_zoom * zoom_multiplier,
                                adjust_to_qe=qes[top_pmts],
                                default_errors=errors)
Example #12
    def startup(self):
        """
        The startup routine of the WaveformSimulatorFromCSV plugin
        """

        # Open the instructions file
        filename = self.config['input_name']
        self.dataset_name = os.path.basename(filename)
        self.instructions_file = open(utils.data_file_name(filename), 'r')
        #
        # Slurp the entire instructions file, so we know the number of events
        self.instruction_reader = csv.DictReader(self.instructions_file)
        self.instructions = []
        #
        # Loop over lines, make instructions
        instruction_number = 0
        instruction = []
        for p in self.instruction_reader:
            p['g4_id'] = -1  # create fake g4_id=-1 for csv input
            if p['depth'] == 'random':
                p['z'] = 'random'
            else:
                p['z'] = -1 * float(p['depth'])
            del p['depth']
            if int(p['instruction']) == instruction_number:
                # Deposition is part of the previous instruction
                instruction.append(p)
            else:
                # New instruction reached: store the completed one
                if instruction:
                    self.instructions.append(instruction)
                instruction_number = int(p['instruction'])
                instruction = [p]
        # For the final instruction
        self.instructions.append(instruction)

        self.number_of_events = len(
            self.instructions) * self.config['event_repetitions']
        WaveformSimulator.startup(self)
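
The instruction CSV groups one or more depositions per instruction number; only the 'instruction' and 'depth' columns are handled explicitly above, so the remaining columns sketched here are hypothetical:

    instruction,x,y,depth
    0,1.2,-5.3,40.0
    0,1.3,-5.2,41.5
    1,0.0,0.0,random

Depositions sharing instruction 0 form one event; depth='random' is passed through as z='random', otherwise z = -depth.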
Example #14
    def startup(self):
        """ Initialize the neural net.
        """
        if self.config['pmt_0_is_fake']:
            self.input_channels = self.pmts[1:]
        else:
            self.input_channels = self.pmts

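        # This variant hardcodes the settings that the configurable version above
        # reads from config: the activation input scale and whether the activation
        # function is applied to the output layer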
        activation_scale = 0.5
        output_layer_function = True
        self.nn_output_unit = self.config['nn_output_unit']

        # Load the file defining the structure (number of nodes per layer)
        # as well as the weights and biases of the neural network
        data = np.load(utils.data_file_name(self.config['neural_net_file']))

        self.nn = NeuralNet(structure=data['structure'],
                            weights=data['weights'],
                            biases=data['biases'],
                            activation_scale=activation_scale,
                            output_layer_function=output_layer_function)

        data.close()
Example #15
def recorrect_s1xyz(
        data,
        new_map_file=pax_config['WaveformSimulator']['s1_light_yield_map']):
    """Recompute the S1(x,y,z) light yield correction.
    If you want to redo (r,z)(r,z), do it before doing this!

    :param data: Dataframe. Only Basics minitree required.
    :param new_map_name: Filename of map you want to use for the correction.
    :return: Dataframe with changed values in cs1 column
    """

    new_map = InterpolatingMap(data_file_name(new_map_file))

    # Correction is a *division* factor (map contains light yield)
    x = data.x.values
    y = data.y.values
    z = data.z.values
    correction = np.zeros(len(data))
    for i in tqdm(range(len(data)), desc='Redoing S1(x,y,z) correction'):
        correction[i] = 1 / new_map.get_value(x[i], y[i], z[i])

    data['cs1'] = data['s1'] * correction

    return data
Example #16
    def __init__(self,
                 filename,
                 zoom_factor=1,
                 adjust_to_qe=None,
                 default_errors=None):
        """Initialize a pattern map file from filename.
        Format of the file is very similar to InterpolatingMap; a (gzip compressed) json containing:
            'coordinate_system' :   [['x', (x_min, x_max, n_x)], ['y',...
            'map' :                 [[[valuex1y1pmt1, valuex1y1pmt2, ...], ...], ...]
            'name':                 'Nice file with maps',
            'description':          'Say what the maps are, who you are, your favorite food, etc',
            'timestamp':            unix epoch seconds timestamp
        where x_min is the lowest x coordinate of a point, x_max the highest, and n_x the number of points.
        zoom_factor is the factor by which the spatial dimensions of the map will be upsampled.

        adjust_to_qe: array of the same length as the number of PMTs in the map;
            we'll adjust the patterns to account for these QEs, upweighting PMTs with higher QEs.
            Obviously this should be None if the map already includes QE effects (e.g. if it is data-derived)!

        default_errors: array of the same length as the number of pmts in the map;
            This is the default factor which will be applied to obtain the squared systematic errors in the goodness
            of fit statistic, as follows:
                squared_systematic_errors = (areas_observed * default_errors)**2
        """
        self.log = logging.getLogger('PatternFitter')
        with gzip.open(utils.data_file_name(filename)) as infile:
            json_data = json.loads(infile.read().decode())

        self.data = np.array(json_data['map'])
        self.log.debug('Loaded pattern file named: %s' % json_data['name'])
        self.log.debug('Description:\n    ' +
                       re.sub(r'\n', r'\n    ', json_data['description']))
        self.log.debug('Data shape: %s' % str(self.data.shape))
        self.log.debug('Will zoom in by factor %s' % zoom_factor)
        # Spatial dimensions (the remaining axis indexes the sampling points)
        self.dimensions = len(json_data['coordinate_system'])

        # Zoom the spatial map using linear interpolation, if desired
        if zoom_factor != 1:
            self.data = image_zoom(self.data,
                                   zoom=[zoom_factor] * self.dimensions + [1],
                                   order=1)

        # Adjust the expected patterns to the PMT's quantum efficiencies, if desired
        # No need to re-normalize: will be done in each gof computation anyway
        if adjust_to_qe is not None:
            self.data *= adjust_to_qe[[np.newaxis] * self.dimensions]

        # Store index starts and distances for quick access, assuming uniform grid spacing
        self.coordinate_data = []
        for dim_i, (name, (start, stop, n_points)) in enumerate(json_data['coordinate_system']):
            n_points *= zoom_factor
            if n_points != self.data.shape[dim_i]:
                raise ValueError(
                    "Map interpretation error: %d points expected along %s, but map is %d points long"
                    % (n_points, name, self.data.shape[dim_i]))
            self.coordinate_data.append(
                CoordinateData(minimum=start,
                               maximum=stop,
                               n_points=n_points,
                               point_spacing=(stop - start) / (n_points - 1)))
        self.log.debug('Coordinate ranges: %s' % ', '.join([
            '%s-%s (%d points)' % (cd.minimum, cd.maximum, cd.n_points)
            for cd in self.coordinate_data
        ]))

        # TODO: Technically we should zero the points outside the tpc bounds again:
        # some LCE may have leaked into this region due to upsampling... but it doesn't matter:
        # if it causes a bias, it will push some events that are already far outside the fiducial volume
        # even further out.
        self.n_points = self.data.shape[-1]
        self.default_pmt_selection = np.ones(self.n_points, dtype=bool)
        if default_errors is None:
            default_errors = 0
        self.default_errors = default_errors
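
A sketch of producing a file in the format this constructor describes (a 2D map for two PMTs; all values are placeholders):

    import gzip
    import json
    import time

    import numpy as np

    pattern_map = {
        'coordinate_system': [['x', (-50, 50, 101)], ['y', (-50, 50, 101)]],
        'map': np.ones((101, 101, 2)).tolist(),  # dummy uniform LCE for 2 PMTs
        'name': 'Dummy pattern map',
        'description': 'Placeholder map for illustration',
        'timestamp': time.time(),
    }
    with gzip.open('dummy_patterns.json.gz', 'wt') as outfile:
        json.dump(pattern_map, outfile)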
Example #17
    def startup(self):
        aftmap_filename = utils.data_file_name('XENON1T_s1_aft_xyz_20170808.json')
        self.aft_map = InterpolatingMap(aftmap_filename)
Example #18
    def startup(self):
        aftmap_filename = utils.data_file_name('s1_aft_xyz_XENON1T_06Mar2017.json')
        self.aft_map = InterpolatingMap(aftmap_filename)
        self.low_pe_threshold = 10  # below this in PE, transition to hits
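
The loaded map can then be queried like any InterpolatingMap. A sketch (the coordinates are illustrative; aft presumably stands for the S1 area fraction on the top array):

    expected_aft = self.aft_map.get_value(x, y, z)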
Example #19
    def __init__(self, config_to_init):
        c = self.config = config_to_init

        # Should we repeat events?
        if 'event_repetitions' not in c:
            c['event_repetitions'] = 1

        # Primary excimer fraction from Nest Version 098
        # See G4S1Light.cc line 298
        density = c['liquid_density'] / (units.g / units.cm**3)
        excfrac = 0.4 - 0.11131 * density - 0.0026651 * density**2  # primary / secondary excimers
        excfrac = 1 / (1 + excfrac)  # primary / all excimers
        # primary / all excimers that produce a photon:
        excfrac /= 1 - (1 - excfrac) * (1 - c['s1_ER_recombination_fraction'])
        c['s1_ER_primary_excimer_fraction'] = excfrac
        log.debug('Inferred s1_ER_primary_excimer_fraction %s' % excfrac)

        # Recombination time from NEST 2014
        # 3.5 seems fishy, they fit an exponential to data, but in the code they use a non-exponential distribution...
        efield = (c['drift_field'] / (units.V / units.cm))
        c['s1_ER_recombination_time'] = 3.5 / 0.18 * (
            1 / 20 + 0.41) * math.exp(-0.009 * efield)
        log.debug('Inferred s1_ER_recombination_time %s ns' %
                  c['s1_ER_recombination_time'])

        # Which channels stand to receive any photons?
        channels_for_photons = c['channels_in_detector']['tpc']
        if c['pmt_0_is_fake']:
            channels_for_photons = [
                ch for ch in channels_for_photons if ch != 0
            ]
        if c.get('magically_avoid_dead_pmts', False):
            channels_for_photons = [
                ch for ch in channels_for_photons if c['gains'][ch] > 0
            ]
        if c.get('magically_avoid_s1_excluded_pmts', False) and \
           'channels_excluded_for_s1' in c:
            channels_for_photons = [
                ch for ch in channels_for_photons
                if ch not in c['channels_excluded_for_s1']
            ]
        c['channels_for_photons'] = channels_for_photons

        # Determine sensible length of a pmt pulse to simulate
        dt = c['sample_duration']
        if c['pe_pulse_model'] == 'exponential':
            c['samples_before_pulse_center'] = math.ceil(
                c['pulse_width_cutoff'] * c['pmt_rise_time'] / dt)
            c['samples_after_pulse_center'] = math.ceil(
                c['pulse_width_cutoff'] * c['pmt_fall_time'] / dt)
        else:
            # Build the custom PMT pulse model
            ts = np.array(c['pe_pulse_ts'])
            ys = np.array(c['pe_pulse_ys'])

            # Integrate and normalize it
            # Note we're storing the integrated pulse, while the user gives the regular pulse.
            c['pe_pulse_function'] = interp1d(ts,
                                              np.cumsum(ys) / np.sum(ys),
                                              bounds_error=False,
                                              fill_value=(0, 1))

        log.debug(
            'Simulating %s samples before and %s samples after PMT pulse centers.'
            % (c['samples_before_pulse_center'],
               c['samples_after_pulse_center']))

        # Load real noise data from file, if requested
        if c['real_noise_file']:
            self.noise_data = np.load(
                utils.data_file_name(c['real_noise_file']))['arr_0']
            # The silly XENON100 PMT offset again: it's relevant for indexing the array of noise data
            # (which is one row per channel)
            self.channel_offset = 1 if c['pmt_0_is_fake'] else 0

        # Load light yields
        self.s1_light_yield_map = InterpolatingMap(
            utils.data_file_name(c['s1_light_yield_map']))
        self.s2_light_yield_map = InterpolatingMap(
            utils.data_file_name(c['s2_light_yield_map']))

        # Load transverse field (r,z) distortion map
        if c.get('rz_position_distortion_map'):
            self.rz_position_distortion_map = InterpolatingMap(
                utils.data_file_name(c['rz_position_distortion_map']))
        else:
            self.rz_position_distortion_map = None

        # Init s2 per pmt lce map
        qes = np.array(c['quantum_efficiencies'])
        if c.get('s2_patterns_file', None) is not None:
            self.s2_patterns = PatternFitter(
                filename=utils.data_file_name(c['s2_patterns_file']),
                zoom_factor=c.get('s2_patterns_zoom_factor', 1),
                adjust_to_qe=qes[c['channels_top']],
                default_errors=c['relative_qe_error'] +
                c['relative_gain_error'])
        else:
            self.s2_patterns = None

        ##
        # Load pdf for single photoelectron, if available
        ##
        if c.get('photon_area_distribution'):
            # Extract the spe pdf from a csv file into a pandas dataframe
            spe_shapes = pd.read_csv(
                utils.data_file_name(c['photon_area_distribution']))

            # Create a converter array from uniform random numbers to SPE gains (one interpolator per channel)
            # Scale the distributions so that they have an SPE mean of 1 and then calculate the cdf
            # We have set the distribution of the off channels to be explicitly 0 as a precaution
            # as of now these channels are
            # 1, 2, 12, 26, 34, 62, 65, 79, 86, 88, 102, 118, 130, 134, 135, 139,
            # 148, 150, 152, 162, 178, 183, 190, 198, 206, 213, 214, 234, 239, 244

            uniform_to_pe_arr = []
            # Skip the first column, which is the 'charge' header
            for ch in spe_shapes.columns[1:]:
                if spe_shapes[ch].sum() > 0:
                    mean_spe = (spe_shapes['charge'] *
                                spe_shapes[ch]).sum() / spe_shapes[ch].sum()
                    scaled_bins = spe_shapes['charge'] / mean_spe
                    cdf = np.cumsum(spe_shapes[ch]) / np.sum(spe_shapes[ch])
                else:
                    # if sum is 0, just make some dummy axes to pass to interpolator
                    cdf = np.linspace(0, 1, 10)
                    scaled_bins = np.zeros_like(cdf)

                uniform_to_pe_arr.append(interp1d(cdf, scaled_bins))
            if uniform_to_pe_arr != []:
                self.uniform_to_pe_arr = np.array(uniform_to_pe_arr)
            else:
                self.uniform_to_pe_arr = None

        else:
            self.uniform_to_pe_arr = None

        # Init s1 pattern maps
        # We're assuming the map is MC-derived, so we adjust for QE (just like for the S2 maps)
        log.debug("Initializing s1 patterns...")
        if c.get('s1_patterns_file', None) is not None:
            self.s1_patterns = PatternFitter(
                filename=utils.data_file_name(c['s1_patterns_file']),
                zoom_factor=c.get('s1_patterns_zoom_factor', 1),
                adjust_to_qe=qes[c['channels_in_detector']['tpc']],
                default_errors=c['relative_qe_error'] +
                c['relative_gain_error'])
        else:
            self.s1_patterns = None

        ##
        # Luminescence time distribution precomputation
        ##

        # For which gas gaps do we have to compute the luminescence time distribution?
        gas_gap_warping_map = c.get('gas_gap_warping_map', None)
        base_dg = c['elr_gas_gap_length']
        if gas_gap_warping_map is not None:
            with open(utils.data_file_name(gas_gap_warping_map),
                      mode='rb') as infile:
                mh = pickle.load(infile)
            self.gas_gap_length = lambda x, y: base_dg + mh.lookup([x], [y]).item()
            self.luminescence_converters_dgs = np.linspace(
                mh.histogram.min(), mh.histogram.max(),
                c.get('n_luminescence_time_converters', 20)) + base_dg
        else:
            self.gas_gap_length = lambda x, y: base_dg
            self.luminescence_converters_dgs = np.array([base_dg])

        self.luminescence_converters = []

        # Calculate particle number density in the gas (ideal gas law)
        number_density_gas = c['pressure'] / (units.boltzmannConstant *
                                              c['temperature'])

        # Slope of the drift velocity vs field relation
        alpha = c['gas_drift_velocity_slope'] / number_density_gas

        @np.vectorize
        def yield_per_dr(E):
            # Gives something proportional to the yield, not the yield itself!
            y = E / (units.kV / units.cm) - 0.8 * c['pressure'] / units.bar
            return max(y, 0)

        rA = c['anode_field_domination_distance']
        rW = c['anode_wire_radius']

        for dg in self.luminescence_converters_dgs:
            dl = c['gate_to_anode_distance'] - dg
            rL = dg

            # Voltage over the gas gap
            # Voltage over the gas gap (from eq. 1 in the SE note, times dg)
            V = c['anode_voltage'] / (1 + dl / dg / c['lxe_dielectric_constant'])

            # Field in the gas gap. r is distance from anode center: start at r=rL
            E0 = V / (rL - rA + rA * (np.log(rA) - np.log(rW)))

            @np.vectorize
            def Er(r):
                if r < rW:
                    return 0
                elif rW <= r < rA:
                    return E0 * rA / r
                else:
                    return E0

            # Small numeric calculation to get emission time cdf
            R = np.linspace(rL, rW, 1000)
            E = Er(R)
            RDOT = alpha * E
            T = np.cumsum(-np.diff(R)[0] / RDOT)  # dt = dx / v
            yield_density = yield_per_dr(E) * RDOT  # density/dt = density/dx * dx/dt
            yield_density /= yield_density.sum()

            # Invert CDF using interpolator
            uniform_to_emission_time = interp1d(np.cumsum(yield_density),
                                                T,
                                                fill_value=0,
                                                bounds_error=False)

            self.luminescence_converters.append(uniform_to_emission_time)

        self.clear_signals_queue()
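
Each stored converter is an inverted CDF, so emission times can be drawn by feeding it uniform random numbers. A sketch (the gap index and sample count are illustrative):

    # Draw 1000 luminescence emission times for the first precomputed gas gap
    uniform_to_emission_time = self.luminescence_converters[0]
    emission_times = uniform_to_emission_time(np.random.rand(1000))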