def setup(self):
    self.mean_pe_photon = self.config['mean_pe_per_photon']

    # Getting S1 AFT maps
    self.s1_aft_map = straxen.InterpolatingMap(
        straxen.get_resource(
            self.config['s1_aft_map'],
            fmt=self._infer_map_format(self.config['s1_aft_map'])))

    # Getting optical maps
    self.s1_pattern_map = straxen.InterpolatingMap(
        straxen.get_resource(
            self.config['s1_optical_map'],
            fmt=self._infer_map_format(self.config['s1_optical_map'])))
    self.s2_pattern_map = straxen.InterpolatingMap(
        straxen.get_resource(
            self.config['s2_optical_map'],
            fmt=self._infer_map_format(self.config['s2_optical_map'])))

    # Getting gain model to get dead PMTs
    self.to_pe = straxen.get_correction_from_cmt(self.run_id, self.config['gain_model'])
    self.dead_PMTs = np.where(self.to_pe == 0)[0]
    self.pmtbool = ~np.in1d(np.arange(0, self.config['n_tpc_pmts']), self.dead_PMTs)
    self.pmtbool_top = self.pmtbool[:self.config['n_top_pmts']]
    self.pmtbool_bottom = self.pmtbool[self.config['n_top_pmts']:self.config['n_tpc_pmts']]
def setup(self):
    self.electron_drift_velocity = get_correction_from_cmt(
        self.run_id, self.config['electron_drift_velocity'])
    self.electron_drift_time_gate = get_correction_from_cmt(
        self.run_id, self.config['electron_drift_time_gate'])
    self.mean_pe_photon = self.config['mean_pe_per_photon']

    # Getting S1 AFT maps
    self.s1_aft_map = straxen.InterpolatingMap(
        straxen.get_resource(
            self.config['s1_aft_map'],
            fmt=self._infer_map_format(self.config['s1_aft_map'])))

    # Getting optical maps
    self.s1_pattern_map = straxen.InterpolatingMap(
        straxen.get_resource(
            self.config['s1_optical_map'],
            fmt=self._infer_map_format(self.config['s1_optical_map'])))
    self.s2_pattern_map = straxen.InterpolatingMap(
        straxen.get_resource(
            self.config['s2_optical_map'],
            fmt=self._infer_map_format(self.config['s2_optical_map'])))

    # Getting S2 data-driven tensorflow models
    downloader = straxen.MongoDownloader()
    self.model_file = downloader.download_single(self.config['s2_tf_model'])
    with tempfile.TemporaryDirectory() as tmpdirname:
        tar = tarfile.open(self.model_file, mode="r:gz")
        tar.extractall(path=tmpdirname)

        import tensorflow as tf

        def _logl_loss(patterns_true, likelihood):
            return likelihood / 10.

        self.model = tf.keras.models.load_model(
            tmpdirname, custom_objects={"_logl_loss": _logl_loss})
        self.model_chi2 = tf.keras.Model(
            self.model.inputs,
            self.model.get_layer('Likelihood').output)

    # Getting gain model to get dead PMTs
    self.to_pe = straxen.get_correction_from_cmt(self.run_id, self.config['gain_model'])
    self.dead_PMTs = np.where(self.to_pe == 0)[0]
    self.pmtbool = ~np.in1d(np.arange(0, self.config['n_tpc_pmts']), self.dead_PMTs)
    self.pmtbool_top = self.pmtbool[:self.config['n_top_pmts']]
    self.pmtbool_bottom = self.pmtbool[self.config['n_top_pmts']:self.config['n_tpc_pmts']]
def test_format(self):
    """
    We did not do this correctly before, so let's make sure to do it
    right this time.
    """
    json_as_text = get_resource(self.json_file, fmt='text')
    self.assertIsInstance(json_as_text, str)

    # Load it from memory
    json_as_text_from_mem = get_resource(self.json_file, fmt='text')
    self.assertEqual(json_as_text, json_as_text_from_mem)

    # Now check what we get if we load the same file as JSON
    json_as_dict = get_resource(self.json_file, fmt='json')
    self.assertIsInstance(json_as_dict, dict)
    self.assertEqual(json_as_dict, get_resource(self.json_file, fmt='json'))
def get_elife(run_id, elife_conf):
    if isinstance(elife_conf, tuple) and len(elife_conf) == 3:
        # We want to use the corrections management tool
        is_nt = elife_conf[-1]
        cmt = straxen.CorrectionsManagementServices(is_nt=is_nt)
        e = cmt.get_corrections_config(run_id, elife_conf[:2])
    elif isinstance(elife_conf, str):
        warn("get_elife will be replaced by CorrectionsManagementServices",
             DeprecationWarning, 2)
        # Let's remove these functions and only rely on the CMT in the future
        x = straxen.get_resource(elife_conf, fmt='npy')
        run_index = np.where(x['run_id'] == int(run_id))[0]
        if not len(run_index):
            # Electron lifetime not known: using placeholder
            e = 623e3
        else:
            e = x[run_index[0]]['e_life']
    else:
        raise ValueError(
            'Wrong elife model. Either specify a string (url) or the '
            'Corrections Management Tools format: '
            '(model_type->str, model_config->str, is_nT->bool)')
    return e
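For illustration, a minimal usage sketch of the two accepted `elife_conf` forms; the run id and the aux-file URL below are placeholders, not real configuration values.

# Hedged example, both inputs are hypothetical placeholders.
# CMT-style tuple: (model_type, model_config, is_nT)
elife = get_elife('008900', ('elife', 'ONLINE', True))

# Legacy 1T-style: a URL/path to a .npy file with 'run_id' and 'e_life' fields
elife = get_elife('008900', 'https://example.org/strax_files/elife.npy')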
def test_up_and_download(self):
    with self.assertRaises(ConnectionError):
        # Should be empty!
        self.downloader.test_find()

    file_name = 'test.txt'
    self.assertFalse(self.downloader.md5_stored(file_name))
    self.assertEqual(self.downloader.compute_md5(file_name), '')

    file_content = 'This is a test'
    with open(file_name, 'w') as f:
        f.write(file_content)
    self.assertTrue(os.path.exists(file_name))

    self.uploader.upload_from_dict({file_name: os.path.abspath(file_name)})
    self.assertTrue(self.uploader.md5_stored(file_name))
    self.assertTrue(self.downloader.config_exists(file_name))

    path = self.downloader.download_single(file_name)
    path_hr = self.downloader.download_single(file_name,
                                              human_readable_file_name=True)
    abs_path = self.downloader.get_abs_path(file_name)

    for p in [path, path_hr, abs_path]:
        self.assertTrue(os.path.exists(p))

    read_file = straxen.get_resource(path)
    self.assertTrue(file_content == read_file)

    os.remove(file_name)
    self.assertFalse(os.path.exists(file_name))

    self.downloader.test_find()
    self.downloader.download_all()

    # Now the test on init should work, let's double check
    straxen.MongoDownloader(
        collection=self.collection,
        file_database=None,
        _test_on_init=True,
    )
def get_metadata(self):
    md = get_resource(self.rundoc_file, fmt='json')
    # This is a flat dict but we need to have a datetime object;
    # since this is only a test, let's just replace it with a placeholder.
    md['start'] = datetime.now()
    return md
def __init__(self, context=None, use_progress_bar=True):
    """
    Interface to access the XENONnT slow control data via python.

    :param context: Context you are using, e.g. st. This is needed
        if you would like to query data via run_ids.
    :param use_progress_bar: Use a progress bar in the Scada interface
    """
    self.we_are_straxen = False
    self._token_expire_time = None
    self._token = None
    self.pmt_file_found = True

    try:
        self.SCLogin_url = straxen.uconfig.get('scada', 'sclogin_url')
        self.SCData_URL = straxen.uconfig.get('scada', 'scdata_url')
        self.SCLastValue_URL = straxen.uconfig.get('scada', 'sclastvalue_url')
    except ValueError as e:
        raise ValueError('Cannot load SCADA information from your xenon '
                         'config. SCADAInterface cannot be used.') from e

    try:
        # Load parameters from the database.
        self.pmt_file = straxen.get_resource('PMTmap_SCADA.json', fmt='json')
    except FileNotFoundError:
        warnings.warn('Cannot find PMT map, "find_pmt_names" cannot be used.')
        self.pmt_file_found = False

    # Use a tqdm progress bar if requested. If a user does not want
    # a progress bar, just wrap it by a tuple.
    self._use_progress_bar = use_progress_bar
    self.context = context
    self.we_are_straxen = True
    self.get_new_token()
def __init__(self, context=None, use_progress_bar=True):
    """
    Interface to access the XENONnT slow control data via python.

    :param context: Context you are using, e.g. st. This is needed
        if you would like to query data via run_ids.
    :param use_progress_bar: Use a progress bar in the Scada interface
    """
    try:
        self.SCData_URL = uconfig.get('scada', 'scdata_url')
        self.SCLastValue_URL = uconfig.get('scada', 'sclastvalue_url')
        self.SCADA_SECRETS = dict(QueryType=uconfig.get('scada', 'querytype'),
                                  username=uconfig.get('scada', 'username'),
                                  api_key=uconfig.get('scada', 'api_key'))

        # Load parameters from the database.
        self.pmt_file = straxen.get_resource('PMTmap_SCADA.json', fmt='json')
    except ValueError as e:
        raise ValueError('Cannot load SCADA information from your xenon '
                         'config. SCADAInterface cannot be used.') from e

    # Use a tqdm progress bar if requested. If a user does not want
    # a progress bar, just wrap it by a tuple.
    self._use_progress_bar = use_progress_bar
    self.context = context
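A hedged instantiation sketch; it assumes the surrounding class is straxen's SCADAInterface (as the error message suggests) and that a context is available from the standard online helper.

import straxen

st = straxen.contexts.xenonnt_online()  # assumed way to obtain a context
sc = straxen.SCADAInterface(context=st, use_progress_bar=False)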
def set_config(self, ):
    self.config.update(
        straxen.get_resource(self.config['fax_config'], fmt='json'))
    overrides = self.config['fax_config_override']
    if overrides is not None:
        self.config.update(overrides)

    # Backwards compatibility
    if 'field_distortion_on' in self.config and 'field_distortion_model' not in self.config:
        self.config.update({
            'field_distortion_model':
                "inverse_fdc" if self.config['field_distortion_on'] else "none"
        })

    # Update gains to the nT defaults
    self.to_pe = straxen.get_correction_from_cmt(
        self.run_id, self.config['gain_model_mc'])
    adc_2_current = (self.config['digitizer_voltage_range']
                     / 2**(self.config['digitizer_bits'])
                     / self.config['pmt_circuit_load_resistor'])
    self.config['gains'] = np.divide(adc_2_current,
                                     self.to_pe,
                                     out=np.zeros_like(self.to_pe),
                                     where=self.to_pe != 0)

    if self.config['seed']:
        np.random.seed(self.config['seed'])

    # We hash the config to load resources. Channel map is immutable and cannot be hashed
    self.config['channel_map'] = dict(self.config['channel_map'])
    self.config['channel_map']['sum_signal'] = 800
    self.config['channels_bottom'] = np.arange(self.config['n_top_pmts'],
                                               self.config['n_tpc_pmts'])

    # Update some values stored in CMT
    if self.config['fax_config_override_from_cmt'] is not None:
        for fax_field, cmt_option in self.config['fax_config_override_from_cmt'].items():
            if (fax_field in ['fdc_3d', 's1_light_yield_map']
                    and self.config.get('default_reconstruction_algorithm', False)):
                cmt_option = tuple(['suffix',
                                    self.config['default_reconstruction_algorithm'],
                                    *cmt_option])
            cmt_value = straxen.get_correction_from_cmt(self.run_id, cmt_option)
            log.warning(f'Replacing {fax_field} with CMT option {cmt_option} to {cmt_value}')
            self.config[fax_field] = cmt_value
def get_resource(name: str, fmt: str = 'text', **kwargs):
    """
    Fetch a straxen resource.

    Allow a direct download using <fmt='abs_path'>, otherwise kwargs
    are passed directly to straxen.get_resource.
    """
    if fmt == 'abs_path':
        downloader = straxen.MongoDownloader()
        return downloader.download_single(name)
    return straxen.get_resource(name, fmt=fmt, **kwargs)
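For illustration, a hedged usage sketch of this wrapper; the file names are placeholders.

# Download to the local resource cache and return the absolute path
local_path = get_resource('some_map.json.gz', fmt='abs_path')

# Or load the parsed content directly via straxen.get_resource
mapping = get_resource('some_config.json', fmt='json')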
def get_elife(run_id, elife_conf):
    # 1T support for electron lifetimes from a file.
    # Let's remove these functions and only rely on the CMT in the future.
    x = straxen.get_resource(elife_conf, fmt='npy')
    run_index = np.where(x['run_id'] == int(run_id))[0]
    if not len(run_index):
        # Electron lifetime not known: using placeholders
        e = 623e3
    else:
        e = x[run_index[0]]['e_life']
    return float(e)
def test_1T_elife():
    """Test elife from CMT DB against historical data (aux file)"""
    elife_conf = ('elife_xenon1t', 'ONLINE', False)
    elife_cmt = straxen.get_correction_from_cmt(test_run_id_1T, elife_conf)
    elife_file = aux_repo + '3548132b55f81a43654dba5141366041e1daaf01/strax_files/elife.npy'
    x = straxen.get_resource(elife_file, fmt='npy')
    run_index = np.where(x['run_id'] == int(test_run_id_1T))[0]
    elife = x[run_index[0]]['e_life']
    mes = 'Elife values do not match. Please check'
    assert elife_cmt == elife, mes
def setup(self):
    if isinstance(self.config['nveto_pmt_position_map'], str):
        # Load PMT settings file:
        npmt_pos = straxen.get_resource(self.config['nveto_pmt_position_map'],
                                        fmt='csv')
    elif isinstance(self.config['nveto_pmt_position_map'], dict):
        # Testing support
        npmt_pos = pd.DataFrame(self.config['nveto_pmt_position_map'])
    else:
        raise ValueError(f"{self.config['nveto_pmt_position_map']} is not understood")

    # Use records instead of a dataframe.
    self.pmt_properties = npmt_pos.to_records(index=False)
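A hedged sketch of the dict form accepted for testing; the channel numbers and coordinates below are made up.

# Hypothetical test configuration: a tiny fake PMT position table
st.set_config({
    'nveto_pmt_position_map': {
        'channel': [2000, 2001],
        'x': [0.0, 1.0],
        'y': [0.0, 1.0],
        'z': [0.0, 1.0],
    }
})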
def __init__(self,
             events=None,
             hitlets=None,
             run_id=0,
             channel_range=(2000, 2119),
             pmt_map='nveto_pmt_position.csv',
             plot_extension='bokeh'):
    """
    Class to plot an interactive nveto display.

    :param events: Events which should be plotted. Can also be None in
        case the hitlet matrix and/or pattern map should be plotted
        separately.
    :param hitlets: Same as events, but hitlets_nv.
    :param run_id: Run_id which should be displayed in the title.
    :param channel_range: Channel range of the detector.
    :param pmt_map: PMT map which is loaded via straxen.get_resource.
        The map has to contain the channel number and xyz coordinates.
    :param plot_extension: Extension which should be used for rendering.
        Can be either bokeh or matplotlib. Default is bokeh to support
        dynamic plots.
    """
    self.import_holoviews()
    self.hv.extension(plot_extension)
    self.df_event_time = None
    self.df_event_properties = None
    self.hitlets = hitlets
    self.channel_range = channel_range
    self.run_id = run_id

    # Load PMT data:
    if isinstance(pmt_map, str):
        self.pmt_positions = straxen.get_resource(pmt_map, fmt='csv')
    elif isinstance(pmt_map, np.ndarray):
        self.pmt_positions = pmt_map
    else:
        raise ValueError('pmt_map not understood, has either to be '
                         f'a string or a numpy array, got "{pmt_map}".')

    if events is not None:
        self.event_df = straxen.convert_array_to_df(events)
    else:
        self.event_df = None

    if events is not None and hitlets is not None:
        self.hitlets_per_event = strax.split_by_containment(hitlets, events)
def test_1T_elife():
    """Test elife from CMT DB against historical data (aux file)"""
    if not straxen.utilix_is_configured():
        warn('Cannot do test because we have no access to the database.')
        return

    elife_conf = ('elife', 'ONLINE', False)
    elife_cmt = straxen.get_correction_from_cmt(test_run_id_1T, elife_conf)
    elife_file = aux_repo + '3548132b55f81a43654dba5141366041e1daaf01/strax_files/elife.npy'
    x = straxen.get_resource(elife_file, fmt='npy')
    run_index = np.where(x['run_id'] == int(test_run_id_1T))[0]
    elife = x[run_index[0]]['e_life']
    mes = 'Elife values do not match. Please check'
    assert elife_cmt == elife, mes
def get_to_pe(run_id, gain_model, n_tpc_pmts):
    if not isinstance(gain_model, tuple):
        raise ValueError("gain_model must be a tuple")
    if not len(gain_model) == 2:
        raise ValueError("gain_model must have two elements: "
                         "the model type and its specific configuration")
    model_type, model_conf = gain_model

    # Convert from ADC * samples to electrons emitted by PMT
    # see pax.dsputils.adc_to_pe for calculation
    adc_to_e = 17142.81741

    if model_type == 'disabled':
        # Somebody messed up
        raise RuntimeError("Attempt to use a disabled gain model")

    if model_type == 'to_pe_per_run':
        # Load a npy file specifying a run_id -> to_pe array
        to_pe_file = model_conf
        x = straxen.get_resource(to_pe_file, fmt='npy')
        run_index = np.where(x['run_id'] == int(run_id))[0]
        if not len(run_index):
            # Gains not known: using placeholders
            run_index = [-1]
        to_pe = x[run_index[0]]['to_pe']

    elif model_type == 'to_pe_constant':
        if model_conf in FIXED_TO_PE:
            return FIXED_TO_PE[model_conf]
        # Uniform gain, specified as a to_pe factor
        to_pe = np.ones(n_tpc_pmts, dtype=np.float32) * model_conf

    else:
        raise NotImplementedError(
            f"Gain model type {model_type} not implemented")

    if len(to_pe) != n_tpc_pmts:
        raise ValueError(
            f"Gain model {gain_model} resulted in a to_pe "
            f"of length {len(to_pe)}, but n_tpc_pmts is {n_tpc_pmts}!")
    return to_pe
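A hedged sketch of the two model types handled above; the run id, gain value, and file URL are placeholders.

# Constant gain of 0.005 PE per ADC*sample for every PMT
to_pe = get_to_pe('170204_1410', ('to_pe_constant', 0.005), 248)

# Per-run gains from a npy file with 'run_id' and 'to_pe' fields (placeholder URL)
to_pe = get_to_pe('170204_1410',
                  ('to_pe_per_run', 'https://example.org/to_pe.npy'),
                  248)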
def make_patternmap(map_file, fmt=None, method='WeightedNearestNeighbors', pmt_mask=None):
    """
    This is a special interpretation of the previous make_map(), designed
    for pattern-map loading with a provided PMT mask. This simplifies
    both the S1 and S2 cases.
    """
    # Keeps the tests from failing; we can probably overwrite it completely
    if isinstance(map_file, list):
        log.warning('Using dummy map with pattern mask! This has no effect here!')
        assert map_file[0] == 'constant dummy', (
            'Alternative file input can only be '
            '("constant dummy", constant: int, shape: list)')
        return DummyMap(map_file[1], map_file[2])
    elif isinstance(map_file, str):
        if fmt is None:
            fmt = parse_extension(map_file)
        map_data = deepcopy(straxen.get_resource(map_file, fmt=fmt))
        # XXX: straxen deals with pointers and caches resources, which means
        # resources are global. That is bad, so we make our own copy here
        # and modify it locally.
        if 'compressed' in map_data:
            compressor, dtype, shape = map_data['compressed']
            map_data['map'] = np.frombuffer(
                strax.io.COMPRESSORS[compressor]['decompress'](map_data['map']),
                dtype=dtype).reshape(*shape)
            del map_data['compressed']
        if 'quantized' in map_data:
            map_data['map'] = map_data['quantized'] * map_data['map'].astype(np.float32)
            del map_data['quantized']
        if not (pmt_mask is None):
            assert (map_data['map'].shape[-1] == pmt_mask.shape[0]), \
                "Error! Pattern map and PMT gains must have same dimensions!"
            map_data['map'][..., ~pmt_mask] = 0.0
        return straxen.InterpolatingMap(map_data, method=method)
    else:
        raise TypeError("Can't handle map_file except a string or a list")
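A hedged sketch of masking dead PMTs when loading a pattern map; the toy gains and the file name are placeholders.

import numpy as np

gains = np.array([0.005, 0.0, 0.006])  # toy gains; 0 marks a dead PMT
pmt_mask = gains > 0
pattern_map = make_patternmap('some_s2_patterns.pkl',  # placeholder file name
                              fmt='pkl',
                              pmt_mask=pmt_mask)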
def make_map(map_file, fmt=None, method='WeightedNearestNeighbors'):
    """Fetch and make an instance of InterpolatingMap based on map_file.

    Alternatively, map_file can be a list of
    ["constant dummy", constant: int, shape: list]
    to return an instance of DummyMap."""
    if isinstance(map_file, list):
        assert map_file[0] == 'constant dummy', (
            'Alternative file input can only be '
            '("constant dummy", constant: int, shape: list)')
        return DummyMap(map_file[1], map_file[2])
    elif isinstance(map_file, str):
        if fmt is None:
            fmt = parse_extension(map_file)
        log.debug(f'Initialize map interpolator for file {map_file}')
        map_data = straxen.get_resource(map_file, fmt=fmt)
        return straxen.InterpolatingMap(map_data, method=method)
    else:
        raise TypeError("Can't handle map_file except a string or a list")
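For illustration, a hedged sketch of the two inputs make_map accepts; the map file name is a placeholder.

# Real map loaded from a resource file (placeholder name)
fdc_map = make_map('some_fdc_map.json.gz', fmt='json.gz')

# Dummy map that returns the constant 0.25 with output shape [254]
dummy = make_map(['constant dummy', 0.25, [254]])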
def set_config(self, ):
    super().set_config()
    if 'nveto' in self.config['targets']:
        self.config_nveto = deepcopy(self.config)
        self.config_nveto.update(
            straxen.get_resource(self.config_nveto['fax_config_nveto'], fmt='json'))
        self.config_nveto['detector'] = 'XENONnT_neutron_veto'
        self.config_nveto['channel_map'] = dict(self.config_nveto['channel_map'])
        overrides = self.config['fax_config_override_nveto']
        if overrides is not None:
            self.config_nveto.update(overrides)

        self.to_pe_nveto = straxen.get_correction_from_cmt(
            self.run_id, self.config['gain_model_nv'])
        self.config_nveto['gains'] = np.divide(
            (2e-9 * 2 / 2**14) / (1.6e-19 * 1 * 50),
            self.to_pe_nveto,
            out=np.zeros_like(self.to_pe_nveto),
            where=self.to_pe_nveto != 0)
        self.config_nveto['channels_bottom'] = np.array([], np.int64)
def test_nveto_event_plugin(hitlets, area):
    hitlets['area'] = area
    hitlets = strax.sort_by_time(hitlets)
    events, hitlets_ids_in_event = straxen.find_veto_events(hitlets, 3, 300, 0)

    straxen.plugins.veto_events.compute_nveto_event_properties(
        events, hitlets, hitlets_ids_in_event, start_channel=2000)

    # Test some of the parameters:
    for e, hit_ids in zip(events, hitlets_ids_in_event):
        hits = hitlets[hit_ids[0]:hit_ids[1]]
        assert e['time'] == np.min(hits['time']), \
            f'Event start is wrong (hit_ids: {hit_ids})'
        assert e['endtime'] == np.max(strax.endtime(hits)), \
            f'Event end is wrong (hit_ids: {hit_ids})'
        assert np.isclose(e['area'], np.sum(hits['area'])), \
            f'Event area is wrong for {e["area"]}, {hits["area"]}'
        mes = f'Event n_contributing_pmt is wrong for {e["n_contributing_pmt"]}, {hits["channel"]}'
        assert e['n_contributing_pmt'] == len(np.unique(hits['channel'])), mes
        assert e['n_hits'] == len(hits), \
            f'Event n_hits is wrong for {e["n_hits"]}, {hits}'

    # ---------------------------
    # Check if updated events
    # have the correct boundaries:
    # ---------------------------
    if len(events) > 1:
        mes = f'Updated event boundaries overlap! {events}'
        assert np.all((events['time'][1:] - events['endtime'][:-1]) > 0), mes

    split_hitlets = strax.split_by_containment(hitlets, events)
    for sbc_hitlets, tw_hitlet_id in zip(split_hitlets, hitlets_ids_in_event):
        h = hitlets[tw_hitlet_id[0]:tw_hitlet_id[1]]
        mes = ('Touching windows and split_by_containment yield different hitlets'
               ' after updating the event boundaries. This should not have happened.')
        assert np.all(sbc_hitlets == h), mes

    # Test event positions:
    try:
        npmt_pos = straxen.get_resource('nveto_pmt_position.csv', fmt='csv')
        npmt_pos = npmt_pos.to_records(index=False)
    except FileNotFoundError:
        npmt_pos = np.ones(120, dtype=[('x', np.float64),
                                       ('y', np.float64),
                                       ('z', np.float64)])

    events_angle = np.zeros(
        len(events),
        dtype=straxen.plugins.veto_events.veto_event_positions_dtype())
    straxen.plugins.veto_events.compute_positions(events_angle,
                                                  events,
                                                  split_hitlets,
                                                  npmt_pos,
                                                  start_channel=2000)
    angle = straxen.plugins.veto_events.compute_average_angle(split_hitlets,
                                                              npmt_pos,
                                                              start_channel=2000)

    # Compute truth angles:
    truth_angle = np.angle(events_angle['pos_x'] + events_angle['pos_y'] * 1j)

    # Replace undefined angles with zeros to match the np.angle return
    # and to simplify the comparison.
    m = (events_angle['pos_x'] == 0) & (events_angle['pos_y'] == 0)
    angle[m] = 0

    # Fixing the +2pi issue between the np.angle [-180, 180] and the
    # [0, 360) convention.
    angle = angle % (2 * np.pi)
    truth_angle = truth_angle % (2 * np.pi)

    # Compare angles; this also indirectly tests the average x/y/z.
    mes = f'Event angle did not match expected {truth_angle}, got {angle}.'
    assert np.all(np.isclose(angle, truth_angle)), mes
def get_pmt_gains(self,
                  run_id,
                  model_type,
                  global_version,
                  cacheable_versions=('ONLINE', ),
                  gain_dtype=np.float32):
    """
    Smart logic to return pmt gains to PE values.

    :param run_id: run id from runDB
    :param model_type: to_pe_model (gain model)
    :param global_version: global version
    :param cacheable_versions: versions that are allowed to be cached in
        ./resource_cache
    :param gain_dtype: dtype of the gains to be returned as array
    :return: array of pmt gains to PE values
    """
    to_pe = None
    cache_name = None

    if 'to_pe_model' in model_type:
        # Get the detector name based on the requested model_type.
        # This will also be used for the cacheable name convention:
        # pmt == TPC, n_veto == n_veto's PMT, etc.
        detector_names = {'to_pe_model': 'pmt',
                          'to_pe_model_nv': 'n_veto',
                          'to_pe_model_mv': 'mu_veto'}
        target_detector = detector_names[model_type]

        if global_version in cacheable_versions:
            # Try to load from cache; if it does not exist it will be created below
            cache_name = cacheable_naming(run_id, model_type, global_version)
            try:
                to_pe = straxen.get_resource(cache_name, fmt='npy')
            except (ValueError, FileNotFoundError):
                pass

        if to_pe is None:
            to_pe = self._get_correction(run_id, target_detector, global_version)

        # Be cautious with very early runs, check that not all are None
        if np.isnan(to_pe).all():
            raise ValueError(
                f"to_pe (PMT gains) values are NaN, no data available "
                f"for {run_id} in the gain model with version "
                f"{global_version}, please set constant values for "
                f"{run_id}")
    else:
        raise ValueError(f"{model_type} not implemented for to_pe values")

    # Double check the dtype of the gains
    to_pe = np.array(to_pe, dtype=gain_dtype)

    # Double check that all the gains are found; None is not allowed
    # since strax processing does not handle this well. If a PMT is
    # off, its gain should be 0.
    if np.any(np.isnan(to_pe)):
        pmts_affected = np.argwhere(np.isnan(to_pe))[:, 0]
        raise GainsNotFoundError(
            f'Gains returned by CMT are None for PMT_i = {pmts_affected}. '
            f'Cannot proceed with processing. Report to CMT-maintainers.')

    if (cache_name is not None
            and global_version in cacheable_versions
            and not os.path.exists(cache_name)):
        # This is an array we can save since it's in the cacheable
        # versions but it has not been saved yet. Next time we need
        # it, we can get it from our cache.
        np.save(cache_name, to_pe, allow_pickle=False)
    return to_pe
def get_to_pe(run_id, gain_model, n_pmts):
    if not isinstance(gain_model, tuple):
        raise ValueError("gain_model must be a tuple")
    if not len(gain_model) == 2:
        raise ValueError("gain_model must have two elements: "
                         "the model type and its specific configuration")
    model_type, model_conf = gain_model

    if model_type == 'disabled':
        # Somebody messed up
        raise RuntimeError("Attempt to use a disabled gain model")

    if model_type == 'CMT_model':
        if not isinstance(model_conf, tuple) or len(model_conf) != 2:
            # Raise a value error if the condition is not met. We should have:
            # ("CMT_model",    -> To specify that we want to use the online
            #                     corrections management tool
            #  ("to_pe_model", -> This is to specify that we want the gains
            #   "v1",          -> The version of the correction; 'v1' is the online version
            #  )
            # )
            raise ValueError('CMT gain model should be similar to: '
                             '("CMT_model", ("to_pe_model", "v1")). Instead got: '
                             f'{model_conf}')
        # Is this the best way to do this?
        is_nt = (n_pmts == straxen.n_tpc_pmts
                 or n_pmts == straxen.n_nveto_pmts
                 or n_pmts == straxen.n_mveto_pmts)
        corrections = straxen.CorrectionsManagementServices(is_nt=is_nt)
        to_pe = corrections.get_corrections_config(run_id, model_conf)
        return to_pe

    elif model_type == 'to_pe_per_run':
        warn("to_pe_per_run will be replaced by CorrectionsManagementServices",
             DeprecationWarning, 2)
        # Load a npy file specifying a run_id -> to_pe array
        to_pe_file = model_conf
        x = straxen.get_resource(to_pe_file, fmt='npy')
        run_index = np.where(x['run_id'] == int(run_id))[0]
        if not len(run_index):
            # Gains not known: using placeholders
            run_index = [-1]
        to_pe = x[run_index[0]]['to_pe']

    elif model_type == 'to_pe_constant':
        if model_conf in FIXED_TO_PE:
            return FIXED_TO_PE[model_conf]
        try:
            # Uniform gain, specified as a to_pe factor
            to_pe = np.ones(n_pmts, dtype=np.float32) * model_conf
        except np.core._exceptions.UFuncTypeError as e:
            raise ValueError(
                str(e) + f'\nTried multiplying by {model_conf}. Insert a number instead.')

    else:
        raise NotImplementedError(f"Gain model type {model_type} not implemented")

    if len(to_pe) != n_pmts:
        raise ValueError(
            f"Gain model {gain_model} resulted in a to_pe "
            f"of length {len(to_pe)}, but n_pmts is {n_pmts}!")
    return to_pe
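A hedged sketch of the nested CMT gain-model form checked above; the run id and PMT count are placeholders.

# Online gains via the corrections management tool (placeholder run id)
to_pe = get_to_pe('025423', ('CMT_model', ('to_pe_model', 'v1')), 494)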
# Just some id which allows CMT to load
run_id = run_list[0]

# Setting up instructions like this may take a while. You can set e.g.
instructions = dict(
    event_rate=2,
    chunk_size=5,
    nchunk=400,  # 40 works
    photons_low=1,       # PE
    photons_high=1e4,    # PE (1e5 works)
    electrons_low=1,
    electrons_high=1e4,  # (1e5 works)
    tpc_radius=straxen.tpc_r,
    tpc_length=straxen.tpc_z,
    drift_field=straxen.get_resource('fax_config_nt_low_field.json',
                                     fmt='json').get('drift_field'),
    timing='uniform',  # Double S1 peaks uniform over time
)
pema.inst_to_csv(instructions,
                 instructions_csv,
                 get_inst_from=pema.rand_instructions)

# TODO can we add noise?
config_update = dict(
    detector='XENONnT',
    fax_file=os.path.abspath(instructions_csv),
    fax_config='fax_config_nt_low_field.json',
    # fax_config_override=dict(enable_electron_afterpulses=False,
    #                          enable_pmt_afterpulses=False
    #                          ),
def xenonnt_simulation(output_folder='./strax_data',
                       wfsim_registry='RawRecordsFromFaxNT',
                       cmt_run_id_sim=None,
                       cmt_run_id_proc=None,
                       cmt_version='global_v5',
                       fax_config='fax_config_nt_design.json',
                       overwrite_from_fax_file_sim=False,
                       overwrite_from_fax_file_proc=False,
                       cmt_option_overwrite_sim=immutabledict(),
                       cmt_option_overwrite_proc=immutabledict(),
                       _forbid_creation_of=None,
                       _config_overlap=immutabledict(
                           drift_time_gate='electron_drift_time_gate',
                           drift_velocity_liquid='electron_drift_velocity',
                           electron_lifetime_liquid='elife'),
                       **kwargs):
    """
    The most generic context that allows for setting fully divergent
    settings for simulation purposes.

    It makes a fully divergent setup, allowing to set the detector
    simulation part (i.e. for wfsim up to truth and raw_records)
    separately from processing. Parameters with _sim in their name refer
    to detector simulation parameters. Arguments having _proc in their
    name refer to detector parameters that are used for processing of
    simulations, as done to the real detector data. This means starting
    from already existing raw_records and finishing with higher level
    data, such as peaks, events etc.

    If only one cmt_run_id is given, the second one will be set
    automatically, resulting in a CMT match between simulation and
    processing. However, detector parameters can still be overwritten
    from the fax file or manually using the CMT config overwrite options.
    CMT options can also be overwritten via the fax config file.

    :param output_folder: Output folder for strax data.
    :param wfsim_registry: Name of the WFSim plugin used to generate data.
    :param cmt_run_id_sim: Run id for detector parameters from CMT to be
        used for creation of raw_records.
    :param cmt_run_id_proc: Run id for detector parameters from CMT to be
        used for processing from raw_records to higher level data.
    :param cmt_version: Global version for corrections to be loaded.
    :param fax_config: Fax config file to use.
    :param overwrite_from_fax_file_sim: If true, sets detector simulation
        parameters for truth/raw_records from the fax_config file instead
        of CMT.
    :param overwrite_from_fax_file_proc: If true, sets detector processing
        parameters after raw_records (peaklets/events/etc.) from the
        fax_config file instead of CMT.
    :param cmt_option_overwrite_sim: Dictionary to overwrite CMT settings
        for the detector simulation part.
    :param cmt_option_overwrite_proc: Dictionary to overwrite CMT settings
        for the data processing part.
    :param _forbid_creation_of: str/tuple of datatypes to prevent from
        being written (e.g. 'raw_records' for a read-only simulation
        context).
    :param _config_overlap: Dictionary of options to overwrite. Keys must
        be simulation config keys, values must be valid CMT option keys.
    :param kwargs: Additional kwargs taken by strax.Context.
    :return: strax.Context instance
    """
    import wfsim
    st = strax.Context(storage=strax.DataDirectory(output_folder),
                       config=dict(detector='XENONnT',
                                   fax_config=fax_config,
                                   check_raw_record_overlaps=True,
                                   **straxen.contexts.xnt_common_config),
                       **straxen.contexts.xnt_common_opts,
                       **kwargs)
    st.register(getattr(wfsim, wfsim_registry))

    # Make sure that the non-simulated raw-record types are not requested
    st.deregister_plugins_with_missing_dependencies()

    if straxen.utilix_is_configured(
            warning_message='Bad context as we cannot set CMT since we '
                            'have no database access'):
        st.apply_cmt_version(cmt_version)

    if _forbid_creation_of is not None:
        st.context_config['forbid_creation_of'] += strax.to_str_tuple(_forbid_creation_of)

    # Doing sanity checks for cmt run ids for simulation and processing
    if (not cmt_run_id_sim) and (not cmt_run_id_proc):
        raise RuntimeError("cmt_run_id_sim and cmt_run_id_proc are None. "
                           "You have to specify at least one CMT run id.")
    if (cmt_run_id_sim and cmt_run_id_proc) and (cmt_run_id_sim != cmt_run_id_proc):
        print("INFO : divergent CMT runs for simulation and processing")
        print(" cmt_run_id_sim".ljust(25), cmt_run_id_sim)
        print(" cmt_run_id_proc".ljust(25), cmt_run_id_proc)
    else:
        cmt_id = cmt_run_id_sim or cmt_run_id_proc
        cmt_run_id_sim = cmt_id
        cmt_run_id_proc = cmt_id

    # Replace default cmt options with cmt_run_id tag + cmt run id
    cmt_options = straxen.get_corrections.get_cmt_options(st)

    # Prune to just get the strax options
    cmt_options = {key: val['strax_option'] for key, val in cmt_options.items()}

    # First, fix the gain model for simulation
    st.set_config({'gain_model_mc':
                       ('cmt_run_id', cmt_run_id_sim, *cmt_options['gain_model'])})
    fax_config_override_from_cmt = dict()
    for fax_field, cmt_field in _config_overlap.items():
        fax_config_override_from_cmt[fax_field] = ('cmt_run_id',
                                                   cmt_run_id_sim,
                                                   *cmt_options[cmt_field])
    st.set_config({'fax_config_override_from_cmt': fax_config_override_from_cmt})

    # ... and all other parameters for processing
    for option in cmt_options:
        st.config[option] = ('cmt_run_id', cmt_run_id_proc, *cmt_options[option])

    # Done with "default" usage, now to overwrites from file
    #
    # Take fax config and put into context option
    if overwrite_from_fax_file_proc or overwrite_from_fax_file_sim:
        fax_config = straxen.get_resource(fax_config, fmt='json')
        for fax_field, cmt_field in _config_overlap.items():
            if overwrite_from_fax_file_proc:
                st.config[cmt_field] = (cmt_options[cmt_field][0] + '_constant',
                                        fax_config[fax_field])
            if overwrite_from_fax_file_sim:
                st.config['fax_config_override_from_cmt'][fax_field] = (
                    cmt_options[cmt_field][0] + '_constant',
                    fax_config[fax_field])

    # And as the last step - manual overrides, since they have the highest priority
    # User customized for simulation
    for option in cmt_option_overwrite_sim:
        if option not in cmt_options:
            raise ValueError(f'Overwrite option {option} is not using CMT by default, '
                             'you should just use set_config')
        if option not in _config_overlap.values():
            raise ValueError(f'Overwrite option {option} does not have a mapping from '
                             'CMT to fax config!')
        for fax_key, cmt_key in _config_overlap.items():
            if cmt_key == option:
                _name_index = 2 if 'cmt_run_id' in cmt_options[option] else 0
                st.config['fax_config_override_from_cmt'][fax_key] = (
                    cmt_options[option][_name_index] + '_constant',
                    cmt_option_overwrite_sim[option])
                del _name_index
        del (fax_key, cmt_key)

    # User customized for processing
    for option in cmt_option_overwrite_proc:
        if option not in cmt_options:
            raise ValueError(f'Overwrite option {option} is not using CMT by default, '
                             'you should just use set_config')
        _name_index = 2 if 'cmt_run_id' in cmt_options[option] else 0
        st.config[option] = (cmt_options[option][_name_index] + '_constant',
                             cmt_option_overwrite_proc[option])
        del _name_index

    # Only for simulations
    st.set_config({"event_info_function": "disabled"})

    return st
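A hedged usage sketch of this context; the run id is a placeholder and the manual elife override is a hypothetical example.

st = xenonnt_simulation(
    output_folder='./strax_data',
    cmt_run_id_sim='026000',                   # placeholder run id
    cmt_version='global_v5',
    fax_config='fax_config_nt_design.json',
    cmt_option_overwrite_sim={'elife': 1e6},   # hypothetical manual override
)
st.make('026000', 'raw_records')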
def __init__(self, config=None): files = self.config_to_file(config) log.debug('Getting\n' + '\n'.join([f'{k}: {v}' for k, v in files.items()])) for k, v in files.items(): if isinstance(v, list): # It's a dummy map call, do nothing continue if k == 'url_base': continue if v == '': log.warning( f'{k} has no path so this config file is set to None') files[k] = None continue log.debug(f'Obtaining {k} from {v}') files[k] = self.get_file_path(files['url_base'], v) if config.get('detector', 'XENONnT') == 'XENON1T': self.s1_pattern_map = make_map(files['s1_pattern_map'], fmt='json.gz') self.s1_lce_correction_map = make_map( files['s1_lce_correction_map'], fmt='json') self.s2_correction_map = make_map(files['s2_correction_map'], fmt='json') self.s2_pattern_map = make_map(files['s2_pattern_map'], fmt='json.gz') self.fdc_3d = make_map(files['fdc_3d'], fmt='json.gz') # Gas gap warping map if config.get('enable_gas_gap_warping', False): self.gas_gap_length = make_map( ["constant dummy", 0.25, [ 254, ]]) # Photon After Pulses if config.get('enable_pmt_afterpulses', False): self.uniform_to_pmt_ap = straxen.get_resource( files['photon_ap_cdfs'], fmt='pkl.gz') # Electron After Pulses if config.get('enable_electron_afterpulses', False): self.uniform_to_ele_ap = straxen.get_resource( files['ele_ap_pdfs'], fmt='pkl.gz') elif config.get('detector', 'XENONnT') == 'XENONnT': pmt_mask = np.array( config['gains'] ) > 0 # Converted from to pe (from cmt by default) self.s1_pattern_map = make_patternmap(files['s1_pattern_map'], fmt='pkl', pmt_mask=pmt_mask) self.s2_pattern_map = make_patternmap(files['s2_pattern_map'], fmt='pkl', pmt_mask=pmt_mask) self.se_gain_map = make_map(files['se_gain_map']) # self.s2_correction_map = make_map(files['s2_correction_map'], fmt = 'json') # if there is a (data driven!) map, load it. If not make it from the pattern map if files['s1_lce_correction_map']: self.s1_lce_correction_map = make_map( files['s1_lce_correction_map']) else: lymap = deepcopy(self.s1_pattern_map) # AT: this scaling with mast is redundant to `make_patternmap`, but keep it in for now lymap.data['map'] = np.sum(lymap.data['map'][:][:][:], axis=3, keepdims=True, where=pmt_mask) lymap.__init__(lymap.data) self.s1_lce_correction_map = lymap # making S2 aft scaling (if provided) if 's2_mean_area_fraction_top' in config.keys(): avg_s2aft_ = config['s2_mean_area_fraction_top'] if avg_s2aft_ >= 0.0: if isinstance(files['s2_pattern_map'], list): log.warning( f'Scaling of S2 AFT with dummy map, this will have no effect!' ) else: s2map = deepcopy(self.s2_pattern_map) s2map_topeff_ = s2map.data['map'][ ..., 0:config['n_top_pmts']].sum(axis=2) s2map_toteff_ = s2map.data['map'].sum(axis=2) orig_aft_ = np.mean( (s2map_topeff_ / s2map_toteff_)[s2map_toteff_ > 0.0]) # getting scales for top/bottom separately to preserve total efficiency scale_top_ = avg_s2aft_ / orig_aft_ scale_bot_ = (1 - avg_s2aft_) / (1 - orig_aft_) s2map.data['map'][:, :, 0:config['n_top_pmts']] *= scale_top_ s2map.data['map'][:, :, config['n_top_pmts']: config['n_tpc_pmts']] *= scale_bot_ self.s2_pattern_map.__init__(s2map.data) # if there is a (data driven!) map, load it. 
If not make it from the pattern map if files['s2_correction_map']: self.s2_correction_map = make_map(files['s2_correction_map'], fmt='json') else: s2cmap = deepcopy(self.s2_pattern_map) # Lower the LCE by removing contribution from dead PMTs # AT: masking is a bit redundant due to PMT mask application in make_patternmap s2cmap.data['map'] = np.sum(s2cmap.data['map'][:][:], axis=2, keepdims=True, where=pmt_mask) # Scale by median value s2cmap.data['map'] = s2cmap.data['map'] / np.median( s2cmap.data['map'][s2cmap.data['map'] > 0]) s2cmap.__init__(s2cmap.data) self.s2_correction_map = s2cmap # Garfield luminescence timing samples # if config.get('s2_luminescence_model', False) == 'garfield': if 'garfield_gas_gap' in config.get('s2_luminescence_model', ''): #garfield_gas_gap option is using (x,y) -> gas gap (from the map) -> s2 luminescence #from garfield. This s2_luminescence_gg is indexed only by the gas gap, and #corresponds to electrons drawn directly below the anode s2_luminescence_map = straxen.get_resource( files['s2_luminescence_gg'], fmt='npy') self.s2_luminescence_gg = s2_luminescence_map self.garfield_gas_gap_map = make_map( files['garfield_gas_gap_map'], fmt='json') elif 'garfield' in config.get('s2_luminescence_model', ''): #This option indexes the luminescence times using the liquid level values #as well as the position between the full pitch of two gate wires gf_file_name = files['s2_luminescence'] if gf_file_name.endswith('npy'): s2_luminescence_map = straxen.get_resource(gf_file_name, fmt='npy') self.s2_luminescence = s2_luminescence_map elif gf_file_name.endswith('npz'): # Backwards compatibility from before #363 / #370 s2_luminescence_map = straxen.get_resource( gf_file_name, fmt='npy_pickle')['arr_0'] # Get directly the map for the simulated level liquid_level_available = np.unique( s2_luminescence_map['ll']) # available levels (cm) liquid_level = config['gate_to_anode_distance'] - config[ 'elr_gas_gap_length'] # cm liquid_level = min(liquid_level_available, key=lambda x: abs(x - liquid_level)) self.s2_luminescence = s2_luminescence_map[ s2_luminescence_map['ll'] == liquid_level] else: raise ValueError(f'{gf_file_name} is of unknown format') if config.get('field_distortion_model', "none") == "inverse_fdc": self.fdc_3d = make_map(files['fdc_3d'], fmt='json.gz') self.fdc_3d.scale_coordinates( [1., 1., -config['drift_velocity_liquid']]) if config.get('field_distortion_model', "none") == "comsol": self.fd_comsol = make_map( config['field_distortion_comsol_map'], fmt='json.gz', method='RectBivariateSpline') # Gas gap warping map if config.get('enable_gas_gap_warping', False): gas_gap_map = straxen.get_resource(files['gas_gap_map'], fmt='pkl') self.gas_gap_length = lambda positions: gas_gap_map.lookup( *positions.T) # Field dependencies # This config entry a dictionary of 5 items if any(config['enable_field_dependencies'].values()): field_dependencies_map = make_map( files['field_dependencies_map'], fmt='json.gz', method='RectBivariateSpline') self.drift_velocity_scaling = 1.0 # calculating drift velocity scaling to match total drift time for R=0 between cathode and gate if "norm_drift_velocity" in config[ 'enable_field_dependencies'].keys(): if config['enable_field_dependencies'][ 'norm_drift_velocity']: norm_dvel = field_dependencies_map( np.array([[0], [-config['tpc_length']]]).T, map_name='drift_speed_map')[0] norm_dvel *= 1e-4 self.drift_velocity_scaling = config[ 'drift_velocity_liquid'] / norm_dvel def rz_map(z, xy, **kwargs): r = np.sqrt(xy[:, 0]**2 + xy[:, 1]**2) return 
field_dependencies_map(np.array([r, z]).T, **kwargs) self.field_dependencies_map = rz_map # Data-driven longitudinal diffusion map # TODO: Change to the best way to accommodate simulation/data-driven map if config['enable_field_dependencies'][ "diffusion_longitudinal_map"]: diffusion_longitudinal_map = make_map( files['diffusion_longitudinal_map'], fmt='json.gz', method='WeightedNearestNeighbors') def _rz_map(z, xy, **kwargs): r = np.sqrt(xy[:, 0]**2 + xy[:, 1]**2) return diffusion_longitudinal_map( np.array([r, z]).T, **kwargs) self.diffusion_longitudinal_map = _rz_map # Photon After Pulses if config.get('enable_pmt_afterpulses', False): self.uniform_to_pmt_ap = straxen.get_resource( files['photon_ap_cdfs'], fmt='json.gz') # S1 photon timing splines if config.get('s1_time_spline', False): self.s1_optical_propagation_spline = make_map( files['s1_time_spline'], fmt='json.gz', method='RegularGridInterpolator') # Electron After Pulses if config.get('enable_electron_afterpulses', False): self.uniform_to_ele_ap = straxen.get_resource( files['ele_ap_pdfs'], fmt='pkl.gz') # S2 photons timing optical propagation delays if config.get('s2_time_spline', False): self.s2_optical_propagation_spline = make_map( files['s2_time_spline']) elif config.get('detector', 'XENONnT') == 'XENONnT_neutron_veto': # Neutron veto PMT QE as function of wavelength self.nv_pmt_qe = straxen.get_resource(files['nv_pmt_qe'], fmt='json') # SPE area distributions self.photon_area_distribution = straxen.get_resource( files['photon_area_distribution'], fmt='csv') # Noise sample if config.get('enable_noise', False): self.noise_data = straxen.get_resource(files['noise_file'], fmt='npy')['arr_0'] n_channels = len(self.noise_data[0]) log.warning( f'Using noise data {files["noise_file"]} with {n_channels} channels for {config["detector"]}' ) log.debug(f'{self.__class__.__name__} fully initialized')
def get_cmt_resource(run_id, conf, fmt=''):
    """Get resource with CMT correction file name"""
    return straxen.get_resource(get_correction_from_cmt(run_id, conf), fmt=fmt)
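A hedged sketch of resolving a file-name-valued correction through CMT and then fetching the file itself; the run id and the CMT option tuple are hypothetical.

spline_data = get_cmt_resource('020000',
                               ('s1_xyz_map', 'ONLINE', True),  # hypothetical CMT option
                               fmt='json')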
def setup(self):
    # TODO: Unify with TPC and add adc thresholds
    self.to_pe = straxen.get_resource(self.config['to_pe_file_nv'], fmt='npy')
def make_map(map_file: str, fmt='text'):
    map_data = straxen.get_resource(map_file, fmt)
    return straxen.InterpolatingMap(map_data)
def __init__(self, config=None): if config is None: config = dict() config = deepcopy(config) files = { 'ele_ap_pdfs': 'x1t_se_afterpulse_delaytime.pkl.gz', 'ele_ap_cdfs': 'ele_after_pulse.npy', 'noise_file': 'x1t_noise_170203_0850_00_small.npz', } if config['detector'] == 'XENON1T': files.update({ 'photon_area_distribution': 'XENON1T_spe_distributions.csv', 's1_light_yield_map': 'XENON1T_s1_xyz_ly_kr83m_SR1_pax-680_fdc-3d_v0.json', 's1_pattern_map': 'XENON1T_s1_xyz_patterns_interp_corrected_MCv2.1.0.json.gz', 's2_light_yield_map': 'XENON1T_s2_xy_ly_SR1_v2.2.json', 's2_pattern_map': 'XENON1T_s2_xy_patterns_top_corrected_MCv2.1.0.json.gz', 'photon_ap_cdfs': 'x1t_pmt_afterpulse_config.pkl.gz', 'fdc_3d': 'XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part1_v1.json.gz', }) elif config['detector'] == 'XENONnT': files.update({ 'photon_area_distribution': 'XENONnT_spe_distributions.csv', 's1_pattern_map': 'XENONnT_s1_xyz_patterns_corrected_MCv3.1.0_disks.pkl', 's2_pattern_map': 'XENONnT_s2_xy_patterns_topbottom_corrected_MCv3.1.0_disks.pkl', 'photon_ap_cdfs': 'xnt_pmt_afterpulse_config.pkl.gz', 's2_luminescence': 'XENONnT_s2_garfield_luminescence_distribution_v0.pkl.gz', }) else: raise ValueError(f"Unsupported detector {config['detector']}") for k in set(config).intersection(files): files[k] = config[ k] # Allowing user to replace default with specified files commit = 'master' # Replace this by a commit hash if you feel solid and responsible url_base = f'https://raw.githubusercontent.com/XENONnT/strax_auxiliary_files/{commit}/fax_files' for k, v in files.items(): if v.startswith('/'): print( f"WARNING: Using local file {v} for a resource. " f"Do not set this as a default or TravisCI tests will break" ) files[k] = osp.join(url_base, v) self.photon_area_distribution = straxen.get_resource( files['photon_area_distribution'], fmt='csv') if config['detector'] == 'XENON1T': self.s1_pattern_map = make_map(files['s1_pattern_map'], fmt='json.gz') self.s1_light_yield_map = make_map(files['s1_light_yield_map'], fmt='json') self.s2_light_yield_map = make_map(files['s2_light_yield_map'], fmt='json') self.s2_pattern_map = make_map(files['s2_pattern_map'], fmt='json.gz') self.fdc_3d = make_map(files['fdc_3d'], fmt='json.gz') if config['detector'] == 'XENONnT': self.s1_pattern_map = make_map(files['s1_pattern_map'], fmt='pkl') lymap = deepcopy(self.s1_pattern_map) lymap.data['map'] = np.sum(lymap.data['map'][:][:][:], axis=3, keepdims=True) lymap.__init__(lymap.data) self.s1_light_yield_map = lymap self.s2_pattern_map = make_map(files['s2_pattern_map'], fmt='pkl') lymap = deepcopy(self.s2_pattern_map) lymap.data['map'] = np.sum(lymap.data['map'][:][:], axis=2, keepdims=True) lymap.__init__(lymap.data) self.s2_light_yield_map = lymap self.s2_luminescence = straxen.get_resource( files['s2_luminescence'], fmt='pkl.gz') self.fdc_3d = dummy_map(result=0) # Electron After Pulses compressed, haven't figure out how pkl.gz works self.uniform_to_ele_ap = straxen.get_resource(files['ele_ap_pdfs'], fmt='pkl.gz') # Photon After Pulses self.uniform_to_pmt_ap = straxen.get_resource(files['photon_ap_cdfs'], fmt='pkl.gz') # Noise sample self.noise_data = straxen.get_resource(files['noise_file'], fmt='npy')['arr_0'].flatten()
def __init__(self, config=None): log.debug(f'Getting {config}') if config is None: config = dict() config = deepcopy(config) files = { 'ele_ap_pdfs': 'x1t_se_afterpulse_delaytime.pkl.gz', 'ele_ap_cdfs': 'ele_after_pulse.npy', 'noise_file': 'x1t_noise_170203_0850_00_small.npz', } if config['detector'] == 'XENON1T': files.update({ 'photon_area_distribution': 'XENON1T_spe_distributions.csv', 's1_light_yield_map': 'XENON1T_s1_xyz_ly_kr83m_SR1_pax-680_fdc-3d_v0.json', 's1_pattern_map': 'XENON1T_s1_xyz_patterns_interp_corrected_MCv2.1.0.json.gz', 's2_light_yield_map': 'XENON1T_s2_xy_ly_SR1_v2.2.json', 's2_pattern_map': 'XENON1T_s2_xy_patterns_top_corrected_MCv2.1.0.json.gz', 'photon_ap_cdfs': 'x1t_pmt_afterpulse_config.pkl.gz', 'fdc_3d': 'XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part1_v1.json.gz', }) elif config['detector'] == 'XENONnT': files.update({ 'photon_area_distribution': 'XENONnT_spe_distributions.csv', 's1_pattern_map': 'XENONnT_s1_xyz_patterns_corrected_qes_MCva43fa9b_wires.pkl', 's2_pattern_map': 'XENONnT_s2_xy_patterns_topbottom_corrected_qes_MCva43fa9b_wires.pkl', 'photon_ap_cdfs': 'xnt_pmt_afterpulse_config.pkl.gz', 's2_luminescence': 'XENONnT_s2_garfield_luminescence_distribution_v0.pkl.gz', 'gas_gap_map': 'gas_gap_warping_map_January_2021.pkl', }) else: raise ValueError(f"Unsupported detector {config['detector']}") for k in set(config).intersection(files): files[k] = config[ k] # Allowing user to replace default with specified files commit = 'master' # Replace this by a commit hash if you feel solid and responsible url_base = f'https://raw.githubusercontent.com/XENONnT/strax_auxiliary_files/{commit}/sim_files' for k, v in files.items(): log.debug(f'Obtaining {k} from {v}') if v.startswith('/'): log.warning( f"WARNING: Using local file {v} for a resource. " f"Do not set this as a default or TravisCI tests will break" ) try: # First try downloading it via # https://straxen.readthedocs.io/en/latest/config_storage.html#downloading-xenonnt-files-from-the-database # noqa # we need to add the straxen.MongoDownloader() in this # try: except NameError: logic because the NameError # gets raised if we don't have access to utilix. downloader = straxen.MongoDownloader() # FileNotFoundError, ValueErrors can be raised if we # cannot load the requested config downloaded_file = downloader.download_single(v) files[k] = downloaded_file except (FileNotFoundError, ValueError, NameError, AttributeError): # We cannot download the file from the database. We need to # try to get a placeholder file from a URL. 
raw_url = osp.join(url_base, v) log.warning(f'{k} did not download, trying {raw_url}') files[k] = raw_url log.debug(f'Downloaded {k} successfully') self.photon_area_distribution = straxen.get_resource( files['photon_area_distribution'], fmt='csv') if config['detector'] == 'XENON1T': self.s1_pattern_map = make_map(files['s1_pattern_map'], fmt='json.gz') self.s1_light_yield_map = make_map(files['s1_light_yield_map'], fmt='json') self.s2_light_yield_map = make_map(files['s2_light_yield_map'], fmt='json') self.s2_pattern_map = make_map(files['s2_pattern_map'], fmt='json.gz') self.fdc_3d = make_map(files['fdc_3d'], fmt='json.gz') # TODO # config not set self.gas_gap_length = lambda positions: np.ones(253) if config['detector'] == 'XENONnT': self.s1_pattern_map = make_map(files['s1_pattern_map'], fmt='pkl') lymap = deepcopy(self.s1_pattern_map) lymap.data['map'] = np.sum(lymap.data['map'][:][:][:], axis=3, keepdims=True) lymap.__init__(lymap.data) self.s1_light_yield_map = lymap self.s2_pattern_map = make_map(files['s2_pattern_map'], fmt='pkl') lymap = deepcopy(self.s2_pattern_map) lymap.data['map'] = np.sum(lymap.data['map'][:][:], axis=2, keepdims=True) lymap.__init__(lymap.data) self.s2_light_yield_map = lymap self.s2_luminescence = straxen.get_resource( files['s2_luminescence'], fmt='pkl.gz') self.fdc_3d = dummy_map(result=0) gas_gap_map = straxen.get_resource(files['gas_gap_map'], fmt='pkl') self.gas_gap_length = lambda positions: gas_gap_map.lookup( *positions.T) # Electron After Pulses compressed, haven't figure out how pkl.gz works self.uniform_to_ele_ap = straxen.get_resource(files['ele_ap_pdfs'], fmt='pkl.gz') # Photon After Pulses self.uniform_to_pmt_ap = straxen.get_resource(files['photon_ap_cdfs'], fmt='pkl.gz') # Noise sample self.noise_data = straxen.get_resource(files['noise_file'], fmt='npy')['arr_0'].flatten() # nVeto PMT Q.E. if config['neutron_veto']: self.nv_pmt_qe_data = straxen.get_resource( config['nv_pmt_qe_file'], fmt='json') log.debug(f'{self.__class__.__name__} fully initialized')