def test_replace_config():
    a = dict(toto=1, tata=2)
    b = dict(tata=3, tutu=4)

    c = config.replace_config(a, b)

    assert c["toto"] == 1
    assert c["tata"] == 3
    assert c["tutu"] == 4
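# A minimal sketch of the merge semantics asserted by the test above, for flat
# dictionaries only: keys present only in the base config are kept, overlapping
# keys take the overriding value, and new keys are added. This is an
# illustration, not the actual lstchain.io.config.replace_config implementation,
# which may treat nested configurations differently.
def replace_config_sketch(base: dict, override: dict) -> dict:
    merged = dict(base)      # keep every key of the base configuration
    merged.update(override)  # overlapping keys take the value from the override
    return merged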
def main():
    std_config = get_standard_config()

    if args.config_file is not None:
        config = replace_config(std_config, read_configuration_file(args.config_file))
    else:
        config = std_config

    print(config['tailcut'])

    geom = CameraGeometry.from_name('LSTCam-002')
    foclen = OpticsDescription.from_name('LST').equivalent_focal_length

    dl1_container = DL1ParametersContainer()
    parameters_to_update = list(HillasParametersContainer().keys())
    parameters_to_update.extend(['wl', 'r', 'leakage', 'n_islands', 'intercept', 'time_gradient'])

    nodes_keys = get_dataset_keys(args.input_file)
    if args.noimage:
        nodes_keys.remove(dl1_images_lstcam_key)

    auto_merge_h5files([args.input_file], args.output_file, nodes_keys=nodes_keys)

    with tables.open_file(args.input_file, mode='r') as input:
        image_table = input.root[dl1_images_lstcam_key]

        with tables.open_file(args.output_file, mode='a') as output:
            params = output.root[dl1_params_lstcam_key].read()

            for ii, row in enumerate(image_table):
                if ii % 10000 == 0:
                    print(ii)

                image = row['image']
                pulse_time = row['pulse_time']
                signal_pixels = tailcuts_clean(geom, image, **config['tailcut'])

                if image[signal_pixels].shape[0] > 0:
                    num_islands, island_labels = number_of_islands(geom, signal_pixels)
                    hillas = hillas_parameters(geom[signal_pixels], image[signal_pixels])
                    dl1_container.fill_hillas(hillas)
                    dl1_container.set_timing_features(geom[signal_pixels],
                                                      image[signal_pixels],
                                                      pulse_time[signal_pixels],
                                                      hillas)
                    dl1_container.set_leakage(geom, image, signal_pixels)
                    dl1_container.n_islands = num_islands
                    dl1_container.wl = dl1_container.width / dl1_container.length

                    width = np.rad2deg(np.arctan2(dl1_container.width, foclen))
                    length = np.rad2deg(np.arctan2(dl1_container.length, foclen))
                    dl1_container.width = width.value
                    dl1_container.length = length.value
                    dl1_container.r = np.sqrt(dl1_container.x**2 + dl1_container.y**2)

                    for p in parameters_to_update:
                        params[ii][p] = Quantity(dl1_container[p]).value
                else:
                    for p in parameters_to_update:
                        params[ii][p] = 0

            output.root[dl1_params_lstcam_key][:] = params
def main():
    std_config = get_standard_config()

    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)

    if args.config_file is not None:
        config = replace_config(std_config, read_configuration_file(args.config_file))
    else:
        config = std_config

    log.info(f"Tailcut config used: {config['tailcut']}")

    foclen = OpticsDescription.from_name('LST').equivalent_focal_length
    cam_table = Table.read(args.input_file, path="instrument/telescope/camera/LSTCam")
    camera_geom = CameraGeometry.from_table(cam_table)

    dl1_container = DL1ParametersContainer()
    parameters_to_update = list(HillasParametersContainer().keys())
    parameters_to_update.extend([
        'concentration_cog',
        'concentration_core',
        'concentration_pixel',
        'leakage_intensity_width_1',
        'leakage_intensity_width_2',
        'leakage_pixels_width_1',
        'leakage_pixels_width_2',
        'n_islands',
        'intercept',
        'time_gradient',
        'n_pixels',
        'wl',
        'log_intensity'
    ])

    nodes_keys = get_dataset_keys(args.input_file)
    if args.noimage:
        nodes_keys.remove(dl1_images_lstcam_key)

    auto_merge_h5files([args.input_file], args.output_file, nodes_keys=nodes_keys)

    with tables.open_file(args.input_file, mode='r') as input:
        image_table = input.root[dl1_images_lstcam_key]
        dl1_params_input = input.root[dl1_params_lstcam_key].colnames

        disp_params = {'disp_dx', 'disp_dy', 'disp_norm', 'disp_angle', 'disp_sign'}
        if set(dl1_params_input).intersection(disp_params):
            parameters_to_update.extend(disp_params)

        with tables.open_file(args.output_file, mode='a') as output:
            params = output.root[dl1_params_lstcam_key].read()

            for ii, row in enumerate(image_table):
                dl1_container.reset()

                image = row['image']
                peak_time = row['peak_time']
                signal_pixels = tailcuts_clean(camera_geom, image, **config['tailcut'])
                n_pixels = np.count_nonzero(signal_pixels)

                if n_pixels > 0:
                    num_islands, island_labels = number_of_islands(camera_geom, signal_pixels)
                    n_pixels_on_island = np.bincount(island_labels.astype(np.int64))
                    # first island is no-island and should not be considered
                    n_pixels_on_island[0] = 0
                    max_island_label = np.argmax(n_pixels_on_island)
                    signal_pixels[island_labels != max_island_label] = False

                    hillas = hillas_parameters(camera_geom[signal_pixels], image[signal_pixels])
                    dl1_container.fill_hillas(hillas)
                    dl1_container.set_timing_features(camera_geom[signal_pixels],
                                                      image[signal_pixels],
                                                      peak_time[signal_pixels],
                                                      hillas)
                    dl1_container.set_leakage(camera_geom, image, signal_pixels)
                    dl1_container.set_concentration(camera_geom, image, hillas)
                    dl1_container.n_islands = num_islands
                    dl1_container.wl = dl1_container.width / dl1_container.length
                    dl1_container.n_pixels = n_pixels

                    width = np.rad2deg(np.arctan2(dl1_container.width, foclen))
                    length = np.rad2deg(np.arctan2(dl1_container.length, foclen))
                    dl1_container.width = width
                    dl1_container.length = length
                    dl1_container.log_intensity = np.log10(dl1_container.intensity)

                    if set(dl1_params_input).intersection(disp_params):
                        disp_dx, disp_dy, disp_norm, disp_angle, disp_sign = disp(
                            dl1_container['x'].to_value(u.m),
                            dl1_container['y'].to_value(u.m),
                            params['src_x'][ii],
                            params['src_y'][ii]
                        )
                        dl1_container['disp_dx'] = disp_dx
                        dl1_container['disp_dy'] = disp_dy
                        dl1_container['disp_norm'] = disp_norm
                        dl1_container['disp_angle'] = disp_angle
                        dl1_container['disp_sign'] = disp_sign

                for p in parameters_to_update:
                    params[ii][p] = u.Quantity(dl1_container[p]).value

            output.root[dl1_params_lstcam_key][:] = params
def main():
    std_config = get_standard_config()

    if args.config_file is not None:
        config = replace_config(std_config, read_configuration_file(args.config_file))
    else:
        config = std_config

    print(config['tailcut'])

    foclen = OpticsDescription.from_name('LST').equivalent_focal_length
    cam_table = Table.read(args.input_file, path="instrument/telescope/camera/LSTCam")
    camera_geom = CameraGeometry.from_table(cam_table)

    dl1_container = DL1ParametersContainer()
    parameters_to_update = list(HillasParametersContainer().keys())
    parameters_to_update.extend([
        'concentration_cog',
        'concentration_core',
        'concentration_pixel',
        'leakage_intensity_width_1',
        'leakage_intensity_width_2',
        'leakage_pixels_width_1',
        'leakage_pixels_width_2',
        'n_islands',
        'intercept',
        'time_gradient',
        'n_pixels',
        'wl',
        'r',
    ])

    nodes_keys = get_dataset_keys(args.input_file)
    if args.noimage:
        nodes_keys.remove(dl1_images_lstcam_key)

    auto_merge_h5files([args.input_file], args.output_file, nodes_keys=nodes_keys)

    with tables.open_file(args.input_file, mode='r') as input:
        image_table = input.root[dl1_images_lstcam_key]

        with tables.open_file(args.output_file, mode='a') as output:
            params = output.root[dl1_params_lstcam_key].read()

            for ii, row in enumerate(image_table):
                if ii % 10000 == 0:
                    print(ii)

                image = row['image']
                peak_time = row['peak_time']
                signal_pixels = tailcuts_clean(camera_geom, image, **config['tailcut'])
                n_pixels = np.count_nonzero(signal_pixels)

                if n_pixels > 0:
                    num_islands, island_labels = number_of_islands(camera_geom, signal_pixels)
                    n_pixels_on_island = np.bincount(island_labels.astype(np.int64))
                    # first island is no-island and should not be considered
                    n_pixels_on_island[0] = 0
                    max_island_label = np.argmax(n_pixels_on_island)
                    signal_pixels[island_labels != max_island_label] = False

                    hillas = hillas_parameters(camera_geom[signal_pixels], image[signal_pixels])
                    dl1_container.fill_hillas(hillas)
                    dl1_container.set_timing_features(camera_geom[signal_pixels],
                                                      image[signal_pixels],
                                                      peak_time[signal_pixels],
                                                      hillas)
                    dl1_container.set_leakage(camera_geom, image, signal_pixels)
                    dl1_container.set_concentration(camera_geom, image, hillas)
                    dl1_container.n_islands = num_islands
                    dl1_container.wl = dl1_container.width / dl1_container.length
                    dl1_container.n_pixels = n_pixels

                    width = np.rad2deg(np.arctan2(dl1_container.width, foclen))
                    length = np.rad2deg(np.arctan2(dl1_container.length, foclen))
                    dl1_container.width = width
                    dl1_container.length = length
                    dl1_container.r = np.sqrt(dl1_container.x**2 + dl1_container.y**2)
                else:
                    # for consistency with r0_to_dl1.py:
                    for key in dl1_container.keys():
                        dl1_container[key] = u.Quantity(0, dl1_container.fields[key].unit)
                    dl1_container.width = u.Quantity(np.nan, u.m)
                    dl1_container.length = u.Quantity(np.nan, u.m)
                    dl1_container.wl = u.Quantity(np.nan, u.m)

                for p in parameters_to_update:
                    params[ii][p] = u.Quantity(dl1_container[p]).value

            output.root[dl1_params_lstcam_key][:] = params
def main():
    args = parser.parse_args()

    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)

    if Path(args.output_file).exists():
        log.critical(f'Output file {args.output_file} already exists')
        sys.exit(1)

    std_config = get_standard_config()
    if args.config_file is not None:
        config = replace_config(std_config, read_configuration_file(args.config_file))
    else:
        config = std_config

    with tables.open_file(args.input_file, 'r') as f:
        is_simulation = 'simulation' in f.root

    increase_nsb = False
    increase_psf = False
    if "image_modifier" in config:
        imconfig = config["image_modifier"]
        increase_nsb = imconfig["increase_nsb"]
        increase_psf = imconfig["increase_psf"]
        if increase_nsb or increase_psf:
            log.info(f"image_modifier configuration: {imconfig}")
        extra_noise_in_dim_pixels = imconfig["extra_noise_in_dim_pixels"]
        extra_bias_in_dim_pixels = imconfig["extra_bias_in_dim_pixels"]
        transition_charge = imconfig["transition_charge"]
        extra_noise_in_bright_pixels = imconfig["extra_noise_in_bright_pixels"]
        smeared_light_fraction = imconfig["smeared_light_fraction"]
        if increase_nsb or increase_psf:
            log.info("NOTE: Using the image_modifier options means images will "
                     "not be saved.")
            args.no_image = True

    if is_simulation:
        args.pedestal_cleaning = False

    if args.pedestal_cleaning:
        log.info("Pedestal cleaning")
        clean_method_name = 'tailcuts_clean_with_pedestal_threshold'
        sigma = config[clean_method_name]['sigma']
        pedestal_thresh = get_threshold_from_dl1_file(args.input_file, sigma)
        cleaning_params = get_cleaning_parameters(config, clean_method_name)
        pic_th, boundary_th, isolated_pixels, min_n_neighbors = cleaning_params
        log.info(f"Fraction of pixel cleaning thresholds above picture thr.: "
                 f"{np.sum(pedestal_thresh > pic_th) / len(pedestal_thresh):.3f}")
        picture_th = np.clip(pedestal_thresh, pic_th, None)
        log.info(f"Tailcut clean with pedestal threshold config used: "
                 f"{config['tailcuts_clean_with_pedestal_threshold']}")
    else:
        clean_method_name = 'tailcut'
        cleaning_params = get_cleaning_parameters(config, clean_method_name)
        picture_th, boundary_th, isolated_pixels, min_n_neighbors = cleaning_params
        log.info(f"Tailcut config used: {config['tailcut']}")

    use_dynamic_cleaning = False
    if 'apply' in config['dynamic_cleaning']:
        use_dynamic_cleaning = config['dynamic_cleaning']['apply']

    if use_dynamic_cleaning:
        THRESHOLD_DYNAMIC_CLEANING = config['dynamic_cleaning']['threshold']
        FRACTION_CLEANING_SIZE = config['dynamic_cleaning']['fraction_cleaning_intensity']
        log.info("Using dynamic cleaning for events with average size of the "
                 f"3 brightest pixels > {config['dynamic_cleaning']['threshold']} p.e.")
        log.info("Remove from image pixels which have charge below "
                 f"= {config['dynamic_cleaning']['fraction_cleaning_intensity']} * average size")

    use_only_main_island = True
    if "use_only_main_island" in config[clean_method_name]:
        use_only_main_island = config[clean_method_name]["use_only_main_island"]

    delta_time = None
    if "delta_time" in config[clean_method_name]:
        delta_time = config[clean_method_name]["delta_time"]

    subarray_info = SubarrayDescription.from_hdf(args.input_file)
    tel_id = config["allowed_tels"][0] if "allowed_tels" in config else 1
    optics = subarray_info.tel[tel_id].optics
    camera_geom = subarray_info.tel[tel_id].camera.geometry

    dl1_container = DL1ParametersContainer()
    parameters_to_update = [
        'intensity',
        'x',
        'y',
        'r',
        'phi',
        'length',
        'width',
        'psi',
        'skewness',
        'kurtosis',
        'concentration_cog',
        'concentration_core',
        'concentration_pixel',
        'leakage_intensity_width_1',
        'leakage_intensity_width_2',
        'leakage_pixels_width_1',
        'leakage_pixels_width_2',
        'n_islands',
        'intercept',
        'time_gradient',
        'n_pixels',
        'wl',
        'log_intensity'
    ]

    nodes_keys = get_dataset_keys(args.input_file)
    if args.no_image:
        nodes_keys.remove(dl1_images_lstcam_key)

    metadata = global_metadata()

    with tables.open_file(args.input_file, mode='r') as infile:
        image_table = infile.root[dl1_images_lstcam_key]
        dl1_params_input = infile.root[dl1_params_lstcam_key].colnames

        disp_params = {'disp_dx', 'disp_dy', 'disp_norm', 'disp_angle', 'disp_sign'}
        if set(dl1_params_input).intersection(disp_params):
            parameters_to_update.extend(disp_params)

        uncertainty_params = {'width_uncertainty', 'length_uncertainty'}
        if set(dl1_params_input).intersection(uncertainty_params):
            parameters_to_update.extend(uncertainty_params)

        if increase_nsb:
            rng = np.random.default_rng(infile.root.dl1.event.subarray.trigger.col('obs_id')[0])
        if increase_psf:
            set_numba_seed(infile.root.dl1.event.subarray.trigger.col('obs_id')[0])

        image_mask_save = not args.no_image and 'image_mask' in infile.root[dl1_images_lstcam_key].colnames

        with tables.open_file(args.output_file, mode='a', filters=HDF5_ZSTD_FILTERS) as outfile:
            copy_h5_nodes(infile, outfile, nodes=nodes_keys)
            add_source_filenames(outfile, [args.input_file])

            params = outfile.root[dl1_params_lstcam_key].read()
            if image_mask_save:
                image_mask = outfile.root[dl1_images_lstcam_key].col('image_mask')

            # need container to use lstchain.io.add_global_metadata and lstchain.io.add_config_metadata
            for k, item in metadata.as_dict().items():
                outfile.root[dl1_params_lstcam_key].attrs[k] = item
            outfile.root[dl1_params_lstcam_key].attrs["config"] = str(config)

            for ii, row in enumerate(image_table):
                dl1_container.reset()

                image = row['image']
                peak_time = row['peak_time']

                if increase_nsb:
                    # Add noise in pixels, to adjust MC to data noise levels.
                    # TO BE DONE: in case of "pedestal cleaning" (not used now
                    # in MC) we should recalculate picture_th above!
                    image = add_noise_in_pixels(rng, image,
                                                extra_noise_in_dim_pixels,
                                                extra_bias_in_dim_pixels,
                                                transition_charge,
                                                extra_noise_in_bright_pixels)
                if increase_psf:
                    image = random_psf_smearer(
                        image, smeared_light_fraction,
                        camera_geom.neighbor_matrix_sparse.indices,
                        camera_geom.neighbor_matrix_sparse.indptr)

                signal_pixels = tailcuts_clean(camera_geom, image,
                                               picture_th, boundary_th,
                                               isolated_pixels, min_n_neighbors)

                n_pixels = np.count_nonzero(signal_pixels)

                if n_pixels > 0:
                    # if delta_time has been set, we require at least one
                    # neighbor within delta_time to accept a pixel in the image:
                    if delta_time is not None:
                        cleaned_pixel_times = peak_time
                        # makes sure only signal pixels are used in the time
                        # check:
                        cleaned_pixel_times[~signal_pixels] = np.nan
                        new_mask = apply_time_delta_cleaning(camera_geom,
                                                             signal_pixels,
                                                             cleaned_pixel_times,
                                                             1, delta_time)
                        signal_pixels = new_mask

                    if use_dynamic_cleaning:
                        new_mask = apply_dynamic_cleaning(image, signal_pixels,
                                                          THRESHOLD_DYNAMIC_CLEANING,
                                                          FRACTION_CLEANING_SIZE)
                        signal_pixels = new_mask

                    # count the number of islands after all of the image cleaning steps
                    num_islands, island_labels = number_of_islands(camera_geom, signal_pixels)
                    dl1_container.n_islands = num_islands

                    n_pixels_on_island = np.bincount(island_labels.astype(np.int64))
                    # first island is no-island and should not be considered
                    n_pixels_on_island[0] = 0
                    max_island_label = np.argmax(n_pixels_on_island)
                    if use_only_main_island:
                        signal_pixels[island_labels != max_island_label] = False

                    # count the surviving pixels
                    n_pixels = np.count_nonzero(signal_pixels)
                    dl1_container.n_pixels = n_pixels

                    if n_pixels > 0:
                        parametrize_image(
                            image=image,
                            peak_time=peak_time,
                            signal_pixels=signal_pixels,
                            camera_geometry=camera_geom,
                            focal_length=optics.equivalent_focal_length,
                            dl1_container=dl1_container,
                        )

                        if set(dl1_params_input).intersection(disp_params):
                            disp_dx, disp_dy, disp_norm, disp_angle, disp_sign = disp(
                                dl1_container['x'].to_value(u.m),
                                dl1_container['y'].to_value(u.m),
                                params['src_x'][ii],
                                params['src_y'][ii])
                            dl1_container['disp_dx'] = disp_dx
                            dl1_container['disp_dy'] = disp_dy
                            dl1_container['disp_norm'] = disp_norm
                            dl1_container['disp_angle'] = disp_angle
                            dl1_container['disp_sign'] = disp_sign

                for p in parameters_to_update:
                    params[ii][p] = u.Quantity(dl1_container[p]).value

                if image_mask_save:
                    image_mask[ii] = signal_pixels

            outfile.root[dl1_params_lstcam_key][:] = params
            if image_mask_save:
                outfile.root[dl1_images_lstcam_key].modify_column(
                    colname='image_mask', column=image_mask)

    write_metadata(metadata, args.output_file)
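# Hedged sketch of the configuration keys read by the main() above. The key
# names are exactly the ones accessed in the code: the four cleaning thresholds
# under 'tailcut' match the ctapipe tailcuts_clean keyword arguments (earlier
# versions unpack config['tailcut'] directly into that call), while
# 'use_only_main_island' and 'delta_time' are optional extras read only by the
# later versions. The numerical values below are placeholders for illustration,
# not recommended settings.
example_config = {
    "tailcut": {
        "picture_thresh": 8,                  # read via get_cleaning_parameters
        "boundary_thresh": 4,
        "keep_isolated_pixels": False,
        "min_number_picture_neighbors": 2,
        "use_only_main_island": True,         # optional; main() defaults to True
        "delta_time": 2,                      # optional; enables apply_time_delta_cleaning
    },
    "dynamic_cleaning": {
        "apply": True,
        "threshold": 267,
        "fraction_cleaning_intensity": 0.03,
    },
    "image_modifier": {
        "increase_nsb": False,
        "extra_noise_in_dim_pixels": 0.0,
        "extra_bias_in_dim_pixels": 0.0,
        "transition_charge": 8,
        "extra_noise_in_bright_pixels": 0.0,
        "increase_psf": False,
        "smeared_light_fraction": 0.0,
    },
}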
def main():
    std_config = get_standard_config()

    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)

    if args.config_file is not None:
        config = replace_config(std_config, read_configuration_file(args.config_file))
    else:
        config = std_config

    if args.pedestal_cleaning:
        print("Pedestal cleaning")
        clean_method_name = 'tailcuts_clean_with_pedestal_threshold'
        sigma = config[clean_method_name]['sigma']
        pedestal_thresh = get_threshold_from_dl1_file(args.input_file, sigma)
        cleaning_params = get_cleaning_parameters(config, clean_method_name)
        pic_th, boundary_th, isolated_pixels, min_n_neighbors = cleaning_params
        log.info(f"Fraction of pixel cleaning thresholds above picture thr.: "
                 f"{np.sum(pedestal_thresh > pic_th) / len(pedestal_thresh):.3f}")
        picture_th = np.clip(pedestal_thresh, pic_th, None)
        log.info(f"Tailcut clean with pedestal threshold config used: "
                 f"{config['tailcuts_clean_with_pedestal_threshold']}")
    else:
        clean_method_name = 'tailcut'
        cleaning_params = get_cleaning_parameters(config, clean_method_name)
        picture_th, boundary_th, isolated_pixels, min_n_neighbors = cleaning_params
        log.info(f"Tailcut config used: {config['tailcut']}")

    use_only_main_island = True
    if "use_only_main_island" in config[clean_method_name]:
        use_only_main_island = config[clean_method_name]["use_only_main_island"]

    delta_time = None
    if "delta_time" in config[clean_method_name]:
        delta_time = config[clean_method_name]["delta_time"]

    foclen = OpticsDescription.from_name('LST').equivalent_focal_length
    cam_table = Table.read(args.input_file, path="instrument/telescope/camera/LSTCam")
    camera_geom = CameraGeometry.from_table(cam_table)

    dl1_container = DL1ParametersContainer()
    parameters_to_update = [
        'intensity',
        'x',
        'y',
        'r',
        'phi',
        'length',
        'width',
        'psi',
        'skewness',
        'kurtosis',
        'concentration_cog',
        'concentration_core',
        'concentration_pixel',
        'leakage_intensity_width_1',
        'leakage_intensity_width_2',
        'leakage_pixels_width_1',
        'leakage_pixels_width_2',
        'n_islands',
        'intercept',
        'time_gradient',
        'n_pixels',
        'wl',
        'log_intensity'
    ]

    nodes_keys = get_dataset_keys(args.input_file)
    if args.noimage:
        nodes_keys.remove(dl1_images_lstcam_key)

    auto_merge_h5files([args.input_file], args.output_file, nodes_keys=nodes_keys)

    with tables.open_file(args.input_file, mode='r') as input:
        image_table = input.root[dl1_images_lstcam_key]
        dl1_params_input = input.root[dl1_params_lstcam_key].colnames

        disp_params = {'disp_dx', 'disp_dy', 'disp_norm', 'disp_angle', 'disp_sign'}
        if set(dl1_params_input).intersection(disp_params):
            parameters_to_update.extend(disp_params)

        with tables.open_file(args.output_file, mode='a') as output:
            params = output.root[dl1_params_lstcam_key].read()

            for ii, row in enumerate(image_table):
                dl1_container.reset()

                image = row['image']
                peak_time = row['peak_time']
                signal_pixels = tailcuts_clean(camera_geom, image,
                                               picture_th, boundary_th,
                                               isolated_pixels, min_n_neighbors)
                n_pixels = np.count_nonzero(signal_pixels)

                if n_pixels > 0:
                    num_islands, island_labels = number_of_islands(camera_geom, signal_pixels)
                    n_pixels_on_island = np.bincount(island_labels.astype(np.int64))
                    # first island is no-island and should not be considered
                    n_pixels_on_island[0] = 0
                    max_island_label = np.argmax(n_pixels_on_island)
                    if use_only_main_island:
                        signal_pixels[island_labels != max_island_label] = False

                    # if delta_time has been set, we require at least one
                    # neighbor within delta_time to accept a pixel in the image:
                    if delta_time is not None:
                        cleaned_pixel_times = peak_time
                        # makes sure only signal pixels are used in the time
                        # check:
                        cleaned_pixel_times[~signal_pixels] = np.nan
                        new_mask = apply_time_delta_cleaning(camera_geom,
                                                             signal_pixels,
                                                             cleaned_pixel_times,
                                                             1, delta_time)
                        signal_pixels = new_mask

                    # count the surviving pixels
                    n_pixels = np.count_nonzero(signal_pixels)

                    if n_pixels > 0:
                        hillas = hillas_parameters(camera_geom[signal_pixels],
                                                   image[signal_pixels])
                        dl1_container.fill_hillas(hillas)
                        dl1_container.set_timing_features(camera_geom[signal_pixels],
                                                          image[signal_pixels],
                                                          peak_time[signal_pixels],
                                                          hillas)
                        dl1_container.set_leakage(camera_geom, image, signal_pixels)
                        dl1_container.set_concentration(camera_geom, image, hillas)
                        dl1_container.n_islands = num_islands
                        dl1_container.wl = dl1_container.width / dl1_container.length
                        dl1_container.n_pixels = n_pixels

                        width = np.rad2deg(np.arctan2(dl1_container.width, foclen))
                        length = np.rad2deg(np.arctan2(dl1_container.length, foclen))
                        dl1_container.width = width
                        dl1_container.length = length
                        dl1_container.log_intensity = np.log10(dl1_container.intensity)

                        if set(dl1_params_input).intersection(disp_params):
                            disp_dx, disp_dy, disp_norm, disp_angle, disp_sign = disp(
                                dl1_container['x'].to_value(u.m),
                                dl1_container['y'].to_value(u.m),
                                params['src_x'][ii],
                                params['src_y'][ii])
                            dl1_container['disp_dx'] = disp_dx
                            dl1_container['disp_dy'] = disp_dy
                            dl1_container['disp_norm'] = disp_norm
                            dl1_container['disp_angle'] = disp_angle
                            dl1_container['disp_sign'] = disp_sign

                for p in parameters_to_update:
                    params[ii][p] = u.Quantity(dl1_container[p]).value

            output.root[dl1_params_lstcam_key][:] = params
def find_safe_threshold_from_dl1_file(dl1_path, config_file=None, max_fraction=0.04):
    """
    Obtain an integer value for the picture threshold such that at most a
    fraction max_fraction of pixels have a higher value resulting from the
    "clean_with_pedestal_threshold" cleaning approach. That approach increases
    the cleaning picture threshold of a pixel to, say, 2.5 standard deviations
    (or the number in "sigma" below) above its pedestal mean, hence pixels
    illuminated by stars get a higher threshold, and so we avoid too many
    spurious signals from the starlight fluctuations. The downside of the
    method is that it introduces non-uniformities in the camera response for
    the real data, and therefore data-MC discrepancies (there are no stars in
    MC and all pixels have the same cleaning settings).

    Here we try to calculate what the "base" picture threshold should be (to
    use both in MC and data) so that at most a given fraction "max_fraction"
    of the camera gets an increased threshold via the
    clean_with_pedestal_threshold condition. In this way the number /
    extension of camera inhomogeneities in the real data is limited. By
    default max_fraction is 0.04, which e.g. gives a picture threshold around
    8 for the Crab field (in no-moon conditions).

    Note: we want cleaning settings for a whole run, so we will have to run
    the function over sub-runs and average the values, or make the function
    able to read multiple DL1 files.

    Parameters
    ----------
    dl1_path: real data DL1 file, to have access to the monitoring table
        where we can read the pedestal bias & std dev estimated with
        interleaved pedestal events
    config_file: must be the one used for the analysis of the real data; in
        particular, it has to contain the tailcuts_clean_with_pedestal_threshold
        setting (sigma) that one wants to use
    max_fraction: maximum fraction of camera pixels that are allowed to get a
        picture threshold above the base one. That is, we calculate here the
        base picture threshold that will ensure that the condition is
        fulfilled.

    Returns
    -------
    (scalar) the value of the picture threshold that has to be used in data
    and MC to ensure that no more than max_fraction of the camera gets an
    increased value via tailcuts_clean_with_pedestal_threshold

    """
    std_config = get_standard_config()
    if config_file is not None:
        config = replace_config(std_config, read_configuration_file(config_file))
    else:
        config = std_config

    cleaning_method = 'tailcuts_clean_with_pedestal_threshold'
    picture_th, _, _, _ = get_cleaning_parameters(config, cleaning_method)
    sigma = config[cleaning_method]['sigma']

    # Obtain the picture thresholds of pixels based on the "clean with
    # pedestal threshold" method:
    pic_threshold = get_threshold_from_dl1_file(dl1_path, sigma)
    threshold_sorted = np.sort(pic_threshold)

    # find the value new_threshold above which a fraction max_fraction of
    # pixels lies:
    index = int(len(threshold_sorted) * (1 - max_fraction))
    new_threshold = threshold_sorted[index]

    # Return the first integer value above new_threshold (to avoid too many
    # different cleaning settings in different runs):
    return np.ceil(new_threshold)
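# The docstring above notes that cleaning settings are wanted per run, so the
# function should be evaluated on sub-run DL1 files and the results combined.
# A minimal usage sketch under that assumption: the file pattern and the
# "mean, then round up" policy are illustrative choices, not a prescribed
# procedure from the repository.
import glob

import numpy as np

subrun_files = sorted(glob.glob("dl1_LST-1.Run03265.*.h5"))   # hypothetical sub-run DL1 files
config_file = "lstchain_standard_config.json"                 # hypothetical analysis config

per_subrun_thresholds = [
    find_safe_threshold_from_dl1_file(path, config_file=config_file)
    for path in subrun_files
]
run_level_picture_threshold = np.ceil(np.mean(per_subrun_thresholds))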