def main():
    """Convert an R0 data file to a DL1 HDF5 file, applying the DRS4
    pedestal, charge/time calibration and pointing information given
    on the command line.

    Uses the module-level ``args`` produced by the script's argparse parser.
    """
    os.makedirs(args.outdir, exist_ok=True)

    dl0_to_dl1.allowed_tels = {1, 2, 3, 4}
    # dl1_<input stem>.h5 inside the requested output directory
    output_filename = args.outdir + '/dl1_' + os.path.basename(
        args.infile).rsplit('.', 1)[0] + '.h5'

    config = {}
    if args.config_file is not None:
        try:
            config = read_configuration_file(args.config_file)
        except Exception:
            # BUG FIX: the original used `except ("…message…"):`, i.e. it tried
            # to catch a *string*. In Python 3 that raises TypeError as soon as
            # an exception occurs, so the intended best-effort fallback never
            # happened. Keep the original intent: report and fall back to the
            # standard configuration (empty custom config).
            print("Custom configuration could not be loaded !!!")

    config["max_events"] = args.max_events

    dl0_to_dl1.r0_to_dl1(args.infile,
                         output_filename=output_filename,
                         custom_config=config,
                         pedestal_path=args.pedestal_path,
                         calibration_path=args.calibration_path,
                         time_calibration_path=args.time_calibration_path,
                         pointing_file_path=args.pointing_file_path,
                         ucts_t0_dragon=args.ucts_t0_dragon,
                         dragon_counter0=args.dragon_counter0,
                         ucts_t0_tib=args.ucts_t0_tib,
                         tib_counter0=args.tib_counter0)
def main():
    """Run the r0->dl1 conversion on the given (or default test) input file."""
    args = parser.parse_args()

    # using a default of None and only using get_dataset_path here
    # prevents downloading gamma_test_large when an input file is actually given
    # or just --help is called.
    if args.input_file is None:
        args.input_file = get_dataset_path('gamma_test_large.simtel.gz')

    dest_dir = args.output_dir.absolute()
    dest_dir.mkdir(exist_ok=True, parents=True)
    dest_file = dest_dir / r0_to_dl1_filename(args.input_file.name)

    r0_to_dl1.allowed_tels = {1, 2, 3, 4}

    custom_config = {}
    if args.config_file is not None:
        try:
            custom_config = read_configuration_file(args.config_file.absolute())
        except Exception as e:
            log.error(f'Config file {args.config_file} could not be read: {e}')
            sys.exit(1)

    r0_to_dl1.r0_to_dl1(
        args.input_file,
        output_filename=dest_file,
        custom_config=custom_config,
    )
def main():
    """Recompute DL1 parameters from stored DL1 images (dl1ab-style pass).

    Copies the input HDF5 nodes to the output file, then re-runs tailcut
    cleaning + Hillas parametrization on every stored image and overwrites
    the parameters table in the output file in place.
    """
    std_config = get_standard_config()
    if args.config_file is not None:
        # user config overrides matching keys of the standard config
        config = replace_config(std_config, read_configuration_file(args.config_file))
    else:
        config = std_config
    print(config['tailcut'])

    geom = CameraGeometry.from_name('LSTCam-002')
    foclen = OpticsDescription.from_name('LST').equivalent_focal_length

    # single container reused for every event
    dl1_container = DL1ParametersContainer()
    parameters_to_update = list(HillasParametersContainer().keys())
    parameters_to_update.extend(['wl', 'r', 'leakage', 'n_islands', 'intercept', 'time_gradient'])

    nodes_keys = get_dataset_keys(args.input_file)
    if args.noimage:
        # drop the (large) image node from the copy when requested
        nodes_keys.remove(dl1_images_lstcam_key)

    # copy everything (selected nodes) to the output file first
    auto_merge_h5files([args.input_file], args.output_file, nodes_keys=nodes_keys)

    with tables.open_file(args.input_file, mode='r') as input:
        image_table = input.root[dl1_images_lstcam_key]
        with tables.open_file(args.output_file, mode='a') as output:
            # read the whole parameters table into memory, patch rows, write back
            params = output.root[dl1_params_lstcam_key].read()
            for ii, row in enumerate(image_table):
                if ii%10000 == 0:
                    print(ii)  # progress indicator
                image = row['image']
                pulse_time = row['pulse_time']
                signal_pixels = tailcuts_clean(geom, image, **config['tailcut'])
                if image[signal_pixels].shape[0] > 0:
                    num_islands, island_labels = number_of_islands(geom, signal_pixels)
                    hillas = hillas_parameters(geom[signal_pixels], image[signal_pixels])
                    dl1_container.fill_hillas(hillas)
                    dl1_container.set_timing_features(geom[signal_pixels],
                                                      image[signal_pixels],
                                                      pulse_time[signal_pixels],
                                                      hillas)
                    dl1_container.set_leakage(geom, image, signal_pixels)
                    dl1_container.n_islands = num_islands
                    dl1_container.wl = dl1_container.width / dl1_container.length
                    # convert width/length from camera-frame length to angle (deg)
                    width = np.rad2deg(np.arctan2(dl1_container.width, foclen))
                    length = np.rad2deg(np.arctan2(dl1_container.length, foclen))
                    dl1_container.width = width.value
                    dl1_container.length = length.value
                    dl1_container.r = np.sqrt(dl1_container.x**2 + dl1_container.y**2)
                    for p in parameters_to_update:
                        params[ii][p] = Quantity(dl1_container[p]).value
                else:
                    # no surviving pixels: zero out the recomputed parameters
                    for p in parameters_to_update:
                        params[ii][p] = 0
            # overwrite the parameters table in place
            output.root[dl1_params_lstcam_key][:] = params
def main():
    """Run r0->dl1 on a simtel file with true-p.e. images enabled and
    produce a PDF of charge-reconstruction benchmark plots."""
    ctaplot.set_style()

    output_dir = args.output_dir.absolute()
    output_dir.mkdir(exist_ok=True, parents=True)
    output_file = output_dir / r0_to_dl1_filename(args.input_file.name)

    r0_to_dl1.allowed_tels = {1, 2, 3, 4}

    if args.config_file is not None:
        try:
            config = read_configuration_file(args.config_file.absolute())
        except Exception as e:
            log.error(f'Config file {args.config_file} could not be read: {e}')
            sys.exit(1)
    else:
        config = get_standard_config()

    # This benchmark needs true pe image
    config['write_pe_image'] = True

    # directly jump to the benchmarks if the dl1 file already exists
    if not os.path.exists(output_file):
        r0_to_dl1.r0_to_dl1(
            args.input_file,
            output_filename=output_file,
            custom_config=config,
        )

    with tables.open_file(output_file) as f:
        sim_table = Table(f.root.dl1.event.simulation.LST_LSTCam.read())
        im_table = Table(f.root.dl1.event.telescope.image.LST_LSTCam.read())

    # the benchmark plots pair true and reconstructed images row by row,
    # so both tables must describe the same event set
    if len(sim_table) != len(im_table):
        raise ValueError(
            'the number of events with simulation info is not equal to the number of dl1 events'
        )

    pdf_filename = os.path.join(
        args.output_dir,
        f"charge_bench_{os.path.basename(output_file).replace('.h5', '')}.pdf")

    # one figure per page: spectrum, true-vs-reco, charge resolution
    with PdfPages(pdf_filename) as pdf:
        plot_pixels_pe_spectrum(sim_table['true_image'], im_table['image'])
        plt.tight_layout()
        pdf.savefig()
        plt.close()

        plot_photoelectron_true_reco(sim_table['true_image'], im_table['image'])
        plt.tight_layout()
        pdf.savefig()
        plt.close()

        ax = plot_charge_resolution(sim_table['true_image'], im_table['image'])
        ax.set_ylim(-1, 10)
        plt.tight_layout()
        pdf.savefig()
        plt.close()
def get_cleaning_config(config_file=None):
    """Return the tailcut-cleaning parameter dict from a configuration.

    Loads the standard configuration when *config_file* is None, otherwise
    reads the given file. Keys not understood by the cleaning routine
    ('use_only_main_island', 'delta_time') are stripped from the result.
    """
    if config_file is None:
        full_config = get_standard_config()
    else:
        full_config = read_configuration_file(config_file)

    params = full_config["tailcut"]
    # drop keys that tailcuts_clean does not accept
    for unwanted, fallback in (("use_only_main_island", True), ("delta_time", None)):
        params.pop(unwanted, fallback)
    return params
def main():
    """Compute the DRS4 time-calibration coefficients from a set of runs.

    Iterates over all files matching the input glob, feeds every event to a
    TimeCorrectionCalculate instance and writes the fitted corrections to
    the output file at the end.
    """
    args = parser.parse_args()

    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)

    log.info(f'Input file: {args.input_file}')
    log.info(f'Number of events in each subrun: {args.max_events}')
    path_list = sorted(glob.glob(args.input_file))
    log.info(f'list of files: {path_list}')

    config_dic = {}
    # read the configuration file
    if args.config is not None:
        config_dic = read_configuration_file(args.config)
    config = Config(config_dic)

    # source settings derived from command-line options; merged on top of the
    # file-based configuration so CLI options take effect
    source_config = Config({
        "LSTEventSource": {
            "max_events": args.max_events,
            "pointing_information": False,
            "default_trigger_type": 'tib',
            "use_flatfield_heuristic": args.use_flatfield_heuristic,
            "EventTimeCalculator": {
                "run_summary_path": args.run_summary_path,
            },
            "LSTR0Corrections": {
                "drs4_pedestal_path": args.pedestal_file,
            }
        }
    })
    config.merge(source_config)

    # open the first file only to obtain the subarray description
    with EventSource(path_list[0]) as s:
        subarray = s.subarray

    timeCorr = TimeCorrectionCalculate(calib_file_path=args.output_file,
                                       config=config,
                                       subarray=subarray)

    for i, path in enumerate(path_list):
        log.info(f'File {i + 1} out of {len(path_list)}')
        log.info(f'Processing: {path}')
        reader = EventSource(input_url=path, config=config)
        for event in tqdm(reader, disable=args.no_progress):
            timeCorr.calibrate_peak_time(event)

    # write output
    timeCorr.finalize()
def main():
    """Compute DRS4 time-calibration coefficients from a set of runs.

    Variant that builds the TimeCorrectionCalculate lazily from the first
    file's subarray and logs progress every 5000 events.
    """
    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)

    log.info(f'Input file: {args.input_file}')
    log.info(f'Number of events in each subrun: {args.max_events}')
    path_list = sorted(glob.glob(args.input_file))
    log.info(f'list of files: {path_list}')

    config_dic = {}
    # read the configuration file
    if args.config_file is not None:
        config_dic = read_configuration_file(args.config_file)
    config = Config(config_dic)

    # CLI-derived source settings override the file configuration
    source_config = Config({
        "LSTEventSource": {
            "max_events": args.max_events,
            "default_trigger_type": 'tib',
            "EventTimeCalculator": {
                "run_summary_path": args.run_summary_path,
            },
            "LSTR0Corrections": {
                "drs4_pedestal_path": args.pedestal_file,
            }
        }
    })
    config.merge(source_config)

    for i, path in enumerate(path_list):
        log.info(f'File {i+1} out of {len(path_list)}')
        log.info(f'Processing: {path}')
        reader = EventSource(input_url=path, config=config)
        if i == 0:
            # calculator needs the subarray, available once the first
            # source is open; reused for all subsequent files
            timeCorr = TimeCorrectionCalculate(
                calib_file_path=args.output_file,
                config=config,
                subarray=reader.subarray)
        for event in reader:
            if event.index.event_id % 5000 == 0:
                log.info(f'event id = {event.index.event_id}')
            timeCorr.calibrate_peak_time(event)

    # write output
    timeCorr.finalize()
def main():
    """Parse command-line arguments and train the DL1->DL2 models."""
    args = parser.parse_args()

    # Train the models
    custom_config = (
        read_configuration_file(args.config_file)
        if args.config_file is not None
        else {}
    )

    dl1_to_dl2.build_models(args.gammafile,
                            args.protonfile,
                            save_models=args.save_models,
                            path_models=args.path_models,
                            custom_config=custom_config)
def main():
    """Compute DRS4 time-calibration coefficients from a set of runs,
    applying DRS4 pedestal corrections and selecting high-signal events.

    Uses the module-level ``args`` produced by the script's parser.
    """
    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)

    log.info(f'Input file: {args.input_file}')
    log.info(f'Number of events in each subrun: {args.max_events}')
    path_list = sorted(glob.glob(args.input_file))
    log.info(f'list of files: {path_list}')

    config_dic = {}
    if args.config_file is not None:
        try:
            config_dic = read_configuration_file(args.config_file)
        except Exception:
            # BUG FIX: the original `except ("…message…"):` tried to catch a
            # *string*, which raises TypeError in Python 3 instead of falling
            # back. Report the problem and continue with defaults, as intended.
            log.error("Custom configuration could not be loaded !!!")
    # read the configuration file
    config = Config(config_dic)

    # declare the pedestal calibrator
    lst_r0 = LSTR0Corrections(pedestal_path=args.pedestal_file, config=config)

    # open the first file to obtain the subarray for the time corrector
    reader = LSTEventSource(input_url=path_list[0], max_events=args.max_events)

    # declare the time corrector
    timeCorr = TimeCorrectionCalculate(calib_file_path=args.output_file,
                                       config=config,
                                       subarray=reader.subarray)
    tel_id = timeCorr.tel_id

    for i, path in enumerate(path_list):
        log.info(f'File {i+1} out of {len(path_list)}')
        log.info(f'Processing: {path}')
        reader = LSTEventSource(input_url=path, max_events=args.max_events)
        for event in reader:
            if event.index.event_id % 5000 == 0:
                log.info(f'event id = {event.index.event_id}')
            lst_r0.calibrate(event)

            # Cut in signal to avoid cosmic events
            if event.r1.tel[tel_id].trigger_type == 4 or (np.median(
                    np.sum(event.r1.tel[tel_id].waveform[0], axis=1)) > 300):
                timeCorr.calibrate_peak_time(event)

    # write output
    timeCorr.finalize()
def main():
    """Train the DL1->DL2 models from the gamma and proton DL1 files given
    via the module-level ``args``."""
    # Train the models
    config = {}
    if args.config_file is not None:
        try:
            config = read_configuration_file(args.config_file)
        except Exception:
            # BUG FIX: the original `except("…message…"):` tried to catch a
            # *string*; in Python 3 that raises TypeError instead of falling
            # back. Report and continue with the standard configuration.
            print("Custom configuration could not be loaded !!!")

    dl1_to_dl2.build_models(args.gammafile,
                            args.protonfile,
                            save_models=args.storerf,
                            path_models=args.path_models,
                            custom_config=config,
                            )
def main():
    """Convert an R0/simtel input file to a DL1 HDF5 file using an optional
    custom configuration (module-level ``args``)."""
    os.makedirs(args.outdir, exist_ok=True)

    dl0_to_dl1.allowed_tels = {1, 2, 3, 4}
    output_filename = args.outdir + '/dl1_' + os.path.basename(
        args.infile).rsplit('.', 1)[0] + '.h5'

    config = {}
    if args.config_file is not None:
        try:
            config = read_configuration_file(os.path.abspath(args.config_file))
        except Exception:
            # BUG FIX: the original `except ("…message…"):` tried to catch a
            # *string*; in Python 3 that raises TypeError instead of falling
            # back. Report and continue with the standard configuration.
            print("Custom configuration could not be loaded !!!")

    dl0_to_dl1.r0_to_dl1(args.infile,
                         output_filename=output_filename,
                         custom_config=config)
def main():
    """Convert a single R0 file to DL1, deriving the output file name from
    the LST run-file naming convention when possible."""
    output_dir = args.output_dir.absolute()
    output_dir.mkdir(exist_ok=True)

    if not args.input_file.is_file():
        log.error('Input file does not exist or is not a file')
        sys.exit(1)

    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)

    r0_to_dl1.allowed_tels = {1, 2, 3, 4}

    # test if this matches data file name pattern
    try:
        run = parse_r0_filename(args.input_file)
        output_filename = output_dir / run_to_dl1_filename(run.tel_id,
                                                           run.run, run.subrun)
    except ValueError:
        # for arbitrary filenames, including mc
        output_filename = output_dir / r0_to_dl1_filename(args.input_file.name)

    config = {}
    if args.config_file is not None:
        try:
            config = read_configuration_file(args.config_file)
        except Exception as e:
            log.error(f'Configuration file could not be read: {e}')
            sys.exit(1)
    # CLI event limit supersedes any file-based setting
    config["max_events"] = args.max_events

    r0_to_dl1.r0_to_dl1(args.input_file,
                        output_filename=output_filename,
                        custom_config=config,
                        pedestal_path=args.pedestal_file,
                        calibration_path=args.calibration_file,
                        time_calibration_path=args.time_calibration_file,
                        pointing_file_path=args.pointing_file,
                        ucts_t0_dragon=args.ucts_t0_dragon,
                        dragon_counter0=args.dragon_counter0,
                        ucts_t0_tib=args.ucts_t0_tib,
                        tib_counter0=args.tib_counter0)
def main():
    """Convert the input file to a DL1 HDF5 file in the requested directory."""
    dest_dir = args.output_dir.absolute()
    dest_dir.mkdir(exist_ok=True)
    dest_file = dest_dir / r0_to_dl1_filename(args.input_file.name)

    r0_to_dl1.allowed_tels = {1, 2, 3, 4}

    custom_config = {}
    if args.config_file is not None:
        try:
            custom_config = read_configuration_file(args.config_file.absolute())
        except Exception as e:
            log.error(f'Config file {args.config_file} could not be read: {e}')
            sys.exit(1)

    r0_to_dl1.r0_to_dl1(
        args.input_file,
        output_filename=dest_file,
        custom_config=custom_config,
    )
def get_dl1b_tailcut(dl1a_img, dl1a_pulse, config_path, use_main_island=True):
    """Compute DL1b (Hillas & friends) parameters for one event.

    Parameters
    ----------
    dl1a_img : array
        Calibrated pixel charges of the event.
    dl1a_pulse : array
        Pixel pulse/peak times of the event.
    config_path : str
        Path of the configuration file providing the "tailcut" settings.
    use_main_island : bool
        If True, keep only the largest island after cleaning.

    Returns
    -------
    DL1ParametersContainer
        Filled container (left at defaults if no pixel survives cleaning).

    Notes
    -----
    Relies on the module-level ``camera_geometry`` global being set by the
    caller (see create_dl1b_tailcut).
    """
    cleaning_method = tailcuts_clean
    config = read_configuration_file(config_path)
    cleaning_parameters = config["tailcut"]

    dl1_container = DL1ParametersContainer()
    image = dl1a_img
    pulse_time = dl1a_pulse

    signal_pixels = cleaning_method(camera_geometry, image, **cleaning_parameters)
    n_pixels = np.count_nonzero(signal_pixels)

    if n_pixels > 0:
        # check the number of islands
        num_islands, island_labels = number_of_islands(camera_geometry, signal_pixels)
        if use_main_island:
            # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in
            # 1.24; it was a plain alias of the builtin int, so this is
            # behavior-identical while working on modern NumPy.
            n_pixels_on_island = np.bincount(island_labels.astype(int))
            # first island is no-island and should not be considered
            n_pixels_on_island[0] = 0
            max_island_label = np.argmax(n_pixels_on_island)
            signal_pixels[island_labels != max_island_label] = False

        hillas = hillas_parameters(camera_geometry[signal_pixels],
                                   image[signal_pixels])
        dl1_container.fill_hillas(hillas)
        dl1_container.set_timing_features(camera_geometry[signal_pixels],
                                          image[signal_pixels],
                                          pulse_time[signal_pixels],
                                          hillas)
        set_converted_hillas_param(dl1_container, dl1_container.width,
                                   dl1_container.length)
        set_image_param(dl1_container, image, signal_pixels, hillas,
                        n_pixels, num_islands)
    return dl1_container
def create_dl1b_tailcut(dl1_path, output_file, config_file):
    """Re-parametrize all events of a DL1 file with tailcut cleaning and
    write the resulting DL1b tables (plus cleaning metadata) to output_file.
    """
    # shared with get_dl1b_tailcut through a module-level global
    global camera_geometry
    camera_geometry = read_single_camera_geometry(dl1_path, "LSTCam")

    obs_id_array, event_id_array = get_id_info(dl1_path)
    trig_type_array, ucts_trig_type_array, trig_time_array = get_trigger_info(
        dl1_path)
    dragon_t, ucts_t, tib_t = get_time_info(dl1_path)

    f = tables.open_file(dl1_path)
    dl1a_images = f.root['/dl1/event/telescope/image/LST_LSTCam'].col('image')
    dl1a_pulse = f.root['/dl1/event/telescope/image/LST_LSTCam'].col(
        'peak_time')
    f.close()

    tel_name = "LST_LSTCam"
    clean_info = CleaningInfo()
    # NOTE(review): the cleaning *metadata* is read from the hardcoded
    # standard config, while the per-event parametrization below uses the
    # `config_file` argument — if the two differ, the recorded thresholds
    # will not match the applied ones. TODO confirm this is intentional.
    conf_f = read_configuration_file("lstchain_standard_config.json")
    print(conf_f)
    clean_info.name = "tailcut"
    clean_info.pic_th = conf_f["tailcut"]["picture_thresh"]
    clean_info.bound_th = conf_f["tailcut"]["boundary_thresh"]

    with HDF5TableWriter(filename=output_file, group_name='dl1/event',
                         mode='a', filters=None, add_prefix=False) as writer:
        for i in range(len(dl1a_images)):
            dl1b = get_dl1b_tailcut(dl1a_images[i], dl1a_pulse[i], config_file)
            set_trigger_info(dl1b, trig_type_array[i], ucts_trig_type_array[i],
                             trig_time_array[i][0])
            set_time_info(dl1b, dragon_t[i], ucts_t[i], tib_t[i])
            set_id_info(dl1b, obs_id_array[i], event_id_array[i])
            writer.write(table_name=f'telescope/parameters/{tel_name}',
                         containers=dl1b)
        # cleaning metadata written once per file
        writer.write(table_name=f'telescope/parameters/info',
                     containers=clean_info)
def main():
    """Compute DRS4 time-calibration coefficients from one (multi-)file
    source, applying pedestal corrections and a signal cut (module-level
    ``args``)."""
    print("--> Input file: {}".format(args.input_file))
    print("--> Number of events: {}".format(args.max_events))

    reader = event_source(input_url=args.input_file, max_events=args.max_events)
    print("--> Number of files", reader.multi_file.num_inputs())

    config_dic = {}
    if args.config_file is not None:
        try:
            config_dic = read_configuration_file(args.config_file)
        except Exception:
            # BUG FIX: the original `except ("…message…"):` tried to catch a
            # *string*; in Python 3 that raises TypeError instead of falling
            # back. Report and continue with defaults, as intended.
            print("Custom configuration could not be loaded !!!")
    # read the configuration file
    config = Config(config_dic)

    # declare the pedestal calibrator
    lst_r0 = LSTR0Corrections(pedestal_path=args.pedestal_file, config=config)

    # declare the time corrector
    timeCorr = TimeCorrectionCalculate(calib_file_path=args.output_file,
                                       config=config)
    tel_id = timeCorr.tel_id

    for i, event in enumerate(reader):
        if event.r0.event_id % 5000 == 0:
            print(event.r0.event_id)
        lst_r0.calibrate(event)

        # Cut in signal to avoid cosmic events
        if event.r1.tel[tel_id].trigger_type == 4 or (np.median(
                np.sum(event.r1.tel[tel_id].waveform[0], axis=1)) > 300):
            timeCorr.calibrate_pulse_time(event)

    # write output
    timeCorr.finalize()
def main():
    """Derive NSB tuning parameters from an MC simtel file and a real-data
    DL1 file, print them, and optionally write an updated config file."""
    args = parser.parse_args()

    # validate all three required input paths up front
    required_inputs = (
        (args.config, 'Config file does not exist or is not a file'),
        (args.input_mc, 'MC simtel file does not exist or is not a file'),
        (args.input_data, 'DL1 data file does not exist or is not a file'),
    )
    for path, message in required_inputs:
        if not path.is_file():
            log.error(message)
            sys.exit(1)

    log.setLevel(logging.INFO)
    logging.getLogger().addHandler(logging.StreamHandler())

    a, b, c = calculate_noise_parameters(args.input_mc, args.input_data,
                                         args.config)

    dict_nsb = {
        "increase_nsb": True,
        "extra_noise_in_dim_pixels": round(a, 3),
        "extra_bias_in_dim_pixels": round(b, 3),
        "transition_charge": 8,
        "extra_noise_in_bright_pixels": round(c, 3),
    }

    log.info('\n')
    log.info(json.dumps(dict_nsb, indent=2))
    log.info('\n')

    if args.output_file:
        cfg = read_configuration_file(args.config)
        cfg['image_modifier'].update(dict_nsb)
        dump_config(cfg, args.output_file, overwrite=args.overwrite)
def main():
    """Run the r0->dl1 conversion with configurable log levels for the
    reconstruction modules."""
    logging.basicConfig()
    for logger_name in ("lstchain.reco.r0_to_dl1",
                        "lstchain.reco.reconstructor"):
        logging.getLogger(logger_name).setLevel(args.log_level)

    dest_dir = args.output_dir.absolute()
    dest_dir.mkdir(exist_ok=True)
    dest_file = dest_dir / r0_to_dl1_filename(args.input_file.name)

    r0_to_dl1.allowed_tels = {1, 2, 3, 4}

    custom_config = {}
    if args.config_file is not None:
        try:
            custom_config = read_configuration_file(args.config_file.absolute())
        except Exception as e:
            log.error(f'Config file {args.config_file} could not be read: {e}')
            sys.exit(1)

    r0_to_dl1.r0_to_dl1(
        args.input_file,
        output_filename=dest_file,
        custom_config=custom_config,
    )
def main():
    """Dump a lstchain configuration (standard, MC, or source-dependent),
    optionally merged with an extra user-provided config file."""
    args = build_parser().parse_args()

    if args.mc and args.src_dep:
        raise ValueError("--mc and --src-dep can't be used at the same time")

    # pick the base configuration loader from the mutually-exclusive flags
    loader = get_standard_config
    if args.mc:
        loader = get_mc_config
    elif args.src_dep:
        loader = get_srcdep_config
    config = loader()

    if args.update_with:
        if not args.update_with.is_file():
            raise FileNotFoundError(
                f"Config file {args.update_with} does not exist")
        config.update(read_configuration_file(args.update_with))

    dump_config(config, args.output_file, overwrite=args.overwrite)
    log.info(f"Config dumped in {args.output_file}")
def main():
    """Recompute DL1 parameters (including concentration, leakage and,
    when present in the input, disp parameters) from stored DL1 images,
    and overwrite the parameters table of the output file in place.
    """
    std_config = get_standard_config()

    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)

    if args.config_file is not None:
        config = replace_config(std_config,
                                read_configuration_file(args.config_file))
    else:
        config = std_config
    log.info(f"Tailcut config used: {config['tailcut']}")

    foclen = OpticsDescription.from_name('LST').equivalent_focal_length
    cam_table = Table.read(args.input_file,
                           path="instrument/telescope/camera/LSTCam")
    camera_geom = CameraGeometry.from_table(cam_table)

    # one container reused for all events (reset each iteration)
    dl1_container = DL1ParametersContainer()
    parameters_to_update = list(HillasParametersContainer().keys())
    parameters_to_update.extend([
        'concentration_cog', 'concentration_core', 'concentration_pixel',
        'leakage_intensity_width_1', 'leakage_intensity_width_2',
        'leakage_pixels_width_1', 'leakage_pixels_width_2',
        'n_islands', 'intercept', 'time_gradient', 'n_pixels', 'wl',
        'log_intensity'
    ])

    nodes_keys = get_dataset_keys(args.input_file)
    if args.noimage:
        nodes_keys.remove(dl1_images_lstcam_key)

    # copy selected nodes to the output file before patching parameters
    auto_merge_h5files([args.input_file], args.output_file,
                       nodes_keys=nodes_keys)

    with tables.open_file(args.input_file, mode='r') as input:
        image_table = input.root[dl1_images_lstcam_key]
        dl1_params_input = input.root[dl1_params_lstcam_key].colnames
        disp_params = {'disp_dx', 'disp_dy', 'disp_norm', 'disp_angle',
                       'disp_sign'}
        # only recompute disp if the input already carried it (i.e. MC with
        # source position available)
        if set(dl1_params_input).intersection(disp_params):
            parameters_to_update.extend(disp_params)

        with tables.open_file(args.output_file, mode='a') as output:
            params = output.root[dl1_params_lstcam_key].read()
            for ii, row in enumerate(image_table):
                dl1_container.reset()

                image = row['image']
                peak_time = row['peak_time']

                signal_pixels = tailcuts_clean(camera_geom, image,
                                               **config['tailcut'])
                n_pixels = np.count_nonzero(signal_pixels)
                if n_pixels > 0:
                    num_islands, island_labels = number_of_islands(
                        camera_geom, signal_pixels)
                    # BUG FIX: np.int was removed in NumPy 1.24; the builtin
                    # int is what np.int aliased, so behavior is unchanged.
                    n_pixels_on_island = np.bincount(
                        island_labels.astype(int))
                    # first island is no-island and should not be considered
                    n_pixels_on_island[0] = 0
                    max_island_label = np.argmax(n_pixels_on_island)
                    signal_pixels[island_labels != max_island_label] = False

                    hillas = hillas_parameters(camera_geom[signal_pixels],
                                               image[signal_pixels])
                    dl1_container.fill_hillas(hillas)
                    dl1_container.set_timing_features(
                        camera_geom[signal_pixels],
                        image[signal_pixels],
                        peak_time[signal_pixels],
                        hillas)
                    dl1_container.set_leakage(camera_geom, image,
                                              signal_pixels)
                    dl1_container.set_concentration(camera_geom, image,
                                                    hillas)
                    dl1_container.n_islands = num_islands
                    dl1_container.wl = dl1_container.width / dl1_container.length
                    dl1_container.n_pixels = n_pixels
                    # convert width/length from camera-frame length to deg
                    width = np.rad2deg(np.arctan2(dl1_container.width, foclen))
                    length = np.rad2deg(np.arctan2(dl1_container.length,
                                                   foclen))
                    dl1_container.width = width
                    dl1_container.length = length
                    dl1_container.log_intensity = np.log10(
                        dl1_container.intensity)

                if set(dl1_params_input).intersection(disp_params):
                    disp_dx, disp_dy, disp_norm, disp_angle, disp_sign = disp(
                        dl1_container['x'].to_value(u.m),
                        dl1_container['y'].to_value(u.m),
                        params['src_x'][ii],
                        params['src_y'][ii]
                    )
                    dl1_container['disp_dx'] = disp_dx
                    dl1_container['disp_dy'] = disp_dy
                    dl1_container['disp_norm'] = disp_norm
                    dl1_container['disp_angle'] = disp_angle
                    dl1_container['disp_sign'] = disp_sign

                for p in parameters_to_update:
                    params[ii][p] = u.Quantity(dl1_container[p]).value

            # overwrite the parameters table in place
            output.root[dl1_params_lstcam_key][:] = params
def main():
    """Recompute DL1 parameters from stored DL1 images (main-island
    tailcut cleaning) and overwrite the parameters table of the output
    file in place."""
    std_config = get_standard_config()
    if args.config_file is not None:
        config = replace_config(std_config,
                                read_configuration_file(args.config_file))
    else:
        config = std_config
    print(config['tailcut'])

    foclen = OpticsDescription.from_name('LST').equivalent_focal_length
    cam_table = Table.read(args.input_file,
                           path="instrument/telescope/camera/LSTCam")
    camera_geom = CameraGeometry.from_table(cam_table)

    dl1_container = DL1ParametersContainer()
    parameters_to_update = list(HillasParametersContainer().keys())
    parameters_to_update.extend([
        'concentration_cog', 'concentration_core', 'concentration_pixel',
        'leakage_intensity_width_1', 'leakage_intensity_width_2',
        'leakage_pixels_width_1', 'leakage_pixels_width_2',
        'n_islands', 'intercept', 'time_gradient', 'n_pixels', 'wl', 'r',
    ])

    nodes_keys = get_dataset_keys(args.input_file)
    if args.noimage:
        nodes_keys.remove(dl1_images_lstcam_key)

    # copy selected nodes to the output file before patching parameters
    auto_merge_h5files([args.input_file], args.output_file,
                       nodes_keys=nodes_keys)

    with tables.open_file(args.input_file, mode='r') as input:
        image_table = input.root[dl1_images_lstcam_key]
        with tables.open_file(args.output_file, mode='a') as output:
            params = output.root[dl1_params_lstcam_key].read()
            for ii, row in enumerate(image_table):
                if ii % 10000 == 0:
                    print(ii)  # progress indicator
                image = row['image']
                peak_time = row['peak_time']

                signal_pixels = tailcuts_clean(camera_geom, image,
                                               **config['tailcut'])
                n_pixels = np.count_nonzero(signal_pixels)
                if n_pixels > 0:
                    num_islands, island_labels = number_of_islands(
                        camera_geom, signal_pixels)
                    # BUG FIX: np.int was removed in NumPy 1.24; the builtin
                    # int is what np.int aliased, so behavior is unchanged.
                    n_pixels_on_island = np.bincount(
                        island_labels.astype(int))
                    # first island is no-island and should not be considered
                    n_pixels_on_island[0] = 0
                    max_island_label = np.argmax(n_pixels_on_island)
                    signal_pixels[island_labels != max_island_label] = False

                    hillas = hillas_parameters(camera_geom[signal_pixels],
                                               image[signal_pixels])
                    dl1_container.fill_hillas(hillas)
                    dl1_container.set_timing_features(
                        camera_geom[signal_pixels],
                        image[signal_pixels],
                        peak_time[signal_pixels],
                        hillas)
                    dl1_container.set_leakage(camera_geom, image,
                                              signal_pixels)
                    dl1_container.set_concentration(camera_geom, image,
                                                    hillas)
                    dl1_container.n_islands = num_islands
                    dl1_container.wl = dl1_container.width / dl1_container.length
                    dl1_container.n_pixels = n_pixels
                    # convert width/length from camera-frame length to deg
                    width = np.rad2deg(np.arctan2(dl1_container.width, foclen))
                    length = np.rad2deg(
                        np.arctan2(dl1_container.length, foclen))
                    dl1_container.width = width
                    dl1_container.length = length
                    dl1_container.r = np.sqrt(dl1_container.x**2 +
                                              dl1_container.y**2)
                else:
                    # for consistency with r0_to_dl1.py:
                    for key in dl1_container.keys():
                        dl1_container[key] = \
                            u.Quantity(0, dl1_container.fields[key].unit)
                    dl1_container.width = u.Quantity(np.nan, u.m)
                    dl1_container.length = u.Quantity(np.nan, u.m)
                    dl1_container.wl = u.Quantity(np.nan, u.m)

                for p in parameters_to_update:
                    params[ii][p] = u.Quantity(dl1_container[p]).value

            # overwrite the parameters table in place
            output.root[dl1_params_lstcam_key][:] = params
def main():
    """Convert a single R0 file to DL1, letting command-line options
    override the corresponding entries of the (file- or standard-)
    configuration before running the conversion."""
    args = parser.parse_args()

    output_dir = args.output_dir.absolute()
    output_dir.mkdir(exist_ok=True, parents=True)

    if not args.input_file.is_file():
        log.error('Input file does not exist or is not a file')
        sys.exit(1)

    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)

    r0_to_dl1.allowed_tels = {1, 2, 3, 4}

    # test if this matches data file name pattern
    try:
        run = parse_r0_filename(args.input_file)
        output_filename = output_dir / run_to_dl1_filename(
            run.tel_id, run.run, run.subrun)
    except ValueError:
        # for arbitrary filenames, including mc
        output_filename = output_dir / r0_to_dl1_filename(args.input_file.name)

    config = {}
    if args.config_file is not None:
        try:
            config = read_configuration_file(args.config_file)
        except Exception as e:
            log.error(f'Configuration file could not be read: {e}')
            sys.exit(1)
    else:
        config = standard_config

    # Add to configuration config the parameters provided through command-line,
    # which supersede those in the file:
    if args.max_events is not None:
        config['source_config']['EventSource']['max_events'] = args.max_events

    lst_event_source = config['source_config']['LSTEventSource']
    time_calculator = lst_event_source['EventTimeCalculator']

    # event-time reference overrides
    if args.dragon_reference_time is not None:
        time_calculator['dragon_reference_time'] = args.dragon_reference_time
    if args.dragon_reference_counter is not None:
        time_calculator[
            'dragon_reference_counter'] = args.dragon_reference_counter
    if args.dragon_module_id is not None:
        time_calculator['dragon_module_id'] = args.dragon_module_id
    if args.run_summary_path is not None:
        time_calculator['run_summary_path'] = args.run_summary_path

    # pointing / pedestal-event / trigger overrides
    if args.pointing_file is not None:
        lst_event_source['PointingSource'][
            'drive_report_path'] = args.pointing_file
    if args.pedestal_ids_path is not None:
        lst_event_source['pedestal_ids_path'] = args.pedestal_ids_path
    if args.default_trigger_type is not None:
        lst_event_source["default_trigger_type"] = args.default_trigger_type

    # low-level R0 correction file overrides
    lst_r0_corrections = lst_event_source['LSTR0Corrections']
    if args.pedestal_file is not None:
        lst_r0_corrections['drs4_pedestal_path'] = args.pedestal_file
    if args.calibration_file is not None:
        lst_r0_corrections['calibration_path'] = args.calibration_file
    if args.time_calibration_file is not None:
        lst_r0_corrections[
            'drs4_time_calibration_path'] = args.time_calibration_file

    calib_config = config[config['calibration_product']]
    if args.systematic_correction_file is not None:
        calib_config[
            'systematic_correction_path'] = args.systematic_correction_file

    lst_event_source["use_flatfield_heuristic"] = args.use_flatfield_heuristic

    r0_to_dl1.r0_to_dl1(
        args.input_file,
        output_filename=output_filename,
        custom_config=config,
    )
# Command-line options for the r0->dl1 conversion script.
parser.add_argument('--outdir', '-o', action='store', type=str,
                    dest='outdir',
                    help='Path where to store the reco dl2 events',
                    default='./dl1_data/')

parser.add_argument('--config_file', '-conf', action='store', type=str,
                    dest='config_file',
                    help='Path to a configuration file. If none is given, a standard configuration is applied',
                    default=None
                    )

args = parser.parse_args()

if __name__ == '__main__':
    os.makedirs(args.outdir, exist_ok=True)

    dl0_to_dl1.allowed_tels = {1, 2, 3, 4}
    output_filename = args.outdir + '/dl1_' + os.path.basename(
        args.infile).rsplit('.', 1)[0] + '.h5'

    config = {}
    if args.config_file is not None:
        try:
            config = read_configuration_file(args.config_file)
        except Exception:
            # BUG FIX: the original `except("…message…"):` tried to catch a
            # *string*; in Python 3 that raises TypeError instead of falling
            # back. Report and continue with the standard configuration.
            print("Custom configuration could not be loaded !!!")

    dl0_to_dl1.r0_to_dl1(args.infile,
                         output_filename=output_filename,
                         custom_config=config)
def calculate_noise_parameters(simtel_filename, data_dl1_filename,
                               config_filename=None):
    """
    Calculates the parameters needed to increase the noise in an MC DL1 file
    to match the noise in a real data DL1 file, using add_noise_in_pixels
    The returned parameters are those needed by the function add_noise_in_pixels (see
    description in its documentation above).

    Parameters
    ----------
    simtel_filename: `str`
        a simtel file containing showers, from the same production (same NSB
        and telescope settings) as the MC DL1 file below. It must contain
        pixel-wise info on true number of p.e.'s from C-photons (
        will be used to identify pixels which only contain noise).
    data_dl1_filename: `str`
        a real data DL1 file (processed with calibration settings
        corresponding to those with which the MC is to be processed). It
        must contain calibrated images, i.e. "DL1a" data. This file has the
        "target" noise which we want to have in the MC files, for better
        agreement of data and simulations.
    config_filename: `str`
        configuration file containing the calibration settings used for
        processing both the data and the MC files above

    Returns
    -------
    extra_noise_in_dim_pixels: `float`
        Extra noise of dim pixels.
    extra_bias_in_dim_pixels: `float`
        Extra bias of dim pixels.
    extra_noise_in_bright_pixels: `float`
        Extra noise of bright pixels
    """

    log.setLevel(logging.INFO)

    if config_filename is None:
        config = standard_config
    else:
        config = read_configuration_file(config_filename)

    # Real data DL1 tables:
    data_dl1_calibration = read_table(
        data_dl1_filename, '/dl1/event/telescope/monitoring/calibration')
    data_dl1_pedestal = read_table(data_dl1_filename,
                                   '/dl1/event/telescope/monitoring/pedestal')
    data_dl1_parameters = read_table(
        data_dl1_filename, '/dl1/event/telescope/parameters/LST_LSTCam')
    data_dl1_image = read_table(data_dl1_filename,
                                '/dl1/event/telescope/image/LST_LSTCam')

    unusable = data_dl1_calibration['unusable_pixels']
    # Locate pixels with HG declared unusable either in original calibration or
    # in interleaved events:
    bad_pixels = unusable[0][0]  # original calibration
    # NOTE(review): `unusable[1:][0]` equals `unusable[1]` (the FIRST
    # interleaved entry only), and the loop then runs over its gain axis.
    # If the intent was "all interleaved entries, high gain", that would be
    # `unusable[1:, 0]` — confirm against the table layout described below.
    for tf in unusable[1:][0]:  # calibrations with interleaveds
        bad_pixels = np.logical_or(bad_pixels, tf)
    good_pixels = ~bad_pixels

    # First index: 1,2,... = values from interleaveds (0 is for original
    # calibration run)
    # Second index: 0 = high gain
    # Third index: pixels

    # HG adc to pe conversion factors from interleaved calibrations:
    data_HG_dc_to_pe = data_dl1_calibration['dc_to_pe'][:, 0, :]
    # Pixel-wise pedestal standard deviation (for an unbiased extractor),
    # in adc counts:
    data_HG_ped_std = data_dl1_pedestal['charge_std'][1:, 0, :]
    # indices which connect each pedestal calculation to a given calibration:
    calibration_id = data_dl1_pedestal['calibration_id'][1:]

    # convert pedestal st deviations to p.e.
    dummy = []
    for i, x in enumerate(data_HG_ped_std[:, ]):
        # multiply each pedestal-interval row by the dc_to_pe factors of the
        # calibration it was computed against:
        dummy.append(x * data_HG_dc_to_pe[calibration_id[i], ])
    dummy = np.array(dummy)

    # Average for all interleaved calibrations (in case there are more than one)
    data_HG_ped_std_pe = np.mean(dummy, axis=0)  # one value per pixel

    # Identify noisy pixels, likely containing stars - we want to adjust MC to
    # the average diffuse NSB across the camera
    data_median_std_ped_pe = np.median(data_HG_ped_std_pe)
    data_std_std_ped_pe = np.std(data_HG_ped_std_pe)
    log.info(f'Real data: median across camera of good pixels\' pedestal std '
             f'{data_median_std_ped_pe:.3f} p.e.')
    brightness_limit = data_median_std_ped_pe + 3 * data_std_std_ped_pe
    too_bright_pixels = (data_HG_ped_std_pe > brightness_limit)
    log.info(f'Number of pixels beyond 3 std dev of median: '
             f'{too_bright_pixels.sum()}, (above {brightness_limit:.2f} p.e.)')

    # event_type == 2 selects interleaved pedestal events:
    ped_mask = data_dl1_parameters['event_type'] == 2
    # The charges in the images below are obtained with the extractor for
    # showers, usually a biased one, like e.g. LocalPeakWindowSum
    data_ped_charges = data_dl1_image['image'][ped_mask]

    # Exclude too bright pixels, besides those with unusable calibration:
    good_pixels &= ~too_bright_pixels
    # recalculate the median of the pixels' std dev, with good_pixels:
    data_median_std_ped_pe = np.median(data_HG_ped_std_pe[good_pixels])

    log.info(f'Good and not too bright pixels: {good_pixels.sum()}')

    # all_good is an events*pixels boolean array of valid signals:
    all_good = np.reshape(np.tile(good_pixels, data_ped_charges.shape[0]),
                          data_ped_charges.shape)

    # histogram of pedestal charges (biased extractor) from good and not noisy
    # pixels:
    qbins = 100
    qrange = (-10, 15)
    dataq = np.histogram(data_ped_charges[all_good].flatten(), bins=qbins,
                         range=qrange, density=True)

    # Find the peak of the pedestal biased charge distribution of real data.
    # Use an interpolated version of the histogram, for robustness:
    func = interp1d(0.5 * (dataq[1][1:] + dataq[1][:-1]), dataq[0],
                    kind='quadratic', fill_value='extrapolate')
    xx = np.linspace(qrange[0], qrange[1], 100 * qbins)
    mode_data = xx[np.argmax(func(xx))]

    # Event reader for simtel file:
    mc_reader = EventSource(input_url=simtel_filename, config=Config(config))

    # Obtain the configuration with which the pedestal calculations were
    # performed:
    ped_config = config['LSTCalibrationCalculator']['PedestalIntegrator']
    tel_id = ped_config['tel_id']
    # Obtain the (unbiased) extractor used for pedestal calculations:
    pedestal_extractor_type = ped_config['charge_product']
    pedestal_calibrator = CameraCalibrator(
        image_extractor_type=pedestal_extractor_type,
        config=Config(ped_config),
        subarray=mc_reader.subarray)

    # Obtain the (usually biased) extractor used for shower images:
    shower_extractor_type = config['image_extractor']
    shower_calibrator = CameraCalibrator(
        image_extractor_type=shower_extractor_type,
        config=Config(config),
        subarray=mc_reader.subarray)

    # Since these extractors are now for use on MC, we have to apply the pulse
    # integration correction (in data that is currently, as of
    # lstchain v0.7.5, replaced by an empirical (hard-coded) correction of the
    # adc to pe conversion factors )
    pedestal_calibrator.image_extractors[
        ped_config['charge_product']].apply_integration_correction = True
    shower_calibrator.image_extractors[
        shower_extractor_type].apply_integration_correction = True

    # Pulse integration window width of the (biased) extractor for showers:
    shower_extractor_window_width = config[
        config['image_extractor']]['window_width']

    # Pulse integration window width for the pedestal estimation:
    pedestal_extractor_config = ped_config[pedestal_extractor_type]
    pedestal_extractor_window_width = pedestal_extractor_config['window_width']

    # MC pedestals integrated with the unbiased pedestal extractor
    mc_ped_charges = []
    # MC pedestals integrated with the biased shower extractor
    mc_ped_charges_biased = []

    for event in mc_reader:
        if tel_id not in event.trigger.tels_with_trigger:
            continue
        # Extract the signals as we do for pedestals (unbiased fixed window
        # extractor):
        pedestal_calibrator(event)
        charges = event.dl1.tel[tel_id].image

        # True number of pe's from Cherenkov photons (to identify noise-only
        # pixels)
        true_image = event.simulation.tel[tel_id].true_image
        mc_ped_charges.append(charges[true_image == 0])

        # Now extract the signal as we would do for shower events (usually
        # with a biased extractor, e.g. LocalPeakWindowSum):
        shower_calibrator(event)
        charges_biased = event.dl1.tel[tel_id].image
        mc_ped_charges_biased.append(charges_biased[true_image == 0])

    # All pixels behave (for now) in the same way in MC, just put them together
    mc_ped_charges = np.concatenate(mc_ped_charges)
    mc_ped_charges_biased = np.concatenate(mc_ped_charges_biased)

    mcq = np.histogram(mc_ped_charges_biased, bins=qbins, range=qrange,
                       density=True)
    # Find the peak of the pedestal biased charge distribution of MC. Use
    # an interpolated version of the histogram, for robustness:
    func = interp1d(0.5 * (mcq[1][1:] + mcq[1][:-1]), mcq[0],
                    kind='quadratic', fill_value='extrapolate')
    xx = np.linspace(qrange[0], qrange[1], 100 * qbins)
    mode_mc = xx[np.argmax(func(xx))]

    mc_unbiased_std_ped_pe = np.std(mc_ped_charges)

    # Find the additional noise (in data w.r.t. MC) for the unbiased extractor,
    # and scale it to the width of the window for integration of shower images.
    # The idea is that when a strong signal is present, the biased extractor
    # will integrate around it, and the additional noise is unbiased because
    # it won't modify the integration range.
    extra_noise_in_bright_pixels = \
        ((data_median_std_ped_pe**2 - mc_unbiased_std_ped_pe**2) *
         shower_extractor_window_width / pedestal_extractor_window_width)

    # Just in case, makes sure we just add noise if the MC noise is smaller
    # than the real data's:
    extra_noise_in_bright_pixels = max(0., extra_noise_in_bright_pixels)

    # Shift of the biased-charge peak between data and MC; only positive
    # biases are applied:
    bias = mode_data - mode_mc
    extra_bias_in_dim_pixels = max(bias, 0)

    # differences of values to peak charge:
    dq = data_ped_charges[all_good].flatten() - mode_data
    dqmc = mc_ped_charges_biased - mode_mc

    # maximum distance (in pe) from peak, to avoid strong impact of outliers:
    maxq = 10
    # calculate widening of the noise bump:
    added_noise = (np.sum(dq[dq < maxq]**2) / len(dq[dq < maxq]) -
                   np.sum(dqmc[dqmc < maxq]**2) / len(dqmc[dqmc < maxq]))
    added_noise = (max(0, added_noise))**0.5
    extra_noise_in_dim_pixels = added_noise

    return extra_noise_in_dim_pixels, extra_bias_in_dim_pixels, \
        extra_noise_in_bright_pixels
def main():
    """
    Entry point of the DL1ab script: re-run image cleaning and parameter
    extraction on an existing DL1 file.

    Copies the input DL1 file to the output path, optionally modifies the
    images (extra NSB noise / PSF smearing), re-cleans them, recomputes the
    DL1 parameters in place in the output file, and writes metadata.
    Exits with status 1 if the output file already exists.
    """
    args = parser.parse_args()

    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)

    # Refuse to clobber an existing output file:
    if Path(args.output_file).exists():
        log.critical(f'Output file {args.output_file} already exists')
        sys.exit(1)

    std_config = get_standard_config()
    if args.config_file is not None:
        config = replace_config(std_config,
                                read_configuration_file(args.config_file))
    else:
        config = std_config

    # Presence of a 'simulation' group marks an MC file:
    with tables.open_file(args.input_file, 'r') as f:
        is_simulation = 'simulation' in f.root

    # Optional image modifications (noise injection / PSF smearing) driven by
    # the "image_modifier" section of the config:
    increase_nsb = False
    increase_psf = False
    if "image_modifier" in config:
        imconfig = config["image_modifier"]
        increase_nsb = imconfig["increase_nsb"]
        increase_psf = imconfig["increase_psf"]
        if increase_nsb or increase_psf:
            log.info(f"image_modifier configuration: {imconfig}")
        extra_noise_in_dim_pixels = imconfig["extra_noise_in_dim_pixels"]
        extra_bias_in_dim_pixels = imconfig["extra_bias_in_dim_pixels"]
        transition_charge = imconfig["transition_charge"]
        extra_noise_in_bright_pixels = imconfig["extra_noise_in_bright_pixels"]
        smeared_light_fraction = imconfig["smeared_light_fraction"]
        if (increase_nsb or increase_psf):
            log.info(
                "NOTE: Using the image_modifier options means images will "
                "not be saved.")
            # modified images are not written out:
            args.no_image = True

    # Pedestal cleaning is a real-data-only feature:
    if is_simulation:
        args.pedestal_cleaning = False

    if args.pedestal_cleaning:
        log.info("Pedestal cleaning")
        clean_method_name = 'tailcuts_clean_with_pedestal_threshold'
        sigma = config[clean_method_name]['sigma']
        pedestal_thresh = get_threshold_from_dl1_file(args.input_file, sigma)
        cleaning_params = get_cleaning_parameters(config, clean_method_name)
        pic_th, boundary_th, isolated_pixels, min_n_neighbors = cleaning_params
        log.info(
            f"Fraction of pixel cleaning thresholds above picture thr.:"
            f"{np.sum(pedestal_thresh > pic_th) / len(pedestal_thresh):.3f}")
        # per-pixel picture threshold, never below the configured one:
        picture_th = np.clip(pedestal_thresh, pic_th, None)
        log.info(f"Tailcut clean with pedestal threshold config used:"
                 f"{config['tailcuts_clean_with_pedestal_threshold']}")
    else:
        clean_method_name = 'tailcut'
        cleaning_params = get_cleaning_parameters(config, clean_method_name)
        picture_th, boundary_th, isolated_pixels, min_n_neighbors = cleaning_params
        log.info(f"Tailcut config used: {config['tailcut']}")

    use_dynamic_cleaning = False
    if 'apply' in config['dynamic_cleaning']:
        use_dynamic_cleaning = config['dynamic_cleaning']['apply']

    if use_dynamic_cleaning:
        THRESHOLD_DYNAMIC_CLEANING = config['dynamic_cleaning']['threshold']
        FRACTION_CLEANING_SIZE = config['dynamic_cleaning'][
            'fraction_cleaning_intensity']
        log.info(
            "Using dynamic cleaning for events with average size of the "
            f"3 most brighest pixels > {config['dynamic_cleaning']['threshold']} p.e"
        )
        log.info(
            "Remove from image pixels which have charge below "
            f"= {config['dynamic_cleaning']['fraction_cleaning_intensity']} * average size"
        )

    use_only_main_island = True
    if "use_only_main_island" in config[clean_method_name]:
        use_only_main_island = config[clean_method_name][
            "use_only_main_island"]

    delta_time = None
    if "delta_time" in config[clean_method_name]:
        delta_time = config[clean_method_name]["delta_time"]

    subarray_info = SubarrayDescription.from_hdf(args.input_file)
    tel_id = config["allowed_tels"][0] if "allowed_tels" in config else 1
    optics = subarray_info.tel[tel_id].optics
    camera_geom = subarray_info.tel[tel_id].camera.geometry

    dl1_container = DL1ParametersContainer()
    # columns of the parameters table that this script recomputes:
    parameters_to_update = [
        'intensity', 'x', 'y', 'r', 'phi', 'length', 'width', 'psi',
        'skewness', 'kurtosis', 'concentration_cog', 'concentration_core',
        'concentration_pixel', 'leakage_intensity_width_1',
        'leakage_intensity_width_2', 'leakage_pixels_width_1',
        'leakage_pixels_width_2', 'n_islands', 'intercept', 'time_gradient',
        'n_pixels', 'wl', 'log_intensity'
    ]

    nodes_keys = get_dataset_keys(args.input_file)
    if args.no_image:
        nodes_keys.remove(dl1_images_lstcam_key)

    metadata = global_metadata()

    with tables.open_file(args.input_file, mode='r') as infile:
        image_table = infile.root[dl1_images_lstcam_key]
        dl1_params_input = infile.root[dl1_params_lstcam_key].colnames
        # also refresh disp/uncertainty columns when the input file has them:
        disp_params = {
            'disp_dx', 'disp_dy', 'disp_norm', 'disp_angle', 'disp_sign'
        }
        if set(dl1_params_input).intersection(disp_params):
            parameters_to_update.extend(disp_params)
        uncertainty_params = {'width_uncertainty', 'length_uncertainty'}
        if set(dl1_params_input).intersection(uncertainty_params):
            parameters_to_update.extend(uncertainty_params)

        # seed the modifiers from the first obs_id for reproducibility:
        if increase_nsb:
            rng = np.random.default_rng(
                infile.root.dl1.event.subarray.trigger.col('obs_id')[0])
        if increase_psf:
            set_numba_seed(
                infile.root.dl1.event.subarray.trigger.col('obs_id')[0])

        image_mask_save = not args.no_image and 'image_mask' in infile.root[
            dl1_images_lstcam_key].colnames

        with tables.open_file(args.output_file, mode='a',
                              filters=HDF5_ZSTD_FILTERS) as outfile:
            copy_h5_nodes(infile, outfile, nodes=nodes_keys)
            add_source_filenames(outfile, [args.input_file])

            params = outfile.root[dl1_params_lstcam_key].read()
            if image_mask_save:
                image_mask = outfile.root[dl1_images_lstcam_key].col(
                    'image_mask')

            # need container to use lstchain.io.add_global_metadata and
            # lstchain.io.add_config_metadata
            for k, item in metadata.as_dict().items():
                outfile.root[dl1_params_lstcam_key].attrs[k] = item
            outfile.root[dl1_params_lstcam_key].attrs["config"] = str(config)

            for ii, row in enumerate(image_table):
                dl1_container.reset()

                image = row['image']
                peak_time = row['peak_time']

                if increase_nsb:
                    # Add noise in pixels, to adjust MC to data noise levels.
                    # TO BE DONE: in case of "pedestal cleaning" (not used now
                    # in MC) we should recalculate picture_th above!
                    image = add_noise_in_pixels(rng, image,
                                                extra_noise_in_dim_pixels,
                                                extra_bias_in_dim_pixels,
                                                transition_charge,
                                                extra_noise_in_bright_pixels)
                if increase_psf:
                    image = random_psf_smearer(
                        image, smeared_light_fraction,
                        camera_geom.neighbor_matrix_sparse.indices,
                        camera_geom.neighbor_matrix_sparse.indptr)

                signal_pixels = tailcuts_clean(camera_geom, image, picture_th,
                                               boundary_th, isolated_pixels,
                                               min_n_neighbors)

                n_pixels = np.count_nonzero(signal_pixels)

                if n_pixels > 0:
                    # if delta_time has been set, we require at least one
                    # neighbor within delta_time to accept a pixel in the
                    # image:
                    if delta_time is not None:
                        cleaned_pixel_times = peak_time
                        # makes sure only signal pixels are used in the time
                        # check:
                        cleaned_pixel_times[~signal_pixels] = np.nan
                        new_mask = apply_time_delta_cleaning(
                            camera_geom, signal_pixels, cleaned_pixel_times,
                            1, delta_time)
                        signal_pixels = new_mask

                    if use_dynamic_cleaning:
                        new_mask = apply_dynamic_cleaning(
                            image, signal_pixels,
                            THRESHOLD_DYNAMIC_CLEANING,
                            FRACTION_CLEANING_SIZE)
                        signal_pixels = new_mask

                    # count a number of islands after all of the image
                    # cleaning steps
                    num_islands, island_labels = number_of_islands(
                        camera_geom, signal_pixels)
                    dl1_container.n_islands = num_islands

                    n_pixels_on_island = np.bincount(
                        island_labels.astype(np.int64))
                    n_pixels_on_island[
                        0] = 0  # first island is no-island and should not be considered
                    max_island_label = np.argmax(n_pixels_on_island)
                    if use_only_main_island:
                        signal_pixels[
                            island_labels != max_island_label] = False

                    # count the surviving pixels
                    n_pixels = np.count_nonzero(signal_pixels)
                    dl1_container.n_pixels = n_pixels

                    if n_pixels > 0:
                        parametrize_image(
                            image=image,
                            peak_time=peak_time,
                            signal_pixels=signal_pixels,
                            camera_geometry=camera_geom,
                            focal_length=optics.equivalent_focal_length,
                            dl1_container=dl1_container,
                        )

                if set(dl1_params_input).intersection(disp_params):
                    disp_dx, disp_dy, disp_norm, disp_angle, disp_sign = disp(
                        dl1_container['x'].to_value(u.m),
                        dl1_container['y'].to_value(u.m),
                        params['src_x'][ii],
                        params['src_y'][ii])
                    dl1_container['disp_dx'] = disp_dx
                    dl1_container['disp_dy'] = disp_dy
                    dl1_container['disp_norm'] = disp_norm
                    dl1_container['disp_angle'] = disp_angle
                    dl1_container['disp_sign'] = disp_sign

                for p in parameters_to_update:
                    params[ii][p] = u.Quantity(dl1_container[p]).value

                if image_mask_save:
                    image_mask[ii] = signal_pixels

            # write the recomputed parameters (and cleaning masks) back:
            outfile.root[dl1_params_lstcam_key][:] = params
            if image_mask_save:
                outfile.root[dl1_images_lstcam_key].modify_column(
                    colname='image_mask', column=image_mask)

    write_metadata(metadata, args.output_file)
def validate_lstchain(filename): read_configuration_file(filename)
def main():
    """
    Entry point of an older DL1ab-style script: re-clean the images of a DL1
    file and recompute the Hillas/timing/leakage parameters.

    Merges the selected nodes of the input file into the output file, then
    overwrites the parameters table in the output with values recomputed from
    the (re-cleaned) images.
    """
    # NOTE(review): `args` is not defined in this function — presumably a
    # module-level namespace parsed elsewhere in this script; verify.
    std_config = get_standard_config()

    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    logging.getLogger().addHandler(handler)

    if args.config_file is not None:
        config = replace_config(std_config,
                                read_configuration_file(args.config_file))
    else:
        config = std_config

    if args.pedestal_cleaning:
        print("Pedestal cleaning")
        clean_method_name = 'tailcuts_clean_with_pedestal_threshold'
        sigma = config[clean_method_name]['sigma']
        pedestal_thresh = get_threshold_from_dl1_file(args.input_file, sigma)
        cleaning_params = get_cleaning_parameters(config, clean_method_name)
        pic_th, boundary_th, isolated_pixels, min_n_neighbors = cleaning_params
        log.info(
            f"Fraction of pixel cleaning thresholds above picture thr.:"
            f"{np.sum(pedestal_thresh>pic_th) / len(pedestal_thresh):.3f}")
        # per-pixel picture threshold, never below the configured one:
        picture_th = np.clip(pedestal_thresh, pic_th, None)
        log.info(f"Tailcut clean with pedestal threshold config used:"
                 f"{config['tailcuts_clean_with_pedestal_threshold']}")
    else:
        clean_method_name = 'tailcut'
        cleaning_params = get_cleaning_parameters(config, clean_method_name)
        picture_th, boundary_th, isolated_pixels, min_n_neighbors = cleaning_params
        log.info(f"Tailcut config used: {config['tailcut']}")

    use_only_main_island = True
    if "use_only_main_island" in config[clean_method_name]:
        use_only_main_island = config[clean_method_name][
            "use_only_main_island"]

    delta_time = None
    if "delta_time" in config[clean_method_name]:
        delta_time = config[clean_method_name]["delta_time"]

    foclen = OpticsDescription.from_name('LST').equivalent_focal_length
    cam_table = Table.read(args.input_file,
                           path="instrument/telescope/camera/LSTCam")
    camera_geom = CameraGeometry.from_table(cam_table)

    dl1_container = DL1ParametersContainer()
    # columns of the parameters table that this script recomputes:
    parameters_to_update = [
        'intensity', 'x', 'y', 'r', 'phi', 'length', 'width', 'psi',
        'skewness', 'kurtosis', 'concentration_cog', 'concentration_core',
        'concentration_pixel', 'leakage_intensity_width_1',
        'leakage_intensity_width_2', 'leakage_pixels_width_1',
        'leakage_pixels_width_2', 'n_islands', 'intercept', 'time_gradient',
        'n_pixels', 'wl', 'log_intensity'
    ]

    nodes_keys = get_dataset_keys(args.input_file)
    if args.noimage:
        nodes_keys.remove(dl1_images_lstcam_key)

    # copy the selected nodes into the output file before editing in place:
    auto_merge_h5files([args.input_file], args.output_file,
                       nodes_keys=nodes_keys)

    # NOTE(review): `input` shadows the builtin of the same name inside this
    # function body.
    with tables.open_file(args.input_file, mode='r') as input:
        image_table = input.root[dl1_images_lstcam_key]
        dl1_params_input = input.root[dl1_params_lstcam_key].colnames
        # also refresh disp columns when the input file has them:
        disp_params = {
            'disp_dx', 'disp_dy', 'disp_norm', 'disp_angle', 'disp_sign'
        }
        if set(dl1_params_input).intersection(disp_params):
            parameters_to_update.extend(disp_params)

        with tables.open_file(args.output_file, mode='a') as output:
            params = output.root[dl1_params_lstcam_key].read()

            for ii, row in enumerate(image_table):
                dl1_container.reset()

                image = row['image']
                peak_time = row['peak_time']

                signal_pixels = tailcuts_clean(camera_geom, image, picture_th,
                                               boundary_th, isolated_pixels,
                                               min_n_neighbors)

                n_pixels = np.count_nonzero(signal_pixels)

                if n_pixels > 0:
                    num_islands, island_labels = number_of_islands(
                        camera_geom, signal_pixels)
                    n_pixels_on_island = np.bincount(
                        island_labels.astype(np.int64))
                    n_pixels_on_island[
                        0] = 0  # first island is no-island and should not be considered
                    max_island_label = np.argmax(n_pixels_on_island)
                    if use_only_main_island:
                        signal_pixels[
                            island_labels != max_island_label] = False

                    # if delta_time has been set, we require at least one
                    # neighbor within delta_time to accept a pixel in the
                    # image:
                    if delta_time is not None:
                        cleaned_pixel_times = peak_time
                        # makes sure only signal pixels are used in the time
                        # check:
                        cleaned_pixel_times[~signal_pixels] = np.nan
                        new_mask = apply_time_delta_cleaning(
                            camera_geom, signal_pixels, cleaned_pixel_times,
                            1, delta_time)
                        signal_pixels = new_mask

                    # count the surviving pixels
                    n_pixels = np.count_nonzero(signal_pixels)

                    if n_pixels > 0:
                        hillas = hillas_parameters(camera_geom[signal_pixels],
                                                   image[signal_pixels])
                        dl1_container.fill_hillas(hillas)
                        dl1_container.set_timing_features(
                            camera_geom[signal_pixels],
                            image[signal_pixels],
                            peak_time[signal_pixels],
                            hillas)
                        dl1_container.set_leakage(camera_geom, image,
                                                  signal_pixels)
                        dl1_container.set_concentration(camera_geom, image,
                                                        hillas)
                        dl1_container.n_islands = num_islands
                        dl1_container.wl = dl1_container.width / dl1_container.length
                        dl1_container.n_pixels = n_pixels
                        # convert width/length from camera-frame distance to
                        # angular size using the focal length:
                        width = np.rad2deg(
                            np.arctan2(dl1_container.width, foclen))
                        length = np.rad2deg(
                            np.arctan2(dl1_container.length, foclen))
                        dl1_container.width = width
                        dl1_container.length = length
                        dl1_container.log_intensity = np.log10(
                            dl1_container.intensity)

                if set(dl1_params_input).intersection(disp_params):
                    disp_dx, disp_dy, disp_norm, disp_angle, disp_sign = disp(
                        dl1_container['x'].to_value(u.m),
                        dl1_container['y'].to_value(u.m),
                        params['src_x'][ii],
                        params['src_y'][ii])
                    dl1_container['disp_dx'] = disp_dx
                    dl1_container['disp_dy'] = disp_dy
                    dl1_container['disp_norm'] = disp_norm
                    dl1_container['disp_angle'] = disp_angle
                    dl1_container['disp_sign'] = disp_sign

                for p in parameters_to_update:
                    params[ii][p] = u.Quantity(dl1_container[p]).value

            # write the recomputed parameters back into the output table:
            output.root[dl1_params_lstcam_key][:] = params
def test_dump_config(): cfg = {'myconf': 1} with tempfile.NamedTemporaryFile() as file: config.dump_config(cfg, file.name, overwrite=True) read_cfg = config.read_configuration_file(file.name) assert read_cfg['myconf'] == 1
def find_safe_threshold_from_dl1_file(dl1_path, config_file=None,
                                      max_fraction=0.04):
    """
    Compute an integer "base" picture threshold for image cleaning such that
    at most a fraction `max_fraction` of camera pixels would receive a higher
    per-pixel value from the "tailcuts_clean_with_pedestal_threshold" method.

    That cleaning method raises a pixel's picture threshold to `sigma`
    standard deviations above its pedestal mean, so star-lit pixels get
    higher thresholds and spurious signals from starlight fluctuations are
    suppressed.  The downside is a non-uniform camera response in real data
    (and hence data/MC discrepancies, since MC pixels all share one
    threshold).  Returning a base threshold that already exceeds the
    pedestal-based value for all but `max_fraction` of the pixels limits how
    much of the camera becomes inhomogeneous.  With the default
    max_fraction=0.04 this gives e.g. a picture threshold around 8 for the
    Crab field in no-moon conditions.

    Note: cleaning settings are wanted per run, so this should be run over
    sub-runs and averaged, or extended to read multiple DL1 files.

    Parameters
    ----------
    dl1_path: real data DL1 file, used to access the monitoring table with
        the pedestal bias & std dev estimated from interleaved pedestal
        events
    config_file: the configuration used for the real-data analysis; it must
        contain the tailcuts_clean_with_pedestal_threshold setting (sigma)
        to be used
    max_fraction: maximum fraction of camera pixels allowed to end up with a
        picture threshold above the returned base value

    Returns
    -------
    (scalar) picture threshold to use in both data and MC so that no more
    than max_fraction of the camera gets an increased value via
    tailcuts_clean_with_pedestal_threshold
    """
    base_config = get_standard_config()
    if config_file is None:
        config = base_config
    else:
        config = replace_config(base_config,
                                read_configuration_file(config_file))

    cleaning_method = 'tailcuts_clean_with_pedestal_threshold'
    # only sigma is used below; the configured picture threshold itself is
    # not needed here
    picture_th, _, _, _ = get_cleaning_parameters(config, cleaning_method)
    sigma = config[cleaning_method]['sigma']

    # Per-pixel picture thresholds from the "clean with pedestal threshold"
    # method:
    per_pixel_thresholds = get_threshold_from_dl1_file(dl1_path, sigma)

    # Sort the per-pixel values and pick the one below which a fraction
    # (1 - max_fraction) of the pixels lies:
    ranked = np.sort(per_pixel_thresholds)
    cut_index = int(len(ranked) * (1 - max_fraction))
    new_threshold = ranked[cut_index]

    # Round up to the next integer, to avoid a proliferation of slightly
    # different cleaning settings across runs:
    return np.ceil(new_threshold)