import tempfile

import numpy as np
import astropy.units as u
import pytest
from astropy.io import fits


@pytest.fixture
def aeff2d_hdus():
    from pyirf.io import create_aeff2d_hdu

    # same binning as used in the tests below
    e_bins = np.geomspace(0.1, 100, 31) * u.TeV
    fov_bins = [0, 1, 2, 3] * u.deg
    area = np.full((len(e_bins) - 1, len(fov_bins) - 1), 1e6) * u.m**2

    hdus = [
        create_aeff2d_hdu(area, e_bins, fov_bins, point_like=point_like)
        for point_like in [True, False]
    ]
    return area, hdus
def test_effective_area2d():
    '''Test our effective area is readable by gammapy'''
    pytest.importorskip('gammapy')
    from pyirf.io import create_aeff2d_hdu
    from gammapy.irf import EffectiveAreaTable2D

    e_bins = np.geomspace(0.1, 100, 31) * u.TeV
    fov_bins = [0, 1, 2, 3] * u.deg
    area = np.full((30, 3), 1e6) * u.m**2

    for point_like in [True, False]:
        with tempfile.NamedTemporaryFile(suffix='.fits') as f:
            hdu = create_aeff2d_hdu(area, e_bins, fov_bins, point_like=point_like)

            fits.HDUList([fits.PrimaryHDU(), hdu]).writeto(f.name)

            # test reading with gammapy works
            aeff2d = EffectiveAreaTable2D.read(f.name)
            assert u.allclose(area, aeff2d.data.data, atol=1e-16 * u.m**2)
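
# The round-trip test above only checks the data payload. A companion check
# could assert the point-like flag itself. This is a sketch, assuming
# create_aeff2d_hdu records the flag in the GADF HDUCLAS3 header keyword
# ('POINT-LIKE' / 'FULL-ENCLOSURE'); verify against the pyirf source before
# relying on it.
def test_aeff2d_point_like_header(aeff2d_hdus):
    _, hdus = aeff2d_hdus
    # the fixture creates the point-like HDU first, then the full-enclosure one
    for hdu, expected in zip(hdus, ['POINT-LIKE', 'FULL-ENCLOSURE']):
        assert hdu.header['HDUCLAS3'] == expected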
def main():
    # Read arguments
    parser = argparse.ArgumentParser(description='Make performance files')
    parser.add_argument('--config_file', type=str, required=True,
                        help='Path to the configuration file')
    mode_group = parser.add_mutually_exclusive_group()
    mode_group.add_argument('--wave', dest="mode", action='store_const',
                            const="wave", default="tail",
                            help="if set, use wavelet cleaning")
    mode_group.add_argument('--tail', dest="mode", action='store_const',
                            const="tail",
                            help="if set, use tail cleaning (default)")
    args = parser.parse_args()

    # Read configuration file
    cfg = load_config(args.config_file)

    # Create output directory if necessary
    outdir = os.path.join(
        cfg['general']['outdir'],
        'performance_protopipe_{}_CTA{}_{}_Zd{}_{}_Time{:.2f}{}'.format(
            cfg['general']['prod'],
            cfg['general']['site'],
            cfg['general']['array'],
            cfg['general']['zenith'],
            cfg['general']['azimuth'],
            cfg['analysis']['obs_time']['value'],
            cfg['analysis']['obs_time']['unit']),
    )

    indir = cfg['general']['indir']
    template_input_file = cfg['general']['template_input_file']

    T_OBS = cfg['analysis']['obs_time']['value'] * u.Unit(
        cfg['analysis']['obs_time']['unit'])

    # scaling between on and off region.
    # Make off region 5 times larger than on region for better
    # background statistics
    ALPHA = cfg['analysis']['alpha']

    # Radius to use for calculating bg rate
    MAX_BG_RADIUS = cfg['analysis']['max_bg_radius'] * u.deg

    particles = {
        "gamma": {
            "file": os.path.join(indir, template_input_file.format(args.mode, "gamma")),
            "target_spectrum": CRAB_HEGRA,
            "run_header": cfg['particle_information']['gamma'],
        },
        "proton": {
            "file": os.path.join(indir, template_input_file.format(args.mode, "proton")),
            "target_spectrum": IRFDOC_PROTON_SPECTRUM,
            "run_header": cfg['particle_information']['proton'],
        },
        "electron": {
            "file": os.path.join(indir, template_input_file.format(args.mode, "electron")),
            "target_spectrum": IRFDOC_ELECTRON_SPECTRUM,
            "run_header": cfg['particle_information']['electron'],
        },
    }

    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    for particle_type, p in particles.items():
        log.info(f"Simulated {particle_type.title()} Events:")
        p["events"], p["simulation_info"] = read_DL2_pyirf(
            p["file"], p["run_header"])

        # Multiplicity cut
        p["events"] = p["events"][
            p["events"]["multiplicity"]
            >= cfg['analysis']['cut_on_multiplicity']].copy()

        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)

        # Weight events
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"],
            p["target_spectrum"],
            p["simulated_spectrum"])

        for prefix in ('true', 'reco'):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"], prefix=prefix)

        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos
        # is the pointing position
        p["events"]["theta"] = calculate_theta(
            p["events"],
            assumed_source_az=p["events"]["pointing_az"],
            assumed_source_alt=p["events"]["pointing_alt"],
        )
        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    MAX_GH_CUT_EFFICIENCY = 0.8
    GH_CUT_EFFICIENCY_STEP = 0.01

    # gh cut used for first calculation of the binned theta cuts
    INITIAL_GH_CUT_EFFICENCY = 0.4
    INITIAL_GH_CUT = np.quantile(gammas['gh_score'],
                                 (1 - INITIAL_GH_CUT_EFFICENCY))
    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(
            10**(-1.9) * u.TeV,
            10**2.3005 * u.TeV,
            50,
        ))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=theta_bins,
        min_value=0.05 * u.deg,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        percentile=68,
    )

    # same bins as event display uses
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV,
                               bins_per_decade=5))

    log.info("Optimizing G/H separation cut for best sensitivity")
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP)

    sensitivity_step_2, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info('Recalculating theta cut for optimized GH Cuts')
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(
            tab["gh_score"], tab["reco_energy"], gh_cuts, operator.ge)

    theta_cuts_opt = calculate_percentile_cut(
        gammas[gammas['selected_gh']]["theta"],
        gammas[gammas['selected_gh']]["reco_energy"],
        theta_bins,
        percentile=68,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        min_value=0.05 * u.deg,
    )

    gammas["selected_theta"] = evaluate_binned_cut(
        gammas["theta"], gammas["reco_energy"], theta_cuts_opt, operator.le)
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # calculate sensitivity
    signal_hist = create_histogram_table(
        gammas[gammas["selected"]], bins=sensitivity_bins)
    background_hist = estimate_background(
        background[background["selected_gh"]],
        reco_energy_bins=sensitivity_bins,
        theta_cuts=theta_cuts_opt,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )
    sensitivity = calculate_sensitivity(signal_hist, background_hist, alpha=ALPHA)

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles['gamma']['target_spectrum']
    for s in (sensitivity_step_2, sensitivity):
        s["flux_sensitivity"] = (s["relative_sensitivity"]
                                 * spectrum(s['reco_energy_center']))

    log.info('Calculating IRFs')
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(sensitivity_step_2, name="SENSITIVITY_STEP_2"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(theta_cuts_opt, name="THETA_CUTS_OPT"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 10))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 5))
    fov_offset_bins = [0, 0.5] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[..., np.newaxis],  # +1 dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    # Here we use reconstructed energy instead of true energy for the sake of
    # current pipelines comparisons
    bias_resolution = energy_bias_resolution(
        gammas[gammas["selected"]], reco_energy_bins, energy_type="reco")

    # Here we use reconstructed energy instead of true energy for the sake of
    # current pipelines comparisons
    ang_res = angular_resolution(
        gammas[gammas["selected_gh"]], reco_energy_bins, energy_type="reco")

    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(
            theta_cuts_opt["cut"][:, np.newaxis], theta_bins, fov_offset_bins))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info('Writing output file')
    fits.HDUList(hdus).writeto(outdir + '.fits.gz', overwrite=True)
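
# For reference, an invocation of this script might look like the following
# (the script and config file names are hypothetical):
#
#   python make_performance.py --config_file analysis.yaml --tail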
def main():
    log = logging.getLogger("lstchain MC DL2 to IRF - sensitivity curves")

    parser = argparse.ArgumentParser(description="MC DL2 to IRF")

    # Required arguments
    parser.add_argument(
        "--gamma-dl2", "-g", type=str, dest="gamma_file",
        help="Path to the dl2 gamma file",
    )
    parser.add_argument(
        "--proton-dl2", "-p", type=str, dest="proton_file",
        help="Path to the dl2 proton file",
    )
    parser.add_argument(
        "--electron-dl2", "-e", type=str, dest="electron_file",
        help="Path to the dl2 electron file",
    )
    parser.add_argument(
        "--outfile", "-o",
        action="store", type=str, dest="outfile",
        help="Path where to save IRF FITS file",
        default="sensitivity.fits.gz",
    )
    parser.add_argument(
        "--source_alt",
        action="store", type=float, dest="source_alt",
        help="Source altitude (optional). If not provided, it will be guessed from the gammas' true altitude",
        default=None,
    )
    parser.add_argument(
        "--source_az",
        action="store", type=float, dest="source_az",
        help="Source azimuth (optional). If not provided, it will be guessed from the gammas' true azimuth",
        default=None,
    )

    # Optional arguments
    # parser.add_argument('--config', '-c', action='store', type=Path,
    #                     dest='config_file',
    #                     help='Path to a configuration file. If none is given, a standard configuration is applied',
    #                     default=None
    #                     )

    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    particles = {
        "gamma": {"file": args.gamma_file, "target_spectrum": CRAB_HEGRA},
        "proton": {"file": args.proton_file, "target_spectrum": IRFDOC_PROTON_SPECTRUM},
        "electron": {
            "file": args.electron_file,
            "target_spectrum": IRFDOC_ELECTRON_SPECTRUM,
        },
    }

    for particle_type, p in particles.items():
        log.info("Simulated Events: {}".format(particle_type.title()))
        p["events"], p["simulation_info"] = read_mc_dl2_to_QTable(p["file"])
        # p['events'] = filter_events(p['events'], filters)
        print("=====", particle_type, "=====")
        # p["events"]["particle_type"] = particle_type
        p["simulated_spectrum"] = PowerLaw.from_simulation(p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"], p["target_spectrum"], p["simulated_spectrum"]
        )
        for prefix in ("true", "reco"):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"], prefix=prefix)

    gammas = particles["gamma"]["events"]
    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]]
    )

    if args.source_alt is None or args.source_az is None:
        source_alt, source_az = determine_source_position(gammas)
    else:
        source_alt, source_az = args.source_alt, args.source_az

    for particle_type, p in particles.items():
        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos is the pointing position
        p["events"]["theta"] = calculate_theta(
            p["events"], assumed_source_az=source_az, assumed_source_alt=source_alt
        )
        log.info(p["simulation_info"])
        log.info("")

    INITIAL_GH_CUT = np.quantile(gammas["gh_score"], (1 - INITIAL_GH_CUT_EFFICENCY))
    log.info("Using fixed G/H cut of {} to calculate theta cuts".format(INITIAL_GH_CUT))

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, N_BIN_PER_DECADE)
    )

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=theta_bins,
        min_value=MIN_THETA_CUT,
        fill_value=MAX_THETA_CUT,
        max_value=MAX_THETA_CUT,
        percentile=68,
    )

    # same number of bins per decade as the official CTA IRFs
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, bins_per_decade=N_BIN_PER_DECADE)
    )

    log.info("Optimizing G/H separation cut for best sensitivity")
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP,
    )

    sensitivity_step_2, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info("Recalculating theta cut for optimized GH Cuts")
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(
            tab["gh_score"], tab["reco_energy"], gh_cuts, operator.ge
        )

    theta_cuts_opt = calculate_percentile_cut(
        gammas[gammas["selected_gh"]]["theta"],
        gammas[gammas["selected_gh"]]["reco_energy"],
        theta_bins,
        percentile=68,
        fill_value=MAX_THETA_CUT,
        max_value=MAX_THETA_CUT,
        min_value=MIN_THETA_CUT,
    )

    gammas["selected_theta"] = evaluate_binned_cut(
        gammas["theta"], gammas["reco_energy"], theta_cuts_opt, operator.le
    )
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # calculate sensitivity
    signal_hist = create_histogram_table(
        gammas[gammas["selected"]], bins=sensitivity_bins
    )
    background_hist = estimate_background(
        background[background["selected_gh"]],
        reco_energy_bins=sensitivity_bins,
        theta_cuts=theta_cuts_opt,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )
    sensitivity = calculate_sensitivity(signal_hist, background_hist, alpha=ALPHA)

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles["gamma"]["target_spectrum"]
    for s in (sensitivity_step_2, sensitivity):
        s["flux_sensitivity"] = s["relative_sensitivity"] * spectrum(
            s["reco_energy_center"]
        )

    log.info("Calculating IRFs")
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(sensitivity_step_2, name="SENSITIVITY_STEP_2"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(theta_cuts_opt, name="THETA_CUTS_OPT"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, N_BIN_PER_DECADE)
    )
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, N_BIN_PER_DECADE)
    )
    fov_offset_bins = [0, 0.6] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[..., np.newaxis],  # add one dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            )
        )
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            )
        )

    bias_resolution = energy_bias_resolution(
        gammas[gammas["selected"]],
        true_energy_bins,
        resolution_function=energy_resolution_absolute_68,
    )
    ang_res = angular_resolution(gammas[gammas["selected_gh"]], true_energy_bins)
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background["selected_gh"]],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate, reco_energy_bins, fov_offset_bins=np.arange(0, 11) * u.deg
        )
    )
    hdus.append(
        create_psf_table_hdu(psf, true_energy_bins, source_offset_bins, fov_offset_bins)
    )
    hdus.append(
        create_rad_max_hdu(
            theta_cuts_opt["cut"][:, np.newaxis], theta_bins, fov_offset_bins
        )
    )
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info("Writing output file")
    Path(args.outfile).parent.mkdir(exist_ok=True)
    fits.HDUList(hdus).writeto(args.outfile, overwrite=True)
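
# The script above calls determine_source_position() when --source_alt /
# --source_az are not given. A minimal sketch of such a helper, assuming the
# source position can be estimated as the mean true direction of the gamma
# events (the actual lstchain implementation may differ):
def determine_source_position_sketch(gammas):
    """Guess the source position from the true directions of the gamma events."""
    return gammas["true_alt"].mean(), gammas["true_az"].mean()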
def main():
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    for k, p in particles.items():
        log.info(f"Simulated {k.title()} Events:")
        p["events"], p["simulation_info"] = read_eventdisplay_fits(p["file"])
        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"],
            p["target_spectrum"],
            p["simulated_spectrum"])
        p["events"]["source_fov_offset"] = calculate_source_fov_offset(
            p["events"])
        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos
        # is the pointing position
        p["events"]["theta"] = calculate_theta(
            p["events"],
            assumed_source_az=p["events"]["pointing_az"],
            assumed_source_alt=p["events"]["pointing_alt"],
        )
        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(
            10**(-1.9) * u.TeV,
            10**2.3005 * u.TeV,
            50,
        ))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=theta_bins,
        min_value=0.05 * u.deg,
        fill_value=np.nan * u.deg,
        percentile=68,
    )

    # evaluate the theta cut
    gammas["selected_theta"] = evaluate_binned_cut(
        gammas["theta"], gammas["reco_energy"], theta_cuts, operator.le)

    # we make the background region larger by a factor of ALPHA in area,
    # so the radius scales with sqrt(ALPHA), to get better statistics
    # for the background
    theta_cuts_bg = get_bg_cuts(theta_cuts, ALPHA)
    background["selected_theta"] = evaluate_binned_cut(
        background["theta"], background["reco_energy"],
        theta_cuts_bg, operator.le)

    # same bins as event display uses
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV,
                               bins_per_decade=5))

    log.info("Optimizing G/H separation cut for best sensitivity")
    sensitivity_step_2, gh_cuts = optimize_gh_cut(
        gammas[gammas["selected_theta"]],
        background[background["selected_theta"]],
        bins=sensitivity_bins,
        cut_values=np.arange(-1.0, 1.005, 0.05),
        op=operator.ge,
        alpha=ALPHA,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(
            tab["gh_score"], tab["reco_energy"], gh_cuts, operator.ge)

    theta_cuts_opt = calculate_percentile_cut(
        gammas["theta"],
        gammas["reco_energy"],
        theta_bins,
        fill_value=np.nan * u.deg,
        percentile=68,
        min_value=0.05 * u.deg,
    )

    theta_cuts_opt_bg = get_bg_cuts(theta_cuts_opt, ALPHA)

    for tab, cuts in zip([gammas, background],
                         [theta_cuts_opt, theta_cuts_opt_bg]):
        tab["selected_theta"] = evaluate_binned_cut(
            tab["theta"], tab["reco_energy"], cuts, operator.le)
        tab["selected"] = tab["selected_theta"] & tab["selected_gh"]

    signal_hist = create_histogram_table(
        gammas[gammas["selected"]], bins=sensitivity_bins)
    background_hist = create_histogram_table(
        background[background["selected"]], bins=sensitivity_bins)

    sensitivity = calculate_sensitivity(signal_hist, background_hist,
                                        alpha=ALPHA)

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    for s in (sensitivity_step_2, sensitivity):
        s["flux_sensitivity"] = s["relative_sensitivity"] * CRAB_HEGRA(
            s["reco_energy_center"])

    # write OGADF output file
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(sensitivity_step_2, name="SENSITIVITY_STEP_2"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(theta_cuts_opt, name="THETA_CUTS_OPT"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(
            10**-1.9 * u.TeV,
            10**2.31 * u.TeV,
            10,
        ))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(
            10**-1.9 * u.TeV,
            10**2.31 * u.TeV,
            10,
        ))
    fov_offset_bins = [0, 0.5] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = point_like_effective_area(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[..., np.newaxis],  # add one dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    bias_resolution = energy_bias_resolution(
        gammas[gammas["selected"]],
        true_energy_bins,
    )
    ang_res = angular_resolution(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
    )
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(
            theta_bins, fov_offset_bins,
            rad_max=theta_cuts_opt["cut"][:, np.newaxis]))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    fits.HDUList(hdus).writeto("pyirf_eventdisplay.fits.gz", overwrite=True)
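
# The script above relies on a get_bg_cuts() helper that is not shown here.
# Following its own comment (the OFF-region area is scaled by ALPHA, so the
# radius scales with sqrt(ALPHA)), a minimal sketch could look like this;
# the helper name and the table-copy approach are assumptions:
def get_bg_cuts_sketch(theta_cuts, alpha):
    """Scale binned theta cuts from the ON region to the larger OFF region."""
    bg_cuts = theta_cuts.copy()
    bg_cuts["cut"] = bg_cuts["cut"] * np.sqrt(alpha)
    return bg_cuts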
def main():
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    for particle_type, p in particles.items():
        log.info(f"Simulated {particle_type.title()} Events:")
        p["events"], p["simulation_info"] = read_eventdisplay_fits(p["file"])
        p["events"]["particle_type"] = particle_type

        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"],
            p["target_spectrum"],
            p["simulated_spectrum"])
        for prefix in ('true', 'reco'):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"], prefix=prefix)

        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos
        # is the pointing position
        p["events"]["theta"] = calculate_theta(
            p["events"],
            assumed_source_az=p["events"]["pointing_az"],
            assumed_source_alt=p["events"]["pointing_alt"],
        )
        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    INITIAL_GH_CUT = np.quantile(gammas['gh_score'],
                                 (1 - INITIAL_GH_CUT_EFFICENCY))
    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(10**(-1.9) * u.TeV, 10**2.3005 * u.TeV, 50))

    # same bins as event display uses
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV,
                               bins_per_decade=5))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    # the cut is calculated in the same bins as the sensitivity,
    # but then interpolated to 10x the resolution.
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts_coarse = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=sensitivity_bins,
        min_value=0.05 * u.deg,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        percentile=68,
    )

    # interpolate to 50 bins per decade
    theta_center = bin_center(theta_bins)
    inter_center = bin_center(sensitivity_bins)
    theta_cuts = table.QTable({
        "low": theta_bins[:-1],
        "high": theta_bins[1:],
        "center": theta_center,
        "cut": np.interp(
            np.log10(theta_center / u.TeV),
            np.log10(inter_center / u.TeV),
            theta_cuts_coarse['cut'],
        ),
    })

    log.info("Optimizing G/H separation cut for best sensitivity")
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP)
    sensitivity, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info('Recalculating theta cut for optimized GH Cuts')
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(
            tab["gh_score"], tab["reco_energy"], gh_cuts, operator.ge)

    gammas["selected_theta"] = evaluate_binned_cut(
        gammas["theta"], gammas["reco_energy"], theta_cuts, operator.le)
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles['gamma']['target_spectrum']
    sensitivity["flux_sensitivity"] = (
        sensitivity["relative_sensitivity"]
        * spectrum(sensitivity['reco_energy_center']))

    log.info('Calculating IRFs')
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 10))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 5))
    fov_offset_bins = [0, 0.5] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[..., np.newaxis],  # add one dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    bias_resolution = energy_bias_resolution(
        gammas[gammas["selected"]], reco_energy_bins, energy_type="reco")
    ang_res = angular_resolution(
        gammas[gammas["selected_gh"]], reco_energy_bins, energy_type="reco")
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(
            theta_cuts["cut"][:, np.newaxis], theta_bins, fov_offset_bins))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info('Writing output file')
    fits.HDUList(hdus).writeto("pyirf_eventdisplay.fits.gz", overwrite=True)
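
# Standalone illustration of the log-energy interpolation used above: a cut
# computed in coarse bins (5 per decade) is resampled onto the fine theta
# binning (50 per decade). Interpolating in log10(E) keeps the sample points
# evenly spaced along the energy axis. All values below are made up for the
# example.
def interpolate_cut_example():
    import numpy as np

    coarse_center = np.geomspace(0.1, 100, 16)   # TeV, ~5 bins per decade
    coarse_cut = np.linspace(0.32, 0.05, 16)     # deg
    fine_center = np.geomspace(0.1, 100, 151)    # TeV, ~50 bins per decade
    return np.interp(
        np.log10(fine_center), np.log10(coarse_center), coarse_cut
    )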
def main(gammafile, protonfile, electronfile, outputfile):
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    particles = {
        "gamma": {
            "file": gammafile,
            "target_spectrum": CRAB_HEGRA,
        },
        "proton": {
            "file": protonfile,
            "target_spectrum": IRFDOC_PROTON_SPECTRUM,
        },
        "electron": {
            "file": electronfile,
            "target_spectrum": IRFDOC_ELECTRON_SPECTRUM,
        },
    }

    for particle_type, p in particles.items():
        log.info(f"Simulated {particle_type.title()} Events:")
        p["events"], p["simulation_info"] = read_file(p["file"])
        p["events"]["particle_type"] = particle_type

        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"],
            p["target_spectrum"],
            p["simulated_spectrum"])
        for prefix in ('true', 'reco'):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"], prefix=prefix)
        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    # calculate theta / distance between reco and assumed source position
    gammas["theta"] = calculate_theta(
        gammas,
        assumed_source_az=gammas["true_az"],
        assumed_source_alt=gammas["true_alt"],
    )

    INITIAL_GH_CUT = np.quantile(gammas['gh_score'],
                                 (1 - INITIAL_GH_CUT_EFFICENCY))
    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    theta_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV, 10**2.41 * u.TeV,
                               bins_per_decade=25))
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV, 10**2.41 * u.TeV,
                               bins_per_decade=5))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=theta_bins,
        min_value=0.05 * u.deg,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        percentile=68,
    )

    log.info("Optimizing G/H separation cut for best sensitivity")
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP)
    sensitivity_step_2, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info('Recalculating theta cut for optimized GH Cuts')
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(
            tab["gh_score"], tab["reco_energy"], gh_cuts, operator.ge)

    theta_cuts_opt = calculate_percentile_cut(
        gammas[gammas['selected_gh']]["theta"],
        gammas[gammas['selected_gh']]["reco_energy"],
        theta_bins,
        percentile=68,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        min_value=0.05 * u.deg,
    )

    gammas["selected_theta"] = evaluate_binned_cut(
        gammas["theta"], gammas["reco_energy"], theta_cuts_opt, operator.le)
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # calculate sensitivity
    signal_hist = create_histogram_table(
        gammas[gammas["selected"]], bins=sensitivity_bins)
    background_hist = estimate_background(
        background[background["selected_gh"]],
        reco_energy_bins=sensitivity_bins,
        theta_cuts=theta_cuts_opt,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )
    sensitivity = calculate_sensitivity(signal_hist, background_hist,
                                        alpha=ALPHA)

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles['gamma']['target_spectrum']
    for s in (sensitivity_step_2, sensitivity):
        s["flux_sensitivity"] = (s["relative_sensitivity"]
                                 * spectrum(s['reco_energy_center']))

    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(sensitivity_step_2, name="SENSITIVITY_STEP_2"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(theta_cuts_opt, name="THETA_CUTS_OPT"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    # calculate sensitivity using unoptimised cuts
    gammas["theta_unop"] = gammas["theta"].to_value(u.deg) <= np.sqrt(0.03)
    gammas["gh_unop"] = gammas["gh_score"] > 0.85

    theta_cut_unop = table.QTable()
    theta_cut_unop['low'] = theta_cuts_opt['low']
    theta_cut_unop['high'] = theta_cuts_opt['high']
    theta_cut_unop['center'] = theta_cuts_opt['center']
    theta_cut_unop['cut'] = np.sqrt(0.03) * u.deg

    signal_hist_unop = create_histogram_table(
        gammas[gammas["theta_unop"] & gammas["gh_unop"]],
        bins=sensitivity_bins)
    background_hist_unop = estimate_background(
        background[background["gh_score"] > 0.85],
        reco_energy_bins=sensitivity_bins,
        theta_cuts=theta_cut_unop,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )
    sensitivity_unop = calculate_sensitivity(
        signal_hist_unop, background_hist_unop, alpha=ALPHA)
    sensitivity_unop["flux_sensitivity"] = (
        sensitivity_unop["relative_sensitivity"]
        * spectrum(sensitivity_unop['reco_energy_center']))
    hdus.append(fits.BinTableHDU(sensitivity_unop, name="SENSITIVITY_UNOP"))

    log.info('Calculating IRFs')
    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV, 10**2.41 * u.TeV,
                               bins_per_decade=10))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV, 10**2.41 * u.TeV,
                               bins_per_decade=5))
    fov_offset_bins = [0, 0.5] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[..., np.newaxis],  # add one dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    bias_resolution = energy_bias_resolution(
        gammas[gammas["selected"]],
        true_energy_bins,
    )
    ang_res = angular_resolution(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
    )
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(
            theta_cuts_opt["cut"][:, np.newaxis], theta_bins, fov_offset_bins))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info('Writing output file')
    fits.HDUList(hdus).writeto(outputfile, overwrite=True)
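
# A minimal sketch of how this main() could be wired up as a command line
# entry point; the real script may use click or a different interface, so
# treat this as an assumption:
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Produce IRFs and sensitivity from DL2 files")
    parser.add_argument("gammafile")
    parser.add_argument("protonfile")
    parser.add_argument("electronfile")
    parser.add_argument("outputfile")
    args = parser.parse_args()

    main(args.gammafile, args.protonfile, args.electronfile, args.outputfile)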