def test_read_mc_dl2_to_QTable(simulated_dl2_file):
    """Reading a simulated DL2 file yields an event QTable and simulation info."""
    import astropy.units as u

    from lstchain.io.io import read_mc_dl2_to_QTable

    events, sim_info = read_mc_dl2_to_QTable(simulated_dl2_file)

    # the events table carries the MC truth columns
    assert "true_energy" in events.colnames
    # the simulation metadata reflects the production's maximum energy
    assert sim_info.energy_max == 330 * u.TeV
def theta2_hist_per_energy_bin(irf_file, dl2_gamma_file):
    """
    Plot a theta2 histogram per energy bin of gamma events passing the
    gh_score cut, displaying the theta2 cut applied for IRFs.

    Parameters
    ----------
    irf_file: path to the IRF FITS file providing the GH and RAD_MAX cut tables
    dl2_gamma_file: path to the DL2 gamma MC file

    Returns
    -------
    axes: numpy array of matplotlib Axes, one panel per energy bin
    """
    gammas, sim_info = read_mc_dl2_to_QTable(dl2_gamma_file)
    # NOTE(review): `filters` is taken from module scope — presumably the
    # standard event filters dict; confirm it is defined where this is used.
    gammas = filter_events(gammas, filters)

    for prefix in ("true", "reco"):
        k = f"{prefix}_source_fov_offset"
        gammas[k] = calculate_source_fov_offset(gammas, prefix=prefix)

    source_alt, source_az = determine_source_position(gammas)
    gammas['theta'] = calculate_theta(gammas, assumed_source_az=source_az, assumed_source_alt=source_alt)

    gh_cuts = read_gh_cut_table(irf_file)
    theta_cuts = read_theta_cut_table(irf_file)
    # RAD_MAX is stored transposed; take the first FOV-offset column
    tc = theta_cuts["RAD_MAX"].T[:, 0]

    energy_min = theta_cuts['ENERG_LO']
    energy_max = theta_cuts['ENERG_HI']
    # the theta-cut and gh-cut tables must share the same energy binning
    np.testing.assert_allclose(energy_min, gh_cuts['low'])
    np.testing.assert_allclose(energy_max, gh_cuts['high'])

    ncols = 5
    # enough rows to fit one panel per energy bin
    nrows = len(energy_min) // ncols + int((len(energy_min) % ncols) > 0)
    fig, axes = plt.subplots(ncols=ncols, nrows=nrows, figsize=(ncols * 5, nrows * 5))
    xrange = (0, 0.4)

    for ii, emin in enumerate(energy_min):
        ax = axes.ravel()[ii]
        emax = energy_max[ii]
        # BUGFIX: select events *passing* the gh cut (score >= cut), as the
        # docstring states and as main() does with operator.ge; the original
        # used `<`, which selected the rejected events instead.
        mask = (gammas['gh_score'] >= gh_cuts['cut'][ii]) & (
            emin <= gammas['true_energy']) & (gammas['true_energy'] < emax)
        t2unit = u.deg**2
        n, bins, _ = ax.hist(
            (gammas[mask]['theta']**2).to_value(t2unit),
            label=f'{emin:0.2f}-{emax:0.2f}',
            histtype='step',
            lw=2,
            bins=np.linspace(*xrange),
            range=xrange,
            density=False,
        )
        # mark the theta2 cut applied for the IRFs in this energy bin
        ax.vlines((tc[ii]**2).to_value(t2unit), 0, np.max(n), color='orange')
        ax.legend()
        ax.set_xlim(*xrange)
        ax.set_xlabel(f'theta2 / {t2unit}')
        ax.set_title(f"gh cut: {gh_cuts['cut'][ii]:0.3f}")

    plt.tight_layout()
    return axes
def main():
    """
    Command-line entry point: read gamma/proton/electron DL2 MC files,
    optimize G/H and theta cuts for best sensitivity, compute the
    sensitivity curve and the IRFs, and write everything to a FITS file.
    """
    log = logging.getLogger("lstchain MC DL2 to IRF - sensitivity curves")

    parser = argparse.ArgumentParser(description="MC DL2 to IRF")

    # Required arguments
    parser.add_argument(
        "--gamma-dl2", "-g", type=str, dest="gamma_file", help="Path to the dl2 gamma file"
    )
    parser.add_argument(
        "--proton-dl2", "-p",
        type=str,
        dest="proton_file",
        help="Path to the dl2 proton file",
    )
    parser.add_argument(
        "--electron-dl2", "-e",
        type=str,
        dest="electron_file",
        help="Path to the dl2 electron file",
    )
    parser.add_argument(
        "--outfile", "-o",
        action="store",
        type=str,
        dest="outfile",
        help="Path where to save IRF FITS file",
        default="sensitivity.fits.gz",
    )
    parser.add_argument(
        "--source_alt",
        action="store",
        type=float,
        dest="source_alt",
        help="Source altitude (optional). If not provided, it will be guessed from the gammas true altitude",
        default=None
    )
    parser.add_argument(
        "--source_az",
        action="store",
        type=float,
        dest="source_az",
        help="Source azimuth (optional). If not provided, it will be guessed from the gammas true altitude",
        default=None
    )

    # Optional arguments
    # parser.add_argument('--config', '-c', action='store', type=Path,
    #                     dest='config_file',
    #                     help='Path to a configuration file. If none is given, a standard configuration is applied',
    #                     default=None
    #                     )

    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    # one entry per particle type: input file and the spectrum the events
    # will be re-weighted to
    particles = {
        "gamma": {"file": args.gamma_file, "target_spectrum": CRAB_HEGRA},
        "proton": {"file": args.proton_file, "target_spectrum": IRFDOC_PROTON_SPECTRUM},
        "electron": {
            "file": args.electron_file,
            "target_spectrum": IRFDOC_ELECTRON_SPECTRUM,
        },
    }

    # read each particle's DL2 events, re-weight to the target spectrum and
    # compute true/reco FOV offsets
    for particle_type, p in particles.items():
        log.info("Simulated Events: {}".format(particle_type.title()))
        p["events"], p["simulation_info"] = read_mc_dl2_to_QTable(p["file"])
        # p['events'] = filter_events(p['events'], filters)
        print("=====", particle_type, "=====")
        # p["events"]["particle_type"] = particle_type

        # weights scale the simulated power law to the target spectrum for T_OBS
        p["simulated_spectrum"] = PowerLaw.from_simulation(p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"], p["target_spectrum"], p["simulated_spectrum"]
        )
        for prefix in ("true", "reco"):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"], prefix=prefix)

    gammas = particles["gamma"]["events"]

    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]]
    )

    # use the CLI-provided source position, or derive it from the gammas
    if args.source_alt is None or args.source_az is None:
        source_alt, source_az = determine_source_position(gammas)
    else:
        source_alt, source_az = args.source_alt, args.source_az

    for particle_type, p in particles.items():
        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos is the pointing position
        p["events"]["theta"] = calculate_theta(p["events"], assumed_source_az=source_az, assumed_source_alt=source_alt)
        log.info(p["simulation_info"])
        log.info("")

    # initial fixed gh cut: keep the top INITIAL_GH_CUT_EFFICENCY fraction of gammas
    INITIAL_GH_CUT = np.quantile(gammas["gh_score"], (1 - INITIAL_GH_CUT_EFFICENCY))
    log.info("Using fixed G/H cut of {} to calculate theta cuts".format(INITIAL_GH_CUT))

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, N_BIN_PER_DECADE)
    )

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=theta_bins,
        min_value=MIN_THETA_CUT,
        fill_value=MAX_THETA_CUT,
        max_value=MAX_THETA_CUT,
        percentile=68,
    )

    # same number of bins per decade than official CTA IRFs
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, bins_per_decade=N_BIN_PER_DECADE)
    )

    log.info("Optimizing G/H separation cut for best sensitivity")
    # efficiencies scanned from one step up to MAX_GH_CUT_EFFICIENCY inclusive
    # (the +STEP/2 guards against float round-off excluding the endpoint)
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP,
    )

    sensitivity_step_2, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info("Recalculating theta cut for optimized GH Cuts")
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(
            tab["gh_score"], tab["reco_energy"], gh_cuts, operator.ge
        )

    theta_cuts_opt = calculate_percentile_cut(
        gammas[gammas["selected_gh"]]["theta"],
        gammas[gammas["selected_gh"]]["reco_energy"],
        theta_bins,
        percentile=68,
        fill_value=MAX_THETA_CUT,
        max_value=MAX_THETA_CUT,
        min_value=MIN_THETA_CUT,
    )

    gammas["selected_theta"] = evaluate_binned_cut(
        gammas["theta"], gammas["reco_energy"], theta_cuts_opt, operator.le
    )
    # final selection: events passing both the gh and the theta cuts
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # calculate sensitivity
    signal_hist = create_histogram_table(
        gammas[gammas["selected"]], bins=sensitivity_bins
    )
    background_hist = estimate_background(
        background[background["selected_gh"]],
        reco_energy_bins=sensitivity_bins,
        theta_cuts=theta_cuts_opt,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )
    sensitivity = calculate_sensitivity(signal_hist, background_hist, alpha=ALPHA)

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles["gamma"]["target_spectrum"]
    for s in (sensitivity_step_2, sensitivity):
        s["flux_sensitivity"] = s["relative_sensitivity"] * spectrum(
            s["reco_energy_center"]
        )

    log.info("Calculating IRFs")
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(sensitivity_step_2, name="SENSITIVITY_STEP_2"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(theta_cuts_opt, name="THETA_CUTS_OPT"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    # IRFs are produced for the full selection and for each partial selection;
    # the key becomes the HDU extname suffix
    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, N_BIN_PER_DECADE)
    )
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, N_BIN_PER_DECADE)
    )
    fov_offset_bins = [0, 0.6] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[..., np.newaxis],  # add one dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            )
        )
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            )
        )

    # resolution / PSF diagnostics on the gh-selected (and fully selected) gammas
    bias_resolution = energy_bias_resolution(
        gammas[gammas["selected"]],
        true_energy_bins,
        resolution_function=energy_resolution_absolute_68,
    )
    ang_res = angular_resolution(gammas[gammas["selected_gh"]], true_energy_bins)
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background["selected_gh"]],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate, reco_energy_bins, fov_offset_bins=np.arange(0, 11) * u.deg
        )
    )
    hdus.append(
        create_psf_table_hdu(psf, true_energy_bins, source_offset_bins, fov_offset_bins)
    )
    hdus.append(
        create_rad_max_hdu(
            theta_cuts_opt["cut"][:, np.newaxis], theta_bins, fov_offset_bins
        )
    )
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info("Writing output file")
    Path(args.outfile).parent.mkdir(exist_ok=True)
    fits.HDUList(hdus).writeto(args.outfile, overwrite=True)