Example #1
def test_energy_bias_resolution():
    from itertools import product

    import numpy as np
    from scipy.stats import norm

    from pyirf.benchmarks import energy_bias_resolution_from_energy_dispersion
    from pyirf.binning import bin_center

    # create a toy energy dispersion
    n_migra_bins = 500
    true_bias = np.array([
        [0.5, 0.6],
        [0, 0.1],
        [-0.1, -0.2],
    ])
    true_resolution = np.array([
        [0.4, 0.5],
        [0.2, 0.3],
        [0.1, 0.15],
    ])

    n_energy_bins, n_fov_bins = true_bias.shape
    energy_bins = np.geomspace(10, 1000, n_energy_bins + 1)
    energy_center = bin_center(energy_bins)
    migra_bins = np.geomspace(0.2, 5, n_migra_bins + 1)

    cdf = np.empty((n_energy_bins, n_migra_bins + 1, n_fov_bins))
    for energy_bin, fov_bin in product(range(n_energy_bins),
                                       range(n_fov_bins)):

        energy = energy_center[energy_bin]
        mu = (1 + true_bias[energy_bin, fov_bin]) * energy
        sigma = true_resolution[energy_bin, fov_bin] * energy
        reco_energy = migra_bins * energy

        cdf[energy_bin, :, fov_bin] = norm.cdf(reco_energy, mu, sigma)

    edisp = cdf[:, 1:, :] - cdf[:, :-1, :]

    bias, resolution = energy_bias_resolution_from_energy_dispersion(
        edisp,
        migra_bins,
    )

    assert np.allclose(bias, true_bias, atol=0.01)
    assert np.allclose(resolution, true_resolution, atol=0.01)
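
    # Added sanity check (an assumption for this example, not part of the
    # original asserts): each (true energy, FOV offset) slice of the toy
    # dispersion is a binned probability distribution, so summing over the
    # migration axis should give ~1 up to the small tail that falls outside
    # the 0.2-5 migration range; the 0.01 tolerance is an arbitrary choice.
    assert np.allclose(edisp.sum(axis=1), 1.0, atol=0.01)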
Example #2
def test_calculate_sensitivity():
    import numpy as np
    import astropy.units as u
    from astropy.table import QTable

    from pyirf.sensitivity import calculate_sensitivity
    from pyirf.binning import bin_center

    bins = [0.1, 1.0, 10] * u.TeV
    signal_hist = QTable({
        'reco_energy_low': bins[:-1],
        'reco_energy_high': bins[1:],
        'reco_energy_center': bin_center(bins),
        'n': [100, 100],
        'n_weighted': [100, 100],
    })
    bg_hist = signal_hist.copy()
    bg_hist['n'] = 20
    bg_hist['n_weighted'] = 50

    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)

    # too small sensitivity
    signal_hist['n_weighted'] = [10, 10]
    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)

    assert len(sensitivity) == len(signal_hist)
    assert np.all(np.isnan(sensitivity['relative_sensitivity']))
    assert np.all(sensitivity['failed_checks'] == 0b100)

    # not above 5 percent of remaining background
    signal_hist['n_weighted'] = 699
    bg_hist['n_weighted'] = 70_000
    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)
    assert len(sensitivity) == len(signal_hist)
    assert np.all(np.isnan(sensitivity['relative_sensitivity']))
    # failed check: signal not above 5 percent of remaining background
    assert np.all(sensitivity['failed_checks'] == 0b010)

    # fewer than 10 signal events
    signal_hist['n_weighted'] = [9, 9]
    bg_hist['n_weighted'] = [1, 1]
    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)
    assert len(sensitivity) == len(signal_hist)
    assert np.all(np.isnan(sensitivity['relative_sensitivity']))
    # too few signal
    assert np.all(sensitivity['failed_checks'] == 0b001)
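The three failure modes exercised above end up as individual bits in the failed_checks column. Below is a small helper for turning those flags back into readable reasons, under the assumption that the bits map to the three scenarios exactly as asserted in this test (the mapping is inferred from the asserts, not taken from the pyirf documentation):

import numpy as np

# Bit meanings inferred from the three failing scenarios above; treat this
# mapping as an assumption, not as documented pyirf behaviour.
FAILED_CHECKS = {
    0b001: "fewer than 10 signal events",
    0b010: "signal not above 5 percent of the remaining background",
    0b100: "too small sensitivity",
}

def describe_failed_checks(failed_checks):
    """Return the list of failure reasons for each reco energy bin."""
    return [
        [reason for bit, reason in FAILED_CHECKS.items() if flags & bit]
        for flags in np.atleast_1d(failed_checks)
    ]

For example, calling describe_failed_checks(sensitivity['failed_checks']) after the last scenario above would yield ["fewer than 10 signal events"] for every bin.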
Example #3
def test_calculate_sensitivity():
    import numpy as np
    import astropy.units as u
    from astropy.table import QTable

    from pyirf.sensitivity import calculate_sensitivity
    from pyirf.binning import bin_center

    bins = [0.1, 1.0, 10] * u.TeV
    signal_hist = QTable({
        'reco_energy_low': bins[:-1],
        'reco_energy_high': bins[1:],
        'reco_energy_center': bin_center(bins),
        'n': [100, 100],
        'n_weighted': [100, 100],
    })
    bg_hist = signal_hist.copy()
    bg_hist['n'] = 20
    bg_hist['n_weighted'] = 50

    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)

    # too small sensitivity
    signal_hist['n_weighted'] = [10, 10]
    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)

    assert len(sensitivity) == len(signal_hist)
    assert np.all(np.isnan(sensitivity['relative_sensitivity']))

    # not above 5 percent of remaining background
    signal_hist['n_weighted'] = 699
    bg_hist['n_weighted'] = 70_000
    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)
    assert len(sensitivity) == len(signal_hist)
    # we scale up the signal until we meet the requirement, so
    # we must have at least 5 sigma
    assert np.all(sensitivity['significance'] >= 5)

    # fewer than 10 signal events
    signal_hist['n_weighted'] = [9, 9]
    bg_hist['n_weighted'] = [1, 1]
    sensitivity = calculate_sensitivity(signal_hist, bg_hist, alpha=0.2)
    # we scale up the signal until we meet the requirement, so
    # we must have at least 5 sigma
    assert np.all(sensitivity['significance'] >= 5)
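The 5 sigma requirement mentioned in the comments conventionally refers to the Li & Ma (1983) significance for on/off counting. Below is a minimal numpy sketch of that formula for reference; whether calculate_sensitivity applies exactly this expression internally is an assumption here. n_on, n_off and alpha are the on-region counts, the off-region counts and the on/off exposure ratio:

import numpy as np

def li_ma_significance(n_on, n_off, alpha):
    """Li & Ma (1983), eq. 17: significance of an on/off measurement."""
    n_on = np.asanyarray(n_on, dtype=np.float64)
    n_off = np.asanyarray(n_off, dtype=np.float64)
    term_on = n_on * np.log((1 + alpha) / alpha * n_on / (n_on + n_off))
    term_off = n_off * np.log((1 + alpha) * n_off / (n_on + n_off))
    return np.sqrt(2 * (term_on + term_off))

# e.g. 100 on counts with 350 off counts and alpha = 0.2
# (an expected background of 0.2 * 350 = 70 in the on region)
print(li_ma_significance(100, 350, alpha=0.2))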
def main():
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    for particle_type, p in particles.items():
        log.info(f"Simulated {particle_type.title()} Events:")
        p["events"], p["simulation_info"] = read_eventdisplay_fits(p["file"])
        p["events"]["particle_type"] = particle_type

        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"], p["target_spectrum"],
            p["simulated_spectrum"])
        for prefix in ('true', 'reco'):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"],
                                                         prefix=prefix)

        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos
        # is the pointing position
        p["events"]["theta"] = calculate_theta(
            p["events"],
            assumed_source_az=p["events"]["pointing_az"],
            assumed_source_alt=p["events"]["pointing_alt"],
        )
        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    INITIAL_GH_CUT = np.quantile(gammas['gh_score'],
                                 (1 - INITIAL_GH_CUT_EFFICENCY))
    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(10**(-1.9) * u.TeV, 10**2.3005 * u.TeV, 50))
    # same bins as event display uses
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV,
                               10**2.31 * u.TeV,
                               bins_per_decade=5))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    # the cut is calculated in the same bins as the sensitivity,
    # but then interpolated to 10x the resolution.
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts_coarse = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=sensitivity_bins,
        min_value=0.05 * u.deg,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        percentile=68,
    )

    # interpolate to 50 bins per decade
    theta_center = bin_center(theta_bins)
    inter_center = bin_center(sensitivity_bins)
    theta_cuts = table.QTable({
        "low": theta_bins[:-1],
        "high": theta_bins[1:],
        "center": theta_center,
        "cut": np.interp(
            np.log10(theta_center / u.TeV),
            np.log10(inter_center / u.TeV),
            theta_cuts_coarse['cut'],
        ),
    })

    log.info("Optimizing G/H separation cut for best sensitivity")
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP)
    sensitivity, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info('Recalculating theta cut for optimized GH Cuts')
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(tab["gh_score"],
                                                 tab["reco_energy"], gh_cuts,
                                                 operator.ge)

    gammas["selected_theta"] = evaluate_binned_cut(gammas["theta"],
                                                   gammas["reco_energy"],
                                                   theta_cuts, operator.le)
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles['gamma']['target_spectrum']
    sensitivity["flux_sensitivity"] = (
        sensitivity["relative_sensitivity"] *
        spectrum(sensitivity['reco_energy_center']))

    log.info('Calculating IRFs')
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 10))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 5))
    fov_offset_bins = [0, 0.5] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[..., np.newaxis],  # add one dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    bias_resolution = energy_bias_resolution(gammas[gammas["selected"]],
                                             reco_energy_bins,
                                             energy_type="reco")
    ang_res = angular_resolution(gammas[gammas["selected_gh"]],
                                 reco_energy_bins,
                                 energy_type="reco")
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(theta_cuts["cut"][:, np.newaxis], theta_bins,
                           fov_offset_bins))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(
        fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info('Writing output file')
    fits.HDUList(hdus).writeto("pyirf_eventdisplay.fits.gz", overwrite=True)
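Once main() has run, the written file can be inspected with astropy to check which extensions ended up in the output. A minimal sketch follows; the names mentioned in the comment are only those passed explicitly via the extname/name arguments above, the remaining HDUs carry whatever default names the pyirf writer functions use:

from astropy.io import fits

# list all HDU names in the file produced by main(), e.g. SENSITIVITY,
# THETA_CUTS, GH_CUTS, the EFFECTIVE_AREA*/ENERGY_DISPERSION* variants,
# ANGULAR_RESOLUTION and ENERGY_BIAS_RESOLUTION
with fits.open("pyirf_eventdisplay.fits.gz") as f:
    print([hdu.name for hdu in f])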