Code example #1
def load_mc(path, obstime, spectrum):
    sim_info = read_sim_info(path)
    data = read_mc_dl1(path)
    try:
        dl2 = read_mc_dl2(path)
        data = join(data,
                    dl2,
                    keys=["obs_id", "event_id"],
                    table_names=["dl1", "dl2"])
        for c in list(data.columns):
            if c.endswith("dl1"):
                data.rename_column(c, re.sub("_dl1", "", c))
            elif c.endswith("dl2"):
                del data[c]
    except Exception:
        log.info("Only using dl1 data")
    data["weights"] = calculate_event_weights(
        data["true_energy"],
        spectrum,
        PowerLaw.from_simulation(sim_info, obstime),
    )
    log.info(f"Loading of {path} finished")
    log.info(f"{len(data)} events")
    return data
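
For context, a minimal usage sketch of load_mc. The input path is a placeholder; CRAB_HEGRA and the unit handling come from pyirf and astropy, as in the examples below:

import astropy.units as u
from pyirf.spectral import CRAB_HEGRA

# Hypothetical DL1 file; load_mc joins DL1 (and DL2, if present) tables
# and attaches per-event weights for the requested observation time.
gammas = load_mc("gamma_dl1.h5", obstime=50 * u.hour, spectrum=CRAB_HEGRA)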
Code example #2
def load_mc(path, obstime, spectrum):
    data, sim_info = read_mc_dl2_to_QTable(path)
    log.info(f"Loading of {path} finished")
    log.info(f"{len(data)} events")
    log.info(f"Weighting from {sim_info.n_showers} showers to: {obstime.to(u.min):.2f}")
    data["weights"] = calculate_event_weights(
        data["true_energy"],
        spectrum,
        PowerLaw.from_simulation(sim_info, obstime),
    )
    return data
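
The reweighting used throughout these examples scales each event by the ratio of the target spectrum to the simulated spectrum at the event's true energy. The sketch below illustrates the idea only; it is not pyirf's calculate_event_weights code:

import astropy.units as u

def event_weights_sketch(true_energy, target_spectrum, simulated_spectrum):
    # weight(E) = target flux / simulated flux, evaluated at the true energy;
    # the result is dimensionless
    return (target_spectrum(true_energy)
            / simulated_spectrum(true_energy)).to_value(u.one)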
Code example #3
    def start(self):

        for particle_type, p in self.mc_particle.items():
            self.log.info(f"Simulated {particle_type.title()} Events:")
            p["events"], p["simulation_info"] = read_mc_dl2_to_QTable(
                p["file"])

            p["mc_type"] = check_mc_type(p["file"])

            self.log.debug(
                f"Simulated {p['mc_type']} {particle_type.title()} Events:")

            # Calculating event weights for Background IRF
            if particle_type != "gamma":
                p["simulated_spectrum"] = PowerLaw.from_simulation(
                    p["simulation_info"], self.t_obs)

                p["events"]["weight"] = calculate_event_weights(
                    p["events"]["true_energy"],
                    p["target_spectrum"],
                    p["simulated_spectrum"],
                )

            if not self.source_dep:
                for prefix in ("true", "reco"):
                    k = f"{prefix}_source_fov_offset"
                    p["events"][k] = calculate_source_fov_offset(p["events"],
                                                                 prefix=prefix)

                # calculate theta / distance between reco and assumed source position
                p["events"]["theta"] = calculate_theta(
                    p["events"],
                    assumed_source_az=p["events"]["true_az"],
                    assumed_source_alt=p["events"]["true_alt"],
                )

            else:
                # Alpha cut is applied for source-dependent analysis.
                # To adapt source-dependent analysis to pyirf codes,
                # true position is set as reco position for survived events
                # after alpha cut
                p["events"][
                    "true_source_fov_offset"] = calculate_source_fov_offset(
                        p["events"], prefix="true")
                p["events"]["reco_source_fov_offset"] = p["events"][
                    "true_source_fov_offset"]

            self.log.debug(p["simulation_info"])
        gammas = self.mc_particle["gamma"]["events"]

        # Binning of parameters used in IRFs
        true_energy_bins = self.data_bin.true_energy_bins()
        reco_energy_bins = self.data_bin.reco_energy_bins()
        migration_bins = self.data_bin.energy_migration_bins()
        source_offset_bins = self.data_bin.source_offset_bins()

        gammas = self.event_sel.filter_cut(gammas)
        gammas = self.cuts.allowed_tels_filter(gammas)

        if self.energy_dependent_gh:
            self.gh_cuts_gamma = self.cuts.energy_dependent_gh_cuts(
                gammas, reco_energy_bins)
            gammas = self.cuts.apply_energy_dependent_gh_cuts(
                gammas, self.gh_cuts_gamma)
            self.log.info(
                f"Using gamma efficiency of {self.cuts.gh_efficiency}")
        else:
            gammas = self.cuts.apply_global_gh_cut(gammas)
            self.log.info("Using a global gammaness cut of "
                          f"{self.cuts.global_gh_cut}")

        if self.point_like:
            if not self.source_dep:
                if self.energy_dependent_theta:
                    self.theta_cuts = self.cuts.energy_dependent_theta_cuts(
                        gammas,
                        reco_energy_bins,
                    )
                    gammas = self.cuts.apply_energy_dependent_theta_cuts(
                        gammas, self.theta_cuts)
                    self.log.info("Using a containment region for theta of "
                                  f"{self.cuts.theta_containment}")
                else:
                    gammas = self.cuts.apply_global_theta_cut(gammas)
                    self.log.info(
                        "Using a global Theta cut of "
                        f"{self.cuts.global_theta_cut} for point-like IRF")
            else:
                if self.energy_dependent_alpha:
                    self.alpha_cuts = self.cuts.energy_dependent_alpha_cuts(
                        gammas,
                        reco_energy_bins,
                    )
                    gammas = self.cuts.apply_energy_dependent_alpha_cuts(
                        gammas, self.alpha_cuts)
                    self.log.info("Using a containment region for alpha of "
                                  f"{self.cuts.alpha_containment} %")
                else:
                    gammas = self.cuts.apply_global_alpha_cut(gammas)
                    self.log.info(
                        'Using a global Alpha cut of '
                        f'{self.cuts.global_alpha_cut} for point like IRF')

        if self.mc_particle["gamma"]["mc_type"] in [
                "point_like", "ring_wobble"
        ]:
            mean_fov_offset = round(
                gammas["true_source_fov_offset"].mean().to_value(), 1)
            fov_offset_bins = [mean_fov_offset - 0.1,
                               mean_fov_offset + 0.1] * u.deg
            self.log.info('Single offset for point like gamma MC')
        else:
            fov_offset_bins = self.data_bin.fov_offset_bins()
            self.log.info('Multiple offset for diffuse gamma MC')

            if self.energy_dependent_theta:
                fov_offset_bins = [
                    round(gammas["true_source_fov_offset"].min().to_value(),
                          1),
                    round(gammas["true_source_fov_offset"].max().to_value(), 1)
                ] * u.deg
                self.log.info("For RAD MAX, the full FoV is used")

        if not self.only_gamma_irf:
            background = table.vstack([
                self.mc_particle["proton"]["events"],
                self.mc_particle["electron"]["events"]
            ])

            if self.energy_dependent_gh:
                background = self.cuts.apply_energy_dependent_gh_cuts(
                    background, self.gh_cuts_gamma)
            else:
                background = self.cuts.apply_global_gh_cut(background)

            background = self.event_sel.filter_cut(background)
            background = self.cuts.allowed_tels_filter(background)

            background_offset_bins = self.data_bin.bkg_fov_offset_bins()

        # For a global gh/theta cut, only a header value is added.
        # For energy-dependent cuts, along with GADF specified RAD_MAX HDU,
        # a new HDU is created, GH_CUTS which is based on RAD_MAX table

        # NOTE: The GH_CUTS HDU is just for provenance and is not supported
        # by GADF or used by any Science Tools
        extra_headers = {
            "TELESCOP": "CTA-N",
            "INSTRUME": "LST-" + " ".join(map(str, self.cuts.allowed_tels)),
            "FOVALIGN": "RADEC",
        }
        if self.point_like:
            self.log.info("Generating point_like IRF HDUs")
        else:
            self.log.info("Generating Full-Enclosure IRF HDUs")

        # Updating the HDU headers with the gammaness and theta cuts/efficiency
        if not self.energy_dependent_gh:
            extra_headers["GH_CUT"] = self.cuts.global_gh_cut

        else:
            extra_headers["GH_EFF"] = (self.cuts.gh_efficiency,
                                       "gamma/hadron efficiency")

        if self.point_like:
            if not self.source_dep:
                if self.energy_dependent_theta:
                    extra_headers["TH_CONT"] = (
                        self.cuts.theta_containment,
                        "Theta containment region in percentage")
                else:
                    extra_headers["RAD_MAX"] = (self.cuts.global_theta_cut,
                                                'deg')
            else:
                if self.energy_dependent_alpha:
                    extra_headers["AL_CONT"] = (
                        self.cuts.alpha_containment,
                        "Alpha containment region in percentage")
                else:
                    extra_headers["AL_CUT"] = (self.cuts.global_alpha_cut,
                                               'deg')

        # Write HDUs
        self.hdus = [
            fits.PrimaryHDU(),
        ]

        with np.errstate(invalid="ignore", divide="ignore"):
            if self.mc_particle["gamma"]["mc_type"] in [
                    "point_like", "ring_wobble"
            ]:
                self.effective_area = effective_area_per_energy(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        # add one dimension for single FOV offset
                        self.effective_area[..., np.newaxis],
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    ))
            else:
                self.effective_area = effective_area_per_energy_and_fov(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                    fov_offset_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        self.effective_area,
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    ))

        self.log.info("Effective Area HDU created")
        self.edisp = energy_dispersion(
            gammas,
            true_energy_bins,
            fov_offset_bins,
            migration_bins,
        )
        self.hdus.append(
            create_energy_dispersion_hdu(
                self.edisp,
                true_energy_bins,
                migration_bins,
                fov_offset_bins,
                point_like=self.point_like,
                extname="ENERGY DISPERSION",
                **extra_headers,
            ))
        self.log.info("Energy Dispersion HDU created")

        if not self.only_gamma_irf:
            self.background = background_2d(
                background,
                reco_energy_bins=reco_energy_bins,
                fov_offset_bins=background_offset_bins,
                t_obs=self.t_obs,
            )
            self.hdus.append(
                create_background_2d_hdu(
                    self.background.T,
                    reco_energy_bins,
                    background_offset_bins,
                    extname="BACKGROUND",
                    **extra_headers,
                ))
            self.log.info("Background HDU created")

        if not self.point_like:
            self.psf = psf_table(
                gammas,
                true_energy_bins,
                fov_offset_bins=fov_offset_bins,
                source_offset_bins=source_offset_bins,
            )
            self.hdus.append(
                create_psf_table_hdu(
                    self.psf,
                    true_energy_bins,
                    source_offset_bins,
                    fov_offset_bins,
                    extname="PSF",
                    **extra_headers,
                ))
            self.log.info("PSF HDU created")

        if self.energy_dependent_gh:
            # Create a separate temporary header
            gh_header = fits.Header()
            gh_header["CREATOR"] = f"lstchain v{__version__}"
            gh_header["DATE"] = Time.now().utc.iso

            for k, v in extra_headers.items():
                gh_header[k] = v

            self.hdus.append(
                fits.BinTableHDU(self.gh_cuts_gamma,
                                 header=gh_header,
                                 name="GH_CUTS"))
            self.log.info("GH CUTS HDU added")

        if self.energy_dependent_theta and self.point_like:
            if not self.source_dep:
                self.hdus.append(
                    create_rad_max_hdu(self.theta_cuts["cut"][:, np.newaxis],
                                       reco_energy_bins, fov_offset_bins,
                                       **extra_headers))
                self.log.info("RAD MAX HDU added")

        if self.energy_dependent_alpha and self.source_dep:
            # Create a separate temporary header
            alpha_header = fits.Header()
            alpha_header["CREATOR"] = f"lstchain v{__version__}"
            alpha_header["DATE"] = Time.now().utc.iso

            for k, v in extra_headers.items():
                alpha_header[k] = v

            self.hdus.append(
                fits.BinTableHDU(self.alpha_cuts,
                                 header=alpha_header,
                                 name="AL_CUTS"))
            self.log.info("ALPHA CUTS HDU added")
Code example #4
def main():

    # Read arguments
    parser = argparse.ArgumentParser(description='Make performance files')
    parser.add_argument('--config_file', type=str, required=True,
                        help='Path to the configuration file')

    mode_group = parser.add_mutually_exclusive_group()
    mode_group.add_argument('--wave',
                            dest="mode",
                            action='store_const',
                            const="wave",
                            default="tail",
                            help="if set, use wavelet cleaning")
    mode_group.add_argument('--tail',
                            dest="mode",
                            action='store_const',
                            const="tail",
                            help="if set, use tail cleaning (default)")

    args = parser.parse_args()

    # Read configuration file
    cfg = load_config(args.config_file)

    # Create output directory if necessary
    outdir = os.path.join(
        cfg['general']['outdir'],
        'performance_protopipe_{}_CTA{}_{}_Zd{}_{}_Time{:.2f}{}'.format(
            cfg['general']['prod'], cfg['general']['site'],
            cfg['general']['array'], cfg['general']['zenith'],
            cfg['general']['azimuth'], cfg['analysis']['obs_time']['value'],
            cfg['analysis']['obs_time']['unit']),
    )
    os.makedirs(outdir, exist_ok=True)

    indir = cfg['general']['indir']
    template_input_file = cfg['general']['template_input_file']

    T_OBS = cfg['analysis']['obs_time']['value'] * u.Unit(
        cfg['analysis']['obs_time']['unit'])

    # scaling between on and off region.
    # Make off region 5 times larger than on region for better
    # background statistics
    ALPHA = cfg['analysis']['alpha']
    # Radius to use for calculating bg rate
    MAX_BG_RADIUS = cfg['analysis']['max_bg_radius'] * u.deg

    particles = {
        "gamma": {
            "file": os.path.join(
                indir, template_input_file.format(args.mode, "gamma")),
            "target_spectrum": CRAB_HEGRA,
            "run_header": cfg['particle_information']['gamma'],
        },
        "proton": {
            "file": os.path.join(
                indir, template_input_file.format(args.mode, "proton")),
            "target_spectrum": IRFDOC_PROTON_SPECTRUM,
            "run_header": cfg['particle_information']['proton'],
        },
        "electron": {
            "file": os.path.join(
                indir, template_input_file.format(args.mode, "electron")),
            "target_spectrum": IRFDOC_ELECTRON_SPECTRUM,
            "run_header": cfg['particle_information']['electron'],
        },
    }

    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    for particle_type, p in particles.items():
        log.info(f"Simulated {particle_type.title()} Events:")
        p["events"], p["simulation_info"] = read_DL2_pyirf(
            p["file"], p["run_header"])

        # Multiplicity cut
        min_multiplicity = cfg['analysis']['cut_on_multiplicity']
        p["events"] = p["events"][
            p["events"]["multiplicity"] >= min_multiplicity].copy()

        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)
        # Weight events
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"], p["target_spectrum"],
            p["simulated_spectrum"])

        for prefix in ('true', 'reco'):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"],
                                                         prefix=prefix)

        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos
        # is the pointing position
        p["events"]["theta"] = calculate_theta(
            p["events"],
            assumed_source_az=p["events"]["pointing_az"],
            assumed_source_alt=p["events"]["pointing_alt"],
        )
        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    MAX_GH_CUT_EFFICIENCY = 0.8
    GH_CUT_EFFICIENCY_STEP = 0.01

    # gh cut used for first calculation of the binned theta cuts
    INITIAL_GH_CUT_EFFICIENCY = 0.4

    INITIAL_GH_CUT = np.quantile(gammas['gh_score'],
                                 (1 - INITIAL_GH_CUT_EFFICIENCY))
    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(
            10**(-1.9) * u.TeV,
            10**2.3005 * u.TeV,
            50,
        ))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=theta_bins,
        min_value=0.05 * u.deg,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        percentile=68,
    )

    # same bins as event display uses
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV,
                               10**2.31 * u.TeV,
                               bins_per_decade=5))

    log.info("Optimizing G/H separation cut for best sensitivity")
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP)
    sensitivity_step_2, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info('Recalculating theta cut for optimized GH Cuts')
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(tab["gh_score"],
                                                 tab["reco_energy"], gh_cuts,
                                                 operator.ge)

    theta_cuts_opt = calculate_percentile_cut(
        gammas[gammas['selected_gh']]["theta"],
        gammas[gammas['selected_gh']]["reco_energy"],
        theta_bins,
        percentile=68,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        min_value=0.05 * u.deg,
    )

    gammas["selected_theta"] = evaluate_binned_cut(gammas["theta"],
                                                   gammas["reco_energy"],
                                                   theta_cuts_opt, operator.le)
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # calculate sensitivity
    signal_hist = create_histogram_table(gammas[gammas["selected"]],
                                         bins=sensitivity_bins)
    background_hist = estimate_background(
        background[background["selected_gh"]],
        reco_energy_bins=sensitivity_bins,
        theta_cuts=theta_cuts_opt,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )
    sensitivity = calculate_sensitivity(signal_hist,
                                        background_hist,
                                        alpha=ALPHA)

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles['gamma']['target_spectrum']
    for s in (sensitivity_step_2, sensitivity):
        s["flux_sensitivity"] = (s["relative_sensitivity"] *
                                 spectrum(s['reco_energy_center']))

    log.info('Calculating IRFs')
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(sensitivity_step_2, name="SENSITIVITY_STEP_2"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(theta_cuts_opt, name="THETA_CUTS_OPT"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 10))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 5))
    fov_offset_bins = [0, 0.5] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[..., np.newaxis],  # +1 dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    # Here we use reconstructed energy instead of true energy for the sake of
    # current pipelines comparisons
    bias_resolution = energy_bias_resolution(gammas[gammas["selected"]],
                                             reco_energy_bins,
                                             energy_type="reco")

    # Here we use reconstructed energy instead of true energy for the sake of
    # current pipelines comparisons
    ang_res = angular_resolution(gammas[gammas["selected_gh"]],
                                 reco_energy_bins,
                                 energy_type="reco")

    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(theta_cuts_opt["cut"][:, np.newaxis], theta_bins,
                           fov_offset_bins))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(
        fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info('Writing output file')
    fits.HDUList(hdus).writeto(outdir + '.fits.gz', overwrite=True)
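
The configuration keys read above imply a structure like the following; all values are placeholders, shown as a Python dict purely for illustration:

cfg_example = {
    "general": {
        "outdir": "./results",
        "indir": "./dl2",
        "template_input_file": "dl2_{}_{}.h5",  # formatted with (mode, particle)
        "prod": "Prod5", "site": "North", "array": "baseline",
        "zenith": "20deg", "azimuth": "average",
    },
    "analysis": {
        "obs_time": {"value": 50, "unit": "h"},
        "alpha": 0.2,              # on/off region ratio
        "max_bg_radius": 1.0,      # deg
        "cut_on_multiplicity": 4,
    },
    "particle_information": {"gamma": {}, "proton": {}, "electron": {}},
}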
Code example #5
    def start(self):

        for particle_type, p in self.mc_particle.items():
            self.log.info(f"Simulated {particle_type.title()} Events:")
            p["events"], p["simulation_info"] = read_mc_dl2_to_QTable(p["file"])

            if p["simulation_info"].viewcone.value == 0.0:
                p["mc_type"] = "point_like"
            else:
                p["mc_type"] = "diffuse"

            self.log.debug(f"Simulated {p['mc_type']} {particle_type.title()} Events:")

            # Calculating event weights for Background IRF
            if particle_type != "gamma":
                p["simulated_spectrum"] = PowerLaw.from_simulation(
                    p["simulation_info"], self.t_obs
                )

                p["events"]["weight"] = calculate_event_weights(
                    p["events"]["true_energy"],
                    p["target_spectrum"],
                    p["simulated_spectrum"],
                )

            for prefix in ("true", "reco"):
                k = f"{prefix}_source_fov_offset"
                p["events"][k] = calculate_source_fov_offset(p["events"], prefix=prefix)
            # calculate theta / distance between reco and assumed source position
            p["events"]["theta"] = calculate_theta(
                p["events"],
                assumed_source_az=p["events"]["true_az"],
                assumed_source_alt=p["events"]["true_alt"],
            )
            self.log.debug(p["simulation_info"])

        gammas = self.mc_particle["gamma"]["events"]

        self.log.info(f"Using fixed G/H cut of {self.fixed_cuts.fixed_gh_cut}")

        gammas = self.event_sel.filter_cut(gammas)
        gammas = self.fixed_cuts.allowed_tels_filter(gammas)
        gammas = self.fixed_cuts.gh_cut(gammas)

        if self.point_like:
            gammas = self.fixed_cuts.theta_cut(gammas)
            self.log.info('Theta cuts applied for point like IRF')

        # Binning of parameters used in IRFs
        true_energy_bins = self.data_bin.true_energy_bins()
        reco_energy_bins = self.data_bin.reco_energy_bins()
        migration_bins = self.data_bin.energy_migration_bins()
        source_offset_bins = self.data_bin.source_offset_bins()

        if self.mc_particle["gamma"]["mc_type"] == "point_like":
            mean_fov_offset = round(gammas["true_source_fov_offset"].mean().to_value(), 1)
            fov_offset_bins = [mean_fov_offset - 0.1, mean_fov_offset + 0.1] * u.deg
            self.log.info('Single offset for point like gamma MC')
        else:
            fov_offset_bins = self.data_bin.fov_offset_bins()
            self.log.info('Multiple offset for diffuse gamma MC')

        if not self.only_gamma_irf:
            background = table.vstack(
                [
                    self.mc_particle["proton"]["events"],
                    self.mc_particle["electron"]["events"],
                ]
            )

            background = self.event_sel.filter_cut(background)
            background = self.fixed_cuts.allowed_tels_filter(background)
            background = self.fixed_cuts.gh_cut(background)

            background_offset_bins = self.data_bin.bkg_fov_offset_bins()

        # For a fixed gh/theta cut, only a header value is added.
        # For energy dependent cuts, a new HDU should be created
        # GH_CUT and FOV_CUT are temporary non-standard header data
        extra_headers = {
            "TELESCOP": "CTA-N",
            "INSTRUME": "LST-" + " ".join(map(str, self.fixed_cuts.allowed_tels)),
            "FOVALIGN": "RADEC",
            "GH_CUT": self.fixed_cuts.fixed_gh_cut,
        }
        if self.point_like:
            self.log.info("Generating point_like IRF HDUs")
            extra_headers["RAD_MAX"] = str(self.fixed_cuts.fixed_theta_cut * u.deg)
        else:
            self.log.info("Generating Full-Enclosure IRF HDUs")

        # Write HDUs
        self.hdus = [fits.PrimaryHDU(), ]

        with np.errstate(invalid="ignore", divide="ignore"):
            if self.mc_particle["gamma"]["mc_type"] == "point_like":
                self.effective_area = effective_area_per_energy(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        # add one dimension for single FOV offset
                        self.effective_area[..., np.newaxis],
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    )
                )
            else:
                self.effective_area = effective_area_per_energy_and_fov(
                    gammas,
                    self.mc_particle["gamma"]["simulation_info"],
                    true_energy_bins,
                    fov_offset_bins,
                )
                self.hdus.append(
                    create_aeff2d_hdu(
                        self.effective_area,
                        true_energy_bins,
                        fov_offset_bins,
                        point_like=self.point_like,
                        extname="EFFECTIVE AREA",
                        **extra_headers,
                    )
                )

        self.log.info("Effective Area HDU created")
        self.edisp = energy_dispersion(
            gammas,
            true_energy_bins,
            fov_offset_bins,
            migration_bins,
        )
        self.hdus.append(
            create_energy_dispersion_hdu(
                self.edisp,
                true_energy_bins,
                migration_bins,
                fov_offset_bins,
                point_like=self.point_like,
                extname="ENERGY DISPERSION",
                **extra_headers,
            )
        )
        self.log.info("Energy Dispersion HDU created")

        if not self.only_gamma_irf:
            self.background = background_2d(
                background,
                reco_energy_bins=reco_energy_bins,
                fov_offset_bins=background_offset_bins,
                t_obs=self.t_obs,
            )
            self.hdus.append(
                create_background_2d_hdu(
                    self.background.T,
                    reco_energy_bins,
                    background_offset_bins,
                    extname="BACKGROUND",
                    **extra_headers,
                )
            )
            self.log.info("Background HDU created")

        if not self.point_like:
            self.psf = psf_table(
                gammas,
                true_energy_bins,
                fov_offset_bins=fov_offset_bins,
                source_offset_bins=source_offset_bins,
            )
            self.hdus.append(
                create_psf_table_hdu(
                    self.psf,
                    true_energy_bins,
                    source_offset_bins,
                    fov_offset_bins,
                    extname="PSF",
                    **extra_headers,
                )
            )
            self.log.info("PSF HDU created")
Code example #6
def main():
    log = logging.getLogger("lstchain MC DL2 to IRF - sensitivity curves")

    parser = argparse.ArgumentParser(description="MC DL2 to IRF")

    # Required arguments
    parser.add_argument(
        "--gamma-dl2", "-g", type=str, dest="gamma_file", help="Path to the dl2 gamma file"
    )

    parser.add_argument(
        "--proton-dl2",
        "-p",
        type=str,
        dest="proton_file",
        help="Path to the dl2 proton file",
    )

    parser.add_argument(
        "--electron-dl2",
        "-e",
        type=str,
        dest="electron_file",
        help="Path to the dl2 electron file",
    )

    parser.add_argument(
        "--outfile",
        "-o",
        action="store",
        type=str,
        dest="outfile",
        help="Path where to save IRF FITS file",
        default="sensitivity.fits.gz",
    )

    parser.add_argument(
        "--source_alt",
        action="store",
        type=float,
        dest="source_alt",
        help="Source altitude (optional). If not provided, it will be guessed from the gammas true altitude",
        default=None
    )

    parser.add_argument(
        "--source_az",
        action="store",
        type=float,
        dest="source_az",
        help="Source azimuth (optional). If not provided, it will be guessed from the gammas true altitude",
        default=None
    )

    # Optional arguments
    # parser.add_argument('--config', '-c', action='store', type=Path,
    #                     dest='config_file',
    #                     help='Path to a configuration file. If none is given, a standard configuration is applied',
    #                     default=None
    #                     )


    args = parser.parse_args()
    
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    particles = {
        "gamma": {"file": args.gamma_file, "target_spectrum": CRAB_HEGRA},
        "proton": {"file": args.proton_file,
                   "target_spectrum": IRFDOC_PROTON_SPECTRUM},
        "electron": {"file": args.electron_file,
                     "target_spectrum": IRFDOC_ELECTRON_SPECTRUM},
    }

    for particle_type, p in particles.items():
        log.info("Simulated Events: {}".format(particle_type.title()))
        p["events"], p["simulation_info"] = read_mc_dl2_to_QTable(p["file"])
        # p['events'] = filter_events(p['events'], filters)

        print("=====", particle_type, "=====")
        # p["events"]["particle_type"] = particle_type

        p["simulated_spectrum"] = PowerLaw.from_simulation(p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"], p["target_spectrum"], p["simulated_spectrum"]
        )
        for prefix in ("true", "reco"):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"], prefix=prefix)

    gammas = particles["gamma"]["events"]
    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]]
    )

    if args.source_alt is None or args.source_az is None:
        source_alt, source_az = determine_source_position(gammas)
    else:
        source_alt, source_az = args.source_alt, args.source_az

    for particle_type, p in particles.items():
        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos is the pointing position
        p["events"]["theta"] = calculate_theta(p["events"], assumed_source_az=source_az, assumed_source_alt=source_alt)
        log.info(p["simulation_info"])
        log.info("")


    INITIAL_GH_CUT = np.quantile(gammas["gh_score"], (1 - INITIAL_GH_CUT_EFFICENCY))
    log.info("Using fixed G/H cut of {} to calculate theta cuts".format(INITIAL_GH_CUT))

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, N_BIN_PER_DECADE)
    )

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=theta_bins,
        min_value=MIN_THETA_CUT,
        fill_value=MAX_THETA_CUT,
        max_value=MAX_THETA_CUT,
        percentile=68,
    )

    # same number of bins per decade as the official CTA IRFs
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, bins_per_decade=N_BIN_PER_DECADE)
    )

    log.info("Optimizing G/H separation cut for best sensitivity")
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP,
    )
    sensitivity_step_2, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info("Recalculating theta cut for optimized GH Cuts")
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(
            tab["gh_score"], tab["reco_energy"], gh_cuts, operator.ge
        )

    theta_cuts_opt = calculate_percentile_cut(
        gammas[gammas["selected_gh"]]["theta"],
        gammas[gammas["selected_gh"]]["reco_energy"],
        theta_bins,
        percentile=68,
        fill_value=MAX_THETA_CUT,
        max_value=MAX_THETA_CUT,
        min_value=MIN_THETA_CUT,
    )

    gammas["selected_theta"] = evaluate_binned_cut(
        gammas["theta"], gammas["reco_energy"], theta_cuts_opt, operator.le
    )
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # calculate sensitivity
    signal_hist = create_histogram_table(
        gammas[gammas["selected"]], bins=sensitivity_bins
    )
    background_hist = estimate_background(
        background[background["selected_gh"]],
        reco_energy_bins=sensitivity_bins,
        theta_cuts=theta_cuts_opt,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )
    sensitivity = calculate_sensitivity(signal_hist, background_hist, alpha=ALPHA)

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles["gamma"]["target_spectrum"]
    for s in (sensitivity_step_2, sensitivity):
        s["flux_sensitivity"] = s["relative_sensitivity"] * spectrum(
            s["reco_energy_center"]
        )

    log.info("Calculating IRFs")
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(sensitivity_step_2, name="SENSITIVITY_STEP_2"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(theta_cuts_opt, name="THETA_CUTS_OPT"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, N_BIN_PER_DECADE)
    )
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(MIN_ENERGY, MAX_ENERGY, N_BIN_PER_DECADE)
    )

    fov_offset_bins = [0, 0.6] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[..., np.newaxis],  # add one dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            )
        )
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            )
        )

    bias_resolution = energy_bias_resolution(
        gammas[gammas["selected"]],
        true_energy_bins,
        resolution_function=energy_resolution_absolute_68,
    )
    ang_res = angular_resolution(gammas[gammas["selected_gh"]], true_energy_bins)
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background["selected_gh"]],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate, reco_energy_bins, fov_offset_bins=np.arange(0, 11) * u.deg
        )
    )
    hdus.append(
        create_psf_table_hdu(psf, true_energy_bins, source_offset_bins, fov_offset_bins)
    )
    hdus.append(
        create_rad_max_hdu(
            theta_cuts_opt["cut"][:, np.newaxis], theta_bins, fov_offset_bins
        )
    )
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info("Writing output file")
    Path(args.outfile).parent.mkdir(exist_ok=True)
    fits.HDUList(hdus).writeto(args.outfile, overwrite=True)
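
A typical invocation, with placeholder script and file names:

# python dl2_to_sensitivity.py \
#     -g dl2_gamma.h5 -p dl2_proton.h5 -e dl2_electron.h5 \
#     -o sensitivity.fits.gz --source_alt 70.0 --source_az 180.0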
Code example #7
def main():
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    for k, p in particles.items():
        log.info(f"Simulated {k.title()} Events:")
        p["events"], p["simulation_info"] = read_eventdisplay_fits(p["file"])

        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"], p["target_spectrum"],
            p["simulated_spectrum"])
        p["events"]["source_fov_offset"] = calculate_source_fov_offset(
            p["events"])
        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos
        # is the pointing position
        p["events"]["theta"] = calculate_theta(
            p["events"],
            assumed_source_az=p["events"]["pointing_az"],
            assumed_source_alt=p["events"]["pointing_alt"],
        )
        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(
            10**(-1.9) * u.TeV,
            10**2.3005 * u.TeV,
            50,
        ))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=theta_bins,
        min_value=0.05 * u.deg,
        fill_value=np.nan * u.deg,
        percentile=68,
    )

    # evaluate the theta cut
    gammas["selected_theta"] = evaluate_binned_cut(gammas["theta"],
                                                   gammas["reco_energy"],
                                                   theta_cuts, operator.le)
    # we make the background region larger than the on region by the on/off
    # ratio ALPHA (i.e. scale the cut radius by sqrt(ALPHA)) to get better
    # statistics for the background
    theta_cuts_bg = get_bg_cuts(theta_cuts, ALPHA)
    background["selected_theta"] = evaluate_binned_cut(
        background["theta"], background["reco_energy"], theta_cuts_bg,
        operator.le)

    # same bins as event display uses
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV,
                               10**2.31 * u.TeV,
                               bins_per_decade=5))

    log.info("Optimizing G/H separation cut for best sensitivity")
    sensitivity_step_2, gh_cuts = optimize_gh_cut(
        gammas[gammas["selected_theta"]],
        background[background["selected_theta"]],
        bins=sensitivity_bins,
        cut_values=np.arange(-1.0, 1.005, 0.05),
        op=operator.ge,
        alpha=ALPHA,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(tab["gh_score"],
                                                 tab["reco_energy"], gh_cuts,
                                                 operator.ge)

    theta_cuts_opt = calculate_percentile_cut(
        gammas["theta"],
        gammas["reco_energy"],
        theta_bins,
        fill_value=np.nan * u.deg,
        percentile=68,
        min_value=0.05 * u.deg,
    )

    theta_cuts_opt_bg = get_bg_cuts(theta_cuts_opt, ALPHA)

    for tab, cuts in zip([gammas, background],
                         [theta_cuts_opt, theta_cuts_opt_bg]):
        tab["selected_theta"] = evaluate_binned_cut(tab["theta"],
                                                    tab["reco_energy"], cuts,
                                                    operator.le)
        tab["selected"] = tab["selected_theta"] & tab["selected_gh"]

    signal_hist = create_histogram_table(gammas[gammas["selected"]],
                                         bins=sensitivity_bins)
    background_hist = create_histogram_table(
        background[background["selected"]], bins=sensitivity_bins)

    sensitivity = calculate_sensitivity(signal_hist,
                                        background_hist,
                                        alpha=ALPHA)

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    for s in (sensitivity_step_2, sensitivity):
        s["flux_sensitivity"] = s["relative_sensitivity"] * CRAB_HEGRA(
            s["reco_energy_center"])

    # write OGADF output file
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(sensitivity_step_2, name="SENSITIVITY_STEP_2"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(theta_cuts_opt, name="THETA_CUTS_OPT"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(
            10**-1.9 * u.TeV,
            10**2.31 * u.TeV,
            10,
        ))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(
            10**-1.9 * u.TeV,
            10**2.31 * u.TeV,
            10,
        ))
    fov_offset_bins = [0, 0.5] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = point_like_effective_area(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                effective_area[...,
                               np.newaxis],  # add one dimension for FOV offset
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    bias_resolution = energy_bias_resolution(
        gammas[gammas["selected"]],
        true_energy_bins,
    )
    ang_res = angular_resolution(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
    )
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(theta_cuts_opt["cut"][:, np.newaxis], theta_bins,
                           fov_offset_bins))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(
        fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))
    fits.HDUList(hdus).writeto("pyirf_eventdisplay.fits.gz", overwrite=True)
Code example #8
def main():
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    for particle_type, p in particles.items():
        log.info(f"Simulated {particle_type.title()} Events:")
        p["events"], p["simulation_info"] = read_eventdisplay_fits(p["file"])
        p["events"]["particle_type"] = particle_type

        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"], p["target_spectrum"],
            p["simulated_spectrum"])
        for prefix in ('true', 'reco'):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"],
                                                         prefix=prefix)

        # calculate theta / distance between reco and assumed source position
        # we handle only ON observations here, so the assumed source pos
        # is the pointing position
        p["events"]["theta"] = calculate_theta(
            p["events"],
            assumed_source_az=p["events"]["pointing_az"],
            assumed_source_alt=p["events"]["pointing_alt"],
        )
        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    # background table composed of both electrons and protons
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    INITIAL_GH_CUT = np.quantile(gammas['gh_score'],
                                 (1 - INITIAL_GH_CUT_EFFICENCY))
    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    # event display uses much finer bins for the theta cut than
    # for the sensitivity
    theta_bins = add_overflow_bins(
        create_bins_per_decade(10**(-1.9) * u.TeV, 10**2.3005 * u.TeV, 50))
    # same bins as event display uses
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV,
                               10**2.31 * u.TeV,
                               bins_per_decade=5))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    # the cut is calculated in the same bins as the sensitivity,
    # but then interpolated to 10x the resolution.
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts_coarse = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=sensitivity_bins,
        min_value=0.05 * u.deg,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        percentile=68,
    )

    # interpolate to 50 bins per decade
    theta_center = bin_center(theta_bins)
    inter_center = bin_center(sensitivity_bins)
    theta_cuts = table.QTable({
        "low": theta_bins[:-1],
        "high": theta_bins[1:],
        "center": theta_center,
        "cut": np.interp(np.log10(theta_center / u.TeV),
                         np.log10(inter_center / u.TeV),
                         theta_cuts_coarse['cut']),
    })

    log.info("Optimizing G/H separation cut for best sensitivity")
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP)
    sensitivity, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info('Recalculating theta cut for optimized GH Cuts')
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(tab["gh_score"],
                                                 tab["reco_energy"], gh_cuts,
                                                 operator.ge)

    gammas["selected_theta"] = evaluate_binned_cut(gammas["theta"],
                                                   gammas["reco_energy"],
                                                   theta_cuts, operator.le)
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles['gamma']['target_spectrum']
    sensitivity["flux_sensitivity"] = (
        sensitivity["relative_sensitivity"] *
        spectrum(sensitivity['reco_energy_center']))

    log.info('Calculating IRFs')
    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 10))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.9 * u.TeV, 10**2.31 * u.TeV, 5))
    fov_offset_bins = [0, 0.5] * u.deg
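    # the end point of 1 + 1e-4 makes the last arange edge land on 1.0 deg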
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
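    # migration = reco_energy / true_energy, sampled logarithmically from 0.2 to 5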
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                # add a dimension for the single FOV offset bin
                effective_area[..., np.newaxis],
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    bias_resolution = energy_bias_resolution(gammas[gammas["selected"]],
                                             reco_energy_bins,
                                             energy_type="reco")
    ang_res = angular_resolution(gammas[gammas["selected_gh"]],
                                 reco_energy_bins,
                                 energy_type="reco")
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )

    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(theta_cuts["cut"][:, np.newaxis], theta_bins,
                           fov_offset_bins))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(
        fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info('Writing output file')
    fits.HDUList(hdus).writeto("pyirf_eventdisplay.fits.gz", overwrite=True)
Code example #9
def load_data(files, cache):
    if cache.exists():
        plot_values = read_plot_data(cache, data_structure)
    else:
        proton_file = files["protons"]
        electron_file = files["electrons"]
        observation_files = files["observations"]
        plot_values = {}
        obstime = 0 * u.s
        observations = {}
        for f in tqdm(observation_files):
            data = read_lst_dl1(f, images=True, drop_nans=False)
            observations[data[0]["obs_id"]] = data
            run_time = (data["time"][-1] - data["time"][0]).to(u.s)
            obstime += run_time
        combined = vstack(list(observations.values()))
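        # weighting each observed event by 1 / T_obs turns weighted histograms
        # into rates in 1/s, comparable to the MC weighted to 1 s below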
        combined["weights"] = 1 / obstime.to_value(u.s)

        # read whichever MC files were given; fall back to None so the
        # later checks don't hit undefined names
        protons = None
        electrons = None
        if proton_file:
            proton_sim_info = read_sim_info(proton_file)
            protons = read_mc_dl1(proton_file, drop_nans=False, images=True)
            protons["weights"] = calculate_event_weights(
                protons["true_energy"],
                IRFDOC_PROTON_SPECTRUM,
                PowerLaw.from_simulation(proton_sim_info, 1 * u.s),
            )
        if electron_file:
            electron_sim_info = read_sim_info(electron_file)
            electrons = read_mc_dl1(electron_file, drop_nans=False, images=True)
            electrons["weights"] = calculate_event_weights(
                electrons["true_energy"],
                IRFDOC_ELECTRON_SPECTRUM,
                PowerLaw.from_simulation(electron_sim_info, 1 * u.s),
            )
        if protons is not None and electrons is not None:
            background = vstack([protons, electrons])
        elif protons is not None:
            background = protons
        elif electrons is not None:
            background = electrons
        else:
            background = None

        max_ = np.percentile(combined["image"], 99)
        min_ = np.percentile(combined["image"], 1)
        
        pixel_values = defaultdict(list)
        for pixel, values in enumerate(combined["image"].T):
            pixel_values["std"].append(np.std(values))
            pixel_values["mean"].append(np.mean(values))
            pixel_values["median"].append(np.median(values))
            per_25 = np.percentile(values, 25)
            per_75 = np.percentile(values, 75)
            iqr = per_75 - per_25
            pixel_values["25"].append(per_25)
            pixel_values["75"].append(per_75)
            pixel_values["iqr"].append(iqr)
            pixel_values["min"].append(np.min(values))
            pixel_values["max"].append(np.max(values))
        plot_values["pixels"] = {
            "bins": pd.Series(),
            "values": pd.DataFrame(pixel_values),
        }
        if background is not None:
            # data/MC comparison of the pixel intensity distributions
            datamc_bins = np.linspace(
                min(min_, np.percentile(background["image"], 1)),
                max(max_, np.percentile(background["image"], 99)),
                30,
            )
            datamc_df = pd.DataFrame()
            datamc_df["data"], _ = np.histogram(combined["image"], bins=datamc_bins)
            datamc_df["mc"], _ = np.histogram(background["image"], bins=datamc_bins)
            plot_values["datamc"] = {
                "bins": pd.Series(datamc_bins),
                "values": datamc_df,
            }

        # evolution of the image values over time
        combined["delta_t_sec"] = (combined["time"] - combined["time"][0]).sec
        last = combined["delta_t_sec"].max()
        time_bins = np.linspace(0, last, 20)
        # pandas can't handle the vector-valued image columns, so we build the binned df manually
        indices = np.digitize(combined["delta_t_sec"], time_bins)
        mean = []
        std = []
        # with the default right=False, np.digitize puts only the event(s)
        # lying exactly on the last bin edge into the final index, so dropping
        # that index loses just those events
        for bin_index in np.unique(indices)[:-1]:
            group = combined[indices == bin_index]
            mean.append(np.mean(group["image"]))
            std.append(np.std(group["image"]))
        time_df = pd.DataFrame({
            "center": 0.5 * (time_bins[:-1] + time_bins[1:]),
            "width": np.diff(time_bins),
            "mean": mean,
            "std": std,
        })
        
        plot_values["time"] = {
            "bins": pd.Series(time_bins),
            "values": time_df,
        }
        
        # fraction of events surviving image cleaning
        combined["size"] = combined["image"].sum(axis=1)
        # an event survived if at least one pixel is left in the cleaning mask
        combined["survived"] = combined["image_mask"].sum(axis=1) > 0
        size_bins = np.logspace(
            np.log10(1),
            np.log10(combined["size"].max()),
            100,
        )
        fraction_df = pd.DataFrame()
        fraction = []
        indices = np.digitize(combined["size"], size_bins)
        for bin_index in range(1, len(size_bins) + 1):
            group = combined[indices == bin_index]
            fraction.append(np.mean(group["survived"]))
        fraction_df["data"] = fraction
        if background is not None:
            background["size"] = background["image"].sum(axis=1)
            background["survived"] = background["image_mask"].sum(axis=1) > 0
            fraction_mc = []
            indices = np.digitize(background["size"], size_bins)
            for bin_index in range(1, len(size_bins) + 1):
                group = background[indices == bin_index]
                fraction_mc.append(np.mean(group["survived"]))
            fraction_df["mc"] = fraction_mc

        plot_values["surviving"] = {
            "bins": pd.Series(size_bins),
            "values": fraction_df,
        }

        save_plot_data(cache, plot_values)

    return plot_values
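
read_plot_data, save_plot_data and data_structure are defined outside this listing; below is a minimal sketch of compatible cache helpers, assuming the nested {name: {"bins": Series, "values": DataFrame}} layout is stored in a single HDF5 file via pandas (requires PyTables):

import pandas as pd

def save_plot_data(cache, plot_values):
    # one group per plot, with separate keys for the bins and the values
    with pd.HDFStore(cache, mode="w") as store:
        for name, data in plot_values.items():
            store.put(f"{name}/bins", data["bins"])
            store.put(f"{name}/values", data["values"])

def read_plot_data(cache, data_structure):
    # data_structure lists the expected plot names
    plot_values = {}
    with pd.HDFStore(cache, mode="r") as store:
        for name in data_structure:
            plot_values[name] = {
                "bins": store.get(f"{name}/bins"),
                "values": store.get(f"{name}/values"),
            }
    return plot_values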
Code example #10
def main(gammafile, protonfile, electronfile, outputfile):
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pyirf").setLevel(logging.DEBUG)

    particles = {
        "gamma": {
            "file": gammafile,
            "target_spectrum": CRAB_HEGRA,
        },
        "proton": {
            "file": protonfile,
            "target_spectrum": IRFDOC_PROTON_SPECTRUM,
        },
        "electron": {
            "file": electronfile,
            "target_spectrum": IRFDOC_ELECTRON_SPECTRUM,
        },
    }

    for particle_type, p in particles.items():
        log.info(f"Simulated {particle_type.title()} Events:")
        p["events"], p["simulation_info"] = read_file(p["file"])
        p["events"]["particle_type"] = particle_type

        p["simulated_spectrum"] = PowerLaw.from_simulation(
            p["simulation_info"], T_OBS)
        p["events"]["weight"] = calculate_event_weights(
            p["events"]["true_energy"], p["target_spectrum"],
            p["simulated_spectrum"])
        for prefix in ('true', 'reco'):
            k = f"{prefix}_source_fov_offset"
            p["events"][k] = calculate_source_fov_offset(p["events"],
                                                         prefix=prefix)

        log.info(p["simulation_info"])
        log.info("")

    gammas = particles["gamma"]["events"]
    background = table.vstack(
        [particles["proton"]["events"], particles["electron"]["events"]])

    # calculate theta / distance between reco and assumed source position
    gammas["theta"] = calculate_theta(
        gammas,
        assumed_source_az=gammas["true_az"],
        assumed_source_alt=gammas["true_alt"],
    )

    INITIAL_GH_CUT = np.quantile(gammas['gh_score'],
                                 (1 - INITIAL_GH_CUT_EFFICENCY))
    log.info(
        f"Using fixed G/H cut of {INITIAL_GH_CUT} to calculate theta cuts")

    theta_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV,
                               10**2.41 * u.TeV,
                               bins_per_decade=25))
    sensitivity_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV,
                               10**2.41 * u.TeV,
                               bins_per_decade=5))

    # theta cut is 68 percent containment of the gammas
    # for now with a fixed global, unoptimized score cut
    mask_theta_cuts = gammas["gh_score"] >= INITIAL_GH_CUT
    theta_cuts = calculate_percentile_cut(
        gammas["theta"][mask_theta_cuts],
        gammas["reco_energy"][mask_theta_cuts],
        bins=theta_bins,
        min_value=0.05 * u.deg,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        percentile=68,
    )

    log.info("Optimizing G/H separation cut for best sensitivity")
    gh_cut_efficiencies = np.arange(
        GH_CUT_EFFICIENCY_STEP,
        MAX_GH_CUT_EFFICIENCY + GH_CUT_EFFICIENCY_STEP / 2,
        GH_CUT_EFFICIENCY_STEP)
    sensitivity_step_2, gh_cuts = optimize_gh_cut(
        gammas,
        background,
        reco_energy_bins=sensitivity_bins,
        gh_cut_efficiencies=gh_cut_efficiencies,
        op=operator.ge,
        theta_cuts=theta_cuts,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )

    # now that we have the optimized gh cuts, we recalculate the theta
    # cut as 68 percent containment on the events surviving these cuts.
    log.info('Recalculating theta cut for optimized GH Cuts')
    for tab in (gammas, background):
        tab["selected_gh"] = evaluate_binned_cut(tab["gh_score"],
                                                 tab["reco_energy"], gh_cuts,
                                                 operator.ge)

    theta_cuts_opt = calculate_percentile_cut(
        gammas[gammas['selected_gh']]["theta"],
        gammas[gammas['selected_gh']]["reco_energy"],
        theta_bins,
        percentile=68,
        fill_value=0.32 * u.deg,
        max_value=0.32 * u.deg,
        min_value=0.05 * u.deg,
    )

    gammas["selected_theta"] = evaluate_binned_cut(gammas["theta"],
                                                   gammas["reco_energy"],
                                                   theta_cuts_opt, operator.le)
    gammas["selected"] = gammas["selected_theta"] & gammas["selected_gh"]

    # calculate sensitivity
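    # ALPHA is the on/off exposure ratio entering the Li & Ma significance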
    signal_hist = create_histogram_table(gammas[gammas["selected"]],
                                         bins=sensitivity_bins)
    background_hist = estimate_background(
        background[background["selected_gh"]],
        reco_energy_bins=sensitivity_bins,
        theta_cuts=theta_cuts_opt,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )
    sensitivity = calculate_sensitivity(signal_hist,
                                        background_hist,
                                        alpha=ALPHA)

    # scale relative sensitivity by Crab flux to get the flux sensitivity
    spectrum = particles['gamma']['target_spectrum']
    for s in (sensitivity_step_2, sensitivity):
        s["flux_sensitivity"] = (s["relative_sensitivity"] *
                                 spectrum(s['reco_energy_center']))

    hdus = [
        fits.PrimaryHDU(),
        fits.BinTableHDU(sensitivity, name="SENSITIVITY"),
        fits.BinTableHDU(sensitivity_step_2, name="SENSITIVITY_STEP_2"),
        fits.BinTableHDU(theta_cuts, name="THETA_CUTS"),
        fits.BinTableHDU(theta_cuts_opt, name="THETA_CUTS_OPT"),
        fits.BinTableHDU(gh_cuts, name="GH_CUTS"),
    ]

    # calculate sensitivity using fixed, unoptimised cuts:
    # theta <= sqrt(0.03) deg (a theta^2 cut of 0.03 deg^2) and gh_score > 0.85
    gammas["theta_unop"] = gammas["theta"].to_value(u.deg) <= np.sqrt(0.03)
    gammas["gh_unop"] = gammas["gh_score"] > 0.85

    theta_cut_unop = table.QTable()
    theta_cut_unop['low'] = theta_cuts_opt['low']
    theta_cut_unop['high'] = theta_cuts_opt['high']
    theta_cut_unop['center'] = theta_cuts_opt['center']
    theta_cut_unop['cut'] = np.sqrt(0.03) * u.deg

    signal_hist_unop = create_histogram_table(gammas[gammas["theta_unop"]
                                                     & gammas["gh_unop"]],
                                              bins=sensitivity_bins)
    background_hist_unop = estimate_background(
        background[background["gh_score"] > 0.85],
        reco_energy_bins=sensitivity_bins,
        theta_cuts=theta_cut_unop,
        alpha=ALPHA,
        background_radius=MAX_BG_RADIUS,
    )
    sensitivity_unop = calculate_sensitivity(signal_hist_unop,
                                             background_hist_unop,
                                             alpha=ALPHA)
    sensitivity_unop["flux_sensitivity"] = (
        sensitivity_unop["relative_sensitivity"] *
        spectrum(sensitivity_unop['reco_energy_center']))
    hdus.append(fits.BinTableHDU(sensitivity_unop, name="SENSITIVITY_UNOP"))

    log.info('Calculating IRFs')
    masks = {
        "": gammas["selected"],
        "_NO_CUTS": slice(None),
        "_ONLY_GH": gammas["selected_gh"],
        "_ONLY_THETA": gammas["selected_theta"],
    }

    # binnings for the irfs
    true_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV,
                               10**2.41 * u.TeV,
                               bins_per_decade=10))
    reco_energy_bins = add_overflow_bins(
        create_bins_per_decade(10**-1.8 * u.TeV,
                               10**2.41 * u.TeV,
                               bins_per_decade=5))
    fov_offset_bins = [0, 0.5] * u.deg
    source_offset_bins = np.arange(0, 1 + 1e-4, 1e-3) * u.deg
    energy_migration_bins = np.geomspace(0.2, 5, 200)

    for label, mask in masks.items():
        effective_area = effective_area_per_energy(
            gammas[mask],
            particles["gamma"]["simulation_info"],
            true_energy_bins=true_energy_bins,
        )
        hdus.append(
            create_aeff2d_hdu(
                # add a dimension for the single FOV offset bin
                effective_area[..., np.newaxis],
                true_energy_bins,
                fov_offset_bins,
                extname="EFFECTIVE_AREA" + label,
            ))
        edisp = energy_dispersion(
            gammas[mask],
            true_energy_bins=true_energy_bins,
            fov_offset_bins=fov_offset_bins,
            migration_bins=energy_migration_bins,
        )
        hdus.append(
            create_energy_dispersion_hdu(
                edisp,
                true_energy_bins=true_energy_bins,
                migration_bins=energy_migration_bins,
                fov_offset_bins=fov_offset_bins,
                extname="ENERGY_DISPERSION" + label,
            ))

    bias_resolution = energy_bias_resolution(
        gammas[gammas["selected"]],
        true_energy_bins,
    )
    ang_res = angular_resolution(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
    )
    psf = psf_table(
        gammas[gammas["selected_gh"]],
        true_energy_bins,
        fov_offset_bins=fov_offset_bins,
        source_offset_bins=source_offset_bins,
    )
    background_rate = background_2d(
        background[background['selected_gh']],
        reco_energy_bins,
        fov_offset_bins=np.arange(0, 11) * u.deg,
        t_obs=T_OBS,
    )

    hdus.append(
        create_background_2d_hdu(
            background_rate,
            reco_energy_bins,
            fov_offset_bins=np.arange(0, 11) * u.deg,
        ))
    hdus.append(
        create_psf_table_hdu(
            psf,
            true_energy_bins,
            source_offset_bins,
            fov_offset_bins,
        ))
    hdus.append(
        create_rad_max_hdu(theta_cuts_opt["cut"][:, np.newaxis], theta_bins,
                           fov_offset_bins))
    hdus.append(fits.BinTableHDU(ang_res, name="ANGULAR_RESOLUTION"))
    hdus.append(
        fits.BinTableHDU(bias_resolution, name="ENERGY_BIAS_RESOLUTION"))

    log.info('Writing output file')
    fits.HDUList(hdus).writeto(outputfile, overwrite=True)
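
A minimal sketch of a command-line entry point for the function above (the argument order is an assumption):

if __name__ == "__main__":
    import sys

    # gamma, proton and electron DL2 files, plus the output FITS path
    main(*sys.argv[1:5])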