Example no. 1
def test_convolve_nd():
    energy_axis = MapAxis.from_edges(np.logspace(-1.0, 1.0, 4),
                                     unit="TeV",
                                     name="energy_true")
    geom = WcsGeom.create(binsz=0.02 * u.deg,
                          width=4.0 * u.deg,
                          axes=[energy_axis])
    m = Map.from_geom(geom)
    m.fill_by_coord([[0.2, 0.4], [-0.1, 0.6], [0.5, 3.6]])

    # TODO: build EnergyDependentTablePSF programmatically rather than using CTA 1DC IRF
    filename = (
        "$GAMMAPY_DATA/cta-1dc/caldb/data/cta//1dc/bcf/South_z20_50h/irf_file.fits"
    )
    psf = EnergyDependentMultiGaussPSF.read(filename,
                                            hdu="POINT SPREAD FUNCTION")
    table_psf = psf.to_energy_dependent_table_psf(theta=0.5 * u.deg)

    psf_kernel = PSFKernel.from_table_psf(table_psf,
                                          geom,
                                          max_radius=1 * u.deg)

    assert psf_kernel.psf_kernel_map.data.shape == (3, 101, 101)

    mc = m.convolve(psf_kernel)
    assert_allclose(mc.data.sum(axis=(1, 2)), [0, 1, 1], atol=1e-5)

    kernel_2d = Box2DKernel(3, mode="center")
    kernel_2d.normalize("peak")
    mc = m.convolve(kernel_2d.array)
    assert_allclose(mc.data[0, :, :].sum(), 0, atol=1e-5)
    assert_allclose(mc.data[1, :, :].sum(), 9, atol=1e-5)
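
The excerpt above comes from a test module, so its imports are not shown. Assuming a Gammapy version in which PSFKernel lives in gammapy.irf (as in Example no. 4 below), the test would need roughly:

import numpy as np
import astropy.units as u
from astropy.convolution import Box2DKernel
from numpy.testing import assert_allclose
from gammapy.irf import EnergyDependentMultiGaussPSF, PSFKernel
from gammapy.maps import Map, MapAxis, WcsGeom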
Example no. 2
def test_psf_kernel_to_image():
    sigma1 = 0.5 * u.deg
    sigma2 = 0.2 * u.deg
    binsz = 0.1 * u.deg

    axis = MapAxis.from_energy_bounds(1, 10, 2, unit="TeV", name="energy_true")
    geom = WcsGeom.create(binsz=binsz, npix=50, axes=[axis])

    rad = Angle(np.linspace(0.0, 1.5 * sigma1.to("deg").value, 100), "deg")
    table_psf1 = TablePSF.from_shape(shape="disk", width=sigma1, rad=rad)
    table_psf2 = TablePSF.from_shape(shape="disk", width=sigma2, rad=rad)

    kernel1 = PSFKernel.from_table_psf(table_psf1, geom)
    kernel2 = PSFKernel.from_table_psf(table_psf2, geom)

    kernel1.psf_kernel_map.data[1, :, :] = kernel2.psf_kernel_map.data[1, :, :]

    kernel_image_1 = kernel1.to_image()
    kernel_image_2 = kernel1.to_image(exposure=[1, 2])

    assert_allclose(kernel_image_1.psf_kernel_map.data.sum(), 1.0, atol=1e-5)
    assert_allclose(kernel_image_1.psf_kernel_map.data[0, 25, 25],
                    0.028415,
                    atol=1e-5)
    assert_allclose(kernel_image_1.psf_kernel_map.data[0, 22, 22],
                    0.009806,
                    atol=1e-5)
    assert_allclose(kernel_image_1.psf_kernel_map.data[0, 20, 20],
                    0.0,
                    atol=1e-5)

    assert_allclose(kernel_image_2.psf_kernel_map.data.sum(), 1.0, atol=1e-5)
    assert_allclose(kernel_image_2.psf_kernel_map.data[0, 25, 25],
                    0.03791383,
                    atol=1e-5)
    assert_allclose(kernel_image_2.psf_kernel_map.data[0, 22, 22],
                    0.0079069,
                    atol=1e-5)
    assert_allclose(kernel_image_2.psf_kernel_map.data[0, 20, 20],
                    0.0,
                    atol=1e-5)
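
Like the first example, this excerpt omits its imports; under the same assumption about the Gammapy layout it needs roughly:

import numpy as np
import astropy.units as u
from astropy.coordinates import Angle
from numpy.testing import assert_allclose
from gammapy.irf import PSFKernel, TablePSF
from gammapy.maps import MapAxis, WcsGeom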
Example no. 3
def test_table_psf_to_kernel_map():
    sigma = 0.5 * u.deg
    binsz = 0.1 * u.deg
    geom = WcsGeom.create(binsz=binsz, npix=150)

    rad = Angle(np.linspace(0.0, 3 * sigma.to("deg").value, 100), "deg")
    table_psf = TablePSF.from_shape(shape="gauss", width=sigma, rad=rad)
    kernel = PSFKernel.from_table_psf(table_psf, geom)
    kernel_array = kernel.psf_kernel_map.data

    # Is normalization OK?
    assert_allclose(kernel_array.sum(), 1.0, atol=1e-5)

    # Maximum at the center of the map?
    ind = np.unravel_index(np.argmax(kernel_array, axis=None), kernel_array.shape)
    # absolute tolerance of 0.5 pixel because the map has an even number of pixels
    assert_allclose(ind, geom.center_pix, atol=0.5)
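
Again assuming the same layout, the imports for this excerpt would be roughly:

import numpy as np
import astropy.units as u
from astropy.coordinates import Angle
from numpy.testing import assert_allclose
from gammapy.irf import PSFKernel, TablePSF
from gammapy.maps import WcsGeom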
Example no. 4
"""Plot Fermi PSF."""
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentTablePSF, PSFKernel
from gammapy.maps import WcsGeom

filename = "$GAMMAPY_DATA/tests/unbundled/fermi/psf.fits"
fermi_psf = EnergyDependentTablePSF.read(filename)

psf = fermi_psf.table_psf_at_energy(energy="1 GeV")
geom = WcsGeom.create(npix=100, binsz=0.01)
kernel = PSFKernel.from_table_psf(psf, geom)

plt.imshow(kernel.data)
plt.colorbar()
plt.show()
Example no. 5
    def run_region(self, kr, lon, lat, radius):
        #    TODO: for now we have to read/create the all-sky maps in each job,
        #    because a <functools._lru_cache_wrapper object> cannot be pickled;
        #    move this back to __init__ once that is fixed

        # exposure
        exposure_hpx = Map.read(
            "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz"
        )
        exposure_hpx.unit = "cm2 s"

        # IEM (interstellar emission model)
        iem_filepath = BASE_PATH / "data" / "gll_iem_v06_extrapolated.fits"
        iem_fermi_extra = Map.read(iem_filepath)
        # norm=1.1, tilt=0.03; see paper, appendix A
        model_iem = SkyDiffuseCube(
            iem_fermi_extra, norm=1.1, tilt=0.03, name="iem_extrapolated"
        )

        # ROI
        roi_time = time()
        ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
        width = 2 * (radius + self.psf_margin)

        # Counts
        counts = Map.create(
            skydir=ROI_pos,
            width=width,
            proj="CAR",
            frame="galactic",
            binsz=1 / 8.0,
            axes=[self.energy_axis],
            dtype=float,
        )
        counts.fill_by_coord(
            {"skycoord": self.events.radec, "energy": self.events.energy}
        )

        axis = MapAxis.from_nodes(
            counts.geom.axes[0].center, name="energy_true", unit="GeV", interp="log"
        )
        wcs = counts.geom.wcs
        geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
        coords = geom.get_coord()
        # exposure interpolated onto the true-energy WCS geometry
        data = exposure_hpx.interp_by_coord(coords)
        exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

        # PSF kernel (cut at max_radius = psf_margin)
        psf_kernel = PSFKernel.from_table_psf(
            self.psf, geom, max_radius=self.psf_margin * u.deg
        )

        # Energy Dispersion
        e_true = exposure.geom.axes[0].edges
        e_reco = counts.geom.axes[0].edges
        edisp = EDispKernel.from_diagonal_response(e_true=e_true, e_reco=e_reco)

        # fit mask
        if coords["lon"].min() < 90 * u.deg and coords["lon"].max() > 270 * u.deg:
            coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
        mask = (
            (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
            & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
            & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
            & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg)
        )
        mask_fermi = WcsNDMap(counts.geom, mask)

        # IEM
        eval_iem = MapEvaluator(
            model=model_iem, exposure=exposure, psf=psf_kernel, edisp=edisp
        )
        bkg_iem = eval_iem.compute_npred()

        # ISO
        eval_iso = MapEvaluator(model=self.model_iso, exposure=exposure, edisp=edisp)
        bkg_iso = eval_iso.compute_npred()

        # merge IEM and ISO; only one local normalization is fitted
        dataset_name = "3FHL_ROI_num" + str(kr)
        background_total = bkg_iem + bkg_iso
        background_model = BackgroundModel(
            background_total, name="bkg_iem+iso", datasets_names=[dataset_name]
        )
        background_model.parameters["norm"].min = 0.0

        # Source models
        in_roi = self.FHL3.positions.galactic.contained_by(wcs)
        FHL3_roi = []
        for ks in range(len(self.FHL3.table)):
            if in_roi[ks]:
                model = self.FHL3[ks].sky_model()
                model.spatial_model.parameters.freeze_all()  # freeze spatial
                model.spectral_model.parameters["amplitude"].min = 0.0
                if isinstance(model.spectral_model, PowerLawSpectralModel):
                    model.spectral_model.parameters["index"].min = 0.1
                    model.spectral_model.parameters["index"].max = 10.0
                else:
                    model.spectral_model.parameters["alpha"].min = 0.1
                    model.spectral_model.parameters["alpha"].max = 10.0

                FHL3_roi.append(model)
        model_total = Models([background_model] + FHL3_roi)

        # Dataset
        dataset = MapDataset(
            models=model_total,
            counts=counts,
            exposure=exposure,
            psf=psf_kernel,
            edisp=edisp,
            mask_fit=mask_fermi,
            name=dataset_name,
        )
        cat_stat = dataset.stat_sum()

        datasets = Datasets([dataset])
        fit = Fit(datasets)
        results = fit.run(**self.optimize_opts)
        print("ROI_num", str(kr), "\n", results)
        fit_stat = datasets.stat_sum()

        if results.message != "Optimization failed.":
            datasets.write(path=Path(self.resdir), prefix=dataset.name, overwrite=True)
            np.savez(
                self.resdir / f"3FHL_ROI_num{kr}_fit_infos.npz",
                message=results.message,
                stat=[cat_stat, fit_stat],
            )

            exec_time = time() - roi_time
            print("ROI", kr, " time (s): ", exec_time)

            for model in FHL3_roi:
                if (
                    self.FHL3[model.name].data["ROI_num"] == kr
                    and self.FHL3[model.name].data["Signif_Avg"] >= self.sig_cut
                ):
                    flux_points = FluxPointsEstimator(
                        e_edges=self.El_flux, source=model.name, n_sigma_ul=2,
                    ).run(datasets=datasets)
                    filename = self.resdir / f"{model.name}_flux_points.fits"
                    flux_points.write(filename, overwrite=True)

            exec_time = time() - roi_time - exec_time
            print("ROI", kr, " Flux points time (s): ", exec_time)
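
The fit-mask block in the excerpt above keeps only pixels that lie more than psf_margin from the edges of the ROI, first unwrapping Galactic longitudes for regions that straddle the 0/360 deg boundary so that min() and max() stay meaningful. A standalone sketch of that logic, with illustrative values and assuming the same Gammapy version as the excerpt (where geom.get_coord() returns Quantity coordinates):

import astropy.units as u
from gammapy.maps import WcsGeom, WcsNDMap

psf_margin = 0.5  # deg; the script uses self.psf_margin
# small ROI centered on the Galactic centre, so it straddles lon = 0 deg
geom = WcsGeom.create(skydir=(0, 0), frame="galactic", width=5, binsz=1 / 8.0)
coords = geom.get_coord()
lon, lat = coords["lon"], coords["lat"]

# unwrap longitudes (e.g. 359.5 deg -> -0.5 deg) when the ROI crosses lon = 0 deg
if lon.min() < 90 * u.deg and lon.max() > 270 * u.deg:
    lon[lon.value > 180] -= 360 * u.deg

# keep pixels at least psf_margin away from every ROI edge
mask = (
    (lon >= lon.min() + psf_margin * u.deg)
    & (lon <= lon.max() - psf_margin * u.deg)
    & (lat >= lat.min() + psf_margin * u.deg)
    & (lat <= lat.max() - psf_margin * u.deg)
)
mask_map = WcsNDMap(geom, mask)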