def test_psf_kernel_to_image():
    """Check ``PSFKernel.to_image()`` with and without exposure weighting."""
    r_wide = 0.5 * u.deg
    r_narrow = 0.2 * u.deg
    pix_size = 0.1 * u.deg
    energy_axis = MapAxis.from_energy_bounds(1, 10, 2, unit="TeV", name="energy_true")
    geom = WcsGeom.create(binsz=pix_size, npix=50, axes=[energy_axis])

    kernel1 = PSFKernel.from_spatial_model(
        DiskSpatialModel(r_0=r_wide), geom, max_radius=2.5 * u.deg, factor=4
    )
    kernel2 = PSFKernel.from_spatial_model(
        DiskSpatialModel(r_0=r_narrow), geom, max_radius=2.5 * u.deg, factor=4
    )

    # Give the two energy planes different widths so the reduction matters
    kernel1.psf_kernel_map.data[1, :, :] = kernel2.psf_kernel_map.data[1, :, :]

    image_plain = kernel1.to_image()
    image_weighted = kernel1.to_image(exposure=[1, 2])

    # Unweighted reduction: still normalized, with the expected profile values
    assert_allclose(image_plain.psf_kernel_map.data.sum(), 1.0, atol=1e-5)
    assert_allclose(image_plain.psf_kernel_map.data[0, 25, 25], 0.02844, atol=1e-5)
    assert_allclose(image_plain.psf_kernel_map.data[0, 22, 22], 0.009636, atol=1e-5)
    assert_allclose(image_plain.psf_kernel_map.data[0, 20, 20], 0.0, atol=1e-5)

    # Exposure-weighted reduction: normalization kept, center weight shifted
    assert_allclose(image_weighted.psf_kernel_map.data.sum(), 1.0, atol=1e-5)
    assert_allclose(
        image_weighted.psf_kernel_map.data[0, 25, 25], 0.038091, atol=1e-5
    )
    assert_allclose(image_weighted.psf_kernel_map.data[0, 22, 22], 0.00777, atol=1e-5)
    assert_allclose(image_weighted.psf_kernel_map.data[0, 20, 20], 0.0, atol=1e-5)
def test_compute_flux_spatial():
    """Point source in a circular region: spatial flux equals PSF containment."""
    target = SkyCoord("0 deg", "0 deg", frame="galactic")
    circle = CircleSkyRegion(center=target, radius=0.1 * u.deg)

    axis_true = MapAxis.from_energy_bounds(
        ".1 TeV", "10 TeV", nbin=2, name="energy_true"
    )

    sky_model = SkyModel(
        spectral_model=ConstantSpectralModel(),
        spatial_model=PointSpatialModel(
            lon_0=0 * u.deg, lat_0=0 * u.deg, frame="galactic"
        ),
    )
    models = Models(sky_model)

    exposure_region = RegionNDMap.create(circle, axes=[axis_true], binsz_wcs="0.01deg")
    exposure_region.data += 1.0
    exposure_region.unit = "m2 s"

    geom = RegionGeom(circle, axes=[axis_true], binsz_wcs="0.01deg")
    psf = PSFKernel.from_gauss(geom.to_wcs_geom(), sigma="0.1 deg")

    evaluator = MapEvaluator(model=models[0], exposure=exposure_region, psf=psf)
    flux = evaluator.compute_flux_spatial()

    # Expected: containment of a 0.1 deg Gaussian inside a 0.1 deg radius
    expected = Gauss2DPDF(0.1).containment_fraction(0.1)
    assert_allclose(flux.value, expected, rtol=0.003)
def test_convolve_nd():
    """Convolve a 3D map with an energy-dependent PSF kernel and a 2D kernel."""
    axis = MapAxis.from_edges(np.logspace(-1.0, 1.0, 4), unit="TeV", name="energy_true")
    geom = WcsGeom.create(binsz=0.02 * u.deg, width=4.0 * u.deg, axes=[axis])

    m = Map.from_geom(geom)
    m.fill_by_coord([[0.2, 0.4], [-0.1, 0.6], [0.5, 3.6]])

    # TODO : build EnergyDependentTablePSF programmatically rather than using CTA 1DC IRF
    filename = (
        "$GAMMAPY_DATA/cta-1dc/caldb/data/cta//1dc/bcf/South_z20_50h/irf_file.fits"
    )
    psf = EnergyDependentMultiGaussPSF.read(filename, hdu="POINT SPREAD FUNCTION")
    table_psf = psf.to_energy_dependent_table_psf(theta=0.5 * u.deg)

    psf_kernel = PSFKernel.from_table_psf(table_psf, geom, max_radius=1 * u.deg)
    assert psf_kernel.psf_kernel_map.data.shape == (3, 101, 101)

    # PSF convolution conserves counts per energy plane
    convolved = m.convolve(psf_kernel)
    assert_allclose(convolved.data.sum(axis=(1, 2)), [0, 1, 1], atol=1e-5)

    # Plain 2D array kernel, peak-normalized box
    box = Box2DKernel(3, mode="center")
    box.normalize("peak")
    convolved = m.convolve(box.array)
    assert_allclose(convolved.data[0, :, :].sum(), 0, atol=1e-5)
    assert_allclose(convolved.data[1, :, :].sum(), 9, atol=1e-5)
def test_psf_kernel_from_gauss_read_write(tmp_path):
    """A Gaussian kernel round-trips through FITS unchanged."""
    geom = WcsGeom.create(binsz=0.1 * u.deg, npix=150, axes=[MapAxis((0, 1, 2))])
    kernel = PSFKernel.from_gauss(geom, 0.5 * u.deg)

    # The Gaussian width is energy-independent, so both planes must agree
    assert_allclose(kernel.psf_kernel_map.data[0], kernel.psf_kernel_map.data[1])

    # The kernel geometry must have an odd number of pixels per axis
    assert_allclose(np.array(kernel.psf_kernel_map.geom.npix) % 2, 1)

    path = tmp_path / "tmp.fits"
    kernel.write(path, overwrite=True)
    restored = PSFKernel.read(path)
    assert_allclose(kernel.psf_kernel_map.data, restored.psf_kernel_map.data)
def test_convolve_pixel_scale_error():
    """Convolving with a kernel on a different pixel size must raise ValueError."""
    image = WcsNDMap.create(binsz=0.05 * u.deg, width=5 * u.deg)
    # Kernel geometry deliberately uses a mismatched 0.04 deg pixel size
    kernel_geom = WcsGeom.create(binsz=0.04 * u.deg, width=0.5 * u.deg)
    kernel = PSFKernel.from_gauss(kernel_geom, sigma=0.1 * u.deg, max_radius=1.5 * u.deg)

    with pytest.raises(ValueError):
        image.convolve(kernel)
def test_convolve_kernel_size_error():
    """Mismatched energy axes between map and kernel must raise ValueError."""
    map_axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=2)
    kernel_axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=3)

    image = WcsNDMap.create(binsz=0.05 * u.deg, width=5 * u.deg, axes=[map_axis])
    # Kernel has three energy bins while the map has two
    kernel_geom = WcsGeom.create(
        binsz=0.05 * u.deg, width=0.5 * u.deg, axes=[kernel_axis]
    )
    kernel = PSFKernel.from_gauss(kernel_geom, sigma=0.1 * u.deg, max_radius=1.5 * u.deg)

    with pytest.raises(ValueError):
        image.convolve(kernel)
def test_psf_kernel_to_image():
    """``to_image()`` on disk-shaped table PSFs, with and without exposure weights.

    NOTE(review): a test with the same name also appears earlier in this file
    (built from ``DiskSpatialModel`` instead of ``TablePSF``) — if both live in
    the same module, only one is collected by pytest; confirm they belong to
    different files.
    """
    width_wide = 0.5 * u.deg
    width_narrow = 0.2 * u.deg
    axis = MapAxis.from_energy_bounds(1, 10, 2, unit="TeV", name="energy_true")
    geom = WcsGeom.create(binsz=0.1 * u.deg, npix=50, axes=[axis])

    rad = Angle(np.linspace(0.0, 1.5 * width_wide.to("deg").value, 100), "deg")
    kernel1 = PSFKernel.from_table_psf(
        TablePSF.from_shape(shape="disk", width=width_wide, rad=rad), geom
    )
    kernel2 = PSFKernel.from_table_psf(
        TablePSF.from_shape(shape="disk", width=width_narrow, rad=rad), geom
    )

    # Second energy plane gets the narrow disk so the planes differ
    kernel1.psf_kernel_map.data[1, :, :] = kernel2.psf_kernel_map.data[1, :, :]

    image_plain = kernel1.to_image()
    image_weighted = kernel1.to_image(exposure=[1, 2])

    assert_allclose(image_plain.psf_kernel_map.data.sum(), 1.0, atol=1e-5)
    assert_allclose(image_plain.psf_kernel_map.data[0, 25, 25], 0.028415, atol=1e-5)
    assert_allclose(image_plain.psf_kernel_map.data[0, 22, 22], 0.009806, atol=1e-5)
    assert_allclose(image_plain.psf_kernel_map.data[0, 20, 20], 0.0, atol=1e-5)

    assert_allclose(image_weighted.psf_kernel_map.data.sum(), 1.0, atol=1e-5)
    assert_allclose(
        image_weighted.psf_kernel_map.data[0, 25, 25], 0.03791383, atol=1e-5
    )
    assert_allclose(
        image_weighted.psf_kernel_map.data[0, 22, 22], 0.0079069, atol=1e-5
    )
    assert_allclose(image_weighted.psf_kernel_map.data[0, 20, 20], 0.0, atol=1e-5)
def test_convolve_wcs(nest):
    """HEALPix map convolved with a WCS kernel conserves the number of events."""
    energy = MapAxis.from_bounds(1, 100, unit='TeV', nbin=2, name='energy')

    hpx_geom = HpxGeom.create(
        nside=256, axes=[energy], region='DISK(0,0,2.5)', nest=nest
    )
    hpx_map = Map.from_geom(hpx_geom)
    hpx_map.set_by_coord((0, 0, [2, 90]), 1)

    wcs_geom = WcsGeom.create(width=5, binsz=0.04, axes=[energy])
    kernel = PSFKernel.from_gauss(wcs_geom, 0.4 * u.deg)

    result = hpx_map.convolve_wcs(kernel)
    # Two unit entries were filled, so the convolved map must sum to two
    assert_allclose(result.data.sum(), 2, rtol=0.001)
def test_table_psf_to_kernel_map():
    """Gaussian table PSF converts to a normalized kernel peaked at the center."""
    sigma = 0.5 * u.deg
    geom = WcsGeom.create(binsz=0.1 * u.deg, npix=150)

    rad = Angle(np.linspace(0.0, 3 * sigma.to("deg").value, 100), "deg")
    table_psf = TablePSF.from_shape(shape="gauss", width=sigma, rad=rad)

    data = PSFKernel.from_table_psf(table_psf, geom).psf_kernel_map.data

    # Is normalization OK?
    assert_allclose(data.sum(), 1.0, atol=1e-5)

    # maximum at the center of map?
    peak = np.unravel_index(np.argmax(data, axis=None), data.shape)
    # absolute tolerance at 0.5 because of even number of pixel here
    assert_allclose(peak, geom.center_pix, atol=0.5)
def test_large_oversampling():
    """Narrow Gaussian sources integrate to the right total flux at any width."""
    n_energy = 2
    axis_true = MapAxis.from_energy_bounds(
        ".1 TeV", "10 TeV", nbin=n_energy, name="energy_true"
    )
    geom = WcsGeom.create(width=1, binsz=0.02, axes=[axis_true])

    gauss = GaussianSpatialModel(
        lon_0=0 * u.deg, lat_0=0 * u.deg, sigma=1e-4 * u.deg, frame="icrs"
    )
    sky_model = SkyModel(spectral_model=ConstantSpectralModel(), spatial_model=gauss)
    models = Models(sky_model)

    exposure = Map.from_geom(geom, unit="m2 s")
    exposure.data += 1.0

    psf = PSFKernel.from_gauss(geom, sigma="0.1 deg")
    evaluator = MapEvaluator(model=models[0], exposure=exposure, psf=psf)

    # Evaluate at several source widths, from far below to above the pixel size
    fluxes = [evaluator.compute_flux_spatial()]
    for width in (0.001, 0.01, 0.03):
        gauss.sigma.value = width
        fluxes.append(evaluator.compute_flux_spatial())

    # Each energy plane must integrate to one, independent of source width
    for flux in fluxes:
        assert_allclose(flux.data.sum(), n_energy, rtol=1e-4)
def test_compute_npred_sign():
    """npred mirrors the amplitude sign: +amplitude >= 0, -amplitude <= 0."""
    center = SkyCoord("0 deg", "0 deg", frame="galactic")
    axis_true = MapAxis.from_energy_bounds(
        ".1 TeV", "10 TeV", nbin=2, name="energy_true"
    )
    geom = WcsGeom.create(
        skydir=center,
        width=1 * u.deg,
        axes=[axis_true],
        frame='galactic',
        binsz=0.2 * u.deg,
    )

    point = PointSpatialModel(lon_0=0 * u.deg, lat_0=0 * u.deg, frame="galactic")
    model_pos = SkyModel(
        spectral_model=PowerLawSpectralModel(index=2, amplitude="1e-11 TeV-1 s-1 m-2"),
        spatial_model=point,
    )
    model_neg = SkyModel(
        spectral_model=PowerLawSpectralModel(index=2, amplitude="-1e-11 TeV-1 s-1 m-2"),
        spatial_model=point,
    )

    exposure = Map.from_geom(geom, unit="m2 s")
    exposure.data += 1.0
    psf = PSFKernel.from_gauss(geom, sigma="0.1 deg")

    npred_pos = MapEvaluator(model=model_pos, exposure=exposure, psf=psf).compute_npred()
    npred_neg = MapEvaluator(model=model_neg, exposure=exposure, psf=psf).compute_npred()

    # The two predictions are exact mirrors of each other
    assert (npred_pos.data == -npred_neg.data).all()
    assert np.all(npred_pos.data >= 0)
    assert np.all(npred_neg.data <= 0)
def psf(geom_true):
    """Fixture: Gaussian PSF kernel of 0.5 deg width on the true-energy geometry."""
    return PSFKernel.from_gauss(geom_true, 0.5 * u.deg)
"""Plot Fermi PSF.""" import matplotlib.pyplot as plt from gammapy.irf import EnergyDependentTablePSF, PSFKernel from gammapy.maps import WcsGeom filename = "$GAMMAPY_DATA/tests/unbundled/fermi/psf.fits" fermi_psf = EnergyDependentTablePSF.read(filename) psf = fermi_psf.table_psf_at_energy(energy="1 GeV") geom = WcsGeom.create(npix=100, binsz=0.01) kernel = PSFKernel.from_table_psf(psf, geom) plt.imshow(kernel.data) plt.colorbar() plt.show()
def run_region(self, kr, lon, lat, radius):
    """Prepare, fit, and post-process a single 3FHL region of interest (ROI).

    Builds counts/exposure/PSF/edisp maps for the ROI, fits the diffuse
    backgrounds plus all 3FHL sources inside it, writes the fitted datasets,
    and estimates flux points for significant sources.

    Parameters
    ----------
    kr : int
        ROI number; used in the dataset name and output file names.
    lon, lat : float
        Galactic coordinates of the ROI center, in degrees.
    radius : float
        ROI radius in degrees; the map width adds ``self.psf_margin`` per side.
    """
    # TODO: for now we have to read/create the allsky maps each in each job
    # because we can't pickle <functools._lru_cache_wrapper object
    # send this back to init when fixed
    # exposure
    exposure_hpx = Map.read(
        "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz"
    )
    exposure_hpx.unit = "cm2 s"

    # iem (interstellar emission model)
    iem_filepath = BASE_PATH / "data" / "gll_iem_v06_extrapolated.fits"
    iem_fermi_extra = Map.read(iem_filepath)
    # norm=1.1, tilt=0.03 see paper appendix A
    model_iem = SkyDiffuseCube(
        iem_fermi_extra, norm=1.1, tilt=0.03, name="iem_extrapolated"
    )

    # ROI
    roi_time = time()
    ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
    width = 2 * (radius + self.psf_margin)

    # Counts
    counts = Map.create(
        skydir=ROI_pos,
        width=width,
        proj="CAR",
        frame="galactic",
        binsz=1 / 8.0,
        axes=[self.energy_axis],
        dtype=float,
    )
    counts.fill_by_coord(
        {"skycoord": self.events.radec, "energy": self.events.energy}
    )

    # True-energy geometry sharing the counts WCS
    axis = MapAxis.from_nodes(
        counts.geom.axes[0].center, name="energy_true", unit="GeV", interp="log"
    )
    wcs = counts.geom.wcs
    geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
    coords = geom.get_coord()

    # expo: interpolate the all-sky HEALPix exposure onto the ROI grid
    data = exposure_hpx.interp_by_coord(coords)
    exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

    # read PSF
    psf_kernel = PSFKernel.from_table_psf(
        self.psf, geom, max_radius=self.psf_margin * u.deg
    )

    # Energy Dispersion (diagonal response between true and reco axes)
    e_true = exposure.geom.axes[0].edges
    e_reco = counts.geom.axes[0].edges
    edisp = EDispKernel.from_diagonal_response(e_true=e_true, e_reco=e_reco)

    # fit mask: exclude a PSF-margin-wide border; unwrap longitudes when the
    # ROI straddles the lon = 0/360 boundary
    if coords["lon"].min() < 90 * u.deg and coords["lon"].max() > 270 * u.deg:
        coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
    mask = (
        (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
        & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
        & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
        & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg)
    )
    mask_fermi = WcsNDMap(counts.geom, mask)

    # IEM
    eval_iem = MapEvaluator(
        model=model_iem, exposure=exposure, psf=psf_kernel, edisp=edisp
    )
    bkg_iem = eval_iem.compute_npred()

    # ISO
    eval_iso = MapEvaluator(model=self.model_iso, exposure=exposure, edisp=edisp)
    bkg_iso = eval_iso.compute_npred()

    # merge iem and iso, only one local normalization is fitted
    dataset_name = "3FHL_ROI_num" + str(kr)
    background_total = bkg_iem + bkg_iso
    background_model = BackgroundModel(
        background_total, name="bkg_iem+iso", datasets_names=[dataset_name]
    )
    background_model.parameters["norm"].min = 0.0

    # Sources model: every 3FHL source whose position falls inside the ROI
    in_roi = self.FHL3.positions.galactic.contained_by(wcs)
    FHL3_roi = []
    for ks in range(len(self.FHL3.table)):
        # fix: `== True` comparison replaced by a direct truth test (PEP 8 E712)
        if in_roi[ks]:
            model = self.FHL3[ks].sky_model()
            model.spatial_model.parameters.freeze_all()  # freeze spatial
            model.spectral_model.parameters["amplitude"].min = 0.0
            if isinstance(model.spectral_model, PowerLawSpectralModel):
                model.spectral_model.parameters["index"].min = 0.1
                model.spectral_model.parameters["index"].max = 10.0
            else:
                model.spectral_model.parameters["alpha"].min = 0.1
                model.spectral_model.parameters["alpha"].max = 10.0
            FHL3_roi.append(model)
    model_total = Models([background_model] + FHL3_roi)

    # Dataset
    dataset = MapDataset(
        models=model_total,
        counts=counts,
        exposure=exposure,
        psf=psf_kernel,
        edisp=edisp,
        mask_fit=mask_fermi,
        name=dataset_name,
    )
    cat_stat = dataset.stat_sum()

    datasets = Datasets([dataset])
    fit = Fit(datasets)
    results = fit.run(**self.optimize_opts)
    print("ROI_num", str(kr), "\n", results)
    fit_stat = datasets.stat_sum()

    # Persist only successful fits, together with pre/post-fit statistics
    if results.message != "Optimization failed.":
        datasets.write(path=Path(self.resdir), prefix=dataset.name, overwrite=True)
        np.savez(
            self.resdir / f"3FHL_ROI_num{kr}_fit_infos.npz",
            message=results.message,
            stat=[cat_stat, fit_stat],
        )

    exec_time = time() - roi_time
    print("ROI", kr, " time (s): ", exec_time)

    # Flux points for significant sources assigned to this ROI
    for model in FHL3_roi:
        if (
            self.FHL3[model.name].data["ROI_num"] == kr
            and self.FHL3[model.name].data["Signif_Avg"] >= self.sig_cut
        ):
            flux_points = FluxPointsEstimator(
                e_edges=self.El_flux, source=model.name, n_sigma_ul=2,
            ).run(datasets=datasets)
            filename = self.resdir / f"{model.name}_flux_points.fits"
            flux_points.write(filename, overwrite=True)

    exec_time = time() - roi_time - exec_time
    print("ROI", kr, " Flux points time (s): ", exec_time)