def make_map_dataset(observations, target_pos, geom, geom_true, offset_max=2 * u.deg):
    """Build a `MapDataset` (counts, exposure, background, PSF, edisp) from observations.

    Parameters
    ----------
    observations : list of observations
        Observations to stack into the maps and mean IRFs.
    target_pos : `~astropy.coordinates.SkyCoord`
        Position at which the mean PSF and energy dispersion are evaluated.
    geom : map geometry
        Reconstructed-energy geometry for counts/background.
    geom_true : map geometry
        True-energy geometry for exposure and the PSF kernel.
    offset_max : `~astropy.units.Quantity`
        Maximum field-of-view offset passed to the map maker.

    Returns
    -------
    dataset : `MapDataset`
        Dataset ready for fitting.
    """
    maker = MapMaker(geom, offset_max, geom_true=geom_true)
    maps = maker.run(observations)
    table_psf = make_mean_psf(observations, target_pos)

    # PSF kernel used for the model convolution
    psf_kernel = PSFKernel.from_table_psf(table_psf, geom_true, max_radius="0.3 deg")

    # Mean energy dispersion mapping true-energy bins to reco-energy bins.
    edisp = make_mean_edisp(
        observations,
        target_pos,
        e_true=geom_true.axes[0].edges,
        e_reco=geom.axes[0].edges,
    )

    # Background normalisation is left free in the fit; the spectral tilt is fixed.
    background_model = BackgroundModel(maps["background"])
    background_model.parameters["norm"].frozen = False
    background_model.parameters["tilt"].frozen = True

    dataset = MapDataset(
        counts=maps["counts"],
        exposure=maps["exposure"],
        background_model=background_model,
        psf=psf_kernel,
        edisp=edisp,
    )
    return dataset
def test_convolve_nd():
    """Convolve a 3D map with an energy-dependent PSF kernel and check flux conservation."""
    energy_axis = MapAxis.from_edges(
        np.logspace(-1.0, 1.0, 4), unit="TeV", name="energy"
    )
    geom = WcsGeom.create(binsz=0.02 * u.deg, width=4.0 * u.deg, axes=[energy_axis])
    m = Map.from_geom(geom)
    # Fill a single event at (lon=0.2, lat=-0.1, energy=0.5); the other columns
    # are the per-dimension coordinate lists.
    m.fill_by_coord([[0.2, 0.4], [-0.1, 0.6], [0.5, 3.6]])

    # TODO: build EnergyDependentTablePSF programmatically rather than using CTA 1DC IRF
    # Fixed: path previously contained a double slash ("cta//1dc"); normalized to
    # match the identical path used elsewhere in this file.
    filename = (
        "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
    )
    psf = EnergyDependentMultiGaussPSF.read(filename, hdu="POINT SPREAD FUNCTION")
    table_psf = psf.to_energy_dependent_table_psf(theta=0.5 * u.deg)
    psf_kernel = PSFKernel.from_table_psf(table_psf, geom, max_radius=1 * u.deg)

    assert psf_kernel.psf_kernel_map.data.shape == (3, 101, 101)

    mc = m.convolve(psf_kernel)
    # Convolution must conserve counts per energy plane (0, 1, 1 events filled).
    assert_allclose(mc.data.sum(axis=(1, 2)), [0, 1, 1], atol=1e-5)
def test_psf_kernel_from_gauss_read_write(tmp_path):
    """Gaussian kernel round-trips through FITS unchanged."""
    sigma = 0.5 * u.deg
    binsz = 0.1 * u.deg
    geom = WcsGeom.create(binsz=binsz, npix=150, axes=[MapAxis((0, 1, 2))])
    kernel = PSFKernel.from_gauss(geom, sigma)

    original = kernel.psf_kernel_map
    # Check that both maps are identical
    assert_allclose(original.data[0], original.data[1])
    # Is there an odd number of pixels
    assert_allclose(np.array(original.geom.npix) % 2, 1)

    path = tmp_path / "tmp.fits"
    kernel.write(path, overwrite=True)
    restored = PSFKernel.read(path)
    assert_allclose(original.data, restored.psf_kernel_map.data)
def get_psf(geom_etrue):
    """Build a PSF kernel on ``geom_etrue`` from the CTA 1DC South IRF file."""
    irf_path = (
        "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
    )
    multi_gauss_psf = EnergyDependentMultiGaussPSF.read(
        irf_path, hdu="POINT SPREAD FUNCTION"
    )
    table_psf = multi_gauss_psf.to_energy_dependent_table_psf(theta=0.5 * u.deg)
    return PSFKernel.from_table_psf(table_psf, geom_etrue, max_radius=0.5 * u.deg)
def test_psf_kernel_from_gauss_read_write(tmpdir):
    """Gaussian kernel symmetry, odd pixel count, and FITS round-trip."""
    geom = WcsGeom.create(
        binsz=0.1 * u.deg, npix=150, axes=[MapAxis((0, 1, 2))]
    )
    kernel = PSFKernel.from_gauss(geom, 0.5 * u.deg)
    kmap = kernel.psf_kernel_map

    # Check that both maps are identical
    assert_allclose(kmap.data[0], kmap.data[1])
    # Is there an odd number of pixels
    assert_allclose(np.array(kmap.geom.npix) % 2, 1)

    # Test read and write
    filename = str(tmpdir / "test_kernel.fits")
    kernel.write(filename, overwrite=True)
    newkernel = PSFKernel.read(filename)
    assert_allclose(kmap.data, newkernel.psf_kernel_map.data)
def test_convolve_pixel_scale_error():
    """Convolving with a kernel whose pixel size differs from the map must raise."""
    target_map = WcsNDMap.create(binsz=0.05 * u.deg, width=5 * u.deg)
    kernel_geom = WcsGeom.create(binsz=0.04 * u.deg, width=0.5 * u.deg)
    mismatched_kernel = PSFKernel.from_gauss(
        kernel_geom, sigma=0.1 * u.deg, max_radius=1.5 * u.deg
    )
    with pytest.raises(ValueError):
        target_map.convolve(mismatched_kernel)
def test_convolve_pixel_scale_error():
    """A pixel-size mismatch between kernel and map raises with a specific message."""
    target_map = WcsNDMap.create(binsz=0.05 * u.deg, width=5 * u.deg)
    kernel_geom = WcsGeom.create(binsz=0.04 * u.deg, width=0.5 * u.deg)
    mismatched_kernel = PSFKernel.from_gauss(
        kernel_geom, sigma=0.1 * u.deg, max_radius=1.5 * u.deg
    )
    with pytest.raises(ValueError) as excinfo:
        target_map.convolve(mismatched_kernel)
    # Exact error text is part of the contract checked here.
    assert str(excinfo.value) == "Pixel size of kernel and map not compatible."
def test_make_image():
    """Check exposure-weighted collapse of a 3D PSF kernel to a 2D image."""
    energy_axis = MapAxis.from_edges(
        [100, 200, 1000], name="energy", unit="TeV", interp="log"
    )
    geom = WcsGeom.create(binsz=0.1, npix=20, axes=[energy_axis])
    # Uniform exposure across the energy planes.
    exposures = np.ones(geom.shape_axes)
    sigma = 0.5 * u.deg
    kernel = PSFKernel.from_gauss(geom, sigma)
    psf2D = kernel.make_image(exposures=exposures)
    # NOTE(review): psf2D is accessed both via .psf_kernel_map.data and via .data —
    # presumably make_image returns a PSFKernel that exposes both; confirm against
    # the PSFKernel API.
    assert_allclose(psf2D.psf_kernel_map.data.sum(), 1.0, rtol=1e-3)
    assert_allclose(psf2D.data[10, 14], 0.0038, rtol=1e-2)
def test_table_psf_to_kernel_map():
    """A Gaussian TablePSF converts to a normalized, centered kernel map."""
    sigma = 0.5 * u.deg
    geom = WcsGeom.create(binsz=0.1 * u.deg, npix=150)
    rad = Angle(np.linspace(0.0, 3 * sigma.to("deg").value, 100), "deg")
    table_psf = TablePSF.from_shape(shape="gauss", width=sigma, rad=rad)

    data = PSFKernel.from_table_psf(table_psf, geom).psf_kernel_map.data

    # Is normalization OK?
    assert_allclose(data.sum(), 1.0, atol=1e-5)

    # maximum at the center of map?
    peak_index = np.unravel_index(np.argmax(data, axis=None), data.shape)
    # absolute tolerance at 0.5 because of even number of pixel here
    assert_allclose(peak_index, geom.center_pix, atol=0.5)
def make_irfs():
    """Compute mean PSF kernel and energy dispersion and write them to FITS files.

    Reads the observation list and analysis settings from the module-level
    ``config`` object and writes ``psf.fits.gz`` and ``edisp.fits.gz`` to the
    current working directory.
    """
    log.info("Executing make_irfs")
    observations = get_observations()

    # mean PSF evaluated at the target position
    src_pos = config.target_skycoord
    table_psf = observations.make_mean_psf(src_pos)

    # PSF kernel used for the model convolution
    psf_kernel = PSFKernel.from_table_psf(
        table_psf, geom=config.map_geom, max_radius="0.5 deg"
    )

    # define energy grid (same grid used for e_true and e_reco below)
    energy = config.energy_axis.edges * config.energy_axis.unit

    # mean edisp
    edisp = observations.make_mean_edisp(position=src_pos, e_true=energy, e_reco=energy)

    log.info("Writing psf.fits.gz and edisp.fits.gz")
    psf_kernel.write("psf.fits.gz", overwrite=True)
    edisp.write("edisp.fits.gz", overwrite=True)
def get_psf_kernel(self, position, geom, max_radius=None, factor=4):
    """Compute the PSF kernel at a given sky position.

    The kernel is returned as a `~gammapy.cube.PSFKernel` whose map is
    defined on the input geometry.

    Parameters
    ----------
    position : `~astropy.coordinates.SkyCoord`
        Target position (a single coordinate).
    geom : `~gammapy.maps.MapGeom`
        Target geometry for the kernel map.
    max_radius : `~astropy.coordinates.Angle`
        Maximum angular size of the kernel map.
    factor : int
        Oversampling factor used when computing the PSF.

    Returns
    -------
    kernel : `~gammapy.cube.PSFKernel`
        The resulting kernel.
    """
    psf_table = self.get_energy_dependent_table_psf(position)
    kernel = PSFKernel.from_table_psf(psf_table, geom, max_radius, factor)
    return kernel
get_ipython().run_cell_magic('time', '', 'maker = MapMaker(geom, offset_max=4.0 * u.deg)\nmaps = maker.run(observations)') # ### Making a PSF Map # # Make a PSF map and weigh it with the exposure at the source position to get a 2D PSF # In[ ]: # mean PSF src_pos = SkyCoord(0, 0, unit="deg", frame="galactic") table_psf = make_mean_psf(observations, src_pos) # PSF kernel used for the model convolution psf_kernel = PSFKernel.from_table_psf(table_psf, geom, max_radius="0.3 deg") # get the exposure at the source position exposure_at_pos = maps["exposure"].get_by_coord( { "lon": src_pos.l.value, "lat": src_pos.b.value, "energy": energy_axis.center, } ) # now compute the 2D PSF psf2D = psf_kernel.make_image(exposures=exposure_at_pos) # ### Make 2D images from 3D ones
# In[ ]: counts = Map.read("$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts.fits.gz") background = Map.read( "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-background.fits.gz" ) exposure = Map.read( "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-exposure.fits.gz" ) maps = {"counts": counts, "background": background, "exposure": exposure} kernel = PSFKernel.read( "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-psf.fits.gz" ) # In[ ]: get_ipython().run_cell_magic('time', '', 'estimator = TSMapEstimator()\nimages = estimator.run(maps, kernel.data)') # ## Plot images # In[ ]: plt.figure(figsize=(15, 5))
def simulate_dataset(
    skymodel,
    geom,
    pointing,
    irfs,
    livetime=1 * u.h,
    offset=0 * u.deg,
    max_radius=0.8 * u.deg,
    random_state="random-seed",
):
    """Simulate a 3D dataset.

    Simulate a source defined with a sky model for a given pointing,
    geometry and irfs for a given exposure time.
    This will return a dataset object which includes the counts cube,
    the exposure cube, the psf cube, the background model and the sky model.

    Parameters
    ----------
    skymodel : `~gammapy.modeling.models.SkyModel`
        Sky model of the source to simulate
    geom : `~gammapy.maps.WcsGeom`
        Geometry object for the observation
    pointing : `~astropy.coordinates.SkyCoord`
        Pointing position
    irfs : dict
        Irfs used for simulating the observation
    livetime : `~astropy.units.Quantity`
        Livetime exposure of the simulated observation
    offset : `~astropy.units.Quantity`
        Offset from the center of the pointing position.
        This is used for the PSF and Edisp estimation
    max_radius : `~astropy.coordinates.Angle`
        The maximum radius of the PSF kernel.
    random_state: {int, 'random-seed', 'global-rng', `~numpy.random.RandomState`}
        Defines random number generator initialisation.

    Returns
    -------
    dataset : `~gammapy.cube.MapDataset`
        A dataset of the simulated observation.
    """
    # Predicted background from the background IRF for the given pointing/livetime.
    background = make_map_background_irf(
        pointing=pointing, ontime=livetime, bkg=irfs["bkg"], geom=geom
    )
    background_model = BackgroundModel(background)

    # PSF kernel evaluated at the requested offset from the pointing.
    psf = irfs["psf"].to_energy_dependent_table_psf(theta=offset)
    psf_kernel = PSFKernel.from_table_psf(psf, geom, max_radius=max_radius)

    exposure = make_map_exposure_true_energy(
        pointing=pointing, livetime=livetime, aeff=irfs["aeff"], geom=geom
    )

    # Energy dispersion is optional: only built when the IRF dict provides it.
    if "edisp" in irfs:
        energy = geom.axes[0].edges
        edisp = irfs["edisp"].to_energy_dispersion(offset, e_reco=energy, e_true=energy)
    else:
        edisp = None

    dataset = MapDataset(
        model=skymodel,
        exposure=exposure,
        background_model=background_model,
        psf=psf_kernel,
        edisp=edisp,
    )

    # Draw Poisson counts from the predicted counts map.
    npred_map = dataset.npred()
    rng = get_random_state(random_state)
    counts = rng.poisson(npred_map.data)
    dataset.counts = WcsNDMap(geom, counts)

    return dataset
# Notebook-exported script fragment; this closing parenthesis terminates a call
# started in an earlier cell not visible here.
)

# ### Prepare IRFs
# PSF and Edisp are estimated for each observation at a specific source position defined by `src_pos`:
#

# In[ ]:

# define energy grid for edisp
energy = energy_axis.edges * energy_axis.unit

# In[ ]:

# Per-observation PSF kernel and energy dispersion, keyed by obs_id.
for obs in observations:
    table_psf = make_psf(obs, src_pos)
    psf = PSFKernel.from_table_psf(table_psf, geom, max_radius="0.5 deg")
    observations_data[obs.obs_id]["psf"] = psf

    # create Edisp at the source offset from the pointing position
    offset = src_pos.separation(obs.pointing_radec)
    edisp = obs.edisp.to_energy_dispersion(offset, e_true=energy, e_reco=energy)
    observations_data[obs.obs_id]["edisp"] = edisp

# Save maps as well as IRFs to disk:

# In[ ]:

# NOTE: loop body continues beyond this fragment.
for obs_id in obs_ids:
    path = Path("analysis_3d_joint") / "obs_{}".format(obs_id)
# Notebook-exported script fragment: plot PSF profiles and build a kernel.
psf_at_energy.plot_psf_vs_rad(label="PSF @ {:.0f}".format(energy), lw=2)

# Band-averaged PSF assuming a power-law spectrum of index 2.3.
erange = [50, 2000] * u.GeV
psf_mean = psf.table_psf_in_energy_band(energy_band=erange, spectral_index=2.3)
psf_mean.plot_psf_vs_rad(label="PSF Mean", lw=4, c="k", ls="--")

plt.xlim(1e-3, 0.3)
plt.ylim(1e3, 1e6)
plt.legend();

# In[ ]:

# Let's compute a PSF kernel matching the pixel size of our map
psf_kernel = PSFKernel.from_table_psf(psf, counts.geom, max_radius="0.5 deg")

# In[ ]:

psf_kernel.psf_kernel_map.sum_over_axes().plot(stretch="log", add_cbar=True);

# ## Background
#
# Let's compute a background cube, with predicted number of background events per pixel
# from the diffuse Galactic and isotropic model components. For this, we use the
# gammapy.cube.MapEvaluator to multiply with the exposure and apply the PSF. The
# Fermi-LAT energy dispersion at high energies is small, we neglect it here.

# In[ ]:
"""Plot Fermi PSF.""" import matplotlib.pyplot as plt from gammapy.datasets import FermiGalacticCenter from gammapy.irf import EnergyDependentTablePSF from gammapy.maps import WcsGeom from gammapy.cube import PSFKernel filename = FermiGalacticCenter.filenames()['psf'] fermi_psf = EnergyDependentTablePSF.read(filename) psf = fermi_psf.table_psf_at_energy(energy='1 GeV') geom = WcsGeom.create(npix=100, binsz=0.01) kernel = PSFKernel.from_table_psf(psf, geom) plt.imshow(kernel.data) plt.colorbar() plt.show()
# Notebook-exported script fragment; the closing brace/parenthesis terminates a
# call started in an earlier cell not visible here.
}).plot(add_cbar=True)

# In[ ]:

background = make_map_background_irf(
    pointing=pointing, ontime=livetime, bkg=irfs["bkg"], geom=geom
)
background.slice_by_idx({"energy": 3}).plot(add_cbar=True)

# In[ ]:

# PSF kernel at the chosen offset from the pointing position.
psf = irfs["psf"].to_energy_dependent_table_psf(theta=offset)
psf_kernel = PSFKernel.from_table_psf(psf, geom, max_radius=0.3 * u.deg)
psf_kernel.psf_kernel_map.sum_over_axes().plot(stretch="log")

# In[ ]:

# Diagonal-friendly energy dispersion on the map's energy grid.
energy = axis.edges
edisp = irfs["edisp"].to_energy_dispersion(offset, e_reco=energy, e_true=energy)
edisp.plot_matrix()

# Now we have to compute `npred` maps, i.e. "predicted counts per pixel" given the
# model and the observation infos: exposure, background, PSF and EDISP. For this we
# use the `MapDataset` object:

# In[ ]:

background_model = BackgroundModel(background)
# Notebook-exported script fragment; these keyword arguments finish a plot call
# started in an earlier cell not visible here.
vmax=1, add_cbar=True, ax=axs[1])

# ### Prepare IRFs
#
# To estimate the mean PSF across all observations at a given source position
# `src_pos`, we use `make_mean_psf()`:

# In[ ]:

# mean PSF at the Galactic Centre
src_pos = SkyCoord(0, 0, unit="deg", frame="galactic")
table_psf = make_mean_psf(observations, src_pos)

# PSF kernel used for the model convolution
psf_kernel = PSFKernel.from_table_psf(table_psf, geom, max_radius="0.3 deg")

# To estimate the mean energy dispersion across all observations at a given source
# position `src_pos`, we use `make_mean_edisp()`:

# In[ ]:

# define energy grid
energy = energy_axis.edges * energy_axis.unit

# mean edisp
edisp = make_mean_edisp(observations, position=src_pos, e_true=energy, e_reco=energy)

# ### Save maps and IRFs to disk
def run_region(self, kr, lon, lat, radius):
    """Fit one 3FHL region of interest and write the fit products to disk.

    Builds counts/exposure/background maps for the ROI, fits all catalog
    sources contained in it, saves the fitted datasets, covariance and fit
    statistics, and computes flux points for significant sources.

    Parameters
    ----------
    kr : int
        ROI number (used in output file names).
    lon, lat : float
        ROI centre in Galactic coordinates (deg).
    radius : float
        ROI radius (deg); the map width is ``2 * (radius + psf_margin)``.
    """
    # TODO: for now we have to read/create the allsky maps each in each job
    # because we can't pickle <functools._lru_cache_wrapper object
    # send this back to init when fixed

    # exposure
    exposure_hpx = Map.read(
        self.datadir + "/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz"
    )
    exposure_hpx.unit = "cm2 s"

    # background iem (interstellar emission model, extrapolated in energy)
    infile = self.datadir + "/catalogs/fermi/gll_iem_v06.fits.gz"
    outfile = self.resdir + "/gll_iem_v06_extra.fits"
    model_iem = extrapolate_iem(infile, outfile, self.logEc_extra)

    # ROI
    roi_time = time()
    ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
    # Widen the map by the PSF margin on each side so edge effects stay
    # outside the fit mask built below.
    width = 2 * (radius + self.psf_margin)

    # Counts
    counts = Map.create(
        skydir=ROI_pos,
        width=width,
        proj="CAR",
        coordsys="GAL",
        binsz=self.dlb,
        axes=[self.energy_axis],
        dtype=float,
    )
    counts.fill_by_coord(
        {"skycoord": self.events.radec, "energy": self.events.energy}
    )

    # Geometry sharing the counts WCS but with a node-based energy axis.
    axis = MapAxis.from_nodes(
        counts.geom.axes[0].center, name="energy", unit="GeV", interp="log"
    )
    wcs = counts.geom.wcs
    geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
    coords = counts.geom.get_coord()

    # expo: interpolate the all-sky HEALPix exposure onto the ROI grid
    data = exposure_hpx.interp_by_coord(coords)
    exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

    # read PSF
    psf_kernel = PSFKernel.from_table_psf(
        self.psf, counts.geom, max_radius=self.psf_margin * u.deg
    )

    # Energy Dispersion (diagonal response: no migration assumed)
    e_true = exposure.geom.axes[0].edges
    e_reco = counts.geom.axes[0].edges
    edisp = EnergyDispersion.from_diagonal_response(e_true=e_true, e_reco=e_reco)

    # fit mask: shrink the ROI by the PSF margin on all four sides.
    # Longitudes are re-wrapped to a continuous range when the ROI straddles 0 deg.
    if coords["lon"].min() < 90 * u.deg and coords["lon"].max() > 270 * u.deg:
        coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
    mask = (
        (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
        & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
        & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
        & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg)
    )
    mask_fermi = WcsNDMap(counts.geom, mask)

    # IEM: predicted counts from the diffuse Galactic model
    eval_iem = MapEvaluator(
        model=model_iem, exposure=exposure, psf=psf_kernel, edisp=edisp
    )
    bkg_iem = eval_iem.compute_npred()

    # ISO: predicted counts from the isotropic model
    eval_iso = MapEvaluator(model=self.model_iso, exposure=exposure, edisp=edisp)
    bkg_iso = eval_iso.compute_npred()

    # merge iem and iso, only one local normalization is fitted
    background_total = bkg_iem + bkg_iso
    background_model = BackgroundModel(background_total)
    background_model.parameters["norm"].min = 0.0

    # Sources model: all 3FHL catalog sources whose position falls inside the ROI
    in_roi = self.FHL3.positions.galactic.contained_by(wcs)
    FHL3_roi = []
    for ks in range(len(self.FHL3.table)):
        if in_roi[ks] == True:
            model = self.FHL3[ks].sky_model()
            model.spatial_model.parameters.freeze_all()  # freeze spatial model
            model.spectral_model.parameters["amplitude"].min = 0.0
            # Bound the spectral slope; parameter name depends on the model type.
            if isinstance(model.spectral_model, PowerLawSpectralModel):
                model.spectral_model.parameters["index"].min = 0.1
                model.spectral_model.parameters["index"].max = 10.0
            else:
                model.spectral_model.parameters["alpha"].min = 0.1
                model.spectral_model.parameters["alpha"].max = 10.0
            FHL3_roi.append(model)
    model_total = SkyModels(FHL3_roi)

    # Dataset
    dataset = MapDataset(
        model=model_total,
        counts=counts,
        exposure=exposure,
        psf=psf_kernel,
        edisp=edisp,
        background_model=background_model,
        mask_fit=mask_fermi,
        name="3FHL_ROI_num" + str(kr),
    )
    # Statistic with catalog parameter values, for comparison with the fitted one.
    cat_stat = dataset.stat_sum()

    datasets = Datasets([dataset])
    fit = Fit(datasets)
    results = fit.run(optimize_opts=self.optimize_opts)
    print("ROI_num", str(kr), "\n", results)
    fit_stat = datasets.stat_sum()

    # Only persist results for converged fits.
    if results.message == "Optimization failed.":
        pass
    else:
        datasets.to_yaml(path=Path(self.resdir), prefix=dataset.name, overwrite=True)
        np.save(
            self.resdir + "/3FHL_ROI_num" + str(kr) + "_covariance.npy",
            results.parameters.covariance,
        )
        np.savez(
            self.resdir + "/3FHL_ROI_num" + str(kr) + "_fit_infos.npz",
            message=results.message,
            stat=[cat_stat, fit_stat],
        )

    exec_time = time() - roi_time
    print("ROI", kr, " time (s): ", exec_time)

    # Flux points for sources assigned to this ROI and above the significance cut.
    for model in FHL3_roi:
        if (
            self.FHL3[model.name].data["ROI_num"] == kr
            and self.FHL3[model.name].data["Signif_Avg"] >= self.sig_cut
        ):
            flux_points = FluxPointsEstimator(
                datasets=datasets,
                e_edges=self.El_flux,
                source=model.name,
                sigma_ul=2.0,
            ).run()
            filename = self.resdir + "/" + model.name + "_flux_points.fits"
            flux_points.write(filename, overwrite=True)

    exec_time = time() - roi_time - exec_time
    print("ROI", kr, " Flux points time (s): ", exec_time)
def psf(geom_true):
    """Gaussian PSF kernel (0.5 deg width) on the true-energy geometry."""
    return PSFKernel.from_gauss(geom_true, sigma=0.5 * u.deg)
"""Plot Fermi PSF.""" import matplotlib.pyplot as plt from gammapy.irf import EnergyDependentTablePSF from gammapy.maps import WcsGeom from gammapy.cube import PSFKernel filename = "$GAMMAPY_DATA/tests/unbundled/fermi/psf.fits" fermi_psf = EnergyDependentTablePSF.read(filename) psf = fermi_psf.table_psf_at_energy(energy="1 GeV") geom = WcsGeom.create(npix=100, binsz=0.01) kernel = PSFKernel.from_table_psf(psf, geom) plt.imshow(kernel.data) plt.colorbar() plt.show()