def diffuse_model():
    """Build a tiny constant-valued diffuse cube model (test fixture)."""
    energy_axis = MapAxis.from_nodes(
        [0.1, 100], name="energy", unit="TeV", interp="log"
    )
    diffuse_map = Map.create(
        npix=(4, 3),
        binsz=2,
        axes=[energy_axis],
        unit="cm-2 s-1 MeV-1 sr-1",
        frame="galactic",
    )
    # Fill every pixel with the same arbitrary value
    diffuse_map.data = diffuse_map.data + 42
    return SkyDiffuseCube(diffuse_map)
def test_read():
    """Check that a Fermi diffuse cube FITS file reads back with expected unit/values."""
    path = "$GAMMAPY_DATA/tests/unbundled/fermi/gll_iem_v02_cutout.fits"
    model = SkyDiffuseCube.read(path)
    assert model.map.unit == "cm-2 s-1 MeV-1 sr-1"

    # Evaluate at a position/energy that falls inside the map
    flux = model.evaluate(0 * u.deg, 0 * u.deg, 100 * u.GeV)
    assert flux.unit == "cm-2 s-1 MeV-1 sr-1"
    assert flux.shape == (1,)
    assert_allclose(flux.value, 1.396424e-12, rtol=1e-5)
def extrapolate_iem(infile, outfile, logEc_extra):
    """Extrapolate a Fermi IEM cube onto new log10(E/MeV) energy nodes.

    Reads the map from ``infile``, log-log linearly extrapolates its data
    along the energy axis to the nodes ``logEc_extra``, writes the result
    to ``outfile``, and returns it wrapped in a ``SkyDiffuseCube``.
    """
    iem = Map.read(infile)

    # Interpolator in log-flux vs log-energy, linear, allowed to extrapolate
    log_energy_nodes = np.log10(iem.geom.axes[0].center.value)
    log_interp = interp1d(
        log_energy_nodes,
        np.log10(iem.data),
        axis=0,
        kind="linear",
        fill_value="extrapolate",
    )
    extrapolated = 10 ** log_interp(logEc_extra)

    # Same image plane, extended energy axis
    energy_axis = MapAxis.from_nodes(
        10 ** logEc_extra, unit="MeV", name="energy", interp="log"
    )
    cube_geom = iem.geom.to_image().to_cube([energy_axis])
    iem_extra_map = Map.from_geom(cube_geom, data=extrapolated.astype("float32"))
    iem_extra_map.unit = "cm-2 s-1 MeV-1 sr-1"
    iem_extra_map.write(outfile, hdu=Path(outfile).stem, overwrite=True)

    # norm=1.1, tilt=0.03 see paper appendix A
    return SkyDiffuseCube(iem_extra_map, norm=1.1, tilt=0.03, name="iem_extrapolated")
def make_datasets_example():
    """Build two example MapDatasets ("gc", "g09") with a shared spectral
    reference parameter and write them to YAML under $GAMMAPY_DATA/tests/models.
    """
    # Define which data to use and print some information
    energy_axis = MapAxis.from_edges(
        np.logspace(-1.0, 1.0, 4), unit="TeV", name="energy", interp="log"
    )
    geom0 = WcsGeom.create(
        skydir=(0, 0),
        binsz=0.1,
        width=(1, 1),
        coordsys="GAL",
        proj="CAR",
        axes=[energy_axis],
    )
    geom1 = WcsGeom.create(
        skydir=(1, 0),
        binsz=0.1,
        width=(1, 1),
        coordsys="GAL",
        proj="CAR",
        axes=[energy_axis],
    )
    geoms = [geom0, geom1]
    sources_coords = [(0, 0), (0.9, 0.1)]
    names = ["gc", "g09"]
    models = []
    # One point source with an exponential-cutoff power law per coordinate pair
    for ind, (lon, lat) in enumerate(sources_coords):
        spatial_model = PointSpatialModel(
            lon_0=lon * u.deg, lat_0=lat * u.deg, frame="galactic"
        )
        spectral_model = ExpCutoffPowerLawSpectralModel(
            index=2 * u.Unit(""),
            amplitude=3e-12 * u.Unit("cm-2 s-1 TeV-1"),
            reference=1.0 * u.TeV,
            lambda_=0.1 / u.TeV,
        )
        model_ecpl = SkyModel(
            spatial_model=spatial_model, spectral_model=spectral_model, name=names[ind]
        )
        models.append(model_ecpl)
    # test to link a spectral parameter: replace model 0's "reference"
    # entry with model 1's Parameter object so both share it
    params0 = models[0].spectral_model.parameters
    params1 = models[1].spectral_model.parameters
    ind = params0.parameters.index(params0["reference"])
    params0.parameters[ind] = params1["reference"]
    # update the sky model (same replacement on the SkyModel's own
    # parameter list, which is a separate list in this API version)
    ind = models[0].parameters.parameters.index(models[0].parameters["reference"])
    models[0].parameters.parameters[ind] = params1["reference"]
    obs_ids = [110380, 111140, 111159]
    data_store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")
    diffuse_model = SkyDiffuseCube.read(
        "$GAMMAPY_DATA/fermi_3fhl/gll_iem_v06_cutout.fits"
    )
    datasets_list = []
    for idx, geom in enumerate(geoms):
        observations = data_store.get_observations(obs_ids)
        stacked = MapDataset.create(geom=geom)
        stacked.background_model.name = "background_irf_" + names[idx]
        maker = MapDatasetMaker(geom=geom, offset_max=4.0 * u.deg)
        # Reduce and stack each observation into the combined dataset
        for obs in observations:
            dataset = maker.run(obs)
            stacked.stack(dataset)
        # Reduce IRFs at the geometry center position
        stacked.psf = stacked.psf.get_psf_kernel(
            position=geom.center_skydir, geom=geom, max_radius="0.3 deg"
        )
        stacked.edisp = stacked.edisp.get_energy_dispersion(
            position=geom.center_skydir, e_reco=energy_axis.edges
        )
        stacked.name = names[idx]
        stacked.model = models[idx] + diffuse_model
        datasets_list.append(stacked)
    datasets = Datasets(datasets_list)
    dataset0 = datasets.datasets[0]
    print("dataset0")
    print("counts sum : ", dataset0.counts.data.sum())
    print("expo sum : ", dataset0.exposure.data.sum())
    print("bkg0 sum : ", dataset0.background_model.evaluate().data.sum())
    path = "$GAMMAPY_DATA/tests/models/gc_example_"
    datasets.to_yaml(path, overwrite=True)
covariance = result.parameters.covariance
# Copy the relevant sub-block of the fit covariance onto the spectral model
# (rows/cols 2:5 — assumes these indices match `spec`'s parameters; TODO confirm)
spec.parameters.covariance = covariance[2:5, 2:5]
energy_range = [0.3, 10] * u.TeV
spec.plot(energy_range=energy_range, energy_power=2)
spec.plot_error(energy_range=energy_range, energy_power=2)

# Apparently our model should be improved by adding a component for diffuse Galactic emission and at least one second point source.

# ### Add Galactic diffuse emission to model

# We use both models at the same time, our diffuse model (the same from the Fermi file used before) and our model for the central source. This time, in order to make it more realistic, we will consider an exponential cut off power law spectral model for the source. We will fit again the normalization and tilt of the background.

# In[ ]:

diffuse_model = SkyDiffuseCube.read(
    "$GAMMAPY_DATA/fermi-3fhl-gc/gll_iem_v06_gc.fits.gz")

# In[ ]:

# Point source near the GC with an exponential-cutoff power law spectrum
spatial_model = PointSpatialModel(lon_0="-0.05 deg", lat_0="-0.05 deg", frame="galactic")
spectral_model = ExpCutoffPowerLawSpectralModel(
    index=2,
    amplitude=3e-12 * u.Unit("cm-2 s-1 TeV-1"),
    reference=1.0 * u.TeV,
    lambda_=0.1 / u.TeV,
)
# NOTE: this statement continues beyond this chunk of the file
model_ecpl = SkyModel(
    spatial_model=spatial_model,
# Interpolate the Fermi diffuse cube onto the exposure map geometry
data = diffuse_galactic_fermi.interp_by_coord(
    {
        "skycoord": coord.skycoord,
        "energy": coord["energy"]
    }, interp=3)
diffuse_galactic = WcsNDMap(exposure.geom, data, unit=diffuse_galactic_fermi.unit)
print(diffuse_galactic.geom)
print(diffuse_galactic.geom.axes[0])

# In[ ]:

diffuse_gal = SkyDiffuseCube(diffuse_galactic)

# In[ ]:

# Plot the first energy slice of the diffuse cube
diffuse_gal.map.slice_by_idx({
    "energy": 0
}).plot(add_cbar=True)

# In[ ]:

# Exposure varies very little with energy at these high energies
energy = np.logspace(1, 3, 10) * u.GeV
# NOTE: this statement continues beyond this chunk of the file
dnde = diffuse_gal.map.interp_by_coord({
    "skycoord": gc_pos,
    "energy": energy
},
def make_datasets_example():
    """Build two example MapDatasets (named "gc" and "g09") with point-source,
    diffuse and background models, and write them to $GAMMAPY_DATA/tests/models.
    """
    # Define which data to use and print some information
    energy_axis = MapAxis.from_edges(np.logspace(-1.0, 1.0, 4),
                                     unit="TeV",
                                     name="energy",
                                     interp="log")
    geom0 = WcsGeom.create(
        skydir=(0, 0),
        binsz=0.1,
        width=(2, 2),
        frame="galactic",
        proj="CAR",
        axes=[energy_axis],
    )
    geom1 = WcsGeom.create(
        skydir=(1, 0),
        binsz=0.1,
        width=(2, 2),
        frame="galactic",
        proj="CAR",
        axes=[energy_axis],
    )
    geoms = [geom0, geom1]
    sources_coords = [(0, 0), (0.9, 0.1)]
    names = ["gc", "g09"]
    models = Models()
    # One point source with an exponential-cutoff power law per coordinate pair
    for idx, (lon, lat) in enumerate(sources_coords):
        spatial_model = PointSpatialModel(lon_0=lon * u.deg,
                                          lat_0=lat * u.deg,
                                          frame="galactic")
        spectral_model = ExpCutoffPowerLawSpectralModel(
            index=2 * u.Unit(""),
            amplitude=3e-12 * u.Unit("cm-2 s-1 TeV-1"),
            reference=1.0 * u.TeV,
            lambda_=0.1 / u.TeV,
        )
        model_ecpl = SkyModel(spatial_model=spatial_model,
                              spectral_model=spectral_model,
                              name=names[idx])
        models.append(model_ecpl)
    # Link the two sources' reference-energy parameter (shared Parameter object)
    models["gc"].spectral_model.reference = models[
        "g09"].spectral_model.reference
    obs_ids = [110380, 111140, 111159]
    data_store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")
    diffuse_model = SkyDiffuseCube.read(
        "$GAMMAPY_DATA/fermi_3fhl/gll_iem_v06_cutout.fits")
    maker = MapDatasetMaker()
    datasets = Datasets()
    observations = data_store.get_observations(obs_ids)
    for idx, geom in enumerate(geoms):
        stacked = MapDataset.create(geom=geom, name=names[idx])
        # Reduce and stack each observation into the combined dataset
        for obs in observations:
            dataset = maker.run(stacked, obs)
            stacked.stack(dataset)
        # Pop the first model (the background created by MapDataset.create)
        # and re-append it after the source and diffuse models
        bkg = stacked.models.pop(0)
        stacked.models = [models[idx], diffuse_model, bkg]
        datasets.append(stacked)
    datasets.write("$GAMMAPY_DATA/tests/models",
                   prefix="gc_example",
                   overwrite=True,
                   write_covariance=False)
def run_region(self, kr, lon, lat, radius):
    """Prepare, fit and store the results for one 3FHL region of interest.

    Builds counts/exposure/PSF/edisp maps for the ROI, constructs the
    combined IEM + isotropic background and the catalog source models,
    runs the fit, writes the fitted datasets and fit statistics, and
    computes flux points for sources passing the significance cut.

    Parameters
    ----------
    kr : int
        ROI number; used in the dataset name and output file names.
    lon, lat : float
        ROI center in Galactic coordinates (deg).
    radius : float
        ROI radius (deg); map width is ``2 * (radius + self.psf_margin)``.
    """
    # TODO: for now we have to read/create the allsky maps each in each job
    # because we can't pickle <functools._lru_cache_wrapper object
    # send this back to init when fixed

    # exposure
    exposure_hpx = Map.read(
        "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz"
    )
    exposure_hpx.unit = "cm2 s"

    # iem
    iem_filepath = BASE_PATH / "data" / "gll_iem_v06_extrapolated.fits"
    iem_fermi_extra = Map.read(iem_filepath)
    # norm=1.1, tilt=0.03 see paper appendix A
    model_iem = SkyDiffuseCube(
        iem_fermi_extra, norm=1.1, tilt=0.03, name="iem_extrapolated"
    )

    # ROI
    roi_time = time()
    ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
    width = 2 * (radius + self.psf_margin)

    # Counts map filled from the event list
    counts = Map.create(
        skydir=ROI_pos,
        width=width,
        proj="CAR",
        frame="galactic",
        binsz=1 / 8.0,
        axes=[self.energy_axis],
        dtype=float,
    )
    counts.fill_by_coord(
        {"skycoord": self.events.radec, "energy": self.events.energy}
    )

    # True-energy geometry sharing the counts WCS
    axis = MapAxis.from_nodes(
        counts.geom.axes[0].center, name="energy_true", unit="GeV", interp="log"
    )
    wcs = counts.geom.wcs
    geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
    coords = geom.get_coord()

    # expo: interpolate the all-sky HEALPix exposure onto the ROI WCS grid
    data = exposure_hpx.interp_by_coord(coords)
    exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

    # read PSF
    psf_kernel = PSFKernel.from_table_psf(
        self.psf, geom, max_radius=self.psf_margin * u.deg
    )

    # Energy Dispersion: diagonal response
    e_true = exposure.geom.axes[0].edges
    e_reco = counts.geom.axes[0].edges
    edisp = EDispKernel.from_diagonal_response(e_true=e_true, e_reco=e_reco)

    # fit mask: exclude a PSF-margin border; shift longitudes by -360 deg when
    # the ROI straddles the lon = 0 wrap so min/max comparisons stay meaningful
    if coords["lon"].min() < 90 * u.deg and coords["lon"].max() > 270 * u.deg:
        coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
    mask = (
        (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
        & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
        & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
        & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg)
    )
    mask_fermi = WcsNDMap(counts.geom, mask)

    # IEM
    eval_iem = MapEvaluator(
        model=model_iem, exposure=exposure, psf=psf_kernel, edisp=edisp
    )
    bkg_iem = eval_iem.compute_npred()

    # ISO
    eval_iso = MapEvaluator(model=self.model_iso, exposure=exposure, edisp=edisp)
    bkg_iso = eval_iso.compute_npred()

    # merge iem and iso, only one local normalization is fitted
    dataset_name = "3FHL_ROI_num" + str(kr)
    background_total = bkg_iem + bkg_iso
    background_model = BackgroundModel(
        background_total, name="bkg_iem+iso", datasets_names=[dataset_name]
    )
    background_model.parameters["norm"].min = 0.0

    # Sources model: catalog sources inside the ROI with bounded parameters
    in_roi = self.FHL3.positions.galactic.contained_by(wcs)
    FHL3_roi = []
    for ks in range(len(self.FHL3.table)):
        # NOTE: was `if in_roi[ks] == True:`; truthiness is the idiomatic form
        if in_roi[ks]:
            model = self.FHL3[ks].sky_model()
            model.spatial_model.parameters.freeze_all()  # freeze spatial
            model.spectral_model.parameters["amplitude"].min = 0.0
            if isinstance(model.spectral_model, PowerLawSpectralModel):
                model.spectral_model.parameters["index"].min = 0.1
                model.spectral_model.parameters["index"].max = 10.0
            else:
                model.spectral_model.parameters["alpha"].min = 0.1
                model.spectral_model.parameters["alpha"].max = 10.0
            FHL3_roi.append(model)
    model_total = Models([background_model] + FHL3_roi)

    # Dataset
    dataset = MapDataset(
        models=model_total,
        counts=counts,
        exposure=exposure,
        psf=psf_kernel,
        edisp=edisp,
        mask_fit=mask_fermi,
        name=dataset_name,
    )
    cat_stat = dataset.stat_sum()  # fit statistic at catalog parameter values

    datasets = Datasets([dataset])
    fit = Fit(datasets)
    results = fit.run(**self.optimize_opts)
    print("ROI_num", str(kr), "\n", results)
    fit_stat = datasets.stat_sum()

    # Persist results only when the optimizer did not fail
    if results.message != "Optimization failed.":
        datasets.write(path=Path(self.resdir), prefix=dataset.name, overwrite=True)
        np.savez(
            self.resdir / f"3FHL_ROI_num{kr}_fit_infos.npz",
            message=results.message,
            stat=[cat_stat, fit_stat],
        )

    exec_time = time() - roi_time
    print("ROI", kr, " time (s): ", exec_time)

    # Flux points for significant sources belonging to this ROI
    for model in FHL3_roi:
        if (
            self.FHL3[model.name].data["ROI_num"] == kr
            and self.FHL3[model.name].data["Signif_Avg"] >= self.sig_cut
        ):
            flux_points = FluxPointsEstimator(
                e_edges=self.El_flux,
                source=model.name,
                n_sigma_ul=2,
            ).run(datasets=datasets)
            filename = self.resdir / f"{model.name}_flux_points.fits"
            flux_points.write(filename, overwrite=True)

    exec_time = time() - roi_time - exec_time
    print("ROI", kr, " Flux points time (s): ", exec_time)
# ### Energy Dispersion # For simplicity we assume a diagonal energy dispersion: # In[ ]: e_true = exposure.geom.axes[0].edges e_reco = counts.geom.axes[0].edges edisp = EnergyDispersion.from_diagonal_response(e_true=e_true, e_reco=e_reco) # ## Background # # Let's compute a background cube, with predicted number of background events per pixel from the diffuse Galactic and isotropic model components. For this, we use the use the [gammapy.cube.MapEvaluator](https://docs.gammapy.org/dev/api/gammapy.cube.MapEvaluator.html) to multiply with the exposure and apply the PSF. The Fermi-LAT energy dispersion at high energies is small, we neglect it here. # In[ ]: model_diffuse = SkyDiffuseCube(diffuse_galactic, name="diffuse") eval_diffuse = MapEvaluator(model=model_diffuse, exposure=exposure, psf=psf_kernel, edisp=edisp) background_gal = eval_diffuse.compute_npred() background_gal.sum_over_axes().plot() print("Background counts from Galactic diffuse: ", background_gal.data.sum()) # In[ ]: model_iso = SkyModel(ConstantSpatialModel(), diffuse_iso, name="diffuse-iso") eval_iso = MapEvaluator(model=model_iso, exposure=exposure, edisp=edisp)