def diffuse_model():
    """Build a flat diffuse sky model on a small 4x3 galactic template map."""
    energy_axis = MapAxis.from_nodes(
        [0.1, 100], name="energy_true", unit="TeV", interp="log"
    )
    template_map = Map.create(
        npix=(4, 3),
        binsz=2,
        axes=[energy_axis],
        unit="cm-2 s-1 MeV-1 sr-1",
        frame="galactic",
    )
    # Constant template value everywhere.
    template_map.data += 42
    return SkyModel(
        PowerLawNormSpectralModel(),
        TemplateSpatialModel(template_map, normalize=False),
    )
def test_fov_bkg_maker_fit(obs_dataset, exclusion_mask):
    """Fit the FoV background with a free tilt and check the fitted values."""
    norm_model = PowerLawNormSpectralModel()
    norm_model.tilt.frozen = False

    maker = FoVBackgroundMaker(
        method="fit",
        exclusion_mask=exclusion_mask,
        spectral_model=norm_model,
    )
    dataset = maker.run(obs_dataset.copy(name="test-fov"))

    fitted = dataset.models[f"{dataset.name}-bkg"].spectral_model
    assert_allclose(fitted.norm.value, 0.901523, rtol=1e-4)
    assert_allclose(fitted.tilt.value, 0.071069, rtol=1e-4)
    # The maker's default model must not be modified by the fit.
    assert_allclose(maker.default_spectral_model.tilt.value, 0.0)
    assert_allclose(maker.default_spectral_model.norm.value, 1.0)
def test_flux_estimator_compound_model():
    """Norm-scan limits derived from a compound (PL * PLNorm) spectral model."""
    power_law = PowerLawSpectralModel()
    power_law.amplitude.min = 1e-15
    power_law.amplitude.max = 1e-10

    norm_component = PowerLawNormSpectralModel()
    norm_component.norm.value = 0.1

    sky_model = SkyModel(spectral_model=power_law * norm_component, name="test")
    estimator = FluxEstimator(source="test", selection_optional=[], reoptimize=True)
    scale_model = estimator.get_scale_model(Models([sky_model]))

    assert_allclose(scale_model.norm.min, 1e-3)
    assert_allclose(scale_model.norm.max, 1e2)
def test_TemplateSpectralModel_compound():
    """Compound of a template and a norm model: evaluation, `*`, round-trip."""
    energy = [1.00e06, 1.25e06, 1.58e06, 1.99e06] * u.MeV
    values = [4.39e-7, 1.96e-7, 8.80e-7, 3.94e-7] * u.Unit("MeV-1 s-1 sr-1")
    template = TemplateSpectralModel(energy=energy, values=values)
    correction = PowerLawNormSpectralModel(norm=2)

    # Explicit construction and the `*` operator must behave identically.
    model = CompoundSpectralModel(template, correction, operator=operator.mul)
    assert np.allclose(model(energy), 2 * values)

    model_mul = template * correction
    assert isinstance(model_mul, CompoundSpectralModel)
    assert np.allclose(model_mul(energy), 2 * values)

    # Serialization round-trip through the spectral-model registry.
    model_dict = model.to_dict()
    assert model_dict["spectral"]["operator"] == "mul"
    model_cls = SPECTRAL_MODEL_REGISTRY.get_cls(model_dict["spectral"]["type"])
    restored = model_cls.from_dict(model_dict)
    assert isinstance(restored, CompoundSpectralModel)
    assert np.allclose(restored(energy), 2 * values)
def test_flux_estimator_norm_range_template():
    """Norm limits set on a template sky model propagate to the scale model."""
    energy_axis = MapAxis.from_energy_bounds(
        0.1, 10, 3.0, unit="TeV", name="energy_true"
    )
    template_map = WcsNDMap.create(
        npix=10, axes=[energy_axis], unit="cm-2 s-1 sr-1 TeV-1"
    )
    sky_model = SkyModel(
        spectral_model=PowerLawNormSpectralModel(),
        spatial_model=TemplateSpatialModel(template_map, normalize=False),
        name="test",
    )
    sky_model.spectral_model.norm.max = 10
    sky_model.spectral_model.norm.min = 0

    estimator = FluxEstimator(source="test", selection_optional=[], reoptimize=True)
    scale_model = estimator.get_scale_model(Models([sky_model]))

    assert_allclose(scale_model.norm.min, 0)
    assert_allclose(scale_model.norm.max, 10)
    assert scale_model.norm.interp == "log"
# Parametrized spectral-model test cases (fragment: the enclosing list
# literal opens before and closes after this view; the last dict is cut off).
# Each entry pairs a model instance with reference values the test asserts.
dict(
    name="powerlaw",
    model=PowerLawSpectralModel(
        index=2 * u.Unit(""),
        amplitude=4 / u.cm ** 2 / u.s / u.TeV,
        reference=1 * u.TeV,
    ),
    # Expected model value at 2 TeV and integrals over 1-10 TeV.
    val_at_2TeV=u.Quantity(1.0, "cm-2 s-1 TeV-1"),
    integral_1_10TeV=u.Quantity(3.6, "cm-2 s-1"),
    eflux_1_10TeV=u.Quantity(9.210340371976184, "TeV cm-2 s-1"),
),
dict(
    name="norm-powerlaw",
    model=PowerLawNormSpectralModel(
        tilt=2 * u.Unit(""),
        norm=4.0 * u.Unit(""),
        reference=1 * u.TeV,
    ),
    # Norm model is dimensionless, so expected values carry no flux unit.
    val_at_2TeV=u.Quantity(1.0, ""),
    integral_1_10TeV=u.Quantity(3.6, "TeV"),
    eflux_1_10TeV=u.Quantity(9.210340371976184, "TeV2"),
),
dict(
    name="powerlaw2",
    model=PowerLaw2SpectralModel(
        amplitude=u.Quantity(2.9227116204223784, "cm-2 s-1"),
        index=2.3 * u.Unit(""),
        emin=1 * u.TeV,
        emax=10 * u.TeV,
    ),
    val_at_2TeV=u.Quantity(4 * 2.0 ** (-2.3), "cm-2 s-1 TeV-1"),
# This operation create a new `gammapy.modeling.models.CompoundSpectralModel` # from gammapy.modeling.models import ( Models, PowerLawNormSpectralModel, SkyModel, TemplateSpectralModel, ) energy_range = [0.1, 1] * u.TeV energy = np.array([1e6, 3e6, 1e7, 3e7]) * u.MeV values = np.array([4.4e-38, 2.0e-38, 8.8e-39, 3.9e-39]) * u.Unit("MeV-1 s-1 cm-2") template = TemplateSpectralModel(energy=energy, values=values) template.plot(energy_range) plt.grid(which="both") new_model = template * PowerLawNormSpectralModel(norm=2, tilt=0) print(new_model) # %% # YAML representation # ------------------- # Here is an example YAML file using the model: model = SkyModel(spectral_model=template, name="template-model") models = Models([model]) print(models.to_yaml())
def run_region(self, kr, lon, lat, radius):
    """Prepare, fit and post-process a single ROI of the 3FHL analysis.

    Parameters
    ----------
    kr : int
        ROI number, used for dataset naming, logging and output file names.
    lon, lat : float
        Galactic coordinates of the ROI center (degrees — assumed from the
        ``unit="deg"`` passed to `SkyCoord` below).
    radius : float
        ROI radius; the map width is ``2 * (radius + self.psf_margin)``.
    """
    # TODO: for now we have to read/create the allsky maps each in each job
    # because we can't pickle <functools._lru_cache_wrapper object
    # send this back to init when fixed
    log.info(f"ROI {kr}: loading data")

    # Exposure: all-sky HEALPix cube; the unit is set by hand after reading.
    exposure_hpx = Map.read(
        "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz")
    exposure_hpx.unit = "cm2 s"

    # Galactic interstellar emission model (IEM), extrapolated template.
    iem_filepath = BASE_PATH / "data" / "gll_iem_v06_extrapolated.fits"
    iem_fermi_extra = Map.read(iem_filepath)
    # norm=1.1, tilt=0.03 see paper appendix A
    model_iem = SkyModel(
        PowerLawNormSpectralModel(norm=1.1, tilt=0.03),
        TemplateSpatialModel(iem_fermi_extra, normalize=False),
        name="iem_extrapolated",
    )

    # ROI geometry: square CAR map around the ROI center, widened by the
    # PSF margin on each side; timing starts here for the per-ROI report.
    roi_time = time()
    ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
    width = 2 * (radius + self.psf_margin)

    # Counts map filled directly from the event list coordinates/energies.
    counts = Map.create(
        skydir=ROI_pos,
        width=width,
        proj="CAR",
        frame="galactic",
        binsz=1 / 8.0,
        axes=[self.energy_axis],
        dtype=float,
    )
    counts.fill_by_coord({
        "skycoord": self.events.radec,
        "energy": self.events.energy
    })

    # True-energy geometry sharing the counts WCS, used for the exposure map.
    axis = MapAxis.from_nodes(counts.geom.axes[0].center,
                              name="energy_true",
                              unit="GeV",
                              interp="log")
    wcs = counts.geom.wcs
    geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
    coords = geom.get_coord()

    # Exposure: interpolate the all-sky cube onto the ROI geometry.
    data = exposure_hpx.interp_by_coord(coords)
    exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

    # PSF kernel truncated at the PSF margin.
    psf_kernel = PSFKernel.from_table_psf(self.psf,
                                          geom,
                                          max_radius=self.psf_margin * u.deg)

    # Energy dispersion: diagonal response between true and reco energy axes.
    e_true = exposure.geom.axes[0].edges
    e_reco = counts.geom.axes[0].edges
    edisp = EDispKernel.from_diagonal_response(e_true=e_true, e_reco=e_reco)

    # Fit mask: shrink the ROI by the PSF margin on every side. If the ROI
    # straddles the lon=0/360 wrap, shift longitudes to a continuous range
    # first so min/max comparisons are meaningful.
    if coords["lon"].min() < 90 * u.deg and coords["lon"].max(
    ) > 270 * u.deg:
        coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
    mask = (
        (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
        & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
        & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
        & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg))
    mask_fermi = WcsNDMap(counts.geom, mask)

    log.info(f"ROI {kr}: pre-computing diffuse")

    # Predicted counts for the IEM component (PSF + edisp applied).
    eval_iem = MapEvaluator(model=model_iem,
                            exposure=exposure,
                            psf=psf_kernel,
                            edisp=edisp)
    bkg_iem = eval_iem.compute_npred()

    # Predicted counts for the isotropic component (no PSF passed here).
    eval_iso = MapEvaluator(model=self.model_iso,
                            exposure=exposure,
                            edisp=edisp)
    bkg_iso = eval_iso.compute_npred()

    # merge iem and iso, only one local normalization is fitted
    dataset_name = "3FHL_ROI_num" + str(kr)
    background_total = bkg_iem + bkg_iso
    background_model = BackgroundModel(background_total,
                                       name="bkg_iem+iso",
                                       datasets_names=[dataset_name])
    background_model.parameters["norm"].min = 0.0

    # Catalog sources inside the ROI: freeze spatial parameters and bound
    # the spectral parameters (amplitude >= 0, index/alpha in [0.1, 10]).
    in_roi = self.FHL3.positions.galactic.contained_by(wcs)
    FHL3_roi = []
    for ks in range(len(self.FHL3.table)):
        if in_roi[ks] == True:
            model = self.FHL3[ks].sky_model()
            model.spatial_model.parameters.freeze_all()  # freeze spatial
            model.spectral_model.parameters["amplitude"].min = 0.0
            if isinstance(model.spectral_model, PowerLawSpectralModel):
                model.spectral_model.parameters["index"].min = 0.1
                model.spectral_model.parameters["index"].max = 10.0
            else:
                # Non-power-law catalog models expose "alpha" instead.
                model.spectral_model.parameters["alpha"].min = 0.1
                model.spectral_model.parameters["alpha"].max = 10.0
            FHL3_roi.append(model)
    model_total = Models([background_model] + FHL3_roi)

    # Assemble the dataset; record the stat of the unfitted catalog model
    # so it can be compared with the post-fit stat below.
    dataset = MapDataset(
        models=model_total,
        counts=counts,
        exposure=exposure,
        psf=psf_kernel,
        edisp=edisp,
        mask_fit=mask_fermi,
        name=dataset_name,
    )
    cat_stat = dataset.stat_sum()
    datasets = Datasets([dataset])

    log.info(f"ROI {kr}: running fit")
    fit = Fit(datasets)
    results = fit.run(**self.optimize_opts)
    print("ROI_num", str(kr), "\n", results)
    fit_stat = datasets.stat_sum()

    # Persist fitted datasets and the stat comparison unless the optimizer
    # reported failure (matched via its message string).
    if results.message != "Optimization failed.":
        datasets.write(path=Path(self.resdir),
                       prefix=dataset.name,
                       overwrite=True)
        np.savez(
            self.resdir / f"3FHL_ROI_num{kr}_fit_infos.npz",
            message=results.message,
            stat=[cat_stat, fit_stat],
        )
    exec_time = time() - roi_time
    print("ROI", kr, " time (s): ", exec_time)

    # Flux points for sources assigned to this ROI that pass the
    # significance cut; one FITS file per source.
    log.info(f"ROI {kr}: running flux points")
    for model in FHL3_roi:
        if (self.FHL3[model.name].data["ROI_num"] == kr
                and self.FHL3[model.name].data["Signif_Avg"] >= self.sig_cut):
            flux_points = FluxPointsEstimator(
                e_edges=self.El_flux,
                source=model.name,
                n_sigma_ul=2,
            ).run(datasets=datasets)
            filename = self.resdir / f"{model.name}_flux_points.fits"
            flux_points.write(filename, overwrite=True)

    exec_time = time() - roi_time - exec_time
    print("ROI", kr, " Flux points time (s): ", exec_time)