def dataset():
    path = "$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits"
    data = FluxPoints.read(path)
    data.table["e_ref"] = data.e_ref.to("TeV")
    model = SkyModel(
        spectral_model=PowerLawSpectralModel(
            index=2.3, amplitude="2e-13 cm-2 s-1 TeV-1", reference="1 TeV"
        )
    )
    dataset = FluxPointsDataset(model, data)
    return dataset
def test_flux_point_dataset_serialization(tmp_path):
    path = "$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits"
    data = FluxPoints.read(path)
    data.table["e_ref"] = data.e_ref.to("TeV")
    spectral_model = PowerLawSpectralModel(
        index=2.3, amplitude="2e-13 cm-2 s-1 TeV-1", reference="1 TeV"
    )
    model = SkyModel(spectral_model=spectral_model, name="test_model")
    dataset = FluxPointsDataset(model, data, name="test_dataset")

    Datasets([dataset]).write(tmp_path, prefix="tmp")
    datasets = Datasets.read(tmp_path / "tmp_datasets.yaml", tmp_path / "tmp_models.yaml")
    new_dataset = datasets[0]
    assert_allclose(new_dataset.data.table["dnde"], dataset.data.table["dnde"], 1e-4)
    if dataset.mask_fit is None:
        assert np.all(new_dataset.mask_fit == dataset.mask_safe)
    assert np.all(new_dataset.mask_safe == dataset.mask_safe)
    assert new_dataset.name == "test_dataset"
def get_flux_points(self):
    """Calculate flux points for a specific model component."""
    if not self.fit:
        raise RuntimeError("No results available from Fit.")

    fp_settings = self.config.flux_points
    log.info("Calculating flux points.")
    e_edges = self._make_energy_axis(fp_settings.energy).edges
    flux_point_estimator = FluxPointsEstimator(
        e_edges=e_edges,
        datasets=self.datasets,
        source=fp_settings.source,
        **fp_settings.params,
    )
    fp = flux_point_estimator.run()
    fp.table["is_ul"] = fp.table["ts"] < 4
    self.flux_points = FluxPointsDataset(
        data=fp, models=self.models[fp_settings.source]
    )
    cols = ["e_ref", "ref_flux", "dnde", "dnde_ul", "dnde_err", "is_ul"]
    log.info("\n{}".format(self.flux_points.data.table[cols]))
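# For orientation, a hypothetical driver sketch for the method above. The
# `analysis` object and the call sequence are assumptions inferred from the
# attributes the method reads, not taken from the source: a fit must have
# populated `self.fit` before flux points can be computed.
analysis.run_fit()          # assumed step that populates `analysis.fit`
analysis.get_flux_points()  # reads energy binning and source from the config
print(analysis.flux_points.data.table)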
def test_flux_point_dataset_serialization(tmp_path):
    path = "$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits"
    data = FluxPoints.read(path)
    data.table["e_ref"] = data.e_ref.to("TeV")
    # TODO: remove this duplicate definition once the model is redefined as a SkyModel
    spatial_model = ConstantSpatialModel()
    spectral_model = PowerLawSpectralModel(
        index=2.3, amplitude="2e-13 cm-2 s-1 TeV-1", reference="1 TeV"
    )
    model = SkyModel(spatial_model, spectral_model, name="test_model")
    dataset = FluxPointsDataset(SkyModels([model]), data, name="test_dataset")

    Datasets([dataset]).to_yaml(tmp_path, prefix="tmp")
    datasets = Datasets.from_yaml(tmp_path / "tmp_datasets.yaml", tmp_path / "tmp_models.yaml")
    new_dataset = datasets[0]
    assert_allclose(new_dataset.data.table["dnde"], dataset.data.table["dnde"], 1e-4)
    if dataset.mask_fit is None:
        assert np.all(new_dataset.mask_fit == dataset.mask_safe)
    assert np.all(new_dataset.mask_safe == dataset.mask_safe)
    assert new_dataset.name == "test_dataset"
def get_flux_points(self, source="source"):
    """Calculate flux points for a specific model component.

    Parameters
    ----------
    source : string
        Name of the model component for which to calculate the flux points.
    """
    if not self._validate_fp_settings():
        return False

    # TODO: add "source" to config
    log.info("Calculating flux points.")
    axis_params = self.settings["flux-points"]["fp_binning"]
    e_edges = MapAxis.from_bounds(**axis_params).edges
    flux_point_estimator = FluxPointsEstimator(
        e_edges=e_edges, datasets=self.datasets, source=source
    )
    fp = flux_point_estimator.run()
    fp.table["is_ul"] = fp.table["ts"] < 4
    model = self.model[source].spectral_model.copy()
    self.flux_points = FluxPointsDataset(data=fp, model=model)
    cols = ["e_ref", "ref_flux", "dnde", "dnde_ul", "dnde_err", "is_ul"]
    log.info("\n{}".format(self.flux_points.data.table[cols]))
# drop the flux upper limit values
flux_points = flux_points.drop_ul()


# ## Power Law Fit
#
# First we start with fitting a simple [power law](https://docs.gammapy.org/0.11/api/gammapy.spectrum.models.PowerLaw.html#gammapy.spectrum.models.PowerLaw).

# In[ ]:


pwl = PowerLaw(index=2, amplitude="1e-12 cm-2 s-1 TeV-1", reference="1 TeV")


# After creating the model we run the fit by passing the `'flux_points'` and `'pwl'` objects:

# In[ ]:


dataset_pwl = FluxPointsDataset(pwl, flux_points, likelihood="chi2assym")
fitter = Fit(dataset_pwl)
result_pwl = fitter.run()


# And print the result:

# In[ ]:


print(result_pwl)


# In[ ]:


print(pwl)


# Finally we plot the data points and the best fit model:
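# The plotting cell itself is not part of this excerpt; a minimal sketch,
# assuming the `flux_points` and `pwl` objects from above (the energy range
# is illustrative, not from the original notebook):

# In[ ]:


import astropy.units as u

ax = flux_points.plot(energy_power=2)
pwl.plot(energy_range=[1e-1, 1e2] * u.TeV, ax=ax, energy_power=2)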
fpe = FluxPointsEstimator(datasets=[dataset], e_edges=e_edges)
flux_points = fpe.run()
flux_points.table_formatted


# ### Plot
#
# Let's plot the spectral model and points. You could do it directly, but there is a helper class.
# Note that a spectral uncertainty band, a "butterfly", is drawn, but it is very thin, i.e. barely visible.

# In[ ]:


model.parameters.covariance = result.parameters.covariance
flux_points_dataset = FluxPointsDataset(data=flux_points, model=model)


# In[ ]:


plt.figure(figsize=(8, 6))
flux_points_dataset.peek();


# ## Exercises
#
# * Re-run the analysis above, varying some analysis parameters, e.g.
#     * Select a few other observations
#     * Change the energy band for the map
#     * Change the spectral model for the fit
# Now we plot the flux points and their likelihood profiles. For the plotting of upper limits we choose a threshold of TS < 4.

# In[ ]:


plt.figure(figsize=(8, 5))
flux_points.table["is_ul"] = flux_points.table["ts"] < 4
ax = flux_points.plot(energy_power=2, flux_unit="erg-1 cm-2 s-1", color="darkorange")
flux_points.to_sed_type("e2dnde").plot_likelihood(ax=ax)


# The final plot with the best fit model, flux points and residuals can be quickly made like this:

# In[ ]:


flux_points_dataset = FluxPointsDataset(data=flux_points, model=model_best_joint)


# In[ ]:


plt.figure(figsize=(8, 6))
flux_points_dataset.peek()


# ## Stack observations
#
# An alternative approach to fitting the spectrum is stacking all observations first and then fitting a model. For this we first stack the individual datasets:

# In[ ]:


dataset_stacked = Datasets(datasets_joint).stack_reduce()


# Again we set the model on the dataset we would like to fit (in this case it's only a single one) and pass it to the `Fit` object:
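# The fitting cell is not included in this excerpt; a minimal sketch, assuming
# the stacked dataset from above and the spectral `model` defined earlier in
# the notebook:

# In[ ]:


dataset_stacked.model = model
fit_stacked = Fit([dataset_stacked])
result_stacked = fit_stacked.run()
print(result_stacked)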
dataset = Datasets(datasets).stack_reduce()
dataset.model = model
fpe = FluxPointsEstimator(datasets=[dataset], e_edges=e_edges)
flux_points = fpe.run()
flux_points.table["is_ul"] = flux_points.table["ts"] < 1

amplitude_ref = 0.57 * 19.4e-14 * u.Unit("1 / (cm2 s MeV)")
spec_model_true = PowerLawSpectralModel(
    index=4.5, amplitude=amplitude_ref, reference="20 GeV"
)
flux_points_dataset = FluxPointsDataset(data=flux_points, models=model)


# Now we can plot.

# In[ ]:


plt.figure(figsize=(8, 6))
ax_spectrum, ax_residual = flux_points_dataset.peek()
ax_spectrum.set_ylim([1e-14, 3e-11])
ax_residual.set_ylim([-1.7, 1.7])
spec_model_true.plot(
    ax=ax_spectrum,
def spectrum_result(self):
    """`~gammapy.spectrum.FluxPointsDataset`"""
    return FluxPointsDataset(
        data=self.flux_points, model=self.fit.datasets.datasets[0].model
    )
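# Illustrative usage only (the `analysis` name is an assumption, not from the
# source): the returned FluxPointsDataset supports the same quick-look
# `peek()` method used in the notebook excerpts above.
spectrum = analysis.spectrum_result
spectrum.peek()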
# ## Power Law Fit
#
# First we start with fitting a simple `~gammapy.modeling.models.PowerLawSpectralModel`.

# In[ ]:


pwl = PowerLawSpectralModel(
    index=2, amplitude="1e-12 cm-2 s-1 TeV-1", reference="1 TeV"
)
model = SkyModel(spectral_model=pwl)


# After creating the model we run the fit by passing the `'flux_points'` and `'model'` objects:

# In[ ]:


dataset_pwl = FluxPointsDataset(model, flux_points)
fitter = Fit([dataset_pwl])
result_pwl = fitter.run()


# And print the result:

# In[ ]:


print(result_pwl)


# In[ ]:


print(pwl)


# Finally we plot the data points and the best fit model:
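# The plotting cell is not part of this excerpt; a minimal sketch, assuming
# the `dataset_pwl` object from above (`peek` is the quick-look helper used
# elsewhere in this document):

# In[ ]:


plt.figure(figsize=(8, 6))
dataset_pwl.peek()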
dataset_hess = Datasets(datasets).stack_reduce()
dataset_hess.name = "HESS"
dataset_hess.models = crab_model


# ### HAWC: 1D dataset for flux point fitting
#
# The HAWC flux points are taken from https://arxiv.org/pdf/1905.12518.pdf. These flux points are read from a pre-made FITS file and passed to a `FluxPointsDataset` together with the source spectral model.
#

# In[ ]:


# read flux points from https://arxiv.org/pdf/1905.12518.pdf
filename = "$GAMMAPY_DATA/hawc_crab/HAWC19_flux_points.fits"
flux_points_hawc = FluxPoints.read(filename)
dataset_hawc = FluxPointsDataset(crab_model, flux_points_hawc, name="HAWC")


# ## Datasets serialization
#
# The `datasets` object contains each dataset previously defined.
# It can be saved on disk as datasets.yaml, models.yaml, and several data files specific to each dataset. Then the `datasets` can be rebuilt later from these files.

# In[ ]:


datasets = Datasets([dataset_fermi, dataset_hess, dataset_hawc])

path = Path("crab-3datasets")
path.mkdir(exist_ok=True)

datasets.write(path=path, prefix="crab_10GeV_100TeV", overwrite=True)
filedata = path / "crab_10GeV_100TeV_datasets.yaml"
filemodel = path / "crab_10GeV_100TeV_models.yaml"
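# The read-back step is not shown in this excerpt; a minimal sketch, assuming
# the `filedata` and `filemodel` paths defined above:

# In[ ]:


datasets_read = Datasets.read(filedata, filemodel)
print(datasets_read)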