def test_confidence_frozen(backend):
    dataset = MyDataset()
    dataset.parameters["x"].frozen = True
    fit = Fit(dataset)
    fit.optimize(backend=backend)
    result = fit.confidence("y")

    assert result["success"] is True
    assert_allclose(result["errp"], 1)
    assert_allclose(result["errn"], 1)

def test_confidence(backend):
    dataset = MyDataset()
    fit = Fit(dataset)
    fit.optimize(backend=backend)
    result = fit.confidence("x")

    assert result["success"] is True
    assert_allclose(result["errp"], 1)
    assert_allclose(result["errn"], 1)

    # Check that original value state wasn't changed
    assert_allclose(dataset.parameters["x"].value, 2)

def test_minos_contour():
    dataset = MyDataset()
    dataset.parameters["x"].frozen = True
    fit = Fit(dataset)
    fit.optimize(backend="minuit")
    result = fit.minos_contour("y", "z")

    assert result["success"] is True

    x = result["x"]
    assert_allclose(len(x), 10)
    assert_allclose(x[0], 299, rtol=1e-5)
    assert_allclose(x[-1], 299.133975, rtol=1e-5)

    y = result["y"]
    assert_allclose(len(y), 10)
    assert_allclose(y[0], 0.04, rtol=1e-5)
    assert_allclose(y[-1], 0.54, rtol=1e-5)

    # Check that original value state wasn't changed
    assert_allclose(dataset.parameters["y"].value, 300)

def test_optimize(backend):
    dataset = MyDataset()
    fit = Fit(dataset)
    result = fit.optimize(backend=backend)
    pars = dataset.parameters

    assert result.success is True
    assert_allclose(result.total_stat, 0)

    assert_allclose(pars["x"].value, 2, rtol=1e-3)
    assert_allclose(pars["y"].value, 3e2, rtol=1e-3)
    assert_allclose(pars["z"].value, 4e-2, rtol=1e-3)

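# The tests above rely on a `MyDataset` fixture that is not shown in this
# excerpt. Below is a minimal sketch of what such a fixture could look like
# (an assumption for illustration, not necessarily the actual fixture): a
# quadratic likelihood with its minimum at x=2, y=3e2, z=4e-2, so the best-fit
# total_stat is 0 and the parabolic 1-sigma errors on x and y are 1, consistent
# with the assertions. The import path follows the `~gammapy.utils.fitting`
# references used in the docstrings below; the exact `likelihood` signature of
# the base class is also an assumption.
from gammapy.utils.fitting import Dataset, Parameter, Parameters


class MyDatasetSketch(Dataset):  # hypothetical name, to not shadow the real fixture
    def __init__(self):
        self.data_shape = (1,)
        self.parameters = Parameters(
            [Parameter("x", 2), Parameter("y", 3e2), Parameter("z", 4e-2)]
        )

    def likelihood(self, parameters=None, mask=None):
        # One unit-curvature parabola per parameter: zero at the optimum,
        # which yields 1-sigma parabolic errors of 1.
        x, y, z = [par.value for par in self.parameters]
        return (x - 2) ** 2 + (y - 3e2) ** 2 + (z - 4e-2) ** 2
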
class FluxPointsEstimator:
    """Flux points estimator.

    Estimates flux points for a given list of spectral datasets, energies and
    spectral model.

    To estimate the flux point the amplitude of the reference spectral model is
    fitted within the energy range defined by the energy group. This is done
    for each group independently. The amplitude is re-normalized using the
    "norm" parameter, which specifies the deviation of the flux from the
    reference model in this energy group. See
    https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/binned_likelihoods/index.html
    for details.

    The method is also described in the Fermi-LAT catalog paper
    https://ui.adsabs.harvard.edu/#abs/2015ApJS..218...23A
    or the HESS Galactic Plane Survey paper
    https://ui.adsabs.harvard.edu/#abs/2018A%26A...612A...1H

    Parameters
    ----------
    datasets : list of `~gammapy.spectrum.SpectrumDataset`
        Spectrum datasets.
    e_edges : `~astropy.units.Quantity`
        Energy edges of the flux point bins.
    source : str
        For which source in the model to compute the flux points.
    norm_min : float
        Minimum value for the norm used for the likelihood profile evaluation.
    norm_max : float
        Maximum value for the norm used for the likelihood profile evaluation.
    norm_n_values : int
        Number of norm values used for the likelihood profile.
    norm_values : `numpy.ndarray`
        Array of norm values to be used for the likelihood profile.
    sigma : int
        Sigma to use for asymmetric error computation.
    sigma_ul : int
        Sigma to use for upper limit computation.
    reoptimize : bool
        Re-optimize other free model parameters.
    """

    def __init__(
        self,
        datasets,
        e_edges,
        source="",
        norm_min=0.2,
        norm_max=5,
        norm_n_values=11,
        norm_values=None,
        sigma=1,
        sigma_ul=2,
        reoptimize=False,
    ):
        # make a copy to not modify the input datasets
        if not isinstance(datasets, Datasets):
            datasets = Datasets(datasets)

        if not (datasets.is_all_same_type and datasets.is_all_same_shape):
            raise ValueError(
                "Flux point estimation requires a list of datasets"
                " of the same type and data shape."
            )

        self.datasets = datasets.copy()
        self.e_edges = e_edges

        dataset = self.datasets.datasets[0]

        if isinstance(dataset, SpectrumDatasetOnOff):
            model = dataset.model
        else:
            model = dataset.model[source].spectral_model

        self.model = ScaleModel(model)
        self.model.norm.min = 0
        self.model.norm.max = 1e3

        if norm_values is None:
            norm_values = np.logspace(
                np.log10(norm_min), np.log10(norm_max), norm_n_values
            )

        self.norm_values = norm_values
        self.sigma = sigma
        self.sigma_ul = sigma_ul
        self.reoptimize = reoptimize
        self.source = source
        self.fit = Fit(self.datasets)

        self._set_scale_model()

    def _freeze_parameters(self):
        # freeze other parameters
        for par in self.datasets.parameters:
            if par is not self.model.norm:
                par.frozen = True

    def _freeze_empty_background(self):
        from ..cube import MapDataset

        counts_all = self.estimate_counts()["counts"]

        for counts, dataset in zip(counts_all, self.datasets.datasets):
            if isinstance(dataset, MapDataset) and counts == 0:
                if dataset.background_model is not None:
                    dataset.background_model.parameters.freeze_all()

    def _set_scale_model(self):
        # set the model on all datasets
        for dataset in self.datasets.datasets:
            if isinstance(dataset, SpectrumDatasetOnOff):
                dataset.model = self.model
            else:
                dataset.model[self.source].spectral_model = self.model

    @property
    def ref_model(self):
        return self.model.model

    @property
    def e_groups(self):
        """Energy grouping table (`~astropy.table.Table`)."""
        dataset = self.datasets.datasets[0]
        if isinstance(dataset, SpectrumDatasetOnOff):
            energy_axis = dataset.counts.energy
        else:
            energy_axis = dataset.counts.geom.get_axis_by_name("energy")
        return energy_axis.group_table(self.e_edges)

    def __str__(self):
        s = "{}:\n".format(self.__class__.__name__)
        s += str(self.datasets) + "\n"
        s += str(self.e_edges) + "\n"
        s += str(self.model) + "\n"
        return s

    def run(self, steps="all"):
        """Run the flux point estimator for all energy groups.

        Parameters
        ----------
        steps : list of str
            Which steps to execute. See `estimate_flux_point` for details
            and available options.

        Returns
        -------
        flux_points : `FluxPoints`
            Estimated flux points.
        """
        rows = []
        for e_group in self.e_groups:
            if e_group["bin_type"].strip() != "normal":
                log.debug("Skipping under-/overflow bin in flux point estimation.")
                continue

            row = self.estimate_flux_point(e_group, steps=steps)
            rows.append(row)

        meta = OrderedDict([("SED_TYPE", "likelihood")])
        table = table_from_row_data(rows=rows, meta=meta)
        return FluxPoints(table).to_sed_type("dnde")

    def _energy_mask(self, e_group):
        energy_mask = np.zeros(self.datasets.datasets[0].data_shape)
        energy_mask[e_group["idx_min"] : e_group["idx_max"] + 1] = 1
        return energy_mask.astype(bool)

    def estimate_flux_point(self, e_group, steps="all"):
        """Estimate flux point for a single energy group.

        Parameters
        ----------
        e_group : `~astropy.table.Row`
            Energy group to compute the flux point for.
        steps : list of str
            Which steps to execute. Available options are:

                * "err": estimate symmetric error.
                * "errp-errn": estimate asymmetric errors.
                * "ul": estimate upper limits.
                * "ts": estimate ts and sqrt(ts) values.
                * "norm-scan": estimate likelihood profiles.

            By default all steps are executed.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        e_min, e_max = e_group["energy_min"], e_group["energy_max"]
        # Put at log center of the bin
        e_ref = np.sqrt(e_min * e_max)

        result = OrderedDict(
            [
                ("e_ref", e_ref),
                ("e_min", e_min),
                ("e_max", e_max),
                ("ref_dnde", self.ref_model(e_ref)),
                ("ref_flux", self.ref_model.integral(e_min, e_max)),
                ("ref_eflux", self.ref_model.energy_flux(e_min, e_max)),
                ("ref_e2dnde", self.ref_model(e_ref) * e_ref ** 2),
            ]
        )

        contribute_to_likelihood = False

        for dataset in self.datasets.datasets:
            dataset.mask_fit = self._energy_mask(e_group)
            mask = dataset.mask_fit

            if dataset.mask_safe is not None:
                mask &= dataset.mask_safe

            contribute_to_likelihood |= mask.any()

        if not contribute_to_likelihood:
            raise ValueError(
                "No dataset contributes to the likelihood between"
                " {e_min:.3f} and {e_max:.3f}. Please adapt the"
                " flux point energy edges or check the dataset masks.".format(
                    e_min=e_min, e_max=e_max
                )
            )

        with self.datasets.parameters.restore_values:
            self._freeze_empty_background()

            if not self.reoptimize:
                self._freeze_parameters()

            result.update(self.estimate_norm())

            if not result.pop("success"):
                log.warning(
                    "Fit failed for flux point between {e_min:.3f} and"
                    " {e_max:.3f}, setting NaN.".format(e_min=e_min, e_max=e_max)
                )

            if steps == "all":
                steps = ["err", "counts", "errp-errn", "ul", "ts", "norm-scan"]

            if "err" in steps:
                result.update(self.estimate_norm_err())

            if "counts" in steps:
                result.update(self.estimate_counts())

            if "errp-errn" in steps:
                result.update(self.estimate_norm_errn_errp())

            if "ul" in steps:
                result.update(self.estimate_norm_ul())

            if "ts" in steps:
                result.update(self.estimate_norm_ts())

            if "norm-scan" in steps:
                result.update(self.estimate_norm_scan())

        return result

    def estimate_norm_errn_errp(self):
        """Estimate asymmetric errors for a flux point.

        Returns
        -------
        result : dict
            Dict with asymmetric errors for the flux point norm.
        """
        result = self.fit.confidence(parameter=self.model.norm, sigma=self.sigma)
        return {"norm_errp": result["errp"], "norm_errn": result["errn"]}

    def estimate_norm_err(self):
        """Estimate covariance errors for a flux point.

        Returns
        -------
        result : dict
            Dict with symmetric error for the flux point norm.
        """
        result = self.fit.covariance()
        norm_err = result.parameters.error(self.model.norm)
        return {"norm_err": norm_err}

    def estimate_counts(self):
        """Estimate counts for the flux point.

        Returns
        -------
        result : dict
            Dict with an array of counts for the flux point, one entry per
            dataset.
        """
        counts = []
        for dataset in self.datasets.datasets:
            mask = dataset.mask_fit
            if dataset.mask_safe is not None:
                mask &= dataset.mask_safe
            counts.append(dataset.counts.data[mask].sum())

        return {"counts": np.array(counts, dtype=int)}

    def estimate_norm_ul(self):
        """Estimate upper limit for a flux point.

        Returns
        -------
        result : dict
            Dict with upper limit for the flux point norm.
        """
        norm = self.model.norm

        # TODO: the minuit backend has convergence problems when the likelihood
        # is not of parabolic shape, which is the case when there are zero
        # counts in the energy bin. For that case we switch to the scipy backend.
        counts = self.estimate_counts()["counts"]

        if np.all(counts == 0):
            result = self.fit.confidence(
                parameter=norm,
                sigma=self.sigma_ul,
                backend="scipy",
                reoptimize=self.reoptimize,
            )
        else:
            result = self.fit.confidence(parameter=norm, sigma=self.sigma_ul)

        return {"norm_ul": result["errp"] + norm.value}

    def estimate_norm_ts(self):
        """Estimate ts and sqrt(ts) for the flux point.

        Returns
        -------
        result : dict
            Dict with ts and sqrt(ts) for the flux point.
        """
        loglike = self.datasets.likelihood()

        # store best fit amplitude, set amplitude of fit model to zero
        self.model.norm.value = 0
        self.model.norm.frozen = True

        if self.reoptimize:
            _ = self.fit.optimize()

        loglike_null = self.datasets.likelihood()

        # compute sqrt TS
        ts = np.abs(loglike_null - loglike)
        sqrt_ts = np.sqrt(ts)
        return {"sqrt_ts": sqrt_ts, "ts": ts}

    def estimate_norm_scan(self):
        """Estimate likelihood profile for the norm parameter.

        Returns
        -------
        result : dict
            Dict with norm_scan and dloglike_scan for the flux point.
        """
        result = self.fit.likelihood_profile(
            self.model.norm, values=self.norm_values, reoptimize=self.reoptimize
        )
        dloglike_scan = result["likelihood"]
        return {"norm_scan": result["values"], "dloglike_scan": dloglike_scan}

    def estimate_norm(self):
        """Fit norm of the flux point.

        Returns
        -------
        result : dict
            Dict with "norm" and "loglike" for the flux point.
        """
        # start optimization with norm=1
        self.model.norm.value = 1.0
        self.model.norm.frozen = False

        result = self.fit.optimize()

        if result.success:
            norm = self.model.norm.value
        else:
            norm = np.nan

        return {"norm": norm, "loglike": result.total_stat, "success": result.success}

class LightCurveEstimator:
    """Estimate flux points for a given list of datasets, one per time bin.

    Parameters
    ----------
    datasets : list of `~gammapy.spectrum.SpectrumDataset` or `~gammapy.cube.MapDataset`
        Spectrum or map datasets.
    source : str
        For which source in the model to compute the flux points. Default is ''.
    norm_min : float
        Minimum value for the norm used for the likelihood profile evaluation.
    norm_max : float
        Maximum value for the norm used for the likelihood profile evaluation.
    norm_n_values : int
        Number of norm values used for the likelihood profile.
    norm_values : `numpy.ndarray`
        Array of norm values to be used for the likelihood profile.
    sigma : int
        Sigma to use for asymmetric error computation.
    sigma_ul : int
        Sigma to use for upper limit computation.
    reoptimize : bool
        Re-optimize other parameters during the likelihood scan.
    """

    def __init__(
        self,
        datasets,
        source="",
        norm_min=0.2,
        norm_max=5,
        norm_n_values=11,
        norm_values=None,
        sigma=1,
        sigma_ul=2,
        reoptimize=False,
    ):
        if not isinstance(datasets, Datasets):
            datasets = Datasets(datasets)

        self.datasets = datasets

        if not (datasets.is_all_same_type and datasets.is_all_same_shape):
            raise ValueError(
                "Light curve estimation requires a list of datasets"
                " of the same type and data shape."
            )

        dataset = self.datasets.datasets[0]

        if isinstance(dataset, SpectrumDatasetOnOff):
            model = dataset.model
        else:
            model = dataset.model[source].spectral_model

        self.model = ScaleModel(model)
        self.model.norm.min = 0
        self.model.norm.max = 1e5

        if norm_values is None:
            norm_values = np.logspace(
                np.log10(norm_min), np.log10(norm_max), norm_n_values
            )

        self.norm_values = norm_values
        self.sigma = sigma
        self.sigma_ul = sigma_ul
        self.reoptimize = reoptimize
        self.source = source

        self._set_scale_model()

    def _set_scale_model(self):
        # set the model on all datasets
        for dataset in self.datasets.datasets:
            if isinstance(dataset, SpectrumDatasetOnOff):
                dataset.model = self.model
            else:
                dataset.model[self.source].spectral_model = self.model

    @property
    def ref_model(self):
        return self.model.model

    def run(self, e_ref, e_min, e_max, steps="all"):
        """Run light curve extraction.

        Normalize integral and energy flux between e_min and e_max.

        Parameters
        ----------
        e_ref : `~astropy.units.Quantity`
            Reference energy of the dnde flux normalization.
        e_min : `~astropy.units.Quantity`
            Minimum energy of the integral and energy flux normalization interval.
        e_max : `~astropy.units.Quantity`
            Maximum energy of the integral and energy flux normalization interval.
        steps : list of str
            Which steps to execute. Available options are:

                * "err": estimate symmetric error.
                * "errp-errn": estimate asymmetric errors.
                * "ul": estimate upper limits.
                * "ts": estimate ts and sqrt(ts) values.
                * "norm-scan": estimate likelihood profiles.

            By default all steps are executed.

        Returns
        -------
        lightcurve : `~gammapy.time.LightCurve`
            The light curve object.
        """
        self.e_ref = e_ref
        self.e_min = e_min
        self.e_max = e_max

        rows = []
        for dataset in self.datasets.datasets:
            row = {
                "time_min": dataset.counts.meta["t_start"].mjd,
                "time_max": dataset.counts.meta["t_stop"].mjd,
            }
            row.update(self.estimate_time_bin_flux(dataset, steps))
            rows.append(row)

        meta = OrderedDict([("SED_TYPE", "likelihood")])
        table = table_from_row_data(rows=rows, meta=meta)
        table = FluxPoints(table).to_sed_type("flux").table
        return LightCurve(table)

    def estimate_time_bin_flux(self, dataset, steps="all"):
        """Estimate flux point for a single time bin.

        Parameters
        ----------
        dataset : `~gammapy.utils.fitting.Dataset`
            Dataset of the time bin to compute the flux point for.
        steps : list of str
            Which steps to execute. Available options are:

                * "err": estimate symmetric error.
                * "errp-errn": estimate asymmetric errors.
                * "ul": estimate upper limits.
                * "ts": estimate ts and sqrt(ts) values.
                * "norm-scan": estimate likelihood profiles.

            By default all steps are executed.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        self.fit = Fit(dataset)

        result = OrderedDict(
            [
                ("e_ref", self.e_ref),
                ("e_min", self.e_min),
                ("e_max", self.e_max),
                ("ref_dnde", self.ref_model(self.e_ref)),
                ("ref_flux", self.ref_model.integral(self.e_min, self.e_max)),
                ("ref_eflux", self.ref_model.energy_flux(self.e_min, self.e_max)),
                ("ref_e2dnde", self.ref_model(self.e_ref) * self.e_ref ** 2),
            ]
        )

        result.update(self.estimate_norm())

        if not result.pop("success"):
            log.warning(
                "Fit failed for time bin between {t_min} and {t_max},"
                " setting NaN.".format(
                    t_min=dataset.counts.meta["t_start"],
                    t_max=dataset.counts.meta["t_stop"],
                )
            )

        if steps == "all":
            steps = ["err", "counts", "errp-errn", "ul", "ts", "norm-scan"]

        if "err" in steps:
            result.update(self.estimate_norm_err())

        if "counts" in steps:
            result.update(self.estimate_counts(dataset))

        if "errp-errn" in steps:
            result.update(self.estimate_norm_errn_errp())

        if "ul" in steps:
            result.update(self.estimate_norm_ul(dataset))

        if "ts" in steps:
            result.update(self.estimate_norm_ts())

        if "norm-scan" in steps:
            result.update(self.estimate_norm_scan())

        return result

    # TODO: most of the following code is copied from FluxPointsEstimator,
    # can it be restructured?
    def estimate_norm_errn_errp(self):
        """Estimate asymmetric errors for a flux point.

        Returns
        -------
        result : dict
            Dict with asymmetric errors for the flux point norm.
        """
        result = self.fit.confidence(parameter=self.model.norm, sigma=self.sigma)
        return {"norm_errp": result["errp"], "norm_errn": result["errn"]}

    def estimate_norm_err(self):
        """Estimate covariance errors for a flux point.

        Returns
        -------
        result : dict
            Dict with symmetric error for the flux point norm.
        """
        result = self.fit.covariance()
        norm_err = result.parameters.error(self.model.norm)
        return {"norm_err": norm_err}

    def estimate_counts(self, dataset):
        """Estimate counts for the flux point.

        Parameters
        ----------
        dataset : `~gammapy.utils.fitting.Dataset`
            The dataset object.

        Returns
        -------
        result : dict
            Dict with the counts for the flux point.
        """
        # TODO: use the e_min and e_max interval for the counts calculation
        # TODO: add off counts and excess for DatasetOnOff?
        # TODO: this may require a loop once we support Datasets per time bin
        mask = dataset.mask
        if dataset.mask_safe is not None:
            mask &= dataset.mask_safe

        counts = dataset.counts.data[mask].sum()
        return {"counts": counts}

    def estimate_norm_ul(self, dataset):
        """Estimate upper limit for a flux point.

        Returns
        -------
        result : dict
            Dict with upper limit for the flux point norm.
        """
        norm = self.model.norm

        # TODO: the minuit backend has convergence problems when the likelihood
        # is not of parabolic shape, which is the case when there are zero
        # counts in the bin. For that case we switch to the scipy backend.
        counts = self.estimate_counts(dataset)["counts"]

        if np.all(counts == 0):
            result = self.fit.confidence(
                parameter=norm,
                sigma=self.sigma_ul,
                backend="scipy",
                reoptimize=self.reoptimize,
            )
        else:
            result = self.fit.confidence(parameter=norm, sigma=self.sigma_ul)

        return {"norm_ul": result["errp"] + norm.value}

    def estimate_norm_ts(self):
        """Estimate ts and sqrt(ts) for the flux point.

        Returns
        -------
        result : dict
            Dict with ts and sqrt(ts) for the flux point.
        """
        loglike = self.datasets.likelihood()

        # store best fit amplitude, set amplitude of fit model to zero
        self.model.norm.value = 0
        self.model.norm.frozen = True

        if self.reoptimize:
            _ = self.fit.optimize()

        loglike_null = self.datasets.likelihood()

        # compute sqrt TS
        ts = np.abs(loglike_null - loglike)
        sqrt_ts = np.sqrt(ts)
        return {"sqrt_ts": sqrt_ts, "ts": ts}

    def estimate_norm_scan(self):
        """Estimate likelihood profile for the norm parameter.

        Returns
        -------
        result : dict
            Dict with norm_scan and dloglike_scan for the flux point.
        """
        result = self.fit.likelihood_profile(
            self.model.norm, values=self.norm_values, reoptimize=self.reoptimize
        )
        dloglike_scan = result["likelihood"]
        return {"norm_scan": result["values"], "dloglike_scan": dloglike_scan}

    def estimate_norm(self):
        """Fit norm of the flux point.

        Returns
        -------
        result : dict
            Dict with "norm" and "loglike" for the flux point.
        """
        # start optimization with norm=1
        self.model.norm.value = 1.0
        self.model.norm.frozen = False

        result = self.fit.optimize()

        if result.success:
            norm = self.model.norm.value
        else:
            norm = np.nan

        return {"norm": norm, "loglike": result.total_stat, "success": result.success}
