Example #1
    def run_region(self, kr, lon, lat, radius):
        #    TODO: for now we have to read/create the all-sky maps in each job,
        #    because we can't pickle <functools._lru_cache_wrapper object>;
        #    move this back to init when fixed

        log.info(f"ROI {kr}: loading data")

        # exposure
        exposure_hpx = Map.read(
            "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz")
        exposure_hpx.unit = "cm2 s"

        # iem
        iem_filepath = BASE_PATH / "data" / "gll_iem_v06_extrapolated.fits"
        iem_fermi_extra = Map.read(iem_filepath)
        # norm=1.1, tilt=0.03 see paper appendix A
        model_iem = SkyModel(
            PowerLawNormSpectralModel(norm=1.1, tilt=0.03),
            TemplateSpatialModel(iem_fermi_extra, normalize=False),
            name="iem_extrapolated",
        )

        # ROI
        roi_time = time()
        ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
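        # full map width: ROI diameter plus one PSF margin on each side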
        width = 2 * (radius + self.psf_margin)

        # Counts
        counts = Map.create(
            skydir=ROI_pos,
            width=width,
            proj="CAR",
            frame="galactic",
            binsz=1 / 8.0,
            axes=[self.energy_axis],
            dtype=float,
        )
        counts.fill_by_coord({
            "skycoord": self.events.radec,
            "energy": self.events.energy
        })

        axis = MapAxis.from_nodes(counts.geom.axes[0].center,
                                  name="energy_true",
                                  unit="GeV",
                                  interp="log")
        wcs = counts.geom.wcs
        geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
        coords = geom.get_coord()
        # expo
        data = exposure_hpx.interp_by_coord(coords)
        exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

        # read PSF
        psf_kernel = PSFKernel.from_table_psf(
            self.psf, geom, max_radius=self.psf_margin * u.deg
        )

        # Energy Dispersion
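        # (a diagonal response matrix assumes negligible energy dispersion:
        # reconstructed energy equals true energy bin-by-bin)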
        e_true = exposure.geom.axes[0].edges
        e_reco = counts.geom.axes[0].edges
        edisp = EDispKernel.from_diagonal_response(e_true=e_true,
                                                   e_reco=e_reco)

        # fit mask
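        # (exclude a PSF-margin-wide band at the ROI edges; longitudes are
        # wrapped to the [-180, 180] deg branch when the ROI crosses the
        # 0/360 deg seam)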
        if coords["lon"].min() < 90 * u.deg and coords["lon"].max(
        ) > 270 * u.deg:
            coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
        mask = (
            (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
            & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
            & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
            & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg))
        mask_fermi = WcsNDMap(counts.geom, mask)

        log.info(f"ROI {kr}: pre-computing diffuse")

        # IEM
        eval_iem = MapEvaluator(model=model_iem,
                                exposure=exposure,
                                psf=psf_kernel,
                                edisp=edisp)
        bkg_iem = eval_iem.compute_npred()

        # ISO
        eval_iso = MapEvaluator(model=self.model_iso,
                                exposure=exposure,
                                edisp=edisp)
        bkg_iso = eval_iso.compute_npred()

        # merge iem and iso, only one local normalization is fitted
        dataset_name = "3FHL_ROI_num" + str(kr)
        background_total = bkg_iem + bkg_iso
        background_model = BackgroundModel(background_total,
                                           name="bkg_iem+iso",
                                           datasets_names=[dataset_name])
        background_model.parameters["norm"].min = 0.0

        # Sources model
        in_roi = self.FHL3.positions.galactic.contained_by(wcs)
        FHL3_roi = []
        for ks in range(len(self.FHL3.table)):
            if in_roi[ks]:
                model = self.FHL3[ks].sky_model()
                model.spatial_model.parameters.freeze_all()  # freeze spatial
                model.spectral_model.parameters["amplitude"].min = 0.0
                if isinstance(model.spectral_model, PowerLawSpectralModel):
                    model.spectral_model.parameters["index"].min = 0.1
                    model.spectral_model.parameters["index"].max = 10.0
                else:
                    model.spectral_model.parameters["alpha"].min = 0.1
                    model.spectral_model.parameters["alpha"].max = 10.0

                FHL3_roi.append(model)
        model_total = Models([background_model] + FHL3_roi)

        # Dataset
        dataset = MapDataset(
            models=model_total,
            counts=counts,
            exposure=exposure,
            psf=psf_kernel,
            edisp=edisp,
            mask_fit=mask_fermi,
            name=dataset_name,
        )
        cat_stat = dataset.stat_sum()
        datasets = Datasets([dataset])

        log.info(f"ROI {kr}: running fit")
        fit = Fit(datasets)
        results = fit.run(**self.optimize_opts)
        print("ROI_num", str(kr), "\n", results)
        fit_stat = datasets.stat_sum()

        if results.message != "Optimization failed.":
            datasets.write(path=Path(self.resdir),
                           prefix=dataset.name,
                           overwrite=True)
            np.savez(
                self.resdir / f"3FHL_ROI_num{kr}_fit_infos.npz",
                message=results.message,
                stat=[cat_stat, fit_stat],
            )

            exec_time = time() - roi_time
            print("ROI", kr, " time (s): ", exec_time)

            log.info(f"ROI {kr}: running flux points")
            for model in FHL3_roi:
                if (self.FHL3[model.name].data["ROI_num"] == kr
                        and self.FHL3[model.name].data["Signif_Avg"] >=
                        self.sig_cut):
                    flux_points = FluxPointsEstimator(
                        e_edges=self.El_flux,
                        source=model.name,
                        n_sigma_ul=2,
                    ).run(datasets=datasets)
                    filename = self.resdir / f"{model.name}_flux_points.fits"
                    flux_points.write(filename, overwrite=True)

            exec_time = time() - roi_time - exec_time
            print("ROI", kr, " Flux points time (s): ", exec_time)
Example #2
def fit(dataset):
    return Fit([dataset])
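
# Usage sketch (hypothetical `dataset` object): the helper wraps a single
# dataset into a Fit instance, e.g.
# result = fit(dataset).run()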
Example #3
class TestSpectralFit:
    """Test fit in astrophysical scenario"""

    def setup(self):
        path = "$GAMMAPY_DATA/joint-crab/spectra/hess/"
        self.datasets = Datasets(
            [
                SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23523.fits"),
                SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23592.fits"),
            ]
        )

        self.pwl = SkyModel(
            spectral_model=PowerLawSpectralModel(
                index=2, amplitude=1e-12 * u.Unit("cm-2 s-1 TeV-1"), reference=1 * u.TeV
            )
        )

        self.ecpl = SkyModel(
            spectral_model=ExpCutoffPowerLawSpectralModel(
                index=2,
                amplitude=1e-12 * u.Unit("cm-2 s-1 TeV-1"),
                reference=1 * u.TeV,
                lambda_=0.1 / u.TeV,
            )
        )

        # Example fit for one observation
        self.datasets[0].models = self.pwl
        self.fit = Fit([self.datasets[0]])

    def set_model(self, model):
        for obs in self.datasets:
            obs.models = model

    @requires_dependency("iminuit")
    def test_basic_results(self):
        self.set_model(self.pwl)
        result = self.fit.run()
        pars = self.fit.datasets.parameters

        assert self.pwl is self.datasets[0].models[0]

        assert_allclose(result.total_stat, 38.343, rtol=1e-3)
        assert_allclose(pars["index"].value, 2.817, rtol=1e-3)
        assert pars["amplitude"].unit == "cm-2 s-1 TeV-1"
        assert_allclose(pars["amplitude"].value, 5.142e-11, rtol=1e-3)
        assert_allclose(self.datasets[0].npred().data[60], 0.6102, rtol=1e-3)
        pars.to_table()

    def test_basic_errors(self):
        self.set_model(self.pwl)
        result = self.fit.run()
        pars = result.parameters

        assert_allclose(pars["index"].error, 0.149633, rtol=1e-3)
        assert_allclose(pars["amplitude"].error, 6.423139e-12, rtol=1e-3)
        pars.to_table()

    def test_ecpl_fit(self):
        self.set_model(self.ecpl)
        fit = Fit([self.datasets[0]])
        fit.run()

        actual = fit.datasets.parameters["lambda_"].quantity
        assert actual.unit == "TeV-1"
        assert_allclose(actual.value, 0.145215, rtol=1e-2)

    def test_joint_fit(self):
        self.set_model(self.pwl)
        fit = Fit(self.datasets)
        fit.run()
        actual = fit.datasets.parameters["index"].value
        assert_allclose(actual, 2.7806, rtol=1e-3)

        actual = fit.datasets.parameters["amplitude"].quantity
        assert actual.unit == "cm-2 s-1 TeV-1"
        assert_allclose(actual.value, 5.200e-11, rtol=1e-3)

    def test_stats(self):
        dataset = self.datasets[0].copy()
        dataset.models = self.pwl

        fit = Fit([dataset])
        result = fit.run()

        stats = dataset.stat_array()
        actual = np.sum(stats[dataset.mask_safe])

        desired = result.total_stat
        assert_allclose(actual, desired)

    def test_fit_range(self):
        # Fit range not restricted: the fit range should default to the safe-energy thresholds
        obs = self.datasets[0]
        actual = obs.energy_range[0]

        assert actual.unit == "keV"
        assert_allclose(actual.value, 8.912509e08)

    def test_no_edisp(self):
        dataset = self.datasets[0].copy()

        dataset.edisp = None
        dataset.models = self.pwl

        fit = Fit([dataset])
        result = fit.run()
        assert_allclose(result.parameters["index"].value, 2.7961, atol=0.02)

    def test_stacked_fit(self):
        dataset = self.datasets[0].copy()
        dataset.stack(self.datasets[1])
        dataset.models = SkyModel(PowerLawSpectralModel())

        fit = Fit([dataset])
        result = fit.run()
        pars = result.parameters

        assert_allclose(pars["index"].value, 2.7767, rtol=1e-3)
        assert u.Unit(pars["amplitude"].unit) == "cm-2 s-1 TeV-1"
        assert_allclose(pars["amplitude"].value, 5.191e-11, rtol=1e-3)
Example #4
class ParameterEstimator(Estimator):
    """Model parameter estimator.

    Estimates a model parameter for a group of datasets.
    Compute best fit value, symmetric and delta TS for a given null value.
    Additionnally asymmetric errors as well as parameter upper limit and fit statistic profile
    can be estimated.

    Parameters
    ----------
    n_sigma : int
        Sigma to use for asymmetric error computation. Default is 1.
    n_sigma_ul : int
        Sigma to use for upper limit computation. Default is 2.
    null_value : float
        Which null value to use for the parameter
    scan_n_sigma : int
        Range to scan in number of parameter error
    scan_min : float
        Minimum value to use for the stat scan
    scan_max : int
        Maximum value to use for the stat scan
    scan_n_values : int
        Number of values used to scan fit stat profile
    scan_values : `~numpy.ndarray`
        Values to use for the scan.
    backend : str
        Backend used for fitting, default : minuit
    optimize_opts : dict
        Options passed to `Fit.optimize`.
    covariance_opts : dict
        Options passed to `Fit.covariance`.
    reoptimize : bool
        Re-optimize other free model parameters. Default is True.
    selection_optional : list of str
        Which additional quantities to estimate. Available options are:

            * "all": all the optional steps are executed
            * "errn-errp": estimate asymmetric errors on parameter best fit value.
            * "ul": estimate upper limits.
            * "scan": estimate fit statistic profiles.

        Default is None so the optionnal steps are not executed.


    """

    tag = "ParameterEstimator"
    _available_selection_optional = ["errn-errp", "ul", "scan"]

    def __init__(
        self,
        n_sigma=1,
        n_sigma_ul=2,
        null_value=1e-150,
        scan_n_sigma=3,
        scan_min=None,
        scan_max=None,
        scan_n_values=30,
        scan_values=None,
        backend="minuit",
        optimize_opts=None,
        covariance_opts=None,
        reoptimize=True,
        selection_optional=None,
    ):
        self.n_sigma = n_sigma
        self.n_sigma_ul = n_sigma_ul
        self.null_value = null_value

        # scan parameters
        self.scan_n_sigma = scan_n_sigma
        self.scan_n_values = scan_n_values
        self.scan_values = scan_values
        self.scan_min = scan_min
        self.scan_max = scan_max

        self.backend = backend
        if optimize_opts is None:
            optimize_opts = {}
        if covariance_opts is None:
            covariance_opts = {}
        self.optimize_opts = optimize_opts
        self.covariance_opts = covariance_opts

        self.reoptimize = reoptimize
        self.selection_optional = selection_optional
        self._fit = None

    def fit(self, datasets):
        if self._fit is None or datasets is not self._fit.datasets:
            self._fit = Fit(
                datasets,
                backend=self.backend,
                optimize_opts=self.optimize_opts,
                covariance_opts=self.covariance_opts,
            )
        return self._fit

    def estimate_best_fit(self, datasets, parameter):
        """Estimate parameter assymetric errors

        Parameters
        ----------
        datasets : `~gammapy.datasets.Datasets`
            Datasets
        parameter : `Parameter`
            For which parameter to get the value

        Returns
        -------
        result : dict
            Dict with the various parameter estimation values.
        """
        self.fit(datasets)
        result_fit = self._fit.run()

        return {
            f"{parameter.name}": parameter.value,
            "stat": result_fit.total_stat,
            "success": result_fit.success,
            f"{parameter.name}_err": parameter.error * self.n_sigma,
        }

    def estimate_ts(self, datasets, parameter):
        """Estimate parameter ts

        Parameters
        ----------
        datasets : `~gammapy.datasets.Datasets`
            Datasets
        parameter : `Parameter`
            For which parameter to get the value

        Returns
        -------
        result : dict
            Dict with the various parameter estimation values.
        """
        stat = datasets.stat_sum()

        with datasets.parameters.restore_status():

            # compute ts value
            parameter.value = self.null_value

            if self.reoptimize:
                parameter.frozen = True
                _ = self._fit.optimize(**self.optimize_opts)

            ts = datasets.stat_sum() - stat

        return {"ts": ts}

    def estimate_errn_errp(self, datasets, parameter):
        """Estimate parameter assymetric errors

        Parameters
        ----------
        datasets : `~gammapy.datasets.Datasets`
            Datasets
        parameter : `Parameter`
            For which parameter to get the value

        Returns
        -------
        result : dict
            Dict with the various parameter estimation values.
        """
        # TODO: make Fit stateless and configurable
        self.fit(datasets)
        self._fit.optimize(**self.optimize_opts)

        res = self._fit.confidence(parameter=parameter,
                                   sigma=self.n_sigma,
                                   reoptimize=self.reoptimize)
        return {
            f"{parameter.name}_errp": res["errp"],
            f"{parameter.name}_errn": res["errn"],
        }

    def estimate_scan(self, datasets, parameter):
        """Estimate parameter stat scan.

        Parameters
        ----------
        datasets : `~gammapy.datasets.Datasets`
            The datasets used to estimate the model parameter
        parameter : `Parameter`
            For which parameter to get the value

        Returns
        -------
        result : dict
            Dict with the various parameter estimation values.

        """
        self.fit(datasets)
        self._fit.optimize(**self.optimize_opts)

        if self.scan_min and self.scan_max:
            bounds = (self.scan_min, self.scan_max)
        else:
            bounds = self.scan_n_sigma

        profile = self._fit.stat_profile(
            parameter=parameter,
            values=self.scan_values,
            bounds=bounds,
            nvalues=self.scan_n_values,
            reoptimize=self.reoptimize,
        )

        return {
            f"{parameter.name}_scan": profile[f"{parameter.name}_scan"],
            "stat_scan": profile["stat_scan"],
        }

    def estimate_ul(self, datasets, parameter):
        """Estimate parameter ul.

        Parameters
        ----------
        datasets : `~gammapy.datasets.Datasets`
            The datasets used to estimate the model parameter
        parameter : `Parameter`
            For which parameter to get the value

        Returns
        -------
        result : dict
            Dict with the various parameter estimation values.

        """
        self.fit(datasets)
        self._fit.optimize(**self.optimize_opts)
        res = self._fit.confidence(parameter=parameter, sigma=self.n_sigma_ul)
        return {f"{parameter.name}_ul": res["errp"] + parameter.value}

    def run(self, datasets, parameter):
        """Run the parameter estimator.

        Parameters
        ----------
        datasets : `~gammapy.datasets.Datasets`
            The datasets used to estimate the model parameter
        parameter : `str` or `Parameter`
            For which parameter to run the estimator

        Returns
        -------
        result : dict
            Dict with the various parameter estimation values.
        """
        datasets = Datasets(datasets)
        parameter = datasets.parameters[parameter]

        with datasets.parameters.restore_status():

            if not self.reoptimize:
                datasets.parameters.freeze_all()
                parameter.frozen = False

            result = self.estimate_best_fit(datasets, parameter)
            result.update(self.estimate_ts(datasets, parameter))

            if "errn-errp" in self.selection_optional:
                result.update(self.estimate_errn_errp(datasets, parameter))

            if "ul" in self.selection_optional:
                result.update(self.estimate_ul(datasets, parameter))

            if "scan" in self.selection_optional:
                result.update(self.estimate_scan(datasets, parameter))

        return result
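
# Usage sketch (not from the original source; assumes `datasets` is an existing
# `Datasets` object with a fitted model, and the parameter name is illustrative):
#
#     estimator = ParameterEstimator(selection_optional=["errn-errp", "ul"])
#     result = estimator.run(datasets, parameter="amplitude")
#     # -> keys like "amplitude", "amplitude_err", "ts", "amplitude_ul"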
Example #5
#
# First we start with fitting a simple `~gammapy.modeling.models.PowerLawSpectralModel`.

# In[ ]:

pwl = PowerLawSpectralModel(index=2,
                            amplitude="1e-12 cm-2 s-1 TeV-1",
                            reference="1 TeV")
model = SkyModel(spectral_model=pwl)

# After creating the model we run the fit by passing the `flux_points` and `model` objects:

# In[ ]:

dataset_pwl = FluxPointsDataset(model, flux_points)
fitter = Fit([dataset_pwl])
result_pwl = fitter.run()

# And print the result:

# In[ ]:

print(result_pwl)

# In[ ]:

print(pwl)

# Finally we plot the data points and the best fit model:

# In[ ]:
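
# A minimal sketch of the plotting step (assuming matplotlib is available and
# that this gammapy version provides `FluxPointsDataset.plot_fit`):

import matplotlib.pyplot as plt

dataset_pwl.plot_fit()
plt.show()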
Example #6
def data_fit(stacked):
    # Fit the stacked dataset
    fit = Fit([stacked])
    return fit.run(optimize_opts={"print_level": 1})
Example #8
class FluxPointsEstimator:
    """Flux points estimator.

    Estimates flux points for a given list of spectral datasets, energies and
    spectral model.

    To estimate the flux point the amplitude of the reference spectral model is
    fitted within the energy range defined by the energy group. This is done for
    each group independently. The amplitude is re-normalized using the "norm" parameter,
    which specifies the deviation of the flux from the reference model in this
    energy group. See https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/binned_likelihoods/index.html
    for details.

    The method is also described in the Fermi-LAT catalog paper
    https://ui.adsabs.harvard.edu/#abs/2015ApJS..218...23A
    or the HESS Galactic Plane Survey paper
    https://ui.adsabs.harvard.edu/#abs/2018A%26A...612A...1H

    Parameters
    ----------
    datasets : list of `~gammapy.spectrum.SpectrumDataset`
        Spectrum datasets.
    e_edges : `~astropy.units.Quantity`
        Energy edges of the flux point bins.
    source : str or int
        For which source in the model to compute the flux points.
    norm_min : float
        Minimum value for the norm used for the fit statistic profile evaluation.
    norm_max : float
        Maximum value for the norm used for the fit statistic profile evaluation.
    norm_n_values : int
        Number of norm values used for the fit statistic profile.
    norm_values : `numpy.ndarray`
        Array of norm values to be used for the fit statistic profile.
    sigma : int
        Sigma to use for asymmetric error computation.
    sigma_ul : int
        Sigma to use for upper limit computation.
    reoptimize : bool
        Re-optimize other free model parameters.
    """
    def __init__(
        self,
        datasets,
        e_edges,
        source=0,
        norm_min=0.2,
        norm_max=5,
        norm_n_values=11,
        norm_values=None,
        sigma=1,
        sigma_ul=2,
        reoptimize=False,
    ):
        # make a copy to not modify the input datasets
        if not isinstance(datasets, Datasets):
            datasets = Datasets(datasets)

        if not (datasets.is_all_same_type and datasets.is_all_same_shape):
            raise ValueError(
                "Flux point estimation requires a list of datasets"
                " of the same type and data shape."
            )

        self.datasets = datasets.copy()
        self.e_edges = e_edges

        dataset = self.datasets[0]

        model = dataset.models[source].spectral_model

        self.model = ScaleSpectralModel(model)
        self.model.norm.min = 0
        self.model.norm.max = 1e3

        if norm_values is None:
            norm_values = np.logspace(np.log10(norm_min), np.log10(norm_max),
                                      norm_n_values)

        self.norm_values = norm_values
        self.sigma = sigma
        self.sigma_ul = sigma_ul
        self.reoptimize = reoptimize
        self.source = source
        self.fit = Fit(self.datasets)

        self._set_scale_model()
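        # flag tracking whether any unmasked bin contributes to the fit
        # statistic for the current energy group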
        self._contribute_to_stat = False

    def _freeze_parameters(self):
        # freeze other parameters
        for par in self.datasets.parameters:
            if par is not self.model.norm:
                par.frozen = True

    def _freeze_empty_background(self):
        counts_all = self.estimate_counts()["counts"]

        for counts, dataset in zip(counts_all, self.datasets):
            if isinstance(dataset, MapDataset) and counts == 0:
                if dataset.background_model is not None:
                    dataset.background_model.parameters.freeze_all()

    def _set_scale_model(self):
        # set the model on all datasets
        for dataset in self.datasets:
            if len(dataset.models) > 1:
                dataset.models[self.source].spectral_model = self.model
            else:
                dataset.models[0].spectral_model = self.model

    @property
    def ref_model(self):
        return self.model.model

    @property
    def e_groups(self):
        """Energy grouping table `~astropy.table.Table`"""
        dataset = self.datasets[0]
        energy_axis = dataset.counts.geom.get_axis_by_name("energy")
        return energy_axis.group_table(self.e_edges)

    def __str__(self):
        s = f"{self.__class__.__name__}:\n"
        s += str(self.datasets) + "\n"
        s += str(self.e_edges) + "\n"
        s += str(self.model) + "\n"
        return s

    def run(self, steps="all"):
        """Run the flux point estimator for all energy groups.

        Returns
        -------
        flux_points : `FluxPoints`
            Estimated flux points.
        steps : list of str
            Which steps to execute. See `estimate_flux_point` for details
            and available options.
        """
        rows = []
        for e_group in self.e_groups:
            if e_group["bin_type"].strip() != "normal":
                log.debug(
                    "Skipping under-/ overflow bin in flux point estimation.")
                continue

            row = self.estimate_flux_point(e_group, steps=steps)
            rows.append(row)

        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        return FluxPoints(table).to_sed_type("dnde")

    def _energy_mask(self, e_group, dataset):
        energy_mask = np.zeros(dataset.data_shape)
        energy_mask[e_group["idx_min"]:e_group["idx_max"] + 1] = 1
        return energy_mask.astype(bool)

    def estimate_flux_point(self, e_group, steps="all"):
        """Estimate flux point for a single energy group.

        Parameters
        ----------
        e_group : `~astropy.table.Row`
            Energy group to compute the flux point for.
        steps : list of str
            Which steps to execute. Available options are:

                * "err": estimate symmetric error.
                * "errn-errp": estimate asymmetric errors.
                * "ul": estimate upper limits.
                * "ts": estimate ts and sqrt(ts) values.
                * "norm-scan": estimate fit statistic profiles.

            By default all steps are executed.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        e_min, e_max = e_group["energy_min"], e_group["energy_max"]
        # Put at log center of the bin
        e_ref = np.sqrt(e_min * e_max)

        result = {
            "e_ref": e_ref,
            "e_min": e_min,
            "e_max": e_max,
            "ref_dnde": self.ref_model(e_ref),
            "ref_flux": self.ref_model.integral(e_min, e_max),
            "ref_eflux": self.ref_model.energy_flux(e_min, e_max),
            "ref_e2dnde": self.ref_model(e_ref) * e_ref**2,
        }

        for dataset in self.datasets:
            dataset.mask_fit = self._energy_mask(e_group=e_group,
                                                 dataset=dataset)
            mask = dataset.mask_fit

            if dataset.mask_safe is not None:
                mask &= dataset.mask_safe

            self._contribute_to_stat |= mask.any()

        with self.datasets.parameters.restore_values:

            self._freeze_empty_background()

            if not self.reoptimize:
                self._freeze_parameters()

            result.update(self.estimate_norm())

            if not result.pop("success"):
                log.warning(
                    "Fit failed for flux point between {e_min:.3f} and {e_max:.3f},"
                    " setting NaN.".format(e_min=e_min, e_max=e_max))

            if steps == "all":
                steps = ["err", "counts", "errp-errn", "ul", "ts", "norm-scan"]

            if "err" in steps:
                result.update(self.estimate_norm_err())

            if "counts" in steps:
                result.update(self.estimate_counts())

            if "errp-errn" in steps:
                result.update(self.estimate_norm_errn_errp())

            if "ul" in steps:
                result.update(self.estimate_norm_ul())

            if "ts" in steps:
                result.update(self.estimate_norm_ts())

            if "norm-scan" in steps:
                result.update(self.estimate_norm_scan())

        return result

    def estimate_norm_errn_errp(self):
        """Estimate asymmetric errors for a flux point.

        Returns
        -------
        result : dict
            Dict with asymmetric errors for the flux point norm.
        """
        if not self._contribute_to_stat:
            return {"norm_errp": np.nan, "norm_errn": np.nan}

        result = self.fit.confidence(parameter=self.model.norm,
                                     sigma=self.sigma)
        return {"norm_errp": result["errp"], "norm_errn": result["errn"]}

    def estimate_norm_err(self):
        """Estimate covariance errors for a flux point.

        Returns
        -------
        result : dict
            Dict with symmetric error for the flux point norm.
        """
        if not self._contribute_to_stat:
            return {"norm_err": np.nan}

        result = self.fit.covariance()
        norm_err = result.parameters.error(self.model.norm)
        return {"norm_err": norm_err}

    def estimate_counts(self):
        """Estimate counts for the flux point.

        Returns
        -------
        result : dict
            Dict with an array with one entry per dataset with counts for the flux point.
        """
        if not self._contribute_to_stat:
            return {"counts": np.zeros(len(self.datasets))}

        counts = []
        for dataset in self.datasets:
            mask = dataset.mask_fit
            if dataset.mask_safe is not None:
                mask &= dataset.mask_safe

            counts.append(dataset.counts.data[mask].sum())

        return {"counts": np.array(counts, dtype=int)}

    def estimate_norm_ul(self):
        """Estimate upper limit for a flux point.

        Returns
        -------
        result : dict
            Dict with upper limit for the flux point norm.
        """
        if not self._contribute_to_stat:
            return {"norm_ul": np.nan}

        norm = self.model.norm

        # TODO: the minuit backend has convergence problems when the fit statistic is not
        #  of parabolic shape, which is the case, when there are zero counts in the
        #  energy bin. For this case we change to the scipy backend.
        counts = self.estimate_counts()["counts"]

        if np.all(counts == 0):
            result = self.fit.confidence(
                parameter=norm,
                sigma=self.sigma_ul,
                backend="scipy",
                reoptimize=self.reoptimize,
            )
        else:
            result = self.fit.confidence(parameter=norm, sigma=self.sigma_ul)

        return {"norm_ul": result["errp"] + norm.value}

    def estimate_norm_ts(self):
        """Estimate ts and sqrt(ts) for the flux point.

        Returns
        -------
        result : dict
            Dict with ts and sqrt(ts) for the flux point.
        """
        if not self._contribute_to_stat:
            return {"sqrt_ts": np.nan, "ts": np.nan}

        stat = self.datasets.stat_sum()

        # set the norm to zero for the null hypothesis; the best-fit value
        # is restored by the enclosing restore_values context
        self.model.norm.value = 0
        self.model.norm.frozen = True

        if self.reoptimize:
            _ = self.fit.optimize()

        stat_null = self.datasets.stat_sum()

        # compute sqrt TS
        ts = np.abs(stat_null - stat)
        sqrt_ts = np.sqrt(ts)
        return {"sqrt_ts": sqrt_ts, "ts": ts}

    def estimate_norm_scan(self):
        """Estimate fit statistic profile for the norm parameter.

        Returns
        -------
        result : dict
            Keys: "norm_scan", "stat_scan"
        """
        if not self._contribute_to_stat:
            nans = np.nan * np.empty_like(self.norm_values)
            return {"norm_scan": nans, "stat_scan": nans}

        result = self.fit.stat_profile(self.model.norm,
                                       values=self.norm_values,
                                       reoptimize=self.reoptimize)
        return {"norm_scan": result["values"], "stat_scan": result["stat"]}

    def estimate_norm(self):
        """Fit norm of the flux point.

        Returns
        -------
        result : dict
            Dict with "norm" and "stat" for the flux point.
        """
        if not self._contribute_to_stat:
            return {"norm": np.nan, "stat": np.nan, "success": False}

        # start optimization with norm=1
        self.model.norm.value = 1.0
        self.model.norm.frozen = False

        result = self.fit.optimize()

        if result.success:
            norm = self.model.norm.value
        else:
            norm = np.nan

        return {
            "norm": norm,
            "stat": result.total_stat,
            "success": result.success
        }
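
# Usage sketch (assumes `datasets` is an existing list of spectrum datasets
# with a fitted model; the energy edges are illustrative):
#
#     import astropy.units as u
#     e_edges = [1, 3, 10, 30] * u.TeV
#     fpe = FluxPointsEstimator(datasets=datasets, e_edges=e_edges, source=0)
#     flux_points = fpe.run(steps="all")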
Example #9
class Analysis:
    """Config-driven high level analysis interface.

    It is initialized by default with a set of configuration parameters and values declared in
    an internal high level interface model, though the user can also provide configuration
    parameters passed as a nested dictionary at the moment of instantiation. In that case these
    parameters will overwrite the default values of those present in the configuration file.

    Parameters
    ----------
    config : dict or `AnalysisConfig`
        Configuration options following `AnalysisConfig` schema
    """

    def __init__(self, config):
        self.config = config
        self.config.set_logging()
        self.datastore = None
        self.observations = None
        self.datasets = None
        self.fit = Fit()
        self.fit_result = None
        self.flux_points = None

    @property
    def models(self):
        if not self.datasets:
            raise RuntimeError("No datasets defined. Impossible to set models.")
        return self.datasets.models
    
    @models.setter
    def models(self, models):
        self.set_models(models, extend=False)

    @property
    def config(self):
        """Analysis configuration (`AnalysisConfig`)"""
        return self._config

    @config.setter
    def config(self, value):
        if isinstance(value, dict):
            self._config = AnalysisConfig(**value)
        elif isinstance(value, AnalysisConfig):
            self._config = value
        else:
            raise TypeError("config must be dict or AnalysisConfig.")

    def _set_data_store(self):
        """Set the datastore on the Analysis object."""
        path = make_path(self.config.observations.datastore)
        if path.is_file():
            log.debug(f"Setting datastore from file: {path}")
            self.datastore = DataStore.from_file(path)
        elif path.is_dir():
            log.debug(f"Setting datastore from directory: {path}")
            self.datastore = DataStore.from_dir(path)
        else:
            raise FileNotFoundError(f"Datastore not found: {path}")

    def _make_obs_table_selection(self):
        """Return list of obs_ids after filtering on datastore observation table."""
        obs_settings = self.config.observations

        # Reject configs with list of obs_ids and obs_file set at the same time
        if len(obs_settings.obs_ids) and obs_settings.obs_file is not None:
            raise ValueError(
                "Values for both parameters obs_ids and obs_file are not accepted."
            )

        # First select input list of observations from obs_table
        if len(obs_settings.obs_ids):
            selected_obs_table = self.datastore.obs_table.select_obs_id(
                obs_settings.obs_ids
            )
        elif obs_settings.obs_file is not None:
            path = make_path(obs_settings.obs_file)
            ids = list(Table.read(path, format="ascii", data_start=0).columns[0])
            selected_obs_table = self.datastore.obs_table.select_obs_id(ids)
        else:
            selected_obs_table = self.datastore.obs_table

        # Apply cone selection
        if obs_settings.obs_cone.lon is not None:
            cone = dict(
                type="sky_circle",
                frame=obs_settings.obs_cone.frame,
                lon=obs_settings.obs_cone.lon,
                lat=obs_settings.obs_cone.lat,
                radius=obs_settings.obs_cone.radius,
                border="0 deg",
            )
            selected_obs_table = selected_obs_table.select_observations(cone)

        return selected_obs_table["OBS_ID"].tolist()

    def get_observations(self):
        """Fetch observations from the data store according to criteria defined in the configuration."""
        observations_settings = self.config.observations
        self._set_data_store()

        log.info("Fetching observations.")
        ids = self._make_obs_table_selection()
        required_irf = [_.value for _ in observations_settings.required_irf]
        self.observations = self.datastore.get_observations(
            ids, skip_missing=True, required_irf=required_irf
        )

        if observations_settings.obs_time.start is not None:
            start = observations_settings.obs_time.start
            stop = observations_settings.obs_time.stop
            if len(start.shape) == 0:
                time_intervals = [(start, stop)]
            else:
                time_intervals = [(tstart, tstop) for tstart, tstop in zip(start, stop)]
            self.observations = self.observations.select_time(time_intervals)

        log.info(f"Number of selected observations: {len(self.observations)}")

        for obs in self.observations:
            log.debug(obs)

    def get_datasets(self):
        """Produce reduced datasets."""
        datasets_settings = self.config.datasets
        if not self.observations or len(self.observations) == 0:
            raise RuntimeError("No observations have been selected.")

        if datasets_settings.type == "1d":
            self._spectrum_extraction()
        else:  # 3d
            self._map_making()


    def set_models(self, models, extend=True):
        """Set models on datasets.
        Adds `FoVBackgroundModel` if not present already
        
        Parameters
        ----------
        models : `~gammapy.modeling.models.Models` or str
            Models object or YAML models string
        extend : bool
            Extend the exiting models on the datasets or replace them.
        """
        if not self.datasets or len(self.datasets) == 0:
            raise RuntimeError("Missing datasets")

        log.info("Reading model.")
        if isinstance(models, str):
            models = Models.from_yaml(models)
        elif isinstance(models, Models):
            pass
        elif isinstance(models, DatasetModels) or isinstance(models, list):
            models = Models(models)
        else:
            raise TypeError(f"Invalid type: {models!r}")

        if extend:
            models.extend(self.datasets.models)

        self.datasets.models = models

        bkg_models = []
        for dataset in self.datasets:
            if dataset.tag == "MapDataset" and dataset.background_model is None:
                bkg_models.append(FoVBackgroundModel(dataset_name=dataset.name))
        if bkg_models:
            models.extend(bkg_models)
            self.datasets.models = models

        log.info(models)


    def read_models(self, path, extend=True):
        """Read models from YAML file.

        Parameters
        ----------
        path : str
            path to the model file
        extend : bool
            Extend the existing models on the datasets or replace them.
        """

        path = make_path(path)
        models = Models.read(path)
        self.set_models(models, extend=extend)
        log.info(f"Models loaded from {path}.")


    def write_models(self, overwrite=True, write_covariance=True):
        """Write models to YAML file.
           File name is taken from the configuration file.
        """

        filename_models = self.config.general.models_file
        if filename_models is not None:
            self.models.write(filename_models,
                              overwrite=overwrite,
                              write_covariance=write_covariance)
            log.info(f"Models loaded from {filename_models}.")
        else:
            raise RuntimeError("Missing models_file in config.general")
   

    def read_datasets(self):
        """Read datasets from YAML file.
        File names are taken from the configuration file.

        """

        filename = self.config.general.datasets_file
        filename_models = self.config.general.models_file
        if filename is not None:
            self.datasets = Datasets.read(filename)
            log.info(f"Datasets loaded from {filename}.")
            if filename_models is not None:
                self.read_models(filename_models, extend=False)
        else:
            raise RuntimeError("Missing datasets_file in config.general")


    def write_datasets(self, overwrite=True, write_covariance=True):
        """Write datasets to YAML file.
        File names are taken from the configuration file.

        Parameters
        ----------
        overwrite : bool
            overwrite datasets FITS files
        write_covariance : bool
            save covariance or not
        """

        filename = self.config.general.datasets_file
        filename_models = self.config.general.models_file
        if filename is not None:
            self.datasets.write(filename,
                                 filename_models,
                                 overwrite=overwrite,
                                 write_covariance=write_covariance)
            log.info(f"Datasets stored to {filename}.")
            log.info(f"Datasets stored to {filename_models}.")
        else:
            raise RuntimeError("Missing datasets_file in config.general")


    def run_fit(self):
        """Fitting reduced datasets to model."""
        if not self.models:
            raise RuntimeError("Missing models")

        fit_settings = self.config.fit
        for dataset in self.datasets:
            if fit_settings.fit_range:
                energy_min = fit_settings.fit_range.min
                energy_max = fit_settings.fit_range.max
                geom = dataset.counts.geom
                dataset.mask_fit = geom.energy_mask(energy_min, energy_max)

        log.info("Fitting datasets.")
        result = self.fit.run(datasets=self.datasets)
        self.fit_result = result
        log.info(self.fit_result)

    def get_flux_points(self):
        """Calculate flux points for a specific model component."""
        if not self.datasets:
            raise RuntimeError("No datasets defined. Impossible to compute flux points.")

        fp_settings = self.config.flux_points
        log.info("Calculating flux points.")
        energy_edges = self._make_energy_axis(fp_settings.energy).edges
        flux_point_estimator = FluxPointsEstimator(
            energy_edges=energy_edges,
            source=fp_settings.source,
            fit=self.fit,
            **fp_settings.parameters,
        )

        fp = flux_point_estimator.run(datasets=self.datasets)

        self.flux_points = FluxPointsDataset(
            data=fp, models=self.models[fp_settings.source]
        )
        cols = ["e_ref", "dnde", "dnde_ul", "dnde_err", "sqrt_ts"]
        table = self.flux_points.data.to_table(sed_type="dnde")
        log.info("\n{}".format(table[cols]))

    def get_excess_map(self):
        """Calculate excess map with respect to the current model."""
        excess_settings = self.config.excess_map
        log.info("Computing excess maps.")

        # TODO: Here we could possibly stack the datasets if needed
        # or allow to compute the excess map for each dataset
        if len(self.datasets) > 1:
            raise ValueError("Datasets must be stacked to compute the excess map")

        if self.datasets[0].tag not in ["MapDataset", "MapDatasetOnOff"]:
            raise ValueError("Cannot compute excess map for 1D dataset")

        energy_edges = self._make_energy_axis(excess_settings.energy_edges)
        if energy_edges is not None:
            energy_edges = energy_edges.edges

        excess_map_estimator = ExcessMapEstimator(
            correlation_radius=excess_settings.correlation_radius,
            energy_edges=energy_edges,
            **excess_settings.parameters,
        )
        self.excess_map = excess_map_estimator.run(self.datasets[0])

    def get_light_curve(self):
        """Calculate light curve for a specific model component."""
        lc_settings = self.config.light_curve
        log.info("Computing light curve.")
        energy_edges = self._make_energy_axis(lc_settings.energy_edges).edges

        if (
            lc_settings.time_intervals.start is None
            or lc_settings.time_intervals.stop is None
        ):
            log.info(
                "Time intervals not defined. Extract light curve on datasets GTIs."
            )
            time_intervals = None
        else:
            time_intervals = [
                (t1, t2)
                for t1, t2 in zip(
                    lc_settings.time_intervals.start, lc_settings.time_intervals.stop
                )
            ]

        light_curve_estimator = LightCurveEstimator(
            time_intervals=time_intervals,
            energy_edges=energy_edges,
            source=lc_settings.source,
            fit=self.fit,
            **lc_settings.parameters,
        )
        lc = light_curve_estimator.run(datasets=self.datasets)
        self.light_curve = lc
        log.info(
            "\n{}".format(
                self.light_curve.to_table(format="lightcurve", sed_type="flux")
            )
        )

    def update_config(self, config):
        self.config = self.config.update(config=config)

    @staticmethod
    def _create_wcs_geometry(wcs_geom_settings, axes):
        """Create the WCS geometry."""
        geom_params = {}
        skydir_settings = wcs_geom_settings.skydir
        if skydir_settings.lon is not None:
            skydir = SkyCoord(
                skydir_settings.lon, skydir_settings.lat, frame=skydir_settings.frame
            )
            geom_params["skydir"] = skydir

        if skydir_settings.frame in ["icrs", "galactic"]:
            geom_params["frame"] = skydir_settings.frame
        else:
            raise ValueError(
                f"Incorrect skydir frame: expect 'icrs' or 'galactic'. Got {skydir_settings.frame}"
            )

        geom_params["axes"] = axes
        geom_params["binsz"] = wcs_geom_settings.binsize
        width = wcs_geom_settings.width.width.to("deg").value
        height = wcs_geom_settings.width.height.to("deg").value
        geom_params["width"] = (width, height)

        return WcsGeom.create(**geom_params)

    @staticmethod
    def _create_region_geometry(on_region_settings, axes):
        """Create the region geometry."""
        on_lon = on_region_settings.lon
        on_lat = on_region_settings.lat
        on_center = SkyCoord(on_lon, on_lat, frame=on_region_settings.frame)
        on_region = CircleSkyRegion(on_center, on_region_settings.radius)

        return RegionGeom.create(region=on_region, axes=axes)

    def _create_geometry(self):
        """Create the geometry."""
        log.debug("Creating geometry.")
        datasets_settings = self.config.datasets
        geom_settings = datasets_settings.geom
        axes = [self._make_energy_axis(geom_settings.axes.energy)]
        if datasets_settings.type == "3d":
            geom = self._create_wcs_geometry(geom_settings.wcs, axes)
        elif datasets_settings.type == "1d":
            geom = self._create_region_geometry(datasets_settings.on_region, axes)
        else:
            raise ValueError(
                f"Incorrect dataset type. Expect '1d' or '3d'. Got {datasets_settings.type}."
            )
        return geom

    def _create_reference_dataset(self, name=None):
        """Create the reference dataset for the current analysis."""
        log.debug("Creating target Dataset.")
        geom = self._create_geometry()

        geom_settings = self.config.datasets.geom
        geom_irf = dict(energy_axis_true=None, binsz_irf=None)
        if geom_settings.axes.energy_true.min is not None:
            geom_irf["energy_axis_true"] = self._make_energy_axis(
                geom_settings.axes.energy_true, name="energy_true"
            )
        if geom_settings.wcs.binsize_irf is not None:
            geom_irf["binsz_irf"] = geom_settings.wcs.binsize_irf.to("deg").value

        if self.config.datasets.type == "1d":
            return SpectrumDataset.create(geom, name=name, **geom_irf)
        else:
            return MapDataset.create(geom, name=name, **geom_irf)

    def _create_dataset_maker(self):
        """Create the Dataset Maker."""
        log.debug("Creating the target Dataset Maker.")

        datasets_settings = self.config.datasets
        if datasets_settings.type == "3d":
            maker = MapDatasetMaker(selection=datasets_settings.map_selection)
        elif datasets_settings.type == "1d":
            maker_config = {}
            if datasets_settings.containment_correction:
                maker_config[
                    "containment_correction"
                ] = datasets_settings.containment_correction

            maker_config["selection"] = ["counts", "exposure", "edisp"]

            maker = SpectrumDatasetMaker(**maker_config)

        return maker

    def _create_safe_mask_maker(self):
        """Create the SafeMaskMaker."""
        log.debug("Creating the mask_safe Maker.")

        safe_mask_selection = self.config.datasets.safe_mask.methods
        safe_mask_settings = self.config.datasets.safe_mask.parameters
        return SafeMaskMaker(methods=safe_mask_selection, **safe_mask_settings)

    def _create_background_maker(self):
        """Create the Background maker."""
        log.info("Creating the background Maker.")

        datasets_settings = self.config.datasets
        bkg_maker_config = {}
        if datasets_settings.background.exclusion:
            path = make_path(datasets_settings.background.exclusion)
            exclusion_mask = Map.read(path)
            exclusion_mask.data = exclusion_mask.data.astype(bool)
            bkg_maker_config["exclusion_mask"] = exclusion_mask
        bkg_maker_config.update(datasets_settings.background.parameters)

        bkg_method = datasets_settings.background.method

        bkg_maker = None
        if bkg_method == "fov_background":
            log.debug(f"Creating FoVBackgroundMaker with arguments {bkg_maker_config}")
            bkg_maker = FoVBackgroundMaker(**bkg_maker_config)
        elif bkg_method == "ring":
            bkg_maker = RingBackgroundMaker(**bkg_maker_config)
            log.debug(f"Creating RingBackgroundMaker with arguments {bkg_maker_config}")
            if datasets_settings.geom.axes.energy.nbins > 1:
                raise ValueError(
                    "You need to define a single-bin energy geometry for your dataset."
                )
        elif bkg_method == "reflected":
            bkg_maker = ReflectedRegionsBackgroundMaker(**bkg_maker_config)
            log.debug(
                f"Creating ReflectedRegionsBackgroundMaker with arguments {bkg_maker_config}"
            )
        else:
            log.warning("No background maker set. Check configuration.")
        return bkg_maker

    def _map_making(self):
        """Make maps and datasets for 3d analysis"""
        datasets_settings = self.config.datasets
        offset_max = datasets_settings.geom.selection.offset_max

        log.info("Creating reference dataset and makers.")
        stacked = self._create_reference_dataset(name="stacked")

        maker = self._create_dataset_maker()
        maker_safe_mask = self._create_safe_mask_maker()
        bkg_maker = self._create_background_maker()

        makers = [maker, maker_safe_mask, bkg_maker]
        makers = [maker for maker in makers if maker is not None]

        log.info("Start the data reduction loop.")

        datasets_maker = DatasetsMaker(
            makers,
            stack_datasets=datasets_settings.stack,
            n_jobs=self.config.general.n_jobs,
            cutout_mode="trim",
            cutout_width=2 * offset_max,
        )
        self.datasets = datasets_maker.run(stacked, self.observations)
        # TODO: move the progress bar into DatasetsMaker, but how with multiprocessing?

    def _spectrum_extraction(self):
        """Run all steps for the spectrum extraction."""
        log.info("Reducing spectrum datasets.")
        datasets_settings = self.config.datasets
        dataset_maker = self._create_dataset_maker()
        safe_mask_maker = self._create_safe_mask_maker()
        bkg_maker = self._create_background_maker()

        reference = self._create_reference_dataset()

        datasets = []
        for obs in progress_bar(self.observations, desc="Observations"):
            log.debug(f"Processing observation {obs.obs_id}")
            dataset = dataset_maker.run(reference.copy(), obs)
            if bkg_maker is not None:
                dataset = bkg_maker.run(dataset, obs)
                if dataset.counts_off is None:
                    log.debug(
                        f"No OFF region found for observation {obs.obs_id}. Discarding."
                    )
                    continue
            dataset = safe_mask_maker.run(dataset, obs)
            log.debug(dataset)
            datasets.append(dataset)
        self.datasets = Datasets(datasets)

        if datasets_settings.stack:
            stacked = self.datasets.stack_reduce(name="stacked")
            self.datasets = Datasets([stacked])

    @staticmethod
    def _make_energy_axis(axis, name="energy"):
        if axis.min is None or axis.max is None:
            return None
        elif axis.nbins is None or axis.nbins < 1:
            return None
        else:
            return MapAxis.from_bounds(
                name=name,
                lo_bnd=axis.min.value,
                hi_bnd=axis.max.to_value(axis.min.unit),
                nbin=axis.nbins,
                unit=axis.min.unit,
                interp="log",
                node_type="edges",
            )
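
# Config-driven workflow sketch (file names are hypothetical):
#
#     config = AnalysisConfig.read("config.yaml")
#     analysis = Analysis(config)
#     analysis.get_observations()
#     analysis.get_datasets()
#     analysis.read_models("models.yaml")
#     analysis.run_fit()
#     analysis.get_flux_points()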
Example #10
)

models = sky_model_1 + sky_model_2

# Define map geometry
axis = MapAxis.from_edges(np.logspace(-1.0, 1.0, 10), unit="TeV", name="energy")
geom = WcsGeom.create(
    skydir=(0, 0), binsz=0.02, width=(2, 2), frame="galactic", axes=[axis]
)


# Define some observation parameters
# we are not simulating many pointings / observations
pointing = SkyCoord(0.2, 0.5, unit="deg", frame="galactic")
livetime = 20 * u.hour

exposure_map = make_map_exposure_true_energy(
    pointing=pointing, livetime=livetime, aeff=aeff, geom=geom
)

dataset = MapDataset(models=models, exposure=exposure_map)
npred = dataset.npred()

dataset.fake()
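# fake() overwrites the dataset counts with a Poisson realization of npred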

fit = Fit([dataset])
results = fit.run()

print(results)
print(models)
Example #11
def test_map_fit(sky_model, geom, geom_etrue):
    dataset_1 = get_map_dataset(sky_model,
                                geom,
                                geom_etrue,
                                evaluation_mode="local")
    dataset_1.background_model.norm.value = 0.5
    dataset_1.counts = dataset_1.npred()

    dataset_2 = get_map_dataset(sky_model,
                                geom,
                                geom_etrue,
                                evaluation_mode="global",
                                likelihood="cstat")
    dataset_2.counts = dataset_2.npred()

    sky_model.parameters["sigma"].frozen = True

    dataset_1.background_model.norm.value = 0.49
    dataset_2.background_model.norm.value = 0.99

    fit = Fit([dataset_1, dataset_2])
    result = fit.run()

    assert result.success
    assert "minuit" in repr(result)

    npred = dataset_1.npred().data.sum()
    assert_allclose(npred, 6220.529956, rtol=1e-3)
    assert_allclose(result.total_stat, 11802.750562, rtol=1e-3)

    pars = result.parameters
    assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
    assert_allclose(pars.error("lon_0"), 0.002651, rtol=1e-2)

    assert_allclose(pars["index"].value, 3, rtol=1e-2)
    assert_allclose(pars.error("index"), 0.021222, rtol=1e-2)

    assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
    assert_allclose(pars.error("amplitude"), 3.117271e-13, rtol=1e-2)

    # background norm 1
    assert_allclose(pars[8].value, 0.5, rtol=1e-2)
    assert_allclose(pars.error(pars[8]), 0.015759, rtol=1e-2)

    # background norm 2
    assert_allclose(pars[11].value, 1, rtol=1e-2)
    assert_allclose(pars.error(pars[11]), 0.02147, rtol=1e-2)

    # test mask_safe evaluation
    mask_safe = geom.energy_mask(emin=1 * u.TeV)
    dataset_1.mask_safe = Map.from_geom(geom, data=mask_safe)
    dataset_2.mask_safe = Map.from_geom(geom, data=mask_safe)

    stat = fit.datasets.likelihood()
    assert_allclose(stat, 6425.389198)

    # test model evaluation outside image

    dataset_1.model[0].spatial_model.lon_0.value = 150
    dataset_1.npred()
    assert not dataset_1._evaluators[0].contributes

    with mpl_plot_check():
        dataset_1.plot_residuals()
Example #12
    dataset = MapDataset(
        model=model,
        counts=counts,
        exposure=exposure,
        psf=psf,
        edisp=edisp,
        background_model=background_model,
        mask_fit=mask,
    )

    datasets.append(dataset)

# In[ ]:

fit = Fit(datasets)

# In[ ]:

get_ipython().run_cell_magic('time', '', 'result = fit.run()')

# In[ ]:

print(result)

# Best fit parameters:

# In[ ]:

fit.datasets.parameters.to_table()
Example #13
class Analysis:
    """Config-driven high-level analysis interface.

    It is initialized by default with a set of configuration parameters and values declared in
    an internal configuration schema YAML file, though the user can also provide configuration
    parameters passed as a nested dictionary at the moment of instantiation. In that case these
    parameters will overwrite the default values of those present in the configuration file.

    For more info see  :ref:`analysis`.

    Parameters
    ----------
    config : dict or `AnalysisConfig`
        Configuration options following `AnalysisConfig` schema
    """

    def __init__(self, config=None):
        if isinstance(config, dict):
            self._config = AnalysisConfig(config)
        elif isinstance(config, AnalysisConfig):
            self._config = config
        else:
            raise ValueError("Dict or `AnalysiConfig` object required.")

        self._set_logging()
        self.datastore = None
        self.observations = None
        self.datasets = None
        self.model = None
        self.fit = None
        self.fit_result = None
        self.flux_points = None

    @property
    def config(self):
        """Analysis configuration (`AnalysisConfig`)"""
        return self._config

    @property
    def settings(self):
        """Configuration settings for the analysis session."""
        return self.config.settings

    def get_observations(self):
        """Fetch observations from the data store according to criteria defined in the configuration."""
        self.config.validate()
        log.info("Fetching observations.")
        datastore_path = make_path(self.settings["observations"]["datastore"])
        if datastore_path.is_file():
            self.datastore = DataStore.from_file(datastore_path)
        elif datastore_path.is_dir():
            self.datastore = DataStore.from_dir(datastore_path)
        else:
            raise FileNotFoundError(f"Datastore {datastore_path} not found.")
        ids = []
        selection = dict()
        for criteria in self.settings["observations"]["filters"]:
            selected_obs = ObservationTable()

            # TODO: Reduce significantly the code.
            # This block would be handled by datastore.obs_table.select_observations
            selection["type"] = criteria["filter_type"]
            for key, val in criteria.items():
                if key in ["lon", "lat", "radius", "border"]:
                    val = Angle(val)
                selection[key] = val
            if selection["type"] == "angle_box":
                selection["type"] = "par_box"
                selection["value_range"] = Angle(criteria["value_range"])
            if selection["type"] == "sky_circle" or selection["type"].endswith("_box"):
                selected_obs = self.datastore.obs_table.select_observations(selection)
            if selection["type"] == "par_value":
                mask = (
                    self.datastore.obs_table[criteria["variable"]]
                    == criteria["value_param"]
                )
                selected_obs = self.datastore.obs_table[mask]
            if selection["type"] == "ids":
                obs_list = self.datastore.get_observations(criteria["obs_ids"])
                selected_obs["OBS_ID"] = [obs.obs_id for obs in obs_list.list]
            if selection["type"] == "all":
                obs_list = self.datastore.get_observations()
                selected_obs["OBS_ID"] = [obs.obs_id for obs in obs_list.list]

            if len(selected_obs):
                if "exclude" in criteria and criteria["exclude"]:
                    exclude = selected_obs["OBS_ID"].tolist()
                    # use a new name: `selection` is the filter dict reused on the next iteration
                    excluded = np.isin(ids, exclude)
                    ids = list(np.array(ids)[~excluded])
                else:
                    ids.extend(selected_obs["OBS_ID"].tolist())
        self.observations = self.datastore.get_observations(ids, skip_missing=True)
        log.info(f"{len(self.observations.list)} observations were selected.")
        for obs in self.observations.list:
            log.debug(obs)

    def get_datasets(self):
        """Produce reduced datasets."""
        if not self._validate_reduction_settings():
            return False
        if self.settings["datasets"]["dataset-type"] == "SpectrumDatasetOnOff":
            self._spectrum_extraction()
        elif self.settings["datasets"]["dataset-type"] == "MapDataset":
            self._map_making()
        else:
            # TODO raise error?
            log.info("Data reduction method not available.")
            return False

    def set_model(self, model=None, filename=""):
        """Read the model from dict or filename and attach it to datasets.

        Parameters
        ----------
        model: dict or string
            Dictionary or string in YAML format with the serialized model.
        filename : string
            Name of the model YAML file describing the model.
        """
        if not self._validate_set_model():
            return False
        log.info(f"Reading model.")
        if isinstance(model, str):
            model = yaml.safe_load(model)
        if model:
            self.model = SkyModels(dict_to_models(model))
        elif filename:
            filepath = make_path(filename)
            self.model = SkyModels.from_yaml(filepath)
        else:
            return False
        # TODO: Deal with multiple components
        for dataset in self.datasets:
            if isinstance(dataset, MapDataset):
                dataset.model = self.model
            else:
                if len(self.model) > 1:
                    raise ValueError("Cannot fit multiple spectral models")
                dataset.model = self.model[0].spectral_model
        log.info(self.model)

    def run_fit(self, optimize_opts=None):
        """Fitting reduced datasets to model."""
        if not self._validate_fitting_settings():
            return False

        for ds in self.datasets:
            # TODO: fit_range handled in jsonschema validation class
            if "fit" in self.settings and "fit_range" in self.settings["fit"]:
                e_min = u.Quantity(self.settings["fit"]["fit_range"]["min"])
                e_max = u.Quantity(self.settings["fit"]["fit_range"]["max"])
                if isinstance(ds, MapDataset):
                    ds.mask_fit = ds.counts.geom.energy_mask(e_min, e_max)
                else:
                    ds.mask_fit = ds.counts.energy_mask(e_min, e_max)
        log.info("Fitting reduced datasets.")
        self.fit = Fit(self.datasets)
        self.fit_result = self.fit.run(optimize_opts=optimize_opts)
        log.info(self.fit_result)

    def get_flux_points(self, source="source"):
        """Calculate flux points for a specific model component.

        Parameters
        ----------
        source : string
            Name of the model component where to calculate the flux points.
        """
        if not self._validate_fp_settings():
            return False

        # TODO: add "source" to config
        log.info("Calculating flux points.")
        axis_params = self.settings["flux-points"]["fp_binning"]
        e_edges = MapAxis.from_bounds(**axis_params).edges
        flux_point_estimator = FluxPointsEstimator(
            e_edges=e_edges, datasets=self.datasets, source=source
        )
        fp = flux_point_estimator.run()
        fp.table["is_ul"] = fp.table["ts"] < 4
        model = self.model[source].spectral_model.copy()
        self.flux_points = FluxPointsDataset(data=fp, model=model)
        cols = ["e_ref", "ref_flux", "dnde", "dnde_ul", "dnde_err", "is_ul"]
        log.info("\n{}".format(self.flux_points.data.table[cols]))

    @staticmethod
    def _create_geometry(params):
        """Create the geometry."""
        geom_params = copy.deepcopy(params)
        axes = []
        for axis_params in params.get("axes", []):
            ax = MapAxis.from_bounds(**axis_params)
            axes.append(ax)
        geom_params["axes"] = axes
        if "skydir" in geom_params:
            geom_params["skydir"] = tuple(geom_params["skydir"])
        return WcsGeom.create(**geom_params)

    def _map_making(self):
        """Make maps and datasets for 3d analysis."""
        log.info("Creating geometry.")

        geom = self._create_geometry(self.settings["datasets"]["geom"])

        geom_irf = dict(energy_axis_true=None, binsz_irf=None, margin_irf=None)
        if "energy-axis-true" in self.settings["datasets"]:
            axis_params = self.settings["datasets"]["energy-axis-true"]
            geom_irf["energy_axis_true"] = MapAxis.from_bounds(**axis_params)
        geom_irf["binsz_irf"] = self.settings["datasets"].get("binsz", None)
        geom_irf["margin_irf"] = self.settings["datasets"].get("margin", None)

        offset_max = Angle(self.settings["datasets"]["offset-max"])
        log.info("Creating datasets.")

        maker = MapDatasetMaker(offset_max=offset_max)
        maker_safe_mask = SafeMaskMaker(methods=["offset-max"], offset_max=offset_max)

        stacked = MapDataset.create(geom=geom, name="stacked", **geom_irf)

        if self.settings["datasets"]["stack-datasets"]:
            for obs in self.observations:
                log.info(f"Processing observation {obs.obs_id}")
                dataset = maker.run(stacked, obs)
                dataset = maker_safe_mask.run(dataset, obs)
                dataset.background_model.name = f"bkg_{dataset.name}"
                # TODO remove this once dataset and model have unique identifiers
                log.debug(dataset)
                stacked.stack(dataset)
            self._extract_irf_kernels(stacked)
            datasets = [stacked]
        else:
            datasets = []
            for obs in self.observations:
                log.info(f"Processing observation {obs.obs_id}")
                dataset = maker.run(stacked, obs)
                dataset = maker_safe_mask.run(dataset, obs)
                dataset.background_model.name = f"bkg_{dataset.name}"
                # TODO remove this once dataset and model have unique identifiers
                self._extract_irf_kernels(dataset)
                log.debug(dataset)
                datasets.append(dataset)

        self.datasets = Datasets(datasets)

    def _extract_irf_kernels(self, dataset):
        # TODO: remove hard-coded default value
        max_radius = self.settings["datasets"].get("psf-kernel-radius", "0.6 deg")
        # TODO: handle IRF maps in fit
        geom = dataset.counts.geom
        geom_irf = dataset.exposure.geom
        position = geom.center_skydir
        geom_psf = geom.to_image().to_cube(geom_irf.axes)
        dataset.psf = dataset.psf.get_psf_kernel(
            position=position, geom=geom_psf, max_radius=max_radius
        )
        e_reco = geom.get_axis_by_name("energy").edges
        dataset.edisp = dataset.edisp.get_energy_dispersion(
            position=position, e_reco=e_reco
        )

    def _set_logging(self):
        """Set logging parameters for API."""
        logging.basicConfig(**self.settings["general"]["logging"])
        log.info(
            "Setting logging config: {!r}".format(self.settings["general"]["logging"])
        )

    def _spectrum_extraction(self):
        """Run all steps for the spectrum extraction."""
        region = self.settings["datasets"]["geom"]["region"]
        log.info("Reducing spectrum datasets.")
        on_lon = Angle(region["center"][0])
        on_lat = Angle(region["center"][1])
        on_center = SkyCoord(on_lon, on_lat, frame=region["frame"])
        on_region = CircleSkyRegion(on_center, Angle(region["radius"]))

        maker_config = {}
        if "containment_correction" in self.settings["datasets"]:
            maker_config["containment_correction"] = self.settings["datasets"][
                "containment_correction"
            ]
        params = self.settings["datasets"]["geom"]["axes"][0]
        e_reco = MapAxis.from_bounds(**params).edges
        maker_config["e_reco"] = e_reco

        # TODO: remove hard-coded e_true and make it configurable
        maker_config["e_true"] = np.logspace(-2, 2.5, 109) * u.TeV
        maker_config["region"] = on_region

        dataset_maker = SpectrumDatasetMaker(**maker_config)
        bkg_maker_config = {}
        background = self.settings["datasets"]["background"]

        if "exclusion_mask" in background:
            map_hdu = {}
            filename = background["exclusion_mask"]["filename"]
            if "hdu" in background["exclusion_mask"]:
                map_hdu = {"hdu": background["exclusion_mask"]["hdu"]}
            exclusion_region = Map.read(filename, **map_hdu)
            bkg_maker_config["exclusion_mask"] = exclusion_region
        if background["background_estimator"] == "reflected":
            reflected_bkg_maker = ReflectedRegionsBackgroundMaker(**bkg_maker_config)
        else:
            # TODO: raise error?
            log.info("Background estimation only for reflected regions method.")

        safe_mask_maker = SafeMaskMaker(methods=["aeff-default", "aeff-max"])

        datasets = []
        for obs in self.observations:
            log.info(f"Processing observation {obs.obs_id}")
            selection = ["counts", "aeff", "edisp"]
            dataset = dataset_maker.run(obs, selection=selection)
            if reflected_bkg_maker is not None:
                dataset = reflected_bkg_maker.run(dataset, obs)
            dataset = safe_mask_maker.run(dataset, obs)
            log.debug(dataset)
            datasets.append(dataset)

        self.datasets = Datasets(datasets)

        if self.settings["datasets"]["stack-datasets"]:
            stacked = self.datasets.stack_reduce()
            stacked.name = "stacked"
            self.datasets = Datasets([stacked])

    def _validate_reduction_settings(self):
        """Validate settings before proceeding to data reduction."""
        if self.observations and len(self.observations):
            self.config.validate()
            return True
        else:
            log.info("No observations selected.")
            log.info("Data reduction cannot be done.")
            return False

    def _validate_set_model(self):
        if self.datasets and len(self.datasets) != 0:
            self.config.validate()
            return True
        else:
            log.info("No datasets reduced.")
            return False

    def _validate_fitting_settings(self):
        """Validate settings before proceeding to fit 1D."""
        if not self.model:
            log.info("No model fetched for datasets.")
            log.info("Fit cannot be done.")
            return False
        else:
            return True

    def _validate_fp_settings(self):
        """Validate settings before proceeding to flux points estimation."""
        valid = True
        if self.fit:
            self.config.validate()
        else:
            log.info("No results available from fit.")
            valid = False
        if "flux-points" not in self.settings:
            log.info("No values declared for the energy bins.")
            valid = False
        elif "fp_binning" not in self.settings["flux-points"]:
            log.info("No values declared for the energy bins.")
            valid = False
        if not valid:
            log.info("Flux points calculation cannot be done.")
        return valid
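
A minimal, hypothetical driver for the class above, assuming a config dict that satisfies the `AnalysisConfig` schema; the keys and file names shown are illustrative, not taken from the source:

config = {
    "general": {"logging": {"level": "INFO"}},
    "observations": {
        "datastore": "$GAMMAPY_DATA/hess-dl3-dr1",  # assumed datastore path
        "filters": [{"filter_type": "all"}],
    },
    "datasets": {"dataset-type": "SpectrumDatasetOnOff"},  # plus geom/background keys
}

analysis = Analysis(config)
analysis.get_observations()
analysis.get_datasets()
analysis.set_model(filename="model.yaml")  # hypothetical model file
analysis.run_fit()
analysis.get_flux_points(source="source")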
Example #14
def fit(dataset):
    return Fit([dataset], backend="minuit")
Example #15
class TestSpectralFit:
    """Test fit in astrophysical scenario"""
    def setup(self):
        path = "$GAMMAPY_DATA/joint-crab/spectra/hess/"
        obs1 = SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23523.fits")
        obs2 = SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23592.fits")
        self.obs_list = [obs1, obs2]

        self.pwl = PowerLawSpectralModel(index=2,
                                         amplitude=1e-12 *
                                         u.Unit("cm-2 s-1 TeV-1"),
                                         reference=1 * u.TeV)

        self.ecpl = ExpCutoffPowerLawSpectralModel(
            index=2,
            amplitude=1e-12 * u.Unit("cm-2 s-1 TeV-1"),
            reference=1 * u.TeV,
            lambda_=0.1 / u.TeV,
        )

        # Example fit for one observation
        self.obs_list[0].model = self.pwl
        self.fit = Fit(self.obs_list[0])

    def set_model(self, model):
        for obs in self.obs_list:
            obs.model = model

    @requires_dependency("iminuit")
    def test_basic_results(self):
        self.set_model(self.pwl)
        result = self.fit.run()
        pars = self.fit.datasets.parameters

        assert self.pwl is self.obs_list[0].model

        assert_allclose(result.total_stat, 38.343, rtol=1e-3)
        assert_allclose(pars["index"].value, 2.817, rtol=1e-3)
        assert pars["amplitude"].unit == "cm-2 s-1 TeV-1"
        assert_allclose(pars["amplitude"].value, 5.142e-11, rtol=1e-3)
        assert_allclose(self.obs_list[0].npred().data[60], 0.6102, rtol=1e-3)
        pars.to_table()

    def test_basic_errors(self):
        self.set_model(self.pwl)
        result = self.fit.run()
        pars = result.parameters

        assert_allclose(pars.error("index"), 0.1496, rtol=1e-3)
        assert_allclose(pars.error("amplitude"), 6.423e-12, rtol=1e-3)
        pars.to_table()

    def test_compound(self):
        model = self.pwl * 2
        self.set_model(model)
        fit = Fit(self.obs_list[0])
        fit.run()
        pars = fit.datasets.parameters

        assert_allclose(pars["index"].value, 2.8166, rtol=1e-3)
        p = pars["amplitude"]
        assert p.unit == "cm-2 s-1 TeV-1"
        assert_allclose(p.value, 5.0714e-12, rtol=1e-3)

    def test_ecpl_fit(self):
        self.set_model(self.ecpl)
        fit = Fit(self.obs_list[0])
        fit.run()

        actual = fit.datasets.parameters["lambda_"].quantity
        assert actual.unit == "TeV-1"
        assert_allclose(actual.value, 0.145215, rtol=1e-2)

    def test_joint_fit(self):
        self.set_model(self.pwl)
        fit = Fit(self.obs_list)
        fit.run()
        actual = fit.datasets.parameters["index"].value
        assert_allclose(actual, 2.7806, rtol=1e-3)

        actual = fit.datasets.parameters["amplitude"].quantity
        assert actual.unit == "cm-2 s-1 TeV-1"
        assert_allclose(actual.value, 5.200e-11, rtol=1e-3)
Example #16
def test_fov_bkg_maker_with_source_model(obs_dataset, exclusion_mask, caplog):

    test_dataset = obs_dataset.copy(name="test-fov")

    # crab model
    spatial_model = PointSpatialModel(
        lon_0="83.619deg", lat_0="22.024deg", frame="icrs"
    )
    spectral_model = PowerLawSpectralModel(
        index=2.6, amplitude="4.5906e-11 cm-2 s-1 TeV-1", reference="1 TeV"
    )
    model = SkyModel(
        spatial_model=spatial_model, spectral_model=spectral_model, name="test-source"
    )

    bkg_model = FoVBackgroundModel(dataset_name="test-fov")
    test_dataset.models = [model, bkg_model]

    # pre-fit both source and background to get reference model
    Fit().run(test_dataset)
    bkg_model_spec = test_dataset.models[f"{test_dataset.name}-bkg"].spectral_model
    norm_ref = 0.897
    assert not bkg_model_spec.norm.frozen
    assert_allclose(bkg_model_spec.norm.value, norm_ref, rtol=1e-4)
    assert_allclose(bkg_model_spec.tilt.value, 0.0, rtol=1e-4)

    # apply scale method with pre-fitted source model and no exclusion_mask
    bkg_model_spec.norm.value = 1
    fov_bkg_maker = FoVBackgroundMaker(method="scale", exclusion_mask=None)
    dataset = fov_bkg_maker.run(test_dataset)

    bkg_model_spec = test_dataset.models[f"{dataset.name}-bkg"].spectral_model
    assert_allclose(bkg_model_spec.norm.value, norm_ref, rtol=1e-4)
    assert_allclose(bkg_model_spec.tilt.value, 0.0, rtol=1e-4)

    # apply fit method with pre-fitted source model and no exclusion mask
    bkg_model_spec.norm.value = 1
    fov_bkg_maker = FoVBackgroundMaker(method="fit", exclusion_mask=None)
    dataset = fov_bkg_maker.run(test_dataset)

    bkg_model_spec = test_dataset.models[f"{dataset.name}-bkg"].spectral_model
    assert_allclose(bkg_model_spec.norm.value, norm_ref, rtol=1e-4)
    assert_allclose(bkg_model_spec.tilt.value, 0.0, rtol=1e-4)

    # apply scale method with pre-fitted source model and exclusion_mask
    bkg_model_spec.norm.value = 1
    fov_bkg_maker = FoVBackgroundMaker(method="scale", exclusion_mask=exclusion_mask)
    dataset = fov_bkg_maker.run(test_dataset)

    bkg_model_spec = test_dataset.models[f"{dataset.name}-bkg"].spectral_model
    assert_allclose(bkg_model_spec.norm.value, 0.830779, rtol=1e-4)
    assert_allclose(bkg_model_spec.tilt.value, 0.0, rtol=1e-4)

    # apply fit method with pre-fitted source model and exclusion mask
    bkg_model_spec.norm.value = 1
    fov_bkg_maker = FoVBackgroundMaker(method="fit", exclusion_mask=exclusion_mask)
    dataset = fov_bkg_maker.run(test_dataset)

    bkg_model_spec = test_dataset.models[f"{dataset.name}-bkg"].spectral_model
    assert_allclose(bkg_model_spec.norm.value, 0.830779, rtol=1e-4)
    assert_allclose(bkg_model_spec.tilt.value, 0.0, rtol=1e-4)

    # Here we check that source parameters are correctly thawed after fit.
    assert not dataset.models.parameters["index"].frozen
    assert not dataset.models.parameters["lon_0"].frozen

    # test
    model.spectral_model.amplitude.value *= 1e5
    fov_bkg_maker = FoVBackgroundMaker(method="scale")
    dataset = fov_bkg_maker.run(test_dataset)
    assert "WARNING" in [_.levelname for _ in caplog.records]
    message1 = "FoVBackgroundMaker failed. Negative residuals counts for test-fov. Setting mask to False."
    assert message1 in [_.message for _ in caplog.records]
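
As the assertions above illustrate, method="scale" rescales the background norm from the residual counts outside the exclusion mask, while method="fit" fits the background spectral parameters; a minimal wiring sketch, assuming `dataset` and `exclusion_mask` come from earlier reduction steps as in this test:

fov_bkg_maker = FoVBackgroundMaker(method="scale", exclusion_mask=exclusion_mask)
dataset = fov_bkg_maker.run(dataset)
print(dataset.models[f"{dataset.name}-bkg"].spectral_model.norm.value)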
Example #17
def data_fit(stacked):
    # Data fitting
    fit = Fit(optimize_opts={"print_level": 1})
    result = fit.run(datasets=stacked)
    print(result.success)
Example #18
def test_map_fit(sky_model, geom, geom_etrue):
    dataset_1 = get_map_dataset(geom, geom_etrue, name="test-1")
    dataset_2 = get_map_dataset(geom, geom_etrue, name="test-2")
    datasets = Datasets([dataset_1, dataset_2])

    models = Models(datasets.models)
    models.insert(0, sky_model)

    models["test-1-bkg"].spectral_model.norm.value = 0.5
    models["test-model"].spatial_model.sigma.frozen = True

    datasets.models = models
    dataset_2.counts = dataset_2.npred()
    dataset_1.counts = dataset_1.npred()

    models["test-1-bkg"].spectral_model.norm.value = 0.49
    models["test-2-bkg"].spectral_model.norm.value = 0.99

    fit = Fit(datasets)
    result = fit.run()

    assert result.success
    assert "minuit" in repr(result)

    npred = dataset_1.npred().data.sum()
    assert_allclose(npred, 7525.790688, rtol=1e-3)
    assert_allclose(result.total_stat, 21659.2139, rtol=1e-3)

    pars = result.parameters
    assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
    assert_allclose(pars["lon_0"].error, 0.002244, rtol=1e-2)

    assert_allclose(pars["index"].value, 3, rtol=1e-2)
    assert_allclose(pars["index"].error, 0.024277, rtol=1e-2)

    assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
    assert_allclose(pars["amplitude"].error, 4.216154e-13, rtol=1e-2)

    # background norm 1
    assert_allclose(pars[8].value, 0.5, rtol=1e-2)
    assert_allclose(pars[8].error, 0.015811, rtol=1e-2)

    # background norm 2
    assert_allclose(pars[11].value, 1, rtol=1e-2)
    assert_allclose(pars[11].error, 0.02147, rtol=1e-2)

    # test mask_safe evaluation
    mask_safe = geom.energy_mask(energy_min=1 * u.TeV)
    dataset_1.mask_safe = Map.from_geom(geom, data=mask_safe)
    dataset_2.mask_safe = Map.from_geom(geom, data=mask_safe)

    stat = fit.datasets.stat_sum()
    assert_allclose(stat, 14823.579908, rtol=1e-5)

    region = sky_model.spatial_model.to_region()

    initial_counts = dataset_1.counts.copy()
    with mpl_plot_check():
        dataset_1.plot_residuals(kwargs_spectral=dict(region=region))

    # check dataset has not changed
    assert initial_counts == dataset_1.counts

    # test model evaluation outside image
    dataset_1.models[0].spatial_model.lon_0.value = 150
    dataset_1.npred()
    assert not dataset_1._evaluators["test-model"].contributes
Example #19
def _setup_fit(self, datasets):
    # TODO: make fit stateless and configurable
    if self._fit is None or datasets is not self._fit.datasets:
        self._fit = Fit(datasets)
Example #20
def test_fit_pwl_sherpa(self, dataset):
    fit = Fit(backend="sherpa", optimize_opts={"method": "simplex"})
    result = fit.optimize(datasets=[dataset])
    self.assert_result(result, dataset.models)
Example #21
def test_map_fit(sky_model, geom, geom_etrue):
    dataset_1 = get_map_dataset(sky_model,
                                geom,
                                geom_etrue,
                                evaluation_mode="local",
                                name="test-1")
    dataset_1.background_model.norm.value = 0.5
    dataset_1.counts = dataset_1.npred()

    dataset_2 = get_map_dataset(sky_model,
                                geom,
                                geom_etrue,
                                evaluation_mode="global",
                                name="test-2")
    dataset_2.counts = dataset_2.npred()

    sky_model.parameters["sigma"].frozen = True

    dataset_1.background_model.norm.value = 0.49
    dataset_2.background_model.norm.value = 0.99

    fit = Fit([dataset_1, dataset_2])
    result = fit.run()

    assert result.success
    assert "minuit" in repr(result)

    npred = dataset_1.npred().data.sum()
    assert_allclose(npred, 7525.790688, rtol=1e-3)
    assert_allclose(result.total_stat, 21700.253246, rtol=1e-3)

    pars = result.parameters
    assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
    assert_allclose(pars["lon_0"].error, 0.002244, rtol=1e-2)

    assert_allclose(pars["index"].value, 3, rtol=1e-2)
    assert_allclose(pars["index"].error, 0.024277, rtol=1e-2)

    assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
    assert_allclose(pars["amplitude"].error, 4.216154e-13, rtol=1e-2)

    # background norm 1
    assert_allclose(pars[8].value, 0.5, rtol=1e-2)
    assert_allclose(pars[8].error, 0.015811, rtol=1e-2)

    # background norm 2
    assert_allclose(pars[11].value, 1, rtol=1e-2)
    assert_allclose(pars[11].error, 0.02147, rtol=1e-2)

    # test mask_safe evaluation
    mask_safe = geom.energy_mask(emin=1 * u.TeV)
    dataset_1.mask_safe = Map.from_geom(geom, data=mask_safe)
    dataset_2.mask_safe = Map.from_geom(geom, data=mask_safe)

    stat = fit.datasets.stat_sum()
    assert_allclose(stat, 14824.173099, rtol=1e-5)

    region = sky_model.spatial_model.to_region()

    with mpl_plot_check():
        dataset_1.plot_residuals(region=region)

    # test model evaluation outside image
    dataset_1.models[0].spatial_model.lon_0.value = 150
    dataset_1.npred()
    assert not dataset_1._evaluators[dataset_1.models[0]].contributes
Example #22
# In[ ]:


pwl = PowerLawSpectralModel(
    index=2, amplitude="1e-12 cm-2 s-1 TeV-1", reference="1 TeV"
)


# After creating the model we run the fit by passing the `'flux_points'` and `'pwl'` objects:

# In[ ]:


dataset_pwl = FluxPointsDataset(pwl, flux_points, likelihood="chi2assym")
fitter = Fit(dataset_pwl)
result_pwl = fitter.run()


# And print the result:

# In[ ]:


print(result_pwl)


# In[ ]:


print(pwl)
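
A possible follow-up, assuming a gammapy version in which `FluxPointsDataset` provides `plot_fit` for overlaying the best-fit model on the flux points:

import matplotlib.pyplot as plt

dataset_pwl.plot_fit()
plt.show()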
Example #23
def test_datasets_to_io(tmp_path):
    filedata = "$GAMMAPY_DATA/tests/models/gc_example_datasets.yaml"
    filemodel = "$GAMMAPY_DATA/tests/models/gc_example_models.yaml"

    datasets = Datasets.read(filedata, filemodel)

    assert len(datasets) == 2

    dataset0 = datasets[0]
    assert dataset0.name == "gc"
    assert dataset0.counts.data.sum() == 6824
    assert_allclose(dataset0.exposure.data.sum(), 2072125400000.0, atol=0.1)
    assert dataset0.psf is not None
    assert dataset0.edisp is not None

    assert_allclose(dataset0.background_model.evaluate().data.sum(), 4094.2, atol=0.1)

    assert dataset0.background_model.name == "background_irf_gc"

    dataset1 = datasets[1]
    assert dataset1.name == "g09"
    assert dataset1.background_model.name == "background_irf_g09"

    assert (
        dataset0.models["gll_iem_v06_cutout"] == dataset1.models["gll_iem_v06_cutout"]
    )

    assert isinstance(dataset0.models, Models)
    assert len(dataset0.models) == 5
    assert dataset0.models[0].name == "gc"
    assert dataset0.models[1].name == "gll_iem_v06_cutout"

    assert (
        dataset0.models["background_irf_gc"].parameters["norm"]
        is dataset1.models["background_irf_g09"].parameters["norm"]
    )

    assert (
        dataset0.models["gc"].parameters["reference"]
        is dataset1.models["g09"].parameters["reference"]
    )
    assert_allclose(dataset1.models["g09"].parameters["lon_0"].value, 0.9, atol=0.1)

    datasets.write(tmp_path, prefix="written")
    datasets_read = Datasets.read(
        tmp_path / "written_datasets.yaml", tmp_path / "written_models.yaml"
    )

    assert len(datasets.parameters) == 21

    assert len(datasets_read) == 2
    dataset0 = datasets_read[0]
    assert dataset0.counts.data.sum() == 6824
    assert_allclose(dataset0.exposure.data.sum(), 2072125400000.0, atol=0.1)
    assert dataset0.psf is not None
    assert dataset0.edisp is not None
    assert_allclose(dataset0.background_model.evaluate().data.sum(), 4094.2, atol=0.1)

    Fit(datasets).run()
    assert_allclose(
        datasets.models["background_irf_g09"].covariance,
        datasets.models["background_irf_gc"].covariance,
    )
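
Mirroring the round trip exercised by this test, a minimal standalone sketch; the output directory name is illustrative:

# Serialize datasets and models to YAML, then read them back.
datasets.write("out", prefix="written")
datasets_back = Datasets.read("out/written_datasets.yaml", "out/written_models.yaml")
assert len(datasets_back) == len(datasets)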
Example #24
def data_fit(datasets):
    fit = Fit(datasets)
    result = fit.run(optimize_opts={"print_level": 1})
Example #25
def data_fit(datasets):
    fit = Fit(datasets)
    result = fit.run()
Example #26
fig, ax, cbar = npred.sum_over_axes().plot(add_cbar=True)
ax.scatter(
    [lon_0_1, lon_0_2, pointing.galactic.l.degree],
    [lat_0_1, lat_0_2, pointing.galactic.b.degree],
    transform=ax.get_transform("galactic"),
    marker="+",
    color="cyan",
)
# plt.show()
plt.clf()

dataset.fake()

dataset.counts.sum_over_axes().plot()
# plt.show()
plt.clf()

models.parameters.set_error(spatial_model_1.lon_0, 0.1 * u.deg)
models.parameters.set_error(spatial_model_1.lat_0, 0.1 * u.deg)

models.parameters.set_error(spatial_model_2.lon_0, 0.1 * u.deg)
models.parameters.set_error(spatial_model_2.lat_0, 0.1 * u.deg)

models.parameters.set_error(spectral_model_1.amplitude,
                            1e-12 * u.Unit("cm-2 s-1 TeV-1"))
models.parameters.set_error(spectral_model_2.amplitude,
                            1e-12 * u.Unit("cm-2 s-1 TeV-1"))

fit = Fit(dataset)
fit.run()
Example #27
class Analysis:
    """Config-driven high-level analysis interface.

    It is initialized by default with a set of configuration parameters and values declared in
    an internal high-level interface model, though the user can also provide configuration
    parameters passed as a nested dictionary at the moment of instantiation. In that case these
    parameters will overwrite the default values of those present in the configuration file.

    For more info see  :ref:`analysis`.

    Parameters
    ----------
    config : dict or `AnalysisConfig`
        Configuration options following `AnalysisConfig` schema
    """
    def __init__(self, config):
        self.config = config
        self.config.set_logging()
        self.datastore = None
        self.observations = None
        self.datasets = None
        self.models = None
        self.fit = None
        self.fit_result = None
        self.flux_points = None

    @property
    def config(self):
        """Analysis configuration (`AnalysisConfig`)"""
        return self._config

    @config.setter
    def config(self, value):
        if isinstance(value, dict):
            self._config = AnalysisConfig(**value)
        elif isinstance(value, AnalysisConfig):
            self._config = value
        else:
            raise TypeError("config must be dict or AnalysisConfig.")

    def get_observations(self):
        """Fetch observations from the data store according to criteria defined in the configuration."""
        observations_settings = self.config.observations
        path = make_path(observations_settings.datastore)
        if path.is_file():
            self.datastore = DataStore.from_file(path)
        elif path.is_dir():
            self.datastore = DataStore.from_dir(path)
        else:
            raise FileNotFoundError(f"Datastore not found: {path}")

        log.info("Fetching observations.")
        if (len(observations_settings.obs_ids)
                and observations_settings.obs_file is not None):
            raise ValueError(
                "Provide only one of the parameters obs_ids and obs_file."
            )
        elif (not len(observations_settings.obs_ids)
              and observations_settings.obs_file is None):
            obs_list = self.datastore.get_observations()
            ids = [obs.obs_id for obs in obs_list]
        elif len(observations_settings.obs_ids):
            obs_list = self.datastore.get_observations(
                observations_settings.obs_ids)
            ids = [obs.obs_id for obs in obs_list]
        else:
            path = make_path(observations_settings.obs_file)
            ids = list(
                Table.read(path, format="ascii", data_start=0).columns[0])

        if observations_settings.obs_cone.lon is not None:
            cone = dict(
                type="sky_circle",
                frame=observations_settings.obs_cone.frame,
                lon=observations_settings.obs_cone.lon,
                lat=observations_settings.obs_cone.lat,
                radius=observations_settings.obs_cone.radius,
                border="0 deg",
            )
            selected_cone = self.datastore.obs_table.select_observations(cone)
            ids = list(set(ids) & set(selected_cone["OBS_ID"].tolist()))
        self.observations = self.datastore.get_observations(ids,
                                                            skip_missing=True)
        if observations_settings.obs_time.start is not None:
            start = observations_settings.obs_time.start
            stop = observations_settings.obs_time.stop
            self.observations = self.observations.select_time([(start, stop)])
        log.info(f"Number of selected observations: {len(self.observations)}")
        for obs in self.observations:
            log.debug(obs)

    def get_datasets(self):
        """Produce reduced datasets."""
        datasets_settings = self.config.datasets
        if not self.observations or len(self.observations) == 0:
            raise RuntimeError("No observations have been selected.")

        if datasets_settings.type == "1d":
            self._spectrum_extraction()
        else:  # 3d
            self._map_making()

    def set_models(self, models):
        """Set models on datasets.

        Adds a `FoVBackgroundModel` to each dataset if not already present.

        Parameters
        ----------
        models : `~gammapy.modeling.models.Models` or str
            Models object or YAML models string
        """
        if not self.datasets or len(self.datasets) == 0:
            raise RuntimeError("Missing datasets")

        log.info(f"Reading model.")
        if isinstance(models, str):
            self.models = Models.from_yaml(models)
        elif isinstance(models, Models):
            self.models = models
        else:
            raise TypeError(f"Invalid type: {models!r}")

        self.models.extend(self.datasets.models)

        if self.config.datasets.type == "3d":
            for dataset in self.datasets:
                if dataset.background_model is None:
                    # add a FoV background model only where none is present yet
                    bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
                    self.models.append(bkg_model)

        self.datasets.models = self.models

        log.info(self.models)

    def read_models(self, path):
        """Read models from YAML file."""
        path = make_path(path)
        models = Models.read(path)
        self.set_models(models)

    def run_fit(self, optimize_opts=None):
        """Fitting reduced datasets to model."""
        if not self.models:
            raise RuntimeError("Missing models")

        fit_settings = self.config.fit
        for dataset in self.datasets:
            if fit_settings.fit_range:
                energy_min = fit_settings.fit_range.min
                energy_max = fit_settings.fit_range.max
                geom = dataset.counts.geom
                data = geom.energy_mask(energy_min, energy_max)
                dataset.mask_fit = Map.from_geom(geom=geom, data=data)

        log.info("Fitting datasets.")
        self.fit = Fit(self.datasets)
        self.fit_result = self.fit.run(optimize_opts=optimize_opts)
        log.info(self.fit_result)

    def get_flux_points(self):
        """Calculate flux points for a specific model component."""
        if not self.fit:
            raise RuntimeError("No results available from Fit.")

        fp_settings = self.config.flux_points
        log.info("Calculating flux points.")
        energy_edges = self._make_energy_axis(fp_settings.energy).edges
        flux_point_estimator = FluxPointsEstimator(
            energy_edges=energy_edges,
            source=fp_settings.source,
            **fp_settings.parameters,
        )
        fp = flux_point_estimator.run(datasets=self.datasets)
        fp.table["is_ul"] = fp.table["ts"] < 4
        self.flux_points = FluxPointsDataset(
            data=fp, models=self.models[fp_settings.source])
        cols = ["e_ref", "ref_flux", "dnde", "dnde_ul", "dnde_err", "is_ul"]
        log.info("\n{}".format(self.flux_points.data.table[cols]))

    def update_config(self, config):
        self.config = self.config.update(config=config)

    def _create_geometry(self):
        """Create the geometry."""
        geom_params = {}
        geom_settings = self.config.datasets.geom
        skydir_settings = geom_settings.wcs.skydir
        if skydir_settings.lon is not None:
            skydir = SkyCoord(skydir_settings.lon,
                              skydir_settings.lat,
                              frame=skydir_settings.frame)
            geom_params["skydir"] = skydir
        if skydir_settings.frame == "icrs":
            geom_params["frame"] = "icrs"
        if skydir_settings.frame == "galactic":
            geom_params["frame"] = "galactic"
        axes = [self._make_energy_axis(geom_settings.axes.energy)]
        geom_params["axes"] = axes
        geom_params["binsz"] = geom_settings.wcs.binsize
        width = geom_settings.wcs.fov.width.to("deg").value
        height = geom_settings.wcs.fov.height.to("deg").value
        geom_params["width"] = (width, height)
        return WcsGeom.create(**geom_params)

    def _map_making(self):
        """Make maps and datasets for 3d analysis."""
        datasets_settings = self.config.datasets
        log.info("Creating geometry.")
        geom = self._create_geometry()
        geom_settings = datasets_settings.geom
        geom_irf = dict(energy_axis_true=None, binsz_irf=None)
        if geom_settings.axes.energy_true.min is not None:
            geom_irf["energy_axis_true"] = self._make_energy_axis(
                geom_settings.axes.energy_true, name="energy_true")
        geom_irf["binsz_irf"] = geom_settings.wcs.binsize_irf.to("deg").value
        offset_max = geom_settings.selection.offset_max
        log.info("Creating datasets.")

        maker = MapDatasetMaker(selection=datasets_settings.map_selection)

        safe_mask_selection = datasets_settings.safe_mask.methods
        safe_mask_settings = datasets_settings.safe_mask.parameters
        maker_safe_mask = SafeMaskMaker(methods=safe_mask_selection,
                                        **safe_mask_settings)

        bkg_maker_config = {}
        if datasets_settings.background.exclusion:
            exclusion_region = Map.read(datasets_settings.background.exclusion)
            bkg_maker_config["exclusion_mask"] = exclusion_region
        bkg_maker_config.update(datasets_settings.background.parameters)

        bkg_method = datasets_settings.background.method
        if bkg_method == "fov_background":
            log.debug(
                f"Creating FoVBackgroundMaker with arguments {bkg_maker_config}"
            )
            bkg_maker = FoVBackgroundMaker(**bkg_maker_config)
        elif bkg_method == "ring":
            bkg_maker = RingBackgroundMaker(**bkg_maker_config)
            log.debug(
                f"Creating RingBackgroundMaker with arguments {bkg_maker_config}"
            )
            if datasets_settings.geom.axes.energy.nbins > 1:
                raise ValueError(
                    "You need to define a single-bin energy geometry for your dataset."
                )
        else:
            bkg_maker = None
            log.warning("No background maker set for 3d analysis. Check configuration.")

        stacked = MapDataset.create(geom=geom, name="stacked", **geom_irf)

        if datasets_settings.stack:
            for obs in self.observations:
                log.info(f"Processing observation {obs.obs_id}")
                cutout = stacked.cutout(obs.pointing_radec,
                                        width=2 * offset_max)
                dataset = maker.run(cutout, obs)
                dataset = maker_safe_mask.run(dataset, obs)
                if bkg_maker is not None:
                    dataset = bkg_maker.run(dataset)

                if bkg_method == "ring":
                    dataset = dataset.to_map_dataset()

                log.debug(dataset)
                stacked.stack(dataset)
            datasets = [stacked]
        else:
            datasets = []

            for obs in self.observations:
                log.info(f"Processing observation {obs.obs_id}")
                cutout = stacked.cutout(obs.pointing_radec,
                                        width=2 * offset_max)
                dataset = maker.run(cutout, obs)
                dataset = maker_safe_mask.run(dataset, obs)
                if bkg_maker is not None:
                    dataset = bkg_maker.run(dataset)
                log.debug(dataset)
                datasets.append(dataset)

        self.datasets = Datasets(datasets)

    def _spectrum_extraction(self):
        """Run all steps for the spectrum extraction."""
        log.info("Reducing spectrum datasets.")
        datasets_settings = self.config.datasets
        on_lon = datasets_settings.on_region.lon
        on_lat = datasets_settings.on_region.lat
        on_center = SkyCoord(on_lon,
                             on_lat,
                             frame=datasets_settings.on_region.frame)
        on_region = CircleSkyRegion(on_center,
                                    datasets_settings.on_region.radius)

        maker_config = {}
        if datasets_settings.containment_correction:
            maker_config["containment_correction"] = (
                datasets_settings.containment_correction
            )
        e_reco = self._make_energy_axis(datasets_settings.geom.axes.energy)

        maker_config["selection"] = ["counts", "exposure", "edisp"]
        dataset_maker = SpectrumDatasetMaker(**maker_config)

        bkg_maker_config = {}
        if datasets_settings.background.exclusion:
            exclusion_region = Map.read(datasets_settings.background.exclusion)
            bkg_maker_config["exclusion_mask"] = exclusion_region
        bkg_maker_config.update(datasets_settings.background.parameters)
        bkg_method = datasets_settings.background.method
        if bkg_method == "reflected":
            bkg_maker = ReflectedRegionsBackgroundMaker(**bkg_maker_config)
            log.debug(
                f"Creating ReflectedRegionsBackgroundMaker with arguments {bkg_maker_config}"
            )
        else:
            bkg_maker = None
            log.warning("No background maker set for 1d analysis. Check configuration.")

        safe_mask_selection = datasets_settings.safe_mask.methods
        safe_mask_settings = datasets_settings.safe_mask.parameters
        safe_mask_maker = SafeMaskMaker(methods=safe_mask_selection,
                                        **safe_mask_settings)

        e_true = self._make_energy_axis(
            datasets_settings.geom.axes.energy_true, name="energy_true")

        geom = RegionGeom.create(region=on_region, axes=[e_reco])
        reference = SpectrumDataset.create(geom=geom, energy_axis_true=e_true)

        datasets = []
        for obs in self.observations:
            log.info(f"Processing observation {obs.obs_id}")
            dataset = dataset_maker.run(reference.copy(), obs)
            if bkg_maker is not None:
                dataset = bkg_maker.run(dataset, obs)
                if dataset.counts_off is None:
                    log.info(
                        f"No OFF region found for observation {obs.obs_id}. Discarding."
                    )
                    continue
            dataset = safe_mask_maker.run(dataset, obs)
            log.debug(dataset)
            datasets.append(dataset)

        self.datasets = Datasets(datasets)

        if datasets_settings.stack:
            stacked = self.datasets.stack_reduce(name="stacked")
            self.datasets = Datasets([stacked])

    @staticmethod
    def _make_energy_axis(axis, name="energy"):
        return MapAxis.from_bounds(
            name=name,
            lo_bnd=axis.min.value,
            hi_bnd=axis.max.to_value(axis.min.unit),
            nbin=axis.nbins,
            unit=axis.min.unit,
            interp="log",
            node_type="edges",
        )
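
For comparison with Example #13, a hypothetical driver for this newer config-driven interface, assuming a YAML file following the `AnalysisConfig` schema; the file names are illustrative:

config = AnalysisConfig.read("config.yaml")  # hypothetical config file
analysis = Analysis(config)
analysis.get_observations()
analysis.get_datasets()
analysis.read_models("models.yaml")          # hypothetical models file
analysis.run_fit()
analysis.get_flux_points()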
Example #28
    def run_region(self, kr, lon, lat, radius):
        #    TODO: for now we have to read/create the all-sky maps in each job
        #    because we can't pickle <functools._lru_cache_wrapper> objects;
        #    move this back to __init__ when fixed

        # exposure
        exposure_hpx = Map.read(
            self.datadir + "/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz")
        exposure_hpx.unit = "cm2 s"

        # background iem
        infile = self.datadir + "/catalogs/fermi/gll_iem_v06.fits.gz"
        outfile = self.resdir + "/gll_iem_v06_extra.fits"
        model_iem = extrapolate_iem(infile, outfile, self.logEc_extra)

        # ROI
        roi_time = time()
        ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
        width = 2 * (radius + self.psf_margin)

        # Counts
        counts = Map.create(
            skydir=ROI_pos,
            width=width,
            proj="CAR",
            coordsys="GAL",
            binsz=self.dlb,
            axes=[self.energy_axis],
            dtype=float,
        )
        counts.fill_by_coord({
            "skycoord": self.events.radec,
            "energy": self.events.energy
        })

        axis = MapAxis.from_nodes(counts.geom.axes[0].center,
                                  name="energy",
                                  unit="GeV",
                                  interp="log")
        wcs = counts.geom.wcs
        geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
        coords = counts.geom.get_coord()

        # expo
        data = exposure_hpx.interp_by_coord(coords)
        exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

        # read PSF
        psf_kernel = PSFKernel.from_table_psf(self.psf,
                                              counts.geom,
                                              max_radius=self.psf_margin *
                                              u.deg)

        # Energy Dispersion
        e_true = exposure.geom.axes[0].edges
        e_reco = counts.geom.axes[0].edges
        edisp = EnergyDispersion.from_diagonal_response(e_true=e_true,
                                                        e_reco=e_reco)

        # fit mask
        if coords["lon"].min() < 90 * u.deg and coords["lon"].max(
        ) > 270 * u.deg:
            coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
        mask = (
            (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
            & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
            & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
            & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg))
        mask_fermi = WcsNDMap(counts.geom, mask)

        # IEM
        eval_iem = MapEvaluator(model=model_iem,
                                exposure=exposure,
                                psf=psf_kernel,
                                edisp=edisp)
        bkg_iem = eval_iem.compute_npred()

        # ISO
        eval_iso = MapEvaluator(model=self.model_iso,
                                exposure=exposure,
                                edisp=edisp)
        bkg_iso = eval_iso.compute_npred()

        # merge iem and iso, only one local normalization is fitted
        background_total = bkg_iem + bkg_iso
        background_model = BackgroundModel(background_total)
        background_model.parameters["norm"].min = 0.0

        # Sources model
        in_roi = self.FHL3.positions.galactic.contained_by(wcs)
        FHL3_roi = []
        for ks in range(len(self.FHL3.table)):
            if in_roi[ks]:
                model = self.FHL3[ks].sky_model()
                model.spatial_model.parameters.freeze_all()  # freeze spatial
                model.spectral_model.parameters["amplitude"].min = 0.0
                if isinstance(model.spectral_model, PowerLawSpectralModel):
                    model.spectral_model.parameters["index"].min = 0.1
                    model.spectral_model.parameters["index"].max = 10.0
                else:
                    model.spectral_model.parameters["alpha"].min = 0.1
                    model.spectral_model.parameters["alpha"].max = 10.0

                FHL3_roi.append(model)
        model_total = SkyModels(FHL3_roi)

        # Dataset
        dataset = MapDataset(
            model=model_total,
            counts=counts,
            exposure=exposure,
            psf=psf_kernel,
            edisp=edisp,
            background_model=background_model,
            mask_fit=mask_fermi,
            name="3FHL_ROI_num" + str(kr),
        )
        cat_stat = dataset.stat_sum()

        datasets = Datasets([dataset])
        fit = Fit(datasets)
        results = fit.run(optimize_opts=self.optimize_opts)
        print("ROI_num", str(kr), "\n", results)
        fit_stat = datasets.stat_sum()

        if results.message != "Optimization failed.":
            datasets.to_yaml(path=Path(self.resdir),
                             prefix=dataset.name,
                             overwrite=True)
            np.save(
                self.resdir + "/3FHL_ROI_num" + str(kr) + "_covariance.npy",
                results.parameters.covariance,
            )
            np.savez(
                self.resdir + "/3FHL_ROI_num" + str(kr) + "_fit_infos.npz",
                message=results.message,
                stat=[cat_stat, fit_stat],
            )

            exec_time = time() - roi_time
            print("ROI", kr, " time (s): ", exec_time)

            for model in FHL3_roi:
                if (self.FHL3[model.name].data["ROI_num"] == kr
                        and self.FHL3[model.name].data["Signif_Avg"] >=
                        self.sig_cut):
                    flux_points = FluxPointsEstimator(
                        datasets=datasets,
                        e_edges=self.El_flux,
                        source=model.name,
                        sigma_ul=2.0,
                    ).run()
                    filename = self.resdir + "/" + model.name + "_flux_points.fits"
                    flux_points.write(filename, overwrite=True)

            exec_time = time() - roi_time - exec_time
            print("ROI", kr, " Flux points time (s): ", exec_time)
Example #29
def test_fit_pwl_minuit(self, dataset):
    fit = Fit()
    result = fit.run(dataset)
    self.assert_result(result)
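
Note the API difference relative to the earlier examples: here `Fit` is constructed without datasets, which are passed to `run()` instead. Both call styles appear in this collection and correspond to different gammapy versions:

fit = Fit([dataset])   # older API: datasets bound at construction
result = fit.run()

fit = Fit()            # newer API: datasets passed at run time
result = fit.run(dataset)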
Example #30
# In[ ]:

model = SkyModel(
    PointSpatialModel("0 deg", "0 deg", frame="galactic"),
    PowerLawSpectralModel(index=2.5,
                          amplitude="1e-11 cm-2 s-1 TeV-1",
                          reference="100 GeV"),
)

model_total = SkyModels([model, model_diffuse, model_iso])

dataset = MapDataset(model=model_total,
                     counts=counts,
                     exposure=exposure,
                     psf=psf_kernel)
fit = Fit(dataset)
result = fit.run()

# In[ ]:

print(result)

# In[ ]:

dataset.parameters.to_table()

# In[ ]:

residual = counts - dataset.npred()
residual.sum_over_axes().smooth("0.1 deg").plot(cmap="coolwarm",
                                                vmin=-3,