Example #1
    def run(self, dataset):
        """Make the profiles

        Parameters
        ----------
        dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`
            the dataset to use for profile extraction

        Returns
        --------
        imageprofile : `~gammapy.estimators.ImageProfile`
            Return an image profile class containing the result
        """
        if self.energy_edges is not None:
            axis = MapAxis.from_energy_edges(self.energy_edges)
            dataset = dataset.resample_energy_axis(energy_axis=axis)
        else:
            dataset = dataset.to_image()

        spectrum_datasets = self.get_spectrum_datasets(dataset)

        results = self.make_prof(spectrum_datasets)
        table = table_from_row_data(results)
        if isinstance(self.regions[0], RectangleSkyRegion):
            table.meta["PROFILE_TYPE"] = "orthogonal_rectangle"
        table.meta["SPECTRAL_MODEL"] = self.spectrum.to_dict()

        # return ImageProfile(table)
        return table
Example #2
def fit_gather(model_name, livetime, binned=False):
    rows = []

    path = (
        BASE_PATH /
        f"results/models/{model_name}/fit_{livetime.value:.0f}{livetime.unit}")
    if binned:
        path = Path(str(path).replace("/fit", "/fit_fake"))

    for filename in path.glob("*.yaml"):
        # model_best_fit = read_best_fit_model(filename)
        model_best_fit = Models.read(filename)
        model_best_fit = model_best_fit[model_name]
        row = {}

        for par in model_best_fit.parameters:
            row[par.name] = par.value
            row[par.name + "_err"] = par.error

        rows.append(row)

    table = table_from_row_data(rows)
    name = f"fit-results-all_{livetime.value:.0f}{livetime.unit}"
    if binned:
        name = "fit_binned-results-all"
    filename = f"results/models/{model_name}/{name}.fits.gz"
    log.info(f"Writing {filename}")
    table.write(str(filename), overwrite=True)
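A self-contained sketch of the gather-and-write pattern used above, built directly on `astropy.table.Table`; the parameter names, values, and output path here are hypothetical:

# Hedged sketch: collect per-fit parameter values into row dicts, build a
# table, and write it out (names, values, and the path are invented).
from astropy.table import Table

rows = []
for index, index_err in [(2.3, 0.1), (2.4, 0.1)]:  # hypothetical fit results
    rows.append({"index": index, "index_err": index_err})

table = Table(rows=rows)
table.write("fit-results-all.fits.gz", overwrite=True)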
Example #3
    def run(self, datasets):
        """Run the flux point estimator for all energy groups.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.Dataset`
            Datasets

        Returns
        -------
        flux_points : `FluxPoints`
            Estimated flux points.
        """
        datasets = Datasets(datasets).copy()

        rows = []

        for energy_min, energy_max in zip(
            self.energy_edges[:-1], self.energy_edges[1:]
        ):
            row = self.estimate_flux_point(
                datasets, energy_min=energy_min, energy_max=energy_max,
            )
            rows.append(row)

        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})

        model = datasets.models[self.source]
        return FluxPoints(table, reference_spectral_model=model.spectral_model.copy())
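The energy-bin pairing in the loop above can be shown in isolation; a minimal sketch with made-up edges, independent of the estimator API:

# Consecutive energy edges paired into (min, max) bins, as in run() above.
import astropy.units as u

energy_edges = [1, 3, 10, 30] * u.TeV
for energy_min, energy_max in zip(energy_edges[:-1], energy_edges[1:]):
    print(f"flux point bin: {energy_min} .. {energy_max}")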
Example #4
    def run(self, steps="all"):
        """Run the flux point estimator for all energy groups.

        Returns
        -------
        flux_points : `FluxPoints`
            Estimated flux points.
        steps : list of str
            Which steps to execute. See `estimate_flux_point` for details
            and available options.
        """
        rows = []

        for e_group in self.e_groups:
            if e_group["bin_type"].strip() != "normal":
                log.debug(
                    "Skipping under-/ overflow bin in flux point estimation.")
                continue

            row = self.estimate_flux_point(e_group, steps=steps)
            rows.append(row)

        meta = OrderedDict([("SED_TYPE", "likelihood")])
        table = table_from_row_data(rows=rows, meta=meta)
        return FluxPoints(table).to_sed_type("dnde")
Example #5
    def info_table(self, cumulative=False, region=None):
        """Get info table for datasets.

        Parameters
        ----------
        cumulative : bool
            Cumulate info across all observations.

        Returns
        -------
        info_table : `~astropy.table.Table`
            Info table.
        """
        if not self.is_all_same_type:
            raise ValueError("Info table not supported for mixed dataset type.")

        stacked = self[0].copy(name=self[0].name)

        rows = [stacked.info_dict()]

        for dataset in self[1:]:
            if cumulative:
                stacked.stack(dataset)
                row = stacked.info_dict()
            else:
                row = dataset.info_dict()

            rows.append(row)

        return table_from_row_data(rows=rows)
Example #6
    def run(self, datasets):
        """Run the flux point estimator for all energy groups.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.Dataset`
            Datasets

        Returns
        -------
        flux_points : `FluxPoints`
            Estimated flux points.
        """
        datasets = Datasets(datasets).copy()

        rows = []

        for e_min, e_max in zip(self.e_edges[:-1], self.e_edges[1:]):
            row = self.estimate_flux_point(datasets, e_min=e_min, e_max=e_max)
            rows.append(row)

        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})

        # TODO: this should be changed once likelihood is fully supported
        return FluxPoints(table).to_sed_type("dnde")
Example #7
    def optimize(self, datasets):
        """Run the optimization.

        Parameters
        ----------
        datasets : `Datasets` or list of `Dataset`
            Datasets to optimize.

        Returns
        -------
        optimize_result : `OptimizeResult`
            Optimization result
        """
        datasets, parameters = self._parse_datasets(datasets=datasets)
        datasets.parameters.check_limits()

        parameters.autoscale()

        kwargs = self.optimize_opts.copy()
        backend = kwargs.pop("backend", self.backend)

        compute = registry.get("optimize", backend)
        # TODO: change this calling interface!
        # probably should pass a fit statistic, which has a model, which has parameters
        # and return something simpler, not a tuple of three things
        factors, info, optimizer = compute(
            parameters=parameters,
            function=datasets.stat_sum,
            store_trace=self.store_trace,
            **kwargs,
        )

        if backend == "minuit":
            self._minuit = optimizer
            kwargs["method"] = "migrad"

        trace = table_from_row_data(info.pop("trace"))

        if self.store_trace:
            idx = [
                parameters.index(par)
                for par in parameters.unique_parameters.free_parameters
            ]
            unique_names = np.array(
                datasets.models.parameters_unique_names)[idx]
            trace.rename_columns(trace.colnames[1:], list(unique_names))

        # Copy final results into the parameters object
        parameters.set_parameter_factors(factors)
        parameters.check_limits()
        return OptimizeResult(
            parameters=parameters,
            total_stat=datasets.stat_sum(),
            backend=backend,
            method=kwargs.get("method", backend),
            trace=trace,
            **info,
        )
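The trace-column renaming after the optimizer call uses `astropy.table.Table.rename_columns`; a small standalone illustration with invented column and parameter names:

# Hedged illustration: rename all trace columns except the first one,
# mirroring the step above (column and parameter names are made up).
from astropy.table import Table

trace = Table(rows=[{"total_stat": 10.0, "par_0": 1.0, "par_1": 2.0}])
unique_names = ["model.index", "model.amplitude"]
trace.rename_columns(trace.colnames[1:], list(unique_names))
print(trace.colnames)  # ['total_stat', 'model.index', 'model.amplitude']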
Example #8
    def to_table(self):
        """Convert parameter attributes to `~astropy.table.Table`."""
        rows = [p.to_dict() for p in self._parameters]
        table = table_from_row_data(rows)

        for name in ["value", "error", "min", "max"]:
            table[name].format = ".3e"

        return table
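For context, the row-dict-to-table pattern behind `to_table` can be reproduced with a plain `astropy.table.Table`; a minimal sketch with hypothetical parameter rows:

# Hedged sketch: build a table from per-parameter row dicts and apply the
# same column formatting as above (the rows are invented for illustration).
from astropy.table import Table

rows = [
    {"name": "index", "value": 2.3, "error": 0.1, "min": 1.0, "max": 5.0},
    {"name": "amplitude", "value": 1e-12, "error": 2e-13, "min": 0.0, "max": 1e-10},
]
table = Table(rows=rows)

for name in ["value", "error", "min", "max"]:
    table[name].format = ".3e"

print(table)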
Example #9
    def get_flux_points(self, coord=None):
        """Extract flux point at a given position.

        The flux points are returned in the form of a `~gammapy.estimators.FluxPoints` object
        (which stores the flux points in an `~astropy.table.Table`).

        Parameters
        ----------
        coord : `~astropy.coordinates.SkyCoord`
            The coordinate where the flux points are extracted.

        Returns
        -------
        fluxpoints : `~gammapy.estimators.FluxPoints`
            The flux points object.
        """
        if coord is None:
            coord = self.geom.center_skydir
        energies = self.energy_ref
        coords = MapCoord.create(dict(skycoord=coord, energy=energies))

        ref = self.dnde_ref.squeeze()

        fp = dict()
        fp["norm"] = self.norm.get_by_coord(coords) * self.norm.unit

        for quantity in self._available_quantities:
            norm_quantity = f"norm_{quantity}"
            res = getattr(self, norm_quantity).get_by_coord(coords)
            res *= getattr(self, norm_quantity).unit
            fp[norm_quantity] = res

        for additional_quantity in self._additional_maps:
            res = self.data[additional_quantity].get_by_coord(coords)
            res *= self.data[additional_quantity].unit
            fp[additional_quantity] = res

        # TODO: add support of norm and stat scan

        rows = []
        for idx, energy in enumerate(self.energy_ref):
            result = dict()
            result["e_ref"] = energy
            result["e_min"] = self.energy_min[idx]
            result["e_max"] = self.energy_max[idx]
            result["ref_dnde"] = ref[idx]
            result["norm"] = fp["norm"][idx]
            for quantity in self._available_quantities:
                norm_quantity = f"norm_{quantity}"
                result[norm_quantity] = fp[norm_quantity][idx]
            for key in self._additional_maps:
                result[key] = fp[key][idx]
            rows.append(result)
        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        return FluxPoints(table).to_sed_type('dnde')
Example #10
    def run(self, datasets):
        """Run light curve extraction.

        Normalize integral and energy flux between emin and emax.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.SpectrumDataset` or `~gammapy.datasets.MapDataset`
            Spectrum or Map datasets.

        Returns
        -------
        lightcurve : `~gammapy.estimators.LightCurve`
            the Light Curve object
        """
        datasets = Datasets(datasets)

        if self.time_intervals is None:
            gti = datasets.gti
        else:
            gti = GTI.from_time_intervals(self.time_intervals)

        gti = gti.union(overlap_ok=False, merge_equal=False)

        rows = []

        for t_min, t_max in gti.time_intervals:
            datasets_to_fit = datasets.select_time(t_min=t_min,
                                                   t_max=t_max,
                                                   atol=self.atol)

            if len(datasets_to_fit) == 0:
                log.debug(
                    f"No Dataset for the time interval {t_min} to {t_max}")
                continue

            row = {"time_min": t_min.mjd, "time_max": t_max.mjd}
            row.update(self.estimate_time_bin_flux(datasets_to_fit))
            rows.append(row)

        if len(rows) == 0:
            raise ValueError(
                "LightCurveEstimator: No datasets in time intervals")

        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        model = datasets.models[self.source]

        # TODO: cleanup here...
        fp = FluxPoints(table,
                        reference_spectral_model=model.spectral_model.copy())
        table_flux = fp.to_table(sed_type="flux")
        table_flux.remove_columns(["stat", "ts", "sqrt_ts", "e_min", "e_max"])
        return LightCurve(hstack([table, table_flux]))
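Each light-curve bin built in the loop above is a plain dict keyed by MJD bounds plus the flux results; a hedged illustration with invented keys and values:

# Hedged sketch of one light-curve row: MJD time bounds plus flux results
# (the flux keys and numbers are invented for illustration).
import astropy.units as u
from astropy.time import Time

t_min, t_max = Time("2020-01-01"), Time("2020-01-02")
row = {"time_min": t_min.mjd, "time_max": t_max.mjd}
row.update({"flux": 1e-12 * u.Unit("cm-2 s-1"), "flux_err": 2e-13 * u.Unit("cm-2 s-1")})
print(row)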
Example #11
    def to_table(self):
        """Convert parameter attributes to `~astropy.table.Table`."""
        rows = []
        for p in self._parameters:
            d = p.to_dict()
            rows.append({**dict(type=p.type), **d})
        table = table_from_row_data(rows)

        table["value"].format = ".4e"
        for name in ["error", "min", "max"]:
            table[name].format = ".3e"

        return table
Example #12
    def _loops(self, run, nuisance, stat_profile_opts, optimize_opts,
               covariance_opts):
        """Loop in channels and masses."""

        for ch in self.channels:
            table_rows = []
            for mass in self.masses:

                # modify and set flux model
                dataset_loop = self._set_model_dataset(ch, mass)

                # build profile from fitting
                j_best, sv_best, likemin, statprofile = self._fit_dataset(
                    dataset_loop,
                    nuisance,
                    run,
                    ch,
                    mass,
                    stat_profile_opts=stat_profile_opts,
                    optimize_opts=optimize_opts,
                    covariance_opts=covariance_opts,
                )

                # calculate results from a profile
                fit_result = self._produce_results(j_best, sv_best, likemin,
                                                   statprofile)

                # not a valid run
                if fit_result["sigma_v"] is None:
                    self._make_bad_run_row(run, fit_result)
                    return False

                # build table of results incrementally
                row = {
                    "mass": mass,
                    "sigma_v": fit_result["sigma_v"],
                    "sv_ul": fit_result["sv_ul"],
                    "sv_best": fit_result["sv_best"],
                    "j_best": fit_result["j_best"],
                    "statprofile": fit_result["statprofile"],
                }
                table_rows.append(row)
                self.sigmas[ch][mass.value][run] = row["sigma_v"]
                self.js[ch][mass.value][run] = row["j_best"]
            table = table_from_row_data(rows=table_rows)
            table["sigma_v"].unit = self.XSECTION.unit
            table["j_best"].unit = self.dataset.models.parameters[
                "jfactor"].unit
            self.result["runs"][ch][run] = table
        return True
Example #13
    def run(self, datasets):
        """Run light curve extraction.

        Normalize integral and energy flux between emin and emax.

        Parameters
        ----------
        datasets : list of `~gammapy.spectrum.SpectrumDataset` or `~gammapy.cube.MapDataset`
            Spectrum or Map datasets.

        Returns
        -------
        lightcurve : `~gammapy.time.LightCurve`
            the Light Curve object
        """
        datasets = Datasets(datasets)

        if self.time_intervals is None:
            gti = datasets.gti
        else:
            gti = GTI.from_time_intervals(self.time_intervals)

        gti = gti.union(overlap_ok=False, merge_equal=False)

        rows = []

        for t_min, t_max in gti.time_intervals:
            datasets_to_fit = datasets.select_time(t_min=t_min,
                                                   t_max=t_max,
                                                   atol=self.atol)

            if len(datasets_to_fit) == 0:
                log.debug(
                    f"No Dataset for the time interval {t_min} to {t_max}")
                continue

            row = {"time_min": t_min.mjd, "time_max": t_max.mjd}

            data = self.estimate_time_bin_flux(datasets_to_fit)
            row.update(data)
            row.update(self.estimate_counts(datasets_to_fit))
            rows.append(row)

        if len(rows) == 0:
            raise ValueError(
                "LightCurveEstimator: No datasets in time intervals")

        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        table = FluxPoints(table).to_sed_type("flux").table
        return LightCurve(table)
Example #14
    def run(self, e_ref, e_min, e_max, steps="all"):
        """Run light curve extraction.

        Normalize integral and energy flux between emin and emax.

        Parameters
        ----------
        e_ref : `~astropy.units.Quantity`
            reference energy of dnde flux normalization
        e_min : `~astropy.units.Quantity`
            minimum energy of integral and energy flux normalization interval
        e_max : `~astropy.units.Quantity`
            maximum energy of integral and energy flux normalization interval
        steps : list of str
            Which steps to execute. Available options are:

                * "err": estimate symmetric error.
                * "errn-errp": estimate asymmetric errors.
                * "ul": estimate upper limits.
                * "ts": estimate ts and sqrt(ts) values.
                * "norm-scan": estimate likelihood profiles.

            By default all steps are executed.

        Returns
        -------
        lightcurve : `~gammapy.time.LightCurve`
            the Light Curve object
        """
        self.e_ref = e_ref
        self.e_min = e_min
        self.e_max = e_max

        rows = []

        for dataset in self.datasets.datasets:
            row = {
                "time_min": dataset.counts.meta["t_start"].mjd,
                "time_max": dataset.counts.meta["t_stop"].mjd,
            }
            row.update(self.estimate_time_bin_flux(dataset, steps))
            rows.append(row)

        meta = OrderedDict([("SED_TYPE", "likelihood")])
        table = table_from_row_data(rows=rows, meta=meta)
        table = FluxPoints(table).to_sed_type("flux").table
        return LightCurve(table)
Example #15
    def _make_bad_run_row(self, run, fit_result):
        """Add only likelihood profile for a bad run."""

        for ch in self.channels:
            table_rows = []
            for mass in self.masses:
                row = {
                    "mass": mass,
                    "sigma_v": None,
                    "sv_ul": None,
                    "sv_best": None,
                    "j_best": None,
                    "statprofile": fit_result["statprofile"],
                }
                table_rows.append(row)
                self.sigmas[ch][mass.value][run] = None
            table = table_from_row_data(rows=table_rows)
            self.result["runs"][ch][run] = table
Example #16
    def to_table(self):
        """Convert parameter attributes to `~astropy.table.Table`."""
        rows = []
        for p in self._parameters:
            d = p.to_dict()
            if "link" not in d:
                d["link"] = ""
            for key in ["scale_method", "interp"]:
                if key in d:
                    del d[key]
            rows.append({**dict(type=p.type), **d})
        table = table_from_row_data(rows)

        table["value"].format = ".4e"
        for name in ["error", "min", "max"]:
            table[name].format = ".3e"

        return table
Example #17
    def run(self, datasets):
        """Run the flux point estimator for all energy groups.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.Dataset`
            Datasets

        Returns
        -------
        flux_points : `FluxPoints`
            Estimated flux points.
        """
        # TODO: remove copy here...
        datasets = Datasets(datasets).copy()

        rows = []

        for energy_min, energy_max in progress_bar(zip(self.energy_edges[:-1],
                                                       self.energy_edges[1:]),
                                                   desc="Energy bins"):
            row = self.estimate_flux_point(
                datasets,
                energy_min=energy_min,
                energy_max=energy_max,
            )
            rows.append(row)

        meta = {
            "n_sigma": self.n_sigma,
            "n_sigma_ul": self.n_sigma_ul,
            "sed_type_init": "likelihood"
        }

        table = table_from_row_data(rows=rows, meta=meta)
        model = datasets.models[self.source]
        return FluxPoints.from_table(table=table,
                                     reference_model=model.copy(),
                                     gti=datasets.gti,
                                     format="gadf-sed")
Example #18
    def run(self, dataset, steps="all"):
        """Make the profiles

        Parameters
        ----------
        dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`
            the dataset to use for profile extraction
        steps : list of str
            the steps to be used.

        Returns
        --------
        imageprofile : `~gammapy.estimators.ImageProfile`
            Return an image profile class containing the result
        """
        spectrum_datasets = self.get_spectrum_datasets(dataset)
        results = self.make_prof(spectrum_datasets, steps)
        table = table_from_row_data(results)
        if isinstance(self.regions[0], RectangleSkyRegion):
            table.meta["PROFILE_TYPE"] = "orthogonal_rectangle"
        table.meta["SPECTRAL_MODEL"] = self.spectrum.to_dict()

        # return ImageProfile(table)
        return table
Example #19
    def run(self, datasets, steps="all"):
        """Run the flux point estimator for all energy groups.

        Parameters
        ----------
        datasets : list of `~gammapy.spectrum.SpectrumDataset`
            Spectrum datasets.
        steps : list of str
            Which steps to execute. See `estimate_flux_point` for details
            and available options.

        Returns
        -------
        flux_points : `FluxPoints`
            Estimated flux points.
        """
        datasets = self._check_datasets(datasets)

        if not datasets.is_all_same_type or not datasets.is_all_same_energy_shape:
            raise ValueError(
                "Flux point estimation requires a list of datasets"
                " of the same type and data shape."
            )
        self.datasets = datasets.copy()

        rows = []
        for e_group in self.e_groups:
            if e_group["bin_type"].strip() != "normal":
                log.debug("Skipping under-/ overflow bin in flux point estimation.")
                continue

            row = self._estimate_flux_point(e_group, steps=steps)
            rows.append(row)

        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        return FluxPoints(table).to_sed_type("dnde")
Example #20
def test_table_from_row_data():
    rows = [dict(a=1, b=1 * u.m, c="x"), dict(a=2, b=2 * u.km, c="yy")]
    table = table_from_row_data(rows)
    assert isinstance(table, Table)
    assert table["b"].unit == "m"
    assert_allclose(table["b"].data, [1, 2000])
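The test above pins down the behaviour all the snippets rely on: a list of row dicts becomes an `astropy.table.Table`, and `Quantity` values are merged into a unit-aware column. A minimal sketch of such a helper, written under that assumption (the actual gammapy implementation may differ):

# Hedged sketch of a table_from_row_data-style helper; not the gammapy code,
# only an illustration consistent with the test above.
from astropy.table import Table
from astropy.units import Quantity


def table_from_row_data_sketch(rows, meta=None):
    """Build a Table from a list of row dicts, merging Quantity columns."""
    table = Table(meta=meta)
    for name in rows[0]:
        column = [row[name] for row in rows]
        if isinstance(column[0], Quantity):
            # Quantity(list) converts to the unit of the first entry,
            # e.g. [1 m, 2 km] -> [1, 2000] m as asserted in the test.
            column = Quantity(column)
        table[name] = column
    return table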
Example #21
    def optimize(self, datasets):
        """Run the optimization.

        Parameters
        ----------
        datasets : `Datasets` or list of `Dataset`
            Datasets to optimize.

        Returns
        -------
        fit_result : `FitResult`
            Results
        """
        datasets, parameters = self._parse_datasets(datasets=datasets)
        datasets.parameters.check_limits()

        # TODO: expose options if / when to scale? On the Fit class?
        if np.all(datasets.models.covariance.data == 0):
            parameters.autoscale()

        kwargs = self.optimize_opts.copy()
        backend = kwargs.pop("backend", self.backend)

        compute = registry.get("optimize", backend)
        # TODO: change this calling interface!
        # probably should pass a fit statistic, which has a model, which has parameters
        # and return something simpler, not a tuple of three things
        factors, info, optimizer = compute(
            parameters=parameters,
            function=datasets.stat_sum,
            store_trace=self.store_trace,
            **kwargs,
        )

        # TODO: Change to a stateless interface for minuit also, or if we must support
        # stateful backends, put a proper, backend-agnostic solution for this.
        # As a preliminary solution we would like to provide the possibility that the user
        # can access the Minuit object, because it features a lot of useful functionality
        if backend == "minuit":
            self.minuit = optimizer

        trace = table_from_row_data(info.pop("trace"))

        if self.store_trace:
            idx = [
                parameters.index(par)
                for par in parameters.unique_parameters.free_parameters
            ]
            unique_names = np.array(
                datasets.models.parameters_unique_names)[idx]
            trace.rename_columns(trace.colnames[1:], list(unique_names))

        # Copy final results into the parameters object
        parameters.set_parameter_factors(factors)
        parameters.check_limits()
        return OptimizeResult(
            parameters=parameters,
            total_stat=datasets.stat_sum(),
            backend=backend,
            method=kwargs.get("method", backend),
            trace=trace,
            **info,
        )
Example #22
    def optimize(self, backend="minuit", **kwargs):
        """Run the optimization.

        Parameters
        ----------
        backend : str
            Which backend to use (see ``gammapy.modeling.registry``)
        **kwargs : dict
            Keyword arguments passed to the optimizer. For the `"minuit"` backend
            see https://iminuit.readthedocs.io/en/latest/api.html#iminuit.Minuit
            for a detailed description of the available options. If there is an entry
            'migrad_opts', those options will be passed to `iminuit.Minuit.migrad()`.

            For the `"sherpa"` backend you can from the options `method = {"simplex",  "levmar", "moncar", "gridsearch"}`
            Those methods are described and compared in detail on
            http://cxc.cfa.harvard.edu/sherpa/methods/index.html. The available
            options of the optimization methods are described on the following
            pages in detail:

                * http://cxc.cfa.harvard.edu/sherpa/ahelp/neldermead.html
                * http://cxc.cfa.harvard.edu/sherpa/ahelp/montecarlo.html
                * http://cxc.cfa.harvard.edu/sherpa/ahelp/gridsearch.html
                * http://cxc.cfa.harvard.edu/sherpa/ahelp/levmar.html

            For the `"scipy"` backend the available options are desribed in detail here:
            https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html

        Returns
        -------
        fit_result : `FitResult`
            Results
        """
        parameters = self._parameters
        parameters.check_limits()

        # TODO: expose options if / when to scale? On the Fit class?
        if np.all(self._models.covariance.data == 0):
            parameters.autoscale()

        compute = registry.get("optimize", backend)
        # TODO: change this calling interface!
        # probably should pass a fit statistic, which has a model, which has parameters
        # and return something simpler, not a tuple of three things
        factors, info, optimizer = compute(
            parameters=parameters,
            function=self.datasets.stat_sum,
            store_trace=self.store_trace,
            **kwargs,
        )

        # TODO: Change to a stateless interface for minuit also, or if we must support
        # stateful backends, put a proper, backend-agnostic solution for this.
        # As a preliminary solution we would like to provide the possibility that the user
        # can access the Minuit object, because it features a lot of useful functionality
        if backend == "minuit":
            self.minuit = optimizer

        trace = table_from_row_data(info.pop("trace"))

        if self.store_trace:
            pars = self._models.parameters
            idx = [
                pars.index(par)
                for par in pars.unique_parameters.free_parameters
            ]
            unique_names = np.array(self._models.parameters_unique_names)[idx]
            trace.rename_columns(trace.colnames[1:], list(unique_names))

        # Copy final results into the parameters object
        parameters.set_parameter_factors(factors)
        parameters.check_limits()
        return OptimizeResult(
            parameters=parameters,
            total_stat=self.datasets.stat_sum(),
            backend=backend,
            method=kwargs.get("method", backend),
            trace=trace,
            **info,
        )
Example #23
    def run(
        self,
        runs,
        nuisance=False,
        stat_profile_opts=None,
        optimize_opts=None,
        covariance_opts=None,
    ):
        """Run the SigmaVEstimator for all channels and masses.

        Parameters
        ----------
        runs : int
            Number of runs in which to perform the fitting.
        nuisance : bool
            Flag to perform fitting with nuisance parameters. Default False.
        stat_profile_opts : dict
            Options passed to `~gammapy.utils.fitting.Fit.stat_profile`.
        optimize_opts : dict
            Options passed to `~gammapy.utils.fitting.Fit.optimize`.
        covariance_opts : dict
            Options passed to `~gammapy.utils.fitting.Fit.covariance`.

        Returns
        -------
        result : dict
            result['mean'] provides mean and std values for sigma v vs. mass for each channel.
            result['runs'] provides a table of sigma v vs. mass and likelihood profiles for each run and channel.
        """

        # default options in sv curve
        if stat_profile_opts is None:
            stat_profile_opts = dict(bounds=(-25, 150), nvalues=50)

        # initialize data containers
        for ch in self.channels:
            self.result["mean"][ch] = None
            self.result["runs"][ch] = {}
            self.sigmas[ch] = {}
            self.js[ch] = {}
            for mass in self.masses:
                self.sigmas[ch][mass.value] = {}
                self.js[ch][mass.value] = {}

        okruns = 0
        # loop in runs
        for run in range(runs):
            self.dataset.fake(background_model=self.dataset.counts)
            valid = self._loops(run, nuisance, stat_profile_opts,
                                optimize_opts, covariance_opts)
            # skip the run and continue with the next one if the fit fails for a mass of a specific channel
            if not valid:
                log.warning(f"Skipping run {run}")
                continue
            else:
                okruns += 1

        # calculate means / std
        if okruns:
            for ch in self.channels:
                table_rows = []
                for mass in self.masses:
                    row = {"mass": mass}
                    listsigmas = [
                        val
                        for key, val in self.sigmas[ch][mass.value].items()
                    ]
                    npsigmas = np.array(listsigmas, dtype=float)  # np.float alias removed in NumPy 1.24
                    sigma_mean = np.nanmean(npsigmas)
                    sigma_std = np.nanstd(npsigmas)
                    row["sigma_v"] = sigma_mean * self.XSECTION.unit
                    row["sigma_v_std"] = sigma_std * self.XSECTION.unit
                    listjs = [
                        val for key, val in self.js[ch][mass.value].items()
                    ]
                    if listjs.count(None) != len(listjs):
                        npjs = np.array(listjs, dtype=float)
                        js_mean = np.nanmean(npjs)
                        js_std = np.nanstd(npjs)
                        row["jfactor"] = js_mean * self.dataset.models.parameters[
                            "jfactor"].unit
                        row["jfactor_std"] = js_std * self.dataset.models.parameters[
                            "jfactor"].unit
                    else:
                        row["jfactor"] = None
                        row["jfactor_std"] = None
                    table_rows.append(row)
                table = table_from_row_data(rows=table_rows)
                self.result["mean"][ch] = table
        log.info(f"Number of good runs: {okruns}")
        return self.result
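The mean/std step above relies on `None` entries turning into NaN when the list is cast to a float array, so `nanmean`/`nanstd` skip failed runs; a short hedged illustration with invented numbers:

# Hedged illustration: None becomes nan under a float cast, so nanmean/nanstd
# ignore runs that failed (the values below are made up).
import numpy as np

sigmas = [1.2e-25, None, 1.4e-25]
arr = np.array(sigmas, dtype=float)  # [1.2e-25, nan, 1.4e-25]
print(np.nanmean(arr), np.nanstd(arr))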
Example #24
    def run(self, datasets, steps="all"):
        """Run light curve extraction.

        Normalize integral and energy flux between emin and emax.

        Parameters
        ----------
        datasets : list of `~gammapy.spectrum.SpectrumDataset` or `~gammapy.cube.MapDataset`
            Spectrum or Map datasets.
        steps : list of str
            Which steps to execute. Available options are:

                * "err": estimate symmetric error.
                * "errn-errp": estimate asymmetric errors.
                * "ul": estimate upper limits.
                * "ts": estimate ts and sqrt(ts) values.
                * "norm-scan": estimate fit statistic profiles.

            By default all steps are executed.

        Returns
        -------
        lightcurve : `~gammapy.time.LightCurve`
            the Light Curve object
        """
        if self.input_time_intervals is None:
            time_intervals = [
                Time([d.gti.time_start[0], d.gti.time_stop[-1]])
                for d in datasets
            ]
        else:
            time_intervals = self.input_time_intervals

        time_intervals = self._check_and_sort_time_intervals(time_intervals)

        rows = []
        self.group_table_info = group_datasets_in_time_interval(
            datasets=datasets, time_intervals=time_intervals, atol=self.atol)
        if np.all(self.group_table_info["Group_ID"] == -1):
            raise ValueError(
                "LightCurveEstimator: No datasets in time intervals")

        for igroup, time_interval in enumerate(time_intervals):
            index_dataset = np.where(
                self.group_table_info["Group_ID"] == igroup)[0]
            if len(index_dataset) == 0:
                log.debug("No Dataset for the time interval " + str(igroup))
                continue

            row = {
                "time_min": time_interval[0].mjd,
                "time_max": time_interval[1].mjd
            }
            interval_list_dataset = Datasets(
                [datasets[int(_)] for _ in index_dataset])
            row.update(
                self.estimate_time_bin_flux(interval_list_dataset,
                                            time_interval, steps))
            rows.append(row)
        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        table = FluxPoints(table).to_sed_type("flux").table
        return LightCurve(table)
Example #25
    def run(self, e_ref, e_min, e_max, steps="all", atol="1e-6 s"):
        """Run light curve extraction.

        Normalize integral and energy flux between emin and emax.

        Parameters
        ----------
        e_ref : `~astropy.units.Quantity`
            reference energy of dnde flux normalization
        e_min : `~astropy.units.Quantity`
            minimum energy of integral and energy flux normalization interval
        e_max : `~astropy.units.Quantity`
            maximum energy of integral and energy flux normalization interval
        steps : list of str
            Which steps to execute. Available options are:

                * "err": estimate symmetric error.
                * "errn-errp": estimate asymmetric errors.
                * "ul": estimate upper limits.
                * "ts": estimate ts and sqrt(ts) values.
                * "norm-scan": estimate fit statistic profiles.

            By default all steps are executed.
        atol : `~astropy.units.Quantity`
            Tolerance value for time comparison with different scale. Default 1e-6 sec.

        Returns
        -------
        lightcurve : `~gammapy.time.LightCurve`
            the Light Curve object
        """
        atol = u.Quantity(atol)
        self.e_ref = e_ref
        self.e_min = e_min
        self.e_max = e_max

        rows = []
        self.group_table_info = group_datasets_in_time_interval(
            datasets=self.datasets,
            time_intervals=self.time_intervals,
            atol=atol)
        if np.all(self.group_table_info["Group_ID"] == -1):
            raise ValueError(
                "LightCurveEstimator: No datasets in time intervals")
        for igroup, time_interval in enumerate(self.time_intervals):
            index_dataset = np.where(
                self.group_table_info["Group_ID"] == igroup)[0]
            if len(index_dataset) == 0:
                log.debug("No Dataset for the time interval " + str(igroup))
                continue

            row = {
                "time_min": time_interval[0].mjd,
                "time_max": time_interval[1].mjd
            }
            interval_list_dataset = Datasets(
                [self.datasets[int(_)].copy() for _ in index_dataset])
            self._set_scale_model(interval_list_dataset)
            row.update(
                self.estimate_time_bin_flux(interval_list_dataset,
                                            time_interval, steps))
            rows.append(row)
        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        table = FluxPoints(table).to_sed_type("flux").table
        return LightCurve(table)
Example #26
    def position(self):
        """Source position (`~astropy.coordinates.SkyCoord`)."""
        table = table_from_row_data([self.data])
        return _skycoord_from_table(table)[0]
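For context, a hedged sketch of what a table-to-SkyCoord step could look like, assuming the row carries `ra`/`dec` columns in degrees (the actual `_skycoord_from_table` helper may behave differently):

# Hedged sketch: build a SkyCoord from ra/dec table columns (column names,
# units, and values are assumptions for illustration).
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.table import Table

table = Table(rows=[{"ra": 83.63, "dec": 22.01}])
position = SkyCoord(ra=table["ra"] * u.deg, dec=table["dec"] * u.deg)[0]
print(position)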
Example #27
    def run(self, datasets):
        """Run light curve extraction.

        Normalize integral and energy flux between emin and emax.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.SpectrumDataset` or `~gammapy.datasets.MapDataset`
            Spectrum or Map datasets.

        Returns
        -------
        lightcurve : `~gammapy.estimators.LightCurve`
            the Light Curve object
        """
        datasets = Datasets(datasets)

        if self.time_intervals is None:
            gti = datasets.gti
        else:
            gti = GTI.from_time_intervals(self.time_intervals)

        gti = gti.union(overlap_ok=False, merge_equal=False)

        rows = []
        for t_min, t_max in progress_bar(gti.time_intervals,
                                         desc="Time intervals"):
            datasets_to_fit = datasets.select_time(t_min=t_min,
                                                   t_max=t_max,
                                                   atol=self.atol)

            if len(datasets_to_fit) == 0:
                log.debug(
                    f"No Dataset for the time interval {t_min} to {t_max}")
                continue

            row = {"time_min": t_min.mjd, "time_max": t_max.mjd}
            fp = self.estimate_time_bin_flux(datasets_to_fit)
            fp_table = fp.to_table()

            for column in fp_table.colnames:
                if column == "counts":
                    data = fp_table[column].quantity.sum(axis=1)
                else:
                    data = fp_table[column].quantity
                row[column] = data

            fp_table_flux = fp.to_table(sed_type="flux")
            for column in fp_table_flux.colnames:
                if "flux" in column:
                    row[column] = fp_table_flux[column].quantity

            rows.append(row)

        if len(rows) == 0:
            raise ValueError(
                "LightCurveEstimator: No datasets in time intervals")

        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        # TODO: use FluxPoints here
        return LightCurve(table=table)