Example #1
    def run(self, datasets):
        """Run."""
        datasets = Datasets(datasets)
        # Find the extension (size) parameter of the source's spatial model
        # TODO: replace this attribute probing with something more robust
        model = datasets.models[self.source].spatial_model

        if hasattr(model, "sigma"):
            self.size_parameter = model.sigma
        elif hasattr(model, "r_0"):
            self.size_parameter = model.r_0
        elif hasattr(model, "radius"):
            self.size_parameter = model.radius
        else:
            raise ValueError(
                f"Cannot find size parameter on model {self.source}")

        rows = []

        for energy_min, energy_max in progress_bar(zip(self.energy_edges[:-1],
                                                       self.energy_edges[1:]),
                                                   desc="Energy bins"):
            datasets_sliced = datasets.slice_by_energy(energy_min=energy_min,
                                                       energy_max=energy_max)
            datasets_sliced = Datasets(
                [_.to_image(name=_.name) for _ in datasets_sliced])
            datasets_sliced.models = datasets.models  #.copy()
            row = self.estimate_size(datasets_sliced)
            rows.append(row)
        return rows
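The TODO above suggests the chain of hasattr/elif branches could be replaced by
probing a fixed list of candidate parameter names. A minimal sketch of that idea
(the helper name and the module-level tuple are illustrative, not part of the
estimator class):

    # Illustrative sketch only: the candidate names mirror the branches above.
    SIZE_PARAMETER_NAMES = ("sigma", "r_0", "radius")

    def find_size_parameter(spatial_model, source):
        """Return the first size-like parameter found on the spatial model."""
        for name in SIZE_PARAMETER_NAMES:
            if hasattr(spatial_model, name):
                return getattr(spatial_model, name)
        raise ValueError(f"Cannot find size parameter on model {source}")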
Example #2
    def run(self, datasets):
        """Estimate flux for a given energy range.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.SpectrumDataset`
            Spectrum datasets.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        datasets = Datasets(datasets)

        datasets_sliced = datasets.slice_by_energy(energy_min=self.energy_min,
                                                   energy_max=self.energy_max)

        new_names = [name + "-sliced" for name in datasets.names]
        models = datasets.models.reassign(datasets.names, new_names)
        datasets_sliced.models = models
        for d in datasets_sliced:
            if d.background_model:
                d.background_model.reset_to_default()

        if len(datasets_sliced) > 0:
            # TODO: this relies on the energy binning of the first dataset
            energy_axis = datasets_sliced[0].counts.geom.axes["energy"]
            energy_min = energy_axis.edges.min()
            energy_max = energy_axis.edges.max()
        else:
            energy_min, energy_max = self.energy_min, self.energy_max

        contributions = []

        for dataset in datasets_sliced:
            if dataset.mask is not None:
                value = dataset.mask.data.any()
            else:
                value = True
            contributions.append(value)

        model = self.get_scale_model(models)

        with np.errstate(invalid="ignore", divide="ignore"):
            result = self.get_reference_flux_values(model.model, energy_min,
                                                    energy_max)

        if len(datasets) == 0 or not np.any(contributions):
            result.update(self.nan_result)
        else:
            models[self.source].spectral_model = model

            datasets_sliced.models = models
            result.update(
                self._parameter_estimator.run(datasets_sliced, model.norm))
            result["sqrt_ts"] = self.get_sqrt_ts(result["ts"], result["norm"])

        return result
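The reassign call above maps each original dataset name to a "-sliced" counterpart,
so that models attached to a dataset by name follow it into the sliced copy. A
self-contained illustration of the name mapping (plain Python, no gammapy objects):

    # Illustrative only: pair original names with their "-sliced" counterparts.
    names = ["obs-1", "obs-2"]
    new_names = [name + "-sliced" for name in names]
    print(dict(zip(names, new_names)))
    # {'obs-1': 'obs-1-sliced', 'obs-2': 'obs-2-sliced'}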
Example #3
    def run(self, datasets):
        """Estimate flux for a given energy range.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.SpectrumDataset`
            Spectrum datasets.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        datasets = Datasets(datasets)

        datasets_sliced = datasets.slice_by_energy(energy_min=self.energy_min,
                                                   energy_max=self.energy_max)

        # TODO: simplify model book-keeping!!
        models = Models()

        for model in datasets.models:
            if "sky-model" in model.tag:
                models.append(model)
            elif "fov-bkg" in model.tag:
                bkg_model = model.copy(dataset_name=model.datasets_names[0] +
                                       "-sliced")
                bkg_model.reset_to_default()
                models.append(bkg_model)

        if len(datasets_sliced) > 0:
            # TODO: this relies on the energy binning of the first dataset
            energy_axis = datasets_sliced[0].counts.geom.axes["energy"]
            energy_min = energy_axis.edges.min()
            energy_max = energy_axis.edges.max()
        else:
            energy_min, energy_max = self.energy_min, self.energy_max

        any_contribution = np.any(
            [dataset.mask.data.any() for dataset in datasets_sliced])

        model = self.get_scale_model(models)

        with np.errstate(invalid="ignore", divide="ignore"):
            result = self.get_reference_flux_values(model.model, energy_min,
                                                    energy_max)

        if len(datasets) == 0 or not any_contribution:
            result.update(self.nan_result)
        else:
            models[self.source].spectral_model = model

            datasets_sliced.models = models
            result.update(
                self._parameter_estimator.run(datasets_sliced, model.norm))
            result["sqrt_ts"] = self.get_sqrt_ts(result["ts"], result["norm"])

        return result
Example #4
    def run(self, datasets):
        """Estimate flux for a given energy range.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.SpectrumDataset`
            Spectrum datasets.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        datasets = Datasets(datasets)
        models = datasets.models

        datasets = datasets.slice_by_energy(energy_min=self.energy_min,
                                            energy_max=self.energy_max)

        if len(datasets) > 0:
            # TODO: this relies on the energy binning of the first dataset
            energy_axis = datasets[0].counts.geom.axes["energy"]
            energy_min = energy_axis.edges.min()
            energy_max = energy_axis.edges.max()
        else:
            energy_min, energy_max = self.energy_min, self.energy_max

        any_contribution = np.any(
            [dataset.mask.data.any() for dataset in datasets])

        model = self.get_scale_model(models)

        with np.errstate(invalid="ignore", divide="ignore"):
            result = self.get_reference_flux_values(model.model, energy_min,
                                                    energy_max)

        if len(datasets) == 0 or not any_contribution:
            result.update(self.nan_result)
        else:
            models[self.source].spectral_model = model

            datasets.models = models
            result.update(self._parameter_estimator.run(datasets, model.norm))
            result["sqrt_ts"] = self.get_sqrt_ts(result["ts"], result["norm"])

        return result
Example #5
    def run(self, dataset):
        """Run adaptive smoothing on input MapDataset.

        Parameters
        ----------
        dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`
            The input dataset (with at most one energy bin).

        Returns
        -------
        images : dict of `~gammapy.maps.WcsNDMap`
            Smoothed images; keys are:
                * 'counts'
                * 'background'
                * 'flux' (optional)
                * 'scales'
                * 'sqrt_ts'.
        """
        datasets = Datasets([dataset])

        if self.energy_edges is None:
            energy_axis = dataset.counts.geom.axes["energy"]
            energy_edges = u.Quantity(
                [energy_axis.edges[0], energy_axis.edges[-1]])
        else:
            energy_edges = self.energy_edges

        results = []

        for energy_min, energy_max in zip(energy_edges[:-1], energy_edges[1:]):
            dataset = datasets.slice_by_energy(energy_min, energy_max)[0]
            result = self.estimate_maps(dataset)
            results.append(result)

        result_all = {}

        for name in results[0].keys():
            map_all = Map.from_images(images=[_[name] for _ in results])
            result_all[name] = map_all

        return result_all
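A hedged usage sketch for the method above; the constructor keywords (scales,
threshold) and the dataset path are assumptions, not taken from this code:

    import numpy as np
    import astropy.units as u
    from gammapy.datasets import MapDataset
    from gammapy.estimators import ASmoothMapEstimator

    # Placeholder path: any serialized MapDataset will do. If the estimator
    # expects a single energy bin (see the docstring above), reduce the
    # dataset first, e.g. with dataset.to_image().
    dataset = MapDataset.read("$GAMMAPY_DATA/cta-1dc-gc/cta-1dc-gc.fits.gz")

    scales = u.Quantity(np.arange(0.05, 0.5, 0.05), unit="deg")
    estimator = ASmoothMapEstimator(scales=scales, threshold=3)

    images = estimator.run(dataset)  # dict of WcsNDMap, see the docstring above
    print(images["sqrt_ts"])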
Example #6
def test_inhomogeneous_datasets(fermi_datasets, hess_datasets):
    datasets = Datasets()

    datasets.extend(fermi_datasets)
    datasets.extend(hess_datasets)

    datasets = datasets.slice_by_energy(
        energy_min=1 * u.TeV,
        energy_max=10 * u.TeV,
    )
    datasets.models = fermi_datasets.models

    estimator = FluxEstimator(source="Crab Nebula",
                              selection_optional=[],
                              reoptimize=True)
    result = estimator.run(datasets)

    assert_allclose(result["norm"], 1.190622, atol=1e-3)
    assert_allclose(result["ts"], 612.50171, atol=1e-3)
    assert_allclose(result["norm_err"], 0.090744, atol=1e-3)
    assert_allclose(result["e_min"], 0.693145 * u.TeV, atol=1e-3)
    assert_allclose(result["e_max"], 10 * u.TeV, atol=1e-3)
Example #7
    def run(self, dataset):
        """
        Run TS map estimation.

        Requires a `~gammapy.datasets.MapDataset` with counts, exposure and
        background_model properly set.

        Parameters
        ----------
        dataset : `~gammapy.datasets.MapDataset`
            Input MapDataset.

        Returns
        -------
        maps : `~gammapy.estimators.FluxMaps`
            Flux maps container with the following quantities:

                * ts : delta TS map
                * sqrt_ts : sqrt(delta TS), or significance map
                * flux : flux map
                * flux_err : symmetric error map
                * flux_ul : upper limit map

        """
        dataset_models = dataset.models

        if self.downsampling_factor:
            shape = dataset.counts.geom.to_image().data_shape
            pad_width = symmetric_crop_pad_width(shape, shape_2N(shape))[0]
            dataset = dataset.pad(pad_width).downsample(
                self.downsampling_factor)

        # TODO: add support for joint likelihood fitting to TSMapEstimator
        datasets = Datasets(dataset)

        if self.energy_edges is None:
            energy_axis = dataset.counts.geom.axes["energy"]
            energy_edges = u.Quantity(
                [energy_axis.edges[0], energy_axis.edges[-1]])
        else:
            energy_edges = self.energy_edges

        results = []

        for energy_min, energy_max in zip(energy_edges[:-1], energy_edges[1:]):
            sliced_dataset = datasets.slice_by_energy(energy_min,
                                                      energy_max)[0]

            if self.sum_over_energy_groups:
                sliced_dataset = sliced_dataset.to_image()

            sliced_dataset.models = dataset_models
            result = self.estimate_flux_map(sliced_dataset)
            results.append(result)

        result_all = {}

        for name in self.selection_all:
            map_all = Map.from_images(images=[_[name] for _ in results])

            if self.downsampling_factor:
                order = 0 if name == "niter" else 1
                map_all = map_all.upsample(factor=self.downsampling_factor,
                                           preserve_counts=False,
                                           order=order)
                map_all = map_all.crop(crop_width=pad_width)

            result_all[name] = map_all

        result_all["sqrt_ts"] = self.estimate_sqrt_ts(result_all["ts"],
                                                      result_all["norm"])
        return FluxMaps(data=result_all,
                        reference_model=self.model,
                        gti=dataset.gti)
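A hedged usage sketch for the TS map estimation above; it assumes the estimator
exposes model and energy_edges as constructor keywords (the run method reads them
as attributes), and the dataset path is again only a placeholder:

    import astropy.units as u
    from gammapy.datasets import MapDataset
    from gammapy.estimators import TSMapEstimator
    from gammapy.modeling.models import (
        PointSpatialModel,
        PowerLawSpectralModel,
        SkyModel,
    )

    # Placeholder path: a MapDataset with counts, exposure and background set.
    dataset = MapDataset.read("$GAMMAPY_DATA/cta-1dc-gc/cta-1dc-gc.fits.gz")

    # Test source hypothesis used as the reference model for the TS maps.
    model = SkyModel(
        spatial_model=PointSpatialModel(),
        spectral_model=PowerLawSpectralModel(index=2),
    )

    estimator = TSMapEstimator(model=model, energy_edges=[0.3, 1, 10] * u.TeV)
    maps = estimator.run(dataset)  # FluxMaps with ts, sqrt_ts, flux, flux_err, flux_ul
    print(maps)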