Example #1
    def run(self, datasets):
        """Run."""
        datasets = Datasets(datasets)
        # find extension parameter
        # TODO: write something better
        model = datasets.models[self.source].spatial_model

        if hasattr(model, "sigma"):
            self.size_parameter = model.sigma
        elif hasattr(model, "r_0"):
            self.size_parameter = model.r_0
        elif hasattr(model, "radius"):
            self.size_parameter = model.radius
        else:
            raise ValueError(
                f"Cannot find size parameter on model {self.source}")

        rows = []

        for energy_min, energy_max in progress_bar(zip(self.energy_edges[:-1],
                                                       self.energy_edges[1:]),
                                                   desc="Energy bins"):
            datasets_sliced = datasets.slice_by_energy(energy_min=energy_min,
                                                       energy_max=energy_max)
            datasets_sliced = Datasets(
                [_.to_image(name=_.name) for _ in datasets_sliced])
            datasets_sliced.models = datasets.models  #.copy()
            row = self.estimate_size(datasets_sliced)
            rows.append(row)
        return rows
Example #2
    def _spectrum_extraction(self):
        """Run all steps for the spectrum extraction."""
        log.info("Reducing spectrum datasets.")
        datasets_settings = self.config.datasets
        dataset_maker = self._create_dataset_maker()
        safe_mask_maker = self._create_safe_mask_maker()
        bkg_maker = self._create_background_maker()

        reference = self._create_reference_dataset()

        datasets = []
        for obs in progress_bar(self.observations, desc="Observations"):
            log.debug(f"Processing observation {obs.obs_id}")
            dataset = dataset_maker.run(reference.copy(), obs)
            if bkg_maker is not None:
                dataset = bkg_maker.run(dataset, obs)
                if dataset.counts_off is None:
                    log.debug(
                        f"No OFF region found for observation {obs.obs_id}. Discarding."
                    )
                    continue
            dataset = safe_mask_maker.run(dataset, obs)
            log.debug(dataset)
            datasets.append(dataset)
        self.datasets = Datasets(datasets)

        if datasets_settings.stack:
            stacked = self.datasets.stack_reduce(name="stacked")
            self.datasets = Datasets([stacked])
Example #3
def test_Datasets_mutation():
    dat = MyDataset(name="test-1")
    dats = Datasets([MyDataset(name="test-2"), MyDataset(name="test-3")])
    dats2 = Datasets([MyDataset(name="test-4"), MyDataset(name="test-5")])

    dats.insert(0, dat)
    assert dats.names == ["test-1", "test-2", "test-3"]

    dats.extend(dats2)
    assert dats.names == ["test-1", "test-2", "test-3", "test-4", "test-5"]

    dat3 = dats[3]
    dats.remove(dats[3])
    assert dats.names == ["test-1", "test-2", "test-3", "test-5"]
    dats.append(dat3)
    assert dats.names == ["test-1", "test-2", "test-3", "test-5", "test-4"]
    dats.pop(3)
    assert dats.names == ["test-1", "test-2", "test-3", "test-4"]

    with pytest.raises(ValueError, match="Dataset names must be unique"):
        dats.append(dat)
    with pytest.raises(ValueError, match="Dataset names must be unique"):
        dats.insert(0, dat)
    with pytest.raises(ValueError, match="Dataset names must be unique"):
        dats.extend(dats2)
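The test above exercises the list-like API of Datasets (insert, extend, remove, append, pop) and its unique-name constraint. The following minimal sketch shows the same access pattern with real map datasets; it assumes a recent gammapy version in which Datasets also supports name-based indexing.

# Minimal sketch of list- and name-based access on Datasets (assumed recent gammapy API)
from gammapy.datasets import Datasets, MapDataset
from gammapy.maps import MapAxis, WcsGeom

axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=3)
geom = WcsGeom.create(npix=10, binsz=0.1, axes=[axis])

datasets = Datasets([MapDataset.create(geom, name=f"obs-{i}") for i in range(3)])
assert datasets.names == ["obs-0", "obs-1", "obs-2"]

datasets.remove(datasets["obs-1"])  # look up by name, remove like a list element
assert datasets.names == ["obs-0", "obs-2"]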
Example #4
def compactify(dsets, dtmin=1 * u.h, debug=False):
    """
    Returns a list of stacked Dataset having a minimal total duration from an
    original unstacked Dataset list.
    Note that the model stacking is not applied.

    Parameters
    ----------
    dsets : Dataset List
        The initial list of Datasets.
    dtmin : astropy.time, optional
        The stacked Dataset minimal duration. The default is 1*u.h.
    debug : Boolean, optional
        If True, let's talk a bit. The default is False.

    Returns
    -------
    ds_compacted : Dataset List
        The compacted Dataset list.

    """

    duration = 0 * u.s
    tmp_stack = Datasets()
    ds_compacted = Datasets()
    iset = 0

    for ds in dsets:
        tmp_stack.append(ds)
        duration += ds.gti.time_delta[0]

        if debug: print("  ", ds.name, " : ", ds.livetime, " appended")

        " If max duration reached, stack"
        if (duration >= dtmin):
            dset_stacked = stacking(tmp_stack, tryflux=False, debug=False)
            name = "Compacted-" + str(iset)
            ds_compacted.append(dset_stacked[-1].copy(name=name))
            if debug:
                print("  Dt exceeded - stack", len(tmp_stack), " datasets")
                print(tmp_stack)
                print("   Duration and stack reset")
                print(dset_stacked)
                print(dset_stacked[-1].name, " should be kept as ", name)

            # Reset stack and duration
            duration = 0 * u.s
            tmp_stack = Datasets()
            iset += 1

    return ds_compacted
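A hypothetical call to compactify; dsets here is a placeholder for an unstacked Datasets (e.g. one dataset per observation), and the stacking helper used inside compactify is assumed to come from the same, unshown module.

# Hypothetical usage sketch for compactify()
import astropy.units as u

compacted = compactify(dsets, dtmin=0.5 * u.h, debug=True)
for ds in compacted:
    print(ds.name, ds.gti.time_sum)  # name and total good time of each stacked dataset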
Example #5
    def estimate_size(self, datasets):
        """Estimate size for a given energy range.

        Parameters
        ----------
        datasets : `~gammapy.datasets.Datasets` or list of `~gammapy.datasets.MapDataset` 
            Map datasets.

        Returns
        -------
        result : dict
            Dict with results for the extension measurement.
        """
        datasets = Datasets(datasets)

        #        self.fit.run(datasets)

        if self.size_values:
            self.size_parameter.scan_values = self.size_values.to_value(
                self.size_parameter.unit)
        self.size_parameter.scan_min = self.size_min.to_value(
            self.size_parameter.unit)
        self.size_parameter.scan_max = self.size_max.to_value(
            self.size_parameter.unit)
        self.size_parameter.scan_n_values = self.size_n_values

        result = super().run(datasets, self.size_parameter)
        return result
Example #6
File: sed.py  Project: mireianievas/gammapy
    def estimate_flux_point(self, datasets, energy_min, energy_max):
        """Estimate flux point for a single energy group.

        Parameters
        ----------
        datasets : `Datasets`
            Datasets
        energy_min, energy_max : `~astropy.units.Quantity`
            Energy bounds to compute the flux point for.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        datasets_sliced = datasets.slice_by_energy(energy_min=energy_min,
                                                   energy_max=energy_max)
        if self.sum_over_energy_groups:
            datasets_sliced = Datasets(
                [_.to_image(name=_.name) for _ in datasets_sliced])

        if len(datasets_sliced) > 0:
            datasets_sliced.models = datasets.models.copy()
            return super().run(datasets=datasets_sliced)
        else:
            log.warning(
                f"No dataset contributes in range {energy_min}-{energy_max}")
            model = datasets.models[self.source].spectral_model
            return self._nan_result(datasets, model, energy_min, energy_max)
Example #7
def data_reduction(instrument):
    log.info(f"data_reduction: {instrument}")
    config = AnalysisConfig.read("config.yaml")
    config.observations.datastore = f"$JOINT_CRAB/data/{instrument}"
    config.datasets.stack = instrument_opts[instrument]['stack']
    config.datasets.containment_correction = instrument_opts[instrument][
        'containment']
    config.datasets.on_region.radius = instrument_opts[instrument]['on_radius']

    analysis = Analysis(config)
    analysis.get_observations()
    analysis.get_datasets()

    # TODO remove when safe mask can be set on config
    if instrument == "fact":
        from gammapy.datasets import SpectrumDatasetOnOff
        stacked = SpectrumDatasetOnOff.create(
            e_reco=analysis.datasets[0]._energy_axis.edges,
            e_true=analysis.datasets[0]._energy_axis.edges,
            region=None)
        for ds in analysis.datasets:
            ds.mask_safe[:] = True
            stacked.stack(ds)
        analysis.datasets = Datasets([stacked])

    analysis.datasets.write(f"reduced_{instrument}", overwrite=True)
Example #8
    def run(self, n_obs=10, sigma=0.3 * u.deg, A0=5e-13):
        pos1 = SkyCoord(self.skydir.galactic.l + self.wobble_offset,
                        self.skydir.galactic.b,
                        frame="galactic")
        pos2 = SkyCoord(self.skydir.galactic.l - self.wobble_offset,
                        self.skydir.galactic.b,
                        frame="galactic")
        pos3 = SkyCoord(self.skydir.galactic.l,
                        self.skydir.galactic.b + self.wobble_offset,
                        frame="galactic")
        pos4 = SkyCoord(self.skydir.galactic.l,
                        self.skydir.galactic.b - self.wobble_offset,
                        frame="galactic")

        datasets = Datasets()

        for j, apos in enumerate([pos1, pos2, pos3, pos4]):
            print("Pointing position: \n", apos)
            for i in range(n_obs):
                empty = self.create_empty(name=f"dataset-{j}-{i}")
                models = self.sky_model(sigma.value, A0)
                dataset = self.simulate_single(pointing=apos,
                                               models=models,
                                               empty=empty)
                datasets.append(dataset)

        return datasets
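A hypothetical usage sketch; simulator stands for an instance of the (unshown) simulation class this run method belongs to.

# Hypothetical sketch: simulate a small wobble dataset collection
import astropy.units as u

datasets = simulator.run(n_obs=2, sigma=0.2 * u.deg)
print(len(datasets))       # 8 datasets: 4 wobble positions x 2 observations each
print(datasets.names[:4])  # names follow the "dataset-{j}-{i}" pattern above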
Example #9
def test_datasets_stack_reduce_no_off():
    datasets = Datasets()
    obs_ids = [23523, 23526, 23559, 23592]

    for obs_id in obs_ids:
        filename = f"$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs{obs_id}.fits"
        ds = SpectrumDatasetOnOff.read(filename)
        datasets.append(ds)

    datasets[-1].counts_off = None

    with pytest.raises(ValueError):
        stacked = datasets.stack_reduce(name="stacked")

    datasets[-1].mask_safe.data[...] = False
    stacked = datasets.stack_reduce(name="stacked")
    assert_allclose(stacked.exposure.meta["livetime"].to_value("s"),
                    4732.5469999)
    assert stacked.counts.data.sum() == 369

    datasets[0].mask_safe.data[...] = False

    stacked = datasets.stack_reduce(name="stacked")
    assert_allclose(stacked.exposure.meta["livetime"].to_value("s"),
                    3150.81024152)
    assert stacked.counts.data.sum() == 245
Example #10
def test_flux_point_dataset_serialization(tmp_path):
    path = "$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits"
    table = Table.read(make_path(path))
    table["e_ref"] = table["e_ref"].quantity.to("TeV")
    data = FluxPoints.from_table(table, format="gadf-sed")

    spectral_model = PowerLawSpectralModel(index=2.3,
                                           amplitude="2e-13 cm-2 s-1 TeV-1",
                                           reference="1 TeV")
    model = SkyModel(spectral_model=spectral_model, name="test_model")
    dataset = FluxPointsDataset(model, data, name="test_dataset")

    dataset2 = FluxPointsDataset.read(path, name="test_dataset2")
    assert_allclose(dataset.data.dnde.data, dataset2.data.dnde.data)
    assert np.all(dataset.mask_safe.data == dataset2.mask_safe.data)
    assert dataset2.name == "test_dataset2"

    Datasets([dataset]).write(
        filename=tmp_path / "tmp_datasets.yaml",
        filename_models=tmp_path / "tmp_models.yaml",
    )

    datasets = Datasets.read(
        filename=tmp_path / "tmp_datasets.yaml",
        filename_models=tmp_path / "tmp_models.yaml",
    )

    new_dataset = datasets[0]
    assert_allclose(new_dataset.data.dnde, dataset.data.dnde, 1e-4)
    if dataset.mask_fit is None:
        assert np.all(new_dataset.mask_fit == dataset.mask_safe)
    assert np.all(new_dataset.mask_safe == dataset.mask_safe)
    assert new_dataset.name == "test_dataset"
Example #11
    def run(self, datasets):
        """Run the flux point estimator for all energy groups.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.Dataset`
            Datasets

        Returns
        -------
        flux_points : `FluxPoints`
            Estimated flux points.
        """
        datasets = Datasets(datasets).copy()

        rows = []

        for e_min, e_max in zip(self.e_edges[:-1], self.e_edges[1:]):
            row = self.estimate_flux_point(datasets, e_min=e_min, e_max=e_max)
            rows.append(row)

        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})

        #TODO: this should be changed once likelihood is fully supported
        return FluxPoints(table).to_sed_type("dnde")
Example #12
    def setup(self):
        path = "$GAMMAPY_DATA/joint-crab/spectra/hess/"
        self.datasets = Datasets(
            [
                SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23523.fits"),
                SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23592.fits"),
            ]
        )

        self.pwl = SkyModel(
            spectral_model=PowerLawSpectralModel(
                index=2, amplitude=1e-12 * u.Unit("cm-2 s-1 TeV-1"), reference=1 * u.TeV
            )
        )

        self.ecpl = SkyModel(
            spectral_model=ExpCutoffPowerLawSpectralModel(
                index=2,
                amplitude=1e-12 * u.Unit("cm-2 s-1 TeV-1"),
                reference=1 * u.TeV,
                lambda_=0.1 / u.TeV,
            )
        )

        # Example fit for one observation
        self.datasets[0].models = self.pwl
        self.fit = Fit([self.datasets[0]])
Example #13
    def run(self, datasets):
        """Run the flux point estimator for all energy groups.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.Dataset`
            Datasets

        Returns
        -------
        flux_points : `FluxPoints`
            Estimated flux points.
        """
        datasets = Datasets(datasets).copy()

        rows = []

        for energy_min, energy_max in zip(
            self.energy_edges[:-1], self.energy_edges[1:]
        ):
            row = self.estimate_flux_point(
                datasets, energy_min=energy_min, energy_max=energy_max,
            )
            rows.append(row)

        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})

        model = datasets.models[self.source]
        return FluxPoints(table, reference_spectral_model=model.spectral_model.copy())
Example #14
def test_flux_point_dataset_serialization(tmp_path):
    path = "$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits"
    data = FluxPoints.read(path)
    data.table["e_ref"] = data.energy_ref.to("TeV")
    spectral_model = PowerLawSpectralModel(
        index=2.3, amplitude="2e-13 cm-2 s-1 TeV-1", reference="1 TeV"
    )
    model = SkyModel(spectral_model=spectral_model, name="test_model")
    dataset = FluxPointsDataset(model, data, name="test_dataset")

    Datasets([dataset]).write(
        filename=tmp_path / "tmp_datasets.yaml",
        filename_models=tmp_path / "tmp_models.yaml",
    )

    datasets = Datasets.read(
        filename=tmp_path / "tmp_datasets.yaml",
        filename_models=tmp_path / "tmp_models.yaml",
    )

    new_dataset = datasets[0]
    assert_allclose(new_dataset.data.dnde, dataset.data.dnde, 1e-4)
    if dataset.mask_fit is None:
        assert np.all(new_dataset.mask_fit == dataset.mask_safe)
    assert np.all(new_dataset.mask_safe == dataset.mask_safe)
    assert new_dataset.name == "test_dataset"
Example #15
    def run(self, datasets):
        """Run flux profile estimation

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.MapDataset`
            Map datasets.

        Returns
        -------
        profile : `~gammapy.estimators.FluxPoints`
            Profile flux points.
        """
        datasets = Datasets(datasets=datasets)

        maps = []

        for region in self.regions:
            datasets_to_fit = datasets.to_spectrum_datasets(region=region)
            datasets_to_fit.models = SkyModel(self.spectrum,
                                              name="test-source")
            fp = super().run(datasets_to_fit)
            maps.append(fp)

        return FluxPoints.from_stack(
            maps=maps,
            axis=self.projected_distance_axis,
        )
Example #16
    def run(self, datasets):
        """Estimate flux for a given energy range.

        Parameters
        ----------
        datasets : list of `~gammapy.spectrum.SpectrumDataset`
            Spectrum datasets.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        datasets = Datasets(datasets)

        model = self.get_scale_model(datasets.models)

        with np.errstate(invalid="ignore", divide="ignore"):
            result = self.get_reference_flux_values(model.model)

        any_contribution = np.any([dataset.mask.any() for dataset in datasets])

        if len(datasets) == 0 or not any_contribution:
            result.update(self.nan_result)
        else:
            for dataset in datasets:
                dataset.models[self.source].spectral_model = model

            result.update(self._parameter_estimator.run(datasets, model.norm))

        return result
Example #17
    def run(self, datasets):
        """Estimate flux for a given energy range.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.SpectrumDataset`
            Spectrum datasets.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        datasets = Datasets(datasets)
        models = datasets.models.copy()

        model = self.get_scale_model(models)

        energy_min, energy_max = datasets.energy_ranges
        energy_axis = MapAxis.from_energy_edges([energy_min.min(), energy_max.max()])

        with np.errstate(invalid="ignore", divide="ignore"):
            result = model.reference_fluxes(energy_axis=energy_axis)
            # convert to scalar values
            result = {key: value.item() for key, value in result.items()}

        models[self.source].spectral_model = model
        datasets.models = models
        result.update(super().run(datasets, model.norm))
        return result
Example #18
def data_fitting(instrument, npoints):
    log.info("Running fit ...")
    # First define model
    crab_model = define_model()

    if instrument != "joint":
        datasets = read_datasets_and_set_model(instrument, crab_model)
    else:
        log.info("Performing joint analysis")
        ds_list = []
        for inst in AVAILABLE_DATA[:-1]:
            datasets = read_datasets_and_set_model(inst, crab_model)
            ds_list = [*ds_list, *datasets]
        datasets = Datasets(ds_list)

    # Perform fit
    fit = Fit(datasets)
    result = fit.run(optimize_opts={"tol": 0.1, "strategy": 0})
    log.info(result.parameters.to_table())

    path = f"results/fit_{instrument}.rst"
    log.info(f"Writing {path}")
    result.parameters.to_table().write(path,
                                       format="ascii.rst",
                                       overwrite=True)

    contours = make_contours(fit, result, npoints)
    with open(f"results/contours_{instrument}.yaml", "w") as file:
        yaml.dump(contours, file)
Example #19
    def run(self, datasets):
        """Estimate flux for a given energy range.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.SpectrumDataset`
            Spectrum datasets.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        datasets = Datasets(datasets)

        datasets_sliced = datasets.slice_by_energy(energy_min=self.energy_min,
                                                   energy_max=self.energy_max)

        new_names = [name + "-sliced" for name in datasets.names]
        models = datasets.models.reassign(datasets.names, new_names)
        datasets_sliced.models = models
        for d in datasets_sliced:
            if d.background_model:
                d.background_model.reset_to_default()

        if len(datasets_sliced) > 0:
            # TODO: this relies on the energy binning of the first dataset
            energy_axis = datasets_sliced[0].counts.geom.axes["energy"]
            energy_min, energy_max = energy_axis.edges.min(), energy_axis.edges.max()
        else:
            energy_min, energy_max = self.energy_min, self.energy_max

        contributions = []

        for dataset in datasets_sliced:
            if dataset.mask is not None:
                value = dataset.mask.data.any()
            else:
                value = True
            contributions.append(value)

        model = self.get_scale_model(models)

        with np.errstate(invalid="ignore", divide="ignore"):
            result = self.get_reference_flux_values(model.model, energy_min,
                                                    energy_max)

        if len(datasets) == 0 or not np.any(contributions):
            result.update(self.nan_result)
        else:
            models[self.source].spectral_model = model

            datasets_sliced.models = models
            result.update(
                self._parameter_estimator.run(datasets_sliced, model.norm))
            result["sqrt_ts"] = self.get_sqrt_ts(result["ts"], result["norm"])

        return result
Example #20
    def run(self, datasets):
        """Estimate flux for a given energy range.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.SpectrumDataset`
            Spectrum datasets.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        datasets = Datasets(datasets)

        datasets_sliced = datasets.slice_by_energy(energy_min=self.energy_min,
                                                   energy_max=self.energy_max)

        # TODO: simplify model book-keeping!!
        models = Models()

        for model in datasets.models:
            if "sky-model" in model.tag:
                models.append(model)
            elif "fov-bkg" in model.tag:
                bkg_model = model.copy(dataset_name=model.datasets_names[0] +
                                       "-sliced")
                bkg_model.reset_to_default()
                models.append(bkg_model)

        if len(datasets) > 0:
            # TODO: this relies on the energy binning of the first dataset
            energy_axis = datasets_sliced[0].counts.geom.axes["energy"]
            energy_min, energy_max = energy_axis.edges.min(), energy_axis.edges.max()
        else:
            energy_min, energy_max = self.energy_min, self.energy_max

        any_contribution = np.any(
            [dataset.mask.data.any() for dataset in datasets_sliced])

        model = self.get_scale_model(models)

        with np.errstate(invalid="ignore", divide="ignore"):
            result = self.get_reference_flux_values(model.model, energy_min,
                                                    energy_max)

        if len(datasets) == 0 or not any_contribution:
            result.update(self.nan_result)
        else:
            models[self.source].spectral_model = model

            datasets_sliced.models = models
            result.update(
                self._parameter_estimator.run(datasets_sliced, model.norm))
            result["sqrt_ts"] = self.get_sqrt_ts(result["ts"], result["norm"])

        return result
Example #21
def data_prep():
    data_store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")
    OBS_ID = 110380
    obs_ids = OBS_ID * np.ones(N_OBS)
    observations = data_store.get_observations(obs_ids)

    energy_axis = MapAxis.from_bounds(0.1,
                                      10,
                                      nbin=10,
                                      unit="TeV",
                                      name="energy",
                                      interp="log")
    geom = WcsGeom.create(
        skydir=(0, 0),
        binsz=0.02,
        width=(10, 8),
        frame="galactic",
        proj="CAR",
        axes=[energy_axis],
    )

    energy_axis_true = MapAxis.from_bounds(0.05,
                                           20,
                                           nbin=30,
                                           unit="TeV",
                                           name="energy_true",
                                           interp="log")

    offset_max = 4 * u.deg
    maker = MapDatasetMaker()
    safe_mask_maker = SafeMaskMaker(methods=["offset-max"],
                                    offset_max=offset_max)
    stacked = MapDataset.create(geom=geom, energy_axis_true=energy_axis_true)

    spatial_model = PointSpatialModel(lon_0="-0.05 deg",
                                      lat_0="-0.05 deg",
                                      frame="galactic")
    spectral_model = ExpCutoffPowerLawSpectralModel(
        index=2,
        amplitude=3e-12 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1.0 * u.TeV,
        lambda_=0.1 / u.TeV,
    )
    model = SkyModel(spatial_model=spatial_model,
                     spectral_model=spectral_model,
                     name="gc-source")

    datasets = Datasets([])
    for idx, obs in enumerate(observations):
        cutout = stacked.cutout(obs.pointing_radec,
                                width=2 * offset_max,
                                name=f"dataset{idx}")
        dataset = maker.run(cutout, obs)
        dataset = safe_mask_maker.run(dataset, obs)
        dataset.models = [model, FoVBackgroundModel(dataset_name=dataset.name)]
        datasets.append(dataset)
    return datasets
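The per-observation datasets returned by data_prep can be fit jointly, following the Fit pattern used elsewhere in this listing (newer gammapy versions instead use Fit().run(datasets)); a short sketch:

# Sketch: joint fit of the per-observation map datasets (older Fit(datasets) API assumed)
from gammapy.modeling import Fit

datasets = data_prep()
fit = Fit(datasets)
result = fit.run()
print(result.parameters.to_table())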
Example #22
def test_spectrum_dataset_on_off_to_yaml(tmpdir):
    spectrum_datasets_on_off = make_observation_list()
    datasets = Datasets(spectrum_datasets_on_off)
    datasets.write(path=tmpdir)
    datasets_read = Datasets.read(tmpdir / "_datasets.yaml", tmpdir / "_models.yaml")
    assert len(datasets_read) == len(datasets)
    assert datasets_read[0].name == datasets[0].name
    assert datasets_read[1].name == datasets[1].name
    assert datasets_read[1].counts.data.sum() == datasets[1].counts.data.sum()
Example #23
    def run(self, datasets):
        """Run light curve extraction.

        Normalize integral and energy flux between emin and emax.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.SpectrumDataset` or `~gammapy.datasets.MapDataset`
            Spectrum or Map datasets.

        Returns
        -------
        lightcurve : `~gammapy.estimators.FluxPoints`
            Light curve flux points
        """
        datasets = Datasets(datasets)

        if self.time_intervals is None:
            gti = datasets.gti
        else:
            gti = GTI.from_time_intervals(self.time_intervals)

        gti = gti.union(overlap_ok=False, merge_equal=False)

        rows = []
        valid_intervals = []
        for t_min, t_max in progress_bar(gti.time_intervals,
                                         desc="Time intervals"):
            datasets_to_fit = datasets.select_time(time_min=t_min,
                                                   time_max=t_max,
                                                   atol=self.atol)

            if len(datasets_to_fit) == 0:
                log.info(
                    f"No Dataset for the time interval {t_min} to {t_max}. Skipping interval."
                )
                continue

            valid_intervals.append([t_min, t_max])
            fp = self.estimate_time_bin_flux(datasets=datasets_to_fit)

            for name in ["counts", "npred", "npred_null"]:
                fp._data[name] = self.expand_map(fp._data[name],
                                                 dataset_names=datasets.names)
            rows.append(fp)

        if len(rows) == 0:
            raise ValueError(
                "LightCurveEstimator: No datasets in time intervals")

        gti = GTI.from_time_intervals(valid_intervals)
        axis = TimeMapAxis.from_gti(gti=gti)
        return FluxPoints.from_stack(
            maps=rows,
            axis=axis,
        )
Example #24
def hess_datasets():
    datasets = Datasets([])
    for obsid in [23523, 23526]:
        datasets.append(
            SpectrumDatasetOnOff.from_ogip_files(
                f"$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs{obsid}.fits"))
    PLmodel = PowerLawSpectralModel(amplitude="3.5e-11 cm-2s-1TeV-1",
                                    index=2.7)
    for dataset in datasets:
        dataset.models = SkyModel(spectral_model=PLmodel, name="Crab")
    return datasets
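The Datasets returned by hess_datasets can be written to disk in the same way as in the serialization examples above; a sketch, assuming the write() call accepts an overwrite keyword as in recent gammapy versions:

# Sketch: serialize the ON/OFF datasets and their models to YAML
datasets = hess_datasets()
datasets.write(
    filename="crab_datasets.yaml",
    filename_models="crab_models.yaml",
    overwrite=True,
)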
Example #25
    def slice_datasets(datasets, e_min, e_max):
        """Select and slice datasets in energy range

        Parameters
        ----------
        datasets : Datasets
            Datasets
        e_min, e_max : `~astropy.units.Quantity`
            Energy bounds to compute the flux point for.

        Returns
        -------
        datasets : Datasets
            Datasets

        """
        datasets_to_fit = Datasets()

        for dataset in datasets:
            # TODO: implement slice_by_coord() and simplify?
            energy_axis = dataset.counts.geom.get_axis_by_name("energy")
            try:
                group = energy_axis.group_table(edges=[e_min, e_max])
            except ValueError:
                log.info(
                    f"Dataset {dataset.name} does not contribute in the energy range"
                )
                continue

            is_normal = group["bin_type"] == "normal   "
            group = group[is_normal]

            slices = {
                "energy":
                slice(int(group["idx_min"][0]),
                      int(group["idx_max"][0]) + 1)
            }

            name = f"{dataset.name}-{e_min:.3f}-{e_max:.3f}"
            dataset_sliced = dataset.slice_by_idx(slices, name=name)

            # TODO: Simplify model handling!!!!
            models = []

            for model in dataset.models:
                if isinstance(model, BackgroundModel):
                    models.append(dataset_sliced.background_model)
                else:
                    models.append(model)

            dataset_sliced.models = models
            datasets_to_fit.append(dataset_sliced)

        return datasets_to_fit
Example #26
    def run(self, datasets):
        """Run light curve extraction.

        Normalize integral and energy flux between emin and emax.

        Parameters
        ----------
        datasets : list of `~gammapy.datasets.SpectrumDataset` or `~gammapy.datasets.MapDataset`
            Spectrum or Map datasets.

        Returns
        -------
        lightcurve : `~gammapy.estimators.LightCurve`
            the Light Curve object
        """
        datasets = Datasets(datasets)

        if self.time_intervals is None:
            gti = datasets.gti
        else:
            gti = GTI.from_time_intervals(self.time_intervals)

        gti = gti.union(overlap_ok=False, merge_equal=False)

        rows = []

        for t_min, t_max in gti.time_intervals:
            datasets_to_fit = datasets.select_time(t_min=t_min,
                                                   t_max=t_max,
                                                   atol=self.atol)

            if len(datasets_to_fit) == 0:
                log.debug(
                    f"No Dataset for the time interval {t_min} to {t_max}")
                continue

            row = {"time_min": t_min.mjd, "time_max": t_max.mjd}
            row.update(self.estimate_time_bin_flux(datasets_to_fit))
            rows.append(row)

        if len(rows) == 0:
            raise ValueError(
                "LightCurveEstimator: No datasets in time intervals")

        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        model = datasets.models[self.source]

        # TODO: cleanup here...
        fp = FluxPoints(table,
                        reference_spectral_model=model.spectral_model.copy())
        table_flux = fp.to_table(sed_type="flux")
        table_flux.remove_columns(["stat", "ts", "sqrt_ts", "e_min", "e_max"])
        return LightCurve(hstack([table, table_flux]))
Example #27
def hess_datasets():
    datasets = Datasets([])
    pwl = PowerLawSpectralModel(amplitude="3.5e-11 cm-2s-1TeV-1", index=2.7)
    model = SkyModel(spectral_model=pwl, name="Crab")

    for obsid in [23523, 23526]:
        dataset = SpectrumDatasetOnOff.read(
            f"$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs{obsid}.fits")
        dataset.models = model
        datasets.append(dataset)

    return datasets
Example #28
    def run(self, dataset):
        """Compute correlated excess, Li & Ma significance and flux maps

        Parameters
        ----------
        dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`
            input dataset

        Returns
        -------
        images : dict
            Dictionary containing result correlated maps. Keys are:

                * counts : correlated counts map
                * background : correlated background map
                * excess : correlated excess map
                * ts : TS map
                * sqrt_ts : sqrt(delta TS), or Li-Ma significance map
                * err : symmetric error map (from covariance)
                * flux : flux map. An exposure map must be present in the dataset to compute flux map
                * errn : negative error map
                * errp : positive error map
                * ul : upper limit map

        """
        if not isinstance(dataset, MapDataset):
            raise ValueError("Unsupported dataset type")

        # TODO: add support for joint excess estimate to ExcessMapEstimator?
        datasets = Datasets(dataset)

        if self.e_edges is None:
            energy_axis = dataset.counts.geom.axes["energy"]
            e_edges = u.Quantity([energy_axis.edges[0], energy_axis.edges[-1]])
        else:
            e_edges = self.e_edges

        results = []

        for e_min, e_max in zip(e_edges[:-1], e_edges[1:]):
            sliced_dataset = datasets.slice_energy(e_min, e_max)[0]

            result = self.estimate_excess_map(sliced_dataset)
            results.append(result)

        results_all = {}

        for name in results[0].keys():
            map_all = Map.from_images(images=[_[name] for _ in results])
            results_all[name] = map_all

        return results_all
Example #29
    def run(self, datasets):
        """Run light curve extraction.

        Normalize integral and energy flux between emin and emax.

        Parameters
        ----------
        datasets : list of `~gammapy.spectrum.SpectrumDataset` or `~gammapy.cube.MapDataset`
            Spectrum or Map datasets.

        Returns
        -------
        lightcurve : `~gammapy.time.LightCurve`
            the Light Curve object
        """
        datasets = Datasets(datasets)

        if self.time_intervals is None:
            gti = datasets.gti
        else:
            gti = GTI.from_time_intervals(self.time_intervals)

        gti = gti.union(overlap_ok=False, merge_equal=False)

        rows = []

        for t_min, t_max in gti.time_intervals:
            datasets_to_fit = datasets.select_time(t_min=t_min,
                                                   t_max=t_max,
                                                   atol=self.atol)

            if len(datasets_to_fit) == 0:
                log.debug(
                    f"No Dataset for the time interval {t_min} to {t_max}")
                continue

            row = {"time_min": t_min.mjd, "time_max": t_max.mjd}

            data = self.estimate_time_bin_flux(datasets_to_fit)
            row.update(data)
            row.update(self.estimate_counts(datasets_to_fit))
            rows.append(row)

        if len(rows) == 0:
            raise ValueError(
                "LightCurveEstimator: No datasets in time intervals")

        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        table = FluxPoints(table).to_sed_type("flux").table
        return LightCurve(table)
Example #30
def test_group_datasets_in_time_interval():
    # Doing a LC on one hour bin
    datasets = Datasets(get_spectrum_datasets())
    time_intervals = [
        Time(["2010-01-01T00:00:00", "2010-01-01T01:00:00"]),
        Time(["2010-01-01T01:00:00", "2010-01-01T02:00:00"]),
    ]

    group_table = datasets.gti.group_table(time_intervals)

    assert len(group_table) == 2
    assert_allclose(group_table["time_min"], [55197.0, 55197.04166666667])
    assert_allclose(group_table["time_max"], [55197.04166666667, 55197.083333333336])
    assert_allclose(group_table["group_idx"], [0, 1])