Code example #1
    def _map_making(self):
        """Make maps and datasets for 3d analysis."""
        log.info("Creating geometry.")
        geom = self._create_geometry(self.settings["datasets"]["geom"])

        if "geom-irf" in self.settings["datasets"]:
            geom_irf = self._create_geometry(self.settings["datasets"]["geom-irf"])
        else:
            geom_irf = geom.to_binsz(binsz=BINSZ_IRF)

        offset_max = Angle(self.settings["datasets"]["offset-max"])
        stack_datasets = self.settings["datasets"]["stack-datasets"]
        log.info("Creating datasets.")

        maker = MapDatasetMaker(
            geom=geom,
            geom_true=geom_irf,
            offset_max=offset_max,
        )
        if stack_datasets:
            stacked = MapDataset.create(geom=geom, geom_irf=geom_irf, name="stacked")
            for obs in self.observations:
                dataset = maker.run(obs)
                stacked.stack(dataset)
            self._extract_irf_kernels(stacked)
            datasets = [stacked]
        else:
            datasets = []
            for obs in self.observations:
                dataset = maker.run(obs)
                self._extract_irf_kernels(dataset)
                datasets.append(dataset)

        self.datasets = Datasets(datasets)
Code example #2
    def _map_making(self):
        """Make maps and datasets for 3d analysis."""
        log.info("Creating geometry.")

        geom = self._create_geometry(self.settings["datasets"]["geom"])

        geom_irf = dict(energy_axis_true=None, binsz_irf=None, margin_irf=None)
        if "energy-axis-true" in self.settings["datasets"]:
            axis_params = self.settings["datasets"]["energy-axis-true"]
            geom_irf["energy_axis_true"] = MapAxis.from_bounds(**axis_params)
        geom_irf["binsz_irf"] = self.settings["datasets"].get("binsz", None)
        geom_irf["margin_irf"] = self.settings["datasets"].get("margin", None)

        offset_max = Angle(self.settings["datasets"]["offset-max"])
        log.info("Creating datasets.")

        maker = MapDatasetMaker(geom=geom, offset_max=offset_max, **geom_irf)
        if self.settings["datasets"]["stack-datasets"]:
            stacked = MapDataset.create(geom=geom, name="stacked", **geom_irf)
            for obs in self.observations:
                dataset = maker.run(obs)
                stacked.stack(dataset)
            self._extract_irf_kernels(stacked)
            datasets = [stacked]
        else:
            datasets = []
            for obs in self.observations:
                dataset = maker.run(obs)
                self._extract_irf_kernels(dataset)
                datasets.append(dataset)

        self.datasets = Datasets(datasets)
Code example #3
File: core.py  Project: gfiusa/gammapy
    def _spectrum_extraction(self):
        """Run all steps for the spectrum extraction."""
        log.info("Reducing spectrum datasets.")
        datasets_settings = self.config.datasets
        on_lon = datasets_settings.on_region.lon
        on_lat = datasets_settings.on_region.lat
        on_center = SkyCoord(on_lon,
                             on_lat,
                             frame=datasets_settings.on_region.frame)
        on_region = CircleSkyRegion(on_center,
                                    datasets_settings.on_region.radius)

        maker_config = {}
        if datasets_settings.containment_correction:
            maker_config[
                "containment_correction"] = datasets_settings.containment_correction
        e_reco = self._make_energy_axis(
            datasets_settings.geom.axes.energy).edges

        maker_config["selection"] = ["counts", "aeff", "edisp"]
        dataset_maker = SpectrumDatasetMaker(**maker_config)
        bkg_maker_config = {}
        if datasets_settings.background.exclusion:
            exclusion_region = Map.read(datasets_settings.background.exclusion)
            bkg_maker_config["exclusion_mask"] = exclusion_region
        bkg_maker = ReflectedRegionsBackgroundMaker(**bkg_maker_config)

        safe_mask_selection = self.config.datasets.safe_mask.methods
        safe_mask_settings = self.config.datasets.safe_mask.settings
        safe_mask_maker = SafeMaskMaker(methods=safe_mask_selection,
                                        **safe_mask_settings)

        e_true = self._make_energy_axis(
            datasets_settings.geom.axes.energy_true).edges

        reference = SpectrumDataset.create(e_reco=e_reco,
                                           e_true=e_true,
                                           region=on_region)

        datasets = []
        for obs in self.observations:
            log.info(f"Processing observation {obs.obs_id}")
            dataset = dataset_maker.run(reference.copy(), obs)
            dataset = bkg_maker.run(dataset, obs)
            if dataset.counts_off is None:
                log.info(
                    f"No OFF region found for observation {obs.obs_id}. Discarding."
                )
                continue
            dataset = safe_mask_maker.run(dataset, obs)
            log.debug(dataset)
            datasets.append(dataset)

        self.datasets = Datasets(datasets)

        if self.config.datasets.stack:
            stacked = self.datasets.stack_reduce(name="stacked")
            self.datasets = Datasets([stacked])
Code example #4
    def _spectrum_extraction(self):
        """Run all steps for the spectrum extraction."""
        region = self.settings["datasets"]["geom"]["region"]
        log.info("Reducing spectrum datasets.")
        on_lon = Angle(region["center"][0])
        on_lat = Angle(region["center"][1])
        on_center = SkyCoord(on_lon, on_lat, frame=region["frame"])
        on_region = CircleSkyRegion(on_center, Angle(region["radius"]))

        maker_config = {}
        if "containment_correction" in self.settings["datasets"]:
            maker_config["containment_correction"] = self.settings["datasets"][
                "containment_correction"
            ]
        params = self.settings["datasets"]["geom"]["axes"][0]
        e_reco = MapAxis.from_bounds(**params).edges
        maker_config["e_reco"] = e_reco

        # TODO: remove hard-coded e_true and make it configurable
        maker_config["e_true"] = np.logspace(-2, 2.5, 109) * u.TeV
        maker_config["region"] = on_region

        dataset_maker = SpectrumDatasetMaker(**maker_config)
        bkg_maker_config = {}
        background = self.settings["datasets"]["background"]

        if "exclusion_mask" in background:
            map_hdu = {}
            filename = background["exclusion_mask"]["filename"]
            if "hdu" in background["exclusion_mask"]:
                map_hdu = {"hdu": background["exclusion_mask"]["hdu"]}
            exclusion_region = Map.read(filename, **map_hdu)
            bkg_maker_config["exclusion_mask"] = exclusion_region
        if background["background_estimator"] == "reflected":
            reflected_bkg_maker = ReflectedRegionsBackgroundMaker(**bkg_maker_config)
        else:
            # TODO: raise error?
            log.info("Background estimation only for reflected regions method.")

        safe_mask_maker = SafeMaskMaker(methods=["aeff-default", "aeff-max"])

        datasets = []
        for obs in self.observations:
            log.info(f"Processing observation {obs.obs_id}")
            selection = ["counts", "aeff", "edisp"]
            dataset = dataset_maker.run(obs, selection=selection)
            dataset = reflected_bkg_maker.run(dataset, obs)
            dataset = safe_mask_maker.run(dataset, obs)
            log.debug(dataset)
            datasets.append(dataset)

        self.datasets = Datasets(datasets)

        if self.settings["datasets"]["stack-datasets"]:
            stacked = self.datasets.stack_reduce()
            stacked.name = "stacked"
            self.datasets = Datasets([stacked])
Code example #5
def test_spectrum_dataset_on_off_to_yaml(tmpdir):
    spectrum_datasets_on_off = make_observation_list()
    datasets = Datasets(spectrum_datasets_on_off)
    datasets.write(path=tmpdir)
    datasets_read = Datasets.read(tmpdir / "_datasets.yaml",
                                  tmpdir / "_models.yaml")
    assert len(datasets_read) == len(datasets)
    assert datasets_read[0].name == datasets[0].name
    assert datasets_read[1].name == datasets[1].name
    assert datasets_read[1].counts.data.sum() == datasets[1].counts.data.sum()
Code example #6
def test_datasets_stack_reduce():
    obs_ids = [23523, 23526, 23559, 23592]
    dataset_list = []
    for obs in obs_ids:
        filename = "$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs{}.fits"
        ds = SpectrumDatasetOnOff.from_ogip_files(filename.format(obs))
        dataset_list.append(ds)
    datasets = Datasets(dataset_list)
    stacked = datasets.stack_reduce()
    assert_allclose(stacked.livetime.to_value("s"), 6313.8116406202325)
Code example #7
File: test_serialize_yaml.py  Project: fjhzwl/gammapy
def test_datasets_to_io(tmpdir):
    filedata = "$GAMMAPY_DATA/tests/models/gc_example_datasets.yaml"
    filemodel = "$GAMMAPY_DATA/tests/models/gc_example_models.yaml"

    datasets = Datasets.from_yaml(filedata, filemodel)

    assert len(datasets.datasets) == 2
    assert len(datasets.parameters.parameters) == 20

    dataset0 = datasets.datasets[0]
    assert dataset0.counts.data.sum() == 6824
    assert_allclose(dataset0.exposure.data.sum(), 2072125400000.0, atol=0.1)
    assert dataset0.psf is not None
    assert dataset0.edisp is not None

    assert_allclose(dataset0.background_model.evaluate().data.sum(),
                    4094.2,
                    atol=0.1)

    assert dataset0.background_model.name == "background_irf_gc"

    dataset1 = datasets.datasets[1]
    assert dataset1.background_model.name == "background_irf_g09"

    assert dataset0.model["gll_iem_v06_cutout"] == dataset1.model[
        "gll_iem_v06_cutout"]

    assert isinstance(dataset0.model, SkyModels)
    assert len(dataset0.model.skymodels) == 2
    assert dataset0.model.skymodels[0].name == "gc"
    assert dataset0.model.skymodels[1].name == "gll_iem_v06_cutout"

    assert (dataset0.model.skymodels[0].parameters["reference"] is
            dataset1.model.skymodels[1].parameters["reference"])

    assert_allclose(dataset1.model.skymodels[1].parameters["lon_0"].value,
                    0.9,
                    atol=0.1)

    path = str(tmpdir / "/written_")
    datasets.to_yaml(path, overwrite=True)
    datasets_read = Datasets.from_yaml(path + "datasets.yaml",
                                       path + "models.yaml")
    assert len(datasets_read.datasets) == 2
    dataset0 = datasets_read.datasets[0]
    assert dataset0.counts.data.sum() == 6824
    assert_allclose(dataset0.exposure.data.sum(), 2072125400000.0, atol=0.1)
    assert dataset0.psf is not None
    assert dataset0.edisp is not None
    assert_allclose(dataset0.background_model.evaluate().data.sum(),
                    4094.2,
                    atol=0.1)
Code example #8
File: test_serialize_yaml.py  Project: gfiusa/gammapy
def test_datasets_to_io(tmp_path):
    filedata = "$GAMMAPY_DATA/tests/models/gc_example_datasets.yaml"
    filemodel = "$GAMMAPY_DATA/tests/models/gc_example_models.yaml"

    datasets = Datasets.read(filedata, filemodel)

    assert len(datasets) == 2
    assert len(datasets.parameters) == 22

    dataset0 = datasets[0]
    assert dataset0.counts.data.sum() == 6824
    assert_allclose(dataset0.exposure.data.sum(), 2072125400000.0, atol=0.1)
    assert dataset0.psf is not None
    assert dataset0.edisp is not None

    assert_allclose(dataset0.background_model.evaluate().data.sum(),
                    4094.2,
                    atol=0.1)

    assert dataset0.background_model.name == "background_irf_gc"

    dataset1 = datasets[1]
    assert dataset1.background_model.name == "background_irf_g09"

    assert (dataset0.models["gll_iem_v06_cutout"] ==
            dataset1.models["gll_iem_v06_cutout"])

    assert isinstance(dataset0.models, Models)
    assert len(dataset0.models) == 2
    assert dataset0.models[0].name == "gc"
    assert dataset0.models[1].name == "gll_iem_v06_cutout"

    assert (dataset0.models[0].parameters["reference"] is
            dataset1.models[1].parameters["reference"])

    assert_allclose(dataset1.models[1].parameters["lon_0"].value,
                    0.9,
                    atol=0.1)

    datasets.write(tmp_path, prefix="written")
    datasets_read = Datasets.read(tmp_path / "written_datasets.yaml",
                                  tmp_path / "written_models.yaml")
    assert len(datasets_read) == 2
    dataset0 = datasets_read[0]
    assert dataset0.counts.data.sum() == 6824
    assert_allclose(dataset0.exposure.data.sum(), 2072125400000.0, atol=0.1)
    assert dataset0.psf is not None
    assert dataset0.edisp is not None
    assert_allclose(dataset0.background_model.evaluate().data.sum(),
                    4094.2,
                    atol=0.1)
Code example #9
def data_prep():
    data_store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")
    OBS_ID = 110380
    obs_ids = OBS_ID * np.ones(N_OBS)
    observations = data_store.get_observations(obs_ids)

    energy_axis = MapAxis.from_bounds(0.1,
                                      10,
                                      nbin=10,
                                      unit="TeV",
                                      name="energy",
                                      interp="log")
    geom = WcsGeom.create(
        skydir=(0, 0),
        binsz=0.02,
        width=(10, 8),
        frame="galactic",
        proj="CAR",
        axes=[energy_axis],
    )

    offset_max = 4 * u.deg
    maker = MapDatasetMaker()
    safe_mask_maker = SafeMaskMaker(methods=["offset-max"],
                                    offset_max=offset_max)
    stacked = MapDataset.create(geom=geom)

    spatial_model = PointSpatialModel(lon_0="-0.05 deg",
                                      lat_0="-0.05 deg",
                                      frame="galactic")
    spectral_model = ExpCutoffPowerLawSpectralModel(
        index=2,
        amplitude=3e-12 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1.0 * u.TeV,
        lambda_=0.1 / u.TeV,
    )
    model = SkyModel(spatial_model=spatial_model,
                     spectral_model=spectral_model,
                     name="gc-source")

    datasets = Datasets([])
    for idx, obs in enumerate(observations):
        cutout = stacked.cutout(obs.pointing_radec,
                                width=2 * offset_max,
                                name=f"dataset{idx}")
        dataset = maker.run(cutout, obs)
        dataset = safe_mask_maker.run(dataset, obs)
        dataset.models = model
        datasets.append(dataset)
    return datasets
Code example #10
File: test_fit.py  Project: gfiusa/gammapy
def test_datasets_io_no_model(tmpdir):
    axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=2)
    geom = WcsGeom.create(npix=(5, 5), axes=[axis])
    dataset_1 = MapDataset.create(geom, name="1")
    dataset_2 = MapDataset.create(geom, name="2")

    datasets = Datasets([dataset_1, dataset_2])

    datasets.write(path=tmpdir, prefix="test")

    filename_1 = tmpdir / "test_data_1.fits"
    assert filename_1.exists()

    filename_2 = tmpdir / "test_data_2.fits"
    assert filename_2.exists()
Code example #11
    def __init__(
        self,
        datasets,
        e_edges,
        source="",
        norm_min=0.2,
        norm_max=5,
        norm_n_values=11,
        norm_values=None,
        sigma=1,
        sigma_ul=2,
        reoptimize=False,
    ):
        # make a copy to not modify the input datasets
        if not isinstance(datasets, Datasets):
            datasets = Datasets(datasets)

        if not (datasets.is_all_same_type and datasets.is_all_same_shape):
            raise ValueError(
                "Flux point estimation requires a list of datasets"
                " of the same type and data shape.")

        self.datasets = datasets.copy()
        self.e_edges = e_edges

        dataset = self.datasets.datasets[0]

        if isinstance(dataset, SpectrumDatasetOnOff):
            model = dataset.model
        else:
            model = dataset.model[source].spectral_model

        self.model = ScaleSpectralModel(model)
        self.model.norm.min = 0
        self.model.norm.max = 1e3

        if norm_values is None:
            norm_values = np.logspace(np.log10(norm_min), np.log10(norm_max),
                                      norm_n_values)

        self.norm_values = norm_values
        self.sigma = sigma
        self.sigma_ul = sigma_ul
        self.reoptimize = reoptimize
        self.source = source
        self.fit = Fit(self.datasets)

        self._set_scale_model()
Code example #12
def test_datasets_stack_reduce():
    obs_ids = [23523, 23526, 23559, 23592]
    dataset_list = []
    for obs in obs_ids:
        filename = "$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs{}.fits"
        ds = SpectrumDatasetOnOff.from_ogip_files(filename.format(obs))
        dataset_list.append(ds)
    datasets = Datasets(dataset_list)
    stacked = datasets.stack_reduce()
    assert_allclose(stacked.livetime.to_value("s"), 6313.8116406202325)

    info_table = datasets.info_table()
    assert_allclose(info_table["n_on"], [124, 126, 119, 90])

    info_table_cum = datasets.info_table(cumulative=True)
    assert_allclose(info_table_cum["n_on"], [124, 250, 369, 459])
Code example #13
    def _spectrum_extraction(self):
        """Run all steps for the spectrum extraction."""
        region = self.settings["datasets"]["geom"]["region"]
        log.info("Reducing spectrum datasets.")
        on_lon = Angle(region["center"][0])
        on_lat = Angle(region["center"][1])
        on_center = SkyCoord(on_lon, on_lat, frame=region["frame"])
        on_region = CircleSkyRegion(on_center, Angle(region["radius"]))
        background_params = {"on_region": on_region}
        background = self.settings["datasets"]["background"]
        if "exclusion_mask" in background:
            map_hdu = {}
            filename = background["exclusion_mask"]["filename"]
            if "hdu" in background["exclusion_mask"]:
                map_hdu = {"hdu": background["exclusion_mask"]["hdu"]}
            exclusion_region = Map.read(filename, **map_hdu)
            background_params["exclusion_mask"] = exclusion_region
        if background["background_estimator"] == "reflected":
            self.background_estimator = ReflectedRegionsBackgroundEstimator(
                observations=self.observations, **background_params
            )
            self.background_estimator.run()
        else:
            # TODO: raise error?
            log.info("Background estimation only for reflected regions method.")

        extraction_params = {}
        if "containment_correction" in self.settings["datasets"]:
            extraction_params["containment_correction"] = self.settings["datasets"][
                "containment_correction"
            ]
        params = self.settings["datasets"]["geom"]["axes"][0]
        e_reco = MapAxis.from_bounds(**params).edges
        extraction_params["e_reco"] = e_reco
        extraction_params["e_true"] = None
        self.extraction = SpectrumExtraction(
            observations=self.observations,
            bkg_estimate=self.background_estimator.result,
            **extraction_params,
        )
        self.extraction.run()
        self.datasets = Datasets(self.extraction.spectrum_observations)
        if self.settings["datasets"]["stack-datasets"]:
            stacked = self.datasets.stack_reduce()
            stacked.name = "stacked"
            self.datasets = Datasets([stacked])
Code example #14
File: test_asmooth.py  Project: gfiusa/gammapy
def input_dataset():
    datasets = Datasets.read(
        filedata="$GAMMAPY_DATA/fermi-3fhl-crab/Fermi-LAT-3FHL_datasets.yaml",
        filemodel="$GAMMAPY_DATA/fermi-3fhl-crab/Fermi-LAT-3FHL_models.yaml",
    )
    dataset = datasets[0]
    dataset.psf = None
    return dataset
Code example #15
    def _map_making(self):
        """Make maps and datasets for 3d analysis."""
        log.info("Creating geometry.")

        geom = self._create_geometry(self.settings["datasets"]["geom"])

        geom_irf = dict(energy_axis_true=None, binsz_irf=None, margin_irf=None)
        if "energy-axis-true" in self.settings["datasets"]:
            axis_params = self.settings["datasets"]["energy-axis-true"]
            geom_irf["energy_axis_true"] = MapAxis.from_bounds(**axis_params)
        geom_irf["binsz_irf"] = self.settings["datasets"].get("binsz", None)
        geom_irf["margin_irf"] = self.settings["datasets"].get("margin", None)

        offset_max = Angle(self.settings["datasets"]["offset-max"])
        log.info("Creating datasets.")

        maker = MapDatasetMaker(offset_max=offset_max)
        maker_safe_mask = SafeMaskMaker(methods=["offset-max"], offset_max=offset_max)

        stacked = MapDataset.create(geom=geom, name="stacked", **geom_irf)

        if self.settings["datasets"]["stack-datasets"]:
            for obs in self.observations:
                log.info(f"Processing observation {obs.obs_id}")
                dataset = maker.run(stacked, obs)
                dataset = maker_safe_mask.run(dataset, obs)
                dataset.background_model.name = f"bkg_{dataset.name}"
                # TODO remove this once dataset and model have unique identifiers
                log.debug(dataset)
                stacked.stack(dataset)
            self._extract_irf_kernels(stacked)
            datasets = [stacked]
        else:
            datasets = []
            for obs in self.observations:
                log.info(f"Processing observation {obs.obs_id}")
                dataset = maker.run(stacked, obs)
                dataset = maker_safe_mask.run(dataset, obs)
                dataset.background_model.name = f"bkg_{dataset.name}"
                # TODO remove this once dataset and model have unique identifiers
                self._extract_irf_kernels(dataset)
                log.debug(dataset)
                datasets.append(dataset)

        self.datasets = Datasets(datasets)
Code example #16
def data_prep():
    data_store = DataStore.from_dir("$GAMMAPY_DATA/hess-dl3-dr1/")
    OBS_ID = 23523
    obs_ids = OBS_ID * np.ones(N_OBS)
    observations = data_store.get_observations(obs_ids)
    target_position = SkyCoord(ra=83.63, dec=22.01, unit="deg", frame="icrs")
    on_region_radius = Angle("0.11 deg")
    on_region = CircleSkyRegion(center=target_position, radius=on_region_radius)

    exclusion_region = CircleSkyRegion(
        center=SkyCoord(183.604, -8.708, unit="deg", frame="galactic"),
        radius=0.5 * u.deg,
    )

    skydir = target_position.galactic
    exclusion_mask = Map.create(
        npix=(150, 150), binsz=0.05, skydir=skydir, proj="TAN", coordsys="GAL"
    )

    mask = exclusion_mask.geom.region_mask([exclusion_region], inside=False)
    exclusion_mask.data = mask

    e_reco = MapAxis.from_bounds(0.1, 40, nbin=40, interp="log", unit="TeV").edges
    e_true = MapAxis.from_bounds(0.05, 100, nbin=200, interp="log", unit="TeV").edges

    dataset_maker = SpectrumDatasetMaker(
        region=on_region, e_reco=e_reco, e_true=e_true, containment_correction=True
    )
    bkg_maker = ReflectedRegionsBackgroundMaker(exclusion_mask=exclusion_mask)
    safe_mask_masker = SafeMaskMaker(methods=["aeff-max"], aeff_percent=10)

    spectral_model = PowerLawSpectralModel(
        index=2, amplitude=2e-11 * u.Unit("cm-2 s-1 TeV-1"), reference=1 * u.TeV
    )
    spatial_model = PointSpatialModel(
        lon_0=target_position.ra, lat_0=target_position.dec, frame="icrs"
    )
    spatial_model.lon_0.frozen = True
    spatial_model.lat_0.frozen = True

    sky_model = SkyModel(
        spatial_model=spatial_model, spectral_model=spectral_model, name=""
    )

    # Data preparation
    datasets = []

    for ind, observation in enumerate(observations):
        dataset = dataset_maker.run(observation, selection=["counts", "aeff", "edisp"])
        dataset_on_off = bkg_maker.run(dataset, observation)
        dataset_on_off = safe_mask_masker.run(dataset_on_off, observation)
        dataset_on_off.name = f"dataset{ind}"
        dataset_on_off.models = sky_model
        datasets.append(dataset_on_off)

    return Datasets(datasets)
Code example #17
File: core.py  Project: gfiusa/gammapy
    def _map_making(self):
        """Make maps and datasets for 3d analysis."""
        log.info("Creating geometry.")

        geom = self._create_geometry()
        geom_settings = self.config.datasets.geom
        geom_irf = dict(energy_axis_true=None, binsz_irf=None)
        if geom_settings.axes.energy_true.min is not None:
            geom_irf["energy_axis_true"] = self._make_energy_axis(
                geom_settings.axes.energy_true)
        geom_irf["binsz_irf"] = geom_settings.wcs.binsize_irf.to("deg").value
        offset_max = geom_settings.selection.offset_max
        log.info("Creating datasets.")

        maker = MapDatasetMaker(selection=self.config.datasets.map_selection)

        safe_mask_selection = self.config.datasets.safe_mask.methods
        safe_mask_settings = self.config.datasets.safe_mask.settings
        maker_safe_mask = SafeMaskMaker(methods=safe_mask_selection,
                                        **safe_mask_settings)
        stacked = MapDataset.create(geom=geom, name="stacked", **geom_irf)

        if self.config.datasets.stack:
            for obs in self.observations:
                log.info(f"Processing observation {obs.obs_id}")
                cutout = stacked.cutout(obs.pointing_radec,
                                        width=2 * offset_max)
                dataset = maker.run(cutout, obs)
                dataset = maker_safe_mask.run(dataset, obs)
                log.debug(dataset)
                stacked.stack(dataset)
            datasets = [stacked]
        else:
            datasets = []
            for obs in self.observations:
                log.info(f"Processing observation {obs.obs_id}")
                cutout = stacked.cutout(obs.pointing_radec,
                                        width=2 * offset_max)
                dataset = maker.run(cutout, obs)
                dataset = maker_safe_mask.run(dataset, obs)
                log.debug(dataset)
                datasets.append(dataset)
        self.datasets = Datasets(datasets)
Code example #18
    def __init__(
        self,
        datasets,
        source="",
        norm_min=0.2,
        norm_max=5,
        norm_n_values=11,
        norm_values=None,
        sigma=1,
        sigma_ul=2,
        reoptimize=False,
    ):
        self.datasets = Datasets(datasets)

        if not (self.datasets.is_all_same_type
                and self.datasets.is_all_same_shape):
            raise ValueError(
                "Light Curve estimation requires a list of datasets"
                " of the same type and data shape.")

        dataset = self.datasets[0]

        if isinstance(dataset, SpectrumDatasetOnOff):
            model = dataset.model
        else:
            model = dataset.model[source].spectral_model

        self.model = ScaleSpectralModel(model)
        self.model.norm.min = 0
        self.model.norm.max = 1e5

        if norm_values is None:
            norm_values = np.logspace(np.log10(norm_min), np.log10(norm_max),
                                      norm_n_values)

        self.norm_values = norm_values

        self.sigma = sigma
        self.sigma_ul = sigma_ul
        self.reoptimize = reoptimize
        self.source = source

        self._set_scale_model()
Code example #19
def test_flux_point_dataset_serialization(tmp_path):
    path = "$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits"
    data = FluxPoints.read(path)
    data.table["e_ref"] = data.e_ref.to("TeV")
    spectral_model = PowerLawSpectralModel(index=2.3,
                                           amplitude="2e-13 cm-2 s-1 TeV-1",
                                           reference="1 TeV")
    model = SkyModel(spectral_model=spectral_model, name="test_model")
    dataset = FluxPointsDataset(model, data, name="test_dataset")

    Datasets([dataset]).write(tmp_path, prefix="tmp")
    datasets = Datasets.read(tmp_path / "tmp_datasets.yaml",
                             tmp_path / "tmp_models.yaml")
    new_dataset = datasets[0]
    assert_allclose(new_dataset.data.table["dnde"], dataset.data.table["dnde"],
                    1e-4)
    if dataset.mask_fit is None:
        assert np.all(new_dataset.mask_fit == dataset.mask_safe)
    assert np.all(new_dataset.mask_safe == dataset.mask_safe)
    assert new_dataset.name == "test_dataset"
Code example #20
def test_flux_point_dataset_serialization(tmp_path):
    path = "$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits"
    data = FluxPoints.read(path)
    data.table["e_ref"] = data.e_ref.to("TeV")
    # TODO: remove this duplicate definition once the model is redefined as SkyModel
    spatial_model = ConstantSpatialModel()
    spectral_model = PowerLawSpectralModel(index=2.3,
                                           amplitude="2e-13 cm-2 s-1 TeV-1",
                                           reference="1 TeV")
    model = SkyModel(spatial_model, spectral_model, name="test_model")
    dataset = FluxPointsDataset(SkyModels([model]), data, name="test_dataset")

    Datasets([dataset]).to_yaml(tmp_path, prefix="tmp")
    datasets = Datasets.from_yaml(tmp_path / "tmp_datasets.yaml",
                                  tmp_path / "tmp_models.yaml")
    new_dataset = datasets[0]
    assert_allclose(new_dataset.data.table["dnde"], dataset.data.table["dnde"],
                    1e-4)
    if dataset.mask_fit is None:
        assert np.all(new_dataset.mask_fit == dataset.mask_safe)
    assert np.all(new_dataset.mask_safe == dataset.mask_safe)
    assert new_dataset.name == "test_dataset"
Code example #21
    def __init__(
        self,
        datasets,
        time_intervals=None,
        source="",
        norm_min=0.2,
        norm_max=5,
        norm_n_values=11,
        norm_values=None,
        sigma=1,
        sigma_ul=2,
        reoptimize=False,
    ):
        self.datasets = Datasets(datasets)

        if not (self.datasets.is_all_same_type
                and self.datasets.is_all_same_shape):
            raise ValueError(
                "Light Curve estimation requires a list of datasets"
                " of the same type and data shape.")

        if time_intervals is None:
            time_intervals = [
                Time([d.gti.time_start[0], d.gti.time_stop[-1]])
                for d in self.datasets
            ]

        self._check_and_sort_time_intervals(time_intervals)

        dataset = self.datasets[0]

        if isinstance(dataset, SpectrumDatasetOnOff):
            model = dataset.model
        else:
            model = dataset.model[source].spectral_model

        self.model = ScaleSpectralModel(model)
        self.model.norm.min = 0
        self.model.norm.max = 1e5

        if norm_values is None:
            norm_values = np.logspace(np.log10(norm_min), np.log10(norm_max),
                                      norm_n_values)

        self.norm_values = norm_values

        self.sigma = sigma
        self.sigma_ul = sigma_ul
        self.reoptimize = reoptimize
        self.source = source

        self.group_table_info = None
Code example #22
    def read_regions(self):
        for kr in self.ROIs_sel:
            filedata = Path(self.resdir + "/3FHL_ROI_num" + str(kr) +
                            "_datasets.yaml")
            filemodel = Path(self.resdir + "/3FHL_ROI_num" + str(kr) +
                             "_models.yaml")
            try:
                dataset = list(Datasets.from_yaml(filedata, filemodel))[0]
            except (FileNotFoundError, IOError):
                continue

            pars = dataset.parameters
            pars.covariance = np.load(self.resdir + "/" + dataset.name +
                                      "_covariance.npy")

            infos = np.load(self.resdir + "/3FHL_ROI_num" + str(kr) +
                            "_fit_infos.npz")
            self.diags["message"].append(infos["message"])
            self.diags["stat"].append(infos["stat"])

            if self.savefig:
                self.plot_maps(dataset)

            for model in list(dataset.model):
                if (self.FHL3[model.name].data["ROI_num"] == kr
                        and self.FHL3[model.name].data["Signif_Avg"] >=
                        self.sig_cut):

                    model.spatial_model.parameters.covariance = pars.get_subcovariance(
                        model.spatial_model.parameters)
                    model.spectral_model.parameters.covariance = pars.get_subcovariance(
                        model.spectral_model.parameters)
                    dataset.background_model.parameters.covariance = pars.get_subcovariance(
                        dataset.background_model.parameters)
                    res_spec = model.spectral_model
                    cat_spec = self.FHL3[model.name].spectral_model()

                    res_fp = FluxPoints.read(self.resdir + "/" + model.name +
                                             "_flux_points.fits")
                    res_fp.table["is_ul"] = res_fp.table["ts"] < 1.0
                    cat_fp = self.FHL3[model.name].flux_points.to_sed_type(
                        "dnde")

                    self.update_spec_diags(dataset, model, cat_spec, res_spec,
                                           cat_fp, res_fp)
                    if self.savefig:
                        self.plot_spec(kr, model, cat_spec, res_spec, cat_fp,
                                       res_fp)
Code example #23
def data_prep():
    data_store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")
    OBS_ID = 110380
    obs_ids = OBS_ID * np.ones(N_OBS)
    observations = data_store.get_observations(obs_ids)

    energy_axis = MapAxis.from_bounds(0.1,
                                      10,
                                      nbin=10,
                                      unit="TeV",
                                      name="energy",
                                      interp="log")

    geom = WcsGeom.create(
        skydir=(0, 0),
        binsz=0.05,
        width=(10, 8),
        coordsys="GAL",
        proj="CAR",
        axes=[energy_axis],
    )

    stacked = MapDataset.create(geom)
    maker = MapDatasetMaker()
    safe_mask_maker = SafeMaskMaker(methods=["offset-max"], offset_max="4 deg")
    for obs in observations:
        dataset = maker.run(stacked, obs)
        dataset = safe_mask_maker.run(dataset, obs)
        stacked.stack(dataset)

    spatial_model = PointSpatialModel(lon_0="0.01 deg",
                                      lat_0="0.01 deg",
                                      frame="galactic")
    spectral_model = ExpCutoffPowerLawSpectralModel(
        index=2,
        amplitude=3e-12 * u.Unit("cm-2 s-1 TeV-1"),
        reference=1.0 * u.TeV,
        lambda_=0.1 / u.TeV,
    )
    model = SkyModel(spatial_model=spatial_model,
                     spectral_model=spectral_model,
                     name="gc-source")

    stacked.models = model
    stacked.name = "stacked_ds"

    return Datasets([stacked])
Code example #24
    def _fit_bkg(self, dataset):
        """Fit the FoV background model on the dataset counts data"""

        # freeze all model components not related to background model
        datasets = Datasets([dataset])

        parameters_frozen = []
        for par in datasets.parameters:
            parameters_frozen.append(par.frozen)
            if par not in dataset.background_model.parameters:
                par.frozen = True

        fit = Fit(datasets)
        fit_result = fit.run()
        if fit_result.success is False:
            log.info(
                f"FoVBackgroundMaker failed. No fit convergence for {dataset.name}."
            )

        # Unfreeze parameters
        for i, par in enumerate(datasets.parameters):
            par.frozen = parameters_frozen[i]
Code example #25
    def setup(self):
        path = "$GAMMAPY_DATA/joint-crab/spectra/hess/"
        self.datasets = Datasets([
            SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23523.fits"),
            SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23592.fits"),
        ])

        self.pwl = PowerLawSpectralModel(index=2,
                                         amplitude=1e-12 *
                                         u.Unit("cm-2 s-1 TeV-1"),
                                         reference=1 * u.TeV)

        self.ecpl = ExpCutoffPowerLawSpectralModel(
            index=2,
            amplitude=1e-12 * u.Unit("cm-2 s-1 TeV-1"),
            reference=1 * u.TeV,
            lambda_=0.1 / u.TeV,
        )

        # Example fit for one observation
        self.datasets[0].model = self.pwl
        self.fit = Fit([self.datasets[0]])
Code example #26
class Analysis:
    """Config-driven high-level analysis interface.

    It is initialized by default with a set of configuration parameters and values declared in
    an internal configuration schema YAML file, though the user can also provide configuration
    parameters passed as a nested dictionary at the moment of instantiation. In that case these
    parameters will overwrite the default values of those present in the configuration file.

    For more info see  :ref:`HLI`.

    Parameters
    ----------
    config : dict or `AnalysisConfig`
        Configuration options following `AnalysisConfig` schema
    """

    def __init__(self, config=None):
        if isinstance(config, dict):
            self._config = AnalysisConfig(config)
        elif isinstance(config, AnalysisConfig):
            self._config = config
        else:
            raise ValueError("Dict or `AnalysisConfig` object required.")

        self._set_logging()
        self.observations = None
        self.background_estimator = None
        self.datasets = None
        self.extraction = None
        self.model = None
        self.fit = None
        self.fit_result = None
        self.flux_points = None

    @property
    def config(self):
        """Analysis configuration (`AnalysisConfig`)"""
        return self._config

    @property
    def settings(self):
        """Configuration settings for the analysis session."""
        return self.config.settings

    def get_observations(self):
        """Fetch observations from the data store according to criteria defined in the configuration."""
        self.config.validate()
        log.info("Fetching observations.")
        datastore_path = make_path(self.settings["observations"]["datastore"])
        if datastore_path.is_file():
            datastore = DataStore().from_file(datastore_path)
        elif datastore_path.is_dir():
            datastore = DataStore().from_dir(datastore_path)
        else:
            raise FileNotFoundError(f"Datastore {datastore_path} not found.")
        ids = set()
        selection = dict()
        for criteria in self.settings["observations"]["filters"]:
            selected_obs = ObservationTable()

            # TODO: Reduce significantly the code.
            # This block would be handled by datastore.obs_table.select_observations
            selection["type"] = criteria["filter_type"]
            for key, val in criteria.items():
                if key in ["lon", "lat", "radius", "border"]:
                    val = Angle(val)
                selection[key] = val
            if selection["type"] == "angle_box":
                selection["type"] = "par_box"
                selection["value_range"] = Angle(criteria["value_range"])
            if selection["type"] == "sky_circle" or selection["type"].endswith("_box"):
                selected_obs = datastore.obs_table.select_observations(selection)
            if selection["type"] == "par_value":
                mask = (
                    datastore.obs_table[criteria["variable"]] == criteria["value_param"]
                )
                selected_obs = datastore.obs_table[mask]
            if selection["type"] == "ids":
                obs_list = datastore.get_observations(criteria["obs_ids"])
                selected_obs["OBS_ID"] = [obs.obs_id for obs in obs_list.list]
            if selection["type"] == "all":
                obs_list = datastore.get_observations()
                selected_obs["OBS_ID"] = [obs.obs_id for obs in obs_list.list]

            if len(selected_obs):
                if "exclude" in criteria and criteria["exclude"]:
                    ids.difference_update(selected_obs["OBS_ID"].tolist())
                else:
                    ids.update(selected_obs["OBS_ID"].tolist())
        self.observations = datastore.get_observations(ids, skip_missing=True)
        for obs in self.observations.list:
            log.info(obs)

    def get_datasets(self):
        """Produce reduced datasets."""
        if not self._validate_reduction_settings():
            return False
        if self.settings["datasets"]["dataset-type"] == "SpectrumDatasetOnOff":
            self._spectrum_extraction()
        elif self.settings["datasets"]["dataset-type"] == "MapDataset":
            self._map_making()
        else:
            # TODO raise error?
            log.info("Data reduction method not available.")
            return False

    def set_model(self, model=None, filename=""):
        """Read the model from dict or filename and attach it to datasets.

        Parameters
        ----------
        model: dict or string
            Dictionary or string in YAML format with the serialized model.
        filename : string
            Name of the model YAML file describing the model.
        """
        if not self._validate_set_model():
            return False
        log.info(f"Reading model.")
        if isinstance(model, str):
            model = yaml.safe_load(model)
        if model:
            self.model = SkyModels(dict_to_models(model))
        elif filename:
            filepath = make_path(filename)
            self.model = SkyModels.from_yaml(filepath)
        else:
            return False
        # TODO: Deal with multiple components
        for dataset in self.datasets.datasets:
            if isinstance(dataset, MapDataset):
                dataset.model = self.model
            else:
                if len(self.model.skymodels) > 1:
                    raise ValueError(
                        "Can only fit a single spectral model at one time."
                    )
                dataset.model = self.model.skymodels[0].spectral_model
        log.info(self.model)

    def run_fit(self, optimize_opts=None):
        """Fitting reduced datasets to model."""
        if not self._validate_fitting_settings():
            return False

        for ds in self.datasets.datasets:
            # TODO: fit_range handled in jsonschema validation class
            if "fit_range" in self.settings["fit"]:
                e_min = u.Quantity(self.settings["fit"]["fit_range"]["min"])
                e_max = u.Quantity(self.settings["fit"]["fit_range"]["max"])
                if isinstance(ds, MapDataset):
                    ds.mask_fit = ds.counts.geom.energy_mask(e_min, e_max)
                else:
                    ds.mask_fit = ds.counts.energy_mask(e_min, e_max)
        log.info("Fitting reduced datasets.")
        self.fit = Fit(self.datasets)
        self.fit_result = self.fit.run(optimize_opts=optimize_opts)
        log.info(self.fit_result)

    def get_flux_points(self, source="source"):
        """Calculate flux points for a specific model component.

        Parameters
        ----------
        source : string
            Name of the model component where to calculate the flux points.
        """
        if not self._validate_fp_settings():
            return False

        # TODO: add "source" to config
        log.info("Calculating flux points.")
        axis_params = self.settings["flux-points"]["fp_binning"]
        e_edges = MapAxis.from_bounds(**axis_params).edges
        flux_point_estimator = FluxPointsEstimator(
            e_edges=e_edges, datasets=self.datasets, source=source
        )
        fp = flux_point_estimator.run()
        fp.table["is_ul"] = fp.table["ts"] < 4
        model = self.model[source].spectral_model.copy()
        self.flux_points = FluxPointsDataset(data=fp, model=model)
        cols = ["e_ref", "ref_flux", "dnde", "dnde_ul", "dnde_err", "is_ul"]
        log.info("\n{}".format(self.flux_points.data.table[cols]))

    @staticmethod
    def _create_geometry(params):
        """Create the geometry."""
        # TODO: handled in jsonschema validation class
        geom_params = copy.deepcopy(params)

        axes = []
        for axis_params in geom_params.get("axes", []):
            ax = MapAxis.from_bounds(**axis_params)
            axes.append(ax)

        geom_params["axes"] = axes
        geom_params["skydir"] = tuple(geom_params["skydir"])
        return WcsGeom.create(**geom_params)

    def _map_making(self):
        """Make maps and datasets for 3d analysis."""
        log.info("Creating geometry.")
        geom = self._create_geometry(self.settings["datasets"]["geom"])

        if "geom-irf" in self.settings["datasets"]:
            geom_irf = self._create_geometry(self.settings["datasets"]["geom-irf"])
        else:
            geom_irf = geom.to_binsz(binsz=BINSZ_IRF)

        offset_max = Angle(self.settings["datasets"]["offset-max"])
        stack_datasets = self.settings["datasets"]["stack-datasets"]
        log.info("Creating datasets.")

        maker = MapDatasetMaker(
            geom=geom,
            geom_true=geom_irf,
            offset_max=offset_max,
        )
        if stack_datasets:
            stacked = MapDataset.create(geom=geom, geom_irf=geom_irf, name="stacked")
            for obs in self.observations:
                dataset = maker.run(obs)
                stacked.stack(dataset)
            self._extract_irf_kernels(stacked)
            datasets = [stacked]
        else:
            datasets = []
            for obs in self.observations:
                dataset = maker.run(obs)
                self._extract_irf_kernels(dataset)
                datasets.append(dataset)

        self.datasets = Datasets(datasets)

    def _extract_irf_kernels(self, dataset):
        # TODO: remove hard-coded default value
        max_radius = self.settings["datasets"].get("psf-kernel-radius", "0.6 deg")
        # TODO: handle IRF maps in fit
        geom = dataset.counts.geom
        geom_irf = dataset.exposure.geom
        position = geom.center_skydir
        geom_psf = geom.to_image().to_cube(geom_irf.axes)
        dataset.psf = dataset.psf.get_psf_kernel(
            position=position, geom=geom_psf, max_radius=max_radius
        )
        e_reco = geom.get_axis_by_name("energy").edges
        dataset.edisp = dataset.edisp.get_energy_dispersion(
            position=position, e_reco=e_reco
        )

    def _set_logging(self):
        """Set logging parameters for API."""
        logging.basicConfig(**self.settings["general"]["logging"])
        log.info(
            "Setting logging config: {!r}".format(self.settings["general"]["logging"])
        )

    def _spectrum_extraction(self):
        """Run all steps for the spectrum extraction."""
        region = self.settings["datasets"]["geom"]["region"]
        log.info("Reducing spectrum datasets.")
        on_lon = Angle(region["center"][0])
        on_lat = Angle(region["center"][1])
        on_center = SkyCoord(on_lon, on_lat, frame=region["frame"])
        on_region = CircleSkyRegion(on_center, Angle(region["radius"]))
        background_params = {"on_region": on_region}
        background = self.settings["datasets"]["background"]
        if "exclusion_mask" in background:
            map_hdu = {}
            filename = background["exclusion_mask"]["filename"]
            if "hdu" in background["exclusion_mask"]:
                map_hdu = {"hdu": background["exclusion_mask"]["hdu"]}
            exclusion_region = Map.read(filename, **map_hdu)
            background_params["exclusion_mask"] = exclusion_region
        if background["background_estimator"] == "reflected":
            self.background_estimator = ReflectedRegionsBackgroundEstimator(
                observations=self.observations, **background_params
            )
            self.background_estimator.run()
        else:
            # TODO: raise error?
            log.info("Background estimation only for reflected regions method.")

        extraction_params = {}
        if "containment_correction" in self.settings["datasets"]:
            extraction_params["containment_correction"] = self.settings["datasets"][
                "containment_correction"
            ]
        params = self.settings["datasets"]["geom"]["axes"][0]
        e_reco = MapAxis.from_bounds(**params).edges
        extraction_params["e_reco"] = e_reco
        extraction_params["e_true"] = None
        self.extraction = SpectrumExtraction(
            observations=self.observations,
            bkg_estimate=self.background_estimator.result,
            **extraction_params,
        )
        self.extraction.run()
        self.datasets = Datasets(self.extraction.spectrum_observations)
        if self.settings["datasets"]["stack-datasets"]:
            stacked = self.datasets.stack_reduce()
            stacked.name = "stacked"
            self.datasets = Datasets([stacked])

    def _validate_reduction_settings(self):
        """Validate settings before proceeding to data reduction."""
        if self.observations and len(self.observations):
            self.config.validate()
            return True
        else:
            log.info("No observations selected.")
            log.info("Data reduction cannot be done.")
            return False

    def _validate_set_model(self):
        if self.datasets and self.datasets.datasets:
            self.config.validate()
            return True
        else:
            log.info("No datasets reduced.")
            return False

    def _validate_fitting_settings(self):
        """Validate settings before proceeding to fit 1D."""
        if not self.model:
            log.info("No model fetched for datasets.")
            log.info("Fit cannot be done.")
            return False
        else:
            return True

    def _validate_fp_settings(self):
        """Validate settings before proceeding to flux points estimation."""
        valid = True
        if self.fit:
            self.config.validate()
        else:
            log.info("No results available from fit.")
            valid = False
        if "flux-points" not in self.settings:
            log.info("No values declared for the energy bins.")
            valid = False
        elif "fp_binning" not in self.settings["flux-points"]:
            log.info("No values declared for the energy bins.")
            valid = False
        if not valid:
            log.info("Flux points calculation cannot be done.")
        return valid
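
The Analysis class above is driven entirely by the nested settings dictionary it reads, e.g. settings["observations"]["datastore"], settings["datasets"]["dataset-type"] and settings["datasets"]["stack-datasets"]. A minimal usage sketch follows; the import path and the exact configuration keys are assumptions (the key names simply mirror those accessed in the methods above and are not guaranteed to validate against the project's configuration schema):

# Hedged sketch: driving the config-based Analysis class shown above.
# The import path is an assumption; adjust it to the installed gammapy version.
from gammapy.scripts import Analysis

config = {
    # forwarded to logging.basicConfig(**...) by _set_logging()
    "general": {"logging": {"level": "INFO"}},
    "observations": {
        "datastore": "$GAMMAPY_DATA/hess-dl3-dr1",
        # one dict per selection criterion, as iterated in get_observations()
        "filters": [{"filter_type": "ids", "obs_ids": [23523, 23526]}],
    },
    "datasets": {
        # selects the _spectrum_extraction() branch of get_datasets()
        "dataset-type": "SpectrumDatasetOnOff",
        "stack-datasets": True,
        "containment_correction": True,
        "background": {"background_estimator": "reflected"},
        "geom": {
            # on-region definition read by _spectrum_extraction()
            "region": {
                "center": ["83.633 deg", "22.014 deg"],
                "frame": "icrs",
                "radius": "0.11 deg",
            },
            # passed to MapAxis.from_bounds(**params) for the e_reco edges
            "axes": [
                {"name": "energy", "lo_bnd": 0.1, "hi_bnd": 10.0,
                 "nbin": 20, "unit": "TeV", "interp": "log"}
            ],
        },
    },
}

analysis = Analysis(config)
analysis.get_observations()
analysis.get_datasets()

With "dataset-type": "MapDataset" (plus a geom block accepted by _create_geometry) the same get_datasets() call dispatches to _map_making() instead.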
Code example #27
    def run(self, e_ref, e_min, e_max, steps="all", atol="1e-6 s"):
        """Run light curve extraction.

        Normalize integral and energy flux between emin and emax.

        Parameters
        ----------
        e_ref : `~astropy.units.Quantity`
            reference energy of dnde flux normalization
        e_min : `~astropy.units.Quantity`
            minimum energy of integral and energy flux normalization interval
        e_max : `~astropy.units.Quantity`
            maximum energy of integral and energy flux normalization interval
        steps : list of str
            Which steps to execute. Available options are:

                * "err": estimate symmetric error.
                * "errn-errp": estimate asymmetric errors.
                * "ul": estimate upper limits.
                * "ts": estimate ts and sqrt(ts) values.
                * "norm-scan": estimate fit statistic profiles.

            By default all steps are executed.
        atol : `~astropy.units.Quantity`
            Tolerance value for time comparison with different scale. Default 1e-6 sec.

        Returns
        -------
        lightcurve : `~gammapy.time.LightCurve`
            the Light Curve object
        """
        atol = u.Quantity(atol)
        self.e_ref = e_ref
        self.e_min = e_min
        self.e_max = e_max

        rows = []
        self.group_table_info = group_datasets_in_time_interval(
            datasets=self.datasets,
            time_intervals=self.time_intervals,
            atol=atol)
        if np.all(self.group_table_info["Group_ID"] == -1):
            raise ValueError(
                "LightCurveEstimator: No datasets in time intervals")
        for igroup, time_interval in enumerate(self.time_intervals):
            index_dataset = np.where(
                self.group_table_info["Group_ID"] == igroup)[0]
            if len(index_dataset) == 0:
                log.debug("No Dataset for the time interval " + str(igroup))
                continue

            row = {
                "time_min": time_interval[0].mjd,
                "time_max": time_interval[1].mjd
            }
            interval_list_dataset = Datasets(
                [self.datasets[int(_)].copy() for _ in index_dataset])
            self._set_scale_model(interval_list_dataset)
            row.update(
                self.estimate_time_bin_flux(interval_list_dataset,
                                            time_interval, steps))
            rows.append(row)
        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        table = FluxPoints(table).to_sed_type("flux").table
        return LightCurve(table)
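
The run() method above builds one flux-point row per time interval and wraps the result in a LightCurve. A short usage sketch, assuming the surrounding class is the LightCurveEstimator of this gammapy generation (class name and import path are assumptions) and reusing the data_prep() helper from example #16 for the input datasets:

import astropy.units as u

# Assumed import path for the estimator whose __init__ (example #21) and
# run() (above) are shown; adjust to the installed gammapy version.
from gammapy.time import LightCurveEstimator

# Datasets with models attached, e.g. as returned by data_prep() in example #16
# (requires $GAMMAPY_DATA to be set and N_OBS to be defined).
datasets = data_prep()

# One time interval per dataset is derived from the GTIs when none are given.
estimator = LightCurveEstimator(datasets)
lc = estimator.run(e_ref=1 * u.TeV, e_min=0.3 * u.TeV, e_max=10 * u.TeV)

# One row per time interval; flux columns follow the "flux" SED type.
print(lc.table["time_min", "time_max", "flux", "flux_err"])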
Code example #28
# In[ ]:


# %%time
model = PowerLawSpectralModel(
    index=2, amplitude=1e-11 * u.Unit("cm-2 s-1 TeV-1"), reference=1 * u.TeV
)

for dataset in extract.spectrum_observations:
    dataset.model = model

fit = Fit(extract.spectrum_observations)
result = fit.run()
print(result)


# ### Spectral points
# 
# Finally, let's compute spectral points. The method used is to first choose an energy binning, and then to do a 1-dim likelihood fit / profile to compute the flux and flux error.

# In[ ]:


# Flux points are computed on stacked observation
stacked_obs = Datasets(extract.spectrum_observations).stack_reduce()

print(stacked_obs)


# In[ ]:


e_edges = np.logspace(0, 1.5, 5) * u.TeV

stacked_obs.model = model

fpe = FluxPointsEstimator(datasets=[stacked_obs], e_edges=e_edges)
flux_points = fpe.run()
flux_points.table_formatted
Code example #29
def read(filename):
    return Datasets.read(f"{filename}_datasets.yaml",
                         f"{filename}_models.yaml")
Code example #30
File: make.py  Project: fjhzwl/gammapy
def make_datasets_example():
    # Define which data to use and print some information

    energy_axis = MapAxis.from_edges(
        np.logspace(-1.0, 1.0, 4), unit="TeV", name="energy", interp="log"
    )
    geom0 = WcsGeom.create(
        skydir=(0, 0),
        binsz=0.1,
        width=(1, 1),
        coordsys="GAL",
        proj="CAR",
        axes=[energy_axis],
    )
    geom1 = WcsGeom.create(
        skydir=(1, 0),
        binsz=0.1,
        width=(1, 1),
        coordsys="GAL",
        proj="CAR",
        axes=[energy_axis],
    )
    geoms = [geom0, geom1]

    sources_coords = [(0, 0), (0.9, 0.1)]
    names = ["gc", "g09"]
    models = []

    for ind, (lon, lat) in enumerate(sources_coords):
        spatial_model = PointSpatialModel(
            lon_0=lon * u.deg, lat_0=lat * u.deg, frame="galactic"
        )
        spectral_model = ExpCutoffPowerLawSpectralModel(
            index=2 * u.Unit(""),
            amplitude=3e-12 * u.Unit("cm-2 s-1 TeV-1"),
            reference=1.0 * u.TeV,
            lambda_=0.1 / u.TeV,
        )
        model_ecpl = SkyModel(
            spatial_model=spatial_model, spectral_model=spectral_model, name=names[ind]
        )
        models.append(model_ecpl)

    # test to link a spectral parameter
    params0 = models[0].spectral_model.parameters
    params1 = models[1].spectral_model.parameters
    ind = params0.parameters.index(params0["reference"])
    params0.parameters[ind] = params1["reference"]

    # update the sky model
    ind = models[0].parameters.parameters.index(models[0].parameters["reference"])
    models[0].parameters.parameters[ind] = params1["reference"]

    obs_ids = [110380, 111140, 111159]
    data_store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")

    diffuse_model = SkyDiffuseCube.read(
        "$GAMMAPY_DATA/fermi_3fhl/gll_iem_v06_cutout.fits"
    )

    datasets_list = []
    for idx, geom in enumerate(geoms):
        observations = data_store.get_observations(obs_ids)

        stacked = MapDataset.create(geom=geom)
        stacked.background_model.name = "background_irf_" + names[idx]

        maker = MapDatasetMaker(geom=geom, offset_max=4.0 * u.deg)

        for obs in observations:
            dataset = maker.run(obs)
            stacked.stack(dataset)

        stacked.psf = stacked.psf.get_psf_kernel(position=geom.center_skydir, geom=geom, max_radius="0.3 deg")
        stacked.edisp = stacked.edisp.get_energy_dispersion(position=geom.center_skydir, e_reco=energy_axis.edges)

        stacked.name = names[idx]
        stacked.model = models[idx] + diffuse_model
        datasets_list.append(stacked)

    datasets = Datasets(datasets_list)

    dataset0 = datasets.datasets[0]
    print("dataset0")
    print("counts sum : ", dataset0.counts.data.sum())
    print("expo sum : ", dataset0.exposure.data.sum())
    print("bkg0 sum : ", dataset0.background_model.evaluate().data.sum())

    path = "$GAMMAPY_DATA/tests/models/gc_example_"
    datasets.to_yaml(path, overwrite=True)