Example #1
    def test_extract(pars, results, observations, bkg_estimate):
        """Test quantitative output for various configs"""
        extraction = SpectrumExtraction(observations=observations,
                                        bkg_estimate=bkg_estimate,
                                        **pars)
        extraction.run()
        obs = extraction.spectrum_observations[0]
        aeff_actual = obs.aeff.data.evaluate(energy=5 * u.TeV)
        edisp_actual = obs.edisp.data.evaluate(e_true=5 * u.TeV,
                                               e_reco=5.2 * u.TeV)

        assert_quantity_allclose(aeff_actual, results["aeff"], rtol=1e-3)
        assert_quantity_allclose(edisp_actual, results["edisp"], rtol=1e-3)

        containment_actual = extraction.containment[60]

        # TODO: Introduce assert_stats_allclose
        stats = ObservationStats(**obs._info_dict())
        n_on_actual = stats.n_on
        sigma_actual = stats.sigma

        assert n_on_actual == results["n_on"]
        assert_allclose(sigma_actual, results["sigma"], atol=1e-2)
        assert_allclose(containment_actual, results["containment"], rtol=1e-3)

        gti_obs = obs.gti.table
        gti_dataset = extraction.spectrum_observations[0].gti.table
        assert_allclose(gti_dataset["START"], gti_obs["START"])
        assert_allclose(gti_dataset["STOP"], gti_obs["STOP"])
Example #2
def create_data(input_dir, dataset_config, exclusion_map=None):
    telescope = dataset_config['telescope']
    on_region_radius = dataset_config['on_radius']
    energy_bins = dataset_config['e_reco_bins']
    containment = dataset_config['containment_correction']

    ds = DataStore.from_dir(os.path.join(input_dir, telescope))
    observations = ds.get_observations(ds.obs_table['OBS_ID'].data)
    t_obs = sum([o.observation_live_time_duration for o in observations])
    print(f'Total obstime for {telescope} is {t_obs.to("h")}')
    source_position = dataset_config['source_position']
    on_region = CircleSkyRegion(center=source_position,
                                radius=on_region_radius)

    print('Estimating Background')
    bkg_estimate = ReflectedRegionsBackgroundEstimator(
        observations=observations,
        on_region=on_region,
        exclusion_mask=exclusion_map)
    bkg_estimate.run()

    print('Extracting Count Spectra')
    extract = SpectrumExtraction(
        observations=observations,
        bkg_estimate=bkg_estimate.result,
        e_true=energy_bins,
        e_reco=energy_bins,
        containment_correction=containment,
        use_recommended_erange=False,  # TODO this might have to be checked.
    )
    extract.run()
    return extract
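
# A hedged sketch of the `dataset_config` dict create_data expects, inferred
# from the keys accessed above; all values are illustrative:
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord

dataset_config = {
    'telescope': 'hess',
    'on_radius': 0.11 * u.deg,
    'e_reco_bins': np.logspace(-1, 2, 31) * u.TeV,
    'containment_correction': False,
    'source_position': SkyCoord(83.633, 22.014, unit='deg', frame='icrs'),
}
extract = create_data('data', dataset_config)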
Example #3
def load_data(input_dir, dataset_config, exclusion_map=None):
    on_region_radius = dataset_config['on_radius']
    e_reco_bins = dataset_config['e_reco_bins']
    e_true_bins = dataset_config['e_true_bins']
    containment = dataset_config['containment_correction']

    ds = DataStore.from_dir(input_dir)
    observations = ds.get_observations(ds.obs_table['OBS_ID'].data)

    source_position = dataset_config['source_position']
    on_region = CircleSkyRegion(center=source_position,
                                radius=on_region_radius)

    print('Estimating Background')
    bkg_estimate = ReflectedRegionsBackgroundEstimator(
        observations=observations,
        on_region=on_region,
        exclusion_mask=exclusion_map)
    bkg_estimate.run()

    print('Extracting Count Spectra')
    extract = SpectrumExtraction(
        observations=observations,
        bkg_estimate=bkg_estimate.result,
        e_true=e_true_bins,
        e_reco=e_reco_bins,
        containment_correction=containment,
        use_recommended_erange=False,
    )
    extract.run()
    if dataset_config['stack']:
        return [extract.spectrum_observations.stack()]
    else:
        return extract.spectrum_observations
Example #4
def test_alpha(observations, bkg_estimate):
    # a_off = 0 makes alpha undefined, and a_off = 2 gives alpha above the
    # max_alpha=0.2 cut, so no spectrum observations should survive.
    bkg_estimate[0].a_off = 0
    bkg_estimate[1].a_off = 2
    extraction = SpectrumExtraction(observations=observations,
                                    bkg_estimate=bkg_estimate,
                                    max_alpha=0.2)
    extraction.run()
    assert len(extraction.spectrum_observations) == 0
Example #6
def extraction(bkg_estimate, observations):
    """An example SpectrumExtraction for tests."""
    # Restrict true energy range covered by HAP exporter
    e_true = np.logspace(-1, 1.9, 70) * u.TeV

    return SpectrumExtraction(bkg_estimate=bkg_estimate,
                              observations=observations,
                              e_true=e_true)
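
# A sketch of how such a fixture is consumed (assuming `extraction` is
# registered as a pytest fixture, as in the test module this comes from;
# the test name and assertion are illustrative):
def test_extraction_runs(extraction):
    extraction.run()
    assert len(extraction.spectrum_observations) > 0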
Example #8
def extract_spectra_iact(dataset):
    """Extract 1d spectra for IACT dataset"""
    log.info(f"Extracting 1d spectra for {dataset.name} dataset")
    datastore = DataStore.from_dir(f"data/{dataset.name}")
    observations = datastore.get_observations(dataset.obs_ids)

    on_region = CircleSkyRegion(center=config.source_pos,
                                radius=dataset.on_radius)

    exclusion_mask = config.get_exclusion_mask()

    bkg_estimate = ReflectedRegionsBackgroundEstimator(
        observations=observations,
        on_region=on_region,
        exclusion_mask=exclusion_mask)
    bkg_estimate.run()

    extract = SpectrumExtraction(
        observations=observations,
        bkg_estimate=bkg_estimate.result,
        e_true=config.energy_bins,
        e_reco=config.energy_bins,
        containment_correction=dataset.containment_correction,
    )
    extract.run()

    path = f"results/spectra/{dataset.name}/"
    log.info(f"Writing to {path}")

    if dataset.name == "fact":
        # For FACT the IRFs are the same for all observations
        # So we only store a stacked spectrum and response
        # plus we add the LO_THRESHOLD keyword, which was missing
        obs = extract.spectrum_observations.stack()
        obs.lo_threshold = 0.4 * u.TeV
        # we are writing a single observation, as for Fermi
        obs.write(path, use_sherpa=True, overwrite=True)
    else:
        extract.write(path, ogipdir="", use_sherpa=True, overwrite=True)
Example #9
def test_extract_cta_1dc_data(caplog):
    datastore = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")
    obs_ids = [110380, 111140]
    observations = datastore.get_observations(obs_ids)

    pos = SkyCoord(0.0, 0.0, unit="deg", frame="galactic")
    radius = Angle(0.11, "deg")
    on_region = CircleSkyRegion(pos, radius)

    est = ReflectedRegionsBackgroundEstimator(observations=observations,
                                              on_region=on_region,
                                              min_distance_input="0.2 deg")
    est.run()
    # This will test non PSF3D input as well as absence of default thresholds
    extract = SpectrumExtraction(bkg_estimate=est.result,
                                 observations=observations,
                                 containment_correction=True)
    extract.run()

    extract.compute_energy_threshold(method_lo="area_max", area_percent_lo=10)
    actual = extract.spectrum_observations[0].energy_range[0]
    assert_quantity_allclose(actual, 0.774263 * u.TeV, rtol=1e-3)
Example #10
def extract_spectra_IACT(dataset):
    """Extract 1d spectra for IACT dataset"""
    log.info(f"Extracting 1d spectra for {dataset.name} dataset")
    # The Dataset class already provides a method to obtain the gammapy DataStore object
    datastore = dataset.get_DataStore()
    obs_list = datastore.obs_list(dataset.obs_ids)

    on_region = CircleSkyRegion(center=dataset.source_pos,
                                radius=dataset.on_radius)

    exclusion_mask = config.get_exclusion_mask()

    bkg_estimate = ReflectedRegionsBackgroundEstimator(
        obs_list=obs_list, on_region=on_region, exclusion_mask=exclusion_mask)
    bkg_estimate.run()

    extract = SpectrumExtraction(
        obs_list=obs_list,
        bkg_estimate=bkg_estimate.result,
        containment_correction=dataset.containment_correction,
    )
    extract.run()

    path = f"{config.repo_path}/results/spectra/{dataset.name}/"
    log.info(f"Writing to {path}")

    if dataset.name == "fact":
        # For FACT the IRFs are the same for all observations
        # So we only store a stacked spectrum and response
        # plus we add the LO_THRESHOLD keyword, which was missing
        obs = extract.observations.stack()
        obs.lo_threshold = 0.4 * u.TeV
        # we are writing a single observation, as for Fermi
        obs.write(path, use_sherpa=True, overwrite=True)
    else:
        extract.write(path, ogipdir="", use_sherpa=True, overwrite=True)
Example #11
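
# This snippet picks up right after the phase background estimation. A hedged
# sketch of the estimator construction it assumes (the import path and phase
# ranges are illustrative; on_region and obs_list_vela are defined earlier in
# the notebook):
from gammapy.background import PhaseBackgroundEstimator

bkg_estimator = PhaseBackgroundEstimator(
    observations=obs_list_vela,
    on_region=on_region,
    on_phase=(0.5, 0.6),   # ON-phase interval of the pulsar light curve
    off_phase=(0.7, 1.0),  # OFF-phase interval used to estimate background
)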
bkg_estimator.run()
bkg_estimate = bkg_estimator.result

# The rest of the analysis is the same as for a standard spectral analysis with Gammapy. All the specificity of a phase-resolved analysis is contained in the PhaseBackgroundEstimator, where the background is estimated in the ON-region OFF-phase rather than in an OFF-region.
#
# We can now extract a spectrum with the SpectrumExtraction class. It takes the reconstructed and the true energy binnings. Both are expected to be Quantities with an energy unit, i.e. arrays with an energy unit. EnergyBounds is a dedicated class for building such binnings.

# In[ ]:

etrue = EnergyBounds.equal_log_spacing(0.005, 10.0, 100, unit="TeV")
ereco = EnergyBounds.equal_log_spacing(0.01, 10, 30, unit="TeV")

extraction = SpectrumExtraction(
    observations=obs_list_vela,
    bkg_estimate=bkg_estimate,
    containment_correction=True,
    e_true=etrue,
    e_reco=ereco,
)

extraction.run()
extraction.compute_energy_threshold(method_lo="energy_bias",
                                    bias_percent_lo=20)

# Now let's take a look at the spectra we just extracted, via spectrum_observations.

# In[ ]:

extraction.spectrum_observations[0].peek()

# Now we'll fit a model to the spectrum with SpectrumFit. First we load a power law model with initial values for the index and the amplitude, and then we do a likelihood fit; a sketch of that step follows.
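
# In[ ]:

# A minimal sketch of that fit step, following the pattern of Example #18
# below (attach the model to each spectrum observation, then run a Fit).
# The import paths assume a gammapy 0.12-era layout and `pwl` is an
# illustrative name:
import astropy.units as u
from gammapy.spectrum.models import PowerLaw
from gammapy.utils.fitting import Fit

pwl = PowerLaw(
    index=2 * u.Unit(""),
    amplitude=1e-11 * u.Unit("cm-2 s-1 TeV-1"),
    reference=1 * u.TeV,
)
for obs in extraction.spectrum_observations:
    obs.model = pwl
fit = Fit(extraction.spectrum_observations)
result = fit.run()
print(result)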
Example #12
# Now, we're going to extract a spectrum using the [SpectrumExtraction](https://docs.gammapy.org/0.10/api/gammapy.spectrum.SpectrumExtraction.html) class. We provide the reconstructed energy binning we want to use. It is expected to be a Quantity with unit energy, i.e. an array with an energy unit. We use a utility function to create it. We also provide the true energy binning to use.

# In[ ]:

e_reco = EnergyBounds.equal_log_spacing(0.1, 40, 40, unit="TeV")
e_true = EnergyBounds.equal_log_spacing(0.05, 100.0, 200, unit="TeV")

# Instantiate a [SpectrumExtraction](https://docs.gammapy.org/0.10/api/gammapy.spectrum.SpectrumExtraction.html) object that will do the extraction. The containment_correction parameter allows for PSF leakage correction when working with full-enclosure IRFs. We also compute a threshold energy and store the result in OGIP-compliant files (pha, rmf, arf), though this last step may be omitted.

# In[ ]:

ANALYSIS_DIR = "crab_analysis"

extraction = SpectrumExtraction(
    observations=observations,
    bkg_estimate=background_estimator.result,
    containment_correction=False,
)
extraction.run()

# Compute a low-energy threshold in case a safe energy range is not set by default
extraction.compute_energy_threshold(method_lo="area_max", area_percent_lo=10.0)

print(extraction.spectrum_observations[0])
# Write output in the form of OGIP files: PHA, ARF, RMF, BKG
# extraction.run(observations=observations, bkg_estimate=background_estimator.result, outdir=ANALYSIS_DIR)

# ## Look at observations
#
# Now we will look at the files we just created. We will use the [SpectrumObservation](https://docs.gammapy.org/0.10/api/gammapy.spectrum.SpectrumObservation.html) objects that are still in memory from the extraction step. Note, however, that you could also read them from disk if you have written them in the step above; a read-back sketch follows. The ``ANALYSIS_DIR`` folder contains 4 ``FITS`` files for each observation. These files are described in detail [here](https://gamma-astro-data-formats.readthedocs.io/en/latest/spectra/ogip/index.html). In short, they correspond to the on vector, the off vector, the effective area, and the energy dispersion.
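
# In[ ]:

# Reading one observation back from disk: a hedged sketch, assuming the
# gammapy 0.10 SpectrumObservation.read API and the default
# ogip_data/pha_obs<OBS_ID>.fits layout produced by the writer (both are
# assumptions; adjust to the files actually written):
from gammapy.spectrum import SpectrumObservation

obs_id = observations[0].obs_id
obs_read = SpectrumObservation.read(f"{ANALYSIS_DIR}/ogip_data/pha_obs{obs_id}.fits")
obs_read.peek()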
Example #13
from gammapy.spectrum.tests.test_extract import obs, target, bkg
from gammapy.datasets import gammapy_extra
from gammapy.spectrum import SpectrumExtraction
import numpy as np 
import astropy.units as u

outdir = gammapy_extra.filename("datasets/hess-crab4_pha")

# Restrict energy range to what is covered by HAP exporters
e_true = np.logspace(-1, 1.9, 70) * u.TeV

extraction = SpectrumExtraction(obs=obs(),
                                target=target(),
                                background=bkg(),
                                e_true=e_true,
                               )
extraction.estimate_background(extraction.background)
extraction.extract_spectrum()
extraction.observations.write(outdir, use_sherpa=True)
Example #15
# In[24]:

bkg_estimator = ReflectedRegionsBackgroundEstimator(
    obs_list=obs_list,
    on_region=on_region,
    exclusion_mask=exclusion_mask,
)
bkg_estimator.run()
bkg_estimate = bkg_estimator.result
bkg_estimator.plot()

# In[25]:

extract = SpectrumExtraction(
    obs_list=obs_list,
    bkg_estimate=bkg_estimate,
)
extract.run()

# ### Model fit
#
# The next step is to fit a spectral model, using all data (i.e. a "global" fit, using all energies).

# In[26]:

model = models.PowerLaw(
    index=2 * u.Unit(''),
    amplitude=1e-11 * u.Unit('cm-2 s-1 TeV-1'),
    reference=1 * u.TeV,
)
Example #16
bkg_estimator = ReflectedRegionsBackgroundEstimator(
    on_region=on_region,
    obs_list=obs_list,
    exclusion_mask=exclusion_mask,
)
bkg_estimator.run()

# In[6]:

# Extract the spectral data
e_reco = EnergyBounds.equal_log_spacing(0.7, 100, 50,
                                        unit='TeV')  # fine binning
e_true = EnergyBounds.equal_log_spacing(0.05, 100, 200, unit='TeV')
extraction = SpectrumExtraction(
    obs_list=obs_list,
    bkg_estimate=bkg_estimator.result,
    containment_correction=False,
    e_reco=e_reco,
    e_true=e_true,
)
extraction.run()
extraction.compute_energy_threshold(
    method_lo='area_max',
    area_percent_lo=10.0,
)

# ## Light curve estimation

# In[7]:

# Define the time intervals. Here, we only select intervals corresponding to an observation
intervals = []
Example #17
obs_summary = ObservationSummary(stats)
obs_summary
obs_summary.sigma
obs_summary.bg_rate
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot(121)
obs_summary.plot_excess_vs_livetime(ax=ax1)
ax2 = fig.add_subplot(122)
obs_summary.plot_significance_vs_livetime(ax=ax2)
plt.show()
e_reco = EnergyBounds.equal_log_spacing(0.1, 40, 40, unit='TeV')
e_true = EnergyBounds.equal_log_spacing(0.05, 100., 200, unit='TeV')
ANALYSIS_DIR = 'crab_analysis'
extraction = SpectrumExtraction(
    obs_list=crablist,
    bkg_estimate=background_estimator.result,
    containment_correction=False,
)
extraction.run()
extraction.compute_energy_threshold(method_lo='area_max', area_percent_lo=10.0)

print(extraction.observations[0])
# Interactive trial-and-error with the extraction.run / extraction.write
# signatures followed here; consolidated to the last forms tried in the session:
extraction.run(obs_list=crablist, bkg_estimate=background_estimator.result, outdir=ANALYSIS_DIR)
extraction.write(crablist, background_estimator.result, outdir=ANALYSIS_DIR)
extraction.observations[0].peek()
model = PowerLaw(
    index=2 * u.Unit(''),
    amplitude=2e-11 * u.Unit('cm-2 s-1 TeV-1'),
    reference=1 * u.TeV,
)
Example #18
class SpectrumAnalysisIACT:
    """High-level analysis class to perform a full 1D IACT spectral analysis.

    Observation selection must have happened before.

    Config options:

    * outdir : `pathlib.Path`, str
        Output folder, None means no output
    * background : dict
        Forwarded to `~gammapy.background.ReflectedRegionsBackgroundEstimator`
    * extraction : dict
        Forwarded to `~gammapy.spectrum.SpectrumExtraction`
    * fp_binning : `~astropy.units.Quantity`
        Flux points binning

    Parameters
    ----------
    observations : `~gammapy.data.Observations`
        Observations to analyse
    config : dict
        Config dict
    """
    def __init__(self, observations, config):
        self.observations = observations
        self.config = config

    def __str__(self):
        ss = self.__class__.__name__
        ss += "\n{}".format(self.observations)
        ss += "\n{}".format(self.config)
        return ss

    def run(self, optimize_opts=None):
        """Run all steps."""
        log.info("Running {}".format(self.__class__.__name__))
        self.run_extraction()
        self.run_fit(optimize_opts)

    def run_extraction(self):
        """Run all steps for the spectrum extraction."""
        self.background_estimator = ReflectedRegionsBackgroundEstimator(
            observations=self.observations, **self.config["background"])
        self.background_estimator.run()

        self.extraction = SpectrumExtraction(
            observations=self.observations,
            bkg_estimate=self.background_estimator.result,
            **self.config["extraction"])

        self.extraction.run()

    @property
    def _result_dict(self):
        """Convert to dict."""
        val = dict()
        model = self.config["fit"]["model"]
        val["model"] = model.to_dict()

        fit_range = self.config["fit"].get("fit_range")

        if fit_range is not None:
            val["fit_range"] = dict(
                min=fit_range[0].value,
                max=fit_range[1].value,
                unit=fit_range.unit.to_string("fits"),
            )

        val["statval"] = float(self.fit_result.total_stat)
        val["statname"] = "wstat"

        return val

    def write(self, filename, mode="w"):
        """Write to YAML file.

        Parameters
        ----------
        filename : str
            File to write
        mode : str
            Write mode
        """
        d = self._result_dict
        val = yaml.safe_dump(d, default_flow_style=False)

        with open(str(filename), mode) as outfile:
            outfile.write(val)

    def run_fit(self, optimize_opts=None):
        """Run all step for the spectrum fit."""
        fit_range = self.config["fit"].get("fit_range")
        model = self.config["fit"]["model"]

        for obs in self.extraction.spectrum_observations:
            if fit_range is not None:
                obs.mask_fit = obs.counts.energy_mask(fit_range[0],
                                                      fit_range[1])
            obs.model = model

        self.fit = Fit(self.extraction.spectrum_observations)
        self.fit_result = self.fit.run(optimize_opts=optimize_opts)

        model = self.config["fit"]["model"]
        modelname = model.__class__.__name__

        model.parameters.covariance = self.fit_result.parameters.covariance

        filename = make_path(
            self.config["outdir"]) / "fit_result_{}.yaml".format(modelname)

        self.write(filename=filename)

        obs_stacker = SpectrumDatasetOnOffStacker(
            self.extraction.spectrum_observations)
        obs_stacker.run()

        datasets_fp = obs_stacker.stacked_obs
        datasets_fp.model = model
        self.flux_point_estimator = FluxPointsEstimator(
            e_edges=self.config["fp_binning"], datasets=datasets_fp)
        fp = self.flux_point_estimator.run()
        # Flag flux points with TS < 4 (roughly 2 sigma) as upper limits
        fp.table["is_ul"] = fp.table["ts"] < 4
        self.flux_points = fp

    @property
    def spectrum_result(self):
        """`~gammapy.spectrum.FluxPointsDataset`"""
        return FluxPointsDataset(data=self.flux_points,
                                 model=self.fit.datasets.datasets[0].model)
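
# A hedged sketch of the config dict this class expects, assembled from the
# docstring and the keys accessed above ("background", "extraction", "fit",
# "fp_binning", "outdir"). All values are illustrative, and on_region,
# exclusion_mask, e_true, e_reco and observations are assumed to be defined:
import numpy as np
import astropy.units as u
from gammapy.spectrum.models import PowerLaw  # import path assumes a 0.12-era layout

config = dict(
    outdir=".",
    background=dict(on_region=on_region, exclusion_mask=exclusion_mask),
    extraction=dict(containment_correction=False, e_true=e_true, e_reco=e_reco),
    fit=dict(
        model=PowerLaw(
            index=2 * u.Unit(""),
            amplitude=1e-11 * u.Unit("cm-2 s-1 TeV-1"),
            reference=1 * u.TeV,
        ),
        fit_range=[1, 30] * u.TeV,
    ),
    fp_binning=np.logspace(0, 1.5, 11) * u.TeV,
)
analysis = SpectrumAnalysisIACT(observations=observations, config=config)
analysis.run()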
Example #19
class Analysis:
    """Config-driven high-level analysis interface.

    It is initialized by default with a set of configuration parameters and values declared in
    an internal configuration schema YAML file, though the user can also provide configuration
    parameters passed as a nested dictionary at the moment of instantiation. In that case these
    parameters will overwrite the default values of those present in the configuration file.

    For more info see  :ref:`HLI`.

    Parameters
    ----------
    config : dict or `AnalysisConfig`
        Configuration options following `AnalysisConfig` schema
    """

    def __init__(self, config=None):
        if isinstance(config, dict):
            self._config = AnalysisConfig(config)
        elif isinstance(config, AnalysisConfig):
            self._config = config
        else:
            raise ValueError("Dict or `AnalysiConfig` object required.")

        self._set_logging()
        self.observations = None
        self.background_estimator = None
        self.datasets = None
        self.extraction = None
        self.model = None
        self.fit = None
        self.fit_result = None
        self.flux_points = None

    @property
    def config(self):
        """Analysis configuration (`AnalysisConfig`)"""
        return self._config

    @property
    def settings(self):
        """Configuration settings for the analysis session."""
        return self.config.settings

    def get_observations(self):
        """Fetch observations from the data store according to criteria defined in the configuration."""
        self.config.validate()
        log.info("Fetching observations.")
        datastore_path = make_path(self.settings["observations"]["datastore"])
        if datastore_path.is_file():
            datastore = DataStore.from_file(datastore_path)
        elif datastore_path.is_dir():
            datastore = DataStore.from_dir(datastore_path)
        else:
            raise FileNotFoundError(f"Datastore {datastore_path} not found.")
        ids = set()
        selection = dict()
        for criteria in self.settings["observations"]["filters"]:
            selected_obs = ObservationTable()

            # TODO: Reduce significantly the code.
            # This block would be handled by datastore.obs_table.select_observations
            selection["type"] = criteria["filter_type"]
            for key, val in criteria.items():
                if key in ["lon", "lat", "radius", "border"]:
                    val = Angle(val)
                selection[key] = val
            if selection["type"] == "angle_box":
                selection["type"] = "par_box"
                selection["value_range"] = Angle(criteria["value_range"])
            if selection["type"] == "sky_circle" or selection["type"].endswith("_box"):
                selected_obs = datastore.obs_table.select_observations(selection)
            if selection["type"] == "par_value":
                mask = (
                    datastore.obs_table[criteria["variable"]] == criteria["value_param"]
                )
                selected_obs = datastore.obs_table[mask]
            if selection["type"] == "ids":
                obs_list = datastore.get_observations(criteria["obs_ids"])
                selected_obs["OBS_ID"] = [obs.obs_id for obs in obs_list.list]
            if selection["type"] == "all":
                obs_list = datastore.get_observations()
                selected_obs["OBS_ID"] = [obs.obs_id for obs in obs_list.list]

            if len(selected_obs):
                if "exclude" in criteria and criteria["exclude"]:
                    ids.difference_update(selected_obs["OBS_ID"].tolist())
                else:
                    ids.update(selected_obs["OBS_ID"].tolist())
        self.observations = datastore.get_observations(ids, skip_missing=True)
        for obs in self.observations.list:
            log.info(obs)

    def get_datasets(self):
        """Produce reduced datasets."""
        if not self._validate_reduction_settings():
            return False
        if self.settings["datasets"]["dataset-type"] == "SpectrumDatasetOnOff":
            self._spectrum_extraction()
        elif self.settings["datasets"]["dataset-type"] == "MapDataset":
            self._map_making()
        else:
            # TODO raise error?
            log.info("Data reduction method not available.")
            return False

    def set_model(self, model=None, filename=""):
        """Read the model from dict or filename and attach it to datasets.

        Parameters
        ----------
        model: dict or string
            Dictionary or string in YAML format with the serialized model.
        filename : string
            Name of the model YAML file describing the model.
        """
        if not self._validate_set_model():
            return False
        log.info(f"Reading model.")
        if isinstance(model, str):
            model = yaml.safe_load(model)
        if model:
            self.model = SkyModels(dict_to_models(model))
        elif filename:
            filepath = make_path(filename)
            self.model = SkyModels.from_yaml(filepath)
        else:
            return False
        # TODO: Deal with multiple components
        for dataset in self.datasets.datasets:
            if isinstance(dataset, MapDataset):
                dataset.model = self.model
            else:
                if len(self.model.skymodels) > 1:
                    raise ValueError(
                        "Can only fit a single spectral model at one time."
                    )
                dataset.model = self.model.skymodels[0].spectral_model
        log.info(self.model)

    def run_fit(self, optimize_opts=None):
        """Fitting reduced datasets to model."""
        if not self._validate_fitting_settings():
            return False

        for ds in self.datasets.datasets:
            # TODO: fit_range handled in jsonschema validation class
            if "fit_range" in self.settings["fit"]:
                e_min = u.Quantity(self.settings["fit"]["fit_range"]["min"])
                e_max = u.Quantity(self.settings["fit"]["fit_range"]["max"])
                if isinstance(ds, MapDataset):
                    ds.mask_fit = ds.counts.geom.energy_mask(e_min, e_max)
                else:
                    ds.mask_fit = ds.counts.energy_mask(e_min, e_max)
        log.info("Fitting reduced datasets.")
        self.fit = Fit(self.datasets)
        self.fit_result = self.fit.run(optimize_opts=optimize_opts)
        log.info(self.fit_result)

    def get_flux_points(self, source="source"):
        """Calculate flux points for a specific model component.

        Parameters
        ----------
        source : string
            Name of the model component where to calculate the flux points.
        """
        if not self._validate_fp_settings():
            return False

        # TODO: add "source" to config
        log.info("Calculating flux points.")
        axis_params = self.settings["flux-points"]["fp_binning"]
        e_edges = MapAxis.from_bounds(**axis_params).edges
        flux_point_estimator = FluxPointsEstimator(
            e_edges=e_edges, datasets=self.datasets, source=source
        )
        fp = flux_point_estimator.run()
        fp.table["is_ul"] = fp.table["ts"] < 4
        model = self.model[source].spectral_model.copy()
        self.flux_points = FluxPointsDataset(data=fp, model=model)
        cols = ["e_ref", "ref_flux", "dnde", "dnde_ul", "dnde_err", "is_ul"]
        log.info("\n{}".format(self.flux_points.data.table[cols]))

    @staticmethod
    def _create_geometry(params):
        """Create the geometry."""
        # TODO: handled in jsonschema validation class
        geom_params = copy.deepcopy(params)

        axes = []
        for axis_params in geom_params.get("axes", []):
            ax = MapAxis.from_bounds(**axis_params)
            axes.append(ax)

        geom_params["axes"] = axes
        geom_params["skydir"] = tuple(geom_params["skydir"])
        return WcsGeom.create(**geom_params)

    def _map_making(self):
        """Make maps and datasets for 3d analysis."""
        log.info("Creating geometry.")
        geom = self._create_geometry(self.settings["datasets"]["geom"])

        if "geom-irf" in self.settings["datasets"]:
            geom_irf = self._create_geometry(self.settings["datasets"]["geom-irf"])
        else:
            geom_irf = geom.to_binsz(binsz=BINSZ_IRF)

        offset_max = Angle(self.settings["datasets"]["offset-max"])
        stack_datasets = self.settings["datasets"]["stack-datasets"]
        log.info("Creating datasets.")

        maker = MapDatasetMaker(
            geom=geom,
            geom_true=geom_irf,
            offset_max=offset_max,
        )
        if stack_datasets:
            stacked = MapDataset.create(geom=geom, geom_irf=geom_irf, name="stacked")
            for obs in self.observations:
                dataset = maker.run(obs)
                stacked.stack(dataset)
            self._extract_irf_kernels(stacked)
            datasets = [stacked]
        else:
            datasets = []
            for obs in self.observations:
                dataset = maker.run(obs)
                self._extract_irf_kernels(dataset)
                datasets.append(dataset)

        self.datasets = Datasets(datasets)

    def _extract_irf_kernels(self, dataset):
        # TODO: remove hard-coded default value
        max_radius = self.settings["datasets"].get("psf-kernel-radius", "0.6 deg")
        # TODO: handle IRF maps in fit
        geom = dataset.counts.geom
        geom_irf = dataset.exposure.geom
        position = geom.center_skydir
        geom_psf = geom.to_image().to_cube(geom_irf.axes)
        dataset.psf = dataset.psf.get_psf_kernel(
            position=position, geom=geom_psf, max_radius=max_radius
        )
        e_reco = geom.get_axis_by_name("energy").edges
        dataset.edisp = dataset.edisp.get_energy_dispersion(
            position=position, e_reco=e_reco
        )

    def _set_logging(self):
        """Set logging parameters for API."""
        logging.basicConfig(**self.settings["general"]["logging"])
        log.info(
            "Setting logging config: {!r}".format(self.settings["general"]["logging"])
        )

    def _spectrum_extraction(self):
        """Run all steps for the spectrum extraction."""
        region = self.settings["datasets"]["geom"]["region"]
        log.info("Reducing spectrum datasets.")
        on_lon = Angle(region["center"][0])
        on_lat = Angle(region["center"][1])
        on_center = SkyCoord(on_lon, on_lat, frame=region["frame"])
        on_region = CircleSkyRegion(on_center, Angle(region["radius"]))
        background_params = {"on_region": on_region}
        background = self.settings["datasets"]["background"]
        if "exclusion_mask" in background:
            map_hdu = {}
            filename = background["exclusion_mask"]["filename"]
            if "hdu" in background["exclusion_mask"]:
                map_hdu = {"hdu": background["exclusion_mask"]["hdu"]}
            exclusion_region = Map.read(filename, **map_hdu)
            background_params["exclusion_mask"] = exclusion_region
        if background["background_estimator"] == "reflected":
            self.background_estimator = ReflectedRegionsBackgroundEstimator(
                observations=self.observations, **background_params
            )
            self.background_estimator.run()
        else:
            # TODO: raise error?
            log.info("Background estimation only for reflected regions method.")

        extraction_params = {}
        if "containment_correction" in self.settings["datasets"]:
            extraction_params["containment_correction"] = self.settings["datasets"][
                "containment_correction"
            ]
        params = self.settings["datasets"]["geom"]["axes"][0]
        e_reco = MapAxis.from_bounds(**params).edges
        extraction_params["e_reco"] = e_reco
        extraction_params["e_true"] = None
        self.extraction = SpectrumExtraction(
            observations=self.observations,
            bkg_estimate=self.background_estimator.result,
            **extraction_params,
        )
        self.extraction.run()
        self.datasets = Datasets(self.extraction.spectrum_observations)
        if self.settings["datasets"]["stack-datasets"]:
            stacked = self.datasets.stack_reduce()
            stacked.name = "stacked"
            self.datasets = Datasets([stacked])

    def _validate_reduction_settings(self):
        """Validate settings before proceeding to data reduction."""
        if self.observations and len(self.observations):
            self.config.validate()
            return True
        else:
            log.info("No observations selected.")
            log.info("Data reduction cannot be done.")
            return False

    def _validate_set_model(self):
        if self.datasets and self.datasets.datasets:
            self.config.validate()
            return True
        else:
            log.info("No datasets reduced.")
            return False

    def _validate_fitting_settings(self):
        """Validate settings before proceeding to fit 1D."""
        if not self.model:
            log.info("No model fetched for datasets.")
            log.info("Fit cannot be done.")
            return False
        else:
            return True

    def _validate_fp_settings(self):
        """Validate settings before proceeding to flux points estimation."""
        valid = True
        if self.fit:
            self.config.validate()
        else:
            log.info("No results available from fit.")
            valid = False
        if "flux-points" not in self.settings:
            log.info("No values declared for the energy bins.")
            valid = False
        elif "fp_binning" not in self.settings["flux-points"]:
            log.info("No values declared for the energy bins.")
            valid = False
        if not valid:
            log.info("Flux points calculation cannot be done.")
        return valid
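
# A hedged usage sketch of this config-driven interface. The keys follow the
# settings accessed above but are illustrative, not a complete schema; the
# datastore path and model file are placeholders:
config = {
    "general": {"logging": {"level": "INFO"}},
    "observations": {
        "datastore": "$GAMMAPY_DATA/hess-dl3-dr1",
        "filters": [{"filter_type": "ids", "obs_ids": [23523, 23526]}],
    },
    "datasets": {
        "dataset-type": "SpectrumDatasetOnOff",
        "stack-datasets": True,
        "containment_correction": False,
        "background": {"background_estimator": "reflected"},
        "geom": {
            "region": {
                "center": ["83.633 deg", "22.014 deg"],
                "frame": "icrs",
                "radius": "0.11 deg",
            },
            "axes": [
                {"name": "energy", "lo_bnd": 0.1, "hi_bnd": 30,
                 "nbin": 30, "unit": "TeV", "interp": "log"}
            ],
        },
    },
    "fit": {"fit_range": {"min": "1 TeV", "max": "30 TeV"}},
    "flux-points": {
        "fp_binning": {"lo_bnd": 1, "hi_bnd": 30, "nbin": 5,
                       "unit": "TeV", "interp": "log"}
    },
}

analysis = Analysis(config)
analysis.get_observations()
analysis.get_datasets()
analysis.set_model(filename="model.yaml")  # model.yaml is a placeholder
analysis.run_fit()
analysis.get_flux_points(source="source")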
Example #20
# ### Creation of the datasets
#
# We now apply spectral extraction to create the datasets.
#
# NB: here we use time intervals defined by the observation start and stop times, so the standard observation-based spectral extraction already produces datasets in the right time bins.
#
# Proper time-resolved spectral extraction will be included in an upcoming gammapy release.

# In[ ]:

# Note that we are not performing the extraction in time bins
extraction = SpectrumExtraction(
    observations=crab_obs,
    bkg_estimate=bkg_estimator.result,
    containment_correction=True,
    e_reco=energy_axis.edges,
    e_true=etrue_axis.edges,
)
extraction.run()
datasets_1d = extraction.spectrum_observations
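
# `time_intervals` is not defined in this snippet; per the note above (one
# interval per observation), a minimal sketch, assuming the observations
# expose tstart/tstop as Time objects:
time_intervals = [(obs.tstart, obs.tstop) for obs in crab_obs]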

# we need to set the times manually for now
for dataset, time_interval in zip(datasets_1d, time_intervals):
    dataset.counts.meta = dict()
    dataset.counts.meta["t_start"] = time_interval[0]
    dataset.counts.meta["t_stop"] = time_interval[1]

# ## Light Curve estimation for 1D spectra
#
# Now that we've reduced the 1D data we assign again the model to the datasets