# ## Fit spectrum
# 
# Now we'll fit a global model to the spectrum. First we do a joint likelihood fit to all observations; if you want to stack the observations instead, see below. We will also produce a debug plot to show how the global fit matches one of the individual observations.

# In[ ]:


spectral_model = PowerLawSpectralModel(
    index=2, amplitude=2e-11 * u.Unit("cm-2 s-1 TeV-1"), reference=1 * u.TeV
)
model = SkyModel(spectral_model=spectral_model)

for dataset in datasets:
    dataset.models = model

fit_joint = Fit(datasets)
result_joint = fit_joint.run()

# we make a copy here to compare it later
model_best_joint = model.copy()
model_best_joint.spectral_model.parameters.covariance = (
    result_joint.parameters.covariance
)
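
# If you want to stack the observations instead of the joint fit, here is a
# minimal sketch (assuming `Datasets` is imported from gammapy, as in the last
# example on this page; the `models` attribute name varies across gammapy
# versions):

# In[ ]:


stacked = Datasets(datasets).stack_reduce()
stacked.models = model

fit_stacked = Fit([stacked])
result_stacked = fit_stacked.run()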


# In[ ]:


print(result_joint)
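
# As a debug plot, we can compare the global best-fit model to one of the
# individual observations (a sketch; `plot_fit` is assumed to be available on
# the spectrum dataset in this gammapy version):

# In[ ]:


datasets[0].plot_fit()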

Example #2
# Define map geometry
axis = MapAxis.from_edges(np.logspace(-1.0, 1.0, 10),
                          unit="TeV",
                          name="energy")
geom = WcsGeom.create(skydir=(0, 0),
                      binsz=0.02,
                      width=(2, 2),
                      frame="galactic",
                      axes=[axis])

# Define some observation parameters
# we are not simulating many pointings / observations
pointing = SkyCoord(0.2, 0.5, unit="deg", frame="galactic")
livetime = 20 * u.hour

exposure_map = make_map_exposure_true_energy(pointing=pointing,
                                             livetime=livetime,
                                             aeff=aeff,
                                             geom=geom)

dataset = MapDataset(models=models, exposure=exposure_map)
npred = dataset.npred()

dataset.fake()

fit = Fit([dataset])
results = fit.run()

print(results)
print(models)
Example #3
def test_map_fit(sky_model, geom, geom_etrue):
    dataset_1 = get_map_dataset(sky_model,
                                geom,
                                geom_etrue,
                                evaluation_mode="local")
    dataset_1.background_model.norm.value = 0.5
    dataset_1.counts = dataset_1.npred()

    dataset_2 = get_map_dataset(sky_model,
                                geom,
                                geom_etrue,
                                evaluation_mode="global")
    dataset_2.counts = dataset_2.npred()

    sky_model.parameters["sigma"].frozen = True

    dataset_1.background_model.norm.value = 0.49
    dataset_2.background_model.norm.value = 0.99

    fit = Fit([dataset_1, dataset_2])
    result = fit.run()

    assert result.success
    assert "minuit" in repr(result)

    npred = dataset_1.npred().data.sum()
    assert_allclose(npred, 6220.529956, rtol=1e-3)
    assert_allclose(result.total_stat, 26008.040889, rtol=1e-3)

    pars = result.parameters
    assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
    assert_allclose(pars.error("lon_0"), 0.002622, rtol=1e-2)

    assert_allclose(pars["index"].value, 3, rtol=1e-2)
    assert_allclose(pars.error("index"), 0.028967, rtol=1e-2)

    assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
    assert_allclose(pars.error("amplitude"), 4.086756e-13, rtol=1e-2)

    # background norm 1
    assert_allclose(pars[8].value, 0.5, rtol=1e-2)
    assert_allclose(pars.error(pars[8]), 0.0156, rtol=1e-2)

    # background norm 2
    assert_allclose(pars[11].value, 1, rtol=1e-2)
    assert_allclose(pars.error(pars[11]), 0.02147, rtol=1e-2)

    # test mask_safe evaluation
    mask_safe = geom.energy_mask(emin=1 * u.TeV)
    dataset_1.mask_safe = Map.from_geom(geom, data=mask_safe)
    dataset_2.mask_safe = Map.from_geom(geom, data=mask_safe)

    stat = fit.datasets.stat_sum()
    assert_allclose(stat, 14447.196919)

    # test model evaluation outside image

    dataset_1.models[0].spatial_model.lon_0.value = 150
    dataset_1.npred()
    assert not dataset_1._evaluators[0].contributes

    with mpl_plot_check():
        dataset_1.plot_residuals()
Example #4
def test_fit_pwl_sherpa(self, dataset):
    fit = Fit(backend="sherpa", optimize_opts={"method": "simplex"})
    result = fit.optimize(datasets=[dataset])
    self.assert_result(result)
Example #5
def test_map_fit(sky_model, geom, geom_etrue):
    dataset_1 = get_map_dataset(sky_model,
                                geom,
                                geom_etrue,
                                evaluation_mode="local",
                                name="test-1")
    dataset_1.background_model.norm.value = 0.5
    dataset_1.counts = dataset_1.npred()

    dataset_2 = get_map_dataset(sky_model,
                                geom,
                                geom_etrue,
                                evaluation_mode="global",
                                name="test-2")
    dataset_2.counts = dataset_2.npred()

    sky_model.parameters["sigma"].frozen = True

    dataset_1.background_model.norm.value = 0.49
    dataset_2.background_model.norm.value = 0.99

    fit = Fit([dataset_1, dataset_2])
    result = fit.run()

    assert result.success
    assert "minuit" in repr(result)

    npred = dataset_1.npred().data.sum()
    assert_allclose(npred, 7525.790688, rtol=1e-3)
    assert_allclose(result.total_stat, 21700.253246, rtol=1e-3)

    pars = result.parameters
    assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
    assert_allclose(pars["lon_0"].error, 0.002244, rtol=1e-2)

    assert_allclose(pars["index"].value, 3, rtol=1e-2)
    assert_allclose(pars["index"].error, 0.024277, rtol=1e-2)

    assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
    assert_allclose(pars["amplitude"].error, 4.216154e-13, rtol=1e-2)

    # background norm 1
    assert_allclose(pars[8].value, 0.5, rtol=1e-2)
    assert_allclose(pars[8].error, 0.015811, rtol=1e-2)

    # background norm 2
    assert_allclose(pars[11].value, 1, rtol=1e-2)
    assert_allclose(pars[11].error, 0.02147, rtol=1e-2)

    # test mask_safe evaluation
    mask_safe = geom.energy_mask(emin=1 * u.TeV)
    dataset_1.mask_safe = Map.from_geom(geom, data=mask_safe)
    dataset_2.mask_safe = Map.from_geom(geom, data=mask_safe)

    stat = fit.datasets.stat_sum()
    assert_allclose(stat, 14824.282955)

    # test model evaluation outside image

    dataset_1.models[0].spatial_model.lon_0.value = 150
    dataset_1.npred()
    assert not dataset_1._evaluators[dataset_1.models[0]].contributes

    region = sky_model.spatial_model.to_region()
    with mpl_plot_check():
        dataset_1.plot_residuals(region=region)
Example #6
#
# First we start with fitting a simple `~gammapy.modeling.models.PowerLawSpectralModel`.

# In[ ]:

pwl = PowerLawSpectralModel(index=2,
                            amplitude="1e-12 cm-2 s-1 TeV-1",
                            reference="1 TeV")
model = SkyModel(spectral_model=pwl)

# After creating the model we run the fit by passing the `'flux_points'` and `'model'` objects:

# In[ ]:

dataset_pwl = FluxPointsDataset(model, flux_points)
fitter = Fit([dataset_pwl])
result_pwl = fitter.run()

# And print the result:

# In[ ]:

print(result_pwl)

# In[ ]:

print(pwl)

# Finally we plot the data points and the best fit model:

# In[ ]:
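

# A minimal plotting sketch (assuming `matplotlib.pyplot` is imported as `plt`
# and using an illustrative energy range; `energy_power=2` scales the y-axis
# by E^2, and the exact plotting API differs slightly between gammapy versions):

ax = flux_points.plot(energy_power=2)
pwl.plot(energy_range=[1e-1, 1e2] * u.TeV, ax=ax, energy_power=2)
pwl.plot_error(energy_range=[1e-1, 1e2] * u.TeV, ax=ax, energy_power=2)
plt.show()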
Example #7
def fit(dataset):
    return Fit([dataset])
Example #8
# Define map geometry
axis = MapAxis.from_edges(np.logspace(-1.0, 1.0, 10),
                          unit="TeV",
                          name="energy")
geom = WcsGeom.create(skydir=(0, 0),
                      binsz=0.02,
                      width=(2, 2),
                      frame="galactic",
                      axes=[axis])

# Define some observation parameters
# we are not simulating many pointings / observations
pointing = SkyCoord(0.2, 0.5, unit="deg", frame="galactic")
livetime = 20 * u.hour

exposure_map = make_map_exposure_true_energy(pointing=pointing,
                                             livetime=livetime,
                                             aeff=aeff,
                                             geom=geom)

dataset = MapDataset(models=models, exposure=exposure_map)
npred = dataset.npred()

dataset.fake()

fit = Fit()
results = fit.run([dataset])

print(results)
print(models)
Example #9
def test_fov_bkg_maker_with_source_model(obs_dataset, exclusion_mask, caplog):

    test_dataset = obs_dataset.copy(name="test-fov")

    # crab model
    spatial_model = PointSpatialModel(
        lon_0="83.619deg", lat_0="22.024deg", frame="icrs"
    )
    spectral_model = PowerLawSpectralModel(
        index=2.6, amplitude="4.5906e-11 cm-2 s-1 TeV-1", reference="1 TeV"
    )
    model = SkyModel(
        spatial_model=spatial_model, spectral_model=spectral_model, name="test-source"
    )

    bkg_model = FoVBackgroundModel(dataset_name="test-fov")
    test_dataset.models = [model, bkg_model]

    # pre-fit both source and background to get reference model
    Fit().run(test_dataset)
    bkg_model_spec = test_dataset.models[f"{test_dataset.name}-bkg"].spectral_model
    norm_ref = 0.897
    assert not bkg_model_spec.norm.frozen
    assert_allclose(bkg_model_spec.norm.value, norm_ref, rtol=1e-4)
    assert_allclose(bkg_model_spec.tilt.value, 0.0, rtol=1e-4)

    # apply scale method with pre-fitted source model and no exclusion_mask
    bkg_model_spec.norm.value = 1
    fov_bkg_maker = FoVBackgroundMaker(method="scale", exclusion_mask=None)
    dataset = fov_bkg_maker.run(test_dataset)

    bkg_model_spec = test_dataset.models[f"{dataset.name}-bkg"].spectral_model
    assert_allclose(bkg_model_spec.norm.value, norm_ref, rtol=1e-4)
    assert_allclose(bkg_model_spec.tilt.value, 0.0, rtol=1e-4)

    # apply fit method with pre-fitted source model and no exclusion mask
    bkg_model_spec.norm.value = 1
    fov_bkg_maker = FoVBackgroundMaker(method="fit", exclusion_mask=None)
    dataset = fov_bkg_maker.run(test_dataset)

    bkg_model_spec = test_dataset.models[f"{dataset.name}-bkg"].spectral_model
    assert_allclose(bkg_model_spec.norm.value, norm_ref, rtol=1e-4)
    assert_allclose(bkg_model_spec.tilt.value, 0.0, rtol=1e-4)

    # apply scale method with pre-fitted source model and exclusion_mask
    bkg_model_spec.norm.value = 1
    fov_bkg_maker = FoVBackgroundMaker(method="scale", exclusion_mask=exclusion_mask)
    dataset = fov_bkg_maker.run(test_dataset)

    bkg_model_spec = test_dataset.models[f"{dataset.name}-bkg"].spectral_model
    assert_allclose(bkg_model_spec.norm.value, 0.830779, rtol=1e-4)
    assert_allclose(bkg_model_spec.tilt.value, 0.0, rtol=1e-4)

    # apply fit method with pre-fitted source model and exclusion mask
    bkg_model_spec.norm.value = 1
    fov_bkg_maker = FoVBackgroundMaker(method="fit", exclusion_mask=exclusion_mask)
    dataset = fov_bkg_maker.run(test_dataset)

    bkg_model_spec = test_dataset.models[f"{dataset.name}-bkg"].spectral_model
    assert_allclose(bkg_model_spec.norm.value, 0.830779, rtol=1e-4)
    assert_allclose(bkg_model_spec.tilt.value, 0.0, rtol=1e-4)

    # Here we check that source parameters are correctly thawed after fit.
    assert not dataset.models.parameters["index"].frozen
    assert not dataset.models.parameters["lon_0"].frozen

    # test
    model.spectral_model.amplitude.value *= 1e5
    fov_bkg_maker = FoVBackgroundMaker(method="scale")
    dataset = fov_bkg_maker.run(test_dataset)
    assert "WARNING" in [_.levelname for _ in caplog.records]
    message1 = "FoVBackgroundMaker failed. Negative residuals counts for test-fov. Setting mask to False."
    assert message1 in [_.message for _ in caplog.records]
Example #10
    def run_region(self, kr, lon, lat, radius):
        # TODO: for now we have to read/create the all-sky maps in each job,
        # because we can't pickle <functools._lru_cache_wrapper> objects;
        # move this back to __init__ once fixed

        log.info(f"ROI {kr}: loading data")

        # exposure
        exposure_hpx = Map.read(
            "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz")
        exposure_hpx.unit = "cm2 s"

        # iem
        iem_filepath = BASE_PATH / "data" / "gll_iem_v06_extrapolated.fits"
        iem_fermi_extra = Map.read(iem_filepath)
        # norm=1.1, tilt=0.03 see paper appendix A
        model_iem = SkyModel(
            PowerLawNormSpectralModel(norm=1.1, tilt=0.03),
            TemplateSpatialModel(iem_fermi_extra, normalize=False),
            name="iem_extrapolated",
        )

        # ROI
        roi_time = time()
        ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
        width = 2 * (radius + self.psf_margin)

        # Counts
        counts = Map.create(
            skydir=ROI_pos,
            width=width,
            proj="CAR",
            frame="galactic",
            binsz=1 / 8.0,
            axes=[self.energy_axis],
            dtype=float,
        )
        counts.fill_by_coord({
            "skycoord": self.events.radec,
            "energy": self.events.energy
        })

        axis = MapAxis.from_nodes(counts.geom.axes[0].center,
                                  name="energy_true",
                                  unit="GeV",
                                  interp="log")
        wcs = counts.geom.wcs
        geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
        coords = geom.get_coord()
        # expo
        data = exposure_hpx.interp_by_coord(coords)
        exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

        # read PSF
        psf_kernel = PSFKernel.from_table_psf(self.psf,
                                              geom,
                                              max_radius=self.psf_margin * u.deg)

        # Energy Dispersion
        e_true = exposure.geom.axes[0].edges
        e_reco = counts.geom.axes[0].edges
        edisp = EDispKernel.from_diagonal_response(e_true=e_true,
                                                   e_reco=e_reco)

        # fit mask
        if coords["lon"].min() < 90 * u.deg and coords["lon"].max(
        ) > 270 * u.deg:
            coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
        mask = (
            (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
            & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
            & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
            & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg))
        mask_fermi = WcsNDMap(counts.geom, mask)

        log.info(f"ROI {kr}: pre-computing diffuse")

        # IEM
        eval_iem = MapEvaluator(model=model_iem,
                                exposure=exposure,
                                psf=psf_kernel,
                                edisp=edisp)
        bkg_iem = eval_iem.compute_npred()

        # ISO
        eval_iso = MapEvaluator(model=self.model_iso,
                                exposure=exposure,
                                edisp=edisp)
        bkg_iso = eval_iso.compute_npred()

        # merge iem and iso, only one local normalization is fitted
        dataset_name = "3FHL_ROI_num" + str(kr)
        background_total = bkg_iem + bkg_iso
        background_model = BackgroundModel(background_total,
                                           name="bkg_iem+iso",
                                           datasets_names=[dataset_name])
        background_model.parameters["norm"].min = 0.0

        # Sources model
        in_roi = self.FHL3.positions.galactic.contained_by(wcs)
        FHL3_roi = []
        for ks in range(len(self.FHL3.table)):
            if in_roi[ks]:
                model = self.FHL3[ks].sky_model()
                model.spatial_model.parameters.freeze_all()  # freeze spatial
                model.spectral_model.parameters["amplitude"].min = 0.0
                if isinstance(model.spectral_model, PowerLawSpectralModel):
                    model.spectral_model.parameters["index"].min = 0.1
                    model.spectral_model.parameters["index"].max = 10.0
                else:
                    model.spectral_model.parameters["alpha"].min = 0.1
                    model.spectral_model.parameters["alpha"].max = 10.0

                FHL3_roi.append(model)
        model_total = Models([background_model] + FHL3_roi)

        # Dataset
        dataset = MapDataset(
            models=model_total,
            counts=counts,
            exposure=exposure,
            psf=psf_kernel,
            edisp=edisp,
            mask_fit=mask_fermi,
            name=dataset_name,
        )
        cat_stat = dataset.stat_sum()
        datasets = Datasets([dataset])

        log.info(f"ROI {kr}: running fit")
        fit = Fit(datasets)
        results = fit.run(**self.optimize_opts)
        print("ROI_num", str(kr), "\n", results)
        fit_stat = datasets.stat_sum()

        if results.message != "Optimization failed.":
            datasets.write(path=Path(self.resdir),
                           prefix=dataset.name,
                           overwrite=True)
            np.savez(
                self.resdir / f"3FHL_ROI_num{kr}_fit_infos.npz",
                message=results.message,
                stat=[cat_stat, fit_stat],
            )

            exec_time = time() - roi_time
            print("ROI", kr, " time (s): ", exec_time)

            log.info(f"ROI {kr}: running flux points")
            for model in FHL3_roi:
                if (self.FHL3[model.name].data["ROI_num"] == kr
                        and self.FHL3[model.name].data["Signif_Avg"] >= self.sig_cut):
                    flux_points = FluxPointsEstimator(
                        e_edges=self.El_flux,
                        source=model.name,
                        n_sigma_ul=2,
                    ).run(datasets=datasets)
                    filename = self.resdir / f"{model.name}_flux_points.fits"
                    flux_points.write(filename, overwrite=True)

            exec_time = time() - roi_time - exec_time
            print("ROI", kr, " Flux points time (s): ", exec_time)
Example #11
def fit(dataset):
    return Fit([dataset], backend="minuit")
Example #12
def run_analysis_1d(target_dict, e_reco, fluxp_edges, debug):
    """Run joint spectral analysis for the selected target"""
    tag = target_dict["tag"]
    name = target_dict["name"]

    log.info(f"Running 1d analysis, {tag}")
    path_res = Path(tag + "/results/")

    ra = target_dict["ra"]
    dec = target_dict["dec"]
    on_size = target_dict["on_size"]
    e_decorr = target_dict["e_decorr"]

    target_pos = SkyCoord(ra, dec, frame="icrs")
    on_radius = Angle(on_size)

    log.info(f"Running observations selection")
    data_store = DataStore.from_dir("$GAMMAPY_DATA/hess-dl3-dr1/")
    mask = data_store.obs_table["TARGET_NAME"] == name
    obs_table = data_store.obs_table[mask]
    observations = data_store.get_observations(obs_table["OBS_ID"])

    if debug:
        observations = [observations[0]]

    log.info(f"Running data reduction")
    # Reflected regions background estimation
    on_region = CircleSkyRegion(center=target_pos, radius=on_radius)
    dataset_maker = SpectrumDatasetMaker(region=on_region,
                                         e_reco=e_reco,
                                         e_true=e_reco,
                                         containment_correction=True)
    bkg_maker = ReflectedRegionsBackgroundMaker()
    safe_mask_masker = SafeMaskMaker(methods=["edisp-bias"], bias_percent=10)

    datasets = []

    for observation in observations:
        dataset = dataset_maker.run(observation,
                                    selection=["counts", "aeff", "edisp"])
        dataset_on_off = bkg_maker.run(dataset, observation)
        dataset_on_off = safe_mask_masker.run(dataset_on_off, observation)
        datasets.append(dataset_on_off)

    log.info(f"Running fit ...")
    model = PowerLawSpectralModel(index=2,
                                  amplitude=2e-11 * u.Unit("cm-2 s-1 TeV-1"),
                                  reference=e_decorr)
    for dataset in datasets:
        dataset.models = SkyModel(spectral_model=model)

    fit_joint = Fit(datasets)
    result_joint = fit_joint.run()
    parameters = model.parameters
    parameters.covariance = result_joint.parameters.covariance
    log.info(f"Writing {path_res}")
    write_fit_summary(parameters,
                      str(path_res / "results-summary-fit-1d.yaml"))

    log.info(f"Running flux points estimation")
    fpe = FluxPointsEstimator(datasets=datasets, e_edges=fluxp_edges)
    flux_points = fpe.run()
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = [
        "e_ref",
        "e_min",
        "e_max",
        "dnde",
        "dnde_errp",
        "dnde_errn",
        "is_ul",
        "dnde_ul",
    ]
    log.info(f"Writing {path_res}")
    flux_points.table_formatted[keys].write(path_res / "flux-points-1d.ecsv",
                                            format="ascii.ecsv")
Example #13
def run_analysis_3d(target_dict, fluxp_edges, debug):
    """Run stacked 3D analysis for the selected target.

    Notice that, for the sake of time saving, we run a stacked analysis, as opposed
     to the joint analysis that is performed in the reference paper.
    """
    tag = target_dict["tag"]
    log.info(f"running 3d analysis, {tag}")

    path_res = Path(tag + "/results/")

    txt = Path("config_template.yaml").read_text()
    txt = txt.format_map(target_dict)
    config = AnalysisConfig.from_yaml(txt)

    log.info(f"Running observations selection")
    analysis = Analysis(config)
    analysis.get_observations()

    log.info(f"Running data reduction")
    analysis.get_datasets()

    # TODO: Improve safe mask handling in Analysis. The mask should be applied run-by-run
    maker_safe_mask = SafeMaskMaker(methods=["edisp-bias", "bkg-peak"])
    stacked = maker_safe_mask.run(analysis.datasets[0])

    log.info(f"Running fit ...")
    ra = target_dict["ra"]
    dec = target_dict["dec"]
    e_decorr = target_dict["e_decorr"]
    spectral_model = Model.create("PowerLawSpectralModel", reference=e_decorr)
    spatial_model = Model.create(target_dict["spatial_model"],
                                 lon_0=ra,
                                 lat_0=dec)
    if target_dict["spatial_model"] == "DiskSpatialModel":
        spatial_model.e.frozen = False
    sky_model = SkyModel(spatial_model=spatial_model,
                         spectral_model=spectral_model,
                         name=tag)

    stacked.models = sky_model
    stacked.background_model.norm.frozen = False
    fit = Fit([stacked])
    result = fit.run()

    parameters = stacked.models.parameters
    model_npars = len(sky_model.parameters.names)
    parameters.covariance = result.parameters.covariance[0:model_npars,
                                                         0:model_npars]
    log.info(f"Writing {path_res}")
    write_fit_summary(parameters,
                      str(path_res / "results-summary-fit-3d.yaml"))

    log.info("Running flux points estimation")
    # TODO: This is a workaround to re-optimize the bkg. Remove it once it's added to the Analysis class
    for par in stacked.parameters:
        if par is not stacked.background_model.norm:
            par.frozen = True

    reoptimize = not debug
    fpe = FluxPointsEstimator(datasets=[stacked],
                              e_edges=fluxp_edges,
                              source=tag,
                              reoptimize=reoptimize)

    flux_points = fpe.run()
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = [
        "e_ref",
        "e_min",
        "e_max",
        "dnde",
        "dnde_errp",
        "dnde_errn",
        "is_ul",
        "dnde_ul",
    ]
    log.info(f"Writing {path_res}")
    flux_points.table_formatted[keys].write(path_res / "flux-points-3d.ecsv",
                                            format="ascii.ecsv")
Example #14
def datasets(self, datasets):
    self._datasets = self._check_datasets(datasets)
    self.fit = Fit(datasets)
    self.fit_result = None
Example #15
def data_fit(stacked):
    # Data fitting
    fit = Fit(stacked)
    result = fit.run(optimize_opts={"print_level": 1})
Example #16
def test_map_fit(sky_model, geom, geom_etrue):
    dataset_1 = get_map_dataset(geom, geom_etrue, name="test-1")
    dataset_2 = get_map_dataset(geom, geom_etrue, name="test-2")
    datasets = Datasets([dataset_1, dataset_2])

    models = Models(datasets.models)
    models.insert(0, sky_model)

    models["test-1-bkg"].spectral_model.norm.value = 0.5
    models["test-model"].spatial_model.sigma.frozen = True

    datasets.models = models
    dataset_2.counts = dataset_2.npred()
    dataset_1.counts = dataset_1.npred()

    models["test-1-bkg"].spectral_model.norm.value = 0.49
    models["test-2-bkg"].spectral_model.norm.value = 0.99

    fit = Fit(datasets)
    result = fit.run()

    assert result.success
    assert "minuit" in repr(result)

    npred = dataset_1.npred().data.sum()
    assert_allclose(npred, 7525.790688, rtol=1e-3)
    assert_allclose(result.total_stat, 21659.2139, rtol=1e-3)

    pars = result.parameters
    assert_allclose(pars["lon_0"].value, 0.2, rtol=1e-2)
    assert_allclose(pars["lon_0"].error, 0.002244, rtol=1e-2)

    assert_allclose(pars["index"].value, 3, rtol=1e-2)
    assert_allclose(pars["index"].error, 0.024277, rtol=1e-2)

    assert_allclose(pars["amplitude"].value, 1e-11, rtol=1e-2)
    assert_allclose(pars["amplitude"].error, 4.216154e-13, rtol=1e-2)

    # background norm 1
    assert_allclose(pars[8].value, 0.5, rtol=1e-2)
    assert_allclose(pars[8].error, 0.015811, rtol=1e-2)

    # background norm 2
    assert_allclose(pars[11].value, 1, rtol=1e-2)
    assert_allclose(pars[11].error, 0.02147, rtol=1e-2)

    # test mask_safe evaluation
    mask_safe = geom.energy_mask(energy_min=1 * u.TeV)
    dataset_1.mask_safe = Map.from_geom(geom, data=mask_safe)
    dataset_2.mask_safe = Map.from_geom(geom, data=mask_safe)

    stat = fit.datasets.stat_sum()
    assert_allclose(stat, 14823.579908, rtol=1e-5)

    region = sky_model.spatial_model.to_region()

    initial_counts = dataset_1.counts.copy()
    with mpl_plot_check():
        dataset_1.plot_residuals(kwargs_spectral=dict(region=region))

    # check dataset has not changed
    assert initial_counts == dataset_1.counts

    # test model evaluation outside image
    dataset_1.models[0].spatial_model.lon_0.value = 150
    dataset_1.npred()
    assert not dataset_1._evaluators["test-model"].contributes
Example #17
def test_datasets_to_io(tmp_path):
    filedata = "$GAMMAPY_DATA/tests/models/gc_example_datasets.yaml"
    filemodel = "$GAMMAPY_DATA/tests/models/gc_example_models.yaml"

    datasets = Datasets.read(filedata, filemodel)

    assert len(datasets) == 2

    dataset0 = datasets[0]
    assert dataset0.name == "gc"
    assert dataset0.counts.data.sum() == 6824
    assert_allclose(dataset0.exposure.data.sum(), 2072125400000.0, atol=0.1)
    assert dataset0.psf is not None
    assert dataset0.edisp is not None

    assert_allclose(dataset0.background_model.evaluate().data.sum(), 4094.2, atol=0.1)

    assert dataset0.background_model.name == "background_irf_gc"

    dataset1 = datasets[1]
    assert dataset1.name == "g09"
    assert dataset1.background_model.name == "background_irf_g09"

    assert (
        dataset0.models["gll_iem_v06_cutout"] == dataset1.models["gll_iem_v06_cutout"]
    )

    assert isinstance(dataset0.models, Models)
    assert len(dataset0.models) == 5
    assert dataset0.models[0].name == "gc"
    assert dataset0.models[1].name == "gll_iem_v06_cutout"

    assert (
        dataset0.models["background_irf_gc"].parameters["norm"]
        is dataset1.models["background_irf_g09"].parameters["norm"]
    )

    assert (
        dataset0.models["gc"].parameters["reference"]
        is dataset1.models["g09"].parameters["reference"]
    )
    assert_allclose(dataset1.models["g09"].parameters["lon_0"].value, 0.9, atol=0.1)

    datasets.write(tmp_path, prefix="written")
    datasets_read = Datasets.read(
        tmp_path / "written_datasets.yaml", tmp_path / "written_models.yaml"
    )

    assert len(datasets.parameters) == 21

    assert len(datasets_read) == 2
    dataset0 = datasets_read[0]
    assert dataset0.counts.data.sum() == 6824
    assert_allclose(dataset0.exposure.data.sum(), 2072125400000.0, atol=0.1)
    assert dataset0.psf is not None
    assert dataset0.edisp is not None
    assert_allclose(dataset0.background_model.evaluate().data.sum(), 4094.2, atol=0.1)

    Fit(datasets).run()
    assert_allclose(
        datasets.models["background_irf_g09"].covariance,
        datasets.models["background_irf_gc"].covariance,
    )
Example #18
# In[ ]:


pwl = PowerLawSpectralModel(
    index=2, amplitude="1e-12 cm-2 s-1 TeV-1", reference="1 TeV"
)


# After creating the model we run the fit by passing the `'flux_points'` and `'pwl'` objects:

# In[ ]:


dataset_pwl = FluxPointsDataset(pwl, flux_points, likelihood="chi2assym")
fitter = Fit(dataset_pwl)
result_pwl = fitter.run()


# And print the result:

# In[ ]:


print(result_pwl)


# In[ ]:


print(pwl)
Example #19
def data_fit(datasets):
    fit = Fit(datasets)
    result = fit.run()
Example #20
def data_fit(datasets):
    fit = Fit(datasets)
    result = fit.run(optimize_opts={"print_level": 1})
Example #21
def test_fit_pwl_minuit(self, dataset):
    fit = Fit()
    result = fit.run(dataset)
    self.assert_result(result)
Example #22
fig, ax, cbar = npred.sum_over_axes().plot(add_cbar=True)
ax.scatter(
    [lon_0_1, lon_0_2, pointing.galactic.l.degree],
    [lat_0_1, lat_0_2, pointing.galactic.b.degree],
    transform=ax.get_transform("galactic"),
    marker="+",
    color="cyan",
)
# plt.show()
plt.clf()

dataset.fake()

dataset.counts.sum_over_axes().plot()
# plt.show()
plt.clf()

models.parameters.set_error(spatial_model_1.lon_0, 0.1 * u.deg)
models.parameters.set_error(spatial_model_1.lat_0, 0.1 * u.deg)

models.parameters.set_error(spatial_model_2.lon_0, 0.1 * u.deg)
models.parameters.set_error(spatial_model_2.lat_0, 0.1 * u.deg)

models.parameters.set_error(spectral_model_1.amplitude,
                            1e-12 * u.Unit("cm-2 s-1 TeV-1"))
models.parameters.set_error(spectral_model_2.amplitude,
                            1e-12 * u.Unit("cm-2 s-1 TeV-1"))

fit = Fit(dataset)
fit.run()
Example #23
    def run(self,
            datasets,
            parameter,
            steps="all",
            null_value=1e-150,
            scan_values=None):
        """Run the parameter estimator.

        Parameters
        ----------
        datasets : `~gammapy.datasets.Datasets`
            The datasets used to estimate the model parameter
        parameter : `~gammapy.modeling.Parameter`
            the parameter to be estimated
        steps : list of str
            Which steps to execute. Available options are:
                * "err": estimate symmetric error from covariance
                * "ts": estimate delta TS with parameter null (reference) value
                * "errn-errp": estimate asymmetric errors.
                * "ul": estimate upper limits.
                * "scan": estimate fit statistic profiles.

            By default all steps are executed.
        null_value : float
            the null value to be used for delta TS estimation.
            Default is 1e-150 since 0 can be an issue for some parameters.
        scan_values : `numpy.ndarray`
            Array of parameter values to be used for the fit statistic profile.
            If set to None, scan values are automatically calculated. Default is None.

        Returns
        -------
        result : dict
            Dict with the various parameter estimation values.
        """
        self.datasets = self._check_datasets(datasets)
        self.fit = Fit(datasets)
        self.fit_result = None

        with self.datasets.parameters.restore_values:

            if not self.reoptimize:
                self._freeze_parameters(parameter)

            if steps == "all":
                steps = ["err", "ts", "errp-errn", "ul", "scan"]

            result = self._find_best_fit(parameter)
            TS1 = result["stat"]

            value_max = result[parameter.name]

            if "err" in steps:
                res = self.fit.covariance()
                value_err = res.parameters[parameter].error
                result.update({f"{parameter.name}_err": value_err})

            if "errp-errn" in steps:
                res = self.fit.confidence(parameter=parameter,
                                          sigma=self.sigma)
                result.update({
                    f"{parameter.name}_errp": res["errp"],
                    f"{parameter.name}_errn": res["errn"],
                })

            if "ul" in steps:
                res = self.fit.confidence(parameter=parameter,
                                          sigma=self.sigma_ul)
                result.update(
                    {f"{parameter.name}_ul": res["errp"] + value_max})

            if "ts" in steps:
                TS0 = self._estimate_ts_for_null_value(parameter, null_value)
                res = TS0 - TS1
                result.update({
                    "sqrt_ts": np.sqrt(res),
                    "ts": res,
                    "null_value": null_value
                })
                # TODO: should not need this
                self.fit.optimize()

            if "scan" in steps:
                if scan_values is None:
                    scan_values = self._compute_scan_values(
                        value_max, value_err, parameter.min, parameter.max)

                res = self.fit.stat_profile(parameter,
                                            values=scan_values,
                                            reoptimize=self.reoptimize)
                result.update({
                    f"{parameter.name}_scan": res["values"],
                    "stat_scan": res["stat"]
                })
        return result
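
# A hypothetical usage sketch for the run() method above. The estimator class
# name, its construction, and the parameter lookup are assumptions for
# illustration, not taken from this snippet:
#
#     estimator = ParameterEstimator(reoptimize=False)
#     parameter = datasets.parameters["amplitude"]
#     result = estimator.run(datasets, parameter, steps=["err", "ul"])
#     print(result["amplitude_err"], result["amplitude_ul"])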
Example #24
    def run_region(self, kr, lon, lat, radius):
        # TODO: for now we have to read/create the all-sky maps in each job,
        # because we can't pickle <functools._lru_cache_wrapper> objects;
        # move this back to __init__ once fixed

        # exposure
        exposure_hpx = Map.read(
            self.datadir + "/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz")
        exposure_hpx.unit = "cm2 s"

        # background iem
        infile = self.datadir + "/catalogs/fermi/gll_iem_v06.fits.gz"
        outfile = self.resdir + "/gll_iem_v06_extra.fits"
        model_iem = extrapolate_iem(infile, outfile, self.logEc_extra)

        # ROI
        roi_time = time()
        ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
        width = 2 * (radius + self.psf_margin)

        # Counts
        counts = Map.create(
            skydir=ROI_pos,
            width=width,
            proj="CAR",
            coordsys="GAL",
            binsz=self.dlb,
            axes=[self.energy_axis],
            dtype=float,
        )
        counts.fill_by_coord({
            "skycoord": self.events.radec,
            "energy": self.events.energy
        })

        axis = MapAxis.from_nodes(counts.geom.axes[0].center,
                                  name="energy",
                                  unit="GeV",
                                  interp="log")
        wcs = counts.geom.wcs
        geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
        coords = counts.geom.get_coord()

        # expo
        data = exposure_hpx.interp_by_coord(coords)
        exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

        # read PSF
        psf_kernel = PSFKernel.from_table_psf(self.psf,
                                              counts.geom,
                                              max_radius=self.psf_margin * u.deg)

        # Energy Dispersion
        e_true = exposure.geom.axes[0].edges
        e_reco = counts.geom.axes[0].edges
        edisp = EnergyDispersion.from_diagonal_response(e_true=e_true,
                                                        e_reco=e_reco)

        # fit mask
        if coords["lon"].min() < 90 * u.deg and coords["lon"].max(
        ) > 270 * u.deg:
            coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
        mask = (
            (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
            & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
            & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
            & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg))
        mask_fermi = WcsNDMap(counts.geom, mask)

        # IEM
        eval_iem = MapEvaluator(model=model_iem,
                                exposure=exposure,
                                psf=psf_kernel,
                                edisp=edisp)
        bkg_iem = eval_iem.compute_npred()

        # ISO
        eval_iso = MapEvaluator(model=self.model_iso,
                                exposure=exposure,
                                edisp=edisp)
        bkg_iso = eval_iso.compute_npred()

        # merge iem and iso, only one local normalization is fitted
        background_total = bkg_iem + bkg_iso
        background_model = BackgroundModel(background_total)
        background_model.parameters["norm"].min = 0.0

        # Sources model
        in_roi = self.FHL3.positions.galactic.contained_by(wcs)
        FHL3_roi = []
        for ks in range(len(self.FHL3.table)):
            if in_roi[ks]:
                model = self.FHL3[ks].sky_model()
                model.spatial_model.parameters.freeze_all()  # freeze spatial
                model.spectral_model.parameters["amplitude"].min = 0.0
                if isinstance(model.spectral_model, PowerLawSpectralModel):
                    model.spectral_model.parameters["index"].min = 0.1
                    model.spectral_model.parameters["index"].max = 10.0
                else:
                    model.spectral_model.parameters["alpha"].min = 0.1
                    model.spectral_model.parameters["alpha"].max = 10.0

                FHL3_roi.append(model)
        model_total = SkyModels(FHL3_roi)

        # Dataset
        dataset = MapDataset(
            model=model_total,
            counts=counts,
            exposure=exposure,
            psf=psf_kernel,
            edisp=edisp,
            background_model=background_model,
            mask_fit=mask_fermi,
            name="3FHL_ROI_num" + str(kr),
        )
        cat_stat = dataset.stat_sum()

        datasets = Datasets([dataset])
        fit = Fit(datasets)
        results = fit.run(optimize_opts=self.optimize_opts)
        print("ROI_num", str(kr), "\n", results)
        fit_stat = datasets.stat_sum()

        if results.message == "Optimization failed.":
            pass
        else:
            datasets.to_yaml(path=Path(self.resdir),
                             prefix=dataset.name,
                             overwrite=True)
            np.save(
                self.resdir + "/3FHL_ROI_num" + str(kr) + "_covariance.npy",
                results.parameters.covariance,
            )
            np.savez(
                self.resdir + "/3FHL_ROI_num" + str(kr) + "_fit_infos.npz",
                message=results.message,
                stat=[cat_stat, fit_stat],
            )

            exec_time = time() - roi_time
            print("ROI", kr, " time (s): ", exec_time)

            for model in FHL3_roi:
                if (self.FHL3[model.name].data["ROI_num"] == kr
                        and self.FHL3[model.name].data["Signif_Avg"] >= self.sig_cut):
                    flux_points = FluxPointsEstimator(
                        datasets=datasets,
                        e_edges=self.El_flux,
                        source=model.name,
                        sigma_ul=2.0,
                    ).run()
                    filename = self.resdir + "/" + model.name + "_flux_points.fits"
                    flux_points.write(filename, overwrite=True)

            exec_time = time() - roi_time - exec_time
            print("ROI", kr, " Flux points time (s): ", exec_time)
Example #25
def _setup_fit(self, datasets):
    # TODO: make fit stateless and configurable
    if self._fit is None or datasets is not self._fit.datasets:
        self._fit = Fit(datasets)
Example #26
def data_fit(stacked):
    # Data fitting
    fit = Fit(optimize_opts={"print_level": 1})
    result = fit.run(datasets=stacked)
    print(result.success)
Example #27
    def estimate_time_bin_flux(self, datasets, time_interval, steps="all"):
        """Estimate flux point for a single energy group.

        Parameters
        ----------
        datasets : `~gammapy.modeling.Datasets`
            the list of dataset object
        time_interval : `astropy.time.Time`
            Start and stop time for each interval
        steps : list of str
            Which steps to execute. Available options are:

                * "err": estimate symmetric error.
                * "errn-errp": estimate asymmetric errors.
                * "ul": estimate upper limits.
                * "ts": estimate ts and sqrt(ts) values.
                * "norm-scan": estimate likelihood profiles.

            By default all steps are executed.

        Returns
        -------
        result : dict
            Dict with results for the flux point.
        """
        self.fit = Fit(datasets)

        result = {
            "e_ref": self.e_ref,
            "e_min": self.e_min,
            "e_max": self.e_max,
            "ref_dnde": self.ref_model(self.e_ref),
            "ref_flux": self.ref_model.integral(self.e_min, self.e_max),
            "ref_eflux": self.ref_model.energy_flux(self.e_min, self.e_max),
            "ref_e2dnde": self.ref_model(self.e_ref) * self.e_ref**2,
        }

        result.update(self.estimate_norm())

        if not result.pop("success"):
            log.warning("Fit failed for time bin between {t_min} and {t_max},"
                        " setting NaN.".format(t_min=time_interval[0].mjd,
                                               t_max=time_interval[1].mjd))

        if steps == "all":
            steps = ["err", "counts", "errp-errn", "ul", "ts", "norm-scan"]

        if "err" in steps:
            result.update(self.estimate_norm_err())

        if "counts" in steps:
            result.update(self.estimate_counts(datasets))

        if "ul" in steps:
            result.update(self.estimate_norm_ul(datasets))

        if "errp-errn" in steps:
            result.update(self.estimate_norm_errn_errp())

        if "ts" in steps:
            result.update(self.estimate_norm_ts(datasets))

        if "norm-scan" in steps:
            result.update(self.estimate_norm_scan())

        return result
Example #28
def data_fit(stacked):
    fit = Fit([stacked])
    result = fit.run(optimize_opts={"print_level": 1})
Example #29
    def __init__(self, energy_edges=[1, 10] * u.TeV, **kwargs):
        self.energy_edges = energy_edges

        fit = Fit(confidence_opts={"backend": "scipy"})
        kwargs.setdefault("fit", fit)
        super().__init__(**kwargs)
Example #30
# Now we'll fit a model to the spectrum with the `Fit` class. First we create a power-law model with initial values for the index and the amplitude, and then we do a likelihood fit. The fit results are printed below.

# In[ ]:

model = PowerLawSpectralModel(index=4,
                              amplitude="1.3e-9 cm-2 s-1 TeV-1",
                              reference="0.02 TeV")

emin_fit, emax_fit = (0.04 * u.TeV, 0.4 * u.TeV)

for obs in extraction.spectrum_observations:
    obs.model = model
    obs.mask_fit = obs.counts.energy_mask(emin=emin_fit, emax=emax_fit)

joint_fit = Fit(extraction.spectrum_observations)
joint_result = joint_fit.run()

model.parameters.covariance = joint_result.parameters.covariance
print(joint_result)

# Now you might want to do the stacking here, even though in our case there is only one observation, which makes it superfluous.
# We can compute flux points by fitting the norm of the global model in energy bands.

# In[ ]:

e_edges = np.logspace(np.log10(0.04), np.log10(0.4), 7) * u.TeV

dataset = Datasets(extraction.spectrum_observations).stack_reduce()

dataset.model = model
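
# A sketch of the flux-point computation promised above, modeled on the
# FluxPointsEstimator calls in the earlier examples on this page (the exact
# constructor arguments vary between gammapy versions):

# In[ ]:

fpe = FluxPointsEstimator(datasets=[dataset], e_edges=e_edges)
flux_points = fpe.run()
flux_points.table["is_ul"] = flux_points.table["ts"] < 4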