Example #1
    def run_fit(self, optimize_opts=None):
        """Run all step for the spectrum fit."""
        fit_range = self.config["fit"].get("fit_range")
        model = self.config["fit"]["model"]

        for obs in self.extraction.spectrum_observations:
            if fit_range is not None:
                obs.mask_fit = obs.counts.energy_mask(fit_range[0],
                                                      fit_range[1])
            obs.model = model

        self.fit = Fit(self.extraction.spectrum_observations)
        self.fit_result = self.fit.run(optimize_opts=optimize_opts)

        modelname = model.__class__.__name__

        model.parameters.covariance = self.fit_result.parameters.covariance

        filename = make_path(
            self.config["outdir"]) / "fit_result_{}.yaml".format(modelname)

        self.write(filename=filename)

        obs_stacker = SpectrumDatasetOnOffStacker(
            self.extraction.spectrum_observations)
        obs_stacker.run()

        datasets_fp = obs_stacker.stacked_obs
        datasets_fp.model = model
        self.flux_point_estimator = FluxPointsEstimator(
            e_edges=self.config["fp_binning"], datasets=datasets_fp)
        fp = self.flux_point_estimator.run()
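        # flag points with TS < 4 (roughly below a 2-sigma detection) as upper limits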
        fp.table["is_ul"] = fp.table["ts"] < 4
        self.flux_points = fp
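
This and the following examples all share one core pattern: attach a model to one or more datasets, build a FluxPointsEstimator from energy bin edges, and run it. A minimal standalone sketch of that pattern (the gammapy.spectrum import path and the e_edges keyword are assumptions matching the Gammapy version these snippets target; datasets stands in for any list of datasets with a fitted model attached):

import astropy.units as u
from gammapy.spectrum import FluxPointsEstimator  # assumed import path for this Gammapy era

# datasets: a placeholder list of spectrum or map datasets with a fitted model attached
e_edges = [1, 3, 10, 30] * u.TeV  # energy bin edges for the flux points
fpe = FluxPointsEstimator(datasets=datasets, e_edges=e_edges)
flux_points = fpe.run()  # returns a FluxPoints object with one row per energy bin
flux_points.table["is_ul"] = flux_points.table["ts"] < 4  # flag non-detections as upper limits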
Example #2
def flux_point(datasets):
    e_edges = [0.3, 1, 3, 10] * u.TeV
    fpe = FluxPointsEstimator(datasets=datasets,
                              e_edges=e_edges,
                              source="gc-source")

    fpe.run()
Example #3
def test_mask_shape():
    axis = MapAxis.from_edges([1, 3, 10],
                              unit="TeV",
                              interp="log",
                              name="energy")
    geom_1 = WcsGeom.create(binsz=1, width=3, axes=[axis])
    geom_2 = WcsGeom.create(binsz=1, width=5, axes=[axis])

    dataset_1 = MapDataset.create(geom_1)
    dataset_2 = MapDataset.create(geom_2)
    dataset_1.psf = None
    dataset_2.psf = None
    dataset_1.edisp = None
    dataset_2.edisp = None

    model = SkyModel(spectral_model=PowerLawSpectralModel(),
                     spatial_model=GaussianSpatialModel(),
                     name="source")

    dataset_1.models = model
    dataset_2.models = model

    fpe = FluxPointsEstimator(datasets=[dataset_2, dataset_1],
                              e_edges=[1, 10] * u.TeV,
                              source="source")

    fp = fpe.run()

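    # MapDataset.create builds empty datasets, so every flux-point bin should report zero counts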
    assert_allclose(fp.table["counts"], 0)
Example #4
def flux_point(datasets):
    e_edges = MapAxis.from_bounds(0.7, 30, nbin=11, interp="log",
                                  unit="TeV").edges
    fpe = FluxPointsEstimator(datasets=datasets,
                              e_edges=e_edges,
                              source="gc-source")
    fpe.run()
Example #5
def test_mask_shape():
    axis = MapAxis.from_edges([1, 3, 10], unit="TeV", interp="log", name="energy")
    geom_1 = WcsGeom.create(binsz=1, width=3, axes=[axis])
    geom_2 = WcsGeom.create(binsz=1, width=5, axes=[axis])

    dataset_1 = MapDataset.create(geom_1)
    dataset_2 = MapDataset.create(geom_2)
    dataset_1.psf = None
    dataset_2.psf = None
    dataset_1.edisp = None
    dataset_2.edisp = None

    model = SkyModel(
        spectral_model=PowerLawSpectralModel(), spatial_model=GaussianSpatialModel()
    )

    dataset_1.model = model
    dataset_2.model = model

    fpe = FluxPointsEstimator(
        datasets=[dataset_2, dataset_1], e_edges=[1, 10] * u.TeV, source="source"
    )

    with pytest.raises(ValueError):
        fpe.run()
Example #6
def create_fpe(model):
    dataset = simulate_spectrum_dataset(model)
    e_edges = [0.1, 1, 10, 100] * u.TeV
    dataset.model = model
    return FluxPointsEstimator(datasets=[dataset],
                               e_edges=e_edges,
                               norm_n_values=11)  # sample the norm likelihood profile at 11 points per bin
Example #7
def test_no_likelihood_contribution():
    dataset = simulate_spectrum_dataset(PowerLawSpectralModel())
    dataset.model = PowerLawSpectralModel()
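    # an all-False safe mask excludes every bin, so the dataset contributes nothing to the likelihood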
    dataset.mask_safe = np.zeros(dataset.data_shape, dtype=bool)

    fpe = FluxPointsEstimator([dataset], e_edges=[1, 10] * u.TeV)

    with pytest.raises(ValueError):
        fpe.run()
Example #8
def create_fpe(model):
    model = SkyModel(spectral_model=model, name="source")
    dataset = simulate_spectrum_dataset(model)
    e_edges = [0.1, 1, 10, 100] * u.TeV
    dataset.models = model
    return FluxPointsEstimator(datasets=[dataset],
                               e_edges=e_edges,
                               norm_n_values=11,
                               source="source")
Example #9
def fpe_map_pwl():
    dataset_1 = simulate_map_dataset()
    dataset_2 = dataset_1.copy()
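    # fully mask the second dataset: it should contribute nothing to the flux points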
    dataset_2.mask_safe = np.zeros(dataset_2.data_shape).astype(bool)

    e_edges = [0.1, 1, 10, 100] * u.TeV
    return FluxPointsEstimator(
        datasets=[dataset_1, dataset_2],
        e_edges=e_edges,
        norm_n_values=3,
        source="source",
    )
Example #10
def fpe_map_pwl_reoptimize():
    dataset = simulate_map_dataset()
    e_edges = [1, 10] * u.TeV
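    # freeze the shape parameters so reoptimize=True only refits the remaining free parameters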
    dataset.parameters["lon_0"].frozen = True
    dataset.parameters["lat_0"].frozen = True
    dataset.parameters["index"].frozen = True
    return FluxPointsEstimator(
        datasets=[dataset],
        e_edges=e_edges,
        norm_values=[1],
        reoptimize=True,
        source="source",
    )
Example #11
def test_no_likelihood_contribution():
    dataset = simulate_spectrum_dataset(PowerLawSpectralModel())
    dataset.model = PowerLawSpectralModel()
    dataset.mask_safe = np.zeros(dataset.data_shape, dtype=bool)

    fpe = FluxPointsEstimator([dataset], e_edges=[1, 3, 10] * u.TeV)
    fp = fpe.run()

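    # nothing constrains the norm, so every fitted quantity comes back NaN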
    assert np.isnan(fp.table["norm"]).all()
    assert np.isnan(fp.table["norm_err"]).all()
    assert np.isnan(fp.table["norm_ul"]).all()
    assert np.isnan(fp.table["norm_scan"]).all()
    assert_allclose(fp.table["counts"], 0)
Example #12
    def get_flux_points(self):
        """Calculate flux points for a specific model component."""
        if not self.fit:
            raise RuntimeError("No results available from Fit.")

        fp_settings = self.config.flux_points
        log.info("Calculating flux points.")
        e_edges = self._make_energy_axis(fp_settings.energy).edges
        flux_point_estimator = FluxPointsEstimator(
            e_edges=e_edges,
            datasets=self.datasets,
            source=fp_settings.source,
            **fp_settings.params,
        )
        fp = flux_point_estimator.run()
        fp.table["is_ul"] = fp.table["ts"] < 4
        self.flux_points = FluxPointsDataset(
            data=fp, models=self.models[fp_settings.source])
        cols = ["e_ref", "ref_flux", "dnde", "dnde_ul", "dnde_err", "is_ul"]
        log.info("\n{}".format(self.flux_points.data.table[cols]))
Example #13
    def get_flux_points(self, source="source"):
        """Calculate flux points for a specific model component.

        Parameters
        ----------
        source : str
            Name of the model component for which to calculate the flux points.
        """
        if not self._validate_fp_settings():
            return False

        # TODO: add "source" to config
        log.info("Calculating flux points.")
        axis_params = self.settings["flux-points"]["fp_binning"]
        e_edges = MapAxis.from_bounds(**axis_params).edges
        flux_point_estimator = FluxPointsEstimator(
            e_edges=e_edges, datasets=self.datasets, source=source
        )
        fp = flux_point_estimator.run()
        fp.table["is_ul"] = fp.table["ts"] < 4
        model = self.model[source].spectral_model.copy()
        self.flux_points = FluxPointsDataset(data=fp, model=model)
        cols = ["e_ref", "ref_flux", "dnde", "dnde_ul", "dnde_err", "is_ul"]
        log.info("\n{}".format(self.flux_points.data.table[cols]))
Example #14
def run_analysis_3d(target_dict):
    """Run 3D analysis for the selected target"""
    tag = target_dict["tag"]
    name = target_dict["name"]
    log.info(f"running 3d analysis, {tag}")

    path_res = Path(tag + "/results/")

    ra = target_dict["ra"]
    dec = target_dict["dec"]
    e_decorr = target_dict["e_decorr"]

    config_str = f"""
    general:
        logging:
            level: INFO
        outdir: .

    observations:
        datastore: $GAMMAPY_DATA/hess-dl3-dr1/
        filters:
            - filter_type: par_value
              value_param: {name}
              variable: TARGET_NAME

    datasets:
        dataset-type: MapDataset
        stack-datasets: true
        offset-max: 2.5 deg
        geom:
            skydir: [{ra}, {dec}]
            width: [5, 5]
            binsz: 0.02
            coordsys: CEL
            proj: TAN
            axes:
              - name: energy
                hi_bnd: 100
                lo_bnd: 0.1
                nbin: 24
                interp: log
                node_type: edges
                unit: TeV
        energy-axis-true:
            name: energy
            hi_bnd: 100
            lo_bnd: 0.1
            nbin: 72
            interp: log
            node_type: edges
            unit: TeV
    """
    print(config_str)
    config = AnalysisConfig(config_str)

    #  Observation selection
    analysis = Analysis(config)
    analysis.get_observations()

    if DEBUG:
        analysis.observations.list = [analysis.observations.list[0]]

    # Data reduction
    analysis.get_datasets()

    # Set runwise energy threshold. See reference paper, section 5.1.1.
    for dataset in analysis.datasets:
        # energy threshold given by the 10% edisp criterion
        e_thr_bias = dataset.edisp.get_bias_energy(0.1)

        # energy at which the background peaks
        background_model = dataset.background_model
        bkg_spectrum = background_model.map.get_spectrum()
        idx = bkg_spectrum.data.argmax()
        e_thr_bkg = bkg_spectrum.energy.center[idx]

        esafe = max(e_thr_bias, e_thr_bkg)
        dataset.mask_fit = dataset.counts.geom.energy_mask(emin=esafe)

    # Model fitting
    spatial_model = target_dict["spatial_model"]
    model_config = f"""
    components:
        - name: {tag}
          type: SkyModel
          spatial:
            type: {spatial_model}
            frame: icrs
            parameters:
            - name: lon_0
              value: {ra}
              unit: deg
            - name: lat_0
              value: {dec}
              unit: deg
          spectral:
            type: PowerLawSpectralModel
            parameters:
            - name: amplitude
              value: 1.0e-12
              unit: cm-2 s-1 TeV-1
            - name: index
              value: 2.0
              unit: ''
            - name: reference
              value: {e_decorr}
              unit: TeV
              frozen: true
    """
    model_npars = 5
    if spatial_model == "DiskSpatialModel":
        model_config = yaml.safe_load(model_config)
        parameters = model_config["components"][0]["spatial"]["parameters"]
        parameters.extend(
            [
                {"name": "r_0", "value": 0.2, "unit": "deg", "frozen": False},
                {"name": "e", "value": 0.8, "unit": "", "frozen": False},
                {"name": "phi", "value": 150, "unit": "deg", "frozen": False},
                {"name": "edge", "value": 0.01, "unit": "deg", "frozen": True},
            ]
        )
        model_npars += 4
    analysis.set_model(model=model_config)

    for dataset in analysis.datasets:
        dataset.background_model.norm.frozen = False

    analysis.run_fit()

    parameters = analysis.model.parameters
    parameters.covariance = analysis.fit_result.parameters.covariance[0:model_npars, 0:model_npars]
    write_fit_summary(parameters, str(path_res / "results-summary-fit-3d.yaml"))

    # Flux points
    # TODO: This is a workaround to re-optimize the bkg in each energy bin. It has to be added to the Analysis class
    datasets = analysis.datasets.copy()
    for dataset in datasets:
        for par in dataset.parameters:
            if par is not dataset.background_model.norm:
                par.frozen = True

    reoptimize = not DEBUG
    fpe = FluxPointsEstimator(
        datasets=datasets, e_edges=FLUXP_EDGES, source=tag, reoptimize=reoptimize
    )

    flux_points = fpe.run()
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = ["e_ref", "e_min", "e_max", "dnde", "dnde_errp", "dnde_errn"]
    flux_points.table_formatted[keys].write(
        path_res / "flux-points-3d.ecsv", format="ascii.ecsv"
    )
Example #15
def run_analysis_1d(target_dict):
    """Run spectral analysis for the selected target"""
    tag = target_dict["tag"]
    name = target_dict["name"]

    log.info(f"running 1d analysis, {tag}")
    path_res = Path(tag + "/results/")

    ra = target_dict["ra"]
    dec = target_dict["dec"]
    on_size = target_dict["on_size"]
    e_decorr = target_dict["e_decorr"]

    target_pos = SkyCoord(ra, dec, unit="deg", frame="icrs")
    on_radius = Angle(on_size * u.deg)
    containment_corr = True

    # Observations selection
    data_store = DataStore.from_dir("$GAMMAPY_DATA/hess-dl3-dr1/")
    mask = data_store.obs_table["TARGET_NAME"] == name
    obs_table = data_store.obs_table[mask]
    observations = data_store.get_observations(obs_table["OBS_ID"])

    if DEBUG:
        observations = [observations[0]]

    # Reflected regions background estimation
    on_region = CircleSkyRegion(center=target_pos, radius=on_radius)
    dataset_maker = SpectrumDatasetMaker(
        region=on_region,
        e_reco=E_RECO,
        e_true=E_RECO,
        containment_correction=containment_corr,
    )
    bkg_maker = ReflectedRegionsBackgroundMaker()
    safe_mask_masker = SafeMaskMaker(methods=["edisp-bias"], bias_percent=10)

    datasets = []

    for observation in observations:
        dataset = dataset_maker.run(observation, selection=["counts", "aeff", "edisp"])
        dataset_on_off = bkg_maker.run(dataset, observation)
        dataset_on_off = safe_mask_masker.run(dataset_on_off, observation)
        datasets.append(dataset_on_off)

    # Fit spectrum
    model = PowerLawSpectralModel(
        index=2, amplitude=2e-11 * u.Unit("cm-2 s-1 TeV-1"), reference=e_decorr * u.TeV
    )

    for dataset in datasets:
        dataset.model = model

    fit_joint = Fit(datasets)
    result_joint = fit_joint.run()

    parameters = model.parameters
    parameters.covariance = result_joint.parameters.covariance
    write_fit_summary(parameters, str(path_res / "results-summary-fit-1d.yaml"))

    # Flux points
    fpe = FluxPointsEstimator(datasets=datasets, e_edges=FLUXP_EDGES)
    flux_points = fpe.run()
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = ["e_ref", "e_min", "e_max", "dnde", "dnde_errp", "dnde_errn", "is_ul"]
    flux_points.table_formatted[keys].write(
        path_res / "flux-points-1d.ecsv", format="ascii.ecsv"
    )
Example #16
crab_spec = datasets[0].models["Crab Nebula"].spectral_model
crab_spec.parameters.covariance = results_joint.parameters.get_subcovariance(
    crab_spec.parameters)
print(crab_spec)

# We can compute flux points for the Fermi-LAT and HESS datasets in order to plot them together with the HAWC flux points.

# In[ ]:

# compute Fermi-LAT and HESS flux points
e_edges = MapAxis.from_bounds(0.01, 2.0, nbin=6, interp="log",
                              unit="TeV").edges

flux_points_fermi = FluxPointsEstimator(datasets=[dataset_fermi],
                                        e_edges=e_edges,
                                        source="Crab Nebula").run()

e_edges = MapAxis.from_bounds(1, 15, nbin=6, interp="log", unit="TeV").edges
flux_points_hess = FluxPointsEstimator(datasets=[dataset_hess],
                                       e_edges=e_edges).run()

# Now let's plot the fitted Crab spectrum together with the flux points from each instrument.
#

# In[ ]:

# display spectrum and flux points
energy_range = [0.01, 120] * u.TeV
plt.figure(figsize=(8, 6))
ax = crab_spec.plot(energy_range=energy_range, energy_power=2, label="Model")
Example #17

# Flux points are computed on the stacked observation
stacked_obs = Datasets(extract.spectrum_observations).stack_reduce()

print(stacked_obs)


# In[ ]:


e_edges = np.logspace(0, 1.5, 5) * u.TeV

stacked_obs.model = model

fpe = FluxPointsEstimator(datasets=[stacked_obs], e_edges=e_edges)
flux_points = fpe.run()
flux_points.table_formatted


# ### Plot
# 
# Let's plot the spectral model and points. You could do it directly, but there is a helper class.
# Note that a spectral uncertainty band (a "butterfly") is drawn, but it is very thin, i.e. barely visible.

# In[ ]:


model.parameters.covariance = result.parameters.covariance
flux_points_dataset = FluxPointsDataset(data=flux_points, model=model)
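
# The snippet stops before the actual plot. A minimal sketch of the plotting
# step, reusing the plot / plot_error / flux_points.plot calls that appear in
# the later examples; the energy range is an assumption chosen to cover the
# flux-point bins:

energy_range = [1, 32] * u.TeV
model.plot(energy_range=energy_range, energy_power=2)
ax = model.plot_error(energy_range=energy_range, energy_power=2)
flux_points.plot(ax=ax, energy_power=2)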
Example #18
    def run_region(self, kr, lon, lat, radius):
        #    TODO: for now we have to read/create the all-sky maps in each job
        #    because we can't pickle <functools._lru_cache_wrapper object>;
        #    move this back to __init__ when fixed

        # exposure
        exposure_hpx = Map.read(
            self.datadir + "/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz")
        exposure_hpx.unit = "cm2 s"

        # background iem
        infile = self.datadir + "/catalogs/fermi/gll_iem_v06.fits.gz"
        outfile = self.resdir + "/gll_iem_v06_extra.fits"
        model_iem = extrapolate_iem(infile, outfile, self.logEc_extra)

        # ROI
        roi_time = time()
        ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
        width = 2 * (radius + self.psf_margin)

        # Counts
        counts = Map.create(
            skydir=ROI_pos,
            width=width,
            proj="CAR",
            coordsys="GAL",
            binsz=self.dlb,
            axes=[self.energy_axis],
            dtype=float,
        )
        counts.fill_by_coord({
            "skycoord": self.events.radec,
            "energy": self.events.energy
        })

        axis = MapAxis.from_nodes(counts.geom.axes[0].center,
                                  name="energy",
                                  unit="GeV",
                                  interp="log")
        wcs = counts.geom.wcs
        geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
        coords = counts.geom.get_coord()

        # expo
        data = exposure_hpx.interp_by_coord(coords)
        exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

        # read PSF
        psf_kernel = PSFKernel.from_table_psf(
            self.psf, counts.geom, max_radius=self.psf_margin * u.deg
        )

        # Energy Dispersion
        e_true = exposure.geom.axes[0].edges
        e_reco = counts.geom.axes[0].edges
        edisp = EnergyDispersion.from_diagonal_response(e_true=e_true,
                                                        e_reco=e_reco)

        # fit mask: if the ROI wraps around lon = 0, shift longitudes > 180 deg
        # to the negative branch so that min/max define a contiguous interval
        if coords["lon"].min() < 90 * u.deg and coords["lon"].max() > 270 * u.deg:
            coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
        mask = (
            (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
            & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
            & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
            & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg)
        )
        mask_fermi = WcsNDMap(counts.geom, mask)

        # IEM
        eval_iem = MapEvaluator(model=model_iem,
                                exposure=exposure,
                                psf=psf_kernel,
                                edisp=edisp)
        bkg_iem = eval_iem.compute_npred()

        # ISO
        eval_iso = MapEvaluator(model=self.model_iso,
                                exposure=exposure,
                                edisp=edisp)
        bkg_iso = eval_iso.compute_npred()

        # merge iem and iso, only one local normalization is fitted
        background_total = bkg_iem + bkg_iso
        background_model = BackgroundModel(background_total)
        background_model.parameters["norm"].min = 0.0

        # Sources model
        in_roi = self.FHL3.positions.galactic.contained_by(wcs)
        FHL3_roi = []
        for ks in range(len(self.FHL3.table)):
            if in_roi[ks]:
                model = self.FHL3[ks].sky_model()
                model.spatial_model.parameters.freeze_all()  # freeze spatial
                model.spectral_model.parameters["amplitude"].min = 0.0
                if isinstance(model.spectral_model, PowerLawSpectralModel):
                    model.spectral_model.parameters["index"].min = 0.1
                    model.spectral_model.parameters["index"].max = 10.0
                else:
                    model.spectral_model.parameters["alpha"].min = 0.1
                    model.spectral_model.parameters["alpha"].max = 10.0

                FHL3_roi.append(model)
        model_total = SkyModels(FHL3_roi)

        # Dataset
        dataset = MapDataset(
            model=model_total,
            counts=counts,
            exposure=exposure,
            psf=psf_kernel,
            edisp=edisp,
            background_model=background_model,
            mask_fit=mask_fermi,
            name="3FHL_ROI_num" + str(kr),
        )
        cat_stat = dataset.stat_sum()

        datasets = Datasets([dataset])
        fit = Fit(datasets)
        results = fit.run(optimize_opts=self.optimize_opts)
        print("ROI_num", str(kr), "\n", results)
        fit_stat = datasets.stat_sum()

        if results.message != "Optimization failed.":
            datasets.to_yaml(path=Path(self.resdir),
                             prefix=dataset.name,
                             overwrite=True)
            np.save(
                self.resdir + "/3FHL_ROI_num" + str(kr) + "_covariance.npy",
                results.parameters.covariance,
            )
            np.savez(
                self.resdir + "/3FHL_ROI_num" + str(kr) + "_fit_infos.npz",
                message=results.message,
                stat=[cat_stat, fit_stat],
            )

            exec_time = time() - roi_time
            print("ROI", kr, " time (s): ", exec_time)

            for model in FHL3_roi:
                cat_row = self.FHL3[model.name].data
                if cat_row["ROI_num"] == kr and cat_row["Signif_Avg"] >= self.sig_cut:
                    flux_points = FluxPointsEstimator(
                        datasets=datasets,
                        e_edges=self.El_flux,
                        source=model.name,
                        sigma_ul=2.0,
                    ).run()
                    filename = self.resdir + "/" + model.name + "_flux_points.fits"
                    flux_points.write(filename, overwrite=True)

            exec_time = time() - roi_time - exec_time
            print("ROI", kr, " Flux points time (s): ", exec_time)
Example #19
residual2.sum_over_axes().smooth(width=0.05 * u.deg).plot(cmap="coolwarm",
                                                          vmin=-1,
                                                          vmax=1,
                                                          add_cbar=True,
                                                          ax=ax_2)

# ## Computing Flux Points
#
# Finally we compute flux points for the galactic center source. For this we first define an energy binning:

# In[ ]:

e_edges = [0.3, 1, 3, 10] * u.TeV
fpe = FluxPointsEstimator(datasets=[dataset_combined],
                          e_edges=e_edges,
                          source="gc-source")

# In[ ]:

get_ipython().run_cell_magic('time', '', 'flux_points = fpe.run()')

# Now let's plot the best fit model and flux points:

# In[ ]:

flux_points.table["is_ul"] = flux_points.table["ts"] < 4
ax = flux_points.plot(energy_power=2)
model_ecpl.spectral_model.plot(ax=ax,
                               energy_range=energy_range,
                               energy_power=2)
Example #20
def flux_point(stacked):
    e_edges = MapAxis.from_bounds(0.7, 30, nbin=11, interp="log",
                                  unit="TeV").edges
    fpe = FluxPointsEstimator(datasets=[stacked], e_edges=e_edges)
    fpe.run()
Example #21
covar = result.parameters.get_subcovariance(spec.parameters)
spec.parameters.covariance = covar

# Now we can actually do the plot using the `plot_error` method:

# In[ ]:

energy_range = [1, 10] * u.TeV
spec.plot(energy_range=energy_range, energy_power=2)
ax = spec.plot_error(energy_range=energy_range, energy_power=2)

# ### Computing flux points
#
# We can now compute some flux points using the `~gammapy.spectrum.FluxPointsEstimator`.
#
# Besides the list of datasets to use, we must provide the energy intervals on which to compute the flux points, as well as the name of the model component.

# In[ ]:

e_edges = [1, 2, 4, 10] * u.TeV
fpe = FluxPointsEstimator(datasets=[stacked], e_edges=e_edges, source="crab")

# In[ ]:

get_ipython().run_cell_magic('time', '', 'flux_points = fpe.run()')

# In[ ]:

ax = spec.plot_error(energy_range=energy_range, energy_power=2)
flux_points.plot(ax=ax, energy_power=2)
Example #22
# Finally, let's compute spectral points. The method is to first choose an energy binning and then, in each bin, perform a one-dimensional likelihood fit / profile of the model norm to obtain the flux and its error.

# In[ ]:

# Flux points are computed on the stacked observation
stacked_dataset = Datasets(datasets).stack_reduce()

print(stacked_dataset)

# In[ ]:

e_edges = np.logspace(0, 1.5, 5) * u.TeV

stacked_dataset.model = model

fpe = FluxPointsEstimator(datasets=[stacked_dataset], e_edges=e_edges)
flux_points = fpe.run()
flux_points.table_formatted

# ### Plot
#
# Let's plot the spectral model and points. You could do it directly, but there is a helper class.
# Note that a spectral uncertainty band (a "butterfly") is drawn, but it is very thin, i.e. barely visible.

# In[ ]:

model.spectral_model.parameters.covariance = result.parameters.covariance
flux_points_dataset = FluxPointsDataset(data=flux_points, models=model)

# In[ ]:
Example #23
ax_spectrum.set_ylim(0, 25)

# ## Compute Flux Points
#
# To wrap up our analysis, we can compute flux points by fitting the norm of the global model in energy bands. We'll use a fixed energy binning for now:

# In[ ]:

e_min, e_max = 0.7, 30
e_edges = np.logspace(np.log10(e_min), np.log10(e_max), 11) * u.TeV

# Now we create an instance of the `FluxPointsEstimator`, by passing the dataset and the energy binning:

# In[ ]:

fpe = FluxPointsEstimator(datasets=datasets_joint, e_edges=e_edges)
flux_points = fpe.run()

# Here is the table of the resulting flux points:

# In[ ]:

flux_points.table_formatted

# Now we plot the flux points and their likelihood profiles. For the plotting of upper limits we choose a threshold of TS < 4.

# In[ ]:

plt.figure(figsize=(8, 5))
flux_points.table["is_ul"] = flux_points.table["ts"] < 4
ax = flux_points.plot(energy_power=2)
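
# The snippet is cut off here; the narration above also promises the per-bin
# likelihood profiles. A hypothetical continuation (to_sed_type and
# plot_ts_profiles are assumed FluxPoints helpers from this Gammapy era):
flux_points.to_sed_type("e2dnde").plot_ts_profiles(ax=ax)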
Example #24
def run_analysis_3d(target_dict, fluxp_edges, debug):
    """Run stacked 3D analysis for the selected target.

    Notice that, to save time, we run a stacked analysis rather than the
    joint analysis performed in the reference paper.
    """
    tag = target_dict["tag"]
    log.info(f"running 3d analysis, {tag}")

    path_res = Path(tag + "/results/")

    txt = Path("config_template.yaml").read_text()
    txt = txt.format_map(target_dict)
    config = AnalysisConfig.from_yaml(txt)

    log.info(f"Running observations selection")
    analysis = Analysis(config)
    analysis.get_observations()

    log.info(f"Running data reduction")
    analysis.get_datasets()

    # TODO: Improve safe mask handling in Analysis. The mask should be applied run-by-run
    maker_safe_mask = SafeMaskMaker(methods=["edisp-bias", "bkg-peak"])
    stacked = maker_safe_mask.run(analysis.datasets[0])

    log.info(f"Running fit ...")
    ra = target_dict["ra"]
    dec = target_dict["dec"]
    e_decorr = target_dict["e_decorr"]
    spectral_model = Model.create("PowerLawSpectralModel", reference=e_decorr)
    spatial_model = Model.create(target_dict["spatial_model"],
                                 lon_0=ra,
                                 lat_0=dec)
    if target_dict["spatial_model"] == "DiskSpatialModel":
        spatial_model.e.frozen = False
    sky_model = SkyModel(spatial_model=spatial_model,
                         spectral_model=spectral_model,
                         name=tag)

    stacked.models = sky_model
    stacked.background_model.norm.frozen = False
    fit = Fit([stacked])
    result = fit.run()

    parameters = stacked.models.parameters
    model_npars = len(sky_model.parameters.names)
    parameters.covariance = result.parameters.covariance[0:model_npars,
                                                         0:model_npars]
    log.info(f"Writing {path_res}")
    write_fit_summary(parameters,
                      str(path_res / "results-summary-fit-3d.yaml"))

    log.info("Running flux points estimation")
    # TODO: This is a workaround to re-optimize the bkg. Remove it once it's added to the Analysis class
    for par in stacked.parameters:
        if par is not stacked.background_model.norm:
            par.frozen = True

    reoptimize = not debug
    fpe = FluxPointsEstimator(datasets=[stacked],
                              e_edges=fluxp_edges,
                              source=tag,
                              reoptimize=reoptimize)

    flux_points = fpe.run()
    flux_points.table["is_ul"] = flux_points.table["ts"] < 4
    keys = [
        "e_ref",
        "e_min",
        "e_max",
        "dnde",
        "dnde_errp",
        "dnde_errn",
        "is_ul",
        "dnde_ul",
    ]
    log.info(f"Writing {path_res}")
    flux_points.table_formatted[keys].write(path_res / "flux-points-3d.ecsv",
                                            format="ascii.ecsv")