Example #1
def test_absorption():
    # absorption values for given redshift
    redshift = 0.117
    absorption = Absorption.read_builtin("dominguez")

    # Spectral model corresponding to PKS 2155-304 (quiescent state)
    index = 3.53
    amplitude = 1.81 * 1e-12 * u.Unit("cm-2 s-1 TeV-1")
    reference = 1 * u.TeV
    pwl = PowerLawSpectralModel(index=index,
                                amplitude=amplitude,
                                reference=reference)

    # EBL + PWL model
    model = AbsorbedSpectralModel(spectral_model=pwl,
                                  absorption=absorption,
                                  parameter=redshift)

    desired = u.Quantity(5.140765e-13, "TeV-1 s-1 cm-2")
    assert_quantity_allclose(model(1 * u.TeV), desired, rtol=1e-3)
    assert model.alpha_norm.value == 1.0

    # EBL + PWL model: with alpha_norm=0 the absorption is switched off, so the result should equal the plain power law
    model = AbsorbedSpectralModel(spectral_model=pwl,
                                  absorption=absorption,
                                  alpha_norm=0,
                                  parameter=redshift)
    assert_quantity_allclose(model(1 * u.TeV), pwl(1 * u.TeV), rtol=1e-3)

    # EBL + PWL model: test with a norm different from 1
    model = AbsorbedSpectralModel(spectral_model=pwl,
                                  absorption=absorption,
                                  alpha_norm=1.5,
                                  parameter=redshift)
    desired = u.Quantity(2.739695e-13, "TeV-1 s-1 cm-2")
    assert model.alpha_norm.value == 1.5
    assert_quantity_allclose(model(1 * u.TeV), desired, rtol=1e-3)

    # Test error propagation
    model.spectral_model.parameters.set_error(
        amplitude=0.1 * model.spectral_model.amplitude.value)
    dnde, dnde_err = model.evaluate_error(1 * u.TeV)
    assert_allclose(dnde_err / dnde, 0.1)
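The final error-propagation check relies on the absorbed model being linear in the power-law amplitude: if dnde = A * f(E) and only A carries an uncertainty, then dnde_err / dnde = A_err / A, which is exactly the 10% relative error set on the amplitude just above (a reading of the assertion, not part of the original test).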
Example #2
def fake_dataset():
    axis = MapAxis.from_energy_bounds(0.1, 10, 5, unit="TeV", name="energy")
    axis_true = MapAxis.from_energy_bounds(0.05, 20, 10, unit="TeV", name="energy_true")

    geom = WcsGeom.create(npix=50, binsz=0.02, axes=[axis])
    dataset = MapDataset.create(geom)
    dataset.psf = PSFMap.from_gauss(axis_true, sigma="0.05 deg")
    dataset.mask_safe += np.ones(dataset.data_shape, dtype=bool)
    dataset.background += 1
    dataset.exposure += 1e12 * u.cm ** 2 * u.s

    spatial_model = PointSpatialModel()
    spectral_model = PowerLawSpectralModel(amplitude="1e-10 cm-2s-1TeV-1", index=2)
    model = SkyModel(
        spatial_model=spatial_model, spectral_model=spectral_model, name="source"
    )
    dataset.models = [model]
    dataset.fake(random_state=42)
    return dataset
Example #3
def spectrum_dataset():
    name = "test"
    energy = np.logspace(-1, 1, 31) * u.TeV
    livetime = 100 * u.s

    pwl = PowerLawSpectralModel(
        index=2.1,
        amplitude="1e5 cm-2 s-1 TeV-1",
        reference="0.1 TeV",
    )

    temp_mod = ConstantTemporalModel()

    model = SkyModel(spectral_model=pwl,
                     temporal_model=temp_mod,
                     name="test-source")
    axis = MapAxis.from_edges(energy, interp="log", name="energy")
    axis_true = MapAxis.from_edges(energy, interp="log", name="energy_true")

    background = RegionNDMap.create(region="icrs;circle(0, 0, 0.1)",
                                    axes=[axis])

    models = Models([model])
    exposure = RegionNDMap.create(region="icrs;circle(0, 0, 0.1)",
                                  axes=[axis_true])
    exposure.quantity = u.Quantity("1 cm2") * livetime
    bkg_rate = np.ones(30) / u.s
    background.quantity = bkg_rate * livetime

    start = [1, 3, 5] * u.day
    stop = [2, 3.5, 6] * u.day
    t_ref = Time(55555, format="mjd")
    gti = GTI.create(start, stop, reference_time=t_ref)

    dataset = SpectrumDataset(
        models=models,
        exposure=exposure,
        background=background,
        name=name,
        gti=gti,
    )
    dataset.fake(random_state=23)
    return dataset
Example #4
def test_flux_estimator_compound_model():
    pl = PowerLawSpectralModel()
    pl.amplitude.min = 1e-15
    pl.amplitude.max = 1e-10

    pln = PowerLawNormSpectralModel()
    pln.norm.value = 0.1
    pln.norm.frozen = True
    spectral_model = pl * pln
    model = SkyModel(spectral_model=spectral_model, name="test")

    estimator = FluxEstimator(source="test",
                              selection_optional=[],
                              reoptimize=True)

    scale_model = estimator.get_scale_model(Models([model]))

    assert_allclose(scale_model.norm.min, 1e-3)
    assert_allclose(scale_model.norm.max, 1e2)
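A plausible reading of the asserted bounds, assuming the scale model's norm limits are the amplitude limits expressed relative to the current amplitude value (1e-12): norm.min = 1e-15 / 1e-12 = 1e-3 and norm.max = 1e-10 / 1e-12 = 1e2.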
Example #5
    def test_table_properties(self, table_flux_estimate):
        model = PowerLawSpectralModel(amplitude="1e-10 cm-2s-1TeV-1", index=2)
        fe = FluxEstimate(data=table_flux_estimate, spectral_model=model)

        assert fe.dnde.unit == u.Unit("cm-2s-1TeV-1")
        assert_allclose(fe.dnde.value, [1e-9, 1e-11])
        assert_allclose(fe.dnde_err.value, [1e-10, 1e-12])
        assert_allclose(fe.dnde_errn.value, [2e-10, 2e-12])
        assert_allclose(fe.dnde_errp.value, [1.5e-10, 1.5e-12])
        assert_allclose(fe.dnde_ul.value, [2e-9, 2e-11])

        assert fe.e2dnde.unit == u.Unit("TeV cm-2s-1")
        assert_allclose(fe.e2dnde.value, [1e-10, 1e-10])

        assert fe.flux.unit == u.Unit("cm-2s-1")
        assert_allclose(fe.flux.value, [9e-10, 9e-11])

        assert fe.eflux.unit == u.Unit("TeV cm-2s-1")
        assert_allclose(fe.eflux.value, [2.302585e-10, 2.302585e-10])
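The asserted integral quantities are consistent with the E^-2 reference model integrated over two decade-wide bins, 0.1-1 TeV and 1-10 TeV (an assumption about the table_flux_estimate fixture, which is not shown here, with e_ref taken as the geometric mean of the bin edges). For the first bin, with A = 1e-10 cm-2 s-1 TeV-1:

    flux   = A * (1/e_min - 1/e_max) = 1e-10 * (10 - 1)       = 9e-10 cm-2 s-1
    eflux  = A * ln(e_max / e_min)   = 1e-10 * ln(10)         ~ 2.302585e-10 TeV cm-2 s-1
    e2dnde = dnde * e_ref**2         = 1e-9 * (0.316 TeV)**2  = 1e-10 TeV cm-2 s-1

The second bin follows the same pattern, shifted up by a factor of 10 in energy.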
Example #6
def simulate_spectrum_dataset(model, random_state=0):
    energy_edges = np.logspace(-0.5, 1.5, 21) * u.TeV
    energy_axis = MapAxis.from_edges(energy_edges, interp="log", name="energy")

    aeff = EffectiveAreaTable.from_parametrization(
        energy=energy_edges).to_region_map()
    bkg_model = SkyModel(
        spectral_model=PowerLawSpectralModel(index=2.5,
                                             amplitude="1e-12 cm-2 s-1 TeV-1"),
        name="background",
    )
    bkg_model.spectral_model.amplitude.frozen = True
    bkg_model.spectral_model.index.frozen = True

    geom = RegionGeom(region=None, axes=[energy_axis])
    acceptance = RegionNDMap.from_geom(geom=geom, data=1)
    edisp = EDispKernelMap.from_diagonal_response(
        energy_axis=energy_axis,
        energy_axis_true=energy_axis.copy(name="energy_true"),
        geom=geom,
    )

    livetime = 100 * u.h
    exposure = aeff * livetime

    mask_safe = RegionNDMap.from_geom(geom=geom, dtype=bool)
    mask_safe.data += True

    dataset = SpectrumDatasetOnOff(name="test_onoff",
                                   exposure=exposure,
                                   acceptance=acceptance,
                                   acceptance_off=5,
                                   edisp=edisp,
                                   mask_safe=mask_safe)
    dataset.models = bkg_model
    bkg_npred = dataset.npred_signal()

    dataset.models = model
    dataset.fake(
        random_state=random_state,
        npred_background=bkg_npred,
    )
    return dataset
Example #7
def test_model_plot():
    pars, errs = {}, {}
    pars["amplitude"] = 1e-12 * u.Unit("TeV-1 cm-2 s-1")
    pars["reference"] = 1 * u.Unit("TeV")
    pars["index"] = 2 * u.Unit("")
    errs["amplitude"] = 0.1e-12 * u.Unit("TeV-1 cm-2 s-1")

    pwl = PowerLawSpectralModel(**pars)
    pwl.parameters.set_parameter_errors(errs)
    with mpl_plot_check():
        pwl.plot((1 * u.TeV, 10 * u.TeV))

    with mpl_plot_check():
        pwl.plot_error((1 * u.TeV, 10 * u.TeV))
Example #8
def simulate_map_dataset(random_state=0, name=None):
    irfs = load_cta_irfs(
        "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
    )

    skydir = SkyCoord("0 deg", "0 deg", frame="galactic")
    energy_edges = np.logspace(-1, 2, 15) * u.TeV
    energy_axis = MapAxis.from_edges(edges=energy_edges,
                                     name="energy",
                                     interp="log")

    geom = WcsGeom.create(skydir=skydir,
                          width=(4, 4),
                          binsz=0.1,
                          axes=[energy_axis],
                          frame="galactic")

    gauss = GaussianSpatialModel(lon_0="0 deg",
                                 lat_0="0 deg",
                                 sigma="0.4 deg",
                                 frame="galactic")
    pwl = PowerLawSpectralModel(amplitude="1e-11 cm-2 s-1 TeV-1")
    skymodel = SkyModel(spatial_model=gauss, spectral_model=pwl, name="source")

    obs = Observation.create(
        pointing=skydir,
        livetime=1 * u.h,
        irfs=irfs,
        location=EarthLocation(lon="-70d18m58.84s",
                               lat="-24d41m0.34s",
                               height="2000m"),
    )
    empty = MapDataset.create(geom, name=name)
    maker = MapDatasetMaker(
        selection=["exposure", "background", "psf", "edisp"])
    dataset = maker.run(empty, obs)

    bkg_model = FoVBackgroundModel(dataset_name=dataset.name)

    dataset.models = [bkg_model, skymodel]
    dataset.fake(random_state=random_state)
    return dataset
Example #9
def trapz_loglog(y, x, axis=-1):
    """Integrate using the composite trapezoidal rule in log-log space.

    Integrate ``y`` as a function of ``x`` along the given axis in log-log space.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like
        Independent variable to integrate over.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.

    Returns
    -------
    trapz : array_like
        Definite integral as approximated by the trapezoidal rule in log-log space.
    """
    from gammapy.modeling.models import PowerLawSpectralModel as pl

    # see https://stackoverflow.com/a/56840428
    x, y = np.moveaxis(x, axis, 0), np.moveaxis(y, axis, 0)

    energy_min, energy_max = x[:-1], x[1:]
    vals_energy_min, vals_energy_max = y[:-1], y[1:]

    # log scale has built-in clipping of zero values
    log = LogScale()

    with np.errstate(invalid="ignore", divide="ignore"):
        index = -log(vals_energy_min / vals_energy_max) / log(energy_min / energy_max)

    index[np.isnan(index)] = np.inf

    return pl.evaluate_integral(
        energy_min=energy_min,
        energy_max=energy_max,
        index=index,
        reference=energy_min,
        amplitude=vals_energy_min,
    )
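A quick sanity check of trapz_loglog on a pure power law, where the log-log trapezoidal rule is exact. This is a usage sketch, not part of the original snippet; it assumes numpy and the helper above (with its own imports, e.g. the LogScale utility) are available in the same scope:

import numpy as np

# Integrate y = x**-2 from 1 to 10; the analytic answer is 1 - 1/10 = 0.9.
x = np.logspace(0, 1, 11)
y = x ** -2.0

per_bin = trapz_loglog(y, x)  # integral of each of the 10 log-spaced segments
print(per_bin.sum())          # ~0.9, exact up to floating point for a power law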
Example #10
    def table_psf_in_energy_band(self,
                                 energy_band,
                                 spectrum=None,
                                 n_bins=11,
                                 **kwargs):
        """Average PSF in a given energy band.

        Expected counts in sub energy bands, computed from the given exposure
        and spectrum, are used as weights.

        Parameters
        ----------
        energy_band : `~astropy.units.Quantity`
            Two-element energy band ``(e_min, e_max)``.
        spectrum : `~gammapy.modeling.models.SpectralModel`
            Spectral model used for weighting the PSF. Default is a power law
            with index=2.
        n_bins : int
            Number of energy points in the energy band, used to compute the
            weighted PSF.

        Returns
        -------
        psf : `TablePSF`
            Table PSF
        """
        from gammapy.modeling.models import PowerLawSpectralModel, TemplateSpectralModel

        if spectrum is None:
            spectrum = PowerLawSpectralModel()

        exposure = TemplateSpectralModel(self.energy, self.exposure)

        e_min, e_max = energy_band
        energy = MapAxis.from_energy_bounds(e_min, e_max, n_bins).edges

        weights = spectrum(energy) * exposure(energy)
        weights /= weights.sum()

        psf_value = self.evaluate(energy=energy)
        psf_value_weighted = weights[:, np.newaxis] * psf_value
        return TablePSF(self.rad, psf_value_weighted.sum(axis=0), **kwargs)
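The weighting step above can be demonstrated on its own with plain numpy: expected counts per sub energy band (spectrum times exposure) are normalised to unit sum and used to average the per-energy PSF profiles. A minimal sketch; the arrays below are invented for illustration and stand in for spectrum(energy) * exposure(energy) and self.evaluate(energy=energy):

import numpy as np

# hypothetical expected counts per sub energy band (spectrum * exposure)
expected_counts = np.array([10.0, 8.0, 5.0, 2.0, 1.0])
weights = expected_counts / expected_counts.sum()   # normalise to unit sum

# hypothetical PSF values, shape (n_energy, n_rad)
psf_value = np.random.rand(5, 50)

# weighted average over the energy axis, keeping the radial axis
psf_value_weighted = (weights[:, np.newaxis] * psf_value).sum(axis=0)
print(psf_value_weighted.shape)  # (50,)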
Example #11
def test_table_properties(region_map_flux_estimate):
    model = SkyModel(PowerLawSpectralModel(amplitude="1e-10 cm-2s-1TeV-1", index=2))

    fe = FluxMaps(data=region_map_flux_estimate, reference_model=model)

    assert fe.dnde.unit == u.Unit("cm-2s-1TeV-1")
    assert_allclose(fe.dnde.data.flat, [1e-9, 1e-11])
    assert_allclose(fe.dnde_err.data.flat, [1e-10, 1e-12])
    assert_allclose(fe.dnde_errn.data.flat, [2e-10, 2e-12])
    assert_allclose(fe.dnde_errp.data.flat, [1.5e-10, 1.5e-12])
    assert_allclose(fe.dnde_ul.data.flat, [2e-9, 2e-11])

    assert fe.e2dnde.unit == u.Unit("TeV cm-2s-1")
    assert_allclose(fe.e2dnde.data.flat, [1e-10, 1e-10])

    assert fe.flux.unit == u.Unit("cm-2s-1")
    assert_allclose(fe.flux.data.flat, [9e-10, 9e-11])

    assert fe.eflux.unit == u.Unit("TeV cm-2s-1")
    assert_allclose(fe.eflux.data.flat, [2.302585e-10, 2.302585e-10])
Example #12
def test_compute_ts_map(input_dataset):
    """Minimal test of compute_ts_image"""
    spatial_model = GaussianSpatialModel(sigma="0.1 deg")
    spectral_model = PowerLawSpectralModel(index=2)
    model = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model)
    ts_estimator = TSMapEstimator(
        model=model, threshold=1, kernel_width="1 deg", selection_optional=[]
    )
    result = ts_estimator.run(input_dataset)

    assert_allclose(result["ts"].data[99, 99], 1704.23, rtol=1e-2)
    assert_allclose(result["niter"].data[99, 99], 8)
    assert_allclose(result["flux"].data[99, 99], 1.02e-09, rtol=1e-2)
    assert_allclose(result["flux_err"].data[99, 99], 3.84e-11, rtol=1e-2)

    assert result["flux"].unit == u.Unit("cm-2s-1")
    assert result["flux_err"].unit == u.Unit("cm-2s-1")

    # Check mask is correctly taken into account
    assert np.isnan(result["ts"].data[30, 40])
Example #13
    def __init__(
        self,
        regions,
        e_edges=None,
        spectrum=None,
        n_sigma=1.0,
        n_sigma_ul=3.0,
        selection_optional="all",
    ):
        self.regions = regions
        self.n_sigma = n_sigma
        self.n_sigma_ul = n_sigma_ul

        self.e_edges = u.Quantity(e_edges) if e_edges is not None else None

        if spectrum is None:
            spectrum = PowerLawSpectralModel()

        self.spectrum = spectrum
        self.selection_optional = selection_optional
Example #14
def test_compute_ts_map_psf(fermi_dataset):
    spatial_model = PointSpatialModel()
    spectral_model = PowerLawSpectralModel(amplitude="1e-22 cm-2 s-1 keV-1")
    model = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model)

    estimator = TSMapEstimator(model=model, kernel_width="1 deg", selection_optional="all")
    result = estimator.run(fermi_dataset)

    assert_allclose(result["ts"].data[0, 29, 29], 833.38, atol=0.1)
    assert_allclose(result["niter"].data[0, 29, 29], 7)
    assert_allclose(result["flux"].data[0, 29, 29], 1.34984e-09, rtol=1e-3)

    assert_allclose(result["flux_err"].data[0, 29, 29], 7.93751176e-11, rtol=1e-3)
    assert_allclose(result["flux_errp"].data[0, 29, 29], 7.948953e-11, rtol=1e-3)
    assert_allclose(result["flux_errn"].data[0, 29, 29], 7.508168e-11, rtol=1e-3)
    assert_allclose(result["flux_ul"].data[0, 29, 29], 1.63222157e-10, rtol=1e-3)

    assert result["flux"].unit == u.Unit("cm-2s-1")
    assert result["flux_err"].unit == u.Unit("cm-2s-1")
    assert result["flux_ul"].unit == u.Unit("cm-2s-1")
Example #15
    def spectral_model(self, which="point"):
        """Spectral model (`~gammapy.modeling.models.PowerLawSpectralModel`).

        * ``which="point"`` -- Spectral model under the point source assumption.
        * ``which="extended"`` -- Spectral model under the extended source assumption.
          Only available for some sources; raises `ValueError` if not available.
        """
        idx = self._get_idx(which)

        pars, errs = {}, {}
        pars["amplitude"] = self.data[f"spec{idx}_dnde"]
        errs["amplitude"] = self.data[f"spec{idx}_dnde_err"]
        pars["index"] = -self.data[f"spec{idx}_index"]
        errs["index"] = self.data[f"spec{idx}_index_err"]
        pars["reference"] = "7 TeV"

        model = PowerLawSpectralModel(**pars)
        model.parameters.set_error(**errs)

        return model
Example #16
def test_flux_point_dataset_serialization(tmp_path):
    path = "$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits"
    data = FluxPoints.read(path)
    data.table["e_ref"] = data.e_ref.to("TeV")
    spectral_model = PowerLawSpectralModel(index=2.3,
                                           amplitude="2e-13 cm-2 s-1 TeV-1",
                                           reference="1 TeV")
    model = SkyModel(spectral_model=spectral_model, name="test_model")
    dataset = FluxPointsDataset(model, data, name="test_dataset")

    Datasets([dataset]).write(tmp_path, prefix="tmp")
    datasets = Datasets.read(tmp_path / "tmp_datasets.yaml",
                             tmp_path / "tmp_models.yaml")
    new_dataset = datasets[0]
    assert_allclose(new_dataset.data.table["dnde"], dataset.data.table["dnde"],
                    1e-4)
    if dataset.mask_fit is None:
        assert np.all(new_dataset.mask_fit == dataset.mask_safe)
    assert np.all(new_dataset.mask_safe == dataset.mask_safe)
    assert new_dataset.name == "test_dataset"
Example #17
    def test_verify_npred(self):
        """Verifying npred is preserved during the stacking"""
        pwl = SkyModel(
            spectral_model=PowerLawSpectralModel(index=2,
                                                 amplitude=2e-11 *
                                                 u.Unit("cm-2 s-1 TeV-1"),
                                                 reference=1 * u.TeV))

        self.stacked_dataset.models = pwl

        npred_stacked = self.stacked_dataset.npred_signal().data
        npred_stacked[~self.stacked_dataset.mask_safe.data] = 0
        npred_summed = np.zeros_like(npred_stacked)

        for dataset in self.datasets:
            dataset.models = pwl
            npred_summed[dataset.mask_safe] += dataset.npred_signal().data[
                dataset.mask_safe]

        assert_allclose(npred_stacked, npred_summed, rtol=1e-6)
Example #18
    def test_map_properties(self, map_flux_estimate):
        model = PowerLawSpectralModel(amplitude="1e-10 cm-2s-1TeV-1", index=2)
        fe = FluxEstimate(data=map_flux_estimate,
                          reference_spectral_model=model)

        assert fe.dnde.unit == u.Unit("cm-2s-1TeV-1")
        assert_allclose(fe.dnde.quantity.value[:, 2, 2], [1e-9, 1e-11])
        assert_allclose(fe.dnde_err.quantity.value[:, 2, 2], [1e-10, 1e-12])
        assert_allclose(fe.dnde_errn.quantity.value[:, 2, 2], [2e-10, 2e-12])
        assert_allclose(fe.dnde_errp.quantity.value[:, 2, 2],
                        [1.5e-10, 1.5e-12])
        assert_allclose(fe.dnde_ul.quantity.value[:, 2, 2], [2e-9, 2e-11])

        assert fe.e2dnde.unit == u.Unit("TeV cm-2s-1")
        assert_allclose(fe.e2dnde.quantity.value[:, 2, 2], [1e-10, 1e-10])
        assert_allclose(fe.e2dnde_err.quantity.value[:, 2, 2], [1e-11, 1e-11])
        assert_allclose(fe.e2dnde_errn.quantity.value[:, 2, 2], [2e-11, 2e-11])
        assert_allclose(fe.e2dnde_errp.quantity.value[:, 2, 2],
                        [1.5e-11, 1.5e-11])
        assert_allclose(fe.e2dnde_ul.quantity.value[:, 2, 2], [2e-10, 2e-10])

        assert fe.flux.unit == u.Unit("cm-2s-1")
        assert_allclose(fe.flux.quantity.value[:, 2, 2], [9e-10, 9e-11])
        assert_allclose(fe.flux_err.quantity.value[:, 2, 2], [9e-11, 9e-12])
        assert_allclose(fe.flux_errn.quantity.value[:, 2, 2],
                        [1.8e-10, 1.8e-11])
        assert_allclose(fe.flux_errp.quantity.value[:, 2, 2],
                        [1.35e-10, 1.35e-11])
        assert_allclose(fe.flux_ul.quantity.value[:, 2, 2], [1.8e-9, 1.8e-10])

        assert fe.eflux.unit == u.Unit("TeV cm-2s-1")
        assert_allclose(fe.eflux.quantity.value[:, 2, 2],
                        [2.302585e-10, 2.302585e-10])
        assert_allclose(fe.eflux_err.quantity.value[:, 2, 2],
                        [2.302585e-11, 2.302585e-11])
        assert_allclose(fe.eflux_errn.quantity.value[:, 2, 2],
                        [4.60517e-11, 4.60517e-11])
        assert_allclose(fe.eflux_errp.quantity.value[:, 2, 2],
                        [3.4538775e-11, 3.4538775e-11])
        assert_allclose(fe.eflux_ul.quantity.value[:, 2, 2],
                        [4.60517e-10, 4.60517e-10])
Example #19
    def __init__(
        self,
        model=None,
        kernel_width="0.2 deg",
        downsampling_factor=None,
        n_sigma=1,
        n_sigma_ul=2,
        threshold=None,
        rtol=0.01,
        selection_optional=None,
        energy_edges=None,
        sum_over_energy_groups=True,
        n_jobs=None,
    ):
        self.kernel_width = Angle(kernel_width)

        if model is None:
            model = SkyModel(
                spectral_model=PowerLawSpectralModel(),
                spatial_model=PointSpatialModel(),
                name="ts-kernel",
            )

        self.model = model
        self.downsampling_factor = downsampling_factor
        self.n_sigma = n_sigma
        self.n_sigma_ul = n_sigma_ul
        self.threshold = threshold
        self.rtol = rtol
        self.n_jobs = n_jobs
        self.sum_over_energy_groups = sum_over_energy_groups

        self.selection_optional = selection_optional
        self.energy_edges = energy_edges
        self._flux_estimator = BrentqFluxEstimator(
            rtol=self.rtol,
            n_sigma=self.n_sigma,
            n_sigma_ul=self.n_sigma_ul,
            selection_optional=selection_optional,
            ts_threshold=threshold,
        )
Example #20
    def __init__(
        self,
        correlation_radius="0.1 deg",
        n_sigma=1,
        n_sigma_ul=2,
        selection_optional=None,
        energy_edges=None,
        correlate_off=True,
        spectral_model=None,
    ):
        self.correlation_radius = correlation_radius
        self.n_sigma = n_sigma
        self.n_sigma_ul = n_sigma_ul
        self.selection_optional = selection_optional
        self.energy_edges = energy_edges
        self.correlate_off = correlate_off

        if spectral_model is None:
            spectral_model = PowerLawSpectralModel(index=2)

        self.spectral_model = spectral_model
Example #21
    def _dnde_from_flux(flux, model, e_ref, e_min, e_max, pwl_approx):
        """Helper for `to_sed_type`.

        Compute dnde under the assumption that the measured flux equals
        the flux predicted by the model.
        """
        dnde_model = model(e_ref)

        if pwl_approx:
            index = model.spectral_index(e_ref)
            flux_model = PowerLawSpectralModel.evaluate_integral(
                emin=e_min,
                emax=e_max,
                index=index,
                reference=e_ref,
                amplitude=dnde_model,
            )
        else:
            flux_model = model.integral(e_min, e_max, intervals=True)

        return dnde_model * (flux / flux_model)
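The rescaling in the return statement can be read as a short worked example (numbers invented for illustration): if the model predicts dnde_model = 1e-12 cm-2 s-1 TeV-1 at e_ref and an integral flux flux_model = 5e-13 cm-2 s-1 over [e_min, e_max], while the measured flux is 1e-12 cm-2 s-1, the returned dnde is 1e-12 * (1e-12 / 5e-13) = 2e-12 cm-2 s-1 TeV-1; the spectral shape of the model is kept and only the normalisation is scaled to match the measured flux.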
Example #22
def test_contributes():
    center_sky = SkyCoord(3, 4, unit="deg", frame="galactic")
    circle_sky_12 = CircleSkyRegion(center=center_sky, radius=1 * u.deg)
    axis = MapAxis.from_edges(np.logspace(-1, 1, 3), unit=u.TeV, name="energy")
    geom = WcsGeom.create(skydir=(3, 4),
                          npix=(5, 4),
                          frame="galactic",
                          axes=[axis])

    mask = geom.region_mask([circle_sky_12])
    spatial_model = GaussianSpatialModel(lon_0="0 deg",
                                         lat_0="0 deg",
                                         sigma="0.9 deg",
                                         frame="galactic")
    assert spatial_model.evaluation_region.height == 2 * spatial_model.evaluation_radius
    model4 = SkyModel(
        spatial_model=spatial_model,
        spectral_model=PowerLawSpectralModel(),
        name="source-4",
    )
    assert model4.contributes(mask, margin=0 * u.deg)
Example #23
    def setup(self):
        path = "$GAMMAPY_DATA/joint-crab/spectra/hess/"
        obs1 = SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23523.fits")
        obs2 = SpectrumDatasetOnOff.from_ogip_files(path + "pha_obs23592.fits")
        self.obs_list = [obs1, obs2]

        self.pwl = PowerLawSpectralModel(index=2,
                                         amplitude=1e-12 *
                                         u.Unit("cm-2 s-1 TeV-1"),
                                         reference=1 * u.TeV)

        self.ecpl = ExpCutoffPowerLawSpectralModel(
            index=2,
            amplitude=1e-12 * u.Unit("cm-2 s-1 TeV-1"),
            reference=1 * u.TeV,
            lambda_=0.1 / u.TeV,
        )

        # Example fit for one observation
        self.obs_list[0].model = self.pwl
        self.fit = Fit(self.obs_list[0])
Example #24
    def spectral_model(self):
        """Best fit spectral model (`~gammapy.modeling.models.SpectralModel`)."""
        spec_type = self.data["SpectrumType"].strip()

        pars, errs = {}, {}
        pars["amplitude"] = self.data["Flux_Density"]
        errs["amplitude"] = self.data["Unc_Flux_Density"]
        pars["reference"] = self.data["Pivot_Energy"]

        if spec_type == "PowerLaw":
            pars["index"] = self.data["Spectral_Index"]
            errs["index"] = self.data["Unc_Spectral_Index"]
            model = PowerLawSpectralModel(**pars)
        elif spec_type == "PLExpCutoff":
            pars["index"] = self.data["Spectral_Index"]
            pars["ecut"] = self.data["Cutoff"]
            errs["index"] = self.data["Unc_Spectral_Index"]
            errs["ecut"] = self.data["Unc_Cutoff"]
            model = ExpCutoffPowerLaw3FGLSpectralModel(**pars)
        elif spec_type == "LogParabola":
            pars["alpha"] = self.data["Spectral_Index"]
            pars["beta"] = self.data["beta"]
            errs["alpha"] = self.data["Unc_Spectral_Index"]
            errs["beta"] = self.data["Unc_beta"]
            model = LogParabolaSpectralModel(**pars)
        elif spec_type == "PLSuperExpCutoff":
            # TODO: why convert to GeV here? Remove?
            pars["reference"] = pars["reference"].to("GeV")
            pars["index_1"] = self.data["Spectral_Index"]
            pars["index_2"] = self.data["Exp_Index"]
            pars["ecut"] = self.data["Cutoff"].to("GeV")
            errs["index_1"] = self.data["Unc_Spectral_Index"]
            errs["index_2"] = self.data["Unc_Exp_Index"]
            errs["ecut"] = self.data["Unc_Cutoff"].to("GeV")
            model = SuperExpCutoffPowerLaw3FGLSpectralModel(**pars)
        else:
            raise ValueError(f"Invalid spec_type: {spec_type!r}")

        model.parameters.set_parameter_errors(errs)
        return model
Example #25
def models(backgrounds):
    spatial_model = GaussianSpatialModel(lon_0="3 deg",
                                         lat_0="4 deg",
                                         sigma="3 deg",
                                         frame="galactic")
    spectral_model = PowerLawSpectralModel(index=2,
                                           amplitude="1e-11 cm-2 s-1 TeV-1",
                                           reference="1 TeV")
    model1 = SkyModel(
        spatial_model=spatial_model,
        spectral_model=spectral_model,
        name="source-1",
    )

    model2 = model1.copy(name="source-2")
    model2.datasets_names = ["dataset-1"]
    model3 = model1.copy(name="source-3")
    model3.datasets_names = "dataset-2"
    model3.spatial_model = PointSpatialModel()
    model3.parameters.freeze_all()
    models = Models([model1, model2, model3] + backgrounds)
    return models
Example #26
def test_flux_point_dataset_serialization(tmp_path):
    path = "$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits"
    data = FluxPoints.read(path)
    data.table["e_ref"] = data.e_ref.to("TeV")
    # TODO: remove this duplicate definition once the model is redefined as a SkyModel
    spatial_model = ConstantSpatialModel()
    spectral_model = PowerLawSpectralModel(index=2.3,
                                           amplitude="2e-13 cm-2 s-1 TeV-1",
                                           reference="1 TeV")
    model = SkyModel(spatial_model, spectral_model, name="test_model")
    dataset = FluxPointsDataset(SkyModels([model]), data, name="test_dataset")

    Datasets([dataset]).to_yaml(tmp_path, prefix="tmp")
    datasets = Datasets.from_yaml(tmp_path / "tmp_datasets.yaml",
                                  tmp_path / "tmp_models.yaml")
    new_dataset = datasets[0]
    assert_allclose(new_dataset.data.table["dnde"], dataset.data.table["dnde"],
                    1e-4)
    if dataset.mask_fit is None:
        assert np.all(new_dataset.mask_fit == dataset.mask_safe)
    assert np.all(new_dataset.mask_safe == dataset.mask_safe)
    assert new_dataset.name == "test_dataset"
Example #27
    def __init__(
        self,
        scales=None,
        kernel=Gaussian2DKernel,
        spectrum=None,
        method="lima",
        threshold=5,
        energy_edges=None,
    ):
        if spectrum is None:
            spectrum = PowerLawSpectralModel()

        self.spectrum = spectrum

        if scales is None:
            scales = self.get_scales(n_scales=9, kernel=kernel)

        self.scales = scales
        self.kernel = kernel
        self.threshold = threshold
        self.method = method
        self.energy_edges = energy_edges
Example #28
    def __init__(
        self,
        model=None,
        kernel_width="0.2 deg",
        downsampling_factor=None,
        method="root brentq",
        error_method="covar",
        error_sigma=1,
        ul_method="covar",
        ul_sigma=2,
        threshold=None,
        rtol=0.001,
    ):
        if method not in ["root brentq", "root newton", "leastsq iter"]:
            raise ValueError(f"Not a valid method: '{method}'")

        if error_method not in ["covar", "conf"]:
            raise ValueError(f"Not a valid error method '{error_method}'")

        self.kernel_width = Angle(kernel_width)

        if model is None:
            model = SkyModel(
                spectral_model=PowerLawSpectralModel(),
                spatial_model=PointSpatialModel(),
            )

        self.model = model
        self.downsampling_factor = downsampling_factor

        self.parameters = {
            "method": method,
            "error_method": error_method,
            "error_sigma": error_sigma,
            "ul_method": ul_method,
            "ul_sigma": ul_sigma,
            "threshold": threshold,
            "rtol": rtol,
        }
Example #29
def test_significance_map_estimator_map_dataset_exposure(simple_dataset):
    simple_dataset.exposure += 1e10 * u.cm**2 * u.s
    axis = simple_dataset.exposure.geom.axes[0]
    simple_dataset.psf = PSFMap.from_gauss(axis, sigma="0.05 deg")

    model = SkyModel(
        PowerLawSpectralModel(amplitude="1e-9 cm-2 s-1 TeV-1"),
        GaussianSpatialModel(lat_0=0.0 * u.deg,
                             lon_0=0.0 * u.deg,
                             sigma=0.1 * u.deg,
                             frame="icrs"),
        name="sky_model",
    )

    simple_dataset.models = [model]
    simple_dataset.npred()

    estimator = ExcessMapEstimator(0.1 * u.deg, selection_optional="all")
    result = estimator.run(simple_dataset)

    assert_allclose(result["npred_excess"].data.sum(), 19733.602, rtol=1e-3)
    assert_allclose(result["sqrt_ts"].data[0, 10, 10], 4.217129, rtol=1e-3)
Example #30
def test_integrate_geom():
    model = GaussianSpatialModel(lon_0="0 deg",
                                 lat_0="0 deg",
                                 sigma=0.1 * u.deg,
                                 frame="icrs")
    spectral_model = PowerLawSpectralModel(amplitude="1e-11 cm-2 s-1 TeV-1")
    sky_model = SkyModel(spectral_model=spectral_model, spatial_model=model)

    center = SkyCoord("0d", "0d", frame="icrs")
    radius = 0.3 * u.deg
    region = CircleSkyRegion(center, radius)

    axis = MapAxis.from_energy_bounds("1 TeV",
                                      "10 TeV",
                                      nbin=3,
                                      name="energy_true")
    geom = RegionGeom(region=region, axes=[axis])

    integral = sky_model.integrate_geom(geom).data

    assert_allclose(integral / 1e-12, [[[5.299]], [[2.460]], [[1.142]]],
                    rtol=1e-3)