Example #1
def integrate_geom(geom):
    """Evaluate model."""
    return Map.from_geom(geom=geom, data=1)
Example #2
def test_significance_map_estimator_map_dataset_on_off_with_correlation(
    simple_dataset_on_off,
):
    exposure = simple_dataset_on_off.exposure
    exposure.data += 1e6

    # First without exposure
    simple_dataset_on_off.exposure = None

    estimator = ExcessMapEstimator(0.11 * u.deg,
                                   energy_edges=[0.1, 1, 10] * u.TeV,
                                   correlate_off=True)
    result = estimator.run(simple_dataset_on_off)

    assert result["counts"].data.shape == (2, 20, 20)
    assert_allclose(result["counts"].data[:, 10, 10], 194)
    assert_allclose(result["excess"].data[:, 10, 10], 97)
    assert_allclose(result["background"].data[:, 10, 10], 97)
    assert_allclose(result["sqrt_ts"].data[:, 10, 10], 5.741116, atol=1e-5)
    assert_allclose(result["flux"].data[:, 10, 10], np.nan)

    # Test with exposure
    simple_dataset_on_off.exposure = exposure
    estimator_image = ExcessMapEstimator(0.11 * u.deg,
                                         energy_edges=[0.1, 1] * u.TeV,
                                         correlate_off=True)

    result_image = estimator_image.run(simple_dataset_on_off)
    assert result_image["counts"].data.shape == (1, 20, 20)
    assert_allclose(result_image["counts"].data[0, 10, 10], 194)
    assert_allclose(result_image["excess"].data[0, 10, 10], 97)
    assert_allclose(result_image["background"].data[0, 10, 10], 97)
    assert_allclose(result_image["sqrt_ts"].data[0, 10, 10],
                    5.741116,
                    atol=1e-3)
    assert_allclose(result_image["flux"].data[:, 10, 10], 9.7e-9, atol=1e-5)

    # Test with mask fit
    mask_fit = Map.from_geom(
        simple_dataset_on_off._geom,
        data=np.ones(simple_dataset_on_off.counts.data.shape, dtype=bool),
    )
    mask_fit.data[:, :, 10] = False
    mask_fit.data[:, 10, :] = False
    simple_dataset_on_off.mask_fit = mask_fit

    estimator_image = ExcessMapEstimator(0.11 * u.deg,
                                         apply_mask_fit=True,
                                         correlate_off=True)

    result_image = estimator_image.run(simple_dataset_on_off)
    assert result_image["counts"].data.shape == (1, 20, 20)

    assert_allclose(result_image["sqrt_ts"].data[0, 10, 10], np.nan, atol=1e-3)
    assert_allclose(result_image["counts"].data[0, 10, 10], np.nan)
    assert_allclose(result_image["excess"].data[0, 10, 10], np.nan)
    assert_allclose(result_image["background"].data[0, 10, 10], np.nan)
    assert_allclose(result_image["sqrt_ts"].data[0, 9, 9], 7.186745, atol=1e-3)
    assert_allclose(result_image["counts"].data[0, 9, 9], 304)
    assert_allclose(result_image["excess"].data[0, 9, 9], 152)
    assert_allclose(result_image["background"].data[0, 9, 9], 152)

    assert result_image["flux"].unit == u.Unit("cm-2s-1")
    assert_allclose(result_image["flux"].data[0, 9, 9], 1.52e-8, rtol=1e-3)

    simple_dataset_on_off.psf = None

    # TODO: this has never worked...
    model = SkyModel(
        PowerLawSpectralModel(amplitude="1e-9 cm-2 s-1 TeV-1"),
        GaussianSpatialModel(lat_0=0.0 * u.deg,
                             lon_0=0.0 * u.deg,
                             sigma=0.1 * u.deg,
                             frame="icrs"),
        name="sky_model",
    )

    simple_dataset_on_off.models = [model]

    estimator_mod = ExcessMapEstimator(0.11 * u.deg,
                                       apply_mask_fit=False,
                                       correlate_off=True)
    result_mod = estimator_mod.run(simple_dataset_on_off)
    assert result_mod["counts"].data.shape == (1, 20, 20)

    assert_allclose(result_mod["sqrt_ts"].data[0, 10, 10], 8.899278, atol=1e-3)

    assert_allclose(result_mod["counts"].data[0, 10, 10], 388)
    assert_allclose(result_mod["excess"].data[0, 10, 10], 190.68057)
    assert_allclose(result_mod["background"].data[0, 10, 10], 197.31943)

    assert result_mod["flux"].unit == "cm-2s-1"
    assert_allclose(result_mod["flux"].data[0, 10, 10],
                    1.906806e-08,
                    rtol=1e-3)
    assert_allclose(result_mod["flux"].data.sum(), 5.920642e-06, rtol=1e-3)

    spectral_model = PowerLawSpectralModel(index=15)
    estimator_mod = ExcessMapEstimator(
        0.11 * u.deg,
        apply_mask_fit=False,
        correlate_off=True,
        spectral_model=spectral_model,
    )
    result_mod = estimator_mod.run(simple_dataset_on_off)

    assert result_mod["flux"].unit == "cm-2s-1"
    assert_allclose(result_mod["flux"].data.sum(), 5.920642e-06, rtol=1e-3)

    reco_exposure = estimate_exposure_reco_energy(
        simple_dataset_on_off, spectral_model=spectral_model)
    assert_allclose(reco_exposure.data.sum(), 7.977796e+12, rtol=0.001)
Example #3
plt.title("Phasogram with angular cut of {}".format(on_radius))

# ## Phase-resolved map

# Now that the phases are computed, we want to do a phase-resolved sky map: a map of the ON-phase events minus alpha times the OFF-phase events. Alpha is the ratio between the size of the ON-phase zone (here 0.1) and the OFF-phase zone (0.3).
# It's a map of the excess events in phase, which are the pulsed events.

# In[ ]:

geom = WcsGeom.create(binsz=0.02 * u.deg, skydir=pos_target, width="5 deg")

# Let's create an ON-map and an OFF-map:

# In[ ]:

on_map = Map.from_geom(geom)
off_map = Map.from_geom(geom)

events_vela_on = events_vela.select_parameter("PHASE", on_phase_range)
events_vela_off = events_vela.select_parameter("PHASE", off_phase_range)

# In[ ]:

fill_map_counts(on_map, events_vela_on)
fill_map_counts(off_map, events_vela_off)

# Defining alpha as the ratio of the ON and OFF phase zones
alpha = (on_phase_range[1] - on_phase_range[0]) / (off_phase_range[1] -
                                                   off_phase_range[0])

# Create and fill excess map
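
A minimal sketch of the step announced by the last comment, assuming the `geom`, `on_map`, `off_map`, and `alpha` defined in the cells above:

# In[ ]:

excess_map = Map.from_geom(geom)
excess_map.data = on_map.data - alpha * off_map.data
excess_map.smooth(width=0.2 * u.deg, kernel="gauss").plot(add_cbar=True)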
Example #4
    def estimate_excess_map(self, dataset):
        """Estimate excess and ts maps for single dataset.

        If exposure is defined, a flux map is also computed.

        Parameters
        ----------
        dataset : `MapDataset`
            Map dataset
        """

        pixel_size = np.mean(np.abs(dataset.counts.geom.wcs.wcs.cdelt))
        size = self.correlation_radius.deg / pixel_size
        kernel = Tophat2DKernel(size)

        geom = dataset.counts.geom

        if self.apply_mask_fit:
            mask = dataset.mask
        elif dataset.mask_safe:
            mask = dataset.mask_safe
        else:
            mask = np.ones(dataset.data_shape, dtype=bool)

        counts_stat = convolved_map_dataset_counts_statistics(dataset, kernel, mask)

        n_on = Map.from_geom(geom, data=counts_stat.n_on)
        bkg = Map.from_geom(geom, data=counts_stat.n_on - counts_stat.n_sig)
        excess = Map.from_geom(geom, data=counts_stat.n_sig)

        result = {"counts": n_on, "background": bkg, "excess": excess}

        tsmap = Map.from_geom(geom, data=counts_stat.ts)
        sqrt_ts = Map.from_geom(geom, data=counts_stat.sqrt_ts)
        result.update({"ts": tsmap, "sqrt_ts": sqrt_ts})

        err = Map.from_geom(geom, data=counts_stat.error * self.n_sigma)
        result.update({"err": err})

        if dataset.exposure:
            reco_exposure = estimate_exposure_reco_energy(dataset)
            with np.errstate(invalid="ignore", divide="ignore"):
                flux = excess / reco_exposure
            flux.quantity = flux.quantity.to("1 / (cm2 s)")
        else:
            flux = Map.from_geom(
                geom=dataset.counts.geom, data=np.nan * np.ones(dataset.data_shape)
            )
        result.update({"flux": flux})

        if "errn-errp" in self.selection_optional:
            errn = Map.from_geom(geom, data=counts_stat.compute_errn(self.n_sigma))
            errp = Map.from_geom(geom, data=counts_stat.compute_errp(self.n_sigma))
            result.update({"errn": errn, "errp": errp})

        if "ul" in self.selection_optional:
            ul = Map.from_geom(
                geom, data=counts_stat.compute_upper_limit(self.n_sigma_ul)
            )
            result.update({"ul": ul})

        # return nan values outside mask
        for key in result:
            result[key].data[~mask] = np.nan

        return result
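
A usage sketch: in this API era the method is not called directly but through the estimator's `run` (import path per recent gammapy versions; `dataset` is a hypothetical `MapDataset`):

import astropy.units as u
from gammapy.estimators import ExcessMapEstimator

estimator = ExcessMapEstimator(0.1 * u.deg)
result = estimator.run(dataset)  # `dataset`: hypothetical MapDataset with counts set
print(result["sqrt_ts"])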
Example #5
    def integrate_geom(self, geom, oversampling_factor=None):
        """Integrate model on `~gammapy.maps.Geom` or `~gammapy.maps.RegionGeom`.

        Integration is performed via a simple rectangle approximation: the model value
        at the pixel center is multiplied by the pixel solid angle.
        An oversampling factor can be used for precision. By default this parameter is
        None, and the factor is estimated automatically from the model's minimal
        evaluation bin size.

        For a RegionGeom, the model is integrated on a tangent WCS projection in the region.

        Parameters
        ----------
        geom : `~gammapy.maps.WcsGeom` or `~gammapy.maps.RegionGeom`
            The geom on which the integration is performed.
        oversampling_factor : int or None
            The oversampling factor to use for integration.
            Default is None: the factor is estimated from the model's minimal bin size.

        Returns
        -------
        `~gammapy.maps.Map` or `~gammapy.maps.RegionNDMap`
            Map containing the integral value in each spatial bin.
        """
        wcs_geom = geom
        mask = None

        if geom.is_region:
            wcs_geom = geom.to_wcs_geom().to_image()

        result = Map.from_geom(geom=wcs_geom)

        pix_scale = np.max(wcs_geom.pixel_scales.to_value("deg"))
        if oversampling_factor is None:
            if self.evaluation_bin_size_min is not None:
                res_scale = self.evaluation_bin_size_min.to_value("deg")
                oversampling_factor = int(np.ceil(pix_scale / res_scale))
            else:
                oversampling_factor = 1

        if oversampling_factor > 1:
            if self.evaluation_radius is not None:
                # Is it still needed?
                width = 2 * np.maximum(self.evaluation_radius.to_value("deg"),
                                       pix_scale)
                wcs_geom = wcs_geom.cutout(self.position, width)

            upsampled_geom = wcs_geom.upsample(oversampling_factor)

            # assume the upsampled solid angles are approximately factor**2 smaller
            values = self.evaluate_geom(
                upsampled_geom) / oversampling_factor**2
            upsampled = Map.from_geom(upsampled_geom, unit=values.unit)
            upsampled += values

            if geom.is_region:
                mask = geom.contains(upsampled_geom.get_coord()).astype('int')

            integrated = upsampled.downsample(oversampling_factor,
                                              preserve_counts=True,
                                              weights=mask)

            # Finally stack result
            result.unit = integrated.unit
            result.stack(integrated)
        else:
            values = self.evaluate_geom(wcs_geom)
            result.unit = values.unit
            result += values

        result *= result.geom.solid_angle()

        if geom.is_region:
            mask = result.geom.region_mask([geom.region])
            result = Map.from_geom(geom,
                                   data=np.sum(result.data[mask]),
                                   unit=result.unit)
        return result
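
A hedged usage sketch for `integrate_geom`, assuming a normalized Gaussian model and the keyword names of recent gammapy versions:

from gammapy.maps import WcsGeom
from gammapy.modeling.models import GaussianSpatialModel

model = GaussianSpatialModel(lon_0="0 deg", lat_0="0 deg", sigma="0.2 deg", frame="galactic")
geom = WcsGeom.create(skydir=(0, 0), width=(2, 2), binsz=0.02, frame="galactic")
flux = model.integrate_geom(geom)
print(flux.data.sum())  # close to 1, since the model integrates to unity over the sky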
Example #6
    def run_region(self, kr, lon, lat, radius):
        # TODO: for now we have to read/create the allsky maps in each job
        # because we can't pickle <functools._lru_cache_wrapper object>;
        # move this back to __init__ when fixed

        # exposure
        exposure_hpx = Map.read(
            "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz"
        )
        exposure_hpx.unit = "cm2 s"

        # iem
        iem_fermi_extra = Map.read("data/gll_iem_v06_extrapolated.fits")
        # norm=1.1, tilt=0.03 see paper appendix A
        model_iem = SkyDiffuseCube(
            iem_fermi_extra, norm=1.1, tilt=0.03, name="iem_extrapolated"
        )

        # ROI
        roi_time = time()
        ROI_pos = SkyCoord(lon, lat, frame="galactic", unit="deg")
        width = 2 * (radius + self.psf_margin)

        # Counts
        counts = Map.create(
            skydir=ROI_pos,
            width=width,
            proj="CAR",
            coordsys="GAL",
            binsz=1 / 8.0,
            axes=[self.energy_axis],
            dtype=float,
        )
        counts.fill_by_coord(
            {"skycoord": self.events.radec, "energy": self.events.energy}
        )

        axis = MapAxis.from_nodes(
            counts.geom.axes[0].center, name="energy", unit="GeV", interp="log"
        )
        wcs = counts.geom.wcs
        geom = WcsGeom(wcs=wcs, npix=counts.geom.npix, axes=[axis])
        coords = counts.geom.get_coord()

        # expo
        data = exposure_hpx.interp_by_coord(coords)
        exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)

        # read PSF
        psf_kernel = PSFKernel.from_table_psf(
            self.psf, counts.geom, max_radius=self.psf_margin * u.deg
        )

        # Energy Dispersion
        e_true = exposure.geom.axes[0].edges
        e_reco = counts.geom.axes[0].edges
        edisp = EnergyDispersion.from_diagonal_response(e_true=e_true, e_reco=e_reco)

        # fit mask
        if coords["lon"].min() < 90 * u.deg and coords["lon"].max() > 270 * u.deg:
            coords["lon"][coords["lon"].value > 180] -= 360 * u.deg
        mask = (
            (coords["lon"] >= coords["lon"].min() + self.psf_margin * u.deg)
            & (coords["lon"] <= coords["lon"].max() - self.psf_margin * u.deg)
            & (coords["lat"] >= coords["lat"].min() + self.psf_margin * u.deg)
            & (coords["lat"] <= coords["lat"].max() - self.psf_margin * u.deg)
        )
        mask_fermi = WcsNDMap(counts.geom, mask)

        # IEM
        eval_iem = MapEvaluator(
            model=model_iem, exposure=exposure, psf=psf_kernel, edisp=edisp
        )
        bkg_iem = eval_iem.compute_npred()

        # ISO
        eval_iso = MapEvaluator(model=self.model_iso, exposure=exposure, edisp=edisp)
        bkg_iso = eval_iso.compute_npred()

        # merge iem and iso, only one local normalization is fitted
        background_total = bkg_iem + bkg_iso
        background_model = BackgroundModel(background_total)
        background_model.parameters["norm"].min = 0.0

        # Sources model
        in_roi = self.FHL3.positions.galactic.contained_by(wcs)
        FHL3_roi = []
        for ks in range(len(self.FHL3.table)):
            if in_roi[ks]:
                model = self.FHL3[ks].sky_model()
                model.spatial_model.parameters.freeze_all()  # freeze spatial
                model.spectral_model.parameters["amplitude"].min = 0.0
                if isinstance(model.spectral_model, PowerLawSpectralModel):
                    model.spectral_model.parameters["index"].min = 0.1
                    model.spectral_model.parameters["index"].max = 10.0
                else:
                    model.spectral_model.parameters["alpha"].min = 0.1
                    model.spectral_model.parameters["alpha"].max = 10.0

                FHL3_roi.append(model)
        model_total = SkyModels(FHL3_roi)

        # Dataset
        dataset = MapDataset(
            models=model_total,
            counts=counts,
            exposure=exposure,
            psf=psf_kernel,
            edisp=edisp,
            background_model=background_model,
            mask_fit=mask_fermi,
            name="3FHL_ROI_num" + str(kr),
        )
        cat_stat = dataset.stat_sum()

        datasets = Datasets([dataset])
        fit = Fit(datasets)
        results = fit.run(optimize_opts=self.optimize_opts)
        print("ROI_num", str(kr), "\n", results)
        fit_stat = datasets.stat_sum()

        if results.message != "Optimization failed.":
            datasets.write(path=Path(self.resdir), prefix=dataset.name, overwrite=True)
            np.save(
                self.resdir / f"3FHL_ROI_num{kr}_covariance.npy",
                results.parameters.covariance,
            )
            np.savez(
                self.resdir / f"3FHL_ROI_num{kr}_fit_infos.npz",
                message=results.message,
                stat=[cat_stat, fit_stat],
            )

            exec_time = time() - roi_time
            print("ROI", kr, " time (s): ", exec_time)

            for model in FHL3_roi:
                if (
                    self.FHL3[model.name].data["ROI_num"] == kr
                    and self.FHL3[model.name].data["Signif_Avg"] >= self.sig_cut
                ):
                    flux_points = FluxPointsEstimator(
                        datasets=datasets,
                        e_edges=self.El_flux,
                        source=model.name,
                        sigma_ul=2.0,
                    ).run()
                    filename = self.resdir / f"{model.name}_flux_points.fits"
                    flux_points.write(filename, overwrite=True)

            exec_time = time() - roi_time - exec_time
            print("ROI", kr, " Flux points time (s): ", exec_time)
Example #7
    def test_no_peak(self):
        image = Map.create(npix=(10, 5))
        image.data[3, 5] = 12

        table = find_peaks(image, threshold=12.1)
        assert len(table) == 0
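
For contrast, a companion sketch (same assumed `find_peaks` API) where the threshold falls below the hot pixel, so exactly one peak is returned:

    def test_one_peak(self):
        image = Map.create(npix=(10, 5))
        image.data[3, 5] = 12

        table = find_peaks(image, threshold=11.9)
        assert len(table) == 1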
Example #8
import numpy as np
import matplotlib.pyplot as plt

# In[ ]:

from astropy.io import fits
from astropy.coordinates import Angle, SkyCoord
from gammapy.maps import Map
from gammapy.detect import CWTKernels, CWT, CWTData

# ## CWT Algorithm

# First of all, we load the data to be analysed.

# In[ ]:

counts = Map.read("$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts.fits.gz")

background = Map.read(
    "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-background.fits.gz")

maps = {"counts": counts, "background": background}

# In[ ]:

fig = plt.figure(figsize=(15, 3))

ax = fig.add_subplot(121, projection=maps["counts"].geom.wcs)
maps["counts"].plot(vmax=8, ax=ax)

ax = fig.add_subplot(122, projection=maps["background"].geom.wcs)
maps["background"].plot(vmax=8, ax=ax)
Example #9
def test_map_repr(map_type, unit):
    m = Map.create(binsz=0.1, width=10.0, map_type=map_type, unit=unit)
    assert m.__class__.__name__ in repr(m)
Example #10
    def create(
        cls,
        geom,
        geom_irf=None,
        migra_axis=None,
        rad_axis=None,
        reference_time="2000-01-01",
        name="",
        **kwargs
    ):
        """Creates a MapDataset object with zero filled maps

        Parameters
        ----------
        geom: `~gammapy.maps.WcsGeom`
            Reference target geometry in reco energy, used for counts and background maps
        geom_irf: `~gammapy.maps.WcsGeom`
            Reference image geometry in true energy, used for IRF maps.
        migra_axis: `~gammapy.maps.MapAxis`
            Migration axis for the energy dispersion map
        rad_axis: `~gammapy.maps.MapAxis`
            Rad axis for the psf map
        name : str
            Name of the dataset.
        """
        geom_irf = geom_irf or geom.to_binsz(BINSZ_IRF)
        migra_axis = migra_axis or MIGRA_AXIS_DEFAULT
        rad_axis = rad_axis or RAD_AXIS_DEFAULT

        counts = Map.from_geom(geom, unit="")

        background = Map.from_geom(geom, unit="")
        background_model = BackgroundModel(background)

        energy_axis = geom_irf.get_axis_by_name("ENERGY")

        exposure_geom = geom.to_image().to_cube([energy_axis])
        exposure = Map.from_geom(exposure_geom, unit="m2 s")
        exposure_irf = Map.from_geom(geom_irf, unit="m2 s")

        mask_safe = np.zeros(geom.data_shape, dtype=bool)

        gti = GTI.create([] * u.s, [] * u.s, reference_time=reference_time)

        geom_migra = geom_irf.to_image().to_cube([migra_axis, energy_axis])
        edisp_map = Map.from_geom(geom_migra, unit="")
        loc = migra_axis.edges.searchsorted(1.0)
        edisp_map.data[:, loc, :, :] = 1.0
        edisp = EDispMap(edisp_map, exposure_irf)

        geom_rad = geom_irf.to_image().to_cube([rad_axis, energy_axis])
        psf_map = Map.from_geom(geom_rad, unit="sr-1")
        psf = PSFMap(psf_map, exposure_irf)

        return cls(
            counts=counts,
            exposure=exposure,
            psf=psf,
            edisp=edisp,
            background_model=background_model,
            gti=gti,
            mask_safe=mask_safe,
            name=name,
            **kwargs
        )
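
A usage sketch, assuming the gammapy era of this snippet (uppercase "ENERGY" axis name and the `coordsys` keyword):

from gammapy.maps import MapAxis, WcsGeom

energy_axis = MapAxis.from_bounds(0.1, 10, nbin=5, unit="TeV", interp="log", name="ENERGY")
geom = WcsGeom.create(skydir=(0, 0), width=(2, 2), binsz=0.05, coordsys="GAL", axes=[energy_axis])
dataset = MapDataset.create(geom, name="empty-dataset")  # zero-filled maps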
Example #11
exposure = maps["exposure"].sum_over_axes()
exposure.smooth(width=0.1 * u.deg).plot(stretch="sqrt", add_cbar=True)

# We can also compute an excess image just with a few lines of code:

# In[ ]:

excess = counts - background
excess.smooth(5).plot(stretch="sqrt", add_cbar=True)

# For a more realistic excess plot we can also take into account the diffuse galactic emission. For this tutorial we will load a Fermi diffuse model map that represents a small cutout for the Galactic center region:

# In[ ]:

diffuse_gal = Map.read("$GAMMAPY_DATA/fermi-3fhl-gc/gll_iem_v06_gc.fits.gz")

# In[ ]:

print("Diffuse image: ", diffuse_gal.geom)
print("counts: ", maps["counts"].geom)

# We see that the geometry of the images is completely different, so we need to apply our geometric configuration to the diffuse emission file:

# In[ ]:

coord = maps["counts"].geom.get_coord()

# NOTE: the snippet was truncated here; the continuation below is a
# reconstruction and the exact coordinate keys may differ by gammapy version
data = diffuse_gal.interp_by_coord(
    {"skycoord": coord.skycoord, "energy": coord["energy"]}
)
Example #12
def test_wcsndmap_read_ccube():
    counts = Map.read(
        "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts-cube.fits.gz")
    energy_axis = counts.geom.get_axis_by_name("energy")
    # for the 3FHL data the lower energy threshold should be at 10 GeV
    assert_allclose(energy_axis.edges.min().to_value("GeV"), 10, rtol=1e-3)
Example #13
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from gammapy.image.models.core import SkyEllipse
from gammapy.maps import Map, WcsGeom

model = SkyEllipse("2 deg", "2 deg", "1 deg", 0.8, "30 deg", frame="galactic")

m_geom = WcsGeom.create(binsz=0.01,
                        width=(3, 3),
                        skydir=(2, 2),
                        coordsys="GAL",
                        proj="AIT")
coords = m_geom.get_coord()
lon = coords.lon * u.deg
lat = coords.lat * u.deg
vals = model(lon, lat)
skymap = Map.from_geom(m_geom, data=vals.value)

_, ax, _ = skymap.smooth("0.05 deg").plot()

transform = ax.get_transform('galactic')
ax.scatter(2, 2, transform=transform, s=20, edgecolor='red', facecolor='red')
ax.text(1.7, 1.85, r"$(l_0, b_0)$", transform=transform, ha="center")
ax.plot([2, 2 + np.sin(np.pi / 6)], [2, 2 + np.cos(np.pi / 6)],
        color="r",
        transform=transform)
ax.vlines(x=2, color='r', linestyle='--', transform=transform, ymin=0, ymax=5)
ax.text(2.15, 2.3, r"$\theta$", transform=transform)

plt.show()
Example #14
    def read(cls, *args, **kwargs):
        """Read kernel Map from file."""
        psf_kernel_map = Map.read(*args, **kwargs)
        return cls(psf_kernel_map)
Example #15
"""Make 3FHL example files."""
from astropy.coordinates import SkyCoord, Angle
from gammapy.maps import Map

m = Map.read('gll_iem_v06.fits')
m2 = m.cutout(
    SkyCoord(0, 0, unit='deg', frame='galactic'),
    (Angle('6 deg'), Angle('11 deg')),
)
print(m2.geom)
m2.write('gll_iem_v06_cutout.fits', overwrite=True)
Example #16
def test_map_arithmetics(map_type):

    m1 = Map.create(binsz=0.1,
                    width=1.0,
                    map_type=map_type,
                    skydir=(0, 0),
                    unit="m2")

    m2 = Map.create(binsz=0.1,
                    width=1.0,
                    map_type=map_type,
                    skydir=(0, 0),
                    unit="m2")
    m2.data += 1.0

    # addition
    m1 += 1 * u.cm**2
    assert m1.unit == u.Unit("m2")
    assert_allclose(m1.data, 1e-4)

    m3 = m1 + m2
    assert m3.unit == u.Unit("m2")
    assert_allclose(m3.data, 1.0001)

    # subtraction
    m3 -= 1 * u.cm**2
    assert m3.unit == u.Unit("m2")
    assert_allclose(m3.data, 1.0)

    m3 = m2 - m1
    assert m3.unit == u.Unit("m2")
    assert_allclose(m3.data, 0.9999)

    m4 = Map.create(binsz=0.1,
                    width=1.0,
                    map_type=map_type,
                    skydir=(0, 0),
                    unit="s")
    m4.data += 1.0

    # multiplication
    m1 *= 1e4
    assert m1.unit == u.Unit("m2")
    assert_allclose(m1.data, 1)

    m5 = m2 * m4
    assert m5.unit == u.Unit("m2s")
    assert_allclose(m5.data, 1)

    # division
    m5 /= 10 * u.s
    assert m5.unit == u.Unit("m2")
    assert_allclose(m5.data, 0.1)

    # check unit consistency
    with pytest.raises(u.UnitConversionError):
        m5 += 1 * u.W

    m1.data *= 0.0
    m1.unit = ""
    m1 += 4
    assert m1.unit == u.Unit("")
    assert_allclose(m1.data, 4)
Example #17
def main():

    import os
    import sys
    import argparse

    import matplotlib.pyplot as plt

    from gammapy.maps import Map

    # NOTE: ImagePlotter is assumed to be provided by the surrounding module

    # Argument definition
    usage = "usage: %(prog)s [options]"
    description = "Plot a WCS-based image"

    parser = argparse.ArgumentParser(usage=usage, description=description)

    parser.add_argument("-i", "--input",type=argparse.FileType('r'),required=True,
                        help="Input file")

    parser.add_argument("-e", "--extension", type=str, default=None,
                        help="FITS HDU with map")
 
    parser.add_argument("--ebin",type=str,default=None,
                        help="Energy bin, integer or 'ALL'")

    parser.add_argument("--zscale",type=str, default='log',
                        help="Scaling for color scale")

    parser.add_argument("--zmin",type=float, default=None,
                        help="Minimum z-axis value")
    
    parser.add_argument("--zmax",type=float, default=None,
                        help="Maximum z-axis value")
    
    parser.add_argument("-o", "--output",type=argparse.FileType('w'),
                        help="Output file.  Leave blank for interactive.")
    
    

    # Parse the command line
    args = parser.parse_args(sys.argv[1:])

    # Get the map 
    themap = Map.read(args.input.name)

    outdata = []

    if args.ebin == "ALL":
        for i,data in enumerate(counts):
            ip =  ImagePlotter(themap)
            fig = plt.figure(i)
            im,ax = ip.plot(zscale=args.zscale, vmin=args.zmin, vmax=args.zmax)
            outdata.append(fig)

    elif args.ebin is None:
        ip =  ImagePlotter(themap.sum_over_axes())
        im,ax = ip.plot(zscale=args.zscale, vmin=args.zmin, vmax=args.zmax)
        outdata.append((im,ax))        
    else:
        try:
            ibin = int(args.ebin)
            ip =  ImagePlotter(themap)
            im,ax = ip.plot(zscale=args.zscale, vmin=args.zmin, vmax=args.zmax)
            outdata.append((im,ax))        
        except:
            raise ValueError("--ebin argument must be an integer or 'ALL'")


    if args.output is None:
        plt.show()
    else:
        if len(outdata) == 1:
            plt.savefig(args.output.name)
        else:
            base, ext = os.path.splitext(args.output.name)
            for i, fig in enumerate(outdata):
                fig.savefig("%s_%02i%s" % (base, i, ext))
Example #18
def make_all_models():
    """Make an instance of each model, for testing."""
    yield Model.create("ConstantSpatialModel", "spatial")
    map_constantmodel = Map.create(npix=(10, 20), unit="sr-1")
    yield Model.create("TemplateSpatialModel", "spatial", map=map_constantmodel)
    yield Model.create(
        "DiskSpatialModel", "spatial", lon_0="1 deg", lat_0="2 deg", r_0="3 deg"
    )
    yield Model.create("gauss", "spatial", lon_0="1 deg", lat_0="2 deg", sigma="3 deg")
    yield Model.create("PointSpatialModel", "spatial", lon_0="1 deg", lat_0="2 deg")
    yield Model.create(
        "ShellSpatialModel",
        "spatial",
        lon_0="1 deg",
        lat_0="2 deg",
        radius="3 deg",
        width="4 deg",
    )
    yield Model.create("ConstantSpectralModel", "spectral", const="99 cm-2 s-1 TeV-1")
    yield Model.create(
        "CompoundSpectralModel",
        "spectral",
        model1=Model.create("PowerLawSpectralModel", "spectral"),
        model2=Model.create("PowerLawSpectralModel", "spectral"),
        operator=np.add,
    )
    yield Model.create("PowerLawSpectralModel", "spectral")
    yield Model.create("PowerLawNormSpectralModel", "spectral")
    yield Model.create("PowerLaw2SpectralModel", "spectral")
    yield Model.create("ExpCutoffPowerLawSpectralModel", "spectral")
    yield Model.create("ExpCutoffPowerLawNormSpectralModel", "spectral")
    yield Model.create("ExpCutoffPowerLaw3FGLSpectralModel", "spectral")
    yield Model.create("SuperExpCutoffPowerLaw3FGLSpectralModel", "spectral")
    yield Model.create("SuperExpCutoffPowerLaw4FGLDR3SpectralModel", "spectral")
    yield Model.create("SuperExpCutoffPowerLaw4FGLSpectralModel", "spectral")
    yield Model.create("LogParabolaSpectralModel", "spectral")
    yield Model.create("LogParabolaNormSpectralModel", "spectral")
    yield Model.create(
        "TemplateSpectralModel", "spectral", energy=[1, 2] * u.cm, values=[3, 4] * u.cm
    )  # TODO: add unit validation?
    yield Model.create(
        "PiecewiseNormSpectralModel",
        "spectral",
        energy=[1, 2] * u.cm,
        norms=[3, 4] * u.cm,
    )
    yield Model.create("GaussianSpectralModel", "spectral")
    # TODO: yield Model.create("EBLAbsorptionNormSpectralModel")
    # TODO: yield Model.create("NaimaSpectralModel")
    # TODO: yield Model.create("ScaleSpectralModel")
    yield Model.create("ConstantTemporalModel", "temporal")
    yield Model.create("LinearTemporalModel", "temporal")
    yield Model.create("PowerLawTemporalModel", "temporal")
    yield Model.create("SineTemporalModel", "temporal")
    yield Model.create("LightCurveTemplateTemporalModel", "temporal", Table())
    yield Model.create(
        "SkyModel",
        spatial_model=Model.create("ConstantSpatialModel", "spatial"),
        spectral_model=Model.create("PowerLawSpectralModel", "spectral"),
    )
    m1 = Map.create(
        npix=(10, 20, 30), axes=[MapAxis.from_nodes([1, 2] * u.TeV, name="energy")]
    )
    yield Model.create("TemplateSpatialModel", "spatial", map=m1)
    m2 = Map.create(
        npix=(10, 20, 30), axes=[MapAxis.from_edges([1, 2] * u.TeV, name="energy")]
    )
    yield Model.create("TemplateNPredModel", map=m2)
Example #19
table_psf_2d = table_psf.table_psf_in_energy_band((emin, emax),
                                                  spectrum=spectrum)

# PSF kernel used for the model convolution
psf_kernel = PSFKernel.from_table_psf(table_psf_2d,
                                      geom2d,
                                      max_radius="0.3 deg")

# Now, the analysis proceeds as usual. Just take care to use the proper geometry in this case.

# ## Define a mask

# In[ ]:

mask = Map.from_geom(geom2d)

region = CircleSkyRegion(center=src_pos, radius=0.6 * u.deg)
mask.data = mask.geom.region_mask([region])

# ## Modeling the source
#
# This is the important thing to note in this analysis. Since modelling and fitting in `gammapy.maps` require a combination of spatial and spectral models, we have to use a dummy power law as the spectral model and fix its index to 2. Since we are interested only in the integral flux, we use the `PowerLaw2` model, which directly fits an integral flux.

# In[ ]:

spatial_model = SkyPointSource(lon_0="0.01 deg", lat_0="0.01 deg")
spectral_model = PowerLaw2(emin=emin,
                           emax=emax,
                           index=2.0,
                           amplitude="3e-12 cm-2 s-1")
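
The text above says the index is fixed to 2; a hedged sketch of how that and the model assembly might look with this API era:

# In[ ]:

spectral_model.parameters["index"].frozen = True  # keep the dummy index fixed at 2.0
model = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model)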
Example #20
    def estimate_flux_map(self, dataset):
        """Estimate flux and ts maps for single dataset

        Parameters
        ----------
        dataset : `MapDataset`
            Map dataset
        """
        # First create 2D map arrays
        counts = dataset.counts
        background = dataset.npred()

        exposure = estimate_exposure_reco_energy(dataset,
                                                 self.model.spectral_model)

        kernel = self.estimate_kernel(dataset)

        mask = self.estimate_mask_default(dataset, kernel.data)

        flux = self.estimate_flux_default(dataset,
                                          kernel.data,
                                          exposure=exposure)

        energy_axis = counts.geom.axes["energy"]
        flux_ref = self.model.spectral_model.integral(energy_axis.edges[0],
                                                      energy_axis.edges[-1])
        exposure_npred = (exposure * flux_ref).quantity.to_value("")

        wrap = functools.partial(
            _ts_value,
            counts=counts.data.astype(float),
            exposure=exposure_npred.astype(float),
            background=background.data.astype(float),
            kernel=kernel.data,
            norm=(flux.quantity / flux_ref).to_value(""),
            flux_estimator=self._flux_estimator,
        )

        x, y = np.where(np.squeeze(mask.data))
        positions = list(zip(x, y))

        if self.n_jobs is None:
            results = list(map(wrap, positions))
        else:
            with contextlib.closing(Pool(processes=self.n_jobs)) as pool:
                log.info("Using {} jobs to compute TS map.".format(
                    self.n_jobs))
                results = pool.map(wrap, positions)

            pool.join()

        result = {}

        j, i = zip(*positions)

        geom = counts.geom.squash(axis_name="energy")

        for name in self.selection_all:
            unit = 1 / exposure.unit if "flux" in name else ""
            m = Map.from_geom(geom=geom, data=np.nan, unit=unit)
            m.data[0, j, i] = [_[name.replace("flux", "norm")] for _ in results]
            if "flux" in name:
                m.data *= flux_ref.to_value(m.unit)
                m.quantity = m.quantity.to("1 / (cm2 s)")
            result[name] = m

        return result
Example #21
    def test_constant(self):
        image = Map.create(npix=(10, 5))

        table = find_peaks(image, threshold=3)
        assert len(table) == 0
Example #22
    def run(self, dataset):
        """
        Run TS map estimation.

        Requires a MapDataset with counts, exposure and background_model
        properly set to run.

        Parameters
        ----------
        dataset : `~gammapy.datasets.MapDataset`
            Input MapDataset.

        Returns
        -------
        maps : dict
             Dictionary containing result maps. Keys are:

                * ts : delta TS map
                * sqrt_ts : sqrt(delta TS), or significance map
                * flux : flux map
                * flux_err : symmetric error map
                * flux_ul : upper limit map

        """
        dataset_models = dataset.models
        if self.downsampling_factor:
            shape = dataset.counts.geom.to_image().data_shape
            pad_width = symmetric_crop_pad_width(shape, shape_2N(shape))[0]
            dataset = dataset.pad(pad_width).downsample(
                self.downsampling_factor)

        # TODO: add support for joint likelihood fitting to TSMapEstimator
        datasets = Datasets(dataset)

        if self.energy_edges is None:
            energy_axis = dataset.counts.geom.axes["energy"]
            energy_edges = u.Quantity(
                [energy_axis.edges[0], energy_axis.edges[-1]])
        else:
            energy_edges = self.energy_edges

        results = []

        for energy_min, energy_max in zip(energy_edges[:-1], energy_edges[1:]):
            sliced_dataset = datasets.slice_by_energy(energy_min,
                                                      energy_max)[0]

            if self.sum_over_energy_groups:
                sliced_dataset = sliced_dataset.to_image()

            sliced_dataset.models = dataset_models
            result = self.estimate_flux_map(sliced_dataset)
            results.append(result)

        result_all = {}

        for name in self.selection_all:
            map_all = Map.from_images(images=[_[name] for _ in results])

            if self.downsampling_factor:
                order = 0 if name == "niter" else 1
                map_all = map_all.upsample(factor=self.downsampling_factor,
                                           preserve_counts=False,
                                           order=order)
                map_all = map_all.crop(crop_width=pad_width)

            result_all[name] = map_all

        result_all["sqrt_ts"] = self.estimate_sqrt_ts(result_all["ts"],
                                                      result_all["flux"])
        return result_all
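
A minimal usage sketch (hypothetical `dataset`; constructor keywords follow recent gammapy versions, where the fitted model and energy edges are passed at initialization):

import astropy.units as u
from gammapy.estimators import TSMapEstimator
from gammapy.modeling.models import PointSpatialModel, PowerLawSpectralModel, SkyModel

model = SkyModel(
    spatial_model=PointSpatialModel(),
    spectral_model=PowerLawSpectralModel(index=2),
)
estimator = TSMapEstimator(model=model, energy_edges=[1, 10] * u.TeV)
maps = estimator.run(dataset)  # `dataset` is a hypothetical MapDataset
print(maps["sqrt_ts"])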
Example #23
def test_significance_map_estimator_map_dataset_on_off(simple_dataset_on_off):
    estimator = ExcessMapEstimator(
        0.11 * u.deg,
        selection_optional=None,
        e_edges=[0.1 * u.TeV, 1 * u.TeV, 10 * u.TeV],
    )
    result = estimator.run(simple_dataset_on_off)

    assert result["counts"].data.shape == (2, 20, 20)
    assert_allclose(result["counts"].data[:, 10, 10], 194)
    assert_allclose(result["excess"].data[:, 10, 10], 97)
    assert_allclose(result["background"].data[:, 10, 10], 97)
    assert_allclose(result["significance"].data[:, 10, 10], 5.741116, atol=1e-5)

    estimator_image = ExcessMapEstimator(0.11 * u.deg, e_edges=[0.1 * u.TeV, 1 * u.TeV])

    result_image = estimator_image.run(simple_dataset_on_off)
    assert result_image["counts"].data.shape == (1, 20, 20)
    assert_allclose(result_image["significance"].data[0, 10, 10], 5.741116, atol=1e-3)

    mask_fit = Map.from_geom(
        simple_dataset_on_off._geom,
        data=np.ones(simple_dataset_on_off.counts.data.shape, dtype=bool),
    )
    mask_fit.data[:, :, 10] = False
    mask_fit.data[:, 10, :] = False
    simple_dataset_on_off.mask_fit = mask_fit

    estimator_image = ExcessMapEstimator(0.11 * u.deg, apply_mask_fit=True)

    simple_dataset_on_off.exposure.data = (
        np.ones(simple_dataset_on_off.exposure.data.shape) * 1e6
    )
    result_image = estimator_image.run(simple_dataset_on_off)
    assert result_image["counts"].data.shape == (1, 20, 20)

    assert_allclose(result_image["significance"].data[0, 10, 10], 7.186745, atol=1e-3)

    assert_allclose(result_image["counts"].data[0, 10, 10], 304)
    assert_allclose(result_image["excess"].data[0, 10, 10], 152)
    assert_allclose(result_image["background"].data[0, 10, 10], 152)

    assert result_image["flux"].unit == u.Unit("cm-2s-1")
    assert_allclose(result_image["flux"].data[0, 10, 10], 7.6e-9, rtol=1e-3)

    # test with an npred()
    simple_dataset_on_off.exposure.data = (
        np.ones(simple_dataset_on_off.exposure.data.shape) * 1e10
    )
    simple_dataset_on_off.psf = None
    model = SkyModel(
        PowerLawSpectralModel(),
        GaussianSpatialModel(
            lat_0=0.0 * u.deg, lon_0=0.0 * u.deg, sigma=0.1 * u.deg, frame="icrs"
        ),
        datasets_names=[simple_dataset_on_off.name],
        name="sky_model",
    )
    simple_dataset_on_off.models.append(model)
    estimator_mod = ExcessMapEstimator(0.11 * u.deg, apply_mask_fit=False)
    result_mod = estimator_mod.run(simple_dataset_on_off)
    assert result_mod["counts"].data.shape == (1, 20, 20)

    assert_allclose(result_mod["significance"].data[0, 10, 10], 8.119164, atol=1e-3)

    assert_allclose(result_mod["counts"].data[0, 10, 10], 388)
    assert_allclose(result_mod["excess"].data[0, 10, 10], 194)
    assert_allclose(result_mod["background"].data[0, 10, 10], 194)

    assert result_mod["flux"].unit == u.Unit("cm-2s-1")
    assert_allclose(result_image["flux"].data[0, 10, 10], 7.6e-9, rtol=1e-3)
Example #24
def test_read_fgst_exposure():
    exposure = Map.read(
        "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz")
    energy_axis = exposure.geom.axes["energy_true"]
    assert energy_axis.node_type == "center"
    assert exposure.unit == "cm2 s"
Example #25
def integrate_geom(geom, oversampling_factor=None):
    """Evaluate model."""
    return Map.from_geom(geom=geom, data=1)
Example #26
def data_prep():
    data_store = DataStore.from_dir("$GAMMAPY_DATA/hess-dl3-dr1/")
    OBS_ID = 23523
    obs_ids = OBS_ID * np.ones(N_OBS)
    observations = data_store.get_observations(obs_ids)

    target_position = SkyCoord(ra=83.63, dec=22.01, unit="deg", frame="icrs")
    on_region_radius = Angle("0.11 deg")
    on_region = CircleSkyRegion(center=target_position,
                                radius=on_region_radius)

    exclusion_region = CircleSkyRegion(
        center=SkyCoord(183.604, -8.708, unit="deg", frame="galactic"),
        radius=0.5 * u.deg,
    )

    skydir = target_position.galactic
    exclusion_mask = Map.create(npix=(150, 150),
                                binsz=0.05,
                                skydir=skydir,
                                proj="TAN",
                                coordsys="GAL")

    mask = exclusion_mask.geom.region_mask([exclusion_region], inside=False)
    exclusion_mask.data = mask

    e_reco = MapAxis.from_bounds(0.1, 40, nbin=40, interp="log",
                                 unit="TeV").edges
    e_true = MapAxis.from_bounds(0.05, 100, nbin=200, interp="log",
                                 unit="TeV").edges

    stacked = SpectrumDatasetOnOff.create(e_reco=e_reco, e_true=e_true)
    stacked.name = "stacked"

    dataset_maker = SpectrumDatasetMaker(region=on_region,
                                         e_reco=e_reco,
                                         e_true=e_true,
                                         containment_correction=False)
    bkg_maker = ReflectedRegionsBackgroundMaker(exclusion_mask=exclusion_mask)
    safe_mask_masker = SafeMaskMaker(methods=["aeff-max"], aeff_percent=10)

    spectral_model = PowerLawSpectralModel(index=2,
                                           amplitude=2e-11 *
                                           u.Unit("cm-2 s-1 TeV-1"),
                                           reference=1 * u.TeV)
    spatial_model = PointSpatialModel(lon_0=target_position.ra,
                                      lat_0=target_position.dec,
                                      frame="icrs")
    spatial_model.lon_0.frozen = True
    spatial_model.lat_0.frozen = True

    sky_model = SkyModel(spatial_model=spatial_model,
                         spectral_model=spectral_model,
                         name="")

    for observation in observations:
        dataset = dataset_maker.run(observation,
                                    selection=["counts", "aeff", "edisp"])
        dataset_on_off = bkg_maker.run(dataset, observation)
        dataset_on_off = safe_mask_masker.run(dataset_on_off, observation)
        stacked.stack(dataset_on_off)

    stacked.models = sky_model
    return Datasets([stacked])
Example #27
import numpy as np
import astropy.units as u
from gammapy.maps import Map, MapAxis


def image_to_cube(input_map, energy_min, energy_max):
    energy_min = u.Quantity(energy_min)
    energy_max = u.Quantity(energy_max)
    axis = MapAxis.from_energy_bounds(energy_min, energy_max, nbin=1)
    geom = input_map.geom.to_cube([axis])
    return Map.from_geom(geom, data=input_map.data[np.newaxis, :, :])
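
A quick usage sketch of the helper on a plain 2D image:

image_2d = Map.create(npix=(10, 10))  # 2D map without an energy axis
cube = image_to_cube(image_2d, "1 TeV", "10 TeV")
print(cube.geom.axes[0])  # single energy bin spanning 1-10 TeV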
Example #28
    def from_hdulist(
        cls,
        hdulist,
        hdu=None,
        hdu_bands=None,
        exposure_hdu=None,
        exposure_hdu_bands=None,
        format="gadf",
    ):
        """Create from `~astropy.io.fits.HDUList`.

        Parameters
        ----------
        hdulist : `~astropy.io.fits.HDUList`
            HDU list.
        hdu : str
            Name or index of the HDU with the IRF map.
        hdu_bands : str
            Name or index of the HDU with the IRF map BANDS table.
        exposure_hdu : str
            Name or index of the HDU with the exposure map data.
        exposure_hdu_bands : str
            Name or index of the HDU with the exposure map BANDS table.
        format : {"gadf", "gtpsf"}
            File format.

        Returns
        -------
        irf_map : `IRFMap`
            IRF map.
        """
        if format == "gadf":
            if hdu is None:
                hdu = IRF_MAP_HDU_SPECIFICATION[cls.tag]

            irf_map = Map.from_hdulist(
                hdulist, hdu=hdu, hdu_bands=hdu_bands, format=format
            )

            if exposure_hdu is None:
                exposure_hdu = IRF_MAP_HDU_SPECIFICATION[cls.tag] + "_exposure"

            if exposure_hdu in hdulist:
                exposure_map = Map.from_hdulist(
                    hdulist,
                    hdu=exposure_hdu,
                    hdu_bands=exposure_hdu_bands,
                    format=format,
                )
            else:
                exposure_map = None
        elif format == "gtpsf":
            rad_axis = MapAxis.from_table_hdu(hdulist["THETA"], format=format)

            table = Table.read(hdulist["PSF"])
            energy_axis_true = MapAxis.from_table(table, format=format)

            geom_psf = RegionGeom.create(region=None, axes=[rad_axis, energy_axis_true])

            psf_map = Map.from_geom(geom=geom_psf, data=table["Psf"].data, unit="sr-1")

            geom_exposure = geom_psf.squash("rad")
            exposure_map = Map.from_geom(
                geom=geom_exposure, data=table["Exposure"].data, unit="cm2 s"
            )
            return cls(psf_map=psf_map, exposure_map=exposure_map)
        else:
            raise ValueError(f"Format {format} not supported")

        return cls(irf_map, exposure_map)
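
A hedged reading sketch for the "gadf" branch, with `PSFMap` standing in for an `IRFMap` subclass and a hypothetical file name:

from astropy.io import fits

with fits.open("psf_map.fits") as hdulist:  # hypothetical file
    psf_map = PSFMap.from_hdulist(hdulist, format="gadf")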
Example #29
    def integrate_geom(self, geom):
        """Integrate model on `~gammapy.maps.Geom`."""
        values = self.evaluate_geom(geom)
        data = values * geom.solid_angle()
        return Map.from_geom(geom=geom, data=data.value, unit=data.unit)
Example #30
def exposure(geom_true):
    m = Map.from_geom(geom_true)
    m.quantity = np.ones(geom_true.data_shape) * u.Quantity("100 m2 s")
    m.data[1] *= 10
    return m
Example #31
    def from_dict(cls, data):
        filename = data["filename"]
        normalize = data.get("normalize", True)
        m = Map.read(filename)
        return cls(m, normalize=normalize, filename=filename)
Example #32
def background(geom):
    m = Map.from_geom(geom)
    m.quantity = np.ones(geom.data_shape) * 1e-7
    return m
Example #33
"""Plot significance image with HESS and MILAGRO colormap.
"""
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.visualization import LinearStretch
from gammapy.image import colormap_hess, colormap_milagro
from gammapy.maps import Map

filename = "$GAMMAPY_DATA/tests/unbundled/poisson_stats_image/expected_ts_0.000.fits.gz"
image = Map.read(filename, hdu="SQRT_TS")

# Plot with the HESS and Milagro colormap
vmin, vmax, vtransition = -5, 15, 5
fig = plt.figure(figsize=(15.5, 6))

normalize = ImageNormalize(vmin=vmin, vmax=vmax, stretch=LinearStretch())
transition = normalize(vtransition)

ax = fig.add_subplot(121, projection=image.geom.wcs)
cmap = colormap_hess(transition=transition)
image.plot(ax=ax, cmap=cmap, norm=normalize, add_cbar=True)
plt.title("HESS-style colormap")

ax = fig.add_subplot(122, projection=image.geom.wcs)
cmap = colormap_milagro(transition=transition)
image.plot(ax=ax, cmap=cmap, norm=normalize, add_cbar=True)
plt.title("MILAGRO-style colormap")

plt.tight_layout()
plt.show()
Example #34
def main():

    import os
    import sys
    import argparse

    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm, Normalize, PowerNorm

    from gammapy.maps import Map

    # Argument definition
    usage = "usage: %(prog)s [options]"
    description = "Plot a HEALPix map"

    parser = argparse.ArgumentParser(usage=usage, description=description)

    parser.add_argument("-i", "--input", type=argparse.FileType('r'), required=True,
                        help="Input file")

    parser.add_argument("-e", "--extension", type=str, default="SKYMAP",
                        help="FITS HDU with HEALPix map")

    parser.add_argument("--ebin", type=str, default=None,
                        help="Energy bin, integer or 'ALL'")

    parser.add_argument("--zscale", type=str, default='log',
                        help="Scaling for color scale")

    parser.add_argument("--zmin", type=float, default=None,
                        help="Minimum z-axis value")

    parser.add_argument("--zmax", type=float, default=None,
                        help="Maximum z-axis value")

    parser.add_argument("--cbar", action='store_true', default=False, 
                        help="draw color bar")

    parser.add_argument("-o", "--output", type=argparse.FileType('w'),
                        help="Output file.  Leave blank for interactive.")

    # Parse the command line
    args = parser.parse_args(sys.argv[1:])

    hpxmap = Map.read(args.input.name, hdu=args.extension)
    outdata = []

    if args.zscale == 'sqrt':
        the_norm = PowerNorm(gamma=0.5)
    elif args.zscale == 'log':
        the_norm = LogNorm()
    else:
        the_norm = Normalize()

    fig, ax, im = hpxmap.plot(norm=the_norm, vmin=args.zmin, vmax=args.zmax)
    outdata.append(fig)

    if args.cbar:
        plt.colorbar(im, orientation='horizontal', shrink=0.7, pad=0.15, fraction=0.05)


    """
    if args.ebin == "ALL":
        wcsproj = hpxmap.geom.make_wcs(
            naxis=2, proj='MOL', energies=None, oversample=2)
        mapping = HpxToWcsMapping(hpxmap.hpx, wcsproj)

        for i, data in enumerate(hpxmap.counts):
            ip = ImagePlotter(data=data, proj=hpxmap.hpx, mapping=mapping)
            fig = plt.figure(i)
            im, ax = ip.plot(zscale=args.zscale,
                             vmin=args.zmin, vmax=args.zmax)
            outdata.append(fig)

    elif args.ebin is None:
        ip = ImagePlotter(data=hpxmap.counts, proj=hpxmap.hpx)
        im, ax = ip.plot(zscale=args.zscale, vmin=args.zmin, vmax=args.zmax)
        outdata.append((im, ax))
    else:
        try:
            ibin = int(args.ebin)
            ip = ImagePlotter(data=hpxmap.counts[ibin], proj=hpxmap.hpx)
            im, ax = ip.plot(zscale=args.zscale,
                             vmin=args.zmin, vmax=args.zmax)
            outdata.append((im, ax))
        except:
            raise ValueError("--ebin argument must be an integer or 'ALL'")
    """

    if args.output is None:
        plt.show()
    else:
        if len(outdata) == 1:
            plt.savefig(args.output.name)
        else:
            base, ext = os.path.splitext(args.output.name)
            for i, fig in enumerate(outdata):
                fig.savefig("%s_%02i%s" % (base, i, ext))