Example #1
def _significance_cube(self, cubes):
    p = self.parameters
    if p['method'] == 'lima':
        scube = significance(cubes['counts'], cubes['background'], method='lima')
    elif p['method'] == 'simple':
        scube = significance(cubes['counts'], cubes['background'], method='simple')
    elif p['method'] == 'asmooth':
        scube = _significance_asmooth(cubes['counts'], cubes['background'])
    elif p['method'] == 'ts':
        raise NotImplementedError
    else:
        raise ValueError("Not a valid significance estimation method."
                         " Choose one of the following: 'lima', 'simple',"
                         " 'asmooth' or 'ts'")
    return scube
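
For scalar inputs the two closed-form methods reduce to short expressions: 'simple' divides the excess by the Poisson error of the known background, while 'lima' is the known-background limit of the Li & Ma (1983) likelihood-ratio formula. A minimal standalone sketch (not the gammapy implementation; the 'lima' value reproduces the test result quoted in Example #10 below):

import numpy as np

def simple_significance(n_on, mu_bkg):
    # Excess divided by the Poisson error on the known background
    return (n_on - mu_bkg) / np.sqrt(mu_bkg)

def lima_significance(n_on, mu_bkg):
    # Known-background limit of the Li & Ma (1983) likelihood-ratio formula
    term = n_on * np.log(n_on / mu_bkg) - (n_on - mu_bkg)
    return np.sign(n_on - mu_bkg) * np.sqrt(2 * term)

print(lima_significance(1300, 1100))    # 5.8600870..., as in Example #10
print(simple_significance(1300, 1100))  # ~6.03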
Example #2
def cli(n_observed, mu_background, method):
    """Compute significance for a Poisson count observation.

    The significance is the tail probability to observe N_OBSERVED counts
    or more, given a known background level MU_BACKGROUND."""
    s = significance(n_observed, mu_background, method)
    print(s)
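
The docstring's definition can be checked directly with scipy: take the Poisson tail probability of observing N_OBSERVED counts or more given MU_BACKGROUND, then convert it to a one-sided Gaussian significance. A minimal sketch, independent of gammapy's internals:

from scipy.stats import norm, poisson

def direct_significance(n_observed, mu_background):
    # P(N >= n_observed) for Poisson-distributed N with mean mu_background;
    # sf(k) gives P(N > k), so evaluate it at n_observed - 1
    p_value = poisson.sf(n_observed - 1, mu_background)
    # One-sided Gaussian significance corresponding to that tail probability
    return norm.isf(p_value)

print(direct_significance(10, 4.2))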
Example #4
    def info_dict(self, in_safe_energy_range=True):
        """Info dict with summary statistics, summed over energy

        Parameters
        ----------
        in_safe_energy_range : bool
            Whether to sum only in the safe energy range

        Returns
        -------
        info_dict : dict
            Dictionary with summary info.
        """
        info = dict()
        mask = self.mask_safe if in_safe_energy_range else slice(None)

        info["name"] = self.name
        info["livetime"] = self.livetime.copy()

        info["n_on"] = self.counts.data[mask].sum()

        info["background"] = self.background.data[mask].sum()
        info["excess"] = self.excess.data[mask].sum()
        info["significance"] = significance(
            self.counts.data[mask].sum(),
            self.background.data[mask].sum(),
        )

        info["background_rate"] = info["background"] / info["livetime"]
        info["gamma_rate"] = info["excess"] / info["livetime"]
        return info
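
A minimal usage sketch; `dataset` stands in for an instance of the (unnamed here) dataset class that defines info_dict:

# `dataset` is assumed to be an instance of the class defining info_dict above
info = dataset.info_dict(in_safe_energy_range=True)
print("{0}: significance = {1:.2f} sigma".format(info["name"], info["significance"]))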
Example #5
def make_significance_image():
    counts_image = fits.open("counts_image.fits")[1]
    bkg_image = fits.open("bkg_image.fits")[1]
    counts = disk_correlate(counts_image.data, 10)
    bkg = disk_correlate(bkg_image.data, 10)
    s = significance(counts, bkg)
    s_image = fits.ImageHDU(data=s, header=counts_image.header)
    s_image.writeto("significance_image.fits", overwrite=True)  # 'overwrite' replaces the removed 'clobber' keyword
Example #6
    def compute_correlated_maps(self, kernel):
        """Compute significance image for a given kernel.
        """
        self.counts_corr = convolve(self.counts, kernel)
        self.background_corr = convolve(self.background, kernel)
        self.significance = significance(self.counts_corr, self.background_corr)

        return self
Example #10
def test_significance_on_off_against_known_background():
    # Check that the Li & Ma limit formula is correct
    # With small alpha and high counts, the significance
    # and significance_on_off should be very close
    actual = significance(n_on=1300, mu_bkg=1100, method="lima")
    assert_allclose(actual, 5.8600870406703329)
    actual = significance_on_off(n_on=1300,
                                 n_off=1100 / 1.0e-8,
                                 alpha=1e-8,
                                 method="lima")
    assert_allclose(actual, 5.8600864348078519)
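
The convergence claimed in the comment can be demonstrated directly: holding the background estimate alpha * n_off fixed at 1100 while alpha shrinks, significance_on_off approaches the known-background significance. A sketch using the same calls and signatures as the test:

from gammapy.stats import significance, significance_on_off

s_known = significance(n_on=1300, mu_bkg=1100, method="lima")
for alpha in [1e-2, 1e-4, 1e-6, 1e-8]:
    # n_off is chosen so that the background estimate alpha * n_off stays at 1100
    s_on_off = significance_on_off(n_on=1300, n_off=1100 / alpha,
                                   alpha=alpha, method="lima")
    print("alpha={0:.0e}: on/off={1:.7f}, known background={2:.7f}".format(
        alpha, s_on_off, s_known))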
Example #11
def _significance_cube(cubes, method):
    if method in {"lima", "simple"}:
        # pass the requested method through instead of hard-coding 'lima'
        scube = significance(cubes["counts"],
                             cubes["background"],
                             method=method)
    elif method == "asmooth":
        scube = _significance_asmooth(cubes["counts"], cubes["background"])
    elif method == "ts":
        raise NotImplementedError()
    else:
        raise ValueError("Not a valid significance estimation method."
                         " Choose one of the following: 'lima', 'simple',"
                         " 'asmooth' or 'ts'")
    return scube
Example #12
def residual_images(model_file, data_file, out_file, thetas, overwrite):
    """Compute source model residual images.

    The input `data_file` must contain the following HDU extensions:

    * 'On' -- Counts image
    * 'Background' -- Background image
    * 'Diffuse' -- Diffuse model image
    """
    import logging
    logging.basicConfig(level=logging.DEBUG,
                        format='%(levelname)s - %(message)s')
    import numpy as np
    from astropy.io import fits
    from gammapy.image import disk_correlate
    from gammapy.stats import significance

    logging.info('Reading {0}'.format(data_file))
    hdu_list = fits.open(data_file)
    header = hdu_list[0].header
    counts = hdu_list['On'].data.astype(np.float64)
    background = hdu_list['Background'].data.astype(np.float64)
    diffuse = hdu_list['Diffuse'].data.astype(np.float64)

    logging.info('Reading {0}'.format(model_file))
    model = fits.getdata(model_file)

    background_plus_model_diffuse = background + model + diffuse

    out_hdu_list = fits.HDUList()

    for theta in thetas:
        logging.info('Processing theta = {0} deg'.format(theta))

        theta_pix = theta / header['CDELT2']
        counts_corr = disk_correlate(counts, theta_pix)
        background_plus_model_corr = disk_correlate(
            background_plus_model_diffuse, theta_pix)

        # excess_corr = counts_corr - background_plus_model_corr
        significance_corr = significance(counts_corr,
                                         background_plus_model_corr)

        name = 'RESIDUAL_SIGNIFICANCE_{0}'.format(theta)
        logging.info('Appending HDU extension: {0}'.format(name))
        hdu = fits.ImageHDU(significance_corr, header, name)
        out_hdu_list.append(hdu)

    logging.info('Writing {0}'.format(out_file))
    out_hdu_list.writeto(out_file, overwrite=overwrite)
Example #13
    def significance_image(self, radius):
        """Make the significance image from the counts and bkg images.

        Parameters
        ----------
        radius : float
            Disk radius in pixels.
        """
        image = SkyImage.empty_like(self.empty_image)
        disk = Tophat2DKernel(radius)
        disk.normalize('peak')
        counts = self.images["counts"].convolve(disk.array)
        bkg = self.images["bkg"].convolve(disk.array)
        image.data = significance(counts.data, bkg.data)
        self.images["significance"] = image
Example #14
def residual_images(model_file,
                    data_file,
                    out_file,
                    thetas,
                    overwrite):
    """Compute source model residual images.

    The input `data_file` must contain the following HDU extensions:

    * 'On' -- Counts image
    * 'Background' -- Background image
    * 'Diffuse' -- Diffuse model image
    """
    import logging
    import numpy as np
    from astropy.io import fits
    from gammapy.image import disk_correlate
    from gammapy.stats import significance

    # 'log' is presumably a module-level logger in the original source file;
    # define it here so the snippet runs on its own
    logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(message)s')
    log = logging.getLogger(__name__)

    log.info('Reading {0}'.format(data_file))
    hdu_list = fits.open(data_file)
    header = hdu_list[0].header
    counts = hdu_list['On'].data.astype(np.float64)
    background = hdu_list['Background'].data.astype(np.float64)
    diffuse = hdu_list['Diffuse'].data.astype(np.float64)

    log.info('Reading {0}'.format(model_file))
    model = fits.getdata(model_file)

    background_plus_model_diffuse = background + model + diffuse

    out_hdu_list = fits.HDUList()

    for theta in thetas:
        log.info('Processing theta = {0} deg'.format(theta))

        theta_pix = theta / header['CDELT2']
        counts_corr = disk_correlate(counts, theta_pix)
        background_plus_model_corr = disk_correlate(background_plus_model_diffuse, theta_pix)

        # excess_corr = counts_corr - background_plus_model_corr
        significance_corr = significance(counts_corr, background_plus_model_corr)

        name = 'RESIDUAL_SIGNIFICANCE_{0}'.format(theta)
        log.info('Appending HDU extension: {0}'.format(name))
        hdu = fits.ImageHDU(significance_corr, header, name)
        out_hdu_list.append(hdu)

    log.info('Writing {0}'.format(out_file))
    out_hdu_list.writeto(out_file, overwrite=overwrite)
Example #15
def compute_lima_image(counts, background, kernel):
    """Compute Li & Ma significance and flux images for known background.

    Parameters
    ----------
    counts : `~gammapy.maps.WcsNDMap`
        Counts image
    background : `~gammapy.maps.WcsNDMap`
        Background image
    kernel : `astropy.convolution.Kernel2D`
        Convolution kernel

    Returns
    -------
    images : dict
        Dictionary containing the result maps.
        Keys are: 'significance', 'counts', 'background' and 'excess'.

    See Also
    --------
    gammapy.stats.significance
    """
    # The kernel is normalized in place below, so make a copy here
    kernel = copy.deepcopy(kernel)
    kernel.normalize("peak")

    # FFT convolution adds numerical noise; call np.rint to restore
    # integer counts
    counts_conv = np.rint(counts.convolve(kernel.array).data)
    background_conv = background.convolve(kernel.array).data
    excess_conv = counts_conv - background_conv
    significance_conv = significance(counts_conv,
                                     background_conv,
                                     method="lima")

    return {
        "significance": counts.copy(data=significance_conv),
        "counts": counts.copy(data=counts_conv),
        "background": counts.copy(data=background_conv),
        "excess": counts.copy(data=excess_conv),
    }
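
A minimal usage sketch for compute_lima_image; the filename and HDU names are illustrative, not part of the API:

from astropy.convolution import Tophat2DKernel
from gammapy.maps import Map

# "images.fits" with COUNTS/BACKGROUND HDUs is assumed to exist
counts = Map.read("images.fits", hdu="COUNTS")
background = Map.read("images.fits", hdu="BACKGROUND")

result = compute_lima_image(counts, background, kernel=Tophat2DKernel(5))
result["significance"].plot(add_cbar=True)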
Example #17
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from gammapy.stats import significance
from gammapy.image.utils import disk_correlate
from npred_general import prepare_images
from aplpy import FITSFigure

model, gtmodel, ratio, counts, header = prepare_images()

# Top hat correlation
correlation_radius = 3

correlated_gtmodel = disk_correlate(gtmodel, correlation_radius)
correlated_counts = disk_correlate(counts, correlation_radius)
correlated_model = disk_correlate(model, correlation_radius)

# Fermi significance (use the correlated model to match the correlated counts)
fermi_significance = np.nan_to_num(significance(correlated_counts, correlated_gtmodel,
                                                method='lima'))
# Gammapy significance (a distinct name avoids shadowing the significance function)
gammapy_significance = np.nan_to_num(significance(correlated_counts, correlated_model,
                                                  method='lima'))

titles = ['Gammapy Significance', 'Fermi Tools Significance']

# Plot

fig = plt.figure(figsize=(10, 5))
hdu1 = fits.ImageHDU(gammapy_significance, header)
f1 = FITSFigure(hdu1, figure=fig, convention='wells', subplot=(1, 2, 1))
f1.set_tick_labels_font(size='x-small')
f1.tick_labels.set_xformat('ddd')
f1.tick_labels.set_yformat('ddd')
f1.show_colorscale(vmin=0, vmax=10, cmap='afmhot')
Example #18
"""Plot significance image with HESS and MILAGRO colormap.
"""
import numpy as np
import matplotlib.pyplot as plt
from gammapy.datasets import load_poisson_stats_image
from gammapy.image import disk_correlate
from gammapy.stats import significance
from gammapy.image import colormap_hess, colormap_milagro
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.visualization import LinearStretch

# Compute an example significance image
counts = load_poisson_stats_image()
counts = disk_correlate(counts, radius=5, mode='reflect')
background = np.median(counts) * np.ones_like(counts)
image = significance(counts, background)

# Plot with the HESS and Milagro colormap
vmin, vmax, vtransition = -5, 15, 5
plt.figure(figsize=(8, 4))

normalize = ImageNormalize(vmin=vmin, vmax=vmax, stretch=LinearStretch())
transition = normalize(vtransition)

plt.subplot(121)
cmap = colormap_hess(transition=transition)
plt.imshow(image, cmap=cmap, norm=normalize)
plt.axis('off')
plt.colorbar()
plt.title('HESS-style colormap')
Example #20
def test_significance(p):
    s = significance(p["n_on"], p["mu_bkg"], p["method"])
    assert_allclose(s, p["s"], atol=1e-5)
Example #21
show_image(images['excess'], vmax=2)

# In[18]:

# Significance image
# Just for fun, let's compute it by hand ...
from astropy.convolution import Tophat2DKernel
kernel = Tophat2DKernel(4)
kernel.normalize('peak')

counts_conv = images['counts'].convolve(kernel.array)
background_conv = images['background'].convolve(kernel.array)

from gammapy.stats import significance
significance_image = SkyImage.empty_like(ref_image)
significance_image.data = significance(counts_conv.data, background_conv.data)
show_image(significance_image, vmax=8)

# ## Source Detection
#
# Use the class [TSImageEstimator](http://docs.gammapy.org/dev/api/gammapy.detect.compute_ts_image.html#gammapy.detect.TSImageEstimator.html) and [photutils.find_peaks](http://photutils.readthedocs.io/en/stable/api/photutils.find_peaks.html) to detect point-like sources on the images:

# In[19]:

# cut out a smaller piece of the PSF image to save computing time
# for convenience we're "misusing" the SkyImage class to represent the PSF on the sky.
kernel = images['psf'].cutout(target_position, size=1.1 * u.deg)
kernel.show()

# In[20]: