Example #1
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from astropy.units import Quantity
from astropy.coordinates import Angle
from gammapy.datasets import FermiVelaRegion
from gammapy.irf import EnergyDependentTablePSF
from gammapy.data import SpectralCube, compute_npred_cube, convolve_cube


def prepare_images():
    # Read in the data
    background_file = FermiVelaRegion.filenames()['diffuse_model']
    exposure_file = FermiVelaRegion.filenames()['exposure_cube']
    counts_file = FermiVelaRegion.filenames()['counts_cube']
    background_model = SpectralCube.read(background_file)
    exposure_cube = SpectralCube.read(exposure_file)
    # Add correct units
    exposure_cube.data = Quantity(exposure_cube.data.value, 'cm2 s')
    # Re-project background cube
    repro_bg_cube = background_model.reproject_to(exposure_cube)
    # Define energy band required for output
    energies = Quantity([10, 500], 'GeV')

    # Compute the predicted counts cube
    npred_cube = compute_npred_cube(repro_bg_cube, exposure_cube, energies)

    # Convolve with Energy-dependent Fermi LAT PSF

    psf = EnergyDependentTablePSF.read(FermiVelaRegion.filenames()['psf'])
    convolved_npred_cube = convolve_cube(npred_cube,
                                         psf,
                                         offset_max=Angle(3, 'deg'))

    # Counts data
    counts_hdu = fits.open(counts_file)[0]
    counts_wcs = WCS(counts_hdu.header)
    counts_cube = SpectralCube(data=Quantity(counts_hdu.data, ''),
                               wcs=counts_wcs,
                               energy=energies)
    counts_cube = counts_cube.reproject_to(npred_cube)

    counts = counts_cube.data[0]
    model = convolved_npred_cube.data[0]

    # Load Fermi tools gtmodel background-only result

    gtmodel = fits.open(
        FermiVelaRegion.filenames()['background_image'])[0].data.astype(float)
    # Ratio of the two background images
    ratio = np.nan_to_num(model / gtmodel)

    # Header is required for plotting, so returned here
    wcs = npred_cube.wcs
    wcs = wcs.dropaxis(2)
    header = wcs.to_header()

    return model, gtmodel, ratio, counts, header
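

# Illustration only (not part of the original example): the images returned by
# prepare_images() can be compared side by side. matplotlib is an assumed
# dependency here; the figure layout and colour map are arbitrary choices.
import matplotlib.pyplot as plt

model, gtmodel, ratio, counts, header = prepare_images()
wcs = WCS(header)

fig = plt.figure(figsize=(12, 4))
for i, (title, image) in enumerate(zip(['gammapy npred', 'gtmodel', 'ratio'],
                                       [model, gtmodel, ratio])):
    ax = fig.add_subplot(1, 3, i + 1, projection=wcs)
    ax.imshow(image, origin='lower', cmap='afmhot')
    ax.set_title(title)
plt.show()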
Example #3
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from astropy.units import Quantity
from gammapy.datasets import FermiGalacticCenter
from gammapy.irf import EnergyDependentTablePSF
from gammapy.data import SpectralCube

psf_file = FermiGalacticCenter.filenames()['psf']
psf = EnergyDependentTablePSF.read(psf_file)

# *** LOADING INPUT ***

# Counts must be provided as a counts ImageHDU
flux_file = raw_input('Flux Map: ')
exposure_file = raw_input('Exposure Map: ')
spec_ind = input('Spectral Index (for reprojection): ')
flux_hdu = fits.open(flux_file)[1]
flux_wcs = WCS(flux_hdu.header)
energy_flux = Quantity([_energy_lafferty_power_law(10000, 500000, spec_ind)],
                       'MeV')
flux_data = np.zeros((1, 1800, 3600))
flux_data[0] = Quantity(flux_hdu.data, '')
flux_spec_cube = SpectralCube(data=flux_data, wcs=flux_wcs, energy=energy_flux)

exposure_hdu = fits.open(exposure_file)[0]
exposure_wcs = WCS(exposure_hdu.header)
energy_exp = Quantity([10000, 500000], 'MeV')
exposure_data = Quantity(exposure_hdu.data, '')
exposure_spec_cube = SpectralCube(data=exposure_data,
                                  wcs=exposure_wcs,
                                  energy=energy_exp)
exposure_spec_cube = exposure_spec_cube.reproject_to(flux_spec_cube)

counts_data = flux_spec_cube.data * exposure_spec_cube.data
counts = fits.ImageHDU(data=counts_data[0], header=flux_hdu.header)
# Start with flat background estimate
# Background must be provided as an ImageHDU
background_data = np.ones_like(counts_data, dtype=float)
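
# The snippet above calls a private helper, _energy_lafferty_power_law, that is not
# defined here. A hedged sketch of the Lafferty & Wyatt (1995) mean-energy
# computation it presumably performs is given below; the exact signature and
# behaviour are assumptions, not the documented gammapy API, and it would need to
# be defined (or imported) before the call above.
def _energy_lafferty_power_law(energy_min, energy_max, spectral_index):
    """Energy at which a power law equals its average over [energy_min, energy_max]."""
    gamma = spectral_index
    # Bin-averaged value of E ** -gamma over the interval (assumes gamma != 1)
    mean_value = ((energy_max ** (1. - gamma) - energy_min ** (1. - gamma)) /
                  ((1. - gamma) * (energy_max - energy_min)))
    # Solve E_lw ** -gamma = mean_value for the Lafferty-Wyatt energy
    return mean_value ** (-1. / gamma)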
Example #4
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from astropy.units import Quantity
from gammapy.data import SpectralCube
from gammapy.image import binary_disk
# GammaImages, CORRELATION_RADIUS and _energy_lafferty_power_law are assumed to be
# defined or imported elsewhere in the original script.

# Counts must be provided as a counts ImageHDU
flux_file = raw_input("Flux Map: ")
exposure_file = raw_input("Exposure Map: ")
spec_ind = input("Spectral Index (for reprojection): ")
flux_hdu = fits.open(flux_file)[1]
flux_wcs = WCS(flux_hdu.header)
energy_flux = Quantity([_energy_lafferty_power_law(10000, 500000, spec_ind)], "MeV")
flux_data = np.zeros((1, 1800, 3600))
flux_data[0] = Quantity(flux_hdu.data, "")
flux_spec_cube = SpectralCube(data=flux_data, wcs=flux_wcs, energy=energy_flux)

exposure_hdu = fits.open(exposure_file)[0]
exposure_wcs = WCS(exposure_hdu.header)
energy_exp = Quantity([10000, 500000], "MeV")
exposure_data = Quantity(exposure_hdu.data, "")
exposure_spec_cube = SpectralCube(data=exposure_data, wcs=exposure_wcs, energy=energy_exp)
exposure_spec_cube = exposure_spec_cube.reproject_to(flux_spec_cube)

counts_data = flux_spec_cube.data * exposure_spec_cube.data
counts = fits.ImageHDU(data=counts_data[0], header=flux_hdu.header)
# Start with flat background estimate
# Background must be provided as an ImageHDU
background_data = np.ones_like(counts_data, dtype=float)
background = fits.ImageHDU(data=background_data[0], header=flux_hdu.header)
images = GammaImages(counts=counts, background=background)

source_kernel = binary_disk(CORRELATION_RADIUS).astype(float)
source_kernel /= source_kernel.sum()

background_kernel = np.ones((5, 100))
background_kernel /= background_kernel.sum()
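
# Illustration only (not in the original snippet): one way these kernels could be
# used, correlating the counts and a smoothed background estimate with the
# disk-shaped source kernel. scipy is an assumed dependency here.
from scipy.ndimage import convolve

background_estimate = convolve(counts.data, background_kernel)
correlated_counts = convolve(counts.data, source_kernel)
correlated_background = convolve(background_estimate, source_kernel)
excess = correlated_counts - correlated_background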
Example #5
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from astropy.units import Quantity
from gammapy.data import SpectralCube
# _energy_lafferty_power_law is assumed to be defined or imported elsewhere.

# *** LOADING INPUT ***

# Counts must be provided as a counts ImageHDU
fermi_diffuse = 'gll_iem_v05_rev1.fit'
comparison_diffuse = raw_input('Diffuse Model: ')

spec_ind = input('Spectral Index (for reprojection): ')
flux_hdu = fits.open(comparison_diffuse)[1]
flux_wcs = WCS(flux_hdu.header)
energy_flux = Quantity([_energy_lafferty_power_law(10000, 500000, spec_ind)],
                       'MeV')
flux_data = np.zeros((1, 103, 2001))
flux_data[0] = Quantity(flux_hdu.data, '')
flux_spec_cube = SpectralCube(data=Quantity(flux_data, '1 / (cm2 MeV s sr)'),
                              wcs=flux_wcs,
                              energy=energy_flux)

diffuse_spec_cube = SpectralCube.read(fermi_diffuse)
diffuse_spec_cube.data = Quantity(diffuse_spec_cube.data, '1 / (cm2 MeV s sr)')
fermi_diffuse_spec_cube = diffuse_spec_cube.reproject_to(flux_spec_cube)

energy_band = Quantity((10, 500), 'GeV')

integral_flux_image = fermi_diffuse_spec_cube.integral_flux_image(energy_band)
angles = fermi_diffuse_spec_cube.solid_angle_image
diffuse_data = integral_flux_image.data * angles.value

diffuse = fits.ImageHDU(data=diffuse_data, header=flux_hdu.header)
diffuse.writeto('diffuse_correct.fits')
Example #6
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from astropy.units import Quantity
from gammapy.datasets import FermiGalacticCenter
from gammapy.irf import EnergyDependentTablePSF
from gammapy.data import SpectralCube
# _energy_lafferty_power_law is assumed to be defined or imported elsewhere.

# Parameters

CORRELATION_RADIUS = 3 # pix
SIGNIFICANCE_THRESHOLD = 5
MASK_DILATION_RADIUS = 0.3

psf_file = FermiGalacticCenter.filenames()['psf']
psf = EnergyDependentTablePSF.read(psf_file)

# *** LOADING INPUT ***

# Counts must be provided as a counts ImageHDU
fermi_diffuse = 'gll_iem_v05_rev1.fit'
comparison_diffuse = raw_input('Diffuse Model: ')

spec_ind = input('Spectral Index (for reprojection): ')
flux_hdu = fits.open(comparison_diffuse)[0]
flux_wcs = WCS(flux_hdu.header)
energy_flux = Quantity([_energy_lafferty_power_law(10000, 500000, spec_ind)], 'MeV')
flux_data = np.zeros((1, 2000, 100))
flux_data[0] = Quantity(flux_hdu.data, '')
flux_spec_cube = SpectralCube(data=flux_data, wcs=flux_wcs, energy=energy_flux)

diffuse_spec_cube = SpectralCube.read(fermi_diffuse)
fermi_diffuse_spec_cube = diffuse_spec_cube.reproject_to(flux_spec_cube)

ratio = fermi_diffuse_spec_cube.data / flux_spec_cube.data

ratio_hdu = fits.ImageHDU(data=ratio, header=flux_hdu.header)
ratio_hdu.writeto('ratio.fits', clobber=True)
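
# Quick sanity check on the ratio map (illustrative addition, not in the original):
ratio_values = np.asarray(ratio)[np.isfinite(np.asarray(ratio))]
print('ratio: min=%.3g max=%.3g mean=%.3g' % (ratio_values.min(),
                                              ratio_values.max(),
                                              ratio_values.mean()))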
Example #7
"""Test npred model image computation.
"""
from astropy.units import Quantity
from astropy.coordinates import Angle
from gammapy.datasets import FermiGalacticCenter
from gammapy.irf import EnergyDependentTablePSF
from gammapy.data import (SpectralCube, compute_npred_cube, convolve_cube)

filenames = FermiGalacticCenter.filenames()
spectral_cube = SpectralCube.read(filenames['diffuse_model'])
exposure_cube = SpectralCube.read(filenames['exposure_cube'])
psf = EnergyDependentTablePSF.read(filenames['psf'])

spectral_cube = spectral_cube.reproject_to(exposure_cube)

energy_bounds = Quantity([10, 30, 100, 500], 'GeV')
npred_cube = compute_npred_cube(spectral_cube, exposure_cube, energy_bounds)

offset_max = Angle(1, 'deg')
npred_cube_convolved = convolve_cube(npred_cube, psf, offset_max)
Example #8
counts_file = raw_input('Counts Map: ')
background_file = raw_input('Background Map: ')
exposure_file = raw_input('Exposure Map: ')
spec_ind = input('Spectral Index (for reprojection): ')
counts_hdu = fits.open(counts_file)[0]
counts_wcs = WCS(counts_hdu.header)
energy_counts = Quantity([_energy_lafferty_power_law(10000, 500000, spec_ind)], 'MeV')
counts_data = np.zeros((1, 1800, 3600))
counts_data[0] = Quantity(counts_hdu.data, '')
counts_spec_cube = SpectralCube(data=counts_data, wcs=counts_wcs, energy=energy_counts)

exposure_hdu = fits.open(exposure_file)[0]
exposure_wcs = WCS(exposure_hdu.header)
energy_exp = Quantity([10000, 500000], 'MeV')
exposure_data = Quantity(exposure_hdu.data, '')
exposure_spec_cube = SpectralCube(data=exposure_data, wcs=exposure_wcs, energy=energy_exp)
exposure_spec_cube = exposure_spec_cube.reproject_to(counts_spec_cube)

flux_data = counts_spec_cube.data / exposure_spec_cube.data
flux = fits.ImageHDU(data=flux_data[0], header=counts_hdu.header)

binsz = input('Bin size: ')

label1 = 'Total Flux'
smooth = 0.2
lons, lats = coordinates(flux)
lat_profile_total = image_profile(profile_axis='lat', image=flux,
                                  lats=[-10, 10], lons=[-100, 100],
                                  binsz=binsz, errors=True, counts=counts_hdu)

label2 = 'Separated Background'
Example #9
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from astropy.units import Quantity
from gammapy.image import make_empty_image, catalog_image, binary_disk
from gammapy.image.utils import cube_to_image, solid_angle
from gammapy.data import SpectralCube
# _energy_lafferty_power_law is assumed to be defined or imported elsewhere.

counts_file = raw_input('Counts Map: ')
background_file = raw_input('Background Map: ')
exposure_file = raw_input('Exposure Map: ')
spec_ind = input('Spectral Index (for reprojection): ')
counts_hdu = fits.open(counts_file)[0]
counts_wcs = WCS(counts_hdu.header)
energy_counts = Quantity([_energy_lafferty_power_law(10000, 500000, spec_ind)],
                         'MeV')
counts_data = np.zeros((1, 1800, 3600))
counts_data[0] = Quantity(counts_hdu.data, '')
counts_spec_cube = SpectralCube(data=counts_data,
                                wcs=counts_wcs,
                                energy=energy_counts)

exposure_hdu = fits.open(exposure_file)[0]
exposure_wcs = WCS(exposure_hdu.header)
energy_exp = Quantity([10000, 500000], 'MeV')
exposure_data = Quantity(exposure_hdu.data, '')
exposure_spec_cube = SpectralCube(data=exposure_data,
                                  wcs=exposure_wcs,
                                  energy=energy_exp)
exposure_spec_cube = exposure_spec_cube.reproject_to(counts_spec_cube)

flux_data = counts_spec_cube.data / exposure_spec_cube.data
flux = fits.ImageHDU(data=flux_data[0], header=counts_hdu.header)

binsz = input('Bin size: ')
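
# Illustration only (not part of the original snippet): quick look at the derived
# flux image; matplotlib is an assumed dependency here.
import matplotlib.pyplot as plt

plt.imshow(np.asarray(flux.data), origin='lower', cmap='afmhot')
plt.colorbar(label='counts / exposure')
plt.title('Flux map')
plt.show()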