def correlate_fermi_psf(image, max_offset, resolution=0.1, energy='None',
                        energy_band=(10, 500)):
    """Convolve an image with the Fermi-LAT PSF.

    Parameters
    ----------
    image : `~numpy.ndarray`
        Input image.
    max_offset : float
        Maximum offset of the PSF kernel (deg).
    resolution : float, optional
        Kernel pixel size (deg).
    energy : float or 'None', optional
        If given, use the PSF at this single energy (GeV); otherwise use
        the band-averaged PSF over ``energy_band``.
    energy_band : tuple of float, optional
        Energy band (GeV) for the band-averaged PSF.

    Returns
    -------
    `~numpy.ndarray`
        PSF-convolved image.
    """
    from astropy.coordinates import Angle
    from astropy.units import Quantity
    # BUG FIX: `convolve` was used below but never imported.
    from scipy.ndimage import convolve
    from gammapy.datasets import FermiGalacticCenter
    from gammapy.irf import EnergyDependentTablePSF

    # Parameters
    filename = FermiGalacticCenter.filenames()['psf']
    pixel_size = Angle(resolution, 'deg')
    offset_max = Angle(max_offset, 'deg')
    # Read once; both branches below need the same table PSF.
    fermi_psf = EnergyDependentTablePSF.read(filename)

    if energy == 'None':
        # Band-averaged PSF, weighted by a power law of index 2.5.
        # Default changed from a mutable list to a tuple (same values).
        energy_band = Quantity(list(energy_band), 'GeV')
        psf = fermi_psf.table_psf_in_energy_band(energy_band=energy_band,
                                                 spectral_index=2.5)
    else:
        # PSF at one single energy.
        energy = Quantity(energy, 'GeV')
        psf = fermi_psf.table_psf_at_energy(energy=energy)

    psf.normalize()
    kernel = psf.kernel(pixel_size=pixel_size, offset_max=offset_max)
    # Normalize the kernel so the convolution preserves the image integral.
    kernel_image = kernel.value / kernel.value.sum()
    # TODO: Write unit test (this will be useful):
    # kernel_image_integral = kernel_image.sum() * pixel_size.to('radian').value ** 2
    return convolve(image, kernel_image, mode='constant')
def correlate_fermi_psf(image, max_offset, resolution=0.1, energy='None',
                        energy_band=(10, 500)):
    """Convolve ``image`` with the Fermi-LAT PSF kernel.

    Parameters
    ----------
    image : `~numpy.ndarray`
        Input image.
    max_offset : float
        Maximum kernel offset (deg).
    resolution : float, optional
        Kernel pixel size (deg).
    energy : float or 'None', optional
        Single PSF energy (GeV), or 'None' for the band-averaged PSF.
    energy_band : tuple of float, optional
        Energy band (GeV) used when ``energy == 'None'``.

    Returns
    -------
    `~numpy.ndarray`
        Convolved image.
    """
    from astropy.coordinates import Angle
    from astropy.units import Quantity
    # BUG FIX: `convolve` was referenced but never imported.
    from scipy.ndimage import convolve
    from gammapy.datasets import FermiGalacticCenter
    from gammapy.irf import EnergyDependentTablePSF

    # Parameters
    filename = FermiGalacticCenter.filenames()['psf']
    pixel_size = Angle(resolution, 'deg')
    offset_max = Angle(max_offset, 'deg')
    # Hoisted out of the if/else: both branches read the same file.
    fermi_psf = EnergyDependentTablePSF.read(filename)

    if energy == 'None':
        # Band-averaged PSF with a power-law weighting of index 2.5.
        # Mutable list default replaced by a tuple (same values).
        energy_band = Quantity(list(energy_band), 'GeV')
        psf = fermi_psf.table_psf_in_energy_band(energy_band=energy_band,
                                                 spectral_index=2.5)
    else:
        energy = Quantity(energy, 'GeV')
        psf = fermi_psf.table_psf_at_energy(energy=energy)

    psf.normalize()
    kernel = psf.kernel(pixel_size=pixel_size, offset_max=offset_max)
    # Renormalize so the kernel sums to 1.
    kernel_image = kernel.value / kernel.value.sum()
    # TODO: Write unit test (this will be useful):
    # kernel_image_integral = kernel_image.sum() * pixel_size.to('radian').value ** 2
    return convolve(image, kernel_image, mode='constant')
def extract_spectra_fermi(target_position, on_radius):
    """Extract 1d spectra for Fermi-LAT"""
    log.info("Extracting 1d spectra for Fermi-LAT")
    # Input data: events, exposure cube and PSF, all read from local files.
    events = EventList.read("data/fermi/events.fits.gz")
    exposure = HpxNDMap.read("data/fermi/exposure_cube.fits.gz")
    psf = EnergyDependentTablePSF.read("data/fermi/psf.fits.gz")
    # Log-spaced energy binning from 30 GeV to 2 TeV, 0.1 dex per bin.
    emin, emax, dex = 0.03, 2, 0.1
    num = int(np.log10(emax / emin) / dex)
    energy = np.logspace(start=np.log10(emin), stop=np.log10(emax), num=num) * u.TeV
    # Ring-based background estimate around the target position.
    bkg_estimate = fermi_ring_background_extract(events, target_position, on_radius)
    extract = SpectrumExtractionFermi1D(
        events=events,
        exposure=exposure,
        psf=psf,
        bkg_estimate=bkg_estimate,
        target_position=target_position,
        on_radius=on_radius,
        energy=energy,
        containment_correction=True,
    )
    obs = extract.run()
    # Write the extracted spectrum in a Sherpa-compatible format.
    path = f"{config.repo_path}/results/spectra/fermi"
    log.info(f"Writing to {path}")
    obs.write(path, use_sherpa=True, overwrite=True)
def fermi_dataset():
    """Build a Fermi-3FHL Galactic-center `MapDataset`, reduced to an image.

    Returns
    -------
    `MapDataset`
        Image (single-energy-bin) dataset with counts, background model,
        exposure, safe mask and PSF map.
    """
    # BUG FIX: `Angle("3 deg", "3.5 deg")` passed "3.5 deg" as the *unit*
    # argument and raises; build a 2-element (width, height) Angle instead.
    size = Angle(["3 deg", "3.5 deg"])
    counts = Map.read("$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts-cube.fits.gz")
    counts = counts.cutout(counts.geom.center_skydir, size)

    background = Map.read(
        "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-background-cube.fits.gz"
    )
    background = background.cutout(background.geom.center_skydir, size)
    background = BackgroundModel(background, datasets_names=["fermi-3fhl-gc"])

    exposure = Map.read(
        "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-exposure-cube.fits.gz"
    )
    exposure = exposure.cutout(exposure.geom.center_skydir, size)
    # The exposure file stores no unit, so set it explicitly.
    exposure.unit = "cm2s"

    # All pixels considered safe.
    mask_safe = counts.copy(data=np.ones_like(counts.data).astype("bool"))

    psf = EnergyDependentTablePSF.read(
        "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-psf-cube.fits.gz"
    )
    psfmap = PSFMap.from_energy_dependent_table_psf(psf)

    dataset = MapDataset(
        counts=counts,
        models=[background],
        exposure=exposure,
        mask_safe=mask_safe,
        psf=psfmap,
        name="fermi-3fhl-gc",
    )
    # Collapse the energy axis to get an image dataset.
    dataset = dataset.to_image()
    return dataset
def test_write(self, tmp_path):
    """Round-trip the PSF through FITS and check nothing changed."""
    path = tmp_path / "test.fits"
    self.psf.write(path)
    restored = EnergyDependentTablePSF.read(path)
    # Both axes and the PSF values must survive the round trip.
    for axis_name in ("rad", "energy_true"):
        assert_allclose(restored.axes[axis_name].center, self.psf.axes[axis_name].center)
    assert_allclose(restored.quantity, self.psf.quantity)
def fermi_dataset():
    """Build a Fermi-3FHL Galactic-center `MapDataset` with PSF and edisp.

    Returns
    -------
    `MapDataset`
        Cube dataset with counts, background model, exposure, PSF map and
        a diagonal energy-dispersion kernel.
    """
    # BUG FIX: `Angle("3 deg", "3.5 deg")` passed "3.5 deg" as the *unit*
    # argument and raises; build a 2-element (width, height) Angle instead.
    size = Angle(["3 deg", "3.5 deg"])
    counts = Map.read(
        "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts-cube.fits.gz")
    counts = counts.cutout(counts.geom.center_skydir, size)

    background = Map.read(
        "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-background-cube.fits.gz")
    background = background.cutout(background.geom.center_skydir, size)
    background = BackgroundModel(background, datasets_names=["fermi-3fhl-gc"])

    exposure = Map.read(
        "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-exposure-cube.fits.gz")
    exposure = exposure.cutout(exposure.geom.center_skydir, size)
    # The exposure file stores no unit, so set it explicitly.
    exposure.unit = "cm2 s"

    psf = EnergyDependentTablePSF.read(
        "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-psf-cube.fits.gz")
    psfmap = PSFMap.from_energy_dependent_table_psf(psf)

    # Diagonal response: reconstructed energy equals true energy.
    edisp = EDispKernelMap.from_diagonal_response(
        energy_axis=counts.geom.axes["energy"],
        energy_axis_true=exposure.geom.axes["energy_true"],
    )
    return MapDataset(
        counts=counts,
        models=[background],
        exposure=exposure,
        psf=psfmap,
        name="fermi-3fhl-gc",
        edisp=edisp,
    )
def test_write(self, tmp_path):
    """Write the PSF to FITS, read it back, and verify the round trip."""
    path = tmp_path / "test.fits"
    self.psf.write(path)
    restored = EnergyDependentTablePSF.read(path)
    # Axis centers and PSF values must be preserved by write/read.
    assert_allclose(restored.rad_axis.center, self.psf.rad_axis.center)
    assert_allclose(restored.energy_axis_true.center, self.psf.energy_axis_true.center)
    assert_allclose(restored.psf_value.value, self.psf.psf_value.value)
def to_energy_dependent_table_psf(self, offset, rad=None):
    """Convert to energy-dependent table PSF.

    Parameters
    ----------
    offset : `~astropy.coordinates.Angle`
        Offset in the field of view. Default theta = 0 deg
    rad : `~astropy.coordinates.Angle`
        Offset from PSF center used for evaluating the PSF on a grid.
        Default offset = [0, 0.005, ..., 1.495, 1.5] deg.

    Returns
    -------
    table_psf : `~gammapy.irf.EnergyDependentTablePSF`
        Energy-dependent PSF
    """
    from gammapy.irf import EnergyDependentTablePSF
    from gammapy.datasets.map import RAD_AXIS_DEFAULT

    # Use the package-wide default rad axis unless edges were supplied.
    if rad is None:
        rad_axis = RAD_AXIS_DEFAULT
    else:
        rad_axis = MapAxis.from_edges(rad, name="rad")

    axes = MapAxes([self.axes["energy_true"], rad_axis])
    # Evaluate the PSF on the (energy_true, rad) grid at the given offset.
    values = self.evaluate(**axes.get_coord(), offset=offset)
    return EnergyDependentTablePSF(axes=axes, data=values.value, unit=values.unit)
def extract_spectra_fermi(target_position, on_radius):
    """Extract 1d spectra for Fermi-LAT"""
    log.info("Extracting 1d spectra for Fermi-LAT")
    # Input data: events, exposure cube and PSF, read from local files.
    events = EventList.read("data/fermi/events.fits.gz")
    exposure = HpxNDMap.read("data/fermi/exposure_cube.fits.gz")
    psf = EnergyDependentTablePSF.read("data/fermi/psf.fits.gz")
    # Restrict the configured energy bins to the 30 GeV - 2 TeV range.
    valid_range = (config.energy_bins >= 30 * u.GeV) * (config.energy_bins <= 2 * u.TeV)
    energy = config.energy_bins[valid_range]
    # Ring background: 1-2 deg annulus around the target.
    bkg_estimate = ring_background_estimate(
        pos=target_position,
        on_radius=on_radius,
        inner_radius=1 * u.deg,
        outer_radius=2 * u.deg,
        events=events,
    )
    extract = SpectrumExtractionFermi1D(
        events=events,
        exposure=exposure,
        psf=psf,
        bkg_estimate=bkg_estimate,
        target_position=target_position,
        on_radius=on_radius,
        energy=energy,
    )
    obs = extract.run()
    # Write the result in a Sherpa-compatible format.
    path = "results/spectra/fermi"
    log.info(f"Writing to {path}")
    obs.write(path, use_sherpa=True, overwrite=True)
def test_write(self, tmp_path):
    """Check that writing and re-reading the PSF preserves its content."""
    path = tmp_path / "test.fits"
    self.psf.write(path)
    restored = EnergyDependentTablePSF.read(path)
    # Compare in fixed units so any unit conversion on I/O is caught too.
    assert_allclose(restored.rad.to_value("deg"), self.psf.rad.to_value("deg"))
    assert_allclose(restored.energy.to_value("GeV"), self.psf.energy.to_value("GeV"))
    assert_allclose(restored.psf_value.value, self.psf.psf_value.value)
def __init__(self, evt_file="$JOINT_CRAB/data/fermi/events.fits.gz",
             exp_file="$JOINT_CRAB/data/fermi/exposure_cube.fits.gz",
             psf_file="$JOINT_CRAB/data/fermi/psf.fits.gz",
             max_psf_radius='0.5 deg'):
    """Load Fermi-LAT event list, exposure cube and PSF from disk.

    Parameters
    ----------
    evt_file, exp_file, psf_file : str
        Paths to the event list, exposure cube and PSF files.
    max_psf_radius : str
        Maximum PSF radius; not used in this constructor — presumably
        consumed by other methods of this class (TODO confirm).
    """
    # Read data
    self.events = EventList.read(evt_file)
    self.exposure = HpxNDMap.read(exp_file)
    self.exposure.unit = u.Unit('cm2s')  # no unit stored on map...
    self.psf = EnergyDependentTablePSF.read(psf_file)
def get_energy_dependent_table_psf(self, position):
    """Get energy-dependent PSF at a given position.

    Parameters
    ----------
    position : `~astropy.coordinates.SkyCoord`
        the target position. Should be a single coordinates

    Returns
    -------
    psf_table : `~gammapy.irf.EnergyDependentTablePSF`
        the table PSF
    """
    if position.size != 1:
        raise ValueError(
            "EnergyDependentTablePSF can be extracted at one single position only."
        )

    # axes ordering fixed. Could be changed.
    # Axis 1 is energy, axis 0 is rad in the PSF-map geometry here.
    pix_ener = np.arange(self.psf_map.geom.axes[1].nbin)
    pix_rad = np.arange(self.psf_map.geom.axes[0].nbin)

    # Convert position to pixels
    pix_lon, pix_lat = self.psf_map.geom.to_image().coord_to_pix(position)

    # Build the pixels tuple
    pix = np.meshgrid(pix_lon, pix_lat, pix_rad, pix_ener)

    # Interpolate in the PSF map. Squeeze to remove dimensions of length 1
    psf_values = np.squeeze(
        self.psf_map.interp_by_pix(pix) * u.Unit(self.psf_map.unit)
    )
    energies = self.psf_map.geom.axes[1].center
    rad = self.psf_map.geom.axes[0].center

    if self.exposure_map is not None:
        # Exposure is taken at theta index 0 and interpolated per energy.
        exposure_3d = self.exposure_map.slice_by_idx({"theta": 0})
        coords = {
            "skycoord": position,
            "energy": energies.reshape((-1, 1, 1))
        }
        data = exposure_3d.interp_by_coord(coords).squeeze()
        exposure = data * self.exposure_map.unit
    else:
        exposure = None

    # Beware. Need to revert rad and energies to follow the TablePSF scheme.
    return EnergyDependentTablePSF(
        energy=energies, rad=rad, psf_value=psf_values.T, exposure=exposure
    )
def __init__(
    self,
    evt_file="../data/joint-crab/fermi/events.fits.gz",
    exp_file="../data/joint-crab/fermi/exposure_cube.fits.gz",
    psf_file="../data/joint-crab/fermi/psf.fits.gz",
    max_psf_radius="0.5 deg",
):
    """Load Fermi-LAT event list, exposure cube and PSF from disk.

    Parameters
    ----------
    evt_file, exp_file, psf_file : str
        Paths to the event list, exposure cube and PSF files.
    max_psf_radius : str
        Maximum PSF radius; not used here — presumably consumed by other
        methods of this class (TODO confirm).
    """
    # Read data
    self.events = EventList.read(evt_file)
    self.exposure = HpxNDMap.read(exp_file)
    self.exposure.unit = u.Unit("cm2s")  # no unit stored on map...
    self.psf = EnergyDependentTablePSF.read(psf_file)
def test_psf_map_from_table_psf(position):
    """A PSFMap built from a table PSF must reproduce it at any position."""
    position = SkyCoord(position)
    filename = "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_psf_gc.fits.gz"
    table_psf = EnergyDependentTablePSF.read(filename)
    psf_map = PSFMap.from_energy_dependent_table_psf(table_psf)

    # Extract the PSF back at the requested sky position.
    extracted = psf_map.get_energy_dependent_table_psf(position)

    assert_allclose(extracted.data.data, table_psf.data.data)
    assert extracted.data.data.unit == "sr-1"
    assert_allclose(extracted.exposure.value, table_psf.exposure.value)
    assert extracted.exposure.unit == "cm2 s"
def prepare_images():
    """Prepare Vela-region model, counts and comparison images.

    Returns
    -------
    tuple
        ``(model, gtmodel, ratio, counts, header)`` — the convolved npred
        image, the Fermi tools gtmodel image, their ratio, the counts
        image, and a FITS header for plotting.
    """
    # Read in data
    background_file = FermiVelaRegion.filenames()['diffuse_model']
    exposure_file = FermiVelaRegion.filenames()['exposure_cube']
    counts_file = FermiVelaRegion.filenames()['counts_cube']
    background_model = SkyCube.read(background_file)
    exposure_cube = SkyCube.read(exposure_file)

    # Add correct units
    exposure_cube.data = Quantity(exposure_cube.data.value, 'cm2 s')

    # Re-project background cube
    repro_bg_cube = background_model.reproject_to(exposure_cube)

    # Define energy band required for output
    energies = EnergyBounds([10, 500], 'GeV')

    # Compute the predicted counts cube
    npred_cube = compute_npred_cube(repro_bg_cube, exposure_cube, energies)

    # Convolve with Energy-dependent Fermi LAT PSF
    psf = EnergyDependentTablePSF.read(FermiVelaRegion.filenames()['psf'])
    convolved_npred_cube = convolve_cube(npred_cube, psf, offset_max=Angle(3, 'deg'))

    # Counts data
    counts_data = fits.open(counts_file)[0].data
    counts_wcs = WCS(fits.open(counts_file)[0].header)
    counts_cube = SkyCube(data=Quantity(counts_data, ''), wcs=counts_wcs,
                          energy=energies)
    counts_cube = counts_cube.reproject_to(npred_cube,
                                           projection_type='nearest-neighbor')
    # Only the first energy plane is compared.
    counts = counts_cube.data[0]
    model = convolved_npred_cube.data[0]

    # Load Fermi tools gtmodel background-only result
    gtmodel = fits.open(
        FermiVelaRegion.filenames()['background_image'])[0].data.astype(float)

    # Ratio for the two background images
    ratio = np.nan_to_num(model / gtmodel)

    # Header is required for plotting, so returned here
    wcs = npred_cube.wcs
    header = wcs.to_header()
    return model, gtmodel, ratio, counts, header
def get_energy_dependent_table_psf(self, position):
    """Get energy-dependent PSF at a given position.

    Parameters
    ----------
    position : `~astropy.coordinates.SkyCoord`
        the target position. Should be a single coordinates

    Returns
    -------
    psf_table : `~gammapy.irf.EnergyDependentTablePSF`
        the table PSF
    """
    if position.size != 1:
        raise ValueError(
            "EnergyDependentTablePSF can be extracted at one single position only."
        )

    energy = self.psf_map.geom.get_axis_by_name("energy").center
    rad = self.psf_map.geom.get_axis_by_name("theta").center

    # Broadcast energy and theta against each other; the trailing (1, 1)
    # dims correspond to the sky axes of the map.
    coords = {
        "skycoord": position,
        "energy": energy.reshape((-1, 1, 1, 1)),
        "theta": rad.reshape((1, -1, 1, 1)),
    }

    data = self.psf_map.interp_by_coord(coords)
    psf_values = u.Quantity(data[:, :, 0, 0], unit=self.psf_map.unit, copy=False)

    if self.exposure_map is not None:
        # Exposure is evaluated at theta = 0, per energy.
        coords = {
            "skycoord": position,
            "energy": energy.reshape((-1, 1, 1)),
            "theta": 0 * u.deg,
        }
        data = self.exposure_map.interp_by_coord(coords).squeeze()
        exposure = data * self.exposure_map.unit
    else:
        exposure = None

    # Beware. Need to revert rad and energies to follow the TablePSF scheme.
    return EnergyDependentTablePSF(energy=energy, rad=rad,
                                   psf_value=psf_values, exposure=exposure)
def prepare_images():
    """Prepare Vela-region model, counts and comparison images.

    Returns
    -------
    tuple
        ``(model, gtmodel, ratio, counts, header)`` — the convolved npred
        image, the Fermi tools gtmodel image, their ratio, the counts
        image, and a FITS header for plotting.
    """
    # Read in data
    background_file = FermiVelaRegion.filenames()['diffuse_model']
    exposure_file = FermiVelaRegion.filenames()['exposure_cube']
    counts_file = FermiVelaRegion.filenames()['counts_cube']
    background_model = SpectralCube.read(background_file)
    exposure_cube = SpectralCube.read(exposure_file)

    # Add correct units
    exposure_cube.data = Quantity(exposure_cube.data.value, 'cm2 s')

    # Re-project background cube
    repro_bg_cube = background_model.reproject_to(exposure_cube)

    # Define energy band required for output
    energies = EnergyBounds([10, 500], 'GeV')

    # Compute the predicted counts cube
    npred_cube = compute_npred_cube(repro_bg_cube, exposure_cube, energies)

    # Convolve with Energy-dependent Fermi LAT PSF
    psf = EnergyDependentTablePSF.read(FermiVelaRegion.filenames()['psf'])
    convolved_npred_cube = convolve_cube(npred_cube, psf, offset_max=Angle(3, 'deg'))

    # Counts data
    counts_data = fits.open(counts_file)[0].data
    counts_wcs = WCS(fits.open(counts_file)[0].header)
    counts_cube = SpectralCube(data=Quantity(counts_data, ''), wcs=counts_wcs,
                               energy=energies)
    counts_cube = counts_cube.reproject_to(npred_cube,
                                           projection_type='nearest-neighbor')
    # Only the first energy plane is compared.
    counts = counts_cube.data[0]
    model = convolved_npred_cube.data[0]

    # Load Fermi tools gtmodel background-only result
    gtmodel = fits.open(FermiVelaRegion.filenames()['background_image'])[0].data.astype(float)

    # Ratio for the two background images
    ratio = np.nan_to_num(model / gtmodel)

    # Header is required for plotting, so returned here
    wcs = npred_cube.wcs
    header = wcs.to_header()
    return model, gtmodel, ratio, counts, header
def prepare_images():
    """Prepare Vela-region model, counts and comparison images.

    Returns
    -------
    tuple
        ``(model, gtmodel, ratio, counts, header)`` — the convolved npred
        image, the Fermi tools gtmodel image, their ratio, the counts
        image, and a FITS header for plotting.
    """
    # Read in data
    # NOTE(review): `fermi_vela` is never used below; kept because the
    # constructor may have side effects (e.g. data download) — confirm.
    fermi_vela = FermiVelaRegion()
    background_file = FermiVelaRegion.filenames()['diffuse_model']
    exposure_file = FermiVelaRegion.filenames()['exposure_cube']
    counts_file = FermiVelaRegion.filenames()['counts_cube']
    background_model = SkyCube.read(background_file, format='fermi-background')
    exposure_cube = SkyCube.read(exposure_file, format='fermi-exposure')

    # Re-project background cube
    repro_bg_cube = background_model.reproject(exposure_cube)

    # Define energy band required for output
    energies = EnergyBounds([10, 500], 'GeV')

    # Compute the predicted counts cube
    npred_cube = compute_npred_cube(repro_bg_cube, exposure_cube, energies,
                                    integral_resolution=5)

    # Convolve with Energy-dependent Fermi LAT PSF
    psf = EnergyDependentTablePSF.read(FermiVelaRegion.filenames()['psf'])
    kernels = psf.kernels(npred_cube)
    convolved_npred_cube = npred_cube.convolve(kernels, mode='reflect')

    # Counts data
    counts_cube = SkyCube.read(counts_file, format='fermi-counts')
    counts_cube = counts_cube.reproject(npred_cube)

    # Only the first energy plane is compared.
    counts = counts_cube.data[0]
    model = convolved_npred_cube.data[0]

    # Load Fermi tools gtmodel background-only result
    gtmodel = fits.open(
        FermiVelaRegion.filenames()['background_image'])[0].data.astype(float)

    # Ratio for the two background images
    ratio = np.nan_to_num(model / gtmodel)

    # Header is required for plotting, so returned here
    wcs = npred_cube.wcs
    header = wcs.to_header()
    return model, gtmodel, ratio, counts, header
def test_apply_containment_fraction():
    """A disk PSF fully inside the radius must leave the area unchanged."""
    n_edges_energy = 5
    energy = energy_logspace(0.1, 10.0, nbins=n_edges_energy + 1, unit="TeV")
    area = np.ones(n_edges_energy) * 4 * u.m**2
    aeff = EffectiveAreaTable(energy[:-1], energy[1:], data=area)

    # Disk PSF of width 0.2 deg, replicated across all energy bins.
    nrad = 100
    rad = Angle(np.linspace(0, 0.5, nrad), "deg")
    psf_table = TablePSF.from_shape(shape="disk", width="0.2 deg", rad=rad)
    values = np.resize(psf_table.psf_value.value, (n_edges_energy, nrad))
    psf_values = values * psf_table.psf_value.unit
    edep_psf_table = EnergyDependentTablePSF(
        aeff.energy.center, rad, psf_value=psf_values
    )

    new_aeff = apply_containment_fraction(aeff, edep_psf_table, Angle("0.1 deg"))

    # Containment of a 0.2 deg disk within 0.1 deg... the scaled area
    # should stay at the expected reference value in m2.
    assert_allclose(new_aeff.data.data.value, 1.0, rtol=5e-4)
    assert new_aeff.data.data.unit == "m2"
def prepare_images():
    """Prepare Vela-region model, counts and comparison images.

    Returns
    -------
    tuple
        ``(model, gtmodel, ratio, counts, header)`` — the convolved npred
        image, the Fermi tools gtmodel image, their ratio, the counts
        image, and a FITS header for plotting.
    """
    # Read in data
    # NOTE(review): `fermi_vela` is never used below; kept because the
    # constructor may have side effects (e.g. data download) — confirm.
    fermi_vela = FermiVelaRegion()
    background_file = FermiVelaRegion.filenames()['diffuse_model']
    exposure_file = FermiVelaRegion.filenames()['exposure_cube']
    counts_file = FermiVelaRegion.filenames()['counts_cube']
    background_model = SkyCube.read(background_file, format='fermi-background')
    exposure_cube = SkyCube.read(exposure_file, format='fermi-exposure')

    # Re-project background cube
    repro_bg_cube = background_model.reproject(exposure_cube)

    # Define energy band required for output
    energies = EnergyBounds([10, 500], 'GeV')

    # Compute the predicted counts cube
    npred_cube = compute_npred_cube(repro_bg_cube, exposure_cube, energies,
                                    integral_resolution=5)

    # Convolve with Energy-dependent Fermi LAT PSF
    psf = EnergyDependentTablePSF.read(FermiVelaRegion.filenames()['psf'])
    kernels = psf.kernels(npred_cube)
    convolved_npred_cube = npred_cube.convolve(kernels, mode='reflect')

    # Counts data
    counts_cube = SkyCube.read(counts_file, format='fermi-counts')
    counts_cube = counts_cube.reproject(npred_cube)

    # Only the first energy plane is compared.
    counts = counts_cube.data[0]
    model = convolved_npred_cube.data[0]

    # Load Fermi tools gtmodel background-only result
    gtmodel = fits.open(FermiVelaRegion.filenames()['background_image'])[0].data.astype(float)

    # Ratio for the two background images
    ratio = np.nan_to_num(model / gtmodel)

    # Header is required for plotting, so returned here
    wcs = npred_cube.wcs
    header = wcs.to_header()
    return model, gtmodel, ratio, counts, header
# NOTE(review): this script uses `raw_input`/`input` and so is Python 2 only.
from gammapy.image import make_empty_image, catalog_image, binary_disk
from gammapy.image.utils import cube_to_image, solid_angle
from gammapy.data import SpectralCube
from gammapy.image.utils import WCS
from gammapy.spectrum.flux_point import _energy_lafferty_power_law

# *** PREPARATION ***

# Parameters
CORRELATION_RADIUS = 3  # pix
SIGNIFICANCE_THRESHOLD = 5
MASK_DILATION_RADIUS = 0.3
psf_file = FermiGalacticCenter.filenames()["psf"]
psf = EnergyDependentTablePSF.read(psf_file)

# *** LOADING INPUT ***

# Counts must be provided as a counts ImageHDU
# File names and the spectral index are read interactively.
flux_file = raw_input("Flux Map: ")
exposure_file = raw_input("Exposure Map: ")
spec_ind = input("Spectral Index (for reprojection): ")
flux_hdu = fits.open(flux_file)[1]
flux_wcs = WCS(flux_hdu.header)
# Single reference energy from the Lafferty power-law prescription (MeV).
energy_flux = Quantity([_energy_lafferty_power_law(10000, 500000, spec_ind)], "MeV")
# Fixed all-sky 0.1-deg grid: 1800 x 3600 pixels, one energy plane.
flux_data = np.zeros((1, 1800, 3600))
flux_data[0] = Quantity(flux_hdu.data, "")
flux_spec_cube = SpectralCube(data=flux_data, wcs=flux_wcs, energy=energy_flux)
exposure_hdu = fits.open(exposure_file)[0]
"""Test npred model image computation. """ from astropy.coordinates import Angle from gammapy.datasets import FermiGalacticCenter from gammapy.utils.energy import EnergyBounds from gammapy.irf import EnergyDependentTablePSF from gammapy.cube import SkyCube, compute_npred_cube, convolve_cube filenames = FermiGalacticCenter.filenames() flux_cube = SkyCube.read(filenames['diffuse_model']) exposure_cube = SkyCube.read(filenames['exposure_cube']) psf = EnergyDependentTablePSF.read(filenames['psf']) flux_cube = flux_cube.reproject_to(exposure_cube) energy_bounds = EnergyBounds([10, 30, 100, 500], 'GeV') npred_cube = compute_npred_cube(flux_cube, exposure_cube, energy_bounds) offset_max = Angle(1, 'deg') npred_cube_convolved = convolve_cube(npred_cube, psf, offset_max)
def __init__(self, selection="short", savefig=True):
    """Set up the Fermi 3FHL validation analysis.

    Parameters
    ----------
    selection : {"short", "long", "debug"}
        Which set of ROIs to analyse (see below).
    savefig : bool
        Whether figures should be saved by downstream methods.
    """
    self.datadir = "$GAMMAPY_DATA"
    self.resdir = "./res"
    self.savefig = savefig

    # event list
    self.events = EventList.read(
        self.datadir + "/fermi_3fhl/fermi_3fhl_events_selected.fits.gz")

    # psf
    self.psf = EnergyDependentTablePSF.read(
        self.datadir + "/fermi_3fhl/fermi_3fhl_psf_gc.fits.gz")

    # mask margin: 99% containment radius at 10 GeV, rounded up to 0.1 deg
    psf_r99max = self.psf.containment_radius(10 * u.GeV, fraction=0.99)
    self.psf_margin = np.ceil(psf_r99max.value[0] * 10) / 10.0

    # energies
    self.dlb = 1 / 8.0
    El_extra = 10**np.arange(3.8, 6.51, 0.1)  # MeV
    # Log-centers of the extrapolation bins.
    self.logEc_extra = (np.log10(El_extra)[1:] + np.log10(El_extra)[:-1]) / 2.0
    self.El_flux = [10.0, 20.0, 50.0, 150.0, 500.0, 2000.0]
    El_fit = 10**np.arange(1, 3.31, 0.1)
    self.energy_axis = MapAxis.from_edges(El_fit, name="energy",
                                          unit="GeV", interp="log")

    # background iso: extrapolate the isotropic diffuse model to high energy
    infile = Path(self.datadir + "/fermi_3fhl/iso_P8R2_SOURCE_V6_v06.txt")
    outfile = Path(self.resdir + "/iso_P8R2_SOURCE_V6_v06_extra.txt")
    self.model_iso = extrapolate_iso(infile, outfile, self.logEc_extra)

    # regions selection
    file3fhl = self.datadir + "/catalogs/fermi/gll_psch_v13.fit.gz"
    self.FHL3 = SourceCatalog3FHL(file3fhl)
    hdulist = fits.open(make_path(file3fhl))
    self.ROIs = hdulist["ROIs"].data
    Scat = hdulist[1].data
    # ROIs ordered by decreasing average significance.
    order = np.argsort(Scat.Signif_Avg)[::-1]
    ROIs_ord = Scat.ROI_num[order]

    if selection == "short":
        self.ROIs_sel = [430, 135, 118, 212, 277, 42, 272, 495]
        # Crab, Vela, high-lat, +some fast regions
    elif selection == "long":
        # get small regions with few sources among the most significant
        indexes = np.unique(ROIs_ord, return_index=True)[1]
        ROIs_ord = [ROIs_ord[index] for index in sorted(indexes)]
        self.ROIs_sel = [
            kr
            for kr in ROIs_ord
            if sum(Scat.ROI_num == kr) <= 4 and self.ROIs.RADIUS[kr] < 6
        ][:100]
    elif selection == "debug":
        self.ROIs_sel = [135]  # Vela region

    # fit options
    self.optimize_opts = {
        "backend": "minuit",
        "tol": 10.0,
        "strategy": 2,
    }

    # calculate flux points only for sources significant above this threshold
    self.sig_cut = 8.0

    # diagnostics stored to produce plots and outputs
    self.diags = {
        "message": [],
        "stat": [],
        "params": {},
        "errel": {},
        "compatibility": {},
        "cat_fp_sel": [],
    }
    self.diags["errel"]["flux_points"] = []
    keys = [
        "PL_tags",
        "PL_index",
        "PL_amplitude",
        "LP_tags",
        "LP_alpha",
        "LP_beta",
        "LP_amplitude",
    ]
    for key in keys:
        self.diags["params"][key] = []
def setup(self):
    """Load the bundled Fermi PSF used by the tests in this class."""
    self.psf = EnergyDependentTablePSF.read(
        "$GAMMAPY_DATA/tests/unbundled/fermi/psf.fits"
    )
def __init__(self, selection="short", savefig=True):
    """Set up the Fermi 3FHL validation analysis.

    Parameters
    ----------
    selection : {"short", "long", "debug"}
        Which set of ROIs to analyse (see below).
    savefig : bool
        Whether figures should be saved by downstream methods.

    Raises
    ------
    ValueError
        If ``selection`` is not one of the supported values.
    """
    log.info("Executing __init__()")
    self.resdir = BASE_PATH / "results"
    self.savefig = savefig

    # event list
    self.events = EventList.read(
        "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_events_selected.fits.gz"
    )

    # psf
    self.psf = EnergyDependentTablePSF.read(
        "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_psf_gc.fits.gz"
    )

    # mask margin: 99% containment radius at 10 GeV, rounded up to 0.1 deg
    psf_r99max = self.psf.containment_radius(10 * u.GeV, fraction=0.99)
    self.psf_margin = np.ceil(psf_r99max.value[0] * 10) / 10.0

    # energies
    self.El_flux = [10.0, 20.0, 50.0, 150.0, 500.0, 2000.0]
    El_fit = 10 ** np.arange(1, 3.31, 0.1)
    self.energy_axis = MapAxis.from_edges(
        El_fit, name="energy", unit="GeV", interp="log"
    )

    # iso norm=0.92 see paper appendix A
    self.model_iso = create_fermi_isotropic_diffuse_model(
        filename="data/iso_P8R2_SOURCE_V6_v06_extrapolated.txt",
        norm=0.92,
        interp_kwargs={"fill_value": None},
    )

    # regions selection
    file3fhl = "$GAMMAPY_DATA/catalogs/fermi/gll_psch_v13.fit.gz"
    self.FHL3 = SourceCatalog3FHL(file3fhl)
    hdulist = fits.open(make_path(file3fhl))
    self.ROIs = hdulist["ROIs"].data
    Scat = hdulist[1].data
    # ROIs ordered by decreasing average significance.
    order = np.argsort(Scat.Signif_Avg)[::-1]
    ROIs_ord = Scat.ROI_num[order]

    if selection == "short":
        self.ROIs_sel = [430, 135, 118, 212, 277, 42, 272, 495]
        # Crab, Vela, high-lat, +some fast regions
    elif selection == "long":
        # get small regions with few sources among the most significant
        indexes = np.unique(ROIs_ord, return_index=True)[1]
        ROIs_ord = [ROIs_ord[index] for index in sorted(indexes)]
        self.ROIs_sel = [
            kr
            for kr in ROIs_ord
            if sum(Scat.ROI_num == kr) <= 4 and self.ROIs.RADIUS[kr] < 6
        ][:100]
    elif selection == "debug":
        self.ROIs_sel = [135]  # Vela region
    else:
        raise ValueError(f"Invalid selection: {selection!r}")

    # fit options
    self.optimize_opts = {
        "backend": "minuit",
        "optimize_opts": {"tol": 10.0, "strategy": 2},
    }

    # calculate flux points only for sources significant above this threshold
    self.sig_cut = 8.0

    # diagnostics stored to produce plots and outputs
    self.diags = {
        "message": [],
        "stat": [],
        "params": {},
        "errel": {},
        "compatibility": {},
        "cat_fp_sel": [],
    }
    self.diags["errel"]["flux_points"] = []
    keys = [
        "PL_tags",
        "PL_index",
        "PL_amplitude",
        "LP_tags",
        "LP_alpha",
        "LP_beta",
        "LP_amplitude",
    ]
    for key in keys:
        self.diags["params"][key] = []
# Compute and inspect a Fermi-LAT PSF kernel image.
from astropy.coordinates import Angle
from astropy.units import Quantity
from astropy.io import fits
from gammapy.datasets import FermiGalacticCenter
from gammapy.irf import EnergyDependentTablePSF

# Parameters
filename = FermiGalacticCenter.filenames()['psf']
pixel_size = Angle(0.1, 'deg')
offset_max = Angle(2, 'deg')
energy = Quantity(10, 'GeV')
energy_band = Quantity([10, 500], 'GeV')
outfile = 'fermi_psf_image.fits'

# Compute PSF image
fermi_psf = EnergyDependentTablePSF.read(filename)
#psf = fermi_psf.table_psf_at_energy(energy=energy)
psf = fermi_psf.table_psf_in_energy_band(energy_band=energy_band, spectral_index=2.5)
psf.normalize()
kernel = psf.kernel(pixel_size=pixel_size, offset_max=offset_max)
kernel_image = kernel.value
# Sanity check: the kernel integral over solid angle should be ~1.
kernel_image_integral = kernel_image.sum() * pixel_size.to('radian').value ** 2
print('Kernel image integral: {0}'.format(kernel_image_integral))
print('shape: {0}'.format(kernel_image.shape))
#import IPython; IPython.embed()

# Print some info and save to FITS file
#print(fermi_psf.info())
print(psf.info())
# Compute a Fermi-LAT PSF kernel image and save it to FITS.
from astropy.coordinates import Angle
from astropy.units import Quantity
from astropy.io import fits
from gammapy.datasets import FermiGalacticCenter
from gammapy.irf import EnergyDependentTablePSF

# Parameters
filename = FermiGalacticCenter.filenames()['psf']
pixel_size = Angle(0.1, 'deg')
offset_max = Angle(2, 'deg')
energy = Quantity(10, 'GeV')
energy_band = Quantity([10, 500], 'GeV')
outfile = 'fermi_psf_image.fits'

# Compute PSF image
fermi_psf = EnergyDependentTablePSF.read(filename)
# psf = fermi_psf.table_psf_at_energy(energy=energy)
psf = fermi_psf.table_psf_in_energy_band(energy_band=energy_band, spectral_index=2.5)
psf.normalize()
kernel = psf.kernel(pixel_size=pixel_size, offset_max=offset_max)
kernel_image = kernel.value

# Sanity check: the kernel integral over solid angle should be ~1.
kernel_image_integral = kernel_image.sum() * pixel_size.to('radian').value**2
print('Kernel image integral: {0}'.format(kernel_image_integral))
print('shape: {0}'.format(kernel_image.shape))

print(psf.info())
print('Writing {0}'.format(outfile))
# FIX: `clobber` is deprecated in astropy.io.fits; use `overwrite` instead.
fits.writeto(outfile, data=kernel_image, overwrite=True)
# In[ ]: erange = [50, 2000] * u.GeV diffuse_iso.plot(erange, flux_unit="1 / (cm2 MeV s sr)"); # ## PSF # # Next we will tke a look at the PSF. It was computed using ``gtpsf``, in this case for the Galactic center position. Note that generally for Fermi-LAT, the PSF only varies little within a given regions of the sky, especially at high energies like what we have here. We use the [gammapy.irf.EnergyDependentTablePSF](http://docs.gammapy.org/dev/api/gammapy.irf.EnergyDependentTablePSF.html) class to load the PSF and use some of it's methods to get some information about it. # In[ ]: psf = EnergyDependentTablePSF.read( "$GAMMAPY_FERMI_LAT_DATA/3fhl/allsky/fermi_3fhl_psf_gc.fits.gz" ) print(psf) # To get an idea of the size of the PSF we check how the containment radii of the Fermi-LAT PSF vari with energy and different containment fractions: # In[ ]: plt.figure(figsize=(8, 5)) psf.plot_containment_vs_energy(linewidth=2, fractions=[0.68, 0.95]) plt.xlim(50, 2000) plt.show()
# NOTE(review): this script uses `raw_input`/`input` and so is Python 2 only.
from gammapy.image import make_empty_image, catalog_image, binary_disk
from gammapy.image.utils import cube_to_image, solid_angle
from gammapy.data import SpectralCube
from gammapy.image.utils import WCS
from gammapy.spectrum.flux_point import _energy_lafferty_power_law

# *** PREPARATION ***

# Parameters
CORRELATION_RADIUS = 3  # pix
SIGNIFICANCE_THRESHOLD = 5
MASK_DILATION_RADIUS = 0.3
psf_file = FermiGalacticCenter.filenames()['psf']
psf = EnergyDependentTablePSF.read(psf_file)

# *** LOADING INPUT ***

# Counts must be provided as a counts ImageHDU
# File names and the spectral index are read interactively.
flux_file = raw_input('Flux Map: ')
exposure_file = raw_input('Exposure Map: ')
spec_ind = input('Spectral Index (for reprojection): ')
flux_hdu = fits.open(flux_file)[1]
flux_wcs = WCS(flux_hdu.header)
# Single reference energy from the Lafferty power-law prescription (MeV).
energy_flux = Quantity([_energy_lafferty_power_law(10000, 500000, spec_ind)], 'MeV')
# Fixed all-sky 0.1-deg grid: 1800 x 3600 pixels, one energy plane.
flux_data = np.zeros((1, 1800, 3600))
flux_data[0] = Quantity(flux_hdu.data, '')
flux_spec_cube = SpectralCube(data=flux_data, wcs=flux_wcs, energy=energy_flux)
# NOTE(review): the opening of this call is outside this chunk; the two
# keyword arguments below close a call started earlier.
    filename=filename, interp_kwargs=interp_kwargs)

# We can plot the model in the energy range between 50 GeV and 2000 GeV:

# In[ ]:

erange = [50, 2000] * u.GeV
diffuse_iso.plot(erange, flux_unit="1 / (cm2 MeV s sr)")

# ## PSF
#
# Next we will take a look at the PSF. It was computed using ``gtpsf``, in this case for the Galactic center position. Note that generally for Fermi-LAT, the PSF only varies little within a given regions of the sky, especially at high energies like what we have here. We use the [gammapy.irf.EnergyDependentTablePSF](https://docs.gammapy.org/0.12/api/gammapy.irf.EnergyDependentTablePSF.html) class to load the PSF and use some of its methods to get some information about it.

# In[ ]:

psf = EnergyDependentTablePSF.read(
    "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_psf_gc.fits.gz")
print(psf)

# To get an idea of the size of the PSF we check how the containment radii of the Fermi-LAT PSF vary with energy and different containment fractions:

# In[ ]:

plt.figure(figsize=(8, 5))
psf.plot_containment_vs_energy(linewidth=2, fractions=[0.68, 0.95])
plt.xlim(50, 2000)
plt.show()

# In addition we can check how the actual shape of the PSF varies with energy and compare it against the mean PSF between 50 GeV and 2000 GeV:

# In[ ]: