Example #1
    def from_arrays(cls,
                    ebounds,
                    int_flux,
                    int_flux_err_hi=None,
                    int_flux_err_lo=None):
        """Create `~gammapy.spectrum.IntegralFluxPoints` from numpy arrays"""
        t = Table()
        ebounds = EnergyBounds(ebounds)
        int_flux = Quantity(int_flux)
        if not int_flux.unit.is_equivalent('cm-2 s-1'):
            raise ValueError('Flux (unit {}) not an integrated flux'.format(
                int_flux.unit))

        # Set errors to zero by default
        def_f = np.zeros(ebounds.nbins) * int_flux.unit
        int_flux_err_hi = def_f if int_flux_err_hi is None else int_flux_err_hi
        int_flux_err_lo = def_f if int_flux_err_lo is None else int_flux_err_lo

        t['ENERGY_MIN'] = ebounds.lower_bounds
        t['ENERGY_MAX'] = ebounds.upper_bounds
        t['INT_FLUX'] = int_flux
        t['INT_FLUX_ERR_HI'] = int_flux_err_hi
        t['INT_FLUX_ERR_LO'] = int_flux_err_lo

        t['INT_FLUX_ERR_HI_%'] = 100 * int_flux_err_hi / int_flux
        t['INT_FLUX_ERR_LO_%'] = 100 * int_flux_err_lo / int_flux
        return cls(t)
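
A minimal usage sketch for from_arrays, assuming the class is importable as `gammapy.spectrum.IntegralFluxPoints` (as its docstring indicates); the flux values are placeholders:

# Hypothetical usage of from_arrays with placeholder numbers
from astropy.units import Quantity
from gammapy.spectrum import IntegralFluxPoints
from gammapy.utils.energy import EnergyBounds

ebounds = EnergyBounds([1, 3, 10], 'TeV')
int_flux = Quantity([2e-12, 5e-13], 'cm-2 s-1')
int_flux_err = Quantity([4e-13, 1e-13], 'cm-2 s-1')

flux_points = IntegralFluxPoints.from_arrays(
    ebounds, int_flux,
    int_flux_err_hi=int_flux_err,
    int_flux_err_lo=int_flux_err)
# flux_points now holds ENERGY_MIN/MAX, INT_FLUX and the error columns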
Example #2
    def from_2fhl(cls, source):
        """Get `~gammapy.spectrum.IntegralFluxPoints` for a 2FHL source

        Parameters
        ----------
        source : dict
            2FHL source
        """
        ebounds = EnergyBounds([50, 171, 585, 2000], 'GeV')
        fluxkeys = ['Flux50_171GeV', 'Flux171_585GeV', 'Flux585_2000GeV']
        temp_fluxes = [source.data[_] for _ in fluxkeys]

        fluxerrkeys = [
            'Unc_Flux50_171GeV', 'Unc_Flux171_585GeV', 'Unc_Flux585_2000GeV'
        ]

        temp_fluxes_err_hi = [source.data[_][1] for _ in fluxerrkeys]
        temp_fluxes_err_lo = [-1 * source.data[_][0] for _ in fluxerrkeys]

        int_fluxes = Quantity(temp_fluxes, 'cm-2 s-1')
        int_fluxes_err_hi = Quantity(temp_fluxes_err_hi, 'cm-2 s-1')
        int_fluxes_err_lo = Quantity(temp_fluxes_err_lo, 'cm-2 s-1')

        return cls.from_arrays(ebounds, int_fluxes, int_fluxes_err_hi,
                               int_fluxes_err_lo)
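
from_2fhl only relies on `source.data` behaving like a mapping with the flux and uncertainty columns read above, so a stand-in object is enough to exercise it; the numbers below are placeholders, not real catalog values:

# Hypothetical stand-in source exposing the columns from_2fhl reads
from gammapy.spectrum import IntegralFluxPoints

class FakeSource:
    data = {
        'Flux50_171GeV': 2.0e-11,
        'Flux171_585GeV': 8.0e-12,
        'Flux585_2000GeV': 1.5e-12,
        # uncertainty entries are (lower, upper) pairs, as the indexing above implies
        'Unc_Flux50_171GeV': (-4.0e-12, 4.5e-12),
        'Unc_Flux171_585GeV': (-2.0e-12, 2.2e-12),
        'Unc_Flux585_2000GeV': (-6.0e-13, 7.0e-13),
    }

flux_points = IntegralFluxPoints.from_2fhl(FakeSource())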
Example #3
    def e_reco(self):
        """Reconstructed energy binning."""
        if self.edisp is not None:
            temp = self.edisp.e_reco.bins
        else:
            temp = self.aeff.energy.bins
        return EnergyBounds(temp)
Example #4
    def from_3fgl(cls, source, x_method='log_center'):
        """Get `~gammapy.spectrum.DifferentialFluxPoints` for a 3FGL source

        Parameters
        ----------
        source : dict
            3FGL source
        """
        ebounds = EnergyBounds([100, 300, 1000, 3000, 10000, 100000], 'MeV')

        if x_method == 'log_center':
            energy = ebounds.log_centers
        else:
            raise ValueError('Undefined x method: {}'.format(x_method))
        fluxkeys = ['nuFnu100_300', 'nuFnu300_1000', 'nuFnu1000_3000', 'nuFnu3000_10000', 'nuFnu10000_100000']
        temp_fluxes = [source.data[_] for _ in fluxkeys]
        energy_fluxes = Quantity(temp_fluxes, 'erg cm-2 s-1')
        diff_fluxes = (energy_fluxes * energy ** -2).to('erg-1 cm-2 s-1')

        # Get relative errors on the integral fluxes
        int_flux_points = IntegralFluxPoints.from_3fgl(source)
        val = int_flux_points['INT_FLUX'].quantity
        rel_error_hi = int_flux_points['INT_FLUX_ERR_HI'] / val
        rel_error_lo = int_flux_points['INT_FLUX_ERR_LO'] / val

        diff_fluxes_err_hi = diff_fluxes * rel_error_hi
        diff_fluxes_err_lo = diff_fluxes * rel_error_lo

        return cls.from_arrays(energy=energy, diff_flux=diff_fluxes,
                               diff_flux_err_lo=diff_fluxes_err_lo,
                               diff_flux_err_hi=diff_fluxes_err_hi)
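
The nuFnu-to-differential-flux step above is a plain division by E**2, which astropy units handle directly; a small worked check with a toy value:

# Worked unit check for the nuFnu -> dN/dE conversion used above (toy value)
from astropy.units import Quantity

energy = Quantity(548.0, 'MeV')                  # ~log center of the 300-1000 MeV bin
energy_flux = Quantity(1e-11, 'erg cm-2 s-1')    # nuFnu value (placeholder)
diff_flux = (energy_flux * energy ** -2).to('erg-1 cm-2 s-1')
print(diff_flux)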
Example #5
def prepare_images():
    # Read in data
    background_file = FermiVelaRegion.filenames()['diffuse_model']
    exposure_file = FermiVelaRegion.filenames()['exposure_cube']
    counts_file = FermiVelaRegion.filenames()['counts_cube']
    background_model = SkyCube.read(background_file)
    exposure_cube = SkyCube.read(exposure_file)

    # Add correct units
    exposure_cube.data = Quantity(exposure_cube.data.value, 'cm2 s')

    # Re-project background cube
    repro_bg_cube = background_model.reproject_to(exposure_cube)

    # Define energy band required for output
    energies = EnergyBounds([10, 500], 'GeV')

    # Compute the predicted counts cube
    npred_cube = compute_npred_cube(repro_bg_cube, exposure_cube, energies)

    # Convolve with Energy-dependent Fermi LAT PSF
    psf = EnergyDependentTablePSF.read(FermiVelaRegion.filenames()['psf'])
    convolved_npred_cube = convolve_cube(npred_cube,
                                         psf,
                                         offset_max=Angle(3, 'deg'))

    # Counts data
    counts_data = fits.open(counts_file)[0].data
    counts_wcs = WCS(fits.open(counts_file)[0].header)
    counts_cube = SkyCube(data=Quantity(counts_data, ''),
                          wcs=counts_wcs,
                          energy=energies)
    counts_cube = counts_cube.reproject_to(npred_cube,
                                           projection_type='nearest-neighbor')

    counts = counts_cube.data[0]
    model = convolved_npred_cube.data[0]

    # Load Fermi tools gtmodel background-only result
    gtmodel = fits.open(
        FermiVelaRegion.filenames()['background_image'])[0].data.astype(float)

    # Ratio for the two background images
    ratio = np.nan_to_num(model / gtmodel)

    # Header is required for plotting, so returned here
    wcs = npred_cube.wcs
    header = wcs.to_header()

    return model, gtmodel, ratio, counts, header
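
A rough quick-look plot of the returned arrays; this sketch ignores the WCS header and uses plain pixel coordinates (the header is returned precisely so that proper sky coordinates can be used instead):

# Quick-look plot of the prepare_images() outputs in pixel coordinates
import matplotlib.pyplot as plt

model, gtmodel, ratio, counts, header = prepare_images()

fig, axes = plt.subplots(2, 2, figsize=(10, 8))
images = [model, gtmodel, ratio, counts]
titles = ['npred model', 'gtmodel', 'ratio', 'counts']
for ax, image, title in zip(axes.flat, images, titles):
    im = ax.imshow(image, origin='lower')
    ax.set_title(title)
    fig.colorbar(im, ax=ax)
plt.show()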
Example #6
def test_model(model):
    print(model)
    print(model(energy=Q(10, 'TeV')))
    print(model.integral(emin=Q(1, 'TeV'), emax=Q(2, 'TeV')))

    # plot
    # butterfly
    # npred
    reco_bins = 5
    true_bins = 10
    e_reco = Q(np.logspace(-1, 1, reco_bins + 1), 'TeV')
    e_true = Q(np.logspace(-1.5, 1.5, true_bins + 1), 'TeV')
    livetime = Q(26, 'min')
    aeff_data = Q(np.ones(true_bins) * 1e5, 'cm2')
    aeff = EffectiveAreaTable(energy=e_true, data=aeff_data)
    edisp_data = make_perfect_resolution(e_true, e_reco)
    edisp = EnergyDispersion(edisp_data, EnergyBounds(e_true),
                             EnergyBounds(e_reco))
    npred = calculate_predicted_counts(model=model,
                                       livetime=livetime,
                                       aeff=aeff,
                                       edisp=edisp)
    print(npred.data)
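
test_model only needs an object that is callable with an energy and exposes an integral(emin, emax) method; a small analytic power-law stand-in (not gammapy's own model class) is enough for the direct calls at the top of the function:

# Toy power-law stand-in implementing the interface test_model uses
import astropy.units as u

class ToyPowerLaw:
    """dN/dE = amplitude * (E / reference) ** (-index)."""

    def __init__(self, amplitude, index, reference):
        self.amplitude = amplitude
        self.index = index
        self.reference = reference

    def __call__(self, energy):
        return self.amplitude * (energy / self.reference) ** (-self.index)

    def integral(self, emin, emax):
        # analytic integral of the power law between emin and emax
        term = 1 - self.index
        return (self.amplitude * self.reference / term *
                ((emax / self.reference) ** term - (emin / self.reference) ** term))

model = ToyPowerLaw(u.Quantity(1e-11, 'cm-2 s-1 TeV-1'), 2.3, u.Quantity(1, 'TeV'))
print(model(energy=u.Quantity(10, 'TeV')))
print(model.integral(emin=u.Quantity(1, 'TeV'), emax=u.Quantity(2, 'TeV')))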
Example #7
    def make_1d_expected_counts(self,
                                spectral_index=2.3,
                                for_integral_flux=False,
                                eref=None):
        """Compute the 1D exposure table for one observation for an offset table.

        Parameters
        ----------
        spectral_index : float
            Assumed power-law spectral index
        for_integral_flux : bool
            True if you want that the total excess / exposure gives the integrated flux
        eref: `~gammapy.utils.energy.Energy`
            Reference energy at which you want to compute the exposure. Default is the log center of the energy band of
             the image.
        Returns
        -------
        table : `astropy.table.Table`
            Two columns: offset in the FOV "theta" and expected counts "npred"
        """
        energy = EnergyBounds.equal_log_spacing(self.energy_band[0].value,
                                                self.energy_band[1].value, 100,
                                                self.energy_band.unit)
        energy_band = energy.bands
        energy_bin = energy.log_centers
        if not eref:
            eref = EnergyBounds(self.energy_band).log_centers
        spectrum = (energy_bin / eref)**(-spectral_index)
        offset = Angle(
            np.linspace(self.offset_band[0].value, self.offset_band[1].value,
                        10), self.offset_band.unit)
        arf = self.aeff.data.evaluate(offset=offset, energy=energy_bin).T
        npred = np.sum(arf * spectrum * energy_band, axis=1)
        npred *= self.livetime

        if for_integral_flux:
            norm = np.sum(spectrum * energy_band)
            npred /= norm

        table = Table()
        table['theta'] = offset
        table['npred'] = npred

        return table
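
The core of the method is the weighted sum npred(theta) = livetime * sum_E A_eff(theta, E) * (E / E_ref)**(-spectral_index) * dE; a stripped-down, single-offset version with a flat toy effective area instead of self.aeff:

# Stripped-down version of the spectral weighting above (toy effective area)
import numpy as np
import astropy.units as u
from gammapy.utils.energy import EnergyBounds

spectral_index = 2.3
energy = EnergyBounds.equal_log_spacing(0.1, 10, 100, 'TeV')
energy_band = energy.bands          # bin widths
energy_bin = energy.log_centers     # bin centers
eref = EnergyBounds([0.1, 10], 'TeV').log_centers

spectrum = (energy_bin / eref) ** (-spectral_index)
aeff = u.Quantity(np.ones(100) * 1e5, 'm2')   # flat toy effective area
livetime = u.Quantity(30, 'min')

# Multiplied by an absolute flux normalization this would give expected counts
npred = np.sum(aeff * spectrum * energy_band) * livetime
print(npred.to('m2 s TeV'))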
Example #8
def prepare_images():
    # Read in data
    fermi_vela = FermiVelaRegion()
    background_file = FermiVelaRegion.filenames()['diffuse_model']
    exposure_file = FermiVelaRegion.filenames()['exposure_cube']
    counts_file = FermiVelaRegion.filenames()['counts_cube']
    background_model = SkyCube.read(background_file, format='fermi-background')
    exposure_cube = SkyCube.read(exposure_file, format='fermi-exposure')

    # Re-project background cube
    repro_bg_cube = background_model.reproject(exposure_cube)

    # Define energy band required for output
    energies = EnergyBounds([10, 500], 'GeV')

    # Compute the predicted counts cube
    npred_cube = compute_npred_cube(repro_bg_cube,
                                    exposure_cube,
                                    energies,
                                    integral_resolution=5)

    # Convolve with Energy-dependent Fermi LAT PSF
    psf = EnergyDependentTablePSF.read(FermiVelaRegion.filenames()['psf'])
    kernels = psf.kernels(npred_cube)
    convolved_npred_cube = npred_cube.convolve(kernels, mode='reflect')

    # Counts data
    counts_cube = SkyCube.read(counts_file, format='fermi-counts')
    counts_cube = counts_cube.reproject(npred_cube)

    counts = counts_cube.data[0]
    model = convolved_npred_cube.data[0]

    # Load Fermi tools gtmodel background-only result
    gtmodel = fits.open(
        FermiVelaRegion.filenames()['background_image'])[0].data.astype(float)

    # Ratio for the two background images
    ratio = np.nan_to_num(model / gtmodel)

    # Header is required for plotting, so returned here
    wcs = npred_cube.wcs
    header = wcs.to_header()
    return model, gtmodel, ratio, counts, header
Example #9
def compute_sum_cube(flux_cube, flux_cube2, config):
    """Compute sum of two flux cubes.
        
    Parameters
    ----------
    flux_cube : `SkyCube`
        Flux cube 1, really differential surface brightness in 'cm-2 s-1 TeV-1 sr-1'.
    flux_cube2 : `SkyCube`
        Flux cube 2.
    config : `dict`
        Configuration dictionary.
        
    Returns
    -------
    nflux_cube_sum : `SkyCube`
        Sum of flux_cube and flux_cube2.

    See also
    --------
    read_config
    """
    ebin = flux_cube.energies(mode="edges")
    ebounds = EnergyBounds(ebin)
    
    nflux_cube_sum = make_ref_cube(config)
    for idx in range(len(ebounds) - 1):
        npred1 = flux_cube.sky_image_idx(idx)
        npred2 = flux_cube2.sky_image_idx(idx)

        nflux_sum = u.Quantity(npred1.data.value + npred2.data.value, '1 / (cm2 s sr TeV)')
        nflux_cube_sum.data[idx] = nflux_sum.value

    return nflux_cube_sum
Example #10
    def from_3fgl(cls, source):
        """Get `~gammapy.spectrum.IntegralFluxPoints` for a 3FGL source

        Parameters
        ----------
        source : dict
            3FGL source
        """
        ebounds = EnergyBounds([100, 300, 1000, 3000, 10000, 100000], 'MeV')
        fluxkeys = ['Flux100_300', 'Flux300_1000', 'Flux1000_3000', 'Flux3000_10000', 'Flux10000_100000']
        temp_fluxes = [source.data[_] for _ in fluxkeys]

        fluxerrkeys = ['Unc_Flux100_300', 'Unc_Flux300_1000', 'Unc_Flux1000_3000', 'Unc_Flux3000_10000', 'Unc_Flux10000_100000']

        temp_fluxes_err_hi = [source.data[_][1] for _ in fluxerrkeys]
        temp_fluxes_err_lo = [-1 * source.data[_][0] for _ in fluxerrkeys]

        int_fluxes = Quantity(temp_fluxes, 'cm-2 s-1')
        int_fluxes_err_hi = Quantity(temp_fluxes_err_hi, 'cm-2 s-1')
        int_fluxes_err_lo = Quantity(temp_fluxes_err_lo, 'cm-2 s-1')

        return cls.from_arrays(ebounds, int_fluxes, int_fluxes_err_hi,
                               int_fluxes_err_lo)
Example #11
"""Test npred model image computation.
"""
from astropy.coordinates import Angle
from gammapy.datasets import FermiGalacticCenter
from gammapy.utils.energy import EnergyBounds
from gammapy.irf import EnergyDependentTablePSF
from gammapy.cube import SkyCube, compute_npred_cube, convolve_cube

filenames = FermiGalacticCenter.filenames()
flux_cube = SkyCube.read(filenames['diffuse_model'])
exposure_cube = SkyCube.read(filenames['exposure_cube'])
psf = EnergyDependentTablePSF.read(filenames['psf'])

flux_cube = flux_cube.reproject_to(exposure_cube)

energy_bounds = EnergyBounds([10, 30, 100, 500], 'GeV')
npred_cube = compute_npred_cube(flux_cube, exposure_cube, energy_bounds)

offset_max = Angle(1, 'deg')
npred_cube_convolved = convolve_cube(npred_cube, psf, offset_max)
Example #12
def compute_nexcess_cube(npred_cube, livetime, pointing, offset_max, bkg_rate, config):
    '''Compute excess cube.
        
    Parameters
    ----------
    npred_cube : `SkyCube`
        Predicted counts cube.
    livetime : `Quantity`
        Observation time.
    pointing : `SkyCoord`
        Pointing coordinates.
    offset_max : `Angle`
        Offset.
    bkg_rate : `Background3D`
        Background rate.
    config : `dict`
        Configuration dictionary.

    Returns
    -------
    nexcess_cube : `SkyCube`
        Excess counts cube (n_on - n_off).
    non_cube : `SkyCube`
        On counts cube.
    noff_cube : `SkyCube`
        Off counts cube.

    See also
    --------
    read_config
    get_irfs
    '''
    ebin = npred_cube.energies(mode="edges")
    ebounds = EnergyBounds(ebin)

    nexcess_cube = make_ref_cube(config)
    non_cube = make_ref_cube(config)
    noff_cube = make_ref_cube(config)
    
    # Compute two independent background cubes (one for the on region, one for the off)
    nbkg1_cube = make_background_cube(pointing=pointing, obstime=livetime, bkg=bkg_rate,
                                      ref_cube=npred_cube, offset_max=offset_max)
    nbkg2_cube = make_background_cube(pointing=pointing, obstime=livetime, bkg=bkg_rate,
                                      ref_cube=npred_cube, offset_max=offset_max)

    # For each energy bin, obtain the correct background rate (one for the on and one for the off observation)
    for idx in range(len(ebounds) - 1):
        emin, emax = ebounds[idx: idx + 2]
        ecenter = np.sqrt(emin * emax)
        print('Energy bins:')
        print(emin, emax)

        npred = npred_cube.sky_image_idx(idx)
        npred.unit = u.Unit('TeV')
        solid_angle = npred.solid_angle()
        npred.data.value[np.isnan(npred.data.value)] = 0.

        nbkg1_ebin = nbkg1_cube.data[idx]
        nbkg2_ebin = nbkg2_cube.data[idx]

        # Draw Poisson realizations of the on and off counts and form the excess
        n_on = np.random.poisson(npred.data) + np.random.poisson(abs(nbkg1_ebin))
        n_off = np.random.poisson(abs(nbkg2_ebin))
        nexcess = n_on - n_off
        nexcess_cube.data[idx] = nexcess
        non_cube.data[idx] = n_on
        noff_cube.data[idx] = n_off
    return nexcess_cube, non_cube, noff_cube
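
The statistical core of the loop above is the on/off Poisson sampling; in isolation, with toy numbers and plain numpy:

# On/off sampling in isolation: the excess scatters around the predicted counts
import numpy as np

npred = np.full((5, 5), 4.0)    # predicted source counts per pixel (toy)
bkg = np.full((5, 5), 10.0)     # predicted background counts per pixel (toy)

n_on = np.random.poisson(npred) + np.random.poisson(bkg)
n_off = np.random.poisson(bkg)
excess = n_on - n_off
print(excess.mean())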
Example #13
def compute_npred_cube(flux_cube, exposure_cube, ebounds, config, irfs,
                       integral_resolution=10):
    """Compute predicted counts cube.
        
    Parameters
    ----------
    flux_cube : `SkyCube`
        Flux cube, really differential surface brightness in 'cm-2 s-1 TeV-1 sr-1'.
    exposure_cube : `SkyCube`
        Exposure cube.
    ebounds : `~astropy.units.Quantity`
        Energy bounds for the output cube.
    config : `dict`
        Configuration dictionary.
    irfs : `dict`
        IRFs dictionary.
    integral_resolution : int (optional)
        Number of integration steps in energy bin when computing integral flux.
        
    Returns
    -------
    npred_cube : `SkyCube`
        Predicted counts cube with energy bounds as given by the input ``ebounds``.
        
    See also
    --------
    compute_npred_cube_simple
    read_config
    See get_irfs
    """
    _validate_inputs(flux_cube, exposure_cube)
    
    # Make an empty cube with the requested energy binning
    sky_geom = exposure_cube.sky_image_ref
    energies = EnergyBounds(ebounds)
    npred_cube = SkyCube.empty_like(sky_geom, energies=energies, unit='', fill=np.nan)
    
    # Process and fill one energy bin at a time
    for idx in range(len(ebounds) - 1):
        emin, emax = ebounds[idx: idx + 2]
        ecenter = np.sqrt(emin * emax)
        
        flux = flux_cube.sky_image_integral(emin, emax, interpolation='linear', nbins=integral_resolution)
        
        exposure = exposure_cube.sky_image(ecenter, interpolation='linear')
        solid_angle = exposure.solid_angle()
        
        flux.data.value[np.isnan(flux.data.value)] = 0
        exposure.data.value[np.isnan(exposure.data.value)] = 0
        npred = flux.data.value * u.Unit('1 / (cm2 s sr)') * exposure.data * solid_angle
        
        npred_cube.data[idx] = npred.to('')

    # Apply EnergyDispersion
    edisp = irfs['edisp']
    offset = irfs['offset']

    edisp_idx = edisp.to_energy_dispersion(offset=offset, e_reco=ebounds, e_true=ebounds)

    for pos_x in range(npred_cube.data.shape[1]):
        for pos_y in range(npred_cube.data.shape[2]):
            npred_pos = npred_cube.data[0:len(ebounds) - 1, pos_x, pos_y]
            if npred_pos.sum() != 0:
                for idx in range(len(ebounds) - 1):
                    npred_cube.data[idx, pos_x, pos_y] = np.dot(npred_pos, edisp_idx.data.data[idx])

    return npred_cube
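
The per-pixel loop at the end folds the true-energy spectrum through the energy dispersion as a dot product; the same operation on a single spectrum, with a made-up 3x3 migration matrix (convention assumed here: migration[i_true, j_reco] = P(reco bin j | true bin i), so each row sums to one):

# Folding a true-energy count vector through a toy migration matrix
import numpy as np

migration = np.array([[0.8, 0.2, 0.0],
                      [0.1, 0.8, 0.1],
                      [0.0, 0.2, 0.8]])
npred_true = np.array([100.0, 50.0, 10.0])

npred_reco = npred_true @ migration
print(npred_reco, npred_reco.sum())   # total counts are conserved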
Example #14
        ax = fig.add_subplot(2, 1, 2)
        pt.errorbar(b_rebin,
                    resid_b_rebin,
                    yerr=err_b_rebin,
                    linestyle='None',
                    marker="o",
                    label="Step= " + str(i_src))
        pt.axhline(y=0, color='red', linewidth=2)
        pt.legend()
        pt.ylabel("residual")
        pt.xlabel("latitude (degrees)")
        pt.title("latitude profile")
        pt.xlim(-1, 1)
        pdf_lat.savefig()

        E_center = EnergyBounds(energy_band).log_centers
        if E_center < 1 * u.TeV:
            pix = 5
        elif ((1 * u.TeV < E_center) & (E_center < 5 * u.TeV)):
            pix = 4
        else:
            pix = 2.5
        kernel = Gaussian2DKernel(pix)
        TS = compute_ts_image(map_data, model_map, exp_map, kernel)
        TS.write(outdir_plot + "/TS_map_step_" + str(i_src) + "_" + name +
                 "_" + str("%.2f" % E1) + "_" + str("%.2f" % E2) + "_TeV.fits",
                 clobber=True)
        sig = SkyImage.empty(TS["ts"])
        sig.data = np.sqrt(TS["ts"].data)
        sig.name = "sig"
        sig.write(outdir_plot + "/significance_map_step_" + str(i_src) + "_" +
Example #15
edisp1 = EnergyDispersion.from_gauss(
    e_true=e_true,
    e_reco=e_reco,
    sigma=0.2,
    bias=0,
)
edisp2 = EnergyDispersion.from_gauss(
    e_true=e_true,
    e_reco=e_reco,
    sigma=0.2,
    bias=0,
)

#Aeff
ee = EnergyBounds(np.logspace(-2, 2.5, 109) * u.TeV)
p1 = 6.85e9
p2 = 0.0891
p3 = 5.0e5

f = lambda x: p1 * (x / u.MeV)**(-p2) * np.exp(((-p3) * u.MeV) / x)
value = f(ee.log_centers.to('MeV'))
data = value * u.cm**2
aeff1 = EffectiveAreaTable(ee.lower_bounds, ee.upper_bounds, data)

f2 = lambda x: p1 * (x / u.MeV)**(-p2) * np.exp(((-p3 * factor) * u.MeV) / x)
value2 = f2(ee.log_centers.to('MeV'))
data2 = value2 * u.cm**2
aeff2 = EffectiveAreaTable(ee.lower_bounds, ee.upper_bounds, data2)

#======================================
Example #16
print(err)


def PWL(E, phi_0, gamma):
    return phi_0 * E ** (-gamma)


def EXP(E, phi_0, gamma, beta):
    return phi_0 * E ** (-gamma) * np.exp(-beta * E)


coord = exposure_3D.sky_image_ref.coordinates(mode="edges")
d = coord.separation(center)
pix_size = exposure_3D.wcs.to_header()["CDELT2"]
i = np.where(d < pix_size * u.deg)
# i selects the pixels around the source, used to average the exposure around it
mean_exposure = list()
for ie in range(len(exposure_3D.energies())):
    mean_exposure.append(exposure_3D.data[ie, i[0], i[1]].value.mean())
etrue = EnergyBounds(exposure_3D.energies("edges")).log_centers
etrue_band = EnergyBounds(exposure_3D.energies("edges")).bands
dic_result_fit = dict()
if "spectral-model.beta" in err.parnames:
    for name, val in zip(err.parnames, err.parvals):
        dic_result_fit[name] = val
    spectre = EXP(etrue.value, dic_result_fit["spatial-model.ampl"] * 1e-11,
                  dic_result_fit["spectral-model.gamma"], dic_result_fit["spectral-model.beta"])
else:
    for name, val in zip(err.parnames, err.parvals):
        dic_result_fit[name] = val
    spectre = PWL(etrue.value, dic_result_fit["spatial-model.ampl"] * 1e-11,
                  dic_result_fit["spectral-model.gamma"])
covolve_edisp = np.zeros((rmf.data.shape[1], exposure_3D.data.shape[0]))
for ireco in range(rmf.data.shape[1]):
    covolve_edisp[ireco, :] = spectre * np.asarray(mean_exposure) * rmf.data[:, ireco] * etrue_band
npred = np.sum(covolve_edisp, axis=1)
err_npred = np.sqrt(npred)