Example 1
def make_mean_rmf(energy_true, energy_reco, center, ObsList):
    """
    Compute the mean energy dispersion (RMF) for a set of observations and a given energy binning

    Parameters
    ----------
    energy_true: tuple for the true energy axis: (Emin, Emax, nbins)
    energy_reco: tuple for the reco energy axis: (Emin, Emax, nbins)
    center: SkyCoord of the source
    ObsList: ObservationList to use to compute the EDISP (can be different from the data_store, e.g. for G0p9 in the GC)

    Returns
    -------
    rmf: `~gammapy.irf.EnergyDispersion`
        Stacked EDISP for a set of observations
    """

    # Here all the observations are pointed at less than 2 degrees from the Crab, so it is OK to
    # estimate the mean RMF at the Crab source position (the area is defined for an offset of 2 degrees...)
    emin_true, emax_true, nbin_true = energy_true
    emin_reco, emax_reco, nbin_reco = energy_reco
    energy_true_bins = EnergyBounds.equal_log_spacing(emin_true, emax_true,
                                                      nbin_true, 'TeV')
    energy_reco_bins = EnergyBounds.equal_log_spacing(emin_reco, emax_reco,
                                                      nbin_reco, 'TeV')
    rmf = ObsList.make_mean_edisp(position=center,
                                  e_true=energy_true_bins,
                                  e_reco=energy_reco_bins)
    return rmf
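A minimal call sketch for the function above, assuming the gammapy 0.x DataStore/ObservationList API; the dataset path and run numbers are taken from other examples in this collection, and the energy tuples are only illustrative.

from astropy.coordinates import SkyCoord
from gammapy.data import DataStore

data_store = DataStore.from_dir('$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2')
obs_list = data_store.obs_list([23523, 23559])   # ObservationList (gammapy 0.x API assumed)
center = SkyCoord.from_name('Crab')
rmf = make_mean_rmf(energy_true=(0.1, 100, 40),  # (Emin, Emax, nbins) in TeV
                    energy_reco=(0.2, 50, 20),
                    center=center,
                    ObsList=obs_list)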
Example 2
def make_mean_rmf(energy_true,
                  energy_reco,
                  center,
                  ObsList,
                  outdir,
                  source_name=""):
    """
    Compute the mean energy dispersion (RMF) for a set of observations and a given energy binning

    Parameters
    ----------
    energy_true: tuple for the true energy binning: (Emin, Emax, nbins)
    energy_reco: tuple for the reco energy binning: (Emin, Emax, nbins)
    center: SkyCoord of the source
    ObsList: ObservationList to use to compute the EDISP (can be different from the data_store, e.g. for G0p9 in the GC)
    outdir: directory where the output FITS file will go
    source_name: name of the source for which you want to compute the mean RMF


    Returns
    -------

    """

    # Here all the observations are pointed at less than 2 degrees from the Crab, so it is OK to
    # estimate the mean RMF at the Crab source position (the area is defined for an offset of 2 degrees...)
    emin_true, emax_true, nbin_true = energy_true
    emin_reco, emax_reco, nbin_reco = energy_reco
    energy_true_bins = EnergyBounds.equal_log_spacing(emin_true, emax_true,
                                                      nbin_true, 'TeV')
    energy_reco_bins = EnergyBounds.equal_log_spacing(emin_reco, emax_reco,
                                                      nbin_reco, 'TeV')
    rmf = ObsList.make_mean_edisp(position=center,
                                  e_true=energy_true_bins,
                                  e_reco=energy_reco_bins)
    rmf.write(outdir + "/mean_rmf" + source_name + ".fits", clobber=True)
Example 3
    def read(cls, filename, offset='0.5 deg'):
        """Read from a FITS file.

        Compute the RMF at the given offset (default 0.5 deg) on the fly.

        Parameters
        ----------
        filename : `str`
            File containing the IRFs
        offset : str or `~astropy.coordinates.Angle`
            Offset at which the RMF is computed
        """
        filename = str(make_path(filename))

        with fits.open(filename, memmap=False) as hdulist:
            aeff = EffectiveAreaTable.from_hdulist(hdulist=hdulist)
            edisp = EnergyDispersion2D.read(filename, hdu='ENERGY DISPERSION')
            bkg = BgRateTable.from_hdulist(hdulist=hdulist)
            psf = Psf68Table.from_hdulist(hdulist=hdulist)
            sens = SensitivityTable.from_hdulist(hdulist=hdulist)

        # Create rmf with appropriate dimensions (e_reco->bkg, e_true->area)
        e_reco_min = bkg.energy.lo[0]
        e_reco_max = bkg.energy.hi[-1]
        e_reco_bin = bkg.energy.nbins
        e_reco_axis = EnergyBounds.equal_log_spacing(
            e_reco_min,
            e_reco_max,
            e_reco_bin,
            'TeV',
        )

        e_true_min = aeff.energy.lo[0]
        e_true_max = aeff.energy.hi[-1]
        e_true_bin = aeff.energy.nbins
        e_true_axis = EnergyBounds.equal_log_spacing(
            e_true_min,
            e_true_max,
            e_true_bin,
            'TeV',
        )

        rmf = edisp.to_energy_dispersion(
            offset=offset,
            e_reco=e_reco_axis,
            e_true=e_true_axis,
        )

        return cls(aeff=aeff,
                   bkg=bkg,
                   edisp=edisp,
                   psf=psf,
                   sens=sens,
                   rmf=rmf)
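A hedged usage sketch for the classmethod above; the owning class is not shown in the snippet, so `CTAPerf` is used here as a stand-in name, and the file path is a placeholder for a point-like IRF file.

perf = CTAPerf.read('point_like_irfs.fits.gz', offset='0.5 deg')  # class name and path are assumptions
print(perf.rmf)   # stacked EnergyDispersion evaluated at the requested offset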
Example 4
    def e_reco(self):
        """Reconstructed energy binning."""
        if self.edisp is not None:
            temp = self.edisp.e_reco.bins
        else:
            temp = self.aeff.energy.bins
        return EnergyBounds(temp)
Example 5
def get_psf_table(psf, emin, emax, bins):
    """Returns a table of energy and containment radius
    from an EnergyDependentTablePSF object."""

    # Container for data
    data = []

    # Loop over energies and determine PSF containment radius
    ebounds = EnergyBounds.equal_log_spacing(emin, emax, bins, 'MeV')
    for energy in ebounds:
        energy_psf = psf.table_psf_at_energy(energy)

        containment_68 = energy_psf.containment_radius(0.68)
        containment_95 = energy_psf.containment_radius(0.95)

        row = dict(ENERGY=energy.value,
                   CONT_68=containment_68.value,
                   CONT_95=containment_95.value)

        data.append(row)

    # Construct table and add correct units to columns
    table = Table(data)
    table['ENERGY'].unit = energy.unit
    table['CONT_68'].unit = containment_68.unit
    table['CONT_95'].unit = containment_95.unit

    return table
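A short usage sketch for get_psf_table; the PSF file name is a placeholder, and the energy range (10 GeV to 1 TeV, expressed in MeV, with 10 bins) is only illustrative.

from gammapy.irf import EnergyDependentTablePSF

psf = EnergyDependentTablePSF.read('psf.fits')          # placeholder filename
table = get_psf_table(psf, emin=1e4, emax=1e6, bins=10)
print(table)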
Example 6
    def from_3fgl(cls, source, x_method='log_center'):
        """Get `~gammapy.spectrum.DifferentialFluxPoints` for a 3FGL source

        Parameters
        ----------
        source : dict
            3FGL source
        x_method : str
            Method used to compute the energy of each flux point ('log_center')
        """
        ebounds = EnergyBounds([100, 300, 1000, 3000, 10000, 100000], 'MeV')

        if x_method == 'log_center':
            energy = ebounds.log_centers
        else:
            raise ValueError('Undefined x method: {}'.format(x_method))
        fluxkeys = ['nuFnu100_300', 'nuFnu300_1000', 'nuFnu1000_3000', 'nuFnu3000_10000', 'nuFnu10000_100000']
        temp_fluxes = [source.data[_] for _ in fluxkeys]
        energy_fluxes = Quantity(temp_fluxes, 'erg cm-2 s-1')
        diff_fluxes = (energy_fluxes * energy ** -2).to('erg-1 cm-2 s-1')

        # Get relative errors on the integral fluxes
        int_flux_points = IntegralFluxPoints.from_3fgl(source)
        val = int_flux_points['INT_FLUX'].quantity
        rel_error_hi = int_flux_points['INT_FLUX_ERR_HI'] / val
        rel_error_lo = int_flux_points['INT_FLUX_ERR_LO'] / val

        diff_fluxes_err_hi = diff_fluxes * rel_error_hi
        diff_fluxes_err_lo = diff_fluxes * rel_error_lo

        return cls.from_arrays(energy=energy, diff_flux=diff_fluxes,
                               diff_flux_err_lo=diff_fluxes_err_lo,
                               diff_flux_err_hi=diff_fluxes_err_hi)
Example 7
def get_psf_table(psf, emin, emax, bins):
    """Returns a table of energy and containment radius
    from an EnergyDependentTablePSF object."""

    # Container for data
    data = []

    # Loop over energies and determine PSF containment radius
    ebounds = EnergyBounds.equal_log_spacing(emin, emax, bins, 'MeV')
    for energy in ebounds:
        energy_psf = psf.table_psf_at_energy(energy)

        containment_68 = energy_psf.containment_radius(0.68)
        containment_95 = energy_psf.containment_radius(0.95)

        row = dict(ENERGY=energy.value,
                   CONT_68=containment_68.value,
                   CONT_95=containment_95.value)

        data.append(row)

    # Construct table and add correct units to columns
    table = Table(data)
    table['ENERGY'].unit = energy.unit
    table['CONT_68'].unit = containment_68.unit
    table['CONT_95'].unit = containment_95.unit

    return table
Example 8
    def from_arrays(cls,
                    ebounds,
                    int_flux,
                    int_flux_err_hi=None,
                    int_flux_err_lo=None):
        """Create `~gammapy.spectrum.IntegralFluxPoints` from numpy arrays"""
        t = Table()
        ebounds = EnergyBounds(ebounds)
        int_flux = Quantity(int_flux)
        if not int_flux.unit.is_equivalent('cm-2 s-1'):
            raise ValueError('Flux (unit {}) not an integrated flux'.format(
                int_flux.unit))

        # Set errors to zero by default
        def_f = np.zeros(ebounds.nbins) * int_flux.unit
        int_flux_err_hi = def_f if int_flux_err_hi is None else int_flux_err_hi
        int_flux_err_lo = def_f if int_flux_err_lo is None else int_flux_err_lo

        t['ENERGY_MIN'] = ebounds.lower_bounds
        t['ENERGY_MAX'] = ebounds.upper_bounds
        t['INT_FLUX'] = int_flux
        t['INT_FLUX_ERR_HI'] = int_flux_err_hi
        t['INT_FLUX_ERR_LO'] = int_flux_err_lo

        t['INT_FLUX_ERR_HI_%'] = 100 * int_flux_err_hi / int_flux
        t['INT_FLUX_ERR_LO_%'] = 100 * int_flux_err_lo / int_flux
        return cls(t)
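A hedged sketch of calling the classmethod above (assumed to live on gammapy's IntegralFluxPoints class, as the docstring suggests); the bin edges and flux values are made up for illustration.

from astropy.units import Quantity
from gammapy.utils.energy import EnergyBounds

ebounds = EnergyBounds([1, 3, 10, 30], 'TeV')
int_flux = Quantity([1e-11, 3e-12, 5e-13], 'cm-2 s-1')
points = IntegralFluxPoints.from_arrays(ebounds, int_flux)   # errors default to zero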
Example 9
    def from_2fhl(cls, source):
        """Get `~gammapy.spectrum.IntegralFluxPoints` for a 2FHL source

        Parameters
        ----------
        source : dict
            2FHL source
        """
        ebounds = EnergyBounds([50, 171, 585, 2000], 'GeV')
        fluxkeys = ['Flux50_171GeV', 'Flux171_585GeV', 'Flux585_2000GeV']
        temp_fluxes = [source.data[_] for _ in fluxkeys]

        fluxerrkeys = [
            'Unc_Flux50_171GeV', 'Unc_Flux171_585GeV', 'Unc_Flux585_2000GeV'
        ]

        temp_fluxes_err_hi = [source.data[_][1] for _ in fluxerrkeys]
        temp_fluxes_err_lo = [-1 * source.data[_][0] for _ in fluxerrkeys]

        int_fluxes = Quantity(temp_fluxes, 'cm-2 s-1')
        int_fluxes_err_hi = Quantity(temp_fluxes_err_hi, 'cm-2 s-1')
        int_fluxes_err_lo = Quantity(temp_fluxes_err_lo, 'cm-2 s-1')

        return cls.from_arrays(ebounds, int_fluxes, int_fluxes_err_hi,
                               int_fluxes_err_lo)
Example 10
    def load_irf(self):
        filename = os.path.join(self.outdir, "irf.fits.gz")
        with fits.open(filename, memmap=False) as hdulist:
            aeff = EffectiveAreaTable2D.from_hdulist(hdulist=hdulist)
            edisp = EnergyDispersion2D.read(filename, hdu="ENERGY DISPERSION")

            bkg_fits_table = hdulist["BACKGROUND"]
            bkg_table = Table.read(bkg_fits_table)
            energy_lo = bkg_table["ENERG_LO"].quantity
            energy_hi = bkg_table["ENERG_HI"].quantity
            bkg = bkg_table["BGD"].quantity

            axes = [
                BinnedDataAxis(energy_lo,
                               energy_hi,
                               interpolation_mode="log",
                               name="energy")
            ]
            bkg = BkgData(data=NDDataArray(axes=axes, data=bkg))

        # Create rmf with appropriate dimensions (e_reco->bkg, e_true->area)
        e_reco_min = bkg.energy.lo[0]
        e_reco_max = bkg.energy.hi[-1]
        e_reco_bin = bkg.energy.nbins
        e_reco_axis = EnergyBounds.equal_log_spacing(e_reco_min, e_reco_max,
                                                     e_reco_bin, "TeV")

        e_true_min = aeff.data.axes[0].lo[0]
        e_true_max = aeff.data.axes[0].hi[-1]
        e_true_bin = len(aeff.data.axes[0].bins) - 1
        e_true_axis = EnergyBounds.equal_log_spacing(e_true_min, e_true_max,
                                                     e_true_bin, "TeV")

        # Fake offset...
        rmf = edisp.to_energy_dispersion(offset=0.5 * u.deg,
                                         e_reco=e_reco_axis,
                                         e_true=e_true_axis)

        # This is required because in gammapy v0.8
        # gammapy.spectrum.utils.integrate_model
        # calls the attribute aeff.energy which is an attribute of
        # EffectiveAreaTable and not of  EffectiveAreaTable2D
        # WARNING the angle is not important, but only because we started with
        # on-axis data! TO UPDATE
        aeff = aeff.to_effective_area_table(Angle("1d"))

        self.irf = Irf(bkg=bkg, aeff=aeff, rmf=rmf)
Example 11
    def read(cls, filename, offset='0.5 deg'):
        """Read from a FITS file.

        Compute the RMF at the given offset (default 0.5 deg) on the fly.

        Parameters
        ----------
        filename : `str`
            File containing the IRFs
        offset : str or `~astropy.coordinates.Angle`
            Offset at which the RMF is computed
        """
        filename = str(make_path(filename))

        with fits.open(filename, memmap=False) as hdulist:
            aeff = EffectiveAreaTable.from_hdulist(hdulist=hdulist)
            edisp = EnergyDispersion2D.read(filename, hdu='ENERGY DISPERSION')
            bkg = BgRateTable.from_hdulist(hdulist=hdulist)
            psf = Psf68Table.from_hdulist(hdulist=hdulist)
            sens = SensitivityTable.from_hdulist(hdulist=hdulist)

        # Create rmf with appropriate dimensions (e_reco->bkg, e_true->area)
        e_reco_min = bkg.energy.lo[0]
        e_reco_max = bkg.energy.hi[-1]
        e_reco_bin = bkg.energy.nbins
        e_reco_axis = EnergyBounds.equal_log_spacing(
            e_reco_min, e_reco_max, e_reco_bin, 'TeV',
        )

        e_true_min = aeff.energy.lo[0]
        e_true_max = aeff.energy.hi[-1]
        e_true_bin = aeff.energy.nbins
        e_true_axis = EnergyBounds.equal_log_spacing(
            e_true_min, e_true_max, e_true_bin, 'TeV',
        )

        rmf = edisp.to_energy_dispersion(
            offset=offset, e_reco=e_reco_axis, e_true=e_true_axis,
        )

        return cls(
            aeff=aeff,
            bkg=bkg,
            edisp=edisp,
            psf=psf,
            sens=sens,
            rmf=rmf
        )
Example 12
    def make_1d_expected_counts(self,
                                spectral_index=2.3,
                                for_integral_flux=False,
                                eref=None):
        """Compute the 1D exposure table for one observation for an offset table.

        Parameters
        ----------
        spectral_index : float
            Assumed power-law spectral index
        for_integral_flux : bool
            If True, normalise so that the total excess / exposure gives the integrated flux
        eref : `~gammapy.utils.energy.Energy`
            Reference energy at which to compute the exposure.
            Default is the log center of the energy band of the image.
        Returns
        -------
        table : `astropy.table.Table`
            Two columns: offset in the FOV "theta" and expected counts "npred"
        """
        energy = EnergyBounds.equal_log_spacing(self.energy_band[0].value,
                                                self.energy_band[1].value, 100,
                                                self.energy_band.unit)
        energy_band = energy.bands
        energy_bin = energy.log_centers
        if not eref:
            eref = EnergyBounds(self.energy_band).log_centers
        spectrum = (energy_bin / eref)**(-spectral_index)
        offset = Angle(
            np.linspace(self.offset_band[0].value, self.offset_band[1].value,
                        10), self.offset_band.unit)
        arf = self.aeff.data.evaluate(offset=offset, energy=energy_bin).T
        npred = np.sum(arf * spectrum * energy_band, axis=1)
        npred *= self.livetime

        if for_integral_flux:
            norm = np.sum(spectrum * energy_band)
            npred /= norm

        table = Table()
        table['theta'] = offset
        table['npred'] = npred

        return table
Example 13
def table_model():
    energy_edges = EnergyBounds.equal_log_spacing(0.1 * u.TeV, 100 * u.TeV, 1000)
    energy = energy_edges.log_centers

    index = 2.3 * u.Unit('')
    amplitude = 4 / u.cm ** 2 / u.s / u.TeV
    reference = 1 * u.TeV
    pl = PowerLaw(index, amplitude, reference)
    flux = pl(energy)

    return TableModel(energy, flux, 1 * u.Unit(''))


def make_counts_array():
    """Make an example counts array with energy and offset axes."""
    data_store = DataStore.from_dir('$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2')

    event_lists = data_store.load_all('events')
    ebounds = EnergyBounds.equal_log_spacing(0.1, 100, 100, 'TeV')
    offset = Angle(np.linspace(0, 2.5, 100), "deg")
    array = EnergyOffsetArray(ebounds, offset)
    array.fill_events(event_lists)

    return array
Example 15
def table_model():
    energy_edges = EnergyBounds.equal_log_spacing(0.1 * u.TeV, 100 * u.TeV, 1000)
    energy = energy_edges.log_centers

    index = 2.3 * u.Unit('')
    amplitude = 4 / u.cm ** 2 / u.s / u.TeV
    reference = 1 * u.TeV
    pl = PowerLaw(index, amplitude, reference)
    flux = pl(energy)

    return TableModel(energy, flux, 1 * u.Unit(''))


def make_counts_array():
    """Make an example counts array with energy and offset axes."""
    data_store = DataStore.from_dir(gammapy_extra.dir / 'datasets/hess-crab4')

    event_lists = data_store.load_all('events')
    ebounds = EnergyBounds.equal_log_spacing(0.1, 100, 100, 'TeV')
    offset = Angle(np.linspace(0, 2.5, 100), "deg")
    array = EnergyOffsetArray(ebounds, offset)
    array.fill_events(event_lists)

    return array
Example 17
    def load_irf(self):
        filename = os.path.join(self.outdir, 'irf.fits.gz')
        with fits.open(filename, memmap=False) as hdulist:
            aeff = EffectiveAreaTable.from_hdulist(hdulist=hdulist)
            edisp = EnergyDispersion2D.read(filename, hdu="ENERGY DISPERSION")

            bkg_fits_table = hdulist["BACKGROUND"]
            bkg_table = Table.read(bkg_fits_table)
            energy_lo = bkg_table["ENERG_LO"].quantity
            energy_hi = bkg_table["ENERG_HI"].quantity
            bkg = bkg_table["BGD"].quantity

            axes = [
                BinnedDataAxis(
                    energy_lo, energy_hi, interpolation_mode="log", name="energy"
                )
            ]
            bkg = BkgData(data=NDDataArray(axes=axes, data=bkg))

        # Create rmf with appropriate dimensions (e_reco->bkg, e_true->area)
        e_reco_min = bkg.energy.lo[0]
        e_reco_max = bkg.energy.hi[-1]
        e_reco_bin = bkg.energy.nbins
        e_reco_axis = EnergyBounds.equal_log_spacing(
            e_reco_min, e_reco_max, e_reco_bin, "TeV"
        )

        e_true_min = aeff.energy.lo[0]
        e_true_max = aeff.energy.hi[-1]
        e_true_bin = aeff.energy.nbins
        e_true_axis = EnergyBounds.equal_log_spacing(
            e_true_min, e_true_max, e_true_bin, "TeV"
        )

        # Fake offset...
        rmf = edisp.to_energy_dispersion(
            offset=0.5 * u.deg, e_reco=e_reco_axis, e_true=e_true_axis
        )

        self.irf = Irf(bkg=bkg, aeff=aeff, rmf=rmf)
Example 18
def make_psf_cube(image_size,
                  energy_cube,
                  source_name,
                  center_maps,
                  center,
                  ObsList,
                  outdir,
                  spectral_index=2.3):
    """
    Compute the mean PSF cube for a set of observations in different energy bands

    Parameters
    ----------
    image_size: int, total number of pixels of the 2D map
    energy_cube: tuple for the energy axis: (Emin, Emax, nbins)
    source_name: name of the source for which you want to compute the image
    center_maps: SkyCoord
            center of the images
    center: SkyCoord
            position at which we want to compute the PSF
    ObsList: ObservationList to use to compute the PSF (can be different from the data_store, e.g. for G0p9 in the GC)
    outdir: directory where the FITS image will go
    spectral_index: assumed spectral index used to weight the PSF

    Returns
    -------

    """
    ref_cube = make_empty_cube(image_size, energy_cube, center_maps)
    header = ref_cube.sky_image_ref.to_image_hdu().header
    energy_bins = ref_cube.energies(mode="edges")
    for i_E, E in enumerate(energy_bins[0:-1]):
        energy_band = Energy(
            [energy_bins[i_E].value, energy_bins[i_E + 1].value],
            energy_bins.unit)
        energy = EnergyBounds.equal_log_spacing(energy_band[0].value,
                                                energy_band[1].value, 100,
                                                energy_band.unit)
        # Here all the observations are pointed at less than 2 degrees from the Crab, so it is OK to
        # estimate the mean PSF at the Crab source position (the area is defined for an offset of 2 degrees...)
        psf_energydependent = ObsList.make_mean_psf(center, energy, theta=None)
        try:
            psf_table = psf_energydependent.table_psf_in_energy_band(
                energy_band, spectral_index=spectral_index)
        except Exception:
            psf_table = TablePSF(
                psf_energydependent.offset,
                Quantity(np.zeros(len(psf_energydependent.offset)), u.sr**-1))
        ref_cube.data[i_E, :, :] = fill_acceptance_image(
            header, center_maps, psf_table._offset.to("deg"),
            psf_table._dp_domega,
            psf_table._offset.to("deg")[-1]).data
    ref_cube.write(outdir + "/mean_psf_cube_" + source_name + ".fits",
                   format="fermi-counts")
Example 19
def prepare_images():
    # Read in data
    background_file = FermiVelaRegion.filenames()['diffuse_model']
    exposure_file = FermiVelaRegion.filenames()['exposure_cube']
    counts_file = FermiVelaRegion.filenames()['counts_cube']
    background_model = SkyCube.read(background_file)
    exposure_cube = SkyCube.read(exposure_file)

    # Add correct units
    exposure_cube.data = Quantity(exposure_cube.data.value, 'cm2 s')

    # Re-project background cube
    repro_bg_cube = background_model.reproject_to(exposure_cube)

    # Define energy band required for output
    energies = EnergyBounds([10, 500], 'GeV')

    # Compute the predicted counts cube
    npred_cube = compute_npred_cube(repro_bg_cube, exposure_cube, energies)

    # Convolve with Energy-dependent Fermi LAT PSF
    psf = EnergyDependentTablePSF.read(FermiVelaRegion.filenames()['psf'])
    convolved_npred_cube = convolve_cube(npred_cube,
                                         psf,
                                         offset_max=Angle(3, 'deg'))

    # Counts data
    counts_data = fits.open(counts_file)[0].data
    counts_wcs = WCS(fits.open(counts_file)[0].header)
    counts_cube = SkyCube(data=Quantity(counts_data, ''),
                          wcs=counts_wcs,
                          energy=energies)
    counts_cube = counts_cube.reproject_to(npred_cube,
                                           projection_type='nearest-neighbor')

    counts = counts_cube.data[0]
    model = convolved_npred_cube.data[0]

    # Load Fermi tools gtmodel background-only result
    gtmodel = fits.open(
        FermiVelaRegion.filenames()['background_image'])[0].data.astype(float)

    # Ratio for the two background images
    ratio = np.nan_to_num(model / gtmodel)

    # Header is required for plotting, so returned here
    wcs = npred_cube.wcs
    header = wcs.to_header()

    return model, gtmodel, ratio, counts, header
Example 20
def test_model(model):
    print(model)
    print(model(energy=Q(10, 'TeV')))
    print(model.integral(emin=Q(1, 'TeV'), emax=Q(2, 'TeV')))

    # plot
    # butterfly
    # npred
    reco_bins = 5
    true_bins = 10
    e_reco = Q(np.logspace(-1, 1, reco_bins + 1), 'TeV')
    e_true = Q(np.logspace(-1.5, 1.5, true_bins + 1), 'TeV')
    livetime = Q(26, 'min')
    aeff_data = Q(np.ones(true_bins) * 1e5, 'cm2')
    aeff = EffectiveAreaTable(energy=e_true, data=aeff_data)
    edisp_data = make_perfect_resolution(e_true, e_reco)
    edisp = EnergyDispersion(edisp_data, EnergyBounds(e_true),
                             EnergyBounds(e_reco))
    npred = calculate_predicted_counts(model=model,
                                       livetime=livetime,
                                       aeff=aeff,
                                       edisp=edisp)
    print(npred.data)
Example 21
def make_mean_psf_cube(image_size,
                       energy_cube,
                       center_maps,
                       center,
                       ObsList,
                       spectral_index=2.3):
    """
    Compute the mean PSF cube for a set of observations in different energy bands

    Parameters
    ----------
    image_size: int, total number of pixels of the 2D map
    energy_cube: tuple for the energy axis: (Emin, Emax, nbins)
    center_maps: SkyCoord
            center of the images
    center: SkyCoord
            position at which we want to compute the PSF
    ObsList: ObservationList to use to compute the PSF (can be different from the data_store, e.g. for G0p9 in the GC)
    spectral_index: assumed spectral index used to weight the PSF

    Returns
    -------
    ref_cube : `~gammapy.cube.SkyCube`
             PSF mean cube

    """
    ref_cube = make_empty_cube(image_size, energy_cube, center_maps)
    header = ref_cube.sky_image_ref.to_image_hdu().header
    energy_bins = ref_cube.energies()
    for i_E, E in enumerate(energy_bins[0:-1]):
        energy_band = Energy(
            [energy_bins[i_E].value, energy_bins[i_E + 1].value],
            energy_bins.unit)
        energy = EnergyBounds.equal_log_spacing(energy_band[0].value,
                                                energy_band[1].value, 100,
                                                energy_band.unit)
        psf_energydependent = ObsList.make_psf(center, energy, theta=None)
        try:
            psf_table = psf_energydependent.table_psf_in_energy_band(
                energy_band, spectral_index=spectral_index)
        except Exception:
            psf_table = TablePSF(
                psf_energydependent.offset,
                Quantity(np.zeros(len(psf_energydependent.offset)), u.sr**-1))
        ref_cube.data[i_E, :, :] = fill_acceptance_image(
            header, center_maps, psf_table._offset.to("deg"),
            psf_table._dp_domega,
            psf_table._offset.to("deg")[-1]).data
    return ref_cube
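A hedged call sketch for make_mean_psf_cube; make_empty_cube and the observation list come from the same analysis scripts and are not shown here, and the image size and energy tuple are placeholders.

from astropy.coordinates import SkyCoord

center = SkyCoord(83.63, 22.01, unit='deg', frame='icrs')
psf_cube = make_mean_psf_cube(image_size=250,
                              energy_cube=(0.5, 100, 5),   # (Emin, Emax, nbins) in TeV
                              center_maps=center,
                              center=center,
                              ObsList=obs_list)            # ObservationList built elsewhere
psf_cube.write('mean_psf_cube.fits', format='fermi-counts')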
Example 22
    def plot_energy_hist(self, ax=None, ebounds=None, **kwargs):
        """
        A plot showing counts as a function of energy.

        Convert to a `~gammapy.spectrum.CountsSpectrum` internally
        """

        if ebounds is None:
            emin = np.min(self['ENERGY'].quantity)
            emax = np.max(self['ENERGY'].quantity)
            ebounds = EnergyBounds.equal_log_spacing(emin, emax, 100)

        from gammapy.spectrum import CountsSpectrum
        spec = CountsSpectrum.from_eventlist(self, ebounds)
        spec.plot(ax=ax, **kwargs)

        return ax
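A usage sketch for plot_energy_hist, assuming it is a method of gammapy's EventList (the snippet is consistent with that); the events file name is a placeholder.

from gammapy.data import EventList

events = EventList.read('events.fits')   # placeholder filename
ax = events.plot_energy_hist()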
Example 23
    def plot_energy_hist(self, ax=None, ebounds=None, **kwargs):
        """
        A plot showing counts as a function of energy.

        Convert to a `~gammapy.spectrum.CountsSpectrum` internally
        """

        if ebounds is None:
            emin = np.min(self['ENERGY'].quantity)
            emax = np.max(self['ENERGY'].quantity)
            ebounds = EnergyBounds.equal_log_spacing(emin, emax, 100)

        from gammapy.spectrum import CountsSpectrum
        spec = CountsSpectrum.from_eventlist(self, ebounds)
        spec.plot(ax=ax, **kwargs)

        return ax
Example 24
def prepare_images():
    # Read in data
    fermi_vela = FermiVelaRegion()
    background_file = FermiVelaRegion.filenames()['diffuse_model']
    exposure_file = FermiVelaRegion.filenames()['exposure_cube']
    counts_file = FermiVelaRegion.filenames()['counts_cube']
    background_model = SkyCube.read(background_file, format='fermi-background')
    exposure_cube = SkyCube.read(exposure_file, format='fermi-exposure')

    # Re-project background cube
    repro_bg_cube = background_model.reproject(exposure_cube)

    # Define energy band required for output
    energies = EnergyBounds([10, 500], 'GeV')

    # Compute the predicted counts cube
    npred_cube = compute_npred_cube(repro_bg_cube,
                                    exposure_cube,
                                    energies,
                                    integral_resolution=5)

    # Convolve with Energy-dependent Fermi LAT PSF
    psf = EnergyDependentTablePSF.read(FermiVelaRegion.filenames()['psf'])
    kernels = psf.kernels(npred_cube)
    convolved_npred_cube = npred_cube.convolve(kernels, mode='reflect')

    # Counts data
    counts_cube = SkyCube.read(counts_file, format='fermi-counts')
    counts_cube = counts_cube.reproject(npred_cube)

    counts = counts_cube.data[0]
    model = convolved_npred_cube.data[0]

    # Load Fermi tools gtmodel background-only result
    gtmodel = fits.open(
        FermiVelaRegion.filenames()['background_image'])[0].data.astype(float)

    # Ratio for the two background images
    ratio = np.nan_to_num(model / gtmodel)

    # Header is required for plotting, so returned here
    wcs = npred_cube.wcs
    header = wcs.to_header()
    return model, gtmodel, ratio, counts, header
Example 25
def make_psf(energy_band,
             source_name,
             center,
             ObsList,
             outdir,
             spectral_index=2.3):
    """
    Compute the mean PSF for a set of observations and a given energy band

    Parameters
    ----------
    energy_band: energy band over which you want to compute the PSF
    source_name: name of the source for which you want to compute the image
    center: SkyCoord of the source
    ObsList: ObservationList to use to compute the PSF (can be different from the data_store, e.g. for G0p9 in the GC)
    outdir: directory where the FITS table will go
    spectral_index: assumed spectral index used to weight the PSF

    Returns
    -------

    """
    energy = EnergyBounds.equal_log_spacing(energy_band[0].value,
                                            energy_band[1].value, 100,
                                            energy_band.unit)
    # Here all the observations are pointed at less than 2 degrees from the Crab, so it is OK to
    # estimate the mean PSF at the Crab source position (the area is defined for an offset of 2 degrees...)
    psf_energydependent = ObsList.make_mean_psf(center, energy, theta=None)
    #import IPython; IPython.embed()
    try:
        psf_table = psf_energydependent.table_psf_in_energy_band(
            energy_band, spectral_index=spectral_index)
    except Exception:
        psf_table = TablePSF(
            psf_energydependent.offset,
            Quantity(np.zeros(len(psf_energydependent.offset)), u.sr**-1))
    Table_psf = Table()
    c1 = Column(psf_table._dp_domega,
                name='psf_value',
                unit=psf_table._dp_domega.unit)
    c2 = Column(psf_table._offset, name='theta', unit=psf_table._offset.unit)
    Table_psf.add_column(c1)
    Table_psf.add_column(c2)
    filename_psf = outdir + "/psf_table_" + source_name + "_" + str(
        energy_band[0].value) + '_' + str(energy_band[1].value) + ".fits"
    Table_psf.write(filename_psf, overwrite=True)
Example 26
def make_model():
    dir = str(gammapy_extra.dir) + '/datasets/hess-crab4-hd-hap-prod2'
    data_store = DataStore.from_dir(dir)
    obs_table = data_store.obs_table
    ebounds = EnergyBounds.equal_log_spacing(0.1, 100, 100, 'TeV')
    offset = sqrt_space(start=0, stop=2.5, num=100) * u.deg

    excluded_sources = make_excluded_sources()

    multi_array = EnergyOffsetBackgroundModel(ebounds, offset)
    multi_array.fill_obs(obs_table, data_store, excluded_sources)
    #multi_array.fill_obs(obs_table, data_store)
    multi_array.compute_rate()
    bgarray = multi_array.bg_rate
    energy_range = Energy([1, 10], 'TeV')
    table = bgarray.acceptance_curve_in_energy_band(energy_range, energy_bins=10)

    multi_array.write('energy_offset_array.fits', overwrite=True)
    table.write('acceptance_curve.fits', overwrite=True)
Example 27
def make_model():
    dir = str(gammapy_extra.dir) + '/datasets/hess-crab4-hd-hap-prod2'
    data_store = DataStore.from_dir(dir)
    obs_table = data_store.obs_table
    ebounds = EnergyBounds.equal_log_spacing(0.1, 100, 100, 'TeV')
    offset = sqrt_space(start=0, stop=2.5, num=100) * u.deg

    excluded_sources = make_excluded_sources()

    multi_array = EnergyOffsetBackgroundModel(ebounds, offset)
    multi_array.fill_obs(obs_table, data_store, excluded_sources)
    # multi_array.fill_obs(obs_table, data_store)
    multi_array.compute_rate()
    bgarray = multi_array.bg_rate
    energy_range = Energy([1, 10], 'TeV')
    table = bgarray.acceptance_curve_in_energy_band(energy_range,
                                                    energy_bins=10)

    multi_array.write('energy_offset_array.fits', overwrite=True)
    table.write('acceptance_curve.fits', overwrite=True)
Example 28
def compute_sum_cube(flux_cube, flux_cube2, config):
    """Compute sum of two flux cubes.
        
    Parameters
    ----------
    flux_cube : `SkyCube`
        Flux cube 1, really differential surface brightness in 'cm-2 s-1 TeV-1 sr-1'.
    flux_cube2 : `SkyCube`
        Flux cube 2.
    config : `dict`
        Configuration dictionary.
        
    Returns
    -------
    nflux_cube_sum: `SkyCube`
        Sum of flux_cube and flux_cube2.
    
    See also
    --------
    read_config
    """
    ebin = flux_cube.energies(mode="edges")
    ebounds = EnergyBounds(ebin)
    
    nflux_cube_sum = make_ref_cube(config)
    for idx in range(len(ebounds) - 1):
        npred1 = flux_cube.sky_image_idx(idx)
        npred2 = flux_cube2.sky_image_idx(idx)

        # DEBUG
        # print(npred1.data)
        # print(npred2.data)

        nflux_sum = u.Quantity(npred1.data.value + npred2.data.value,
                               '1 / (cm2 s sr TeV)')
        nflux_cube_sum.data[idx] = nflux_sum.value

    return nflux_cube_sum
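A hedged usage sketch for compute_sum_cube; read_config is the helper referenced in the "See also" section (not shown here), and the cube file names are placeholders.

from gammapy.cube import SkyCube

flux_cube = SkyCube.read('flux_cube_component1.fits')    # placeholder filenames
flux_cube2 = SkyCube.read('flux_cube_component2.fits')
config = read_config('config.yaml')                      # helper from the same scripts
cube_sum = compute_sum_cube(flux_cube, flux_cube2, config)
cube_sum.write('flux_cube_sum.fits', format='fermi-counts')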
Example 29
    def from_3fgl(cls, source):
        """Get `~gammapy.spectrum.IntegralFluxPoints` for a 3FGL source

        Parameters
        ----------
        source : dict
            3FGL source
        """
        ebounds = EnergyBounds([100, 300, 1000, 3000, 10000, 100000], 'MeV')
        fluxkeys = ['Flux100_300', 'Flux300_1000', 'Flux1000_3000', 'Flux3000_10000', 'Flux10000_100000']
        temp_fluxes = [source.data[_] for _ in fluxkeys]

        fluxerrkeys = ['Unc_Flux100_300', 'Unc_Flux300_1000', 'Unc_Flux1000_3000', 'Unc_Flux3000_10000', 'Unc_Flux10000_100000']

        temp_fluxes_err_hi = [source.data[_][1] for _ in fluxerrkeys]
        temp_fluxes_err_lo = [-1 * source.data[_][0] for _ in fluxerrkeys]

        int_fluxes = Quantity(temp_fluxes, 'cm-2 s-1')
        int_fluxes_err_hi = Quantity(temp_fluxes_err_hi, 'cm-2 s-1')
        int_fluxes_err_lo = Quantity(temp_fluxes_err_lo, 'cm-2 s-1')

        return cls.from_arrays(ebounds, int_fluxes, int_fluxes_err_hi,
                               int_fluxes_err_lo)
Example 30
input_param = yaml.safe_load(open(sys.argv[1]))
image_size = input_param["general"]["image_size"]
# Input param fit and source configuration
# Size of the map region over which the fit is done
freeze_bkg = input_param["param_fit_morpho"]["freeze_bkg"]
source_name = input_param["general"]["source_name"]
name_method_fond = input_param["general"]["name_method_fond"]
if freeze_bkg:
    name = "_bkg_fix"
else:
    name = "_bkg_free"
for_integral_flux = input_param["exposure"]["for_integral_flux"]
# Energy binning
energy_bins = EnergyBounds.equal_log_spacing(
    input_param["energy binning"]["Emin"],
    input_param["energy binning"]["Emax"],
    input_param["energy binning"]["nbin"], 'TeV')
energy_centers = energy_bins.log_centers
energy_reco = [
    Energy(input_param["energy binning"]["Emin"], "TeV"),
    Energy(input_param["energy binning"]["Emax"], "TeV"),
    input_param["energy binning"]["nbin"]
]

# outdir data and result
config_name = input_param["general"]["config_name"]
outdir_data = make_outdir_data(source_name,
                               name_method_fond,
                               config_name,
                               image_size,
                               for_integral_flux=False,
# shouldn't matter, but must contain sufficient number of digits,
# so that CDELT1 and CDELT2 are not truncated, when wcs.to_header() is called
# seems to be a bug...
refheader['CDELT3'] = 2.02  

refheader['CTYPE1'] = 'RA---CAR'
refheader['CTYPE2'] = 'DEC--CAR'
refheader['CTYPE3'] = 'log_Energy'  # shouldn't matter
refheader['CUNIT1'] = 'deg'
refheader['CUNIT2'] = 'deg'
refheader['CRVAL1'] = events.meta['RA_OBJ']
refheader['CRVAL2'] = events.meta['DEC_OBJ']
refheader['CRVAL3'] = 10.0  # shouldn't matter

energies = EnergyBounds.equal_log_spacing(0.5, 80, 8, 'TeV')
data = Quantity(np.zeros((len(energies), 200, 200)))
wcs = WCS(refheader)
refcube = SpectralCube(data, wcs, energy=energies)

# Counts cube
log.info('Bin events into cube.')
counts_hdu = bin_events_in_cube(events, refcube, energies)
counts = SpectralCube(Quantity(counts_hdu.data, 'count'), wcs, energies)
log.info('Counts cube shape: {}'.format(counts_hdu.shape))
log.info('Number of events in cube: {}'.format(counts_hdu.data.sum()))
counts.writeto('counts.fits', clobber=True)

# Exposure cube
pointing = SkyCoord(events.meta['RA_PNT'], events.meta['DEC_PNT'], "icrs", unit="deg")
livetime = Quantity(events.meta['LIVETIME'], 's')
Example 32
large_gaus = Gauss2D("g2")
source_center_SgrA = SkyCoord.from_name("SgrA*")
large_gaus.xpos,large_gaus.ypos=skycoord_to_pixel(source_center_SgrA, excess.wcs)
CS_map=SkyMap.read("CStot.fits")
cs_reproj=CS_map.reproject(excess)
cs_reproj.data[np.where(np.isnan(cs_reproj.data))]=0
#cs_reproj.data[np.where(cs_reproj.data<50)]=0
cs_reproj.write("cs_map_reproj.fits", clobber=True)
load_table_model("CS","cs_map_reproj.fits")
set_full_model(psf_SgrA(large_gaus*CS))
#large_gaus.fwhm=150
#freeze(large_gaus.fwhm)
fit()
"""
pt.ion()
energy_bins = EnergyBounds.equal_log_spacing(0.5, 100, 5, 'TeV')

#E1=energy_bins[1].value
# E2=energy_bins[2].value
#for i_E, E in enumerate(energy_bins[0:-3]):
for i_E, E in enumerate(energy_bins[0:-5]):
    #E1 = energy_bins[i_E].value
    #E2 = energy_bins[i_E+1].value
    E1 = energy_bins[0].value
    E2 = energy_bins[3].value
    print("Energy band: E1=" + str(E1) + " and E2=" + str(E2))
    # on = SkyMapCollection.read("fov_bg_maps"+str(E1)+"_"+str(E2)+"_TeV.fits")["excess"]
    on = SkyMapCollection.read("fov_bg_maps" + str(E1) + "_" + str(E2) + "_TeV.fits")["counts"]
    bkgmap = SkyMapCollection.read("fov_bg_maps" + str(E1) + "_" + str(E2) + "_TeV.fits")["bkg"]
    exp = SkyMapCollection.read("fov_bg_maps" + str(E1) + "_" + str(E2) + "_TeV.fits")["exposure"]
    # source_J1745_303 = SkyCoord(358.76,-0.51, unit='deg',frame="galactic")
Example 33
bkg_estimator = PhaseBackgroundEstimator(
    observations=obs_list_vela,
    on_region=on_region,
    on_phase=on_phase_range,
    off_phase=off_phase_range,
)
bkg_estimator.run()
bkg_estimate = bkg_estimator.result

# The rest of the analysis is the same as for a standard spectral analysis with
# Gammapy. All the specificity of a phase-resolved analysis is contained in the
# PhaseBackgroundEstimator, where the background is estimated in the ON-region
# OFF-phase rather than in an OFF-region.
#
# We can now extract a spectrum with the SpectrumExtraction class. It takes the
# reconstructed and the true energy binning. Both are expected to be a Quantity
# with unit energy, i.e. an array with an energy unit. EnergyBounds is a
# dedicated class to do it.

# In[ ]:

etrue = EnergyBounds.equal_log_spacing(0.005, 10.0, 100, unit="TeV")
ereco = EnergyBounds.equal_log_spacing(0.01, 10, 30, unit="TeV")

extraction = SpectrumExtraction(
    observations=obs_list_vela,
    bkg_estimate=bkg_estimate,
    containment_correction=True,
    e_true=etrue,
    e_reco=ereco,
)

extraction.run()
extraction.compute_energy_threshold(method_lo="energy_bias",
                                    bias_percent_lo=20)

# Now let's take a look at the files we just created with spectrum_observation.
Example 34
"""Test npred model image computation.
"""
from astropy.coordinates import Angle
from gammapy.datasets import FermiGalacticCenter
from gammapy.utils.energy import EnergyBounds
from gammapy.irf import EnergyDependentTablePSF
from gammapy.cube import SkyCube, compute_npred_cube, convolve_cube

filenames = FermiGalacticCenter.filenames()
flux_cube = SkyCube.read(filenames['diffuse_model'])
exposure_cube = SkyCube.read(filenames['exposure_cube'])
psf = EnergyDependentTablePSF.read(filenames['psf'])

flux_cube = flux_cube.reproject_to(exposure_cube)

energy_bounds = EnergyBounds([10, 30, 100, 500], 'GeV')
npred_cube = compute_npred_cube(flux_cube, exposure_cube, energy_bounds)

offset_max = Angle(1, 'deg')
npred_cube_convolved = convolve_cube(npred_cube, psf, offset_max)
obs_summary = ObservationSummary(stats)
fig = plt.figure(figsize=(10, 6))
ax1 = fig.add_subplot(121)

obs_summary.plot_excess_vs_livetime(ax=ax1)
ax2 = fig.add_subplot(122)
obs_summary.plot_significance_vs_livetime(ax=ax2)

# ## Extract spectrum
#
# Now, we're going to extract a spectrum using the
# [SpectrumExtraction](https://docs.gammapy.org/0.10/api/gammapy.spectrum.SpectrumExtraction.html)
# class. We provide the reconstructed energy binning we want to use. It is
# expected to be a Quantity with unit energy, i.e. an array with an energy
# unit. We use a utility function to create it. We also provide the true
# energy binning to use.

# In[ ]:

e_reco = EnergyBounds.equal_log_spacing(0.1, 40, 40, unit="TeV")
e_true = EnergyBounds.equal_log_spacing(0.05, 100.0, 200, unit="TeV")

# Instantiate a
# [SpectrumExtraction](https://docs.gammapy.org/0.10/api/gammapy.spectrum.SpectrumExtraction.html)
# object that will do the extraction. The containment_correction parameter is
# there to allow for PSF leakage correction if one is working with full
# enclosure IRFs. We also compute a threshold energy and store the result in
# OGIP compliant files (pha, rmf, arf). This last step might be omitted though.

# In[ ]:

ANALYSIS_DIR = "crab_analysis"

extraction = SpectrumExtraction(
    observations=observations,
    bkg_estimate=background_estimator.result,
    containment_correction=False,
)
extraction.run()
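A hedged sketch of the two optional follow-up steps described above (energy threshold and OGIP output); compute_energy_threshold mirrors the call shown in the phase-resolved example earlier, while the write() call is an assumption about the gammapy 0.10 API and may need adjusting.

extraction.compute_energy_threshold(method_lo="energy_bias", bias_percent_lo=10)
extraction.write(ANALYSIS_DIR)   # assumed: writes the OGIP pha/arf/rmf files to the directory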
Example 36
# ### Spectral points
# 
# Finally, let's compute spectral points. The method used is to first choose an energy binning, and then to do a 1-dim likelihood fit / profile to compute the flux and flux error.

# In[ ]:


# Flux points are computed on the stacked observation
stacked_obs = extract.observations.stack()
print(stacked_obs)


# In[ ]:


ebounds = EnergyBounds.equal_log_spacing(1, 40, 4, unit=u.TeV)

seg = SpectrumEnergyGroupMaker(obs=stacked_obs)
seg.compute_groups_fixed(ebounds=ebounds)

fpe = FluxPointEstimator(
    obs=stacked_obs, groups=seg.groups, model=fit.result[0].model
)
fpe.compute_points()
fpe.flux_points.table


# ### Plot
# 
# Let's plot the spectral model and points. You could do it directly, but there is a helper class.
# Note that a spectral uncertainty band, a "butterfly" is drawn, but it is very thin, i.e. barely visible.
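A hedged direct-plot sketch for the step described above (plotting the model, its uncertainty band, and the flux points without the helper class); it assumes matplotlib plus the fit and fpe objects from the previous cells, and the 1-40 TeV range matches the flux-point binning.

import matplotlib.pyplot as plt

energy_range = [1, 40] * u.TeV
ax = fit.result[0].model.plot(energy_range, energy_power=2)
fit.result[0].model.plot_error(energy_range, ax=ax, energy_power=2)  # the thin "butterfly" band
fpe.flux_points.plot(ax=ax, energy_power=2)
plt.show()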
Example 37
"""

from astropy.coordinates import SkyCoord, Angle
from gammapy.datasets import gammapy_extra
from gammapy.image import ExclusionMask
from gammapy.data import DataStore
from gammapy.region import SkyCircleRegion
from gammapy.spectrum import SpectrumAnalysis
from gammapy.utils.energy import EnergyBounds

center = SkyCoord(83.63, 22.01, unit='deg', frame='icrs')
radius = Angle('0.3 deg')
on_region = SkyCircleRegion(pos=center, radius=radius)

bkg_method = dict(type='reflected', n_min=3)

exclusion_file = gammapy_extra.filename("datasets/exclusion_masks/"
                                        "tevcat_exclusion.fits")
excl = ExclusionMask.from_fits(exclusion_file)

bounds = EnergyBounds.equal_log_spacing(1, 10, 40, unit='TeV')

store = gammapy_extra.filename("datasets/hess-crab4")
ds = DataStore.from_dir(store)
obs = [23523, 23559]

ana = SpectrumAnalysis(datastore=ds, obs=obs, on_region=on_region,
                       bkg_method=bkg_method, exclusion=excl, ebounds=bounds)

ana.write_ogip_data(outdir='ogip_data')
Example 38
# Now we'll define the input for the spectrum analysis. It will be done the
# python way, i.e. by creating a config dict containing python objects. We plan
# to also add the convenience of configuring the analysis using a plain text
# config file.

# In[3]:


crab_pos = SkyCoord.from_name('crab')
on_region = CircleSkyRegion(crab_pos, 0.15 * u.deg)

model = models.LogParabola(
    alpha = 2.3,
    beta = 0,
    amplitude = 1e-11 * u.Unit('cm-2 s-1 TeV-1'),
    reference = 1 * u.TeV,
)

flux_point_binning = EnergyBounds.equal_log_spacing(0.7, 30, 5, u.TeV)

exclusion_mask = SkyImage.read('$GAMMAPY_EXTRA/datasets/exclusion_masks/tevcat_exclusion.fits')


# In[4]:


config = dict(
    outdir = None,
    background = dict(
        on_region=on_region,
        exclusion_mask=exclusion_mask,
        min_distance = 0.1 * u.rad,
    ),
    extraction = dict(containment_correction=False),
Example 39
    def ebounds(self):
        """Energy bounds"""
        return EnergyBounds.from_lower_and_upper_bounds(
            self['ENERGY_MIN'], self['ENERGY_MAX'])
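A small sketch showing what the property above builds, calling EnergyBounds.from_lower_and_upper_bounds directly with illustrative bin edges.

from astropy.units import Quantity
from gammapy.utils.energy import EnergyBounds

ebounds = EnergyBounds.from_lower_and_upper_bounds(Quantity([1, 3, 10], 'TeV'),
                                                   Quantity([3, 10, 30], 'TeV'))
print(ebounds.nbins, ebounds.log_centers)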
Example 40
# load catalogs
fermi_3fgl = SourceCatalog3FGL()
fermi_2fhl = SourceCatalog2FHL()

# access crab data by corresponding identifier
crab_3fgl = fermi_3fgl['3FGL J0534.5+2201']
crab_2fhl = fermi_2fhl['2FHL J0534.5+2201']

ax = crab_3fgl.spectral_model.plot(crab_3fgl.energy_range, energy_power=2,
                                   label='Fermi 3FGL', color='r',
                                   flux_unit='erg-1 cm-2 s-1')
ax.set_ylim(1e-12, 1E-9)

# set up an energy array to evaluate the butterfly
emin, emax = crab_3fgl.energy_range
energy = EnergyBounds.equal_log_spacing(emin, emax, 100)
butterfly_3fg = crab_3fgl.spectrum.butterfly(energy)

butterfly_3fg.plot(crab_3fgl.energy_range, ax=ax, energy_power=2, color='r',
                   flux_unit='erg-1 cm-2 s-1')

crab_3fgl.flux_points.plot(ax=ax, sed_type='eflux', color='r',
                           y_unit='erg cm-2 s-1')

crab_2fhl.spectral_model.plot(crab_2fhl.energy_range, ax=ax, energy_power=2,
                              c='g', label='Fermi 2FHL', flux_unit='erg-1 cm-2 s-1')

# set up an energy array to evaluate the butterfly using the 2FHL energy range
emin, emax = crab_2fhl.energy_range
energy = EnergyBounds.equal_log_spacing(emin, emax, 100)
butterfly_2fhl = crab_2fhl.spectrum.butterfly(energy)