Example #1
def calc_drm(skydir, ltc, event_class, event_types,
             egy_bins, cth_bins, nbin=64):
    """Calculate the detector response matrix."""
    npts = int(np.ceil(128. / bins_per_dec(egy_bins)))
    egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))

    etrue_bins = 10**np.linspace(1.0, 6.5, int(nbin * 5.5) + 1)
    egy = 10**utils.edge_to_center(np.log10(egy_bins))
    egy_width = utils.edge_to_width(egy_bins)
    etrue = 10**utils.edge_to_center(np.log10(etrue_bins))
    edisp = create_avg_edisp(skydir, ltc, event_class, event_types,
                             egy, etrue, cth_bins)
    edisp = edisp * egy_width[:, None, None]
    edisp = sum_bins(edisp, 0, npts)
    return edisp
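
The examples above and below lean on fermipy's utils.edge_to_center and utils.edge_to_width helpers. Below is a minimal sketch of the convention they appear to follow (bin midpoints and widths, with geometric centers obtained by working in log space); the stand-in functions are assumptions, not the library code.

import numpy as np

# Assumed minimal stand-ins for the fermipy.utils helpers used above;
# the real implementations may differ in detail.
def edge_to_center(edges):
    edges = np.asarray(edges)
    return 0.5 * (edges[1:] + edges[:-1])

def edge_to_width(edges):
    edges = np.asarray(edges)
    return edges[1:] - edges[:-1]

# Log-centered bin energies and linear bin widths, as in calc_drm above.
egy_bins = 10**np.linspace(2.0, 5.0, 13)        # 100 MeV to 100 GeV
egy = 10**edge_to_center(np.log10(egy_bins))    # geometric bin centers
egy_width = edge_to_width(egy_bins)             # bin widths in MeV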
Example #2
File: irfs.py Project: NAH8/fermipy
def calc_drm(skydir, ltc, event_class, event_types,
             egy_bins, cth_bins, nbin=64):
    """Calculate the detector response matrix."""
    npts = int(np.ceil(128. / bins_per_dec(egy_bins)))
    egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
    
    etrue_bins = 10**np.linspace(1.0, 6.5, int(nbin * 5.5) + 1)
    egy = 10**utils.edge_to_center(np.log10(egy_bins))
    egy_width = utils.edge_to_width(egy_bins)
    etrue = 10**utils.edge_to_center(np.log10(etrue_bins))
    edisp = create_avg_edisp(skydir, ltc, event_class, event_types,
                             egy, etrue, cth_bins)
    edisp = edisp * egy_width[:, None, None]
    edisp = sum_bins(edisp, 0, npts)
    return edisp
Example #3
    def get_skydir_lthist(self, skydir, cth_bins):
        """Get the livetime distribution (observing profile) for a given sky
        direction with binning in incidence angle defined by
        ``cth_bins``.

        Parameters
        ----------
        skydir : `~astropy.coordinates.SkyCoord`
            Sky coordinate for which the observing profile will be
            computed.

        cth_bins : `~numpy.ndarray`
            Bin edges in cosine of the incidence angle.

        """
        ra = skydir.ra.deg
        dec = skydir.dec.deg

        npts = 1
        bins = utils.split_bin_edges(cth_bins, npts)

        center = edge_to_center(bins)
        width = edge_to_width(bins)
        ipix = hp.ang2pix(self.hpx.nside, np.pi / 2. - np.radians(dec),
                          np.radians(ra), nest=self.hpx.nest)
        lt = np.histogram(self._cth_center,
                          weights=self.data[:, ipix], bins=bins)[0]
        lt = np.sum(lt.reshape(-1, npts), axis=1)
        return lt
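
A hedged call sketch for the method above; ltc is assumed to be an already-built LTCube instance (LTCube.create appears in the sensitivity examples further down).

import numpy as np
from astropy.coordinates import SkyCoord

# Hypothetical usage; ``ltc`` must be an existing LTCube object.
skydir = SkyCoord(83.63, 22.01, unit='deg', frame='icrs')
cth_bins = np.linspace(0.2, 1.0, 9)            # bin edges in cos(incidence angle)
lt = ltc.get_skydir_lthist(skydir, cth_bins)   # livetime per bin, in seconds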
Example #4
    def create_average_psf(skydir, ltc, event_class, event_types, dtheta, egy):

        if isinstance(event_types, int):
            event_types = bitmask_to_bits(event_types)

        cth_edge = np.linspace(0.0, 1.0, 51)
        cth = edge_to_center(cth_edge)

        wpsf = np.zeros((len(dtheta), len(egy)))
        exps = np.zeros(len(egy))

        ltw = ltc.get_src_lthist(skydir, cth_edge)

        for et in event_types:
            psf = create_psf(event_class, et, dtheta, egy, cth)
            aeff = create_exposure(event_class, et, egy, cth)

            wpsf += np.sum(psf * aeff[np.newaxis, :, :] *
                           ltw[np.newaxis, np.newaxis, :], axis=2)
            exps += np.sum(aeff * ltw[np.newaxis, :], axis=1)

        wpsf /= exps[np.newaxis, :]

        return wpsf
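
The loop above averages the PSF over incidence angle with weights given by effective area times livetime. A toy version of that reduction, with made-up array shapes, is:

import numpy as np

rng = np.random.default_rng(0)
psf = rng.random((3, 4, 50))     # PSF vs (dtheta, energy, cos(theta))
aeff = rng.random((4, 50))       # effective area vs (energy, cos(theta))
ltw = rng.random(50)             # livetime per cos(theta) bin

wpsf = np.sum(psf * aeff[np.newaxis, :, :] * ltw[np.newaxis, np.newaxis, :], axis=2)
exps = np.sum(aeff * ltw[np.newaxis, :], axis=1)
wpsf /= exps[np.newaxis, :]      # exposure-weighted PSF vs (dtheta, energy)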
Example #5
def calc_exp(skydir, ltc, event_class, event_types, egy, cth_bins, npts=None):
    """Calculate the exposure on a 2D grid of energy and incidence angle.

    Parameters
    ----------
    npts : int    
        Number of points by which to sample the response in each
        incidence angle bin.  If None then npts will be automatically
        set such that incidence angle is sampled on intervals of at
        most 0.025 in Cos(Theta).

    Returns
    -------
    exp : `~numpy.ndarray`
        2D Array of exposures vs. energy and incidence angle.

    """

    if npts is None:
        npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.025))

    exp = np.zeros((len(egy), len(cth_bins) - 1))
    cth_bins = utils.split_bin_edges(cth_bins, npts)
    cth = edge_to_center(cth_bins)
    ltw = ltc.get_skydir_lthist(skydir, cth_bins).reshape(-1, npts)
    for et in event_types:
        aeff = create_aeff(event_class, et, egy, cth)
        aeff = aeff.reshape(exp.shape + (npts, ))
        exp += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1)

    return exp
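
The oversampling above relies on utils.split_bin_edges. A sketch of its assumed behavior (subdivide every bin into npts equal sub-bins; this is not the library routine itself):

import numpy as np

def split_bin_edges(edges, npts=2):
    """Assumed behavior: split each bin defined by ``edges`` into ``npts`` sub-bins."""
    edges = np.asarray(edges)
    if npts < 2:
        return edges
    sub = [np.linspace(lo, hi, npts + 1)[:-1]
           for lo, hi in zip(edges[:-1], edges[1:])]
    return np.append(np.concatenate(sub), edges[-1])

cth_bins = np.linspace(0.2, 1.0, 5)                                # 4 bins of width 0.2
npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.025))  # -> 8
fine_bins = split_bin_edges(cth_bins, npts)                        # 32 sub-bins of width 0.025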
Example #6
def create_avg_rsp(rsp_fn, skydir, ltc, event_class, event_types, x,
                   egy, cth_bins, npts=None):
    """Calculate the weighted response function.
    """
    if npts is None:
        npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.05))

    wrsp = np.zeros((len(x), len(egy), len(cth_bins) - 1))
    exps = np.zeros((len(egy), len(cth_bins) - 1))

    cth_bins = utils.split_bin_edges(cth_bins, npts)
    cth = edge_to_center(cth_bins)
    ltw = ltc.get_skydir_lthist(skydir, cth_bins)
    ltw = ltw.reshape(-1, npts)

    for et in event_types:
        rsp = rsp_fn(event_class, et, x, egy, cth)
        aeff = create_aeff(event_class, et, egy, cth)
        rsp = rsp.reshape(wrsp.shape + (npts,))
        aeff = aeff.reshape(exps.shape + (npts,))
        wrsp += np.sum(rsp * aeff[np.newaxis, :, :, :] *
                       ltw[np.newaxis, np.newaxis, :, :], axis=-1)
        exps += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1)

    exps_inv = np.zeros_like(exps)
    exps_inv[exps > 0] = 1. / exps[exps > 0]
    wrsp *= exps_inv[np.newaxis, :, :]
    return wrsp
Example #7
    def __init__(self, gdiff, iso, ltc, ebins, event_class, event_types=None,
                 gdiff_fit=None, iso_fit=None, spatial_model='PointSource',
                 spatial_size=None):

        self._gdiff = gdiff
        self._gdiff_fit = gdiff_fit
        self._iso = iso
        self._iso_fit = iso_fit
        self._ltc = ltc
        self._ebins = ebins
        self._log_ebins = np.log10(ebins)
        self._ectr = np.exp(utils.edge_to_center(np.log(self._ebins)))
        self._event_class = event_class
        self._spatial_model = spatial_model
        self._spatial_size = spatial_size
        if event_types is None:
            self._event_types = [['FRONT'], ['BACK']]
        else:
            self._event_types = event_types

        self._psf = []
        self._exp = []

        ebins = 10**np.linspace(1.0, 6.0, 5 * 8 + 1)
        skydir = SkyCoord(0.0, 0.0, unit='deg')
        for et in self._event_types:
            self._psf += [irfs.PSFModel.create(skydir.icrs, self._ltc,
                                               self._event_class, et,
                                               ebins)]
            self._exp += [irfs.ExposureMap.create(self._ltc,
                                                  self._event_class, et,
                                                  ebins)]
Example #8
    def interp_bin(self, egy_bins, dtheta, scale_fn=None):
        """Evaluate the bin-averaged PSF model over the energy bins ``egy_bins``.

        Parameters
        ----------
        egy_bins : array_like
            Energy bin edges in MeV.

        dtheta : array_like
            Array of angular separations in degrees.

        scale_fn : callable        
            Function that evaluates the PSF scaling function.
            Argument is energy in MeV.
        """

        npts = 4
        egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
        egy = np.exp(utils.edge_to_center(np.log(egy_bins)))
        log_energies = np.log10(egy)

        vals = self.interp(egy[None, :], dtheta[:, None],
                           scale_fn=scale_fn)
        wts = np.exp(self._wts_fn((log_energies,)))
        wts = wts.reshape((1,) + wts.shape)
        vals = np.sum(
            (vals * wts).reshape((vals.shape[0], int(vals.shape[1] / npts), npts)), axis=2)
        vals /= np.sum(wts.reshape(wts.shape[0],
                                   int(wts.shape[1] / npts), npts), axis=2)
        return vals
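
A hedged call sketch; psf is assumed to be an already-constructed PSFModel instance.

import numpy as np

# Hypothetical usage of the method above.
egy_bins = 10**np.linspace(2.0, 5.0, 13)    # observed-energy bin edges in MeV
dtheta = np.linspace(0.0, 2.0, 101)         # angular separations in degrees
vals = psf.interp_bin(egy_bins, dtheta)     # bin-averaged PSF, shape (len(dtheta), 12)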
Example #9
def calc_exp(skydir, ltc, event_class, event_types,
             egy, cth_bins, npts=None):
    """Calculate the exposure on a 2D grid of energy and incidence angle.

    Parameters
    ----------
    npts : int    
        Number of points by which to sample the response in each
        incidence angle bin.  If None then npts will be automatically
        set such that incidence angle is sampled on intervals of at
        most 0.025 in Cos(Theta).

    Returns
    -------
    exp : `~numpy.ndarray`
        2D Array of exposures vs. energy and incidence angle.

    """

    if npts is None:
        npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.025))

    exp = np.zeros((len(egy), len(cth_bins) - 1))
    cth_bins = utils.split_bin_edges(cth_bins, npts)
    cth = edge_to_center(cth_bins)
    ltw = ltc.get_skydir_lthist(skydir, cth_bins).reshape(-1, npts)
    for et in event_types:
        aeff = create_aeff(event_class, et, egy, cth)
        aeff = aeff.reshape(exp.shape + (npts,))
        exp += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1)

    return exp
Example #10
    def interp_bin(self, egy_bins, dtheta, scale_fn=None):
        """Evaluate the bin-averaged PSF model over the energy bins ``egy_bins``.

        Parameters
        ----------
        egy_bins : array_like
            Energy bin edges in MeV.

        dtheta : array_like
            Array of angular separations in degrees.

        scale_fn : callable        
            Function that evaluates the PSF scaling function.
            Argument is energy in MeV.
        """

        npts = 4
        egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
        egy = np.exp(utils.edge_to_center(np.log(egy_bins)))
        log_energies = np.log10(egy)

        vals = self.interp(egy[None, :], dtheta[:, None], scale_fn=scale_fn)
        wts = np.exp(self._wts_fn((log_energies, )))
        wts = wts.reshape((1, ) + wts.shape)
        vals = np.sum((vals * wts).reshape(
            (vals.shape[0], int(vals.shape[1] / npts), npts)),
                      axis=2)
        vals /= np.sum(wts.reshape(wts.shape[0], int(wts.shape[1] / npts),
                                   npts),
                       axis=2)
        return vals
Example #11
def create_avg_rsp(rsp_fn,
                   skydir,
                   ltc,
                   event_class,
                   event_types,
                   x,
                   egy,
                   cth_bins,
                   npts=None):

    if npts is None:
        npts = int(np.ceil(np.max(cth_bins[1:] - cth_bins[:-1]) / 0.05))

    wrsp = np.zeros((len(x), len(egy), len(cth_bins) - 1))
    exps = np.zeros((len(egy), len(cth_bins) - 1))

    cth_bins = utils.split_bin_edges(cth_bins, npts)
    cth = edge_to_center(cth_bins)
    ltw = ltc.get_skydir_lthist(skydir, cth_bins)
    ltw = ltw.reshape(-1, npts)

    for et in event_types:
        rsp = rsp_fn(event_class, et, x, egy, cth)
        aeff = create_aeff(event_class, et, egy, cth)
        rsp = rsp.reshape(wrsp.shape + (npts, ))
        aeff = aeff.reshape(exps.shape + (npts, ))
        wrsp += np.sum(rsp * aeff[np.newaxis, :, :, :] *
                       ltw[np.newaxis, np.newaxis, :, :],
                       axis=-1)
        exps += np.sum(aeff * ltw[np.newaxis, :, :], axis=-1)

    wrsp /= exps[np.newaxis, :, :]
    return wrsp
Example #12
File: irfs.py Project: NAH8/fermipy
    def get_skydir_lthist(self, skydir, cth_bins):
        """Get the livetime distribution (observing profile) for a given sky
        direction with binning in incidence angle defined by
        ``cth_bins``.

        Parameters
        ----------
        skydir : `~astropy.coordinates.SkyCoord`
            Sky coordinate for which the observing profile will be
            computed.

        cth_bins : `~numpy.ndarray`
            Bin edges in cosine of the incidence angle.

        """
        ra = skydir.ra.deg
        dec = skydir.dec.deg

        bins = np.linspace(cth_bins[0], cth_bins[-1],
                            (len(cth_bins) - 1) * 4 + 1)
        center = edge_to_center(bins)
        width = edge_to_width(bins)
        ipix = hp.ang2pix(self.hpx.nside, np.pi / 2. - np.radians(dec),
                          np.radians(ra), nest=self.hpx.nest)
        lt = np.interp(center, self._cth_center,
                       self.data[:, ipix] / self._cth_width) * width
        lt = np.sum(lt.reshape(-1, 4), axis=1)
        return lt
Example #13
    def get_skydir_lthist(self, skydir, cth_bins):
        """Get the livetime distribution (observing profile) for a given sky
        direction with binning in incidence angle defined by
        ``cth_bins``.

        Parameters
        ----------
        skydir : `~astropy.coordinates.SkyCoord`
            Sky coordinate for which the observing profile will be
            computed.

        cth_bins : `~numpy.ndarray`
            Bin edges in cosine of the incidence angle.

        """
        ra = skydir.ra.deg
        dec = skydir.dec.deg

        npts = 1
        bins = utils.split_bin_edges(cth_bins, npts)

        center = edge_to_center(bins)
        width = edge_to_width(bins)
        ipix = hp.ang2pix(self.hpx.nside, np.pi / 2. - np.radians(dec),
                          np.radians(ra), nest=self.hpx.nest)
        lt = np.histogram(self._cth_center,
                          weights=self.data[:, ipix], bins=bins)[0]
        lt = np.sum(lt.reshape(-1, npts), axis=1)
        return lt
Example #14
def compute_ps_counts(ebins, exp, psf, bkg, fn, egy_dim=0, spatial_model='PointSource',
                      spatial_size=1E-3):
    """Calculate the observed signal and background counts given models
    for the exposure, background intensity, PSF, and source flux.

    Parameters
    ----------
    ebins : `~numpy.ndarray`
        Array of energy bin edges.

    exp : `~numpy.ndarray`
        Model for exposure.

    psf : `~fermipy.irfs.PSFModel`
        Model for average PSF.

    bkg : `~numpy.ndarray`
        Array of background intensities.

    fn : `~fermipy.spectrum.SpectralFunction`

    egy_dim : int
        Index of energy dimension in ``bkg`` and ``exp`` arrays.

    """
    ewidth = utils.edge_to_width(ebins)
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    r68 = psf.containment_angle(ectr, fraction=0.68)
    if spatial_model != 'PointSource':
        r68[r68 < spatial_size] = spatial_size

    # * np.ones((len(ectr), 31))
    theta_edges = np.linspace(0.0, 3.0, 31)[np.newaxis, :]
    theta_edges = theta_edges * r68[:, np.newaxis]
    theta = 0.5 * (theta_edges[:, :-1] + theta_edges[:, 1:])
    domega = np.pi * (theta_edges[:, 1:]**2 - theta_edges[:, :-1]**2)

    if spatial_model == 'PointSource':
        sig_pdf = domega * psf.interp(ectr[:, np.newaxis], theta)
    elif spatial_model == 'RadialGaussian':
        sig_pdf = domega * utils.convolve2d_gauss(lambda t: psf.interp(ectr[:, np.newaxis, np.newaxis], t),
                                                  theta, spatial_size / 1.5095921854516636, nstep=2000)
    elif spatial_model == 'RadialDisk':
        sig_pdf = domega * utils.convolve2d_disk(lambda t: psf.interp(ectr[:, np.newaxis, np.newaxis], t),
                                                 theta, spatial_size / 0.8246211251235321)
    else:
        raise ValueError('Invalid spatial model: {}'.format(spatial_model))

    sig_pdf *= (np.pi / 180.)**2
    sig_flux = fn.flux(ebins[:-1], ebins[1:])

    # Background and signal counts
    bkgc = bkg[..., np.newaxis] * domega * exp[..., np.newaxis] * \
        ewidth[..., np.newaxis] * (np.pi / 180.)**2
    sigc = sig_pdf * sig_flux[..., np.newaxis] * exp[..., np.newaxis]

    return sigc, bkgc
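
The radial binning above scales a fixed 30-bin grid out to 3 x r68 for each energy bin. A toy version with made-up r68 values:

import numpy as np

r68 = np.array([1.0, 0.5, 0.2])                                     # deg, one value per energy bin
theta_edges = np.linspace(0.0, 3.0, 31)[np.newaxis, :] * r68[:, np.newaxis]
theta = 0.5 * (theta_edges[:, :-1] + theta_edges[:, 1:])            # annulus centers, deg
domega = np.pi * (theta_edges[:, 1:]**2 - theta_edges[:, :-1]**2)   # solid angle per annulus, deg^2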
Example #15
File: irfs.py Project: NAH8/fermipy
    def __init__(self, data, hpx, cth_edges, tstart=None, tstop=None):
        HpxMap.__init__(self, data, hpx)
        self._cth_edges = cth_edges
        self._cth_center = edge_to_center(self._cth_edges)
        self._cth_width = edge_to_width(self._cth_edges)
        self._domega = (self._cth_edges[1:] -
                        self._cth_edges[:-1]) * 2 * np.pi
        self._tstart = tstart
        self._tstop = tstop
Example #16
File: irfs.py Project: NAH8/fermipy
def create_wtd_psf(skydir, ltc, event_class, event_types, dtheta,
                   egy_bins, cth_bins, fn, nbin=64, npts=1):
    """Create an exposure- and dispersion-weighted PSF model for a source
    with spectral parameterization ``fn``.  The calculation performed
    by this method accounts for the influence of energy dispersion on
    the PSF.

    Parameters
    ----------
    dtheta : `~numpy.ndarray`

    egy_bins : `~numpy.ndarray`
        Bin edges in observed energy.

    cth_bins : `~numpy.ndarray`
        Bin edges in cosine of the true incidence angle.

    nbin : int
        Number of bins per decade in true energy.

    npts : int
        Number of points by which to oversample each energy bin.

    """
    #npts = int(np.ceil(32. / bins_per_dec(egy_bins)))
    egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
    etrue_bins = 10**np.linspace(1.0, 6.5, int(nbin * 5.5) + 1)
    etrue = 10**utils.edge_to_center(np.log10(etrue_bins))

    psf = create_avg_psf(skydir, ltc, event_class, event_types, dtheta,
                         etrue, cth_bins)
    drm = calc_drm(skydir, ltc, event_class, event_types,
                   egy_bins, cth_bins, nbin=nbin)
    cnts = calc_counts(skydir, ltc, event_class, event_types,
                       etrue_bins, cth_bins, fn)
    
    wts = drm * cnts[None, :, :]
    wts_norm = np.sum(wts, axis=1)
    wts_norm[wts_norm == 0] = 1.0
    wts = wts / wts_norm[:, None, :]
    wpsf = np.sum(wts[None, :, :, :] * psf[:, None, :, :], axis=2)
    wts = np.sum(wts[None, :, :, :], axis=2)
    
    if npts > 1:
        shape = (wpsf.shape[0], int(wpsf.shape[1] / npts), npts, wpsf.shape[2])
        wpsf = np.sum((wpsf * wts).reshape(shape), axis=2)
        shape = (wts.shape[0], int(wts.shape[1] / npts), npts, wts.shape[2])
        wpsf = wpsf / np.sum(wts.reshape(shape), axis=2)

    return wpsf
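
The weighting above combines the detector response matrix with predicted counts and then averages the true-energy PSF over true energy. A toy version of that reduction with made-up shapes:

import numpy as np

rng = np.random.default_rng(0)
n_dtheta, n_eobs, n_etrue, n_cth = 5, 3, 8, 2
psf = rng.random((n_dtheta, n_etrue, n_cth))    # PSF vs true energy
drm = rng.random((n_eobs, n_etrue, n_cth))      # detector response matrix
cnts = rng.random((n_etrue, n_cth))             # predicted counts vs true energy

wts = drm * cnts[None, :, :]
wts /= np.sum(wts, axis=1)[:, None, :]          # normalize over true energy
wpsf = np.sum(wts[None, :, :, :] * psf[:, None, :, :], axis=2)   # (n_dtheta, n_eobs, n_cth)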
Example #17
def create_wtd_psf(skydir, ltc, event_class, event_types, dtheta,
                   egy_bins, cth_bins, fn, nbin=64, npts=1):
    """Create an exposure- and dispersion-weighted PSF model for a source
    with spectral parameterization ``fn``.  The calculation performed
    by this method accounts for the influence of energy dispersion on
    the PSF.

    Parameters
    ----------
    dtheta : `~numpy.ndarray`

    egy_bins : `~numpy.ndarray`
        Bin edges in observed energy.

    cth_bins : `~numpy.ndarray`
        Bin edges in cosine of the true incidence angle.

    nbin : int
        Number of bins per decade in true energy.

    npts : int
        Number of points by which to oversample each energy bin.

    """
    #npts = int(np.ceil(32. / bins_per_dec(egy_bins)))
    egy_bins = np.exp(utils.split_bin_edges(np.log(egy_bins), npts))
    etrue_bins = 10**np.linspace(1.0, 6.5, int(nbin * 5.5) + 1)
    etrue = 10**utils.edge_to_center(np.log10(etrue_bins))

    psf = create_avg_psf(skydir, ltc, event_class, event_types, dtheta,
                         etrue, cth_bins)
    drm = calc_drm(skydir, ltc, event_class, event_types,
                   egy_bins, cth_bins, nbin=nbin)
    cnts = calc_counts(skydir, ltc, event_class, event_types,
                       etrue_bins, cth_bins, fn)

    wts = drm * cnts[None, :, :]
    wts_norm = np.sum(wts, axis=1)
    wts_norm[wts_norm == 0] = 1.0
    wts = wts / wts_norm[:, None, :]
    wpsf = np.sum(wts[None, :, :, :] * psf[:, None, :, :], axis=2)
    wts = np.sum(wts[None, :, :, :], axis=2)

    if npts > 1:
        shape = (wpsf.shape[0], int(wpsf.shape[1] / npts), npts, wpsf.shape[2])
        wpsf = np.sum((wpsf * wts).reshape(shape), axis=2)
        shape = (wts.shape[0], int(wts.shape[1] / npts), npts, wts.shape[2])
        wpsf = wpsf / np.sum(wts.reshape(shape), axis=2)

    return wpsf
Example #18
    def compute_counts(self, skydir, fn, ebins=None):
        """Compute signal and background counts for a point source at
        position ``skydir`` with spectral parameterization ``fn``.

        Parameters
        ----------
        skydir : `~astropy.coordinates.SkyCoord`

        ebins : `~numpy.ndarray`

        Returns
        -------
        sig : `~numpy.ndarray`
            Signal counts array.  Dimensions are energy, angular
            separation, and event type.

        bkg : `~numpy.ndarray`
            Background counts array.  Dimensions are energy, angular
            separation, and event type.

        """

        if ebins is None:
            ebins = self.ebins
            ectr = self.ectr
        else:
            ectr = np.exp(utils.edge_to_center(np.log(ebins)))

        skydir_cel = skydir.transform_to('icrs')
        skydir_gal = skydir.transform_to('galactic')

        sig = []
        bkg = []
        for psf, exp in zip(self._psf, self._exp):
            expv = exp.interpolate(
                skydir_cel.icrs.ra.deg, skydir_cel.icrs.dec.deg, ectr)
            bkgv = self._gdiff.interpolate(
                skydir_gal.l.deg, skydir_gal.b.deg, ectr)
            isov = np.exp(np.interp(np.log(ectr), np.log(self._iso[0]),
                                    np.log(self._iso[1])))
            bkgv += isov
            s, b = irfs.compute_ps_counts(ebins, expv, psf, bkgv, fn)
            sig += [s]
            bkg += [b]

        sig = np.concatenate([np.expand_dims(t, -1) for t in sig])
        bkg = np.concatenate([np.expand_dims(t, -1) for t in bkg])

        return sig, bkg
Example #19
    def plot_projection(self, iaxis, **kwargs):

        data = kwargs.pop('data', self._data)
        noerror = kwargs.pop('noerror', False)

        axes = wcs_to_axes(self._wcs, self._data.shape[::-1])
        x = edge_to_center(axes[iaxis])
        w = edge_to_width(axes[iaxis])

        c = self.get_data_projection(data, axes, iaxis, erange=self._erange)

        if noerror:
            plt.errorbar(x, c, **kwargs)
        else:
            plt.errorbar(x, c, yerr=c**0.5, xerr=w / 2., **kwargs)
Example #20
    def __init__(self, dtheta, energies, cth_bins, exp, psf, wts):
        """Create a PSFModel.

        Parameters
        ----------
        dtheta : `~numpy.ndarray`
            Array of angular offsets in degrees at which the PSF is
            evaluated.

        energies : `~numpy.ndarray`
            Array of energies in MeV at which the PSF is evaluated.

        cth_bins : `~numpy.ndarray`
            Interval in cosine of the incidence angle for which this
            model of the PSF was generated.

        exp : `~numpy.ndarray`
            Array of exposure vs. energy in cm^2 s.

        psf : `~numpy.ndarray`
            2D array of PSF values evaluated on an NxM grid of N
            offset angles and M energies (defined by ``dtheta`` and
            ``energies``).

        wts : `~numpy.ndarray`
            Array of weights vs. energy.  These are used to evaluate
            the bin-averaged PSF model.

        """

        self._dtheta = dtheta
        self._log_energies = np.log10(energies)
        self._energies = energies
        self._cth_bins = cth_bins
        self._cth = utils.edge_to_center(cth_bins)
        self._scale_fn = None
        self._exp = exp
        self._psf = psf
        self._wts = wts
        self._psf_fn = RegularGridInterpolator(
            (self._dtheta, self._log_energies),
            np.log(self._psf),
            bounds_error=False,
            fill_value=None)
        self._wts_fn = RegularGridInterpolator((self._log_energies, ),
                                               np.log(self._wts),
                                               bounds_error=False,
                                               fill_value=None)
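
The PSF and weights above are interpolated in log space with scipy's RegularGridInterpolator. A toy sketch of the same pattern on a made-up grid:

import numpy as np
from scipy.interpolate import RegularGridInterpolator

dtheta = np.linspace(0.0, 2.0, 5)                # offset angles in deg
log_energies = np.linspace(2.0, 5.0, 4)          # log10(E/MeV)
psf_vals = np.exp(-dtheta[:, None]) * 10**(-log_energies[None, :])

psf_fn = RegularGridInterpolator((dtheta, log_energies), np.log(psf_vals),
                                 bounds_error=False, fill_value=None)
val = np.exp(psf_fn((0.3, 3.5)))                 # PSF value at 0.3 deg, E = 10**3.5 MeV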
Example #21
    def get_src_lthist(self, skydir, cth_edges):

        ra = skydir.ra.deg
        dec = skydir.dec.deg

        edges = np.linspace(cth_edges[0], cth_edges[-1],
                            (len(cth_edges) - 1) * 4 + 1)
        center = edge_to_center(edges)
        width = edge_to_width(edges)

        ipix = hp.ang2pix(64, np.pi / 2. - np.radians(dec),
                          np.radians(ra), nest=True)

        lt = np.interp(center, self._cth_center,
                       self._ltmap[ipix, ::-1] / self._cth_width) * width
        lt = np.sum(lt.reshape(-1, 4), axis=1)
        return lt
Example #22
    def __init__(self, dtheta, energies, cth_bins, exp, psf, wts):
        """Create a PSFModel.

        Parameters
        ----------
        dtheta : `~numpy.ndarray`
            Array of angular offsets in degrees at which the PSF is
            evaluated.

        energies : `~numpy.ndarray`
            Array of energies in MeV at which the PSF is evaluated.

        cth_bins : `~numpy.ndarray`
            Interval in cosine of the incidence angle for which this
            model of the PSF was generated.

        exp : `~numpy.ndarray`
            Array of exposure vs. energy in cm^2 s.

        psf : `~numpy.ndarray`
            2D array of PSF values evaluated on an NxM grid of N
            offset angles and M energies (defined by ``dtheta`` and
            ``energies``).

        wts : `~numpy.ndarray`
            Array of weights vs. energy.  These are used to evaluate
            the bin-averaged PSF model.

        """

        self._dtheta = dtheta
        self._log_energies = np.log10(energies)
        self._energies = energies
        self._cth_bins = cth_bins
        self._cth = utils.edge_to_center(cth_bins)
        self._scale_fn = None
        self._exp = exp
        self._psf = psf
        self._wts = wts
        self._psf_fn = RegularGridInterpolator((self._dtheta, self._log_energies),
                                               np.log(self._psf),
                                               bounds_error=False,
                                               fill_value=None)
        self._wts_fn = RegularGridInterpolator((self._log_energies,),
                                               np.log(self._wts),
                                               bounds_error=False,
                                               fill_value=None)
Example #23
    def load_ltfile(self, ltfile):

        hdulist = pyfits.open(ltfile)

        if self._ltmap is None:
            self._ltmap = hdulist[1].data.field(0)
            self._tstart = hdulist[0].header['TSTART']
            self._tstop = hdulist[0].header['TSTOP']
        else:
            self._ltmap += hdulist[1].data.field(0)
            self._tstart = min(self._tstart, hdulist[0].header['TSTART'])
            self._tstop = max(self._tstop, hdulist[0].header['TSTOP'])

        cth_edges = np.array(hdulist[3].data.field(0))
        cth_edges = np.concatenate(([1], cth_edges))
        self._cth_edges = cth_edges[::-1]
        self._cth_center = edge_to_center(self._cth_edges)
        self._cth_width = edge_to_width(self._cth_edges)
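
A hedged modern equivalent of the read above using astropy.io.fits; the file name is a placeholder and the HDU layout is taken from the example.

import numpy as np
from astropy.io import fits

with fits.open('ltcube.fits') as hdulist:        # placeholder path
    ltmap = hdulist[1].data.field(0)
    tstart = hdulist[0].header['TSTART']
    tstop = hdulist[0].header['TSTOP']
    cth_edges = np.concatenate(([1], np.array(hdulist[3].data.field(0))))[::-1]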
Example #24
    def compute_counts(self, skydir, fn, ebins=None):
        """Compute signal and background counts for a point source at
        position ``skydir`` with spectral parameterization ``fn``.

        Parameters
        ----------
        ebins : `~numpy.ndarray`

        Returns
        -------
        sig : `~numpy.ndarray`
            Signal counts array.  Dimensions are energy, angular
            separation, and event type.

        bkg : `~numpy.ndarray`
            Background counts array.  Dimensions are energy, angular
            separation, and event type.

        """

        if ebins is None:
            ebins = self.ebins
            ectr = self.ectr
        else:
            ectr = np.exp(utils.edge_to_center(np.log(ebins)))

        sig = []
        bkg = []
        for psf, exp in zip(self._psf, self._exp):
            expv = exp.interpolate(skydir.icrs.ra.deg, skydir.icrs.dec.deg,
                                   ectr)
            bkgv = self._gdiff.interpolate(skydir.l.deg, skydir.b.deg, ectr)
            isov = np.exp(
                np.interp(np.log(ectr), np.log(self._iso[0]),
                          np.log(self._iso[1])))
            bkgv += isov
            s, b = irfs.compute_ps_counts(ebins, expv, psf, bkgv, fn)
            sig += [s]
            bkg += [b]

        sig = np.concatenate([np.expand_dims(t, -1) for t in sig])
        bkg = np.concatenate([np.expand_dims(t, -1) for t in bkg])
        return sig, bkg
Example #25
    def __init__(self, skydir, ltc, event_class, event_types, egy):

        if isinstance(event_types, int):
            event_types = bitmask_to_bits(event_types)

        self._dtheta = np.logspace(-4, 1.5, 1000)
        self._dtheta = np.insert(self._dtheta, 0, [0])
        self._egy = egy

        self._exp = np.zeros(len(egy))
        self._psf = self.create_average_psf(skydir, ltc, event_class,
                                            event_types, self._dtheta, egy)

        cth_edge = np.linspace(0.0, 1.0, 51)
        cth = edge_to_center(cth_edge)
        ltw = ltc.get_src_lthist(skydir, cth_edge)
        for et in event_types:
            aeff = create_exposure(event_class, et, egy, cth)
            self._exp += np.sum(aeff * ltw[np.newaxis, :], axis=1)
Example #26
File: irfs.py Project: NAH8/fermipy
def compute_ps_counts(ebins, exp, psf, bkg, fn, egy_dim=0):
    """Calculate the observed signal and background counts given models
    for the exposure, background intensity, PSF, and source flux.

    Parameters
    ----------
    ebins : `~numpy.ndarray`
        Array of energy bin edges.

    exp : `~numpy.ndarray`
        Model for exposure.

    psf : `~fermipy.irfs.PSFModel`
        Model for average PSF.

    bkg : `~numpy.ndarray`
        Array of background intensities.

    fn : `~fermipy.spectrum.SpectralFunction`

    egy_dim : int
        Index of energy dimension in ``bkg`` and ``exp`` arrays.

    """
    ewidth = utils.edge_to_width(ebins)
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    theta_edges = np.linspace(0.0, 3.0, 31)[
        np.newaxis, :] * np.ones((len(ectr), 31))
    theta_edges *= psf.containment_angle(ectr, fraction=0.68)[:, np.newaxis]
    theta = 0.5 * (theta_edges[:, :-1] + theta_edges[:, 1:])
    domega = np.pi * (theta_edges[:, 1:]**2 - theta_edges[:, :-1]**2)

    sig_pdf = domega * \
        psf.interp(ectr[:, np.newaxis], theta) * (np.pi / 180.)**2
    sig_flux = fn.flux(ebins[:-1], ebins[1:])

    # Background and signal counts
    bkgc = bkg[:, np.newaxis] * domega * exp[:, np.newaxis] * \
        ewidth[:, np.newaxis] * (np.pi / 180.)**2
    sigc = sig_pdf * sig_flux[:, np.newaxis] * exp[:, np.newaxis]

    return sigc, bkgc
Example #27
def compute_ps_counts(ebins, exp, psf, bkg, fn, egy_dim=0):
    """Calculate the observed signal and background counts given models
    for the exposure, background intensity, PSF, and source flux.

    Parameters
    ----------
    ebins : `~numpy.ndarray`
        Array of energy bin edges.

    exp : `~numpy.ndarray`
        Model for exposure.

    psf : `~fermipy.irfs.PSFModel`
        Model for average PSF.

    bkg : `~numpy.ndarray`
        Array of background intensities.

    fn : `~fermipy.spectrum.SpectralFunction`

    egy_dim : int
        Index of energy dimension in ``bkg`` and ``exp`` arrays.

    """
    ewidth = utils.edge_to_width(ebins)
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    theta_edges = np.linspace(0.0, 3.0, 31)[
        np.newaxis, :] * np.ones((len(ectr), 31))
    theta_edges *= psf.containment_angle(ectr, fraction=0.68)[:, np.newaxis]
    theta = 0.5 * (theta_edges[:, :-1] + theta_edges[:, 1:])
    domega = np.pi * (theta_edges[:, 1:]**2 - theta_edges[:, :-1]**2)

    sig_pdf = domega * \
        psf.interp(ectr[:, np.newaxis], theta) * (np.pi / 180.)**2
    sig_flux = fn.flux(ebins[:-1], ebins[1:])

    # Background and signal counts
    bkgc = bkg[:, np.newaxis] * domega * exp[:, np.newaxis] * \
        ewidth[:, np.newaxis] * (np.pi / 180.)**2
    sigc = sig_pdf * sig_flux[:, np.newaxis] * exp[:, np.newaxis]

    return sigc, bkgc
Example #28
    def __init__(self, gdiff, iso, ltc, ebins, event_class, event_types):
        """ 

        Parameters
        ----------
        gdiff : `~fermipy.skymap.SkyMap`
            Galactic diffuse map cube object.

        iso : `~numpy.ndarray`
            Array of background isotropic intensity vs. energy.

        ltc : `~fermipy.irfs.LTCube`

        event_class : str

        event_types : list        

        """
        self._gdiff = gdiff
        self._iso = iso
        self._ltc = ltc
        self._ebins = ebins
        self._log_ebins = np.log10(ebins)
        self._ectr = np.exp(utils.edge_to_center(np.log(self._ebins)))
        self._event_class = event_class
        self._event_types = event_types

        self._psf = []
        self._exp = []

        ebins = 10**np.linspace(1.0, 6.0, 5 * 8 + 1)
        skydir = SkyCoord(0.0, 0.0, unit='deg')
        for et in self._event_types:
            self._psf += [
                irfs.PSFModel.create(skydir.icrs, self._ltc, self._event_class,
                                     et, ebins)
            ]
            self._exp += [
                irfs.ExposureMap.create(self._ltc, self._event_class, et,
                                        ebins)
            ]
Example #29
    def __init__(self, counts, wcs, ebins=None):
        """
        Parameters
        ----------
        counts : `~numpy.ndarray`
            Counts array in row-wise ordering (LON is first dimension).
        """
        Map_Base.__init__(self, counts)
        self._wcs = wcs

        self._npix = counts.shape[::-1]

        if len(self._npix) == 3:
            self._xindex = 2
            self._yindex = 1
        elif len(self._npix) == 2:
            self._xindex = 1
            self._yindex = 0
        else:
            raise Exception('Wrong number of dimensions for Map object.')

        # if len(self._npix) != 3 and len(self._npix) != 2:
        #    raise Exception('Wrong number of dimensions for Map object.')

        self._width = np.array([
            np.abs(self.wcs.wcs.cdelt[0]) * self.npix[0],
            np.abs(self.wcs.wcs.cdelt[1]) * self.npix[1]
        ])
        self._pix_center = np.array([(self.npix[0] - 1.0) / 2.,
                                     (self.npix[1] - 1.0) / 2.])
        self._pix_size = np.array(
            [np.abs(self.wcs.wcs.cdelt[0]),
             np.abs(self.wcs.wcs.cdelt[1])])

        self._skydir = SkyCoord.from_pixel(self._pix_center[0],
                                           self._pix_center[1], self.wcs)
        self._ebins = ebins
        if ebins is not None:
            self._ectr = np.exp(utils.edge_to_center(np.log(ebins)))
        else:
            self._ectr = None
Example #30
    def __init__(self, gdiff, iso, ltc, ebins, event_class, event_types):
        """ 

        Parameters
        ----------
        gdiff : `~fermipy.skymap.SkyMap`
            Galactic diffuse map cube object.

        iso : `~numpy.ndarray`
            Array of background isotropic intensity vs. energy.

        ltc : `~fermipy.ltcube.LTCube`

        event_class : str

        event_types : list        

        """
        self._gdiff = gdiff
        self._iso = iso
        self._ltc = ltc
        self._ebins = ebins
        self._log_ebins = np.log10(ebins)
        self._ectr = np.exp(utils.edge_to_center(np.log(self._ebins)))
        self._event_class = event_class
        self._event_types = event_types

        self._psf = []
        self._exp = []

        ebins = 10**np.linspace(1.0, 6.0, 5 * 8 + 1)
        skydir = SkyCoord(0.0, 0.0, unit='deg')
        for et in self._event_types:
            self._psf += [irfs.PSFModel.create(skydir.icrs, self._ltc,
                                               self._event_class, et,
                                               ebins)]
            self._exp += [irfs.ExposureMap.create(self._ltc,
                                                  self._event_class, et,
                                                  ebins)]
Example #31
    def __init__(self, data, hpx, cth_edges, **kwargs):
        HpxMap.__init__(self, data, hpx)
        self._cth_edges = cth_edges
        self._cth_center = edge_to_center(self._cth_edges)
        self._cth_width = edge_to_width(self._cth_edges)
        self._domega = (self._cth_edges[1:] -
                        self._cth_edges[:-1]) * 2 * np.pi
        self._tstart = kwargs.get('tstart', None)
        self._tstop = kwargs.get('tstop', None)
        self._zmin = kwargs.get('zmin', 0.0)
        self._zmax = kwargs.get('zmax', 180.0)
        self._tab_gti = kwargs.get('tab_gti', None)
        self._header = kwargs.get('header', None)
        self._data_wt = kwargs.get('data_wt', None)

        if self._data_wt is None:
            self._data_wt = np.zeros_like(self.data)

        if self._tab_gti is None:
            cols = [Column(name='START', dtype='f8', unit='s'),
                    Column(name='STOP', dtype='f8', unit='s')]
            self._tab_gti = Table(cols)
Example #32
    def __init__(self, counts, wcs, ebins=None):
        """
        Parameters
        ----------
        counts : `~numpy.ndarray`
            Counts array in row-wise ordering (LON is first dimension).
        """
        Map_Base.__init__(self, counts)
        self._wcs = wcs

        self._npix = counts.shape[::-1]

        if len(self._npix) == 3:
            self._xindex = 2
            self._yindex = 1
        elif len(self._npix) == 2:
            self._xindex = 1
            self._yindex = 0
        else:
            raise Exception('Wrong number of dimensions for Map object.')

        # if len(self._npix) != 3 and len(self._npix) != 2:
        #    raise Exception('Wrong number of dimensions for Map object.')

        self._width = np.array([np.abs(self.wcs.wcs.cdelt[0]) * self.npix[0],
                                np.abs(self.wcs.wcs.cdelt[1]) * self.npix[1]])
        self._pix_center = np.array([(self.npix[0] - 1.0) / 2.,
                                     (self.npix[1] - 1.0) / 2.])
        self._pix_size = np.array([np.abs(self.wcs.wcs.cdelt[0]),
                                   np.abs(self.wcs.wcs.cdelt[1])])

        self._skydir = SkyCoord.from_pixel(self._pix_center[0],
                                           self._pix_center[1],
                                           self.wcs)
        self._ebins = ebins
        if ebins is not None:
            self._ectr = np.exp(utils.edge_to_center(np.log(ebins)))
        else:
            self._ectr = None
Example #33
    def __init__(self, data, hpx, cth_edges, **kwargs):
        HpxMap.__init__(self, data, hpx)
        self._cth_edges = cth_edges
        self._cth_center = edge_to_center(self._cth_edges)
        self._cth_width = edge_to_width(self._cth_edges)
        self._domega = (self._cth_edges[1:] -
                        self._cth_edges[:-1]) * 2 * np.pi
        self._tstart = kwargs.get('tstart', None)
        self._tstop = kwargs.get('tstop', None)
        self._zmin = kwargs.get('zmin', 0.0)
        self._zmax = kwargs.get('zmax', 180.0)
        self._tab_gti = kwargs.get('tab_gti', None)
        self._header = kwargs.get('header', None)
        self._data_wt = kwargs.get('data_wt', None)

        if self._data_wt is None:
            self._data_wt = np.zeros_like(self.data)

        if self._tab_gti is None:
            cols = [Column(name='START', dtype='f8', unit='s'),
                    Column(name='STOP', dtype='f8', unit='s')]
            self._tab_gti = Table(cols)
Example #34
def run_flux_sensitivity(**kwargs):

    index = kwargs.get('index', 2.0)
    sedshape = kwargs.get('sedshape', 'PowerLaw')
    cutoff = kwargs.get('cutoff', 1e3)
    curvindex = kwargs.get('curvindex', 1.0)
    beta = kwargs.get('beta', 0.0)
    dmmass = kwargs.get('DMmass', 100.0)
    dmchannel = kwargs.get('DMchannel', 'bb')
    emin = kwargs.get('emin', 10**1.5)
    emax = kwargs.get('emax', 10**6.0)
    nbin = kwargs.get('nbin', 18)
    glon = kwargs.get('glon', 0.0)
    glat = kwargs.get('glat', 0.0)
    ltcube_filepath = kwargs.get('ltcube', None)
    galdiff_filepath = kwargs.get('galdiff', None)
    isodiff_filepath = kwargs.get('isodiff', None)
    galdiff_fit_filepath = kwargs.get('galdiff_fit', None)
    isodiff_fit_filepath = kwargs.get('isodiff_fit', None)
    wcs_npix = kwargs.get('wcs_npix', 40)
    wcs_cdelt = kwargs.get('wcs_cdelt', 0.5)
    wcs_proj = kwargs.get('wcs_proj', 'AIT')
    map_type = kwargs.get('map_type', None)
    spatial_model = kwargs.get('spatial_model', 'PointSource')
    spatial_size = kwargs.get('spatial_size', 1E-2)

    obs_time_yr = kwargs.get('obs_time_yr', None)
    event_class = kwargs.get('event_class', 'P8R2_SOURCE_V6')
    min_counts = kwargs.get('min_counts', 3.0)
    ts_thresh = kwargs.get('ts_thresh', 25.0)
    nside = kwargs.get('hpx_nside', 16)
    output = kwargs.get('output', None)

    event_types = [['FRONT', 'BACK']]

    if sedshape == 'PowerLaw':
        fn = spectrum.PowerLaw([1E-13, -index], scale=1E3)
    elif sedshape == 'PLSuperExpCutoff':
        fn = spectrum.PLSuperExpCutoff(
            [1E-13, -index, cutoff, curvindex], scale=1E3)
    elif sedshape == 'LogParabola':
        fn = spectrum.LogParabola([1E-13, -index, beta], scale=1E3)
    elif sedshape == 'DM':
        fn = spectrum.DMFitFunction([1E-26, dmmass], chan=dmchannel)

    log_ebins = np.linspace(np.log10(emin),
                            np.log10(emax), nbin + 1)
    ebins = 10**log_ebins
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    c = SkyCoord(glon, glat, unit='deg', frame='galactic')

    if ltcube_filepath is None:

        if obs_time_yr is None:
            raise Exception('No observation time defined.')

        ltc = LTCube.create_from_obs_time(obs_time_yr * 365 * 24 * 3600.)
    else:
        ltc = LTCube.create(ltcube_filepath)
        if obs_time_yr is not None:
            ltc._counts *= obs_time_yr * 365 * \
                24 * 3600. / (ltc.tstop - ltc.tstart)

    gdiff = skymap.Map.create_from_fits(galdiff_filepath)
    gdiff_fit = None
    if galdiff_fit_filepath is not None:
        gdiff_fit = skymap.Map.create_from_fits(galdiff_fit_filepath)

    if isodiff_filepath is None:
        isodiff = utils.resolve_file_path('iso_%s_v06.txt' % event_class,
                                          search_dirs=[os.path.join('$FERMIPY_ROOT', 'data'),
                                                       '$FERMI_DIFFUSE_DIR'])
        isodiff = os.path.expandvars(isodiff)
    else:
        isodiff = isodiff_filepath

    iso = np.loadtxt(isodiff, unpack=True)
    iso_fit = None
    if isodiff_fit_filepath is not None:
        iso_fit = np.loadtxt(isodiff_fit_filepath, unpack=True)

    scalc = SensitivityCalc(gdiff, iso, ltc, ebins,
                            event_class, event_types, gdiff_fit=gdiff_fit,
                            iso_fit=iso_fit, spatial_model=spatial_model,
                            spatial_size=spatial_size)

    # Compute Maps
    map_diff_flux = None
    map_diff_npred = None
    map_int_flux = None
    map_int_npred = None

    map_nstep = 500

    if map_type == 'hpx':

        hpx = HPX(nside, True, 'GAL', ebins=ebins)
        map_diff_flux = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_diff_npred = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_skydir = map_diff_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.diff_flux_threshold(
                map_skydir[s], fn, ts_thresh, min_counts)
            map_diff_flux.data[:, s] = o['flux'].T
            map_diff_npred.data[:, s] = o['npred'].T

        hpx = HPX(nside, True, 'GAL')
        map_int_flux = HpxMap(np.zeros((hpx.npix)), hpx)
        map_int_npred = HpxMap(np.zeros((hpx.npix)), hpx)
        map_skydir = map_int_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.int_flux_threshold(
                map_skydir[s], fn, ts_thresh, min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    elif map_type == 'wcs':

        wcs_shape = [wcs_npix, wcs_npix]
        wcs_size = wcs_npix * wcs_npix

        map_diff_flux = Map.create(
            c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj, ebins=ebins)
        map_diff_npred = Map.create(
            c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj, ebins=ebins)
        map_skydir = map_diff_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(
                np.arange(i, min(i + map_nstep, wcs_size)), wcs_shape)
            s = (slice(None), idx[1], idx[0])
            o = scalc.diff_flux_threshold(
                map_skydir[slice(i, i + map_nstep)], fn, ts_thresh, min_counts)
            map_diff_flux.data[s] = o['flux'].T
            map_diff_npred.data[s] = o['npred'].T

        map_int_flux = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_int_npred = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_skydir = map_int_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(
                np.arange(i, min(i + map_nstep, wcs_size)), wcs_shape)
            s = (idx[1], idx[0])
            o = scalc.int_flux_threshold(
                map_skydir[slice(i, i + map_nstep)], fn, ts_thresh, min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    o = scalc.diff_flux_threshold(c, fn, ts_thresh, min_counts)

    cols = [Column(name='e_min', dtype='f8', data=scalc.ebins[:-1], unit='MeV'),
            Column(name='e_ref', dtype='f8', data=o['e_ref'], unit='MeV'),
            Column(name='e_max', dtype='f8', data=scalc.ebins[1:], unit='MeV'),
            Column(name='flux', dtype='f8', data=o[
                   'flux'], unit='ph / (cm2 s)'),
            Column(name='eflux', dtype='f8', data=o[
                   'eflux'], unit='MeV / (cm2 s)'),
            Column(name='dnde', dtype='f8', data=o['dnde'],
                   unit='ph / (MeV cm2 s)'),
            Column(name='e2dnde', dtype='f8',
                   data=o['e2dnde'], unit='MeV / (cm2 s)'),
            Column(name='npred', dtype='f8', data=o['npred'], unit='ph')]

    tab_diff = Table(cols)

    cols = [Column(name='index', dtype='f8'),
            Column(name='e_min', dtype='f8', unit='MeV'),
            Column(name='e_ref', dtype='f8', unit='MeV'),
            Column(name='e_max', dtype='f8', unit='MeV'),
            Column(name='flux', dtype='f8', unit='ph / (cm2 s)'),
            Column(name='eflux', dtype='f8', unit='MeV / (cm2 s)'),
            Column(name='dnde', dtype='f8', unit='ph / (MeV cm2 s)'),
            Column(name='e2dnde', dtype='f8', unit='MeV / (cm2 s)'),
            Column(name='npred', dtype='f8', unit='ph'),
            Column(name='ebin_e_min', dtype='f8',
                   unit='MeV', shape=(len(ectr),)),
            Column(name='ebin_e_ref', dtype='f8',
                   unit='MeV', shape=(len(ectr),)),
            Column(name='ebin_e_max', dtype='f8',
                        unit='MeV', shape=(len(ectr),)),
            Column(name='ebin_flux', dtype='f8',
                   unit='ph / (cm2 s)', shape=(len(ectr),)),
            Column(name='ebin_eflux', dtype='f8',
                   unit='MeV / (cm2 s)', shape=(len(ectr),)),
            Column(name='ebin_dnde', dtype='f8',
                   unit='ph / (MeV cm2 s)', shape=(len(ectr),)),
            Column(name='ebin_e2dnde', dtype='f8',
                   unit='MeV / (cm2 s)', shape=(len(ectr),)),
            Column(name='ebin_npred', dtype='f8', unit='ph', shape=(len(ectr),))]

    cols_ebounds = [Column(name='E_MIN', dtype='f8',
                           unit='MeV', data=ebins[:-1]),
                    Column(name='E_MAX', dtype='f8',
                           unit='MeV', data=ebins[1:]), ]

    tab_int = Table(cols)
    tab_ebounds = Table(cols_ebounds)

    index = np.linspace(1.0, 5.0, 4 * 4 + 1)

    for g in index:
        fn = spectrum.PowerLaw([1E-13, -g], scale=10**3.5)
        o = scalc.int_flux_threshold(c, fn, ts_thresh, 3.0)
        row = [g]
        for colname in tab_int.columns:
            if colname == 'index':
                continue
            if 'ebin' in colname:
                row += [o['bins'][colname.replace('ebin_', '')]]
            else:
                row += [o[colname]]

        tab_int.add_row(row)

    hdulist = fits.HDUList()
    hdulist.append(fits.table_to_hdu(tab_diff))
    hdulist.append(fits.table_to_hdu(tab_int))
    hdulist.append(fits.table_to_hdu(tab_ebounds))

    hdulist[1].name = 'DIFF_FLUX'
    hdulist[2].name = 'INT_FLUX'
    hdulist[3].name = 'EBOUNDS'

    if map_type is not None:
        hdu = map_diff_flux.create_image_hdu()
        hdu.name = 'MAP_DIFF_FLUX'
        hdulist.append(hdu)
        hdu = map_diff_npred.create_image_hdu()
        hdu.name = 'MAP_DIFF_NPRED'
        hdulist.append(hdu)

        hdu = map_int_flux.create_image_hdu()
        hdu.name = 'MAP_INT_FLUX'
        hdulist.append(hdu)
        hdu = map_int_npred.create_image_hdu()
        hdu.name = 'MAP_INT_NPRED'
        hdulist.append(hdu)

    hdulist.writeto(output, overwrite=True)
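
A hypothetical invocation of the function above, using only keyword names it actually reads; the file paths are placeholders.

# Hypothetical call; galdiff and ltcube must point to real files.
run_flux_sensitivity(ltcube='ltcube.fits',
                     galdiff='gll_iem_v06.fits',
                     event_class='P8R2_SOURCE_V6',
                     glon=30.0, glat=30.0,
                     emin=10**1.5, emax=10**6.0, nbin=18,
                     ts_thresh=25.0, min_counts=3.0,
                     map_type=None,
                     output='flux_sensitivity.fits')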
Example #35
    def compute_counts(self, skydir, fn, ebins=None):
        """Compute signal and background counts for a point source at
        position ``skydir`` with spectral parameterization ``fn``.

        Parameters
        ----------
        skydir : `~astropy.coordinates.SkyCoord`

        ebins : `~numpy.ndarray`

        Returns
        -------
        sig : `~numpy.ndarray`
            Signal counts array.  Dimensions are energy, angular
            separation, and event type.

        bkg : `~numpy.ndarray`
            Background counts array.  Dimensions are energy, angular
            separation, and event type.

        """

        if ebins is None:
            ebins = self.ebins
            ectr = self.ectr
        else:
            ectr = np.exp(utils.edge_to_center(np.log(ebins)))

        skydir_cel = skydir.transform_to('icrs')
        skydir_gal = skydir.transform_to('galactic')

        sig = []
        bkg = []
        bkg_fit = None
        if self._gdiff_fit is not None:
            bkg_fit = []

        for psf, exp in zip(self._psf, self._exp):

            coords0 = np.meshgrid(*[skydir_cel.ra.deg, ectr], indexing='ij')
            coords1 = np.meshgrid(*[skydir_cel.dec.deg, ectr], indexing='ij')

            # expv = exp.interpolate(skydir_cel.icrs.ra.deg,
            #                       skydir_cel.icrs.dec.deg,
            #                       ectr)

            expv = exp.interpolate(coords0[0], coords1[0], coords0[1])

            coords0 = np.meshgrid(*[skydir_gal.l.deg, ectr], indexing='ij')
            coords1 = np.meshgrid(*[skydir_gal.b.deg, ectr], indexing='ij')

            bkgv = self._gdiff.interpolate(np.ravel(coords0[0]),
                                           np.ravel(coords1[0]),
                                           np.ravel(coords0[1]))
            bkgv = bkgv.reshape(expv.shape)
            

            # bkgv = self._gdiff.interpolate(
            #    skydir_gal.l.deg, skydir_gal.b.deg, ectr)

            isov = np.exp(np.interp(np.log(ectr), np.log(self._iso[0]),
                                    np.log(self._iso[1])))
            bkgv += isov

            
            s0, b0 = irfs.compute_ps_counts(ebins, expv, psf, bkgv, fn,
                                            egy_dim=1,
                                            spatial_model=self.spatial_model,
                                            spatial_size=self.spatial_size)

            sig += [s0]
            bkg += [b0]

            if self._iso_fit is not None:
                isov_fit = np.exp(np.interp(np.log(ectr), np.log(self._iso_fit[0]),
                                            np.log(self._iso_fit[1])))
            else:
                isov_fit = isov

            if self._gdiff_fit is not None:
                bkgv_fit = self._gdiff_fit.interpolate(np.ravel(coords0[0]),
                                                       np.ravel(coords1[0]),
                                                       np.ravel(coords0[1]))
                bkgv_fit = bkgv_fit.reshape(expv.shape)
                bkgv_fit += isov_fit
                s0, b0 = irfs.compute_ps_counts(ebins, expv, psf,
                                                bkgv_fit, fn, egy_dim=1,
                                                spatial_model=self.spatial_model,
                                                spatial_size=self.spatial_size)
                bkg_fit += [b0]

        sig = np.concatenate([np.expand_dims(t, -1) for t in sig])
        bkg = np.concatenate([np.expand_dims(t, -1) for t in bkg])
        if self._gdiff_fit is not None:
            bkg_fit = np.concatenate([np.expand_dims(t, -1) for t in bkg_fit])

        return sig, bkg, bkg_fit
Example #36
def run_flux_sensitivity(**kwargs):

    index = kwargs.get('index', 2.0)
    emin = kwargs.get('emin', 10**1.5)
    emax = kwargs.get('emax', 10**6.0)
    nbin = kwargs.get('nbin', 18)
    glon = kwargs.get('glon', 0.0)
    glat = kwargs.get('glat', 0.0)
    ltcube_filepath = kwargs.get('ltcube', None)
    galdiff_filepath = kwargs.get('galdiff', None)
    isodiff_filepath = kwargs.get('isodiff', None)
    obs_time_yr = kwargs.get('obs_time_yr', None)
    event_class = kwargs.get('event_class', 'P8R2_SOURCE_V6')
    min_counts = kwargs.get('min_counts', 3.0)
    ts_thresh = kwargs.get('ts_thresh', 25.0)
    output = kwargs.get('output', None)

    event_types = [['FRONT', 'BACK']]
    fn = spectrum.PowerLaw([1E-13, -index], scale=1E3)

    log_ebins = np.linspace(np.log10(emin),
                            np.log10(emax), nbin + 1)
    ebins = 10**log_ebins
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    c = SkyCoord(glon, glat, unit='deg', frame='galactic')

    if ltcube_filepath is None:

        if obs_time_yr is None:
            raise Exception('No observation time defined.')

        ltc = LTCube.create_from_obs_time(obs_time_yr * 365 * 24 * 3600.)
    else:
        ltc = LTCube.create(ltcube_filepath)
        if obs_time_yr is not None:
            ltc._counts *= obs_time_yr * 365 * \
                24 * 3600. / (ltc.tstop - ltc.tstart)

    gdiff = skymap.Map.create_from_fits(galdiff_filepath)

    if isodiff_filepath is None:
        isodiff = utils.resolve_file_path('iso_%s_v06.txt' % event_class,
                                          search_dirs=[os.path.join('$FERMIPY_ROOT', 'data'),
                                                       '$FERMI_DIFFUSE_DIR'])
        isodiff = os.path.expandvars(isodiff)
    else:
        isodiff = isodiff_filepath

    iso = np.loadtxt(isodiff, unpack=True)

    scalc = SensitivityCalc(gdiff, iso, ltc, ebins,
                            event_class, event_types)

    o = scalc.diff_flux_threshold(c, fn, ts_thresh, min_counts)

    cols = [Column(name='e_min', dtype='f8', data=scalc.ebins[:-1], unit='MeV'),
            Column(name='e_ref', dtype='f8', data=o['e_ref'], unit='MeV'),
            Column(name='e_max', dtype='f8', data=scalc.ebins[1:], unit='MeV'),
            Column(name='flux', dtype='f8', data=o[
                   'flux'], unit='ph / (cm2 s)'),
            Column(name='eflux', dtype='f8', data=o[
                   'eflux'], unit='MeV / (cm2 s)'),
            Column(name='dnde', dtype='f8', data=o['dnde'],
                   unit='ph / (MeV cm2 s)'),
            Column(name='e2dnde', dtype='f8',
                   data=o['e2dnde'], unit='MeV / (cm2 s)'),
            Column(name='npred', dtype='f8', data=o['npred'], unit='ph')]

    tab_diff = Table(cols)

    cols = [Column(name='index', dtype='f8'),
            Column(name='e_min', dtype='f8', unit='MeV'),
            Column(name='e_ref', dtype='f8', unit='MeV'),
            Column(name='e_max', dtype='f8', unit='MeV'),
            Column(name='flux', dtype='f8', unit='ph / (cm2 s)'),
            Column(name='eflux', dtype='f8', unit='MeV / (cm2 s)'),
            Column(name='dnde', dtype='f8', unit='ph / (MeV cm2 s)'),
            Column(name='e2dnde', dtype='f8', unit='MeV / (cm2 s)'),
            Column(name='npred', dtype='f8', unit='ph')]

    cols_ebin = [Column(name='index', dtype='f8'),
                 Column(name='e_min', dtype='f8',
                        unit='MeV', shape=(len(ectr),)),
                 Column(name='e_ref', dtype='f8',
                        unit='MeV', shape=(len(ectr),)),
                 Column(name='e_max', dtype='f8',
                        unit='MeV', shape=(len(ectr),)),
                 Column(name='flux', dtype='f8',
                        unit='ph / (cm2 s)', shape=(len(ectr),)),
                 Column(name='eflux', dtype='f8',
                        unit='MeV / (cm2 s)', shape=(len(ectr),)),
                 Column(name='dnde', dtype='f8',
                        unit='ph / (MeV cm2 s)', shape=(len(ectr),)),
                 Column(name='e2dnde', dtype='f8',
                        unit='MeV / (cm2 s)', shape=(len(ectr),)),
                 Column(name='npred', dtype='f8', unit='ph', shape=(len(ectr),))]

    tab_int = Table(cols)
    tab_int_ebin = Table(cols_ebin)

    index = np.linspace(1.0, 5.0, 4 * 4 + 1)

    for g in index:
        fn = spectrum.PowerLaw([1E-13, -g], scale=10**3.5)
        o = scalc.int_flux_threshold(c, fn, ts_thresh, 3.0)
        row = [g]
        for colname in tab_int.columns:
            if colname not in o:
                continue
            row += [o[colname]]

        tab_int.add_row(row)

        row = [g]
        for colname in tab_int.columns:
            if colname not in o:
                continue
            row += [o['bins'][colname]]
        tab_int_ebin.add_row(row)

    hdulist = fits.HDUList()
    hdulist.append(fits.table_to_hdu(tab_diff))
    hdulist.append(fits.table_to_hdu(tab_int))
    hdulist.append(fits.table_to_hdu(tab_int_ebin))

    hdulist[1].name = 'DIFF_FLUX'
    hdulist[2].name = 'INT_FLUX'
    hdulist[3].name = 'INT_FLUX_EBIN'

    hdulist.writeto(output, overwrite=True)
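A minimal usage sketch for this version of run_flux_sensitivity, assuming a ten-year mission rescaling and a locally available galactic diffuse model; all file names here are placeholders.

# Hypothetical call; 'gll_iem_v06.fits' and 'sensitivity.fits' are placeholder paths.
run_flux_sensitivity(galdiff='gll_iem_v06.fits',
                     obs_time_yr=10.0,
                     glon=0.0, glat=30.0,
                     event_class='P8R2_SOURCE_V6',
                     output='sensitivity.fits')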
Example No. 37
def main():
    usage = "usage: %(prog)s [options]"
    description = "Calculate the LAT point-source flux sensitivity."
    parser = argparse.ArgumentParser(usage=usage, description=description)

    parser.add_argument('--ltcube',
                        default=None,
                        help='Set the path to the livetime cube.')
    parser.add_argument('--galdiff',
                        default=None,
                        required=True,
                        help='Set the path to the galactic diffuse model.')
    parser.add_argument(
        '--isodiff',
        default=None,
        help='Set the path to the isotropic model.  If none then the '
        'default model will be used for the given event class.')
    parser.add_argument('--ts_thresh',
                        default=25.0,
                        type=float,
                        help='Set the detection threshold.')
    parser.add_argument('--min_counts',
                        default=3.0,
                        type=float,
                        help='Set the minimum number of counts.')
    parser.add_argument(
        '--joint',
        default=False,
        action='store_true',
        help='Compute sensitivity using joint-likelihood of all event types.')
    parser.add_argument('--event_class',
                        default='P8R2_SOURCE_V6',
                        help='Set the IRF name (e.g. P8R2_SOURCE_V6).')
    parser.add_argument('--glon',
                        default=0.0,
                        type=float,
                        help='Galactic longitude.')
    parser.add_argument('--glat',
                        default=0.0,
                        type=float,
                        help='Galactic latitude.')
    parser.add_argument('--index',
                        default=2.0,
                        type=float,
                        help='Source power-law index.')
    parser.add_argument('--emin',
                        default=10**1.5,
                        type=float,
                        help='Minimum energy in MeV.')
    parser.add_argument('--emax',
                        default=10**6.0,
                        type=float,
                        help='Maximum energy in MeV.')
    parser.add_argument(
        '--nbin',
        default=18,
        type=int,
        help='Number of energy bins for differential flux calculation.')
    parser.add_argument('--output',
                        default='output.fits',
                        type=str,
                        help='Output filename.')
    parser.add_argument(
        '--obs_time_yr',
        default=None,
        type=float,
        help='Rescale the livetime cube to this observation time in years.  '
        'If none then the calculation will use the intrinsic observation '
        'time of the livetime cube.')

    args = parser.parse_args()
    event_types = [['FRONT', 'BACK']]
    fn = spectrum.PowerLaw([1E-13, -args.index], scale=1E3)

    log_ebins = np.linspace(np.log10(args.emin), np.log10(args.emax),
                            args.nbin + 1)
    ebins = 10**log_ebins
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    c = SkyCoord(args.glon, args.glat, unit='deg', frame='galactic')

    if args.ltcube is None:

        if args.obs_time_yr is None:
            raise Exception('No observation time defined.')

        ltc = irfs.LTCube.create_from_obs_time(args.obs_time_yr * 365 * 24 *
                                               3600.)
    else:
        ltc = irfs.LTCube.create(args.ltcube)
        if args.obs_time_yr is not None:
            ltc._counts *= args.obs_time_yr * 365 * \
                24 * 3600. / (ltc.tstop - ltc.tstart)

    gdiff = skymap.Map.create_from_fits(args.galdiff)

    if args.isodiff is None:
        isodiff = utils.resolve_file_path('iso_%s_v06.txt' % args.event_class,
                                          search_dirs=[
                                              os.path.join(
                                                  '$FERMIPY_ROOT', 'data'),
                                              '$FERMI_DIFFUSE_DIR'
                                          ])
        isodiff = os.path.expandvars(isodiff)
    else:
        isodiff = args.isodiff

    iso = np.loadtxt(isodiff, unpack=True)

    scalc = SensitivityCalc(gdiff, iso, ltc, ebins, args.event_class,
                            event_types)

    o = scalc.diff_flux_threshold(c, fn, args.ts_thresh, args.min_counts)

    cols = [
        Column(name='e_min', dtype='f8', data=scalc.ebins[:-1], unit='MeV'),
        Column(name='e_ref', dtype='f8', data=o['e_ref'], unit='MeV'),
        Column(name='e_max', dtype='f8', data=scalc.ebins[1:], unit='MeV'),
        Column(name='flux', dtype='f8', data=o['flux'], unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', data=o['eflux'],
               unit='MeV / (cm2 s)'),
        Column(name='dnde',
               dtype='f8',
               data=o['dnde'],
               unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde',
               dtype='f8',
               data=o['e2dnde'],
               unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', data=o['npred'], unit='ph')
    ]

    tab_diff = Table(cols)

    cols = [
        Column(name='index', dtype='f8'),
        Column(name='e_min', dtype='f8', unit='MeV'),
        Column(name='e_ref', dtype='f8', unit='MeV'),
        Column(name='e_max', dtype='f8', unit='MeV'),
        Column(name='flux', dtype='f8', unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='dnde', dtype='f8', unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', unit='ph')
    ]

    cols_ebin = [
        Column(name='index', dtype='f8'),
        Column(name='e_min', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='e_ref', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='e_max', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='flux',
               dtype='f8',
               unit='ph / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='eflux',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='dnde',
               dtype='f8',
               unit='ph / (MeV cm2 s)',
               shape=(len(ectr), )),
        Column(name='e2dnde',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='npred', dtype='f8', unit='ph', shape=(len(ectr), ))
    ]

    tab_int = Table(cols)
    tab_int_ebin = Table(cols_ebin)

    index = np.linspace(1.0, 5.0, 4 * 4 + 1)

    for g in index:
        fn = spectrum.PowerLaw([1E-13, -g], scale=10**3.5)
        o = scalc.int_flux_threshold(c, fn, args.ts_thresh, 3.0)
        row = [g]
        for colname in tab_int.columns:
            if colname not in o:
                continue
            row += [o[colname]]

        tab_int.add_row(row)

        row = [g]
        for colname in tab_int.columns:
            if colname not in o:
                continue
            row += [o['bins'][colname]]
        tab_int_ebin.add_row(row)

    hdulist = fits.HDUList()
    hdulist.append(fits.table_to_hdu(tab_diff))
    hdulist.append(fits.table_to_hdu(tab_int))
    hdulist.append(fits.table_to_hdu(tab_int_ebin))

    hdulist[1].name = 'DIFF_FLUX'
    hdulist[2].name = 'INT_FLUX'
    hdulist[3].name = 'INT_FLUX_EBIN'

    hdulist.writeto(args.output, overwrite=True)
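main() reads its options from the command line via argparse. A sketch of driving it programmatically, assuming the function is importable; the script name and file paths placed in sys.argv are placeholders.

import sys

# Equivalent to a shell invocation such as:
#   flux-sensitivity --galdiff gll_iem_v06.fits --obs_time_yr 10 --glat 30 --output sens.fits
sys.argv = ['flux-sensitivity',
            '--galdiff', 'gll_iem_v06.fits',
            '--obs_time_yr', '10',
            '--glat', '30',
            '--output', 'sens.fits']
main()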
Example No. 38
def run_flux_sensitivity(**kwargs):
    """Compute the LAT point-source flux sensitivity for a single sky
    direction and, optionally, on an all-sky HEALPix or WCS grid of sky
    positions, writing the results to a FITS file."""

    index = kwargs.get('index', 2.0)
    sedshape = kwargs.get('sedshape', 'PowerLaw')
    cutoff = kwargs.get('cutoff', 1e3)
    curvindex = kwargs.get('curvindex', 1.0)
    beta = kwargs.get('beta', 0.0)
    emin = kwargs.get('emin', 10**1.5)
    emax = kwargs.get('emax', 10**6.0)
    nbin = kwargs.get('nbin', 18)
    glon = kwargs.get('glon', 0.0)
    glat = kwargs.get('glat', 0.0)
    ltcube_filepath = kwargs.get('ltcube', None)
    galdiff_filepath = kwargs.get('galdiff', None)
    isodiff_filepath = kwargs.get('isodiff', None)
    galdiff_fit_filepath = kwargs.get('galdiff_fit', None)
    isodiff_fit_filepath = kwargs.get('isodiff_fit', None)
    wcs_npix = kwargs.get('wcs_npix', 40)
    wcs_cdelt = kwargs.get('wcs_cdelt', 0.5)
    wcs_proj = kwargs.get('wcs_proj', 'AIT')
    map_type = kwargs.get('map_type', None)
    spatial_model = kwargs.get('spatial_model', 'PointSource')
    spatial_size = kwargs.get('spatial_size', 1E-2)

    obs_time_yr = kwargs.get('obs_time_yr', None)
    event_class = kwargs.get('event_class', 'P8R2_SOURCE_V6')
    min_counts = kwargs.get('min_counts', 3.0)
    ts_thresh = kwargs.get('ts_thresh', 25.0)
    nside = kwargs.get('hpx_nside', 16)
    output = kwargs.get('output', None)

    event_types = [['FRONT', 'BACK']]

    if sedshape == 'PowerLaw':
        fn = spectrum.PowerLaw([1E-13, -index], scale=1E3)
    elif sedshape == 'PLSuperExpCutoff':
        fn = spectrum.PLSuperExpCutoff([1E-13, -index, cutoff, curvindex],
                                       scale=1E3)
    elif sedshape == 'LogParabola':
        fn = spectrum.LogParabola([1E-13, -index, beta], scale=1E3)
    else:
        raise ValueError('Invalid SED shape: {}'.format(sedshape))

    log_ebins = np.linspace(np.log10(emin), np.log10(emax), nbin + 1)
    ebins = 10**log_ebins
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    c = SkyCoord(glon, glat, unit='deg', frame='galactic')

    if ltcube_filepath is None:

        if obs_time_yr is None:
            raise Exception('No observation time defined.')

        ltc = LTCube.create_from_obs_time(obs_time_yr * 365 * 24 * 3600.)
    else:
        ltc = LTCube.create(ltcube_filepath)
        if obs_time_yr is not None:
            ltc._counts *= obs_time_yr * 365 * \
                24 * 3600. / (ltc.tstop - ltc.tstart)

    gdiff = skymap.Map.create_from_fits(galdiff_filepath)
    gdiff_fit = None
    if galdiff_fit_filepath is not None:
        gdiff_fit = skymap.Map.create_from_fits(galdiff_fit_filepath)

    if isodiff_filepath is None:
        isodiff = utils.resolve_file_path('iso_%s_v06.txt' % event_class,
                                          search_dirs=[
                                              os.path.join(
                                                  '$FERMIPY_ROOT', 'data'),
                                              '$FERMI_DIFFUSE_DIR'
                                          ])
        isodiff = os.path.expandvars(isodiff)
    else:
        isodiff = isodiff_filepath

    iso = np.loadtxt(isodiff, unpack=True)
    iso_fit = None
    if isodiff_fit_filepath is not None:
        iso_fit = np.loadtxt(isodiff_fit_filepath, unpack=True)

    scalc = SensitivityCalc(gdiff,
                            iso,
                            ltc,
                            ebins,
                            event_class,
                            event_types,
                            gdiff_fit=gdiff_fit,
                            iso_fit=iso_fit,
                            spatial_model=spatial_model,
                            spatial_size=spatial_size)

    # Compute Maps
    map_diff_flux = None
    map_diff_npred = None
    map_int_flux = None
    map_int_npred = None

    map_nstep = 500

    if map_type == 'hpx':

        hpx = HPX(nside, True, 'GAL', ebins=ebins)
        map_diff_flux = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_diff_npred = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_skydir = map_diff_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.diff_flux_threshold(map_skydir[s], fn, ts_thresh,
                                          min_counts)
            map_diff_flux.data[:, s] = o['flux'].T
            map_diff_npred.data[:, s] = o['npred'].T

        hpx = HPX(nside, True, 'GAL')
        map_int_flux = HpxMap(np.zeros((hpx.npix)), hpx)
        map_int_npred = HpxMap(np.zeros((hpx.npix)), hpx)
        map_skydir = map_int_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.int_flux_threshold(map_skydir[s], fn, ts_thresh,
                                         min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    elif map_type == 'wcs':

        wcs_shape = [wcs_npix, wcs_npix]
        wcs_size = wcs_npix * wcs_npix

        map_diff_flux = Map.create(c,
                                   wcs_cdelt,
                                   wcs_shape,
                                   'GAL',
                                   wcs_proj,
                                   ebins=ebins)
        map_diff_npred = Map.create(c,
                                    wcs_cdelt,
                                    wcs_shape,
                                    'GAL',
                                    wcs_proj,
                                    ebins=ebins)
        map_skydir = map_diff_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(np.arange(i, min(i + map_nstep, wcs_size)),
                                   wcs_shape)
            s = (slice(None), idx[1], idx[0])
            o = scalc.diff_flux_threshold(map_skydir[slice(i, i + map_nstep)],
                                          fn, ts_thresh, min_counts)
            map_diff_flux.data[s] = o['flux'].T
            map_diff_npred.data[s] = o['npred'].T

        map_int_flux = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_int_npred = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_skydir = map_int_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(np.arange(i, min(i + map_nstep, wcs_size)),
                                   wcs_shape)
            s = (idx[1], idx[0])
            o = scalc.int_flux_threshold(map_skydir[slice(i, i + map_nstep)],
                                         fn, ts_thresh, min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    o = scalc.diff_flux_threshold(c, fn, ts_thresh, min_counts)

    cols = [
        Column(name='e_min', dtype='f8', data=scalc.ebins[:-1], unit='MeV'),
        Column(name='e_ref', dtype='f8', data=o['e_ref'], unit='MeV'),
        Column(name='e_max', dtype='f8', data=scalc.ebins[1:], unit='MeV'),
        Column(name='flux', dtype='f8', data=o['flux'], unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', data=o['eflux'],
               unit='MeV / (cm2 s)'),
        Column(name='dnde',
               dtype='f8',
               data=o['dnde'],
               unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde',
               dtype='f8',
               data=o['e2dnde'],
               unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', data=o['npred'], unit='ph')
    ]

    tab_diff = Table(cols)

    cols = [
        Column(name='index', dtype='f8'),
        Column(name='e_min', dtype='f8', unit='MeV'),
        Column(name='e_ref', dtype='f8', unit='MeV'),
        Column(name='e_max', dtype='f8', unit='MeV'),
        Column(name='flux', dtype='f8', unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='dnde', dtype='f8', unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', unit='ph'),
        Column(name='ebin_e_min', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_e_ref', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_e_max', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_flux',
               dtype='f8',
               unit='ph / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_eflux',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_dnde',
               dtype='f8',
               unit='ph / (MeV cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_e2dnde',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_npred', dtype='f8', unit='ph', shape=(len(ectr), ))
    ]

    cols_ebounds = [
        Column(name='E_MIN', dtype='f8', unit='MeV', data=ebins[:-1]),
        Column(name='E_MAX', dtype='f8', unit='MeV', data=ebins[1:]),
    ]

    tab_int = Table(cols)
    tab_ebounds = Table(cols_ebounds)

    index = np.linspace(1.0, 5.0, 4 * 4 + 1)

    for g in index:
        fn = spectrum.PowerLaw([1E-13, -g], scale=10**3.5)
        o = scalc.int_flux_threshold(c, fn, ts_thresh, 3.0)
        row = [g]
        for colname in tab_int.columns:
            if colname == 'index':
                continue
            if 'ebin' in colname:
                row += [o['bins'][colname.replace('ebin_', '')]]
            else:
                row += [o[colname]]

        tab_int.add_row(row)

    hdulist = fits.HDUList()
    hdulist.append(fits.table_to_hdu(tab_diff))
    hdulist.append(fits.table_to_hdu(tab_int))
    hdulist.append(fits.table_to_hdu(tab_ebounds))

    hdulist[1].name = 'DIFF_FLUX'
    hdulist[2].name = 'INT_FLUX'
    hdulist[3].name = 'EBOUNDS'

    if map_type is not None:
        hdu = map_diff_flux.create_image_hdu()
        hdu.name = 'MAP_DIFF_FLUX'
        hdulist.append(hdu)
        hdu = map_diff_npred.create_image_hdu()
        hdu.name = 'MAP_DIFF_NPRED'
        hdulist.append(hdu)

        hdu = map_int_flux.create_image_hdu()
        hdu.name = 'MAP_INT_FLUX'
        hdulist.append(hdu)
        hdu = map_int_npred.create_image_hdu()
        hdu.name = 'MAP_INT_NPRED'
        hdulist.append(hdu)

    hdulist.writeto(output, overwrite=True)
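A usage sketch for this extended version, which can additionally produce all-sky HEALPix maps of the flux threshold when map_type='hpx'; file names are placeholders and the keyword values simply mirror the defaults read via kwargs.get above.

# Hypothetical call producing the DIFF_FLUX/INT_FLUX tables plus all-sky
# sensitivity maps; 'gll_iem_v06.fits' is a placeholder diffuse-model path.
run_flux_sensitivity(galdiff='gll_iem_v06.fits',
                     obs_time_yr=10.0,
                     sedshape='PowerLaw', index=2.0,
                     map_type='hpx', hpx_nside=16,
                     spatial_model='PointSource',
                     output='sensitivity_allsky.fits')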
Example No. 39
def compute_ps_counts(ebins,
                      exp,
                      psf,
                      bkg,
                      fn,
                      egy_dim=0,
                      spatial_model='PointSource',
                      spatial_size=1E-3):
    """Calculate the observed signal and background counts given models
    for the exposure, background intensity, PSF, and source flux.

    Parameters
    ----------
    ebins : `~numpy.ndarray`
        Array of energy bin edges.

    exp : `~numpy.ndarray`
        Model for exposure.

    psf : `~fermipy.irfs.PSFModel`
        Model for average PSF.

    bkg : `~numpy.ndarray`
        Array of background intensities.

    fn : `~fermipy.spectrum.SpectralFunction`
        Model for the source spectrum.

    egy_dim : int
        Index of the energy dimension in the ``bkg`` and ``exp`` arrays.

    spatial_model : str
        Spatial morphology of the source ('PointSource', 'RadialGaussian',
        or 'RadialDisk').

    spatial_size : float
        Size parameter of the source morphology (interpreted as its 68%
        containment radius) in degrees.  Ignored for 'PointSource'.

    Returns
    -------
    sigc : `~numpy.ndarray`
        Expected signal counts in annular bins around the source.

    bkgc : `~numpy.ndarray`
        Expected background counts in the same bins.

    """
    ewidth = utils.edge_to_width(ebins)
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    r68 = psf.containment_angle(ectr, fraction=0.68)
    if spatial_model != 'PointSource':
        r68[r68 < spatial_size] = spatial_size

    # Build annular bins in offset angle around the source, scaled to the
    # energy-dependent 68% containment radius (out to 3 x r68).
    theta_edges = np.linspace(0.0, 3.0, 31)[np.newaxis, :]
    theta_edges = theta_edges * r68[:, np.newaxis]
    theta = 0.5 * (theta_edges[:, :-1] + theta_edges[:, 1:])
    domega = np.pi * (theta_edges[:, 1:]**2 - theta_edges[:, :-1]**2)

    if spatial_model == 'PointSource':
        sig_pdf = domega * psf.interp(ectr[:, np.newaxis], theta)
    elif spatial_model == 'RadialGaussian':
        sig_pdf = domega * utils.convolve2d_gauss(
            lambda t: psf.interp(ectr[:, np.newaxis, np.newaxis], t),
            theta,
            spatial_size / 1.5095921854516636,
            nstep=2000)
    elif spatial_model == 'RadialDisk':
        sig_pdf = domega * utils.convolve2d_disk(
            lambda t: psf.interp(ectr[:, np.newaxis, np.newaxis], t), theta,
            spatial_size / 0.8246211251235321)
    else:
        raise ValueError('Invalid spatial model: {}'.format(spatial_model))

    sig_pdf *= (np.pi / 180.)**2
    sig_flux = fn.flux(ebins[:-1], ebins[1:])

    # Background and signal counts
    bkgc = bkg[..., np.newaxis] * domega * exp[..., np.newaxis] * \
        ewidth[..., np.newaxis] * (np.pi / 180.)**2
    sigc = sig_pdf * sig_flux[..., np.newaxis] * exp[..., np.newaxis]

    return sigc, bkgc
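The two numerical constants in compute_ps_counts convert spatial_size, interpreted as a 68% containment radius, into the native width parameter of each spatial model: for a symmetric 2D Gaussian r68 = sigma * sqrt(-2 ln 0.32) ≈ 1.5096 sigma, and for a uniform disk of radius R r68 = R * sqrt(0.68) ≈ 0.8246 R. A quick numerical check:

import math

# 68% containment radius of a symmetric 2D Gaussian, in units of sigma:
# 1 - exp(-r**2 / (2 * sigma**2)) = 0.68  =>  r / sigma = sqrt(-2 * ln(0.32))
print(math.sqrt(-2.0 * math.log(1.0 - 0.68)))   # ~1.5095921854516636

# 68% containment radius of a uniform disk, in units of its radius R:
# (r / R)**2 = 0.68  =>  r / R = sqrt(0.68)
print(math.sqrt(0.68))                          # ~0.8246211251235321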