Example #1
    def create_from_gti(cls, skydir, tab_sc, tab_gti, zmax, **kwargs):

        radius = kwargs.get('radius', 180.0)
        cth_edges = kwargs.get('cth_edges', None)
        if cth_edges is None:
            cth_edges = 1.0 - np.linspace(0, 1.0, 41)**2
            cth_edges = cth_edges[::-1]

        hpx = HPX(2**4, True, 'CEL', ebins=cth_edges)

        hpx_skydir = hpx.get_sky_dirs()

        m = skydir.separation(hpx_skydir).deg < radius
        map_lt = HpxMap(np.zeros((len(cth_edges) - 1, hpx.npix)), hpx)
        map_lt_wt = HpxMap(np.zeros((len(cth_edges) - 1, hpx.npix)), hpx)

        lt, lt_wt = fill_livetime_hist(
            hpx_skydir[m], tab_sc, tab_gti, zmax, cth_edges)
        map_lt.data[:, m] = lt
        map_lt_wt.data[:, m] = lt_wt

        hpx2 = HPX(2**6, True, 'CEL', ebins=cth_edges)

        ltc = cls(np.zeros((len(cth_edges) - 1, hpx2.npix)), hpx2, cth_edges)
        ltc_skydir = ltc.hpx.get_sky_dirs()
        m = skydir.separation(ltc_skydir).deg < radius

        ltc.data[:, m] = map_lt.interpolate(ltc_skydir[m].ra.deg,
                                            ltc_skydir[m].dec.deg,
                                            interp_log=False)
        ltc.data_wt[:, m] = map_lt_wt.interpolate(ltc_skydir[m].ra.deg,
                                                  ltc_skydir[m].dec.deg,
                                                  interp_log=False)
        return ltc
Example #2
 def _compute_intensity(ccube, bexpcube):
     """ Compute the intensity map
     """
     bexp_data = np.sqrt(bexpcube.data[0:-1, 0:] * bexpcube.data[1:, 0:])
     intensity_data = ccube.data / bexp_data
     intensity_map = HpxMap(intensity_data, ccube.hpx)
     return intensity_map
Example #3
def test_hpxmap(tmpdir):
    n = np.ones((10, 192), 'd')
    hpx = HPX(4, False, 'GAL')

    filename = str(tmpdir / 'test_hpx.fits')
    hpx.write_fits(n, filename, clobber=True)

    ebins = np.logspace(2, 5, 8)

    hpx_2 = HPX(1024, False, 'GAL', region='DISK(110.,75.,2.)', ebins=ebins)
    npixels = hpx_2.npix

    n2 = np.ndarray((8, npixels), 'd')
    for i in range(8):
        n2[i].flat = np.arange(npixels)

    hpx_map = HpxMap(n2, hpx_2)
    wcs, wcs_data = hpx_map.make_wcs_from_hpx(normalize=True)

    wcs_out = hpx_2.make_wcs(3)

    filename = str(tmpdir / 'test_hpx_2_wcs.fits')
    write_fits_image(wcs_data, wcs_out.wcs, filename)

    assert_allclose(wcs_data[0, 160, 160], 87.28571429)
    assert_allclose(wcs_data[4, 160, 160], 87.28571429)
Example #4
 def _make_bright_pixel_mask(intensity_mean, mask_factor=5.0):
     """ Make of mask of all the brightest pixels """
     mask = np.zeros((intensity_mean.data.shape), bool)
     nebins = len(intensity_mean.data)
     sum_intensity = intensity_mean.data.sum(0)
     mean_intensity = sum_intensity.mean()
     for i in range(nebins):
         mask[i, 0:] = sum_intensity > (mask_factor * mean_intensity)
     return HpxMap(mask, intensity_mean.hpx)
Example #5
 def _compute_counts_from_model(model, bexpcube):
     """ Make the counts maps from teh mdoe
     """
     data = model.data * bexpcube.data
     ebins = model.hpx.ebins
     ratio = ebins[1:] / ebins[0:-1]
     half_log_ratio = np.log(ratio) / 2.
     int_map = ((data[0:-1].T * ebins[0:-1]) + (data[1:].T * ebins[1:])) * half_log_ratio
     return HpxMap(int_map.T, model.hpx)
Example #6
 def _fill_masked_intensity_resid(intensity_resid, bright_pixel_mask):
     """ Fill the pixels used to compute the effective area correction with the mean intensity
     """
     filled_intensity = np.zeros((intensity_resid.data.shape))
     nebins = len(intensity_resid.data)
     for i in range(nebins):
         masked = bright_pixel_mask.data[i]
         unmasked = np.invert(masked)
         mean_intensity = intensity_resid.data[i][unmasked].mean()
         filled_intensity[i] = np.where(masked, mean_intensity, intensity_resid.data[i])
     return HpxMap(filled_intensity, intensity_resid.hpx)
Example #7
    def _differential_to_integral(hpx_map):
        """ Convert a differential map to an integral map

        Here we are using log-log-quadrature to compute the integral quantities.
        """
        ebins = hpx_map.hpx.ebins
        ratio = ebins[1:] / ebins[0:-1]
        half_log_ratio = np.log(ratio) / 2.
        int_map = ((hpx_map.data[0:-1].T * ebins[0:-1]) +
                   (hpx_map.data[1:].T * ebins[1:])) * half_log_ratio
        return HpxMap(int_map.T, hpx_map.hpx)
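The quadrature above approximates each bin integral as (F(E_lo)*E_lo + F(E_hi)*E_hi) * log(E_hi/E_lo) / 2, i.e. the trapezoid rule applied to E*F(E) in log E. A minimal numpy check of that relation (the bin edges and the E**-2 flux are assumptions for illustration, not fermipy code):

import numpy as np

e1, e2 = 1e3, 10**3.25                              # one narrow energy bin in MeV (assumed)
approx = (e1**-2.0 * e1 + e2**-2.0 * e2) * np.log(e2 / e1) / 2.
exact = 1. / e1 - 1. / e2                           # analytic integral of E**-2 from e1 to e2
print(approx, exact)                                # close for narrow bins; the error grows with bin width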
Example #8
def stack_energy_planes_hpx(filelist, **kwargs):
    """
    """
    from fermipy.skymap import HpxMap
    from fermipy.hpx_utils import HPX
    maplist = [HpxMap.create_from_fits(fname, **kwargs) for fname in filelist]
    energies = np.log10(np.hstack([amap.hpx.evals
                                   for amap in maplist])).squeeze()

    counts = np.hstack([amap.counts.flat for amap in maplist])
    counts = counts.reshape((len(energies), int(len(counts) / len(energies))))

    template_map = maplist[0]
    hpx = HPX.create_from_header(template_map.hpx.make_header(), energies)
    return HpxMap(counts, hpx)
Example #9
def intensity_cube(ccube, bexpcube, hpx_order):
    """
    """
    if hpx_order == ccube.hpx.order:
        ccube_at_order = ccube
    else:
        ccube_at_order = ccube.ud_grade(hpx_order, preserve_counts=True)
    
    if hpx_order == bexpcube.hpx.order:
        bexpcube_at_order = bexpcube
    else:
        bexpcube_at_order = bexpcube.ud_grade(hpx_order, preserve_counts=True)
    
    bexpcube_data = np.sqrt(bexpcube_at_order.data[0:-1, 0:] * bexpcube_at_order.data[1:, 0:])
    out_data = ccube_at_order.counts / bexpcube_data
    return HpxMap(out_data, ccube_at_order.hpx)    
Example #10
def update_hpx_skymap_allsky(map_in, map_out):
    """ 'Update' a HEALPix skymap

    This checks map_out exists and creates it from map_in if it does not.
    If map_out does exist, this adds the data in map_in to map_out
    """
    if map_out is None:
        in_hpx = map_in.hpx
        out_hpx = HPX.create_hpx(in_hpx.nside, in_hpx.nest, in_hpx.coordsys,
                                 None, in_hpx.ebins, None, in_hpx.conv, None)
        data_out = map_in.expanded_counts_map()
        print(data_out.shape, data_out.sum())
        map_out = HpxMap(data_out, out_hpx)
    else:
        map_out.data += map_in.expanded_counts_map()
    return map_out
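A minimal usage sketch (assuming maps is a list of HpxMap objects obtained elsewhere): the first call creates the all-sky output map, and later calls accumulate into it.

map_out = None
for map_in in maps:                    # maps: hypothetical list of HpxMap inputs
    map_out = update_hpx_skymap_allsky(map_in, map_out)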
Example #11
    def _intergral_to_differential(hpx_map, gamma=-2.0):
        """ Convert integral quantity to differential quantity

        Here we are assuming the spectrum is a powerlaw with index gamma and we
        are using log-log-quadrature to compute the integral quantities.
        """
        nebins = len(hpx_map.data)
        diff_map = np.zeros((nebins + 1, hpx_map.hpx.npix))
        ebins = hpx_map.hpx.ebins
        ratio = ebins[1:] / ebins[0:-1]
        half_log_ratio = np.log(ratio) / 2.
        ratio_gamma = np.power(ratio, gamma)
        #ratio_inv_gamma = np.power(ratio, -1. * gamma)

        diff_map[0] = hpx_map.data[0] / ((ebins[0] + ratio_gamma[0] * ebins[1]) * half_log_ratio[0])
        for i in range(nebins):
            diff_map[i + 1] = (hpx_map.data[i] / (ebins[i + 1] *
                                                  half_log_ratio[i])) - (diff_map[i] / ratio[i])
        return HpxMap(diff_map, hpx_map.hpx)
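Under the power-law assumption the lowest edge value follows from F(E1) = F(E0) * (E1/E0)**gamma, which gives the seed term above; every later edge value then comes from inverting the same log-log quadrature bin by bin. A minimal numpy round-trip check (the bin edges and the exact power law are assumptions, not fermipy code):

import numpy as np

gamma = -2.0
ebins = np.logspace(2, 5, 8)                     # assumed energy bin edges (MeV)
flux = ebins**gamma                              # exact power-law values at the edges
ratio = ebins[1:] / ebins[:-1]
half_log_ratio = np.log(ratio) / 2.
integral = (flux[:-1] * ebins[:-1] + flux[1:] * ebins[1:]) * half_log_ratio

recovered = np.zeros_like(flux)
recovered[0] = integral[0] / ((ebins[0] + ratio[0]**gamma * ebins[1]) * half_log_ratio[0])
for i in range(len(integral)):
    recovered[i + 1] = integral[i] / (ebins[i + 1] * half_log_ratio[i]) - recovered[i] / ratio[i]

assert np.allclose(recovered, flux)              # exact when the true spectrum matches gamma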
Example #12
    def _smooth_hpx_map(hpx_map, sigma):
        """ Smooth a healpix map using a Gaussian
        """
        if hpx_map.hpx.ordering == "NESTED":
            ring_map = hpx_map.swap_scheme()
        else:
            ring_map = hpx_map
        ring_data = ring_map.data.copy()
        nebins = len(hpx_map.data)
        smoothed_data = np.zeros((hpx_map.data.shape))
        for i in range(nebins):
            smoothed_data[i] = healpy.sphtfunc.smoothing(
                ring_data[i], sigma=np.radians(sigma), verbose=False)

        smoothed_data = smoothed_data.clip(0., 1e99)  # clip() returns a copy; keep the result
        smoothed_ring_map = HpxMap(smoothed_data, ring_map.hpx)
        if hpx_map.hpx.ordering == "NESTED":
            return smoothed_ring_map.swap_scheme()
        return smoothed_ring_map
Example #13
def run_flux_sensitivity(**kwargs):

    index = kwargs.get('index', 2.0)
    sedshape = kwargs.get('sedshape', 'PowerLaw')
    cutoff = kwargs.get('cutoff', 1e3)
    curvindex = kwargs.get('curvindex', 1.0)
    beta = kwargs.get('beta', 0.0)
    emin = kwargs.get('emin', 10**1.5)
    emax = kwargs.get('emax', 10**6.0)
    nbin = kwargs.get('nbin', 18)
    glon = kwargs.get('glon', 0.0)
    glat = kwargs.get('glat', 0.0)
    ltcube_filepath = kwargs.get('ltcube', None)
    galdiff_filepath = kwargs.get('galdiff', None)
    isodiff_filepath = kwargs.get('isodiff', None)
    galdiff_fit_filepath = kwargs.get('galdiff_fit', None)
    isodiff_fit_filepath = kwargs.get('isodiff_fit', None)
    wcs_npix = kwargs.get('wcs_npix', 40)
    wcs_cdelt = kwargs.get('wcs_cdelt', 0.5)
    wcs_proj = kwargs.get('wcs_proj', 'AIT')
    map_type = kwargs.get('map_type', None)
    spatial_model = kwargs.get('spatial_model', 'PointSource')
    spatial_size = kwargs.get('spatial_size', 1E-2)

    obs_time_yr = kwargs.get('obs_time_yr', None)
    event_class = kwargs.get('event_class', 'P8R2_SOURCE_V6')
    min_counts = kwargs.get('min_counts', 3.0)
    ts_thresh = kwargs.get('ts_thresh', 25.0)
    nside = kwargs.get('hpx_nside', 16)
    output = kwargs.get('output', None)

    event_types = [['FRONT', 'BACK']]

    if sedshape == 'PowerLaw':
        fn = spectrum.PowerLaw([1E-13, -index], scale=1E3)
    elif sedshape == 'PLSuperExpCutoff':
        fn = spectrum.PLSuperExpCutoff([1E-13, -index, cutoff, curvindex],
                                       scale=1E3)
    elif sedshape == 'LogParabola':
        fn = spectrum.LogParabola([1E-13, -index, beta], scale=1E3)
    else:
        raise ValueError('Unknown sedshape: %s' % sedshape)

    log_ebins = np.linspace(np.log10(emin), np.log10(emax), nbin + 1)
    ebins = 10**log_ebins
    ectr = np.exp(utils.edge_to_center(np.log(ebins)))

    c = SkyCoord(glon, glat, unit='deg', frame='galactic')

    if ltcube_filepath is None:

        if obs_time_yr is None:
            raise Exception('No observation time defined.')

        ltc = LTCube.create_from_obs_time(obs_time_yr * 365 * 24 * 3600.)
    else:
        ltc = LTCube.create(ltcube_filepath)
        if obs_time_yr is not None:
            ltc._counts *= obs_time_yr * 365 * \
                24 * 3600. / (ltc.tstop - ltc.tstart)

    gdiff = skymap.Map.create_from_fits(galdiff_filepath)
    gdiff_fit = None
    if galdiff_fit_filepath is not None:
        gdiff_fit = skymap.Map.create_from_fits(galdiff_fit_filepath)

    if isodiff_filepath is None:
        isodiff = utils.resolve_file_path('iso_%s_v06.txt' % event_class,
                                          search_dirs=[
                                              os.path.join(
                                                  '$FERMIPY_ROOT', 'data'),
                                              '$FERMI_DIFFUSE_DIR'
                                          ])
        isodiff = os.path.expandvars(isodiff)
    else:
        isodiff = isodiff_filepath

    iso = np.loadtxt(isodiff, unpack=True)
    iso_fit = None
    if isodiff_fit_filepath is not None:
        iso_fit = np.loadtxt(isodiff_fit_filepath, unpack=True)

    scalc = SensitivityCalc(gdiff,
                            iso,
                            ltc,
                            ebins,
                            event_class,
                            event_types,
                            gdiff_fit=gdiff_fit,
                            iso_fit=iso_fit,
                            spatial_model=spatial_model,
                            spatial_size=spatial_size)

    # Compute Maps
    map_diff_flux = None
    map_diff_npred = None
    map_int_flux = None
    map_int_npred = None

    map_nstep = 500

    if map_type == 'hpx':

        hpx = HPX(nside, True, 'GAL', ebins=ebins)
        map_diff_flux = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_diff_npred = HpxMap(np.zeros((nbin, hpx.npix)), hpx)
        map_skydir = map_diff_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.diff_flux_threshold(map_skydir[s], fn, ts_thresh,
                                          min_counts)
            map_diff_flux.data[:, s] = o['flux'].T
            map_diff_npred.data[:, s] = o['npred'].T

        hpx = HPX(nside, True, 'GAL')
        map_int_flux = HpxMap(np.zeros((hpx.npix)), hpx)
        map_int_npred = HpxMap(np.zeros((hpx.npix)), hpx)
        map_skydir = map_int_flux.hpx.get_sky_dirs()

        for i in range(0, len(map_skydir), map_nstep):
            s = slice(i, i + map_nstep)
            o = scalc.int_flux_threshold(map_skydir[s], fn, ts_thresh,
                                         min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    elif map_type == 'wcs':

        wcs_shape = [wcs_npix, wcs_npix]
        wcs_size = wcs_npix * wcs_npix

        map_diff_flux = Map.create(c,
                                   wcs_cdelt,
                                   wcs_shape,
                                   'GAL',
                                   wcs_proj,
                                   ebins=ebins)
        map_diff_npred = Map.create(c,
                                    wcs_cdelt,
                                    wcs_shape,
                                    'GAL',
                                    wcs_proj,
                                    ebins=ebins)
        map_skydir = map_diff_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(np.arange(i, min(i + map_nstep, wcs_size)),
                                   wcs_shape)
            s = (slice(None), idx[1], idx[0])
            o = scalc.diff_flux_threshold(map_skydir[slice(i, i + map_nstep)],
                                          fn, ts_thresh, min_counts)
            map_diff_flux.data[s] = o['flux'].T
            map_diff_npred.data[s] = o['npred'].T

        map_int_flux = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_int_npred = Map.create(c, wcs_cdelt, wcs_shape, 'GAL', wcs_proj)
        map_skydir = map_int_flux.get_pixel_skydirs()

        for i in range(0, len(map_skydir), map_nstep):
            idx = np.unravel_index(np.arange(i, min(i + map_nstep, wcs_size)),
                                   wcs_shape)
            s = (idx[1], idx[0])
            o = scalc.int_flux_threshold(map_skydir[slice(i, i + map_nstep)],
                                         fn, ts_thresh, min_counts)
            map_int_flux.data[s] = o['flux']
            map_int_npred.data[s] = o['npred']

    o = scalc.diff_flux_threshold(c, fn, ts_thresh, min_counts)

    cols = [
        Column(name='e_min', dtype='f8', data=scalc.ebins[:-1], unit='MeV'),
        Column(name='e_ref', dtype='f8', data=o['e_ref'], unit='MeV'),
        Column(name='e_max', dtype='f8', data=scalc.ebins[1:], unit='MeV'),
        Column(name='flux', dtype='f8', data=o['flux'], unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', data=o['eflux'],
               unit='MeV / (cm2 s)'),
        Column(name='dnde',
               dtype='f8',
               data=o['dnde'],
               unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde',
               dtype='f8',
               data=o['e2dnde'],
               unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', data=o['npred'], unit='ph')
    ]

    tab_diff = Table(cols)

    cols = [
        Column(name='index', dtype='f8'),
        Column(name='e_min', dtype='f8', unit='MeV'),
        Column(name='e_ref', dtype='f8', unit='MeV'),
        Column(name='e_max', dtype='f8', unit='MeV'),
        Column(name='flux', dtype='f8', unit='ph / (cm2 s)'),
        Column(name='eflux', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='dnde', dtype='f8', unit='ph / (MeV cm2 s)'),
        Column(name='e2dnde', dtype='f8', unit='MeV / (cm2 s)'),
        Column(name='npred', dtype='f8', unit='ph'),
        Column(name='ebin_e_min', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_e_ref', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_e_max', dtype='f8', unit='MeV', shape=(len(ectr), )),
        Column(name='ebin_flux',
               dtype='f8',
               unit='ph / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_eflux',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_dnde',
               dtype='f8',
               unit='ph / (MeV cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_e2dnde',
               dtype='f8',
               unit='MeV / (cm2 s)',
               shape=(len(ectr), )),
        Column(name='ebin_npred', dtype='f8', unit='ph', shape=(len(ectr), ))
    ]

    cols_ebounds = [
        Column(name='E_MIN', dtype='f8', unit='MeV', data=ebins[:-1]),
        Column(name='E_MAX', dtype='f8', unit='MeV', data=ebins[1:]),
    ]

    tab_int = Table(cols)
    tab_ebounds = Table(cols_ebounds)

    index = np.linspace(1.0, 5.0, 4 * 4 + 1)

    for g in index:
        fn = spectrum.PowerLaw([1E-13, -g], scale=10**3.5)
        o = scalc.int_flux_threshold(c, fn, ts_thresh, 3.0)
        row = [g]
        for colname in tab_int.columns:
            if colname == 'index':
                continue
            if 'ebin' in colname:
                row += [o['bins'][colname.replace('ebin_', '')]]
            else:
                row += [o[colname]]

        tab_int.add_row(row)

    hdulist = fits.HDUList()
    hdulist.append(fits.table_to_hdu(tab_diff))
    hdulist.append(fits.table_to_hdu(tab_int))
    hdulist.append(fits.table_to_hdu(tab_ebounds))

    hdulist[1].name = 'DIFF_FLUX'
    hdulist[2].name = 'INT_FLUX'
    hdulist[3].name = 'EBOUNDS'

    if map_type is not None:
        hdu = map_diff_flux.create_image_hdu()
        hdu.name = 'MAP_DIFF_FLUX'
        hdulist.append(hdu)
        hdu = map_diff_npred.create_image_hdu()
        hdu.name = 'MAP_DIFF_NPRED'
        hdulist.append(hdu)

        hdu = map_int_flux.create_image_hdu()
        hdu.name = 'MAP_INT_FLUX'
        hdulist.append(hdu)
        hdu = map_int_npred.create_image_hdu()
        hdu.name = 'MAP_INT_NPRED'
        hdulist.append(hdu)

    hdulist.writeto(output, overwrite=True)
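A minimal invocation sketch; the file paths and the sky position are hypothetical, and every keyword shown corresponds to one of the kwargs.get calls above:

run_flux_sensitivity(ltcube='ltcube.fits',            # hypothetical input files
                     galdiff='galdiff.fits',
                     isodiff=None,                     # fall back to the bundled iso model
                     event_class='P8R2_SOURCE_V6',
                     glon=0.0, glat=30.0,
                     emin=10**1.5, emax=10**6.0, nbin=18,
                     map_type='hpx', hpx_nside=16,
                     ts_thresh=25.0, min_counts=3.0,
                     output='flux_sensitivity.fits')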
Example #14
 def _compute_mean(map1, map2):
     """ Make a map that is the mean of two maps
     """
     data = (map1.data + map2.data) / 2.
     return HpxMap(data, map1.hpx)
Example #15
 def _compute_ratio(top, bot):
     """ Make a map that is the ratio of two maps
     """
     data = np.where(bot.data > 0, top.data / bot.data, 0.)
     return HpxMap(data, top.hpx)
Example #16
 def _compute_diff(map1, map2):
     """ Make a map that is the difference of two maps
     """
     data = map1.data - map2.data
     return HpxMap(data, map1.hpx)
Example #17
 def _apply_aeff_corrections(intensity_map, aeff_corrections):
     """ Multipy a map by the effective area correction
     """
     data = aeff_corrections * intensity_map.data.T
     return HpxMap(data.T, intensity_map.hpx)
Example #18
 def _compute_product(map1, map2):
     """ Make a map that is the product of two maps
     """
     data = map1.data * map2.data
     return HpxMap(data, map1.hpx)
Example #19
 def _compute_counts_from_intensity(intensity, bexpcube):
     """ Make the counts map from the intensity
     """
     data = intensity.data * np.sqrt(bexpcube.data[1:] * bexpcube.data[0:-1])
     return HpxMap(data, intensity.hpx)