Example #1
    def test_accept_ma_allows_only_keywords(self):
        """ Test whether 'smoothing' wrapped with accept_ma works with only
            keyword arguments. """

        ma = np.ones(12 * 16 ** 2)
        try:
            hp.smoothing(map_in=ma)
        except IndexError:
            self.fail()
Example #2
def smoothing(m, fwhm, nest=True):
    full_map = np.ones(npix)
    full_map[total_mask] = m
    if fwhm <= 0:
        return m
    if nest:
        smoothed_map = hpf.reorder(hp.smoothing(hpf.reorder(full_map, n2r=True), fwhm=fwhm), r2n=True)
    else:
        smoothed_map = hp.smoothing(full_map, fwhm=fwhm)
    return smoothed_map[total_mask]
Example #3
    def beam(self, feed, freq):

        # bm = visibility.cylinder_beam(self._angpos, self.zenith,
        #                               self.cylinder_width / self.wavelengths[freq])

        # sigma = np.radians(self.fwhm) / (8.0*np.log(2.0))**0.5 / (self.frequencies[freq] / 150.0)

        #     pointing = np.array([np.pi / 2.0 - np.radians(self.pointing), self.zenith[1]])

        #     x2 = (1.0 - coord.sph_dot(self._angpos, pointing)**2) / (4*sigma**2)
        #     self._bc_map = np.exp(-x2)

        #     self._bc_freq = freq
        #     self._bc_nside = self._nside

        uhatc, vhatc = visibility.uv_plane_cart(self.zenith)

        ## Note sinc function is normalised hence lack of pi
        bmh = np.sinc(np.inner(coord.sph_to_cart(self._angpos), self.cylinder_width * uhatc / self.wavelengths[freq]))

        bmv = np.where(
            np.abs(np.inner(coord.sph_to_cart(self._angpos), vhatc)) * 180.0 / np.pi < self.vwidth / 2.0,
            np.ones_like(bmh),
            np.zeros_like(bmh),
        )

        bmv = healpy.smoothing(bmv, degree=True, fwhm=(self.vwidth / 10.0))

        return bmv * bmh
Example #4
 def direct(self, input, output):
     if input.ndim > 1:
         input = [_ for _ in input.T]
     output.T[...] = hp.smoothing(
         input, fwhm=self.fwhm, iter=self.iter, lmax=self.lmax,
         mmax=self.mmax, use_weights=self.use_weights,
         datapath=self.datapath, pol=self.pol, verbose=False)
Example #5
def surv_sum_diff(surveydict,ss=1,nside=128,freq=70,mask_ps=True,fwhm=10.0):
    """
    function to generate the smoothed difference between the chosen survey and the sum of the other 5 surveys from Andrea's interactive destriper/binner. Uses the common masks, smoothed to 10 degrees.
    fwhm is in degrees; if zero or negative, don't smooth at all. Default is 10 degrees.
    """
    sumlist=surveydict.keys()
    sumlist.remove(ss)
    nsum=len(sumlist)
    m=hp.ma(np.array(surveydict[sumlist[0]]))/nsum
    totalmask=m.mask
    if mask_ps==True:
        psmask = np.logical_not(np.floor(hp.ud_grade(hp.read_map(glob('/project/projectdirs/planck/data/mission/DPC_maps/dx8/lfi/DX8_MASKs/' + 'mask_ps_%dGHz_*.fits' % freq)[0]), nside,order_out='NEST')))
        totalmask=m.mask | psmask
    for other in sumlist[1:]:
        m1=hp.ma(np.array(surveydict[other]))
        m=m+m1/nsum
        totalmask=m1.mask | totalmask
    m.mask=totalmask
    m.mask |= np.isnan(m)

    m1=hp.ma(np.array(surveydict[ss]))
    totalmask=totalmask | m1.mask
    d=(m1-m)/2.
    d=hp.ud_grade(d,nside,order_in='NEST',order_out='RING')
    if fwhm>0:
        dsm=hp.ma(hp.smoothing(d.filled(),fwhm*np.pi/180.))
        dsm.mask=m.mask
    if fwhm<=0:
        dsm=d
    hp.mollview(dsm,min=-1e-5,max=1e-5,title='SS'+np.str(ss)+ '  - sum of others')
    return dsm
Example #6
    def smoother(self, map_array):
        """Function to smooth an array of N (T, Q, U) maps with N beams in
        units of arcmin.

        :param map_array: array of N (T, Q, U) maps to smooth
        :type map_array: numpy.ndarray
        
        """
        if not self.Use_Smoothing:
            return map_array
        elif self.Use_Smoothing:
            if self.pixel_indices is None:
                full_map = map_array
            else:
                full_map = build_full_map(self.pixel_indices, map_array, self.Nside)
            smoothed_map_array = np.array([hp.smoothing(m, fwhm = np.pi / 180. * b / 60., verbose = False) for (m, b) in zip(full_map, self.Beams)])
            if self.pixel_indices is None:
                return smoothed_map_array
            else:
                assert smoothed_map_array.ndim == 3, \
                    "Assuming map array is 3 dimensional (n_freqs x n_maps x n_pixels)"
                return smoothed_map_array[..., self.pixel_indices]
        else:
            print("Please set 'Use_Smoothing' in Instrument object.")
            sys.exit(1)
Example #7
    def test_smooth(self):
        op = LibSharpSmooth(comm=self.comm, signal_map="signal_map",
            lmax=self.lmax, grid=self.dist_rings.libsharp_grid,
            fwhm_deg=self.fwhm_deg, beam=None)
        op.exec(self.data)

        # Copy our local piece into a buffer of zeros that we will
        # reduce to the root process.  We could also use a gather, but
        # this is a small buffer.
        local_output_map = np.zeros(self.input_map.shape, dtype=np.float64)
        local_output_map[:, self.dist_rings.local_pixels] = \
            self.data["smoothed_signal_map"]

        output_map = None
        if self.comm.rank == 0:
            output_map = np.zeros(self.input_map.shape, dtype=np.float64)
        self.comm.Reduce(local_output_map, output_map,
            root=0, op=MPI.SUM)

        if self.comm.rank == 0:
            hp_smoothed = hp.smoothing(self.input_map,
                fwhm=np.radians(self.fwhm_deg), lmax=self.lmax)
            np.testing.assert_array_almost_equal(hp_smoothed, output_map,
                decimal=2)
            print("Std of difference between libsharp and healpy",
                (hp_smoothed-output_map).std())

        return
Example #8
    def _load_data(self):
        print "Loading data for galaxy simulation."

        _haslam_file = join(_datadir, "haslam.fits")
        _sp_ind_file = join(_datadir, "spectral.hdf5")

        if not exists(_haslam_file):
            print "Needed data files missing! Try and fetch the Haslam map from LAMBDA [y/n]?"

            choice = raw_input().lower()
            if choice == "y":
                import urllib

                _haslam_url = "http://lambda.gsfc.nasa.gov/data/foregrounds/haslam/lambda_haslam408_dsds.fits"

                print "Downloading %s ...." % _haslam_url
                urllib.urlretrieve(_haslam_url, _haslam_file)
                print "Done."

            elif choice == "n":
                raise Exception("No Haslam map found. Can not continue.")
            else:
                print "Please respond with 'y' or 'n'."

        self._haslam = healpy.read_map(join(_datadir, "haslam.fits"))
        self._sp_ind = h5py.File(_sp_ind_file)["spectral_index"][:]

        # Upgrade the map resolution to the same as the Healpix map (nside=512).
        self._sp_ind = healpy.smoothing(healpy.ud_grade(self._sp_ind, 512), degree=True, sigma=1.0)
Example #9
def smooth_variance_map(var_m, fwhm):
    """Smooth a variance map

    Algorithm from 'Pixel errors in convolved maps'
    J.P. Leahy, version 0.2

    Parameters
    ----------
    var_m : array
        input variance map
    fwhm : float (radians)
        target fwhm

    Returns
    -------
    smoothed_var_m : array
        smoothed variance map
    """

    # smooth map
    fwhm_variance = fwhm / np.sqrt(2)
    smoothed_var_m = hp.smoothing(var_m, fwhm=fwhm_variance, regression=False)

    # normalization factor
    pix_area = hp.nside2pixarea(hp.npix2nside(len(var_m)))
    orig_beam_width = fwhm/np.sqrt(8*np.log(2))
    A_vb = pix_area / (4. * np.pi * orig_beam_width**2)
    smoothed_var_m *= A_vb

    return smoothed_var_m
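
A minimal usage sketch (not part of the original example), assuming the smooth_variance_map function above is in scope and an older healpy release that still accepts the regression keyword; the flat variance map and nside below are made up for illustration:
import numpy as np
import healpy as hp

nside = 64                                       # hypothetical resolution
var_m = 1e-4 * np.ones(hp.nside2npix(nside))     # hypothetical flat variance map
target_fwhm = np.radians(1.0)                    # smooth to a 1 degree FWHM beam

smoothed_var = smooth_variance_map(var_m, fwhm=target_fwhm)
print(smoothed_var.mean())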
Example #10
def timemaps(maps, fwhm_degrees, eortime):
    smoothed_maps = []
    for index in range(len(eortime)):
        testmap = galmap2eqmap(maps)
        testmap = eqmap2azelmap(testmap, ut=eortime[index])
        testmap = replace_with_earth(testmap)
        smoothed_maps.append(hp.smoothing(testmap, np.radians(fwhm_degrees), regression=False))
    return smoothed_maps
Example #11
def mask_from_map(nmap, fwhm_deg=3.0, final_fwhm_deg=5.0, 
                  thresh_min=0.9, thresh_max=3.0,
                  lat_gal_cut=20., ecl_pole_rad=5.,
                  coord='G'):

    import healpy as hp
    mask = np.ones_like(nmap)

    # mask out low galactic latitudes
    npix = len(nmap)
    nside = hp.npix2nside(npix)
    ipix = np.arange(npix)
    theta_c, phi_c = hp.pix2ang(nside, ipix, nest=False)
    r = hp.Rotator(coord=[coord,'G'])  # transforms to galactic
    theta_gal, phi_gal = r(theta_c, phi_c)
    lat_gal_deg = (np.pi/2.-theta_gal)*180./np.pi
    wh_gal = np.where(np.abs(lat_gal_deg)<lat_gal_cut)[0]
    mask[wh_gal]=0.

    # mask out the ecliptic poles
    # actually, isn't the coverage *better* at the poles??
    r = hp.Rotator(coord=[coord,'E'])  # transforms to ecliptic
    theta_ecl, phi_ecl = r(theta_c, phi_c)
    lat_ecl_deg = (np.pi/2.-theta_ecl)*180./np.pi
    wh_ecl_pole = np.where(np.abs(np.abs(lat_ecl_deg)-90.)<ecl_pole_rad)[0]
    mask[wh_ecl_pole]=0.

    # mask out Magellanic clouds.
    # tmpp

    # mask according to <nmap>, which is a proxy for coverage.
    sm = hp.smoothing(nmap*mask, fwhm=fwhm_deg*np.pi/180.)
    wh=np.where((sm<(thresh_min*np.median(sm))))[0]
    mask[wh]=0.
    wh=np.where((sm>(thresh_max*np.median(sm))))[0]
    mask[wh]=0.

    # apply a  final smoothing/threshold/smoothing
    mask = hp.smoothing(mask, fwhm=final_fwhm_deg*np.pi/180.)
    #hp.mollview(mask);ipdb.set_trace()
    mask = hp.smoothing(mask>0.95, fwhm=0.5*final_fwhm_deg*np.pi/180.)
    if False:
        hp.mollview(mask*nmap)
        pdb.set_trace()
    return mask
Example #12
 def test_smoothing_masked(self):
     smoothed = hp.smoothing(self.map1, fwhm=np.radians(10), lmax=self.lmax, regression=False)
     smoothed_f90 = hp.ma(hp.read_map(os.path.join(self.path, 'data',
               'wmap_band_iqumap_r9_7yr_W_v4_udgraded32_masked_smoothed10deg_fortran.fits'), (0,1,2)))
     # fortran does not restore the mask
     for mm in smoothed_f90:
         mm.mask = smoothed[0].mask
     for i in range(3):
         np.testing.assert_array_almost_equal(smoothed[i].filled(), smoothed_f90[i].filled(), decimal=6)
Example #13
def make_healpix_image(events, nside, sigma):
    theta = np.deg2rad(90 - events['B'])
    phi = np.deg2rad(events['L'])
    bins = np.arange(hp.nside2npix(nside) + 1) - 0.5
    data = hp.ang2pix(nside, theta, phi)
    counts, _ = np.histogram(data, bins)
    counts = hp.smoothing(counts, sigma=sigma)

    return counts
Example #14
def skypatch(phi, theta, radius, skyres=2.):
    import healpy
    baseres = (skyres / 4.) * np.pi/180. # require this resolution from healpix grid
    nside = 2**int(min(10, np.ceil(np.log2(healpy.nside2resol(1) / baseres)))) # don't go past 2**10 (12.6 million pixels)
    npix = healpy.nside2npix(nside)
    p = healpy.ang2pix(nside, theta, phi)
    n = np.zeros(npix)
    n[p] = 1. # put all probability at (p, t)
    m = healpy.smoothing(n, sigma=radius) # smooth out Gaussian
    return m
Example #15
    def getsky(self, debug=False, celestial=True):

        # Read in data files.
        haslam = healpy.smoothing(
            healpy.ud_grade(self._haslam, self.nside), degree=True, fwhm=3.0
        )  # hputil.coord_g2c()

        beam = 1.0
        syn = FullSkySynchrotron()

        lmax = 3 * self.nside - 1

        efreq = np.concatenate((np.array([408.0, 1420.0]), self.nu_pixels))

        cla = skysim.clarray(syn.angular_powerspectrum, lmax, efreq)

        fg = skysim.mkfullsky(cla, self.nside)

        sub408 = healpy.smoothing(fg[0], fwhm=3.0, degree=True)
        sub1420 = healpy.smoothing(fg[1], fwhm=5.8, degree=True)

        fgs = skysim.mkconstrained(cla, [(0, sub408), (1, sub1420)], self.nside)

        sc = healpy.ud_grade(self._sp_ind, self.nside)
        am = healpy.ud_grade(self._amp_map, self.nside)
        mv = healpy.smoothing(
            map_variance(healpy.smoothing(fg[0], sigma=0.5, degree=True), 16) ** 0.5, degree=True, sigma=1.0
        ).mean()

        fgt = (am / mv) * (fg - fgs)

        fg2 = (haslam[np.newaxis, :] * ((efreq / 408.0)[:, np.newaxis] ** sc) + fgt)[2:]

        if celestial:
            for i in range(fg2.shape[0]):
                fg2[i] = hputil.coord_g2c(fg2[i])

        if debug:
            return fg2, fg, fgs, fgt

        return fg2
Example #16
def smooth_and_combine_maps(maps_and_weights,
                            output_file_name,
                            reader_function,
                            stokes_components=(0, 1, 2),
                            smoothing_angle=2.0,
                            degraded_nside=32):
    '''Combine a set of FITS maps into another.

    The maps are specified by `maps_and_weights', a list of tuples
    of the form (PATH, WEIGHT), where PATH is a string containing the
    path of the FITS file, and WEIGHT is the coefficient applied to that
    map in the linear combination.'''

    combined_map = []
    for map_file_name, weight in maps_and_weights:
        log.info("Reading map %s (weight %f)", map_file_name, weight)
        pixels = reader_function(map_file_name)
        if len(combined_map) == 0:
            combined_map = [x * weight for x in pixels]
        else:
            for idx in xrange(len(pixels)):
                combined_map[idx] = combined_map[idx] + weight * pixels[idx]

        # Maps with NSIDE=1024 are large, so it's better to free memory we
        # are not going to use any longer
        del pixels

    component_name = {0: 'I', 1: 'Q', 2: 'U'}
    smoothed_map = []
    if smoothing_angle > 0.0:
        log.info('Applying smoothing filter to %d Stokes maps',
                 len(combined_map))
        sm_angle = np.deg2rad(smoothing_angle)
        for idx, diff_component in enumerate(combined_map):
            nan_mask = np.isnan(diff_component)
            diff_component[nan_mask] = 0.0

            log.info('Applying a smoothing filter to %s map...',
                     component_name[stokes_components[idx]])
            smoothed_component = hp.smoothing(diff_component,
                                                  sm_angle)
            smoothed_component[nan_mask] = np.NaN

            smoothed_map.append(smoothed_component)
    else:
        smoothed_map = combined_map

    if degraded_nside > 0:
        log.info('Degrading the map to NSIDE = %d', degraded_nside)
        degraded_map = hp.ud_grade(smoothed_map, degraded_nside)
    else:
        degraded_map = smoothed_map

    log.info('Saving the map into %s', output_file_name)
    hp.write_map(output_file_name, degraded_map)
Example #17
 def test_smoothing_notmasked(self):
     smoothed = hp.smoothing(
         [m.data for m in self.map1], fwhm=np.radians(10), lmax=self.lmax
     )
     smoothed_f90 = hp.read_map(
         os.path.join(
             self.path,
             "data",
             "wmap_band_iqumap_r9_7yr_W_v4_udgraded32_smoothed10deg_fortran.fits",
         ),
         (0, 1, 2),
     )
     np.testing.assert_array_almost_equal(smoothed, smoothed_f90, decimal=6)
Example #18
def mk_map_onescale(nside,lmin=0,alpha=-2.17,smooth=1.):
    """
    One power law model -> random field realization
    """
    lmax = 3*nside - 1
    ll = np.array(range(lmin,lmax,1))
    Cl = np.power(ll,alpha)
    Cl[0] = 0. #zero mean

    Q = hp.synfast(Cl,nside,lmax=lmax)
    Qsm = hp.smoothing(Q,fwhm=np.radians(smooth))

    return Qsm
Example #19
def skyhist(posteriorsamples='posterior_samples.dat', skyres=2.):
    import healpy
    if type(posteriorsamples) is str:
        posteriorsamples = loadposteriorsamples(posteriorsamples)
    baseres = (skyres / 4.) * np.pi/180. # require this resolution from healpix grid
    nside = 2**int(min(10, np.ceil(np.log2(healpy.nside2resol(1) / baseres)))) # don't go past 2**10 (12.6 million pixels)
    npix = healpy.nside2npix(nside)
    p = healpy.ang2pix(nside, posteriorsamples[:,2], posteriorsamples[:,1]) # convert (theta, phi) to healpix numbers
    # n = np.bincount(p, minlength=npix) # bin the samples
    n = np.zeros(npix)
    n[:max(p)+1] = np.bincount(p) # support old version of numpy that does not have minlength argument
    m = healpy.smoothing(n, sigma=skyres*np.pi/180.) # smoothed map
    return m
Example #20
def test_healpix_convolution():
    nside = 16
    keywords = {'fwhm': np.radians(30),
                'iter': 2,
                'lmax': 8,
                'use_weights': False}

    input = np.arange(12 * nside**2)
    op = HealpixConvolutionGaussianOperator(**keywords)

    for i in input, np.repeat(input[:, None], 3, 1):
        expected = np.transpose(hp.smoothing(i.T, verbose=False, **keywords))
        assert_same(op(i), expected)

    if hp.__version__ <= '1.8.6':  # healpy #259
        return

    op = HealpixConvolutionGaussianOperator(pol=False, **keywords)
    input_ = np.arange(12 * nside**2)
    input = np.array([input_, input_, input_]).T
    expected_ = hp.smoothing(input_, verbose=False, **keywords)
    expected = np.array([expected_, expected_, expected_]).T
    assert_same(op(input), expected)
Example #21
    def smooth(self, fwhm, lmax=None, pol_only=False):
        """Smooth the map with a Gaussian kernel.
        """
        if self.rank == 0:
            if pol_only:
                print("Smoothing the polarization to {} arcmin".format(fwhm),
                      flush=True)
            else:
                print("Smoothing the map to {} arcmin".format(fwhm),
                      flush=True)

        if lmax is None:
            lmax = min(np.int(fwhm / 60 * 512), 2 * self.nside)

        # If the map is in node-shared memory, only the root process on each
        # node does the smoothing.
        if not self.shmem or self._map.nodecomm.rank == 0:
            if self.pol:
                m = np.vstack([self._map[:], self._map_Q[:], self._map_U[:]])
            else:
                m = self._map[:]
            if self.nest:
                m = hp.reorder(m, n2r=True)
            smap = hp.smoothing(m,
                                fwhm=fwhm * arcmin,
                                lmax=lmax,
                                verbose=False)
            del m
            if self.nest:
                smap = hp.reorder(smap, r2n=True)
        else:
            # Convenience dummy variable
            smap = np.zeros([3, 12])

        if not pol_only:
            if self.shmem:
                self._map.set(smap[0].astype(DTYPE), (0, ), fromrank=0)
            else:
                self._map[:] = smap[0]

        if self.pol:
            if self.shmem:
                self._map_Q.set(smap[1].astype(DTYPE), (0, ), fromrank=0)
                self._map_U.set(smap[2].astype(DTYPE), (0, ), fromrank=0)
            else:
                self._map_Q[:] = smap[1]
                self._map_U[:] = smap[2]

        self.pol_fwhm = fwhm
        return
Example #22
def process_healpix(filename, out_nside):
    print 'Process {0} on {1}'.format(MP.current_process().name, filename)
    pixval = HP.read_map(filename, verbose=False)
    nside = HP.npix2nside(pixval.size)
    out_pixres = HP.nside2resol(out_nside)
    print 'Before: ', pixval.min(), pixval.max(), NP.mean(pixval), NP.std(pixval)
    if nside > out_nside:
        if nside/out_nside > 4:
            pixval = HP.ud_grade(pixval, 4*out_nside)
            nside = 4*out_nside
        pix_smoothed = HP.smoothing(pixval, fwhm=out_pixres, regression=False, verbose=False)
    else:
        pix_smoothed = pixval
    pix_resampled = HP.ud_grade(pix_smoothed, out_nside)
    print 'After: ', pixval.min(), pixval.max(), NP.mean(pix_resampled), NP.std(pix_resampled)
    return pix_resampled
Example #23
 def test_smoothing_notmasked(self):
     smoothed = hp.smoothing([m.data for m in self.map1],
                             fwhm=np.radians(10),
                             lmax=self.lmax)
     smoothed_f90 = hp.read_map(
         os.path.join(
             self.path,
             "data",
             "wmap_band_iqumap_r9_7yr_W_v4_udgraded32_smoothed10deg_fortran.fits",
         ),
         (0, 1, 2),
         np.float64,
     )
     np.testing.assert_array_almost_equal(smoothed, smoothed_f90, decimal=6)
Example #24
def smooth(params):
    inf, opf, fwhm, nside = params
    print('input = {:s}; output = {:s}; fwhm = {:.5f} deg'
          .format(inf, opf, fwhm))
    m = hp.read_map(inf, verbose=False)
    m -= m.mean()
    m_smooth = hp.smoothing(m, fwhm=fwhm*np.pi/180)
    if nside is not None:
        m_out = hp.ud_grade(m_smooth, nside, order_in='RING',
                            order_out='RING', dtype=np.float64)
    else:
        m_out = m_smooth
    hp.write_map(opf, m_out, fits_IDL=False, coord='C',
                 dtype=np.float64)
Example #25
def apodize_mask(mask, sigma_arcmin=12., lmax=None, method='hybrid', cache_dir='caches/',
                 mult_factor=3, min_factor=0.1):
    """Apodize a mask so it can safely be used for Pseudo-CL inversion.

    Args:
        mask: input healpix map array
        sigma_arcmin: characteristic width of the smoothing kernel, in arcmin
        lmax: lmax when apodizing mask
        method: gaussian or hybrid (hybrid mainly smooths outside existing mask, so reduces fsky)
        cache_dir: if not None, cache result here if possible
        mult_factor: for hybrid method, multiply (1-mask) by this factor and truncate (enlarges mask, before resmoothing)
        min_factor: for hybrid method, set tails larger than 1 - min_factor to unity after scaling by mult_factor
    Returns:
        the apodized map array
    """

    if not sigma_arcmin: return mask
    sigma_rad = sigma_arcmin / 180. / 60. * np.pi
    if cache_dir: name = os.path.join(cache_dir, 'ap_mask_' + '_'.join(
        '%s' % s for s in
        [sigma_arcmin, method, lmax, mult_factor, min_factor, hashlib.sha1(mask).hexdigest()])) + '.fits'
    if cache_dir and os.path.exists(name):
        ap_mask = hp.read_map(name)
    else:
        print('apodizing... (fsky_unapodized=%s)' % (np.sum(mask ** 2) / mask.size))
        ap_mask = hp.smoothing(mask, sigma=sigma_rad, lmax=lmax)
        print('Min/max mask smoothed mask', np.min(ap_mask), np.max(ap_mask))
        print('fsky=', np.sum(ap_mask ** 2) / ap_mask.size)
        if method == 'gaussian': return ap_mask
        if method != 'hybrid': raise ValueError('Unknown apodization method')
        ap_mask = 1 - np.minimum(1., np.maximum(0., mult_factor * (1 - ap_mask) - min_factor))
        ap_mask = hp.smoothing(ap_mask, sigma=sigma_rad / 2, lmax=lmax)
        print('Min/max mask re-smoothed mask', np.min(ap_mask), np.max(ap_mask))
        print('fsky=', np.sum(ap_mask ** 2) / ap_mask.size)
        if cache_dir:
            hp.write_map(name, ap_mask)
    return ap_mask
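
A minimal usage sketch (not part of the original example), assuming the apodize_mask function above is in scope; the binary galactic-cut mask below is made up for illustration and on-disk caching is disabled:
import numpy as np
import healpy as hp

nside = 128
npix = hp.nside2npix(nside)
theta, _ = hp.pix2ang(nside, np.arange(npix))
mask = (np.abs(np.degrees(theta) - 90.0) > 20.0).astype(float)  # hypothetical +/-20 deg cut

ap_mask = apodize_mask(mask, sigma_arcmin=60.0, cache_dir=None)  # skip the cache
print('apodized fsky =', np.sum(ap_mask ** 2) / ap_mask.size)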
Example #26
def load_map(fn_in, fn_out, nside, fwhm, lmax):
    if os.path.isfile(fn_out):
        print('Reading', fn_out, flush=True)
        m = hp.read_map(fn_out, None, verbose=False)
    else:
        if not os.path.isfile(fn_in):
            print('File not found:', fn_in)
            return None
        print('Reading', fn_in, flush=True)
        m = hp.read_map(fn_in, None, nest=True, verbose=False)
        m = hp.ud_grade(m, nside, order_in='nest', order_out='ring')
        m = hp.smoothing(m, fwhm=fwhm, lmax=lmax, iter=0, verbose=False)
        print('Writing', fn_out)
        write_map(fn_out, m)
    return m
Example #27
 def test_smoothing_masked(self):
     smoothed = hp.smoothing(self.map1, fwhm=np.radians(10), lmax=self.lmax)
     smoothed_f90 = hp.ma(
         hp.read_map(
             os.path.join(
                 self.path, 'data',
                 'wmap_band_iqumap_r9_7yr_W_v4_udgraded32_masked_smoothed10deg_fortran.fits'
             ), (0, 1, 2)))
     # fortran does not restore the mask
     for mm in smoothed_f90:
         mm.mask = smoothed[0].mask
     for i in range(3):
         np.testing.assert_array_almost_equal(smoothed[i].filled(),
                                              smoothed_f90[i].filled(),
                                              decimal=6)
Example #28
def changeResolution(map, fwhmCurrent, fwhmNew):
    """
    Given a HEALPix map with full width at half max resolution 'fwhmCurrent'
    degrees, smooth it to a new FWHM of 'fwhmNew' degrees.
    """

    if fwhmNew <= fwhmCurrent:
        return map
    else:
        smth = np.sqrt(fwhmNew ** 2 - fwhmCurrent ** 2)
        # smth /= 2*np.sqrt(2*np.log(2))

        if getattr(map, 'mask', None) is not None:
            data = map.data * 1.0
            data[np.where(map.mask == 1)] = 0
            map2 = hp.smoothing(data, fwhm=smth, degree=True)
            mask2 = hp.smoothing(map.mask.astype(np.float64), fwhm=smth, degree=True)

            map2 = hp.ma(map2)
            map2.mask = np.where(mask2 >= 0.005, 1, 0).astype(np.bool)
        else:
            map2 = hp.smoothing(map, fwhm=smth, degree=True)

        return map2
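
A minimal usage sketch (not part of the original example), assuming the changeResolution function above is in scope and an older healpy release that still accepts the degree keyword; the synthetic map and resolutions are made up for illustration:
import numpy as np
import healpy as hp

nside = 64
raw = np.random.standard_normal(hp.nside2npix(nside))  # hypothetical map at 1 deg FWHM
smoothed = changeResolution(raw, fwhmCurrent=1.0, fwhmNew=3.0)  # degrade to 3 deg FWHM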
Example #29
    def smooth(self, fwhm, lmax=None, pol_only=False):
        """ Smooth the map with a Gaussian kernel.
        """
        autotimer = timing.auto_timer(type(self).__name__)
        if self.rank == 0:
            if pol_only:
                print('Smoothing the polarization to {} arcmin'.format(fwhm),
                      flush=True)
            else:
                print('Smoothing the map to {} arcmin'.format(fwhm), flush=True)

        if lmax is None:
            lmax = min(np.int(fwhm / 60 * 512), 2 * self.nside)

        # If the map is in node-shared memory, only the root process on each
        # node does the smoothing.
        if not self.shmem or self._map.nodecomm.rank == 0:
            if self.pol:
                m = np.vstack([self._map[:], self._map_Q[:], self._map_U[:]])
            else:
                m = self._map[:]
            if self.nest:
                m = hp.reorder(m, n2r=True)
            smap = hp.smoothing(m, fwhm=fwhm * arcmin, lmax=lmax, verbose=False)
            del m
            if self.nest:
                smap = hp.reorder(smap, r2n=True)
        else:
            # Convenience dummy variable
            smap = np.zeros([3, 12])

        if not pol_only:
            if self.shmem:
                self._map.set(smap[0].astype(DTYPE), (0,), fromrank=0)
            else:
                self._map[:] = smap[0]

        if self.pol:
            if self.shmem:
                self._map_Q.set(smap[1].astype(DTYPE), (0,), fromrank=0)
                self._map_U.set(smap[2].astype(DTYPE), (0,), fromrank=0)
            else:
                self._map_Q[:] = smap[1]
                self._map_U[:] = smap[2]

        self.pol_fwhm = fwhm
        del autotimer
        return
Example #30
 def load_map(fn_in, fn_out, nside, fwhm, lmax):
     if os.path.isfile(fn_out):
         print('Loading', fn_out)
         m = hp.read_map(fn_out, None, verbose=False)
     else:
         print('Reading', fn_in)
         m = hp.read_map(fn_in, None, nest=True, verbose=False)
         m = hp.ud_grade(m, nside, order_in='nest', order_out='ring')
         m = hp.smoothing(m,
                          fwhm=fwhm,
                          lmax=lmax,
                          iter=0,
                          verbose=False)
         print('Writing', fn_out)
         hp.write_map(fn_out, m, coord='g')
     return m
Example #31
def apply_window(parameters,ar,window_badval,window_unseen,smoothing_radius):

        
    window = sp.ones_like(ar)
    if window_badval:
        window[ar == parameters.badval] = 0
    if window_unseen:
        window[ar == parameters.unseen] = 0            
            
        
    window = hp.smoothing(window,fwhm=smoothing_radius,verbose=False)
    
    ar_windowed = window*ar
    
    
    return ar_windowed, window
Example #32
    def update_mask(self,rand_sigma_arcmin=2.,rand_threshold=1e-3):
        if rand_sigma_arcmin>1.e-3:
            if self.verbose: print( "Smoothing...")
            if self.curved:
                smap = hp.smoothing(self.rand_map,sigma=rand_sigma_arcmin*np.pi/180./60.)
            else:
                smap = enmap.smooth_gauss(self.rand_map,rand_sigma_arcmin*np.pi/180./60.)
            if self.verbose: print( "Done smoothing...")
        else:
            smap = self.rand_map

        self.mask = np.zeros(self.shape)
        self.mask[smap>rand_threshold] = 1
        if not self.curved:
            self.mask = enmap.enmap(self.mask,self.wcs)
        self._counts()
Example #33
 def test_smoothing_masked(self):
     smoothed = hp.smoothing(self.map1, fwhm=np.radians(10), lmax=self.lmax)
     smoothed_f90 = hp.ma(
         hp.read_map(
             os.path.join(
                 self.path,
                 "data",
                 "wmap_band_iqumap_r9_7yr_W_v4_udgraded32_masked_smoothed10deg_fortran.fits",
             ),
             (0, 1, 2),
         ))
     # fortran does not restore the mask
     smoothed_f90.mask = smoothed.mask
     np.testing.assert_array_almost_equal(smoothed.filled(),
                                          smoothed_f90.filled(),
                                          decimal=6)
Example #34
    def run(self):
        print 'Starting Thread-%s' % self.threadID
        self.smoothed_map = hp.smoothing(self.input_map,
                                         fwhm=np.radians(self.smooth_fwhm))
        self.smoothed_degraded_map = hp.ud_grade(self.smoothed_map,
                                                 nside_out=self.nside_out,
                                                 order_in='RING',
                                                 pess=True)
        self.write_smoothed_map('Smoothed_' +
                                setup.frequencies[self.threadID] + 'GHz' +
                                '.fits')
        self.write_smoothed_and_degraded_map('Smoothed_' +
                                             setup.frequencies[self.threadID] +
                                             'GHz_' + str(self.nside_out) +
                                             'N' + '.fits')

        print 'Thread-%s Finished' % self.threadID
Example #35
def integrate_surface_flux(flux_map,
                           r,
                           smooth=False,
                           ret_map=False,
                           **smooth_kwargs):
    '''
    Integrates a healpix surface flux to compute the total
    net flux out of the sphere.
    r is the radius of the sphere in meters
    '''
    import healpy as hp
    from scipy.integrate import trapz
    from seren3.array import SimArray

    raise Exception("Function deprecated")

    if not ((isinstance(flux_map, SimArray) or isinstance(r, SimArray))):
        raise Exception("Must pass SimArrays")

    # Compute theta/phi
    npix = len(flux_map)
    nside = hp.npix2nside(npix)
    # theta, phi = hp.pix2ang(nside, range(npix))
    theta, phi = hp.pix2ang(nside, range(npix))
    r = r.in_units("m")  # make sure r is in meters

    # Smoothing?
    if smooth:
        flux_map = hp.smoothing(flux_map, **smooth_kwargs)

    # Compute the integral
    integrand = np.zeros(len(theta))

    for i in range(len(theta)):
        th, ph = (theta[i], phi[i])
        unit_r = unit_vec_r(th, ph)
        integrand[i] = r**2 * np.sin(th)\
         * np.dot(flux_map[i], unit_r)\
         * heaviside(np.dot(flux_map[i], unit_r))

    integrand = integrand[:, None] + np.zeros(
        len(phi))  # 2D over theta and phi

    I = trapz(trapz(integrand, phi), theta)

    return SimArray(I, "s**-1")
Example #36
 def test_smoothing_masked(self):
     smoothed = hp.smoothing(self.map1, fwhm=np.radians(10), lmax=self.lmax)
     smoothed_f90 = hp.ma(
         hp.read_map(
             os.path.join(
                 self.path,
                 "data",
                 "wmap_band_iqumap_r9_7yr_W_v4_udgraded32_masked_smoothed10deg_fortran.fits",
             ),
             (0, 1, 2),
         )
     )
     # fortran does not restore the mask
     smoothed_f90.mask = smoothed.mask
     np.testing.assert_array_almost_equal(
         smoothed.filled(), smoothed_f90.filled(), decimal=6
     )
Example #37
def test_k2g2k_rand():
    """Test that transformation of k->g->k recovers the input convergence map."""

    nside = 16
    npix = hp.nside2npix(nside)
    lmax = 32

    k = np.random.standard_normal(npix)
    k = hp.smoothing(k, lmax=lmax, verbose=False)
    k = hp.remove_monopole(k)
    k = hp.remove_dipole(k)

    g1, g2 = transformations.conv2shear(k, lmax)

    k_recov = transformations.shear2conv(g1, g2, lmax)

    np.testing.assert_almost_equal(k, k_recov, decimal=3)
Example #38
    def smoother(self, map_array):
        """Function to smooth an array of N (T, Q, U) maps with N beams in
        units of arcmin.

        :param map_array: array of N (T, Q, U) maps to smooth
        :type map_array: numpy.ndarray
        
        """
        if not self.Use_Smoothing:
            return map_array
        elif self.Use_Smoothing:
            return np.array([
                hp.smoothing(m, fwhm=np.pi / 180. * b / 60., verbose=False)
                for (m, b) in zip(map_array, self.Beams)
            ])
        else:
            print("Please set 'Use_Smoothing' in Instrument object.")
            sys.exit(1)
Example #39
def smooth(hpxmap, badval=hp.UNSEEN, sigma=None):
    """ Smooth a healpix map

    Parameters
    ----------
    hpxmap : full healpix map
    badval : bad value for masking
    sigma  : smoothing kernel (deg)

    Returns
    -------
    smooth : smoothed map
    """
    check_hpxmap(hpxmap, None, None)
    hpxmap = masked_array(hpxmap, badval)
    hpxmap.fill_value = np.ma.median(hpxmap)
    smooth = hp.smoothing(hpxmap, sigma=np.radians(sigma), verbose=False)
    return np.ma.array(smooth, mask=hpxmap.mask)
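
A minimal usage sketch (not part of the original example), assuming the smooth function above and its module helpers check_hpxmap and masked_array are in scope; the map with unobserved pixels is made up for illustration:
import numpy as np
import healpy as hp

nside = 64
hpxmap = np.random.standard_normal(hp.nside2npix(nside))
hpxmap[:1000] = hp.UNSEEN              # hypothetical unobserved pixels

smoothed = smooth(hpxmap, sigma=1.0)   # 1 degree Gaussian sigma
print(smoothed.mean())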
Example #40
def get_sdss_mask(sample='LOWZ', nside=512, fwhm1=5.0, fwhm2=5.0, quick=True):
    # FWHM1 and FWHM2 are in degrees.
    savename = datadir+'sdss_mask_'+sample+'_%i_%i.pkl'%(fwhm1,fwhm2)
    if quick: return pickle.load(open(savename,'r'))
    nmap = np.zeros(hp.nside2npix(nside))
    for ind in ['1','2']:
        for reg in ['North','South']:
            filename = datadir+'random'+ind+'_DR10v8_'+sample+'_'+reg+'.fits'
            data = fits.open(filename)[1].data
            nmap += ra_dec_to_hpix(data.RA, data.DEC, nside=nside)
    mask = nmap>0
    mask = hp.smoothing(mask, fwhm=fwhm1*np.pi/180., verbose=False)
    mask = mask>0.05
    mask = hp.sphtfunc.smoothing(mask, fwhm=fwhm2*np.pi/180., verbose=False)
    mask -= mask.min()
    mask /= mask.max()
    pickle.dump(mask, open(savename,'w'))
    return mask
Example #41
def deconv(maps, beam_in, beam_out, lmax):
    '''
    Beams are in units of arcminutes.
    The input maps are copied; the reconvolved copy is returned.
    '''

    _maps = np.copy(maps)
    for j in range(1, 3):  ### only for Q\U;
        _maps[j] = hp.sphtfunc.decovlving(_maps[j],
                                          fwhm=beam_in / 60 / 180 * np.pi,
                                          lmax=lmax,
                                          verbose=False)
        _maps[j] = hp.smoothing(_maps[j],
                                fwhm=beam_out / 60 / 180 * np.pi,
                                lmax=lmax,
                                verbose=False)
    return _maps
Example #42
def ilc_map_from_weights(maps,
                         weights,
                         regions_map=None,
                         sigma=1.5 * np.pi / 180,
                         return_weights_map=False):
    """
    Construct an ILC map from a set of raw temperature maps, a set of weights
    per region, and a map of regions.

    `maps` must have shape (Nfreq, Npix). `weights` must have shape
    (Nregions, Nfreq). `regions_map` must have shape (Npix,) and each pixel
    should contain the integer identifier to which region the pixel is
    assigned.

    If `regions_map` is None, simply performs the linear combination of the
    maps according to the weights. #TODO NEEDS TO BE IMPLEMENTED

    `sigma` is the smoothing factor to reduce edge effects. It is applied to
    each region's weight map before multiplying into the raw maps. See
    Bennett 2003 or Hinshaw 2007 for details. `sigma` is the kernel radius
    (standard deviation) in radians.

    If `return_weights_map` is True, then also returns the summed weight map
    as a diagnostic tool.

    All of the maps must be in RING format!
    """
    Thats = np.dot(weights, maps)
    That = np.zeros(Thats.shape[1])
    weights_map = np.zeros(Thats.shape[1])
    for i in range(len(Thats)):
        m = np.zeros_like(That)
        m[regions_map == i] = 1
        if sigma is not None:
            # hp.smoothing does not preserve the mean, so add it back in
            mbar = m.mean()
            m = hp.smoothing(m, sigma=sigma, verbose=False) + mbar
        That += Thats[i] * m
        weights_map += m
    That = That / weights_map
    if return_weights_map:
        return That, weights_map
    else:
        return That
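
A minimal usage sketch (not part of the original example), assuming the ilc_map_from_weights function above is in scope; the frequency maps, weights, and two-region map are made up for illustration:
import numpy as np
import healpy as hp

nside = 32
npix = hp.nside2npix(nside)

maps = np.random.standard_normal((3, npix))        # hypothetical raw maps at 3 frequencies (RING)
weights = np.array([[0.2, 0.3, 0.5],
                    [0.5, 0.3, 0.2]])               # hypothetical weights for 2 regions
theta, _ = hp.pix2ang(nside, np.arange(npix))
regions_map = (theta > np.pi / 2).astype(int)       # region 0 = north, region 1 = south

ilc, wmap = ilc_map_from_weights(maps, weights, regions_map, return_weights_map=True)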
Example #43
def hpmollview(map1,
               unit,
               figax,
               smooth=False,
               cmap=plt.cm.bwr,
               galaxy=False,
               **kw):
    '''
    Example:
    
    kw  = dict(min=-0.5, max=.5, cmap=dataviz.mycolor(), rot=-85, title='')
    fig, ax = plt.subplots(nrows=2, figsize=(7, 7))
    plt.subplots_adjust(hspace=0.05)
    dataviz.hpmollview(d0, r'$\delta_{\rm ELG}$', [fig,ax[0]], **kw)
    dataviz.hpmollview(d1, r'$\delta_{\rm LRG}$', [fig,ax[1]], **kw)
    plt.savefig('delta_dr8.png', bbox_inches='tight', dpi=300)
    
    '''
    fig, ax = figax
    cmap.set_over(cmap(1.0))
    cmap.set_under('w')
    cmap.set_bad('white')

    fig.sca(ax)

    if smooth:
        map1 = hp.smoothing(map1, fwhm=np.deg2rad(0.5))

    hp.mollview(map1, hold=True, unit=unit, cmap=cmap, **kw)
    hp.graticule(dpar=45,
                 dmer=45,
                 coord='C',
                 verbose=False,
                 alpha=0.5,
                 color='grey')

    # galactic plane
    if galaxy:
        r = hp.Rotator(coord=['G', 'C'])
        theta_gal, phi_gal = np.zeros(1000) + np.pi / 2, np.linspace(
            0, 360, 1000)
        theta_cl, phi_cl = r(theta_gal, phi_gal)
        hp.projplot(theta_cl, phi_cl, 'r.', alpha=1.0, markersize=1.)
Example #44
    def MapToEqNorm(self, eqmap, smoothing):
        print 'wtf', eqmap, np.sum(eqmap)
        if NSIDE != 32:
            if DEBUG:
                print PF("warn"), "rebinning EqNormMap to ", NSIDE
            eqmap = hp.pixelfunc.ud_grade(eqmap, NSIDE)

        if not np.sum(eqmap):
            return np.zeros(len(eqmap))
        if smoothing:
            eqmap = hp.smoothing(eqmap,
                                 sigma=np.deg2rad(smoothing),
                                 verbose=False,
                                 lmax=64)

        posmap = eqmap - 1. * np.min(eqmap)
        eqmap[eqmap < 0.] = 0.
        if not np.sum(eqmap):
            eqmap = np.ones(len(eqmap))
        return eqmap / float(np.sum(eqmap)), posmap / float(np.sum(posmap))
Example #45
    def calc_residual_cmb_amp_map(self, nmc, fwhm=None):
        """ Method to calculate the stacked residual map of a set of Nmc
        cleaned CMB maps, removing the true CMB.

        Parameters
        ----------
        nmc: int
            Number of MC realizations to use.

        Returns
        -------
        ndarray
            Array containing map of residuals.
        """
        cmb = self.calc_stacked_amp_map('cmb', nmc)
        if fwhm is not None:
            cmb_true = hp.smoothing(self.cmb_true,
                                    pol=True,
                                    fwhm=np.pi / 180. * fwhm / 60.)
        return cmb - cmb_true[1:]
Example #46
def skyhist(posteriorsamples='posterior_samples.dat', skyres=2.):
    import healpy
    if type(posteriorsamples) is str:
        posteriorsamples = loadposteriorsamples(posteriorsamples)
    baseres = (skyres /
               4.) * np.pi / 180.  # require this resolution from healpix grid
    nside = 2**int(min(10, np.ceil(
        np.log2(healpy.nside2resol(1) /
                baseres))))  # don't go past 2**10 (12.6 million pixels)
    npix = healpy.nside2npix(nside)
    p = healpy.ang2pix(
        nside, posteriorsamples[:, 2],
        posteriorsamples[:, 1])  # convert (theta, phi) to healpix numbers
    # n = np.bincount(p, minlength=npix) # bin the samples
    n = np.zeros(npix)
    n[:max(p) + 1] = np.bincount(
        p
    )  # support old version of numpy that does not have minlength argument
    m = healpy.smoothing(n, sigma=skyres * np.pi / 180.)  # smoothed map
    return m
Example #47
def add_beam(name1,name2,nameout):
    f1=h5py.File(name1)
    f2=h5py.File(name2)
    map1=f1['map'].value
    map2=f2['map'].value
    f1.close()
    f2.close()
    fr=map1.shape[0]
    def fwhm(freq):
        D=40
        return 1.22*1440.*0.21/freq/D
    freq=np.linspace(700,800,fr,endpoint=True)
    Fwhm = list(map(fwhm, freq))
    map0=map1[:,0,:]+map2[:,0,:]
    del map1
    del map2
    for i in range(fr):
        map0[i]=hp.smoothing(map0[i],fwhm=Fwhm[i])
    f=h5py.File(nameout,mode='w')
    f.create_dataset(name='map',data=map0)
    f.close()
Example #48
def surv_diff(surveydict,ss1=1,ss2=2,nside=128,freq=70,mask_ps=True,fwhm=10.0):
    """
    function to make differences among 5 surveys from andreas interactive destriper/binner. uses common masks smooth to 10 degrees
    fwhm is in degrees, if zero or negative don't smooth at all, default 10 degrees
    """
    m1=hp.ma(np.array(surveydict[ss1]))
    m2=hp.ma(np.array(surveydict[ss2])) 
    totalmask=m1.mask|m2.mask
    if mask_ps==True:
        psmask = np.logical_not(np.floor(hp.ud_grade(hp.read_map(glob('/project/projectdirs/planck/data/mission/DPC_maps/dx8/lfi/DX8_MASKs/' + 'mask_ps_%dGHz_*.fits' % freq)[0]), nside,order_out='NEST')))
        totalmask=m1.mask | m2.mask | psmask
    dif=(m1-m2)/2.
    dif.mask=totalmask
    dif.mask |= np.isnan(dif)
    dif=hp.ud_grade(dif,nside,order_in='NEST',order_out='RING')
    if fwhm>0:
        difsm=hp.ma(hp.smoothing(dif.filled(),fwhm*np.pi/180.))
        difsm.mask=dif.mask
    if fwhm<=0:
        difsm=dif
    return difsm
Example #49
def fname2real_alms(fname, l_max):
    the_map = np.genfromtxt(fname)
    the_map = hp.pixelfunc.ud_grade(the_map, 128, pess=True, power=1.)
    the_map /= the_map.sum()
    the_map = hp.smoothing(the_map, sigma=np.deg2rad(2), verbose=False)
    the_map -= 1. * np.min(the_map)
    the_map /= the_map.sum()

    #	the_map[hp.pixelfunc.ang2pix(128, np.pi/2 - 0.222, 3.26)] += 0.1 # virgo cluster in equatorial coordinates
    #	the_map[hp.pixelfunc.ang2pix(128, 1.23, 5.4)] += 0.1 # cen a in galactic coordinates

    cplx_alms = hp.sphtfunc.map2alm(the_map, l_max)

    cplx_alms /= cplx_alms[Alm.getidx(l_max, 0, 0)]
    cplx_alms /= np.sqrt(4 * np.pi)

    #	hp.visufunc.mollview(hp.sphtfunc.alm2map(cplx_alms, 128))
    #	plt.show()

    real_alms = cplx2real_alms(cplx_alms)
    return real_alms
Example #50
def do_harmonic_analysis(parameters,vrmap,bincenter):


    radius_largest_hole = find_largest_hole(parameters,vrmap)
    smoothing_fwhm_Mpch = parameters.smoothing_radius_fwhm # Mpc/h
    # bincenter is currently calculated as the mean of the distances to the halos in the bin
    smoothing_fwhm = smoothing_fwhm_Mpch/bincenter 
    

    
    
    lmax = int(sp.floor(sp.pi/smoothing_fwhm))
    print "The mean distance to the halos in the bin is", bincenter,"Mpc/h"
    print "The radius of the largest hole is", radius_largest_hole*bincenter, "Mpc/h"     
    print "The smoothing fwhm is", smoothing_fwhm_Mpch    
    print "The smoothing fwhm is", smoothing_fwhm, "rad."
    print "This corresponds to l = pi/%s = %s" \
        %(smoothing_fwhm,sp.pi/smoothing_fwhm)
    print "The pixel size is", pixelsize_in_radians(parameters), "rad."
    
            
    empty_pixels_total = len(vrmap[(vrmap == parameters.unseen)\
                                 | (vrmap == parameters.badval)])
    empty_pixels_fraction = empty_pixels_total/hp.nside2npix(parameters.nside)
    print "empty_pixels_fraction = ", empty_pixels_fraction
    
    
    ar_dummy, window = apply_window(parameters,vrmap,1,1,smoothing_fwhm)
    ar_masked, mask = apply_mask(parameters,vrmap,1,1)
    ar_final = hp.smoothing(ar_masked*window,fwhm=smoothing_fwhm,verbose=False)
    
    pixels_total = hp.nside2npix(parameters.nside)
    factor = pixels_total/(pixels_total-empty_pixels_total)
    print "Note: I am correcting the pseudo and the master coefficients by factor."
    print "factor = ", factor    
    
    ls, pseudo_cls = get_pseudo_powerspectrum(ar_final,lmax)    
    ls, master_cls = get_MASTER_corrected_powerspectrum(pseudo_cls,window,lmax)
        
    return ls,master_cls
Example #51
def smooth_and_norm_healpy_map(logl_map, smooth_sigma=None):
    """
    Takes a lnLLH map, converts it to normal space, applies gaussian smoothing
    and normalizes it, so that the integral over the unit sphere is 1.

    Parameters
    ----------
    logl_map : array-like
        healpy map array with logLLH values.
    smooth_sigma : float or None, optional
        Width in sigma of gaussian smoothing kernel, must be ``>0.``.
        (default: None)

    Returns
    -------
    pdf_map : array-like
        Smoothed and normalized spatial PDF map.
    """
    if smooth_sigma is not None and smooth_sigma < 0.:
        raise ValueError("`smooth_sigma` must be >= 0.")

    # Normalize to sane values in [*, 0] for conversion llh = exp(logllh)
    pdf_map = np.exp(logl_map - np.amax(logl_map))

    # Smooth with a gaussian kernel
    pdf_map = hp.smoothing(map_in=pdf_map, sigma=smooth_sigma, verbose=False)
    # Healpy smoothing may produce numerical errors, so fix them after smoothing
    pdf_map[pdf_map < 0.] = 0.

    # Normalize to PDF, integral is the sum over discrete pixels here
    NSIDE = hp.get_nside(logl_map)
    dA = hp.nside2pixarea(NSIDE)
    norm = dA * np.sum(pdf_map)
    if norm > 0.:
        pdf_map = pdf_map / norm
        assert np.isclose(np.sum(pdf_map) * dA, 1.)
    else:
        print("  !! Map norm is < 0. Returning unnormed map instead !!")

    return pdf_map
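
A minimal usage sketch (not part of the original example), assuming the smooth_and_norm_healpy_map function above is in scope; the toy logLLH map built around a made-up source position is for illustration only:
import numpy as np
import healpy as hp

nside = 32
npix = hp.nside2npix(nside)
vecs = np.array(hp.pix2vec(nside, np.arange(npix)))
src = hp.ang2vec(np.radians(60.0), np.radians(45.0))          # hypothetical source position
dist = np.arccos(np.clip(np.dot(src, vecs), -1.0, 1.0))       # angular distance to each pixel
logl_map = -0.5 * dist ** 2 / np.radians(5.0) ** 2            # toy Gaussian logLLH

pdf_map = smooth_and_norm_healpy_map(logl_map, smooth_sigma=np.radians(2.0))
print(np.sum(pdf_map) * hp.nside2pixarea(nside))              # should be close to 1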
Example #52
def many_spectra():
    import healpy as hp
    chunks = ['f','g','h']
    thresh = {'f':1.0, 'g':1.0, 'h':1.8}
    fwhm_deg = {'f':7.0, 'g':7.0, 'h':5.0}
    final_fwhm_deg = {'f':7.0, 'g':7.0, 'h':7.0}
    cl={}
    pl.figure(1)
    pl.clf()
    xlim = [10,700]
    ylim = [1e-7, 3e-4]
    for k in chunks:
        print k
        nmap = get_hpix(quick=True,name=k)
        mask = mask_from_map(nmap, fwhm_deg=fwhm_deg[k], final_fwhm_deg=final_fwhm_deg[k], thresh=thresh[k])
#        mean_nmap = np.mean(nmap[np.where(mask!=0)[0]])
        mean_nmap = np.mean(nmap[np.where(mask>0.5)[0]])
        delta = (nmap-mean_nmap)/mean_nmap
        hp.mollview(hp.smoothing(delta*mask,fwhm=1.*np.pi/180.),title=k,min=-0.3,max=0.3)
        continue
        this_cl = hp.anafast(delta*mask)/np.mean(mask**2.)
        l=np.arange(len(this_cl))
        # smooth in l*cl
        lcl = this_cl*l
        sm_lcl = np.zeros_like(lcl)
        rbox = 15
        for i in l:
            imin = np.max([0,i-rbox])
            imax = np.min([np.max(l), i+rbox])
            sm_lcl[i]=np.mean(lcl[imin:imax])
#        cl[k:this_cl]
#        pl.loglog(l,this_cl,linewidth=2)
        pl.loglog(l,sm_lcl,linewidth=2)
        pdb.set_trace()
    pl.xlim(xlim)
#    pl.ylim(ylim)
    pl.legend(chunks)
    pl.xlabel('L')
    pl.ylabel('L*CL')
Example #53
def reconvolve(maps, fwhms, ref_fwhm, verbose=False):
    if verbose:
        print('Reconvolution to common FWHM')
    sig_conv = np.sqrt(ref_fwhm**2 - fwhms**2)
    maps_out = np.zeros_like(maps)
    for i in range(len(fwhms)):
        if sig_conv[i] == 0:
            if verbose:
                print(
                    'Map {0:} fwhmin={1:6.3f} fwhmout={2:6.3f} => We do not reconvolve'
                    .format(i, fwhms[i], ref_fwhm))
            maps_out[i, :] = maps[i, :]
        else:
            if verbose:
                print(
                    'Map {0:} fwhmin={1:6.3f} fwhmout={2:6.3f} => We reconvolve with {3:6.3f}'
                    .format(i, fwhms[i], ref_fwhm, sig_conv[i]))
            maps_out[i, :] = hp.smoothing(maps[i, :],
                                          fwhm=np.deg2rad(sig_conv[i]),
                                          pol=True,
                                          verbose=False)
    return maps_out
Example #54
    def updateCoverage(self, time=None):
        time = time or datetime.datetime.utcnow()
        night = self.favor2.get_night(time)
        images = self.favor2.query(
            "SELECT * FROM images WHERE type='avg' AND night=%s AND time>%s ORDER BY time",
            (night, self.latest_time),
            simplify=False)

        cmap = np.zeros_like(self.coverage)

        for i, image in enumerate(images):
            #print i,len(images)
            ra, dec = image['ra0'], image['dec0']
            sr0 = float(image['keywords']['PIXSCALE']) * 0.5 * min(
                image['width'], image['height'])

            cmap += self.snapshot_fn(ra, dec, sr0, smooth=False)
            self.latest_time = image['time']

        if images:
            self.coverage += hp.smoothing(cmap,
                                          fwhm=np.deg2rad(self.r0),
                                          verbose=False)
            self.storeCoverage()
Example #55
 def _simulate_sss(self, key1, key2, counter1, counter2, weather, comm):
     """
     Create a map of the ground signal to observe with all detectors
     """
     # FIXME: we could store the map in node-shared memory
     #
     # Surface temperature is made available but not used yet
     # to scale the SSS
     if comm is None or comm.rank == 0:
         # Only the root process loads or simulates the map
         temperature = weather.surface_temperature
         if self._path:
             sssmap = hp.read_map(self._path, verbose=False)
         else:
             npix = 12 * self._nside**2
             sssmap = random(
                 npix,
                 key=(key1, key2),
                 counter=(counter1, counter2),
                 sampler="gaussian",
             )
             sssmap = np.array(sssmap, dtype=np.float)
             sssmap = hp.smoothing(sssmap,
                                   fwhm=np.radians(self._fwhm),
                                   lmax=self._lmax)
             sssmap /= np.std(sssmap)
             lon, lat = hp.pix2ang(self._nside,
                                   np.arange(npix, dtype=np.int),
                                   lonlat=True)
             scale = self._scale * (np.abs(lat) / 90 + 0.5)**self._power
             sssmap *= scale
     else:
         sssmap = None
     if comm is not None:
         sssmap = comm.bcast(sssmap)
     return sssmap
Example #56
def get_mask(fname_hits):
    fname_mask = "mask_" + os.path.basename(fname_hits)
    if os.path.isfile(fname_mask):
        mask = hp.read_map(fname_mask)
    else:
        hits = hp.read_map(fname_hits)
        good = hits > 0
        ngood = np.sum(good)
        sorted_hits = np.sort(hits[good])

        hit_lim = sorted_hits[np.int(ngood * .01)]
        mask = hits > hit_lim

        pix = np.arange(hits.size)
        lon, lat = hp.pix2ang(nside, pix, lonlat=True)
        lat_min = np.amin(lat[mask])
        lat_max = np.amax(lat[mask])

        mask = np.zeros(npix)
        tol = 10.0  # degrees
        mask[np.logical_and(lat_min + tol < lat, lat < lat_max - tol)] = 1
        mask = hp.smoothing(mask, fwhm=np.radians(3), lmax=2048)
        hp.write_map(fname_mask, mask)
    return mask
Example #57
import healpy as hp
import numpy as np
from pycsphere.mesh import HEALPixPointSet
from pycsphere.linop import DiscreteSphericalLaplacian

nside = 16
rng = np.random.default_rng(0)
map_in = rng.binomial(n=1, p=0.005, size=hp.nside2npix(nside=nside))
map_in = hp.smoothing(map_in, sigma=10 * np.pi / 180)
laplacian = DiscreteSphericalLaplacian(point_set=HEALPixPointSet(nside=nside))
map_d2 = laplacian(map_in)
hp.mollview(map=map_in, title='Input Map', cmap='viridis')
hp.mollview(map=np.abs(map_d2),
            title='Magnitude of Laplacian Map',
            cmap='viridis')
Example #58
def main_hp(args):
    '''Main function for simple Healpy estimation.'''
    print('>> Simple fsky estimation.')
    bbs = np.loadtxt(args.fb, dtype='int32')
    lmax = bbs[-1, -1]

    print('>> Loading mask 1: {}'.format(args.mask1))
    mask1 = hp.read_map(args.mask1)
    if args.fwhm1 != -1:
        fwhm1 = args.fwhm1 * np.pi / 180  # get fwhm in radians
        print('>> Smoothing mask1, FWHM: {0:f} degrees'.format(args.fwhm1))
        mask1 = hp.smoothing(mask1, fwhm=fwhm1, pol=False)

    if args.alm1 != '':
        print('>> Loading alm 1: {}'.format(args.alm1))
        alm1 = hp.read_alm(args.alm1)
    elif args.map1 != '':
        print('>> Loading map 1: {}'.format(args.map1))
        map1 = hp.read_map(args.map1)
        if args.eccl[0] == '0':
            print(':: Multiplying mask on the map ::')
            map1 = map1 * mask1
        alm1 = hp.map2alm(map1, lmax=lmax, pol=False)
    else:
        sys.exit('No input map or alm 1.')

    if args.tp == 'cross':  # cross correlation
        print('>> Loading mask 2: {}'.format(args.mask2))
        mask2 = hp.read_map(args.mask2)
        if args.fwhm2 != -1:
            fwhm2 = args.fwhm2 * np.pi / 180  # get fwhm in radians
            print('>> Smoothing mask2, FWHM: {0:f} degrees'.format(args.fwhm2))
            mask2 = hp.smoothing(mask2, fwhm=fwhm2, pol=False)

        if args.alm2 != '':
            print('>> Loading alm 2: {}'.format(args.alm2))
            alm2 = hp.read_alm(args.alm2)
        elif args.map2 != '':
            print('>> Loading map 2: {}'.format(args.map2))
            map2 = hp.read_map(args.map2)
            if args.eccl[1] == '0':
                print(':: Multiplying mask on the map ::')
                map2 = map2 * mask2
            alm2 = hp.map2alm(map2, lmax=lmax, pol=False)
        else:
            sys.exit('No input map or alm 2.')

    elif args.tp == 'auto':
        alm2 = None

    else:
        sys.exit('>> Wrong correlation type!')

    cl = hp.alm2cl(alm1, alms2=alm2, lmax_out=lmax)
    cl = cl / args.fsky

    data = bin_cl(cl, bbs)
    header = 'ell   cl   xerr   yerr'
    fn = args.focl
    np.savetxt(fn, data, header=header)
    print(':: Written to: {}'.format(fn))