Example #1
def rotate_map_inv(Imag,
                   a1,
                   a2,
                   a3,
                   X=True,
                   Y=False,
                   ZYX=False,
                   deg=False,
                   nested=False):
    npix = np.shape(Imag)[0]
    nside = npix2nside(npix)
    indices = np.arange(0, npix)
    ang_coord = pix2vec(nside, indices, nested)
    ang_coord_array = np.vstack((ang_coord[0], ang_coord[1], ang_coord[2]))
    eul = np.linalg.inv(
        euler_matrix_new(a1, a2, a3, X=X, Y=Y, ZYX=ZYX, deg=deg))
    new_coord = np.dot(eul, ang_coord_array)
    theta_arr, phi_arr = vec2ang(new_coord.T)
    neigh, weigh = get_interp_weights(nside,
                                      theta_arr,
                                      phi=phi_arr,
                                      nest=nested,
                                      lonlat=False)
    thr_val = 1e-8
    weigh[np.where(np.abs(weigh) < thr_val)] = 0
    weigh = weigh / np.sum(weigh, axis=0)
    rotIm = np.zeros_like(Imag)
    for k in range(neigh.shape[0]):
        rotIm = rotIm + weigh[k] * Imag[neigh[k]]
    return rotIm
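A minimal usage sketch (not part of the original snippet): it assumes rotate_map_inv above is defined in a module that already imports numpy and the healpy helpers it calls (npix2nside, pix2vec, vec2ang, get_interp_weights, euler_matrix_new).

import numpy as np
import healpy as hp

nside = 32
ring_map = np.arange(hp.nside2npix(nside), dtype=float)

# Euler angles in radians, classical-mechanics (X) convention.
rotated = rotate_map_inv(ring_map, 0.3, 0.5, 0.1, X=True, deg=False, nested=False)
assert rotated.shape == ring_map.shape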
Example #2
def get_faces_par(im, nested=False, num_cores=None):
    npix = np.shape(im)[0]
    assert npix % 12 == 0

    nside = npix2nside(npix)

    if not nested:
        new_im = reorder(im, r2n=True)
    else:
        new_im = im

    # index = np.array([xyf2pix(nside, x, range(nside), 0, True)
    #                     for x in range(nside-1, -1, -1)])
    index = make_index_par(nside, num_cores)

    CubeFace = sm.empty((12, nside, nside))
    with sharedmem_pool(num_cores, numexpr=False) as pool:
        # for face in range(12):
        #     CubeFace[face] = np.resize(new_im[index + nside**2 * face],
        #                                (nside, nside))
        def work(i):
            CubeFace[i] = np.resize(new_im[index + nside**2 * i],
                                    (nside, nside))

        pool.map(work, range(12))

    return np.array(CubeFace)
Example #3
def set_faces_par(CubeFace, nested=False, num_cores=None):
    npix = np.size(CubeFace)
    assert npix % 12 == 0

    nside = npix2nside(npix)

    index = make_index_par(nside, num_cores)
    # index = np.array([xyf2pix(nside, x, range(nside), 0, True)
    #                     for x in range(nside-1, -1, -1)])

    imag = sm.empty((npix))
    with sharedmem_pool(num_cores, numexpr=False) as pool:

        def work(i):
            # Each worker fills exactly one of the 12 base faces.
            imag[index + nside**2 * i] = np.resize(CubeFace[i],
                                                   (nside, nside))

        pool.map(work, range(12))

    imag = np.array(imag)

    if not nested:
        new_im = reorder(imag, n2r=True)
    else:
        new_im = imag

    return new_im
Example #4
def smoothing(m, fwhm, nest=True):
    if fwhm <= 0:
        return m
    nside = hpf.npix2nside(len(m))
    if nest:
        pix = np.arange(hpf.nside2npix(nside))
        # View the NESTED map in RING order, smooth, then reorder back to NESTED.
        ring_view = m[hpf.ring2nest(nside, pix)]
        return hps.smoothing(ring_view, fwhm=fwhm)[hpf.nest2ring(nside, pix)]
    else:
        return hps.smoothing(m, fwhm=fwhm)
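A hedged usage sketch for the wrapper above; the hpf/hps aliases for healpy.pixelfunc and healpy.sphtfunc are assumptions about the surrounding module.

import numpy as np
import healpy.pixelfunc as hpf
import healpy.sphtfunc as hps

nside = 32
nest_map = np.random.standard_normal(hpf.nside2npix(nside))

# fwhm is in radians; nest=True converts to RING ordering internally and back.
smoothed = smoothing(nest_map, fwhm=np.radians(2.0), nest=True)
assert smoothed.shape == nest_map.shape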
Example #5
 def projmap(self,map,nest=False,**kwds):
     nside = pixelfunc.npix2nside(pixelfunc.get_map_size(map))
     f = lambda x,y,z: pixelfunc.vec2pix(nside,x,y,z,nest=nest)
     xsize = kwds.pop('xsize',200)
     ysize = kwds.pop('ysize',None)
     reso = kwds.pop('reso',1.5)
     return super(HpxGnomonicAxes,self).projmap(map,f,xsize=xsize,
                                         ysize=ysize,reso=reso,**kwds)
Example #6
def ud_grade(m, nside, nest=True):
    if nside != hpf.npix2nside(len(m)):
        if nest:
            order_in = 'NESTED'
        else:
            order_in = 'RING'

        bad_mask = (np.isnan(m) | np.isinf(m) | (m == 0))
        if bad_mask.all():
            return np.zeros(hpf.nside2npix(nside)) * np.nan
        bad_mask = hpf.ud_grade(bad_mask.astype('float'), nside, order_in=order_in, pess=True) > 0

        result = hpf.ud_grade(m, nside, order_in=order_in, pess=True)
        result[bad_mask] = np.nan
        return result
    return m
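A small check of the NaN handling in the wrapper above (a sketch, assuming the hpf alias for healpy.pixelfunc used by the snippet):

import numpy as np
import healpy.pixelfunc as hpf

m = np.random.standard_normal(hpf.nside2npix(64))
m[:100] = np.nan                      # a few bad pixels near the pole

# The wrapper degrades a bad-pixel mask alongside the data and returns NaN
# for every output pixel that touches a bad input pixel.
degraded = ud_grade(m, 32, nest=False)
assert degraded.shape == (hpf.nside2npix(32),)
assert np.isnan(degraded).any() and np.isfinite(degraded).any()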
Example #7
def rotate_map(Imag,
               a1,
               a2,
               a3,
               X=True,
               Y=False,
               ZYX=False,
               deg=False,
               nested=False):
    # X :   rotation a1 around original Z
    #       rotation a2 around interm   X
    #       rotation a3 around final    Z
    #            DEFAULT,  classical mechanics convention

    #  Y :  rotation a1 around original Z
    #       rotation a2 around interm   Y
    #       rotation a3 around final    Z
    #            quantum mechanics convention (override X)

    #  ZYX :rotation a1 around original Z
    #       rotation a2 around interm   Y
    #       rotation a3 around final    X
    #            aeronautics convention (override X)
    #  * these last three keywords are obviously mutually exclusive *

    npix = np.shape(Imag)[0]
    nside = npix2nside(npix)
    indices = np.arange(0, npix)
    ang_coord = pix2vec(nside, indices, nested)
    ang_coord_array = np.vstack((ang_coord[0], ang_coord[1], ang_coord[2]))
    eul = euler_matrix_new(a1, a2, a3, X=X, Y=Y, ZYX=ZYX, deg=deg)
    new_coord = np.dot(eul, ang_coord_array)
    theta_arr, phi_arr = vec2ang(new_coord.T)
    neigh, weigh = get_interp_weights(nside,
                                      theta_arr,
                                      phi=phi_arr,
                                      nest=nested,
                                      lonlat=False)
    thr_val = 1e-8
    weigh[np.where(np.abs(weigh) < thr_val)] = 0
    weigh = weigh / np.sum(weigh, axis=0)
    rotIm = np.zeros_like(Imag)
    for k in range(neigh.shape[0]):
        rotIm = rotIm + weigh[k] * Imag[neigh[k]]
    return rotIm
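A round-trip sketch combining rotate_map with rotate_map_inv from Example #1 (both assumed to be in scope together with their healpy imports); the smoothing step is only there to keep interpolation error small.

import numpy as np
import healpy as hp

nside = 32
m = hp.smoothing(np.random.standard_normal(hp.nside2npix(nside)),
                 fwhm=np.radians(10.0))

a1, a2, a3 = 0.4, 0.2, -0.3            # Euler angles in radians, X convention
back = rotate_map_inv(rotate_map(m, a1, a2, a3), a1, a2, a3)

# Bilinear interpolation is lossy, so only approximate recovery is expected.
print('max |residual|:', np.max(np.abs(back - m)))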
Example #8
def prep_masks(surveys):
    import os

    root = os.environ['LBGCMB'] + '/mask/dat/'

    fpaths = {'Planck': 'Planck_MaskInt_UT78_256.fits',
              'SPT': 'SPT_150_hits_hpx.fits',
              'AdvACT': 'ACT_148_equ_hits_hpx.fits',
              'SO': 'mask_40pc.fits',
              'QSO': 'BOSS_dr12_qso.fits',
              'LSST': 'opsim_nvisits_g.fits',
              'HSC': 'hsc_256.txt',
              'AdvACT-SOU': 'ACT_148_south_hits_hpx.fits'}

    strategy = 'deep'

    fpaths['DESI-Y1'] = 'desi/%s/yr0_64.fits' % strategy
    fpaths['DESI-Y2'] = 'desi/%s/yr1_64.fits' % strategy
    fpaths['DESI-Y3'] = 'desi/%s/yr2_64.fits' % strategy
    fpaths['DESI-Y4'] = 'desi/%s/yr3_64.fits' % strategy
    fpaths['DESI-Y5'] = 'desi/%s/yr4_64.fits' % strategy

    masks = []

    for survey in surveys:
        fpath = root + fpaths[survey]
        masks.append(load_hp(fpath, view=False))

    ## DESI (desi_256.dat) defaults to nested. Convert to ringed.
    ## masks[2]  = hp.pixelfunc.reorder(masks[2], n2r=True)

    if 'SO' in surveys:
        ## Convert SO from galactic to ecliptic.
        masks[surveys.index('SO')] = rotate_map(masks[surveys.index('SO')],
                                                'C', 'G')

    if 'Planck' in surveys:
        ## Convert Planck from galactic to ecliptic.
        masks[surveys.index('Planck')] = rotate_map(
            masks[surveys.index('Planck')], 'C', 'G')

    ## Highest resolution available.
    nside = np.array([npix2nside(len(mask)) for mask in masks]).max()

    ##  Upgrade all masks to the nside with greatest resolution.
    masks = [hp.pixelfunc.ud_grade(mask, nside) for mask in masks]

    return masks, nside
Example #9
def load_hp(fpath='cmb/dat/mask/mask_40pc.fits', view=False, printit=False):
    extension = fpath.split('.')[-1]

    if extension == 'fits':
        from astropy.io import fits

        ##   Survey files of the form:  https://lambda.gsfc.nasa.gov/toolbox/footprint/configfile.cfm
        T = Table.read(fpath)

        ORDER = T.meta['ORDERING']
        NROWS = len(T)

        NSIDE = T.meta['NSIDE']
        NPIX = nside2npix(NSIDE)

        count = 0.0

        mask = []

        for i in np.arange(NROWS):
            mask += [element for element in T[i][0]]

        mask = np.array(mask)

    else:
        mask = np.loadtxt(fpath)

        NPIX = len(mask)
        NSIDE = npix2nside(NPIX)

    print('Loaded healpy mask: %s as %s (NPIX:  %d, \t NSIDE:  %d)' %
          (fpath.split('/')[-1], extension, NPIX, NSIDE))

    if printit:
        print('Min:  %s;  Max:  %s' % (mask.min(), mask.max()))

    ##  Element check.
    mask[mask > 1.0] = 1.0
    mask[mask < 0.0] = 0.0

    return mask
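The FITS branch needs a survey footprint file, but the plain-text branch can be exercised with a temporary file. A sketch under that assumption (the temporary file is illustrative, not part of the original):

import os
import tempfile
import numpy as np

nside = 8
values = np.random.uniform(-0.2, 1.2, 12 * nside**2)   # deliberately outside [0, 1]

tmp = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
tmp.close()
np.savetxt(tmp.name, values)

# The element check at the end of load_hp clips the mask into [0, 1].
mask = load_hp(tmp.name, printit=True)
assert mask.min() >= 0.0 and mask.max() <= 1.0
os.remove(tmp.name)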
Example #10
    def __init__(self, str_gamsrc, str_evtclass, htg_cube, lon_cntr=0, lat_cntr=0, char_coord='E', deg_radius=None, lst_validpix=None):

        self.htg = htg_cube
        self.eregion=EnergyLogRegion(self.htg.GetXaxis().GetNbins(), self.htg.GetXaxis().GetBinLowEdge(1), self.htg.GetXaxis().GetBinWidth(1))
        print('Energy region:', self.eregion.printR())
        self.cthregion=EnergyLogRegion(self.htg.GetYaxis().GetNbins(), self.htg.GetYaxis().GetBinLowEdge(1), self.htg.GetYaxis().GetBinWidth(1))
        print('cos(theta) region:', self.cthregion.printR())
        self.NPIX=self.htg.GetZaxis().GetNbins()
        self.NSIDE=hppf.npix2nside(self.NPIX)

        if lst_validpix is None:
            self.validpix = range(self.NPIX)
        else:
            self.validpix = lst_validpix

        self.name=str_gamsrc
        self.evtclass = str_evtclass
        self.loncntr=lon_cntr
        self.latcntr=lat_cntr
        self.coord=char_coord
        self.radius=deg_radius 
Example #11
def Smear(htg, nparr_dist, path_king, eregion, cthregion, deg_threshold=None):

    print(htg.Integral())
    NPIX = len(nparr_dist)
    NSIDE = hppf.npix2nside(NPIX)
    sa_pix = hppf.nside2pixarea(NSIDE) # Solid angle of a pixel [sr]
    htg_smr = htg.Clone("{0}_smeared".format(htg.GetName()))
    for hx in range(htg_smr.GetXaxis().GetNbins()+2):
        for hy in range(htg_smr.GetYaxis().GetNbins()+2):
            for hz in range(htg_smr.GetZaxis().GetNbins()+2):
                htg_smr.SetBinContent(hx, hy, hz, 0)
                htg_smr.SetBinError(hx, hy, hz, 0)
    FILE_KING = ROOT.TFile(path_king, 'READ')
    TP_HTG_KING = (FILE_KING.Get('htgKingN'), FILE_KING.Get('htgKingS'), FILE_KING.Get('htgKingG'))
    fc_King_annulus = ROOT.TF1("fc_King_annulus", "TMath::Sin(x)*[0]*(1.-1./[2])*pow(1.+(x/[1])**2/2./[2],-[2])/[1]**2", 0, pi)
    fc_King = ROOT.TF1("fc_King", "[0]*(1.-1./[2])*pow(1.+(x/[1])**2/2./[2],-[2])/2./TMath::Pi()/[1]**2", 0, pi)

    for ienr in range(1, eregion.nBin+1):
        kxbin = TP_HTG_KING[0].GetXaxis().FindBin(eregion.getBinCenter(ienr-1))
        for icth in range(1, cthregion.nBin+1):
            kybin = TP_HTG_KING[0].GetYaxis().FindBin(cthregion.getBinCenter(icth-1))
            if kxbin>0 and kybin>0:
                for ipar in range(3): # Setting the parameters of King function
                    # PSF
                    par_value = TP_HTG_KING[ipar].GetBinContent(kxbin, kybin)
                    #print '    Parameter No.{0}:'.format(ipar), par_value
                    fc_King_annulus.FixParameter(ipar, par_value)
                    fc_King.FixParameter(ipar, par_value)
                factor_norm = 1.0/fc_King_annulus.Integral(0, pi)
                #print "Normalization factor:", factor_norm
                for ipix in range(NPIX):
                    cnt = htg.GetBinContent(ienr, icth, ipix+1)
                    if cnt>0:
                        #sys.stdout.write('.')
                        for jpix in range(NPIX):
                            angdist = nparr_dist[ipix][jpix]
                            htg_smr.Fill(htg_smr.GetXaxis().GetBinCenter(ienr), htg_smr.GetYaxis().GetBinCenter(icth), jpix+0.5, cnt*fc_King.Eval(angdist)*factor_norm*sa_pix)
                print('')
    print(htg_smr.Integral())
    return htg_smr
Example #12
def get_faces(Imag, nested=False, num_cores=None):
    npix = np.shape(Imag)[0]
    assert npix % 12 == 0

    nside = npix2nside(npix)
    CubeFace = np.zeros((12, nside, nside))

    if not nested:
        NewIm = reorder(Imag, r2n=True)
    else:
        NewIm = Imag

    # index = np.array([xyf2pix(nside, x, range(nside), 0, True)
    #                     for x in range(nside-1, -1, -1)
    #                 ])
    index = make_index_par(nside, num_cores)

    for face in range(12):
        CubeFace[face] = np.resize(NewIm[index + nside**2 * face],
                                   (nside, nside))

    return CubeFace
Example #13
def set_faces(CubeFace, nested=False):
    npix = np.size(CubeFace)
    assert npix % 12 == 0

    nside = npix2nside(npix)
    Imag = np.zeros((npix))

    index = np.array([
        xyf2pix(nside, x, range(nside), 0, True)
        for x in range(nside - 1, -1, -1)
    ])

    for face in range(12):
        Imag[index + nside**2 * face] = np.resize(CubeFace[face],
                                                  (nside, nside))

    if not nested:
        NewIm = reorder(Imag, n2r=True)
    else:
        NewIm = Imag

    return NewIm
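A round-trip sanity check for set_faces (a sketch, assuming set_faces above is in scope): it rebuilds the face index with the xyf2pix recipe that appears commented out in Example #12, extracts the faces by hand, and confirms that set_faces reassembles the original RING map.

import numpy as np
import healpy as hp
from healpy.pixelfunc import xyf2pix

nside = 16
ring_map = np.arange(hp.nside2npix(nside), dtype=float)

# Face index in NESTED ordering, one row of pixel indices per x column.
index = np.array([xyf2pix(nside, x, range(nside), 0, True)
                  for x in range(nside - 1, -1, -1)])

# Extract the 12 base-resolution faces from the NESTED view of the map.
nest_map = hp.reorder(ring_map, r2n=True)
cube = np.array([np.resize(nest_map[index + nside**2 * face], (nside, nside))
                 for face in range(12)])

# set_faces should reassemble the original RING-ordered map.
assert np.allclose(set_faces(cube, nested=False), ring_map)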
Example #14
def merge_map(maps, nside=None, nest=True, verbose=False, renormalize=False):
    if nside is None:
        nside = 4096
        for m in maps:
            nside = min(nside, hpf.npix2nside(len(m)))



    filled_mask = np.zeros(hpf.nside2npix(nside), dtype=bool)
    result = np.zeros(hpf.nside2npix(nside), dtype=maps[0].dtype)

    for m in maps:
        m = ud_grade(m, nside, nest=nest)
        #valid in m
        valid_mask = ~(np.isnan(m)|np.isinf(m))

        #pixels to be taken from m, earlier m takes priority and will not be over-written
        fill_mask = valid_mask&(~filled_mask)
        if verbose:
            print("%.1f%% valid" % (100. * np.sum(valid_mask) / len(valid_mask)), end=' ')
            print("%.1f%% to be filled" % (100. * np.sum(fill_mask) / len(fill_mask)))

        if renormalize:
            overlap_mask = valid_mask&filled_mask
            if overlap_mask.any():
                factor = m[overlap_mask].dot(result[overlap_mask]) / m[overlap_mask].dot(m[overlap_mask])
                if verbose:
                    print("renormalizing by ", factor)
                m *= factor

        #fill pixel and mask
        result[fill_mask] = m[fill_mask]
        filled_mask[fill_mask] = True
    result[~filled_mask] = np.nan

    return result
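A usage sketch for merge_map, assuming the ud_grade wrapper from Example #6 and the hpf alias for healpy.pixelfunc are available in the same namespace:

import numpy as np
import healpy.pixelfunc as hpf

nside_coarse, nside_fine = 32, 64
a = np.random.standard_normal(hpf.nside2npix(nside_coarse))
a[:hpf.nside2npix(nside_coarse) // 2] = np.nan     # first half missing
b = np.random.standard_normal(hpf.nside2npix(nside_fine))

# Both maps are degraded to the coarsest nside; earlier maps take priority,
# and pixels missing from a are filled from the degraded b.
merged = merge_map([a, b], nest=False)
assert merged.shape == (hpf.nside2npix(nside_coarse),)
assert not np.isnan(merged).any()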
Example #15
########################################
#load data
result_filename = '/mnt/data0/omniscope/polarized foregrounds/result_25+4_nside_64_smooth_8.73E-02_edge_5.24E-02_rmvcmb_1_UV0_v3.0_principal_6_step_1.00_err_remove_pt.npz'
# result_filename = '/mnt/data0/omniscope/polarized foregrounds/result_25+4_nside_128_smooth_6.28E-02_edge_5.24E-02_rmvcmb_1_UV0_v3.0_principal_6_step_1.00_err_remove_pt.npz'
f = np.load(result_filename)
w_nfo = f['w_nf']#n_principal by frequency
w_nf = f['w_nf'][:, 1:]#n_principal by frequency
x_ni = f['x_ni']#n_principal by pixel
freqs = f['freqs'][1:]#GHz
freqso = f['freqs']#GHz
# ps_mask = f['ps_mask']
# x_ni *= (1-ps_mask)
n_f = len(freqs)
n_principal = len(w_nf)
nside = hpf.npix2nside(x_ni.shape[1])
########################################
normalizationo = f['normalization']
normalizationo[freqso < 20] = K_RJ2MJysr(normalizationo[freqso < 20], freqso[freqso < 20] * 1e9)
normalizationo[(freqso >= 20) & (freqso < 500)] = K_CMB2MJysr(normalizationo[(freqso >= 20) & (freqso < 500)], freqso[(freqso >= 20) & (freqso < 500)] * 1e9)

normalization = normalizationo[1:]

################################################
#plot orthogonal results
cmap = cm.gist_rainbow_r
cmap.set_under('w')
cmap.set_bad('gray')
def plot_components(M=np.eye(n_principal)):
    w_nf_local = M.dot(w_nf)
    x_ni_local = la.inv(M).transpose().dot(x_ni)
Example #16
def smoothing(
    maps,
    fwhm=0.0,
    sigma=None,
    invert=False,
    pol=True,
    iter=3,
    lmax=None,
    mmax=None,
    use_weights=False,
    regression=True,
    datapath=None,
):
    """Smooth a map with a Gaussian symmetric beam.

    Parameters
    ----------
    maps : array or sequence of 3 arrays
      Either an array representing one map, or a sequence of
      3 arrays representing 3 maps, accepts masked arrays
    fwhm : float, optional
      The full width at half maximum of the Gaussian [in radians].
      Default: 0.0
    sigma : float, optional
      The sigma of the Gaussian [in radians]. Overrides fwhm.
    invert : bool, optional
      If True, alms are divided by Gaussian beam function (un-smooth).
      Otherwise, alms are multiplied by Gaussian beam function (smooth).
      Default: False.
    pol : bool, optional
      If True, assumes input maps are TQU. Output will be TQU maps.
      (input must be 1 or 3 alms)
      If False, each map is assumed to be a spin 0 map and is 
      treated independently (input can be any number of alms).
      If there is only one input map, it has no effect. Default: True.
    iter : int, scalar, optional
      Number of iterations (default: 3)
    lmax : int, scalar, optional
      Maximum l of the power spectrum. Default: 3*nside-1
    mmax : int, scalar, optional
      Maximum m of the alm. Default: lmax
    use_weights: bool, scalar, optional
      If True, use the ring weighting. Default: False.
    regression: bool, scalar, optional
      If True, subtract map average before computing alm. Default: True.
    datapath : None or str, optional
      If given, the directory where to find the weights data.

    Returns
    -------
    maps : array or list of 3 arrays
      The smoothed map(s)
    """

    if not cb.is_seq(maps):
        raise TypeError("maps must be a sequence")

    # save the masks of inputs
    masks = pixelfunc.mask_bad(maps)

    if cb.is_seq_of_seq(maps):
        nside = pixelfunc.npix2nside(len(maps[0]))
        n_maps = len(maps)
    else:
        nside = pixelfunc.npix2nside(len(maps))
        n_maps = 0

    if pol or n_maps in (0, 1):
        # Treat the maps together (1 or 3 maps)
        alms = map2alm(
            maps,
            lmax=lmax,
            mmax=mmax,
            iter=iter,
            pol=pol,
            use_weights=use_weights,
            regression=regression,
            datapath=datapath,
        )
        smoothalm(alms, fwhm=fwhm, sigma=sigma, invert=invert, inplace=True)
        output_map = alm2map(alms, nside, pixwin=False)
    else:
        # Treat each map independently (any number)
        output_map = []
        for m, mask in zip(maps, masks):
            alm = map2alm(m, iter=iter, pol=pol, use_weights=use_weights,
                          regression=regression, datapath=datapath)
            smoothalm(alm, fwhm=fwhm, sigma=sigma, invert=invert, inplace=True)
            output_map.append(alm2map(alm, nside, pixwin=False))
    if pixelfunc.maptype(output_map) == 0:
        output_map[masks.flatten()] = UNSEEN
    else:
        for m, mask in zip(output_map, masks):
            m[mask] = UNSEEN

    return output_map
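A usage sketch for the TQU case, assuming the function above sits in a module that provides the healpy names it references (cb, pixelfunc, map2alm, smoothalm, alm2map, UNSEEN):

import numpy as np
import healpy as hp

nside = 64
npix = hp.nside2npix(nside)
t, q, u = (np.random.standard_normal(npix) for _ in range(3))

# pol=True treats the three maps as a TQU set; fwhm is in radians.
# Pixels flagged bad (UNSEEN) in the input would be restored to UNSEEN
# in the output by the masking logic above.
sm_t, sm_q, sm_u = smoothing([t, q, u], fwhm=np.radians(1.0), pol=True)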
Example #17
 def projmap(self,map,nest=False,**kwds):
     nside = pixelfunc.npix2nside(len(map))
     f = lambda x,y,z: pixelfunc.vec2pix(nside,x,y,z,nest=nest)
     return super(HpxOrthographicAxes,self).projmap(map,f,**kwds)
Example #18
mit_AtNicsd = np.fromfile('/home/omniscope/data/GSM_data/absolute_calibrated_data/miteor_150.00MHzAtNicsd_N3.00e-02_noaddA_dI_u102_t300_p9725_n32_128_b256_1000000000.000_v1.0', dtype='float64')
mit_AtNiA = np.fromfile('/home/omniscope/data/GSM_data/absolute_calibrated_data/miteor_150.00MHzAtNiA_N3.00e-02_noaddA_dI_u102_t300_p9725_n32_128_b256_1000000000.000_v1.0', dtype='float64').reshape((np.sum(mit_mask), np.sum(mit_mask)))

#339, 195
mwa_pix_file = np.load('/home/omniscope/data/GSM_data/absolute_calibrated_data/pixel_scheme_9785.npz')
mwa_mask = mwa_pix_file['valid_pix_mask']
mwa_gsm = mwa_pix_file['gsm']
mwa_AtNisd = np.fromfile('/home/omniscope/data/GSM_data/absolute_calibrated_data/mwa_aug23_eor0_forjeff/mwa_150.00MHzAtNisd_N2.56e-02_noaddA_dI_u195_t300_p9785_n32_128_b256_1000000000.000_v1.0', dtype='float64')
mwa_AtNicsd = np.fromfile('/home/omniscope/data/GSM_data/absolute_calibrated_data/mwa_aug23_eor0_forjeff/mwa_150.00MHzAtNicsd_N2.56e-02_noaddA_dI_u195_t300_p9785_n32_128_b256_1000000000.000_v1.0', dtype='float64')
mwa_AtNiA = np.fromfile('/home/omniscope/data/GSM_data/absolute_calibrated_data/mwa_aug23_eor0_forjeff/mwa_150.00MHzAtNiA_N2.56e-02_noaddA_dI_u195_t300_p9785_n32_128_b256_1000000000.000_v1.0', dtype='float64').reshape((np.sum(mwa_mask), np.sum(mwa_mask)))

pix_scale = np.median(mwa_pix_file['sizes'])
precision = 'float64'

npix = Ashape1 = len(mit_mask)
nside = hpf.npix2nside(npix)
AtNisd = np.zeros(npix)
AtNicsd = np.zeros(npix)
AtNiA = np.zeros((npix, npix))
fake_solution = np.zeros(npix)

AtNisd[mit_mask] += mit_AtNisd
AtNicsd[mit_mask] += mit_AtNicsd
fake_solution[mit_mask] = mit_gsm
AtNiA[np.ix_(mit_mask, mit_mask)] += mit_AtNiA

AtNisd[mwa_mask] += mwa_AtNisd
AtNicsd[mwa_mask] += mwa_AtNicsd
fake_solution[mwa_mask] = mwa_gsm
AtNiA[np.ix_(mwa_mask, mwa_mask)] += mwa_AtNiA
Example #19
def smoothing(maps, fwhm = 0.0, sigma = None, invert = False, pol = True,
              iter = 3, lmax = None, mmax = None, use_weights = False,
              regression = True, datapath = None):
    """Smooth a map with a Gaussian symmetric beam.

    Parameters
    ----------
    maps : array or sequence of 3 arrays
      Either an array representing one map, or a sequence of
      3 arrays representing 3 maps
    fwhm : float, optional
      The full width at half maximum of the Gaussian [in radians]. Default: 0.0
    sigma : float, optional
      The sigma of the Gaussian [in radians]. Overrides fwhm.
    invert : bool, optional
      If True, alms are divided by Gaussian beam function (un-smooth).
      Otherwise, alms are multiplied by Gaussian beam function (smooth).
      Default: False.
    pol : bool, optional
      If True, assumes input maps are TQU. Output will be TQU maps.
      (input must be 1 or 3 alms)
      If False, each map is assumed to be a spin 0 map and is 
      treated independently (input can be any number of alms).
      If there is only one input map, it has no effect. Default: True.
    iter : int, scalar, optional
      Number of iterations (default: 3)
    lmax : int, scalar, optional
      Maximum l of the power spectrum. Default: 3*nside-1
    mmax : int, scalar, optional
      Maximum m of the alm. Default: lmax
    use_weights: bool, scalar, optional
      If True, use the ring weighting. Default: False.
    regression: bool, scalar, optional
      If True, subtract map average before computing alm. Default: True.
    datapath : None or str, optional
      If given, the directory where to find the weights data.

    Returns
    -------
    maps : array or list of 3 arrays
      The smoothed map(s)
    """
    if not cb.is_seq(maps):
        raise TypeError("maps must be a sequence")

    if cb.is_seq_of_seq(maps):
        nside = pixelfunc.npix2nside(len(maps[0]))
        n_maps = len(maps)
    else:
        nside = pixelfunc.npix2nside(len(maps))
        n_maps = 0

    if pol or n_maps in (0, 1):
        # Treat the maps together (1 or 3 maps)
        alms = map2alm(maps, lmax = lmax, mmax = mmax, iter = iter,
                       pol = pol, use_weights = use_weights,
                       regression = regression, datapath = datapath)
        smoothalm(alms, fwhm = fwhm, sigma = sigma, invert = invert,
                  inplace = True)
        return alm2map(alms, nside, pixwin = False)
    else:
        # Treat each map independently (any number)
        retmaps = []
        for m in maps:
            alm = map2alm(m, iter = iter, pol = pol,
                          use_weights = use_weights,
                          regression = regression, datapath = datapath)
            smoothalm(alm, fwhm = fwhm, sigma = sigma, invert = invert,
                      inplace = True)
            retmaps.append(alm2map(alm, nside, pixwin = False))
        return retmaps
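A sketch of the pol=False branch, under the same assumption that the healpy internals referenced above (cb, pixelfunc, map2alm, smoothalm, alm2map) are importable:

import numpy as np
import healpy as hp

nside = 32
npix = hp.nside2npix(nside)
maps = [np.random.standard_normal(npix) for _ in range(5)]

# pol=False smooths each map independently, so any number of maps is allowed.
smoothed = smoothing(maps, fwhm=np.radians(2.0), pol=False)
assert len(smoothed) == 5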
Example #20
 def projmap(self,map,nest=False,**kwds):
     nside = pixelfunc.npix2nside(pixelfunc.get_map_size(map))
     f = lambda x,y,z: pixelfunc.vec2pix(nside,x,y,z,nest=nest)
     return super(HpxCartesianAxes,self).projmap(map,f,**kwds)
Example #21
 def projcontourf(self,map,*args,**kwds):
     nside = pixelfunc.npix2nside(pixelfunc.get_map_size(map))
     nest = kwds.pop('nest', False)
     f = lambda x,y,z: pixelfunc.vec2pix(nside,x,y,z,nest=nest)
     return super(HpxOrthographicAxes,self).projcontourf(map,f,*args,**kwds)
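All of the examples above lean on the same identity between npix and nside, sketched here as a final sanity check:

import healpy as hp

# npix2nside inverts nside2npix: a valid HEALPix map has npix = 12 * nside**2.
for nside in (1, 2, 16, 1024):
    assert hp.npix2nside(hp.nside2npix(nside)) == nside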