Example #1
    def draw_realization(self, synalm_lmax=None, seeds=None):

        if seeds is None:
            seeds = (None, None)

        if synalm_lmax is None:
            synalm_lmax = min(16384, 3 * self.nside - 1)

        np.random.seed(seeds[0])

        alm_small_scale = hp.synalm(
            list(self.small_scale_cl.value) +
            [np.zeros_like(self.small_scale_cl[0])] * 3,
            lmax=synalm_lmax,
            new=True,
        )

        alm_small_scale = [
            hp.almxfl(each, np.ones(min(synalm_lmax, 3 * self.nside - 1)))
            for each in alm_small_scale
        ]
        map_small_scale = hp.alm2map(alm_small_scale, nside=self.nside)

        # need later for beta
        modulate_map_I = hp.alm2map(self.modulate_alm[0].value, self.nside)

        map_small_scale[0] *= modulate_map_I
        map_small_scale[1:] *= hp.alm2map(self.modulate_alm[1].value,
                                          self.nside)

        map_small_scale += hp.alm2map(
            self.template_largescale_alm.value,
            nside=self.nside,
        )

        output_IQU = (utils.log_pol_tens_to_map(map_small_scale) *
                      self.template_largescale_alm.unit)

        np.random.seed(seeds[1])
        output_unit = np.sqrt(1 * self.small_scale_cl_pl_index.unit).unit
        alm_small_scale = hp.synalm(
            self.small_scale_cl_pl_index.value,
            lmax=synalm_lmax,
            new=True,
        )

        alm_small_scale = hp.almxfl(
            alm_small_scale, np.ones(min(3 * self.nside - 1, synalm_lmax + 1)))
        pl_index = hp.alm2map(alm_small_scale, nside=self.nside) * output_unit
        pl_index *= modulate_map_I
        pl_index += (hp.alm2map(
            self.largescale_alm_pl_index.value,
            nside=self.nside,
        ) * output_unit)
        pl_index -= 3.1 * u.dimensionless_unscaled

        # Fixed values for comparison with s4
        # pl_index = -3.1 * u.dimensionless_unscaled

        return (output_IQU[0], output_IQU[1], output_IQU[2], pl_index)
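
The pattern above (draw small-scale Gaussian alm with synalm, transform to a map, modulate it, then add a large-scale template) can be sketched in isolation as follows. All inputs here are hypothetical stand-ins, not the class attributes used above.

import numpy as np
import healpy as hp

nside = 64
lmax = 3 * nside - 1

# Hypothetical small-scale spectrum (steeply falling power law).
cl_small = 1e-4 / (np.arange(lmax + 1) + 10.0) ** 2.5

# Gaussian small-scale realization in alm space, then in pixel space.
alm_small = hp.synalm(cl_small, lmax=lmax)
map_small = hp.alm2map(alm_small, nside=nside)

# Hypothetical modulation map and large-scale template.
modulation = 1.0 + 0.5 * hp.alm2map(hp.synalm(cl_small, lmax=lmax), nside=nside)
template_largescale = np.zeros(hp.nside2npix(nside))

# Modulated small scales plus the large-scale template.
realization = map_small * modulation + template_largescale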
Example #2
File: test_data.py Project: AdriJD/ksw
    def test_data_compute_alm_sim_1d_T(self):

        cosmo = self.FakeCosmology()
        pol = ['T']
        data = Data(self.lmax, self.n_ell_T, self.b_ell_T, pol, cosmo)

        np.random.seed(10)
        alm = data.compute_alm_sim(lens_power=False)
        np.random.seed(10)

        alm_exp = hp.synalm(data.cov_ell_nonlensed[0], new=True)

        alm_sim_expec = np.zeros((1, self.nelem), dtype=complex)
        alm_sim_expec[0] = alm_exp

        np.testing.assert_almost_equal(alm, alm_sim_expec)

        # Again for lensed.
        np.random.seed(10)
        alm = data.compute_alm_sim(lens_power=True)
        np.random.seed(10)

        alm_exp = hp.synalm(data.cov_ell_lensed[0], new=True)

        alm_sim_expec = np.zeros((1, self.nelem), dtype=complex)
        alm_sim_expec[0] = alm_exp

        np.testing.assert_almost_equal(alm, alm_sim_expec)
Example #3
def syn_full_alm(Cls, lmax=None):
    """
    Basically healpy synalm, but returns a sorted pandas
    series with positive and negative m (complex map).
    """
    if lmax is None:
        lmax = len(Cls) - 1

    l, m = hp.Alm.getlm(lmax)
    # index with positive, negative values of m (healpy only supplies positive)
    # (m first for ease since that's the way healpix likes it)
    mpos_idx = pd.MultiIndex.from_arrays([m, l], names=['m', 'l'])
    mneg_idx = pd.MultiIndex.from_arrays([-m, l], names=['m', 'l'])

    # complex map should be made of 2 ind. components (each is a real map)
    # Cls defined for total map, so each component should use half
    almr = pd.Series(hp.synalm(Cls * 0.5, lmax, verbose=False), index=mpos_idx)
    almi = pd.Series(hp.synalm(Cls * 0.5, lmax, verbose=False), index=mpos_idx)

    # combine real, imaginary components to get positive, negative m alms
    # almn relation derived from cmplx conj--neg m relation for almi, almr
    almp = almr + 1j * almi
    almn = np.conj(almr - 1j * almi) * (-1)**m
    almn.index = mneg_idx

    # positive, negative m=0 terms should match, so check & drop one
    assert np.all(
        almn.loc[0] == almp.loc[0]), 'Different values of alm for m = +/-0'
    almn = almn.drop(0, level='m')

    # combine & reorder to match *my* preferences
    alm = (almp.add(almn, fill_value=0j).swaplevel().sort_index(level='l'))

    return alm
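
A minimal usage sketch for syn_full_alm, assuming the function above is importable; the flat spectrum is an arbitrary placeholder.

import numpy as np

# Hypothetical flat spectrum up to lmax = 16.
lmax = 16
Cls = np.ones(lmax + 1)

alm_full = syn_full_alm(Cls, lmax=lmax)
print(alm_full.index.names)  # ['l', 'm'] after the swaplevel above
print(alm_full.loc[2])       # all m entries (negative and positive) for l = 2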
Example #4
def crankNicolson_good(cls_, d):
    s = hp.synalm(cls_, lmax=config.L_MAX_SCALARS)
    s = utils.flatten_map(s)
    #h, s = gradientDescent.gradient_ascent_good(d, cls_)
    s_pix = hp.sphtfunc.alm2map(utils.unflat_map_to_pix(s), nside=config.NSIDE)
    #s = utils.flatten_map(s)
    accepted = 0
    history = []
    for i in range(config.N_CN):
        if i == 40000:
            config.beta_CN /= 9.5

        history.append(s)
        s_prop = np.sqrt(
            1 - config.beta_CN**2) * s + config.beta_CN * utils.flatten_map(
                hp.synalm(cls_, lmax=config.L_MAX_SCALARS))
        ### Added the denominator!!!
        s_prop_pix = hp.alm2map(utils.unflat_map_to_pix(s_prop),
                                nside=config.NSIDE) * (1 / config.NSIDE)
        r = compute_CN_ratio_good(s_pix, s_prop_pix, d)
        if np.log(np.random.uniform()) < r:
            s = s_prop
            s_pix = s_prop_pix
            accepted += 1

    print(accepted / config.N_CN)
    return history, s
Example #5
    def setUp(self):
        self.nside = 64
        self.lmax  = self.nside
        
        seed = 12345
        np.random.seed(seed)
        self.mapr = hp.synfast( np.ones(self.lmax+1), self.nside, pixwin=False, fwhm=0.0, sigma=None )
        self.mapi = hp.synfast( np.ones(self.lmax+1), self.nside, pixwin=False, fwhm=0.0, sigma=None )

        self.almg = hp.synalm( np.ones(self.lmax+1), self.lmax )
        self.almc = hp.synalm( np.ones(self.lmax+1), self.lmax )
Example #7
def simulate_cmb(nside=2048, lmax=3000,
                 frequency=100,smear=False,
                 nomap = False, beam=None, beamP=None,
                 save=False, filename='testcmb.fits',
                 cl_file='bf_base_cmbonly_plikHMv18_TT_lowTEB_lmax4000.minimum.theory_cl'):
        
    ls, cltt, clte, clee, clbb = get_theory_cls(lmax=lmax, cl_file=cl_file)
 
    Tlm, Elm, Blm = hp.synalm( (cltt, clee, clbb, clte), new=True, lmax=lmax)

    
    if smear:
        if (beam is None) or (beamP is None) :
            hdulist = fits.open(data_path + 'HFI_RIMO_Beams-100pc_R2.00.fits')
            beam = hdulist[beam_index['{}'.format(frequency)]].data.NOMINAL[0][:lmax+1]
            beamP = hdulist[beam_index['{}P'.format(frequency)]].data.NOMINAL[0][:lmax+1]
        hp.sphtfunc.almxfl(Tlm, beam, inplace=True)
        hp.sphtfunc.almxfl(Elm, beamP, inplace=True)
        hp.sphtfunc.almxfl(Blm, beamP, inplace=True)

    if nomap:
        return Tlm,Elm,Blm
    
    Tmap = hp.alm2map( Tlm, nside )
    Qmap, Umap = hp.alm2map_spin( (Elm, Blm), nside, 2, lmax=lmax)

    if save:
        hp.write_map([Tmap,Qmap,Umap],data_path + filename)
    return Tmap, Qmap, Umap
Example #8
def mala(cls, observations, grad_constant_part):
    conjgrad = CG()
    extended_cls = np.array(
        [cl for l in range(config.L_MAX_SCALARS + 1) for cl in cls[l:]])
    acceptance = 0
    s = hp.synalm(cls, lmax=config.L_MAX_SCALARS)
    #h_g, s = gradientDescent.gradient_ascent(observations, cls)
    warm_start = s
    s_pixel = hp.sphtfunc.alm2map(s, nside=config.NSIDE)
    grad_log_s = compute_gradient_log(s, s_pixel, grad_constant_part,
                                      extended_cls)
    history = []
    history_ratio = []
    for i in range(config.N_mala):
        history.append(s)
        s_prop = proposal(grad_log_s, s)
        s_prop_pix = hp.sphtfunc.alm2map(s_prop, nside=config.NSIDE)
        grad_log_prop = compute_gradient_log(s_prop, s_prop_pix,
                                             grad_constant_part, extended_cls)
        r = compute_MH_ratio(grad_log_s, grad_log_prop, s, s_pixel, s_prop,
                             s_prop_pix, observations, extended_cls)
        history_ratio.append(r)
        if np.log(np.random.uniform()) < r:
            s = s_prop
            s_pixel = s_prop_pix
            grad_log_s = grad_log_prop
            acceptance += 1

    print("Acceptance rate:")
    print(acceptance / config.N_mala)
    return history, s, warm_start, history_ratio
Example #9
def generate_correlated_alm(input_alm_f1, Clf1f1, Clf2f2, Clf1f2, seed=None):
    correlated = hp.almxfl(input_alm_f1, Clf1f2 / Clf1f1)
    ps_noise = Clf2f2 - np.nan_to_num(Clf1f2**2 / Clf1f1)
    assert np.all(ps_noise >= 0)
    if seed is not None: np.random.seed(seed)
    noise = hp.synalm(ps_noise, lmax=hp.Alm.getlmax(input_alm_f1.size))
    return correlated + noise
def getSsim(ell, Cl, lmax=100, cutSky=False):
    """
  Purpose:
    create simulated S_{1/2} from input power spectrum
  Note:
    this calculates Jmn every time it is run so should not be used for ensembles
  Procedure:
    simulates full sky CMB, measures S_{1/2}
  Inputs:
    ell: the l values for the power spectrum
    Cl: the power spectrum
    lmax: the maximum ell value to use in calculation
      Default: 100
    cutSky: set to True to convert to real space, apply mask, etc.
      Default: False
      Note: true option not yet implemented
  Returns:
    simulated S_{1/2}
  """
    # get Jmn matrix for harmonic space S_{1/2} calc.
    myJmn = getJmn(lmax=lmax)[2:, 2:]  # do not include monopole, dipole

    #alm_prim,alm_late = hp.synalm((primCl,lateCl,crossCl),lmax=lmax,new=True)
    almSim = hp.synalm(
        Cl, lmax=lmax)  # question: does this need to start at ell[0]=1?
    ClSim = hp.alm2cl(almSim)

    return np.dot(ClSim[2:], np.dot(myJmn, ClSim[2:]))
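
A usage sketch for getSsim, assuming getJmn is available in the same module; the 1/(l(l+1)) spectrum below is a hypothetical placeholder.

import numpy as np

lmax = 100
ell = np.arange(lmax + 1)
Cl = np.zeros(lmax + 1)
Cl[2:] = 1.0 / (ell[2:] * (ell[2:] + 1.0))  # hypothetical Sachs-Wolfe-like shape

# One simulated S_{1/2}; repeated calls build an ensemble, but note the docstring's
# warning that Jmn is recomputed on every call.
s_half = getSsim(ell, Cl, lmax=lmax)
print(s_half)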
Example #11
def test_correlated_alm():
    lmax = 2000
    ells = np.arange(0, lmax, 1)

    def get_cls(ells, index, amplitude):
        cls = amplitude * ells.astype(np.float32)**index
        cls[ells < 2] = 0
        return cls

    Clf1f1 = get_cls(ells, -1, 1)
    Clf2f2 = get_cls(ells, -1.3, 2)
    Clf1f2 = get_cls(ells, -1.4, 0.5)

    alm_f1 = hp.synalm(Clf1f1, lmax=lmax - 1)
    alm_f2 = deltag.generate_correlated_alm(alm_f1, Clf1f1, Clf2f2, Clf1f2)

    f1f1 = hp.alm2cl(alm_f1, alm_f1)
    f2f2 = hp.alm2cl(alm_f2, alm_f2)
    f1f2 = hp.alm2cl(alm_f1, alm_f2)

    pl = io.Plotter(xyscale='linlog', scalefn=lambda x: x)
    pl.add(ells, f1f1, color="C0", alpha=0.4)
    pl.add(ells, f2f2, color="C1", alpha=0.4)
    pl.add(ells, f1f2, color="C2", alpha=0.4)
    pl.add(ells, Clf1f1, label="f1f1", color="C0", ls="--", lw=3)
    pl.add(ells, Clf2f2, label="f2f2", color="C1", ls="--", lw=3)
    pl.add(ells, Clf1f2, label="f1f2", color="C2", ls="--", lw=3)
    pl.done()
Example #12
File: test_l.py Project: carronj/lenspyx
def test_t():
    lmax = 200
    nside = 256
    cls_unl = np.ones(lmax + 1, dtype=float)
    tunl = hp.synalm(cls_unl, new=True)
    dlm = np.zeros_like(tunl)
    hp.almxfl(dlm,
              np.sqrt(
                  np.arange(lmax + 1) * np.arange(1, lmax + 2, dtype=float)),
              inplace=True)
    T = hp.alm2map(tunl, nside)
    T1 = lensing.alm2lenmap(tunl, [dlm, None],
                            nside,
                            verbose=True,
                            nband=8,
                            facres=-2)
    assert np.max(np.abs(T - T1)) / np.std(T) < 1e-5
    d1Re, d1Im = hp.alm2map_spin([dlm, np.zeros_like(dlm)], nside, 1, lmax)
    T2 = lensing.alm2lenmap(tunl, [d1Re, d1Im],
                            nside,
                            verbose=True,
                            nband=8,
                            facres=-2)
    assert np.max(np.abs(T - T2)) / np.std(T) < 1e-5
    T3 = lensing.alm2lenmap(tunl, [dlm, dlm.copy()],
                            nside,
                            verbose=True,
                            nband=8,
                            facres=-2)
    assert np.all(T2 == T3)
Example #13
def mala3(cls, d, grad_constant_part):
    extended_cls = extend_cls4(cls)
    s = hp.synalm(cls, lmax=config.L_MAX_SCALARS)
    s_pix = hp.sphtfunc.alm2map(s, nside=config.NSIDE)
    s = flatten_map4(s)
    grad_log_s = compute_gradient_log3(s, s_pix, grad_constant_part,
                                       extended_cls)
    history = []
    accepted = 0
    history.append(s)
    for i in range(config.N_mala):
        print(i)
        s_prop = proposal3(s, grad_log_s, step_size=config.step_size_mala)
        s_prop_pix = hp.alm2map(unflat_map_to_pix4(s_prop), nside=config.NSIDE)
        grad_log_s_prop = compute_gradient_log3(s_prop, s_prop_pix,
                                                grad_constant_part,
                                                extended_cls)
        r = compute_log_MH_ratio3(s, s_pix, grad_log_s, s_prop, s_prop_pix,
                                  grad_log_s_prop, d, extended_cls,
                                  config.step_size_mala)
        if np.log(np.random.uniform()) < r:
            s = s_prop
            s_pix = s_prop_pix
            grad_log_s = grad_log_s_prop
            accepted += 1

        history.append(s)

    print(accepted / config.N_mala)
    return history, s
Example #14
def _simu_from_rq(mt, me, mb, rq, nside=2048):
    import healpy as hp
    m = mt + me + mb
    cls = []
    for i in range(m):
        for j in range(m - i):
            cls += [rq[:, j, j + i]]
    print("random alms")
    alms = hp.synalm(cls, new=True)
    rmaps = nm.zeros((m, m, 12 * nside**2))
    mp = max(me, mb)

    mps = []

    for i in range(mp):
        almsr = []
        if mt <= i:
            almsr += [nm.zeros_like(alms[0])]
        else:
            almsr += [alms[i]]
        if me <= i:
            almsr += [nm.zeros_like(alms[0])]
        else:
            almsr += [alms[i + mt]]
        if mb <= i:
            almsr += [nm.zeros_like(alms[0])]
        else:
            almsr += [alms[i + mt + me]]
        print("fg channel %d" % i)
        mps += [hp.alm2map(almsr, nside, pol=True)]
    for i in range(mp, m):
        print("fg channel %d" % i)
        mps += [[hp.alm2map(alms[i], nside, pol=True), None, None]]
    return mps
Example #15
File: taysim.py Project: amaurea/taylens
def simulate_tebp_correlated(cl_tebp_arr,nside,lmax) :
	alms=healpy.synalm(cl_tebp_arr,lmax=lmax,new=True)
	aphi=alms[-1]
	acmb=alms[0:-1]
#Set to zero above map resolution to avoid aliasing
	beam_cut=np.ones(3*nside)
	for ac in acmb :
		healpy.almxfl(ac,beam_cut,inplace=True)
	cmb=np.array(healpy.alm2map(acmb,nside,pol=True))

	return cmb,aphi
def generate_w0term(Cl):
    """
    
    """
    lmax = np.size(Cl) - 1
    onesl = np.ones(lmax + 1)
    onesl[:2] = 0
    #### make sure no mono- and dipole
    w0 = hp.synalm(onesl)
    # w0 = hp.synalm(np.ones(lmax))#1/np.sqrt(2)*(np.random.randn(hp.Alm.getsize(lmax)) + 1j*np.random.randn(hp.Alm.getsize(lmax)))
    return w0
Example #17
File: pysm.py Project: bthorne93/PySM
def simulate_tebp_correlated(cl_tebp_arr,nside,lmax,seed):
        np.random.seed(seed)
        alms=hp.synalm(cl_tebp_arr,lmax=lmax,new=True)
        aphi=alms[-1]
        acmb=alms[0:-1]
#Set to zero above map resolution to avoid aliasing                                        
        beam_cut=np.ones(3*nside)
        for ac in acmb :
                hp.almxfl(ac,beam_cut,inplace=True)
        cmb=np.array(hp.alm2map(acmb,nside,pol=True,verbose=False))

        return cmb,aphi
Example #18
def draw_gaussian_a_p(input_kappa_alm, aux_cl, num_of_kcmb):
    '''
    Draw a_p alms from distributions with the right auxiliary spectra.
    '''
    num_of_tracers = len(aux_cl[:,0])
    a_alms = np.zeros((num_of_tracers, len(input_kappa_alm)), dtype='complex128') #Unweighted alm components

    a_alms[0:num_of_kcmb,:] = input_kappa_alm
    for j in range(num_of_kcmb, num_of_tracers):
        a_alms[j,:] = hp.synalm(aux_cl[j,:], lmax=len(aux_cl[0,:])-1)

    return a_alms
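
A usage sketch, assuming draw_gaussian_a_p above is importable; the auxiliary spectra are hypothetical flat placeholders and the first tracer is treated as the CMB kappa.

import numpy as np
import healpy as hp

lmax = 100
nell = lmax + 1

aux_cl = np.ones((3, nell)) * 1e-8          # hypothetical spectra for 3 tracers
input_kappa_alm = hp.synalm(aux_cl[0], lmax=lmax)

a_alms = draw_gaussian_a_p(input_kappa_alm, aux_cl, num_of_kcmb=1)
print(a_alms.shape)  # (3, hp.Alm.getsize(lmax))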
Example #19
File: ps.py Project: guanyilun/cosmoslib
 def gen_alm_hp(self):
     if self.prefactor:
         self.remove_prefactor()
     # healpy requires the cl array to start from ell = 0; fill the rest with 0
     ps = np.zeros((4, self.lmax + 1))
     ps[:, self.lmin:] = self.values[:, 1:].T
     alm = hp.synalm((ps[0], ps[1], ps[2], ps[3], np.zeros_like(
         ps[0]), np.zeros_like(ps[0])),
                     lmax=self.lmax,
                     verbose=False,
                     new=True)
     return alm
def generate_w1term(Cl, bl, nl):
    """
    
    """
    lmax = np.size(Cl) - 1
    onesl = np.ones(lmax + 1)
    onesl[:2] = 0
    w1 = hp.synalm(onesl)
    #### make sure no mono- and dipole
    # w1 = 1/np.sqrt(2)*(np.random.randn(hp.Alm.getsize(lmax)) + 1j*np.random.randn(hp.Alm.getsize(lmax)))
    out = hp.almxfl(w1, np.sqrt(Cl[: lmax + 1] / nl[: lmax + 1]) * bl[: lmax + 1])
    return out
Example #21
    def test_alm2cl(self):
        nside = 32
        lmax = 64
        lmax_out = 100
        seed = 12345
        np.random.seed(seed)

        # Input power spectrum and alm
        alm_syn = hp.synalm(self.cla, lmax=lmax)

        cl_out = hp.alm2cl(alm_syn, lmax_out=lmax_out - 1)

        np.testing.assert_array_almost_equal(cl_out, self.cla[:lmax_out], decimal=4)
Example #22
    def generate_gaus_map(self, readGmap=-1):
        if (readGmap<0):
            self.gausalm=hp.synalm(self.inputCls)
        else:
            # code assumes that self.mapsdir="maps50/"
            self.gausalm=hp.read_alm(self.mapsdir+"gmap_"+str(readGmap)+".fits")
            ns=0.965
            C0N=(1.0-np.exp(-(ns-1)*self.efolds))/(ns-1)
            C050=(1.0-np.exp(-(ns-1)*50))/(ns-1)
            alm0r=np.sqrt(C0N/C050)
            self.gausalm[0]=self.gausalm[0]*alm0r

        self.gausmap=hp.alm2map(self.gausalm, nside=self.NSIDE) # includes the monopole bit
Example #23
def simulate_tebp_correlated(cl_tebp_arr, nside, lmax, seed):
    """This generates correlated T,E,B and Phi maps

        """
    np.random.seed(seed)
    alms = hp.synalm(cl_tebp_arr, lmax=lmax, new=True)
    aphi = alms[-1]
    acmb = alms[0:-1]
    # Set to zero above map resolution to avoid aliasing
    beam_cut = np.ones(3 * nside)
    for ac in acmb:
        hp.almxfl(ac, beam_cut, inplace=True)
    cmb = np.array(hp.alm2map(acmb, nside, pol=True, verbose=False))
    return cmb, aphi
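
A minimal usage sketch with hypothetical flat spectra for T, E, B and phi. With new=True, healpy reads an n(n+1)/2-spectrum list in diagonal-major order, so for the four fields (T, E, B, phi) the order here is TT, EE, BB, PP, TE, EB, BP, TB, EP, TP; only TE is non-zero in this hypothetical setup.

import numpy as np
import healpy as hp

nside = 32
lmax = 3 * nside - 1
flat = np.ones(lmax + 1) * 1e-10
zero = np.zeros(lmax + 1)

cl_tebp_arr = [flat, flat, flat, flat,        # TT, EE, BB, PP
               0.5 * flat, zero, zero,        # TE, EB, BP
               zero, zero,                    # TB, EP
               zero]                          # TP

cmb, aphi = simulate_tebp_correlated(cl_tebp_arr, nside, lmax, seed=0)
print(cmb.shape)   # (3, 12 * nside**2): T, Q, U maps
print(aphi.size)   # alm of the lensing potential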
Example #24
def generate_sky_map(cls_, noise=False):
    s = hp.synalm(cls_, lmax=config.L_MAX_SCALARS)
    if noise:
        n = np.sqrt(config.noise_covar[0]/2)*np.random.normal(size=config.dimension_sph) \
            + 1j * np.sqrt(config.noise_covar[0]/2)* np.random.normal(size=config.dimension_sph)

        n[:config.L_MAX_SCALARS +
          1] = np.sqrt(config.noise_covar[0]) * np.random.normal(
              size=config.L_MAX_SCALARS + 1)
        n[0] = 0
        n[1] = 0
        return s + n

    return s
Example #25
File: test_data.py Project: AdriJD/ksw
    def test_data_compute_alm_sim_2d(self):

        cosmo = self.FakeCosmology()
        pol = ['T', 'E']
        data = Data(self.lmax, self.n_ell_TplusE, self.b_ell_TplusE, pol,
                    cosmo)

        np.random.seed(10)
        alm = data.compute_alm_sim(lens_power=False)
        np.random.seed(10)

        cl = np.zeros((4, self.nell))
        cl[:2] = data.cov_ell_nonlensed[:2]
        cl[3] = data.cov_ell_nonlensed[2]
        alm_exp = hp.synalm(cl, new=True)

        alm_sim_expec = np.zeros((2, self.nelem), dtype=complex)
        alm_sim_expec[0] = alm_exp[0]
        alm_sim_expec[1] = alm_exp[1]

        np.testing.assert_almost_equal(alm, alm_sim_expec)

        # Again for lensed.
        np.random.seed(10)
        alm = data.compute_alm_sim(lens_power=True)
        np.random.seed(10)

        cl = np.zeros((4, self.nell))
        cl[:2] = data.cov_ell_lensed[:2]
        cl[3] = data.cov_ell_lensed[2]
        alm_exp = hp.synalm(cl, new=True)

        alm_sim_expec = np.zeros((2, self.nelem), dtype=complex)
        alm_sim_expec[0] = alm_exp[0]
        alm_sim_expec[1] = alm_exp[1]

        np.testing.assert_almost_equal(alm, alm_sim_expec)
Example #26
def generate_individual_gaussian_tracers(a_alms, A, nlkk, num_of_kcmb):
    '''
    Put all the weights and alm components together to give appropriately correlated tracers
    '''
    num_of_tracers = len(a_alms[:,0])
    tracer_alms = np.zeros((num_of_tracers, len(a_alms[0,:])), dtype='complex128') #Appropriately correlated final tracers

    for i in range(num_of_kcmb):
        tracer_alms[i,:] = a_alms[i,:] + hp.synalm(nlkk[i], lmax=len(A[i,i,:])-1)
    
    for i in range(num_of_kcmb,num_of_tracers):
        for j in range(i+1):
            tracer_alms[i,:] += hp.almxfl(a_alms[j,:], A[i,j,:])

    return tracer_alms
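
A usage sketch, assuming the function above (and healpy) are importable; the identity weights A and the single kappa noise spectrum are hypothetical placeholders.

import numpy as np
import healpy as hp

lmax = 100
nell = lmax + 1
num_tracers, num_kcmb = 3, 1

# Hypothetical inputs: three Gaussian a_p components, trivial (identity) weights,
# and reconstruction noise only for the kappa tracer.
a_alms = np.array([hp.synalm(np.ones(nell) * 1e-8, lmax=lmax)
                   for _ in range(num_tracers)])
A = np.zeros((num_tracers, num_tracers, nell))
for i in range(num_tracers):
    A[i, i, :] = 1.0
nlkk = np.ones((num_kcmb, nell)) * 1e-9

tracer_alms = generate_individual_gaussian_tracers(a_alms, A, nlkk, num_kcmb)
print(tracer_alms.shape)  # (3, hp.Alm.getsize(lmax))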
Example #27
    def generate_gaus_map(self, readGmap=-1):
        if (readGmap<0):
            self.gausalm0=hp.synalm(self.inputCls)
        else:
            self.gausalm0=hp.read_alm(self.mapsdir+"gmap_"+str(readGmap)+".fits")

        self.gausalm1=np.copy(self.gausalm0); self.gausalm1[0]=0.0
        if (self.nodipole):
            ndxfl=np.ones(len(self.inputCls))
            ndxfl[1]=0.0
            hp.almxfl(self.gausalm1, ndxfl, inplace=True)
            hp.almxfl(self.gausalm0, ndxfl, inplace=True)

        self.gausmap0=hp.alm2map(self.gausalm0, nside=self.NSIDE) # includes the monopole bit
        self.gausmap1=hp.alm2map(self.gausalm1, nside=self.NSIDE) # does not include the monopole
Example #28
def crankNicolson(cls_, observations):
    accept = 0
    history = []
    s = hp.synalm(cls_, lmax=config.L_MAX_SCALARS)
    #h, s = gradientDescent.gradient_ascent(observations, cls_)
    s_pixel = hp.sphtfunc.alm2map(s, nside=config.NSIDE)
    for i in range(config.N_CN):
        history.append(s)
        prop = np.sqrt(1-config.beta_CN**2)*s + config.beta_CN*hp.sphtfunc.synalm(cls_, lmax=config.L_MAX_SCALARS)
        prop_pix = hp.sphtfunc.alm2map(prop, nside=config.NSIDE)
        r = compute_CN_ratio(s_pixel, prop_pix, observations)
        if np.log(np.random.uniform()) < r:
            s = prop
            s_pixel = prop_pix
            accept += 1

    print(accept/config.N_CN)
    return history, s
Example #29
    def compute_alm_sim(self, lens_power=False):
        '''
        Draw isotropic Gaussian realisation from (S+N) covariance.

        Parameters
        ----------
        lens_power : bool, optional
            Include lensing power in covariance.

        Returns
        -------
        alm_sim : (npol, nelem) complex array
            Simulated alm with ells up to lmax.
        '''

        if lens_power:
            cov_ell = self.cov_ell_lensed
        else:
            cov_ell = self.cov_ell_nonlensed
            
        # Synalm expects 1D TT array or (TT, EE, BB, TE) array.
        if 'E' in self.pol:
            nspec, nell = cov_ell.shape
            c_ell_in = np.zeros((4, nell))
            if 'T' in self.pol:
                c_ell_in[0,:] = cov_ell[0]
                c_ell_in[1,:] = cov_ell[1]
                c_ell_in[3,:] = cov_ell[2]
            else:
                c_ell_in[1,:] = cov_ell[0]
        else:
            c_ell_in = cov_ell

        alm = hp.synalm(c_ell_in, lmax=self.lmax, new=True)

        if self.pol == ('T', 'E'):
            # Only return I and E.
            alm = alm[:2,:]
        elif self.pol == ('E',):
            alm = (alm[1,:])[np.newaxis,:]
        else:
            alm = alm

        return alm
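
The (TT, EE, BB, TE) convention noted in the comment above can be illustrated directly with healpy; the flat spectra below are hypothetical.

import numpy as np
import healpy as hp

lmax = 50
tt = np.ones(lmax + 1) * 1e-10
ee = np.ones(lmax + 1) * 1e-10
bb = np.zeros(lmax + 1)          # no B-mode power
te = 0.3 * tt                    # T-E correlation

# new=True selects the (TT, EE, BB, TE) ordering used in compute_alm_sim above.
alm_T, alm_E, alm_B = hp.synalm((tt, ee, bb, te), lmax=lmax, new=True)
print(alm_T.size == hp.Alm.getsize(lmax))  # True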
Example #30
    def generate_gaus_map(self, readGmap=-1):
        if (readGmap < 0):
            self.gausalm0 = hp.synalm(self.inputCls)
        else:
            self.gausalm0 = hp.read_alm(self.mapsdir + "gmap_" +
                                        str(readGmap) + ".fits")

        self.gausalm1 = np.copy(self.gausalm0)
        self.gausalm1[0] = 0.0
        if (self.nodipole):
            ndxfl = np.ones(len(self.inputCls))
            ndxfl[1] = 0.0
            hp.almxfl(self.gausalm1, ndxfl, inplace=True)
            hp.almxfl(self.gausalm0, ndxfl, inplace=True)

        self.gausmap0 = hp.alm2map(
            self.gausalm0, nside=self.NSIDE)  # includes the monopole bit
        self.gausmap1 = hp.alm2map(
            self.gausalm1, nside=self.NSIDE)  # does not include the monopole
dd['scalar_spectral_index(1)'] = 0.9655
dd['hubble'] = 67.31


########### Here we simulate the data set ############

# White noise spectrum, (Commander level, so low)
nl = 1.7504523623688016e-16*1e12 * np.ones(2500)

# Gaussian beam fwhm 5 arcmin 
bl = CG.gaussian_beam(2500,5)

# Spectrum according to parameter defined above
Cl = cb.generate_spectrum(dd)
lmax = Cl.shape[0]-1
alm = hp.synalm(Cl[:,1])
dlm = hp.almxfl(alm,bl[:lmax+1])
nlm = hp.synalm(nl[:lmax+1])
dlm = dlm+nlm



# Could be used for an asymmetric proposal, but for now only used for the first guess
x_mean = np.array([0.02222,0.1197,0.078,3.089,0.9655,67.31])


#cov_mat from tableTT_lowEB downloaded from PLA, used in proposal
cov_new = np.load("cov_tableTT_lowEB_2_3_5_6_7_23.npy")


# prior parameters: central value and 1/sigma**2 for each
Example #32
def main(run_type='data', nsim=0, fnl=0):

    '''
    MPI Setup
    '''
    o_comm = MPI.COMM_WORLD
    i_rank = o_comm.Get_rank() # current core number -- e.g., i in arange(i_size)
    i_size = o_comm.Get_size() # number of cores assigned to run this program
    o_status = MPI.Status()

    i_work_tag = 0
    i_die_tag = 1

    '''
    Loading and calculating power spectrum components
    '''

    if (run_type == 'fnl'):
        nl = 1024
    else:
        nl = 1499

    if (run_type == 'data'):
        fn_map = h._fn_map
    elif (run_type == 'sim'):
        fn_map = 'output/map_sim_%i.fits' % nsim
    elif (run_type == 'fnl'):
        #print "fnl value: %i" % fnl
        fn_map = 'data/fnl_sims/map_fnl_%i_sim_%i.fits' % (int(fnl), nsim)

    # (1) read map (either map_data or map_sim), mask, mll, and create mll_inv;
    map_in = hp.read_map(fn_map)
    mask = hp.read_map(h._fn_mask)
    if (run_type == 'fnl'):
        mask = 1.
    fn_mll = 'output/na_mll_%i_lmax.npy' % nl
    mll = np.load(fn_mll)
    if (run_type == 'fnl'):
        mll = np.identity(nl)
    mll_inv = np.linalg.inv(mll)

    nside = hp.get_nside(map_in)

    if (i_rank == 0):

        if (run_type == 'data'):
            fn_cl21 = 'output/cl21_data.dat'
            fn_cl21_no_mll = 'output/cl21_data_no_mll.dat'
        elif (run_type == 'sim'):
            fn_cl21 = 'output/cl21_sim_%i.dat' % nsim
            fn_cl21_no_mll = 'output/cl21_no_mll_%i.dat' % nsim
        elif (run_type == 'fnl'):
            fn_cl21 = 'output/cl21_fnl_%i_sim_%i.dat' % (int(fnl), nsim)
            fn_cl21_no_mll = 'output/cl21_fnl_%i_sim_%i_no_mll.dat' % (int(fnl), nsim)

        f_t1 = time.time()

        print ""
        print "Run parameters:"
        print "(Using %i cores)" % i_size
        print "nl: %i, nside: %i, map: %s" % (nl, nside, fn_map)
        print "beam: %s, alpha_beta: %s, cltt: %s" % (h._fn_beam, h._fn_alphabeta, h._fn_cltt)

        print ""
        print "Loading ell, r, dr, alpha, beta, cltt, and beam..."

    # (2) normalize, remove mono-/dipole, and mask map to create map_masked;
    map_in /= (1e6 * 2.7)
    map_in = hp.remove_dipole(map_in)
    map_masked = map_in * mask

    # (3) create alm_masked (map2alm on map_masked), cltt_masked (anafast on 
    #     map_masked), and cltt_corrected (dot cltt_masked with mll_inv)

    if (run_type == 'data' or run_type == 'sim'):
        alm_masked = hp.map2alm(map_masked)
    elif (run_type == 'fnl'):
        fn_almg = ('data/fnl_sims/alm_l_%04d_v3.fits' % (nsim,))
        almg = hp.read_alm(fn_almg)
        fn_almng = ('data/fnl_sims/alm_nl_%04d_v3.fits' % (nsim,))
        almng = hp.read_alm(fn_almng)
        alm = almg + fnl * almng
        alm_masked = alm

    cltt_masked = hp.anafast(map_masked)
    cltt_masked = cltt_masked[:nl]
    cltt_corrected = np.dot(mll_inv, cltt_masked)

    # stuff with alpha, beta, r, dr, and beam

    l, r, dr, alpha, beta = np.loadtxt(h._fn_alphabeta, 
                                usecols=(0,1,2,3,4), unpack=True, skiprows=3)

    l = np.unique(l)
    r = np.unique(r)[::-1]

    nr = len(r)

    if (run_type == 'data' or run_type == 'sim'):
        cltt_denom = np.load('output/cltt_theory.npy') #replace with 'output/na_cltt.npy'
        cltt_denom = cltt_denom[:nl]
    elif (run_type == 'fnl'):
        cltt_denom = np.loadtxt('joe/cl_wmap5_bao_sn.dat', usecols=(1,), unpack=True)

    alpha = alpha.reshape(len(l), nr)
    beta = beta.reshape(len(l), nr)
    dr = dr.reshape(len(l), nr)
    dr = dr[0]
    
    if (run_type != 'fnl'):
        beam = np.load(h._fn_beam)
    else:
        #beam = np.ones(len(cltt_denom))
        beam = np.load(h._fn_beam)
        noise = np.zeros(len(cltt_denom))
        nlm = hp.synalm(noise, lmax=nl)

    ####### TEMPORARY -- change beam #######
    #beam = np.ones(len(cltt_denom))
    #noise = np.zeros(len(cltt_denom))
    #nlm = hp.synalm(noise, lmax=nl)
    ########################################

    l = l[:nl]
    beam = beam[:nl]
    alpha = alpha[:nl,:]
    beta = beta[:nl,:]

    if (i_rank == 0):
        print "nr: %i, nl: %i" % (nr, nl)

    f_t2 = time.time()

    if (i_rank == 0):
        print ""
        print "Time to create alms from maps: %.2f s" % (f_t2 - f_t1)
        print "Calculating full skewness power spectrum..."

    cl21 = np.zeros(nl)
    work = np.zeros(1, dtype='i')
    result = np.zeros(nl, dtype='d')

    # master loop
    if (i_rank == 0):

        # send initial jobs

        for i_rank_out in range(1,i_size):

            work = np.array([i_rank_out-1], dtype='i')
            o_comm.Send([work, MPI.INT], dest=i_rank_out, tag=i_work_tag)

        for i_r in range(i_size-1,nr):

            if (i_r % (nr / 10) == 0):
                print "Finished %i%% of jobs... (%.2f s)" % (i_r * 100 / nr,
                time.time() - f_t2)

            work = np.array([i_r], dtype='i')

            o_comm.Recv([result, MPI.DOUBLE], source=MPI.ANY_SOURCE, 
                status=o_status, tag=MPI.ANY_TAG)

            #print "received results from core %i" % o_status.Get_source()

            o_comm.Send([work,MPI.INT], dest=o_status.Get_source(), 
                tag=i_work_tag)

            cl21 += result

        for i_rank_out in range(1,i_size):

            o_comm.Recv([result, MPI.DOUBLE], source=MPI.ANY_SOURCE,
                status=o_status, tag=MPI.ANY_TAG)

            cl21 += result
            print "cl21 = %.6f, result = %.6f" % (np.average(cl21), np.average(result))

            o_comm.Send([np.array([9999], dtype='i'), MPI.INT], 
                dest=o_status.Get_source(), tag=i_die_tag)

    #slave loop:
    else:

        while(1):

            o_comm.Recv([work, MPI.INT], source=0, status=o_status, 
                tag=MPI.ANY_TAG)

            if (o_status.Get_tag() == i_die_tag):

                break

            i_r = work[0]
            #print ("i_r: %i" % i_r)
            #print ("alm_masked: ", alm_masked)
            #print ("cltt_denom: ", cltt_denom)
            #print ("beam: ", beam)
            #print ("mask: ", mask)
            #print ("fn_map: ", fn_map)

            # create Alm, Blm (almxfl with alm_masked and beta / cltt_denom 
            # * beam, etc.)

            Alm = np.zeros(alm_masked.shape[0],complex)
            Blm = np.zeros(alm_masked.shape[0],complex)
            clAB2 = np.zeros(nl+1)
            clABB = np.zeros(nl+1)

            #Alm = hp.almxfl(alm_masked, alpha[:,i_r] / cltt_denom * beam)
            #Blm = hp.almxfl(alm_masked, beta[:,i_r] / cltt_denom * beam)
            #for li in xrange(2,nl):
            #    I = hp.Alm.getidx(nl,li,np.arange(min(nl,li)+1))
            #    Alm[I]=alpha[li-2][i_r]*(alm_masked[I]*beam[li]+nlm[I])/(cltt_denom[li]*beam[li]**2+noise[li])
            #    Blm[I]=beta[li-2][i_r]*(alm_masked[I]*beam[li]+nlm[I])/(cltt_denom[li]*beam[li]**2+noise[li])
            if (run_type == 'fnl'):
                for li in xrange(2,nl):
                    I = hp.Alm.getidx(nl,li,np.arange(min(nl,li)+1))
                    Alm[I]=alpha[li-2][i_r]*(alm_masked[I]*beam[li]+nlm[I])/(cltt_denom[li]*beam[li]**2+noise[li])
                    Blm[I]=beta[li-2][i_r]*(alm_masked[I]*beam[li]+nlm[I])/(cltt_denom[li]*beam[li]**2+noise[li])
            else:
                for li in xrange(2,nl):
                    I = hp.Alm.getidx(nl,li,np.arange(min(nl,li)+1))
                    Alm[I]=alpha[li-2][i_r]*(alm_masked[I])/cltt_denom[li]*beam[li]
                    Blm[I]=beta[li-2][i_r]*(alm_masked[I])/cltt_denom[li]*beam[li]

            ############################# DEBUG ################################
            if i_r == 0:
                cltt_Alm = hp.alm2cl(Alm)
                cltt_Blm = hp.alm2cl(Blm)
                np.savetxt('debug2/cltt_%s_Alm.dat' % run_type, cltt_Alm)
                np.savetxt('debug2/cltt_%s_Blm.dat' % run_type, cltt_Blm)
            ####################################################################

            #An = hp.alm2map(Alm, nside=nside, fwhm=0.00145444104333,
            #    verbose=False)
            #Bn = hp.alm2map(Blm, nside=nside, fwhm=0.00145444104333,
            #    verbose=False)
            An = hp.alm2map(Alm, nside=nside)
            Bn = hp.alm2map(Blm, nside=nside)

            ############################# DEBUG ################################
            if i_r == 0:
                cltt_An = hp.anafast(An)
                cltt_Bn = hp.anafast(Bn)
                np.savetxt('debug2/cltt_%s_An.dat' % run_type, cltt_An)
                np.savetxt('debug2/cltt_%s_Bn.dat' % run_type, cltt_Bn)
            ####################################################################

            An = An * mask
            Bn = Bn * mask

            ############################# DEBUG ################################
            #if i_r == 0:
            #    print "saving alpha, beta for %i" % i_r
            #    np.savetxt('debug2/alpha_ir_%i' % i_r, alpha[:,i_r])
            #    np.savetxt('debug2/beta_ir_%i' % i_r, beta[:,i_r])
            #    print "(An * Bn)[:10] == An[:10] * Bn[:10]:", (An * Bn)[:10] == An[:10] * Bn[:10]
            
            ####################################################################

            B2lm = hp.map2alm(Bn*Bn, lmax=nl)
            ABlm = hp.map2alm(An*Bn, lmax=nl)

            ############################# DEBUG ################################
            if i_r == 0:
                cltt_B2lm = hp.alm2cl(B2lm)
                cltt_ABlm = hp.alm2cl(ABlm)
                np.savetxt('debug2/cltt_%s_B2lm.dat' % run_type, cltt_B2lm)
                np.savetxt('debug2/cltt_%s_ABlm.dat' % run_type, cltt_ABlm)
            ####################################################################

            #clAB2 = hp.alm2cl(Alm, B2lm, lmax=nl)
            #clABB = hp.alm2cl(ABlm, Blm, lmax=nl)
            for li in xrange(2,nl+1):
                I = hp.Alm.getidx(nl,li,np.arange(min(nl,li)+1))
                clAB2[li] = (Alm[I[0]]*B2lm[I[0]].conj()
                        +2.*sum(Alm[I[1:]]*B2lm[I[1:]].conj()))/(2.0*li+1.0)
                clABB[li] = (Blm[I[0]]*ABlm[I[0]].conj()
                        +2.*sum(Blm[I[1:]]*ABlm[I[1:]].conj()))/(2.0*li+1.0)


            ############################# DEBUG ################################
            if i_r == 0:
                np.savetxt('debug2/clAB2_%s.dat' % run_type, clAB2)
                np.savetxt('debug2/clABB_%s.dat' % run_type, clABB)
            ####################################################################

            clAB2 = clAB2[1:]
            clABB = clABB[1:]

            result = np.zeros(nl, dtype='d')
            result += (clAB2 + 2 * clABB) * r[i_r]**2. * dr[i_r]

            ############################# DEBUG ################################
            np.savetxt('debug2/cl21_%s.dat' % run_type, result)
            ####################################################################

            print ("finished work for r=%i, dr=%.2f, avg(alpha)=%.2f, avg(beta)=%.2f, avg(result)=%.4g" % 
                (int(r[i_r]), dr[i_r], np.average(alpha[:,i_r]), 
                    np.average(beta[:,i_r]), np.average(result)))

            ############################# DEBUG ################################
            #if i_r == 0:
            #    print "finished debug -- goodbye!"
            #    exit()
            ####################################################################
            
            o_comm.Send([result,MPI.DOUBLE], dest=0, tag=1)

    f_t8 = time.time()

    if (i_rank == 0):
        print ""
        print ("Saving power spectrum to %s (not mll corrected)" 
            % fn_cl21_no_mll)

        np.savetxt(fn_cl21_no_mll, cl21)

        print ""
        print "Saving power spectrum to %s (mll corrected)" % fn_cl21
        
        cl21 = np.dot(mll_inv, cl21)
        np.savetxt(fn_cl21, cl21)

    return
def main(i_sim=0):

    '''
    MPI Setup
    '''
    o_comm = MPI.COMM_WORLD
    i_rank = o_comm.Get_rank() # current core number -- e.g., i in arange(i_size)
    i_size = o_comm.Get_size() # number of cores assigned to run this program
    o_status = MPI.Status()

    i_work_tag = 0
    i_die_tag = 1

    '''
    Loading and calculating power spectrum components
    '''

    # Get run parameters
    
    s_fn_params = 'data/params.pkl'
    (i_lmax, i_nside, s_fn_map, s_map_name, s_fn_mask, s_fn_mll, s_fn_beam, 
        s_fn_alphabeta, s_fn_cltt) = get_params(s_fn_params)

    #s_fn_cltt = ('sims/na_cltt_sim_%i.npy' % i_sim)

    if (i_rank == 0):

        s_fn_cl21_data = 'output/na_cl21_data_g_sim_%i.dat' % i_sim
        s_fn_cl21_data_no_mll = ('output/na_cl21_data_g_sim_%i_no_mll.dat' 
            % i_sim)

        f_t1 = time.time()

        print ""
        print "Run parameters:"
        print "(Using %i cores)" % i_size
        print "lmax: %i, nside: %i, map name: %s" % (i_lmax, i_nside, s_map_name)
        print "beam: %s, alpha_beta: %s, cltt: %s" % (s_fn_beam, s_fn_alphabeta, s_fn_cltt)

        print ""
        print "Loading ell, r, dr, alpha, beta, cltt, and beam..."

    na_mask = hp.read_map(s_fn_mask)
    #s_fn_mll = 'output/na_mll_%i_lmax.npy' % i_lmax
    s_fn_mll = 'output/na_mll_1499_lmax.npy'
    na_mll = np.load(s_fn_mll)
    na_mll_inv = np.linalg.inv(na_mll)

    na_l, na_r, na_dr, na_alpha, na_beta = np.loadtxt(s_fn_alphabeta, 
                                usecols=(0,1,2,3,4), unpack=True, skiprows=3)

    na_l = np.unique(na_l)
    na_r = np.unique(na_r)[::-1]
    na_l = na_l[:i_lmax]

    i_num_ell = len(na_l)
    i_num_r = len(na_r)

    if (i_rank == 0):
        print "i_num_r: %i, i_num_ell: %i" % (i_num_r, i_num_ell)

    na_alpha = na_alpha.reshape(i_num_ell, i_num_r)
    na_beta = na_beta.reshape(i_num_ell, i_num_r)
    na_dr = na_dr.reshape(i_num_ell, i_num_r)
    na_dr = na_dr[0]

    na_cltt = np.load(s_fn_cltt)
    na_cltt = na_cltt[:i_num_ell]

    na_bl = np.load(s_fn_beam)
    na_bl = na_bl[:i_num_ell]

    # f_t2 = time.time()

    if (i_rank == 0):
        print ""
        print "Calculating full skewness power spectrum..."

    data_run = False
    if data_run:
        s_fn_alm = 'output/na_alm_data.fits'
        na_alm = hp.read_alm(s_fn_alm)
        na_alm = na_alm[:hp.Alm.getsize(i_num_ell)]
    else:
        na_cltt_not_corrected = np.load('output/na_cltt_not_corrected.npy')
        na_alm = hp.synalm(na_cltt_not_corrected, lmax=i_num_ell, verbose=False)

    # f_t3 = time.time()

    na_cl21_data = np.zeros(i_num_ell)
    na_work = np.zeros(1, dtype='i')
    na_result = np.zeros(i_num_ell, dtype='d')

    # master loop
    if (i_rank == 0):

        # send initial jobs

        for i_rank_out in range(1,i_size):

            na_work = np.array([i_rank_out-1], dtype='i')
            o_comm.Send([na_work, MPI.INT], dest=i_rank_out, tag=i_work_tag)

        for i_r in range(i_size-1,i_num_r):

            if (i_r % (i_num_r / 10) == 0):
                print "Finished %i%% of jobs... (%.2f s)" % (i_r * 100 / i_num_r,
                time.time() - f_t1)

            na_work = np.array([i_r], dtype='i')

            o_comm.Recv([na_result, MPI.DOUBLE], source=MPI.ANY_SOURCE, 
                status=o_status, tag=MPI.ANY_TAG)

            #print "received results from core %i" % o_status.Get_source()

            o_comm.Send([na_work,MPI.INT], dest=o_status.Get_source(), 
                tag=i_work_tag)

            na_cl21_data += na_result

        for i_rank_out in range(1,i_size):

            o_comm.Recv([na_result, MPI.DOUBLE], source=MPI.ANY_SOURCE,
                status=o_status, tag=MPI.ANY_TAG)

            na_cl21_data += na_result

            o_comm.Send([np.array([9999], dtype='i'), MPI.INT], 
                dest=o_status.Get_source(), tag=i_die_tag)

    #slave loop:
    else:

        while(1):

            o_comm.Recv([na_work, MPI.INT], source=0, status=o_status, 
                tag=MPI.ANY_TAG)

            if (o_status.Get_tag() == i_die_tag):

                break

            i_r = na_work[0]

            #print "doing work for r = %i on core %i" % (i_r, i_rank)

            na_Alm = hp.almxfl(na_alm, na_alpha[:,i_r] / na_cltt * na_bl)
            na_Blm = hp.almxfl(na_alm, na_beta[:,i_r] / na_cltt * na_bl)

            # f_t4 = time.time()

            na_An = hp.alm2map(na_Alm, nside=i_nside, fwhm=0.00145444104333,
                verbose=False)
            na_Bn = hp.alm2map(na_Blm, nside=i_nside, fwhm=0.00145444104333,
                verbose=False)

            # *REMEMBER TO MULTIPLY BY THE MASK!* -- already doing this in cltt.py...

            na_An = na_An * na_mask
            na_Bn = na_Bn * na_mask

            # f_t5 = time.time()

            #print "starting map2alm for r = %i on core %i" % (i_r, i_rank)

            na_B2lm = hp.map2alm(na_Bn*na_Bn, lmax=i_num_ell)
            na_ABlm = hp.map2alm(na_An*na_Bn, lmax=i_num_ell)

            #print "finished map2alm for r = %i on core %i" % (i_r, i_rank)

            # f_t6 = time.time()

            na_clAB2 = hp.alm2cl(na_Alm, na_B2lm, lmax=i_num_ell)
            na_clABB = hp.alm2cl(na_ABlm, na_Blm, lmax=i_num_ell)

            na_clAB2 = na_clAB2[1:]
            na_clABB = na_clABB[1:]

            #na_clAB2 = na_clAB2[:-1] # just doing this to make things fit...
            #na_clABB = na_clABB[:-1] # just doing this to make things fit...

            #f_t7 = time.time()

            na_result = np.zeros(i_num_ell, dtype='d')
            na_result += (na_clAB2 + 2 * na_clABB) * na_r[i_r]**2. * na_dr[i_r]

            #print "finished work for r = %i on core %i" % (i_r, i_rank)

            o_comm.Send([na_result,MPI.DOUBLE], dest=0, tag=1)

            # print "Load time: %.2f s" % (f_t2 - f_t1)
            # print "synalm time: %.2f s" % (f_t3 - f_t2)
            # print "almxfl time: %.2f s" % ((f_t4 - f_t3) / 2.)
            # print "alm2map time: %.2f s" % ((f_t5 - f_t4) / 2.)
            # print "map2alm time: %.2f s" % ((f_t6 - f_t5) / 2.)
            # print "alm2cl time: %.2f s" % ((f_t7 - f_t6) / 2.)

    f_t8 = time.time()

    if (i_rank == 0):
        print ""
        print ("Saving power spectrum to %s (not mll corrected)" 
            % s_fn_cl21_data_no_mll)

        np.savetxt(s_fn_cl21_data_no_mll, na_cl21_data)

        print ""
        print "Saving power spectrum to %s (mll corrected)" % s_fn_cl21_data
        
        na_cl21_data = np.dot(na_mll_inv, na_cl21_data)
        np.savetxt(s_fn_cl21_data, na_cl21_data)

        # print "Finished in %.2f s" % (f_t8 - f_t1)
        # # print "Load time: %.2f s" % (f_t2 - f_t1)
        # # print "synalm time: %.2f s" % (f_t3 - f_t2)
        # # print "almxfl time: %.2f s" % ((f_t4 - f_t3) / 2.)
        # # print "alm2map time: %.2f s" % ((f_t5 - f_t4) / 2.)
        # # print "map2alm time: %.2f s" % ((f_t6 - f_t5) / 2.)
        # # print "alm2cl time: %.2f s" % ((f_t7 - f_t6) / 2.)

    return
import healpy as hp

nl = 2000

map_smica = hp.read_map('data/CompMap_CMB-smica_2048_R1.11.fits')
mask = hp.read_map('data/CompMap_Mask_2048_R1.00.fits')
map_processed = hp.remove_dipole(map_smica) / 1e6 / 2.7 * mask
alm_data = hp.map2alm(map_processed)
cltt = hp.anafast(map_processed)

np.save('debug/cltt_not_corrected.npy', cltt)

mll = np.load('output/na_mll_2000_lmax.npy')
mll_inv = np.linalg.inv(mll)
cltt_corrected = np.dot(mll_inv, cltt[:nl])
alm_sim = hp.synalm(cltt_corrected[:nl]) #this is probably the issue...

# load and resize things

l, r, dr, alpha, beta = np.loadtxt('data/l_r_alpha_beta.txt', usecols=(0,1,2,3,4), unpack=True, skiprows=3)

lmax = 1499

mll = np.load('output/na_mll_1499_lmax.npy')
mll_inv = np.linalg.inv(mll)

l = np.unique(l)
r = np.unique(r)[::-1]
l = l[:lmax]

nr = len(r)
#nl = 1.7504523623688016e-16*1e12 * np.ones(2500)
#nl = 1.7504523623688016e-16*1e12 * np.ones(2500) *2


# Gaussian beam fwhm 5 arcmin 
#bl = CG.gaussian_beam(2500,5)
#bl = CG.gaussian_beam(2500,5*np.sqrt(hp.nside2pixarea(nside,degrees=True))*60)
bl = CG.gaussian_beam(2500,13)

# Spectrum according to parameter defined above
if generate_new_data==1:
    Cl = cb.generate_spectrum(dd)
    # White noise level defined so that SNR=1 at \ell of 1700
    nl = Cl[900,1]*bl[900]**2*np.ones(2500)
    lmax_temp = Cl.shape[0]-1
    alm = hp.synalm(Cl[:,1])
    dlm = hp.almxfl(alm,bl[:lmax_temp+1])
    nlm = hp.synalm(nl[:lmax_temp+1])
    dlm = dlm+nlm
    #np.save("Dataset_planck2015_900SNR1_13arcmin.npy",dlm)
    plt.figure()
    ell = np.arange(lmax)*np.arange(1,lmax+1)
    plt.plot(ell*(Cl[:lmax,1]*bl[:lmax]**2),label = "$C_\ell b_\ell^2$")
    plt.plot(ell*(nl[:lmax]),label="$n_\ell$")
    plt.plot(ell*(Cl[:lmax,1]*bl[:lmax]**2+nl[:lmax]),"--",label = "$C_\ell b_\ell^2 + n_\ell$")
    plt.ylabel("$\ell (\ell +1) C_\ell$")
    plt.xlabel("$\ell$")
    plt.axvline(900)
    plt.yscale("log")
    plt.legend(loc="best")
    plt.savefig("plots/powerspectrum_WMAPtype.png")
Example #36
 def simulate(self, idx):
     tlm, elm, blm = hp.synalm( [self.cl.cltt, self.cl.clte, self.cl.clee, self.cl.clbb], lmax=self.lmax )
     return tlm, elm, blm
Example #37
def rand_alm_healpy(ps, lmax=None, seed=None, dtype=np.complex128):
	import healpy
	if seed is not None: np.random.seed(seed)
	ps = powspec.sym_compress(ps, scheme="diag")
	return np.asarray(healpy.synalm(ps, lmax=lmax, new=True))
Example #38
l, cl = loadtxt('cl_wmap5_bao_sn.dat',usecols=(0,1),unpack=True)
#bl = loadtxt('bl_V.txt')
bl = ones(cl.shape[0])*1.0
     
# Read in alpha(r) and beta(r)
R, a, b = loadtxt("total.dat", usecols = (0,3,4), unpack=True)
    
# Put alpha and beta in a format conducive to the r dependence
a = a.reshape(500,1999)
b = b.reshape(500,1999)
R = R.reshape(500,1999)

# Define A(r,lm) and B(r,lm)
#nl = ones(cl.shape[0])*2.39643128e-15
nl = ones(cl.shape[0])*0.0
nlm = hp.synalm(nl, lmax=LMAX)
 
dR = 1.1337565
   
#for N in xrange(111,121):    
for N in xrange(7,10):
    print '#########################################################'
    print ""
    print N
    print ""
    print '#########################################################'
    
    #Read In alms and cls
    alm = hp.read_alm('Maps/alm_l_'+str(N)+'.fits')
    flm = hp.read_alm('Maps/alm_nl_'+str(N)+'.fits')
    Alm = zeros(alm.shape[0],complex)
Example #39
cl_tt = dl_tt * 2 * np.pi / (l * (l + 1))
cl_tt[0] = 0
cl_ee = dl_ee * 2 * np.pi / (l * (l + 1))
cl_ee[0] = 0
cl_bb = dl_bb * 2 * np.pi / (l * (l + 1))
cl_bb[0] = 0
cl_te = dl_te * 2 * np.pi / (l * (l + 1))
cl_te[0] = 0
cl_tb = np.zeros_like(cl_te)
cl_eb = np.zeros_like(cl_te)
dl_tb = np.zeros_like(cl_te)
dl_eb = np.zeros_like(cl_te)

nside = 256

alms = hp.synalm([cl_tt, cl_ee, cl_bb, cl_te, cl_eb, cl_tb], lmax=3 * nside - 1, new=True)
map_t, map_e, map_b = hp.alm2map(alms, nside, pol=False)
map_tb, map_q, map_u = hp.alm2map(alms, nside, pol=True)

# hp.mollview(map_t);
# hp.mollview(map_e);
# hp.mollview(map_b);
# hp.mollview(map_tb);
# hp.mollview(map_q);
# hp.mollview(map_u);
# plt.show()

cl_tt_a, cl_ee_a, cl_bb_a, cl_te_a, cl_eb_a, cl_tb_a = hp.anafast([map_t, map_e, map_b], pol=False)
cl_tt_b, cl_ee_b, cl_bb_b, cl_te_b, cl_eb_b, cl_tb_b = hp.anafast([map_tb, map_q, map_u], pol=True)
l_d = np.arange(len(cl_tt_a))
plt.plot(l[: len(l_d)], dl_tt[: len(l_d)], "k-")
def main():

    '''
    Loading and calculating power spectrum components
    '''

    # Get run parameters
    
    s_fn_params = 'data/params.pkl'
    (i_lmax, i_nside, s_fn_map, s_map_name, s_fn_mask, s_fn_mll, s_fn_beam, 
        s_fn_alphabeta, s_fn_cltt) = get_params(s_fn_params)

    s_fn_cl21_data = 'output/na_cl21_data.dat'

    f_t1 = time.time()

    print ""
    print "Run parameters:"
    print "lmax: %i, nside: %i, map name: %s" % (i_lmax, i_nside, s_map_name)
    print "beam: %s, alpha_beta: %s, cltt: %s" % (s_fn_beam, s_fn_alphabeta, s_fn_cltt)

    print ""
    print "Loading ell, r, dr, alpha, beta, cltt, and beam..."

    na_l, na_r, na_dr, na_alpha, na_beta = np.loadtxt(s_fn_alphabeta, 
                                usecols=(0,1,2,3,4), unpack=True, skiprows=3)

    na_l = np.unique(na_l)
    na_r = np.unique(na_r)[::-1]
    na_l = na_l[:i_lmax]

    i_num_ell = len(na_l)
    i_num_r = len(na_r)

    print "i_num_r: %i, i_num_ell: %i" % (i_num_r, i_num_ell)

    na_alpha = na_alpha.reshape(i_num_ell, i_num_r)
    na_beta = na_beta.reshape(i_num_ell, i_num_r)
    na_dr = na_dr.reshape(i_num_ell, i_num_r)
    na_dr = na_dr[0]

    na_cltt = np.load(s_fn_cltt)
    na_cltt = na_cltt[:i_num_ell]

    na_bl = np.load(s_fn_beam)
    na_bl = na_bl[:i_num_ell]

    # f_t2 = time.time()

    print ""
    print "Calculating full skewness power spectrum..."

    na_alm = hp.synalm(na_cltt, lmax=i_num_ell, verbose=False)

    # f_t3 = time.time()

    na_cl21_data = np.zeros(i_num_ell)

    for i_r in range(i_num_r):

        if (i_r % (i_num_r / 10) == 0):
            print "Finished %i%% of jobs... (%.2f s)" % (i_r * 100 / i_num_r,
            time.time() - f_t1)


        na_Alm = hp.almxfl(na_alm, na_alpha[:,i_r] / na_cltt * na_bl)
        na_Blm = hp.almxfl(na_alm, na_beta[:,i_r] / na_cltt * na_bl)

        # f_t4 = time.time()

        na_An = hp.alm2map(na_Alm, nside=i_nside, fwhm=0.00145444104333, 
            verbose=False)
        na_Bn = hp.alm2map(na_Blm, nside=i_nside, fwhm=0.00145444104333, 
            verbose=False)

        # f_t5 = time.time()

        #print "starting map2alm for r = %i on core %i" % (i_r, i_rank)

        na_B2lm = hp.map2alm(na_Bn*na_Bn, lmax=i_num_ell)
        na_ABlm = hp.map2alm(na_An*na_Bn, lmax=i_num_ell)

        #print "finished map2alm for r = %i on core %i" % (i_r, i_rank)

        # f_t6 = time.time()

        na_clAB2 = hp.alm2cl(na_Alm, na_B2lm, lmax=i_num_ell)
        na_clABB = hp.alm2cl(na_ABlm, na_Blm, lmax=i_num_ell)

        na_clAB2 = na_clAB2[1:]
        na_clABB = na_clABB[1:]

        #f_t7 = time.time()

        na_cl21_data += (na_clAB2 + 2 * na_clABB) * na_r[i_r]**2. * na_dr[i_r]

    f_t8 = time.time()

    print ""
    print "Saving power spectrum to %s" % s_fn_cl21_data

    np.savetxt(s_fn_cl21_data, na_cl21_data)

        # print "Finished in %.2f s" % (f_t8 - f_t1)
        # # print "Load time: %.2f s" % (f_t2 - f_t1)
        # # print "synalm time: %.2f s" % (f_t3 - f_t2)
        # # print "almxfl time: %.2f s" % ((f_t4 - f_t3) / 2.)
        # # print "alm2map time: %.2f s" % ((f_t5 - f_t4) / 2.)
        # # print "map2alm time: %.2f s" % ((f_t6 - f_t5) / 2.)
        # # print "alm2cl time: %.2f s" % ((f_t7 - f_t6) / 2.)

    return
Example #41
def main(i_sim=0):

    '''
    MPI Setup
    '''
    o_comm = MPI.COMM_WORLD
    i_rank = o_comm.Get_rank() # current core number -- e.g., i in arange(i_size)
    i_size = o_comm.Get_size() # number of cores assigned to run this program
    o_status = MPI.Status()

    i_work_tag = 0
    i_die_tag = 1

    '''
    Loading and calculating power spectrum components
    '''

    # Get run parameters
    
    s_fn_params = 'data/params.pkl'
    (i_lmax, i_nside, s_fn_map, s_map_name, s_fn_mask, s_fn_mll, s_fn_beam, 
        s_fn_alphabeta, s_fn_cltt) = get_params(s_fn_params)

    s_fn_cltt = ('sims/na_cltt_sim_%i.npy' % i_sim)

    if (i_rank == 0):

        f_t1 = time.time()

        print ""
        print "Run parameters:"
        print "(Using %i cores)" % i_size
        print "lmax: %i, nside: %i, map name: %s" % (i_lmax, i_nside, s_map_name)
        print "beam: %s, alpha_beta: %s, cltt: %s" % (s_fn_beam, s_fn_alphabeta, s_fn_cltt)

        print ""
        print "Loading ell, r, dr, alpha, beta, cltt, and beam..."

    na_l, na_r, na_dr, na_alpha, na_beta = np.loadtxt(s_fn_alphabeta, 
                                usecols=(0,1,2,3,4), unpack=True, skiprows=3)

    na_l = np.unique(na_l)
    na_r = np.unique(na_r)[::-1]
    na_l = na_l[:i_lmax]

    i_num_ell = len(na_l)
    i_num_r = len(na_r)

    na_alpha = na_alpha.reshape(i_num_ell, i_num_r)
    na_beta = na_beta.reshape(i_num_ell, i_num_r)
    na_dr = na_dr.reshape(i_num_ell, i_num_r)
    na_dr = na_dr[0]

    if (i_rank == 0):
        print "(sizes from file load)"
        print "i_num_r: %i, i_num_ell: %i" % (i_num_r, i_num_ell)

    if (len(sys.argv) > 2):
        i_lmax_run = int(sys.argv[2])
    else:
        i_lmax_run = i_lmax
    if (len(sys.argv) > 3):
        i_num_r_run = int(sys.argv[3])
    else:
        i_num_r_run = i_num_r

    i_lmax_run = min(i_lmax_run, len(na_l))
    i_num_r_run = min(i_num_r, i_num_r_run)

    i_r_steps = i_num_r / i_num_r_run

    na_mask = hp.read_map(s_fn_mask)
    s_fn_mll = 'output/na_mll_%i_lmax.npy' % i_lmax_run
    na_mll = np.load(s_fn_mll)
    na_mll_inv = np.linalg.inv(na_mll)

    if (i_rank == 0):
        print "(sizes for run)"
        print "i_num_r_run: %i, i_lmax_run: %i" % (i_num_r_run, i_lmax_run)

    na_l = na_l[:i_lmax_run]
    na_r = na_r[::i_r_steps]
    na_dr = na_dr[::i_r_steps]

    na_alpha = na_alpha[:i_lmax_run, ::i_r_steps]
    na_beta = na_beta[:i_lmax_run, ::i_r_steps]

    na_cltt = np.load(s_fn_cltt)
    na_cltt = na_cltt[:i_lmax_run]

    na_bl = np.load(s_fn_beam)
    na_bl = na_bl[:i_lmax_run]

    # f_t2 = time.time()

    if (i_rank == 0):
        print ""
        print "Calculating full kurtosis power spectra..."

    na_alm = hp.synalm(na_cltt, lmax=i_lmax_run, verbose=False)

    # f_t3 = time.time()

    na_work = np.zeros(2, dtype='i')
    na_result = np.zeros((2,i_lmax_run), dtype='d')
    li_dims = [i_num_r_run, i_num_r_run]

    # master loop
    if (i_rank == 0):

        na_kl22_data = np.zeros(i_lmax_run)
        na_kl31_data = np.zeros(i_lmax_run)

        # send initial jobs

        for i_rank_out in range(1, i_size):

            na_work = np.array(cart_index(i_rank_out-1, li_dims), dtype='i')
            o_comm.Send([na_work, MPI.INT], dest=i_rank_out, tag=i_work_tag)

        na_work = np.array(cart_index(i_size-1, li_dims), dtype='i')
        i_r1_start = na_work[0]
        i_r2_start = na_work[1]

        for i_r1 in range(i_r1_start, i_num_r_run):

            if (i_r1 % (i_num_r / 10) == 0):
                print "Finished %i%% of jobs... (%.2f s)" % (i_r1 * 100 / i_num_r_run,
                time.time() - f_t1)

            for i_r2 in range(i_r2_start, i_num_r_run):

                na_work = np.array([i_r1, i_r2], dtype='i')

                o_comm.Recv([na_result, MPI.DOUBLE], source=MPI.ANY_SOURCE, 
                    status=o_status, tag=MPI.ANY_TAG)

                #print "received results from core %i" % o_status.Get_source()

                o_comm.Send([na_work,MPI.INT], dest=o_status.Get_source(), 
                    tag=i_work_tag)

                na_kl22_data += na_result[0]
                na_kl31_data += na_result[1]

        for i_rank_out in range(1, i_size):

            o_comm.Recv([na_result, MPI.DOUBLE], source=MPI.ANY_SOURCE,
                status=o_status, tag=MPI.ANY_TAG)

            na_kl22_data += na_result[0]
            na_kl31_data += na_result[1]

            o_comm.Send([np.array([9999], dtype='i'), MPI.INT], 
                dest=o_status.Get_source(), tag=i_die_tag)

    #slave loop:
    else:

        while(1):

            o_comm.Recv([na_work, MPI.INT], source=0, status=o_status, 
                tag=MPI.ANY_TAG)

            if (o_status.Get_tag() == i_die_tag):

                break

            i_r1 = na_work[0]
            i_r2 = na_work[1]

            #print "doing work for r = %i on core %i" % (i_r, i_rank)

            na_Almr1 = hp.almxfl(na_alm, na_alpha[:,i_r1] / na_cltt * na_bl)
            na_Blmr1 = hp.almxfl(na_alm, na_beta[:,i_r1] / na_cltt * na_bl)
            na_Almr2 = hp.almxfl(na_alm, na_alpha[:,i_r2] / na_cltt * na_bl)
            na_Blmr2 = hp.almxfl(na_alm, na_beta[:,i_r2] / na_cltt * na_bl)

            # f_t4 = time.time() #all da maps

            na_Ar1n = hp.alm2map(na_Almr1, nside=i_nside, fwhm=0.00145444104333,
                verbose=False)
            na_Br1n = hp.alm2map(na_Blmr1, nside=i_nside, fwhm=0.00145444104333,
                verbose=False)
            na_Ar2n = hp.alm2map(na_Almr2, nside=i_nside, fwhm=0.00145444104333,
                verbose=False)
            na_Br2n = hp.alm2map(na_Blmr2, nside=i_nside, fwhm=0.00145444104333,
                verbose=False)

            na_Ar1n = na_Ar1n * na_mask
            na_Br1n = na_Br1n * na_mask
            na_Ar2n = na_Ar2n * na_mask
            na_Br2n = na_Br2n * na_mask

            # f_t5 = time.time()

            #print "starting map2alm for r = %i on core %i" % (i_r, i_rank)

            na_ABlmr1 = hp.map2alm(na_Ar1n*na_Br1n, lmax=i_lmax_run)
            if i_r1 == i_r2:
                na_B2lmr1 = hp.map2alm(na_Br1n*na_Br1n, lmax=i_lmax_run)
                na_AB2lmr1 = hp.map2alm(na_Ar1n*na_Br1n*na_Br1n, lmax=i_lmax_run)
            na_ABAlmr1 = hp.map2alm(na_Ar1n*na_Br1n*na_Ar1n, lmax=i_lmax_run)

            na_ABlmr2 = hp.map2alm(na_Ar2n*na_Br2n, lmax=i_lmax_run)
            na_B2lmr2 = hp.map2alm(na_Br2n*na_Br2n, lmax=i_lmax_run)            

            #print "finished map2alm for r = %i on core %i" % (i_r, i_rank)

            # f_t6 = time.time()

            na_Jl_ABA_B = hp.alm2cl(na_ABAlmr1, na_Blmr2, lmax=i_lmax_run)
            na_Jl_AB_AB = hp.alm2cl(na_ABlmr1, na_ABlmr2, lmax=i_lmax_run)

            na_Jl_ABA_B = na_Jl_ABA_B[1:]
            na_Jl_AB_AB = na_Jl_AB_AB[1:]

            if i_r1 == i_r2:

                na_Ll_AB2_B = hp.alm2cl(na_AB2lmr1, na_Blmr1, lmax=i_lmax_run)
                na_Ll_AB_B2 = hp.alm2cl(na_ABlmr1, na_B2lmr1, lmax=i_lmax_run)

                na_Ll_AB2_B = na_Ll_AB2_B[1:]
                na_Ll_AB_B2 = na_Ll_AB_B2[1:]

            #f_t7 = time.time()

            na_result = np.zeros((2,i_lmax_run), dtype='d')
            if i_r1 == i_r2:
                na_result[0] += ((5./3.)**2. * na_Jl_AB_AB 
                * na_r[i_r1]**2. * na_dr[i_r1] * na_r[i_r2]**2. * na_dr[i_r2] 
                + 2. * na_Ll_AB_B2 * na_r[i_r1]**2. * na_dr[i_r1]) #kl22
                na_result[1] += ((5./3.)**2. * na_Jl_ABA_B 
                * na_r[i_r1]**2. * na_dr[i_r1] * na_r[i_r2]**2. * na_dr[i_r2] 
                + 2. * na_Ll_AB2_B * na_r[i_r1]**2. * na_dr[i_r1]) #kl31
            else:
                na_result[0] += ((5./3.)**2. * na_Jl_AB_AB 
                * na_r[i_r1]**2. * na_dr[i_r1] * na_r[i_r2]**2. * na_dr[i_r2]) #kl22
                na_result[1] += ((5./3.)**2. * na_Jl_ABA_B 
                * na_r[i_r1]**2. * na_dr[i_r1] * na_r[i_r2]**2. * na_dr[i_r2]) #kl31



            #print "finished work for r = %i on core %i" % (i_r, i_rank)

            o_comm.Send([na_result,MPI.DOUBLE], dest=0, tag=1)

            # print "Load time: %.2f s" % (f_t2 - f_t1)
            # print "synalm time: %.2f s" % (f_t3 - f_t2)
            # print "almxfl time: %.2f s" % ((f_t4 - f_t3) / 2.)
            # print "alm2map time: %.2f s" % ((f_t5 - f_t4) / 2.)
            # print "map2alm time: %.2f s" % ((f_t6 - f_t5) / 2.)
            # print "alm2cl time: %.2f s" % ((f_t7 - f_t6) / 2.)

    f_t8 = time.time()

    if (i_rank == 0):

        s_fn_kl22_data_no_mll = 'output/na_kl22_data_g_sim_%i_%i_rsteps_%i_lmax_no_mll.dat' % (i_sim, i_num_r_run, i_lmax_run)
        s_fn_kl31_data_no_mll = 'output/na_kl31_data_g_sim_%i_%i_rsteps_%i_lmax_no_mll.dat' % (i_sim, i_num_r_run, i_lmax_run)

        print ""
        print "Saving power spectrum to %s (not mll corrected)" % s_fn_kl22_data_no_mll
        print "Saving power spectrum to %s (not mll corrected)" % s_fn_kl31_data_no_mll

        np.savetxt(s_fn_kl22_data_no_mll, na_kl22_data)
        np.savetxt(s_fn_kl31_data_no_mll, na_kl31_data)

        s_fn_kl22_data = 'output/na_kl22_data_g_sim_%i_%i_rsteps_%i_lmax.dat' % (i_sim, i_num_r_run, i_lmax_run)
        s_fn_kl31_data = 'output/na_kl31_data_g_sim_%i_%i_rsteps_%i_lmax.dat' % (i_sim, i_num_r_run, i_lmax_run)

        print ""
        print "Saving power spectrum to %s" % s_fn_kl22_data
        print "Saving power spectrum to %s" % s_fn_kl31_data

        na_kl22_data = np.dot(na_mll_inv, na_kl22_data)
        na_kl31_data = np.dot(na_mll_inv, na_kl31_data)
        np.savetxt(s_fn_kl22_data, na_kl22_data)
        np.savetxt(s_fn_kl31_data, na_kl31_data)

        # print "Finished in %.2f s" % (f_t8 - f_t1)
        # # print "Load time: %.2f s" % (f_t2 - f_t1)
        # # print "synalm time: %.2f s" % (f_t3 - f_t2)
        # # print "almxfl time: %.2f s" % ((f_t4 - f_t3) / 2.)
        # # print "alm2map time: %.2f s" % ((f_t5 - f_t4) / 2.)
        # # print "map2alm time: %.2f s" % ((f_t6 - f_t5) / 2.)
        # # print "alm2cl time: %.2f s" % ((f_t7 - f_t6) / 2.)

    return