Example #1
 def test_workspace_deprojection_bias(self):
     #Test deprojection_bias
     c = nmt.deprojection_bias(self.f0, self.f0, self.n_good)
     self.assertEqual(c.shape, (1, self.f0.fl.lmax + 1))
     with self.assertRaises(ValueError):
         c = nmt.deprojection_bias(self.f0, self.f0, self.n_bad)
     with self.assertRaises(ValueError):
         c = nmt.deprojection_bias(self.f0, self.f0, self.n_half)
     with self.assertRaises(RuntimeError):
         c = nmt.deprojection_bias(self.f0, self.f0_half, self.n_good)
Example #2
def test_lite_errors():
    f0 = nmt.NmtField(WT.msk, [WT.mps[0]], n_iter=0)
    fl = nmt.NmtField(WT.msk, [WT.mps[0]],
                      templates=[[WT.tmp[0]]], n_iter=0,
                      lite=True)
    with pytest.raises(ValueError):  # Needs spin
        fe = nmt.NmtField(WT.msk, None)
    fe = nmt.NmtField(WT.msk, None, spin=0)

    for f in [fl, fe]:
        with pytest.raises(RuntimeError):  # No deprojection bias
            nmt.deprojection_bias(f0, f, np.zeros([1, 3*WT.nside]))
        with pytest.raises(RuntimeError):  # No deprojection bias
            nmt.uncorr_noise_deprojection_bias(fl, WT.mps[0])
    with pytest.raises(RuntimeError):  # No C_l without maps
        nmt.compute_coupled_cell(f0, fe)
Example #3
 def test_lite_cont(self):
     f0 = nmt.NmtField(self.msk, [self.mps[0]], templates=[[self.tmp[0]]])
     f2 = nmt.NmtField(self.msk, [self.mps[1], self.mps[2]],
                       templates=[[self.tmp[1], self.tmp[2]]])
     f2l = nmt.NmtField(self.msk, [self.mps[1], self.mps[2]],
                        templates=[[self.tmp[1], self.tmp[2]]],
                        lite=True)
     f2e = nmt.NmtField(self.msk, None, lite=True, spin=2)
     clth = np.array([self.clte, 0*self.clte])
     nlth = np.array([self.nlte, 0*self.nlte])
     w = nmt.NmtWorkspace()
     w.compute_coupling_matrix(f0, f2e, self.b)
     clb = nlth
     dlb = nmt.deprojection_bias(f0, f2, clth+nlth)
     clb += dlb
     cl = w.decouple_cell(nmt.compute_coupled_cell(f0, f2l),
                          cl_bias=clb)
     tl = np.loadtxt("test/benchmarks/bm_yc_np_c02.txt",
                     unpack=True)[1:, :]
     tlb = np.loadtxt("test/benchmarks/bm_yc_np_cb02.txt",
                      unpack=True)[1:, :]
     self.assertTrue((np.fabs(dlb-tlb) <=
                      np.fmin(np.fabs(dlb),
                              np.fabs(tlb))*1E-5).all())
     self.assertTrue((np.fabs(cl-tl) <=
                      np.fmin(np.fabs(cl),
                              np.fabs(tl))*1E-5).all())
Example #4
def test_workspace_deprojection_bias():
    # Test deprojection_bias
    c = nmt.deprojection_bias(WT.f0, WT.f0, WT.n_good)
    assert c.shape == (1, WT.f0.fl.lmax+1)
    with pytest.raises(ValueError):
        nmt.deprojection_bias(WT.f0, WT.f0, WT.n_bad)
    with pytest.raises(ValueError):
        nmt.deprojection_bias(WT.f0, WT.f0, WT.n_half)
    with pytest.raises(RuntimeError):
        nmt.deprojection_bias(WT.f0, WT.f0_half, WT.n_good)
Example #5
    def mastest(self, wtemp, wpure):
        prefix = "test/benchmarks/bm"
        if wtemp:
            prefix += "_yc"
            f0 = nmt.NmtField(self.msk, [self.mps[0]],
                              templates=[[self.tmp[0]]])
            f2 = nmt.NmtField(self.msk, [self.mps[1], self.mps[2]],
                              templates=[[self.tmp[1], self.tmp[2]]],
                              purify_b=wpure)
        else:
            prefix += "_nc"
            f0 = nmt.NmtField(self.msk, [self.mps[0]])
            f2 = nmt.NmtField(self.msk, [self.mps[1], self.mps[2]],
                              purify_b=wpure)
        f = [f0, f2]

        if wpure:
            prefix += "_yp"
        else:
            prefix += "_np"

        for ip1 in range(2):
            for ip2 in range(ip1, 2):
                if ip1 == ip2 == 0:
                    clth = np.array([self.cltt])
                    nlth = np.array([self.nltt])
                elif ip1 == ip2 == 1:
                    clth = np.array([self.clee, 0*self.clee,
                                     0*self.clbb, self.clbb])
                    nlth = np.array([self.nlee, 0*self.nlee,
                                     0*self.nlbb, self.nlbb])
                else:
                    clth = np.array([self.clte, 0*self.clte])
                    nlth = np.array([self.nlte, 0*self.nlte])
                w = nmt.NmtWorkspace()
                w.compute_coupling_matrix(f[ip1], f[ip2], self.b)
                clb = nlth
                if wtemp:
                    dlb = nmt.deprojection_bias(f[ip1], f[ip2], clth+nlth)
                    tlb = np.loadtxt(prefix+'_cb%d%d.txt' % (2*ip1, 2*ip2),
                                     unpack=True)[1:, :]
                    self.assertTrue((np.fabs(dlb-tlb) <=
                                     np.fmin(np.fabs(dlb),
                                             np.fabs(tlb))*1E-5).all())
                    clb += dlb
                cl = w.decouple_cell(nmt.compute_coupled_cell(f[ip1], f[ip2]),
                                     cl_bias=clb)
                tl = np.loadtxt(prefix+'_c%d%d.txt' % (2*ip1, 2*ip2),
                                unpack=True)[1:, :]
                self.assertTrue((np.fabs(cl-tl) <=
                                 np.fmin(np.fabs(cl),
                                         np.fabs(tl))*1E-5).all())
Example #6
def mastest(wtemp, wpure, do_teb=False):
    prefix = "test/benchmarks/bm"
    if wtemp:
        prefix += "_yc"
        f0 = nmt.NmtField(WT.msk, [WT.mps[0]],
                          templates=[[WT.tmp[0]]])
        f2 = nmt.NmtField(WT.msk, [WT.mps[1], WT.mps[2]],
                          templates=[[WT.tmp[1], WT.tmp[2]]],
                          purify_b=wpure)
    else:
        prefix += "_nc"
        f0 = nmt.NmtField(WT.msk, [WT.mps[0]])
        f2 = nmt.NmtField(WT.msk,
                          [WT.mps[1], WT.mps[2]],
                          purify_b=wpure)
    f = [f0, f2]

    if wpure:
        prefix += "_yp"
    else:
        prefix += "_np"

    for ip1 in range(2):
        for ip2 in range(ip1, 2):
            if ip1 == ip2 == 0:
                clth = np.array([WT.cltt])
                nlth = np.array([WT.nltt])
            elif ip1 == ip2 == 1:
                clth = np.array([WT.clee, 0*WT.clee,
                                 0*WT.clbb, WT.clbb])
                nlth = np.array([WT.nlee, 0*WT.nlee,
                                 0*WT.nlbb, WT.nlbb])
            else:
                clth = np.array([WT.clte, 0*WT.clte])
                nlth = np.array([WT.nlte, 0*WT.nlte])
            w = nmt.NmtWorkspace()
            w.compute_coupling_matrix(f[ip1], f[ip2], WT.b)
            clb = nlth
            if wtemp:
                dlb = nmt.deprojection_bias(f[ip1], f[ip2],
                                            clth+nlth)
                tlb = np.loadtxt(prefix+'_cb%d%d.txt' % (2*ip1, 2*ip2),
                                 unpack=True)[1:, :]
                assert ((np.fabs(dlb-tlb) <=
                         np.fmin(np.fabs(dlb),
                                 np.fabs(tlb))*1E-5).all())
                clb += dlb
            cl = w.decouple_cell(nmt.compute_coupled_cell(f[ip1],
                                                          f[ip2]),
                                 cl_bias=clb)
            tl = np.loadtxt(prefix+'_c%d%d.txt' % (2*ip1, 2*ip2),
                            unpack=True)[1:, :]
            assert ((np.fabs(cl-tl) <=
                     np.fmin(np.fabs(cl),
                             np.fabs(tl))*1E-5).all())

    # TEB
    if do_teb:
        clth = np.array([WT.cltt, WT.clte, 0*WT.clte,
                         WT.clee, 0*WT.clee, 0*WT.clbb,
                         WT.clbb])
        nlth = np.array([WT.nltt, WT.nlte, 0*WT.nlte,
                         WT.nlee, 0*WT.nlee, 0*WT.nlbb,
                         WT.nlbb])
        w = nmt.NmtWorkspace()
        w.compute_coupling_matrix(f[0], f[1], WT.b, is_teb=True)
        c00 = nmt.compute_coupled_cell(f[0], f[0])
        c02 = nmt.compute_coupled_cell(f[0], f[1])
        c22 = nmt.compute_coupled_cell(f[1], f[1])
        cl = np.array([c00[0], c02[0], c02[1], c22[0],
                       c22[1], c22[2], c22[3]])
        t00 = np.loadtxt(prefix+'_c00.txt', unpack=True)[1:, :]
        t02 = np.loadtxt(prefix+'_c02.txt', unpack=True)[1:, :]
        t22 = np.loadtxt(prefix+'_c22.txt', unpack=True)[1:, :]
        tl = np.array([t00[0], t02[0], t02[1], t22[0],
                       t22[1], t22[2], t22[3]])
        cl = w.decouple_cell(cl, cl_bias=nlth)
        assert ((np.fabs(cl-tl) <=
                 np.fmin(np.fabs(cl),
                         np.fabs(tl))*1E-5).all())
Example #7
clb22 = np.array([
    nlee[:3 * nside_out], 0 * nlee[:3 * nside_out], 0 * nlbb[:3 * nside_out],
    nlbb[:3 * nside_out]
])
c22 = w22.decouple_cell(nmt.compute_coupled_cell(f2, f2), cl_bias=clb22)
w22.write_to(prefix + '_w22.dat')
np.savetxt(prefix + "_c22.txt",
           np.transpose([leff, c22[0], c22[1], c22[2], c22[3]]))

#With contaminants
prefix = 'bm_yc_np'
f0 = nmt.NmtField(mask, [dl], templates=[[sl]])
f2 = nmt.NmtField(mask, [dw_q, dw_u], templates=[[sw_q, sw_u]])
w00 = nmt.NmtWorkspace()
w00.compute_coupling_matrix(f0, f0, b)
clb00 = nmt.deprojection_bias(f0, f0, [(cltt + nltt)[:3 * nside_out]])
np.savetxt(prefix + '_cb00.txt', np.transpose([lfull, clb00[0]]))
clb00 += np.array([nltt[:3 * nside_out]])
c00 = w00.decouple_cell(nmt.compute_coupled_cell(f0, f0), cl_bias=clb00)
w00.write_to(prefix + '_w00.dat')
np.savetxt(prefix + "_c00.txt", np.transpose([leff, c00[0]]))
w02 = nmt.NmtWorkspace()
w02.compute_coupling_matrix(f0, f2, b)
clb02 = nmt.deprojection_bias(
    f0, f2, [(clte + nlte)[:3 * nside_out], 0 * clte[:3 * nside_out]])
np.savetxt(prefix + '_cb02.txt', np.transpose([lfull, clb02[0], clb02[1]]))
clb02 += np.array([nlte[:3 * nside_out], 0 * nlte[:3 * nside_out]])
c02 = w02.decouple_cell(nmt.compute_coupled_cell(f0, f2), cl_bias=clb02)
w02.write_to(prefix + '_w02.dat')
np.savetxt(prefix + "_c02.txt", np.transpose([leff, c02[0], c02[1]]))
w22 = nmt.NmtWorkspace()
Example #8
    prefix + "_cl_th.txt",
    np.transpose([
        b.get_effective_ells(), cl22_th[0], cl22_th[1], cl22_th[2], cl22_th[3]
    ]))

#Compute noise and deprojection bias
if not os.path.isfile(prefix + "_clb22.npy"):
    print("Computing deprojection and noise bias 22")
    #Compute noise bias
    clb22 = w22.couple_cell(
        [nlee / beam**2, 0 * nlee, 0 * nlbb, nlbb / beam**2])
    #Compute deprojection bias
    if w_cont and (not o.no_deproject) and (not o.no_debias):
        #Signal contribution
        clb22 += nmt.deprojection_bias(
            f2, f2,
            [clee * beam**2 + nlee, 0 * clee, 0 * clbb, clbb * beam**2 + nlbb])
    np.save(prefix + "_clb22", clb22)
else:
    clb22 = np.load(prefix + "_clb22.npy")

#Compute mean and variance over nsims simulations
cl22_all = []
for i in np.arange(nsims):
    #if i%100==0 :
    print("%d-th sim" % (i + o.isim_ini))

    if not os.path.isfile(prefix + "_cl_%04d.txt" % (o.isim_ini + i)):
        np.random.seed(1000 + o.isim_ini + i)
        f2 = get_fields()
        cl22 = w22.decouple_cell(nmt.compute_coupled_cell(f2, f2),
Example #9
def process_catalog(o) :

    #Read z-binning
    print "Bins"
    z0_bins,zf_bins,lmax_bins=np.loadtxt(o.fname_bins_z,unpack=True)
    try:
        nbins=len(z0_bins)
    except:
        nbins=1

    cat=fc.Catalog(read_from=o.fname_in)
    

    #Get weights, compute binary mask based on weights, and apodize it if needed
    print "Window"
    mask=Mask(cat,o.nside,o.theta_apo)
    nside=mask.nside
    tot_area=4.*np.pi*np.sum(mask.weights)/len(mask.weights)

    #Get contaminant templates
    if "none" not in o.templates_fname :
        templates=[[t] for t in hp.read_map(o.templates_fname,field=None)]
        ntemp=len(templates)
    else :
        templates=None
        ntemp=0

    #Generate bandpowers binning scheme (we're assuming all maps will use the same bandpowers!)
    print "Bandpowers"
    bpw=nmt.NmtBin(nside,nlb=o.delta_ell)
    ell_eff=bpw.get_effective_ells()
    tracers=[]

    #Generate tracers
    print "Maps"
    #TODO: pass extra sampling parameters
    zs,nzs,mps=bin_catalog(cat,z0_bins,zf_bins,mask)
    if mrank!=0 :
        return
    #Make sure that we have an iterable
    try:
        len(lmax_bins)
    except:
        lmax_bins=[lmax_bins]
    for zar,nzar,mp,lmax in zip(zs,nzs,mps,lmax_bins):
        zav = np.average(zar,weights=nzar)
        print "-- z-bin: %3.2f "%zav
        tracers.append(Tracer(mp,zar,nzar,lmax,mask,templates=templates))
        if o.save_map:
            hp.write_map("map_%3.2f.fits"%zav,mp)
        cat.rewind()
        
    print "Compute power spectra"
    #Compute coupling matrix
    #TODO: (only done once, assuming all maps have the same mask!)
    print "  Computing coupling matrix"
    w=nmt.NmtWorkspace()
    if not(os.path.isfile(o.nmt_workspace)) :
        w.compute_coupling_matrix(tracers[0].field,tracers[0].field,bpw)
        if o.nmt_workspace!="none" :
            w.write_to(o.nmt_workspace)
    else :
        w.read_from(o.nmt_workspace)

    #Compute all cross-correlations
    def compute_master(fa,fb,wsp,clb=None,cln=None) :
        cl_coupled=nmt.compute_coupled_cell(fa,fb)
        cl_decoupled=wsp.decouple_cell(cl_coupled,cl_bias=clb,cl_noise=cln)
        return cl_decoupled

    #If attempting to deproject contaminant templates, we need an estimate of the true power spectra.
    #This can be done interatively from a first guess using cl_bias=0, but I haven't coded that up yet.
    #For the moment we will use cl_guess=0.
    cl_guess=np.zeros(3*nside)
    t1 = time()
    print "  Computing power spectrum"
    cls_all={}
    for b1 in np.arange(nbins) :
        f1=tracers[b1].field
        for b2 in np.arange(b1,nbins) :
            f2=tracers[b2].field
            if (b1==b2) and o.sub_sn :
                cl_noise=tracers[b1].shotnoise*np.ones([1,3*nside])
            else :
                cl_noise=None
            if ntemp>0 :
                cl_bias=nmt.deprojection_bias(f1,f2,w,cl_guess)
            else :
                cl_bias=None
            cls_all[(b1,b2)]=compute_master(f1,f2,w,clb=cl_bias,cln=cl_noise)[0]

        print 'Computed bin: ', b1, b2, ' in ', time()-t1, ' s'
        if debug:
            plt.figure()
            plt.plot(ell_eff,cls_all[(b1,b1)])
            plt.xscale('log')
            plt.yscale('log')
            plt.xlabel(r'$l$')
            plt.ylabel(r'$C_{l}$')
            plt.show()
  
    print "Translating into SACC"
    #Transform everything into SACC format
    #1- Generate SACC tracers
    stracers=[sacc.Tracer("tr_b%d"%i,"point",t.zarr,t.nzarr,exp_sample="gals")
              for i,t in enumerate(tracers)]

    #2- Define SACC binning
    typ,ell,t1,q1,t2,q2=[],[],[],[],[],[]
    for i1 in np.arange(nbins) :
        for i2 in np.arange(i1,nbins) :
            lmax=min(tracers[i1].lmax,tracers[i2].lmax)
            for l in ell_eff[ell_eff<lmax] :
                typ.append('F')
                ell.append(l)
                t1.append(i1); t2.append(i2)
                q1.append('P'); q2.append('P')
    sbin=sacc.Binning(typ,ell,t1,q1,t2,q2)
    ssbin=sacc.SACC(stracers,sbin)
      
    #3- Arrange power spectra into SACC mean vector
    vec=np.zeros((ssbin.size(),))
    for t1i,t2i,ells,ndx in ssbin.sortTracers() :
        lmax=min(tracers[t1i].lmax,tracers[t2i].lmax)
        vec[ndx]=cls_all[(t1i,t2i)][np.where(ell_eff<lmax)[0]]
    svec=sacc.MeanVec(vec)
    cw = nmt.covariance.NmtCovarianceWorkspace()
    cw.compute_coupling_coefficients(w,w)

    #4- Create SACC file and write to file
    csacc=sacc.SACC(stracers,sbin,svec)

    #5- Compute covariance if needed
    if o.compute_covariance:
        print "Computing covariance"
        sacc_th = csacc
        cov_all={}
        tcov0 = time()
        #This doesn't avoid some repetitions but it is the simplest way
        ngar=np.array([np.sum(t.nzarr)/tot_area for t in tracers])
        for i1 in np.arange(nbins):
            for i2 in np.arange(i1,nbins):
                for i3 in np.arange(nbins):
                    for i4 in np.arange(i3,nbins):
                        cov_all[(i1,i2,i3,i4)]=compute_covariance(w,sacc_th.mean.vector,sacc_th.binning,
                                                                  i1,i2,i3,i4,ngar[i1],ngar[i2],cw)
                        cov_all[(i2,i1,i4,i3)]=cov_all[(i1,i2,i3,i4)]
        cov=np.zeros((ssbin.size(),ssbin.size()))
        for t1i,t2i,ells,ndx in ssbin.sortTracers():
            for t3i,t4i,ells2,ndy in ssbin.sortTracers():
                lmax=min(tracers[t1i].lmax,tracers[t2i].lmax,tracers[t3i].lmax,tracers[t4i].lmax)
                cov[ndx,ndy]=cov_all[(t1i,t2i,t3i,t4i)][ell_eff<lmax,ell_eff<lmax]
        icov=inv(cov)
        precision=sacc.Precision(icov,"dense",sbin)
        csacc=sacc.SACC(stracers,sbin,svec,precision)
        print 'Computed covariance in', time()-tcov0, ' seconds'
    csacc.saveToHDF(o.fname_out)
Example #10
#   ii) Generate random realizations of our fields to compute the errors
l, cltt, clee, clbb, clte = np.loadtxt("cls.txt", unpack=True)
cl_02_th = np.array([clte, np.zeros_like(clte)])

#We then generate an NmtWorkspace object that we use to compute and store
#the mode coupling matrix. Note that this matrix depends only on the masks
#of the two fields to correlate, but not on the maps themselves (in this
#case both maps are the same).
w = nmt.NmtWorkspace()
w.compute_coupling_matrix(f0, f2, b)
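# (Hypothetical aside, not in the original script: because the matrix depends
#  only on the masks, this same workspace could also decouple the spectra of
#  any other pair of fields built with these masks, e.g.
#  w.decouple_cell(nmt.compute_coupled_cell(f0_other, f2_other)) for some
#  other fields f0_other and f2_other sharing the masks of f0 and f2.)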

#Since we suspect that our maps are contaminated (that's why we passed the
#contaminant templates as arguments to the NmtField constructor), we also
#need to compute the bias to the power spectrum caused by contaminant
#cleaning (deprojection bias).
cl_bias = nmt.deprojection_bias(f0, f2, cl_02_th)


#The function defined below will compute the power spectrum between two
#NmtFields f_a and f_b, using the coupling matrix stored in the
#NmtWorkspace wsp and subtracting the deprojection bias clb.
#Note that the most expensive operations in the MASTER algorithm are
#the computation of the coupling matrix and the deprojection bias. Since
#these two objects are precomputed, this function should be pretty fast!
def compute_master(f_a, f_b, wsp, clb):
    #Compute the power spectrum (a la anafast) of the masked fields
    #Note that we only use n_iter=0 here to speed up the computation,
    #but the default value of 3 is recommended in general.
    cl_coupled = nmt.compute_coupled_cell(f_a, f_b)
    #Decouple power spectrum into bandpowers inverting the coupling matrix
    cl_decoupled = wsp.decouple_cell(cl_coupled, cl_bias=clb)
    return cl_decoupled
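
A minimal usage sketch (not part of the original example) of how the helper above would typically be driven once the workspace and deprojection bias are in hand; it assumes the fields f0 and f2, the binning b and the cl_bias computed earlier in this snippet:

# Hypothetical driver code, assuming f0, f2, w, b and cl_bias from above.
ell_eff = b.get_effective_ells()                 # bandpower centres
cl_master = compute_master(f0, f2, w, cl_bias)   # debiased, decoupled bandpowers
for ell, cl in zip(ell_eff, cl_master[0]):
    print(ell, cl)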
Example #11
        ff2 = nmt.NmtField(mask, [mppq, mppu],
                           purify_e=ispure_e,
                           purify_b=ispure_b,
                           n_iter_mask_purify=10)
    return mppq, mppu, ff2


mpq, mpu, f2 = get_fields()

if plotres:
    hp.mollview((mpq * mask).flatten(), title='$Q$')
    hp.mollview((mpu * mask).flatten(), title='$U$')

#Compute deprojection bias
if w_cont:  #Not ready yet
    clb22 = nmt.deprojection_bias(f2, f2, [clee, 0 * clee, 0 * clbb, clbb])
else:
    clb22 = None

#Use initial fields to generate coupling matrix
w22 = nmt.NmtWorkspace()
if not os.path.isfile(prefix + "_w22.dat"):
    print "Computing 22"
    w22.compute_coupling_matrix(f2, f2, b)
    w22.write_to(prefix + "_w22.dat")
else:
    w22.read_from(prefix + "_w22.dat")

#Generate theory prediction
cl22_th = w22.decouple_cell(w22.couple_cell([clee, 0 * clee, 0 * clbb, clbb]))
np.savetxt(
Example #12
    else:
        ff0 = nmt.NmtField(mask, [mppt])
        ff2 = nmt.NmtField(mask, [mppq, mppu])
    return mppt, mppq, mppu, ff0, ff2


mpt, mpq, mpu, f0, f2 = get_fields()

if plotres:
    hp.mollview((mpt * mask).flatten(), title='$\\delta_g$')
    hp.mollview((mpq * mask).flatten(), title='$\\gamma_1$')
    hp.mollview((mpu * mask).flatten(), title='$\\gamma_2$')

#Compute deprojection bias
if w_cont:
    clb00 = nmt.deprojection_bias(f0, f0, [cltt])
    clb02 = nmt.deprojection_bias(f0, f2, [clte, 0 * clte])
    clb22 = nmt.deprojection_bias(f2, f2, [clee, 0 * clee, 0 * clbb, clbb])
else:
    clb00 = None
    clb02 = None
    clb22 = None

#Use initial fields to generate coupling matrix
w00 = nmt.NmtWorkspace()
if not os.path.isfile(prefix + "_w00.dat"):
    print "Computing 00"
    w00.compute_coupling_matrix(f0, f0, b)
    w00.write_to(prefix + "_w00.dat")
else:
    w00.read_from(prefix + "_w00.dat")
Example #13
def process_catalog(o):

    #Read z-binning
    print "Bins"
    z0_bins, zf_bins, lmax_bins = np.loadtxt(o.fname_bins_z, unpack=True)
    nbins = len(z0_bins)

    cat = fc.Catalog(read_from=o.fname_in)

    #Get weights, compute binary mask based on weights, and apodize it if needed
    print "Window"
    mask = Mask(cat, o.nside, o.theta_apo)
    nside = mask.nside

    #Get contaminant templates
    #TODO: check resolution
    if o.templates_fname != "none":
        templates = [[t] for t in hp.read_map(o.templates_fname, field=None)]
        ntemp = len(templates)
    else:
        templates = None
        ntemp = 0

    #Generate bandpowers binning scheme (we're assuming all maps will use the same bandpowers!)
    print "Bandpowers"
    bpw = nmt.NmtBin(nside, nlb=o.delta_ell)
    ell_eff = bpw.get_effective_ells()
    tracers = []
    #Generate tracers
    #TODO: pass extra sampling parameters
    zs, nzs, mps = bin_catalog(cat, z0_bins, zf_bins, mask)
    if mrank != 0:
        return

    for zar, nzar, mp, lmax in zip(zs, nzs, mps, lmax_bins):
        zav = np.average(zar, weights=nzar)
        print "-- z-bin: %3.2f " % zav
        tracers.append(Tracer(mp, zar, nzar, lmax, mask, templates=templates))
        if o.save_map:
            hp.write_map("map_%3.2f.fits" % zav, mp)
        cat.rewind()

    print "Compute power spectra"
    #Compute coupling matrix
    #TODO: (only done once, assuming all maps have the same mask!)
    print "  Computing coupling matrix"
    w = nmt.NmtWorkspace()
    if not (os.path.isfile(o.nmt_workspace)):
        w.compute_coupling_matrix(tracers[0].field, tracers[0].field, bpw)
        if o.nmt_workspace != "none":
            w.write_to(o.nmt_workspace)
    else:
        w.read_from(o.nmt_workspace)

    #Compute all cross-correlations
    def compute_master(fa, fb, wsp, clb):
        cl_coupled = nmt.compute_coupled_cell(fa, fb)
        cl_decoupled = wsp.decouple_cell(cl_coupled, cl_bias=clb)
        return cl_decoupled

    #If attempting to deproject contaminant templates, we need an estimate of the true power spectra.
    #This can be done interatively from a first guess using cl_bias=0, but I haven't coded that up yet.
    #For the moment we will use cl_guess=0.
    cl_guess = np.zeros(3 * nside)
    t1 = time()
    print "  Computing power spectrum"
    cls_all = {}
    for b1 in np.arange(nbins):
        f1 = tracers[b1].field
        for b2 in np.arange(b1, nbins):
            f2 = tracers[b2].field
            if ntemp > 0:
                cl_bias = nmt.deprojection_bias(f1, f2, w, cl_guess)
            else:
                cl_bias = None
            cls_all[(b1, b2)] = compute_master(f1, f2, w, clb=cl_bias)[0]
        print 'Computed bin: ', b1, b2, ' in ', time() - t1, ' s'
        if debug:
            plt.figure()
            plt.plot(ell_eff, cls_all[(b1, b1)])
            plt.xscale('log')
            plt.yscale('log')
            plt.xlabel(r'$l$')
            plt.ylabel(r'$C_{l}$')
            plt.show()
    print "Translating into SACC"
    #Transform everything into SACC format
    #1- Generate SACC tracers
    stracers = [
        sacc.Tracer("tr_b%d" % i, "point", t.zarr, t.nzarr, exp_sample="gals")
        for i, t in enumerate(tracers)
    ]

    #2- Define SACC binning
    typ, ell, t1, q1, t2, q2 = [], [], [], [], [], []
    for i1 in np.arange(nbins):
        for i2 in np.arange(i1, nbins):
            lmax = min(tracers[i1].lmax, tracers[i2].lmax)
            for l in ell_eff[ell_eff < lmax]:
                typ.append('F')
                ell.append(l)
                t1.append(i1)
                t2.append(i2)
                q1.append('P')
                q2.append('P')
    sbin = sacc.Binning(typ, ell, t1, q1, t2, q2)
    ssbin = sacc.SACC(stracers, sbin)

    #3- Arrange power spectra into SACC mean vector
    vec = np.zeros((ssbin.size(), ))
    for t1i, t2i, ells, ndx in ssbin.sortTracers():
        lmax = min(tracers[t1i].lmax, tracers[t2i].lmax)
        vec[ndx] = cls_all[(t1i, t2i)][np.where(ell_eff < lmax)[0]]
    svec = sacc.MeanVec(vec)

    #4- Create SACC file and write to file
    csacc = sacc.SACC(stracers, sbin, svec)
    csacc.saveToHDF(o.fname_out)
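
The comment in the function above notes that the deprojection bias could be refined iteratively from a first guess rather than keeping cl_guess = 0, but that this was never coded up. Below is a minimal sketch of one such refinement pass, under the assumptions of the code above (f1, f2, w, bpw and nside as defined inside process_catalog, and the three-argument deprojection_bias call used by the other examples on this page); interpolating the bandpowers back onto individual multipoles is a deliberate simplification:

# Hypothetical refinement loop (not part of the original pipeline).
ells = np.arange(3 * nside)
cl_guess = np.zeros([1, 3 * nside])
for _ in range(2):  # a couple of passes is typically enough
    cl_bias = nmt.deprojection_bias(f1, f2, cl_guess)
    cl_dec = w.decouple_cell(nmt.compute_coupled_cell(f1, f2),
                             cl_bias=cl_bias)
    # Turn the decoupled bandpowers into an updated per-multipole guess.
    cl_guess = np.array([np.interp(ells, bpw.get_effective_ells(),
                                   cl_dec[0])])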