コード例 #1
0
ファイル: mk_sims.py プロジェクト: sssm8d/LSSLike
def main(dic_par):
    """Build a fiducial cosmology from `dic_par`, assemble the theory data
    vector and covariance, and write one noiseless simulation plus 10 random
    realizations into the sims/ directory.

    :param dic_par: dict of cosmological and pipeline parameters.
    """
    ccl_params = ccl.Parameters(
        Omega_c=dic_par['omega_c'],
        Omega_b=dic_par['omega_b'],
        h=dic_par['h0'],
        sigma8=dic_par['sigma_8'],
        n_s=dic_par['n_s'],
    )
    cosmo = ccl.Cosmology(
        ccl_params,
        transfer_function=dic_par['transfer_function'],
        matter_power_spectrum=dic_par['matter_power_spectrum'])

    # Assemble tracers, binning, theory mean vector and precision matrix.
    tracers, cltracers = getTracers(cosmo, dic_par)
    binning = getBinning(tracers)
    binning_sacc = sacc.SACC(tracers, binning)
    theories = getTheories(cosmo, binning_sacc, cltracers)
    mean = getTheoryVec(binning_sacc, theories)
    precision, covmatrix = getPrecisionMatrix(binning_sacc, theories)
    # Cholesky factor of the covariance, used to draw correlated realizations.
    chol = la.cholesky(covmatrix)

    csacc = sacc.SACC(tracers, binning, mean, precision)
    csacc.printInfo()

    # Noiseless "mean" simulation, carrying the precision matrix.
    generate_sim("sims/sim_mean.sacc",
                 csacc,
                 add_random=False,
                 store_precision=True,
                 cholesky=chol)
    # Random realizations (noise added via the Cholesky factor).
    nsim = 10
    for i in np.arange(nsim):
        generate_sim("sims/sim_%03d.sacc" % i, csacc, cholesky=chol)
コード例 #2
0
def get_mean_cov(s_data, Ntr, Nztr, noi_fac, upsample):
    """Upsample each tracer's N(z) and build mean tracers + a noise covariance.

    Implements upsampling + Section 2.2.1: the N(z) from each of the four
    photo-z codes is interpolated onto a grid `upsample` times finer; the
    scatter between codes defines a per-bin variance which, scaled by
    `noi_fac`, forms a block-diagonal (diagonal per tracer) covariance.

    :param s_data: SACC object whose tracers carry `z`, `Nz` and photo-z
        extra columns.
    :param Ntr: number of tracers.
    :param Nztr: number of z-bins per tracer in `s_data`.
    :param noi_fac: multiplicative factor applied to the noise variance.
    :param upsample: integer upsampling factor, must be >= 1.
    :returns: tuple (SACC object with the new tracers, covariance matrix).
    :raises ValueError: if `upsample` < 1.
    """
    if upsample < 1:
        # Bug fix: the original `else` branch left `w` undefined (NameError
        # downstream) and produced inconsistently sized arrays. Reject the
        # unsupported case explicitly instead.
        raise ValueError("upsample must be an integer >= 1")

    # photo-z codes
    pz_codes = ['nz_demp', 'nz_ephor', 'nz_ephor_ab', 'nz_frankenz']

    # store new tracers and noise covariance
    myNztr = Nztr * upsample
    cov_all = np.zeros((myNztr * Ntr, myNztr * Ntr))
    tr = []
    for i in range(Ntr):
        # get nz for all pz codes
        zs = s_data.tracers[i].z
        nzs = s_data.tracers[i].Nz
        # NOTE: this normalizes the tracer's Nz array in place (shared with
        # s_data), matching the original behavior.
        nzs /= np.sum(nzs)
        minz, maxz = zs[0], zs[-1]
        # Finer z-grid; `w` masks the points inside the original range, the
        # rest stay at zero.
        newzs = zs[0] + np.arange(myNztr) * (maxz - minz) / (Nztr -
                                                             1) / upsample
        newnzs = np.zeros(myNztr)
        w = np.where(newzs <= maxz)
        # Bug fix: drop the spurious list-wrap around the interpolated values
        # (it only worked by accident through broadcasting).
        newnzs[w] = interp1d(zs, nzs, kind='cubic')(newzs[w])
        nzs = [newnzs]

        for pn in pz_codes:
            n = s_data.tracers[i].extra_cols[pn]
            n /= np.sum(n)
            newn = np.zeros(myNztr)
            newn[w] = interp1d(zs, n, kind='cubic')(newzs[w])
            nzs.append(newn)
        nzs = np.array(nzs)

        # Mean N(z): the upsampled fiducial Nz (not the pz-code average).
        nz_mean = np.zeros(myNztr)
        nz_mean[w] = interp1d(zs, s_data.tracers[i].Nz, kind='cubic')(newzs[w])
        nz_var = np.var(nzs, axis=0)
        # Smooth the noisy variance estimate; kernel width scales with the
        # upsampling so the physical smoothing scale stays fixed.
        nz_var = gaussian_filter(nz_var, 2.5 * upsample)
        # Diagonal noise covariance for this tracer's block.
        cov = noi_fac * np.diag(nz_var)
        cov_all[i * myNztr:(i + 1) * myNztr, i * myNztr:(i + 1) * myNztr] = cov

        # store new tracers
        T = sacc.Tracer('bin_%d' % i,
                        'point',
                        newzs,
                        nz_mean,
                        exp_sample='HSC_DESC')
        tr.append(T)
    s_m = sacc.SACC(tr, s_data.binning, s_data.mean)
    return s_m, cov_all
コード例 #3
0
def get_mean_cov(s_data, Ntr, Nztr, noi_fac):
    """Average the photo-z code N(z)s per tracer and build a noise covariance.

    For each tracer, the normalized N(z) from the fiducial column and the
    four photo-z codes are averaged; their scatter, reduced to a single
    variance level per tracer and scaled by `noi_fac`, fills a diagonal
    block of the covariance.

    :param s_data: SACC object with tracers carrying photo-z extra columns.
    :param Ntr: number of tracers.
    :param Nztr: number of z-bins per tracer.
    :param noi_fac: multiplicative factor applied to the noise variance.
    :returns: tuple (SACC object with the mean tracers, covariance matrix).
    """
    # photo-z codes
    pz_codes = ['nz_demp', 'nz_ephor', 'nz_ephor_ab', 'nz_frankenz']

    cov_all = np.zeros((Nztr * Ntr, Nztr * Ntr))
    tr = []
    for i in range(Ntr):
        tracer = s_data.tracers[i]
        zs = tracer.z
        # Stack the normalized fiducial N(z) with each photo-z code's N(z).
        stacked = [tracer.Nz / np.sum(tracer.Nz)]
        for pn in pz_codes:
            col = tracer.extra_cols[pn]
            stacked.append(col / np.sum(col))
        stacked = np.array(stacked)

        # Mean across codes, and one common variance level obtained by
        # averaging the per-bin variance over populated bins.
        nz_mean = np.mean(stacked, axis=0)
        nz_var = np.var(stacked, axis=0)
        nz_var_mean = nz_var[nz_mean > 0].mean()

        nbin = len(zs)
        block = np.diag(noi_fac * nz_var_mean * np.ones(nbin))
        cov_all[i * nbin:(i + 1) * nbin, i * nbin:(i + 1) * nbin] = block

        # Store the averaged tracer.
        tr.append(
            sacc.Tracer('bin_%d' % i,
                        'point',
                        zs,
                        nz_mean,
                        exp_sample='HSC_DESC'))

    s_m = sacc.SACC(tr, s_data.binning, s_data.mean)
    return s_m, cov_all
コード例 #4
0
ファイル: power_specter.py プロジェクト: LSSTDESC/DEHSC_LSS
    def write_vector_to_sacc(self,
                             fname_out,
                             sacc_t,
                             sacc_b,
                             cls,
                             covar=None,
                             verbose=False):
        """
        Write a vector of power spectrum measurements into a SACC file.
        :param fname_out: path to output file.
        :param sacc_t: list of SACC tracers.
        :param sacc_b: SACC Binning object.
        :param cls: list of power spectrum measurements.
        :param covar: covariance matrix (stored as a dense precision block),
            or None to omit it.
        :param verbose: if True, print a summary of the SACC file.
        """
        # Wrap the covariance, when given, as a dense SACC precision object.
        if covar is not None:
            sacc_precision = sacc.Precision(covar,
                                            "dense",
                                            is_covariance=True,
                                            binning=sacc_b)
        else:
            sacc_precision = None

        sacc_mean = sacc.MeanVec(cls.flatten())
        sacc_meta = {'Area_rad': self.area_patch}
        s = sacc.SACC(sacc_t,
                      sacc_b,
                      sacc_mean,
                      precision=sacc_precision,
                      meta=sacc_meta)
        if verbose:
            s.printInfo()
        s.saveToHDF(fname_out)
コード例 #5
0
def get_smooth_s_and_prior(s_data,
                           cosmo,
                           want_prior,
                           A_smooth=0.25,
                           noi_fac=4.):
    """Smooth the tracer N(z)s of `s_data` and optionally return the prior.

    Combines the photo-z code scatter (noise) and a cosmic-variance term
    into a covariance, adds a smoothness penalty D on first and second
    derivatives, and returns the posterior-mean smoothed N(z)s.

    :param s_data: input SACC object.
    :param cosmo: CCL cosmology used for the cosmic-variance covariance.
    :param want_prior: if True, also return the smoothing precision P.
    :param A_smooth: amplitude of the smoothness penalty.
    :param noi_fac: noise-variance scaling passed to get_mean_cov.
    :returns: smoothed SACC object, or (SACC, P) when `want_prior` is True.
    """
    # number of tracers and bins
    Nz_per_tracer = len(s_data.tracers[0].z)
    N_tracers = len(s_data.tracers)
    Nz_total = N_tracers * Nz_per_tracer
    zs = s_data.tracers[0].z

    # obtain the mean of the 4 pz codes with their noise
    s_mean, cov_noise = get_mean_cov(s_data, N_tracers, Nz_per_tracer, noi_fac)
    s0 = NzVec(s_data)

    # cosmic-variance (CV) covariance, cached on disk because it is expensive
    if os.path.isfile("cov_CV.npy"):
        print("!!!!! Loading cached CV covariance matrix !!!!!")
        cov_CV = np.load("cov_CV.npy")
    else:
        # compute cv covmat, block-diagonal per tracer
        cov_CV = np.zeros((Nz_total, Nz_total))
        for i in range(N_tracers):
            # cosmic variance covmat for each tracer
            cov_CV_per_tracer = compute_covmat_cv(cosmo, s_mean.tracers[i].z,
                                                  s_mean.tracers[i].Nz)
            cov_CV[i * Nz_per_tracer:(i + 1) * Nz_per_tracer, i *
                   Nz_per_tracer:(i + 1) * Nz_per_tracer] = cov_CV_per_tracer
        np.save("cov_CV.npy", cov_CV)

    # impose smoothness of first and second derivative
    D = A_smooth**2 * obtain_smoothing_D(s_mean, first=True, second=True)

    # compute total covariance of noise
    cov_total = cov_noise + cov_CV

    # compute precision with and without the smoothing matrix D
    P0 = np.linalg.inv(cov_total)
    P = P0 + D

    # Smoothed N(z): posterior mean s = P^{-1} P0 s0.
    # (Fix: reuse P instead of recomputing np.linalg.inv(P0 + D), and solve
    # the linear system rather than forming an explicit inverse; removed a
    # leftover debug print of s0/s_smooth.)
    s_smooth = np.linalg.solve(P, np.dot(P0, s0))
    tr = []
    for i in range(N_tracers):
        T = sacc.Tracer('bin_%d' % i,
                        'point',
                        zs,
                        s_smooth[i * Nz_per_tracer:(i + 1) * Nz_per_tracer],
                        exp_sample='HSC_DESC')
        tr.append(T)
    s = sacc.SACC(tr, s_data.binning, s_data.mean)

    # return smooth s (and smoothing prior)
    if want_prior:
        return s, P
    else:
        return s
コード例 #6
0
ファイル: power_specter.py プロジェクト: susannaaz/BBPipe
    def save_cell_to_file(self, cell, tracers, binning, fname):
        """Flatten the measured power spectra into a SACC file and save it.

        EE, EB and BB are stored for every pair; BE is stored only when the
        two map/split labels differ (for identical pairs it would duplicate
        EB).
        """
        vector = []
        for b1, b2, s1, s2, l1, l2 in self.get_cell_iterator():
            spectra = cell[l1][l2]
            vector.append(spectra[0])  # EE
            vector.append(spectra[1])  # EB
            if (b1, s1) != (b2, s2):
                # BE only for distinct pairs
                vector.append(spectra[2])  # BE
            vector.append(spectra[3])  # BB

        sacc_mean = sacc.MeanVec(np.array(vector).flatten())
        s = sacc.SACC(tracers, binning, sacc_mean)
        print("Saving to " + fname)
        s.saveToHDF(fname)
コード例 #7
0
ファイル: create_sacc.py プロジェクト: damonge/WeePeeZee
# Transfer matrix relating N(z) perturbations to power-spectrum changes.
# Expensive to compute, so it is cached on disk keyed by read mode and
# upsampling factor.
Tmatfn = "Tmat_%s_%i.npy"%(read,upsample)
if os.path.isfile(Tmatfn):
    print ("Loading cached Tmat")
    Tmat = np.load(Tmatfn)
else:
    Tmat = obtain_Tmat(s_m,s_noi,hod_params,Z_params,HMCorrection)
    np.save(Tmatfn,Tmat)

# Obtain the new covariance matrix
cov_CVnoismo = obtain_improved_cov(cov,Tmat,prior_smo)

# Weirdly it seems like this object actually expects the covariance rather than the precision 
cov_n = sacc.Precision(cov_CVnoismo)

# Create new sacc file (TODO: ask about "dense")
s_n = sacc.SACC(s_d.tracers,s_d.binning,s_d.mean.vector,cov_n,meta=s_d.meta)


# Save the new covariance and the power spectrum of the data which shall be run via MCMC
if not os.path.exists(dir_write):
    os.makedirs(dir_write)
s_n.saveToHDF(dir_write+"/power_spectra_wdpj.sacc")

# Copying noise bias file
shutil.copy(dir_read +"/noi_bias.sacc",dir_write)

# !!!!! Everything below is just for checking that the code works !!!!!!


# Sanity check of implementation
# Round-trip the precision through the saved SACC object to verify it.
prec_CVnoismo = s_n.precision.getPrecisionMatrix()
コード例 #8
0
    q2_arr += nbands * [q2]
    t1_arr += nbands * [t1]
    t2_arr += nbands * [t2]
    typ_arr += nbands * [typ]
# Binning shared by all three SACC objects below, including per-bandpower
# window functions.
bins = sacc.Binning(typ_arr,
                    ls_arr,
                    t1_arr,
                    q1_arr,
                    t2_arr,
                    q2_arr,
                    windows=w_arr)

#Write
# Data vector: signal mean with its precision matrix.
s_d = sacc.SACC(tracers,
                bins,
                mean=v_signal,
                precision=precis,
                meta={'data_name': 'SO_V3_Mock_no_noise_data'})
# Fiducial: same signal mean, no precision.
s_f = sacc.SACC(tracers,
                bins,
                mean=v_signal,
                meta={'data_name': 'SO_V3_Mock_no_noise_fiducial'})
# Noise-only mean vector.
# NOTE(review): s_n is built but not saved in this snippet — confirm it is
# written elsewhere or intentionally unused.
s_n = sacc.SACC(tracers,
                bins,
                mean=v_noise,
                meta={'data_name': 'SO_V3_Mock_no_noise_noise'})

s_d.saveToHDF(prefix_out + ".sacc")
s_d.printInfo()
s_f.saveToHDF(prefix_out + "_fiducial.sacc")
s_f.printInfo()
コード例 #9
0
            val.append(np.random.uniform(0, 10))
            err.append(np.random.uniform(1, 2))

# NOTE(review): `type` shadows the builtin here; kept for byte-compatibility
# with the surrounding script.
binning = sacc.Binning(type, ell, t1, q1, t2, q2, windows=wins)
mean = sacc.MeanVec(val)

## We need to add covariance matrix. We will use ell_block_diagonal
## where everything is coupled across tracers/redshifts at the same ell but not
## across ell with fixed 10% off-diagonal elements
Np = binning.size()
cov = np.zeros((Np, Np))
for i in range(Np):
    for j in range(i, Np):
        if ell[i] == ell[j]:
            # Same-ell entries couple; off-diagonal ones are damped by 10x.
            cov[i, j] = err[i] * err[j]
            if (i != j):
                cov[i, j] /= 10
            # Mirror to keep the covariance symmetric.
            cov[j, i] = cov[i, j]
precision = sacc.Precision(cov,
                           "ell_block_diagonal",
                           is_covariance=True,
                           binning=binning)

## Add some meta data
meta = {"Creator": "McGyver", "Project": "Victory"}

## finally, create SACC object
s = sacc.SACC(tracers, binning, mean, precision, meta)
s.printInfo()
s.saveToHDF("test.sacc")
コード例 #10
0
    typ = q1 + q2
    for b in range(nells):
        w = windows[b, ic]
        lmean = np.sum(ls * w) / np.sum(w)
        win = sacc.Window(ls, w)
        ls_arr.append(lmean)
        w_arr.append(win)
    q1_arr += nells * [q1]
    q2_arr += nells * [q2]
    t1_arr += nells * [t1]
    t2_arr += nells * [t2]
    typ_arr += nells * [typ]
# Assemble the SACC binning from the per-correlation arrays built above,
# attaching the bandpower window functions.
bins = sacc.Binning(typ_arr,
                    ls_arr,
                    t1_arr,
                    q1_arr,
                    t2_arr,
                    q2_arr,
                    windows=w_arr)

#SACC file
# Mean vector + precision for the BK15 B-mode analysis.
s = sacc.SACC(tracers,
              bins,
              mean=meanvec,
              precision=precis,
              meta={'data_name': 'BK15_bmode_analysis'})

#Save SACC file
s.saveToHDF("BK15.sacc")
s.printInfo()
コード例 #11
0
ファイル: data_generator.py プロジェクト: damonge/WeePeeZee
            nz_new += np.sum((np.random.randn(n_svd) * sigs)[None, :] * evs,
                             axis=1)
        T = sacc.Tracer('bin_%d' % i,
                        'point',
                        zs,
                        nz_new,
                        exp_sample='HSC_DESC')
        tr.append(T)
    return tr


# Tracer sets: noiseless means and randomized N(z)s (4 SVD modes of noise).
tr_mean = get_new_tracers(add_random=False)
tr_rand = get_new_tracers(add_random=True, n_svd=4)

# SACC object containing the mean N(z) (which we will treat as the truth)
s_mean = sacc.SACC(tr_mean, s_d.binning, s_d.mean)

# Halo model correction
# Fixed fiducial cosmology used only for the halo-model correction below.
cosmo = ccl.Cosmology(n_s=0.9649,
                      sigma8=0.8111,
                      h=0.6736,
                      Omega_c=0.264,
                      Omega_b=0.0493)
HMCorr = HaloModCorrection(cosmo,
                           k_range=[1e-4, 1e2],
                           nlk=256,
                           z_range=[0., 3.],
                           nz=50)

# Parameters
cosmo_params = {
コード例 #12
0
    for i2 in range(i1, 2 * nfreqs):
        b2 = i2 // 2
        p2 = i2 % 2
        ty = pol_names[p1] + pol_names[p2]
        for il, ll in enumerate(ells_bpw):
            ell.append(ll)
            typ.append(ty)
            t1.append(b1)
            t2.append(b2)
            q1.append('C')
            q2.append('C')
bins = sacc.Binning(typ, ell, t1, q1, t2, q2)
# Keep only the upper triangle of the (2*nfreqs x 2*nfreqs) bandpower block
# (the matrix is symmetric) and flatten into a single model vector.
bpw_model = bpw_model.reshape([2 * nfreqs, 2 * nfreqs,
                               n_bpw])[np.triu_indices(2 * nfreqs)].flatten()
mean_model = sacc.MeanVec(bpw_model)
sacc_model = sacc.SACC(tracers, bins, mean=mean_model)
# NOTE(review): shell call just to create the output directory; os.makedirs
# would avoid the subprocess, but is kept as-is here.
os.system('mkdir -p ' + prefix_out)
sacc_model.saveToHDF(prefix_out + "/cells_model.sacc")

# Binary hit mask and inverse-sqrt hits map, with a 1e-3 floor to avoid
# dividing by (near-)zero hit counts.
nhits_binary = np.zeros_like(nhits)
inv_sqrtnhits = np.zeros_like(nhits)
inv_sqrtnhits[nhits > 1E-3] = 1. / np.sqrt(nhits[nhits > 1E-3])
nhits_binary[nhits > 1E-3] = 1
#Add noise
ylf = 1
nell = np.zeros([nfreqs, lmax + 1])
_, nell[:, 2:], _ = Simons_Observatory_V3_SA_noise(sens,
                                                   knee,
                                                   ylf,
                                                   fsky,
                                                   lmax + 1,
コード例 #13
0
w_arr=[]
# Build the binning arrays from the requested correlation ordering: each
# entry names a pair of tracer/quantity strings like 'trname_E'.
for ic, c in enumerate(corr_ordering):
    s1, s2 = c
    # Split 'name_Q' into tracer name and quantity letter (E/B/T...).
    tn1 = s1[:-2]
    q1 = s1[-1]
    t1 = np.where(tracer_names==tn1)[0][0]
    tn2 = s2[:-2]
    q2 = s2[-1]
    t2 = np.where(tracer_names==tn2)[0][0]
    typ = q1 + q2
    for b in range(nells) :
        # Window for this correlation type; effective ell is its centroid.
        w = windows[typ][b]
        lmean = np.sum(ls * w) / np.sum(w)
        win = sacc.Window(ls, w)
        ls_arr.append(lmean)
        w_arr.append(win)
    # One entry per bandpower for this correlation.
    q1_arr += nells * [q1]
    q2_arr += nells * [q2]
    t1_arr += nells * [t1]
    t2_arr += nells * [t2]
    typ_arr += nells * [typ]
bins = sacc.Binning(typ_arr, ls_arr, t1_arr, q1_arr, t2_arr, q2_arr, windows=w_arr)

#SACC files
s = sacc.SACC(tracers, bins, mean=meanvec, precision=precis, meta={'data_name':'ACTPol_TT_TE_EE_analysis'})

#Save SACC file
s.saveToHDF("ACTPol_0.sacc")
s.printInfo()

コード例 #14
0
def get_smooth_s_and_prior(s_data,
                           cosmo,
                           noi_fac=4.,
                           A_smooth=1.,
                           dz_thr=0.04,
                           upsample=1,
                           cov_cv=True):
    """Smooth the (optionally upsampled) tracer N(z)s and return the prior.

    Combines the photo-z code scatter with (optionally) a cosmic-variance
    covariance, adds a generalized smoothing penalty D, and returns the
    posterior-mean smoothed N(z)s together with the precision P = P0 + D.

    :param s_data: input SACC object.
    :param cosmo: CCL cosmology used for the cosmic-variance covariance.
    :param noi_fac: noise-variance scaling passed to get_mean_cov.
    :param A_smooth: amplitude of the smoothing penalty.
    :param dz_thr: redshift-difference threshold for the penalty.
    :param upsample: integer upsampling factor for the z grid.
    :param cov_cv: if False, skip the cosmic-variance term entirely.
    :returns: tuple (smoothed SACC object, precision matrix P).
    """
    # number of tracers and bins
    Nz_per_tracer = len(s_data.tracers[0].z)
    N_tracers = len(s_data.tracers)
    Nz_total = N_tracers * Nz_per_tracer
    zs_data = s_data.tracers[0].z

    # obtain the mean of the 4 pz codes with their noise
    s_mean, cov_noise = get_mean_cov(s_data, N_tracers, Nz_per_tracer, noi_fac,
                                     upsample)
    zs_mean = s_mean.tracers[0].z
    s0 = NzVec(s_mean)

    if cov_cv:
        # cosmic-variance covariance, cached on disk per upsampling factor
        covfn = "cov_CV_%i.npy" % (upsample)
        if os.path.isfile(covfn):
            print("!!!!! Loading cached CV covariance matrix !!!!!")
            cov_CV = np.load(covfn)
        else:
            # compute cv covmat, block-diagonal per tracer
            cov_CV = np.zeros((Nz_total * upsample, Nz_total * upsample))
            for i in range(N_tracers):
                print("Tracer = %i out of %i" % (i, N_tracers - 1))
                # cosmic variance covmat for each tracer on the data grid
                cov_CV_per_tracer = compute_covmat_cv(cosmo,
                                                      s_data.tracers[i].z,
                                                      s_data.tracers[i].Nz)

                if upsample > 1:
                    # Interpolate the covariance onto the upsampled grid:
                    # first along rows, then along columns.
                    # (Cleanup: removed the dead `bor_cov_CV_up_per_tracer`
                    # scratch array, the commented-out duplicate of this
                    # interpolation, and the per-tracer debug print.)
                    new_Nz_per_tracer = Nz_per_tracer * upsample
                    assert (new_Nz_per_tracer == len(zs_mean))

                    cov_CV_up_per_tracer = np.zeros(
                        (new_Nz_per_tracer, new_Nz_per_tracer))
                    intermediate_cov = np.zeros(
                        (Nz_per_tracer, new_Nz_per_tracer))

                    for row in range(Nz_per_tracer):
                        fun = interpolate.interp1d(zs_data,
                                                   cov_CV_per_tracer[row, :],
                                                   fill_value="extrapolate")
                        intermediate_cov[row, :] = fun(zs_mean)

                    for col in range(new_Nz_per_tracer):
                        fun = interpolate.interp1d(zs_data,
                                                   intermediate_cov[:, col],
                                                   fill_value="extrapolate")
                        cov_CV_up_per_tracer[:, col] = fun(zs_mean)
                    cov_CV[i * len(zs_mean):(i + 1) * len(zs_mean),
                           i * len(zs_mean):(i + 1) *
                           len(zs_mean)] = cov_CV_up_per_tracer
                else:
                    cov_CV[i * len(zs_mean):(i + 1) * len(zs_mean),
                           i * len(zs_mean):(i + 1) *
                           len(zs_mean)] = cov_CV_per_tracer
            np.save(covfn, cov_CV)
    else:
        cov_CV = 0

    # impose smoothness
    D = obtain_generalized_D(s_mean, A_smooth, dz_thr)

    # compute total covariance of noise
    cov_total = cov_noise + cov_CV
    # compute precision with and without the smoothing matrix D
    P0 = np.linalg.inv(cov_total)
    P = P0 + D

    # Smoothed N(z): posterior mean s = P^{-1} P0 s0.
    # (Fix: reuse P instead of recomputing np.linalg.inv(P0 + D).)
    s_smooth = np.dot(np.dot(np.linalg.inv(P), P0), s0)

    tr = []
    for i in range(N_tracers):
        T = sacc.Tracer('bin_%d' % i,
                        'point',
                        zs_mean,
                        s_smooth[i * Nz_per_tracer * upsample:(i + 1) *
                                 Nz_per_tracer * upsample],
                        exp_sample='HSC_DESC')
        tr.append(T)
    s = sacc.SACC(tr, s_data.binning, s_data.mean)

    # return smooth s and smoothing prior
    return s, P
コード例 #15
0
ファイル: mk_theory.py プロジェクト: kakoon/2pt_validation
    return theo

def getTheoryVec(s, cls_theory):
    """Arrange per-tracer-pair theory Cls into SACC ordering as a MeanVec."""
    vec = np.zeros(s.size())
    for tr1, tr2, ells, inds in s.sortTracers():
        vec[inds] = cls_theory[(tr1, tr2)]
    return sacc.MeanVec(vec)
# NOTE(review): Python 2 script (print statements).
print 'Setting up cosmology'
cosmo = ccl.Cosmology(ccl.Parameters(Omega_c=0.266,Omega_b=0.049,h=hhub,sigma8=0.8,n_s=0.96,),matter_power_spectrum='linear',transfer_function='eisenstein_hu')

#Compute grid scale
# Smoothing scale tied to the simulation grid resolution, in Mpc/h.
zmax=2.5
ngrid=3072
a_grid=2*ccl.comoving_radial_distance(cosmo,1./(1+zmax))*(1+2./ngrid)/ngrid*hhub
print "Grid smoothing : %.3lf Mpc/h"%a_grid

print 'Reading SACC file'
#SACC File with the N(z) to analyze
binning_sacc = sacc.SACC.loadFromHDF('../test/catalog0.sacc')
#Bias file (it can also be included in the SACC file in the line before)
bias_tab = astropy.table.Table.read('../test/bz_lsst.txt',format='ascii')
tracers = binning_sacc.tracers
print 'Got ',len(tracers),' tracers'
# One CCL number-counts tracer per SACC tracer, sharing the tabulated bias
# and smoothed on half the grid scale.
cltracers=[ccl.ClTracerNumberCounts(cosmo,False,False,n=(t.z,t.Nz),bias=(bias_tab['col1'],bias_tab['col2']),r_smooth=0.5*a_grid) for t in tracers]
print 'Cl tracers ready'
theories = getTheories(cosmo,binning_sacc,cltracers)
mean=getTheoryVec(binning_sacc,theories)
# Theory-only SACC (no precision matrix).
csacc=sacc.SACC(tracers,binning_sacc.binning,mean)
csacc.printInfo()
csacc.saveToHDF('theory.sacc',save_precision=False)
コード例 #16
0
ファイル: power_specter.py プロジェクト: LSSTDESC/DEHSC_LSS
  T.addColumns({'ndens':t.ndens_perad*np.ones_like(nz)})
  sacc_tracers.append(T)
#Binning and mean
# One 'F' (Fourier-space) entry per effective ell for every tracer pair
# (t1i, t2i) with t2i >= t1i; `dell` stores the bandpower widths.
# NOTE(review): `type` shadows the builtin; kept byte-identical here.
type,ell,dell,t1,q1,t2,q2=[],[],[],[],[],[],[]
for t1i in range(nbins) :
  for t2i in range(t1i,nbins) :
    for i_l,l in enumerate(ell_eff) :
      type.append('F') #Fourier-space
      ell.append(l)
      dell.append(lend[i_l]-lini[i_l])
      t1.append(t1i)
      q1.append('P')
      t2.append(t2i)
      q2.append('P')
sacc_binning=sacc.Binning(type,ell,t1,q1,t2,q2,deltaLS=dell,windows=windows_sacc)
sacc_mean=sacc.MeanVec(cls_all.flatten())
if covar is None :
  sacc_precision=None
else :
  sacc_precision=sacc.Precision(covar,"dense",is_covariance=True, binning=sacc_binning)
sacc_meta={'Field':o.hsc_field,'Area_rad':area_patch}
s=sacc.SACC(sacc_tracers,sacc_binning,sacc_mean,precision=sacc_precision,meta=sacc_meta)
s.printInfo()
s.saveToHDF(o.prefix_out+'.sacc')
#Save noise
# Noise spectra reuse the same binning layout but without windows/precision.
sacc_binning_noise=sacc.Binning(type,ell,t1,q1,t2,q2,deltaLS=dell)
sacc_mean_noise=sacc.MeanVec(nls_all.flatten())
s=sacc.SACC(sacc_tracers,sacc_binning_noise,sacc_mean_noise,precision=None,meta=sacc_meta)
s.printInfo()
s.saveToHDF(o.prefix_out+'_noise.sacc')
コード例 #17
0
def process_catalog(o):
    """Read a catalog, bin it in redshift, measure all auto/cross angular
    power spectra with NaMaster, and save the result as a SACC file.

    :param o: options object carrying file names, nside, apodization scale,
        bandpower width and template settings.

    NOTE(review): Python 2 code (print statements).
    """

    #Read z-binning
    print "Bins"
    z0_bins, zf_bins, lmax_bins = np.loadtxt(o.fname_bins_z, unpack=True)
    nbins = len(z0_bins)

    cat = fc.Catalog(read_from=o.fname_in)

    #Get weights, compute binary mask based on weights, and apodize it if needed
    print "Window"
    mask = Mask(cat, o.nside, o.theta_apo)
    nside = mask.nside

    #Get contaminant templates
    #TODO: check resolution
    if o.templates_fname != "none":
        templates = [[t] for t in hp.read_map(o.templates_fname, field=None)]
        ntemp = len(templates)
    else:
        templates = None
        ntemp = 0

    #Generate bandpowers binning scheme (we're assuming all maps will use the same bandpowers!)
    print "Bandpowers"
    bpw = nmt.NmtBin(nside, nlb=o.delta_ell)
    ell_eff = bpw.get_effective_ells()
    tracers = []
    #Generate tracers
    #TODO: pass extra sampling parameters
    zs, nzs, mps = bin_catalog(cat, z0_bins, zf_bins, mask)
    # Only the root MPI task continues past the binning step.
    if mrank != 0:
        return

    for zar, nzar, mp, lmax in zip(zs, nzs, mps, lmax_bins):
        zav = np.average(zar, weights=nzar)
        print "-- z-bin: %3.2f " % zav
        tracers.append(Tracer(mp, zar, nzar, lmax, mask, templates=templates))
        if o.save_map:
            hp.write_map("map_%3.2f.fits" % zav, mp)
        cat.rewind()

    print "Compute power spectra"
    #Compute coupling matrix
    #TODO: (only done once, assuming all maps have the same mask!)
    print "  Computing coupling matrix"
    w = nmt.NmtWorkspace()
    if not (os.path.isfile(o.nmt_workspace)):
        w.compute_coupling_matrix(tracers[0].field, tracers[0].field, bpw)
        if o.nmt_workspace != "none":
            w.write_to(o.nmt_workspace)
    else:
        w.read_from(o.nmt_workspace)

    #Compute all cross-correlations
    def compute_master(fa, fb, wsp, clb):
        # Decoupled bandpower estimator for one field pair.
        cl_coupled = nmt.compute_coupled_cell(fa, fb)
        cl_decoupled = wsp.decouple_cell(cl_coupled, cl_bias=clb)
        return cl_decoupled

    #If attempting to deproject contaminant templates, we need an estimate of the true power spectra.
    #This can be done interatively from a first guess using cl_bias=0, but I haven't coded that up yet.
    #For the moment we will use cl_guess=0.
    cl_guess = np.zeros(3 * nside)
    t1 = time()
    print "  Computing power spectrum"
    cls_all = {}
    for b1 in np.arange(nbins):
        f1 = tracers[b1].field
        for b2 in np.arange(b1, nbins):
            f2 = tracers[b2].field
            if ntemp > 0:
                # NOTE(review): `cl_theory` is undefined in this scope —
                # presumably this should be `cl_guess`; verify before
                # enabling template deprojection.
                cl_bias = nmt.deprojection_bias(f1, f2, w, cl_theory)
            else:
                cl_bias = None
            cls_all[(b1, b2)] = compute_master(f1, f2, w, clb=cl_bias)[0]
        # NOTE(review): this prints after the inner loop finishes, so b2 is
        # always its last value here.
        print 'Computed bin: ', b1, b2, ' in ', time() - t1, ' s'
        # NOTE(review): `debug` is not defined in this function — assumed to
        # be a module-level flag; confirm.
        if debug:
            plt.figure()
            plt.plot(ell_eff, cls_all[(b1, b1)])
            plt.xscale('log')
            plt.yscale('log')
            plt.xlabel(r'$l$')
            plt.ylabel(r'$C_{l}$')
            plt.show()
    print "Translating into SACC"
    #Transform everything into SACC format
    #1- Generate SACC tracers
    stracers = [
        sacc.Tracer("tr_b%d" % i, "point", t.zarr, t.nzarr, exp_sample="gals")
        for i, t in enumerate(tracers)
    ]

    #2- Define SACC binning
    # Each pair only keeps ells below the smaller of the two tracers' lmax.
    typ, ell, t1, q1, t2, q2 = [], [], [], [], [], []
    for i1 in np.arange(nbins):
        for i2 in np.arange(i1, nbins):
            lmax = min(tracers[i1].lmax, tracers[i2].lmax)
            for l in ell_eff[ell_eff < lmax]:
                typ.append('F')
                ell.append(l)
                t1.append(i1)
                t2.append(i2)
                q1.append('P')
                q2.append('P')
    sbin = sacc.Binning(typ, ell, t1, q1, t2, q2)
    ssbin = sacc.SACC(stracers, sbin)

    #3- Arrange power spectra into SACC mean vector
    vec = np.zeros((ssbin.size(), ))
    for t1i, t2i, ells, ndx in ssbin.sortTracers():
        lmax = min(tracers[t1i].lmax, tracers[t2i].lmax)
        vec[ndx] = cls_all[(t1i, t2i)][np.where(ell_eff < lmax)[0]]
    svec = sacc.MeanVec(vec)

    #4- Create SACC file and write to file
    csacc = sacc.SACC(stracers, sbin, svec)
    csacc.saveToHDF(o.fname_out)
コード例 #18
0
    sacc_tracers.append(T)
#Binning and mean
# One 'F' (Fourier-space) entry per effective ell for every tracer pair.
# NOTE(review): `type` shadows the builtin; kept byte-identical here.
type, ell, dell, t1, q1, t2, q2 = [], [], [], [], [], [], []
for t1i in range(nbins):
    for t2i in range(t1i, nbins):
        for i_l, l in enumerate(ell_eff):
            type.append('F')  #Fourier-space
            ell.append(l)
            dell.append(lend[i_l] - lini[i_l])
            t1.append(t1i)
            q1.append('P')
            t2.append(t2i)
            q2.append('P')
sacc_binning = sacc.Binning(type, ell, t1, q1, t2, q2, deltaLS=dell)
sacc_mean = sacc.MeanVec(cls_all.flatten())
if covar is None:
    sacc_precision = None
else:
    sacc_precision = sacc.Precision(covar,
                                    "dense",
                                    is_covariance=True,
                                    binning=sacc_binning)
sacc_meta = {'Field': o.hsc_field, 'Area_rad': area_patch}
s = sacc.SACC(sacc_tracers,
              sacc_binning,
              sacc_mean,
              precision=sacc_precision,
              meta=sacc_meta)
s.printInfo()
s.saveToHDF(o.fname_out)
コード例 #19
0
    q2_arr += nells * [q2]
    t1_arr += nells * [t1]
    t2_arr += nells * [t2]
    typ_arr += nells * [typ]
# Shared binning (with windows) for the data/fiducial/noise SACC triplet.
bins = sacc.Binning(typ_arr,
                    ls_arr,
                    t1_arr,
                    q1_arr,
                    t2_arr,
                    q2_arr,
                    windows=w_arr)

#SACC files
# Data vector with precision matrix.
s = sacc.SACC(tracers,
              bins,
              mean=meanvec,
              precision=precis,
              meta={'data_name': 'BK15_bmode_analysis'})
# Fiducial mean vector (no precision).
# NOTE(review): s_f is never saved in this snippet — confirm intentional.
s_f = sacc.SACC(tracers,
                bins,
                mean=meanvec_f,
                meta={'data_name': 'BK15_bmode_analysis_fiducial'})
# Noise mean vector (no precision).
s_n = sacc.SACC(tracers,
                bins,
                mean=meanvec_n,
                meta={'data_name': 'BK15_bmode_analysis_noise'})

#Save SACC file
s.saveToHDF("BK15.sacc")
s.printInfo()
s_n.saveToHDF("BK15_noise.sacc")
コード例 #20
0
ファイル: coadder.py プロジェクト: damonge/WeePeeZee
               axis=0) / area)
    for i_w, ww in enumerate(sc['GAMA09H'].binning.windows)
]

# Bins
# Reuse GAMA09H's binning, replacing its windows with the coadded ones.
# NOTE(review): this mutates the shared binning object in place.
s_bn = sc['GAMA09H'].binning
s_bn.windows = wins

# Tracers
# Four tracers with their N(z)s plus per-photo-z-code extra columns.
s_tr = []
for i_t in range(4):
    T = sacc.Tracer('bin_%d' % i_t,
                    'point',
                    zs[i_t],
                    Nz[i_t],
                    exp_sample="HSC_DESC")
    T.addColumns({pn: ec[pn][i_t] for pn in pz_codes})
    s_tr.append(T)

# Signal spectra
s_mean = sacc.MeanVec(mean)
s_prec = sacc.Precision(cov, "dense", is_covariance=True, binning=s_bn)
s_meta = {'Area_rad': area}
s = sacc.SACC(s_tr, s_bn, s_mean, precision=s_prec, meta=s_meta)
s.saveToHDF("COADD/power_spectra_wdpj.sacc")
# Noise spectra
# Noise file drops the windows (set on the shared binning) and precision.
s_mean = sacc.MeanVec(mean_n)
s_bn.windows = None
s = sacc.SACC(s_tr, s_bn, s_mean, meta=s_meta)
s.saveToHDF("COADD/noi_bias.sacc")
コード例 #21
0
ファイル: power_summarizer.py プロジェクト: susannaaz/BBPipe
 def save_to_sacc(self, fname, t, b, v, cov=None, return_sacc=False):
     """Assemble a SACC object and write it to `fname`.

     :param fname: output file path.
     :param t: list of SACC tracers.
     :param b: SACC Binning object.
     :param v: mean data vector.
     :param cov: optional precision/covariance object.
     :param return_sacc: if True, also return the SACC object.
     """
     s = sacc.SACC(t, b, mean=v, precision=cov)
     s.saveToHDF(fname)
     if return_sacc:
         return s