Code example #1
File: cal_Cijl_Gm.py  Project: zdplayground/SVD_ps
        def cal_Gm(l, rank):
            #n_l = default_num_l_in_rank * rank + l
            n_l = np.sum(num_ell_array[0:rank]) + l
            ell = l_min + n_l * delta_l
            ell = alpha * ell
            ##offset_Gm = n_l * N_dset * num_kout * data_type_size

            Gmatrix_l = np.zeros((N_dset, num_kout))
            # j denotes column
            for j in range(num_kout):
                #chi_k: comoving distance corresponding to k; only an approximation is possible here, e.g., using k_mid
                chi_k = ell / k_mid[j]
                z_k = interpolate.splev(chi_k, tck_zchi, der=0)
                # i denotes row
                if z_k < zmax:
                    GF = (growth_factor(z_k, cosmic_params.omega_m) / G_0)**2.0
                    for i in range(N_dset):
                        # redshift bin i: rb_i
                        rb_i = iu1[0][i]
                        gi = lens_eff(zbin, center_z, n_z, nz_y2, rb_i, z_k,
                                      sigma_z_const)
                        # redshift bin j: rb_j
                        rb_j = iu1[1][i]
                        gj = lens_eff(zbin, center_z, n_z, nz_y2, rb_j, z_k,
                                      sigma_z_const)
                        # here too, the k-bin integration is approximated, giving the (1/k1 - 1/k2) term
                        Gmatrix_l[i][j] = ((1.0 + z_k)**2.0 * gi * gj * ell *
                                           (1.0 / kout[j] - 1.0 / kout[j + 1]) * GF * Pnorm_out[j])
            return Gmatrix_l
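In cal_Gm the flattened row index i runs over the N_dset unique redshift-bin pairs, and iu1 = np.triu_indices(nbin_ext) (defined elsewhere in this module) maps it back to the pair (rb_i, rb_j). A minimal self-contained sketch of that mapping, with a hypothetical small bin count:

# Sketch of the iu1 index mapping assumed by cal_Gm; nbin_ext = 4 is a hypothetical value.
import numpy as np

nbin_ext = 4
N_dset = (nbin_ext + 1) * nbin_ext // 2   # number of unique C^ij(l) pairs, here 10
iu1 = np.triu_indices(nbin_ext)           # upper-triangle (row, column) index arrays

for i in range(N_dset):
    rb_i, rb_j = iu1[0][i], iu1[1][i]     # the same unpacking used in cal_Gm
    print(i, '->', (rb_i, rb_j))          # 0 -> (0, 0), 1 -> (0, 1), ..., 9 -> (3, 3)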
Code example #2
 def Gm_integrand_out(k, c_i, spl_gi, spl_gj, ell):
     chi_k = ell / k
     # Since the diameter of the Milky Way is about 0.03 Mpc, we assume that the smallest interval between chi_k and chibin[c_i+1] is larger than 0.1 Mpc/h.
     if (chibin[c_i + 1] - 1.e-8) < chi_k:
         return 0.0
     else:
         #z_k = interpolate.splev(chi_k, tck_zchi, der=0)
         z_k = spl_zchi(chi_k)
         GF = (growth_factor(z_k, cosmic_params.omega_m) / G_0)**2.0
         return (1.0 + z_k)**2.0 * spl_gi(chi_k) * spl_gj(chi_k) * ell / k**2.0 * GF
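Gm_integrand_out is meant to be integrated over each output k bin to build one element of the G' matrix, as get_Gm_out does in code example #4 below. A minimal usage sketch, assuming the objects it relies on (chibin, spl_zchi, growth_factor, G_0, cosmic_params, and Gm_integrand_out itself) are already set up as in that example:

# Hedged usage sketch: one G'-matrix element is quad-integrated over the k bin [kout[j], kout[j+1]],
# mirroring the call inside get_Gm_out (code example #4). All referenced objects are assumed to
# exist as defined there; this is an illustration, not part of the original module.
from scipy import integrate

def one_G_element(rb_i, rb_j, j, ell, kout, Pnorm_out, spl_gi_list):
    res = integrate.quad(Gm_integrand_out, kout[j], kout[j + 1],
                         args=(rb_i, spl_gi_list[rb_i], spl_gi_list[rb_j], ell),
                         epsabs=1.e-6, epsrel=1.e-6)
    return res[0] * Pnorm_out[j]   # res[1] is the absolute error estimate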
Code example #3
File: cal_Cijl_Gm.py  Project: zdplayground/SVD_ps
        def cal_cijl(l, rank):
            #n_l = default_num_l_in_rank * rank + l
            n_l = np.sum(num_ell_array[0:rank]) + l
            ell = l_min + n_l * delta_l
            ell = alpha * ell
            ##offset_cijl = n_l * N_dset * data_type_size
            c_temp = np.zeros((nbin_ext, nbin_ext))
            for c_i in range(nbin_ext):
                g_nr = nbin_ext - c_i
                #print('g_nr:', g_nr)
                # gmatrix_jk is used to store integration elements to calculate array C^ij(l) from Pk for a certain l
                # j=i, i+1,...nbin_ext; j in the name gmatrix_jk also denotes row index
                gmatrix_jk = np.zeros((g_nr, num_par))
                #print(gmatrix_jk)
                # g_col is the column index of gmatrix_jk
                for g_col in range(num_par):
                    chi_k = ell / k_par[g_col]
                    z_k = interpolate.splev(
                        chi_k, tck_zchi, der=0
                    )  # Here z_k is understood as the spectroscopic redshift z
                    g_i = lens_eff(zbin, center_z, n_z, nz_y2, c_i, z_k,
                                   sigma_z_const)
                    if z_k < zmax:  # zmax corresponds to \chi_h in the expression for C^ij(l)
                        GF = (growth_factor(z_k, cosmic_params.omega_m) /
                              G_0)**2.0
                        #print('zmax, z_k, GF:', zmax, z_k, GF)
                        c_j = c_i
                        # here g_row is the row index of gmatrix_jk
                        for g_row in range(g_nr):
                            g_j = lens_eff(zbin, center_z, n_z, nz_y2, c_j,
                                           z_k, sigma_z_const)
                            gmatrix_jk[g_row][g_col] = pow(
                                (1.0 + z_k), 2.0) * g_i * g_j * ell * (
                                    1.0 / k_camb[g_col] -
                                    1.0 / k_camb[g_col + 1]) * GF
                            ###gmatrix_jk[g_row][g_col] = pow((1.0+z_k), 2.0)*lens_eff(c_i, chi_k)*lens_eff(c_j, chi_k)*ell*(1.0/k_par[g_col]-1.0/k_par[g_col+1])*GF
                            c_j += 1

                c_temp[c_i][c_i:nbin_ext] = np.dot(gmatrix_jk, Pk_par)
            #    print(c_temp)
            cijl = np.asarray(
                c_temp[iu1],
                dtype=np.float64)  # extract upper-triangle of c_temp
            if rank == 0:
                print('ell from rank', rank, 'is', ell)
            return cijl
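The comments in cal_cijl and cal_Gm call the (1/k1 - 1/k2) factor an approximation: 1/k^2 is integrated exactly over each k bin, while the remaining, slowly varying factors are frozen at a single k inside the bin. A small self-contained check of that weight:

# Self-contained check of the per-bin weight used above: the exact integral of 1/k^2 over
# [k1, k2] equals (1/k1 - 1/k2); the bin edges below are hypothetical values for illustration.
import numpy as np
from scipy import integrate

k1, k2 = 0.05, 0.06                                   # hypothetical bin edges in h/Mpc
exact = integrate.quad(lambda k: 1.0 / k**2.0, k1, k2)[0]
weight = 1.0 / k1 - 1.0 / k2
print(exact, weight, np.isclose(exact, weight))       # the two agree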
Code example #4
def main():
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()

    t_0 = MPI.Wtime()
    N_dset = (nrbin + 1) * nrbin // 2  # number of C^ij(l) data sets
    #data_type_size = 8                     # number of bytes for double precision data
    zbin = np.zeros(nrbin +
                    1)  # for both zbin and chibin, the first element is 0.
    chibin = np.zeros(nrbin + 1)
    shape_noise = np.zeros(nrbin)

    num_kin = 506  # the number of boundary points of k bins from the input matter power spectrum file
    # consider num_kbin as the input number of k bins
    num_kbin = num_kin - 1  # k_max should be larger than lmax/xmax, -1 means disregarding the last term

    k_par = np.zeros(num_kbin)  # Input k and Pk for the calculation of C^ij(l)
    Pk_par = np.zeros(num_kbin)

    # l (the parameter of C^ij(l)) takes the values l_min, l_min+delta_l, ..., l_max-delta_l
    # We choose the case below:
    l_max = 2002  # l_max < X_max*k_max
    #l_max = 22
    l_min = 1
    delta_l = 3
    num_l = (l_max - l_min) // delta_l + 1

    c = 2.99792458e5  # speed of light in km/s
    H_0 = 100.0  # unit: h * km/s/Mpc
    sigmae = 0.021  # Tully-Fisher case \sigma_e from Eric's paper
    scale_n = 1.10  # Tully-Fisher total surface number density (unit: arcmin^-2), from Eric et al.(2013), Table 2 (TF-Stage)

    # the coefficient constant of the convergence power spectrum, see Eq.(21)
    cross_const = (1.5 * cosmic_params.omega_m)**2.0 * (H_0 / c)**4.0
    #print 'cross_const', cross_const
    sr_const = np.pi**2.0 / 1.1664e8  # 1 square arcminute = sr_const steradians
    constx = sr_const / cross_const  # The constx connects shot noise with C^ij(l)

    idir0 = '/Users/ding/Documents/playground/shear_ps/SVD_ps/'
    inputf = idir0 + 'Input_files/nz_stage_IV.txt'  # Input file of n(z) which is the galaxy number density distribution in terms of z
    # Here center_z denotes the z axis of n(z). The name may not be fully appropriate since there is no redshift-bin setting here
    center_z, n_z = np.loadtxt(inputf, dtype='f8', comments='#', unpack=True)
    spl_nz = InterpolatedUnivariateSpline(center_z, n_z)
    n_sum = spl_nz.integral(center_z[0],
                            center_z[-1])  # Calculate the total number density
    #print(n_sum)
    scale_dndz = scale_n / n_sum
    n_z = n_z * scale_dndz  # rescale n(z) so that the total number density from the data file equals scale_n
    spl_nz = InterpolatedUnivariateSpline(
        center_z, n_z)  # Interpolate n(z) in terms of z using spline

    #nz_test = interpolate.splev(center_z, tck_nz, der=0)
    #print(abs(n_z- nz_test)<1.e-7)

    # calculate total number density n^i (integrate dn/dz) in the ith tomographic bin
    def n_i_bin(zbin, i):
        zi = zbin[i]
        zf = zbin[i + 1]
        # rescale n(z) so that the total number density from the data file equals scale_n
        ##n_i = scale_dndz * integrate.quad(n_z, zi, zf, epsabs=1.e-7, epsrel=1.e-7)[0]
        n_i = spl_nz.integral(zi, zf)
        return n_i

    G_0 = growth_factor(
        0.0, cosmic_params.omega_m)  # G_0 at z=0, normalization factor
    num_z = np.size(
        center_z)  # the number of z bins of n(z), obtained from the data file
    chi_z = np.zeros(num_z)

    for i in range(num_z):
        chi_z[i] = comove_d(
            center_z[i]
        ) * c / H_0  # with unit Mpc/h, matched with that of ell/k
    # we want to interpolate z as a function of chi
    spl_zchi = InterpolatedUnivariateSpline(chi_z,
                                            center_z)  # z as a function of chi

    # here interpolate \chi as a function of z
    spl_chiz = InterpolatedUnivariateSpline(center_z, chi_z)

    # bin interval
    z_min = center_z[0]
    z_max = 2.0  # based on the data file, at z=2.0, n(z) is very small
    zbin_avg = (z_max - z_min) / float(nrbin)
    for i in range(nrbin):
        zbin[i] = i * zbin_avg + z_min
    zbin[-1] = z_max

    # print('Xmax', c/H_0*comove_d(zbin[-1]))
    # print('nbar first element: ', n_i_bin(zbin, 0))

    # Note that here chibin[0] is not equal to 0, since there is a redshift cut at low z. The unit is Mpc/h
    for i in range(0, nrbin + 1):
        chibin[i] = comove_d(zbin[i]) * c / H_0

    # 3D power spectrum is obtained from CAMB using the above cosmological parameters.
    ##inputf = fpath+'test_matterpower.dat'# if it's used, the cosmological parameters should also be changed correspondingly.
    inputf = idir0 + 'Input_files/CAMB_Planck2015_matterpower.dat'
    k_camb, Pk_camb = np.loadtxt(inputf, dtype='f8', comments='#', unpack=True)
    Pk_camb_spl = InterpolatedUnivariateSpline(k_camb, Pk_camb)

    ifile = idir0 + 'Input_files/transfer_fun_Planck2015.dat'
    kk, Tf = np.loadtxt(ifile,
                        dtype='f8',
                        comments='#',
                        usecols=(0, 1),
                        unpack=True)
    ##print(kk)
    k_0 = 0.001  # unit h*Mpc^-1
    Pk_0 = Pk_camb_spl(k_0)
    Tf_spl = InterpolatedUnivariateSpline(kk, Tf)
    Tf_0 = Tf_spl(k_0)
    P0_a = Pk_0 / (pow(k_0, cosmic_params.ns) * Tf_0**2.0)
    Psm_transfer = P0_a * pow(
        k_camb, cosmic_params.ns
    ) * Tf**2.0  # Get primordial (smooth) power spectrum from the transfer function
    Pk_now_spl = InterpolatedUnivariateSpline(k_camb, Psm_transfer)

    # ------ This part calculates the Sigma^2_{xy} using Pwig from CAMB. -------#
    z_mid = z_max / 2.0
    q_BAO = 110.0  # unit: Mpc/h, the sound horizon scale
    Sigma2_integrand = lambda k: Pk_camb_spl(k) * (1.0 - np.sin(k * q_BAO) /
                                                   (k * q_BAO))
    pre_factor = 1.0 / (3.0 * np.pi**2.0) * (
        growth_factor(z_mid, cosmic_params.omega_m) / G_0)**2.0
    Sigma2_xy = pre_factor * integrate.quad(
        Sigma2_integrand, k_camb[0], k_camb[-1], epsabs=1.e-03,
        epsrel=1.e-03)[0]
    print('At z=', z_mid, 'Sigma2_xy=', Sigma2_xy)

    #----------------------------------------------------------------------------#
    def Pk_par_integrand(k):
        if Pk_type == 'Pwig_linear':
            Pk_par = Pk_camb_spl(k)
        elif Pk_type == 'Pnow':
            Pk_par = Pk_now_spl(k)
        elif Pk_type == 'Pwig_nonlinear':
            Pk_par = Pk_now_spl(k) + (Pk_camb_spl(k) - Pk_now_spl(k)) * np.exp(
                -k**2.0 * Sigma2_xy / 2.0)
        return Pk_par

    odir1 = 'mpi_preliminary_data_{}/'.format(Pk_type)
    if Psm_type == 'Pnow':
        odir1_Gm = odir1 + 'set_Pnorm_Pnow/'
    else:
        odir1_Gm = odir1

    odir = odir0 + odir1 + 'comm_size{}/'.format(size)
    odir_Gm = odir0 + odir1_Gm + 'comm_size{}/'.format(size)

    if rank == 0:
        if not os.path.exists(odir):
            os.makedirs(odir)

        if not os.path.exists(odir_Gm):
            os.makedirs(odir_Gm)
    comm.Barrier()

    print('odir_Gm:', odir_Gm, 'from rank:', rank)
    prefix = 'Tully-Fisher'
    Cijl_outf_prefix = odir + prefix  # The prefix of output file name
    Gm_outf_prefix = odir_Gm + prefix
    iu1 = np.triu_indices(
        nrbin)  # Return the indices for the upper-triangle of an (n, m) array

    #------------------------------------------------
    def get_shapenoise(rank):
        if rank == 0:
            # Calculate covariance matrix of Pk, the unit of number density is per steradians
            for i in range(nrbin):
                shape_noise[i] = sigmae**2.0 / n_i_bin(zbin, i)
            pseudo_sn = shape_noise * constx

            # Output the shape noise (includes the scale factor) in a file
            outf = odir0 + odir1 + prefix + '_pseudo_shapenoise_{0}rbins.out'.format(
                nrbin)  # basic variable
            np.savetxt(outf, pseudo_sn, fmt='%.15f', newline='\n')

    #---------------------------------------------------
    # Interpolate g^i in terms of chi (comoving distance)
    ifile = './lens_eff_g_i/g_i_{}rbins.npz'.format(nrbin)
    npz_data = np.load(ifile)
    chi_array = npz_data['chi_array'] * c / H_0
    g_i_matrix = npz_data['g_i_matrix']
    spl_gi_list = []
    for i in range(nrbin):
        spl_gi = InterpolatedUnivariateSpline(chi_array, g_i_matrix[i, :])
        spl_gi_list.append(spl_gi)

    ifile = idir0 + 'Input_files/KW_stage_IV_num_ell_per_rank_comm_size{}.dat'.format(
        size)
    num_ell_array = np.loadtxt(ifile, dtype='int', comments='#', usecols=(1, ))
    num_l_in_rank = num_ell_array[rank]

    #------------------------------------------------------------------------------------------------#
    #--------------------------- Part 1: calculate C^ij(l) ------------------------------------------#
    # Note: Don't generate the output G matrix for the output P(k) in the same pass as C^ij(l), because the intervals of its k bins are
    # different from those of the G matrix for the 'observed' data C^ij(l)!
    #------------------------------------------------------------------------------------------------#
    # This is for output G' matrix.
    def Gm_integrand_out(k, c_i, spl_gi, spl_gj, ell):
        chi_k = ell / k
        # Since the diameter of the Milky Way is about 0.03 Mpc, we assume that the smallest interval between chi_k and chibin[c_i+1] is larger than 0.1 Mpc/h.
        if (chibin[c_i + 1] - 1.e-8) < chi_k:
            return 0.0
        else:
            #z_k = interpolate.splev(chi_k, tck_zchi, der=0)
            z_k = spl_zchi(chi_k)
            GF = (growth_factor(z_k, cosmic_params.omega_m) / G_0)**2.0
            return (1.0 + z_k)**2.0 * spl_gi(chi_k) * spl_gj(chi_k) * ell / k**2.0 * GF

    # This is for output C^{ij}(l).
    def Gm_integrand_in(k, c_i, spl_gi, spl_gj, ell):
        return Gm_integrand_out(k, c_i, spl_gi, spl_gj,
                                ell) * Pk_par_integrand(k)

    def get_Cijl(comm, rank):
        # Output the Cij_l array in which each term is unique.
        def cal_cijl(l, rank):
            #n_l = default_num_l_in_rank * rank + l
            n_l = np.sum(num_ell_array[0:rank]) + l
            ell = l_min + n_l * delta_l
            ell = ell * alpha
            #offset_cijl = n_l * N_dset * data_type_size
            c_temp = np.zeros((nrbin, nrbin))
            for c_i in range(nrbin):
                for c_j in range(c_i, nrbin):
                    # we could use smaller epsrel, but it would require more integration points to achieve that precision.
                    res = integrate.quad(Gm_integrand_in,
                                         k_camb[0],
                                         k_camb[-1],
                                         args=(c_i, spl_gi_list[c_i],
                                               spl_gi_list[c_j], ell),
                                         limit=200,
                                         epsabs=1.e-6,
                                         epsrel=1.e-12)
                    c_temp[c_i][c_j] = res[0]
                    abserr = res[1]
                    if res[0] != 0.0:
                        relerr = abserr / res[0]
                    else:
                        relerr = 0.0
                #c_temp[c_i][c_i : nrbin] = np.dot(gmatrix_jk, Pk_par)
            array_cij = np.asarray(
                c_temp[iu1],
                dtype=np.float64)  # extract upper-triangle of c_temp
            if rank == 0:
                #print('rank:', rank, 'array_cij:', array_cij)
                print('ell from rank', rank, 'is', ell,
                      'abs err of Cijl is %.4e' % abserr,
                      'and rel err is %.4e' % relerr)
            return ell, array_cij, abserr, relerr

        Cijl_file = Cijl_outf_prefix + '_Cij_l_{0}rbins_{1}kbins_CAMB_rank{2}.bin'.format(
            nrbin, num_kbin, rank)  # basic variable
        Cijl_fwriter = open(Cijl_file, 'wb')

        err_info = np.array([], dtype=np.float64).reshape(0, 3)
        for l in range(num_l_in_rank):
            ell, cijl, abserr, relerr = cal_cijl(l, rank)
            cijl.tofile(Cijl_fwriter, sep="")
            err_info = np.vstack((err_info, np.array([ell, abserr, relerr])))
        Cijl_fwriter.close()
        err_info_ofile = Cijl_outf_prefix + '_integration_error_Cij_l_{0}rbins_{1}kbins_CAMB_rank{2}.out'.format(
            nrbin, num_kbin, rank)
        np.savetxt(err_info_ofile,
                   err_info,
                   fmt='%i  %.4e  %.4e',
                   delimiter=' ',
                   newline='\n',
                   header='ell    abs_err   rel_err',
                   comments='#')
        #comm.Barrier()

    #-----------------------------------------------------------------------------------------------#
    #------------------------- Part 2: get Gm_cross_out for output P(k) ----------------------------#

    ######------------- set up output k space and G' matrix for output Pk ----------------###########
    def get_Gm_out(comm, rank):
        # construct Gmatrix: Gout for output Pk with num_kout kbins
        # Note: The algorithm is the same as that calculating C^ij(l) in Part 1. Here we use a (seemingly) simplified way to get Gmatrix_l.
        def cal_G(l, rank):
            n_l = np.sum(num_ell_array[0:rank]) + l
            ell = l_min + n_l * delta_l
            ell = ell * alpha
            #offset_Gm = n_l * N_dset * num_kout * data_type_size

            Gmatrix_l = np.zeros((N_dset, num_kout))
            # j denotes column
            for j in range(num_kout):
                # i denotes row
                for i in range(N_dset):
                    # redshift bin i: rb_i
                    rb_i = iu1[0][i]
                    # in Python, eps should be larger than 1.e-15 to be safe. The smallest chi from the corresponding output k bin should be smaller than
                    # the upper boundary of chi from the ith tomographic bin
                    if chibin[rb_i + 1] > ell / kout[j + 1]:
                        ##krb_i = ell/(chibin[rb_i]+1.e-12)  # avoid division by zero
                        rb_j = iu1[1][i]
                        # more precise calculation of Gmatrix_l
                        # the j index of Pnorm_out denotes k bin id, different from the index rb_j of g_j
                        res = integrate.quad(Gm_integrand_out,
                                             kout[j],
                                             kout[j + 1],
                                             args=(rb_i, spl_gi_list[rb_i],
                                                   spl_gi_list[rb_j], ell),
                                             epsabs=1.e-6,
                                             epsrel=1.e-6)
                        Gmatrix_l[i][j] = res[0] * Pnorm_out[j]
                        abserr = res[1]
                        if res[0] != 0.0:
                            relerr = abserr / res[0]
                        else:
                            relerr = 0.0
            #print Gmatrix_l[:, 0]
            if rank == 0:
                #print('rank:', rank, 'Gm:', Gmatrix_l)
                print('ell from rank', rank, 'is', ell,
                      'abs err of G is %.4e' % abserr,
                      'rel err is %.4e' % relerr)
            return ell, Gmatrix_l, abserr, relerr

        kout, k_mid = np.zeros(num_kout + 1), np.zeros(num_kout)
        k_low, k_high = 0.01, 1.0  # This set may need to be checked more!
        kout[0], kout[1], kout[-1] = k_camb[0], k_low, k_camb[-1]
        lnk_factor = np.log(k_high / k_low) / (num_kout - 2)

        for i in range(2, num_kout):
            kout[i] = kout[i - 1] * np.exp(lnk_factor)
        #print kout

        for i in range(num_kout):
            k_mid[i] = (kout[i] + kout[i + 1]) / 2.0
        if Psm_type == 'Pnorm' or Psm_type == 'default':
            Pnorm_out = 1.5e4 / (1.0 + (k_mid / 0.05)**2.0)**0.65  # from Eisenstein & Zaldarriaga (2001)
        elif Psm_type == 'Pnow':
            Pnorm_out = Pk_now_spl(k_mid)  # Test how the change of Pnorm to Pnow could influence the eigenvalues from the SVD routine.

        # Gm_cross_out uses selected new k bins
        Gm_cross_file = Gm_outf_prefix + '_Gm_cross_out_{0}rbins_{1}kbins_CAMB_rank{2}.bin'.format(
            nrbin, num_kout, rank)  # basic variable
        Gm_cross_fwriter = open(Gm_cross_file, 'wb')

        err_info = np.array([], dtype=np.float64).reshape(0, 3)
        for l in range(num_l_in_rank):
            ell, Gm, abserr, relerr = cal_G(l, rank)
            Gm.tofile(Gm_cross_fwriter, sep="")
            err_info = np.vstack(
                (err_info, np.array([ell, abserr, relerr]))
            )  # If relerr were xx/0, it could not be appended to the array.
        Gm_cross_fwriter.close()
        err_info_ofile = Gm_outf_prefix + '_integration_error_Gm_cross_out_{0}rbins_{1}kbins_CAMB_rank{2}.out'.format(
            nrbin, num_kbin, rank)
        np.savetxt(err_info_ofile,
                   err_info,
                   fmt='%i  %.4e  %.4e',
                   delimiter=' ',
                   newline='\n',
                   header='ell    abs_err   rel_err',
                   comments='#')

    if cal_sn == "True":
        get_shapenoise(rank)
    if cal_cijl == "True":
        get_Cijl(comm, rank)
    #comm.Barrier()
    t_1 = MPI.Wtime()

    if cal_Gm == "True" and Pk_type == 'Pwig_nonlinear':
        get_Gm_out(comm, rank)
    #comm.Barrier()
    t_2 = MPI.Wtime()
    if rank == 0:
        print('Running time for Cijl:', t_1 - t_0)
        print('Running time for G matrix:', t_2 - t_1)


#######################################################

    def plot_numd_spectroscopy():
        odir_data = "./numd_distribute_spectro/"
        if not os.path.exists(odir_data):
            os.makedirs(odir_data)
        odir_fig = odir_data + 'nz_fig/'
        if not os.path.exists(odir_fig):
            os.makedirs(odir_fig)
        nd_avg = []
        for i in range(nrbin):
            nd_avg.append(n_i_bin(zbin, i) / (zbin[i + 1] - zbin[i]))
        ofile = odir_data + 'gal_numden_spectroz_{}rbins.out'.format(nrbin)
        header_line = ' bin_boundary(low)   nz_avg'
        np.savetxt(ofile,
                   np.array([zbin[0:-1], nd_avg]).T,
                   fmt='%.7f',
                   newline='\n',
                   header=header_line,
                   comments='#')

        print("nd_avg:", nd_avg, "zbin:", zbin)
        fig, ax = plt.subplots(figsize=(8, 6))
        bars = ax.bar(left=zbin[0:-1],
                      height=nd_avg,
                      width=zbin_avg,
                      align='edge',
                      color='white',
                      edgecolor='grey')
        bars[11].set_color('r')
        print(bars)
        # n, bins, patches = ax.hist(nd_avg, bins=nrbin, range=[zbin[0], zbin[-1]], align='left')
        # print(n, bins, patches)
        ax.plot(center_z, n_z, 'k-', lw=2.0)
        ax.set_xlim([0.0, z_max])
        ax.set_ylim([0.0, 1.0])
        ax.set_xlabel(r'$z$', fontsize=20)
        #ax.set_ylabel('$n^i(z)$ $[\mathtt{arcmin}]^{-2}$', fontsize=20)
        ax.set_ylabel(r'$dn^i/dz \; [\mathtt{arcmin}]^{-2}$', fontsize=20)
        ax.minorticks_on()
        ax.tick_params('both', length=5, width=2, which='major', labelsize=15)
        ax.tick_params('both', length=3, width=1, which='minor')
        ax.set_title("KWL-Stage IV", fontsize=20)

        plt.tight_layout()
        figname = "gal_numden_{}rbins_spectro.pdf".format(nrbin)
        plt.savefig(odir_fig + figname)
        plt.show()
        plt.close()

    if show_nz == "True" and rank == 0:
        plot_numd_spectroscopy()
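get_Cijl writes each flattened C^ij(l) vector to the per-rank binary file with numpy's tofile, so the data can be read back with np.fromfile and reshaped by multipole. A minimal read-back sketch; the file name and sizes below are hypothetical placeholders following the format strings above:

# Hedged read-back sketch for the per-rank Cijl binary written by get_Cijl in code example #4.
# The path, nrbin and num_l_in_rank are placeholders; each record holds N_dset float64 values.
import numpy as np

cijl_file = 'Tully-Fisher_Cij_l_5rbins_505kbins_CAMB_rank0.bin'  # hypothetical file name
nrbin, num_l_in_rank = 5, 167                                    # hypothetical sizes
N_dset = (nrbin + 1) * nrbin // 2
cijl = np.fromfile(cijl_file, dtype=np.float64).reshape(num_l_in_rank, N_dset)  # one row per ell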
Code example #5
File: cal_Cijl_Gm.py  Project: zdplayground/SVD_ps
def main():
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()

    num_kin = 506  # number of k points in the input matter power spectrum file
    # l value from l_min, l_min+delta_l, ..., l_max-delta_l
    l_max = 2002  # lmax<Xmax*k_max
    l_min = 1
    delta_l = 3
    num_l = (l_max - l_min) // delta_l + 1
    # index of kmin from CAMB
    kpar_min = 0
    # k_max should be larger than lmax/xmax. -1 means disregarding the last term. Basically it's the number of k bins from input.
    num_par = num_kin - kpar_min - 1

    # dimension of the parameter vector
    k_par = np.zeros(num_par)
    Pk_par = np.zeros(num_par)
    Pnorm = np.zeros(num_par)

    c = 2.99792458e5  # speed of light in km/s

    ##----- in traditional WL, sigmae is larger, equal to 0.26 from Table 2 in Eric et al. 2013 ----#
    sigmae = 0.26
    # For PW-Stage III, the constant of sigma_z (systematic error of source redshift from photometry)
    sigma_z_const = 0.1
    scale_n = 10.0

    # In this code, the constant of cross power spectrum is
    cross_const = (1.5 * cosmic_params.omega_m)**2.0 * (100 / c)**4.0
    #print 'cross_const', cross_const
    # 1 square arcminute = sr_const steradians
    sr_const = np.pi**2.0 / 1.1664e8
    constx = sr_const / cross_const

    # input galaxy number density n(z) file
    idir0 = '/Users/ding/Documents/playground/shear_ps/SVD_ps/'
    inputf = idir0 + 'Input_files/zdistribution_DES_Tully_Fisher.txt'
    lower_z, center_z, upper_z, n_z = np.loadtxt(inputf,
                                                 dtype='f8',
                                                 comments='#',
                                                 unpack=True)

    n_sum = np.sum(n_z * (upper_z - lower_z))
    #print(n_sum)
    scale_dndz = 1.0 / n_sum  # Normalize n(z) distribution
    n_z = n_z * scale_dndz  # rescale n(z) so that the total number density from the data file equals 1.0/arcmin^2
    ## spline n_z as a function of z
    tck_nz = interpolate.splrep(center_z, n_z)
    zmax = center_z[-1]
    n_p_zmax = interpolate.splev(zmax, tck_nz,
                                 der=1)  # the first derivative at z_max~1.3

    # fitting function nc1 * exp(nc2*z)
    #nc2 = n_p_zmax/n_z[-1]
    #nc1 = n_z[-1]/math.exp(nc2 * center_z[-1])

    # fitting function using nc1*z^2*exp(-z^2/nc2), Ma, Z. et al. 2006
    nc2 = 2.0 * zmax**2.0 * n_z[-1] / (2.0 * n_z[-1] - zmax * n_p_zmax)
    nc1 = n_z[-1] / (zmax**2.0 * math.exp(-zmax**2.0 / nc2))
    #print("z_0^2: ", nc2, "Const: ", nc1)

    # set zmax as the maximum z after extension
    zmax_ext = zmax + math.sqrt(2.0) * 0.05 * (
        1 + zmax) * 2.2  # 2.2 is from erf(2.2) which is close to 1.0
    print('zmax_ext: ', zmax_ext)  # It's about 1.655.
    # add 0.001 which is just a small number, so zext[0] doesn't coincide with center_z[-1]
    zext = np.linspace(
        center_z[-1] + 0.001, zmax_ext,
        80)  # extended z region for the external tomographic bins
    nzext = nc1 * zext**2.0 * np.exp(-zext**2.0 / nc2)

    #print('n(zmax):', nzext[-1]) # it's around 1.64
    center_z = np.append(center_z, zext)
    n_z = np.append(n_z, nzext)

    num_z = len(center_z)  # set the new num_z
    chi_z = np.zeros(num_z)
    nz_y2 = np.zeros(num_z)

    tck_nz = interpolate.splrep(center_z, n_z)  # do a new spline interpolation

    ##--for traditional WL, the total distribution of galaxies n(z) in the tomographic bins is unchanged regardless of how complicated the photo-z probability distribution is --#
    # calculate the number density nbar^i (integrate dn/dz) in the ith tomographic bin
    # the unit of number density is per steradian
    def n_i_bin(zbin, i):
        zi = zbin[i]
        zf = zbin[i + 1]
        n_i = interpolate.splint(zi, zf, tck_nz)
        return n_i

    ##-----set tomographic bins-------------##
    chi_z = np.zeros(num_z)
    for i in range(num_z):
        chi_z[i] = comove_d(center_z[i]) * c / 100.0
    # we want to interpolate z as a function of chi
    tck_zchi = interpolate.splrep(chi_z, center_z)

    tck_chiz = interpolate.splrep(center_z, chi_z)

    #**** Different from the previous case that zmin is 0, zmin is not 0 in the new n(z) file. ****#
    zmin = center_z[0]
    zbin_avg = (zmax - zmin) / float(num_rbin)  # bin interval
    nbin_ext = int(zmax_ext / zbin_avg)
    #print('# of redshift bins (extended): ', nbin_ext)

    # for zbin and chibin, the first element is 0.
    zbin = np.zeros(nbin_ext + 1)
    chibin = np.zeros(nbin_ext + 1)
    for i in range(nbin_ext + 1):
        zbin[i] = i * zbin_avg + zmin
        # Note that chibin[0] and chibin[1] store the first bin's lower and upper boundaries
        chibin[i] = interpolate.splev(zbin[i], tck_chiz, der=0)

    G_0 = growth_factor(
        0.0, cosmic_params.omega_m)  # G_0 at z=0, normalization factor

    # 3D power spectrum is from CAMB
    ##inputf = '../test_matterpower.dat'
    inputf = idir0 + 'Input_files/CAMB_Planck2015_matterpower.dat'
    k_camb, Pk_camb = np.loadtxt(inputf, dtype='f8', comments='#', unpack=True)
    Pk_camb_spl = InterpolatedUnivariateSpline(k_camb, Pk_camb)

    ifile = idir0 + 'Input_files/transfer_fun_Planck2015.dat'
    kk, Tf = np.loadtxt(ifile,
                        dtype='f8',
                        comments='#',
                        usecols=(0, 1),
                        unpack=True)
    k_0 = 0.001  # unit h*Mpc^-1
    Pk_0 = Pk_camb_spl(k_0)
    Tf_spl = InterpolatedUnivariateSpline(kk, Tf)
    Tf_0 = Tf_spl(k_0)
    P0_a = Pk_0 / (pow(k_0, cosmic_params.ns) * Tf_0**2.0)
    Psm_transfer = P0_a * pow(
        k_camb, cosmic_params.ns
    ) * Tf**2.0  # Get primordial (smooth) power spectrum from the transfer function
    Pk_now_spl = InterpolatedUnivariateSpline(k_camb, Psm_transfer)

    # ------ This part calculates the Sigma^2_{xy} using Pwig from CAMB. -------#
    z_mid = zmax / 2.0
    q_BAO = 110.0  # unit: Mpc/h, the sound horizon scale
    Sigma2_integrand = lambda k: Pk_camb_spl(k) * (1.0 - np.sin(k * q_BAO) /
                                                   (k * q_BAO))
    pre_factor = 1.0 / (3.0 * np.pi**2.0) * (
        growth_factor(z_mid, cosmic_params.omega_m) / G_0)**2.0
    Sigma2_xy = pre_factor * integrate.quad(
        Sigma2_integrand, k_camb[0], k_camb[-1], epsabs=1.e-06,
        epsrel=1.e-06)[0]
    print('At z=', z_mid, 'Sigma2_xy=', Sigma2_xy)

    for i in range(num_par):
        k_par[i] = (k_camb[i] + k_camb[i + 1]) / 2.0
        # We didn't include the 'now' type here as was done in the Tully-Fisher case.
        if Pk_type == 'Pwig_linear':
            Pk_par[i] = Pk_camb_spl(k_par[i])
        elif Pk_type == 'Pnow':
            Pk_par[i] = Pk_now_spl(k_par[i])
        elif Pk_type == 'Pwig_nonlinear':
            Pk_par[i] = Pk_now_spl(
                k_par[i]) + (Pk_camb_spl(k_par[i]) - Pk_now_spl(
                    k_par[i])) * np.exp(-k_par[i]**2.0 * Sigma2_xy / 2.0)

    odir0 = args.odir0
    if alpha is not None:
        odir0 = odir0 + 'BAO_alpha_{}/'.format(alpha)
    odir = odir0 + 'mpi_preliminary_data_{}/comm_size{}/'.format(Pk_type, size)
    prefix = 'TW_zext_'
    outf_prefix = odir + prefix
    if rank == 0:
        if not os.path.exists(odir):
            os.makedirs(odir)
    comm.Barrier()

    def get_shapenoise():
        shape_noise = np.zeros(nbin_ext)
        # Calculate covariance matrix of Pk, the unit of number density is per steradians
        for i in range(nbin_ext):
            shape_noise[i] = sigmae**2.0 / (scale_n * n_i_bin(zbin, i))
        #shape_noise[i] = sigmae**2.0/ s_nz[i] # This was a serious bug that I made and couldn't find for half a year!
        pseudo_sn = shape_noise * constx

        # put the shape noise (includes the scale factor) in a file
        outf = odir0 + 'mpi_preliminary_data_{}/'.format(
            Pk_type) + prefix + 'pseudo_shapenoise_{0}rbins_ext.out'.format(
                nbin_ext)  # basic variable
        np.savetxt(outf, pseudo_sn, fmt='%.15f', newline='\n')

    ifile = idir0 + 'Input_files/PW_stage_num_ell_per_rank_comm_size{}.dat'.format(
        size)  # num of ell is roughly estimated
    num_ell_array = np.loadtxt(ifile, dtype='int', comments='#', usecols=(1, ))
    num_l_in_rank = num_ell_array[rank]

    data_type_size = 8
    N_dset = (nbin_ext + 1) * nbin_ext // 2
    iu1 = np.triu_indices(nbin_ext)

    #
    ##################################################################################################################
    #------------------------------------ get cross power spectrum C^ij(l) ------------------------------------------#
    ##################################################################################################################
    # If we use an upper-triangular matrix to store C^ij(l) with nbin_ext redshift bins, it's easier to extract C^ij(l) within numb_bin spectroscopic bins.
    # But to save space, I used array form to store C^ij(l) for each l.
    def get_Cijl(comm, rank):
        def cal_cijl(l, rank):
            #n_l = default_num_l_in_rank * rank + l
            n_l = np.sum(num_ell_array[0:rank]) + l
            ell = l_min + n_l * delta_l
            ell = alpha * ell
            ##offset_cijl = n_l * N_dset * data_type_size
            c_temp = np.zeros((nbin_ext, nbin_ext))
            for c_i in range(nbin_ext):
                g_nr = nbin_ext - c_i
                #print('g_nr:', g_nr)
                # gmatrix_jk is used to store integration elements to calculate array C^ij(l) from Pk for a certain l
                # j=i, i+1,...nbin_ext; j in the name gmatrix_jk also denotes row index
                gmatrix_jk = np.zeros((g_nr, num_par))
                #print(gmatrix_jk)
                # g_col is the column index of gmatrix_jk
                for g_col in range(num_par):
                    chi_k = ell / k_par[g_col]
                    z_k = interpolate.splev(
                        chi_k, tck_zchi, der=0
                    )  # Here z_k is understood as the spectroscopic redshift z
                    g_i = lens_eff(zbin, center_z, n_z, nz_y2, c_i, z_k,
                                   sigma_z_const)
                    if z_k < zmax:  # zmax corresponds to \chi_h in the expression for C^ij(l)
                        GF = (growth_factor(z_k, cosmic_params.omega_m) /
                              G_0)**2.0
                        #print('zmax, z_k, GF:', zmax, z_k, GF)
                        c_j = c_i
                        # here g_row is the row index of gmatrix_jk
                        for g_row in range(g_nr):
                            g_j = lens_eff(zbin, center_z, n_z, nz_y2, c_j,
                                           z_k, sigma_z_const)
                            gmatrix_jk[g_row][g_col] = pow(
                                (1.0 + z_k), 2.0) * g_i * g_j * ell * (
                                    1.0 / k_camb[g_col] -
                                    1.0 / k_camb[g_col + 1]) * GF
                            ###gmatrix_jk[g_row][g_col] = pow((1.0+z_k), 2.0)*lens_eff(c_i, chi_k)*lens_eff(c_j, chi_k)*ell*(1.0/k_par[g_col]-1.0/k_par[g_col+1])*GF
                            c_j += 1

                c_temp[c_i][c_i:nbin_ext] = np.dot(gmatrix_jk, Pk_par)
            #    print(c_temp)
            cijl = np.asarray(
                c_temp[iu1],
                dtype=np.float64)  # extract upper-triangle of c_temp
            if rank == 0:
                print('ell from rank', rank, 'is', ell)
            return cijl

        #eps = zbin_avg/10.0              # set minimum interval for g^i integration, z_max-z>eps  !maybe I need to consider this, 04/25/2016
        # Output the Cij_l array in which each term is unique.
        Cijl_file = outf_prefix + 'Cij_l_{}rbins_ext_{}kbins_CAMB_rank{}.bin'.format(
            nbin_ext, num_par, rank)  # basic variable
        # open the file and write/append data in binary format (to save storage volume)
        Cijl_fwriter = open(Cijl_file, 'wb')

        for l in range(num_l_in_rank):
            cijl = cal_cijl(l, rank)
            cijl.tofile(Cijl_fwriter, sep="")
        Cijl_fwriter.close()

    ##################################################################################################################
    #####################-------------------get output k space and G' matrix for output Pk-----------#################
    ##################################################################################################################
    def get_Gm_out(comm, rank):
        # odir_list = ['./Gm_cross_out_linear_k_data/', './Gm_cross_out_exp_k_data/']
        case = 1  # output k exponentially distributed
        kout, k_mid, Pnorm_out = np.zeros(num_kout + 1), np.zeros(
            num_kout), np.zeros(num_kout)
        # Try k bins linearly distributed
        if case == 0:
            delta_k = (k_camb[-1] - k_camb[0]) / num_kout
            kout[0] = k_camb[0]
            for i in range(num_kout):
                kout[i + 1] = kout[i] + delta_k
                k_mid[i] = (kout[i + 1] + kout[i]) / 2.0
                Pnorm_out[i] = 1.5e4 / (1.0 + (k_mid[i] / 0.05)**2.0)**0.65

        # Try the simplest case, using the trapezoidal rule. Try to use exponentially distributed k bins
        elif case == 1:
            k_low, k_high = 0.01, 1.0
            kout[0], kout[1], kout[-1] = k_camb[0], k_low, k_camb[-1]
            lnk_factor = np.log(k_high / k_low) / (num_kout - 2)

            for i in range(2, num_kout):
                kout[i] = kout[i - 1] * np.exp(lnk_factor)
            #print(kout)
            for i in range(num_kout):
                k_mid[i] = (kout[i] + kout[i + 1]) / 2.0
                Pnorm_out[i] = 1.5e4 / (
                    1.0 + (k_mid[i] / 0.05)**2.0
                )**0.65  # This Pnorm is from Eisenstein & Zaldarriaga 1999.

        # construct Gmatrix: Gout for output Pk with num_kout kbins

        def cal_Gm(l, rank):
            #n_l = default_num_l_in_rank * rank + l
            n_l = np.sum(num_ell_array[0:rank]) + l
            ell = l_min + n_l * delta_l
            ell = alpha * ell
            ##offset_Gm = n_l * N_dset * num_kout * data_type_size

            Gmatrix_l = np.zeros((N_dset, num_kout))
            # j denotes column
            for j in range(num_kout):
                #chi_k: comoving distance corresponding to k; only an approximation is possible here, e.g., using k_mid
                chi_k = ell / k_mid[j]
                z_k = interpolate.splev(chi_k, tck_zchi, der=0)
                # i denotes row
                if z_k < zmax:
                    GF = (growth_factor(z_k, cosmic_params.omega_m) / G_0)**2.0
                    for i in range(N_dset):
                        # redshift bin i: rb_i
                        rb_i = iu1[0][i]
                        gi = lens_eff(zbin, center_z, n_z, nz_y2, rb_i, z_k,
                                      sigma_z_const)
                        # redshift bin j: rb_j
                        rb_j = iu1[1][i]
                        gj = lens_eff(zbin, center_z, n_z, nz_y2, rb_j, z_k,
                                      sigma_z_const)
                        # here too, the k-bin integration is approximated, giving the (1/k1 - 1/k2) term
                        Gmatrix_l[i][j] = ((1.0 + z_k)**2.0 * gi * gj * ell *
                                           (1.0 / kout[j] - 1.0 / kout[j + 1]) * GF * Pnorm_out[j])
            return Gmatrix_l

        # Gm_cross_out uses selected new k bins
        Gm_cross_file = outf_prefix + 'Gm_cross_out_{}rbins_{}kbins_CAMB_rank{}.bin'.format(
            nbin_ext, num_kout, rank)  # basic variable
        Gm_cross_fwriter = open(Gm_cross_file, 'wb')
        for l in range(num_l_in_rank):
            Gm = cal_Gm(l, rank)
            Gm.tofile(Gm_cross_fwriter, sep="")
        Gm_cross_fwriter.close()
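get_Gm_out writes one (N_dset, num_kout) G' matrix per multipole to the per-rank binary file. A hedged read-back sketch with hypothetical file name and sizes; the comment on the implied model is an interpretation of how the G' matrix and the normalized band powers fit together, not something stated explicitly in the original code:

# Read back the G' matrices written by get_Gm_out in code example #5 (placeholder path and sizes).
# Since Pnorm_out[j] is folded into column j, the implied model for each multipole is roughly
# C^ij(l) ~ Gm[l] @ (P(k_mid) / Pnorm_out), presumably what the downstream SVD fit assumes.
import numpy as np

gm_file = 'TW_zext_Gm_cross_out_32rbins_66kbins_CAMB_rank0.bin'  # hypothetical file name
nbin_ext, num_kout, num_l_in_rank = 32, 66, 167                  # hypothetical sizes
N_dset = (nbin_ext + 1) * nbin_ext // 2
Gm = np.fromfile(gm_file, dtype=np.float64).reshape(num_l_in_rank, N_dset, num_kout)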