def fit_sub_mean():
    np.random.seed()
    for z_id in xrange(3):
        norm_gf = growth_factor(float(sim_z[z_id]), Omega_m) / G_0
        for space_id in xrange(1):
            ifile_Pk = './{}_sub_Pk_2d_wnw_mean_{}_ksorted_mu/{}kave{}.wig_minus_now_mean_sub_a_{}.dat'.format(
                sim_run, rec_dirs[rec_id], rec_fprefix[rec_id],
                sim_space[space_id], sim_a[z_id])
            Pk_wnw_diff_true = np.loadtxt(
                ifile_Pk, dtype='f4', comments='#', usecols=(2, )
            )  # the file has k, \mu, P(k, \mu) columns; only P(k, \mu) is read.

            ifile_Cov_Pk = './{}_sub_Cov_Pk_2d_wnw_mean_{}_ksorted_mu/{}kave{}.wig_minus_now_mean_sub_a_{}.dat'.format(
                sim_run, rec_dirs[rec_id], rec_fprefix[rec_id],
                sim_space[space_id], sim_a[z_id])
            Cov_Pk_wnw = np.loadtxt(ifile_Cov_Pk, dtype='f4', comments='#')
            ivar_Pk_wnow = N_dataset / np.diag(
                Cov_Pk_wnw)  # inverse variance of the mean over the N_dataset realizations

            params_mcmc = mcmc_routine(N_params, N_walkers, N_walkersteps,
                                       theta, params_T, params_indices,
                                       initial_pvalue, k_p, mu_p,
                                       Pk_wnw_diff_true, ivar_Pk_wnow,
                                       tck_Pk_linw, tck_Pk_sm, norm_gf)
            chi_square = chi2(params_mcmc[:, 0], params_indices,
                              initial_pvalue, k_p, mu_p, Pk_wnw_diff_true,
                              ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm, norm_gf)
            reduced_chi2 = chi_square / (N_fitbin - N_params)
            print('Reduced chi2: ', reduced_chi2)
            # output parameters into a file
            ofile_params = odir + '{}kave{}.wig-now_b_bscale_mean_sub_a_{}_params{}_isotropic.dat'.format(
                rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], ''.join(
                    map(str, initial_pvalue)))
            write_params(ofile_params, params_mcmc, params_name, reduced_chi2)
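# Note on the inverse-variance weights used above (and repeated in the later examples):
# Cov_Pk_wnw is the covariance estimated from individual realizations, so the variance of the
# mean over the N_dataset (assumed independent) realizations is diag(Cov_Pk_wnw)/N_dataset, and
# its reciprocal, N_dataset/np.diag(Cov_Pk_wnw), is the ivar_Pk_wnow entering the chi^2.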
# Example 2
def Pk_wnw_diff_divideby_Psm():

    rec_id = 1          # rec_id=0: before reconstruction; rec_id=1: after reconstruction
    odir_prefix = './run2_3_'
    for z_id in xrange(3):
        for space_id in xrange(1):
#            Pk_mnow_obs = np.array([], dtype=np.float64).reshape(0, n_fitbin)        # m means matrix
#            Pk_mnow_true = np.array([], dtype=np.float64).reshape(0, n_fitbin)
#            Pk_mwig_obs = np.array([], dtype=np.float64).reshape(0, n_fitbin)
#            Pk_mwig_true = np.array([], dtype=np.float64).reshape(0, n_fitbin)
            Pk_mwnw_diff_obs = np.array([], dtype=np.float64).reshape(0, n_fitbin)
            Pk_mwnw_diff_true = np.array([], dtype=np.float64).reshape(0, n_fitbin)
            
            for run_id in xrange(2):
                for sim_seed_id in xrange(10):
                    k_i, Pk_now_obs, Pk_now_true = read_fftPk_file(dir0, z_id, rec_id, space_id, sim_wig[run_id][0], sim_seed_id)
#                    Pk_mnow_obs = np.vstack([Pk_mnow_obs, Pk_now_obs])
#                    Pk_mnow_true = np.vstack([Pk_mnow_true, Pk_now_true])
                    k_i, Pk_wig_obs, Pk_wig_true = read_fftPk_file(dir0, z_id, rec_id, space_id, sim_wig[run_id][1], sim_seed_id)
#                    Pk_mwig_obs = np.vstack([Pk_mwig_obs, Pk_wig_obs])
#                    Pk_mwig_true = np.vstack([Pk_mwig_true, Pk_wig_true])

                    Pk_wnw_diff_true = (Pk_wig_true-Pk_now_true)/Pk_sm_obsk
                    Pk_mwnw_diff_true = np.vstack([Pk_mwnw_diff_true, Pk_wnw_diff_true])
        
            Pk_wnw_diff_true_mean = np.mean(Pk_mwnw_diff_true, axis=0)

            # subtract the mean from each realization, then form the unbiased sample covariance
            Pk_mwnw_diff_true = Pk_mwnw_diff_true - Pk_wnw_diff_true_mean
            Cov_Pk = np.dot(Pk_mwnw_diff_true.T, Pk_mwnw_diff_true) / (N_dataset - 1.0)
            #print(Cov_Pk, Cov_Pk.shape)
            # add the const factor with growth function and bias (b=1.0 for dark matter P(k))
            bias = 1.0
            z = float(sim_z[z_id])
            print("z=", z, "G: ", growth_factor(z, Omega_m)/G_0)
            G2b2 = (growth_factor(z, Omega_m)/G_0*bias)**2.0
            Pk_wnw_diff_true_mean = Pk_wnw_diff_true_mean/G2b2
            Cov_Pk = Cov_Pk/G2b2**2.0
            
            var_name = 'sub_Pk_2d_wnw_mean_{}_ksorted_mu/'.format(rec_dirs[rec_id])
            filename = "{}kave{}.wnw_diff_mean_a_{}ga.dat".format(rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id])
            header_line = ' After sorting k, the mean (P(k)_wig-P(k)_now)/(G^2b^2) in 2d (k, mu) case\n     k       mu    Pk_wnw_diff_true_mean'
            write_output(odir_prefix, var_name, header_line, rec_id, space_id, z_id, np.array([k_p, mu_p, Pk_wnw_diff_true_mean]).T, filename)

            var_name = 'sub_Cov_Pk_2d_wnw_mean_{}_ksorted_mu/'.format(rec_dirs[rec_id])
            header_line = ' Cov(P_2d_wnw_diff(k1), P_2d_wnw_diff(k2)), (wnw_diff means wig-now, 2d: k, mu; and k1, k2 are the k bin indices).'
            write_output(odir_prefix, var_name, header_line, rec_id, space_id, z_id, Cov_Pk, filename)
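# A minimal standalone sketch of the covariance estimate used in Pk_wnw_diff_divideby_Psm()
# above: rows of X are realizations of (P_wig - P_now)/P_sm and columns are (k, mu) bins, so the
# unbiased sample covariance is X_c^T X_c / (N - 1) with X_c the mean-subtracted matrix.
# sample_covariance() is a hypothetical helper for illustration only; it is equivalent to
# np.cov(X, rowvar=False) and assumes numpy is imported as np, as elsewhere in this listing.
def sample_covariance(X):
    X_c = X - np.mean(X, axis=0)                    # subtract the mean of each (k, mu) bin
    return np.dot(X_c.T, X_c) / (X.shape[0] - 1.0)  # unbiased estimator, divides by N-1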
def fit_fof_mean():
    rec_id = 0
    #odir = './params_{}_wig-now_b_bscale_fitted_mean_dset/'.format(rec_dirs[rec_id])
    odir = './params_{}_wig-now_b_bscale_fitted_mean_dset_broad_prior/'.format(
        rec_dirs[rec_id])
    if not os.path.exists(odir):
        os.makedirs(odir)
    for z_id in xrange(3):
        norm_gf = growth_factor(float(sim_z[z_id]), Omega_m) / G_0
        print(sim_z[z_id], norm_gf)
# Example 4
def fit_fof_mean():
    #free_params = ['True', 'True', 'False', 'False', 'True', 'True']
    #fix_params = np.array([1., 1., 0., 0., 1., 1.])
    for rec_id in xrange(1, 2):
        odir = './params_{}_wig-now_b_bscale_fitted_mean_dset/'.format(
            rec_dirs[rec_id])
        if not os.path.exists(odir):
            os.makedirs(odir)
        for z_id in xrange(3):
            normalized_growth_factor = growth_factor(float(sim_z[z_id]),
                                                     Omega_m) / G_0
            for mcut_id in xrange(N_masscut):
                np.random.seed()
                for space_id in xrange(1):
                    ifile_Pk = './{}_Pk_obs_2d_wnw_mean_{}_ksorted_mu_masscut/{}kave{}.wig_minus_now_mean_fof_a_{}_mcut{}.dat'.format(
                        sim_run, rec_dirs[rec_id], rec_fprefix[rec_id],
                        sim_space[space_id], sim_a[z_id],
                        mcut_Npar_list[z_id][mcut_id])
                    Pk_wnw_diff_obs = np.loadtxt(
                        ifile_Pk, dtype='f4', comments='#', usecols=(2, )
                    )  # the file has k, \mu, P(k, \mu) columns; only P(k, \mu) is read.

                    ifile_Cov_Pk = './{}_Cov_Pk_obs_2d_wnw_mean_{}_ksorted_mu_masscut/{}kave{}.wig_minus_now_mean_fof_a_{}_mcut{}.dat'.format(
                        sim_run, rec_dirs[rec_id], rec_fprefix[rec_id],
                        sim_space[space_id], sim_a[z_id],
                        mcut_Npar_list[z_id][mcut_id])
                    Cov_Pk_wnw = np.loadtxt(ifile_Cov_Pk,
                                            dtype='f4',
                                            comments='#')
                    ivar_Pk_wnow = N_dataset / np.diag(
                        Cov_Pk_wnw)  # inverse variance of the mean over the N_dataset realizations

                    alpha_1, alpha_2, b_0, b_scale = mcmc_routine(
                        N_params, N_walkers, N_walkersteps, k_p, mu_p,
                        Pk_wnw_diff_obs, ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm,
                        normalized_growth_factor)

                    chi_square = chi2(
                        [alpha_1[0], alpha_2[0], b_0[0], b_scale[0]], k_p,
                        mu_p, Pk_wnw_diff_obs, tck_Pk_linw, tck_Pk_sm,
                        normalized_growth_factor, ivar_Pk_wnow)
                    reduced_chi2 = chi_square / (N_fitbin - N_params)
                    print('Reduced chi2: ', reduced_chi2)
                    # output parameters into a file
                    ofile_params = odir + '{}kave{}.wig-now_b_bscale_mean_fof_a_{}_mcut{}_params.dat'.format(
                        rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                        mcut_Npar_list[z_id][mcut_id])
                    write_params(ofile_params, alpha_1, alpha_2, b_0, b_scale,
                                 reduced_chi2)
# Example 5
def plot_Sigma_z(sim_z, sub_Sigma, Sigma_Plinw, odir, figname):
    G_index = 1.
    z_axis = np.linspace(0.0, 1.2, 200)
    # rescale the measured Sigma at the first redshift by the growth-factor ratio, giving the Sigma_0*G(z) curve
    fit_Sigma = np.array([(growth_factor(z, Omega_m)/G_0)**G_index for z in z_axis])*sub_Sigma[0]/G_z[0]**G_index
    # We do not show the error from the integration because it is not a one-sigma error.
    plt.plot(0.0, Sigma_Plinw, 'rs', label=r'$\Sigma(0)$ from $P_{\mathtt{lin,w}}$')
    plt.plot(map(float, sim_z[:]), sub_Sigma, 'ks', label=r'$\Sigma(z)$ from $\hat{P}_{\mathtt{sim,w}}$')
    plt.plot(z_axis, fit_Sigma, 'g--', label=r'$\Sigma_0 G(z)$')
    plt.legend(frameon=False, loc="upper right", fontsize=20)
    plt.title(r"$\Sigma$ calculated {}".format(title_space[0]), fontsize=20)
    plt.xlim([-0.05, 1.2])
    plt.xlabel(r"$z$", fontsize=24)
    plt.ylim([4.0, 9.0])
    plt.ylabel(r"$\Sigma$ $[Mpc/h]$", fontsize=24)
    plt.savefig(odir+figname)
    plt.show()
    plt.close()
# Example 6
def fit_fof_sub_mean():
    for z_id in xrange(3):
        G_z = growth_factor(float(sim_z[z_id]), Omega_m)/G_0
        Sigma_z = Sigma_0 * G_z
        
#        Sigma_z = Sigma_z - 1.0
#        for count in xrange(5):

        print("Sigma_z: ", Sigma_z)
        sigma_xy, sigma_z = Sigma_z, Sigma_z
        alpha_1, alpha_2, b_0, b_scale = 1.0, 1.0, 1.0, 0.0
        all_params = alpha_1, alpha_2, sigma_xy, sigma_z, b_0, b_scale
        all_names = "alpha_1", "alpha_2", "sigma_xy", "sigma_z", "b_0", "b_scale"
        all_temperature = 0.01, 0.01, 0.1, 0.1, 0.1, 1.0

        params_indices = [0, 0, 0, 0, 1, 1] # 0: parameter fixed, 1: parameter free.
        fix_params = np.array([], dtype=float)
        theta = np.array([], dtype=float)
        params_T = np.array([], dtype=float)
        params_name = []
        N_params = 0
        for count, flag in enumerate(params_indices):
            if flag == 1:      # free parameter: enters the MCMC vector theta
                fix_params = np.append(fix_params, 0.)
                theta = np.append(theta, all_params[count])
                params_T = np.append(params_T, all_temperature[count])
                params_name.append(all_names[count])
                N_params += 1
            else:              # fixed parameter: keep its value in fix_params
                fix_params = np.append(fix_params, all_params[count])
        print(theta, params_name, N_params)
        print("fixed params: ", fix_params)

        norm_gf = growth_factor(float(sim_z[z_id]), Omega_m)/G_0
        np.random.seed()
        for space_id in xrange(1):
            for mcut_id in xrange(N_masscut):
                ifile_Pk = './{}_Pk_obs_2d_wnw_mean_{}_ksorted_mu_masscut/{}kave{}.wig_minus_now_mean_fof_a_{}_mcut{}.dat'.format(sim_run, rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], mcut_Npar_list[z_id][mcut_id])
                Pk_wnw_diff_obs = np.loadtxt(ifile_Pk, dtype='f4', comments='#', usecols=(2,)) # the file has k, \mu, P(k, \mu) columns; only P(k, \mu) is read.
                
                ifile_Cov_Pk = './{}_Cov_Pk_obs_2d_wnw_mean_{}_ksorted_mu_masscut/{}kave{}.wig_minus_now_mean_fof_a_{}_mcut{}.dat'.format(sim_run, rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], mcut_Npar_list[z_id][mcut_id])
                Cov_Pk_wnw = np.loadtxt(ifile_Cov_Pk, dtype='f4', comments='#')
                ivar_Pk_wnow = N_dataset/np.diag(Cov_Pk_wnw)                                   # inverse variance of the mean over the N_dataset realizations
                
                params_mcmc = mcmc_routine(N_params, N_walkers, N_walkersteps, theta, params_T, params_indices, fix_params, k_p, mu_p, Pk_wnw_diff_obs, ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm, norm_gf, params_name)

                chi_square = chi2(params_mcmc[:, 0], params_indices, fix_params, k_p, mu_p, Pk_wnw_diff_obs, ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm, norm_gf)
                reduced_chi2 = chi_square/(N_fitbin-N_params)
                print('Reduced chi2: ', reduced_chi2)
                # output parameters into a file
                ##ofile_params = odir + '{}kave{}.wig-now_b_bscale_mean_fof_a_{}_mcut{}_params{}_isotropic.dat'.format(rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], mcut_Npar_list[z_id][mcut_id], ''.join(map(str, params_indices)))
                ofile_params = odir + '{}kave{}.wig-now_b_bscale_mean_fof_a_{}_mcut{}_params{}_isotropic_Sigmaz_{}.dat'.format(rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], mcut_Npar_list[z_id][mcut_id], ''.join(map(str, params_indices)), round(Sigma_z, 3))
                write_params(ofile_params, params_mcmc, params_name, reduced_chi2)

            # Fit for DM power spectrum
            ifile_Pk = './{}_sub_Pk_2d_wnw_mean_{}_ksorted_mu/{}kave{}.wig_minus_now_mean_sub_a_{}.dat'.format(sim_run, rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id])
            Pk_wnw_diff_true = np.loadtxt(ifile_Pk, dtype='f4', comments='#', usecols=(2,)) # the file has k, \mu, P(k, \mu) columns; only P(k, \mu) is read.
            
            ifile_Cov_Pk = './{}_sub_Cov_Pk_2d_wnw_mean_{}_ksorted_mu/{}kave{}.wig_minus_now_mean_sub_a_{}.dat'.format(sim_run, rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id])
            Cov_Pk_wnw = np.loadtxt(ifile_Cov_Pk, dtype='f4', comments='#')
            ivar_Pk_wnow = N_dataset/np.diag(Cov_Pk_wnw)                              # inverse variance of the mean over the N_dataset realizations
            
            params_mcmc = mcmc_routine(N_params, N_walkers, N_walkersteps, theta, params_T, params_indices, fix_params, k_p, mu_p, Pk_wnw_diff_true, ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm, norm_gf, params_name)
            chi_square = chi2(params_mcmc[:, 0], params_indices, fix_params, k_p, mu_p, Pk_wnw_diff_true, ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm, norm_gf)
            reduced_chi2 = chi_square/(N_fitbin-N_params)
            print('Reduced chi2: ', reduced_chi2)
            # output parameters into a file
            ##ofile_params = odir + '{}kave{}.wig-now_b_bscale_mean_sub_a_{}_params{}_isotropic.dat'.format(rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], ''.join(map(str, params_indices)))
            ofile_params = odir + '{}kave{}.wig-now_b_bscale_mean_sub_a_{}_params{}_isotropic_Sigmaz_{}.dat'.format(rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], ''.join(map(str, params_indices)), round(Sigma_z, 3))
            write_params(ofile_params, params_mcmc, params_name, reduced_chi2)
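# The later examples call set_params() instead of packing the parameters inline.  set_params()
# is not defined in this listing; below is a minimal sketch consistent with the inline loop in
# fit_fof_sub_mean() above and with the call sites that follow (an assumption, not the original
# implementation).
def set_params(all_params, params_indices, all_names, all_temperature):
    theta, params_T, fix_params, params_name = [], [], [], []
    for value, flag, name, T in zip(all_params, params_indices, all_names, all_temperature):
        if flag == 1:                    # free parameter: sampled by the MCMC
            theta.append(value)
            params_T.append(T)
            params_name.append(name)
            fix_params.append(0.)
        else:                            # fixed parameter: held at its input value
            fix_params.append(value)
    return len(theta), np.array(theta), np.array(fix_params), np.array(params_T), params_name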
# Example 7 (begins mid-function with the tail of the MCMC sampler routine, followed by the module-level main code)
    del sampler
    return np.array(theta_mcmc)



#################################################################################################
######################################--------main code---------#################################
#################################################################################################
# simulation run name
sim_run = 'run2_3'
N_dataset = 20
N_mu_bin = 100
#N_skip_header = 11
#N_skip_footer = 31977
Omega_m = 0.3075
G_0 = growth_factor(0.0, Omega_m) # G_0 at z=0, normalization factor
N_walkers = 40
N_walkersteps = 5000

rec_id = 0
##Sigma_0 = 8.3364           # the approximated value of \Sigma_xy and \Sigma_z, unit Mpc/h
Sigma_0 = 7.8364            # ~7.8 Mpc/h, the theoretical value

sim_z=['0', '0.6', '1.0']
sim_seed = [0, 9]
sim_wig = ['NW', 'WG']
sim_a = ['1.0000', '0.6250', '0.5000']
sim_space = ['r', 's']     # r for real space; s for redshift space
rec_dirs = ['DD', 'ALL']   # "ALL" folder stores P(k, \mu) after reconstruction process, while DD is before reconstruction.
rec_fprefix = ['', 'R']
def fit_subsamplefof_mean():
    parser = argparse.ArgumentParser(
        description=
        'This is the MCMC code to get the fitting parameters, made by Zhejie Ding.'
    )
    parser.add_argument(
        '-rec_id',
        "--rec_id",
        help='The id of reconstruction, either 0 or 1.',
        required=True)  #0: pre-reconstruct; 1: post-reconstruct
    parser.add_argument('-space_id',
                        "--space_id",
                        help='0 for real space, 1 for redshift space.',
                        required=True)
    parser.add_argument(
        '-set_Sigma_xyz_theory',
        "--set_Sigma_xyz_theory",
        help=
        'Determine whether the parameters \Sigma_xy and \Sigma_z are fixed or not, either True or False',
        required=True)
    parser.add_argument(
        '-set_Sigma_sm_theory',
        "--set_Sigma_sm_theory",
        help=
        'Determine whether we use sigma_sm from theory in the fitting model. \
                         If False, sigma_sm=0 (be careful that sigma_sm=\inf in real space case)',
        required=True)
    args = parser.parse_args()
    print("args: ", args)

    rec_id = int(args.rec_id)
    space_id = int(args.space_id)
    set_Sigma_xyz_theory = args.set_Sigma_xyz_theory
    set_Sigma_sm_theory = args.set_Sigma_sm_theory
    print("rec_id: ", rec_id, "space_id: ", space_id)
    print("set_Sigma_xyz_theory: ", set_Sigma_xyz_theory,
          "set_Sigma_sm_theory: ", set_Sigma_sm_theory)

    N_walkers = 40  # increasing N_walkers decreases the number of steps needed for the fitted parameters to converge, but increases the running time.
    N_walkersteps = 5000
    # simulation run name
    N_dataset = 20
    N_mu_bin = 100
    #N_skip_header = 11
    #N_skip_footer = 31977
    Omega_m = 0.3075
    G_0 = growth_factor(0.0, Omega_m)  # G_0 at z=0, normalization factor
    Volume = 1380.0**3.0  # the volume of simulation box

    sim_z = ['0', '0.6', '1.0']
    sim_seed = [0, 9]
    sim_wig = ['NW', 'WG']
    sim_a = ['1.0000', '0.6250', '0.5000']
    sim_space = ['r', 's']  # r for real space; s for redshift space
    rec_dirs = [
        'DD', 'ALL'
    ]  # "ALL" folder stores P(k, \mu) after reconstruction process, while DD is before reconstruction.
    rec_fprefix = ['', 'R']

    mcut_Npar_list = [[37, 149, 516, 1524, 3830], [35, 123, 374, 962, 2105],
                      [34, 103, 290, 681, 1390]]
    N_masscut = np.size(mcut_Npar_list, axis=1)

    # Sigma_sm = sqrt(2 * Sigma_RR) in the post-reconstruction case; for pre-reconstruction, sub_Sigma_RR is not used.
    Sigma_RR_list = [[37, 48.5, 65.5, 84.2, 110], [33, 38, 48.5, 63.5, 91.5],
                     [31, 38, 49, 65, 86]]
    sub_Sigma_RR = 50.0  # note from Hee-Jong's recording

    inputf = '../Zvonimir_data/planck_camb_56106182_matterpower_smooth_z0.dat'
    k_smooth, Pk_smooth = np.loadtxt(inputf,
                                     dtype='f8',
                                     comments='#',
                                     unpack=True)
    tck_Pk_sm = interpolate.splrep(k_smooth, Pk_smooth)

    inputf = '../Zvonimir_data/planck_camb_56106182_matterpower_z0.dat'
    k_wiggle, Pk_wiggle = np.loadtxt(inputf,
                                     dtype='f8',
                                     comments='#',
                                     unpack=True)
    tck_Pk_linw = interpolate.splrep(k_wiggle, Pk_wiggle)

    # firstly, read one file and get k bins we want for the fitting range
    dir0 = '/Users/ding/Documents/playground/WiggleNowiggle/subsample_FoF_data_HS/Pk_obs_2d_wnw_mean_DD_ksorted_mu_masscut/'
    inputf = dir0 + 'fof_kaver.wnw_diff_a_0.6250_mcut35_fraction0.126.dat'
    k_p, mu_p = np.loadtxt(inputf,
                           dtype='f8',
                           comments='#',
                           delimiter=' ',
                           usecols=(0, 1),
                           unpack=True)
    #print(k_p, mu_p)
    N_fitbin = len(k_p)
    #print('# of (k, mu) bins: ', N_fitbin)

    # for output parameters fitted
    odir = './params_{}_wig-now_b_bscale_fitted_mean_dset/'.format(
        rec_dirs[rec_id])
    if not os.path.exists(odir):
        os.makedirs(odir)

    print("N_walkers: ", N_walkers, "N_walkersteps: ", N_walkersteps, "\n")
    if rec_id == 0:
        ##Sigma_0 = 8.3364           # the approximated value of \Sigma_xy and \Sigma_z, unit Mpc/h, at z=0.
        Sigma_0 = 7.8364  # suggested by Zvonimir, at z=0
    elif rec_id == 1:
        Sigma_0 = 2.84

    ##space_id = 1                       # in redshift space
    # 0: parameter fixed, 1: parameter free.
    #params_indices = [1, 1, 1, 1, 1, 1]  # It doesn't fit \Sigma and b_scale well; it doesn't work well (it's a kind of overfitting).
    ##params_indices = [1, 1, 1, 1, 0, 1, 1, 0, 0]    # b0 needs to be fitted. For this case, make sure \Sigma_xy and \Sigma_z positive, which should be set in mcmc_routine.
    ##params_indices = [0, 1, 0, 1, 1, 0]      # make sure \alpha_xy = \alpha_z and \Sigma_xy = \Sigma_z

    params_indices = [
        1, 1, 1, 1, 0, 0, 0, 0, 0
    ]  # Set sigma_fog=0, f=0, b_scale=0, b_0=1.0 for the subsampled DM case in real space.
    ##params_indices = [1, 1, 0, 0, 1, 1]    # with fixed Sigma from theoretical value, then need to set sigma_xy, sigma_z equal to Sigma_z
    ##params_indices = [0, 1, 0, 0, 1, 1]    # For this case, make sure \alpha_1 = \alpha_2 in the function lnlike(..) and set sigma_xy, sigma_z equal to Sigma_z.
    print("params_indices: ", params_indices)

    ##alpha_1, alpha_2, sigma_fog, f, b_0, b_scale  = 1.0, 1.0, 2.0, 0.2, 1.0, 0.0
    alpha_1, alpha_2, sigma_fog, f, b_0, b_scale = 1.0, 1.0, 0.0, 0.0, 1.0, 0.0  # ! only for real space, i.e., set sigma_fog, f equal to 0.
    all_names = "alpha_1", "alpha_2", "sigma_xy", "sigma_z", "sigma_sm", "sigma_fog", "f", "b_0", "b_scale"  # the same order for params_indices
    all_temperature = 0.01, 0.01, 0.1, 0.1, 0.1, 0.1, 0.1, 0.01, 0.1

    pool = MPIPool(loadbalance=True)
    for z_id in xrange(3):
        norm_gf = growth_factor(float(sim_z[z_id]), Omega_m) / G_0
        Sigma_z = Sigma_0 * norm_gf / 2.0  # divided by 2.0 as a rough estimate of \Sigma after reconstruction
        ##Sigma_z = Sigma_0* norm_gf

        if set_Sigma_xyz_theory == "True":
            print("Sigma_z: ", Sigma_z)
            sigma_xy, sigma_z = Sigma_z, Sigma_z
        else:
            if params_indices[2] == 0:
                sigma_xy = 0.0
            else:
                sigma_xy = 10.0
            if params_indices[3] == 0:
                sigma_z = 0.0
            else:
                sigma_z = 10.0

        np.random.seed()
        #        Set it for FoF fitting
        #        for mcut_id in xrange(N_masscut):
        #            if set_Sigma_sm_theory == "True":
        #                sigma_sm = (float(Sigma_RR_list[z_id][mcut_id])*2.0)**0.5
        #            else:
        #                sigma_sm = 0.0
        #
        #            all_params = alpha_1, alpha_2, sigma_xy, sigma_z, sigma_sm, sigma_fog, f, b_0, b_scale
        #            N_params, theta, fix_params, params_T, params_name = set_params(all_params, params_indices, all_names, all_temperature)
        #
        #            ifile_Pk = './run2_3_Pk_obs_2d_wnw_mean_{}_ksorted_mu_masscut/{}kave{}.wig_minus_now_mean_fof_a_{}_mcut{}.dat'.format(rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], mcut_Npar_list[z_id][mcut_id])
        #            Pk_wnw_diff_obs = np.loadtxt(ifile_Pk, dtype='f4', comments='#', usecols=(2,)) # be careful that there are k, \mu, P(k, \mu) columns.
        #
        #            ifile_Cov_Pk = './run2_3_Cov_Pk_obs_2d_wnw_{}_ksorted_mu_masscut/{}kave{}.wig_minus_now_mean_fof_a_{}_mcut{}.dat'.format(rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], mcut_Npar_list[z_id][mcut_id])
        #            Cov_Pk_wnw = np.loadtxt(ifile_Cov_Pk, dtype='f4', comments='#')
        #            ivar_Pk_wnow = N_dataset/np.diag(Cov_Pk_wnw)                                   # the mean sigma error
        #
        #            params_mcmc = mcmc_routine(N_params, N_walkers, N_walkersteps, theta, params_T, params_indices, fix_params, k_p, mu_p, Pk_wnw_diff_obs, ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm, norm_gf, params_name, pool)
        #
        #            chi_square = chi2(params_mcmc[:, 0], params_indices, fix_params, k_p, mu_p, Pk_wnw_diff_obs, ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm, norm_gf)
        #            reduced_chi2 = chi_square/(N_fitbin-N_params)
        #            print("Reduced chi2: {}\n".format(reduced_chi2))
        #            # output parameters into a file
        #            if set_Sigma_xyz_theory == "False":
        #                ofile_params = odir + 'fof_{}kave{}.wnw_diff_a_{}_mcut{}_params{}_Sigma_sm{}.dat'.format(rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], mcut_Npar_list[z_id][mcut_id], ''.join(map(str, params_indices)), round(sigma_sm,3))
        #            else:
        #                ofile_params = odir + 'fof_{}kave{}.wnw_diff_a_{}_mcut{}_params{}_isotropic_Sigmaz_{}_Sigma_sm{}.dat'.format(rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], mcut_Npar_list[z_id][mcut_id], ''.join(map(str, params_indices)), round(Sigma_z, 3), round(sigma_sm,3))
        #            print(ofile_params)
        #            write_params(ofile_params, params_mcmc, params_name, reduced_chi2)
        # set it for DM subsample fitting
        sub_sigma_sm = (sub_Sigma_RR * 2.0)**0.5
        print("sub_sigma_sm: ", sub_sigma_sm)
        all_params = alpha_1, alpha_2, sigma_xy, sigma_z, sub_sigma_sm, sigma_fog, f, b_0, b_scale  # set \Sigma_sm = sqrt(50*2)=10, for post-rec
        N_params, theta, fix_params, params_T, params_name = set_params(
            all_params, params_indices, all_names, all_temperature)

        # Fit for DM power spectrum
        ifile_Pk = './run2_3_sub_Pk_2d_wnw_mean_{}_ksorted_mu/{}kave{}.wig_minus_now_mean_sub_a_{}.dat'.format(
            rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id],
            sim_a[z_id])
        Pk_wnw_diff_true = np.loadtxt(
            ifile_Pk, dtype='f4', comments='#', usecols=(
                2, ))  # the file has k, \mu, P(k, \mu) columns; only P(k, \mu) is read.

        ifile_Cov_Pk = './run2_3_sub_Cov_Pk_2d_wnw_{}_ksorted_mu/{}kave{}.wig_minus_now_mean_sub_a_{}.dat'.format(
            rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id],
            sim_a[z_id])
        Cov_Pk_wnw = np.loadtxt(ifile_Cov_Pk, dtype='f4', comments='#')
        ivar_Pk_wnow = N_dataset / np.diag(Cov_Pk_wnw)  # inverse variance of the mean over the N_dataset realizations

        params_mcmc = mcmc_routine(N_params, N_walkers, N_walkersteps, theta,
                                   params_T, params_indices, fix_params, k_p,
                                   mu_p, Pk_wnw_diff_true, ivar_Pk_wnow,
                                   tck_Pk_linw, tck_Pk_sm, norm_gf,
                                   params_name, pool)
        chi_square = chi2(params_mcmc[:, 0], params_indices, fix_params, k_p,
                          mu_p, Pk_wnw_diff_true, ivar_Pk_wnow, tck_Pk_linw,
                          tck_Pk_sm, norm_gf)
        reduced_chi2 = chi_square / (N_fitbin - N_params)
        print('Reduced chi2: {}\n'.format(reduced_chi2))
        if rec_id == 1:
            if set_Sigma_xyz_theory == "False":
                ofile_params = odir + 'sub_{}kave{}.wnw_diff_a_{}_params{}_Sigma_sm{}.dat'.format(
                    rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                    ''.join(map(str, params_indices)), round(sub_sigma_sm, 3))
            else:
                ofile_params = odir + 'sub_{}kave{}.wnw_diff_a_{}_params{}_isotropic_Sigmaz_{}_Sigma_sm{}.dat'.format(
                    rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                    ''.join(map(str, params_indices)), round(Sigma_z, 3),
                    round(sub_sigma_sm, 3))
        elif rec_id == 0:
            if set_Sigma_xyz_theory == "False":
                ofile_params = odir + 'sub_{}kave{}.wnw_diff_a_{}_params{}.dat'.format(
                    rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                    ''.join(map(str, params_indices)))
            else:
                ofile_params = odir + 'sub_{}kave{}.wnw_diff_a_{}_params{}_isotropic_Sigmaz_{}.dat'.format(
                    rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                    ''.join(map(str, params_indices)), round(Sigma_z, 3))


#        write_params(ofile_params, params_mcmc, params_name, reduced_chi2)
    pool.close()
def fit_subsamplefof_mean():
    parser = argparse.ArgumentParser(
        description=
        'This is the MCMC code to get the fitting parameters, made by Zhejie Ding.'
    )
    parser.add_argument(
        '-rec_id',
        "--rec_id",
        help='The id of reconstruction, either 0 or 1.',
        required=True)  #0: pre-reconstruct; 1: post-reconstruct
    parser.add_argument(
        '-space_id',
        "--space_id",
        help=
        'The type of space for fitting, 0 for real-space and 1 for redshift space.',
        required=True)
    args = parser.parse_args()
    print("args: ", args)
    rec_id = int(args.rec_id)
    print("rec_id: ", rec_id)
    space_id = int(args.space_id)
    print("space_id: ", space_id)

    N_walkers = 40
    # simulation run name
    N_dataset = 20
    N_mu_bin = 100
    #N_skip_header = 11
    #N_skip_footer = 31977
    Omega_m = 0.3075
    G_0 = growth_factor(0.0, Omega_m)  # G_0 at z=0, normalization factor
    Volume = 1380.0**3.0  # the volume of simulation box

    sim_z = ['0', '0.6', '1.0']
    sim_seed = [0, 9]
    sim_wig = ['NW', 'WG']
    sim_a = ['1.0000', '0.6250', '0.5000']
    sim_space = ['r', 's']  # r for real space; s for redshift space
    rec_dirs = [
        'DD', 'ALL'
    ]  # "ALL" folder stores P(k, \mu) after reconstruction process, while DD is before reconstruction.
    rec_fprefix = ['', 'R']

    mcut_Npar_list = [[37, 149, 516, 1524, 3830], [35, 123, 374, 962, 2105],
                      [34, 103, 290, 681, 1390]]
    N_masscut = np.size(mcut_Npar_list, axis=1)

    inputf = '../Zvonimir_data/planck_camb_56106182_matterpower_smooth_z0.dat'
    k_smooth, Pk_smooth = np.loadtxt(inputf,
                                     dtype='f8',
                                     comments='#',
                                     unpack=True)
    tck_Pk_sm = interpolate.splrep(k_smooth, Pk_smooth)

    inputf = '../Zvonimir_data/planck_camb_56106182_matterpower_z0.dat'
    k_wiggle, Pk_wiggle = np.loadtxt(inputf,
                                     dtype='f8',
                                     comments='#',
                                     unpack=True)
    tck_Pk_linw = interpolate.splrep(k_wiggle, Pk_wiggle)

    # firstly, read one file and get k bins we want for the fitting range
    dir0 = '/Users/ding/Documents/playground/WiggleNowiggle/subsample_FoF_data_HS/Pk_obs_2d_wnw_mean_DD_ksorted_mu_masscut/'
    inputf = dir0 + 'fof_kaver.wnw_diff_a_0.6250_mcut35_fraction0.126.dat'
    k_p, mu_p = np.loadtxt(inputf,
                           dtype='f8',
                           comments='#',
                           delimiter=' ',
                           usecols=(0, 1),
                           unpack=True)
    #print(k_p, mu_p)
    N_fitbin = len(k_p)
    #print('# of (k, mu) bins: ', N_fitbin)

    # for output parameters fitted
    odir = './ZV_lagrange_params_{}_wig-now_mean_dset/'.format(
        rec_dirs[rec_id])
    if not os.path.exists(odir):
        os.makedirs(odir)

    #space_id = 1                       # in redshift space
    if rec_id == 0:
        ##Sigma_0 = 8.3364           # the approximated value of \Sigma_xy and \Sigma_z, unit Mpc/h, at z=0.
        Sigma_0 = 7.8364  # suggested by Zvonimir, at z=0
    elif rec_id == 1:
        Sigma_0 = 2.84

    # 0: parameter fixed, 1: parameter free.
    #params_indices = [1, 1, 1, 1, 1, 1, 0, 0]    # Only for DM case. b0 needs to be fixed for DM subsample power spectrum. For this case, make sure \Sigma_xy and \Sigma_z positive, which should be set in mcmc_routine.
    if space_id == 0:
        # Just setting f=0 may not work in real space.
        params_indices = [1, 1, 1, 0, 1, 1]  # Set f=0 for real-space.
        f = 0.0
    elif space_id == 1:
        params_indices = [1, 1, 1, 1, 1, 1]  # for redshift space
        f = 1.0

    print("params_indices: ", params_indices)

    alpha_1, alpha_2, sigma, b_0, b_scale = 1.0, 1.0, Sigma_0, 1.0, 0.0
    all_names = "alpha_1", "alpha_2", "Sigma_qmax", "f", "b_1", "b_partial"  # the same order for params_indices
    all_temperature = 0.01, 0.01, 0.1, 0.1, 0.01, 0.1

    pool = MPIPool(loadbalance=True)
    for z_id in xrange(3):
        norm_gf = growth_factor(float(sim_z[z_id]), Omega_m) / G_0
        # ##Sigma_z = Sigma_0* norm_gf
        #
        # if set_Sigma_xyz_theory == "True":
        #     print("Sigma_z: ", Sigma_z)
        #     sigma_xy, sigma_z = Sigma_z, Sigma_z
        # else:
        #     if params_indices[2] == 0:
        #         sigma_xy = 0.0
        #     else:
        #         sigma_xy = 10.0
        #     if params_indices[3] == 0:
        #         sigma_z = 0.0
        #     else:
        #         sigma_z = 10.0
        all_params = alpha_1, alpha_2, sigma, f, b_0, b_scale
        np.random.seed()
        for mcut_id in xrange(N_masscut):
            N_params, theta, fix_params, params_T, params_name = set_params(
                all_params, params_indices, all_names, all_temperature)

            ifile_Pk = './run2_3_Pk_obs_2d_wnw_mean_{}_ksorted_mu_masscut/{}kave{}.wig_minus_now_mean_fof_a_{}_mcut{}.dat'.format(
                rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id],
                sim_a[z_id], mcut_Npar_list[z_id][mcut_id])
            Pk_wnw_diff_obs = np.loadtxt(
                ifile_Pk, dtype='f4', comments='#', usecols=(2, )
            )  # the file has k, \mu, P(k, \mu) columns; only P(k, \mu) is read.

            ifile_Cov_Pk = './run2_3_Cov_Pk_obs_2d_wnw_{}_ksorted_mu_masscut/{}kave{}.wig_minus_now_mean_fof_a_{}_mcut{}.dat'.format(
                rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id],
                sim_a[z_id], mcut_Npar_list[z_id][mcut_id])
            Cov_Pk_wnw = np.loadtxt(ifile_Cov_Pk, dtype='f4', comments='#')
            ivar_Pk_wnow = N_dataset / np.diag(
                Cov_Pk_wnw)  # inverse variance of the mean over the N_dataset realizations

            params_mcmc = mcmc_routine(N_params, N_walkers, theta, params_T,
                                       params_indices, fix_params, k_p, mu_p,
                                       Pk_wnw_diff_obs, ivar_Pk_wnow,
                                       tck_Pk_linw, tck_Pk_sm, norm_gf,
                                       params_name, pool)

            chi_square = chi2(params_mcmc[:, 0], params_indices, fix_params,
                              k_p, mu_p, Pk_wnw_diff_obs, ivar_Pk_wnow,
                              tck_Pk_linw, tck_Pk_sm, norm_gf)
            reduced_chi2 = chi_square / (N_fitbin - N_params)
            print("Reduced chi2: {}\n".format(reduced_chi2))
            # output parameters into a file
            ofile_params = odir + 'fof_{}kave{}.wnw_diff_a_{}_mcut{}_params{}.dat'.format(
                rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                mcut_Npar_list[z_id][mcut_id], ''.join(map(
                    str, params_indices)))
            print(ofile_params)
            write_params(ofile_params, params_mcmc, params_name, reduced_chi2)

        np.random.seed()
        N_params, theta, fix_params, params_T, params_name = set_params(
            all_params, params_indices, all_names, all_temperature)
        # Fit for DM power spectrum
        ifile_Pk = './run2_3_sub_Pk_2d_wnw_mean_{}_ksorted_mu/{}kave{}.wig_minus_now_mean_sub_a_{}.dat'.format(
            rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id],
            sim_a[z_id])
        Pk_wnw_diff_true = np.loadtxt(
            ifile_Pk, dtype='f4', comments='#', usecols=(
                2, ))  # the file has k, \mu, P(k, \mu) columns; only P(k, \mu) is read.
        print(ifile_Pk)
        ifile_Cov_Pk = './run2_3_sub_Cov_Pk_2d_wnw_{}_ksorted_mu/{}kave{}.wig_minus_now_mean_sub_a_{}.dat'.format(
            rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id],
            sim_a[z_id])
        Cov_Pk_wnw = np.loadtxt(ifile_Cov_Pk, dtype='f4', comments='#')
        ivar_Pk_wnow = N_dataset / np.diag(Cov_Pk_wnw)  # inverse variance of the mean over the N_dataset realizations

        params_mcmc = mcmc_routine(N_params, N_walkers, theta, params_T,
                                   params_indices, fix_params, k_p, mu_p,
                                   Pk_wnw_diff_true, ivar_Pk_wnow, tck_Pk_linw,
                                   tck_Pk_sm, norm_gf, params_name, pool)
        chi_square = chi2(params_mcmc[:, 0], params_indices, fix_params, k_p,
                          mu_p, Pk_wnw_diff_true, ivar_Pk_wnow, tck_Pk_linw,
                          tck_Pk_sm, norm_gf)
        reduced_chi2 = chi_square / (N_fitbin - N_params)
        print('Reduced chi2: {}\n'.format(reduced_chi2))
        ofile_params = odir + 'sub_{}kave{}.wnw_diff_a_{}_params{}.dat'.format(
            rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], ''.join(
                map(str, params_indices)))
        write_params(ofile_params, params_mcmc, params_name, reduced_chi2)
    pool.close()
# Example 10
def fit_BAO(args):

    kmin = float(args.kmin)
    kmax = float(args.kmax)
    params_str = args.params_str
    Pk_type = args.Pk_type
    params_indices = [int(i) for i in params_str]

    old_stdout = sys.stdout
    odir = './fit_kmin{}_kmax{}_{}/'.format(kmin, kmax, Pk_type)
    if not os.path.exists(odir):
        os.makedirs(odir)

    ofile = odir + "mcmc_fit_params{}.log".format(params_str)
    log_file = open(ofile, "w")
    sys.stdout = log_file
    print('Arguments for the fitting: ', args)

    ifile = '/Users/mehdi/work/quicksurvey/ELG/run8/planck_camb_56106182_matterpower_z0.dat'
    klin, Pk_linw = np.loadtxt(ifile, dtype='f8', comments='#', unpack=True)
    Pwig_spl = InterpolatedUnivariateSpline(klin, Pk_linw)

    ifile = '/Users/mehdi/work/quicksurvey/ELG/run8/planck_camb_56106182_matterpower_smooth_z0.dat'
    klin, Pk_sm = np.loadtxt(ifile, dtype='f8', comments='#', unpack=True)
    Psm_spl = InterpolatedUnivariateSpline(klin, Pk_sm)

    norm_gf = 1.0
    N_walkers = 40
    ##params_indices = [1, 0, 0]  # 1: free parameter; 0: fixed parameter

    all_param_names = 'alpha', 'Sigma2_xy', 'A', 'B'
    all_temperature = 0.01, 1.0, 0.1, 0.1
    Omega_m = 0.3075  # matter density
    G_0 = growth_factor(0.0, Omega_m)
    Sigma_0 = 7.7840  # This is exactly calculated from theoretical prediction with q_{BAO}=110 Mpc/h.

    z_list = [0.625]  #, 0.875, 1.125, 1.375]   # z list for the data files
    cut_list = ['F']  #, 'T']
    # initial guess for the fitting; Sigma2_xy = 31.176 at z=0.65 is the theory prediction
    alpha, A, B = 1.0, 1.0, 10.0
    idir = '../kp0kp2knmodes/surveyscaled-nmodes/'
    odir = './mcmc_fit_params_{}/kmin{}_kmax{}/'.format(Pk_type, kmin, kmax)

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    if rank == 0:
        if not os.path.exists(odir):
            os.makedirs(odir)

    pool = MPIPool(loadbalance=True)
    for z_value in z_list:
        norm_gf = growth_factor(z_value, Omega_m) / G_0
        Sigma2_xy = (Sigma_0 * norm_gf)**2.0
        print('z, Sigma2_xy: ', z_value, Sigma2_xy)
        all_params = alpha, Sigma2_xy, A, B
        N_params, theta, fix_params, params_T, params_name = set_params(
            all_params, params_indices, all_param_names, all_temperature)
        for cut_type in cut_list:
            ifile = idir + 'kp0kp2knmodes_z{}RADECcut{}.dat'.format(
                z_value, cut_type)
            print(ifile)
            data_m = np.loadtxt(ifile, dtype='f8',
                                comments='#')  # k, P0(k), P2(k), N_modes
            indices = np.argwhere((data_m[:, 0] >= kmin)
                                  & (data_m[:, 0] <= kmax)).flatten()
            N_fitbin = len(indices)
            k_obs, Pk_wig_obs, N_modes = data_m[indices, 0], data_m[indices, 1], data_m[indices, 3]
            ivar_Pk_wig = N_modes / (2.0 * Pk_wig_obs**2.0)
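            # For a Gaussian field the variance of a band power is approximately 2 P(k)^2 / N_modes,
            # so the inverse variance of the measured P0 is N_modes / (2 P^2), as computed above.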
            #print('ivar_Pk_wig', ivar_Pk_wig)

            params_mcmc = mcmc_routine(N_params, N_walkers, theta, params_T,
                                       params_indices, fix_params, k_obs,
                                       Pk_wig_obs, ivar_Pk_wig, Pwig_spl,
                                       Psm_spl, norm_gf, params_name, pool)
            print(params_mcmc)
            chi_square = chi2(params_mcmc[:, 0], params_indices, fix_params,
                              k_obs, Pk_wig_obs, ivar_Pk_wig, Pwig_spl,
                              Psm_spl, norm_gf)
            reduced_chi2 = chi_square / (N_fitbin - N_params)
            print("chi^2/dof: ", reduced_chi2, "\n")
            ofile_params = odir + 'fit_p0_z{}RADECcut{}_params{}.dat'.format(
                z_value, cut_type, params_str)
            write_params(ofile_params, params_mcmc, params_name, reduced_chi2)

    pool.close()

    sys.stdout = old_stdout
    log_file.close()
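# Hedged usage sketch for fit_BAO(): the driver that builds `args` is not included in this
# listing, so the flag names below are assumptions inferred only from the attributes fit_BAO()
# reads (args.kmin, args.kmax, args.params_str, args.Pk_type).
def _fit_BAO_example_driver():
    parser = argparse.ArgumentParser(description='Illustrative driver for fit_BAO (hypothetical).')
    parser.add_argument('--kmin', required=True, help='minimum k of the fitting range')
    parser.add_argument('--kmax', required=True, help='maximum k of the fitting range')
    parser.add_argument('--params_str', required=True, help="e.g. '1100' -> params_indices [1, 1, 0, 0]")
    parser.add_argument('--Pk_type', required=True, help='label used in the output directory name')
    fit_BAO(parser.parse_args())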
def show_sigma_Pwnw():
    rec_id = 1
    indices_pn = indices_p1
    k_p = np.array(k_sorted[indices_pn])
    Pk_sm_obsk = interpolate.splev(
        k_p, tck_Pk_smooth,
        der=0)  # interpolate theoretical linear P(k) at points k_p

    mu_p = np.array(mu_sorted[indices_pn])
    Nmode_p = np.array(Nmode_sorted[indices_pn])
    Nmode_p = Nmode_p * N_dset  # scale the mode count by the number of data sets (N_dset)
    mu_boundary = np.round([min(mu_p), max(mu_p)], decimals=1)
    print(mu_boundary)
    n_fitbin = np.size(mu_p)
    print(n_fitbin)
    idir_Pk = [
        './run2_3_Pk_obs_2d_wnw_mean_{}_ksorted_mu_masscut/'.format(
            rec_dirs[rec_id]),
        './run2_3_Pk_2d_wnw_mean_{}_ksorted_mu_masscut/'.format(
            rec_dirs[rec_id])
    ]
    idir_covPk = [
        './run2_3_Cov_Pk_obs_2d_wnw_mean_{}_ksorted_mu_masscut/'.format(
            rec_dirs[rec_id]),
        './run2_3_Cov_Pk_2d_wnw_mean_{}_ksorted_mu_masscut/'.format(
            rec_dirs[rec_id])
    ]
    odir = './{}_figs_fofPkobs_subPktrue/'.format(rec_dirs[rec_id])
    #odir = './{}_figs_barn_mean/'.format(rec_dirs[rec_id])
    for space_id in xrange(1):
        for z_id in xrange(3):
            for mcut_id in xrange(4, 5):  #(N_masscut[z_id]):
                bias = bias_array[z_id][mcut_id]
                z = float(sim_z[z_id])
                print("z=", z, "G: ", growth_factor(z, Omega_m) / G_0)
                G2b2 = (growth_factor(z, Omega_m) / G_0 * bias)**2.0

                inputf = idir_Pk[
                    1] + '{}kave{}.wnw_mean_fof_a_{}_mcut{}.dat'.format(
                        rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                        mcut_Npar_list[z_id][mcut_id])
                Pk_wnw = np.loadtxt(inputf,
                                    dtype='f4',
                                    comments='#',
                                    usecols=(2, ))
                inputf = idir_covPk[
                    1] + '{}kave{}.wnw_mean_fof_a_{}_mcut{}.dat'.format(
                        rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                        mcut_Npar_list[z_id][mcut_id])
                Cov_Pk_a = np.loadtxt(inputf, dtype='f4', comments='#')
                sigma_Pk_a = (np.diag(Cov_Pk_a)[indices_pn])**0.5
                sigma_Pk_wnw = sigma_Pk_a / math.sqrt(float(N_dset))
                ##sigma_Pk_wnw = sigma_Pk_a/math.sqrt(float(N_dset))* G2b2 * Pk_sm_obsk  # get sigma_(Pwig/Pnow)*P_{Lin,w}
                relative_sigma_Pk_wnw = sigma_Pk_wnw / Pk_wnw[
                    indices_pn]  # get the relative error
                print(sigma_Pk_wnw.shape)

                inputf = idir_Pk[
                    0] + '{}kave{}.wnw_diff_mean_fof_a_{}_mcut{}.dat'.format(
                        rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                        mcut_Npar_list[z_id][mcut_id])
                Pk_wnw_diff = np.loadtxt(inputf,
                                         dtype='f4',
                                         comments='#',
                                         usecols=(2, ))
                Pk_wig_minus_now = Pk_wnw_diff[
                    indices_pn] * G2b2 * Pk_sm_obsk  # get back P_wig-P_now

                inputf = idir_covPk[
                    0] + '{}kave{}.wnw_diff_mean_fof_a_{}_mcut{}.dat'.format(
                        rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                        mcut_Npar_list[z_id][mcut_id])
                Cov_Pk_a = np.loadtxt(inputf, dtype='f4', comments='#')
                sigma_Pk_a = (np.diag(Cov_Pk_a)[indices_pn])**0.5
                sigma_Pk_wnw_diff = sigma_Pk_a / math.sqrt(float(N_dset))
                ##sigma_Pk_wnw_diff = sigma_Pk_wnw_diff * G2b2 * Pk_sm_obsk              # get back sigma_Pk_wnw_diff

                ##relative_sigma_Pk_wnw_diff = sigma_Pk_wnw_diff/np.abs(Pk_wnw_diff[indices_pn]) # get the relative error
                relative_sigma_Pk_wnw_diff = sigma_Pk_wnw_diff / (
                    abs(Pk_wig_minus_now) + 1.0
                )  # get the "relative" error, add 1.0 in the denominator
                #print sigma_Pk_a.shape
                plot_sigma_Pk_wnw_mean(k_p, sigma_Pk_wnw, sigma_Pk_wnw_diff,
                                       Nmode_p, mu_boundary, rec_id, space_id,
                                       z_id, mcut_id, odir)
def Pk_wnw_diff_divideby_b2G2Psm():
    rec_id = 0  # rec_id=0: before reconstruction; rec_id=1: after reconstruction
    odir_prefix = './run2_3_'
    for z_id in xrange(1, 3):
        for mcut_id in xrange(N_masscut):
            for space_id in xrange(1):
                Pk_mwnw_diff_obs = np.array([], dtype=np.float64).reshape(
                    0, n_fitbin)
                for run_id in xrange(2):
                    for sim_seed_id in xrange(10):
                        k_obs, Pk_now_obs, Pk_now_true = read_fftPk_file(
                            dir0, z_id, mcut_id, rec_id, space_id,
                            sim_wig[run_id][0], sim_seed_id)

                        k_obs, Pk_wig_obs, Pk_wig_true = read_fftPk_file(
                            dir0, z_id, mcut_id, rec_id, space_id,
                            sim_wig[run_id][1], sim_seed_id)

                        Pk_wnw_diff_obs = (Pk_wig_obs -
                                           Pk_now_obs) / Pk_sm_obsk
                        Pk_mwnw_diff_obs = np.vstack(
                            [Pk_mwnw_diff_obs, Pk_wnw_diff_obs])

                Pk_wnw_diff_obs_mean = np.mean(Pk_mwnw_diff_obs, axis=0)

                # subtract the mean from each realization
                Pk_mwnw_diff_obs = Pk_mwnw_diff_obs - Pk_wnw_diff_obs_mean
                Cov_Pk = np.dot(Pk_mwnw_diff_obs.T,
                                Pk_mwnw_diff_obs) / (N_dataset - 1.0)
                #print(Cov_Pk, Cov_Pk.shape)
                # add the const factor with growth function and bias b
                bias = bias_list[z_id][mcut_id]
                z = float(sim_z[z_id])
                print("z=", z, "G: ", growth_factor(z, Omega_m) / G_0)
                G2b2 = (growth_factor(z, Omega_m) / G_0 * bias)**2.0
                Pk_wnw_diff_obs_mean = Pk_wnw_diff_obs_mean / G2b2
                Cov_Pk = Cov_Pk / G2b2**2.0

                var_name = 'Pk_obs_2d_wnw_mean_{}_ksorted_mu_masscut/'.format(
                    rec_dirs[rec_id])

                filename = "{}kave{}.wnw_diff_mean_fof_a_{}_mcut{}.dat".format(
                    rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                    mcut_Npar_list[z_id][mcut_id])
                header_line = ' After sorting k, the mean (P(k)_wig-P(k)_now)/(G^2*b^2*Plinnw) in 2d (k, mu) case\n     k       mu       Pk_wnw_diff_obs_mean'
                write_output(odir_prefix, var_name, header_line, rec_id,
                             space_id, z_id, mcut_id,
                             np.array([k_p, mu_p,
                                       Pk_wnw_diff_obs_mean]).T, filename)

                var_name = 'Cov_Pk_obs_2d_wnw_mean_{}_ksorted_mu_masscut/'.format(
                    rec_dirs[rec_id])
                header_line = ' Cov(P_obs_2d_wnw_diff_b2G2(k1), P_obs_2d_wnw_diff_b2G2(k2)), (obs: including shot noise; wnw_diff_b2G2 means (Pwig-Pnow)/(b^2 G^2 Psm), 2d: k, mu; and k1, k2 are the k bin indices).'
                filename = "{}kave{}.wnw_diff_mean_fof_a_{}_mcut{}.dat".format(
                    rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id],
                    mcut_Npar_list[z_id][mcut_id])
                write_output(odir_prefix, var_name, header_line, rec_id,
                             space_id, z_id, mcut_id, Cov_Pk, filename)
# Example 13
sim_z=['0', '0.6', '1.0']
sim_seed = [0, 9]
sim_wig = ['NW', 'WG']
sim_a = ['1.0000', '0.6250', '0.5000']
sim_space = ['r', 's']     # r for real space; s for redshift space
title_space = ['in real space', 'in redshift space']
rec_dirs = ['DD', 'ALL']   # "ALL" folder stores P(k, \mu) after reconstruction process, while DD is before reconstruction.
rec_fprefix = ['', 'R']

R_bao = 110.                # unit: Mpc/h
space_id = 0
k_cut = 0.3
ns = 0.965
Omega_m = 0.3075
G_0 = growth_factor(0.0, Omega_m) # G_0 at z=0, normalization factor
G_z = np.array([growth_factor(float(z), Omega_m)/G_0 for z in sim_z])
print("Growth factor: ", G_z)

inputf = '../Zvonimir_data/planck_camb_56106182_matterpower_z0.dat'
k_wiggle, Pk_wiggle = np.loadtxt(inputf, dtype='f8', comments='#', unpack=True)
tck_Pk_linw = interpolate.splrep(k_wiggle, Pk_wiggle, k=3)
print(k_wiggle[0], k_wiggle[-1], len(k_wiggle))

const = 1./(6.0*math.pi**2.0)
##sigma_v2_err = quad(lambda x: interpolate.splev(x, tck_Pk_linw, der=0), k_wiggle[0], k_wiggle[-1])
sigma_v2_err = quad(lambda x: interpolate.splev(x, tck_Pk_linw, der=0)*(1.0-math.sin(x*R_bao)/(x*R_bao)), k_wiggle[0], 100.0) # the expression is from Zvonimir's suggestion.
print(sigma_v2_err)
sigma_v2 = sigma_v2_err[0]*const
Sigma_Plinw = (2*sigma_v2)**0.5
print("Sigma_Plinw: ", Sigma_Plinw)
# Example 14
def fit_subsamplefof_mean():
    parser = argparse.ArgumentParser(description='This is the MCMC code to get the fitting parameters using Zvonimir model, made by Zhejie Ding.')
    parser.add_argument('-rec_id', "--rec_id", help='The id of reconstruction, either 0 or 1.', required=True)   #0: pre-reconstruct; 1: post-reconstruct
    args = parser.parse_args()
    rec_id = int(args.rec_id)
    print("rec_id: ", rec_id)
    
    N_walkers = 200
    N_walkersteps = 20000
    space_id = 1                       # in redshift space
    print("N_walkers: ", N_walkers, "N_walkersteps: ", N_walkersteps, "\n")
    
    # 0: parameter fixed, 1: parameter free.
    params_indices = [1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1]    # b0 needs to be fitted. For this case, make sure \Sigma_0,2,4 positive, which should be set in mcmc_routine.
    ##params_indices = [0, 1, 0, 0, 1, 1]    # For this case, make sure \alpha_1 = \alpha_2 in the function lnlike(..) and set sigma_xy, sigma_z equal to Sigma_z.
    print("params_indices: ", params_indices)
    
    # simulation run name
    N_dataset = 20
    N_mu_bin = 100
    #N_skip_header = 11
    #N_skip_footer = 31977
    Omega_m = 0.3075
    G_0 = growth_factor(0.0, Omega_m) # G_0 at z=0, normalization factor
    Volume = 1380.0**3.0   # the volume of simulation box


    sim_z=['0', '0.6', '1.0']
    sim_seed = [0, 9]
    sim_wig = ['NW', 'WG']
    sim_a = ['1.0000', '0.6250', '0.5000']
    sim_space = ['r', 's']     # r for real space; s for redshift space
    rec_dirs = ['DD', 'ALL']   # "ALL" folder stores P(k, \mu) after reconstruction process, while DD is before reconstruction.
    rec_fprefix = ['', 'R']

    mcut_Npar_list = [[37, 149, 516, 1524, 3830],
                      [35, 123, 374, 962, 2105],
                      [34, 103, 290, 681, 1390]]
    N_masscut = np.size(mcut_Npar_list, axis=1)

    # Sigma_sm = sqrt(2.* Sig_RR) in post-reconstruction case, for pre-reconstruction, we don't use sub_Sigma_RR.
    Sigma_RR_list = [[37, 48.5, 65.5, 84.2, 110],
                     [33, 38, 48.5, 63.5, 91.5],
                     [31, 38, 49, 65, 86]]
    sub_Sigma_RR = 50.0       # note from Hee-Jong's recording

    inputf = '../Zvonimir_data/planck_camb_56106182_matterpower_smooth_z0.dat'
    k_smooth, Pk_smooth = np.loadtxt(inputf, dtype='f8', comments='#', unpack=True)
    tck_Pk_sm = interpolate.splrep(k_smooth, Pk_smooth)

    inputf = '../Zvonimir_data/planck_camb_56106182_matterpower_z0.dat'
    k_wiggle, Pk_wiggle = np.loadtxt(inputf, dtype='f8', comments='#', unpack=True)
    tck_Pk_linw = interpolate.splrep(k_wiggle, Pk_wiggle)


    # firstly, read one file and get k bins we want for the fitting range
    dir0='/Users/ding/Documents/playground/WiggleNowiggle/subsample_FoF_data_HS/Pk_obs_2d_wnw_mean_DD_ksorted_mu_masscut/'
    inputf = dir0 +'fof_kaver.wnw_diff_a_0.6250_mcut35_fraction0.126.dat'
    k_p, mu_p = np.loadtxt(inputf, dtype='f8', comments='#', delimiter=' ', usecols=(0,1), unpack=True)
    #print(k_p, mu_p)
    N_fitbin = len(k_p)
    #print('# of (k, mu) bins: ', N_fitbin)

    # for output parameters fitted
    odir = './params_{}_wig-now_b_bscale_fitted_mean_dset/'.format(rec_dirs[rec_id])
    if not os.path.exists(odir):
        os.makedirs(odir)
    

    alpha_1, alpha_2, sigma_0, sigma_2, sigma_4, f, b_0, b_scale0, b_scale2, b_scale4  = 1.0, 1.0, 8.0, 8.0, 8.0, 0.2, 1.0, 0.0, 0.0, 0.0
    all_names = "alpha_1", "alpha_2", "sigma_0", "sigma_2", "sigma_4", "sigma_sm", "f", "b_0", "b_scale0", "b_scale2", "b_scale4"    # the same order for params_indices
    all_temperature = 0.01, 0.01, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 1.0, 1.0, 1.0
    pool = MPIPool(loadbalance=True)
    for z_id in xrange(1):
        norm_gf = growth_factor(float(sim_z[z_id]), Omega_m)/G_0
#        for mcut_id in xrange(N_masscut):
#            np.random.seed()
#            sigma_sm = (float(Sigma_RR_list[z_id][mcut_id])*2.0)**0.5
#            all_params = alpha_1, alpha_2, sigma_0, sigma_2, sigma_4, sigma_sm, f, b_0, b_scale0, b_scale2, b_scale4
#            N_params, theta, fix_params, params_T, params_name = set_params(all_params, params_indices, all_names, all_temperature)
#
#            ifile_Pk = './run2_3_Pk_obs_2d_wnw_mean_{}_ksorted_mu_masscut/{}kave{}.wig_minus_now_mean_fof_a_{}_mcut{}.dat'.format(rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], mcut_Npar_list[z_id][mcut_id])
#            print(ifile_Pk)
#            Pk_wnw_diff_obs = np.loadtxt(ifile_Pk, dtype='f4', comments='#', usecols=(2,)) # be careful that there are k, \mu, P(k, \mu) columns.
#
#            ifile_Cov_Pk = './run2_3_Cov_Pk_obs_2d_wnw_{}_ksorted_mu_masscut/{}kave{}.wig_minus_now_mean_fof_a_{}_mcut{}.dat'.format(rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], mcut_Npar_list[z_id][mcut_id])
#            Cov_Pk_wnw = np.loadtxt(ifile_Cov_Pk, dtype='f4', comments='#')
#            ivar_Pk_wnow = N_dataset/np.diag(Cov_Pk_wnw)                                   # the mean sigma error
#
#            params_mcmc = mcmc_routine(N_params, N_walkers, N_walkersteps, theta, params_T, params_indices, fix_params, k_p, mu_p, Pk_wnw_diff_obs, ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm, norm_gf, params_name, pool)
#
#            chi_square = chi2(params_mcmc[:, 0], params_indices, fix_params, k_p, mu_p, Pk_wnw_diff_obs, ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm, norm_gf)
#            reduced_chi2 = chi_square/(N_fitbin-N_params)
#            print("Reduced chi2: {}\n".format(reduced_chi2))
#            # output parameters into a file
#            ofile_params = odir + 'ZV_fof_{}kave{}.wnw_diff_a_{}_mcut{}_params{}_Sigma_sm{}.dat'.format(rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], mcut_Npar_list[z_id][mcut_id], ''.join(map(str, params_indices)), round(sigma_sm, 3))
#            write_params(ofile_params, params_mcmc, params_name, reduced_chi2)

        np.random.seed()
        sigma_sm = 10.0  # for all redshifts, it's the same value.
        all_params = alpha_1, alpha_2, sigma_0, sigma_2, sigma_4, sigma_sm, f, b_0, b_scale0, b_scale2, b_scale4
        N_params, theta, fix_params, params_T, params_name = set_params(all_params, params_indices, all_names, all_temperature)
        
        # Fit for DM power spectrum
        ifile_Pk = './run2_3_sub_Pk_2d_wnw_mean_{}_ksorted_mu/{}kave{}.wig_minus_now_mean_sub_a_{}.dat'.format(rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id])
        Pk_wnw_diff_true = np.loadtxt(ifile_Pk, dtype='f4', comments='#', usecols=(2,)) # the file has k, \mu, P(k, \mu) columns; only P(k, \mu) is read.
        print(ifile_Pk)
        ifile_Cov_Pk = './run2_3_sub_Cov_Pk_2d_wnw_{}_ksorted_mu/{}kave{}.wig_minus_now_mean_sub_a_{}.dat'.format(rec_dirs[rec_id], rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id])
        Cov_Pk_wnw = np.loadtxt(ifile_Cov_Pk, dtype='f4', comments='#')
        ivar_Pk_wnow = N_dataset/np.diag(Cov_Pk_wnw)                              # inverse variance of the mean over the N_dataset realizations

        params_mcmc = mcmc_routine(N_params, N_walkers, N_walkersteps, theta, params_T, params_indices, fix_params, k_p, mu_p, Pk_wnw_diff_true, ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm, norm_gf, params_name, pool)
        chi_square = chi2(params_mcmc[:, 0], params_indices, fix_params, k_p, mu_p, Pk_wnw_diff_true, ivar_Pk_wnow, tck_Pk_linw, tck_Pk_sm, norm_gf)
        reduced_chi2 = chi_square/(N_fitbin-N_params)
        print('Reduced chi2: {}\n'.format(reduced_chi2))
        ofile_params = odir + 'ZV_sub_{}kave{}.wnw_diff_a_{}_params{}_Sigma_sm{}.dat'.format(rec_fprefix[rec_id], sim_space[space_id], sim_a[z_id], ''.join(map(str, params_indices)), round(sigma_sm, 3))
        write_params(ofile_params, params_mcmc, params_name, reduced_chi2)
    pool.close()