def main():
    # redshift of snapshots
    zs = [1., 0.7, 0.3, 0.]
    z_nbody = zs[0]

    machine = 'alan'
    #machine = 'NERSC'

    #sim_name = "AbacusSummit_hugebase_c000_ph000"
    #sim_name = "AbacusSummit_hugebase_c000_ph001"
    sim_name = 'Sim256'
    #sim_name = 'Sim1024'

    user_dict, cosmo_dict = load_dict(z_nbody, sim_name, machine)
    data_dir = user_dict['data_dir']
    n_chunks = user_dict['n_chunks']
    z_nbody = user_dict['z_nbody']
    Lbox = user_dict['Lbox']

    # create directory if it does not exist
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)

    # loop over all chunks
    for i_chunk in range(n_chunks):
        #if rank != i_chunk%size: continue
        print("saving chunk number %d out of %d chunks" % (i_chunk, n_chunks))

        if user_dict['sim_code'] == 'abacus':
            # load simulation information
            halo_table = read_halo_abacus(sim_name, z_nbody, i_chunk)
            pos_halo = halo_table['x_L2com']
            m_halo = halo_table['N'] * user_dict['m_part']

            # convert [-Lbox/2.,Lbox/2.] to [0,Lbox]
            pos_halo += Lbox / 2.

        elif user_dict['sim_code'] == 'gadget':
            # fof files
            fof_fns = sorted(
                glob.glob(user_dict['sim_dir'] +
                          "fof_snap_box_L%d_%d_%03d*.fits" %
                          (Lbox, user_dict['ppd'], user_dict['ind_snap'])))
            print(fof_fns)

            pos_halo, m_halo = read_halo_gadget(fof_fns, i_chunk, n_chunks)

        save_pos(pos_halo, "halo_%03d" % i_chunk, data_dir, mass=m_halo)
        del pos_halo, m_halo

    print("Saved all halo positions")
# Example 2
def main(sim_name, z_nbody, z_ic, R_smooth, machine):
    # which power spectra to compute
    #compute_pks = ['Pk_hh', 'Pk_hm', 'Pk_mm']
    compute_pks = ['Pk_hm', 'Pk_mm']

    # load dictionary
    user_dict, cosmo_dict = load_dict(z_nbody, sim_name, machine)
    interlaced = user_dict['interlaced']
    data_dir = user_dict['data_dir']
    R_smooth = user_dict['R_smooth']
    n_chunks = user_dict['n_chunks']
    N_dim = user_dict['N_dim']
    Lbox = user_dict['Lbox']
    dk = user_dict['dk']
    m_threshold = user_dict['mass_threshold']

    # load simulation information
    pos_halo_fns = sorted(glob.glob(data_dir + "pos_halo_*"))
    pos_snap_fns = sorted(glob.glob(data_dir + "pos_ones_snap_*"))

    # obtain the hh power spectrum
    if 'Pk_hh' in compute_pks:
        ks, Pk_hh = get_Pk(pos_halo_fns,
                           N_dim,
                           Lbox,
                           interlaced,
                           dk=dk,
                           m_thr=m_threshold)
        print("Computed hh power spectrum")
        np.save(data_dir + "Pk_hh.npy", Pk_hh)
        np.save(data_dir + "ks.npy", ks)

    # obtain the mm power spectrum
    if 'Pk_mm' in compute_pks:
        ks, Pk_mm = get_Pk(pos_snap_fns, N_dim, Lbox, interlaced, dk=dk)
        print("Computed mm power spectrum")
        np.save(data_dir + "Pk_mm.npy", Pk_mm)

    # obtain the hm power spectrum
    if 'Pk_hm' in compute_pks:
        ks, Pk_hm = get_Pk(pos_halo_fns,
                           N_dim,
                           Lbox,
                           interlaced,
                           pos2_fns=pos_snap_fns,
                           dk=dk)
        print("Computed hm power spectrum")
        np.save(data_dir + "Pk_hm.npy", Pk_hm)
# Example 3
def main(sim_name, z_nbody, z_ic, R_smooth, machine):
    # load dictionary
    user_dict, cosmo_dict = load_dict(z_nbody,sim_name,machine)
    interlaced = user_dict['interlaced']
    dens_dir = user_dict['dens_dir']
    data_dir = user_dict['data_dir']
    R_smooth = user_dict['R_smooth']
    n_chunks = user_dict['n_chunks']
    z_nbody = user_dict['z_nbody']
    N_dim = user_dict['N_dim']
    z_ic = user_dict['z_ic']
    Lbox = user_dict['Lbox']
    dk = user_dict['dk']

    field_names = ['ones', 'delta', 'delta_sq', 'nabla_sq', 's_sq']

    # get a mesh list for all 5 cases
    mesh_list = []
    for key in field_names:
        if key == 'ones':
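            # note: the 'ones' field reuses the particle positions saved for the delta field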
            pos_snap_fns = sorted(glob.glob(data_dir+"pos_delta_snap_*"))
        else:
            pos_snap_fns = sorted(glob.glob(data_dir+"pos_"+key+"_snap_*"))
        mesh = get_mesh(key, pos_snap_fns, N_dim, Lbox, interlaced)
        mesh_list.append(mesh)
    print("Obtained mesh lists for all fields")
    
    # compute all cross power spectra
    ks_all, Pk_all, k_lengths = get_all_cross_ps(mesh_list,dk=dk)
    del mesh_list
    print("Computed cross power spectra of all fields")
    
    # save all power spectra
    np.save(data_dir+"ks_all.npy",ks_all)
    np.save(data_dir+"Pk_all_%d.npy"%(int(R_smooth)),Pk_all)
    np.save(data_dir+"k_lengths.npy",k_lengths)
    print("Saved all templates")
# Example 4
def main(sim_name,
         sim_name_halo,
         z_nbody,
         z_ic,
         R_smooth,
         machine,
         fit_type,
         tol,
         factor,
         max_iter,
         fit_shotnoise=False):

    # power spectrum choices
    k_max = 0.5  #0.3#0.5
    k_min = 0.  #1.e-2#0

    # load parameters
    user_dict, cosmo_dict = load_dict(z_nbody, sim_name, machine)
    R_smooth = user_dict['R_smooth']
    data_dir = user_dict['data_dir']
    Lbox = user_dict['Lbox']

    # which halo files are we loading
    halo_dir = data_dir.replace(sim_name, sim_name_halo)

    # load power spectra
    Pk_hh = np.load(halo_dir + "Pk_hh.npy")
    #Pk_mm = np.load(halo_dir+"Pk_mm.npy")
    # TESTING
    Pk_mm = np.load(
        "data/AbacusSummit_base_c000_ph000/z1.100/r_smooth_0/Pk_mm.npy")
    Pk_hm = np.load(halo_dir + "Pk_hm.npy")
    ks = np.load(halo_dir + "ks.npy")

    # apply cuts to the data
    k_cut = (ks < k_max) & (ks >= k_min)
    Pk_hh = Pk_hh[k_cut]
    Pk_mm = Pk_mm[k_cut]
    Pk_hm = Pk_hm[k_cut]
    ks = ks[k_cut]
    dk = ks[1] - ks[0]

    # number of modes in each bin
    N_modes = ks**2 * dk * Lbox**3 / (2. * np.pi**2)
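    # (a shell of width dk contains 4*pi*k^2*dk / (2*pi/Lbox)^3 = k^2 dk Lbox^3 / (2 pi^2) Fourier modes)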
    # load errorbars for plotting
    Pk_hh_err = Pk_hh * np.sqrt(2. / N_modes)
    Pk_hm_err = np.sqrt((Pk_hm**2 + Pk_hh * Pk_mm) / N_modes)

    # build diagonal covariances for the fits
    # TODO: has to be done properly with jackknifing
    Pk_hh_err[0] = 1.e6  # inflate the first-bin error so it is effectively excluded from the fit
    cov_hh = np.diag(Pk_hh_err**2)

    Pk_both = np.hstack((Pk_hh, Pk_hm))
    Pk_both_err = np.hstack((Pk_hh_err, Pk_hm_err))
    Pk_both_err[len(Pk_hh)] = 1.e6
    cov_both = np.diag(Pk_both_err**2)

    Pk_hm_err[0] = 1.e6
    cov_hm = np.diag(Pk_hm_err**2)

    # load all 15 templates
    ks_all = np.load(data_dir + "ks_all.npy")
    Pk_all = np.load(data_dir + "Pk_all_%d.npy" % (int(R_smooth)))  # og
    Pk_tmps = asdf.open(data_dir + "Pk_templates_0.asdf")['data']  # og
    #TESTING
    #data_dir = "data/AbacusSummit_base_c000_ph000/z1.100/r_smooth_1/"
    #Pk_all = np.load(data_dir+"Pk_all_%d.npy"%(1))
    k_lengths = np.load(data_dir + "k_lengths.npy").astype(int)
    fields_tmp = ['1', 'b_1', 'b_2', 'b_{\\nabla^2}', 'b_s']

    # linear solution
    Pk_all = Pk_all.reshape(int(len(ks_all) / k_lengths[0]), k_lengths[0])
    Pk_all = Pk_all[:, k_cut]

    # shot noise params
    #Pk_sh = 1./n_bar # analytical shot noise
    F_size = 5

    Pk_ij = np.zeros((F_size, F_size, len(Pk_hh)))
    c = 0
    for i in range(F_size):
        for j in range(F_size):
            if i > j: continue

            # TESTING
            Pk_tmp = Pk_tmps[r'$(' + fields_tmp[i] + ',' + fields_tmp[j] +
                             r')$']
            Pk_tmp = np.interp(ks, Pk_tmps['ks'], Pk_tmp)
            # original
            #Pk_tmp = Pk_all[c]

            Pk_ij[i, j, :] = Pk_tmp
            if i != j: Pk_ij[j, i, :] = Pk_tmp
            c += 1

    # solution params
    F_start = np.ones((F_size, 1))  # initial guess
    n_steps = 10

    # first solve varying all 4 parameters
    if fit_type == 'power_both':
        icov = np.linalg.inv(cov_both)
    elif fit_type == 'power_hh':
        icov = np.linalg.inv(cov_hh)
    elif fit_type == 'power_hm':
        icov = np.linalg.inv(cov_hm)
    F = solve(Pk_ij, Pk_hh, Pk_hm, icov, F_start, len(Pk_hh), tol, factor,
              max_iter, fit_type)
    #F = np.array([1.,-0.8277,-0.0424,-0.339,0.0355])
    #F = np.array([1, -1.01524924, 0.0075658, 0.0001073, -0.0052661])

    # compute power spectrum for best-fit
    Pk_hh_guess, Pk_hm_guess, P_hat = get_P(Pk_ij, F, len(Pk_hh))
    Pk_hh_best = Pk_hh_guess
    Pk_hm_best = Pk_hm_guess

    # compute the probability
    delta = Pk_hh_best - Pk_hh
    lnprob = np.einsum('i,ij,j', delta, np.linalg.inv(cov_hh), delta)
    lnprob *= -0.5
    print("lnprob = ", lnprob)
    print("Pk_hh_truth = ", Pk_hh[::10])
    print("Pk_hh_best = ", Pk_hh_best[::10])
    print("Pk_hm_truth = ", Pk_hm[::10])
    print("Pk_hm_best = ", Pk_hm_best[::10])

    # plot solution
    plt.figure(1, figsize=(12, 8))
    fields = ['1', r'\delta', r'\delta^2', r'\nabla^2 \delta', r's^2']
    for i in range(len(F)):
        for j in range(len(F)):
            if i > j: continue
            label = r'$\langle ' + fields[i] + "," + fields[j] + r" \rangle$"
            Pk_tmp = Pk_ij[i, j, :] * F[i] * F[j]
            plt.plot(ks, Pk_tmp, ls='--', lw=2., label=label)

    plt.errorbar(ks,
                 Pk_hh,
                 yerr=Pk_hh_err,
                 color='black',
                 label='halo-halo truth',
                 zorder=1)
    plt.plot(ks,
             Pk_hh_best,
             color='dodgerblue',
             label='halo-halo fit',
             zorder=2)
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel(r"$k$ [$h \ \mathrm{Mpc}^{-1}$]")
    plt.ylabel(r"$P(k)$")
    plt.legend()
    plt.savefig("figs/Pk_hh_fit.png")

    plt.figure(2)
    plt.errorbar(ks,
                 Pk_hm,
                 yerr=Pk_hm_err,
                 color='black',
                 label='halo-matter truth',
                 zorder=1)
    plt.plot(ks,
             Pk_hm_best,
             color='dodgerblue',
             label='halo-matter fit',
             zorder=2)
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel(r"$k$ [$h \ \mathrm{Mpc}^{-1}$]")
    plt.ylabel(r"$P(k)$")
    plt.legend()
    plt.savefig("figs/Pk_hm_fit.png")
    plt.show()
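# For reference, a sketch of the model being fitted above: with F the vector of
# bias parameters (F[0] typically pinned to 1 for the matter field), the model
# spectra are quadratic/linear forms in the 15 templates. A hypothetical
# reimplementation of what get_P presumably evaluates:
import numpy as np

def get_P(Pk_ij, F, n_k):
    F = np.asarray(F).flatten()
    Pk_ij = Pk_ij[:, :, :n_k]
    # P_hh(k) = sum_ij F_i F_j P_ij(k); P_hm(k) = sum_i F_i P_i0(k),
    # where index 0 corresponds to the matter ('1') field
    Pk_hh = np.einsum('i,ijk,j->k', F, Pk_ij, F)
    Pk_hm = np.einsum('i,ik->k', F, Pk_ij[:, 0, :])
    # stacked vector, convenient for the joint hh+hm fit
    P_hat = np.hstack((Pk_hh, Pk_hm))
    return Pk_hh, Pk_hm, P_hat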
# Example 5
def main(sim_name, z_nbody, z_ic, R_smooth, machine, want_plot=False):
    # user choices
    k_max = 0.5
    k_min = 1.e-4

    # redshift choice
    #z_s = np.array([3.0, 2.5, 2.0, 1.7, 1.4, 1.1, 0.8, 0.5, 0.4, 0.3, 0.2, 0.1])
    z_s = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.8, 1.1])
    a_s = 1. / (1 + z_s)

    # test with class
    class_dir = os.path.expanduser(
        "~/repos/AbacusSummit/Cosmologies/abacus_cosm000/")

    # load parameters
    user_dict, cosmo_dict = load_dict(z_nbody, sim_name, machine)
    R_smooth = user_dict['R_smooth']
    data_dir = user_dict['data_dir']

    # Cosmology
    cosmo = ccl.Cosmology(**cosmo_dict)

    # Redshift distributions
    nz_s = np.exp(-((z_s - 0.8) / 0.05)**2 / 2)
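    # (a Gaussian dn/dz centered at z = 0.8 with width sigma_z = 0.05)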

    # Bias
    bz_s = 0.95 / ccl.growth_factor(cosmo, a_s)

    # This tracer will only include the density contribution
    galaxies = ccl.NumberCountsTracer(cosmo,
                                      has_rsd=False,
                                      dndz=(z_s, nz_s),
                                      bias=(z_s, bz_s),
                                      mag_bias=None)

    # read in CLASS power spectra
    ks, Pk = np.loadtxt(class_dir + 'abacus_cosm000.z%d_pk_cb.dat' % (1),
                        unpack=True)
    Pk_a_s = np.zeros((len(a_s), len(ks)))
    for i in range(len(a_s)):
        print(i)
        Pk_a_s[i, :] = np.loadtxt(class_dir + 'abacus_cosm000.z%d_pk_cb.dat' %
                                  (i + 1))[:, 1]

    # generating fake data
    k = np.load("data/AbacusSummit_base_c000_ph006/z1.100/ks.npy")
    dk = k[1] - k[0]
    Lbox = 2000.
    N_modes = k**2 * dk * Lbox**3. / (2. * np.pi**2)
    for i in range(len(a_s)):
        # ccl expects Mpc units (TODO: improve and ask)
        h = cosmo_dict['h']
        # give the ks in Mpc^-1 and get Pk_gg in [Mpc/h]^3
        Pk_gg = ccl.nonlin_matter_power(cosmo, k * h, a_s[i]) * h**3
        cov = np.diag(Pk_gg**2 * (2. / N_modes))
        np.save("data_power/pk_gg_z%4.3f.npy" % z_s[i], Pk_gg)
        np.save("data_power/ks.npy", k)
        np.save("data_power/cov_pk_gg_z%4.3f.npy" % z_s[i], cov)

    # load k and P(k,a)
    ells, cl_tt_tmp = project_Cl(cosmo, galaxies, Pk_a_s, ks, a_s, want_plot)
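# A hypothetical sketch of what project_Cl might do with pyccl (the ell grid is
# an assumption, and ks/Pk_a_s are assumed already converted to CCL's units of
# 1/Mpc and Mpc^3; in practice factors of h like the ones above may be needed):
import pyccl as ccl

def project_Cl(cosmo, tracer, Pk_a_s, ks, a_s, want_plot=False):
    # package the tabulated P(k, a) as a CCL Pk2D object; CCL wants a_arr increasing
    order = np.argsort(a_s)
    pk2d = ccl.Pk2D(a_arr=a_s[order], lk_arr=np.log(ks),
                    pk_arr=Pk_a_s[order], is_logp=False)
    # Limber projection: C_ell = int dchi W(chi)^2 / chi^2 P(k = (ell + 1/2)/chi, a)
    ells = np.geomspace(2., 1000., 20)
    cl_tt = ccl.angular_cl(cosmo, tracer, tracer, ells, p_of_k_a=pk2d)
    return ells, cl_tt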
# Example 6
def main(sim_name, z_templates, R_smooth, machine, pars_vary, check_derivatives=False):

    # convert the templates to floats and sort since angular cl expects that
    z_templates = np.array([float(z) for z in z_templates])
    z_templates = np.sort(z_templates)[::-1]

    fid_Pk_dPk_templates = {}
    for k, z_nbody in enumerate(z_templates):
        # label for this template redshift
        z_str = 'ztmp%d'%k

        # now we need to choose the parameters
        if machine == 'alan':
            #data_dir =  home+"/repos/hybrid_eft_nbody/data/%s/z%4.3f/r_smooth_%d/"%(sim_name,z_nbody,int(R_smooth))
            data_dir =  home+"/repos/hybrid_eft_nbody/data/%s/z%4.3f/"%(sim_name,z_nbody)
        else:
            user_dict, cosmo_dict = load_dict(z_nbody,sim_name,machine)
            data_dir = user_dict['data_dir']

        # load fiducial asdf file
        Pk_templates = asdf.open(os.path.join(data_dir, "Pk_templates_%d.asdf"%(int(R_smooth))))['data']
        ks = Pk_templates['ks']

        # get cosmological parameters for fiducial cosmology
        fid_dict = get_dict(sim_name)

        # available parameters
        main_name = 'AbacusSummit_base'
        sim_par_dict = {'omega_b': [main_name+'_c100_ph000', main_name+'_c101_ph000'],
                       'omega_cdm': [main_name+'_c102_ph000', main_name+'_c103_ph000', main_name+'_c117_ph000', main_name+'_c118_ph000'],
                       'n_s': [main_name+'_c104_ph000', main_name+'_c105_ph000', main_name+'_c119_ph000', main_name+'_c120_ph000'],
                       'sigma8_cb': [main_name+'_c112_ph000', main_name+'_c113_ph000', main_name+'_c125_ph000', main_name+'_c126_ph000']}

        for j, par_vary in enumerate(pars_vary):
            # simulations that vary par_vary (copy the list so we don't mutate
            # the dictionary entry across iterations)
            sim_names = list(sim_par_dict[par_vary])

            # load templates for these simulations
            # temporarily let's use the fine derivatives for z = 1.1, 0.8 and 0.5 for the omega_cdm simulations
            if par_vary == 'omega_cdm' and z_nbody in [0.5, 0.8, 1.1]:
                sim_names[0] = sim_names[2]
                sim_names[1] = sim_names[3]
            elif (z_nbody in [0.4, 0.3, 0.2, 0.1]) or (par_vary == 'omega_b'):
                # fall back to the large-step sims; slice assignment also extends
                # the two-element omega_b list, avoiding an IndexError
                sim_names[2:4] = sim_names[0:2]
                
            Pk_templates_large_plus = asdf.open(os.path.join(data_dir.replace(sim_name, sim_names[0]), "Pk_templates_%d.asdf"%(int(R_smooth))))['data']
            Pk_templates_large_minus = asdf.open(os.path.join(data_dir.replace(sim_name, sim_names[1]), "Pk_templates_%d.asdf"%(int(R_smooth))))['data']
            if check_derivatives:
                Pk_templates_small_plus = asdf.open(os.path.join(data_dir.replace(sim_name, sim_names[2]), "Pk_templates_%d.asdf"%(int(R_smooth))))['data']
                Pk_templates_small_minus = asdf.open(os.path.join(data_dir.replace(sim_name, sim_names[3]), "Pk_templates_%d.asdf"%(int(R_smooth))))['data']

            # get the relevant parameters for each simulation
            pars_dict = {}
            for sim in sim_names:
                dic = get_dict(sim)
                pars_dict[sim] = dic

            # derivative difference for the pair
            h_large = pars_dict[sim_names[0]][par_vary] - fid_dict[par_vary]
            if check_derivatives:
                h_small = pars_dict[sim_names[2]][par_vary] - fid_dict[par_vary]

            plot_no = 1
            plt.subplots(3, 5, figsize=(18,10))
            # plot spectra and save derivatives
            # skip the wavenumbers entry up front: decrementing the loop variable
            # of enumerate has no effect, so filter 'ks' out before enumerating
            # to keep the color index i aligned
            for i, key in enumerate(k for k in Pk_templates.keys() if k != 'ks'):

                # save the fiducial power spectrum
                Pk_tmp = Pk_templates[key]
                Pk_tmp_plus = Pk_templates_large_plus[key]
                Pk_tmp_minus = Pk_templates_large_minus[key]
                dPk_tmp = deriv_Pk(Pk_tmp_plus, Pk_tmp_minus, Pk_tmp, h_large)

                
                # need to save the derivatives
                fid_Pk_dPk_templates[z_str+'_'+key2str(key)+'_'+par_vary] = dPk_tmp
                if j == 0:
                    fid_Pk_dPk_templates[z_str+'_'+key2str(key)] = Pk_tmp

                if check_derivatives:
                    Pk_pred_plus, Pk_pred_minus = predict_Pk(Pk_tmp, dPk_tmp, h_small)
                    Pk_true_plus = Pk_templates_small_plus[key]
                    Pk_true_minus = Pk_templates_small_minus[key]

                    # TESTING
                    '''
                    Pk_pred_plus, Pk_pred_minus = predict_Pk(Pk_tmp, dPk_tmp, h_large)
                    Pk_true_plus = Pk_tmp_plus
                    Pk_true_minus = Pk_tmp_minus
                    '''

                    print(key2str(key))
                    if key2str(key) == '1_1':
                        print(par_vary, h_small, z_nbody)
                        np.save("../montepython_public/Pk_11_"+par_vary+"_ztmp%d.npy"%k, Pk_pred_plus)
                        #np.save("../montepython_public/Pk_11_"+par_vary+"_ztmp%d.npy"%k, Pk_true_plus)

                    
                    
                    plt.subplot(3,5,plot_no)
                    #plt.loglog(ks, Pk_true_plus, color=hexcols[i], label=key+' plus')
                    #plt.loglog(ks, Pk_pred_plus, color=hexcols[i], ls='--')
                    #plt.loglog(ks, Pk_true_minus, color=hexcols[15-i-1], label=key+' minus')
                    #plt.loglog(ks, Pk_pred_minus, color=hexcols[15-i-1], ls='--')
                    plt.plot(ks, np.ones(len(ks)), 'k--')
                    plt.semilogx(ks, Pk_pred_plus/Pk_true_plus, color=hexcols[i], label=key)#+' plus')
                    plt.semilogx(ks, Pk_pred_minus/Pk_true_minus, color=hexcols[15-i-1], ls='--')#, label=key+' minus')
                    plt.legend(ncol=1)

                    if plot_no >= (3-1)*5+1:
                        plt.xlabel('k [h/Mpc]')
                    #plt.ylabel(r'$P_{ab}$ [(Mpc/h)$^3$]')
                    if plot_no % 5 == 1:
                        plt.ylabel(r'$P_{ab}^{\rm pred}/P_{ab}^{\rm true}$')
                    plt.ylim([0.8, 1.2])
                    plot_no += 1
            plt.savefig("figs/deriv_"+par_vary+"__z%4.3f.png"%z_nbody)
            plt.close()
            
    # add the wavenumbers
    fid_Pk_dPk_templates['ks'] = ks

    # add a header with the fiducial cosmological parameters and the redshifts of the templates (TODO: do this more elegantly)
    #header = {par: fid_dict[par] for par in pars_vary}
    #header['h'] = fid_dict['h']
    header = fid_dict.copy()
    header.pop(r'notes')
    header.pop(r'root')
    #header['sigma8'] = header['sigma8_cb']# TESTING CLASS does not take sigma8
    #header.pop(r'A_s') # TESTING CLASS does not take sigma8
    header.pop(r'sigma8_cb') # og
    header.pop(r'sigma8_m')
    header.pop(r'w0_fld')
    header.pop(r'wa_fld')

    print(fid_Pk_dPk_templates.keys())

    # invoke class to get theta value
    '''
    from classy import Class
    target_param_dict = header.copy()

    target_cosmo = Class()
    target_cosmo.set(target_param_dict)
    target_cosmo.compute()
    theta_target = target_cosmo.theta_s_100()
    print("Target 100*theta_s = ",theta_target)
    print('h = ', target_param_dict['h'])
    target_cosmo.set({'omega_cdm': 0.11})
    target_cosmo.compute()
    new_cosmo = target_cosmo
    h = search(new_cosmo, theta_target)
    '''
    # TESTING
    theta_target = 1.041533
    h = 0.6736
        
    # this_cosmo can have same params as target_cosmo changing only the 4 parameters that are being varied and g
    header['theta_s_100'] = theta_target
    header['sigma8_cb'] = fid_dict['sigma8_cb']
    header['w0_fld'] = fid_dict['w0_fld']
    header['wa_fld'] = fid_dict['wa_fld']

    # TODO: we may need to solve for h at each iteration in ccl_class, given theta_star
    for i in range(len(z_templates)):
        header['ztmp%d'%i] = z_templates[i]
        
    #print(header.keys())
    print(header.items())
        
    # save as asdf file
    save_asdf(fid_Pk_dPk_templates,"fid_Pk_dPk_templates_%d.asdf"%(int(R_smooth)), data_dir, header=header)
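# The derivative templates above rest on a simple finite-difference scheme; a
# minimal sketch of what deriv_Pk and predict_Pk presumably compute
# (hypothetical reimplementations, assuming a symmetric step h about the
# fiducial cosmology and numpy-array inputs):
def deriv_Pk(Pk_plus, Pk_minus, Pk_fid, h):
    # central difference: dP/dtheta ~ (P(theta + h) - P(theta - h)) / (2 h)
    return (Pk_plus - Pk_minus) / (2. * h)

def predict_Pk(Pk_fid, dPk, h):
    # first-order Taylor expansion about the fiducial point
    return Pk_fid + h * dPk, Pk_fid - h * dPk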
# Example 7
def main(sim_name, z_nbody, z_ic, R_smooth, machine):

    # now we need to choose the parameters
    if machine == 'alan':
        #data_dir =  home+"/repos/hybrid_eft_nbody/data/%s/z%4.3f/r_smooth_%d/"%(sim_name,z_nbody,int(R_smooth))
        data_dir = home + "/repos/hybrid_eft_nbody/data/%s/z%4.3f/" % (
            sim_name, z_nbody)
    else:
        user_dict, cosmo_dict = load_dict(z_nbody, sim_name, machine)
        data_dir = user_dict['data_dir']

    # indices for the CLASS files
    zs_pk = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.8, 1.1, 99.0])
    i_pk = np.argmin(np.abs(zs_pk - z_nbody)) + 1
    i_zel = np.argmin(np.abs(zs_pk - z_ic)) + 1

    # growth factor
    class_dict = get_dict(sim_name)
    h = float(class_dict['h'])
    n_s = float(class_dict['n_s'])
    Omega_b = float(class_dict['omega_b']) / h**2
    Omega_c = float(class_dict['omega_cdm']) / h**2
    sigma8 = float(class_dict['sigma8_m'])
    cosmo_dict = {
        'h': h,
        'n_s': n_s,
        'Omega_c': Omega_c,
        'Omega_b': Omega_b,
        'sigma8': sigma8
    }
    cosmo = ccl.Cosmology(**cosmo_dict)
    D_z_nbody = ccl.growth_factor(cosmo, 1. / (1 + z_nbody))
    D_z_ic = ccl.growth_factor(cosmo, 1. / (1 + z_ic))
    D_growth = D_z_nbody / D_z_ic

    # name of folder in AbacusSummit/Cosmologies
    index_cosm = int((sim_name.split('c')[-1]).split('_ph')[0])
    name_cosm = "abacus_cosm%03d" % index_cosm

    # power spectrum file
    # Lehman's computation
    #pk_fn = home+"/repos/hybrid_eft_nbody/data/%s/z%4.3f/r_smooth_%d/power_nfft2048.csv"%(sim_name,z_nbody,int(R_smooth))
    pk_fn = home + "/repos/AbacusSummit/Cosmologies/" + name_cosm + "/" + name_cosm + ".z%d_pk_cb.dat" % i_pk
    pk_zel_fn = home + "/repos/AbacusSummit/Cosmologies/" + name_cosm + "/" + name_cosm + ".z%d_pk_cb.dat" % i_zel

    # templates from nbody
    ks_all = np.load(data_dir + "ks_all.npy")
    Pk_all = np.load(data_dir + "Pk_all_%d.npy" % (int(R_smooth)))
    k_lengths = np.load(data_dir + "k_lengths.npy").astype(int)
    k_starts = np.zeros(len(k_lengths), dtype=int)
    k_starts[1:] = np.cumsum(k_lengths)[:-1]
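    # k_starts[i] is the offset of the i-th of the 15 stacked template spectra in the flat ks_all/Pk_all arrays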
    fields = ['1', 'b_1', 'b_2', 'b_{\\nabla^2}', 'b_s']

    # scaling initial power spectrum to that redshift:
    z, D, f = z_nbody, 1., 1.
    klin, plin = np.loadtxt(pk_fn, unpack=True)
    kzel, pzel = np.loadtxt(pk_zel_fn, unpack=True)
    # Lehman's computation
    #bs,bs,klin,plin,bs  = np.loadtxt(pk_fn, unpack=True)
    plin *= D**2
    pzel *= D_growth**2

    # Initialize the class -- with no wisdom file passed it will
    # experiment to find the fastest FFT algorithm for the system.
    # B.H. modified velocileptors/Utils/loginterp.py in case of exception error
    start = time.time()
    cleft = CLEFT(klin, plin, cutoff=10)
    print("Elapsed time: ", time.time() - start, " seconds.")
    # You could save the wisdom file here if you wanted:
    # mome.export_wisdom(wisdom_file_name)

    # The first four are deterministic Lagrangian bias up to third order
    # While alpha and sn are the counterterm and stochastic term (shot noise)
    cleft.make_ptable()
    kv = cleft.pktable[:, 0]

    # frankenstein
    k_frank = np.logspace(np.log10(kv[0]), np.log10(kv[-1]), 1000)

    # parsing the velocileptors spectra
    '''
    r'$(1,b_{\nabla^2})$':0.5*cleft.pktable[:,13]*kv**2, \
    r'$(b_1,b_{\nabla^2})$':0.5*cleft.pktable[:,13]*kv**2, r'$(b_{\nabla^2},b_{\nabla^2})$':cleft.pktable[:,13]*kv**2,
    r'$(b_{\nabla^2},b_s)$':0.5*cleft.pktable[:,13]*kv**2, r'$(b_2,b_{\nabla^2})$':0.5*cleft.pktable[:,13]*kv**2}
    '''
    spectra = {r'$(1,1)$':cleft.pktable[:,1],\
               r'$(1,b_1)$':0.5*cleft.pktable[:,2], r'$(b_1,b_1)$': cleft.pktable[:,3],\
               r'$(1,b_2)$':0.5*cleft.pktable[:,4], r'$(b_1,b_2)$': 0.5*cleft.pktable[:,5],  r'$(b_2,b_2)$': cleft.pktable[:,6],\
               r'$(1,b_s)$':0.5*cleft.pktable[:,7], r'$(b_1,b_s)$': 0.5*cleft.pktable[:,8],  r'$(b_2,b_s)$':0.5*cleft.pktable[:,9],\
               r'$(b_s,b_s)$':cleft.pktable[:,10], r'$(1,b_{\nabla^2})$': np.interp(kv,kzel,pzel*kzel**2), \
               r'$(b_1,b_{\nabla^2})$':np.interp(kv,kzel,pzel*kzel**2), r'$(b_{\nabla^2},b_{\nabla^2})$':np.interp(kv,kzel,pzel*kzel**2),
               r'$(b_{\nabla^2},b_s)$':np.interp(kv,kzel,pzel*kzel**2), r'$(b_2,b_{\nabla^2})$':np.interp(kv,kzel,pzel*kzel**2)}
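    # note: all nabla^2 terms above are approximated by k^2 P_Zeldovich, consistent
    # with building the nabla^2 field as k^2 delta rather than -k^2 delta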

    # parsing the nbody spectra
    nbody_spectra = {}
    name_dic = {}
    counter = 0
    for i in range(len(fields)):
        for j in range(len(fields)):
            if j < i: continue
            label = r'$(' + fields[i] + ',' + fields[j] + r')$'
            start = k_starts[counter]
            size = k_lengths[counter]

            Pk = Pk_all[start:start + size]
            ks = ks_all[start:start + size]
            Pk = Pk[~np.isnan(ks)]
            ks = ks[~np.isnan(ks)]
            nbody_spectra[label] = Pk

            counter += 1

    # dictionary for where to plot each power spectrum
    plot_dic ={r'$(1,1)$':1,
               r'$(1,b_1)$':1, r'$(b_1,b_1)$': 2,\
               r'$(1,b_2)$':4, r'$(b_1,b_2)$': 2,  r'$(b_2,b_2)$': 3,\
               r'$(1,b_s)$':4, r'$(b_1,b_s)$': 5, r'$(b_1,b_{\nabla^2})$': 5,  r'$(b_2,b_s)$': 3, r'$(b_s,b_s)$':6,
               r'$(1,b_{\nabla^2})$':4, r'$(b_2,b_{\nabla^2})$': 3, r'$(b_{\nabla^2},b_s)$': 6, r'$(b_{\nabla^2},b_{\nabla^2})$': 6}

    label_dic={r'$(1,1)$': r'$P_{00}(k)$',
               r'$(1,b_1)$': r'$P_{01}(k)$', r'$(b_1,b_1)$': r'$P_{11}(k)$',\
               r'$(1,b_2)$': r'$P_{02}(k)$', r'$(b_1,b_2)$': r'$P_{12}(k)$',  r'$(b_2,b_2)$': r'$P_{22}(k)$',\
               r'$(1,b_s)$': r'$P_{0s}(k)$', r'$(b_1,b_s)$': r'$P_{1s}(k)$', r'$(b_1,b_{\nabla^2})$': r'$P_{1\nabla}(k)$',  r'$(b_2,b_s)$': r'$P_{2s}(k)$', r'$(b_s,b_s)$': r'$P_{ss}(k)$',
               r'$(1,b_{\nabla^2})$': r'$P_{0\nabla}(k)$', r'$(b_2,b_{\nabla^2})$': r'$P_{2\nabla}(k)$', r'$(b_{\nabla^2},b_s)$': r'$P_{s\nabla}(k)$', r'$(b_{\nabla^2},b_{\nabla^2})$': r'$P_{\nabla\nabla}(k)$'}

    # create frankenstein templates
    spectra_frank_dic = {}
    plt.subplots(2, 3, figsize=(18, 10))
    for i, key in enumerate(spectra.keys()):
        # pivot scale (i.e. k number where we switch between theory and numerics)
        if key in [r'$(1,b_s)$', r'$(b_1,b_s)$']:
            kpivot = 0.2
        elif key in [r'$(b_2,b_s)$', r'$(b_s,b_s)$']:
            kpivot = 3.e-2
        elif key == r'$(b_{\nabla^2},b_s)$':
            kpivot = 2.e-1
        elif key == r'$(b_2,b_{\nabla^2})$':
            kpivot = 3.e-1
        elif key == r'$(b_1,b_{\nabla^2})$':
            kpivot = 7.e-2
        else:
            kpivot = 9.e-2

        # indices of the pivot
        iv_pivot = np.argmin(np.abs(kv - kpivot))
        is_pivot = np.argmin(np.abs(ks - kpivot))
        if_pivot = np.argmin(np.abs(k_frank - kpivot))

        # get the templates and theory for this spectrum
        Pk_tmp = nbody_spectra[key]
        Pk_lpt = spectra[key]

        # LPT defines 1/2 (delta^2-<delta^2>)
        if key == r'$(b_2,b_2)$':
            Pk_tmp /= 4.
        elif 'b_2' in key:
            Pk_tmp /= 2.

        # these are negative, so we flip the sign in order to show them in log space
        if key in [r'$(b_1,b_s)$', r'$(1,b_s)$']:
            Pk_tmp *= -1
            Pk_lpt *= -1

        # this term is positive if nabla^2 delta = -k^2 delta; we flip the sign here because we build the field with k^2 delta and use k^2 P_Zeldovich for the theory
        if key == r'$(b_{\nabla^2},b_s)$':
            Pk_tmp *= -1

        # compute the factor
        factor = spectra[key][iv_pivot] / nbody_spectra[key][is_pivot]
        print("factor for %s = " % key, factor)

        # frankensteining
        kf = np.hstack((kv[:iv_pivot], ks[is_pivot:]))

        # extrapolate as a power law (done for all nabla^2 terms)
        if key in [
                r'$(b_{\nabla^2},b_s)$', r'$(b_{\nabla^2},b_{\nabla^2})$',
                r'$(b_2,b_{\nabla^2})$', r'$(b_1,b_{\nabla^2})$'
        ]:
            print("extrapolating")
            if key == r'$(b_{\nabla^2},b_{\nabla^2})$':
                const = np.mean(Pk_tmp[:is_pivot])
                f = interp1d(ks[:is_pivot],
                             np.ones(is_pivot) * const,
                             bounds_error=False,
                             fill_value=const)
                Pk_frank = np.hstack((f(kv[:iv_pivot]), Pk_tmp[is_pivot:]))
            elif key == r'$(b_{\nabla^2},b_s)$':
                Pk_eft = kv**2 * (spectra[r'$(1,b_s)$'] * 0 +
                                  spectra[r'$(b_1,b_s)$'])
                # og
                #fac = Pk_eft[iv_pivot]/nbody_spectra[key][is_pivot]
                # TESTING
                fac = 0.4
                Pk_frank = np.hstack(
                    ((Pk_eft / fac)[:iv_pivot],
                     (nbody_spectra[r'$(b_{\nabla^2},b_s)$'])[is_pivot:]))
            elif key == r'$(b_2,b_{\nabla^2})$':
                Pk_eft = kv**2 * (spectra[r'$(1,b_2)$'] * 0 +
                                  spectra[r'$(b_1,b_2)$'])
                # TESTING
                fac = 3.7
                Pk_frank = np.hstack(((Pk_eft / fac)[:iv_pivot],
                                      (nbody_spectra[key])[is_pivot:]))
            elif key == r'$(b_1,b_{\nabla^2})$':
                Pk_frank = np.hstack(
                    ((kv**2 * (spectra[r'$(1,b_1)$'] + spectra[r'$(b_1,b_1)$'])
                      )[:iv_pivot],
                     (nbody_spectra[r'$(b_1,b_{\nabla^2})$'])[is_pivot:]))
            else:
                # currently not used -- extrapolating with a power law
                Pk_frank, kf = extrapolate(Pk_tmp, ks, kv, is_pivot)
        else:
            # og
            #Pk_frank = np.hstack((Pk_lpt[:iv_pivot], Pk_tmp[is_pivot:]))
            # elegant scheme for interpolating between theory and simulations
            w = (1. - np.tanh(100 * (kf - kpivot))) * 0.5
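            # w goes smoothly from 1 (pure LPT theory) below kpivot to 0 (pure
            # N-body) above it; the factor of 100 sets the transition width in k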

            f = interp1d(kv, Pk_lpt, bounds_error=False, fill_value=0.)
            Pk_lpt = f(kf)
            f = interp1d(ks, Pk_tmp, bounds_error=False, fill_value=0.)
            Pk_tmp = f(kf)
            Pk_frank = w * Pk_lpt + (1 - w) * Pk_tmp

        # interpolate with the values that we want
        f = interp1d(kf, Pk_frank, bounds_error=False, fill_value=0.)
        Pk_frank = f(k_frank)

        # gaussian filtering to smooth function
        # TESTING
        #Pk_frank[if_pivot-100:if_pivot+100] = gaussian_filter(Pk_frank[if_pivot-100:if_pivot+100], 10.)
        Pk_frank = gaussian_filter(Pk_frank, 10.)

        # getting back to original sign
        # Note: you can also add all of the nabla^2 templates if you want -k^2 delta, but note that in that case (b_{\nabla^2}, b_s) is positive!
        if key in [r'$(b_{\nabla^2},b_s)$', r'$(b_1,b_s)$', r'$(1,b_s)$']:
            #print("Multiply by -1 for real run")
            Pk_frank *= -1.

        spectra_frank_dic[key] = Pk_frank

        print("----------------------------")

    # add the wavenumbers to the dictionary
    spectra_frank_dic['ks'] = k_frank

    # save as asdf file
    #save_asdf(spectra_frank_dic,"Pk_templates_%d.asdf"%(int(R_smooth)),data_dir)
    print("NOT SAVING ASDF!")

    # plot spectra
    for i, key in enumerate(spectra.keys()):

        Pk_frank = spectra_frank_dic[key]
        Pk_tmp = nbody_spectra[key]
        Pk_lpt = spectra[key]
        plot_no = plot_dic[key]

        plt.subplot(2, 3, plot_no)
        plt.loglog(k_frank,
                   np.abs(Pk_frank),
                   color=hexcols[i],
                   label=label_dic[key])
        plt.loglog(ks, np.abs(Pk_tmp), ls='--', color=hexcols[i])
        if 'nabla' in key:
            pass  #plt.loglog(ks, np.abs(Pk_tmp), ls='--', color=hexcols[i])
        else:
            plt.loglog(kv, Pk_lpt, color=hexcols[i], ls=':')  #, label=key)
        #plt.loglog(klin, plin, ls='-', color='y')

        plt.legend(ncol=1)

        if plot_no in [1, 4]:
            plt.ylabel(r'$P_{\alpha \beta}(k) \ [({\rm Mpc}/h)^3]$')
        if plot_no in [4, 5, 6]:
            plt.xlabel(r'$k \ [h/{\rm Mpc}]$')

    plt.savefig("figs/templates_" +
                sim_name.replace('AbacusSummit_base_', '') +
                "_z%4.3f.pdf" % z_nbody)
    plt.close()
# Example 8
import numpy as np
import matplotlib.pyplot as plt
from choose_parameters import load_dict

# redshift choice
#z_nbody = 1.1
z_nbody = 1.

machine = 'alan'
#machine = 'NERSC'

#sim_name = "AbacusSummit_hugebase_c000_ph000"
sim_name = "Sim256"

user_dict, cosmo_dict = load_dict(z_nbody,sim_name,machine)

R_smooth = user_dict['R_smooth']
data_dir = user_dict['data_dir']

emergency_dir = "/global/cscratch1/sd/boryanah/data_hybrid/gadget/%s/z%.3f/"%('Sim256',z_nbody)

Pk_hh = np.load(data_dir+"Pk_hh.npy")
ks = np.load(data_dir+"ks.npy")
#Pk_err = np.load(data_dir+"Pk_hh_err.npy")
#Pk_err = np.load(emergency_dir+"Pk_hh_err.npy")

ks_all = np.load(data_dir+"ks_all.npy")
Pk_all = np.load(data_dir+"Pk_all_%d.npy"%(int(R_smooth)))
k_lengths = np.load(data_dir+"k_lengths.npy").astype(int)
k_starts = np.zeros(len(k_lengths),dtype=int)
k_starts[1:] = np.cumsum(k_lengths)[:-1]
# Example 9
def main(sim_name, z_nbody, z_ic, R_smooth, machine, want_chunk=True):

    # load dictionary with relevant quantities
    user_dict, cosmo_dict = load_dict(z_nbody,sim_name,machine)
    interlaced = user_dict['interlaced']
    dens_dir = user_dict['dens_dir']
    data_dir = user_dict['data_dir']
    R_smooth = user_dict['R_smooth']
    n_chunks = user_dict['n_chunks']
    z_nbody = user_dict['z_nbody']
    N_dim = user_dict['N_dim']
    z_ic = user_dict['z_ic']
    Lbox = user_dict['Lbox']

    # names of the 4 weighted fields (the 'ones' field is just the unweighted particles)
    field_names = ['delta', 'delta_sq', 'nabla_sq', 's_sq']
    factors = {'delta': 1, 'delta_sq': 2, 'nabla_sq': 1, 's_sq': 2}
    
    # load the cosmology
    cosmo = ccl.Cosmology(**cosmo_dict)
    
    # factor to scale the density as suggested in Modi et al.
    D_z_nbody = ccl.growth_factor(cosmo,1./(1+z_nbody))
    D_z_ic = ccl.growth_factor(cosmo,1./(1+z_ic))
    D_growth = D_z_nbody/D_z_ic

    
    if want_chunk:
        fields = {}
        for i in range(len(field_names)):
            fields[field_names[i]], start_pos, end_pos = load_field_chunk_bigfile(field_names[i], dens_dir, R_smooth, N_dim, rank, n_chunks, Lbox)
        print("loaded chunkwise fields")
    else:
        fields = {}
        for i in range(len(field_names)):
            fields[field_names[i]] = load_field_bigfile(field_names[i], dens_dir, R_smooth, N_dim)
        print("loaded fields")    
    
    # create directory if it does not exist
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)

    # loop over all chunks
    for i_chunk in range(n_chunks):
        if rank != i_chunk%size: continue
        print("saving chunk number %d out of %d chunks"%(i_chunk,n_chunks))
        
        if os.path.exists(data_dir+"pos_s_sq_snap_%03d.fits"%i_chunk):
            print("Data from chunk %d already saved, moving on"%i_chunk)
            continue
        
        if user_dict['sim_code'] == 'abacus':
            # load simulation information 
            lagr_pos, pos_snap, halo_table = read_abacus(sim_name,z_nbody,i_chunk)
            pos_halo = halo_table['x_L2com']
            m_halo = halo_table['N']*user_dict['m_part']

            # convert [-Lbox/2.,Lbox/2.] to [0,Lbox]
            pos_halo += Lbox/2.
            pos_snap += Lbox/2.
            lagr_pos += Lbox/2.
            
        elif user_dict['sim_code'] == 'gadget':
            # find all files (TODO: fix for multiple chunks)
            ic_fns = sorted(glob.glob(user_dict['sim_dir']+"ic_box_L%d_%d*"%(Lbox,user_dict['ppd'])))
            snap_fns = sorted(glob.glob(user_dict['sim_dir']+"snap_box_L%d_%d_%03d*"%(Lbox,user_dict['ppd'],user_dict['ind_snap'])))
            fof_fns = sorted(glob.glob(user_dict['sim_dir']+"fof_snap_box_L%d_%d_%03d*.fits"%(Lbox,user_dict['ppd'],user_dict['ind_snap'])))

            print(ic_fns)
            print(snap_fns)
            print(fof_fns)
            
            lagr_pos, pos_snap, pos_halo, m_halo = read_gadget(ic_fns,snap_fns,fof_fns,i_chunk,n_chunks,want_chunk=want_chunk)

        # TESTING
        #save_pos(pos_halo,"halo_%03d"%i_chunk,data_dir,mass=m_halo)
        del pos_halo, m_halo

        # offset the positions to match the chunk
        if want_chunk:
            if start_pos < end_pos:
                print("normal chunk",i_chunk)
                lagr_pos[:,0] -= start_pos
            else:
                print("subverted chunk",i_chunk)
                choice1 = (start_pos <= lagr_pos[:,0]) & (lagr_pos[:,0] < Lbox)
                choice2 = (end_pos > lagr_pos[:,0]) & (lagr_pos[:,0] >= 0)  
                print("min max mean = ",np.min(lagr_pos[:,0]),np.max(lagr_pos[:,0]),np.mean(lagr_pos[:,0]))
                lagr_pos[choice1,0] -= start_pos
                lagr_pos[choice2,0] += Lbox-start_pos
                print("min max mean = ",np.min(lagr_pos[:,0]),np.max(lagr_pos[:,0]),np.mean(lagr_pos[:,0]))
                
        # get i, j, k for position on the density array
        lagr_ijk = (lagr_pos/(Lbox/N_dim)).astype(int)%N_dim
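        # nearest-grid-point lookup: each particle carries the value of the
        # smoothed IC field at its Lagrangian grid cell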
        del lagr_pos

        # save the particles as they are for the ones field
        #save_pos(pos_snap,"ones_snap_%03d"%i_chunk,data_dir) # TESTING
        for key in fields.keys():
            values = (fields[key]*D_growth**factors[key])[lagr_ijk[:,0],lagr_ijk[:,1],lagr_ijk[:,2]]
            save_pos(pos_snap,key+"_snap_%03d"%i_chunk,data_dir,value=values)
        del pos_snap, lagr_ijk
    print("Saved all particle and halo positions")
    del fields