Example #1
import numpy as np
import c2raytools as c2t


def cross_correlation_power_spectra(fielda, fieldb, kbins=100, box_dims=None):
    Pab, kab = c2t.cross_power_spectrum_1d(fielda,
                                           fieldb,
                                           kbins=kbins,
                                           box_dims=box_dims)
    Paa, kaa = c2t.power_spectrum_1d(fielda, kbins=kbins, box_dims=box_dims)
    Pbb, kbb = c2t.power_spectrum_1d(fieldb, kbins=kbins, box_dims=box_dims)
    Rxx = Pab / np.sqrt(Paa * Pbb)
    return Rxx, kab
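A minimal usage sketch, assuming `c2t.set_sim_constants` has already been called and that the input cubes are built with the hypothetical `owntools` helpers that appear in the later examples:

cube_21 = owntools.coeval_21cm(xfrac_dir, dens_dir, z, mean_subtract=True)  # hypothetical inputs,
cube_m = owntools.coeval_overdens(dens_dir, z)                              # as in later examples
r_k, k = cross_correlation_power_spectra(cube_21, cube_m,
                                         kbins=100, box_dims=c2t.conv.LB)
# r_k is the cross-correlation coefficient P_ab / sqrt(P_aa * P_bb) in each k bin.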
Example #2
def _integrated_bispectrum_normalized_cross(cube,
                                            cube2,
                                            Ncuts=4,
                                            kbins=20,
                                            box_dims=None):
    #assert statistic in ['mean']
    assert cube.shape == cube2.shape
    assert all(s % Ncuts == 0 for s in cube.shape)
    Lx, Ly, Lz = [s // Ncuts for s in cube.shape]
    rLs = [[Lx / 2. + i * Lx, Ly / 2. + j * Ly, Lz / 2. + k * Lz]
           for i in xrange(Ncuts) for j in xrange(Ncuts)
           for k in xrange(Ncuts)]
    B_k = np.zeros(kbins, dtype=np.float64)
    P_k = np.zeros(kbins, dtype=np.float64)
    sig2 = 0
    n_box = Ncuts**3
    V_L = (Lx * Ly * Lz)
    for i in xrange(n_box):
        w = W_L(cube, rLs[i], [Lx, Ly, Lz])
        w2 = W_L(cube2, rLs[i], [Lx, Ly, Lz])
        c = cube * w
        c2 = cube2 * w2
        pk, ks = c2t.power_spectrum_1d(c, kbins=kbins, box_dims=box_dims)
        d_mean = c2.sum(dtype=np.float64) / V_L
        B_k += pk * d_mean
        P_k += pk
        sig2 += (d_mean)**2  #c2.var(dtype=np.float64)
        print 100 * (i + 1) / n_box, "%"
    B_k = B_k / n_box
    P_k = P_k / n_box
    sig2 = sig2 / n_box
    return B_k / P_k / sig2, ks
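The helper `W_L` is not defined in this snippet. From the way it is used (multiplied into the full cube, with `c2.sum() / V_L` giving the local mean), it appears to be a top-hat window selecting the subvolume of side lengths (Lx, Ly, Lz) centred at `rL`; a minimal sketch under that assumption:

def W_L(cube, rL, Ls):
    # Assumed implementation: binary top-hat window that is 1 inside the
    # subvolume of side lengths Ls centred on grid position rL, 0 outside.
    w = np.zeros(cube.shape)
    x0, y0, z0 = [int(rL[i] - Ls[i] / 2.) for i in range(3)]
    w[x0:x0 + int(Ls[0]), y0:y0 + int(Ls[1]), z0:z0 + int(Ls[2])] = 1.
    return w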
Example #3
def integrated_bispectrum_normalized_cross(cube,
                                           cube2,
                                           Ncuts=4,
                                           statistic='mean',
                                           kbins=100):
    assert statistic in ['mean']
    assert all(s % Ncuts == 0 for s in cube.shape)
    Lx, Ly, Lz = [s // Ncuts for s in cube.shape]
    smaller_cubes = [
        cube[i * Lx:(i + 1) * Lx, j * Ly:(j + 1) * Ly, k * Lz:(k + 1) * Lz]
        for i in xrange(Ncuts) for j in xrange(Ncuts) for k in xrange(Ncuts)
    ]
    smaller_cubes2 = [
        cube2[i * Lx:(i + 1) * Lx, j * Ly:(j + 1) * Ly, k * Lz:(k + 1) * Lz]
        for i in xrange(Ncuts) for j in xrange(Ncuts) for k in xrange(Ncuts)
    ]
    B_k = np.zeros(kbins, dtype=np.float64)
    P_k = np.zeros(kbins, dtype=np.float64)
    sig2 = 0
    for i in xrange(Ncuts**3):
        c = smaller_cubes[i]
        c2 = smaller_cubes2[i]
        pk, ks = c2t.power_spectrum_1d(c,
                                       kbins=kbins,
                                       box_dims=c2t.conv.LB / Ncuts)
        B_k += pk * c2.mean(dtype=np.float64)
        P_k += pk
        sig2 += c2.mean(dtype=np.float64)**2  #c2.var(dtype=np.float64)
    B_k = B_k / Ncuts**3
    P_k = P_k / Ncuts**3
    sig2 = sig2 / Ncuts**3
    return B_k / P_k / sig2, ks
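Reading off the code, both of the normalized variants above return the subvolume average of the local power spectrum weighted by the local mean of the second field, normalized by the mean local power spectrum and the mean squared local mean:

$$\hat{f}(k) = \frac{\overline{P_L(k)\,\bar{\delta}_L}}{\overline{P_L(k)}\;\overline{\bar{\delta}_L^{\,2}}},$$

where the overbar denotes an average over the $N_\mathrm{cuts}^3$ subvolumes, $P_L(k)$ is the power spectrum of a subvolume of the first cube, and $\bar{\delta}_L$ is the mean of the corresponding subvolume of the second cube.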
Example #4
def position_dependent_powerspectra(xfrac_dir,
                                    dens_dir,
                                    z,
                                    ps_quantity='signal',
                                    quantity='density',
                                    statistic='mean',
                                    kbins=100,
                                    Ncuts=4):
    quantities = ['density', 'xfrac', 'signal']
    stats = ['mean', 'skewness', 'kurtosis']
    assert ps_quantity in quantities
    assert quantity in quantities
    assert statistic in stats
    if ps_quantity == 'signal':
        cube = owntools.coeval_21cm(xfrac_dir, dens_dir, z)
    elif ps_quantity == 'density':
        cube = owntools.coeval_dens(dens_dir, z)
    elif ps_quantity == 'xfrac':
        cube = owntools.coeval_xfrac(xfrac_dir, z)
    stat_functions = [np.mean, scipy.stats.skew, scipy.stats.kurtosis]
    P_k_s = np.zeros((kbins, Ncuts**3))
    k_s = np.zeros((kbins, Ncuts**3))
    stat = np.zeros(Ncuts**3)
    stat_func = stat_functions[stats.index(statistic)]
    if quantity == 'density':
        quant = owntools.coeval_dens(dens_dir, z)
        quant = quant / quant.mean(dtype=np.float64) - 1.
    elif quantity == 'xfrac':
        quant = owntools.coeval_xfrac(xfrac_dir, z)
    elif quantity == 'signal':
        quant = owntools.coeval_21cm(xfrac_dir, dens_dir, z)
    Lx, Ly, Lz = [s // Ncuts for s in cube.shape]
    smaller_cubes = [
        cube[i * Lx:(i + 1) * Lx, j * Ly:(j + 1) * Ly, k * Lz:(k + 1) * Lz]
        for i in xrange(Ncuts) for j in xrange(Ncuts) for k in xrange(Ncuts)
    ]
    smaller_quant = [
        quant[i * Lx:(i + 1) * Lx, j * Ly:(j + 1) * Ly, k * Lz:(k + 1) * Lz]
        for i in xrange(Ncuts) for j in xrange(Ncuts) for k in xrange(Ncuts)
    ]
    for i in xrange(len(smaller_cubes)):
        pk, ks = c2t.power_spectrum_1d(smaller_cubes[i],
                                       kbins=kbins,
                                       box_dims=c2t.conv.LB / Ncuts)
        P_k_s[:, i], k_s[:, i] = pk, ks
        stat[i] = stat_func(smaller_quant[i])
    return P_k_s, k_s, stat
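A hypothetical post-processing step (not part of the original snippet) showing one way these outputs could be combined: correlate the local power in each k bin with the local statistic across subvolumes.

P_k_s, k_s, stat = position_dependent_powerspectra(xfrac_dir, dens_dir, z,
                                                   ps_quantity='signal',
                                                   quantity='density',
                                                   statistic='mean')
# Pearson correlation between local power and local statistic, per k bin.
response = np.array([np.corrcoef(P_k_s[j, :], stat)[0, 1]
                     for j in xrange(P_k_s.shape[0])])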
Example #5
def integrated_bispectrum(cube, Ncuts=4, statistic='mean', kbins=100):
    assert statistic in ['mean']
    assert all(s % Ncuts == 0 for s in cube.shape)
    Lx, Ly, Lz = [s // Ncuts for s in cube.shape]
    smaller_cubes = [
        cube[i * Lx:(i + 1) * Lx, j * Ly:(j + 1) * Ly, k * Lz:(k + 1) * Lz]
        for i in xrange(Ncuts) for j in xrange(Ncuts) for k in xrange(Ncuts)
    ]
    B_k = np.zeros(kbins)
    for c in smaller_cubes:
        bk, ks = c2t.power_spectrum_1d(c,
                                       kbins=kbins,
                                       box_dims=c2t.conv.LB / Ncuts)
        B_k += bk * c.mean(dtype=np.float64)
    B_k = B_k / Ncuts**3
    return B_k, ks
Example #6
base_path = '/disk/sn-12/garrelt/Science/Simulations/Reionization/C2Ray_WMAP5/114Mpc_WMAP5' 
density_filename = base_path+'/coarser_densities/halos_removed/30.000n_all.dat'
velocity_filename = base_path+'/coarser_densities/halos_removed/30.000v_all.dat'

#Enable output
c2t.set_verbose(True)

#We are using the 114/h Mpc simulation box, so set all the proper conversion factors
c2t.set_sim_constants(boxsize_cMpc = 114.)

#Read density
dfile = c2t.DensityFile(density_filename)

#Read a velocity data file
vfile = c2t.VelocityFile(velocity_filename)
kms = vfile.get_kms_from_density(dfile)

#Make a distorted box. Assume x_i = 0. We could of course also have calculated dT like in example.py
#and passed that to get_distorted_dt
distorted = c2t.get_distorted_dt(dfile.raw_density.astype('float64'), kms, dfile.z, los_axis=0, num_particles=20)

#Calculate power spectra
ps_dist,k = c2t.power_spectrum_1d(distorted)
ps_nodist,k = c2t.power_spectrum_1d(dfile.raw_density)

#Plot ratio
pl.semilogx(k, ps_dist/ps_nodist)
pl.xlabel(r'$k \; \mathrm{[Mpc^{-1}]}$')
pl.ylabel(r'$P_k^{\mathrm{PV}}/P_k^{\mathrm{NoPV}}$')
pl.show()
Example #7
#Read density
dfile = c2t.DensityFile(density_filename)

#Read velocity data file and get the actual velocity 
vfile = c2t.VelocityFile(velocity_filename)
kms = vfile.get_kms_from_density(dfile)

#To speed things up, we will assume that the IGM is completely neutral, so instead
#of reading an ionization fraction file, we will just make an array of zeros
xi = np.zeros_like(dfile.cgs_density)

#Calculate the dT
dT_realspace = c2t.calc_dt(xi, dfile, z=dfile.z)

#Make the redshift-space volume
dT_redshiftspace = c2t.get_distorted_dt(dT_realspace, kms, \
                                 dfile.z, los_axis=0, num_particles=20)

#Calculate spherically-averaged power spectra, 
#using 20 logarithmically spaced k bins, from 1e-1 to 10
kbins = 10**np.linspace(-1, 1, 20)
ps_dist, k = c2t.power_spectrum_1d(dT_redshiftspace, kbins)
ps_nodist, k = c2t.power_spectrum_1d(dT_realspace, kbins)

#Plot ratio. On large scales, this should be close to 1.83
pl.semilogx(k, ps_dist/ps_nodist)
pl.xlabel(r'$k \; \mathrm{[Mpc^{-1}]}$')
pl.ylabel(r'$P_k^{\mathrm{PV}}/P_k^{\mathrm{NoPV}}$')
pl.show()
Example #8
#Enable output
c2t.set_verbose(True)

#We are using the 114/h Mpc simulation box, so set all the proper conversion factors
c2t.set_sim_constants(boxsize_cMpc=114.)

#Read density
dfile = c2t.DensityFile(density_filename)

#Read a velocity data file
vfile = c2t.VelocityFile(velocity_filename)
kms = vfile.get_kms_from_density(dfile)

#Make a distorted box. Assume x_i = 0. We could of course also have calculated dT like in example.py
#and passed that to get_distorted_dt
distorted = c2t.get_distorted_dt(dfile.raw_density.astype('float64'),
                                 kms,
                                 dfile.z,
                                 los_axis=0,
                                 num_particles=20)

#Calculate power spectra
ps_dist, k = c2t.power_spectrum_1d(distorted)
ps_nodist, k = c2t.power_spectrum_1d(dfile.raw_density)

#Plot ratio
pl.semilogx(k, ps_dist / ps_nodist)
pl.xlabel(r'$k \; \mathrm{[Mpc^{-1}]}$')
pl.ylabel(r'$P_k^{\mathrm{PV}}/P_k^{\mathrm{NoPV}}$')
pl.show()
#xvs  = ph_count_info[[np.abs(ph_count_info[:,-2]-x).argmin() for x in xvs_],-2]
zs_  = ph_count_info[[np.abs(ph_count_info[:,-2]-x).argmin() for x in xvs_], 0]

dens_zs = owntools.get_zs_list(dens_dir, file_type='/*n_all.dat')
zs   = dens_zs[[np.abs(dens_zs-i).argmin() for i in zs_]]
xvs  = ph_count_info[[np.abs(ph_count_info[:,0]-i).argmin() for i in zs],-2]

i = 9
z = 6.549

cube_21 = owntools.coeval_21cm(xfrac_dir, dens_dir, z, mean_subtract=True)
cube_m  = owntools.coeval_overdens(dens_dir, z)
cube_d = owntools.coeval_dens(dens_dir, z)
cube_x = owntools.coeval_xfrac(xfrac_dir, z)

P_dd, ks_m = c2t.power_spectrum_1d(cube_m, kbins=100, box_dims=c2t.conv.LB)
P_21, ks_x = c2t.power_spectrum_1d(cube_21, kbins=100, box_dims=c2t.conv.LB)

f_dd, k_dd  = squeezed_bispectrum._integrated_bispectrum_normalized_cross(cube_m, cube_m, Ncuts=Ncuts)
f_21d, k_xd = squeezed_bispectrum._integrated_bispectrum_normalized_cross(cube_21, cube_m, Ncuts=Ncuts)
f_xx, k_xx  = squeezed_bispectrum._integrated_bispectrum_normalized_cross(cube_x-cube_x.mean(), cube_x-cube_x.mean(), Ncuts=Ncuts)
f_x_ = squeezed_bispectrum._integrated_bispectrum_normalized_cross1(1-cube_x, cube_m, Ncuts=Ncuts)

ks = k_dd.copy()

### Zeta calculation
sources = np.loadtxt(parent_dir+'sources/'+str(z)+'-coarser_sources.dat', skiprows=1)
M_min = sources[np.nonzero(sources[:,-2]),-2].min()*c2t.conv.M_grid*c2t.const.solar_masses_per_gram #solarunit
M_max = sources[np.nonzero(sources[:,-2]),-2].max()*c2t.conv.M_grid*c2t.const.solar_masses_per_gram #solarunit
M_halo_sum = sources[:,-2].sum()*c2t.conv.M_grid*c2t.const.solar_masses_per_gram                    #solarunit
mpc_to_cm  = 3.086e24
z_arr = np.loadtxt(z_filename, unpack=True)

c2t.set_sim_constants(boxsize_cMpc = 244.)

for i in range(1,len(z_arr)):
    output_filename = output_path+'%.3f.dat'%(z_arr[i])
    out1_filename = out1_path+'%.3f.dat'%(z_arr[i])
    print 'z = %.3f'% (z_arr[i])
 
    dT_file = ''.join(glob.glob('./dT_boxes/dT_%.3f.cbin'%(z_arr[i])))
    print 'dT file = %s' % dT_file
    dT_box = c2t.read_cbin(dT_file, bits=64, order='F')
    
    dT_rsd_file = ''.join(glob.glob('./dT_pv_boxes/dT_pv_%.3f.cbin'%(z_arr[i])))
    print 'dT_pv file = %s' % dT_rsd_file
    dT_rsd_box = c2t.read_cbin(dT_rsd_file, bits=64, order='F')
    
    print 'Calculating power spectra...'

    ps_raw,k = c2t.power_spectrum_1d(dT_box, kbins=75)
    ps_rsd,k = c2t.power_spectrum_1d(dT_rsd_box, kbins=75)
   
    out = open(output_filename, 'w')
    out1 = open(out1_filename, 'w')
    for j in range(0,len(k)):
        out.write('%.3f %.4f\n' % (k[j],ps_raw[j]))
        out1.write('%.3f %.4f\n' % (k[j],ps_rsd[j]))
    out.close()
    out1.close()
    
    density_file = ''.join(glob.glob(density_path+'%.3fn_all.dat'%(z_arr[i])))
    if not density_file:
        zi = np.where(z_f == z_arr[i])[0][0]
        density_file = ''.join(glob.glob(density_path+'%.3fn_all.dat'%(z_f[zi-1])))
    print 'Density file = %s' % density_file
    dfile = c2t.DensityFile(density_file)

    xfrac_file = ''.join(glob.glob(xfrac_path+'xfrac3d_%.3f.bin'%(z_arr[i])))
    print 'Ionized fraction file = %s' % xfrac_file
    xfile = c2t.XfracFile(xfrac_file)

    dT_file = ''.join(glob.glob('./dT_boxes/dT_%.3f.cbin'%(z_arr[i])))
    print 'dT file = %s' % dT_file
    dT_box = c2t.read_cbin(dT_file, bits=64, order='F')

    ps_dT,kT = c2t.power_spectrum_1d(dT_box, kbins=75)
    ps_xi,kxi = c2t.power_spectrum_1d(xfile.xi/xfile.xi.mean()-1, kbins=75)
    ps_x,kx = c2t.power_spectrum_1d((1-xfile.xi)/(1-xfile.xi.mean())-1,kbins=75)
    ps_d,kd = c2t.power_spectrum_1d(dfile.raw_density/dfile.raw_density.mean()-1, kbins=75)
    ps_xid,kxid = c2t.cross_power_spectrum_1d((1-xfile.xi)/(1-xfile.xi.mean())-1,(dfile.raw_density/dfile.raw_density.mean()-1), kbins=75)

    if not np.array_equal(kT, kxi): print 'Warning: k bins of dT and x_i spectra differ'
    if not np.array_equal(kT, kd): print 'Warning: k bins of dT and density spectra differ'

    factor = pow(dT_box.mean(),2)
    out = open(output_filename, 'w')
    for j in range(len(kT)):
        out.write('%.3f %.4f %.4f %.4f %.4f %.4f\n' % (kT[j],ps_dT[j],ps_xi[j]*factor,ps_x[j]*factor,ps_d[j]*factor,ps_xid[j]*factor))
    out.close()
Example #12
#Read density
dfile = c2t.DensityFile(density_filename)

#Read velocity data file and get the actual velocity
vfile = c2t.VelocityFile(velocity_filename)
kms = vfile.get_kms_from_density(dfile)

#To speed things up, we will assume that the IGM is completely neutral, so instead
#of reading an ionization fraction file, we will just make an array of zeros
xi = np.zeros_like(dfile.cgs_density)

#Calculate the dT
dT_realspace = c2t.calc_dt(xi, dfile, z=dfile.z)

#Make the redshift-space volume
dT_redshiftspace = c2t.get_distorted_dt(dT_realspace, kms, \
                                 dfile.z, los_axis=0, num_particles=20)

#Calculate spherically-averaged power spectra,
#using 20 logarithmically spaced k bins, from 1e-1 to 10
kbins = 10**np.linspace(-1, 1, 20)
ps_dist, k = c2t.power_spectrum_1d(dT_redshiftspace, kbins)
ps_nodist, k = c2t.power_spectrum_1d(dT_realspace, kbins)

#Plot ratio. On large scales, this should be close to 1.83
pl.semilogx(k, ps_dist / ps_nodist)
pl.xlabel(r'$k \; \mathrm{[Mpc^{-1}]}$')
pl.ylabel(r'$P_k^{\mathrm{PV}}/P_k^{\mathrm{NoPV}}$')
pl.show()
Example #13
zs_ = ph_count_info[[np.abs(ph_count_info[:, -2] - x).argmin() for x in xvs_],
                    0]

dens_zs = owntools.get_zs_list(dens_dir, file_type='/*n_all.dat')
zs = dens_zs[[np.abs(dens_zs - i).argmin() for i in zs_]]
xvs = ph_count_info[[np.abs(ph_count_info[:, 0] - i).argmin() for i in zs], -2]

P_k_m = []
P_k_x = []

## Global Power Spectrum
for i in xrange(len(zs)):
    z = zs[i]
    print z
    cube_x = owntools.coeval_21cm(xfrac_dir, dens_dir, z, mean_subtract=True)
    cube_m = owntools.coeval_overdens(dens_dir, z)
    pk_m, ks_m = c2t.power_spectrum_1d(cube_m, kbins=100, box_dims=c2t.conv.LB)
    pk_x, ks_x = c2t.power_spectrum_1d(cube_x, kbins=100, box_dims=c2t.conv.LB)
    P_k_m.append((pk_m, ks_m))
    P_k_x.append((pk_x, ks_x))

iB_k_mm = []
iB_k_xm = []
### Response Functions
for i in xrange(len(zs)):
    z = zs[i]
    print z
    cube_x = owntools.coeval_21cm(xfrac_dir, dens_dir, z, mean_subtract=True)
    cube_m = owntools.coeval_overdens(dens_dir, z)
    ibk_m_m, k_m_m = squeezed_bispectrum._integrated_bispectrum_normalized_cross(
        cube_m, cube_m, Ncuts=Ncuts)
    ibk_x_m, k_x_m = squeezed_bispectrum._integrated_bispectrum_normalized_cross(
        cube_x, cube_m, Ncuts=Ncuts)
    iB_k_mm.append((ibk_m_m, k_m_m))
    iB_k_xm.append((ibk_x_m, k_x_m))
Example #14
c2t.set_sim_constants(500)

#zs  = [20.134, 17.848, 15.596, 12.603, 10.11, 9.026]
zs = [20.134, 12.603, 9.026, 7.305, 6.549, 6.113]
colors = ['b', 'r', 'g', 'k', 'c', 'm']

xfrac_dir = '/disk/dawn-1/garrelt/Reionization/C2Ray_WMAP7/500Mpc/500Mpc_f2_0_300/results/'
dens_dir = '/disk/dawn-1/garrelt/Reionization/C2Ray_WMAP7/500Mpc/coarser_densities/nc300/'

plt.figure()
for i in xrange(len(zs)):
    z = zs[i]
    cube_x = owntools.coeval_xfrac(xfrac_dir, z)
    cube_x = cube_x - cube_x.mean()
    cube_m = owntools.coeval_overdens(dens_dir, z)
    P_mm, k_mm = c2t.power_spectrum_1d(cube_m, kbins=100)
    P_xx, k_xx = c2t.power_spectrum_1d(cube_x, kbins=100)
    P_mx, k_mx = c2t.cross_power_spectrum_1d(cube_x, cube_m, kbins=100)
    plt.subplot(2, 3, i + 1)
    plt.title('z=' + str(z))
    plt.loglog(k_mm, P_mm, c='b')
    plt.loglog(k_xx, P_xx, c='r')
    plt.loglog(k_mx, P_mx, c='g')
    print 'z=', z, 'done'

plt.suptitle(r'BLUE: $\delta \delta$, RED: $x_n x_n$, GREEN: $\delta x_n$')
for i in xrange(len(zs)):
    plt.subplot(2, 3, i + 1)
    plt.ylim(2e-4, 2e4)
    plt.xlabel('k')
    plt.ylabel('P(k)')
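
# (Not in the original snippet) presumably the figure is then displayed or saved, e.g.:
plt.show()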