# The snippets below assume the standard Pylians3 modules; this import block is
# a sketch added for completeness and may differ from the original scripts.
import os
import numpy as np
import pandas as pd
import h5py
import readgadget
import readfof
import MAS_library as MASL
import Pk_library as PKL
import smoothing_library as SL
import redshift_space_library as RSL


def find_pdf(snapshot, grid, MAS, do_RSD, axis, threads, ptype, fpdf,
             smoothing, Filter):

    if os.path.exists(fpdf): return 0

    # read header
    head = readgadget.header(snapshot)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Nall = head.nall  #Total number of particles
    Masses = head.massarr * 1e10  #Masses of the particles in Msun/h
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
    h = head.hubble

    # read snapshot
    pos = readgadget.read_block(snapshot, "POS ", ptype) / 1e3  #Mpc/h

    # move particles to redshift-space
    if do_RSD:
        vel = readgadget.read_block(snapshot, "VEL ", ptype)  #km/s
        RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)

    # calculate the overdensity field
    delta = np.zeros((grid, grid, grid), dtype=np.float32)
    if len(ptype) > 1:  #for multiple particles read masses
        mass = np.zeros(pos.shape[0], dtype=np.float32)
        offset = 0
        for j in ptype:
            mass[offset:offset + Nall[j]] = Masses[j]
            offset += Nall[j]
        MASL.MA(pos, delta, BoxSize, MAS, W=mass)
    else:
        MASL.MA(pos, delta, BoxSize, MAS)
    delta /= np.mean(delta, dtype=np.float64)
    #delta -= 1.0

    # define the array containing the variance
    var = np.zeros(smoothing.shape[0], dtype=np.float64)
    var_log = np.zeros(smoothing.shape[0], dtype=np.float64)

    # do a loop over the different smoothing scales
    for i, smooth_scale in enumerate(smoothing):

        # smooth the overdensity field
        W_k = SL.FT_filter(BoxSize, smooth_scale, grid, Filter, threads)
        delta_smoothed = SL.field_smoothing(delta, W_k, threads)

        # compute the variance of the field
        var[i] = np.var(delta_smoothed)

        # variance of log10 of the field (here 1+delta), using only positive cells
        indexes = np.where(delta_smoothed > 0.0)
        var_log[i] = np.var(np.log10(delta_smoothed[indexes]))

    # save results to file
    np.savetxt(fpdf, np.transpose([smoothing, var, var_log]), delimiter='\t')
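A minimal usage sketch (assumed, not from the original script): the snapshot path, output file and smoothing radii below are placeholders.

snapshot  = '/path/to/snapdir_004/snap_004'   # hypothetical Gadget snapshot prefix
smoothing = np.array([5.0, 10.0, 20.0])       # example smoothing radii in Mpc/h
find_pdf(snapshot, grid=512, MAS='CIC', do_RSD=False, axis=0, threads=4,
         ptype=[1], fpdf='variance_vs_R.txt', smoothing=smoothing,
         Filter='Top-Hat')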
Example #2
def find_pdf(snapshot, grid, MAS, do_RSD, axis, threads, ptype, fpdf,
             smoothing, Filter):

    if os.path.exists(fpdf): return 0

    # read header
    head = readgadget.header(snapshot)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Nall = head.nall  #Total number of particles
    Masses = head.massarr * 1e10  #Masses of the particles in Msun/h
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
    h = head.hubble

    # read snapshot
    pos = readgadget.read_block(snapshot, "POS ", ptype) / 1e3  #Mpc/h

    # move particles to redshift-space
    if do_RSD:
        vel = readgadget.read_block(snapshot, "VEL ", ptype)  #km/s
        RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)

    # calculate the overdensity field
    delta = np.zeros((grid, grid, grid), dtype=np.float32)
    if len(ptype) > 1:  #for multiple particles read masses
        mass = np.zeros(pos.shape[0], dtype=np.float32)
        offset = 0
        for j in ptype:
            mass[offset:offset + Nall[j]] = Masses[j]
            offset += Nall[j]
        MASL.MA(pos, delta, BoxSize, MAS, W=mass)
    else:
        MASL.MA(pos, delta, BoxSize, MAS)
    delta /= np.mean(delta, dtype=np.float64)
    #delta -= 1.0

    # smooth the overdensity field
    W_k = SL.FT_filter(BoxSize, smoothing, grid, Filter, threads)
    delta_smoothed = SL.field_smoothing(delta, W_k, threads)

    # histogram of the smoothed 1+delta field; normalize by the total number of
    # cells so that pdf is the fraction of cells in each bin
    bins = np.logspace(-2, 2, 100)
    pdf, bin_edges = np.histogram(delta_smoothed, bins=bins)
    mean = 0.5 * (bin_edges[1:] + bin_edges[:-1])  #bin centers
    pdf = pdf * 1.0 / grid**3

    # save results to file
    np.savetxt(fpdf, np.transpose([mean, pdf]), delimiter='\t')
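A hedged post-processing sketch (not part of the original): the pdf column saved above is the fraction of cells per bin, so dividing by the bin widths turns it into a density in 1+delta.

mean, pdf = np.loadtxt(fpdf, unpack=True)   # the two columns written above
bins = np.logspace(-2, 2, 100)              # must match the binning used in find_pdf
pdf_density = pdf / (bins[1:] - bins[:-1])  # fraction of cells per unit (1+delta)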
Example #3
def Mk(galaxy_pos, Filter, R, p, ds, BoxSize, grid, MAS, axis, threads):

    ''' Measure the marked power spectrum using the `Pylians3` package
    Input:
        galaxy_pos: (N,3) array with the galaxy positions
        Filter:     'Top-Hat' or 'Gaussian'
        R:          parameter of the mark: scale used to define the local density
        p:          parameter of the mark
        ds:         parameter of the mark
        BoxSize:    size of the simulation box (same units as galaxy_pos)
        grid:       scalar: size of the grid where we compute the density
        MAS:        mass assignment scheme, e.g. 'CIC'
        axis:       axis (0, 1 or 2) along which redshift-space distortions are placed
        threads:    scalar: number of OpenMP threads
    Output:
        Pk:         object with the power spectrum: k  = Pk.k3D
                                                    P0 = Pk.Pk[:,0]
                                                    P2 = Pk.Pk[:,1]
                                                    P4 = Pk.Pk[:,2]
    '''
    
    # calculate delta                                                                                                   
    delta = np.zeros((grid,grid,grid), dtype=np.float32)
    MASL.MA(galaxy_pos, delta, BoxSize, MAS)
    delta /= np.mean(delta, dtype=np.float64);  delta -= 1.0
    # smooth delta                                                                                                      
    W_k = SL.FT_filter(BoxSize, R, grid, Filter, threads)
    delta_smoothed = SL.field_smoothing(delta, W_k, threads)
    # marks: interpolate the smoothed overdensity at each galaxy position and
    # apply the mark (func_mark is assumed to be defined elsewhere)
    weight = np.zeros(galaxy_pos.shape[0], dtype=np.float32)
    MASL.CIC_interp(delta_smoothed, BoxSize, galaxy_pos, weight)
    mark = func_mark(weight, ds, p)
    # marked overdensity field
    delta_m = np.zeros((grid, grid, grid), dtype=np.float32)
    MASL.MA(galaxy_pos, delta_m, BoxSize, MAS, W=mark)
    delta_m /= np.mean(delta_m, dtype=np.float64);  delta_m -= 1.0  #float64 mean for precision
    # compute the marked Pk
    Pk = PKL.Pk(delta_m, BoxSize, axis, MAS, threads)
    return Pk
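func_mark is not defined in this snippet. A minimal sketch, assuming the commonly used mark m(delta_R) = [(1 + ds) / (1 + ds + delta_R)]**p, where delta_R is the smoothed overdensity at each tracer; the call at the end uses placeholder values.

def func_mark(delta_R, ds, p):
    # delta_R: smoothed overdensity interpolated at each galaxy position
    # ds, p:   mark parameters (density offset and exponent)
    return ((1.0 + ds) / (1.0 + ds + delta_R))**p

# example call with placeholder values (galaxy_pos: (N,3) float32 array of positions)
# Pk = Mk(galaxy_pos, 'Top-Hat', 10.0, 2.0, 0.25, 1000.0, 256, 'CIC', 0, 4)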
Example #4
def generate_data(realization,cluster):
    sim = realization
    #raise NotImplementedError("Please update the directory of the data to your system below.")
    #snapshot = '/mnt/ceph/users/fvillaescusa/Quijote/Snapshots/latin_hypercube_HR/%d/snapdir_004/snap_004' % (sim,)
    #snapdir = '/mnt/ceph/users/fvillaescusa/Quijote/Halos/latin_hypercube/%d' % (sim,)
    if cluster=='adroit':
        prefix = '/scratch/network/jdh4'
        snapshot = prefix +'/projects/QUIJOTE/Snapshots/fiducial_HR/%d/snapdir_004/snap_004' % (sim,)
        snapdir =  prefix +'/projects/QUIJOTE/Halos/fiducial_HR/%d' % (sim,)
    elif cluster=='tiger':
        snapshot = '/projects/QUIJOTE/Snapshots/fiducial_HR/%d/snapdir_004/snap_004' % (sim,)
        snapdir =  '/projects/QUIJOTE/Halos/fiducial_HR/%d' % (sim,)
    else:
        # default to the Quijote latin-hypercube paths; warn if the data cannot be found
        snapshot = '/mnt/ceph/users/fvillaescusa/Quijote/Snapshots/latin_hypercube_HR/%d/snapdir_004/snap_004' % (sim,)
        snapdir = '/mnt/ceph/users/fvillaescusa/Quijote/Halos/latin_hypercube/%d' % (sim,)
        if not os.path.isdir(snapdir):
            print("Path not found. Wrong cluster or data does not exist.")

    #snapshot = '/projects/QUIJOTE/Snapshots/fiducial_HR/%d/snapdir_004/snap_004' % (sim,)
    #snapdir =  '/projects/QUIJOTE/Halos/fiducial_HR/%d' % (sim,)
    snapnum = 4

    # parameters for the density field
    grid   = 1024  #density field will have grid^3 voxels
    ptypes = [1]   #CDM
    MAS    = 'CIC' #mass assignment scheme
    do_RSD = False #don't apply redshift-space distortions
    axis   = 0     #only needed if do_RSD=True

    # parameters for smoothing
    BoxSize = 1000.0    #Mpc/h
    R       = 20.0      #Mpc/h
    Filter  = 'Top-Hat' #'Top-Hat' or 'Gaussian'
    threads = 28        #number of OpenMP threads

    # compute the density contrast field of the snapshot (delta = rho/<rho> - 1)
    delta = MASL.density_field_gadget(snapshot, ptypes, grid, MAS, do_RSD, axis)
    delta /= np.mean(delta, dtype=np.float64);  delta -= 1.0

    # smooth the field on a given scale
    W_k = SL.FT_filter(BoxSize, R, grid, Filter, threads)
    delta_smoothed = SL.field_smoothing(delta, W_k, threads)

    # read the halo catalogue
    z_dict = {4:0.0, 3:0.5, 2:1.0, 1:2.0, 0:3.0}
    redshift = z_dict[snapnum]
    FoF = readfof.FoF_catalog(snapdir, snapnum, long_ids=False,
                              swap=False, SFR=False, read_IDs=False)
    pos_h = FoF.GroupPos/1e3            #Halo positions in Mpc/h  
    mass  = FoF.GroupMass*1e10          #Halo masses in Msun/h   
    vel_h = FoF.GroupVel*(1.0+redshift) #Halo peculiar velocities in km/s    

    # interpolate to find the value of the smoothed overdensity field at the
    # position of each halo; delta_h will contain that value for each halo
    delta_h = np.zeros(pos_h.shape[0], dtype=np.float32)
    MASL.CIC_interp(delta_smoothed, BoxSize, pos_h, delta_h)

    # save the halo catalogue to an HDF5 file
    cur_data = pd.DataFrame({
        'x': pos_h[:, 0],
        'y': pos_h[:, 1],
        'z': pos_h[:, 2],
        'vx': vel_h[:, 0],
        'vy': vel_h[:, 1],
        'vz': vel_h[:, 2],
        'M14': mass/1e14,
        'delta': delta_h
    })

    cur_data.to_hdf('halos_%d.h5' % (sim,), 'df')
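A minimal driver sketch (assumed, not from the original script): the realization indices and cluster name are placeholders, and the function relies on numpy, pandas (pd), MAS_library (MASL), smoothing_library (SL) and readfof being imported.

for realization in range(3):            # hypothetical choice of realizations
    generate_data(realization, 'tiger')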
Example #5
            # get name of output file
            fout = '../Results/Results_Gaussian_%s_HR_%s_z=%s.hdf5' % (
                cosmo, R, z[snapnum])
            if os.path.exists(fout): continue

            # define the arrays containing the variance and the pdf of the fields
            var_tot = np.zeros(realizations, dtype=np.float64)
            pdf_tot = np.zeros((realizations, bins), dtype=np.float64)

            # compute delta_max from snapshot
            snapshot = '%s/%s/0_HR/snapdir_%03d/snap_%03d' % (root, cosmo,
                                                              snapnum, snapnum)
            delta = MASL.density_field_gadget(snapshot, ptypes, grid, MAS,
                                              do_RSD, axis)
            delta = delta / np.mean(delta, dtype=np.float64)
            delta_smoothed = SL.field_smoothing(delta, W_k,
                                                threads)  #smooth the field
            delta_min, delta_max = np.min(delta_smoothed), np.max(
                delta_smoothed)
            del delta

            # define the pdf bins
            pdf_bins = np.linspace(delta_min, delta_max, bins + 1)
            pdf_mean = 0.5 * (pdf_bins[1:] + pdf_bins[:-1])
            pdf_width = pdf_bins[1:] - pdf_bins[:-1]

            # compute the pdf and variance of realization 0, whose smoothed
            # field was computed above to set the bin range
            i = 0
            var_tot[i] = np.var(delta_smoothed)
            pdf_tot[i] = np.histogram(delta_smoothed, bins=pdf_bins)[0]
            pdf_tot[i] = pdf_tot[i] / pdf_width / np.sum(pdf_tot[i],
                                                         dtype=np.float64)
Example #6
numbers = np.random.randint(0, grid**3, 64**3)

W_k = SL.FT_filter(BoxSize, R, grid, Filter, threads)

for z in [0, 1, 2, 3, 4, 5]:

    f = h5py.File('../HI_bias/fields_z=%.1f.hdf5' % z, 'r')
    delta_HI = f['delta_HI'][:]
    delta_m = f['delta_m'][:]
    f.close()

    # Omega_HI = sum(M_HI)/(rho_crit*V), with rho_crit = 2.775e11 (Msun/h)/(Mpc/h)^3
    print('Omega_HI(z=%d) = %.4e'
          % (z, np.sum(delta_HI, dtype=np.float64)/(BoxSize**3*2.775e11)))
    print('%.2f < M_HI < %.2f' % (np.min(delta_HI), np.max(delta_HI)))

    delta_HI_new = SL.field_smoothing(delta_HI, W_k, threads)
    delta_m_new = SL.field_smoothing(delta_m, W_k, threads)

    #delta_HI_new = TR.grid_reducer(delta_HI, dims)
    #delta_m_new  = TR.grid_reducer(delta_m,  dims)

    # check that smoothing conserves the total HI mass
    print('Omega_HI(z=%d) = %.4e'
          % (z, np.sum(delta_HI_new, dtype=np.float64)/(BoxSize**3*2.775e11)))
    print('%.2f < M_HI < %.2f' % (np.min(delta_HI_new), np.max(delta_HI_new)))

    delta_HI_new /= np.mean(delta_HI_new)
    delta_m_new /= np.mean(delta_m_new)

    delta_HI_new = np.ravel(delta_HI_new)
    delta_m_new = np.ravel(delta_m_new)