# Imports assumed by the snippets below (Pylians3 modules plus numpy, os and
# pandas); they are omitted in the original extracts.
import os
import numpy as np
import pandas as pd
import readgadget
import readfof
import MAS_library as MASL
import Pk_library as PKL
import smoothing_library as SL
import redshift_space_library as RSL


def find_pdf(snapshot, grid, MAS, do_RSD, axis, threads, ptype, fpdf,
             smoothing, Filter):

    if os.path.exists(fpdf): return 0

    # read header
    head = readgadget.header(snapshot)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Nall = head.nall  #Total number of particles
    Masses = head.massarr * 1e10  #Masses of the particles in Msun/h
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
    h = head.hubble

    # read snapshot
    pos = readgadget.read_block(snapshot, "POS ", ptype) / 1e3  #Mpc/h

    # move particles to redshift-space
    if do_RSD:
        vel = readgadget.read_block(snapshot, "VEL ", ptype)  #km/s
        RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)

    # calculate the overdensity field
    delta = np.zeros((grid, grid, grid), dtype=np.float32)
    if len(ptype) > 1:  #for multiple particles read masses
        mass = np.zeros(pos.shape[0], dtype=np.float32)
        offset = 0
        for j in ptype:
            mass[offset:offset + Nall[j]] = Masses[j]
            offset += Nall[j]
        MASL.MA(pos, delta, BoxSize, MAS, W=mass)
    else:
        MASL.MA(pos, delta, BoxSize, MAS)
    delta /= np.mean(delta, dtype=np.float64)
    #delta -= 1.0

    # define the arrays containing the variance of the field and of its log
    var = np.zeros(smoothing.shape[0], dtype=np.float64)
    var_log = np.zeros(smoothing.shape[0], dtype=np.float64)

    # do a loop over the different smoothing scales
    for i, smooth_scale in enumerate(smoothing):

        # smooth the overdensity field
        W_k = SL.FT_filter(BoxSize, smooth_scale, grid, Filter, threads)
        delta_smoothed = SL.field_smoothing(delta, W_k, threads)

        # compute the variance of the field
        var[i] = np.var(delta_smoothed)

        # variance of log10(rho/<rho>), using only cells with positive density
        indexes = np.where(delta_smoothed > 0.0)
        var_log[i] = np.var(np.log10(delta_smoothed[indexes]))

    # save results to file
    np.savetxt(fpdf, np.transpose([smoothing, var, var_log]), delimiter='\t')
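A minimal driver for the function above, for orientation only; the snapshot path, grid size and smoothing radii are illustrative placeholders, not values taken from the original script.

# Illustrative call to find_pdf; every value here is a placeholder.
snapshot  = '/path/to/snapdir_004/snap_004'    # Gadget snapshot prefix (placeholder)
grid      = 512                                # voxels per dimension
MAS       = 'CIC'                              # mass assignment scheme
do_RSD    = False                              # stay in real space
axis      = 0                                  # only used when do_RSD is True
threads   = 4                                  # OpenMP threads
ptype     = [1]                                # CDM particles
fpdf      = 'variance_vs_R.txt'                # output file with R, var, var_log columns
smoothing = np.array([5.0, 10.0, 15.0, 20.0])  # smoothing radii in Mpc/h
Filter    = 'Top-Hat'                          # 'Top-Hat' or 'Gaussian'

find_pdf(snapshot, grid, MAS, do_RSD, axis, threads, ptype, fpdf,
         smoothing, Filter)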
Example #2
def find_pdf(snapshot, grid, MAS, do_RSD, axis, threads, ptype, fpdf,
             smoothing, Filter):

    if os.path.exists(fpdf): return 0

    # read header
    head = readgadget.header(snapshot)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Nall = head.nall  #Total number of particles
    Masses = head.massarr * 1e10  #Masses of the particles in Msun/h
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
    h = head.hubble

    # read snapshot
    pos = readgadget.read_block(snapshot, "POS ", ptype) / 1e3  #Mpc/h

    # move particles to redshift-space
    if do_RSD:
        vel = readgadget.read_block(snapshot, "VEL ", ptype)  #km/s
        RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)

    # calculate the overdensity field
    delta = np.zeros((grid, grid, grid), dtype=np.float32)
    if len(ptype) > 1:  #for multiple particles read masses
        mass = np.zeros(pos.shape[0], dtype=np.float32)
        offset = 0
        for j in ptype:
            mass[offset:offset + Nall[j]] = Masses[j]
            offset += Nall[j]
        MASL.MA(pos, delta, BoxSize, MAS, W=mass)
    else:
        MASL.MA(pos, delta, BoxSize, MAS)
    delta /= np.mean(delta, dtype=np.float64)
    #delta -= 1.0

    # smooth the overdensity field
    W_k = SL.FT_filter(BoxSize, smoothing, grid, Filter, threads)
    delta_smoothed = SL.field_smoothing(delta, W_k, threads)

    # histogram the smoothed field (rho/<rho>) in logarithmic bins
    bins = np.logspace(-2, 2, 100)
    pdf, edges = np.histogram(delta_smoothed, bins=bins)
    bin_centers = 0.5 * (edges[1:] + edges[:-1])
    pdf = pdf * 1.0 / grid**3  # fraction of grid cells in each bin

    # save results to file
    np.savetxt(fpdf, np.transpose([bin_centers, pdf]), delimiter='\t')
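An aside on the normalization above: the saved pdf column is the fraction of grid cells per logarithmic bin, not a probability density. A small sketch of how one could renormalize it after loading the output file; this step is not in the original snippet and the file name is a placeholder for whatever was passed as fpdf.

# Sketch only: re-load the output of find_pdf and turn the per-bin cell
# fractions into a probability density over rho/<rho>.
bin_centers, cell_fraction = np.loadtxt('pdf_output.txt', unpack=True)  # placeholder file name
edges = np.logspace(-2, 2, 100)                  # the same bins used in find_pdf above
pdf_density = cell_fraction / (edges[1:] - edges[:-1])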
Example #3
def Mk(galaxy_pos, Filter, R, p, ds, BoxSize, grid, MAS, threads):
    
    ''' Measure the marked power spectrum using the `Pylians3` package.
    Input:
        galaxy_pos: (N,3) array of galaxy positions
        Filter:     'Top-Hat' or 'Gaussian'
        R:          parameter of the mark: smoothing scale used to define the local density
        p:          parameter of the mark
        ds:         parameter of the mark
        BoxSize:    size of the simulation box in Mpc/h
        grid:       scalar: size of the grid on which the density is computed
        MAS:        mass assignment scheme, e.g. 'CIC'
        threads:    scalar: number of OpenMP threads
    Output:
        Pk:         object with the power spectrum: k  = Pk.k3D
                                                    P0 = Pk.Pk[:,0]
                                                    P2 = Pk.Pk[:,1]
                                                    P4 = Pk.Pk[:,2]
    '''
    
    # calculate delta                                                                                                   
    delta = np.zeros((grid,grid,grid), dtype=np.float32)
    MASL.MA(galaxy_pos, delta, BoxSize, MAS)
    delta /= np.mean(delta, dtype=np.float64);  delta -= 1.0
    # smooth delta                                                                                                      
    W_k = SL.FT_filter(BoxSize, R, grid, Filter, threads)
    delta_smoothed = SL.field_smoothing(delta, W_k, threads)
    # marks: interpolate the smoothed overdensity to the galaxy positions and
    # pass it through the mark function (func_mark must be defined elsewhere)
    weight = np.zeros(galaxy_pos.shape[0], dtype=np.float32)
    MASL.CIC_interp(delta_smoothed, BoxSize, galaxy_pos, weight)
    mark = func_mark(weight, ds, p)
    delta_m = np.zeros((grid, grid, grid), dtype=np.float32)
    MASL.MA(galaxy_pos, delta_m, BoxSize, MAS, W=mark)
    delta_m /= np.mean(delta_m, dtype=np.float64);  delta_m -= 1.0
    # compute marked Pk (axis only matters in redshift space; 0 for real space)
    axis = 0
    Pk = PKL.Pk(delta_m, BoxSize, axis, MAS, threads)
    return Pk
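func_mark is not defined in this snippet. A common choice in the marked power spectrum literature (e.g. White 2016) is m(delta_R) = [(1 + delta_s) / (1 + delta_s + delta_R)]^p. A minimal sketch matching the call func_mark(weight, ds, p) above, offered as an assumption rather than the author's actual definition:

def func_mark(delta_R, ds, p):
    # delta_R: smoothed overdensity at each galaxy; ds, p: parameters of the mark.
    # Cast to float32 because MASL.MA expects float32 weights.
    return (((1.0 + ds) / (1.0 + ds + delta_R))**p).astype(np.float32)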
Example #4
def generate_data(realization, cluster):
    sim = realization
    #raise NotImplementedError("Please update the directory of the data to your system below.")
    #snapshot = '/mnt/ceph/users/fvillaescusa/Quijote/Snapshots/latin_hypercube_HR/%d/snapdir_004/snap_004' % (sim,)
    #snapdir = '/mnt/ceph/users/fvillaescusa/Quijote/Halos/latin_hypercube/%d' % (sim,)
    if cluster == 'adroit':
        prefix = '/scratch/network/jdh4'
        snapshot = prefix + '/projects/QUIJOTE/Snapshots/fiducial_HR/%d/snapdir_004/snap_004' % (sim,)
        snapdir  = prefix + '/projects/QUIJOTE/Halos/fiducial_HR/%d' % (sim,)
    elif cluster == 'tiger':
        snapshot = '/projects/QUIJOTE/Snapshots/fiducial_HR/%d/snapdir_004/snap_004' % (sim,)
        snapdir  = '/projects/QUIJOTE/Halos/fiducial_HR/%d' % (sim,)
    else:
        snapshot = '/mnt/ceph/users/fvillaescusa/Quijote/Snapshots/latin_hypercube_HR/%d/snapdir_004/snap_004' % (sim,)
        snapdir  = '/mnt/ceph/users/fvillaescusa/Quijote/Halos/latin_hypercube/%d' % (sim,)
        # string formatting never fails for a missing path, so check explicitly
        if not os.path.isdir(snapdir):
            print("Path not found. Wrong cluster or data DNE.")

    #snapshot = '/projects/QUIJOTE/Snapshots/fiducial_HR/%d/snapdir_004/snap_004' % (sim,)
    #snapdir =  '/projects/QUIJOTE/Halos/fiducial_HR/%d' % (sim,)
    snapnum = 4

    # parameters for density field
    grid   = 1024  #density field will have grid^3 voxels
    ptypes = [1]   #CDM
    MAS    = 'CIC' #mass assignment scheme
    do_RSD = False #don't do redshift-space distortions
    axis   = 0     #only needed if do_RSD=True

    # parameters for smoothing
    BoxSize = 1000.0    #Mpc/h
    R       = 20.0      #Mpc/h
    Filter  = 'Top-Hat' #'Top-Hat' or 'Gaussian'
    threads = 28        #number of openmp threads
    ###################################################################################

    # Computing density contrast field:
    # compute density field of the snapshot (density contrast delta = rho/<rho> - 1)
    delta = MASL.density_field_gadget(snapshot, ptypes, grid, MAS, do_RSD, axis)
    delta /= np.mean(delta, dtype=np.float64);  delta -= 1.0

    # Smooth density field:
    # smooth the field on a given scale
    W_k = SL.FT_filter(BoxSize, R, grid, Filter, threads)
    delta_smoothed = SL.field_smoothing(delta, W_k, threads)

    # Load halo properties:
    # read halo catalogue
    z_dict = {4: 0.0, 3: 0.5, 2: 1.0, 1: 2.0, 0: 3.0}
    redshift = z_dict[snapnum]
    FoF = readfof.FoF_catalog(snapdir, snapnum, long_ids=False,
                              swap=False, SFR=False, read_IDs=False)
    pos_h = FoF.GroupPos/1e3            #Halo positions in Mpc/h
    mass  = FoF.GroupMass*1e10          #Halo masses in Msun/h
    vel_h = FoF.GroupVel*(1.0+redshift) #Halo peculiar velocities in km/s

    # Find overdensity at each halo:
    # interpolate the smoothed overdensity field to the position of each halo;
    # delta_h will contain the value of the smoothed overdensity at each halo
    delta_h = np.zeros(pos_h.shape[0], dtype=np.float32)
    MASL.CIC_interp(delta_smoothed, BoxSize, pos_h, delta_h)

    # Save:

    cur_data = pd.DataFrame({
        'x': pos_h[:, 0],
        'y': pos_h[:, 1],
        'z': pos_h[:, 2],
        'vx': vel_h[:, 0],
        'vy': vel_h[:, 1],
        'vz': vel_h[:, 2],
        'M14': mass/1e14,
        'delta': delta_h
    })

    cur_data.to_hdf('halos_%d.h5' % (sim,), 'df')
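A hedged usage sketch for the function above; the realization index and cluster name are placeholders.

# Illustrative only: build the catalogue for one realization and read it back.
generate_data(0, 'tiger')                # writes halos_0.h5 in the working directory
df = pd.read_hdf('halos_0.h5', 'df')     # columns: x, y, z, vx, vy, vz, M14, delta
print(df.shape)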
Example #5
BoxSize = 1000.0  #Mpc/h
Filter = 'Gaussian'
threads = 1

# pdf parameters
bins = 200
###############################################################################

# redshift dictionary
z = {10: 0, 9: 0.5, 8: 1, 7: 2, 6: 3, 5: 4, 4: 5, 3: 6, 2: 7, 1: 8, 0: 9}

# do a loop over the different smoothing scales
for R in [2.0, 3.0, 4.0, 5.0, 7.5, 10.0]:

    # compute FFT of the filter
    W_k = SL.FT_filter(BoxSize, R, grid, Filter, threads)

    # do a loop over the different neutrino masses
    for cosmo in ['0.1eV', '0.2eV', '0.4eV']:

        # do a loop over the different redshifts
        for snapnum in [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]:

            # get name of output file
            fout = '../Results/Results_Gaussian_%s_HR_%s_z=%s.hdf5' % (
                cosmo, R, z[snapnum])
            if os.path.exists(fout): continue

            # define the arrays containing the variance and the pdf of the fields
            var_tot = np.zeros(realizations, dtype=np.float64)
            pdf_tot = np.zeros((realizations, bins), dtype=np.float64)
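The snippet is truncated here. Purely for orientation, a self-contained toy version of how per-realization variance and PDF arrays like the ones above are typically filled, with a random field standing in for the real smoothed overdensity; none of this is from the original script.

# Toy sketch only: fill per-realization variance and PDF arrays.
realizations_demo, grid_demo, bins_demo = 2, 64, 200
bin_edges = np.logspace(-2, 2, bins_demo + 1)
var_demo = np.zeros(realizations_demo, dtype=np.float64)
pdf_demo = np.zeros((realizations_demo, bins_demo), dtype=np.float64)
for i in range(realizations_demo):
    field = 1.0 + 0.1*np.random.randn(grid_demo, grid_demo, grid_demo)  # toy 1+delta field
    var_demo[i] = np.var(field)
    counts, _ = np.histogram(field, bins=bin_edges)
    pdf_demo[i] = counts / field.size   # fraction of cells per bin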