def find_voids(snapshot, ptypes, grid, MAS, do_RSD, axis, threshold, Radii,
               threads1, threads2, fout):
    
    # read snapshot header and obtain BoxSize and redshift
    head     = readgadget.header(snapshot)
    BoxSize  = head.boxsize/1e3  #Mpc/h                      
    redshift = head.redshift
    
    # compute density field
    delta = MASL.density_field_gadget(snapshot, ptypes, grid, MAS, do_RSD, axis)
    delta /= np.mean(delta, dtype=np.float64);  delta -= 1.0
    
    # identify voids
    V = VL.void_finder(delta, BoxSize, threshold, Radii, 
                       threads1, threads2, void_field=False)

    # void properties and void size function
    void_pos    = V.void_pos
    void_radius = V.void_radius
    VSF_R       = V.Rbins     # bins in radius
    VSF         = V.void_vsf  # void size function
        
    parameters = [grid, MAS, '%s'%ptypes, threshold, '%s'%Radii]

    # save the results to file
    f = h5py.File(fout, 'w')
    f.create_dataset('parameters',    data=parameters)
    f.create_dataset('pos',           data=void_pos)
    f.create_dataset('radius',        data=void_radius)
    f.create_dataset('VSF_Rbins',     data=VSF_R)
    f.create_dataset('VSF',           data=VSF)
    f.close()
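
A minimal call sketch for the routine above; the snapshot path, grid size, density threshold and radii grid are illustrative placeholders, not values from the original example.

import numpy as np

snapshot = '/path/to/snapdir_004/snap_004'                 # hypothetical snapshot path
Radii    = np.array([5, 7, 9, 11, 13, 15, 18, 21, 25, 29,
                     33, 37, 41, 45], dtype=np.float32)    # void radii to probe [Mpc/h] (assumption)
find_voids(snapshot, ptypes=[1], grid=512, MAS='CIC', do_RSD=False, axis=0,
           threshold=-0.7, Radii=Radii, threads1=16, threads2=4,
           fout='voids_z=0.hdf5')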
Example No. 2
def geometry(snapshot_fname, plane, x_min, x_max, y_min, y_max, z_min, z_max):

    # read snapshot head and obtain BoxSize
    head = readgadget.header(snapshot_fname)
    BoxSize = head.boxsize / 1e3  #Mpc/h

    plane_dict = {'XY': [0, 1], 'XZ': [0, 2], 'YZ': [1, 2]}

    # find the slice geometry for the selected plane
    if plane == 'XY':
        length1 = x_max - x_min
        length2 = y_max - y_min
        depth = z_max - z_min
        offset1 = x_min
        offset2 = y_min
    elif plane == 'XZ':
        length1 = x_max - x_min
        length2 = z_max - z_min
        depth = y_max - y_min
        offset1 = x_min
        offset2 = z_min
    else:
        length1 = y_max - y_min
        length2 = z_max - z_min
        depth = x_max - x_min
        offset1 = y_min
        offset2 = z_min
    # check that the plane is square
    if length1 != length2:
        print('Plane has to be a square!!!')
        sys.exit()
    BoxSize_slice = length1

    return length1, offset1, length2, offset2, depth, BoxSize_slice
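
For instance, a hypothetical 100x100 (Mpc/h)^2 square in the XY plane with a 10 Mpc/h depth would be described as follows; the snapshot path is a placeholder.

len1, off1, len2, off2, depth, BoxSize_slice = \
    geometry('/path/to/snap_004', 'XY', 0.0, 100.0, 0.0, 100.0, 0.0, 10.0)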
def compute_vf(snapshot, ptypes, grid, axis, MAS, fout):
    if not (os.path.exists(snapshot + '.0')): return 0

    # read header
    header = readgadget.header(snapshot)
    BoxSize = header.boxsize / 1e3  #Mpc/h
    Nall = header.nall  #Total number of particles
    Masses = header.massarr * 1e10  #Masses of the particles in Msun/h
    redshift = header.redshift  #redshift of the snapshot

    # read positions and velocities
    pos = readgadget.read_block(snapshot, "POS ", ptypes) / 1e3  #Mpc/h
    vel = readgadget.read_block(snapshot, "VEL ", ptypes)  #km/s

    # compute density field
    df = np.zeros((grid, grid, grid), dtype=np.float32)
    MASL.MA(pos, df, BoxSize, MAS)
    df[np.where(df == 0)] = 1e-7  # to avoid dividing by 0

    # compute the velocity field
    vf = np.zeros((grid, grid, grid), dtype=np.float32)
    MASL.MA(pos, vf, BoxSize, MAS, W=vel[:, axis])
    vf = vf / df

    # save the velocity field to file (the density field df is only used to normalize vf)
    np.save(fout, vf)
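
A possible call, assuming a multi-file Gadget snapshot (the routine checks for snapshot.0); the path, grid size and output name are placeholders.

compute_vf('/path/to/snapdir_004/snap_004', ptypes=[1], grid=256, axis=0,
           MAS='CIC', fout='v_x_field_256.npy')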
Example No. 4
def measure_Overdensity(snapshot):
    ''' Measure delta from a snapshot using pylians3 mass-assignment scheme
    '''

    print("Reading snapshot file: %s \n" % (snapshot, ))
    try:
        # read header
        header = readgadget.header(snapshot)
        Boxsize = header.boxsize  #/1e3     #Mpc/h. For Gadget2 snapshots uncomment /1e3
        Nall = header.nall  #Total number of particles
        Masses = header.massarr * 1e10  #Masses of the particles in Msun/h format:[1](CDM), [2](neutrinos) or [1,2](CDM+neutrinos)
        ptype = [Nall.nonzero()[0][0]]  #[1](CDM), [2](neutrinos) or [1,2](CDM+neutrinos)
    except OSError:
        print("Error: Overdensity File Not Found.")
        exit()
    print('Checking that the snapshot header agrees with the input parameters from initializeGlobals.')
    print('Boxsize: %s should be equal to %s' % (BoxSize, Boxsize))
    print('Particle type: %i should be equal to %i' % (ptypes[0], ptype[0]))

    rho = MASL.density_field_gadget(snapshot, ptypes, gridSize, MAS, do_RSD,
                                    axis)
    delta = rho / np.mean(rho, dtype=np.float64)
    delta -= 1.0  #overdensity

    return delta
def find_pdf(snapshot, grid, MAS, do_RSD, axis, threads, ptype, fpdf,
             smoothing, Filter):

    if os.path.exists(fpdf): return 0

    # read header
    head = readgadget.header(snapshot)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Nall = head.nall  #Total number of particles
    Masses = head.massarr * 1e10  #Masses of the particles in Msun/h
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
    h = head.hubble

    # read snapshot
    pos = readgadget.read_block(snapshot, "POS ", ptype) / 1e3  #Mpc/h

    # move particles to redshift-space
    if do_RSD:
        vel = readgadget.read_block(snapshot, "VEL ", ptype)  #km/s
        RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)

    # calculate the overdensity field
    delta = np.zeros((grid, grid, grid), dtype=np.float32)
    if len(ptype) > 1:  #for multiple particles read masses
        mass = np.zeros(pos.shape[0], dtype=np.float32)
        offset = 0
        for j in ptype:
            mass[offset:offset + Nall[j]] = Masses[j]
            offset += Nall[j]
        MASL.MA(pos, delta, BoxSize, MAS, W=mass)
    else:
        MASL.MA(pos, delta, BoxSize, MAS)
    delta /= np.mean(delta, dtype=np.float64)
    #delta -= 1.0

    # define the array containing the variance
    var = np.zeros(smoothing.shape[0], dtype=np.float64)
    var_log = np.zeros(smoothing.shape[0], dtype=np.float64)

    # do a loop over the different smoothing scales
    for i, smooth_scale in enumerate(smoothing):

        # smooth the overdensity field
        W_k = SL.FT_filter(BoxSize, smooth_scale, grid, Filter, threads)
        delta_smoothed = SL.field_smoothing(delta, W_k, threads)

        # compute the variance of the field
        var[i] = np.var(delta_smoothed)

        indexes = np.where(delta_smoothed > 0.0)
        var_log[i] = np.var(np.log10(delta_smoothed[indexes]))

    # save results to file
    np.savetxt(fpdf, np.transpose([smoothing, var, var_log]), delimiter='\t')
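
A sketch of how this variance-versus-smoothing routine might be invoked; the smoothing radii, filter name and paths are assumptions (Pylians' smoothing library accepts filters such as 'Top-Hat' or 'Gaussian').

import numpy as np

smoothing = np.array([5.0, 10.0, 15.0, 20.0], dtype=np.float32)  # smoothing radii [Mpc/h] (assumption)
find_pdf('/path/to/snapdir_004/snap_004', grid=512, MAS='CIC', do_RSD=False,
         axis=0, threads=8, ptype=[1], fpdf='variance_vs_R.txt',
         smoothing=smoothing, Filter='Top-Hat')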
Example No. 6
def Pk_comp(snapshot_fname,ptype,dims,do_RSD,axis,cpus,folder_out):

    # read relevant parameters from the header
    print('Computing power spectrum...')
    head     = readgadget.header(snapshot_fname)
    BoxSize  = head.boxsize/1e3 #Mpc/h
    Masses   = head.massarr*1e10 #Msun/h
    Nall     = head.nall;  Ntotal = np.sum(Nall,dtype=np.int64)
    Omega_m  = head.omega_m
    Omega_l  = head.omega_l
    redshift = head.redshift
    Hubble   = 100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #km/s/(Mpc/h)
    z        = '%.3f'%redshift
        
    # find output file name
    fout = folder_out+'/Pk_' + name_dict[str(ptype)]
    if do_RSD:  fout += ('_RS_axis=' + str(axis) + '_z=' + z + '.dat')
    else:       fout +=                           ('_z=' + z + '.dat')

    # read the positions of the particles
    pos = readgadget.read_block(snapshot_fname,"POS ",[ptype])/1e3 #Mpc/h
    print('%.3f < X [Mpc/h] < %.3f'%(np.min(pos[:,0]),np.max(pos[:,0])))
    print('%.3f < Y [Mpc/h] < %.3f'%(np.min(pos[:,1]),np.max(pos[:,1])))
    print('%.3f < Z [Mpc/h] < %.3f\n'%(np.min(pos[:,2]),np.max(pos[:,2])))

    # read the velocities of the particles
    if do_RSD:
        print('moving particles to redshift-space...')
        vel = readgadget.read_block(snapshot_fname,"VEL ",[ptype]) #km/s
        RSL.pos_redshift_space(pos,vel,BoxSize,Hubble,redshift,axis)
        del vel;  print('done')

    # define delta array
    delta = np.zeros((dims,dims,dims),dtype=np.float32)

    # when dealing with all particles take into account their different masses
    if ptype==-1:
        if Nall[0]==0: #if not hydro
            M = np.zeros(Ntotal,dtype=np.float32) #define the mass array
            offset = 0
            for ptype in [0,1,2,3,4,5]:
                M[offset:offset+Nall[ptype]] = Masses[ptype]
                offset += Nall[ptype]
        else:
            M = readgadget.read_block(snapshot_fname,"MASS",ptype=[-1])*1e10
        
        mean = np.sum(M,dtype=np.float64)/dims**3
        MASL.MA(pos,delta,BoxSize,'CIC',M); del pos,M

    else:  
        mean = len(pos)*1.0/dims**3
        MASL.MA(pos,delta,BoxSize,'CIC'); del pos

    # compute the P(k) and save results to file
    delta /= mean;  delta -= 1.0
    Pk = PKL.Pk(delta,BoxSize,axis=axis,MAS='CIC',threads=cpus);  del delta
    np.savetxt(fout,np.transpose([Pk.k3D, Pk.Pk[:,0], Pk.Pk[:,1], Pk.Pk[:,2],
                                  Pk.Nmodes3D]))
Example No. 7
def compute_Pk(snapshot, grid, MAS, threads, ptype, root_out):

    # read header
    if not(os.path.exists(snapshot)):  return 0
    head     = readgadget.header(snapshot)
    BoxSize  = head.boxsize/1e3  #Mpc/h  
    Nall     = head.nall         #Total number of particles
    Masses   = head.massarr*1e10 #Masses of the particles in Msun/h                    
    Omega_m  = head.omega_m
    Omega_l  = head.omega_l
    redshift = head.redshift
    Hubble   = 100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)#km/s/(Mpc/h)
    h        = head.hubble
    Ntot     = np.sum(Nall[ptype], dtype=np.int64)

    # get the name of the output file
    fout = '%s/Pk_%s_z=%.2f.txt'%(root_out, Pk_suffix(ptype), redshift)
    if os.path.exists(fout):  return 0

    # define the arrays containing the positions and masses of the particles
    pos  = np.zeros((Ntot,3), dtype=np.float32)
    mass = np.zeros(Ntot,     dtype=np.float32)

    # read data for the different particle types
    f = h5py.File(snapshot, 'r');  offset = 0
    for pt in ptype:
        # sometimes there are not black-holes or stars...
        if 'PartType%d'%pt not in f.keys():  continue

        # read positions
        pos_pt  = f['PartType%d/Coordinates'%pt][:]/1e3  #Mpc/h
        if pos_pt.dtype==np.float64:  pos_pt = pos_pt.astype(np.float32)

        # read masses
        if 'PartType%d/Masses'%pt in f:
            mass_pt = f['PartType%d/Masses'%pt][:]*1e10                    #Msun/h
        else:
            mass_pt = np.ones(pos_pt.shape[0], dtype=np.float32)*Masses[1] #Msun/h

        # fill pos and mass arrays
        length  = len(pos_pt)
        pos[offset:offset+length]  = pos_pt
        mass[offset:offset+length] = mass_pt
        offset += length
    f.close()
    if offset!=Ntot:  raise Exception('Not all particles counted')

    # calculate density field
    delta = np.zeros((grid,grid,grid), dtype=np.float32)
    MASL.MA(pos, delta, BoxSize, MAS, W=mass)
    delta /= np.mean(delta, dtype=np.float64);  delta -= 1.0 

    # compute Pk and save results to file
    axis = 0
    Pk = PKL.Pk(delta, BoxSize, axis, MAS, threads)
    np.savetxt(fout, np.transpose([Pk.k3D, Pk.Pk[:,0]]), delimiter='\t')
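
A hypothetical call computing the total matter P(k) of a hydrodynamic hdf5 snapshot (gas, CDM, stars and black holes); it assumes the Pk_suffix helper used above is defined in the same module, and the paths are placeholders.

compute_Pk('/path/to/snap_033.hdf5', grid=512, MAS='CIC', threads=8,
           ptype=[0, 1, 4, 5], root_out='/path/to/results')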
Example No. 8
def compute_Pk_ratio(root, sim, i, snapnum, root_out):

    # find the number of N-body counterpart
    #if   i in map(str,np.arange(1522,1566)):                       j = 1505
    #elif i in ['1505_0', '1505_1', '1505_2', '1505_3', '1505_4']:  j = 1505
    #elif i in ['0_test', '0_clean0', '0_clean1']:                  j = 0
    #else:                                                          j = i

    #if i in ['1505_0', '1505_1', '1505_2', '1505_3', '1505_4', '1505_5', '1505_6',
    #         '1505_7', '1505_8', '1505_9', '1505_10', '1505_11', '1505_12', '1505_13',
    #         '1505_14', '1505_15', '1505_16', '1505_17', '1505_18', '1505_19', 
    #         '1505_20', '1505_21', '1505_22', '1505_23', '1505_24', '1505_25', 
    #         '1505_26'] and sim=='IllustrisTNG':
    #    sim2='SIMBA'
    #else:  sim2=sim

    # get the names of the snapshots
    #snapshot1 = '%s/Sims/%s/%s/snap_%03d.hdf5'%(root,sim,i,snapnum)
    #snapshot2 = '%s/Sims/%s_DM/%s/snap_%03d.hdf5'%(root,sim2,j,snapnum)
    snapshot1 = '%s/Sims/%s/%s/snap_%03d.hdf5'%(root,sim,i,snapnum)
    snapshot2 = '%s/Sims/%s_DM/%s/snap_%03d.hdf5'%(root,sim,i,snapnum)
    if not(os.path.exists(snapshot1)) or not(os.path.exists(snapshot2)): return 0

    # read the redshifts of the snapshots
    redshift1 = (readgadget.header(snapshot1)).redshift
    redshift2 = (readgadget.header(snapshot2)).redshift

    # get the name of the output file
    fout = '%s/Pk_ratio_m_z=%.2f.txt'%(root_out, redshift2)
    if os.path.exists(fout):  return 0

    # get the name of the power spectra
    f_hydro = '%s/Results/Pk/%s/%s/Pk_m_z=%.2f.txt'%(root,sim,i,redshift1)
    f_nbody = '%s/Results/Pk/%s_DM/%s/Pk_m_z=%.2f.txt'%(root,sim,i,redshift2)
    #f_nbody = '%s/Results/Pk/%s_DM/%s/Pk_m_z=%.2f.txt'%(root,sim,j,redshift2)
    if not(os.path.exists(f_hydro)) or not(os.path.exists(f_nbody)):  return 0

    # read power spectra and save results to file
    k1, Pk_hydro = np.loadtxt(f_hydro, unpack=True)
    k2, Pk_nbody = np.loadtxt(f_nbody, unpack=True)
    if np.any(k1!=k2):  raise Exception('k-values differ!')
    np.savetxt(fout, np.transpose([k1,Pk_hydro/Pk_nbody]))
Example No. 9
def baryon_fraction_SO(RMmin, RMmax, bins, f_SO, snapshot, root_out):

    # check if SO file exists
    if not(os.path.exists(f_SO)):  return 0

    # read header and get masses of CDM particles
    head     = readgadget.header(snapshot)
    BoxSize  = head.boxsize/1e3  #Mpc/h  
    Om       = head.omega_m
    redshift = head.redshift

    # get the name of the output file
    fout1 = '%s/bf_SO_%.2e_%.2e_%d_z=%.2f.txt'%(root_out, RMmin, RMmax, bins, redshift)
    fout2 = '%s/gf_SO_%.2e_%.2e_%d_z=%.2f.txt'%(root_out, RMmin, RMmax, bins, redshift)
    if os.path.exists(fout1) and os.path.exists(fout2):  return 0

    # read halo masses
    data = np.loadtxt(f_SO, unpack=False)
    halo_mass = data[:,0]
    Mg        = data[:,5]
    Mc        = data[:,6]
    Ms        = data[:,7]
    Mbh       = data[:,8]
    Nc        = data[:,11]

    # take only halos with more than 50 CDM particles
    indexes   = np.where(Nc>50)[0]
    halo_mass = halo_mass[indexes]
    Mg        = Mg[indexes]
    Mc        = Mc[indexes]
    Ms        = Ms[indexes]
    Mbh       = Mbh[indexes]

    # define the bins in reduced halo mass (M/Omega_m) and their geometric means
    RM_bins = np.logspace(np.log10(RMmin), np.log10(RMmax), bins+1)
    RM_mean = 10**(0.5*(np.log10(RM_bins[1:]) + np.log10(RM_bins[:-1])))

    # compute baryon fraction in units of cosmic fraction
    fraction1 = ((Mg + Ms + Mbh)/halo_mass) / (0.049/Om)
    fraction2 = (Mg/halo_mass) / (0.049/Om)

    # take bins in halo mass / Omega_m. Compute average baryon fraction
    mean_fraction1 = np.histogram(halo_mass/Om, RM_bins, weights=fraction1)[0]
    mean_fraction2 = np.histogram(halo_mass/Om, RM_bins, weights=fraction2)[0]
    Number         = np.histogram(halo_mass/Om, RM_bins)[0]
    Number[np.where(Number==0)] = 1.0
    mean_fraction1 = mean_fraction1/Number
    mean_fraction2 = mean_fraction2/Number

    # save results to file
    np.savetxt(fout1, np.transpose([RM_mean, mean_fraction1]))
    np.savetxt(fout2, np.transpose([RM_mean, mean_fraction2]))
Example No. 10
def find_Bk(snapshot, snapnum, Ngrid, Nmax, Ncut, step, do_RSD, axis, ptype,
            fbk):

    # read header
    head = readgadget.header(snapshot)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
    h = head.hubble

    # read the snapshot
    pos = readgadget.read_block(snapshot, "POS ", ptype) / 1e3  #Mpc/h

    # move positions to redshift-space
    if do_RSD:
        vel = readgadget.read_block(snapshot, "VEL ", ptype)  #km/s
        RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)

    # calculate bispectrum
    b123out = pySpec.Bk_periodic(pos.T,
                                 Lbox=BoxSize,
                                 Ngrid=Ngrid,
                                 step=step,
                                 Ncut=Ncut,
                                 Nmax=Nmax,
                                 fft='pyfftw',
                                 nthreads=1,
                                 silent=False)

    i_k = b123out['i_k1']
    j_k = b123out['i_k2']
    l_k = b123out['i_k3']
    p0k1 = b123out['p0k1']
    p0k2 = b123out['p0k2']
    p0k3 = b123out['p0k3']
    b123 = b123out['b123']
    b_sn = b123out['b123_sn']
    q123 = b123out['q123']
    cnts = b123out['counts']

    hdr = ('matter bispectrum; k_f = 2pi/%.1f, Nhalo=%i' %
           (BoxSize, pos.shape[0]))
    np.savetxt(
        fbk,
        np.array([i_k, j_k, l_k, p0k1, p0k2, p0k3, b123, q123, b_sn, cnts]).T,
        fmt='%i %i %i %.5e %.5e %.5e %.5e %.5e %.5e %.5e',
        delimiter='\t',
        header=hdr)
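
A sketch of a matter-bispectrum call; the grid and triangle parameters (Ngrid, Nmax, Ncut, step) and the paths are illustrative assumptions.

find_Bk('/path/to/snapdir_004/snap_004', snapnum=4, Ngrid=360, Nmax=40,
        Ncut=3, step=3, do_RSD=False, axis=0, ptype=[1], fbk='Bk_m_z=0.txt')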
Example No. 11
def find_pdf(snapshot, grid, MAS, do_RSD, axis, threads, ptype, fpdf,
             smoothing, Filter):

    if os.path.exists(fpdf): return 0

    # read header
    head = readgadget.header(snapshot)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Nall = head.nall  #Total number of particles
    Masses = head.massarr * 1e10  #Masses of the particles in Msun/h
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
    h = head.hubble

    # read snapshot
    pos = readgadget.read_block(snapshot, "POS ", ptype) / 1e3  #Mpc/h

    # move particles to redshift-space
    if do_RSD:
        vel = readgadget.read_block(snapshot, "VEL ", ptype)  #km/s
        RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)

    # calculate the overdensity field
    delta = np.zeros((grid, grid, grid), dtype=np.float32)
    if len(ptype) > 1:  #for multiple particles read masses
        mass = np.zeros(pos.shape[0], dtype=np.float32)
        offset = 0
        for j in ptype:
            mass[offset:offset + Nall[j]] = Masses[j]
            offset += Nall[j]
        MASL.MA(pos, delta, BoxSize, MAS, W=mass)
    else:
        MASL.MA(pos, delta, BoxSize, MAS)
    delta /= np.mean(delta, dtype=np.float64)
    #delta -= 1.0

    # smooth the overdensity field
    W_k = SL.FT_filter(BoxSize, smoothing, grid, Filter, threads)
    delta_smoothed = SL.field_smoothing(delta, W_k, threads)

    bins = np.logspace(-2, 2, 100)
    pdf, mean = np.histogram(delta_smoothed, bins=bins)
    mean = 0.5 * (mean[1:] + mean[:-1])
    pdf = pdf * 1.0 / grid**3

    # save results to file
    np.savetxt(fpdf, np.transpose([mean, pdf]), delimiter='\t')
Example No. 12
def find_CF(snapshot, snapnum, grid, MAS, do_RSD, axis, threads, ptype, fcf,
            save_multipoles):

    if os.path.exists(fcf): return 0

    # read header
    head = readgadget.header(snapshot)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Nall = head.nall  #Total number of particles
    Masses = head.massarr * 1e10  #Masses of the particles in Msun/h
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
    h = head.hubble

    # read snapshot
    pos = readgadget.read_block(snapshot, "POS ", ptype) / 1e3  #Mpc/h

    # move particles to redshift-space
    if do_RSD:
        vel = readgadget.read_block(snapshot, "VEL ", ptype)  #km/s
        RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)

    # calculate CF
    delta = np.zeros((grid, grid, grid), dtype=np.float32)
    if len(ptype) > 1:  #for multiple particles read masses
        mass = np.zeros(pos.shape[0], dtype=np.float32)
        offset = 0
        for j in ptype:
            mass[offset:offset + Nall[j]] = Masses[j]
            offset += Nall[j]
        MASL.MA(pos, delta, BoxSize, MAS, W=mass)
    else:
        MASL.MA(pos, delta, BoxSize, MAS)
    delta /= np.mean(delta, dtype=np.float64)
    delta -= 1.0
    CF = PKL.Xi(delta, BoxSize, MAS, axis, threads)

    # save results to file
    if save_multipoles:
        np.savetxt(fcf,
                   np.transpose(
                       [CF.r3D, CF.xi[:, 0], CF.xi[:, 1], CF.xi[:, 2]]),
                   delimiter='\t')
    else:
        np.savetxt(fcf, np.transpose([CF.r3D, CF.xi[:, 0]]), delimiter='\t')
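
A possible invocation measuring the real-space matter correlation function and its multipoles; all argument values are placeholders.

find_CF('/path/to/snapdir_004/snap_004', snapnum=4, grid=512, MAS='CIC',
        do_RSD=False, axis=0, threads=8, ptype=[1], fcf='CF_m_z=0.txt',
        save_multipoles=True)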
Example No. 13
def baryon_fraction_FoF(RMmin, RMmax, bins, f_subfind, snapshot, root_out):

    # check if subfind file exists
    if not(os.path.exists(f_subfind)):  return 0

    # read header and get masses of CDM particles
    head     = readgadget.header(snapshot)
    BoxSize  = head.boxsize/1e3  #Mpc/h  
    Masses   = head.massarr*1e10 #Masses of the particles in Msun/h       
    Nall     = head.nall         #Total number of particles
    Om       = head.omega_m
    redshift = head.redshift
    Mc       = (Om - 0.049)*BoxSize**3*rho_crit/Nall[1] #mass of a CDM particle

    # get the name of the output file
    fout = '%s/bf_%.2e_%.2e_%d_z=%.2f.txt'%(root_out,RMmin, RMmax, bins, redshift)
    if os.path.exists(fout):  return 0

    # read halo masses
    f              = h5py.File(f_subfind, 'r')
    halo_mass      = f['Group/GroupMass'][:]*1e10
    halo_mass_type = f['Group/GroupMassType'][:]*1e10
    halo_part_type = f['Group/GroupLenType'][:]   #number of particles in each halo
    f.close()

    # take only halos with more than 50 CDM particles
    indexes = np.where(halo_part_type[:,1]>50)[0]
    halo_mass = halo_mass[indexes]
    halo_mass_type = halo_mass_type[indexes]

    # define the bins in reduced halo mass (M/Omega_m) and their geometric means
    RM_bins = np.logspace(np.log10(RMmin), np.log10(RMmax), bins+1)
    RM_mean = 10**(0.5*(np.log10(RM_bins[1:]) + np.log10(RM_bins[:-1])))

    # compute baryon fraction in units of cosmic fraction
    fraction = ((halo_mass_type[:,0] + halo_mass_type[:,4] + halo_mass_type[:,5])/halo_mass) / (0.049/Om)

    # take bins in halo mass / Omega_m. Compute average baryon fraction
    mean_fraction = np.histogram(halo_mass/Om, RM_bins, weights=fraction)[0]
    Number        = np.histogram(halo_mass/Om, RM_bins)[0]
    Number[np.where(Number==0)] = 1.0
    mean_fraction = mean_fraction/Number

    # save results to file
    np.savetxt(fout, np.transpose([RM_mean, mean_fraction]))
Example No. 14
def find_Pk(folder, snapdir, snapnum, grid, MAS, do_RSD, axis, threads,
            fixed_Mmin, Mmin, Nhalos, fpk, save_multipoles):

    if os.path.exists(fpk):  return 0
    
    # read header
    head     = readgadget.header(snapdir)
    BoxSize  = head.boxsize/1e3  #Mpc/h                      
    Omega_m  = head.omega_m
    Omega_l  = head.omega_l
    redshift = head.redshift
    Hubble   = 100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)#km/s/(Mpc/h)
    h        = head.hubble

    # read halo catalogue
    FoF   = readfof.FoF_catalog(folder, snapnum, long_ids=False,
                                swap=False, SFR=False, read_IDs=False)
    pos_h = FoF.GroupPos/1e3            #Mpc/h
    mass  = FoF.GroupMass*1e10          #Msun/h
    vel_h = FoF.GroupVel*(1.0+redshift) #km/s
    if fixed_Mmin:
        indexes = np.where(mass>Mmin)[0]
        pos_h = pos_h[indexes];  vel_h = vel_h[indexes];  del indexes
    else:
        indexes = np.argsort(mass)[-Nhalos:] #take the Nhalos most massive halos
        pos_h = pos_h[indexes];  vel_h = vel_h[indexes];  del indexes

    # move halos to redshift-space
    if do_RSD:  RSL.pos_redshift_space(pos_h, vel_h, BoxSize, Hubble, redshift, axis)

    # calculate Pk
    delta = np.zeros((grid,grid,grid), dtype=np.float32)
    MASL.MA(pos_h, delta, BoxSize, MAS)
    delta /= np.mean(delta, dtype=np.float64);  delta -= 1.0 
    Pk = PKL.Pk(delta, BoxSize, axis, MAS, threads)

    # save results to file
    hdr = ('Nhalos=%i BoxSize=%.3f'%(pos_h.shape[0],BoxSize))    
    if save_multipoles:
        np.savetxt(fpk, np.transpose([Pk.k3D, Pk.Pk[:,0], Pk.Pk[:,1], Pk.Pk[:,2]]),
                   delimiter='\t', header=hdr)
    else:
        np.savetxt(fpk, np.transpose([Pk.k3D, Pk.Pk[:,0]]),
                   delimiter='\t', header=hdr)
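
A sketch of a halo power-spectrum call using a fixed mass cut; the FoF folder, mass threshold and output name are assumptions (Nhalos is ignored when fixed_Mmin is True).

find_Pk(folder='/path/to/FoF', snapdir='/path/to/snapdir_004/snap_004',
        snapnum=4, grid=512, MAS='CIC', do_RSD=False, axis=0, threads=8,
        fixed_Mmin=True, Mmin=3.2e13, Nhalos=None, fpk='Pk_halos_z=0.txt',
        save_multipoles=False)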
Example No. 15
def density_field_gadget(snapshot_fname, ptypes, dims, MAS='CIC',
	do_RSD=False, axis=0, verbose=True): 

	start = time.time()
	if verbose:  print('\nComputing density field of particles',ptypes)

	# declare the array hosting the density field
	density = np.zeros((dims, dims, dims), dtype=np.float32)

	# read relevant parameters of the snapshot
	head     = readgadget.header(snapshot_fname)
	BoxSize  = head.boxsize/1e3 #Mpc/h
	Masses   = head.massarr*1e10 #Msun/h
	Nall     = head.nall;  Ntotal = np.sum(Nall,dtype=np.int64)
	filenum  = head.filenum
	Omega_m  = head.omega_m
	Omega_l  = head.omega_l
	redshift = head.redshift
	fformat  = head.format
Example No. 16
def compute_Pk_ICs(snapshot, grid, MAS, threads, ptype, root_out):

    if not(os.path.exists(snapshot)) and not(os.path.exists(snapshot+'.0')):  return 0

    # read header
    head     = readgadget.header(snapshot)
    BoxSize  = head.boxsize/1e3  #Mpc/h  
    redshift = head.redshift

    # get the name of the file
    fout = '%s/Pk_%s_z=%.2f.txt'%(root_out, Pk_suffix(ptype), redshift)
    if os.path.exists(fout):  return 0
    
    # compute overdensity field
    do_RSD, axis = False, 0
    delta = MASL.density_field_gadget(snapshot, ptype, grid, MAS, do_RSD, axis)
    delta /= np.mean(delta, dtype=np.float64);  delta -= 1.0

    # compute Pk and save results to file
    Pk = PKL.Pk(delta, BoxSize, axis, MAS, threads)
    np.savetxt(fout, np.transpose([Pk.k3D, Pk.Pk[:,0]]), delimiter='\t')
Example No. 17
def halo_mass_function(RMmin, RMmax, bins, f_subfind, snapshot, root_out):

    # check if subfind file exists
    if not(os.path.exists(f_subfind)):  return 0

    # read header and get masses of CDM particles
    head     = readgadget.header(snapshot)
    BoxSize  = head.boxsize/1e3  #Mpc/h  
    Masses   = head.massarr*1e10 #Masses of the particles in Msun/h                    
    Nall     = head.nall         #Total number of particles
    Om       = head.omega_m
    redshift = head.redshift

    # get the name of the output file
    fout = '%s/mass_function_%.2e_%.2e_%d_z=%.2f.txt'%(root_out,RMmin,RMmax,bins,redshift)
    if os.path.exists(fout):  return 0

    # read halo masses
    f              = h5py.File(f_subfind, 'r')
    halo_mass      = f['Group/GroupMass'][:]*1e10 #Msun/h
    halo_part_type = f['Group/GroupLenType'][:]   #number of particles in each halo
    f.close()

    # take only halos with more than 50 CDM particles
    indexes   = np.where(halo_part_type[:,1]>50)[0]
    halo_mass = halo_mass[indexes]

    # define the arrays with the reduced mass (mass/Omega_m) bins, mean and width
    RM_bins = np.logspace(np.log10(RMmin), np.log10(RMmax), bins+1)
    RM_mean = 10**(0.5*(np.log10(RM_bins[1:]) + np.log10(RM_bins[:-1])))
    dRM     = RM_bins[1:] - RM_bins[:-1]

    # compute halo mass function and save results to file
    HMF = np.histogram(halo_mass/Om, RM_bins)[0]
    HMF = HMF/(BoxSize**3*dRM*Om)
    np.savetxt(fout, np.transpose([RM_mean*Om, RM_mean, HMF]))
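
For example, a hypothetical call binning the FoF mass function in 30 logarithmic bins of reduced mass M/Omega_m; paths and limits are placeholders.

halo_mass_function(RMmin=1e10, RMmax=1e15, bins=30,
                   f_subfind='/path/to/fof_subhalo_tab_033.hdf5',
                   snapshot='/path/to/snap_033.hdf5',
                   root_out='/path/to/results')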
Example No. 18
def read_header(snap):
    snapshot_fname = '/cosma6/data/dp004/dc-smit4/Daemmerung/Planck2013-Npart_2048_Box_3000-Fiducial/run1/snapdir_{0:03d}/Planck2013-L3000-N2048-Fiducial_{0:03d}.0'.format(
        snap)
    return readgadget.header(snapshot_fname)
Example No. 19
	if ptypes==[-1]:  ptypes = [0, 1, 2, 3, 4, 5]
	if len(ptypes)==1:  single_component=True
	else:               single_component=False

	# do a loop over all files
	num = 0.0
	for i in range(filenum):

		# find the name of the sub-snapshot
		if filenum==1:       snapshot = snapshot_fname
		else:                snapshot = snapshot_fname+'.%d'%i
		if fformat=='hdf5':  snapshot = snapshot+'.hdf5'

		# find the local particles in the sub-snapshot
		head  = readgadget.header(snapshot)
		npart = head.npart

		# do a loop over all particle types
		for ptype in ptypes:

			if npart[ptype]==0:  continue

			# read positions in Mpc/h
			pos = readgadget.read_field(snapshot, "POS ", ptype)/1e3
			#pos = readsnap.read_block(snapshot,"POS ",parttype=ptype)/1e3

			# read velocities in km/s and move particles to redshift-space
			if do_RSD:
				vel = readgadget.read_field(snapshot, "VEL ", ptype)
				#vel = readsnap.read_block(snapshot,"VEL ",parttype=ptype)
Example No. 20
import numpy as np
import readgadget

tng = '/home/jovyan/Simulations/IllustrisTNG/1P_0/ICs/ics'
smb = '/home/jovyan/Simulations/SIMBA/1P_0/ICs/ics'

header_tng = readgadget.header(tng)
header_smb = readgadget.header(smb)

h_tng = header_tng.hubble
h_smb = header_smb.hubble

print(h_tng)
print(h_smb)
Example No. 21
def find_Bk(folder, snapdir, snapnum, axis, Ngrid, step, Ncut, Nmax, do_RSD,
            fixed_Mmin, Mmin, Nhalos, fbk):

    # read header
    head = readgadget.header(snapdir)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
    h = head.hubble

    # read halo catalogue
    FoF = readfof.FoF_catalog(folder,
                              snapnum,
                              long_ids=False,
                              swap=False,
                              SFR=False,
                              read_IDs=False)
    pos_h = FoF.GroupPos / 1e3  #Mpc/h
    mass = FoF.GroupMass * 1e10  #Msun/h
    vel_h = FoF.GroupVel * (1.0 + redshift)  #km/s
    if fixed_Mmin:
        indexes = np.where(mass > Mmin)[0]
        pos_h = pos_h[indexes]
        vel_h = vel_h[indexes]
        del indexes
    else:
        indexes = np.argsort(mass)[
            -Nhalos:]  #take the Nhalos most massive halos
        pos_h = pos_h[indexes]
        vel_h = vel_h[indexes]
        del indexes

    # move halos to redshift-space
    if do_RSD:
        RSL.pos_redshift_space(pos_h, vel_h, BoxSize, Hubble, redshift, axis)

    # calculate bispectrum
    b123out = pySpec.Bk_periodic(pos_h.T,
                                 Lbox=BoxSize,
                                 Ngrid=Ngrid,
                                 step=step,
                                 Ncut=Ncut,
                                 Nmax=Nmax,
                                 fft='pyfftw',
                                 nthreads=1,
                                 silent=False)

    i_k = b123out['i_k1']
    j_k = b123out['i_k2']
    l_k = b123out['i_k3']
    p0k1 = b123out['p0k1']
    p0k2 = b123out['p0k2']
    p0k3 = b123out['p0k3']
    b123 = b123out['b123']
    b_sn = b123out['b123_sn']
    q123 = b123out['q123']
    cnts = b123out['counts']

    hdr = ('halo bispectrum; k_f = 2pi/%.1f, Nhalo=%i' %
           (BoxSize, pos_h.shape[0]))
    np.savetxt(
        fbk,
        np.array([i_k, j_k, l_k, p0k1, p0k2, p0k3, b123, q123, b_sn, cnts]).T,
        fmt='%i %i %i %.5e %.5e %.5e %.5e %.5e %.5e %.5e',
        delimiter='\t',
        header=hdr)
Example No. 22
def density_field_2D(snapshot_fname, x_min, x_max, y_min, y_max, z_min, z_max,
                     dims, ptypes, plane, MAS, save_density_field):

    plane_dict = {'XY': [0, 1], 'XZ': [0, 2], 'YZ': [1, 2]}

    # read snapshot head and obtain BoxSize, filenum...
    head = readgadget.header(snapshot_fname)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Nall = head.nall
    Masses = head.massarr * 1e10  #Msun/h
    filenum = head.filenum
    redshift = head.redshift

    # find the geometric values of the density field square
    len_x, off_x, len_y, off_y, depth, BoxSize_slice = \
            geometry(snapshot_fname, plane, x_min, x_max, y_min, y_max,
                    z_min, z_max)

    # compute the mean density in the box
    if len(ptypes) == 1 and Masses[ptypes[0]] != 0.0:
        single_specie = True
    else:
        single_specie = False

    # define the density array
    overdensity = np.zeros((dims, dims), dtype=np.float32)

    # do a loop over all subfiles in the snapshot
    total_mass, mass_slice = 0.0, 0.0
    renormalize_2D = False
    for i in range(filenum):

        # find the name of the subfile
        snap = snapshot_fname + '.%d' % i

        # in the last snapshot we renormalize the field
        if i == filenum - 1: renormalize_2D = True

        # do a loop over
        for ptype in ptypes:

            # read the positions of the particles in Mpc/h
            pos = readgadget.read_field(snap, "POS ", ptype) / 1e3

            if single_specie: total_mass += len(pos)

            # keep only with the particles in the slice
            indexes = np.where((pos[:, 0] > x_min) & (pos[:, 0] < x_max)
                               & (pos[:, 1] > y_min) & (pos[:, 1] < y_max)
                               & (pos[:, 2] > z_min) & (pos[:, 2] < z_max))
            pos = pos[indexes]

            # renormalize positions
            pos[:, 0] -= x_min
            pos[:, 1] -= y_min
            pos[:, 2] -= z_min

            # project particle positions into a 2D plane
            pos = pos[:, plane_dict[plane]]

            # read the masses of the particles in Msun/h
            if not (single_specie):
                mass = readgadget.read_field(snap, "MASS", ptype) * 1e10
                total_mass += np.sum(mass, dtype=np.float64)
                mass = mass[indexes]
                MASL.MA(pos,
                        overdensity,
                        BoxSize_slice,
                        MAS=MAS,
                        W=mass,
                        renormalize_2D=renormalize_2D)
            else:
                mass_slice += len(pos)
                MASL.MA(pos,
                        overdensity,
                        BoxSize_slice,
                        MAS=MAS,
                        W=None,
                        renormalize_2D=renormalize_2D)

    print('Expected mass = %.7e' % mass_slice)
    print('Computed mass = %.7e' % np.sum(overdensity, dtype=np.float64))

    # compute mean density in the whole box
    mass_density = total_mass * 1.0 / BoxSize**3  #(Msun/h)/(Mpc/h)^3 or #/(Mpc/h)^3

    print('mass density = %.5e' % mass_density)

    # compute the volume of each cell in the density field slice
    V_cell = BoxSize_slice**2 * depth * 1.0 / dims**2  #(Mpc/h)^3

    # compute the mean mass in each cell of the slice
    mean_mass = mass_density * V_cell  #Msun/h or #

    # compute overdensities
    overdensity /= mean_mass
    print(np.min(overdensity), '< rho/<rho> <', np.max(overdensity))

    # in our convention overdensity(x,y), while for matplotlib is
    # overdensity(y,x), so we need to transpose the field
    overdensity = np.transpose(overdensity)

    # save density field to file
    f_df = density_field_name(snapshot_fname, x_min, x_max, y_min, y_max,
                              z_min, z_max, dims, ptypes, plane, MAS)
    if save_density_field: np.save(f_df, overdensity)

    return overdensity
Example No. 23
def Pk_Gadget(snapshot_fname,
              dims,
              particle_type,
              do_RSD,
              axis,
              cpus,
              folder_out=None):

    # find folder to place output files. Default is current directory
    if folder_out is None: folder_out = os.getcwd()

    # for either one single species or all species use this routine
    if len(particle_type) == 1:
        Pk_comp(snapshot_fname, particle_type[0], dims, do_RSD, axis, cpus,
                folder_out)
        return None

    # read snapshot head and obtain BoxSize, Omega_m and Omega_L
    print('\nREADING SNAPSHOTS PROPERTIES')
    head = readgadget.header(snapshot_fname)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Nall = head.nall
    Masses = head.massarr * 1e10  #Msun/h
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
    h = head.hubble
    z = '%.3f' % redshift
    dims3 = dims**3

    # compute the values of Omega_cdm, Omega_nu, Omega_gas and Omega_s
    Omega_c = Masses[1] * Nall[1] / BoxSize**3 / rho_crit
    Omega_n = Masses[2] * Nall[2] / BoxSize**3 / rho_crit
    Omega_g, Omega_s = 0.0, 0.0
    if Nall[0] > 0:
        if Masses[0] > 0:
            Omega_g = Masses[0] * Nall[0] / BoxSize**3 / rho_crit
            Omega_s = Masses[4] * Nall[4] / BoxSize**3 / rho_crit
        else:
            # mass in Msun/h
            mass = readgadget.read_block(snapshot_fname, "MASS",
                                         ptype=[0]) * 1e10
            Omega_g = np.sum(mass, dtype=np.float64) / BoxSize**3 / rho_crit
            mass = readgadget.read_block(snapshot_fname, "MASS",
                                         ptype=[4]) * 1e10
            Omega_s = np.sum(mass, dtype=np.float64) / BoxSize**3 / rho_crit
            del mass

    # some verbose
    print('Omega_gas    = ', Omega_g)
    print('Omega_cdm    = ', Omega_c)
    print('Omega_nu     = ', Omega_n)
    print('Omega_star   = ', Omega_s)
    print('Omega_m      = ', Omega_g + Omega_c + Omega_n + Omega_s)
    print('Omega_m snap = ', Omega_m)

    # dictionary giving the value of Omega for each component
    Omega_dict = {0: Omega_g, 1: Omega_c, 2: Omega_n, 4: Omega_s}
    #####################################################################

    # define the array containing the deltas
    delta = [[], [], [],
             []]  #array containing the gas, CDM, NU and stars deltas

    # dictionary among particle type and the index in the delta and Pk arrays
    # delta of stars (ptype=4) is delta[3] not delta[4]
    index_dict = {0: 0, 1: 1, 2: 2, 4: 3}

    # define suffix here
    if do_RSD: suffix = '_RS_axis=' + str(axis) + '_z=' + z + '.dat'
    else: suffix = '_z=' + z + '.dat'
    #####################################################################

    # do a loop over all particle types and compute the deltas
    for ptype in particle_type:

        # read particle positions in #Mpc/h
        pos = readgadget.read_block(snapshot_fname, "POS ", [ptype]) / 1e3

        # move particle positions to redshift-space
        if do_RSD:
            vel = readgadget.read_block(snapshot_fname, "VEL ", [ptype])  #km/s
            RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)
            del vel

        # find the index of the particle type in the delta array
        index = index_dict[ptype]

        # compute mean number of particles per grid cell
        mean_number = len(pos) * 1.0 / dims3

        # compute the deltas
        delta[index] = np.zeros((dims, dims, dims), dtype=np.float32)
        MASL.MA(pos, delta[index], BoxSize, 'CIC')
        del pos
        delta[index] /= mean_number
        delta[index] -= 1.0
    #####################################################################

    #####################################################################
    # if there are two or more particles compute auto- and cross-power spectra
    for i, ptype1 in enumerate(particle_type):
        for ptype2 in particle_type[i + 1:]:

            # find the indexes of the particle types
            index1 = index_dict[ptype1]
            index2 = index_dict[ptype2]

            # choose the name of the output files
            fout1 = '/Pk_' + name_dict[str(ptype1)] + suffix
            fout2 = '/Pk_' + name_dict[str(ptype2)] + suffix
            fout12 = '/Pk_' + name_dict[str(ptype1) + str(ptype2)] + suffix
            fout1 = folder_out + fout1
            fout2 = folder_out + fout2
            fout12 = folder_out + fout12

            # some verbose
            print('\nComputing the auto- and cross-power spectra of types:',
                  ptype1, '-', ptype2)
            print('saving results in:')
            print(fout1, '\n', fout2, '\n', fout12)

            # This routine computes the auto- and cross-power spectra
            data = PKL.XPk([delta[index1], delta[index2]],
                           BoxSize,
                           axis=axis,
                           MAS=['CIC', 'CIC'],
                           threads=cpus)

            k = data.k3D
            Nmodes = data.Nmodes3D

            # save power spectra results in the output files
            np.savetxt(
                fout12,
                np.transpose([
                    k, data.XPk[:, 0, 0], data.XPk[:, 1, 0], data.XPk[:, 2, 0],
                    Nmodes
                ]))
            np.savetxt(
                fout1,
                np.transpose([
                    k, data.Pk[:, 0, 0], data.Pk[:, 1, 0], data.Pk[:, 2, 0],
                    Nmodes
                ]))
            np.savetxt(
                fout2,
                np.transpose([
                    k, data.Pk[:, 0, 1], data.Pk[:, 1, 1], data.Pk[:, 2, 1],
                    Nmodes
                ]))
    #####################################################################

    #####################################################################
    # compute the power spectrum of the sum of all components
    print('\ncomputing P(k) of all components')

    # define delta of all components
    delta_tot = np.zeros((dims, dims, dims), dtype=np.float32)

    Omega_tot = 0.0
    fout = folder_out + '/Pk_'
    for ptype in particle_type:
        index = index_dict[ptype]
        delta_tot += (Omega_dict[ptype] * delta[index])
        Omega_tot += Omega_dict[ptype]
        fout += name_dict[str(ptype)] + '+'

    delta_tot /= Omega_tot
    del delta
    fout = fout[:-1]  #avoid '+' in the end

    # compute power spectrum
    data = PKL.Pk(delta_tot, BoxSize, axis=axis, MAS='CIC', threads=cpus)
    del delta_tot

    # write P(k) to output file
    np.savetxt(
        fout + suffix,
        np.transpose([
            data.k3D, data.Pk[:, 0], data.Pk[:, 1], data.Pk[:, 2],
            data.Nmodes3D
        ]))
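
A sketch of how the driver above might be called to obtain the auto- and cross-spectra of gas, CDM and neutrinos; it assumes rho_crit and name_dict are defined at module level, and the paths are placeholders.

Pk_Gadget('/path/to/snapdir_004/snap_004', dims=512, particle_type=[0, 1, 2],
          do_RSD=False, axis=0, cpus=8, folder_out='/path/to/results')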
Example No. 24
def density_field_2D(snapshot, x_min, x_max, y_min, y_max, z_min, z_max,
                     dims, ptypes, plane, MAS, save_density_field):


    # find the geometric values of the density field square
    dx, x, dy, y, depth, BoxSize_slice = \
        geometry(snapshot, plane, x_min, x_max, y_min, y_max, z_min, z_max)

    # find the name of the density field and read it if already exists
    f_df = density_field_name(snapshot, x_min, x_max, y_min, y_max, 
                                 z_min, z_max, dims, ptypes, plane, MAS)
    if os.path.exists(f_df):
        print('\nDensity field already computed. Reading it from file...')
        overdensity = np.load(f_df);  return dx, x, dy, y, overdensity

    # if not, compute it
    print('\nComputing density field...')
    plane_dict = {'XY':[0,1], 'XZ':[0,2], 'YZ':[1,2]}

    # read snapshot head and obtain BoxSize, filenum...
    head     = readgadget.header(snapshot)
    BoxSize  = head.boxsize/1e3 #Mpc/h                    
    Nall     = head.nall
    Masses   = head.massarr*1e10 #Msun/h                  
    filenum  = head.filenum
    redshift = head.redshift

    # define the density array
    overdensity = np.zeros((dims,dims), dtype=np.float32)

    # do a loop over all subfiles in the snapshot
    total_mass, mass_slice = 0.0, 0.0;  renormalize_2D = False
    for i in range(filenum):

        # find the name of the subfile
        snap1 = '%s.%d'%(snapshot,i)
        snap2 = '%s.%d.hdf5'%(snapshot,i)
        snap3 = '%s'%(snapshot)
        if   os.path.exists(snap1):  snap = snap1
        elif os.path.exists(snap2):  snap = snap2
        elif os.path.exists(snap3):  snap = snap3
        else:  raise Exception('Problem with the snapshot name!')                

        # in the last subfile we renormalize the field
        if i==filenum-1:  renormalize_2D = True

        # do a loop over all particle types
        for ptype in ptypes:

            # read the positions of the particles in Mpc/h
            pos = readgadget.read_field(snap,"POS ",ptype)/1e3
            
            # keep only with the particles in the slice
            indexes = np.where((pos[:,0]>x_min) & (pos[:,0]<x_max) &
                               (pos[:,1]>y_min) & (pos[:,1]<y_max) &
                               (pos[:,2]>z_min) & (pos[:,2]<z_max) )
            pos = pos[indexes]

            # renormalize positions
            pos[:,0] -= x_min;  pos[:,1] -= y_min;  pos[:,2] -= z_min

            # project particle positions into a 2D plane
            pos = pos[:,plane_dict[plane]]

            # read the masses of the particles
            mass = readgadget.read_field(snap,"MASS",ptype)*1e10
            total_mass += np.sum(mass, dtype=np.float64)
            mass = mass[indexes];  mass_slice += np.sum(mass)

            # update 2D density field
            MASL.MA(pos, overdensity, BoxSize_slice, MAS=MAS, W=mass,
                    renormalize_2D=renormalize_2D)
            

    print('Expected mass = %.7e'%mass_slice)
    print('Computed mass = %.7e'%np.sum(overdensity, dtype=np.float64))

    # compute mean density in the whole box
    mass_density = total_mass*1.0/BoxSize**3 #(Msun/h)/(Mpc/h)^3 or #/(Mpc/h)^3
    print('mass density = %.5e'%mass_density)

    # compute the volume and mean mass of each cell of the slice
    V_cell = BoxSize_slice**2*depth*1.0/dims**2  #(Mpc/h)^3
    mean_mass = mass_density*V_cell #Msun/h or #

    # compute overdensities
    overdensity /= mean_mass
    print(np.min(overdensity),'< rho/<rho> <',np.max(overdensity))

    # in our convention overdensity(x,y), while for matplotlib is
    # overdensity(y,x), so we need to transpose the field
    overdensity = np.transpose(overdensity)

    # save density field to file
    if save_density_field:  np.save(f_df, overdensity)
    return dx, x, dy, y, overdensity
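
A possible call projecting the CDM particles of a hypothetical 100x100x10 (Mpc/h)^3 slab onto a 1000^2 XY grid; it assumes the geometry and density_field_name helpers shown elsewhere in this collection are available.

dx, x, dy, y, overdensity = density_field_2D(
    '/path/to/snapdir_004/snap_004', x_min=0.0, x_max=100.0, y_min=0.0,
    y_max=100.0, z_min=0.0, z_max=10.0, dims=1000, ptypes=[1], plane='XY',
    MAS='CIC', save_density_field=True)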
Example No. 25
def density_field_gadget(snapshot_fname, ptypes, dims, MAS='CIC',
	do_RSD=False, axis=0, verbose=True): 

	start = time.time()
	if verbose:  print('\nComputing density field of particles', ptypes)

	# declare the array hosting the density field
	density = np.zeros((dims, dims, dims), dtype=np.float32)

	# read relevant parameters of the snapshot
	head     = readgadget.header(snapshot_fname)
	BoxSize  = head.boxsize/1e3 #Mpc/h
	Masses   = head.massarr*1e10 #Msun/h
	Nall     = head.nall;  Ntotal = np.sum(Nall,dtype=np.int64)
	filenum  = head.filenum
	Omega_m  = head.omega_m
	Omega_l  = head.omega_l
	redshift = head.redshift
	fformat  = head.format
	Hubble   = head.Hubble


	if ptypes==[-1]:  ptypes = [0, 1, 2, 3, 4, 5]
	if len(ptypes)==1:  single_component=True
	else:               single_component=False

	# do a loop over all files
	num = 0.0
	for i in range(filenum):

		# find the name of the sub-snapshot
		if filenum==1:       snapshot = snapshot_fname
		else:                snapshot = snapshot_fname+'.%d'%i
		if fformat=='hdf5':  snapshot = snapshot+'.hdf5'

		# find the local particles in the sub-snapshot
		head  = readgadget.header(snapshot)
		npart = head.npart

		# do a loop over all particle types
		for ptype in ptypes:

			if npart[ptype]==0:  continue

			# read positions in Mpc/h
			pos = readgadget.read_field(snapshot, "POS ", ptype)/1e3
			#pos = readsnap.read_block(snapshot,"POS ",parttype=ptype)/1e3

			# read velocities in km/s and move particles to redshift-space
			if do_RSD:
				vel = readgadget.read_field(snapshot, "VEL ", ptype)
				#vel = readsnap.read_block(snapshot,"VEL ",parttype=ptype)
				RSL.pos_redshift_space(pos,vel,BoxSize,Hubble,redshift,axis)
				del vel

			# compute density field. If multicomponent, read/find masses
			if Masses[ptype]!=0:
				if single_component:
					MASL.MA(pos, density, BoxSize, MAS)
					num += pos.shape[0]
				else:
					mass = np.ones(npart[ptype], dtype=np.float32)*Masses[ptype]
					MASL.MA(pos, density, BoxSize, MAS, W=mass)
					num += np.sum(mass, dtype=np.float64)
			else:
				mass = readgadget.read_field(snapshot, "MASS", ptype)*1e10
				#mass = readsnap.read_block(snapshot,"MASS",
				#        parttype=ptype)*1e10 #Msun/h
				MASL.MA(pos, density, BoxSize, MAS, W=mass)
				num += np.sum(mass, dtype=np.float64)

	if verbose:
		print('%.8e should be equal to\n%.8e'\
			%(np.sum(density, dtype=np.float64), num))
		print('Time taken = %.2f seconds'%(time.time()-start))

	return np.asarray(density)
Example No. 26
def do_snap(snap):
    #load snapshot
    print('Loading snapshot', snap)

    snapshot_fname = '/cosma6/data/dp004/dc-smit4/Daemmerung/Planck2013-Npart_2048_Box_3000-Fiducial/run1/snapdir_{0:03d}/Planck2013-L3000-N2048-Fiducial_{0:03d}'.format(
        snap)
    density_fname = '/cosma6/data/dp004/dc-boot5/Lightcone/Density/density_{0:03d}.npy'.format(
        snap)
    powerspec_path = '/cosma6/data/dp004/dc-boot5/Lightcone/Power_Spectrum/'
    lightcone_path = "/cosma6/data/dp004/dc-boot5/Lightcone/Galaxy_FullSky/"

    #calculate density delta
    # declare the array hosting the density field
    density = np.zeros((dims, dims, dims), dtype=np.float32)

    # read relevant parameters of the snapshot
    head = readgadget.header(snapshot_fname)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Masses = head.massarr * 1e10  #Msun/h
    Nall = head.nall
    filenum = head.filenum
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    fformat = head.format
    Hubble = head.Hubble

    Ntotal = np.sum(Nall, dtype=np.int64)

    grid = 2048
    MAS = 'CIC'
    axis = 0
    do_RSD = True
    BoxSize = 3000  #Mpc/h
    ptype = 1  #dark matter

    # do a loop over all files
    num = 0.0

    for i in range(filenum):

        # find the name of the sub-snapshot
        if filenum == 1: snapshot = snapshot_fname
        else: snapshot = snapshot_fname + '.{0:d}'.format(i)
        if fformat == 'hdf5': snapshot = snapshot + '.hdf5'

        # find the local particles in the sub-snapshot
        head = readgadget.header(snapshot)
        npart = head.npart

        if verbose:
            print('Sub-snapshot {0:d}, DM particles = {1:d} \n'.format(
                i, npart[ptype]))
        if (DEBUG > 1 and i % 100 == 0):
            print(
                'Sub-snapshot {0:d}, DM particles = {1:d}, time = {2:%H:%M:%S} \n'
                .format(i, npart[ptype], datetime.datetime.now()))

        # read positions in Mpc/h
        pos = readgadget.read_field(snapshot, "POS ", ptype)

        # read velocities in km/s
        if do_RSD:
            vel = readgadget.read_field(snapshot, "VEL ", ptype)

        # write galaxy data for each redshift bin into its own file
        fname = lightcone_path + 'galaxy_lightcone.snap{0:02d}'.format(snap)

        # open output file
        if (i == 0):
            mode = 'w'  #open file in write mode for first file in snapshot
        else:
            mode = 'r+'  #thereafter open in append mode
        fo = h5py.File(fname, mode)

        for oct in range(8):
            #relocate origin to each corner of the simulation box for each octant
            orig_x = oct % 2 * BoxSize
            orig_y = oct // 2 % 2 * BoxSize
            orig_z = oct // 4 * BoxSize

            # translate particle positions to new origin for each octant
            x = pos[::, 0] - orig_x
            y = pos[::, 1] - orig_y
            z = pos[::, 2] - orig_z

            # calculate comoving radial distance, RA and Dec
            r = np.sqrt(x * x + y * y + z * z)
            dec = np.rad2deg(np.arcsin(z / r))
            ra = np.rad2deg(np.arctan2(y, x))

            # lookup redshift corresponding to this r
            zz = d2z(r)

            if do_RSD:
                # Calculate radial velocity
                vr = np.sqrt(vel[::, 0]**2 + vel[::, 1]**2 +
                             vel[::, 2]**2) * np.sign(vel[::, 0] + vel[::, 1] +
                                                      vel[::, 2])

                # Calculate RSD factor
                # Particle velocities u in internal velocity units (corresponds to km/sec if the default choice for the system of units is adopted).
                # Peculiar velocities v are obtained by multiplying u with sqrt(a), i.e. v = u * sqrt(a). So v = u / sqrt(1+z)
                f_RSD = np.sqrt(1 + zz) * vr / z2H(zz)
            else:
                f_RSD = np.zeros(len(r))

            #Check whether particle within shell max and min
            sn = 63 - snap
            if (sn == 0):
                F = [(r <= Dc_max[sn])]
            else:
                F = [(r > Dc_max[sn - 1]) & (r <= Dc_max[sn])]
            f = tuple(F)

            # create random luminosity value for each particle
            ngal = len(pos)
            L = P2L(np.random.random(ngal) * n)

            # create dataset. Use f to filter only those galaxies within snapshot redshift boundaries
            g = np.array(list(zip(r[f], ra[f], dec[f], zz[f], f_RSD[f], L[f])),
                         dtype=gal)

            ds_name = 'octant_{0:01d}'.format(oct)
            if (DEBUG > 3): print(ds_name)

            # if filenum = 0 then create new datasets for each octant and set dataset atributes
            if (i == 0):
                gals = fo.create_dataset(
                    ds_name, data=g, dtype=gal, maxshape=(None, ), chunks=True
                )  # set maxshape = None to make resizeable and chunks = True to enable chunking
                gals.attrs['max_z'] = z_max[sn]
                if (sn == 0):
                    gals.attrs['min_z'] = 0
                else:
                    gals.attrs['min_z'] = z_max[sn - 1]
                gals.attrs['snap'] = snap
                gals.attrs['octant'] = oct
                gals.attrs['alpha'] = alpha
                gals.attrs['phi_star'] = p_star
            elif (len(g) > 0):
                gals = fo[ds_name]
                gals.resize(gals.shape[0] + len(g), axis=0)
                gals[-len(g):] = g

                # end of processing for this octant

        fo.close()
        if (DEBUG > 2):
            print(fname, " completed, time:", datetime.datetime.now())
        sys.stdout.flush()

        # compute density field.
        MASL.MA(pos, density, BoxSize, MAS)
        num += pos.shape[0]

    # All files read for snapshot
    if (DEBUG > 0): print(fname, " completed, time:", datetime.datetime.now())

    # Write density field to file
    rho_avg = np.mean(density, dtype=np.float64)
    density /= rho_avg
    density -= 1.0
    density.tofile(density_fname)
    if verbose:
        print(
            'Density delta written to file for snap {0:d}, mean density = {1:04f}'
            .format(snap, rho_avg))

    # Calculate power spectrum from density
    threads = 16

    Pk = PKL.Pk(density, BoxSize, axis, MAS, threads)
    print('Pk calculated')

    #Save power spectrum components in an hdf5 file
    fname = 'powerspec_{0:03d}.npy'.format(snap)  #note: written with h5py despite the .npy suffix

    # open output file
    fo = h5py.File(powerspec_path + fname, 'w')

    # create datasets
    atts = fo.create_dataset(
        "attribs", dtype="f")  # empty dataset for holding snapshot attributes
    atts.attrs['z'] = redshift
    atts.attrs['Omega_m'] = Omega_m
    atts.attrs['Omega_l'] = Omega_l

    # 1D P(k)
    dset = fo.create_dataset('k1D', data=Pk.k1D)
    dset = fo.create_dataset('Pk1D', data=Pk.Pk1D)
    dset = fo.create_dataset('Nmodes1D', data=Pk.Nmodes1D)

    # 2D P(k)
    dset = fo.create_dataset('kpar', data=Pk.kpar)
    dset = fo.create_dataset('kper', data=Pk.kper)
    dset = fo.create_dataset('Pk2D', data=Pk.Pk2D)
    dset = fo.create_dataset('Nmodes2D', data=Pk.Nmodes2D)

    # 3D P(k)
    dset = fo.create_dataset('k', data=Pk.k3D)
    dset = fo.create_dataset('Pk0', data=Pk.Pk[:, 0])
    dset = fo.create_dataset('Pk2', data=Pk.Pk[:, 1])
    dset = fo.create_dataset('Pk4', data=Pk.Pk[:, 2])
    dset = fo.create_dataset('Nmodes', data=Pk.Nmodes3D)

    fo.close()

    print('Power spectrum data written to file')
Example No. 27
def density_field_gadget(snapshot_fname,
                         ptypes,
                         dims,
                         MAS='CIC',
                         do_RSD=False,
                         axis=0,
                         verbose=True):

    start = time.time()
    if verbose: print('\nComputing density field of particles', ptypes)

    # declare the array hosting the density field
    density = np.zeros((dims, dims, dims), dtype=np.float32)

    # read relevant parameters of the snapshot
    head = readgadget.header(snapshot_fname)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Masses = head.massarr * 1e10  #Msun/h
    Nall = head.nall
    Ntotal = np.sum(Nall, dtype=np.int64)
    filenum = head.filenum
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    fformat = head.format
    Hubble = head.Hubble

    if ptypes == [-1]: ptypes = [0, 1, 2, 3, 4, 5]
    if len(ptypes) == 1: single_component = True
    else: single_component = False

    # do a loop over all files
    num = 0.0
    for i in range(filenum):

        # find the name of the sub-snapshot
        if filenum == 1: snapshot = snapshot_fname
        else: snapshot = snapshot_fname + '.%d' % i
        if fformat == 'hdf5': snapshot = snapshot + '.hdf5'

        # find the local particles in the sub-snapshot
        head = readgadget.header(snapshot)
        npart = head.npart

        # do a loop over all particle types
        for ptype in ptypes:

            if npart[ptype] == 0: continue

            # read positions in Mpc/h
            pos = readgadget.read_field(snapshot, "POS ", ptype) / 1e3
            #pos = readsnap.read_block(snapshot,"POS ",parttype=ptype)/1e3

            # read velocities in km/s and move particles to redshift-space
            if do_RSD:
                vel = readgadget.read_field(snapshot, "VEL ", ptype)
                #vel = readsnap.read_block(snapshot,"VEL ",parttype=ptype)
                RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift,
                                       axis)
                del vel

            # compute density field. If multicomponent, read/find masses
            if Masses[ptype] != 0:
                if single_component:
                    MASL.MA(pos, density, BoxSize, MAS)
                    num += pos.shape[0]
                else:
                    mass = np.ones(npart[ptype],
                                   dtype=np.float32) * Masses[ptype]
                    MASL.MA(pos, density, BoxSize, MAS, W=mass)
                    num += np.sum(mass, dtype=np.float64)
            else:
                mass = readgadget.read_field(snapshot, "MASS", ptype) * 1e10
                #mass = readsnap.read_block(snapshot,"MASS",
                #        parttype=ptype)*1e10 #Msun/h
                MASL.MA(pos, density, BoxSize, MAS, W=mass)
                num += np.sum(mass, dtype=np.float64)

    if verbose:
        print('%.8e should be equal to\n%.8e'\
            %(np.sum(density, dtype=np.float64), num))
        print('Time taken = %.2f seconds' % (time.time() - start))

    return np.asarray(density)
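
A minimal usage sketch turning the returned density field into an overdensity; the snapshot path and grid size are assumptions.

import numpy as np

density = density_field_gadget('/path/to/snapdir_004/snap_004', ptypes=[1],
                               dims=512, MAS='CIC', do_RSD=False, axis=0)
delta = density/np.mean(density, dtype=np.float64) - 1.0  # overdensity field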
Example No. 28
root = '/n/hernquistfs3/IllustrisTNG/Runs/L75n1820TNG/output/'
################################# INPUT ########################################
Mmin = 3.7e8 #3e9 #3.7e8
Mmax = 1e14
bins = 60

fout = 'ratio_g_75Mpc_1820_UVB'
#fout = 'ratio_g_75Mpc_1820_UVB'
################################################################################

for snapnum in [17,21,25,33,50,99]:

    # read header
    snapshot = root + 'snapdir_%03d/snap_%03d'%(snapnum,snapnum)
    header = readgadget.header(snapshot)
    BoxSize = header.boxsize/1e3 #Mpc/h
    redshift = header.redshift

    print('L = %.1f Mpc/h'%BoxSize)
    print('z = %.1f'%redshift)

    halos = groupcat.loadHalos(root, snapnum,
                               fields=['GroupMassType','GroupMass',
                                       'Group_R_TopHat200'])
    halo_mass = halos['GroupMassType'][:]*1e10    #Msun/h
    Mass      = halos['GroupMass'][:]*1e10        #Msun/h
    R         = halos['Group_R_TopHat200'][:]/1e3 #Mpc/h

    indexes = np.where(R>0.0)[0]
    halo_mass = halo_mass[indexes]