Example #1
def PlotRhoVsTemp(base, num, arepo=0, BINS=200, Omegab=0.044, xmin=-3., xmax=7., ymin=2., ymax=8., format='.eps'):

        if arepo > 0:
                filename=base+'snap_arepo_'+str(num).zfill(3)
        else:
                filename=base+'snap_gadget_'+str(num).zfill(3)
        head=rs.snapshot_header(filename)
        mass=np.float64(rs.read_block(filename, "MASS", parttype=0, arepo=arepo))
        rho=np.float64(rs.read_block(filename, "RHO ", parttype=0, arepo=arepo))
        u=np.float64(rs.read_block(filename, "U   ", parttype=0, arepo=arepo))
        Nelec=np.float64(rs.read_block(filename, "NE  ", parttype=0, arepo=arepo))

        temp=co.GetTemp(u, Nelec, 5./3.)
        rho_b=Omegab*co.GetRhoCrit()
        rho/=rho_b

        print "z = ", head.redshift
        print "min/max of T [K]            = ", min(temp), max(temp)
        print "min/max of rho/rho_b        = ", min(rho), max(rho)

        rho=np.log10(rho)
        temp=np.log10(temp)

        if (xmin==0.) & (ymin==0.) & (xmax==0.) & (ymax==0.):
                print "range not specified -> adjusting min/max"
                xmin=min(rho)
                xmax=max(rho)
                ymin=min(temp)
                ymax=max(temp)

        Z,x,y=np.histogram2d(rho,temp, range=[[xmin,xmax],[ymin,ymax]], weights=mass, bins=BINS, normed=True)
        Z=np.log10(Z)
        
        Zmin=Z[Z>-np.inf].min()
        Zmax=Z.max()

        print "min/max of log10(histogram) = ", Zmin, Zmax

        fig = plt.figure(1, figsize=(10.0,10.0))

        ax = fig.add_subplot(1,1,1)

        im=ax.imshow(Z.T, vmin=Zmin, vmax=Zmax, origin='lower',interpolation='nearest', extent=[xmin, xmax, ymin, ymax], cmap=cm.get_cmap('jet'))
        ax.contour(Z.T,origin='lower',extent=[xmin, xmax, ymin, ymax], colors='black', vmin=Zmin, vmax=Zmax)
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        ax.set_aspect((x1-x0)/(y1-y0))
        ax.set_xlabel(r'log $\rho/\rho_{b}$', fontsize=20)
        ax.set_ylabel('log T [K]', fontsize=20)

        plt.colorbar(im, shrink=0.5)
        if arepo > 0:
                plt.suptitle('Arepo   z='+str(round(head.redshift,2)))
                plt.savefig('Rho_vs_T_arepo_'+str(num).zfill(3)+format)
        else:
                plt.suptitle('Gadget   z='+str(round(head.redshift,2)))
                plt.savefig('Rho_vs_T_gadget_'+str(num).zfill(3)+format)

        fig.clf()
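
A minimal call of this routine might look like the sketch below. It assumes the module-level imports used above (numpy as np, matplotlib.pyplot as plt, matplotlib.cm as cm, the snapshot reader as rs and the cosmology helpers as co); the base path and snapshot number are hypothetical placeholders.

# hypothetical usage sketch, not part of the original example
PlotRhoVsTemp('/data/run_A/output/', 10, arepo=1, BINS=200, format='.png')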
Example #2
def Pk_comp(snapshot_fname,ptype,dims,do_RSD,axis,cpus,folder_out):

    # read relevant paramaters on the header
    print 'Computing power spectrum...'
    head     = readsnap.snapshot_header(snapshot_fname)
    BoxSize  = head.boxsize/1e3 #Mpc/h
    Masses   = head.massarr*1e10 #Msun/h
    Nall     = head.nall;  Ntotal = np.sum(Nall,dtype=np.int64)
    Omega_m  = head.omega_m
    Omega_l  = head.omega_l
    redshift = head.redshift
    Hubble   = 100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #km/s/(Mpc/h)
    z        = '%.3f'%redshift
        
    # find output file name
    fout = folder_out+'/Pk_' + name_dict[str(ptype)]
    if do_RSD:  fout += ('_RS_axis=' + str(axis) + '_z=' + z + '.dat')
    else:       fout +=                           ('_z=' + z + '.dat')

    # read the positions of the particles
    pos = readsnap.read_block(snapshot_fname,"POS ",parttype=ptype)/1e3 #Mpc/h
    print '%.3f < X [Mpc/h] < %.3f'%(np.min(pos[:,0]),np.max(pos[:,0]))
    print '%.3f < Y [Mpc/h] < %.3f'%(np.min(pos[:,1]),np.max(pos[:,1]))
    print '%.3f < Z [Mpc/h] < %.3f\n'%(np.min(pos[:,2]),np.max(pos[:,2]))

    # read the velocities of the particles
    if do_RSD:
        print 'moving particles to redshift-space...'
        vel = readsnap.read_block(snapshot_fname,"VEL ",parttype=ptype) #km/s
        RSL.pos_redshift_space(pos,vel,BoxSize,Hubble,redshift,axis)
        del vel;  print 'done'

    # define delta array
    delta = np.zeros((dims,dims,dims),dtype=np.float32)

    # when dealing with all particles take into account their different masses
    if ptype==-1:
        if Nall[0]==0: #if not hydro
            M = np.zeros(Ntotal,dtype=np.float32) #define the mass array
            offset = 0
            for ptype in [0,1,2,3,4,5]:
                M[offset:offset+Nall[ptype]] = Masses[ptype]
                offset += Nall[ptype]
        else:
            M = readsnap.read_block(snapshot_fname,"MASS",parttype=-1)*1e10
        
        mean = np.sum(M,dtype=np.float64)/dims**3
        MASL.MA(pos,delta,BoxSize,'CIC',M); del pos,M

    else:  
        mean = len(pos)*1.0/dims**3
        MASL.MA(pos,delta,BoxSize,'CIC'); del pos

    # compute the P(k) and save results to file
    delta /= mean;  delta -= 1.0
    Pk = PKL.Pk(delta,BoxSize,axis=axis,MAS='CIC',threads=cpus);  del delta
    np.savetxt(fout,np.transpose([Pk.k3D, Pk.Pk[:,0], Pk.Pk[:,1], Pk.Pk[:,2],
                                  Pk.Nmodes3D]))
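
A call of Pk_comp might look like the following sketch; it assumes readsnap, RSL, MASL, PKL and the module-level name_dict dictionary are already available, and the snapshot path is a hypothetical placeholder.

# hypothetical usage sketch: real-space CDM power spectrum on a 512^3 grid
snapshot_fname = '/data/snapdir_003/snap_003'  # placeholder path
Pk_comp(snapshot_fname, ptype=1, dims=512, do_RSD=False, axis=0, cpus=8, folder_out='.')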
Example #3
def read_field(snapshot, block, ptype):

    filename, fformat = fname_format(snapshot)
    head = header(filename)
    Masses = head.massarr * 1e10  #Msun/h
    Npart = head.npart  #number of particles in the subfile
    Nall = head.nall  #total number of particles in the snapshot

    if fformat == "binary":
        return readsnap.read_block(filename, block, parttype=ptype)
    else:
        prefix = 'PartType%d/' % ptype
        f = h5py.File(filename, 'r')
        if block == "POS ": suffix = "Coordinates"
        elif block == "MASS": suffix = "Masses"
        elif block == "ID  ": suffix = "ParticleIDs"
        elif block == "VEL ": suffix = "Velocities"
        else: raise Exception('block not implemented in readgadget!')

        if '%s%s' % (prefix, suffix) not in f:
            if Masses[ptype] != 0.0:
                array = np.ones(Npart[ptype], np.float32) * Masses[ptype]
            else:
                raise Exception('Problem reading the block %s' % block)
        else:
            array = f[prefix + suffix][:]
        f.close()

        if block == "VEL ": array *= np.sqrt(head.time)
        if block == "POS " and array.dtype == np.float64:
            array = array.astype(np.float32)

        return array
Example #4
def visualize_mesh(basename, num, Lx, Ly, mi, ma, arepo=1, format=".eps"):
        filename=basename+str(num).zfill(3)
        print filename
        rho = rs.read_block(filename,"RHO ", parttype=0, arepo=arepo)

        filename='voronoi_mesh_'+str(num).zfill(3)
        f = open(filename,'rb')

        ngas=np.fromfile(f,dtype=np.int32,count=1)
        nel=np.fromfile(f,dtype=np.int32,count=1)
        nedgepoints=np.fromfile(f,dtype=np.int32,count=1)
        nedges=np.fromfile(f,dtype=np.int32,count=ngas[0])
        nedges_offest=np.fromfile(f,dtype=np.int32,count=ngas[0])
        edgelist=np.fromfile(f,dtype=np.int32,count=nel[0])
        points=np.fromfile(f,dtype=np.dtype((np.float32,2)),count=nedgepoints[0])
        f.close()

        print "Number        = ", num
        print "Voronoi Cells = ", ngas[0]
        print "nel           = ", nel[0]
        print "Edge points   = ", nedgepoints[0]

        if (Lx>Ly):
                fig = plt.figure(1, figsize=(Lx/Ly*2.5,2.5))
        else:
                fig = plt.figure(1, figsize=(10.0,10.0))
        ax = fig.add_subplot(1,1,1)

        print "min/max of rho = ", min(rho),max(rho)

        rho[rho > ma] = ma
        rho[rho < mi] = mi

        for i in range(0,ngas[0]):
                x = points[edgelist[nedges_offest[i]:nedges_offest[i]+nedges[i]],0]
                y = points[edgelist[nedges_offest[i]:nedges_offest[i]+nedges[i]],1]
                cmap=get_cmap("jet")
                co = cmap( (rho[i]-mi)/(ma-mi) )

                ax.fill(np.append(x,x[0]), np.append(y,y[0]), color=co, edgecolor='white')

                if shape(np.nonzero((x < 0.0) | (x > Lx) | (y < 0.0) | (y > Ly)))[1] > 0:
                        for dx in range(-1,2):
                                for dy in range(-1,2):
                                        ax.fill(np.append(x,x[0])+dx*Lx,np.append(y,y[0])+dy*Ly,color=co, edgecolor='white')

        ax.axis([0.0,Lx,0.0,Ly])
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.xaxis.set_major_locator(MultipleLocator(0.5))
        ax.yaxis.set_major_locator(MultipleLocator(0.5))

        plt.savefig("image_"+str(num).zfill(3)+format)
        clf()
Example #5
def read_snapshot(snapshot, printOut=False):
    ptype = [1]  #[1](CDM), [2](neutrinos) or [1,2](CDM+neutrinos)
    header = rs.snapshot_header(snapshot)  # reads snapshot header

    coords = rs.read_block(
        snapshot, "POS "
    )  # reads particle positions; using block names should work for both format 1 and 2 snapshots
    ids = rs.read_block(
        snapshot, "ID  "
    )  # reads particle IDs; using block names should work for both format 1 and 2 snapshots

    if printOut:
        print("coordinates for", coords.size, "particles read")
        print(coords[0:10])
        print("ids for", ids.size, "particles read")
        print(ids[0:10])

    return [ids, coords]
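
Usage could be as simple as the sketch below, with a hypothetical snapshot name; rs is the snapshot-reading module imported by the surrounding code.

# hypothetical usage sketch
ids, coords = read_snapshot('snap_005', printOut=True)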
Example #6
def read_block(snapshot, block, ptype, verbose=False):

    # find the format of the file and read header
    filename, fformat = fname_format(snapshot)
    head    = header(filename)    
    Nall    = head.nall
    filenum = head.filenum

    # find the total number of particles to read
    Ntotal = 0
    for i in ptype:
        Ntotal += Nall[i]

    # find the dtype of the block
    if   block=="POS ":  dtype=np.dtype((np.float32,3))
    elif block=="VEL ":  dtype=np.dtype((np.float32,3))
    elif block=="MASS":  dtype=np.float32
    elif block=="ID  ":  dtype=read_field(filename, block, ptype[0]).dtype
    else: raise Exception('block not implemented in readgadget!')

    # define the array containing the data
    array = np.zeros(Ntotal, dtype=dtype)


    # do a loop over the different particle types
    offset = 0
    for pt in ptype:

        # format I or format II Gadget files
        if fformat=="binary":
            array[offset:offset+Nall[pt]] = \
                readsnap.read_block(snapshot, block, pt, verbose=verbose)
            offset += Nall[pt]

        # single files (either binary or hdf5)
        elif filenum==1:
            array[offset:offset+Nall[pt]] = read_field(snapshot, block, pt)
            offset += Nall[pt]

        # multi-file hdf5 snapshot
        else:

            # do a loop over the different files
            for i in range(filenum):
                
                # find the name of the file to read
                filename = '%s.%d.hdf5'%(snapshot,i)

                # read number of particles in the file and read the data
                npart = header(filename).npart[pt]
                array[offset:offset+npart] = read_field(filename, block, pt)
                offset += npart   

    if offset!=Ntotal:  raise Exception('not all particles read!!!!')
            
    return array
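
For instance, reading the positions of the CDM particles (type 1), or of CDM plus neutrinos (types 1 and 2), might look like this sketch; the snapshot name is a hypothetical placeholder.

# hypothetical usage sketch
pos_cdm = read_block('snap_004', "POS ", ptype=[1])     # (N,3) float32 array
pos_all = read_block('snap_004', "POS ", ptype=[1, 2])  # CDM + neutrinos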
Example #7
def read_block(snapshot, block, ptype, verbose=False):

    # find the format of the file and read header
    filename, fformat = fname_format(snapshot)
    head    = header(filename)    
    Nall    = head.nall
    filenum = head.filenum

    # find the total number of particles to read
    Ntotal = 0
    for i in ptype:
        Ntotal += Nall[i]

    # find the dtype of the block
    if   block=="POS ":  dtype=np.dtype((np.float32,3))
    elif block=="VEL ":  dtype=np.dtype((np.float32,3))
    elif block=="MASS":  dtype=np.float32
    elif block=="ID  ":  dtype=read_field(filename, block, ptype[0]).dtype
    else: raise Exception('block not implemented in readgadget!')

    # define the array containing the data
    array = np.zeros(Ntotal, dtype=dtype)


    # do a loop over the different particle types
    offset = 0
    for pt in ptype:

        # format I or format II Gadget files
        if fformat=="binary":
            array[offset:offset+Nall[pt]] = \
                readsnap.read_block(snapshot, block, pt, verbose=verbose)
            offset += Nall[pt]

        # single files (either binary or hdf5)
        elif filenum==1:
            array[offset:offset+Nall[pt]] = read_field(snapshot, block, pt)
            offset += Nall[pt]

        # multi-file hdf5 snapshot
        else:

            # do a loop over the different files
            for i in xrange(filenum):
                
                # find the name of the file to read
                filename = '%s.%d.hdf5'%(snapshot,i)

                # read number of particles in the file and read the data
                npart = header(filename).npart[pt]
                array[offset:offset+npart] = read_field(filename, block, pt)
                offset += npart   

    if offset!=Ntotal:  raise Exception('not all particles read!!!!')
            
    return array
Example #8
    def read(self, blocklist, parttype=-1):
        '''Reading method to load particle data from snapshots.
        my_snapshot.read(blocklist, parttype = [0,1])

        Arguments:
        blocklist    List of hdf5 block names to be read (see: 'my_snapshot.show_snapshot_contents()')
        parttype     List of parttypes for which the data should be read, optional, default '-1' (read all types)

        Usage Example: 

        my_snapshot.read(['Velocities', 'Coordinates'], parttype = [0,1])

        Will read coordinates and velocities for gas and dm from the snapshot.
        The data is accessible through 

        my_snapshot.data
        '''
        print("Reading " + str(blocklist) + "from snapshot")
        if type(blocklist) == str:
            blocklist = [blocklist]
        if not self.hdf5:  #use the old method
            for block in blocklist:
                if block == "POS ":
                    self.pos = rs.read_block(self.snapname,
                                             "POS ",
                                             parttype=parttype) / self.const.h
                    self.data["POS "] = self.pos
                elif block == "VEL ":
                    self.vel = rs.read_block(self.snapname,
                                             "VEL ",
                                             parttype=parttype)
                    self.data["VEL "] = self.vel
                elif block == "MASS":
                    self.mass = rs.read_block(
                        self.snapname, "MASS",
                        parttype=parttype) * 1e10 / self.const.h
                    self.data["MASS"] = self.mass
                else:
                    self.data[block] = rs.read_block(self.snapname,
                                                     block,
                                                     parttype=parttype)
        else:  #use the faster hdf5 reading routines
            self.read_hdf5(blocklist, parttype)
Example #9
def read_field(snapshot, block, ptype):

    filename, fformat = fname_format(snapshot)
    head              = header(filename)

    if fformat=="binary":
        return readsnap.read_block(filename, block, parttype=ptype)
    else:
        prefix = 'PartType%d/'%ptype
        f = h5py.File(filename, 'r')
        if   block=="POS ":  suffix = "Coordinates"
        elif block=="MASS":  suffix = "Masses"
        elif block=="ID  ":  suffix = "ParticleIDs"
        elif block=="VEL ":  suffix = "Velocities"
        else: raise Exception('block not implemented in readgadget!')
        array = f[prefix+suffix][:];  f.close()

        if block=="VEL ":  array *= np.sqrt(head.time)
        if block=="POS " and array.dtype==np.float64:
            array = array.astype(np.float32)

        return array
Example #10
def cell_mass_dist(basename, num, arepo=1, bins=100, format=".eps"):
        filename=basename+str(num).zfill(3)

        mass = rs.read_block(filename,"MASS", parttype=0,arepo=arepo)

        meanmass=mass.mean()

        print "mean cell mass       = ", meanmass
        print "min/max of cell mass = ", mass.min(),mass.max()

        fig = plt.figure(1, figsize=(10.0,10.0))
        ax = fig.add_subplot(1,1,1)

        # the histogram of the data
        n, bins, patches = plt.hist(np.log10(mass/meanmass), bins, normed=1, facecolor='blue')

        ax.set_xlabel('log[m/<m>]')
        ax.set_ylabel('df / dlog[m/<m>]')

        plt.savefig("cell_mass_dist_"+str(num).zfill(3)+format)
        clf()
Example #11
def read_field(snapshot, block, ptype):

    filename, fformat = fname_format(snapshot)
    head = header(filename)

    if fformat == "binary":
        return readsnap.read_block(filename, block, parttype=ptype)
    else:
        prefix = 'PartType%d/' % ptype
        f = h5py.File(filename, 'r')
        if block == "POS ": suffix = "Coordinates"
        elif block == "MASS": suffix = "Masses"
        elif block == "ID  ": suffix = "ParticleIDs"
        elif block == "VEL ": suffix = "Velocities"
        else: raise Exception('block not implemented in readgadget!')
        array = f[prefix + suffix][:]
        f.close()

        if block == "VEL ": array *= np.sqrt(head.time)
        if block == "POS " and array.dtype == np.float64:
            array = array.astype(np.float32)

        return array
Example #12
def star_mass_dist(basename, num, arepo=1, bins=100, format=".eps"):
        filename=basename+str(num).zfill(3)

        head=rs.snapshot_header(filename)
        mass = rs.read_block(filename,"MASS", parttype=4,arepo=arepo)
        print mass
        meanmass=mass.mean()

        print "mean star mass       = ", meanmass
        print "min/max of star mass = ", mass.min(),mass.max()

        fig = plt.figure(1, figsize=(10.0,10.0))
        ax = fig.add_subplot(1,1,1)

        # the histogram of the data
        n, bins, patches = plt.hist(np.log10(mass/meanmass), bins, normed=1, facecolor='blue')

        ax.set_xlabel('log[m/<m>]')
        ax.set_ylabel('df / dlog[m/<m>]')
        plt.suptitle('z='+str(round(head.redshift,2)))
        plt.savefig("star_mass_dist_"+str(num).zfill(3)+format)
        fig.clf()
Example #13
# read snapshot head and obtain BoxSize, Omega_m and Omega_L
print '\nREADING SNAPSHOTS PROPERTIES'
head = readsnap.snapshot_header(snapshot_fname)
BoxSize = head.boxsize / 1e3  #Mpc/h
Nall = head.nall
Masses = head.massarr * 1e10  #Msun/h
Omega_m = head.omega_m
Omega_l = head.omega_l
redshift = head.redshift
Hubble = 100.0 * np.sqrt(Omega_m * (1.0 + redshift)**3 + Omega_l)  #h*km/s/Mpc
h = head.hubble

# read the density, electron fraction and internal energy
# rho units: h^2 Msun / Mpc^3
rho = readsnap.read_block(snapshot_fname, "RHO ", parttype=0) * 1e10 / 1e-9
ne = readsnap.read_block(snapshot_fname, "NE  ",
                         parttype=0)  #electron fraction
U = readsnap.read_block(snapshot_fname, "U   ", parttype=0)  #(km/s)^2

# compute the mean molecular weight
yhelium = (1.0 - 0.76) / (4.0 * 0.76)
mean_mol_weight = (1.0 + 4.0 * yhelium) / (1.0 + yhelium + ne)
del ne

# compute the temperature of the gas particles
T = U * (gamma - 1.0) * mH * mean_mol_weight / kB
del U, mean_mol_weight
T = T.astype(np.float64)

print '%.3e < T[K] < %.3e' % (np.min(T), np.max(T))
Example #14
head = readsnap.snapshot_header(snapshot_fname)
BoxSize = head.boxsize / 1e3  #Mpc/h
Nall = head.nall
Masses = head.massarr * 1e10  #Msun/h
Omega_m = head.omega_m
Omega_l = head.omega_l
redshift = head.redshift
Hubble = 100.0 * np.sqrt(Omega_m * (1.0 + redshift)**3 + Omega_l)  #h*km/s/Mpc
h = head.hubble

#find the total number of particles in the simulation
Ntotal = np.sum(Nall, dtype=np.uint64)
print 'Total number of particles in the simulation:', Ntotal

#sort the pos array
ID_unsort = readsnap.read_block(snapshot_fname, "ID  ", parttype=-1) - 1
pos_unsort = readsnap.read_block(snapshot_fname, "POS ",
                                 parttype=-1) / 1e3  #Mpc/h
pos = np.empty((Ntotal, 3), dtype=np.float32)
pos[ID_unsort] = pos_unsort
del pos_unsort, ID_unsort

#sort the R array
ID_unsort = readsnap.read_block(snapshot_fname, "ID  ", parttype=0) - 1
R_unsort = readsnap.read_block(snapshot_fname, "HSML",
                               parttype=0) / 1e3  #Mpc/h
R = np.zeros(Ntotal, dtype=np.float32)
R[ID_unsort] = R_unsort
del R_unsort, ID_unsort

#find the IDs and HI masses of the particles to which HI has been assigned
Example #15
Nall = head.nall
Masses = head.massarr * 1e10  #Msun/h
Omega_m = head.omega_m
Omega_l = head.omega_l
redshift = head.redshift
Hubble = 100.0 * np.sqrt(Omega_m * (1.0 + redshift)**3 + Omega_l)  #h*km/s/Mpc
h = head.hubble

#compute the values of Omega_CDM and Omega_B
Omega_cdm = Nall[1] * Masses[1] / BoxSize**3 / rho_crit
Omega_b = Omega_m - Omega_cdm
print '\nOmega_CDM = %.3f\nOmega_B   = %0.3f\nOmega_M   = %.3f\n'\
    %(Omega_cdm,Omega_b,Omega_m)

#read the positions of all the particles
pos = readsnap.read_block(snapshot_fname, "POS ", parttype=-1) / 1e3  #Mpc/h
print '%.3f < X [Mpc/h] < %.3f' % (np.min(pos[:, 0]), np.max(pos[:, 0]))
print '%.3f < Y [Mpc/h] < %.3f' % (np.min(pos[:, 1]), np.max(pos[:, 1]))
print '%.3f < Z [Mpc/h] < %.3f\n' % (np.min(pos[:, 2]), np.max(pos[:, 2]))

#read the velocities of all the particles
vel = readsnap.read_block(snapshot_fname, "VEL ", parttype=-1)  #km/s

if do_RSD:
    print 'moving particles to redshift-space'
    RSD(pos[:, axis], vel[:, axis], Hubble, redshift)

#read the masses of all the particles
M = readsnap.read_block(snapshot_fname, "MASS", parttype=-1) * 1e10  #Msun/h
print '%.3e < M [Msun/h] < %.3e' % (np.min(M), np.max(M))
print 'Omega_m = %.3f\n' % (np.sum(M, dtype=np.float64) / rho_crit / BoxSize**3)
Example #16
#obtain the positions of the random particles reading/creating a random catalogue
pos_r,RR_name = CFL.create_random_catalogue(random_points,Rmin,Rmax,bins,BoxSize)

#we set here the actions                                                      
DD_action = 'compute'
RR_action = 'read'      #if needed, the RR pairs are computed above           
DR_action = 'compute'

#Only the master will read the positions of the galaxies                      
pos_g = None

if myrank==0:

    #read positions, velocities and IDs of DM particles: sort the IDs array
    DM_pos = readsnap.read_block(snapshot_fname,"POS ",parttype=-1)  #kpc/h
    DM_vel = readsnap.read_block(snapshot_fname,"VEL ",parttype=-1)  #km/s
    #IDs should go from 0 to N-1, instead from 1 to N
    DM_ids = readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1
    if np.min(DM_ids)!=0 or np.max(DM_ids)!=(len(DM_pos)-1):
        print 'Error!!!!'; print 'IDs should go from 0 to N-1'
    print len(DM_ids),np.min(DM_ids),np.max(DM_ids)
    sorted_ids = DM_ids.argsort(axis=0); del DM_ids
    #the particle whose ID is N is located in the position sorted_ids[N]
    #i.e. DM_ids[sorted_ids[N]]=N
    #the position of the particle whose ID is N would be:
    #DM_pos[sorted_ids[N]]

    #read the IDs of the particles belonging to the CDM halos
    #again the IDs should go from 0 to N-1
    halos_ID = readsubf.subf_ids(groups_fname,groups_number,0,0,
Example #17
            snapnum = LM['snapnum'][ll]
            zl = LM['zl'][ll]

            # Only load new particle data if lens is at another snapshot
            if (previous_snapnum != snapnum):
                rks_file = '/cosma5/data/dp004/dc-beck3/rockstar/'+sim_phy[sim]+ \
                           sim_name[sim]+'/halos_' + str(snapnum)+'.dat'
                hdata = pd.read_csv(rks_file,
                                    sep='\s+',
                                    skiprows=np.arange(1, 16))
                # Load Particle Properties
                # 0 Gas, 1 DM, 4 Star[Star=+time & Wind=-time], 5 BH
                snap = snapfile % (snapnum, snapnum)
                #gas_pos = readsnap.read_block(snap, 'POS ', parttype=0)*scale
                #gas_vel = readsnap.read_block(snap, 'VEL ', parttype=0)
                star_pos = readsnap.read_block(snap, 'POS ',
                                               parttype=4) * scale
                star_age = readsnap.read_block(snap, 'AGE ', parttype=4)
                star_vel = readsnap.read_block(snap, 'VEL ', parttype=4)
                star_mass = readsnap.read_block(snap, 'MASS',
                                                parttype=4) * 1e10 / h
                star_pos = star_pos[star_age >= 0]
                star_vel = star_vel[star_age >= 0]
                star_mass = star_mass[star_age >= 0]
                del star_age
                #star_pos = np.vstack((star_pos, gas_pos))
                #del gas_pos
                #star_vel = np.vstack((star_vel, gas_vel))
                #del gas_vel
            previous_snapnum = snapnum

            # Load Halo Properties
Example #18
head=readsnap.snapshot_header(snapshot_fname)
BoxSize=head.boxsize/1e3 #Mpc/h
Nall=head.nall
Masses=head.massarr*1e10 #Msun/h
Omega_m=head.omega_m
Omega_l=head.omega_l
redshift=head.redshift
Hubble=100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #h*km/s/Mpc
h=head.hubble

#find the total number of particles in the simulation
Ntotal=np.sum(Nall,dtype=np.uint64)
print 'Total number of particles in the simulation =',Ntotal

#sort the pos and vel array
ID_unsort=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1 #normalized
print 'sorting the POS array...'
pos_unsort=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)/1e3 #Mpc/h
pos=np.empty((Ntotal,3),dtype=np.float32); pos[ID_unsort]=pos_unsort; del pos_unsort
if do_RSD:
    print 'sorting the VEL array...'
    vel_unsort=readsnap.read_block(snapshot_fname,"VEL ",parttype=-1) #km/s
    vel=np.empty((Ntotal,3),dtype=np.float32); vel[ID_unsort]=vel_unsort; del vel_unsort
del ID_unsort

#find the IDs and HI masses of the particles to which HI has been assigned
if method=='Dave':
    [IDs,M_HI]=HIL.Dave_HI_assignment(snapshot_fname,HI_frac,fac)
elif method=='method_1': 
    [IDs,M_HI]=HIL.method_1_HI_assignment(snapshot_fname,HI_frac,Omega_HI_ref)
elif method=='Barnes':
Example #19
head     = readsnap.snapshot_header(snapshot_fname)
BoxSize  = head.boxsize/1e3 #Mpc/h
Nall     = head.nall
Masses   = head.massarr*1e10 #Msun/h
Omega_m  = head.omega_m
Omega_l  = head.omega_l
redshift = head.redshift
Hubble   = 100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #h*km/s/Mpc
h        = head.hubble

#find the total number of particles in the simulation
Ntotal = np.sum(Nall,dtype=np.uint64)
print 'Total number of particles in the simulation =',Ntotal

#sort the pos and vel array
ID_unsort=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1 #normalized
print 'sorting the POS array...'
pos_unsort=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)/1e3 #Mpc/h
pos=np.empty((Ntotal,3),dtype=np.float32); pos[ID_unsort]=pos_unsort; 
del pos_unsort
if Pk_HI_redshift_space:
    print 'sorting the VEL array...'
    vel_unsort=readsnap.read_block(snapshot_fname,"VEL ",parttype=-1) #km/s
    vel=np.empty((Ntotal,3),dtype=np.float32); vel[ID_unsort]=vel_unsort; 
    del vel_unsort
del ID_unsort

#find the IDs and HI masses of the particles to which HI has been assigned
if method == 'Dave':
    [IDs,M_HI]=HIL.Dave_HI_assignment(snapshot_fname,HI_frac,fac)
elif method=='method_1': 
Example #20
    depth = y_max - y_min
    offset1 = x_min
    offset2 = z_min
else:
    length1 = y_max - y_min
    length2 = z_max - z_min
    depth = x_max - x_min
    offset1 = y_min
    offset2 = z_min
if length1 != length2:
    print 'Plane has to be a square!!!'
    sys.exit()
BoxSize_slice = length1

#read positions and masses of the CDM particles
pos = readsnap.read_block(snapshot_fname, "POS ", parttype=1) / 1e3  #Mpc/h
mass = readsnap.read_block(snapshot_fname, "MASS", parttype=1) * 1e10  #Msun/h

#compute the mean mass in each cell of the slice
mass_density = np.sum(mass,
                      dtype=np.float64) * 1.0 / BoxSize**3  #mass/(Mpc/h)^3
V_cell = BoxSize_slice**2 * depth * 1.0 / dims**2  #slice cell volume in (Mpc/h)^3
mean_mass = mass_density * V_cell

#keep only with the particles in the slice
indexes = np.where((pos[:, 0] > x_min) & (pos[:, 0] < x_max)
                   & (pos[:, 1] > y_min) & (pos[:, 1] < y_max)
                   & (pos[:, 2] > z_min) & (pos[:, 2] < z_max))
pos = pos[indexes]
mass = mass[indexes]
print 'Coordinates of the particles in the slice:'
Example #21
def HaloProfiles(basename, num, centre, r200, rmin=0.05, rmax=10.0, bins=50, arepo=1, gamma=5./3., format=".eps"):	
	filename=basename+str(num).zfill(3)

        head=rs.snapshot_header(filename)
	mass_gas = rs.read_block(filename,"MASS", parttype=0,arepo=arepo).astype('float64')	
	mass_DM = rs.read_block(filename,"MASS", parttype=1,arepo=arepo).astype('float64')		
	pos_gas = rs.read_block(filename,"POS ", parttype=0,arepo=arepo).astype('float64')	
	pos_DM = rs.read_block(filename,"POS ", parttype=1,arepo=arepo).astype('float64')		
	u = rs.read_block(filename,"U   ", parttype=0,arepo=arepo).astype('float64')	
	rho = rs.read_block(filename,"RHO ", parttype=0,arepo=arepo).astype('float64')		
	Nele = rs.read_block(filename,"NE  ", parttype=0,arepo=arepo).astype('float64')			
	
	print "Centre     = ", centre
	print "R200       = ", r200
	print "rmin/rmax  = ", rmin, rmax 
	
	x=pos_gas[:,0] - centre[0]
	y=pos_gas[:,1] - centre[1]
	z=pos_gas[:,2] - centre[2]		
	r_gas=np.sqrt(x**2. + y**2. + z**2.) / r200
	

	x=pos_DM[:,0] - centre[0]
	y=pos_DM[:,1] - centre[1]
	z=pos_DM[:,2] - centre[2]		
	r_DM=np.sqrt(x**2. + y**2. + z**2.) / r200
	
	rmin=np.log10(rmin)
	rmax=np.log10(rmax)
	
        dlog10=(rmax-rmin)/bins
	rho_DM_bin=np.zeros(bins)		
	rho_gas_bin=np.zeros(bins)	
	temp_bin=np.zeros(bins)
	entropy_bin=np.zeros(bins)

	rbinm=10.**((np.arange(bins)+0.5)*dlog10 + rmin)

	for n in range(0,bins):
		r1=10.**((n+0.)*dlog10 + rmin)
		r2=10.**((n+1.)*dlog10 + rmin)		
		index_gas=((r_gas>r1) & (r_gas<r2)).nonzero()
		index_DM=((r_DM>r1) & (r_DM<r2)).nonzero()		

		totmass_gas=mass_gas[index_gas].sum()
		totmass_DM=mass_DM[index_DM].sum()		

		rho_gas_bin[n]=totmass_gas/(4.*np.pi/3.*(r2**3.-r1**3.)*r200**3.)
		rho_DM_bin[n]=totmass_DM/(4.*np.pi/3.*(r2**3.-r1**3.)*r200**3.)		

		if (totmass_gas > 0.):
			entropy_bin[n]=np.average(co.GetEntropy(u[index_gas],rho[index_gas],gamma), weights=mass_gas[index_gas]) 
			temp_bin[n]=np.average(co.GetTemp(u[index_gas],Nele[index_gas],gamma), weights=mass_gas[index_gas])
								
	
        fig = plt.figure(1, figsize=(10.0,10.0))
        ax = fig.add_subplot(2,2,1)
	ax.set_xlabel('$r/r_{200}$')
	ax.set_ylabel(r'$\rho_{DM}$ [$h^2$ M$_\odot$ Kpc$^{-3}$]')
	ax.loglog()
	ax.plot(rbinm, 10.0**10.0*rho_DM_bin)
	ax.set_xlim((10.0**rmin,10.0**rmax))	

        ax = fig.add_subplot(2,2,2)
	ax.set_xlabel('$r/r_{200}$')
	ax.set_ylabel(r'$\rho_{gas}$ [$h^2$ M$_\odot$ Kpc$^{-3}$]')
	ax.loglog()
	ax.plot(rbinm, 10.0**10.0*rho_gas_bin)
	ax.set_xlim((10.0**rmin,10.0**rmax))	

        ax = fig.add_subplot(2,2,3)
	ax.set_xlabel('$r/r_{200}$')
	ax.set_ylabel('$T_{gas}$ [K]')
	ax.loglog()
	ax.plot(rbinm, temp_bin)
	ax.set_xlim((10.0**rmin,10.0**rmax))	
	
        ax = fig.add_subplot(2,2,4)
	ax.set_xlabel('$r/r_{200}$')
	ax.set_ylabel('Entropy')
	ax.loglog()
	ax.plot(rbinm, entropy_bin)
	ax.set_xlim((10.0**rmin,10.0**rmax))	

	plt.savefig("HaloProfiles_"+str(num).zfill(3)+format)
Example #22
# read snapshot head and obtain BoxSize, Omega_m and Omega_L
print '\nREADING SNAPSHOTS PROPERTIES'
head     = readsnap.snapshot_header(snapshot_fname)
BoxSize  = head.boxsize/1e3 #Mpc/h
Nall     = head.nall
Masses   = head.massarr*1e10 #Msun/h
Omega_m  = head.omega_m
Omega_l  = head.omega_l
redshift = head.redshift
Hubble   = 100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #h*km/s/Mpc
h        = head.hubble

# read the density, electron fraction and internal energy
# rho units: h^2 Msun / Mpc^3
rho = readsnap.read_block(snapshot_fname,"RHO ",parttype=0)*1e10/1e-9 
ne  = readsnap.read_block(snapshot_fname,"NE  ",parttype=0) #electron fraction
U   = readsnap.read_block(snapshot_fname,"U   ",parttype=0) #(km/s)^2

# compute the mean molecular weight
yhelium = (1.0-0.76)/(4.0*0.76) 
mean_mol_weight = (1.0+4.0*yhelium)/(1.0+yhelium+ne);  del ne

# compute the temperature of the gas particles
T = U*(gamma-1.0)*mH*mean_mol_weight/kB;  del U, mean_mol_weight
T = T.astype(np.float64)

print '%.3e < T[K] < %.3e'%(np.min(T),np.max(T))

mean_rho_b      = Omega_b*rho_crit
rho_overdensity = rho/mean_rho_b;  del rho
Example #23
def density_field_2D(snapshot_fname, x_min, x_max, y_min, y_max, z_min, z_max,
                     dims, ptypes, plane, MAS, save_density_field):

    plane_dict = {'XY': [0, 1], 'XZ': [0, 2], 'YZ': [1, 2]}

    # read snapshot head and obtain BoxSize, filenum...
    head = readsnap.snapshot_header(snapshot_fname)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Nall = head.nall
    Masses = head.massarr * 1e10  #Msun/h
    filenum = head.filenum
    redshift = head.redshift

    # find the geometric values of the density field square
    len_x, off_x, len_y, off_y, depth, BoxSize_slice = \
            geometry(snapshot_fname, plane, x_min, x_max, y_min, y_max,
                    z_min, z_max)

    # compute the mean density in the box
    if len(ptypes) == 1 and Masses[ptypes[0]] != 0.0:
        single_specie = True
    else:
        single_specie = False

    # define the density array
    overdensity = np.zeros((dims, dims), dtype=np.float32)

    # do a loop over all subfiles in the snapshot
    total_mass, mass_slice = 0.0, 0.0
    renormalize_2D = False
    for i in xrange(filenum):

        # find the name of the subfile
        snap = snapshot_fname + '.%d' % i

        # in the last snapshot we renormalize the field
        if i == filenum - 1: renormalize_2D = True

        # do a loop over
        for ptype in ptypes:

            # read the positions of the particles in Mpc/h
            pos = readsnap.read_block(snap, "POS ", parttype=ptype) / 1e3

            if single_specie: total_mass += len(pos)

            # keep only with the particles in the slice
            indexes = np.where((pos[:, 0] > x_min) & (pos[:, 0] < x_max)
                               & (pos[:, 1] > y_min) & (pos[:, 1] < y_max)
                               & (pos[:, 2] > z_min) & (pos[:, 2] < z_max))
            pos = pos[indexes]

            # renormalize positions
            pos[:, 0] -= x_min
            pos[:, 1] -= y_min
            pos[:, 2] -= z_min

            # project particle positions into a 2D plane
            pos = pos[:, plane_dict[plane]]

            # read the masses of the particles in Msun/h
            if not (single_specie):
                mass = readsnap.read_block(snap, "MASS", parttype=ptype) * 1e10
                total_mass += np.sum(mass, dtype=np.float64)
                mass = mass[indexes]
                MASL.MA(pos,
                        overdensity,
                        BoxSize_slice,
                        MAS=MAS,
                        W=mass,
                        renormalize_2D=renormalize_2D)
            else:
                mass_slice += len(pos)
                MASL.MA(pos,
                        overdensity,
                        BoxSize_slice,
                        MAS=MAS,
                        W=None,
                        renormalize_2D=renormalize_2D)

    print 'Expected mass = %.7e' % mass_slice
    print 'Computed mass = %.7e' % np.sum(overdensity, dtype=np.float64)

    # compute mean density in the whole box
    mass_density = total_mass * 1.0 / BoxSize**3  #(Msun/h)/(Mpc/h)^3 or #/(Mpc/h)^3

    print 'mass density = %.5e' % mass_density

    # compute the volume of each cell in the density field slice
    V_cell = BoxSize_slice**2 * depth * 1.0 / dims**2  #(Mpc/h)^3

    # compute the mean mass in each cell of the slice
    mean_mass = mass_density * V_cell  #Msun/h or #

    # compute overdensities
    overdensity /= mean_mass
    print np.min(overdensity), '< rho/<rho> <', np.max(overdensity)

    # in our convention overdensity(x,y), while for matplotlib is
    # overdensity(y,x), so we need to transpose the field
    overdensity = np.transpose(overdensity)

    # save density field to file
    f_df = density_field_name(snapshot_fname, x_min, x_max, y_min, y_max,
                              z_min, z_max, dims, ptypes, plane, MAS)
    if save_density_field: np.save(f_df, overdensity)

    return overdensity
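
The routine might be driven as in the sketch below; geometry(), density_field_name(), readsnap and MASL are assumed to be defined in the surrounding module, and the slice limits (Mpc/h) and snapshot name are hypothetical placeholders.

# hypothetical usage sketch: CDM overdensity in a 10 Mpc/h thick XY slice
overdensity = density_field_2D('snap_003', x_min=0.0, x_max=500.0,
                               y_min=0.0, y_max=500.0, z_min=0.0, z_max=10.0,
                               dims=1024, ptypes=[1], plane='XY', MAS='CIC',
                               save_density_field=True)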
Example #24
#########################################################################
# read snapshot head and obtain BoxSize, Omega_m and Omega_L
print '\nREADING SNAPSHOTS PROPERTIES'
head     = readsnap.snapshot_header(snapshot_fname)
BoxSize  = head.boxsize/1e3  #Mpc/h                      
Nall     = head.nall
Masses   = head.massarr*1e10 #Msun/h              
Omega_m  = head.omega_m
Omega_l  = head.omega_l
redshift = head.redshift
Hubble   = 100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)#km/s/(Mpc/h)
h        = head.hubble
z        = '%.3f'%redshift
#########################################################################

#########################################################################
pos = readsnap.read_block(snapshot_fname,"POS ",parttype=1)/1e3 #Mpc/h
vel = readsnap.read_block(snapshot_fname,"VEL ",parttype=1)     #km/s
#########################################################################

#########################################################################
# read positions and velocities of halos
FoF   = readfof.FoF_catalog(snapdir,snapnum,long_ids=False,
                            swap=False,SFR=False,read_IDs=True)
pos_h = FoF.GroupPos/1e3            #Mpc/h
mass  = FoF.GroupMass*1e10          #Msun/h
vel_h = FoF.GroupVel*(1.0+redshift) #km/s
indexes = np.where(mass>Mmin)[0]
pos_h = pos_h[indexes];  vel_h = vel_h[indexes];  del indexes
#########################################################################
Example #25
head = readsnap.snapshot_header(snapshot_fname)
BoxSize = head.boxsize / 1e3  #Mpc/h
Nall = head.nall
Masses = head.massarr * 1e10  #Msun/h
Omega_m = head.omega_m
Omega_l = head.omega_l
redshift = head.redshift
Hubble = 100.0 * np.sqrt(Omega_m * (1.0 + redshift)**3 + Omega_l)  #h*km/s/Mpc
h = head.hubble

#find the total number of particles in the simulation
Ntotal = np.sum(Nall, dtype=np.uint64)
print 'Total number of particles in the simulation =', Ntotal

#sort the pos and vel array
ID_unsort = readsnap.read_block(snapshot_fname, "ID  ",
                                parttype=-1) - 1  #normalized
print 'sorting the POS array...'
pos_unsort = readsnap.read_block(snapshot_fname, "POS ",
                                 parttype=-1) / 1e3  #Mpc/h
pos = np.empty((Ntotal, 3), dtype=np.float32)
pos[ID_unsort] = pos_unsort
del pos_unsort
if Pk_HI_redshift_space:
    print 'sorting the VEL array...'
    vel_unsort = readsnap.read_block(snapshot_fname, "VEL ",
                                     parttype=-1)  #km/s
    vel = np.empty((Ntotal, 3), dtype=np.float32)
    vel[ID_unsort] = vel_unsort
    del vel_unsort
del ID_unsort
Example #26
head=readsnap.snapshot_header(snapshot_fname)
BoxSize=head.boxsize/1e3 #Mpc/h
Nall=head.nall
Masses=head.massarr*1e10 #Msun/h
Omega_m=head.omega_m
Omega_l=head.omega_l
redshift=head.redshift
Hubble=100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #h*km/s/Mpc
h=head.hubble

#find the total number of particles in the simulation
Ntotal=np.sum(Nall,dtype=np.uint64)
print 'Total number of particles in the simulation:',Ntotal

#sort the pos array
ID_unsort=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1 #normalized
pos_unsort=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)/1e3 #Mpc/h
pos=np.empty((Ntotal,3),dtype=np.float32); pos[ID_unsort]=pos_unsort
del pos_unsort, ID_unsort

#sort the R array (note that only gas particles have an associated R)
ID_unsort=readsnap.read_block(snapshot_fname,"ID  ",parttype=0)-1 #normalized
R_unsort=readsnap.read_block(snapshot_fname,"HSML",parttype=0)/1e3 #Mpc/h
R=np.zeros(Ntotal,dtype=np.float32); R[ID_unsort]=R_unsort
del R_unsort, ID_unsort

#find the IDs and HI masses of the particles to which HI has been assigned
if method=='Dave':
    [IDs,M_HI]=HIL.Dave_HI_assignment(snapshot_fname,HI_frac,fac)
elif method=='method_1': 
    [IDs,M_HI]=HIL.method_1_HI_assignment(snapshot_fname,HI_frac,Omega_HI_ref)
Example #27
def Pk_Gadget(snapshot_fname,dims,particle_type,do_RSD,axis,cpus,
              folder_out=None):

    # find folder to place output files. Default is current directory
    if folder_out is None:  folder_out = os.getcwd()

    # for either one single species or all species use this routine
    if len(particle_type)==1:
        Pk_comp(snapshot_fname,particle_type[0],dims,do_RSD,
                axis,cpus,folder_out)
        return None

    # read snapshot head and obtain BoxSize, Omega_m and Omega_L
    print '\nREADING SNAPSHOTS PROPERTIES'
    head     = readsnap.snapshot_header(snapshot_fname)
    BoxSize  = head.boxsize/1e3  #Mpc/h
    Nall     = head.nall
    Masses   = head.massarr*1e10 #Msun/h
    Omega_m  = head.omega_m
    Omega_l  = head.omega_l
    redshift = head.redshift
    Hubble   = 100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #km/s/(Mpc/h)
    h        = head.hubble
    z        = '%.3f'%redshift
    dims3    = dims**3

    # compute the values of Omega_cdm, Omega_nu, Omega_gas and Omega_s
    Omega_c = Masses[1]*Nall[1]/BoxSize**3/rho_crit
    Omega_n = Masses[2]*Nall[2]/BoxSize**3/rho_crit
    Omega_g, Omega_s = 0.0, 0.0
    if Nall[0]>0:
        if Masses[0]>0:  
            Omega_g = Masses[0]*Nall[0]/BoxSize**3/rho_crit
            Omega_s = Masses[4]*Nall[4]/BoxSize**3/rho_crit
        else:    
            # mass in Msun/h
            mass = readsnap.read_block(snapshot_fname,"MASS",parttype=0)*1e10 
            Omega_g = np.sum(mass,dtype=np.float64)/BoxSize**3/rho_crit
            mass = readsnap.read_block(snapshot_fname,"MASS",parttype=4)*1e10
            Omega_s = np.sum(mass,dtype=np.float64)/BoxSize**3/rho_crit
            del mass

    # some verbose
    print 'Omega_gas    = ',Omega_g
    print 'Omega_cdm    = ',Omega_c
    print 'Omega_nu     = ',Omega_n
    print 'Omega_star   = ',Omega_s
    print 'Omega_m      = ',Omega_g + Omega_c + Omega_n + Omega_s
    print 'Omega_m snap = ',Omega_m

    # dictionary giving the value of Omega for each component
    Omega_dict = {0:Omega_g, 1:Omega_c, 2:Omega_n, 4:Omega_s}
    #####################################################################

    # define the array containing the deltas
    delta = [[],[],[],[]]  #array containing the gas, CDM, NU and stars deltas

    # dictionary among particle type and the index in the delta and Pk arrays
    # delta of stars (ptype=4) is delta[3] not delta[4]
    index_dict = {0:0, 1:1, 2:2, 4:3} 

    # define suffix here
    if do_RSD:  suffix = '_RS_axis=' + str(axis) + '_z=' + z + '.dat'
    else:       suffix =                           '_z=' + z + '.dat'
    #####################################################################

    # do a loop over all particle types and compute the deltas
    for ptype in particle_type:
    
        # read particle positions in #Mpc/h
        pos = readsnap.read_block(snapshot_fname,"POS ",parttype=ptype)/1e3 

        # move particle positions to redshift-space
        if do_RSD:
            vel = readsnap.read_block(snapshot_fname,"VEL ",parttype=ptype)#km/s
            RSL.pos_redshift_space(pos,vel,BoxSize,Hubble,redshift,axis)
            del vel

        # find the index of the particle type in the delta array
        index = index_dict[ptype]

        # compute mean number of particles per grid cell
        mean_number = len(pos)*1.0/dims3

        # compute the deltas
        delta[index] = np.zeros((dims,dims,dims),dtype=np.float32)
        MASL.MA(pos,delta[index],BoxSize,'CIC');  del pos
        delta[index] /= mean_number;  delta[index] -= 1.0
    #####################################################################

    #####################################################################
    # if there are two or more particles compute auto- and cross-power spectra
    for i,ptype1 in enumerate(particle_type):
        for ptype2 in particle_type[i+1:]:

            # find the indexes of the particle types
            index1 = index_dict[ptype1];  index2 = index_dict[ptype2]

            # choose the name of the output files
            fout1  = '/Pk_' + name_dict[str(ptype1)]             + suffix
            fout2  = '/Pk_' + name_dict[str(ptype2)]             + suffix
            fout12 = '/Pk_' + name_dict[str(ptype1)+str(ptype2)] + suffix
            fout1  = folder_out + fout1
            fout2  = folder_out + fout2
            fout12 = folder_out + fout12

            # some verbose
            print '\nComputing the auto- and cross-power spectra of types: '\
                ,ptype1,'-',ptype2
            print 'saving results in:';  print fout1,'\n',fout2,'\n',fout12

            # This routine computes the auto- and cross-power spectra
            data = PKL.XPk([delta[index1],delta[index2]],BoxSize,axis=axis,
                           MAS=['CIC','CIC'],threads=cpus)
                                                        
            k = data.k3D;   Nmodes = data.Nmodes3D

            # save power spectra results in the output files
            np.savetxt(fout12,np.transpose([k,
                                            data.XPk[:,0,0],
                                            data.XPk[:,1,0],
                                            data.XPk[:,2,0],
                                            Nmodes]))
            np.savetxt(fout1, np.transpose([k,
                                            data.Pk[:,0,0],
                                            data.Pk[:,1,0],
                                            data.Pk[:,2,0],
                                            Nmodes]))
            np.savetxt(fout2, np.transpose([k,
                                            data.Pk[:,0,1],
                                            data.Pk[:,1,1],
                                            data.Pk[:,2,1],
                                            Nmodes]))
    #####################################################################

    #####################################################################
    # compute the power spectrum of the sum of all components
    print '\ncomputing P(k) of all components'

    # define delta of all components
    delta_tot = np.zeros((dims,dims,dims),dtype=np.float32)

    Omega_tot = 0.0;  fout = folder_out + '/Pk_'
    for ptype in particle_type:
        index = index_dict[ptype]
        delta_tot += (Omega_dict[ptype]*delta[index])
        Omega_tot += Omega_dict[ptype]
        fout += name_dict[str(ptype)] + '+'

    delta_tot /= Omega_tot;  del delta;  fout = fout[:-1] #avoid '+' in the end
    
    # compute power spectrum
    data = PKL.Pk(delta_tot,BoxSize,axis=axis,MAS='CIC',
                  threads=cpus);  del delta_tot

    # write P(k) to output file
    np.savetxt(fout+suffix, np.transpose([data.k3D,
                                          data.Pk[:,0],
                                          data.Pk[:,1],
                                          data.Pk[:,2],
                                          data.Nmodes3D]))
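
Computing the spectra of a hydrodynamic snapshot containing gas, CDM, neutrinos and stars could then be a single call, as in this sketch; the snapshot path is a hypothetical placeholder and rho_crit, name_dict, readsnap, RSL, MASL and PKL are assumed to be defined at module level as above.

# hypothetical usage sketch
Pk_Gadget('/data/snapdir_003/snap_003', dims=512, particle_type=[0, 1, 2, 4],
          do_RSD=False, axis=0, cpus=8)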
Example #28
Omega_l = head.omega_l
redshift = head.redshift
Hubble = 100.0 * np.sqrt(Omega_m * (1.0 + redshift)**3 + Omega_l)  #h*km/s/Mpc
h = head.hubble

#split the particle IDs among CDM, gas and stars and also among environment
indexes = HIL.particle_indexes(snapshot_fname, groups_fname, groups_number,
                               long_ids_flag, SFR_flag, mass_interval,
                               min_mass, max_mass)

#find the total number of particles in the simulation
Ntotal = np.sum(Nall, dtype=np.uint64)
print 'Total number of particles in the simulation =', Ntotal

#sort the pos array
ID_unsort = readsnap.read_block(snapshot_fname, "ID  ",
                                parttype=-1) - 1  #normalized
pos_unsort = readsnap.read_block(snapshot_fname, "POS ",
                                 parttype=-1) / 1e3  #Mpc/h
pos = np.empty((Ntotal, 3), dtype=np.float32)
pos[ID_unsort] = pos_unsort

#sort the SPH radii array (only required if SPH_gas used)
if Pk_method == 'SPH':
    ID_unsort = readsnap.read_block(snapshot_fname, "ID  ",
                                    parttype=0) - 1  #normalized
    radii_unsort = readsnap.read_block(snapshot_fname, "HSML",
                                       parttype=0) / 1e3  #Mpc/h
    radii = np.empty(Ntotal, dtype=np.float32)
    radii[ID_unsort] = radii_unsort
    del radii_unsort, ID_unsort
Example #29
#################################### INPUT ####################################
snapshot_fname = '../ics'
bins = 100  #number of bins for the distribution

# parameters for the FD distribution
Mnu = 0.6  #eV
h_planck = 6.582e-16  #eV*s
kB = 8.617e-5  #eV/K
c = 3e5  #km/s
Tnu = 1.95  #K
###############################################################################

########## fraction from simulation ###########
# read snapshot redshift and neutrino velocities
z = readsnap.snapshot_header(snapshot_fname).redshift
vel = readsnap.read_block(snapshot_fname, "VEL ", parttype=2)  #km/s

# compute velocity modulus
V = np.sqrt(vel[:, 0]**2 + vel[:, 1]**2 + vel[:, 2]**2)
del vel

# define the velocity intervals, their mean value and their widths
vel_min, vel_max = np.min(V), np.max(V)
if vel_min == 0.0: vel_min = 1e-3
vel_intervals = np.logspace(np.log10(vel_min), np.log10(vel_max), bins + 1)
dV = vel_intervals[1:] - vel_intervals[:-1]  #km/s
V_mean = 0.5 * (vel_intervals[1:] + vel_intervals[:-1])  #km/s

# compute the fraction of neutrinos within each velocity bin
hist = (np.histogram(V, bins=vel_intervals)[0]) * 1.0 / len(V)
###############################################
Example #30
def compute_Pk(snapshot_fname,dims,do_RSD,axis,hydro):

    # read snapshot head and obtain BoxSize, Omega_m and Omega_L
    print '\nREADING SNAPSHOTS PROPERTIES'
    head     = readsnap.snapshot_header(snapshot_fname)
    BoxSize  = head.boxsize/1e3 #Mpc/h
    Nall     = head.nall
    Masses   = head.massarr*1e10 #Msun/h
    Omega_m  = head.omega_m
    Omega_l  = head.omega_l
    redshift = head.redshift
    Hubble   = 100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #h*km/s/Mpc
    h        = head.hubble

    z = '%.3f'%redshift
    f_out = 'Pk_m_z='+z+'.dat'

    # compute the values of Omega_CDM and Omega_B
    Omega_cdm = Nall[1]*Masses[1]/BoxSize**3/rho_crit
    Omega_nu  = Nall[2]*Masses[2]/BoxSize**3/rho_crit
    Omega_b   = Omega_m-Omega_cdm-Omega_nu
    print '\nOmega_CDM = %.4f\nOmega_B   = %0.4f\nOmega_NU  = %.4f'\
        %(Omega_cdm,Omega_b,Omega_nu)
    print 'Omega_M   = %.4f'%(Omega_m)

    # read the positions of all the particles
    pos = readsnap.read_block(snapshot_fname,"POS ",parttype=-1)/1e3 #Mpc/h
    print '%.3f < X [Mpc/h] < %.3f'%(np.min(pos[:,0]),np.max(pos[:,0]))
    print '%.3f < Y [Mpc/h] < %.3f'%(np.min(pos[:,1]),np.max(pos[:,1]))
    print '%.3f < Z [Mpc/h] < %.3f\n'%(np.min(pos[:,2]),np.max(pos[:,2]))

    if do_RSD:
        print 'moving particles to redshift-space'
        # read the velocities of all the particles
        vel = readsnap.read_block(snapshot_fname,"VEL ",parttype=-1) #km/s
        RSL.pos_redshift_space(pos,vel,BoxSize,Hubble,redshift,axis);  del vel

    # read the masses of all the particles
    if not(hydro):
        Ntotal = np.sum(Nall,dtype=np.int64)   #compute the number of particles
        M = np.zeros(Ntotal,dtype=np.float32)  #define the mass array
        offset = 0
        for ptype in [0,1,2,3,4,5]:
            M[offset:offset+Nall[ptype]] = Masses[ptype];  offset += Nall[ptype]
    else:
        M = readsnap.read_block(snapshot_fname,"MASS",parttype=-1)*1e10 #Msun/h
    print '%.3e < M [Msun/h] < %.3e'%(np.min(M),np.max(M))
    print 'Omega_M = %.4f\n'%(np.sum(M,dtype=np.float64)/rho_crit/BoxSize**3)

    # compute the mean mass per grid cell
    mean_M = np.sum(M,dtype=np.float64)/dims**3

    # compute the mass within each grid cell
    delta = np.zeros(dims**3,dtype=np.float32)
    CIC.CIC_serial(pos,dims,BoxSize,delta,M); del pos
    print '%.6e should be equal to \n%.6e\n'\
        %(np.sum(M,dtype=np.float64),np.sum(delta,dtype=np.float64)); del M

    # compute the density constrast within each grid cell
    delta/=mean_M; delta-=1.0
    print '%.3e < delta < %.3e\n'%(np.min(delta),np.max(delta))

    # compute the P(k)
    Pk = PSL.power_spectrum_given_delta(delta,dims,BoxSize)

    # write P(k) to output file
    np.savetxt(f_out,np.transpose([Pk[0],Pk[1]]))
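
A call might look like the sketch below; the snapshot path is a hypothetical placeholder, and rho_crit, readsnap, RSL, CIC and PSL are assumed to be defined at module level.

# hypothetical usage sketch: real-space matter P(k) of a hydro snapshot
compute_Pk('/data/snapdir_003/snap_003', dims=512, do_RSD=False, axis=0, hydro=True)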
Example #31
Masses=head.massarr*1e10 #Msun/h
Omega_m=head.omega_m
Omega_l=head.omega_l
redshift=head.redshift
Hubble=100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #h*km/s/Mpc
h=head.hubble

#find the total number of particles in the simulation
Ntotal=np.sum(Nall,dtype=np.uint64)
print 'Total number of particles in the simulation =',Ntotal



################################## HI ###########################################
#sort the pos and vel array
ID_unsort=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1 #normalized
print 'sorting the POS array...'
pos_unsort=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)/1e3 #Mpc/h
pos=np.empty((Ntotal,3),dtype=np.float32); pos[ID_unsort]=pos_unsort
del pos_unsort,ID_unsort

#find the IDs and HI masses of the particles to which HI has been assigned
if method=='Dave':
    [IDs,M_HI]=HIL.Dave_HI_assignment(snapshot_fname,HI_frac,fac)
elif method=='method_1': 
    [IDs,M_HI]=HIL.method_1_HI_assignment(snapshot_fname,HI_frac,Omega_HI_ref)
elif method=='Barnes':
    [IDs,M_HI]=HIL.Barnes_Haehnelt(snapshot_fname,groups_fname,
                                   groups_number,long_ids_flag,SFR_flag)
elif method=='Paco':
    [IDs,M_HI]=HIL.Paco_HI_assignment(snapshot_fname,groups_fname,
Example #32
def hod(snapshot_fname,
        groups_fname,
        groups_number,
        min_mass,
        max_mass,
        fiducial_density,
        M1,
        alpha,
        mass_criteria,
        verbose=False):

    thres = 1e-3  #controls the max relative error to accept a galaxy density

    #read the header and obtain the boxsize
    head = readsnap.snapshot_header(snapshot_fname)
    BoxSize = head.boxsize  #BoxSize in kpc/h

    #read positions and IDs of DM particles: sort the IDs array
    DM_pos = readsnap.read_block(snapshot_fname, "POS ", parttype=-1)  #kpc/h
    DM_ids = readsnap.read_block(snapshot_fname, "ID  ", parttype=-1) - 1
    sorted_ids = DM_ids.argsort(axis=0)
    #the particle whose ID is N is located in the position sorted_ids[N]
    #i.e. DM_ids[sorted_ids[N]]=N
    #the position of the particle whose ID is N would be:
    #DM_pos[sorted_ids[N]]

    #read the IDs of the particles belonging to the CDM halos
    halos_ID = readsubf.subf_ids(groups_fname,
                                 groups_number,
                                 0,
                                 0,
                                 long_ids=True,
                                 read_all=True)
    IDs = halos_ID.SubIDs - 1
    del halos_ID

    #read CDM halos information
    halos = readsubf.subfind_catalog(groups_fname,
                                     groups_number,
                                     group_veldisp=True,
                                     masstab=True,
                                     long_ids=True,
                                     swap=False)
    if mass_criteria == 't200':
        halos_mass = halos.group_m_tophat200 * 1e10  #masses in Msun/h
        halos_radius = halos.group_r_tophat200  #radius in kpc/h
    elif mass_criteria == 'm200':
        halos_mass = halos.group_m_mean200 * 1e10  #masses in Msun/h
        halos_radius = halos.group_r_mean200  #radius in kpc/h
    elif mass_criteria == 'c200':
        halos_mass = halos.group_m_crit200 * 1e10  #masses in Msun/h
        halos_radius = halos.group_r_crit200  #radius in kpc/h
    else:
        print('bad mass_criteria')
        sys.exit()
    halos_pos = halos.group_pos  #positions in kpc/h
    halos_len = halos.group_len
    halos_offset = halos.group_offset
    halos_indexes = np.where((halos_mass > min_mass)
                             & (halos_mass < max_mass))[0]
    del halos

    if verbose:
        print(' ')
        print('total halos found=', halos_pos.shape[0])
        print('halos number density=', len(halos_pos) / (BoxSize * 1e-3)**3)

    #keep only the halos in the given mass range
    halo_mass = halos_mass[halos_indexes]
    halo_pos = halos_pos[halos_indexes]
    halo_radius = halos_radius[halos_indexes]
    halo_len = halos_len[halos_indexes]
    halo_offset = halos_offset[halos_indexes]
    del halos_indexes

    ##### COMPUTE Mmin GIVEN M1 & alpha #####
    i = 0
    max_iterations = 20  #maximum number of iterations
    Mmin1 = min_mass
    Mmin2 = max_mass
    while (i < max_iterations):
        Mmin = 0.5 * (Mmin1 + Mmin2)  #estimation of the HOD parameter Mmin

        total_galaxies = 0
        inside = np.where(halo_mass > Mmin)[0]  #take all galaxies with M>Mmin
        mass = halo_mass[
            inside]  #only halos with M>Mmin have central/satellites

        total_galaxies = mass.shape[0] + np.sum((mass / M1)**alpha)
        mean_density = total_galaxies * 1.0 / (BoxSize *
                                               1e-3)**3  #galaxies/(Mpc/h)^3

        if (np.absolute(
            (mean_density - fiducial_density) / fiducial_density) < thres):
            i = max_iterations
        elif (mean_density > fiducial_density):
            Mmin1 = Mmin
        else:
            Mmin2 = Mmin
        i += 1

    if verbose:
        print(' ')
        print('Mmin=', Mmin)
        print('average number of galaxies=', total_galaxies)
        print('average galaxy density=', mean_density)
    #########################################

    #just halos with M>Mmin; the rest do not host central/satellite galaxies
    inside = np.where(halo_mass > Mmin)[0]
    halo_mass = halo_mass[inside]
    halo_pos = halo_pos[inside]
    halo_radius = halo_radius[inside]
    halo_len = halo_len[inside]
    halo_offset = halo_offset[inside]
    del inside

    #compute number of satellites in each halo using the Poisson distribution
    N_mean_sat = (halo_mass / M1)**alpha  #mean number of satellites
    N_sat = np.empty(len(N_mean_sat), dtype=np.int32)
    for i in range(len(N_sat)):
        N_sat[i] = np.random.poisson(N_mean_sat[i])
    N_tot = np.sum(N_sat) + len(
        halo_mass)  #total number of galaxies in the catalogue

    if verbose:
        print(' ')
        print(np.min(halo_mass), '< M_halo <', np.max(halo_mass))
        print('total number of galaxies=', N_tot)
        print('galaxy number density=', N_tot / (BoxSize * 1e-3)**3)

    #put satellites following the distribution of dark matter in groups
    if verbose:
        print(' ')
        print('Creating mock catalogue ...', )

    pos_galaxies = np.empty((N_tot, 3), dtype=np.float32)
    #index: runs over halos (a halo may host several galaxies)
    #i: runs over all galaxies (centrals and satellites)
    #count: number of galaxies that end up beyond their host halo's virial radius
    index = 0
    count = 0
    i = 0
    while (index < halo_mass.shape[0]):

        position = halo_pos[index]  #position of the DM halo
        radius = halo_radius[index]  #radius of the DM halo

        #save the position of the central galaxy
        pos_galaxies[i] = position
        i += 1

        #if halo contains satellites, save their positions
        Nsat = N_sat[index]
        if Nsat > 0:
            offset = halo_offset[index]
            length = halo_len[index]
            idss = sorted_ids[IDs[offset:offset + length]]

            #compute the distances to the halo center keeping those with R<Rvir
            pos = DM_pos[idss]  #positions of the particles belonging to the halo
            posc = pos - position

            #this is to populate correctly halos closer to box boundaries
            if np.any((position + radius > BoxSize) +
                      (position - radius < 0.0)):

                inside = np.where(posc[:, 0] > BoxSize / 2.0)[0]
                posc[inside, 0] -= BoxSize
                inside = np.where(posc[:, 0] < -BoxSize / 2.0)[0]
                posc[inside, 0] += BoxSize

                inside = np.where(posc[:, 1] > BoxSize / 2.0)[0]
                posc[inside, 1] -= BoxSize
                inside = np.where(posc[:, 1] < -BoxSize / 2.0)[0]
                posc[inside, 1] += BoxSize

                inside = np.where(posc[:, 2] > BoxSize / 2.0)[0]
                posc[inside, 2] -= BoxSize
                inside = np.where(posc[:, 2] < -BoxSize / 2.0)[0]
                posc[inside, 2] += BoxSize

            radii = np.sqrt(posc[:, 0]**2 + posc[:, 1]**2 + posc[:, 2]**2)
            inside = np.where(radii < radius)[0]
            selected = random.sample(inside, Nsat)
            pos = pos[selected]

            #additional, non-essential check; can be commented out
            posc = pos - position
            if np.any((posc > BoxSize / 2.0) + (posc < -BoxSize / 2.0)):
                inside = np.where(posc[:, 0] > BoxSize / 2.0)[0]
                posc[inside, 0] -= BoxSize
                inside = np.where(posc[:, 0] < -BoxSize / 2.0)[0]
                posc[inside, 0] += BoxSize

                inside = np.where(posc[:, 1] > BoxSize / 2.0)[0]
                posc[inside, 1] -= BoxSize
                inside = np.where(posc[:, 1] < -BoxSize / 2.0)[0]
                posc[inside, 1] += BoxSize

                inside = np.where(posc[:, 2] > BoxSize / 2.0)[0]
                posc[inside, 2] -= BoxSize
                inside = np.where(posc[:, 2] < -BoxSize / 2.0)[0]
                posc[inside, 2] += BoxSize
            r_max = np.max(
                np.sqrt(posc[:, 0]**2 + posc[:, 1]**2 + posc[:, 2]**2))
            if r_max > radius:  #check that no particle beyond Rvir was selected
                print(position)
                print(radius)
                print(pos)
                count += 1

            for j in range(Nsat):
                pos_galaxies[i] = pos[j]
                i += 1
        index += 1

    if verbose:
        print('done')
    #some final checks
    if i != N_tot:
        print('some galaxies are missing:')
        print('registered', i, 'galaxies out of', N_tot)
    if count > 0:
        print('error:', count, 'particles beyond the virial radius selected')

    return pos_galaxies
###############################################################################
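#A minimal usage sketch (not part of the original script); all file names and
#HOD parameter values below are illustrative placeholders only:
#galaxies = hod(snapshot_fname='snapdir_003/snap_003',
#               groups_fname='groups_003', groups_number=3,
#               min_mass=2e12, max_mass=2e15,      #Msun/h
#               fiducial_density=5e-4,             #galaxies/(Mpc/h)^3
#               M1=1e14, alpha=1.15,
#               mass_criteria='m200', verbose=True)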

#read snapshot head and obtain BoxSize, Omega_m and Omega_L
print '\nREADING SNAPSHOTS PROPERTIES'
head = readsnap.snapshot_header(snapshot_fname)
BoxSize = head.boxsize / 1e3  #Mpc/h
Nall = head.nall
Masses = head.massarr * 1e10  #Msun/h
Omega_m = head.omega_m
Omega_l = head.omega_l
redshift = head.redshift
Hubble = 100.0 * np.sqrt(Omega_m * (1.0 + redshift)**3 + Omega_l)  #h*km/s/Mpc
h = head.hubble

#read the positions and SPH smoothing lengths of the gas particles
pos = readsnap.read_block(snapshot_fname, "POS ", parttype=0) / 1e3  #Mpc/h
radii = readsnap.read_block(snapshot_fname, "HSML", parttype=0) / 1e3  #Mpc/h
"""
print len(np.where(radii<(BoxSize/1024.0))[0])*1.0/len(radii)
bins_histo=np.logspace(np.log10(np.min(radii)),np.log10(np.max(radii)),101)
middle_bin=0.5*(bins_histo[1:]+bins_histo[:-1])
H=np.histogram(radii,bins=bins_histo)[0]*1.0/len(radii)
print np.sum(H,dtype=np.float64)
f=open('borrar.dat','w')
for i in range(100):
    f.write(str(middle_bin[i])+' '+str(H[i])+'\n')
f.close()
"""

#compute the density in the grid cells
delta = np.zeros(dims**3, dtype=np.float32)
print '\nREADING SNAPSHOTS PROPERTIES'
head     = readsnap.snapshot_header(snapshot_fname)
BoxSize  = head.boxsize/1e3  #Mpc/h
Nall     = head.nall
Masses   = head.massarr*1e10 #Msun/h
Omega_m  = head.omega_m
Omega_l  = head.omega_l
redshift = head.redshift
Hubble   = 100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #km/s/(Mpc/h)
h        = head.hubble

#define the array containing the deltas
delta = np.zeros(dims3,dtype=np.float32)

#read the positions and masses of the CDM particles
pos  = readsnap.read_block(snapshot_fname,"POS ",parttype=1)/1e3  #Mpc/h
mass = readsnap.read_block(snapshot_fname,"MASS",parttype=1)*1e10 #Msun/h
if do_RSD:
    vel = readsnap.read_block(snapshot_fname,"VEL ",parttype=1) #km/s
print 'Omega_CDM = %.4f'%(np.sum(mass,dtype=np.float64)/BoxSize**3/rho_crit)

#if there are neutrinos read their positions and masses
if Nall[2]>0:
    pos_nu  = readsnap.read_block(snapshot_fname,"POS ",parttype=2)/1e3  #Mpc/h
    mass_nu = readsnap.read_block(snapshot_fname,"MASS",parttype=2)*1e10 #Msun/h
    print 'Omega_NU  = %.4f'\
        %(np.sum(mass_nu,dtype=np.float64)/BoxSize**3/rho_crit)
    pos  = np.vstack([pos,pos_nu]);    del pos_nu
    mass = np.hstack([mass,mass_nu]);  del mass_nu
    if do_RSD:
        vel_nu = readsnap.read_block(snapshot_fname,"VEL ",parttype=2) #km/s
z_max = nu0 / nu_max - 1.0
print 'Channel redshift interval: %1.4f < z < %1.4f' % (z_min, z_max)
print 'Channel frequency [%1.3f - %1.3f] MHz' % (nu_max, nu_min)
delta_r = cl.comoving_distance(z_max, Omega_m, Omega_l) - r
print 'delta_r channel = %2.2f Mpc/h' % delta_r
grid_res = (ang_res / 60.0) * (pi / 180.0) * r

#grid resolution
print '\nSpatial resolution = %2.3f Mpc/h' % grid_res
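#worked example (comments only; the numbers below are illustrative, not from
#this script): with the 21cm rest frequency nu0 = 1420.406 MHz, an observed
#channel spanning 980-981 MHz corresponds to redshifts
#z = 1420.406/981 - 1 = 0.448  up to  z = 1420.406/980 - 1 = 0.449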

#find the total number of particles in the simulation
Ntotal = np.sum(Nall, dtype=np.uint64)
print '\nTotal number of particles in the simulation:', Ntotal

#sort the pos and vel array
ID_unsort = readsnap.read_block(snapshot_fname, "ID  ",
                                parttype=-1) - 1  #normalized
pos_unsort = readsnap.read_block(snapshot_fname, "POS ",
                                 parttype=-1) / 1e3  #Mpc/h
vel_unsort = readsnap.read_block(snapshot_fname, "VEL ", parttype=-1)  #km/s
pos = np.empty((Ntotal, 3), dtype=np.float32)
pos[ID_unsort] = pos_unsort
vel = np.empty((Ntotal, 3), dtype=np.float32)
vel[ID_unsort] = vel_unsort
del pos_unsort, vel_unsort, ID_unsort

#find the IDs and HI masses of the particles to which HI has been assigned
if method == 'Dave':
    [IDs, M_HI] = HIL.Dave_HI_assignment(snapshot_fname, HI_frac, fac)
elif method == 'method_1':
    [IDs, M_HI] = HIL.method_1_HI_assignment(snapshot_fname, HI_frac,
                                             Omega_HI_ref)
BoxSize = head.boxsize / 1e3  #Mpc/h
Nall = head.nall
Masses = head.massarr * 1e10  #Msun/h
Omega_m = head.omega_m
Omega_l = head.omega_l
redshift = head.redshift
Hubble = 100.0 * np.sqrt(Omega_m * (1.0 + redshift)**3 + Omega_l)  #h*km/s/Mpc
h = head.hubble

#find the total number of particles in the simulation
Ntotal = np.sum(Nall, dtype=np.uint64)
print 'Total number of particles in the simulation =', Ntotal

################################## HI ###########################################
#sort the pos and vel array
ID_unsort = readsnap.read_block(snapshot_fname, "ID  ",
                                parttype=-1) - 1  #normalized
print 'sorting the POS array...'
pos_unsort = readsnap.read_block(snapshot_fname, "POS ",
                                 parttype=-1) / 1e3  #Mpc/h
pos = np.empty((Ntotal, 3), dtype=np.float32)
pos[ID_unsort] = pos_unsort
del pos_unsort, ID_unsort

#find the IDs and HI masses of the particles to which HI has been assigned
if method == 'Dave':
    [IDs, M_HI] = HIL.Dave_HI_assignment(snapshot_fname, HI_frac, fac)
elif method == 'method_1':
    [IDs, M_HI] = HIL.method_1_HI_assignment(snapshot_fname, HI_frac,
                                             Omega_HI_ref)
elif method == 'Barnes':
    [IDs, M_HI] = HIL.Barnes_Haehnelt(snapshot_fname, groups_fname,
Example #37
0
    Nall=head.nall
    Masses=head.massarr*1e10 #Msun/h
    Omega_m=head.omega_m
    Omega_l=head.omega_l
    redshift=head.redshift
    Hubble=100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #(km/s)/(Mpc/h) 
    
    #define the delta array and the mean_mass variable
    delta     = np.zeros(dims3,dtype=np.float32)
    mean_mass = 0.0   #Msun/h

    #make a loop over all particle types and sum their masses in the grid
    for ptype in particle_type:

        #read particle positions 
        pos  = readsnap.read_block(snapshot_fname,"POS ",
                                   parttype=ptype)/1e3  #Mpc/h

        #displace particle positions to redshift-space
        if do_RSD:
            vel  = readsnap.read_block(snapshot_fname,"VEL ",
                                       parttype=ptype)  #km/s
            RSL.pos_redshift_space(pos,vel,BoxSize,Hubble,redshift,axis)
            del vel

        #read particle masses
        mass = readsnap.read_block(snapshot_fname,"MASS",
                                   parttype=ptype)*1e10 #Msun/h

        print 'Number of '+pname[ptype]+' particles =',len(pos)
        print '%.4f < X < %.4f'%(np.min(pos[:,0]), np.max(pos[:,0]))
        print '%.4f < Y < %.4f'%(np.min(pos[:,1]), np.max(pos[:,1]))
Example #38
0
Masses=head.massarr*1e10 #Msun/h
Omega_m=head.omega_m
Omega_l=head.omega_l
redshift=head.redshift
Hubble=100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #h*km/s/Mpc
h=head.hubble

#compute the values of Omega_CDM and Omega_B
Omega_cdm=Nall[1]*Masses[1]/BoxSize**3/rho_crit
Omega_b=Omega_m-Omega_cdm
print '\nOmega_CDM = %.3f\nOmega_B   = %0.3f\nOmega_M   = %.3f\n'\
    %(Omega_cdm,Omega_b,Omega_m)


#read the positions of all the particles
pos=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)/1e3 #Mpc/h
print '%.3f < X [Mpc/h] < %.3f'%(np.min(pos[:,0]),np.max(pos[:,0]))
print '%.3f < Y [Mpc/h] < %.3f'%(np.min(pos[:,1]),np.max(pos[:,1]))
print '%.3f < Z [Mpc/h] < %.3f\n'%(np.min(pos[:,2]),np.max(pos[:,2]))

#read the velocities of all the particles
vel=readsnap.read_block(snapshot_fname,"VEL ",parttype=-1) #km/s

if do_RSD:
    print 'moving particles to redshift-space'
    RSD(pos[:,axis],vel[:,axis],Hubble,redshift)

#read the masses of all the particles
M=readsnap.read_block(snapshot_fname,"MASS",parttype=-1)*1e10 #Msun/h
print '%.3e < M [Msun/h] < %.3e'%(np.min(M),np.max(M))
print 'Omega_m = %.3f\n'%(np.sum(M,dtype=np.float64)/rho_crit/BoxSize**3)
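#optional sanity check (not in the original script): the Omega_m recovered from
#the particle masses above should match the header value
if not np.isclose(np.sum(M,dtype=np.float64)/rho_crit/BoxSize**3, Omega_m, rtol=1e-2):
    print 'WARNING: sum of particle masses inconsistent with header Omega_m'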
Example #39
0
Omega_l=head.omega_l
redshift=head.redshift
Hubble=100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #h*km/s/Mpc
h=head.hubble

#split the particle IDs among CDM, gas and stars, and also by environment
indexes=HIL.particle_indexes(snapshot_fname,groups_fname,groups_number,
                             long_ids_flag,SFR_flag,mass_interval,min_mass,
                             max_mass)

#find the total number of particles in the simulation
Ntotal=np.sum(Nall,dtype=np.uint64)
print 'Total number of particles in the simulation =',Ntotal

#sort the pos array
ID_unsort=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1 #normalized
pos_unsort=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)/1e3 #Mpc/h
pos=np.empty((Ntotal,3),dtype=np.float32); pos[ID_unsort]=pos_unsort

#find the IDs of the gas particles residing in filaments
IDs=indexes.gas_filaments

#find the IDs and HI masses of the particles to which HI has been assigned
#note that these IDs_g correspond to all the particles to which HI has been
#assigned
if method=='Bagla' or method=='Paco':

    #sort the HI/H array: only use gas particles
    ID_unsort=readsnap.read_block(snapshot_fname,"ID  ",parttype=0)-1#normalized
    nH0_unsort=readsnap.read_block(snapshot_fname,"NH  ",parttype=0)*fac #HI/H
    nH0=np.zeros(Ntotal,dtype=np.float32); nH0[ID_unsort]=nH0_unsort
Example #40
0
#################################### INPUT ####################################
snapshot_fname = '../ics'
bins           = 100 #number of bins for the distribution

# parameters for the FD distribution
Mnu      = 0.6       #eV
h_planck = 6.582e-16 #eV*s (value of the reduced Planck constant, hbar)
kB       = 8.617e-5  #eV/K
c        = 3e5       #km/s 
Tnu      = 1.95      #K
###############################################################################

########## fraction from simulation ###########
# read snapshot redshift and neutrino velocities
z   = readsnap.snapshot_header(snapshot_fname).redshift
vel = readsnap.read_block(snapshot_fname,"VEL ",parttype=2) #km/s

# compute velocity modulus
V = np.sqrt(vel[:,0]**2 + vel[:,1]**2 + vel[:,2]**2);  del vel

# define the velocity intervals, their mean value and their widths
vel_min, vel_max = np.min(V),np.max(V)
if vel_min==0.0:  vel_min = 1e-3
vel_intervals = np.logspace(np.log10(vel_min),np.log10(vel_max),bins+1)
dV            = vel_intervals[1:] - vel_intervals[:-1]       #km/s
V_mean        = 0.5*(vel_intervals[1:] + vel_intervals[:-1]) #km/s

# compute the fraction of neutrinos within each velocity bin
hist = (np.histogram(V,bins=vel_intervals)[0])*1.0/len(V) 
###############################################
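########## analytic Fermi-Dirac fraction ##########
# A hedged sketch, not in the original source: the analytic Fermi-Dirac
# counterpart of the fraction measured above, so the histogram can be compared
# bin by bin. Assumptions: Mnu is the mass of a single neutrino species (divide
# by 3 first if it is the sum of three degenerate species), and the velocities
# read above are peculiar velocities in km/s (GADGET ICs may store v/sqrt(a),
# in which case a sqrt(a) correction is needed).
from scipy import integrate

def FD_fraction(v1, v2, m_nu, z, Tnu=1.95, kB=8.617e-5, c=3e5):
    """fraction of relic neutrinos with velocity in [v1,v2] km/s at redshift z"""
    T  = kB*Tnu*(1.0 + z)                      #neutrino temperature in eV
    x  = lambda v: m_nu*(v/c)/T                #dimensionless momentum q/(kB*T)
    fd = lambda xx: xx**2/(np.exp(xx) + 1.0)   #unnormalized Fermi-Dirac weight
    num = integrate.quad(fd, min(x(v1), 50.0), min(x(v2), 50.0))[0]
    den = integrate.quad(fd, 0.0, 50.0)[0]     #integral converges well below 50
    return num/den

# expected fraction in each of the velocity bins defined above
hist_FD = np.array([FD_fraction(vel_intervals[i], vel_intervals[i+1], Mnu, z)
                    for i in range(bins)])
###############################################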
Example #41
0
from colossus.halo import profile_nfw
from colossus.halo import concentration

# Setting cosmology for concentrations
pdict = {'flat': True, 'H0': params.h0true, 'Om0': params.omega0, 'Ob0': params.omegabaryon,\
          'sigma8': cosmology.sigma8, 'ns': params.ns}
colossus.addCosmology('myCosmo', pdict)
colossus.setCosmology('myCosmo')

# Load Catalog
cat = catalog(params.pincatfile.format(0.0))
rhoc = cosmology.lcdm.critical_density(0.0).to("M_sun/Mpc^3").value
rDelta = (3 * cat.Mass / 4 / np.pi / 200 / rhoc)**(1.0 / 3)
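# (i.e. invert the spherical-overdensity definition M = (4/3)*pi*200*rho_c*R200^3
#  to obtain the radius R200c of each halo)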

# Getting particle positions and particle mass
pos = rs.read_block(
    params.pintlessfile.replace("t_snapshot", r"{0:5.4f}").format(0.0), "POS ")
mp = rs.snapshot_header(
    params.pintlessfile.replace("t_snapshot",
                                r"{0:5.4f}").format(0.0)).massarr[1] * 1e10

# Getting the index of the most massive object in the catalog
idx = np.argsort(cat.Mass)[-1]

# Getting the first and last index of the particles inside the most massive halo
idxp1 = np.sum(cat.Npart[:idx])
idxp2 = idxp1 + cat.Npart[idx]

plt.scatter(pos[idxp1:idxp2][:, 0], pos[idxp1:idxp2][:, 1], s=0.1)
plt.show()

plt.scatter(pos[idxp1:idxp2][:, 0], pos[idxp1:idxp2][:, 2], s=0.1)
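# A hedged sketch (not from the visible part of this example) of what the
# colossus imports above are typically used for: attach a concentration to the
# most massive halo and build its NFW profile. The model name is just one of
# colossus' options, and the unit conversion assumes cat.Mass is in Msun/h and
# rDelta in Mpc/h (colossus works in Msun/h and kpc/h).
c200  = concentration.concentration(cat.Mass[idx], '200c', 0.0, model='diemer19')
p_nfw = profile_nfw.NFWProfile(M=cat.Mass[idx], c=c200, z=0.0, mdef='200c')
print(p_nfw.density(rDelta[idx]*1e3))  #NFW density at R200c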
Example #42
0
Masses=head.massarr*1e10 #Msun/h
Omega_m=head.omega_m
Omega_l=head.omega_l
redshift=head.redshift
Hubble=100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_l)  #h*km/s/Mpc
h=head.hubble

#find the total number of particles in the simulation
Ntotal=np.sum(Nall,dtype=np.uint64)
print 'Total number of particles in the simulation =',Ntotal



################################## HI ###########################################
#sort the pos and vel array
ID_unsort=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1 #normalized
print 'sorting the POS array...'
pos_unsort=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)/1e3 #Mpc/h
pos=np.empty((Ntotal,3),dtype=np.float32); pos[ID_unsort]=pos_unsort; del pos_unsort
del ID_unsort

#find the IDs and HI masses of the particles to which HI has been assigned
if method=='Dave':
    [IDs,M_HI]=HIL.Dave_HI_assignment(snapshot_fname,HI_frac,fac)
elif method=='method_1': 
    [IDs,M_HI]=HIL.method_1_HI_assignment(snapshot_fname,HI_frac,Omega_HI_ref)
elif method=='Barnes':
    [IDs,M_HI]=HIL.Barnes_Haehnelt(snapshot_fname,groups_fname,
                                   groups_number,long_ids_flag,SFR_flag)
elif method=='Paco':
    [IDs,M_HI]=HIL.Paco_HI_assignment(snapshot_fname,groups_fname,
Example #43
0
def generate_lens_map(lenses, cpunum, LC, Halo_HF_ID, Halo_ID, Halo_z, Rvir,
                      snapnum, snapfile, h, scale, Ncells, HQ_dir, sim, sim_phy,
                      sim_name, HaloPosBox, HaloVel, cosmo, results_per_cpu):
    """
    Input:
        ll: halo array indexing
        LC: Light-cone dictionary
        Halo_ID: ID of Halo
        Halo_z: redshift of Halo
        Rvir: virial radius in [Mpc]
        previous_snapnum: 
        snapnum
    Output:
    """
    print('Process %s started' % mp.current_process().name)
    first_lens = lenses[0]
    previous_snapnum = snapnum[first_lens]
    memtrack = 0

    file = open('F6_test_'+str(mp.current_process().name)+'.txt','w') 

    lenslistinit()
    # Run through lenses
    for ll in range(first_lens, lenses[-1]):
        zs, Src_ID, SrcPosSky = source_selection(LC['Src_ID'], LC['Src_z'],
                                                 LC['SrcPosSky'], Halo_ID[ll])
        zl = Halo_z[ll]
        Lbox = Rvir[ll]*0.3*u.Mpc
        FOV = Lbox.to_value('Mpc')
        # converting box size and pixels size from ang. diam. dist. to arcsec
        FOV_arc = (FOV/cf.Da(zl, cosmo)*u.rad).to_value('arcsec')  #[arcsec] box size
        dsx_arc = FOV_arc/Ncells                  #[arcsec] pixel size
        # initialize the coordinates of grids (light rays on the lens plane)
        lp1, lp2 = cf.make_r_coor(FOV_arc, Ncells)  #[arcsec]


        # Only load new particle data if lens is at another snapshot
        if (previous_snapnum != snapnum[ll]) or (ll == first_lens):
            snap = snapfile % (snapnum[ll], snapnum[ll])
            # 0 Gas, 1 DM, 4 Star[Star=+time & Wind=-time], 5 BH
            DM_pos = readsnap.read_block(snap, 'POS ', parttype=1)*scale  #[Mpc]
            DM_mass = readsnap.read_block(snap, 'MASS', parttype=1)*1e10/h
            Gas_pos = readsnap.read_block(snap, 'POS ', parttype=0)*scale  #[Mpc]
            Gas_mass = readsnap.read_block(snap, 'MASS', parttype=0)*1e10/h
            Star_pos = readsnap.read_block(snap, 'POS ', parttype=4)*scale  #[Mpc]
            Star_age = readsnap.read_block(snap, 'AGE ', parttype=4)
            Star_mass = readsnap.read_block(snap, 'MASS', parttype=4)
            Star_pos = Star_pos[Star_age >= 0]
            Star_mass = Star_mass[Star_age >= 0]*1e10/h
            del Star_age
            BH_pos = readsnap.read_block(snap, 'POS ', parttype=5)*scale
            BH_mass = readsnap.read_block(snap, 'MASS', parttype=5)*1e10/h
            file.write(str(mp.current_process().name) + 'read particles \n')
        previous_snapnum = snapnum[ll]
        
        DM_sigma, xs, ys = projected_surface_density(ll,
                                                     DM_pos,   #[Mpc]
                                                     DM_mass,
                                                     HaloPosBox[ll],
                                                     fov=FOV,  #[Mpc]
                                                     bins=Ncells,
                                                     smooth=False,
                                                     smooth_fac=0.5,
                                                     neighbour_no=32)
        Gas_sigma, xs, ys = projected_surface_density(ll, Gas_pos, #*a/h,
                                                      Gas_mass,
                                                      HaloPosBox[ll], #*a/h,
                                                      fov=FOV,
                                                      bins=Ncells,
                                                      smooth=False,
                                                      smooth_fac=0.5,
                                                      neighbour_no=32)
        Star_sigma, xs, ys = projected_surface_density(ll, Star_pos, #*a/h,
                                                       Star_mass,
                                                       HaloPosBox[ll], #*a/h,
                                                       fov=FOV,
                                                       bins=Ncells,
                                                       smooth=False,
                                                       smooth_fac=0.5,
                                                       neighbour_no=8)
        file.write(str(mp.current_process().name) + 'created surface densities \n')
        # point sources need to be smoothed by > 1 pixel to avoid artefacts
        tot_sigma = DM_sigma + Gas_sigma + Star_sigma

        srclistinit()
        # Run through Sources
        check_for_sources = 0
        for ss in range(len(Src_ID)):
            # Calculate critical surface density
            sigma_cr = sigma_crit(zl, zs[ss], cosmo).to_value('Msun Mpc-2')
            kappa = tot_sigma/sigma_cr
            fig = plt.figure()
            ax = fig.add_subplot(111)
            kappa = gaussian_filter(kappa, sigma=3)
            # Calculate Deflection Maps
            alpha1, alpha2, mu_map, phi, detA, lambda_t = cal_lensing_signals(kappa,
                                                                              FOV_arc,
                                                                              Ncells) 
            file.write(str(ll)+'; '+str(ss)+'; '+str(mp.current_process().name) + ' lensing signals \n')
            # Calculate Einstein Radii
            Ncrit, curve_crit, curve_crit_tan, Rein = einstein_radii(lp1, lp2,
                                                                     detA,
                                                                     lambda_t,
                                                                     zl, cosmo,
                                                                     ax, 'med')
            file.write(str(mp.current_process().name)+' Rein calc \n')
            # Calculate Time-Delay and Magnification
            n_imgs, delta_t, mu, theta, beta = timedelay_magnification(mu_map, phi,
                                                                       dsx_arc,
                                                                       Ncells, lp1, lp2,
                                                                       alpha1, alpha2,
                                                                       SrcPosSky[ss],
                                                                       zs[ss],
                                                                       zl, cosmo)
            file.write(str(mp.current_process().name)+' time delay \n')
            if n_imgs > 1:
                file.write(str(mp.current_process().name)+' adding multi source --------------- \n')
                print('%s, %s, %s' % (str(mp.current_process().name),
                                      str(ll),
                                      str(ss)))
                # Tree Branch 2
                s_srcID.append(Src_ID[ss])
                print('-- zs[ss]', zs)
                s_zs.append(zs[ss])
                s_beta.append(beta)
                #s_lensplane.append([lp1, lp2])
                s_detA.append(detA)
                s_tancritcurves.append(curve_crit_tan)
                s_einsteinradius.append(Rein)
                # Tree Branch 3
                s_theta.append(theta)
                s_deltat.append(delta_t)
                s_mu.append(mu)
                check_for_sources = 1
        if check_for_sources == 1:
            # Tree Branch 1
            l_HFID.append(Halo_HF_ID[ll])
            l_haloID.append(Halo_ID[ll])
            l_snapnum.append(int(snapnum[ll]))
            l_zl.append(Halo_z[ll])
            l_haloposbox.append(HaloPosBox[ll])
            l_halovel.append(HaloVel[ll])
            # Tree Branch 2
            l_srcID.append(s_srcID)
            l_zs.append(s_zs)
            l_srcbeta.append(s_beta)
            #l_lensplane.append(s_lensplane)
            l_detA.append(s_detA)
            l_tancritcurves.append(s_tancritcurves)
            l_einsteinradius.append(s_einsteinradius)
            # Tree Branch 3
            l_srctheta.append(s_theta)
            l_deltat.append(s_deltat)
            l_mu.append(s_mu)
            #memuseout = (sys.getsizeof(l_HFID) + sys.getsizeof(l_haloID) + \
            #         sys.getsizeof(l_snapnum) + sys.getsizeof(l_zl) + \
            #         sys.getsizeof(l_haloposbox) + sys.getsizeof(l_halovel) + \
            #         sys.getsizeof(l_srcID) + sys.getsizeof(l_zs) + \
            #         sys.getsizeof(l_srcbeta) + sys.getsizeof(l_detA) + \
            #         sys.getsizeof(l_tancritcurves) + sys.getsizeof(l_einsteinradius) + \
            #         sys.getsizeof(l_srctheta) + sys.getsizeof(l_deltat) + \
            #         sys.getsizeof(l_mu))/1024**3  #[GB]
        memusetot = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024**3  #[GB]
        print('::::::::::::: Tot. Memory Size [GB]: ', memusetot)
        if memusetot > 5:
            ########## Save to File ########
            print('save file because it is too big')
            tree = plant_Tree()
            #tree = grow_Tree()

            # Tree Branches of Node 1 : Lenses
            tree['Halo_ID'] = l_haloID
            tree['HF_ID'] = l_HFID
            tree['snapnum'] = l_snapnum
            tree['zl'] = l_zl
            tree['HaloPosBox'] = l_haloposbox
            tree['HaloVel'] = l_halovel
            for sid in range(len(l_haloID)):
                # Tree Branches of Node 2 : Sources
                tree['Sources']['Src_ID'][sid] = l_srcID[sid]
                tree['Sources']['zs'][sid] = l_zs[sid]
                tree['Sources']['beta'][sid] = l_srcbeta[sid]
                #tree['Sources']['LP'][sid] = l_lensplane[sid]
                tree['Sources']['detA'][sid] = l_detA[sid]
                tree['Sources']['TCC'][sid] = l_tancritcurves[sid]
                tree['Sources']['Rein'][sid] = l_einsteinradius[sid]
                for imgs in range(len(l_srcID[sid])):
                    # Tree Branches of Node 3 : Multiple Images
                    tree['Sources']['theta'][sid][imgs] = l_srctheta[sid][imgs]
                    tree['Sources']['delta_t'][sid][imgs] = l_deltat[sid][imgs]
                    tree['Sources']['mu'][sid][imgs] = l_mu[sid][imgs]

            lm_dir = HQ_dir+'LensingMap/'+sim_phy[sim]+'/'+sim_name[sim]+'/'
            ensure_dir(lm_dir)
            filename = lm_dir+'LM_'+mp.current_process().name+'_' + \
                       str(memtrack)+'.pickle'
            filed = open(filename, 'wb')
            pickle.dump(tree, filed)
            filed.close()
            plt.close(fig)
            memtrack += 1
            lenslistinit()
            srclistinit()

    ########## Save to File ########
    tree = plant_Tree()
    #tree = grow_Tree()

    # Tree Branches of Node 1 : Lenses
    tree['Halo_ID'] = l_haloID
    tree['HF_ID'] = l_HFID
    tree['snapnum'] = l_snapnum
    tree['zl'] = l_zl
    tree['HaloPosBox'] = l_haloposbox
    tree['HaloVel'] = l_halovel
    for sid in range(len(l_haloID)):
        # Tree Branches of Node 2 : Sources
        tree['Sources']['Src_ID'][sid] = l_srcID[sid]
        tree['Sources']['zs'][sid] = l_zs[sid]
        tree['Sources']['beta'][sid] = l_srcbeta[sid]
        #tree['Sources']['LP'][sid] = l_lensplane[sid]
        tree['Sources']['detA'][sid] = l_detA[sid]
        tree['Sources']['TCC'][sid] = l_tancritcurves[sid]
        tree['Sources']['Rein'][sid] = l_einsteinradius[sid]
        for imgs in range(len(l_srcID[sid])):
            # Tree Branches of Node 3 : Multiple Images
            tree['Sources']['theta'][sid][imgs] = l_srctheta[sid][imgs]
            tree['Sources']['delta_t'][sid][imgs] = l_deltat[sid][imgs]
            tree['Sources']['mu'][sid][imgs] = l_mu[sid][imgs]
    file.close()
    lm_dir = HQ_dir+'LensingMap/'+sim_phy[sim]+'/'+sim_name[sim]+'/'
    ensure_dir(lm_dir)
    filename = lm_dir+'LM_'+mp.current_process().name+'_' + \
               str(memtrack)+'.pickle'
    filed = open(filename, 'wb')
    pickle.dump(tree, filed)
    filed.close()
    plt.close(fig)
Example #44
0
    DR_action='compute' #'compute' or 'read' (from DR_name file)
    
    #### wp FILE ####
    wp_file='w_p_21.dat'

    #### OUTPUT ####
    results_file='500Mpc_512_0.6_mean200.dat'
######################################################

M1_array=np.linspace(M1_min, M1_max, M1_bins)
alpha_array=np.linspace(alpha_min, alpha_max, alpha_bins)
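#A small sketch (not in the original): flatten the (M1, alpha) grid into a list
#of parameter pairs, e.g. to distribute the different HOD models among the MPI
#ranks later on.
M1_grid, alpha_grid = np.meshgrid(M1_array, alpha_array, indexing='ij')
param_pairs = np.column_stack([M1_grid.ravel(), alpha_grid.ravel()])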

if myrank==0:

    #read positions and IDs of DM particles: sort the IDs array
    DM_pos=readsnap.read_block(snapshot_fname,"POS ",parttype=-1)
    DM_ids=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)
    print len(DM_ids),np.min(DM_ids),np.max(DM_ids)
    sorted_ids=DM_ids.argsort(axis=0)
    del DM_ids
    #the particle whose ID is N is located in the position sorted_ids[N]
    #i.e. DM_ids[sorted_ids[N]]=N
    #the position of the particle whose ID is N would be:
    #DM_pos[sorted_ids[N]]

    #read the IDs of the particles belonging to the CDM halos
    halos_ID=readsubf.subf_ids(groups_fname,groups_number,0,0,
                               long_ids=True,read_all=True)
    IDs=halos_ID.SubIDs
    del halos_ID
        #read subhalos positions/masses
        subhalos_mass = halos.sub_mass*1e10             #masses in Msun/h
        subhalos_pos = halos.sub_pos/1e3                #positions in Mpc/h
        
        if obj == 'halos':
            halos_indexes = np.where((halos_mass>min_mass) & \
                                     (halos_mass<max_mass))[0]
            pos_g = halos_pos[halos_indexes]
        else:
            halos_indexes = np.where((subhalos_mass>min_mass) & \
                                     (subhalos_mass<max_mass))[0]
            pos_g = halos_pos[halos_indexes]
    
    elif obj == 'DM':                           #read snapshot file
        PAR_pos = readsnap.read_block(snapshot_fname,"POS ",parttype=1)/1e3#Mpc/h
        IDs = np.arange(len(PAR_pos));  IDs=random.sample(IDs,N_par)
        pos_g = PAR_pos[IDs];           del PAR_pos,IDs

    elif obj == 'data':
        #read Pinocchio halos information
        data = np.loadtxt(data_file,comments='#')
        Pin_pos = data[:,5:8];  Pin_mass = data[:,1];  Pin_vel = data[:,8:11]
        Pin_pos  = Pin_pos.astype(np.float32);  
        Pin_mass = Pin_mass.astype(np.float32)
        Pin_vel  = Pin_vel.astype(np.float32)
        halos_indexes = np.where((Pin_mass>min_mass) & \
                                 (Pin_mass<max_mass))[0]
        pos_g = Pin_pos[halos_indexes]
        if do_RSD:
            vel_g = Pin_vel[halos_indexes]
Example #46
0
print '\nREADING SNAPSHOTS PROPERTIES'
head = readsnap.snapshot_header(snapshot_fname)
BoxSize = head.boxsize / 1e3  #Mpc/h
Nall = head.nall
Masses = head.massarr * 1e10  #Msun/h
Omega_m = head.omega_m
Omega_l = head.omega_l
redshift = head.redshift
Hubble = 100.0 * np.sqrt(Omega_m *
                         (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
h = head.hubble

fout = 'CF_CDM_z=%.3f.txt' % redshift

# read the positions and masses of the CDM particles
pos = readsnap.read_block(snapshot_fname, "POS ", parttype=1) / 1e3  #Mpc/h

# compute delta_CDM
delta = np.zeros((dims, dims, dims), dtype=np.float32)
MASL.MA(pos, delta, BoxSize, MAS)
print '%.6e should be equal to\n%.6e'\
    %(np.sum(delta,dtype=np.float64),len(pos))
delta /= np.mean(delta, dtype=np.float64)
delta -= 1.0

#compute the correlation function
CF = PKL.Xi(delta, BoxSize, MAS, threads=8)

#save results to file
np.savetxt(fout, np.transpose([CF.r3D, CF.xi]))
Example #47
0
def dyn_vs_lensing_mass(cpunum, LC, lm_file, snapfile, h, scale, HQ_dir, sim,
                        sim_phy, sim_name, hfname, cosmo, results_per_cpu):
    """
    Input:
        ll: halo array indexing
        LC: Light-cone dictionary
        Halo_ID: ID of Halo
        Halo_z: redshift of Halo
        Rvir: virial radius in [Mpc]
        previous_snapnum: 
        snapnum
    Output:
    """
    LM = pickle.load(open(lm_file, 'rb'))  #, encoding="utf8")
    logging.info('Process %s started. Nr. of Halos: %s' %
                 (mp.current_process().name, len(LM['Halo_ID'])))
    results = []
    previous_snapnum = -1
    # Run through lenses
    for ll in range(len(LM['Halo_ID'])):
        # Load Lens properties
        #HaloHFID= LM['HF_ID'][ll]
        HaloHFID = int(LM['Rockstar_ID'][ll])
        HaloPosBox = LM['HaloPosBox'][ll]
        HaloVel = LM['HaloVel'][ll]
        snapnum = LM['snapnum'][ll]
        zl = LM['zl'][ll]

        # Only load new particle data if lens is at another snapshot
        if (previous_snapnum != snapnum):
            # Load Halo Properties
            rks_file = '/cosma5/data/dp004/dc-beck3/rockstar/'+sim_phy[sim]+ \
                       sim_name[sim]+'/halos_' + str(snapnum)+'.dat'
            df = pd.read_csv(
                rks_file,
                sep='\s+',
                skiprows=16,
                usecols=[0, 4, 5, 30, 31, 32, 33, 34, 35, 36, 37, 38, 47],
                names=[
                    'ID', 'Vrms', 'Rvir', 'A[x]', 'A[y]', 'A[z]', 'B[x]',
                    'B[y]', 'B[z]', 'C[x]', 'C[y]', 'C[z]', 'Halfmass_Radius'
                ])
            # Load Particle Properties
            #s = read_hdf5.snapshot(snapnum, snapfile)
            # 0 Gas, 1 DM, 4 Star[Star=+time & Wind=-time], 5 BH
            # Have to load all particles :-( -> takes too long
            #s.read(["Velocities", "Coordinates", "AGE"], parttype=-1)
            #Star_pos = s.data['Velocities']['stars']*scale
            snap = snapfile % (snapnum, snapnum)
            Star_age = readsnap.read_block(snap, 'AGE ', parttype=4)
            Star_pos = readsnap.read_block(snap, 'POS ',
                                           parttype=4) * scale  #[Mpc]
            Star_vel = readsnap.read_block(snap, 'VEL ', parttype=4)
            Star_mass = readsnap.read_block(snap, 'MASS',
                                            parttype=4) * 1e10 / h
            Star_pos = Star_pos[Star_age >= 0]
            Star_vel = Star_vel[Star_age >= 0]
            Star_mass = Star_mass[Star_age >= 0]
            del Star_age
        previous_snapnum = snapnum

        # Load Halo Properties
        indx = df['ID'][df['ID'] == HaloHFID].index[0]
        Vrms = df['Vrms'][indx] * (u.km / u.s)  #[km/s]
        Rvir = df['Rvir'][indx] * u.kpc
        Rhalfmass = df['Halfmass_Radius'][indx] * u.kpc
        epva = pd.concat([df['A[x]'], df['A[y]'], df['A[z]']],
                         axis=1).loc[[indx]].values
        epvb = pd.concat([df['B[x]'], df['B[y]'], df['B[z]']],
                         axis=1).loc[[indx]].values
        epvc = pd.concat([df['C[x]'], df['C[y]'], df['C[z]']],
                         axis=1).loc[[indx]].values

        # Stellar half mass radius
        Rshm = cf.call_stellar_halfmass(
            Star_pos[:, 0], Star_pos[:, 1], Star_pos[:, 2],
            HaloPosBox[0], HaloPosBox[1], HaloPosBox[2], Star_mass,
            Rvir.to_value('Mpc')) * u.Mpc
        #print('Rshm', Rshm)
        Star_indx = check_in_sphere(HaloPosBox, Star_pos, Rshm.to_value('kpc'))
        if len(Star_indx[0]) > 50:
            Mdyn = mass_dynamical(Rshm, Star_vel[Star_indx], HaloPosBox,
                                  HaloVel, epva, epvb, epvc)
            print('Mdyn', Mdyn)
            if Mdyn == .0:
                continue
            # Run through sources
            for ss in range(len(LM['Sources']['Src_ID'][ll])):
                zs = LM['Sources']['zs'][ll][ss]
                Rein = LM['Sources']['Rein'][ll][ss] * u.kpc
                Mlens = mass_lensing(Rein, zl, zs, cosmo)
                #Mdyn = (Vrms.to('m/s')**2*Rvir.to('m')/ \
                #        const.G.to('m3/(kg*s2)')).to_value('M_sun')

                results.append([
                    LM['Halo_ID'][ll], LM['Sources']['Src_ID'][ll][ss], Mdyn,
                    Mlens
                ])
    results_per_cpu[cpunum] = results
# read snapshot head and obtain BoxSize, Omega_m and Omega_L
print '\nREADING SNAPSHOTS PROPERTIES'
head = readsnap.snapshot_header(snapshot_fname)
BoxSize = head.boxsize / 1e3  #Mpc/h
Nall = head.nall
Masses = head.massarr * 1e10  #Msun/h
Omega_m = head.omega_m
Omega_l = head.omega_l
redshift = head.redshift
Hubble = 100.0 * np.sqrt(Omega_m *
                         (1.0 + redshift)**3 + Omega_l)  #km/s/(Mpc/h)
h = head.hubble

# read particle positions and masses
pos = readsnap.read_block(snapshot_fname, "POS ", parttype=ptype) / 1e3  #Mpc/h
mass = readsnap.read_block(snapshot_fname, "MASS",
                           parttype=ptype) * 1e10  #Msun/h

# move particle positions to redshift-space
if do_RSD:
    vel = readsnap.read_block(snapshot_fname, "VEL ", parttype=ptype)  #km/s
    RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)
    del vel

# some verbose
print '%.3f < X [Mpc/h] < %.3f' % (np.min(pos[:, 0]), np.max(pos[:, 0]))
print '%.3f < Y [Mpc/h] < %.3f' % (np.min(pos[:, 1]), np.max(pos[:, 1]))
print '%.3f < Z [Mpc/h] < %.3f' % (np.min(pos[:, 2]), np.max(pos[:, 2]))
print 'Omega_ptype = %.4f' % (np.sum(mass, dtype=np.float64) / BoxSize**3 /
                              rho_crit)
Example #49
0
def hod(snapshot_fname,groups_fname,groups_number,min_mass,max_mass,
        fiducial_density,M1,alpha,mass_criteria,verbose=False):

    thres=1e-3 #controls the max relative error to accept a galaxy density
    
    #read the header and obtain the boxsize
    head=readsnap.snapshot_header(snapshot_fname)
    BoxSize=head.boxsize    #BoxSize in kpc/h

    #read positions and IDs of DM particles: sort the IDs array
    DM_pos=readsnap.read_block(snapshot_fname,"POS ",parttype=-1) #kpc/h
    DM_ids=readsnap.read_block(snapshot_fname,"ID  ",parttype=-1)-1 
    sorted_ids=DM_ids.argsort(axis=0)
    #the particle whose ID is N is located in the position sorted_ids[N]
    #i.e. DM_ids[sorted_ids[N]]=N
    #the position of the particle whose ID is N would be:
    #DM_pos[sorted_ids[N]]

    #read the IDs of the particles belonging to the CDM halos
    halos_ID=readsubf.subf_ids(groups_fname,groups_number,0,0,
                               long_ids=True,read_all=True)
    IDs=halos_ID.SubIDs-1
    del halos_ID

    #read CDM halos information
    halos=readsubf.subfind_catalog(groups_fname,groups_number,
                                   group_veldisp=True,masstab=True,
                                   long_ids=True,swap=False)
    if mass_criteria=='t200':
        halos_mass=halos.group_m_tophat200*1e10   #masses in Msun/h
        halos_radius=halos.group_r_tophat200      #radius in kpc/h
    elif mass_criteria=='m200':
        halos_mass=halos.group_m_mean200*1e10     #masses in Msun/h
        halos_radius=halos.group_r_mean200        #radius in kpc/h
    elif mass_criteria=='c200':    
        halos_mass=halos.group_m_crit200*1e10     #masses in Msun/h
        halos_radius=halos.group_r_crit200        #radius in kpc/h
    else:
        print 'bad mass_criteria'
        sys.exit()
    halos_pos=halos.group_pos   #positions in kpc/h
    halos_len=halos.group_len
    halos_offset=halos.group_offset
    halos_indexes=np.where((halos_mass>min_mass) & (halos_mass<max_mass))[0]
    del halos
    
    if verbose:
        print ' '
        print 'total halos found=',halos_pos.shape[0]
        print 'halos number density=',len(halos_pos)/(BoxSize*1e-3)**3

    #keep only the halos in the given mass range 
    halo_mass=halos_mass[halos_indexes]
    halo_pos=halos_pos[halos_indexes]
    halo_radius=halos_radius[halos_indexes]
    halo_len=halos_len[halos_indexes]
    halo_offset=halos_offset[halos_indexes]
    del halos_indexes

    ##### COMPUTE Mmin GIVEN M1 & alpha #####
    i=0; max_iterations=20 #maximum number of iterations
    Mmin1=min_mass; Mmin2=max_mass
    while (i<max_iterations):
        Mmin=0.5*(Mmin1+Mmin2) #estimation of the HOD parameter Mmin

        total_galaxies=0
        inside=np.where(halo_mass>Mmin)[0] #keep all halos with M>Mmin
        mass=halo_mass[inside] #only halos with M>Mmin host central/satellite galaxies

        total_galaxies=mass.shape[0]+np.sum((mass/M1)**alpha)
        mean_density=total_galaxies*1.0/(BoxSize*1e-3)**3 #galaxies/(Mpc/h)^3

        if (np.absolute((mean_density-fiducial_density)/fiducial_density)<thres):
            i=max_iterations
        elif (mean_density>fiducial_density):
            Mmin1=Mmin
        else:
            Mmin2=Mmin
        i+=1

    if verbose:
        print ' '
        print 'Mmin=',Mmin
        print 'average number of galaxies=',total_galaxies
        print 'average galaxy density=',mean_density
    #########################################

    #just halos with M>Mmin; the rest do not host central/satellite galaxies
    inside=np.where(halo_mass>Mmin)[0]
    halo_mass=halo_mass[inside]
    halo_pos=halo_pos[inside]
    halo_radius=halo_radius[inside]
    halo_len=halo_len[inside]
    halo_offset=halo_offset[inside]
    del inside

    #compute number of satellites in each halo using the Poisson distribution 
    N_mean_sat=(halo_mass/M1)**alpha #mean number of satellites
    N_sat=np.empty(len(N_mean_sat),dtype=np.int32)
    for i in range(len(N_sat)):
        N_sat[i]=np.random.poisson(N_mean_sat[i])
    N_tot=np.sum(N_sat)+len(halo_mass) #total number of galaxies in the catalogue

    if verbose:
        print ' '
        print np.min(halo_mass),'< M_halo <',np.max(halo_mass)
        print 'total number of galaxies=',N_tot
        print 'galaxy number density=',N_tot/(BoxSize*1e-3)**3

    #put satellites following the distribution of dark matter in groups
    if verbose:
        print ' '
        print 'Creating mock catalogue ...',

    pos_galaxies=np.empty((N_tot,3),dtype=np.float32)
    #index: runs over halos (a halo may host several galaxies)
    #i: runs over all galaxies (centrals and satellites)
    #count: number of galaxies that end up beyond their host halo's virial radius
    index=0; count=0; i=0 
    while (index<halo_mass.shape[0]):

        position=halo_pos[index]  #position of the DM halo
        radius=halo_radius[index] #radius of the DM halo

        #save the position of the central galaxy
        pos_galaxies[i]=position; i+=1

        #if halo contains satellites, save their positions
        Nsat=N_sat[index]
        if Nsat>0:
            offset=halo_offset[index] 
            length=halo_len[index]
            idss=sorted_ids[IDs[offset:offset+length]]

            #compute the distances to the halo center keeping those with R<Rvir
            pos=DM_pos[idss] #positions of the particles belonging to the halo
            posc=pos-position

            #this is to populate correctly halos closer to box boundaries
            if np.any((position+radius>BoxSize) + (position-radius<0.0)):
                
                inside=np.where(posc[:,0]>BoxSize/2.0)[0]
                posc[inside,0]-=BoxSize
                inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
                posc[inside,0]+=BoxSize

                inside=np.where(posc[:,1]>BoxSize/2.0)[0]
                posc[inside,1]-=BoxSize
                inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
                posc[inside,1]+=BoxSize

                inside=np.where(posc[:,2]>BoxSize/2.0)[0]
                posc[inside,2]-=BoxSize
                inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
                posc[inside,2]+=BoxSize
                
            radii=np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2)
            inside=np.where(radii<radius)[0]
            selected=random.sample(inside,Nsat)
            pos=pos[selected]

            #additional, non-essential check; can be commented out
            posc=pos-position
            if np.any((posc>BoxSize/2.0) + (posc<-BoxSize/2.0)):
                inside=np.where(posc[:,0]>BoxSize/2.0)[0]
                posc[inside,0]-=BoxSize
                inside=np.where(posc[:,0]<-BoxSize/2.0)[0]
                posc[inside,0]+=BoxSize

                inside=np.where(posc[:,1]>BoxSize/2.0)[0]
                posc[inside,1]-=BoxSize
                inside=np.where(posc[:,1]<-BoxSize/2.0)[0]
                posc[inside,1]+=BoxSize

                inside=np.where(posc[:,2]>BoxSize/2.0)[0]
                posc[inside,2]-=BoxSize
                inside=np.where(posc[:,2]<-BoxSize/2.0)[0]
                posc[inside,2]+=BoxSize
            r_max=np.max(np.sqrt(posc[:,0]**2+posc[:,1]**2+posc[:,2]**2))
            if r_max>radius: #check that no particle beyond Rvir was selected
                print position
                print radius
                print pos
                count+=1

            for j in range(Nsat):
                pos_galaxies[i]=pos[j]; i+=1
        index+=1

    if verbose:
        print 'done'
    #some final checks
    if i!=N_tot:
        print 'some galaxies are missing:'
        print 'registered',i,'galaxies out of',N_tot
    if count>0:
        print 'error:',count,'particles beyond the virial radius selected'

    return pos_galaxies
Example #50
0
def compute_Pk(snapshot_fname, dims, do_RSD, axis, hydro):

    # read snapshot head and obtain BoxSize, Omega_m and Omega_L
    print '\nREADING SNAPSHOTS PROPERTIES'
    head = readsnap.snapshot_header(snapshot_fname)
    BoxSize = head.boxsize / 1e3  #Mpc/h
    Nall = head.nall
    Masses = head.massarr * 1e10  #Msun/h
    Omega_m = head.omega_m
    Omega_l = head.omega_l
    redshift = head.redshift
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_l)  #h*km/s/Mpc
    h = head.hubble

    z = '%.3f' % redshift
    f_out = 'Pk_m_z=' + z + '.dat'

    # compute the values of Omega_CDM and Omega_B
    Omega_cdm = Nall[1] * Masses[1] / BoxSize**3 / rho_crit
    Omega_nu = Nall[2] * Masses[2] / BoxSize**3 / rho_crit
    Omega_b = Omega_m - Omega_cdm - Omega_nu
    print '\nOmega_CDM = %.4f\nOmega_B   = %0.4f\nOmega_NU  = %.4f'\
        %(Omega_cdm,Omega_b,Omega_nu)
    print 'Omega_M   = %.4f' % (Omega_m)

    # read the positions of all the particles
    pos = readsnap.read_block(snapshot_fname, "POS ",
                              parttype=-1) / 1e3  #Mpc/h
    print '%.3f < X [Mpc/h] < %.3f' % (np.min(pos[:, 0]), np.max(pos[:, 0]))
    print '%.3f < Y [Mpc/h] < %.3f' % (np.min(pos[:, 1]), np.max(pos[:, 1]))
    print '%.3f < Z [Mpc/h] < %.3f\n' % (np.min(pos[:, 2]), np.max(pos[:, 2]))

    if do_RSD:
        print 'moving particles to redshift-space'
        # read the velocities of all the particles
        vel = readsnap.read_block(snapshot_fname, "VEL ", parttype=-1)  #km/s
        RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)
        del vel

    # read the masses of all the particles
    if not (hydro):
        Ntotal = np.sum(Nall, dtype=np.int64)  #compute the number of particles
        M = np.zeros(Ntotal, dtype=np.float32)  #define the mass array
        offset = 0
        for ptype in [0, 1, 2, 3, 4, 5]:
            M[offset:offset + Nall[ptype]] = Masses[ptype]
            offset += Nall[ptype]
    else:
        M = readsnap.read_block(snapshot_fname, "MASS",
                                parttype=-1) * 1e10  #Msun/h
    print '%.3e < M [Msun/h] < %.3e' % (np.min(M), np.max(M))
    print 'Omega_M = %.4f\n' % (np.sum(M, dtype=np.float64) / rho_crit /
                                BoxSize**3)

    # compute the mean mass per grid cell
    mean_M = np.sum(M, dtype=np.float64) / dims**3

    # compute the mass within each grid cell
    delta = np.zeros(dims**3, dtype=np.float32)
    CIC.CIC_serial(pos, dims, BoxSize, delta, M)
    del pos
    print '%.6e should be equal to \n%.6e\n'\
        %(np.sum(M,dtype=np.float64),np.sum(delta,dtype=np.float64))
    del M

    # compute the density constrast within each grid cell
    delta /= mean_M
    delta -= 1.0
    print '%.3e < delta < %.3e\n' % (np.min(delta), np.max(delta))

    # compute the P(k)
    Pk = PSL.power_spectrum_given_delta(delta, dims, BoxSize)

    # write P(k) to output file
    np.savetxt(f_out, np.transpose([Pk[0], Pk[1]]))
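#A minimal usage sketch (not part of the original); the snapshot name and grid
#size below are illustrative placeholders only.
#compute_Pk('snapdir_003/snap_003', dims=512, do_RSD=False, axis=0, hydro=False)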
Example #51
0
Radii      = np.array([5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27,
                       29, 31, 33, 35, 37, 39, 41], dtype=np.float32)
threshold  = -0.7
threads1   = 16
threads2   = 4
void_field = True
##############################################################################

# read snapshot head and obtain BoxSize, Omega_m and Omega_L
head    = readsnap.snapshot_header(snapshot)
BoxSize = head.boxsize/1e3  #Mpc/h                      

Radii = Radii*BoxSize/grid

# read particle positions
pos = readsnap.read_block(snapshot,"POS ",parttype=1)/1e3 #Mpc/h

# compute density field
delta = np.zeros((grid, grid, grid), dtype=np.float32)
MASL.MA(pos, delta, BoxSize, MAS)
delta /= np.mean(delta, dtype=np.float64);  delta -= 1.0

# find the void
V = VL.void_finder(delta, BoxSize, threshold, Radii, 
                   threads1, threads2, void_field=void_field)

# void properties
void_pos    = V.void_pos     #Mpc/h
void_radius = V.void_radius  #Mpc/h
VSF_R       = V.Rbins        #VSF bins in radius
VSF         = V.void_vsf     #VSF
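
# a short follow-up sketch (not in the original): write the void catalogue and
# the void size function to plain-text files; the output names are placeholders
np.savetxt('void_catalogue.txt', np.column_stack([void_pos, void_radius]),
           header='x  y  z  R  [Mpc/h]')
np.savetxt('void_size_function.txt', np.transpose([VSF_R, VSF]),
           header='R [Mpc/h]   VSF')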