Code example #1
    def __init__(self,
                 base,
                 num,
                 long_ids=False,
                 snapbase='snap',
                 double_output=False,
                 verbose=False,
                 run=None):
        self.base = base
        self.snapbase = snapbase
        self.num = num
        self.num_pad = str(num).zfill(3)
        self.verbose = verbose
        self.part_types = [0, 1, 4, 5]
        keysel = [
            "ngroups", "nsubs", "GroupLenType", "GroupNsubs", "GroupFirstSub",
            "SubhaloLenType"
        ]
        self.cat = readsubfHDF5.subfind_catalog(base,
                                                num,
                                                long_ids=long_ids,
                                                double_output=double_output,
                                                keysel=keysel)
        if not hasattr(self.cat, "GroupLenType"):
            raise RuntimeError("Subfind catalog has no group or subhalo "
                               "information.")

        self.filenames = naming.get_snap_filenames(self.base, self.snapbase,
                                                   self.num)
        offsets = readsubfHDF5.get_offsets(self.cat, self.part_types, self.num,
                                           run)
        self.group_offset, self.halo_offset = offsets

        head = readsnapHDF5.snapshot_header(self.filenames[0])

        self.file_num = head.filenum
        assert (self.file_num == len(self.filenames))

        ntypes = 6
        self.file_type_numbers = np.zeros([self.file_num, ntypes],
                                          dtype="int64")
        cumcount = np.zeros(ntypes, dtype="int64")

        # Store in file_type_numbers[i, :] the cumulative number of particles
        # in all previous files.  Note we never need to open the last file.
        for i in range(0, self.file_num - 1):
            if self.verbose:
                print("READHALO: initial read of file: %s" % self.filenames[i])
            head = readsnapHDF5.snapshot_header(self.filenames[i])

            cumcount[:] += head.npart[:]
            self.file_type_numbers[i + 1, :] = cumcount[:]
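The constructor above caches, in file_type_numbers, the cumulative number of particles of each type stored in all preceding snapshot files; this is the table that later lets a global particle offset be mapped to a file index and a local offset. A minimal, self-contained sketch of that bookkeeping follows (the per-file particle counts are invented stand-ins for the head.npart arrays):

import numpy as np

# Invented per-file particle counts: one row per snapshot file,
# one column per particle type (stand-ins for head.npart).
npart_per_file = np.array([[100, 80, 0, 10, 5, 0],
                           [120, 80, 0, 12, 4, 0],
                           [ 90, 80, 0,  9, 6, 0]], dtype="int64")

file_num, ntypes = npart_per_file.shape
file_type_numbers = np.zeros([file_num, ntypes], dtype="int64")
# Row i holds the cumulative counts of all files before file i.
file_type_numbers[1:, :] = np.cumsum(npart_per_file[:-1], axis=0)

# Map a global offset of a given particle type to the file containing it,
# the same lookup used by the read() method shown in Code example #17.
parttype, off = 0, 150
findex = np.argmax(file_type_numbers[:, parttype] > off) - 1
if findex == -1:                      # offset falls in the last file
    findex = file_num - 1
local_off = off - file_type_numbers[findex, parttype]
print(findex, local_off)              # -> 1 50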
Code example #2
    def __init__(self,catdir,snapnum,nbins,rmin=1e-2,rmax=1.,useSubhaloID=False,\
            useFOF=False, useStellarhalfmassRad=None, useReduced=False, radinkpc=False,\
            NR=False, binwidth=0.1, debug=False, useSubhaloes=False, testconvergence=False,):

        assert type(nbins) is int and nbins >= 0, 'parameter nbins must be int'

        # Set main parameters
        if catdir.find('/output') < 0:
            self.snapdir = catdir + '/output/'
        else:
            self.snapdir = catdir
        self.snapnum = snapnum
        self.useSubhaloID = useSubhaloID
        self.useStellarhalfmassRad = False
        self.radinkpc = radinkpc
        print '\n\tAxialRatio: ', self.snapdir

        # Read snapshot header for boxsize
        snapstr = str(snapnum).zfill(3)
        self.header = readsnapHDF5.snapshot_header(self.snapdir + '/snapdir_' +
                                                   snapstr + '/snap_' +
                                                   snapstr)
        self.boxsize = self.header.boxsize
        print '\tAxialRatio: Boxsize =', self.boxsize

        self.parttypes = [1]
        if self.header.cooling == 1:
            self.parttypes.append(0)
        if self.header.sfr == 1:
            self.parttypes.append(4)
        print '\tAvailable parttypes =', self.parttypes

        # Set other parameters
        self.setparams(nbins,rmin,rmax,useFOF,useStellarhalfmassRad,useReduced,NR,\
                binwidth, debug, testconvergence)

        # Read SUBFIND catalogue
        keysel = [
            "Group_R_Crit200", "GroupFirstSub", "Group_M_Crit200", "GroupPos"
        ]
        if useSubhaloes is True:
            print 'adding keysel for subhaloes'
            keysel += ["GroupNsubs", "SubhaloPos", "SubhaloMass"]
        if useSubhaloID is True:
            print 'adding keysel for using SubhaloIDs'
            assert useStellarhalfmassRad is True
            assert useFOF is False
        if useStellarhalfmassRad is True:
            assert self.header.sfr == 1, 'Simulation does not have stars'
            print 'adding keysel for StellarhalfmassRad'
            keysel += [
                "SubhaloPos", "SubhaloMassInRadType", "SubhaloHalfmassRadType"
            ]
        self.cat = readsubfHDF5.subfind_catalog(self.snapdir,
                                                snapnum,
                                                keysel=keysel)
Code example #3
File: track_tracers.py (Project: jgsuresh/Arepo-Disks)
def get_subhalo_ids(base,snap_num,sub_id):
	cat = readsubfHDF5.subfind_catalog(base, snap_num)

	sub_list = cat.GroupFirstSub
	grp_id = np.argmin( np.abs(sub_id-sub_list) )

	#snapname = base + "snapdir_"+str(snap_num).zfill(3)+"/snap_"+str(snap_num).zfill(3)
	snapname = base + "/snap_"+str(snap_num).zfill(3)

	redshift = readsnapHDF5.snapshot_header(snapname).redshift
	scale_factor = 1./(1.+redshift)


	# Get full bound-particle ID list:
	all_bound_ids = get_full_bound_id_list(base,snap_num)

	# First need to construct the offset table:
	groupOffset = np.zeros(cat.ngroups, dtype="int64")
	haloOffset = np.zeros(cat.nsubs, dtype="int64")

	for i in range(1,cat.ngroups):
		groupOffset[i] = groupOffset[i-1] + cat.GroupLen[i-1]

	for GrNr in range(0,cat.ngroups): #GrNr means Group number
		nsubs = cat.GroupNsubs[GrNr]

		if nsubs > 0:
			SubNr = cat.GroupFirstSub[GrNr]
			haloOffset[SubNr] = groupOffset[GrNr]

			if nsubs > 1:
				# offsets of subhaloes SubNr+1 ... SubNr+nsubs-1 follow from the
				# cumulative lengths of the first nsubs-1 subhaloes in the group
				subOffsets = np.cumsum(cat.SubhaloLen[SubNr:SubNr+nsubs-1])
				haloOffset[SubNr+1:SubNr+nsubs] = groupOffset[GrNr] + subOffsets


	sub_offset = haloOffset[sub_id]
	sub_len = cat.SubhaloLen[sub_id]

	sub_ids = all_bound_ids[sub_offset:sub_offset+sub_len]

	return sub_ids
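The core of get_subhalo_ids is the offset table: each group's offset is the running sum of GroupLen over the preceding groups, and each subhalo's offset is its group's offset plus the lengths of the subhaloes that precede it inside that group. A minimal sketch of the same bookkeeping on a toy catalogue (field names mirror the SUBFIND arrays; the numbers are invented):

import numpy as np

# Toy catalogue: 2 groups, 3 subhaloes in total (values are invented).
GroupLen      = np.array([50, 30], dtype="int64")
GroupNsubs    = np.array([2, 1], dtype="int64")
GroupFirstSub = np.array([0, 2], dtype="int64")
SubhaloLen    = np.array([30, 15, 25], dtype="int64")

groupOffset = np.zeros(len(GroupLen), dtype="int64")
groupOffset[1:] = np.cumsum(GroupLen[:-1])

haloOffset = np.zeros(len(SubhaloLen), dtype="int64")
for GrNr in range(len(GroupLen)):
    first, nsubs = GroupFirstSub[GrNr], GroupNsubs[GrNr]
    if nsubs > 0:
        haloOffset[first] = groupOffset[GrNr]
        if nsubs > 1:
            haloOffset[first + 1:first + nsubs] = \
                groupOffset[GrNr] + np.cumsum(SubhaloLen[first:first + nsubs - 1])

print(groupOffset)   # [ 0 50]
print(haloOffset)    # [ 0 30 50]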
Code example #4
fbase = args.base
if (args.t.find('TNG') > 0) or (args.TNG):
    print args.t
    print args.TNG
    fbase = "/n/hernquistfs3/IllustrisTNG/Runs/"
fdir = fbase + args.t + '/'
assert os.path.isdir(fdir), fdir + " does not exist!"

snap = args.snap
snapstr = str(snap).zfill(3)
if len(args.extra) == 0:
    fout = args.t + '_StellarShape_' + snapstr + '.hdf5'
else:
    fout = args.t + '_StellarShape_' + snapstr + '_' + args.extra + '.hdf5'

header = readsnapHDF5.snapshot_header(fdir + '/output/snapdir_' + snapstr +
                                      '/snap_' + snapstr)
boxsize = header.boxsize
hubble = header.hubble

chunksize = args.chunksize
#ar.ellipsoid.ne.set_num_threads(args.nthreads)
mass = [args.minmass, args.maxmass]
if args.test:
    mass = [10, 10.5]
    chunksize = 5
binwidth = args.binwidth

print 'Directory: ', fdir
print 'Snapshot: ', snap
print 'Boxsize: ', boxsize
print 'Hubble parameter: ', hubble
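The snippet above assumes an argparse parser created earlier. A minimal parser consistent with the attributes it references might look like the following sketch; the option names match the attributes used in the snippet, while the defaults and help strings are invented:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('t', help='run name, e.g. L75n1820TNG')
parser.add_argument('snap', type=int, help='snapshot number')
parser.add_argument('--base', default='')
parser.add_argument('--TNG', action='store_true')
parser.add_argument('--extra', default='')
parser.add_argument('--minmass', type=float, default=10.0)
parser.add_argument('--maxmass', type=float, default=15.0)
parser.add_argument('--chunksize', type=int, default=100)
parser.add_argument('--binwidth', type=float, default=0.1)
parser.add_argument('--nthreads', type=int, default=1)
parser.add_argument('--test', action='store_true')
args = parser.parse_args()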
Code example #5
    if ("sink" in base): sink_label='-sink' 
    else: sink_label=''
    if ("hires" in base): 
        if ("_hires" in base):res_label='-hires' 
        elif ("_vhires" in base): res_label='-vhires' 
        elif ("_vvhires" in base): res_label='-vvhires' 
    else: res_label=''

    labels=sink_label+res_label

    #check the number of orbits at the zeroth and last snapshots
    orbit_range = []
    for snap in [init_snap,final_snap]:
        filename=directory+base+snap_base+str(snap).zfill(3)
        header = rs.snapshot_header(filename)
        time = header.time + time_offset
        orbit_range.append(int(time/(2*np.pi)))
    
    norbits = orbit_range[0]

    outfilename="binary-accretion-rate"+labels+"_norbits%i-%i_q%3.1f_e%3.1f_h%3.1f_alpha%3.1f_eta%.2f.txt"\
	% (orbit_range[0],orbit_range[1],qb,eb,h,alpha,eta)
    print "Saving accretion rate data to file:"
    print "                                  ",outfilename

    
    # Now read accretion data file
    print "Reading in data..."
    accretionfile=directory+base+'circumstellarsink.txt'
    #accretionfile=directory+base+'circumstellarsink_justmasses.txt'
Code example #6
File: redshift.py (Project: jgsuresh/Arepo-Disks)
# stupid program to see how long we are on each redshift..

import numpy as np
import matplotlib.pyplot as plt
import readsnapHDF5 as rs

home = "/n/hernquistfs1/mvogelsberger/ComparisonProject/"

res = "256_20Mpc"
code_type = "Arepo_ENERGY"
#snapnum = 314

out_folder = "/output"

base = home + res + "/" + code_type + out_folder


snap_array = np.arange(315)
z = np.zeros(315)
for snapnum in snap_array:
	print "on snapnum ",snapnum
	snapname = base + "/snapdir_"+str(snapnum).zfill(3)+ "/snap_"+str(snapnum).zfill(3)
	z[snapnum] = rs.snapshot_header(snapname).redshift

snap_z2 = (np.abs(z-2.)).argmin()
print "snap number ",snap_z2
print "has redshift ",z[snap_z2]

plt.plot(snap_array,z)
plt.show()
Code example #7
def Illustris_region(snapshot_root, snapnum, TREECOOL_file, x_min, x_max, 
                     y_min, y_max, z_min, z_max, padding, fout,
                     redshift_space=False, axis=0):


    # read snapshot and find number of subfiles
    snapshot = snapshot_root + 'snapdir_%03d/snap_%03d'%(snapnum,snapnum)
    header   = rs.snapshot_header(snapshot)
    nall     = header.nall
    redshift = header.redshift
    BoxSize  = header.boxsize/1e3 #Mpc/h
    filenum  = header.filenum
    Omega_m  = header.omega0
    Omega_L  = header.omegaL
    h        = header.hubble
    Hubble = 100.0*np.sqrt(Omega_m*(1.0+redshift)**3+Omega_L) #km/s/(Mpc/h)

    if myrank==0:
        print '\n'
        print 'BoxSize         = %.3f Mpc/h'%BoxSize
        print 'Number of files = %d'%filenum
        print 'Omega_m         = %.3f'%Omega_m
        print 'Omega_l         = %.3f'%Omega_L
        print 'redshift        = %.3f'%redshift

    # find the numbers each cpu will work on
    array   = np.arange(0, filenum)
    numbers = np.where(array%nprocs==myrank)[0]

    # loop over the snapshot subfiles assigned to this rank
    particles = 0
    for i in numbers:

        snapshot = snapshot_root + 'snapdir_%03d/snap_%03d.%d'%(snapnum,snapnum,i)
        pos = rs.read_block(snapshot, 'POS ', parttype=0, verbose=False)/1e3
        pos = pos.astype(np.float32)

        # read velocities and displace particle positions
        if redshift_space:
            vel = rs.read_block(snapshot, 'VEL ', parttype=0, verbose=False)/np.sqrt(1.0+redshift) #km/s
            RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)

        # check if particles are in the region
        indexes_region = np.where((pos[:,0]>=x_min-padding) & (pos[:,0]<=x_max+padding) &\
                                  (pos[:,1]>=y_min-padding) & (pos[:,1]<=y_max+padding) &\
                                  (pos[:,2]>=z_min-padding) & (pos[:,2]<=z_max+padding))[0]

        # if particles are not in the region continue
        local_particles = indexes_region.shape[0]
        print 'Myrank = %d ---> num = %d ---> part = %ld'%(myrank,i,local_particles)
        if local_particles==0:  continue

        # find radii, HI and gas masses
        MHI  = rs.read_block(snapshot, 'NH  ', parttype=0, verbose=False)#HI/H
        mass = rs.read_block(snapshot, 'MASS', parttype=0, verbose=False)*1e10
        SFR  = rs.read_block(snapshot, 'SFR ', parttype=0, verbose=False)
        indexes = np.where(SFR>0.0)[0];  del SFR

        # find the metallicity of star-forming particles
        metals = rs.read_block(snapshot, 'GZ  ', parttype=0, verbose=False)
        metals = metals[indexes]/0.0127

        # find densities of star-forming particles: units of h^2 Msun/Mpc^3
        rho    = rs.read_block(snapshot, 'RHO ', parttype=0, verbose=False)*1e19
        Volume = mass/rho                            #(Mpc/h)^3
        radii  = (Volume/(4.0*np.pi/3.0))**(1.0/3.0) #Mpc/h 
        rho    = rho[indexes]                        #h^2 Msun/Mpc^3
        Volume = Volume[indexes]                     #(Mpc/h)^3

        # find volume and radius of star-forming particles
        radii_SFR  = (Volume/(4.0*np.pi/3.0))**(1.0/3.0) #Mpc/h 
            
        # find HI/H fraction for star-forming particles
        MHI[indexes] = HIL.Rahmati_HI_Illustris(rho, radii_SFR, metals, redshift, 
                                                h, TREECOOL_file, Gamma=None,
                                                fac=1, correct_H2=True) #HI/H
        MHI *= (0.76*mass)
            

        # select the particles belonging to the region
        pos   = pos[indexes_region]
        MHI   = MHI[indexes_region]
        radii = radii[indexes_region]
        mass  = mass[indexes_region]

        # write partial files        
        new_size = particles + local_particles    

        if particles==0:
            f = h5py.File(fout[:-5]+'_%d.hdf5'%myrank, 'w')
            f.create_dataset('pos',   data=pos,   maxshape=(None,3))
            f.create_dataset('M_HI',  data=MHI,   maxshape=(None,))
            f.create_dataset('radii', data=radii, maxshape=(None,))
            f.create_dataset('mass',  data=mass,  maxshape=(None,))
        else:
            f = h5py.File(fout[:-5]+'_%d.hdf5'%myrank, 'a')
            pos_f   = f['pos'];    pos_f.resize((new_size,3))
            M_HI_f  = f['M_HI'];   M_HI_f.resize((new_size,))
            radii_f = f['radii'];  radii_f.resize((new_size,))
            mass_f  = f['mass'];   mass_f.resize((new_size,))
            pos_f[particles:]   = pos
            M_HI_f[particles:]  = MHI
            radii_f[particles:] = radii
            mass_f[particles:]  = mass
        f.close()
        particles += local_particles
                
    comm.Barrier()

    # sum the particles found in each cpu
    All_particles = 0 
    All_particles = comm.reduce(particles, op=MPI.SUM, root=0)

    # Master merges the partial files into a single file
    if myrank==0:

        print 'Found %d particles'%All_particles
        f = h5py.File(fout,'w')
        
        f1 = h5py.File(fout[:-5]+'_0.hdf5','r')
        pos   = f1['pos'][:]
        M_HI  = f1['M_HI'][:]
        radii = f1['radii'][:]
        mass  = f1['mass'][:]
        f1.close()

        particles = mass.shape[0]
        pos_f   = f.create_dataset('pos',   data=pos,   maxshape=(None,3))
        M_HI_f  = f.create_dataset('M_HI',  data=M_HI,  maxshape=(None,))
        radii_f = f.create_dataset('radii', data=radii, maxshape=(None,))
        mass_f  = f.create_dataset('mass',  data=mass,  maxshape=(None,))

        for i in xrange(1,nprocs):
            f1 = h5py.File(fout[:-5]+'_%d.hdf5'%i,'r')
            pos   = f1['pos'][:]
            M_HI  = f1['M_HI'][:]
            radii = f1['radii'][:]
            mass  = f1['mass'][:]
            f1.close()
            
            size = mass.shape[0]
            
            pos_f.resize((particles+size,3));  pos_f[particles:] = pos
            M_HI_f.resize((particles+size,));  M_HI_f[particles:] = M_HI
            radii_f.resize((particles+size,)); radii_f[particles:] = radii
            mass_f.resize((particles+size,));  mass_f[particles:] = mass

            particles += size

        f.close()

        for i in xrange(nprocs):
            os.system('rm '+fout[:-5]+'_%d.hdf5'%i)
Code example #8
    #path_tng100_z2 = "/n/hernquistfs3/IllustrisTNG/Runs/L75n1820TNG/output/snapdir_033/snap_033"
    #path_tng100_z3 = "/n/hernquistfs3/IllustrisTNG/Runs/L75n1820TNG/output/snapdir_025/snap_025"
    #path_tng100_z4 = "/n/hernquistfs3/IllustrisTNG/Runs/L75n1820TNG/output/snapdir_021/snap_021"
    path_tng100_z5 = "/n/hernquistfs3/IllustrisTNG/Runs/L75n1820TNG/output/snapdir_017/snap_017"
    #path_tng100_z6 = "/n/hernquistfs3/IllustrisTNG/Runs/L75n1820TNG/output/snapdir_013/snap_013"
    #path_tng100_z8 = "/n/hernquistfs3/IllustrisTNG/Runs/L75n1820TNG/output/snapdir_008/snap_008"

    # Read header
    #header = rs.snapshot_header(path_tng100_z0+".1.hdf5")
    #header = rs.snapshot_header(path_tng100_z02+".1.hdf5")
    #header = rs.snapshot_header(path_tng100_z05+".1.hdf5")
    #header = rs.snapshot_header(path_tng100_z1+".1.hdf5")
    #header = rs.snapshot_header(path_tng100_z2+".1.hdf5")
    #header = rs.snapshot_header(path_tng100_z3+".1.hdf5")
    #header = rs.snapshot_header(path_tng100_z4+".1.hdf5")
    header = rs.snapshot_header(path_tng100_z5 + ".1.hdf5")
    #header = rs.snapshot_header(path_tng100_z6+".1.hdf5")
    #header = rs.snapshot_header(path_tng100_z8+".1.hdf5")
    if rank == 0:
        print "Time = ", header.time
        print "H0 = ", header.hubble
        print "Omega0 = ", header.omega0
        print "OmegaLambda = ", header.omegaL
        print "Box Size = ", header.boxsize
        print "Number of files = ", header.filenum

    # Interpolation grid parameters
    #nx = 256
    #ny = 256
    #nz = 256
    nx = 512
Code example #9
def Illustris_region(snapshot_root,
                     snapnum,
                     TREECOOL_file,
                     x_min,
                     x_max,
                     y_min,
                     y_max,
                     z_min,
                     z_max,
                     padding,
                     fout,
                     redshift_space=False,
                     axis=0):

    # read snapshot and find number of subfiles
    snapshot = snapshot_root + 'snapdir_%03d/snap_%03d' % (snapnum, snapnum)
    header = rs.snapshot_header(snapshot)
    nall = header.nall
    redshift = header.redshift
    BoxSize = header.boxsize / 1e3  #Mpc/h
    filenum = header.filenum
    Omega_m = header.omega0
    Omega_L = header.omegaL
    h = header.hubble
    Hubble = 100.0 * np.sqrt(Omega_m *
                             (1.0 + redshift)**3 + Omega_L)  #km/s/(Mpc/h)

    if myrank == 0:
        print '\n'
        print 'BoxSize         = %.3f Mpc/h' % BoxSize
        print 'Number of files = %d' % filenum
        print 'Omega_m         = %.3f' % Omega_m
        print 'Omega_l         = %.3f' % Omega_L
        print 'redshift        = %.3f' % redshift

    # find the numbers each cpu will work on
    array = np.arange(0, filenum)
    numbers = np.where(array % nprocs == myrank)[0]

    # loop over the snapshot subfiles assigned to this rank
    particles = 0
    for i in numbers:

        snapshot = snapshot_root + 'snapdir_%03d/snap_%03d.%d' % (snapnum,
                                                                  snapnum, i)
        pos = rs.read_block(snapshot, 'POS ', parttype=0, verbose=False) / 1e3
        pos = pos.astype(np.float32)

        # read velocities and displace particle positions
        if redshift_space:
            vel = rs.read_block(snapshot, 'VEL ', parttype=0,
                                verbose=False) / np.sqrt(1.0 + redshift)  #km/s
            RSL.pos_redshift_space(pos, vel, BoxSize, Hubble, redshift, axis)

        # check if particles are in the region
        indexes_region = np.where((pos[:,0]>=x_min-padding) & (pos[:,0]<=x_max+padding) &\
                                  (pos[:,1]>=y_min-padding) & (pos[:,1]<=y_max+padding) &\
                                  (pos[:,2]>=z_min-padding) & (pos[:,2]<=z_max+padding))[0]

        # if particles are not in the region continue
        local_particles = indexes_region.shape[0]
        print 'Myrank = %d ---> num = %d ---> part = %ld' % (myrank, i,
                                                             local_particles)
        if local_particles == 0: continue

        # find radii, HI and gas masses
        MHI = rs.read_block(snapshot, 'NH  ', parttype=0, verbose=False)  #HI/H
        mass = rs.read_block(snapshot, 'MASS', parttype=0,
                             verbose=False) * 1e10
        SFR = rs.read_block(snapshot, 'SFR ', parttype=0, verbose=False)
        indexes = np.where(SFR > 0.0)[0]
        del SFR

        # find the metallicity of star-forming particles
        metals = rs.read_block(snapshot, 'GZ  ', parttype=0, verbose=False)
        metals = metals[indexes] / 0.0127

        # find densities of star-forming particles: units of h^2 Msun/Mpc^3
        rho = rs.read_block(snapshot, 'RHO ', parttype=0, verbose=False) * 1e19
        Volume = mass / rho  #(Mpc/h)^3
        radii = (Volume / (4.0 * np.pi / 3.0))**(1.0 / 3.0)  #Mpc/h
        rho = rho[indexes]  #h^2 Msun/Mpc^3
        Volume = Volume[indexes]  #(Mpc/h)^3

        # find volume and radius of star-forming particles
        radii_SFR = (Volume / (4.0 * np.pi / 3.0))**(1.0 / 3.0)  #Mpc/h

        # find HI/H fraction for star-forming particles
        MHI[indexes] = HIL.Rahmati_HI_Illustris(rho,
                                                radii_SFR,
                                                metals,
                                                redshift,
                                                h,
                                                TREECOOL_file,
                                                Gamma=None,
                                                fac=1,
                                                correct_H2=True)  #HI/H
        MHI *= (0.76 * mass)

        # select the particles belonging to the region
        pos = pos[indexes_region]
        MHI = MHI[indexes_region]
        radii = radii[indexes_region]
        mass = mass[indexes_region]

        # write partial files
        new_size = particles + local_particles

        if particles == 0:
            f = h5py.File(fout[:-5] + '_%d.hdf5' % myrank, 'w')
            f.create_dataset('pos', data=pos, maxshape=(None, 3))
            f.create_dataset('M_HI', data=MHI, maxshape=(None, ))
            f.create_dataset('radii', data=radii, maxshape=(None, ))
            f.create_dataset('mass', data=mass, maxshape=(None, ))
        else:
            f = h5py.File(fout[:-5] + '_%d.hdf5' % myrank, 'a')
            pos_f = f['pos']
            pos_f.resize((new_size, 3))
            M_HI_f = f['M_HI']
            M_HI_f.resize((new_size, ))
            radii_f = f['radii']
            radii_f.resize((new_size, ))
            mass_f = f['mass']
            mass_f.resize((new_size, ))
            pos_f[particles:] = pos
            M_HI_f[particles:] = MHI
            radii_f[particles:] = radii
            mass_f[particles:] = mass
        f.close()
        particles += local_particles

    comm.Barrier()

    # sum the particles found in each cpu
    All_particles = 0
    All_particles = comm.reduce(particles, op=MPI.SUM, root=0)

    # Master merges the partial files into a single file
    if myrank == 0:

        print 'Found %d particles' % All_particles
        f = h5py.File(fout, 'w')

        f1 = h5py.File(fout[:-5] + '_0.hdf5', 'r')
        pos = f1['pos'][:]
        M_HI = f1['M_HI'][:]
        radii = f1['radii'][:]
        mass = f1['mass'][:]
        f1.close()

        particles = mass.shape[0]
        pos_f = f.create_dataset('pos', data=pos, maxshape=(None, 3))
        M_HI_f = f.create_dataset('M_HI', data=M_HI, maxshape=(None, ))
        radii_f = f.create_dataset('radii', data=radii, maxshape=(None, ))
        mass_f = f.create_dataset('mass', data=mass, maxshape=(None, ))

        for i in xrange(1, nprocs):
            f1 = h5py.File(fout[:-5] + '_%d.hdf5' % i, 'r')
            pos = f1['pos'][:]
            M_HI = f1['M_HI'][:]
            radii = f1['radii'][:]
            mass = f1['mass'][:]
            f1.close()

            size = mass.shape[0]

            pos_f.resize((particles + size, 3))
            pos_f[particles:] = pos
            M_HI_f.resize((particles + size, ))
            M_HI_f[particles:] = M_HI
            radii_f.resize((particles + size, ))
            radii_f[particles:] = radii
            mass_f.resize((particles + size, ))
            mass_f[particles:] = mass

            particles += size

        f.close()

        for i in xrange(nprocs):
            os.system('rm ' + fout[:-5] + '_%d.hdf5' % i)
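Both versions of Illustris_region accumulate results per MPI rank by creating HDF5 datasets with an unlimited first axis (maxshape=(None, ...)) and growing them with resize() before each write. A minimal, self-contained sketch of that append pattern, with an invented file name and invented data:

import numpy as np
import h5py

chunks = [np.random.rand(4, 3), np.random.rand(6, 3)]   # invented data

written = 0
for chunk in chunks:
    f = h5py.File('partial_0.hdf5', 'w' if written == 0 else 'a')
    if written == 0:
        # unlimited first axis so the dataset can be grown later
        f.create_dataset('pos', data=chunk, maxshape=(None, 3))
    else:
        pos_f = f['pos']
        pos_f.resize((written + chunk.shape[0], 3))
        pos_f[written:] = chunk
    written += chunk.shape[0]
    f.close()

with h5py.File('partial_0.hdf5', 'r') as f:
    print(f['pos'].shape)    # (10, 3)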
Code example #10
		ax3 = fig.add_subplot(1,3,3)
		cax=fig.add_axes([0.92,0.03,0.02,0.80])


	ax_list = [ax1,ax2,ax3]


	for jj,base_label in enumerate(output_base_label_list):
		base = "output_restart_"+base_label+"/"
		print("Reading from output directory:",base)
		snap_base="snap_"

                #check the number of orbits at the zeroth snapshot
		if (jj == 0): #use first output directory as reference
			filename=directory+base+snap_base+str(snap).zfill(3)
			header = rs.snapshot_header(filename)
			BoxX,BoxY = header.boxsize,header.boxsize
			time0 = header.time
			norbits=int(time0/(2*np.pi))
			ecc_anom = orbital.KeplerEquation(time0 + np.pi,eb)
			posx1, posy1 = -qb/(1+qb)*(np.cos(ecc_anom) - eb),-qb/(1+qb)*(np.sqrt(1 - eb * eb) * np.sin(ecc_anom))
			lims = -0.7+posx1,0.7+posx1,-0.7+posy1,0.7+posy1
				

		time = 1.0e30
		for snapfile in sorted(glob.glob(directory+base+snap_base+"*")):
			if (np.abs(time0-rs.snapshot_header(snapfile).time) < np.abs(time0-time)):
				time = rs.snapshot_header(snapfile).time
				num = int(split(split(snapfile,snap_base)[1],".hdf5")[0])
	
		filename=directory+base+snap_base+str(num).zfill(3)
Code example #11
args = parser.parse_args()

print '######################################################'
print 'Saving Stellar Mass!'
#dir='/n/ghernquist/Illustris/Runs/L75n1820DM/'
fbase = args.base
if (args.t.find('TNG') > 0):
    print args.t
    fbase = "/n/hernquistfs3/IllustrisTNG/Runs/"
fdir = fbase + args.t + '/output/'
assert os.path.isdir(fdir), fdir + " does not exist!"
snap = args.snap
snapstr = str(snap).zfill(3)
fout = '{}_StellarMass_{:03d}.hdf5'.format(args.t, snap)

header = readsnapHDF5.snapshot_header(
    '{0}/snapdir_{1:03d}/snap_{1:03d}'.format(fdir, snap))
hubble = header.hubble

box = header.boxsize

print 'Directory: ', fdir
print 'Snapshot: ', snap
print 'Redshift: ', header.redshift
print 'Hubble: ', header.hubble
print 'Massarr ', header.massarr
print 'Halo mass range for calculation: 10^[{},{}] M_sun'.format(
    args.minmass, args.maxmass)

#1: Create and write useful information
cat = readsubfHDF5.subfind_catalog(fdir,
                                   snap,
Code example #12
File: RayTracing.py (Project: djmunoz/diegopy)
def ray_tracing(filename,nframe,target,cam_pos,cam_orient,frame,near,far,xbins,ybins,Box,basename):
	cam_pos = np.array(cam_pos)
	cam_orient = np.array(cam_orient)	
	target = np.array(target)

	print cam_pos,cam_orient,target
	
	head=rs.snapshot_header(filename)
	pos=rs.read_block(filename, "POS ", parttype=0)
	mass=rs.read_block(filename, "MASS", parttype=0)
	hsmlfac=2.75*1.1
	hsml=hsmlfac*(3.0/4.0/np.pi*rs.read_block(filename, "VOL ", parttype=0))**(1.0/3.0)
	
	b,l,t,r = frame[0], frame[1], frame[2], frame[3]
	
        #translate box so that center is at Box/2.0
	centerx,centery,centerz = Box/2.0,Box/2.0,Box/2.0
        pos[:,0]=Box/2.0 + (pos[:,0]-centerx)
        pos[:,1]=Box/2.0 + (pos[:,1]-centery)
        pos[:,2]=Box/2.0 + (pos[:,2]-centerz)

        for k in range(0,3):
            ind=pos[:,k]>Box
            pos[ind,k]=pos[ind,k]-Box
            ind=pos[:,k]<0
            pos[ind,k]=pos[ind,k]+Box
            print "min/max coord: ",k,pos[:,k].min(), pos[:,k].max()

        #save original values
        x_orig=pos[:,0]
        y_orig=pos[:,1]
        z_orig=pos[:,2]
        hsml_orig=hsml
        mass_orig=mass
        
	#construct homog. transformation matrix
	PS=np.matrix([[(2*near)/(r-l),0,(r+l)/(r-l),0],[0,(2*near)/(t-b),(t+b)/(t-b),0],[0,0,-(far+near)/(far-near),-(2*far*near)/(far-near)],[0,0,-1,0]])
	nvec=-(cam_pos-target)*(-1.0)/np.sqrt((cam_pos[0]-target[0])**2.0 + (cam_pos[1]-target[1])**2.0 + (cam_pos[2]-target[2])**2.0)  #-1 
	temp=np.cross(cam_orient,nvec)
	rvec=temp/np.sqrt(temp[0]**2.0 + temp[1]**2.0 + temp[2]**2.0)
	uvec=np.cross(nvec, rvec) 
	R=np.matrix([[rvec[0],rvec[1],rvec[2],0],[uvec[0],uvec[1],uvec[2],0],[nvec[0],nvec[1],nvec[2],0],[0,0,0,1]])
	T=np.matrix([[1,0,0,-cam_pos[0]],[0,1,0,-cam_pos[1]],[0,0,1,-cam_pos[2]],[0,0,0,1]])

	PSRT=PS*R*T

	#PSRT tranformation: world coordinates -> camera coordinates
	x=PSRT[0,0]*x_orig + PSRT[0,1]*y_orig + PSRT[0,2]*z_orig + PSRT[0,3]*1
	y=PSRT[1,0]*x_orig + PSRT[1,1]*y_orig + PSRT[1,2]*z_orig + PSRT[1,3]*1
	z=PSRT[2,0]*x_orig + PSRT[2,1]*y_orig + PSRT[2,2]*z_orig + PSRT[2,3]*1
	w=PSRT[3,0]*x_orig + PSRT[3,1]*y_orig + PSRT[3,2]*z_orig + PSRT[3,3]*1

        hsml_x=PS[0,0]*hsml_orig + PS[0,1]*hsml_orig + PS[0,2]*hsml_orig + PS[0,3]*1
        hsml_y=PS[1,0]*hsml_orig + PS[1,1]*hsml_orig + PS[1,2]*hsml_orig + PS[1,3]*1
	
	#homog. scaling
	x/=w
	y/=w
	z/=w
	mass=mass_orig
	s=np.abs(w)
	hsml_x/=s
	hsml_y/=s
	hsml_o=hsml_orig

	#clipping in frustum (clip a bit larger for particle contributions outside of frustum)
	index=(np.abs(x) < 1.01)  & (np.abs(y) < 1.01) & (np.abs(z) < 1.01)
	x=x[index]
	y=y[index]
	z=z[index]
	hsml_x=hsml_x[index]
	hsml_y=hsml_y[index]
	hsml_o=hsml_o[index]
	mass=mass[index]

	print "Number of particles in frustum: ", mass.shape[0]
	
	#sort descending according to pseudo-depth
	index=np.argsort(z)[::-1]
	x=x[index]
	y=y[index]
	z=z[index]
	hsml_x=hsml_x[index]
	hsml_y=hsml_y[index]
        hsml_o=hsml_o[index]
	mass=mass[index]

	#avoid single pixel flickering
        pixfac=0.5
        hsml_x[hsml_x<pixfac*2.0/xbins]=0
        hsml_y[hsml_y<pixfac*2.0/ybins]=0

	#now ray-trace
	print "start render..."

        image=rc.Render(x, y, np.repeat(Ap, x.shape[0]).astype("float32"), hsml_x, hsml_y, hsml_o/(hsmlfac*(3.0/4.0/np.pi)**(1.0/3.0)), mass, xbins, ybins, hsmlfac)
	print "done."

	#save file
	fd=open(basename+str(nframe).zfill(4)+".dat", "wb")
	image.astype("float64").tofile(fd)
	fd.close()
	print image.shape	
	#clean image
	image=image*0.0	
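The projection in ray_tracing is a standard homogeneous-coordinate pipeline: the frustum matrix PS, the camera rotation R, and the translation T are combined once into PSRT, applied to the world coordinates, and the result is divided by the w component (the "homog. scaling" step). A minimal sketch of that projection for a single point, using an invented camera setup and no rotation:

import numpy as np

near, far = 1.0, 100.0
l, r, b, t = -1.0, 1.0, -1.0, 1.0          # invented symmetric frustum

# Frustum matrix with the same layout as PS above.
PS = np.array([[2*near/(r-l), 0.0, (r+l)/(r-l), 0.0],
               [0.0, 2*near/(t-b), (t+b)/(t-b), 0.0],
               [0.0, 0.0, -(far+near)/(far-near), -2*far*near/(far-near)],
               [0.0, 0.0, -1.0, 0.0]])
# Camera sits at (0, 0, 5) looking down -z; T moves it to the origin.
T = np.array([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, -5.0], [0, 0, 0, 1.0]])
PSRT = PS.dot(T)                           # no rotation (R = identity) here

p_world = np.array([0.5, 0.5, 0.0, 1.0])   # homogeneous world point
p_clip = PSRT.dot(p_world)
x, y, z = p_clip[:3] / p_clip[3]           # perspective divide by w
print(x, y, z)                             # about (0.1, 0.1, 0.62): inside the clip cube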
Code example #13
#===============================================================================================

resnap_name = "base400"
base = directory(resnap_name)[0]
snapnum = 311 #directory(resnap_name)[1]

print resnap_name

snapname = base + "/snapdir_"+str(snapnum).zfill(3)+ "/snap_"+str(snapnum).zfill(3)
#snapname = base + "/snapdir_"+str(snapnum).zfill(3)+"_SAVE"+ "/snap_"+str(snapnum).zfill(3)

print "snapname ", snapname
#===============================================================================================

redshift = rs.snapshot_header(snapname).redshift
scale_factor = 1./(1.+redshift)
print "z: ",redshift
print "a: ",scale_factor

#cat = readsubf.subfind_catalog(base,snapnum,masstab=True)
cat = readsubfHDF5.subfind_catalog(base,snapnum)

nsubs = cat.nsubs
print str(nsubs) + " subhalos!\n"

parttype_list = [0,1,4]

all_ids = np.array([],dtype="uint32")
types = np.array([],dtype="uint32")
mass = np.array([],dtype="float64")
Code example #14
def calculate_halo(sub_id): #submit job to queue

	# I assume that the subhalo is the primary subhalo in its group
	sub_list = cat.GroupFirstSub
	grp_id = np.argmin( np.abs(sub_id-sub_list) )
	#grp_id = cat.SubhaloParent[sub_id]

	sub_partIDs = get_subhalo_ids(base,snap_num,sub_id)
	sub_pos = cat.SubhaloPos[sub_id]
	sub_mass = cat.SubhaloMass[sub_id]
	sub_vel = cat.SubhaloVel[sub_id]
	sub_Rvir = cat.Group_R_Crit200[grp_id]




	frame_num = 0
	for current_snap in snap_nums:
		# Find the halo in this snapshot that corresponds to the halo in our original snapshot:
		print "on snapshot number ",current_snap


		old_match_flag = new_match_flag
		old_subid = new_subid
		old_partIDs = new_partIDs
		old_subpos = new_subpos
		old_submass = new_submass
		old_subvel = new_subvel

		#new_snapname = base + "snapdir_"+str(current_snap).zfill(3)+"/snap_"+str(current_snap).zfill(3)
		new_snapname = base + "/snap_"+str(current_snap).zfill(3)
		new_cat = readsubfHDF5.subfind_catalog(base, current_snap)

		redshift = readsnapHDF5.snapshot_header(new_snapname).redshift
		scale_factor = 1./(1.+redshift)

		#readsnapHDF5.list_blocks(new_snapname+".hdf5")

		# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		# MC tracer information:
		MC_ids = readsnapHDF5.read_block(new_snapname,"TFID",parttype=3) # May want to fix this for very large # of tracers (read in 1 snapshot at a time)
		MC_ind = np.in1d(MC_ids,gal_MC_ids,assume_unique=True) #This step takes a long time!
		MC_ids = 0.
		print "MC_ind.sum() ",MC_ind.sum()
		# now match the tracers with their parent particles:
		parent_ids = readsnapHDF5.read_block(new_snapname,"TRPI",parttype=3)[MC_ind]
		#parent_ids = np.unique(parent_ids)

		parent_ids.sort()
		(unique_parent_ids,unique_arg) = np.unique(parent_ids,return_index=True)
		foo = parent_ids.size-unique_arg[-1] # the number of reps for the very last unique entry in parent_ids

		rep_count = np.append(np.diff(unique_arg),foo)

		gas_ids = readsnapHDF5.read_block(new_snapname,"ID  ",parttype=0)
		gas_ind = np.in1d(gas_ids,unique_parent_ids,assume_unique=True)
		gas_ids = 0.
		gas_pos = readsnapHDF5.read_block(new_snapname,"POS ",parttype=0)
		pos1 = gas_pos[gas_ind]
		gas_pos = 0.
		gas_vel = readsnapHDF5.read_block(new_snapname,"VEL ",parttype=0)
		vel1 = gas_vel[gas_ind]
		gas_vel = 0.

		# now find which ones are stars (and get their properties):
		star_ids = readsnapHDF5.read_block(new_snapname,"ID  ",parttype=4)
		star_ind = np.in1d(star_ids,unique_parent_ids,assume_unique=True)
		star_ids = 0.
		star_pos = readsnapHDF5.read_block(new_snapname,"POS ",parttype=4)
		pos2 = star_pos[star_ind]
		star_pos = 0.
		star_vel = readsnapHDF5.read_block(new_snapname,"VEL ",parttype=4)
		vel2 = star_vel[star_ind]
		star_vel = 0.

		pos = np.concatenate((pos1,pos2))
		pos1 = 0.
		pos2 = 0.
		vel = np.concatenate((vel1,vel2))
		vel1 = 0.
		vel2 = 0.

		gal_MC_pos = np.repeat(pos,rep_count,axis=0)
		gal_MC_vel = np.repeat(vel,rep_count,axis=0)

		print "len(gal_MC_pos) ",len(gal_MC_pos)
		print "len(gal_MC_vel) ",len(gal_MC_vel)
		# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


		# Generate image:
		#generate_tracer_image()
		print "not generating image "
	                        
		# Generate savefile: # save: snap_num, # of vts, gal_vt_ids at z=0, sub_id at that snap, 
		# subhalo_pos at that snap, Rvir at that time, 
		filename = save_dir + str(current_snap).zfill(3)+".dat"
		f = open(filename,'wb')

		primary_halo_flag = np.uint32(primary_halo_flag)
		gal_MC_posx = gal_MC_pos[:,0]
		gal_MC_posy = gal_MC_pos[:,1]
		gal_MC_posz = gal_MC_pos[:,2]
		gal_MC_velx = gal_MC_vel[:,0]
		gal_MC_vely = gal_MC_vel[:,1]
		gal_MC_velz = gal_MC_vel[:,2]

		print "len(gal_MC_posx) ",len(gal_MC_posx)
		print "len(gal_MC_velz) ",len(gal_MC_velz)


		# Saving stuff to file:
		# general stuff
		current_snap.astype("uint32").tofile(f)
		redshift.astype("float64").tofile(f)
		# MC tracer data
		n_MC.astype("uint32").tofile(f)
		gal_MC_ids.astype("uint64").tofile(f)
		gal_MC_posx.astype("float64").tofile(f)
		gal_MC_posy.astype("float64").tofile(f)
		gal_MC_posz.astype("float64").tofile(f)
		gal_MC_velx.astype("float64").tofile(f)
		gal_MC_vely.astype("float64").tofile(f)
		gal_MC_velz.astype("float64").tofile(f)
		# halo data
		primary_halo_flag.astype("uint32").tofile(f)
		new_subid.astype("uint32").tofile(f)
		new_subpos.astype("float64").tofile(f)
		new_subvel.astype("float64").tofile(f)
		new_subRvir.astype("float64").tofile(f)
		if False:
			if primary_halo_flag == 1: pass
			elif primary_halo_flag == 0: 
				primary_halo_pos.astype("float64").tofile(f)
				primary_halo_vel.astype("float64").tofile(f)		
		
		f.close()

		frame_num = frame_num + 1
		time4 = time.time()

	print "done!"
	return
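The trickiest step in calculate_halo is turning the sorted, repeated list of tracer parent IDs into per-parent repetition counts, so that each parent particle's position and velocity can be replicated once per attached tracer with np.repeat. A minimal sketch of that counting pattern, with invented IDs and positions:

import numpy as np

# Invented sorted parent IDs: parent 7 carries 3 tracers, 9 one, 12 two.
parent_ids = np.array([7, 7, 7, 9, 12, 12])

unique_parent_ids, unique_arg = np.unique(parent_ids, return_index=True)
# repetitions of the last unique ID = total length minus its first index
last = parent_ids.size - unique_arg[-1]
rep_count = np.append(np.diff(unique_arg), last)
print(unique_parent_ids)    # [ 7  9 12]
print(rep_count)            # [3 1 2]

# Replicate each parent's (invented) position once per attached tracer.
parent_pos = np.array([[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]])
tracer_pos = np.repeat(parent_pos, rep_count, axis=0)
print(tracer_pos.shape)     # (6, 3)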
Code example #15
fout = 'sigma_HI_75_1820.txt'

snaps = np.array([17, 21, 25, 33, 50, 99])
###############################################################################


snapshot_root = '%s/output/'%run

# loop over the selected snapshots
for num in snaps:

    # find snapshot name and offset file name
    snapshot = snapshot_root + 'snapdir_%03d/snap_%03d'%(num,num)

    # read header
    header   = rs.snapshot_header(snapshot)
    nall     = header.nall
    redshift = header.redshift
    BoxSize  = header.boxsize/1e3 #Mpc/h
    filenum  = header.filenum
    Omega_m  = header.omega0
    Omega_L  = header.omegaL
    h        = header.hubble

    print '\n'
    print 'BoxSize         = %.3f Mpc/h'%BoxSize
    print 'Number of files = %d'%filenum
    print 'Omega_m         = %.3f'%Omega_m
    print 'Omega_l         = %.3f'%Omega_L
    print 'redshift        = %.3f'%redshift
Code example #16
def Illustris_halo(snapshot_root,
                   snapnum,
                   halo_number,
                   TREECOOL_file,
                   fout,
                   ptype=0):

    # find snapshot name and read header
    snapshot = snapshot_root + 'snapdir_%03d/snap_%03d' % (snapnum, snapnum)
    header = rs.snapshot_header(snapshot)
    redshift = header.redshift
    BoxSize = header.boxsize / 1e3  #Mpc/h
    filenum = header.filenum
    Omega_m = header.omega0
    Omega_L = header.omegaL
    h = header.hubble
    massarr = header.massarr * 1e10  #Msun/h

    print '\nBoxSize         = %.1f Mpc/h' % BoxSize
    print 'Number of files = %d' % filenum
    print 'Omega_m         = %.3f' % Omega_m
    print 'Omega_l         = %.3f' % Omega_L
    print 'redshift        = %.3f' % redshift

    # read the number of particles in each halo, plus halo positions and masses
    halos = groupcat.loadHalos(
        snapshot_root,
        snapnum,
        fields=['GroupLenType', 'GroupPos', 'GroupMass'])
    halo_len = halos['GroupLenType'][:, ptype]
    halo_pos = halos['GroupPos'] / 1e3
    halo_mass = halos['GroupMass'] * 1e10
    del halos

    # find where the halo starts and ends in the file
    begin = np.sum(halo_len[:halo_number], dtype=np.int64)
    end = begin + halo_len[halo_number]
    print begin, end

    # do a loop over all snapshot subfiles
    f = h5py.File(fout, 'w')
    pos_f = f.create_dataset('pos', (0, 3), maxshape=(None, 3))
    vel_f = f.create_dataset('vel', (0, 3), maxshape=(None, 3))
    if ptype == 0:
        mass_f = f.create_dataset('mass', (0, ), maxshape=(None, ))
        MHI_f = f.create_dataset('M_HI', (0, ), maxshape=(None, ))
        radii_f = f.create_dataset('radii', (0, ), maxshape=(None, ))
    if ptype == 1:
        radii_f = f.create_dataset('radii', (0, ), maxshape=(None, ))
        mass_f = f.create_dataset('mass_c', (0, ), maxshape=(None, ))

    begin_subfile, particles = 0, 0
    for i in xrange(filenum):

        # find subfile name and read the number of particles in it
        snapshot = snapshot_root + 'snapdir_%03d/snap_%03d.%d' % (snapnum,
                                                                  snapnum, i)
        header = rs.snapshot_header(snapshot)
        npart = header.npart

        end_subfile = begin_subfile + npart[ptype]

        # if all particles in the halo have been read, exit the loop
        if end < begin_subfile: break

        # if the subfile does not contain any halo particles, move to the next subfile
        if begin > end_subfile:
            begin_subfile = end_subfile
            continue

        print 'Working with subfile %03d' % i
        pos = rs.read_block(snapshot, 'POS ', parttype=ptype,
                            verbose=False) / 1e3
        pos = pos.astype(np.float32)
        vel = rs.read_block(snapshot, 'VEL ', parttype=ptype,
                            verbose=False) / np.sqrt(1.0 + redshift)  #km/s

        if ptype == 0:
            MHI = rs.read_block(snapshot, 'NH  ', parttype=0,
                                verbose=False)  #HI/H
            mass = rs.read_block(snapshot, 'MASS', parttype=0,
                                 verbose=False) * 1e10
            SFR = rs.read_block(snapshot, 'SFR ', parttype=0, verbose=False)
            indexes = np.where(SFR > 0.0)[0]
            del SFR

            # find the metallicity of star-forming particles
            metals = rs.read_block(snapshot, 'GZ  ', parttype=0, verbose=False)
            metals = metals[indexes] / 0.0127

            # find densities of star-forming particles: units of h^2 Msun/Mpc^3
            rho = rs.read_block(snapshot, 'RHO ', parttype=0,
                                verbose=False) * 1e19
            Volume = mass / rho  #(Mpc/h)^3
            radii = (Volume / (4.0 * np.pi / 3.0))**(1.0 / 3.0)  #Mpc/h

            # find density and radius of star-forming particles
            radii_SFR = radii[indexes]
            rho = rho[indexes]

            # find HI/H fraction for star-forming particles
            MHI[indexes] = HIL.Rahmati_HI_Illustris(rho,
                                                    radii_SFR,
                                                    metals,
                                                    redshift,
                                                    h,
                                                    TREECOOL_file,
                                                    Gamma=None,
                                                    fac=1,
                                                    correct_H2=True)  #HI/H
            MHI *= (0.76 * mass)

        if ptype == 1:
            radii = rs.read_block(snapshot, 'SFHS', parttype=1,
                                  verbose=False) / 1e3  #Mpc/h
            mass = np.ones(len(radii)) * massarr[1]

        # find the indexes of current subfile that contribute to halo
        begin_array = begin - begin_subfile
        end_array = begin_array + (end - begin)

        if end > end_subfile:
            end_array = end_subfile - begin_subfile
            begin = end_subfile

        new_size = particles + (end_array - begin_array)

        pos_f.resize((new_size, 3))
        pos_f[particles:] = pos[begin_array:end_array]
        vel_f.resize((new_size, 3))
        vel_f[particles:] = vel[begin_array:end_array]

        if ptype == 0:
            mass_f.resize((new_size, ))
            mass_f[particles:] = mass[begin_array:end_array]

            MHI_f.resize((new_size, ))
            MHI_f[particles:] = MHI[begin_array:end_array]

            radii_f.resize((new_size, ))
            radii_f[particles:] = radii[begin_array:end_array]

        if ptype == 1:
            radii_f.resize((new_size, ))
            radii_f[particles:] = radii[begin_array:end_array]

            mass_f.resize((new_size, ))
            mass_f[particles:] = mass[begin_array:end_array]

        particles = new_size
        begin_subfile = end_subfile

    f.close()
    print 'Halo mass = %.3e' % halo_mass[halo_number]
    print 'Halo pos  =', halo_pos[halo_number]
    print 'Number of particles in the halo = %ld' % particles
Code example #17
    def read(self, block_name, parttype, fof_num, sub_num):
        if sub_num < 0 and fof_num < 0:
            # Load all of the non-FoF particles.
            off = (self.group_offset[-1, parttype] +
                   self.cat.GroupLenType[-1, parttype])
            left = 1e9  # reads the rest.
            print(off, left)

        if sub_num >= 0 and fof_num < 0:
            off = self.halo_offset[sub_num, parttype]
            left = self.cat.SubhaloLenType[sub_num, parttype]

        if fof_num >= 0 and sub_num < 0:
            off = self.group_offset[fof_num, parttype]
            left = self.cat.GroupLenType[fof_num, parttype]

        if sub_num >= 0 and fof_num >= 0:
            real_sub_num = sub_num + self.cat.GroupFirstSub[fof_num]
            off = self.halo_offset[real_sub_num, parttype]
            left = self.cat.SubhaloLenType[real_sub_num, parttype]

        if left == 0:
            if self.verbose:
                print("READHALO: no particles of type...returning")
            return

        # Get first file that contains particles of required halo/fof/etc.
        findex = np.argmax(self.file_type_numbers[:, parttype] > off) - 1
        # np.argmax returns 0 when the offset corresponds to a particle
        # in the last file.
        if findex == -1:
            findex = self.file_num - 1

        # Convert the overall offset to an offset for just the file given by
        # findex by subtracting off the number of particles in previous files.
        for fnr in range(0, findex):
            off -= (self.file_type_numbers[fnr + 1, parttype] -
                    self.file_type_numbers[fnr, parttype])

        # Read data from file(s).
        first = True
        for fnr in range(findex, self.file_num):
            path = self.filenames[fnr]

            head = readsnapHDF5.snapshot_header(path)
            nloc = head.npart[parttype]

            if nloc > off:
                if self.verbose:
                    print("READHALO: data found in %s" % path)
                start = off
                if nloc - off > left:
                    # All remaining particles are in this file.
                    count = left
                else:
                    # Read to end of file.
                    count = nloc - off

                block = readsnapHDF5.read_block(path,
                                                block_name,
                                                parttype,
                                                slab_start=start,
                                                slab_len=count)
                if first:
                    data = block
                    first = False
                else:
                    data = np.append(data, block, axis=0)

                left -= count
                off += count
            if left == 0:
                break
            off -= nloc

        gc.collect()

        return data
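The surrounding class is not shown here, so the following usage is only a hypothetical sketch: it assumes the constructor from Code example #1 and the read() method above belong to a class imported as HaloReader (both the class and module names are assumptions, not taken from the source).

# Hypothetical usage sketch; HaloReader is an assumed name for the class above.
reader = HaloReader("/path/to/run/output/", 135, verbose=True)

# Gas ("POS ", parttype 0) positions of subhalo 0 of FoF group 10:
pos_sub = reader.read("POS ", 0, fof_num=10, sub_num=0)

# Star (parttype 4) positions of all particles in FoF group 10:
pos_fof = reader.read("POS ", 4, fof_num=10, sub_num=-1)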
Code example #18
#===================================================================

# Compare snap_nums with the dumb_merger_tree file list:


# Convert the file list into an array of snapshots:
# (Remove any file that is not a data file for a specific snapshot)


post_setup = time.time()
#===================================================================
#===================================================================
# Get the tracer IDs!
snapname = base + "/snap_"+str(latest_snap).zfill(3)
redshift = readsnapHDF5.snapshot_header(snapname).redshift
scale_factor = 1./(1.+redshift)
gal_radfac =  0.2#scale_factor * 2.5

# Read in from snapshot once:
pre_read = time.time()
# GAS #
gas_pos = readsnapHDF5.read_block(snapname,"POS ",parttype=0)
gas_ids = readsnapHDF5.read_block(snapname,"ID  ",parttype=0)
# STARS #
star_pos = readsnapHDF5.read_block(snapname,"POS ",parttype=4)
star_ids = readsnapHDF5.read_block(snapname,"ID  ",parttype=4)
# TRACERS #
parent_ids = readsnapHDF5.read_block(snapname,"TRPI",parttype=3)
tracer_ids = readsnapHDF5.read_block(snapname,"TFID",parttype=3)
post_read = time.time()
Code example #19
    eb = float(sys.argv[2])
    h = 0.1
    alpha = 0.1

    print eb

    if (len(sys.argv) < 4): init_snap = 0
    else: init_snap = int(sys.argv[3])
    if (len(sys.argv) < 5): final_snap = 1001
    else: final_snap = int(sys.argv[4])
    if (len(sys.argv) < 6): snap_step = 1
    else: snap_step = int(sys.argv[5])

    snap_list = np.arange(init_snap, final_snap + 1, snap_step)

    # paths to files
    run_path = "/data2/djmunoz/CIRCUMBINARY_DISKS_2D/RESTART_PLUTO_RUNS/"

    run_base = "restart-3000-pluto-woboundary-standardres-binary"

    run_name = run_base + ("_q%.1f_e%.1f_h%.1f_alpha%.1f/" %
                           (qb, eb, h, alpha))

    orbit_range = []
    for snap in [init_snap, final_snap]:
        filename = directory + base + snap_base + str(snap).zfill(3)
        header = rs.snapshot_header(filename)
        time = header.time + time_offset
        orbit_range.append(int(time / (2 * np.pi)))
        acc = rs.read_block
Code example #20
halo_id = SL.indexes_3D_cube(halo_pos, BoxSize, cell_size)

# define the array containing the HI mass in each spherical shell
HI_mass_shell = np.zeros((halos, bins), dtype=np.float64)
part_in_halo  = np.zeros(halos,         dtype=np.int64)

# find the numbers each cpu will work on
array   = np.arange(filenum)
numbers = np.where(array%nprocs==myrank)[0]

# do a loop over each subsnapshot
for i in numbers:

    # find subfile name and read the number of particles in it
    snapshot = snapshot_root + 'snapdir_%03d/snap_%03d.%d'%(snapnum, snapnum, i)
    header = rs.snapshot_header(snapshot)
    npart  = header.npart 

    pos  = rs.read_block(snapshot, 'POS ', parttype=0, verbose=False)/1e3
    pos  = pos.astype(np.float32)
    MHI  = rs.read_block(snapshot, 'NH  ', parttype=0, verbose=False)#HI/H
    mass = rs.read_block(snapshot, 'MASS', parttype=0, verbose=False)*1e10
    SFR  = rs.read_block(snapshot, 'SFR ', parttype=0, verbose=False)
    indexes = np.where(SFR>0.0)[0];  del SFR

    # find the metallicity of star-forming particles
    metals = rs.read_block(snapshot, 'GZ  ', parttype=0, verbose=False)
    metals = metals[indexes]/0.0127

    # find densities of star-forming particles: units of h^2 Msun/Mpc^3
    rho = rs.read_block(snapshot, 'RHO ', parttype=0, verbose=False)*1e19
Code example #21
def Illustris_halo(snapshot_root, snapnum, halo_number, TREECOOL_file, fout,
                   ptype=0):

    # find snapshot name and read header
    snapshot = snapshot_root + 'snapdir_%03d/snap_%03d'%(snapnum, snapnum)
    header   = rs.snapshot_header(snapshot)
    redshift = header.redshift
    BoxSize  = header.boxsize/1e3 #Mpc/h
    filenum  = header.filenum
    Omega_m  = header.omega0
    Omega_L  = header.omegaL
    h        = header.hubble
    massarr  = header.massarr*1e10 #Msun/h

    print '\nBoxSize         = %.1f Mpc/h'%BoxSize
    print 'Number of files = %d'%filenum
    print 'Omega_m         = %.3f'%Omega_m
    print 'Omega_l         = %.3f'%Omega_L
    print 'redshift        = %.3f'%redshift

    # read the number of particles in each halo, plus halo positions and masses
    halos = groupcat.loadHalos(snapshot_root, snapnum, 
            fields=['GroupLenType','GroupPos','GroupMass'])
    halo_len  = halos['GroupLenType'][:,ptype]  
    halo_pos  = halos['GroupPos']/1e3
    halo_mass = halos['GroupMass']*1e10
    del halos


    # find where the halo starts and ends in the file
    begin = np.sum(halo_len[:halo_number], dtype=np.int64)
    end   = begin + halo_len[halo_number]
    print begin,end

    # do a loop over all snapshot subfiles
    f = h5py.File(fout,'w')
    pos_f   = f.create_dataset('pos',   (0,3),  maxshape=(None,3))
    vel_f   = f.create_dataset('vel',   (0,3),  maxshape=(None,3))
    if ptype==0:
        mass_f  = f.create_dataset('mass',  (0,),   maxshape=(None,))
        MHI_f   = f.create_dataset('M_HI',  (0,),   maxshape=(None,))
        radii_f = f.create_dataset('radii', (0,),   maxshape=(None,))
    if ptype==1:
        radii_f = f.create_dataset('radii',  (0,),   maxshape=(None,))
        mass_f  = f.create_dataset('mass_c', (0,),   maxshape=(None,))

    begin_subfile, particles = 0, 0
    for i in xrange(filenum):

        # find subfile name and read the number of particles in it
        snapshot = snapshot_root + 'snapdir_%03d/snap_%03d.%d'%(snapnum, snapnum, i)
        header = rs.snapshot_header(snapshot)
        npart  = header.npart 

        end_subfile = begin_subfile + npart[ptype]

        # if all particles in the halo have been read, exit the loop
        if end<begin_subfile:  break

        # if the subfile does not contain any halo particles, move to the next subfile
        if begin>end_subfile:
            begin_subfile = end_subfile;  continue


        print 'Working with subfile %03d'%i
        pos  = rs.read_block(snapshot, 'POS ', parttype=ptype, 
                             verbose=False)/1e3
        pos  = pos.astype(np.float32)
        vel  = rs.read_block(snapshot, 'VEL ', parttype=ptype, 
                             verbose=False)/np.sqrt(1.0+redshift) #km/s

        if ptype==0:
            MHI  = rs.read_block(snapshot, 'NH  ', parttype=0,
                                 verbose=False)#HI/H
            mass = rs.read_block(snapshot, 'MASS', parttype=0,
                                 verbose=False)*1e10
            SFR  = rs.read_block(snapshot, 'SFR ', parttype=0,
                                 verbose=False)
            indexes = np.where(SFR>0.0)[0];  del SFR

            # find the metallicity of star-forming particles
            metals = rs.read_block(snapshot, 'GZ  ', parttype=0, verbose=False)
            metals = metals[indexes]/0.0127

            # find densities of star-forming particles: units of h^2 Msun/Mpc^3
            rho = rs.read_block(snapshot, 'RHO ', parttype=0, 
                                verbose=False)*1e19
            Volume = mass/rho                            #(Mpc/h)^3
            radii  = (Volume/(4.0*np.pi/3.0))**(1.0/3.0) #Mpc/h 

            # find density and radius of star-forming particles
            radii_SFR = radii[indexes]    
            rho       = rho[indexes]

            # find HI/H fraction for star-forming particles
            MHI[indexes] = HIL.Rahmati_HI_Illustris(rho, radii_SFR, metals,
                                                    redshift, h, TREECOOL_file,
                                                    Gamma=None, fac=1,
                                                    correct_H2=True) #HI/H
            MHI *= (0.76*mass)
            
        if ptype==1:
            radii = rs.read_block(snapshot, 'SFHS', parttype=1,
                                  verbose=False)/1e3 #Mpc/h
            mass = np.ones(len(radii))*massarr[1]
            

        # find the indexes of current subfile that contribute to halo
        begin_array = begin - begin_subfile
        end_array   = begin_array + (end-begin)

        if end>end_subfile:
            end_array = end_subfile - begin_subfile
            begin     = end_subfile

        new_size = particles + (end_array - begin_array)

        pos_f.resize((new_size,3))
        pos_f[particles:] = pos[begin_array:end_array]
        vel_f.resize((new_size,3))
        vel_f[particles:] = vel[begin_array:end_array]

        if ptype==0:
            mass_f.resize((new_size,))
            mass_f[particles:] = mass[begin_array:end_array]

            MHI_f.resize((new_size,))
            MHI_f[particles:] = MHI[begin_array:end_array]

            radii_f.resize((new_size,))
            radii_f[particles:] = radii[begin_array:end_array]

        if ptype==1:
            radii_f.resize((new_size,))
            radii_f[particles:] = radii[begin_array:end_array]

            mass_f.resize((new_size,))
            mass_f[particles:] = mass[begin_array:end_array]

        particles = new_size
        begin_subfile = end_subfile


    f.close()
    print 'Halo mass = %.3e'%halo_mass[halo_number]
    print 'Halo pos  =',halo_pos[halo_number]
    print 'Number of particles in the halo = %ld'%particles
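A large part of Illustris_halo is bookkeeping: for every snapshot subfile it intersects the halo's global particle range [begin, end) with the range of particles stored in that subfile, and reads only the matching local slice. A minimal sketch of that logic with invented per-subfile particle counts and an invented halo range:

npart_per_subfile = [40, 35, 50]        # invented gas particle counts
begin, end = 60, 110                    # halo occupies global particles [60, 110)

begin_subfile, particles = 0, 0
for i, npart in enumerate(npart_per_subfile):
    end_subfile = begin_subfile + npart
    if end < begin_subfile:             # halo fully read: stop
        break
    if begin > end_subfile:             # halo not reached yet: skip this subfile
        begin_subfile = end_subfile
        continue

    # local index range inside this subfile that belongs to the halo
    begin_array = begin - begin_subfile
    end_array = begin_array + (end - begin)
    if end > end_subfile:               # halo continues into the next subfile
        end_array = end_subfile - begin_subfile
        begin = end_subfile

    print(i, begin_array, end_array)    # subfile index and local slice
    particles += end_array - begin_array
    begin_subfile = end_subfile

print(particles)                        # 50 = halo length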