Example #1
    def __init__(self,catdir,snapnum,nbins,rmin=1e-2,rmax=1.,useSubhaloID=False,\
            useFOF=False, useStellarhalfmassRad=None, useReduced=False, radinkpc=False,\
            NR=False, binwidth=0.1, debug=False, useSubhaloes=False, testconvergence=False,):

        assert type(nbins) is int and nbins >= 0, 'parameter nbins must be a non-negative int'

        # Set main parameters
        if catdir.find('/output') < 0:
            self.snapdir = catdir + '/output/'
        else:
            self.snapdir = catdir
        self.snapnum = snapnum
        self.useSubhaloID = useSubhaloID
        self.useStellarhalfmassRad = useStellarhalfmassRad  # honour the caller's flag
        self.radinkpc = radinkpc
        print '\n\tAxialRatio: ', self.snapdir

        # Read snapshot header for boxsize
        snapstr = str(snapnum).zfill(3)
        self.header = readsnapHDF5.snapshot_header(self.snapdir + '/snapdir_' +
                                                   snapstr + '/snap_' +
                                                   snapstr)
        self.boxsize = self.header.boxsize
        print '\tAxialRatio: Boxsize =', self.boxsize

        self.parttypes = [1]
        if self.header.cooling == 1:
            self.parttypes.append(0)
        if self.header.sfr == 1:
            self.parttypes.append(4)
        print '\tAvailable parttypes =', self.parttypes

        # Set other parameters
        self.setparams(nbins,rmin,rmax,useFOF,useStellarhalfmassRad,useReduced,NR,\
                binwidth, debug, testconvergence)

        # Read SUBFIND catalogue
        keysel = [
            "Group_R_Crit200", "GroupFirstSub", "Group_M_Crit200", "GroupPos"
        ]
        if useSubhaloes is True:
            print 'adding keysel for subhaloes'
            keysel += ["GroupNsubs", "SubhaloPos", "SubhaloMass"]
        if useSubhaloID is True:
            print 'adding keysel for using SubhaloIDs'
            assert useStellarhalfmassRad is True
            assert useFOF is False
        if useStellarhalfmassRad is True:
            assert self.header.sfr == 1, 'Simulation does not have stars'
            print 'adding keysel for StellarhalfmassRad'
            keysel += [
                "SubhaloPos", "SubhaloMassInRadType", "SubhaloHalfmassRadType"
            ]
        self.cat = readsubfHDF5.subfind_catalog(self.snapdir,
                                                snapnum,
                                                keysel=keysel)
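
A minimal usage sketch, assuming the constructor above belongs to a class named AxialRatio (the name its log messages suggest) and that catdir points at a run directory containing output/:

# Hypothetical usage; the class name and path are assumptions, not from the source.
shapes = AxialRatio('/path/to/sim', snapnum=135, nbins=20, rmin=1e-2, rmax=1.0)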
Example #2
    def __init__(self,
                 base,
                 num,
                 long_ids=False,
                 snapbase='snap',
                 double_output=False,
                 verbose=False,
                 run=None):
        self.base = base
        self.snapbase = snapbase
        self.num = num
        self.num_pad = str(num).zfill(3)
        self.verbose = verbose
        self.part_types = [0, 1, 4, 5]
        keysel = [
            "ngroups", "nsubs", "GroupLenType", "GroupNsubs", "GroupFirstSub",
            "SubhaloLenType"
        ]
        self.cat = readsubfHDF5.subfind_catalog(base,
                                                num,
                                                long_ids=long_ids,
                                                double_output=double_output,
                                                keysel=keysel)
        if not hasattr(self.cat, "GroupLenType"):
            raise RuntimeError("Subfind catalog has no group or subhalo "
                               "information.")

        self.filenames = naming.get_snap_filenames(self.base, self.snapbase,
                                                   self.num)
        offsets = readsubfHDF5.get_offsets(self.cat, self.part_types, self.num,
                                           run)
        self.group_offset, self.halo_offset = offsets

        head = readsnapHDF5.snapshot_header(self.filenames[0])

        self.file_num = head.filenum
        assert (self.file_num == len(self.filenames))

        ntypes = 6
        self.file_type_numbers = np.zeros([self.file_num, ntypes],
                                          dtype="int64")
        cumcount = np.zeros(ntypes, dtype="int64")

        # Store in file_type_numbers[i, :] the cumulative number of particles
        # in all previous files.  Note we never need to open the last file.
        for i in range(0, self.file_num - 1):
            if self.verbose:
                print("READHALO: initial read of file: %s" % self.filenames[i])
            head = readsnapHDF5.snapshot_header(self.filenames[i])

            cumcount[:] += head.npart[:]
            self.file_type_numbers[i + 1, :] = cumcount[:]
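
The cumulative table built above makes it cheap to find which file holds a given global particle index: the first file whose cumulative count exceeds the index, minus one (the same argmax trick used in Example #10). The helper below is hypothetical, not part of the original class.

    # Hypothetical helper; mirrors the lookup logic used in Example #10.
    def file_of_particle(self, parttype, index):
        fidx = np.argmax(self.file_type_numbers[:, parttype] > index) - 1
        if fidx == -1:  # argmax returned 0: the index lies in the last file
            fidx = self.file_num - 1
        return fidx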
Example #3
    def __init__(self,catdir,snapnum=135,savepartids=False,\
            GetRotMat=True,RotateParticles=False,GetCoef=True,GetInit=True,GetInitStar=False):
        self.catdir=catdir
        self.snapnum=snapnum

        # keysel is not defined in this excerpt; judging from the commented
        # line below, it includes at least "Group_R_Crit200".
        self.cat=readsubfHDF5.subfind_catalog(catdir,snapnum,keysel=keysel)
        #self.rvir=self.cat.Group_R_Crit200[groupid]/h*kpc
        self.savepartids=savepartids
        self.RotateParticles=RotateParticles
        self.GetRotMat=GetRotMat
        self.GetCoef=GetCoef
        self.GetInit=GetInit
        self.GetInitStar=GetInitStar

        print 'You have chosen the following:'
        if self.RotateParticles: print 'Rotate particles using ',self.RotateParticles
        if self.GetRotMat: print 'Save rotmat'
        if self.GetCoef: print 'Save Knlm coefs'
        if self.GetInit: print 'Save DM init file'
        if self.GetInitStar: print 'Save Stellar init file'

        self.fields={1:["Coordinates","Velocities"],4:["Coordinates","Velocities","Masses"],\
        0:["Coordinates","InternalEnergy","Masses","ElectronAbundance"]}

        # str.find returns -1 when absent, so test with >= 0
        if catdir.find('1820DM') >= 0:
            self.whichdm='1820DM'
            self.dmpartmass=0.00052946428432085776
        elif catdir.find('1820FP') >= 0:
            self.whichdm='1820FP'
            self.dmpartmass=0.00044089652436109581
        elif catdir.find('910DM') >= 0:
            self.whichdm='910DM'
            self.dmpartmass=0.0042357142745668621
        elif catdir.find('910FP') >= 0:
            self.whichdm='910FP'
            self.dmpartmass=0.0035271721948887664
        else:
            raise ValueError("Can't find DM or FP!")
        print self.catdir,self.snapnum
        print self.whichdm,self.dmpartmass

        if self.whichdm.find('DM')>0:
            if self.GetInitStar:
                self.GetInitStar=False
                print 'Warning: no stellar init for DMO runs!'

        #self.catdir=cat.filebase.split(self.whichdm)[0]+self.whichdm+'/output/'
        N=self.cat.filebase.find('/groups_')
        if int(self.cat.filebase[N+8:N+11]) != self.snapnum:
            raise ValueError('Mismatching specification of SnapNum with Subfind catalogue')
Example #4
def find_vt_tracer_ids(snap_num,sub_id,base,scale_factor,gal_radfac):
	print "Finding velocity tracer IDs..."
	cat = readsubfHDF5.subfind_catalog(base, snap_num)
	
	# I assume that the subhalo is the primary subhalo in its group
	sub_list = cat.GroupFirstSub
	grp_id = np.argmin( np.abs(sub_id-sub_list) )

	sub_pos = cat.SubhaloPos[sub_id]
	sub_mass = cat.SubhaloMass[sub_id]
	sub_Rvir = cat.Group_R_Crit200[grp_id]

	gal_rad = sub_Rvir * gal_radfac #* 0.1

	#snapname = base + "snapdir_"+str(snap_num).zfill(3)+"/snap_"+str(snap_num).zfill(3)
	snapname = base + "snap_"+str(snap_num).zfill(3)

	#Find all velocity tracers within 0.1 Rvir of the subhalo
	vt_pos = readsnapHDF5.read_block(snapname,"POS ",parttype=2)
	vt_vel = readsnapHDF5.read_block(snapname,"VEL ",parttype=2)	
	vt_ids = readsnapHDF5.read_block(snapname,"ID  ",parttype=2)
	#readsnapHDF5.list_blocks(snapname+".hdf5")

	vt_x = np.logical_and(vt_pos[:,0] > sub_pos[0]-gal_rad, vt_pos[:,0] < sub_pos[0]+gal_rad)
	vt_y = np.logical_and(vt_pos[:,1] > sub_pos[1]-gal_rad, vt_pos[:,1] < sub_pos[1]+gal_rad)
	vt_z = np.logical_and(vt_pos[:,2] > sub_pos[2]-gal_rad, vt_pos[:,2] < sub_pos[2]+gal_rad)


	vt_ind = np.logical_and(np.logical_and(vt_x,vt_y),vt_z)
	gal_vt_pos = vt_pos[vt_ind] - sub_pos
	#gal_vt_vel = vt_vel[vt_ind]
	

	gal_vt_rad = np.sqrt(gal_vt_pos[:,0]**2 + gal_vt_pos[:,1]**2 + gal_vt_pos[:,2]**2)
	vt_ind2 = gal_vt_rad < gal_rad

	gal_vt_pos = gal_vt_pos[vt_ind2]
	#gal_vt_vel = gal_vt_vel[vt_ind2]
	gal_vt_ids = vt_ids[vt_ind][vt_ind2]
	
	print "Done finding velocity tracer IDs!"
	return gal_vt_ids
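
A hedged call sketch (the base path and parameter values are placeholders; note that scale_factor is accepted but unused in the body shown):

# Hypothetical call; arguments are illustrative only.
ids = find_vt_tracer_ids(snap_num=135, sub_id=0, base='/path/to/output/',
                         scale_factor=1.0, gal_radfac=0.1)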
Example #5
def get_subhalo_ids(base,snap_num,sub_id):
	cat = readsubfHDF5.subfind_catalog(base, snap_num)

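	# As in Example #4, assume sub_id is the primary (first) subhalo of its group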
	sub_list = cat.GroupFirstSub
	grp_id = np.argmin( np.abs(sub_id-sub_list) )

	#snapname = base + "snapdir_"+str(snap_num).zfill(3)+"/snap_"+str(snap_num).zfill(3)
	snapname = base + "/snap_"+str(snap_num).zfill(3)

	redshift = readsnapHDF5.snapshot_header(snapname).redshift
	scale_factor = 1./(1.+redshift)


	# Get full bound-particle ID list:
	all_bound_ids = get_full_bound_id_list(base,snap_num)

	# First need to construct the offset table:
	groupOffset = np.zeros(cat.ngroups, dtype="int64")
	haloOffset = np.zeros(cat.nsubs, dtype="int64")

	for i in range(1,cat.ngroups):
		groupOffset[i] = groupOffset[i-1] + cat.GroupLen[i-1]

	for GrNr in range(0,cat.ngroups): #GrNr means Group number
		nsubs = cat.GroupNsubs[GrNr]

		if nsubs > 0:
			SubNr = cat.GroupFirstSub[GrNr]
			haloOffset[SubNr] = groupOffset[GrNr]

			if nsubs > 1:
				# Offsets of the remaining nsubs-1 subhalos follow from a
				# running sum of the preceding subhalo lengths.
				subOffsets = np.cumsum(cat.SubhaloLen[SubNr:SubNr+nsubs-1])
				haloOffset[SubNr+1:SubNr+nsubs] = groupOffset[GrNr] + subOffsets


	sub_offset = haloOffset[sub_id]
	sub_len = cat.SubhaloLen[sub_id]

	sub_ids = all_bound_ids[sub_offset:sub_offset+sub_len]

	return sub_ids
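
A hedged call sketch (the base path is a placeholder):

# Hypothetical call; arguments are illustrative only.
bound_ids = get_subhalo_ids('/path/to/output', snap_num=135, sub_id=0)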
Example #6
def find_all_halos(num, base, min_mass):
    """Get a halo catalogue and return its members, filtering out those with masses below min_mass.
    Select halos via their M_200 mass, defined in terms of the critical density.
    Arguments:
        num - snapnumber
        base - simulation directory
        min_mass - minimum mass of halos to use
    Returns:
        ind - list of halo indices used
        sub_mass - halo masses in M_sun /h
        sub_cofm - halo positions
        sub_radii - R_Crit200 for halo radii"""
    try:
        subs=readsubf.subfind_catalog(base,num,masstab=True,long_ids=True)
        #Get list of halos resolved, using a mass cut; cuts off at about 2e9 for 512**3 particles.
        ind=np.where(subs.group_m_crit200 > min_mass)
        #Store the indices of the halos we are using
        #Get particle center of mass, use group catalogue.
        sub_cofm=np.array(subs.group_pos[ind])
        #halo masses in M_sun/h: use M_200
        sub_mass=np.array(subs.group_m_crit200[ind])*UnitMass_in_g/SolarMass_in_g
        #r200 in ckpc/h (comoving).
        sub_radii = np.array(subs.group_r_crit200[ind])
        del subs
    except IOError:
        # We might have the halo catalog stored in the new format, which is HDF5.
        subs=readsubfHDF5.subfind_catalog(base, num,long_ids=True)
        #Get list of halos resolved, using a mass cut; cuts off at about 2e9 for 512**3 particles.
        ind=np.where(subs.Group_M_Crit200 > min_mass)
        #Store the indices of the halos we are using
        #Get particle center of mass, use group catalogue.
        sub_cofm=np.array(subs.GroupPos[ind])
        #halo masses in M_sun/h: use M_200
        sub_mass=np.array(subs.Group_M_Crit200[ind])*UnitMass_in_g/SolarMass_in_g
        #r200 in kpc/h (comoving).
        sub_radii = np.array(subs.Group_R_Crit200[ind])
        del subs

    return (ind, sub_mass,sub_cofm,sub_radii)
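
A hedged call sketch (the path and mass cut are placeholders; min_mass is in the same code units as group_m_crit200, i.e. 1e10 Msun/h per unit):

# Hypothetical call; the path and threshold are illustrative only.
ind, sub_mass, sub_cofm, sub_radii = find_all_halos(num=135, base='/path/to/output',
                                                    min_mass=1.0)  # about 1e10 Msun/h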
Example #7
import snapshot
import readsubfHDF5
import numpy as np
import tables

which='DM'

dir='/n/hernquistfs1/Illustris/Runs/L75n1820'+which+'/output/'
snapnum=135
massrange=[11,15]
nbins=20

cat=readsubfHDF5.subfind_catalog(dir,snapnum,\
        keysel=["Group_M_Crit200","Group_R_Crit200","GroupVel","GroupPos","GroupFirstSub"])

massg=cat.Group_M_Crit200/0.704*1e10
N=len(massg)
gallist=np.nonzero((massg>10**massrange[0]) & (massg < 10**massrange[1]))[0]

edges=10**np.linspace(np.log10(1e-2),np.log10(1),nbins+1)
y=(edges[1:]+edges[:-1])/2

with tables.open_file('1820'+which+'VelDisp.hdf5','w') as f:
    s2=f.create_carray('/','Veldisp2',tables.Float32Col(),(N,nbins))
    rs2=f.create_carray('/','RadialVeldisp2',tables.Float32Col(),(N,nbins))
    ts2=f.create_carray('/','TanVeldisp2',tables.Float32Col(),(N,nbins))
    B=f.create_carray('/','VelAniso',tables.Float32Col(),(N,nbins))
    r=f.create_carray('/','Radius',tables.Float32Col(),(nbins,))

    for gal in gallist:
        part=snapshot.loadSubhalo(dir,snapnum,cat.GroupFirstSub[gal],1,["Coordinates","Velocities"])
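
The loop body is truncated in this example; the lines below are a hypothetical continuation consistent with the arrays created above (dispersion profiles and the anisotropy parameter), not the original code:

        # Hypothetical continuation; ignores periodic wrapping for brevity.
        dx = part["Coordinates"] - cat.GroupPos[gal]
        dv = part["Velocities"] - cat.GroupVel[gal]
        rad = np.sqrt((dx**2).sum(axis=1))
        vr = (dx*dv).sum(axis=1)/np.maximum(rad, 1e-10)  # radial velocity
        x = rad/cat.Group_R_Crit200[gal]                 # radius in units of R200
        sig2 = np.zeros(nbins); sigr2 = np.zeros(nbins); sigt2 = np.zeros(nbins)
        for k in range(nbins):
            sel = (x >= edges[k]) & (x < edges[k+1])
            if sel.sum() > 1:
                sigr2[k] = np.var(vr[sel])               # radial sigma^2
                sig2[k] = np.var(dv[sel], axis=0).sum()  # total sigma^2
                sigt2[k] = sig2[k] - sigr2[k]            # tangential sigma^2
        s2[gal, :] = sig2
        rs2[gal, :] = sigr2
        ts2[gal, :] = sigt2
        B[gal, :] = np.where(sigr2 > 0, 1. - sigt2/(2.*sigr2), 0.)
        r[:] = y  # bin centres (loop-invariant)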
Example #8
box = header.boxsize

print 'Directory: ', fdir
print 'Snapshot: ', snap
print 'Redshift: ', header.redshift
print 'Hubble: ', header.hubble
print 'Massarr ', header.massarr
print 'Halo mass range for calculation: 10^[{},{}] M_sun'.format(
    args.minmass, args.maxmass)

#1: Create and write useful information
cat = readsubfHDF5.subfind_catalog(fdir,
                                   snap,
                                   keysel=[
                                       "Group_R_Crit200", "GroupFirstSub",
                                       "Group_M_Crit200",
                                       "SubhaloMassInRadType",
                                       "SubhaloHalfmassRadType",
                                       "SubhaloMassInHalfRadType", "SubhaloPos"
                                   ])
first = cat.GroupFirstSub[:]
#mgal=cat.SubhaloMassInRadType[cat.GroupFirstSub[:]]
ngroups = cat.ngroups
groupmass = cat.Group_M_Crit200 * 1e10 / hubble
groups = np.nonzero((groupmass > 10**args.minmass)
                    & (groupmass < 10**args.maxmass))[0]

print 'To calculate {} groups'.format(len(groups))

with tables.open_file(fout, 'w') as f:
    f.create_carray("/", "Group_M_Crit200", tables.Float32Col(), (ngroups, ))
Example #9
with open('../match' + res + '_' + vel + '_' + str(snapnum) + '.dat',
          'rb') as f:
    matched = pickle.load(f)
with open('../SIGOidx' + res + '_' + vel + '_' + str(snapnum) + '.dat',
          'rb') as f:
    SIGOidx = pickle.load(f)
with open(
        '../luminosity/GP_luminosity' + res + '_' + vel + '_' + str(snapnum) +
        '.dat', 'rb') as f:
    gp = pickle.load(f)

prefix = '../../../'
run = "14Mpc_118kms_Cooling_OldArepo"
snap = 5
name = 'clump'
cat_118kms = readsubfHDF5.subfind_catalog(
    "../../../14Mpc_118kms_Cooling_OldArepo/output/GasOnly_FOF", snap)

jobdir = 'paperplots/'  # 'Plots_projections/'


def h100toSIGOidx(idx):
    try:
        return np.where(SIGOidx == idx)[0][0]
    except IndexError:
        print "No SIGO found"
        raise KeyError(idx)


def plotProjection(kind, index, boxsize=1.):
    #index is which SIGO it is, ihalo refers to the numbering in halo100_indices
    ihalo = SIGOidx[index]
Example #10
def readhalo(
    base, snapbase, num, block_name, parttype, fof_num, sub_num, long_ids=False, double_output=False, verbose=False
):
    global FlagRead, cat, GroupOffset, HaloOffset, multiple, filename, Parttype, FileTypeNumbers, FileNum

    if (not FlagRead) or (parttype not in Parttype):
        if verbose:
            print "READHALO: INITIAL READ"

            # add parttype to list
        Parttype.append(parttype)

        if verbose:
            print "READHALO: Parttype = ", Parttype

            # read in catalog
        cat = readsubfHDF5.subfind_catalog(
            base,
            num,
            long_ids=long_ids,
            double_output=double_output,
            keysel=["GroupLenType", "GroupNsubs", "GroupFirstSub", "SubhaloLenType", "SubhaloMassType"],
        )

        if cat.ngroups == 0:
            if verbose:
                print "READHALO: no groups in catalog... returning"
            return

        if FlagRead == False:
            GroupOffset = np.zeros([cat.ngroups, 6], dtype="int64")
            HaloOffset = np.zeros([cat.nsubs, 6], dtype="int64")

            filename = base + "/" + snapbase + "_" + str(num).zfill(3)
            multiple = False
            if os.path.exists(filename + ".hdf5") == False:
                filename = (
                    base + "/snapdir_" + str(num).zfill(3) + "/" + snapbase + "_" + str(num).zfill(3) + "." + str(0)
                )
                multiple = True
            if os.path.exists(filename + ".hdf5") == False:
                print "READHALO: [error] file not found : ", filename
                sys.exit()

            FlagRead = True

            # construct offset tables
        k = 0
        for i in range(0, cat.ngroups):
            if i > 0:
                GroupOffset[i, parttype] = GroupOffset[i - 1, parttype] + cat.GroupLenType[i - 1, parttype]
            if cat.GroupNsubs[i] > 0:
                HaloOffset[k, parttype] = GroupOffset[i, parttype]
                k += 1
                for j in range(1, cat.GroupNsubs[i]):
                    HaloOffset[k, parttype] = HaloOffset[k - 1, parttype] + cat.SubhaloLenType[k - 1, parttype]
                    k += 1
        if k != cat.nsubs:
            print "READHALO: problem with offset table", k, cat.nsubs
            sys.exit()

            # construct file tables
        if multiple:
            filename = base + "/snapdir_" + str(num).zfill(3) + "/" + snapbase + "_" + str(num).zfill(3) + "." + str(0)
        else:
            filename = base + "/" + snapbase + "_" + str(num).zfill(3)

        head = snapHDF5.snapshot_header(filename)
        FileNum = head.filenum

        FileTypeNumbers = np.zeros([FileNum, 6], dtype="int64")
        cumcount = np.zeros(6, dtype="int64")

        for fnr in range(0, FileNum - 1):
            if multiple:
                filename = (
                    base + "/snapdir_" + str(num).zfill(3) + "/" + snapbase + "_" + str(num).zfill(3) + "." + str(fnr)
                )
            else:
                filename = base + "/" + snapbase + "_" + str(num).zfill(3)

            if verbose:
                print "READHALO: initial reading file :", filename

            head = snapHDF5.snapshot_header(filename)

            cumcount[:] += head.npart[:]
            FileTypeNumbers[fnr + 1, :] = cumcount[:]

    if (sub_num >= 0) & (fof_num < 0):
        off = HaloOffset[sub_num, parttype]
        left = cat.SubhaloLenType[sub_num, parttype]
        if verbose:
            print "READHALO: nr / particle # / mass :", sub_num, cat.SubhaloLenType[
                sub_num, parttype
            ], cat.SubhaloMassType[sub_num, parttype].astype("float64")
    if (fof_num >= 0) & (sub_num < 0):
        off = GroupOffset[fof_num, parttype]
        left = cat.GroupLenType[fof_num, parttype]
        if verbose:
            print "READHALO: nr / particle # / mass :", fof_num, cat.GroupLenType[fof_num, parttype], cat.GroupMassType[
                fof_num, parttype
            ].astype("float64")
    if (sub_num >= 0) & (fof_num >= 0):
        real_sub_num = sub_num + cat.GroupFirstSub[fof_num]
        off = HaloOffset[real_sub_num, parttype]
        left = cat.SubhaloLenType[real_sub_num, parttype]
        if verbose:
            print "READHALO: nr / particle # / mass :", real_sub_num, cat.SubhaloLenType[
                real_sub_num, parttype
            ], cat.SubhaloMassType[real_sub_num, parttype].astype("float64")

    if left == 0:
        if verbose:
            print "READHALO: no particles of type... returning"
        return

        # get first file that contains particles of required halo/fof/etc
    findex = np.argmax(FileTypeNumbers[:, parttype] > off) - 1
    # in case we reached the end argmax returns 0
    if findex == -1:
        findex = FileNum - 1

    if verbose:
        print "READHALO: first file that contains particles =", findex

    for fnr in range(0, findex):
        off -= FileTypeNumbers[fnr + 1, parttype] - FileTypeNumbers[fnr, parttype]

        # read data from file
    first = True
    for fnr in range(findex, FileNum):
        if multiple:
            filename = (
                base + "/snapdir_" + str(num).zfill(3) + "/" + snapbase + "_" + str(num).zfill(3) + "." + str(fnr)
            )
        else:
            filename = base + "/" + snapbase + "_" + str(num).zfill(3)

        if verbose:
            print "READHALO: reading file :", filename

        head = snapHDF5.snapshot_header(filename)
        nloc = head.npart[parttype]

        if nloc > off:
            if verbose:
                print "READHALO: data"
            start = off
            if nloc - off > left:
                count = left
            else:
                count = nloc - off

            if first == True:
                data = snapHDF5.read_block(filename, block_name, parttype, slab_start=start, slab_len=count)
                first = False
            else:
                data = np.append(
                    data, snapHDF5.read_block(filename, block_name, parttype, slab_start=start, slab_len=count), axis=0
                )

            left -= count
            off += count
        if left == 0:
            break
        off -= nloc

    return data
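
A hedged usage sketch; the module-level state consulted at the top of readhalo is presumably initialized elsewhere in the original module, so it is set explicitly here, and the paths are placeholders:

# Hypothetical setup and call; values are illustrative only.
FlagRead, Parttype = False, []
pos = readhalo('/path/to/output', 'snap', 135, 'POS ', parttype=1,
               fof_num=0, sub_num=-1)  # DM positions of FOF group 0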
Example #11
    else:
        min_mass = littleh  # 1e10 Msun in 1/1e10 Msun / h
        max_mass = 100 * littleh  # 1e12 Msun
        search_query = "?mass_stars__gt=" + str(min_mass)

        cut1 = get(url_sbhalos + search_query)
        cut1['count']
        cut1 = get(url_sbhalos + search_query, {
            'limit': cut1['count'],
            'order_by': 'id'
        })

        if args.local:
            subs = np.array([sub['id'] for sub in cut1['results']], dtype='i')
            cat = readsubfHDF5.subfind_catalog(
                args.local, snapnum, keysel=['SubhaloPos', 'SubhaloMass'])
            sub_dat = np.hstack(
                (subs.reshape(subs.size,
                              1), cat.SubhaloMass[subs].reshape(subs.size, 1),
                 cat.SubhaloPos[subs]))
            del cat

        else:
            keys = ('id', 'mass', 'pos_x', 'pos_y', 'pos_z')
            sub_dat = np.array([ itemgetter(*keys)(get(sub['url'])) \
                                 for sub in cut1['results'] ])

        np.savetxt(
            folder + 'subhalo_mass_positions.csv',
            sub_dat,
            delimiter=',',
Example #12
print resnap_name

snapname = base + "/snapdir_"+str(snapnum).zfill(3)+ "/snap_"+str(snapnum).zfill(3)
#snapname = base + "/snapdir_"+str(snapnum).zfill(3)+"_SAVE"+ "/snap_"+str(snapnum).zfill(3)

print "snapname ", snapname
#===============================================================================================

redshift = rs.snapshot_header(snapname).redshift
scale_factor = 1./(1.+redshift)
print "z: ",redshift
print "a: ",scale_factor

#cat = readsubf.subfind_catalog(base,snapnum,masstab=True)
cat = readsubfHDF5.subfind_catalog(base,snapnum)

nsubs = cat.nsubs
print str(nsubs) + " subhalos!\n"

parttype_list = [0,1,4]

all_ids = np.array([],dtype="uint32")
types = np.array([],dtype="uint32")
mass = np.array([],dtype="float64")
pos = np.array([],dtype="float64")
vel = np.array([],dtype="float64")
u = np.array([],dtype="float64")
T = np.array([],dtype="float64")
rho = np.array([],dtype="float64")
sfr = np.array([],dtype="float64")
Example #13
    def __init__(self, res, vel, snapnum):
        #self.res = res
        #self.vel = vel
        #self.snapnum = snapnum
        if res == "1.12Mpc":
            s_res = '112Mpc'
        elif res == "1.4Mpc":
            s_res = '14Mpc'
        if vel == "Sig0":
            s_vel = "Sig0"
        elif vel == "11.8kms":
            s_vel = '118kms'
        snapnum = int(snapnum)

        filename = "/n/hernquistfs3/mvogelsberger/GlobularClusters/InterfaceWArepo_All_" + res + '_' + vel + "/output/"
        filename2 = filename + "GasOnly_FOF"  #Used for readsubfHDF5
        ########## CHANGED FILENAME3 TO GROUPORDERED IN GAS ONLY
        filename3 = filename2 + "/snap-groupordered_" + str(snapnum).zfill(
            3)  #Used for hdf5lib, snapHDF5
        #### Not sure if this works with change but don't care about 2.8
        if res == '2.8Mpc':
            filename3 = filename + "snapdir_" + str(snapnum).zfill(
                3) + "/snap_" + str(snapnum).zfill(3)

        #Units
        GRAVITY_cgs = 6.672e-8
        UnitLength_in_cm = 3.085678e21  # code length unit in cm/h
        UnitMass_in_g = 1.989e43  # code length unit in g/h
        UnitVelocity_in_cm_per_s = 1.0e5
        UnitTime_in_s = UnitLength_in_cm / UnitVelocity_in_cm_per_s
        UnitDensity_in_cgs = UnitMass_in_g / np.power(UnitLength_in_cm, 3)
        UnitPressure_in_cgs = UnitMass_in_g / UnitLength_in_cm / np.power(
            UnitTime_in_s, 2)
        UnitEnergy_in_cgs = UnitMass_in_g * np.power(
            UnitLength_in_cm, 2) / np.power(UnitTime_in_s, 2)
        GCONST = GRAVITY_cgs / np.power(
            UnitLength_in_cm, 3) * UnitMass_in_g * np.power(UnitTime_in_s, 2)
        critical_density = 3.0 * .1 * .1 / 8.0 / np.pi / GCONST  #.1 is for 1/Mpc to 1/kpc, also in units of h^2

        header = snap.snapshot_header(filename3)
        if res == "2.8Mpc":
            fs = hdf5lib.OpenFile(filename3 + ".0.hdf5")
        else:
            fs = hdf5lib.OpenFile(filename3 + ".hdf5")
        red = hdf5lib.GetAttr(fs, "Header", "Redshift")
        atime = hdf5lib.GetAttr(fs, "Header", "Time")
        boxSize = hdf5lib.GetAttr(fs, "Header", "BoxSize")
        boxSize *= atime  #convert from ckpc/h to kpc/h
        Omega0 = hdf5lib.GetAttr(fs, "Header", "Omega0")
        OmegaLambda = hdf5lib.GetAttr(fs, "Header", "OmegaLambda")
        fs.close()
        cat = readsubfHDF5.subfind_catalog(filename2, snapnum)
        Omega_a = Omega0 / (Omega0 + OmegaLambda * atime * atime * atime)
        critical_density *= (Omega0 / Omega_a)
        r200 = cat.Group_R_Crit200
        r200 *= atime  #convert from ckpc/h to kpc/h
        m200 = cat.Group_M_Crit200
        haloCMvel = cat.GroupVel
        haloCMvel *= 1. / atime  #convert from km/s/a to km/s
        haloPos = cat.GroupPos
        haloPos *= atime  #convert from ckpc/h to kpc/h

        #Read in particles
        #read in all simulation masses to calculate cosmic baryon fraction
        massgassim = snap.read_block(filename + "snap_" +
                                     str(snapnum).zfill(3),
                                     "MASS",
                                     parttype=0)
        massdmsim = snap.read_block(filename + "snap_" + str(snapnum).zfill(3),
                                    "MASS",
                                    parttype=1)
        massgas = snap.read_block(filename3, "MASS", parttype=0)
        massdm = snap.read_block(filename3, "MASS", parttype=1)
        posgas = snap.read_block(filename3, "POS ", parttype=0)
        posdm = snap.read_block(filename3, "POS ", parttype=1)
        velgas = snap.read_block(filename3, "VEL ", parttype=0)
        veldm = snap.read_block(filename3, "VEL ", parttype=1)
        #redefine position units from ckpc/h to kpc/h
        posgas *= atime
        posdm *= atime
        #redefine velocity units from kmsqrt(a)/s to km/s
        velgas *= np.sqrt(atime)
        veldm *= np.sqrt(atime)

        fb = massgassim.sum(dtype="float64") / (
            massgassim.sum(dtype="float64") + massdmsim.sum(dtype="float64"))
        gaslimit = .4  # Set the limit for gas fraction in plots

        #boxSize hubble flow correction for halo CM velocity subtraction
        boxSizeVel = boxSize * .1 * UnitLength_in_cm / UnitVelocity_in_cm_per_s * np.sqrt(
            Omega0 / atime / atime / atime + OmegaLambda)

        #load particle indices
        pGas = snap.read_block(filename3, "POS ", parttype=0)
        mGas = snap.read_block(filename3, "MASS", parttype=0)
        pDM = snap.read_block(filename3, "POS ", parttype=1)
        halo100_indices = np.where(cat.GroupLenType[:, 0] > 100)[0]
        startAllGas = []
        endAllGas = []
        for i in halo100_indices:
            startAllGas += [np.sum(cat.GroupLenType[:i, 0])]
            endAllGas += [startAllGas[-1] + cat.GroupLenType[i, 0]]
        #Initialize arrays
        spinparam = np.zeros(np.size(halo100_indices))
        jsptotspinparam = np.zeros(np.size(halo100_indices))
        jspgasspinparam = np.zeros(np.size(halo100_indices))
        jspdmspinparam = np.zeros(np.size(halo100_indices))
        gasfrac = np.zeros(np.size(halo100_indices))
        costheta = np.zeros(np.size(halo100_indices))  #misalignment angle
        v200 = np.zeros(np.size(halo100_indices))
        velgasall = np.zeros(np.size(halo100_indices))
        veldmall = np.zeros(np.size(halo100_indices))
        virialratio = np.zeros(np.size(halo100_indices))
        numGas = np.zeros(np.size(halo100_indices))
        numDM = np.zeros(np.size(halo100_indices))

        j200gas = np.zeros(np.size(halo100_indices))
        j200dm = np.zeros(np.size(halo100_indices))
        j200 = np.zeros(np.size(halo100_indices))
        totmass = np.zeros(np.size(halo100_indices))
        gasmass = np.zeros(np.size(halo100_indices))
        DMmass = np.zeros(np.size(halo100_indices))
        rmax = np.zeros(np.size(halo100_indices))
        rmin = np.zeros(np.size(halo100_indices))
        j200gasNoNorm = np.zeros(np.size(halo100_indices))
        closestm200 = np.zeros(np.size(halo100_indices))
        #some radii are errors and  negative, will have a value of 1 to be excluded
        negradii = np.zeros(np.size(halo100_indices))

        #Indexing by the global variable works because halos are ordered from largest to smallest, so those with <100 particles sit at the end and are never counted.
        for i in halo100_indices:
            exec("cm = cm_%s_%s_%d[0][i]" % (s_res, s_vel, snapnum))
            exec("rotation = rotation_%s_%s_%d[0][i]" %
                 (s_res, s_vel, snapnum))
            exec("radii = radii_%s_%s_%d[0][i]" % (s_res, s_vel, snapnum))
            #some radii are errors and  negative, will have a value of 1 to be excluded
            if radii[0] < 0.:
                negradii[i] = 1.
            else:
                maxrad = radii[2]
                maxrad *= atime  #convert from ckpc to kpc
                exec("mDM=mDM_%s_%s_%d[0][i]" % (s_res, s_vel, snapnum))
                exec("DMinEll=DMindices_%s_%s_%d[0][i]" %
                     (s_res, s_vel, snapnum))
                exec("m200dm = M200dm_%s_%s[snapnum-10][i]" % (s_res, s_vel))
                #Check if CM is buggy
                if np.sum(cm == np.array([0., 0., 0.])) == 3:
                    # it's probably an error; recompute the center of mass
                    totalGas = np.sum(mGas[startAllGas[i]:endAllGas[i]])
                    cm = np.array([
                        np.sum(pGas[startAllGas[i]:endAllGas[i], j] *
                               mGas[startAllGas[i]:endAllGas[i]]) / totalGas
                        for j in range(3)
                    ])

                # Get positions of gas particles
                P = pGas[startAllGas[i]:endAllGas[i]]
                # Shift coordinate system to center on the center of the ellipsoid
                Precentered = dx_wrap(P - cm, boxSize / atime)
                # Rotate coordinates so the axes point along the x,y,z directions:
                Precentered = np.array(
                    [np.dot(pp, rotation.T) for pp in Precentered])
                # Figure out which particles are inside the ellipsoid
                inEll = (Precentered[:, 0]**2. / radii[0]**2. +
                         Precentered[:, 1]**2. / radii[1]**2 +
                         Precentered[:, 2]**2. / radii[2]**2) <= 1.

                #remove halo CM velocity
                tempvelgas = dx_wrap(
                    velgas[startAllGas[i]:endAllGas[i]][inEll] - haloCMvel[i],
                    boxSizeVel)
                tempveldm = dx_wrap(veldm[DMinEll] - haloCMvel[i], boxSizeVel)
                #redefine positions wrt COM
                tempposgas = dx_wrap(
                    posgas[startAllGas[i]:endAllGas[i]][inEll] - haloPos[i],
                    boxSize)
                tempposdm = dx_wrap(posdm[DMinEll] - haloPos[i], boxSize)
                numDM[i] = np.size(tempposdm)
                numGas[i] = np.size(tempposgas)
                #Calculating j200
                #j200 of all particles
                j200vecgas = np.sum(
                    np.cross(tempposgas, tempvelgas) *
                    massgas[startAllGas[i]:endAllGas[i]][inEll][:, np.newaxis],
                    axis=0)
                j200vecdm = np.sum(np.cross(tempposdm, tempveldm) *
                                   massdm[DMinEll][:, np.newaxis],
                                   axis=0)
                #if np.size(tempveldm)!=0: #can be no dm particles!
                #	costheta[i] = np.dot(j200vecgas,j200vecdm)/np.linalg.norm(j200vecgas)/np.linalg.norm(j200vecdm)
                j200vec = j200vecgas + j200vecdm
                j200[i] = np.linalg.norm(j200vec)
                j200dm[i] = np.linalg.norm(j200vecdm)

                j200gas[i] = np.linalg.norm(j200vecgas)
                j200gasNoNorm[i] = np.linalg.norm(j200vecgas)
                gasmass[i] = np.sum(
                    massgas[startAllGas[i]:endAllGas[i]][inEll])
                totmass[i] = gasmass[i] + mDM
                DMmass[i] = mDM
                rmax[i] = radii[2]
                rmin[i] = radii[0]
                closestm200[i] = m200dm
                #using fiducial m200 ~ 6*mgas
                #get r200 from analytic formula in Barkana,Loeb 01 review
                if gasmass[i] != 0.:  #Some ellipsoids fit nothing
                    m200fid = 6. * gasmass[i]
                    omgz = .27 * atime**(-3.) / (.27 * atime**(-3.) + .73)
                    dfact = omgz - 1.
                    delc = 18. * np.pi**2. + 82 * dfact - 39. * dfact**2.
                    r200fid = .784 * (m200fid * 100.)**(1. / 3.) * (
                        .27 / omgz * delc / 18. / np.pi**2)**(-1. /
                                                              3.) * 10 * atime
                    v200fid = np.sqrt(GCONST * (m200fid) / r200fid)
                    j200gas[i] *= 1. / np.sqrt(2) / (
                        gasmass[i]) / v200fid / r200fid
                    j200[i] *= 1. / np.sqrt(2) / (
                        totmass[i]) / v200fid / r200fid
                    if mDM != 0.:
                        j200dm[i] *= 1. / np.sqrt(2) / mDM / v200fid / r200fid
                    gasfrac[i] = gasmass[i] / totmass[i]

        #Reindex to account for shrunken ellipsoids with gas particles >100

        goodidx, = np.where(np.logical_and(numGas > 100, negradii == 0.))

        self.j200gas = j200gas[goodidx]
        self.j200dm = j200dm[goodidx]
        self.j200 = j200[goodidx]
        self.j200gasNoNorm = j200gasNoNorm[goodidx]
        self.gasfrac = gasfrac[goodidx]
        self.totmass = totmass[goodidx]
        self.totmass *= 10**10
        #costheta = costheta[goodidx]
        self.rmax = rmax[goodidx]
        self.rmin = rmin[goodidx]
        #thetadeg = np.arccos(costheta)*180./np.pi
        self.gasmass = gasmass[goodidx]
        self.closestm200 = closestm200[goodidx]

        #Reindex the Rmin, R200_DM params
        exec("self.rclosest = Rmin_%s_%s[snapnum-10][goodidx]" %
             (s_res, s_vel))
        exec("self.R200dm = R200dm_%s_%s[snapnum-10][goodidx]" %
             (s_res, s_vel))
Example #14
fig_gas_z = plt.figure(figsize=(5, 5))
fig_star_z = plt.figure(figsize=(5, 5))
fig_cgmf = plt.figure(figsize=(5, 5))

ax_sfr = fig_sfr.add_subplot(1, 1, 1)
ax_gas_z = fig_gas_z.add_subplot(1, 1, 1)
ax_star_z = fig_star_z.add_subplot(1, 1, 1)
ax_cgmf = fig_cgmf.add_subplot(1, 1, 1)

for redshift in target_redshifts:
    diff = np.abs(redshift - redshifts)
    snap = snapshots[diff == diff.min()]
    cat = readsubfHDF5.subfind_catalog(dir,
                                       snap[0],
                                       keysel=[
                                           'SubhaloMassType', 'SubhaloSFR',
                                           'SubhaloGasMetallicity',
                                           'SubhaloGasMetallicitySfr'
                                       ])

    stellar_masses = cat.SubhaloMassType[:,
                                         4] * 1e10 / little_h  # units of M_solar
    sfr = cat.SubhaloSFR  # units of M_solar / yr
    gas_z = cat.SubhaloGasMetallicity  # unitless metallicity
    gas_sfr_z = cat.SubhaloGasMetallicitySfr  # unitless metallicity
    star_z = readsubfHDF5.subhalo_stellar_metallicities(snap=snap[0])

    # read this guy in manually...
    file = '/n/home01/ptorrey/ReviewPlots/cold_gas_masses_z' + str(
        redshift) + '.hdf5'
    f = hdf5lib.OpenFile(file, mode='r')
Example #15
    def __init__(self,overwrite=False):
        # self.run = "c0_128"
        # self.fnummax = 8
        # self.base="/n/hernquistfs1/Illustris/SmallBox/GFM/Production/Cosmo/Cosmo0_V6/L25n128/output/"
        # self.snapnum_arr = np.array([120])
        self.very_large_sim = True

        self.run = "ill1"
        self.fnummax=512 # future: find this automatically instead of having it as an input
        self.base="/n/ghernquist/Illustris/Runs/Illustris-1/output/"
        self.snapnum_arr = np.array([120])
        
        group_min_mass = 10.**11
        group_max_mass = 10.**18.
        self.dat_str_list = ["Masses","Coordinates","GFM_Metals","GFM_Metallicity","Velocities","Density","Volume","InternalEnergy","ElectronAbundance","NeutralHydrogenAbundance","SmoothingLength"] #future: add "Radius"
        self.savebase = '/n/home04/jsuresh/scratch1/Test/'
        # self.savebase = '/n/home04/jsuresh/data1/Projects/Feedback_and_CGM/CGM_new/data/CGM_snaps/'

        # Here's where the magic happens
        comm = MPI.COMM_WORLD
        self.rank = comm.Get_rank()
        print "my rank = {}".format(self.rank)
        self.size = comm.Get_size()
        # print "my size = {}".format(size)
        if self.rank == 0: print "Done with MPI comm/rank/size initialization!"

        if self.fnummax % self.size != 0:
            raise Exception("# of processes does not divide into # of subfiles!")

        for snapnum in self.snapnum_arr:
            if self.rank == 0:
                # Create necessary folder if it does not exist:
                CGM_snapdir = self.savebase+"{}/s{}/".format(self.run,snapnum)
                if not os.path.isdir(CGM_snapdir):
                    print "Trying to create {}".format(CGM_snapdir)
                    os.mkdir(CGM_snapdir)

                # Get header information before proceeding:
                fname = self.base+"snapdir_"+str(snapnum).zfill(3)+"/snap_"+str(snapnum).zfill(3)+".0.hdf5"
                print "fname ",fname
                f = h5py.File(fname,'r')
                self.redshift=f["Header"].attrs["Redshift"]
                self.hubble=f["Header"].attrs["HubbleParam"]
                self.box=f["Header"].attrs["BoxSize"]
                self.omegam=f["Header"].attrs["Omega0"]
                self.omegal=f["Header"].attrs["OmegaLambda"]
                f.close()

                cat=readsubfHDF5.subfind_catalog(self.base,snapnum,subcat=False,keysel=["Group_M_Crit200","GroupPos","Group_R_Crit200"]) #,"GroupBHMass","GroupBHMdot"

                # Select by minimum group mass threshold:
                self.grp_mass = np.array(cat.Group_M_Crit200)
                m = AU.PhysicalMass(self.grp_mass)
                mass_select = np.logical_and(m > group_min_mass, m < group_max_mass)

                self.grp_mass = self.grp_mass[mass_select]
                self.grp_ids = np.arange(np.float64(cat.ngroups))
                self.grp_ids = self.grp_ids[mass_select]
                self.grp_pos = np.array(cat.GroupPos)[mass_select]
                self.grp_Rvir = np.array(cat.Group_R_Crit200)[mass_select]
                # self.grp_BHmass = np.array(cat.GroupBHMass)[mass_select]
                # self.grp_BHMdot = np.array(cat.GroupBHMdot)[mass_select]
                self.n_selected_groups = np.float32(np.size(self.grp_mass))

                if not overwrite:
                    # First remove all groups which already have data files output:
                    keep = np.ones_like(self.grp_ids,dtype=bool)
                    for i in np.arange(self.n_selected_groups):
                        grp_id = self.grp_ids[i]
                        filepath = self.savebase+ "{}/s{}/{}.hdf5".format(self.run,snapnum,str(int(grp_id)).zfill(5))
                        if os.path.isfile(filepath):
                            # This block already runs on rank 0 only (see above).
                            print "File {} already exists!  Skipping...".format(filepath)
                            keep[i] = False
                    self.grp_ids = self.grp_ids[keep]
                    self.grp_pos = self.grp_pos[keep]
                    self.grp_Rvir = self.grp_Rvir[keep]
                    self.grp_mass = self.grp_mass[keep]
                    self.n_selected_groups = np.float32(np.size(self.grp_mass))

                print "CGM snapshots will be written for the following group ids: {}".format(self.grp_ids)

            else:
                self.redshift = None
                self.hubble = None
                self.box = None
                self.omegam = None
                self.omegal = None
                self.grp_mass = None
                self.grp_ids = None
                self.grp_pos = None
                self.grp_Rvir = None
                self.n_selected_groups = None

            if self.size > 1:
                # Broadcast necessary data from root process to other processes
                self.redshift = comm.bcast(self.redshift,root=0)
                self.hubble = comm.bcast(self.hubble,root=0)
                self.box = comm.bcast(self.box,root=0)
                self.omegam = comm.bcast(self.omegam,root=0)
                self.omegal = comm.bcast(self.omegal,root=0)
                self.grp_mass = comm.bcast(self.grp_mass,root=0)
                self.grp_ids = comm.bcast(self.grp_ids,root=0)
                self.grp_pos = comm.bcast(self.grp_pos,root=0)
                self.grp_Rvir = comm.bcast(self.grp_Rvir,root=0)
                self.n_selected_groups = comm.bcast(self.n_selected_groups,root=0)



            if self.very_large_sim:
                # Read in positions from entire snapshot:
                pos = self.subprocess_load_data(snapnum,"Coordinates")
                if self.rank == 0: print "Done loading full position array in snapshot"

                # Build KDtree of gas positions:
                gas_kdtree = cKDTree(pos)
                print "Rank {} is done building its gas KD tree".format(self.rank)

                # Loop over all group positions, and find indices of gas close to this group position
                index_dict = {}
                for i in np.arange(self.n_selected_groups):
                    grp_id = self.grp_ids[i]
                    gpos  = self.grp_pos[i]
                    # r200  = self.grp_Rvir[i]

                    dist_thresh = AU.CodePosition(500.*np.sqrt(3),self.redshift)
                    ind = self.get_gas_ind(gpos,dist_thresh,gas_kdtree)
                    index_dict[grp_id] = np.copy(ind)
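                    # get_gas_ind is not shown in this excerpt; a plausible
                    # sketch is a plain KD-tree ball query, e.g.
                    #   ind = np.array(gas_kdtree.query_ball_point(gpos, dist_thresh))
                    # returned as an integer index array.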

                # Now we have full index dictionary for all groups.  We no longer need the KD-tree.
                # try: print "Size of index dictionary in bytes: ",sys.getsizeof(index_dict)
                # except: print "try1 failed"
                # try: print "Size of KD-tree in bytes: ",sys.getsizeof(gas_kdtree)
                # except: print "try2 failed"
                del gas_kdtree

                # Create data structure which will house data for all groups simultaneously:
                group_dict = {}
                for i in np.arange(self.n_selected_groups):
                    grp_id = self.grp_ids[i]
                    group_dict[grp_id] = {}

                # Now get position information for every group, since we already have the position array in memory.
                # Then delete the pos array, since it is probably very large.
                for i in np.arange(self.n_selected_groups):
                    grp_id = self.grp_ids[i]
                    ind = index_dict[grp_id]
                    group_dict[grp_id]["Coordinates"] = pos[ind]
                del pos
                print "Rank {} is done with the Coordinates field for all groups!".format(self.rank)

                # Now do the same for all other desired fields, deleting each one after we are done with it.
                for dat_str in self.dat_str_list:
                    if dat_str == "Coordinates":
                        pass
                    else:
                        dat = self.subprocess_load_data(snapnum,dat_str)
                        for i in np.arange(self.n_selected_groups):
                            grp_id = self.grp_ids[i]
                            ind = index_dict[grp_id]
                            group_dict[grp_id][dat_str] = dat[ind]
                        del dat
                    print "Rank {} is done with the {} field for all groups!".format(self.rank,dat_str)


                # We have now loaded all of the data for all of the groups!  First delete the large index_dict:
                del index_dict

                if self.size == 1:
                    # If there is only one process, then just trivially loop through all groups and save their data
                    for i in np.arange(self.n_selected_groups):
                        grp_id = self.grp_ids[i]
                        save_dict = group_dict[grp_id]
                        self.save_group_data(snapnum,i,save_dict)

                elif self.size > 1:
                    # If there are subprocesses, then send the data to the rank-0 process, where it will be saved.
                    comm.Barrier()
                    print "Rank {} hit the comm barrier!".format(self.rank)
                    # We now send data from the subprocesses to the rank-0 process, group by group.
                    for i in np.arange(self.n_selected_groups):
                        grp_id = self.grp_ids[i]
                        if self.rank == 0: 
                            save_dict = {}
                            # save_dict = group_dict[grp_id]

                        for dat_str in self.dat_str_list:
                            if group_dict.has_key(grp_id) and group_dict[grp_id].has_key(dat_str):
                                foo = np.copy(group_dict[grp_id][dat_str])
                            else:
                                foo = None
                            
                            # Now gather from all of the subprocesses to rank 0:
                            foo = comm.gather(foo,root=0)

                            if self.rank == 0:
                                savedat = self._cat_gather_list(foo)
                                save_dict[dat_str] = savedat

                        # Rank 0 now has all of the data for this group saved in save_dict.  Save to file:
                        if self.rank == 0:
                            self.save_group_data(snapnum,i,save_dict)
                            # del save_dict # HERE
                        # Now all processes remove this grp_id entry from their memory
                        # if group_dict.has_key(grp_id): 
                        #     del group_dict[grp_id]
                        # else: 
                        #     pass



            elif not self.very_large_sim:
                # Read in all data from snapshot a single time:
                local_dict = {}
                for dat_str in self.dat_str_list:
                    local_dict[dat_str] = self.subprocess_load_data(snapnum, dat_str)
                if self.rank == 0: print "Done loading full snapshot"

                # Build KDtree of gas positions:
                pos = local_dict['Coordinates']
                gas_kdtree = cKDTree(pos)

                # Loop over all group positions, and find indices of gas close to this group position
                for i in np.arange(self.n_selected_groups):
                    grp_id = self.grp_ids[i]
                    gpos  = self.grp_pos[i]
                    # r200  = self.grp_Rvir[i]

                    # Check whether CGM data file already exists:
                    filepath = self.savebase + "{}/s{}/{}.hdf5".format(self.run, snapnum, str(int(grp_id)).zfill(5))
                    if os.path.isfile(filepath):
                        if self.rank == 0:
                            print "File {} already exists!  Skipping...".format(filepath)
                    else: # If CGM data file does not exist yet, then continue:
                        dist_thresh = AU.CodePosition(500.*np.sqrt(3), self.redshift)
                        ind = self.get_gas_ind(gpos, dist_thresh, gas_kdtree, self.box)

                        send_dict = {}
                        if np.size(ind) > 0:
                            for dat_str in self.dat_str_list:
                                send_dict[dat_str] = local_dict[dat_str][ind]

                        # Future: hide this in another function
                        # Now that data has been compiled for this process, send it to rank-0 process
                        if self.rank == 0:
                            save_dict = send_dict
                        if self.size > 1:
                            comm.Barrier()
                            print "comm barrier!"
                            for dat_str in self.dat_str_list:
                                for temp_rank in range(1, self.size):
                                    if self.rank == temp_rank:
                                        if send_dict.has_key(dat_str):
                                            comm.send(send_dict[dat_str],dest=0,tag=i)
                                        else:
                                            comm.send('nothing',dest=0,tag=i)
                                    elif self.rank == 0:
                                        hold = comm.recv(source=temp_rank,tag=i)
                                        #print "this is what was received from rank {}: {}".format(temp_rank,hold)
                                        if hold != 'nothing':
                                            if save_dict.has_key(dat_str):
                                                save_dict[dat_str] = np.append(save_dict[dat_str],hold,axis=0)
                                            else:
                                                save_dict[dat_str] = hold


                        # Now data for this halo has been sent to rank-0 process.  Time to save it.
                        if self.rank == 0:
                            print "Saving group file now to: {}".format(filepath)
                            f=h5py.File(filepath,'a')

                            grp = f.create_group("Header")
                            grp.attrs["hubble"]=hubble
                            grp.attrs["omegam"]=omegam
                            grp.attrs["omegal"]=omegal
                            grp.attrs["redshift"]=redshift
                            grp.attrs["box"]=box

                            grp.attrs["grp_id"] = grp_id
                            grp.attrs["grp_mass"] = self.grp_mass[i]
                            grp.attrs["grp_pos"] = self.grp_pos[i]
                            grp.attrs["grp_Rvir"] = self.grp_Rvir[i]
                            grp.attrs["grp_BHmass"] = self.grp_BHmass[i]
                            grp.attrs["grp_BHMdot"] = self.grp_BHMdot[i]

                            p_grp = f.create_group('PartType0')
                            for key in save_dict:
                                #print save_dict[key]
                                p_grp.create_dataset(key,data=save_dict[key])
                            f.close()
Example #16
smhm = np.zeros(n_bins)
# ============================================== #
scalefactors = illustris.load_scalefactors()
redshifts = 1.0 / scalefactors - 1.0
snapshots = np.arange(redshifts.shape[0])
# ============================================== #

fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(1, 1, 1)

for redshift in target_redshifts:
    diff = np.abs(redshift - redshifts)
    snap = snapshots[diff == diff.min()]
    cat = readsubfHDF5.subfind_catalog(dir,
                                       snap[0],
                                       keysel=[
                                           'SubhaloMassType', 'SubhaloGrNr',
                                           'GroupFirstSub', 'Group_M_Crit200'
                                       ])

    stellar_masses = cat.SubhaloMassType[:, 4] * 1e10 / little_h
    halo_masses = cat.SubhaloMassType[:, 1] * 1e10 / little_h
    m200c_halo_masses = cat.Group_M_Crit200 * 1e10 / little_h

    gr_first_sub = cat.GroupFirstSub  # the first subhalo ID for each group
    sub_grnr = cat.SubhaloGrNr  # the group ID for each subhalo
    sub_nr = np.arange(stellar_masses.shape[0])  # the subhalo ID

    sub_first_in_group = gr_first_sub[
        sub_grnr]  # the first subhalo of the group to which this subhalo belongs
    is_central = sub_first_in_group == sub_nr  # a subhalo is a central iff it is its group's first subhalo
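
A hedged sketch of how is_central could feed the smhm array initialized at the top of this example; the bin edges and the median statistic are assumptions:

    # Hypothetical continuation: median stellar-to-halo mass ratio of centrals
    # per M200c bin (bin edges assumed, in Msun).
    cen_mstar = stellar_masses[is_central]
    cen_m200 = m200c_halo_masses[sub_grnr[is_central]]
    bin_edges = np.logspace(11, 15, n_bins + 1)
    for b in range(n_bins):
        in_bin = (cen_m200 >= bin_edges[b]) & (cen_m200 < bin_edges[b + 1])
        if in_bin.any():
            smhm[b] = np.median(cen_mstar[in_bin] / cen_m200[in_bin])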
Example #17
    #+ "&mass_stars__lt=" + str(max_mass) \
    #+ "&halfmassrad_stars__gt=" + str(2 / a0 * littleh) # 2 kpc

    cut1 = get(url_sbhalos + search_query)
    cut1['count']
    cut1 = get(url_sbhalos + search_query, {
        'limit': cut1['count'],
        'order_by': 'id'
    })

    sub_list = cut1['results']
    sub_ids = np.array([sub['id'] for sub in cut1['results']], dtype='i')

    if args.local:
        cat = readsubfHDF5.subfind_catalog(
            args.local,
            snapnum,  #grpcat=False, subcat=False,
            keysel=['GroupFirstSub', 'SubhaloGrNr'])
        sat = np.zeros(cat.SubhaloGrNr.size, dtype=bool)
        sat[sub_ids] = (sub_ids != cat.GroupFirstSub[cat.SubhaloGrNr[sub_ids]])
        del cat
        gc.collect()

else:
    sub_ids = None
    if args.local:
        sat = None

my_subs = scatter_work(sub_ids, rank, size)
sub_ids = comm.bcast(sub_ids, root=0)
if args.local:
    sat = comm.bcast(sat, root=0)
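
The get() helper used above is not shown; in the Illustris web-API examples it is typically a thin requests wrapper along these lines (a sketch; the API key is a placeholder):

# Hypothetical sketch of the web-API helper assumed by this example.
import requests

def get(path, params=None):
    headers = {"api-key": "your-api-key-here"}  # placeholder key
    r = requests.get(path, params=params, headers=headers)
    r.raise_for_status()
    if r.headers['content-type'] == 'application/json':
        return r.json()  # parse JSON responses; other responses returned as-is
    return r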
Example #18
    def __init__(self,snap_dir,snapnum,savefile,group_min_mass=0,group_max_mass=10e16,rmin_pkpc=300,Rvir_min=1,load_savefile=False,BH_dat=True,fuzz_implemented=False):    
        """ Initiate class

        Parameters:
            snap_dir - directory of snapshots
            snapnum - snapshot number
            savefile - where to save calculation output """

        # Initiate variables
        self.snap_dir = snap_dir
        self.snapnum = snapnum
        self.savefile = savefile
        self.group_min_mass = group_min_mass
        self.group_max_mass = group_max_mass
        self.rmin_pkpc = rmin_pkpc
        self.Rvir_min = Rvir_min
        self.BH_dat = BH_dat
        #self.load_savefile = load_savefile
        #self.fuzz_implemented = fuzz_implemented


        if load_savefile:
            pass
            #self.load_savefile()
        else:
            ts = time.time()

            # Get basic parameters of snapshot
            f=hdfsim.get_file(snapnum,snap_dir,0)
            self.redshift=f["Header"].attrs["Redshift"]
            self.hubble=f["Header"].attrs["HubbleParam"]
            self.box=AU.PhysicalPosition(f["Header"].attrs["BoxSize"],self.redshift,hubble=self.hubble)
            self.omegam=f["Header"].attrs["Omega0"]
            self.omegal=f["Header"].attrs["OmegaLambda"]
            print "redshift: ",self.redshift
            f.close()

            # Other useful things:
            self.rho_barycrit = 0.0456*AU.GetRhoCrit0(hubble=self.hubble)
            self.tab = cc.CloudyTable(self.redshift)
            self.Hz = AU.CalcH_kms_over_Mpc(self.redshift, OmegaM=self.omegam, OmegaL=self.omegal, hubble=self.hubble)

            # Get group mass data so we can make a mass cut
            self.cat = readsubfHDF5.subfind_catalog(self.snap_dir,self.snapnum,long_ids=True)
            self.n_all_groups = np.float64(self.cat.ngroups)
            self.n_all_subs = np.float64(self.cat.nsubs)
            self.grp_ids = np.arange(self.n_all_groups)
            self.grp_mass = AU.PhysicalMass(np.array(self.cat.Group_M_Crit200),hubble=self.hubble)
            print "Total # of groups in snapshot: ",self.n_all_groups

            # Impose minimum halo mass cut and get additional group data:
            mass_select = np.logical_and(self.grp_mass > self.group_min_mass,self.grp_mass < self.group_max_mass)
            self.grp_ids = self.grp_ids[mass_select]
            self.grp_mass = self.grp_mass[mass_select]
            self.grp_pos = AU.PhysicalPosition(np.array(self.cat.GroupPos[mass_select]),self.redshift,hubble=self.hubble)
            self.grp_Rvir = AU.PhysicalPosition(np.array(self.cat.Group_R_Crit200[mass_select]),self.redshift,hubble=self.hubble)
            self.grp_vel = AU.PeculiarVelocity(np.array(self.cat.GroupVel[mass_select]),self.redshift)
            if self.BH_dat:
                self.grp_BHmass = AU.PhysicalMass(np.array(self.cat.GroupBHMass[mass_select]),hubble=self.hubble)
                self.grp_BHMdot = np.array(self.cat.GroupBHMdot[mass_select])
            self.n_selected_groups = np.float64(np.size(self.grp_mass))
            print "# of selected groups: ",self.n_selected_groups

            # Construct group/halo tables (assumes subfind-ordered snapshot)
            self.GroupOffset, self.HaloOffset = subfind_tables.constructtables(self.cat)
            self.GroupLenType = self.cat.GroupLenType
            self.SubhaloLenType = self.cat.SubhaloLenType

            print "Time for preamble: {}".format(time.time()-ts)

            # Get gas positions, masses, and metallicities for this subfile
            ts = time.time()
            self.get_gas_data()
            print "# of gas cells: ",self.ngas
            print "Time to get gas data: {}".format(time.time()-ts)

            # Generate kdtree for particle positions:
            self.gas_kdtree = cKDTree(self.full_gas_pos,leafsize=10)

            # Generate particle flag
            ts = time.time()
            self.full_gas_flag = subfind_tables.generate_particle_haloflags(self.snap_dir,self.snapnum,self.cat)
            print "Time to generate flags: {}".format(time.time()-ts)

            # Calculate background mass-weighted metallicity
            ts = time.time()
            self.metal_bg = self.calc_background_metallicity()
            print "background metallicity: ",self.metal_bg
            print "Time to calculate metal bg: {}".format(time.time()-ts)

            # Calculate enrichment radius for all groups
            self.extract_CGM_allhalos()
            self.save_data()
Example #19
0
snap_nums = np.arange(snap_earliest,snap_latest)
snap_nums = snap_nums[::-1] #reverse

cat = readsubfHDF5.subfind_catalog(base, snap_num)

# I assume that the subhalo is the primary subhalo in its group
#sub_list = cat.GroupFirstSub
#grp_id = np.argmin( np.abs(sub_id-sub_list) )
grp_id = cat.SubhaloGrNr[sub_id]  # group index of this subhalo (SubhaloParent is a subhalo index, not a group number)

sub_partIDs = get_subhalo_ids(base,snap_num,sub_id)
sub_pos = cat.SubhaloPos[sub_id]
sub_mass = cat.SubhaloMass[sub_id]
sub_vel = cat.SubhaloVel[sub_id]
sub_Rvir = cat.Group_R_Crit200[grp_id]

new_match_flag = 1
Example #20
0
    def __init__(self, overwrite=False):
        # self.run = "c0_128"
        # self.fnummax = 8
        # self.base="/n/hernquistfs1/Illustris/SmallBox/GFM/Production/Cosmo/Cosmo0_V6/L25n128/output/"
        # self.snapnum_arr = np.array([120])
        self.very_large_sim = True

        self.run = "ill1"
        self.fnummax = 512  # future: find this automatically instead of having it as an input
        self.base = "/n/ghernquist/Illustris/Runs/Illustris-1/output/"
        self.snapnum_arr = np.array([120])

        group_min_mass = 10.**11
        group_max_mass = 10.**18.
        self.dat_str_list = [
            "Masses", "Coordinates", "GFM_Metals", "GFM_Metallicity",
            "Velocities", "Density", "Volume", "InternalEnergy",
            "ElectronAbundance", "NeutralHydrogenAbundance", "SmoothingLength"
        ]  #future: add "Radius"
        self.savebase = '/n/home04/jsuresh/scratch1/Test/'
        # self.savebase = '/n/home04/jsuresh/data1/Projects/Feedback_and_CGM/CGM_new/data/CGM_snaps/'

        # Here's where the magic happens
        comm = MPI.COMM_WORLD
        self.rank = comm.Get_rank()
        print "my rank = {}".format(self.rank)
        self.size = comm.Get_size()
        # print "my size = {}".format(size)
        if self.rank == 0: print "Done with MPI comm/rank/size initialization!"

        if self.fnummax % self.size != 0:
            raise Exception(
                "# of processes does not divide into # of subfiles!")

        for snapnum in self.snapnum_arr:
            if self.rank == 0:
                # Create necessary folder if it does not exist:
                CGM_snapdir = self.savebase + "{}/s{}/".format(
                    self.run, snapnum)
                if not os.path.isdir(CGM_snapdir):
                    print "Trying to create {}".format(CGM_snapdir)
                    os.mkdir(CGM_snapdir)

                # Get header information before proceeding:
                fname = self.base + "snapdir_" + str(snapnum).zfill(
                    3) + "/snap_" + str(snapnum).zfill(3) + ".0.hdf5"
                print "fname ", fname
                f = h5py.File(fname, 'r')
                self.redshift = f["Header"].attrs["Redshift"]
                self.hubble = f["Header"].attrs["HubbleParam"]
                self.box = f["Header"].attrs["BoxSize"]
                self.omegam = f["Header"].attrs["Omega0"]
                self.omegal = f["Header"].attrs["OmegaLambda"]
                f.close()

                cat = readsubfHDF5.subfind_catalog(
                    self.base,
                    snapnum,
                    subcat=False,
                    keysel=["Group_M_Crit200", "GroupPos",
                            "Group_R_Crit200"])  #,"GroupBHMass","GroupBHMdot"

                # Select by minimum group mass threshold:
                self.grp_mass = np.array(cat.Group_M_Crit200)
                m = AU.PhysicalMass(self.grp_mass)
                mass_select = np.logical_and(m > group_min_mass,
                                             m < group_max_mass)

                self.grp_mass = self.grp_mass[mass_select]
                self.grp_ids = np.arange(np.float64(cat.ngroups))
                self.grp_ids = self.grp_ids[mass_select]
                self.grp_pos = np.array(cat.GroupPos)[mass_select]
                self.grp_Rvir = np.array(cat.Group_R_Crit200)[mass_select]
                # self.grp_BHmass = np.array(cat.GroupBHMass)[mass_select]
                # self.grp_BHMdot = np.array(cat.GroupBHMdot)[mass_select]
                self.n_selected_groups = np.float32(np.size(self.grp_mass))

                if not overwrite:
                    # First remove all groups which already have data files output:
                    keep = np.ones_like(self.grp_ids, dtype=bool)
                    for i in np.arange(self.n_selected_groups):
                        grp_id = self.grp_ids[i]
                        filepath = self.savebase + "{}/s{}/{}.hdf5".format(
                            self.run, snapnum,
                            str(int(grp_id)).zfill(5))
                        if os.path.isfile(filepath):
                            # already inside the rank-0 branch, so no extra rank check is needed
                            print "File {} already exists!  Skipping...".format(
                                filepath)
                            keep[i] = False
                    self.grp_ids = self.grp_ids[keep]
                    self.grp_pos = self.grp_pos[keep]
                    self.grp_Rvir = self.grp_Rvir[keep]
                    self.grp_mass = self.grp_mass[keep]
                    self.n_selected_groups = np.float32(np.size(self.grp_mass))

                print "CGM snapshots will be written for the following group ids: {}".format(
                    self.grp_ids)

            else:
                self.redshift = None
                self.hubble = None
                self.box = None
                self.omegam = None
                self.omegal = None
                self.grp_mass = None
                self.grp_ids = None
                self.grp_pos = None
                self.grp_Rvir = None
                self.n_selected_groups = None

            if self.size > 1:
                # Broadcast necessary data from root process to other processes
                self.redshift = comm.bcast(self.redshift, root=0)
                self.hubble = comm.bcast(self.hubble, root=0)
                self.box = comm.bcast(self.box, root=0)
                self.omegam = comm.bcast(self.omegam, root=0)
                self.omegal = comm.bcast(self.omegal, root=0)
                self.grp_mass = comm.bcast(self.grp_mass, root=0)
                self.grp_ids = comm.bcast(self.grp_ids, root=0)
                self.grp_pos = comm.bcast(self.grp_pos, root=0)
                self.grp_Rvir = comm.bcast(self.grp_Rvir, root=0)
                self.n_selected_groups = comm.bcast(self.n_selected_groups,
                                                    root=0)

            if self.very_large_sim:
                # Read in positions from entire snapshot:
                pos = self.subprocess_load_data(snapnum, "Coordinates")
                if self.rank == 0:
                    print "Done loading full position array in snapshot"

                # Build KDtree of gas positions:
                gas_kdtree = cKDTree(pos)
                print "Rank {} is done building its gas KD tree".format(
                    self.rank)

                # Loop over all group positions, and find indices of gas close to this group position
                index_dict = {}
                for i in np.arange(self.n_selected_groups):
                    grp_id = self.grp_ids[i]
                    gpos = self.grp_pos[i]
                    # r200  = self.grp_Rvir[i]

                    dist_thresh = AU.CodePosition(500. * np.sqrt(3),
                                                  self.redshift)
                    ind = self.get_gas_ind(gpos, dist_thresh, gas_kdtree)
                    index_dict[grp_id] = np.copy(ind)
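                # Hedged sketch: self.get_gas_ind is not shown in this excerpt;
                # a plausible minimal implementation is a fixed-radius KD-tree
                # ball query.  (500*sqrt(3) above is the half-diagonal of a
                # 1000 kpc cube, so the ball fully contains that cube.)
                #
                #   def get_gas_ind(self, gpos, r, kdtree):
                #       return np.asarray(kdtree.query_ball_point(gpos, r))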

                # Now we have full index dictionary for all groups.  We no longer need the KD-tree.
                # try: print "Size of index dictionary in bytes: ",sys.getsizeof(index_dict)
                # except: print "try1 failed"
                # try: print "Size of KD-tree in bytes: ",sys.getsizeof(gas_kdtree)
                # except: print "try2 failed"
                del gas_kdtree

                # Create data structure which will house data for all groups simultaneously:
                group_dict = {}
                for i in np.arange(self.n_selected_groups):
                    grp_id = self.grp_ids[i]
                    group_dict[grp_id] = {}

                # Now get position information for every group, since we already have the position array in memory.
                # Then delete the pos array, since it is probably very large.
                for i in np.arange(self.n_selected_groups):
                    grp_id = self.grp_ids[i]
                    ind = index_dict[grp_id]
                    group_dict[grp_id]["Coordinates"] = pos[ind]
                del pos
                print "Rank {} is done with the Coordinates field for all groups!".format(
                    self.rank)

                # Now do the same for all other desired fields, deleting each one after we are done with it.
                for dat_str in self.dat_str_list:
                    if dat_str == "Coordinates":
                        pass
                    else:
                        dat = self.subprocess_load_data(snapnum, dat_str)
                        for i in np.arange(self.n_selected_groups):
                            grp_id = self.grp_ids[i]
                            ind = index_dict[grp_id]
                            group_dict[grp_id][dat_str] = dat[ind]
                        del dat
                    print "Rank {} is done with the {} field for all groups!".format(
                        self.rank, dat_str)

                # We have now loaded all of the data for all of the groups!  First delete the large index_dict:
                del index_dict

                if self.size == 1:
                    # If there is only one process, then just trivially loop through all groups and save their data
                    for i in np.arange(self.n_selected_groups):
                        grp_id = self.grp_ids[i]
                        save_dict = group_dict[grp_id]
                        self.save_group_data(snapnum, i, save_dict)

                elif self.size > 1:
                    # If there are subprocesses, then send the data to the rank-0 process, where it will be saved.
                    comm.Barrier()
                    print "Rank {} hit the comm barrier!".format(self.rank)
                    # We now send data from the subprocesses to the rank-0 process, group by group.
                    for i in np.arange(self.n_selected_groups):
                        grp_id = self.grp_ids[i]
                        if self.rank == 0:
                            save_dict = {}
                            # save_dict = group_dict[grp_id]

                        for dat_str in self.dat_str_list:
                            if group_dict.has_key(grp_id) and group_dict[
                                    grp_id].has_key(dat_str):
                                foo = np.copy(group_dict[grp_id][dat_str])
                            else:
                                foo = None

                            # Now gather from all of the subprocesses to rank 0:
                            foo = comm.gather(foo, root=0)

                            if self.rank == 0:
                                savedat = self._cat_gather_list(foo)
                                save_dict[dat_str] = savedat

                        # Rank 0 now has all of the data for this group saved in save_dict.  Save to file:
                        if self.rank == 0:
                            self.save_group_data(snapnum, i, save_dict)
                            # del save_dict # HERE
                        # Now all processes remove this grp_id entry from their memory
                        # if group_dict.has_key(grp_id):
                        #     del group_dict[grp_id]
                        # else:
                        #     pass
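                    # Hedged sketch: self._cat_gather_list is not shown; given
                    # the comm.gather above, it plausibly concatenates the
                    # non-None per-rank arrays in rank order:
                    #
                    #   def _cat_gather_list(self, gathered):
                    #       chunks = [g for g in gathered if g is not None]
                    #       return np.concatenate(chunks, axis=0) if chunks else None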

            elif not self.very_large_sim:
                # Read in all data from snapshot a single time:
                local_dict = {}
                for dat_str in self.dat_str_list:
                    local_dict[dat_str] = self.subprocess_load_data(snapnum, dat_str)
                if self.rank == 0: print "Done loading full snapshot"

                # Build KDtree of gas positions:
                pos = local_dict['Coordinates']
                gas_kdtree = cKDTree(pos)

                # Loop over all group positions, and find indices of gas close to this group position
                for i in np.arange(self.n_selected_groups):
                    grp_id = self.grp_ids[i]
                    gpos = self.grp_pos[i]
                    # r200  = self.grp_Rvir[i]

                    # Check whether CGM data file already exists:
                    filepath = self.savebase + "{}/s{}/{}.hdf5".format(
                        self.run, snapnum,
                        str(int(grp_id)).zfill(5))
                    if os.path.isfile(filepath):
                        if self.rank == 0:
                            print "File {} already exists!  Skipping...".format(
                                filepath)
                        else:
                            pass
                    else:  # If CGM data file does not exist yet, then continue:
                        dist_thresh = AU.CodePosition(500. * np.sqrt(3),
                                                      self.redshift)
                        ind = self.get_gas_ind(gpos, dist_thresh, gas_kdtree)

                        send_dict = {}
                        if np.size(ind) > 0:
                            for dat_str in self.dat_str_list:
                                send_dict[dat_str] = local_dict[dat_str][ind]

                        # Future: hide this in another function
                        # Now that data has been compiled for this process, send it to rank-0 process
                        if self.rank == 0:
                            save_dict = send_dict
                        if self.size > 1:
                            comm.Barrier()
                            print "comm barrier!"
                            for dat_str in self.dat_str_list:
                                for temp_rank in range(1, self.size):
                                    if self.rank == temp_rank:
                                        if send_dict.has_key(dat_str):
                                            comm.send(send_dict[dat_str],
                                                      dest=0,
                                                      tag=i)
                                        else:
                                            comm.send('nothing', dest=0, tag=i)
                                    elif self.rank == 0:
                                        hold = comm.recv(source=temp_rank,
                                                         tag=i)
                                        #print "this is what was received from rank {}: {}".format(temp_rank,hold)
                                        if hold != 'nothing':
                                            if save_dict.has_key(dat_str):
                                                save_dict[dat_str] = np.append(
                                                    save_dict[dat_str],
                                                    hold,
                                                    axis=0)
                                            else:
                                                save_dict[dat_str] = hold

                        # Now data for this halo has been sent to rank-0 process.  Time to save it.
                        if self.rank == 0:
                            print "Saving group file now to: {}".format(
                                filepath)
                            f = h5py.File(filepath, 'a')

                            grp = f.create_group("Header")
                            grp.attrs["hubble"] = hubble
                            grp.attrs["omegam"] = omegam
                            grp.attrs["omegal"] = omegal
                            grp.attrs["redshift"] = redshift
                            grp.attrs["box"] = box

                            grp.attrs["grp_id"] = grp_id
                            grp.attrs["grp_mass"] = self.grp_mass[i]
                            grp.attrs["grp_pos"] = self.grp_pos[i]
                            grp.attrs["grp_Rvir"] = self.grp_Rvir[i]
                            grp.attrs["grp_BHmass"] = self.grp_BHmass[i]
                            grp.attrs["grp_BHMdot"] = self.grp_BHMdot[i]

                            p_grp = f.create_group('PartType0')
                            for key in save_dict:
                                #print save_dict[key]
                                p_grp.create_dataset(key, data=save_dict[key])
                            f.close()
Example #21
0
Omega0 = header.omega0
OmegaLambda = header.omegaL
massDMParticle = header.massarr[1] #all DM particles have same mass

#redshift evolution of critical_density
critical_density *= Omega0 + atime**3 * OmegaLambda
critical_density_gas = critical_density * baryonfraction
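# Hedged note on the scaling above: the physical critical density evolves as
# rho_crit(a) = rho_crit0 * (Omega0 / a**3 + OmegaLambda); multiplying by a**3
# to stay in comoving (code) units gives the factor (Omega0 + OmegaLambda * a**3)
# applied here.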

#load particle indices and catalogs
pGas = snapHDF5.read_block(filename3,"POS ", parttype=0)
mGas = snapHDF5.read_block(filename3,"MASS", parttype=0)
vGas = snapHDF5.read_block(filename3,"VEL ", parttype=0)
rGas = snapHDF5.read_block(filename3,"RHO ",parttype=0) 
uGas = snapHDF5.read_block(filename3,"U   ",parttype=0)
pDM = snapHDF5.read_block(filename3,"POS ",parttype=1)
catGas = readsubfHDF5.subfind_catalog(filename2, snapnum)


halo100_indices= np.where(catGas.GroupLenType[:,0] >100)[0]		
startAllGas = []
endAllGas   = []
for i in halo100_indices:
	startAllGas += [np.sum(catGas.GroupLenType[:i,0])]
	endAllGas   += [startAllGas[-1] + catGas.GroupLenType[i,0]]
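# Hedged aside: the loop above recomputes a prefix sum and is O(N^2) in the
# number of groups; an equivalent one-pass construction (illustrative names):
grp_offsets = np.concatenate(([0], np.cumsum(catGas.GroupLenType[:, 0])))
startAllGas_fast = grp_offsets[halo100_indices]
endAllGas_fast = grp_offsets[halo100_indices + 1]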

cms = catGas.GroupPos * atime / hubbleparam #convert from ckpc/h to physical kpc
cvel = catGas.GroupVel / atime

#Initialize arrays
#some radii are errors and  negative, will have a value of 1 to be excluded
negradii = np.zeros(np.size(halo100_indices)) 
Example #22
0
def run_series(base_id="base400", snap_num=314):
    import numpy as np

    # import readsubf
    import readsubfHDF5
    import os
    from base_lookup import directory

    from run_find_disks import run_find_disks

    # from run_J_profile import run_J_profile
    from subsnap_write import subsnap_write

    import time
    import glob

    #############
    # Control Parameters

    base = directory(base_id)[0]
    snap_num = 311
    # snap_num = 314 #directory(base_id)[1]
    # snap_num = 189 #for z = 2
    # snap_num = 250

    base_dir = base_id + "/"
    save_dir = base_dir + "snap" + str(snap_num).zfill(3) + "/"
    resnap_folder = "resnaps/"
    subsnap_folder = save_dir + "subsnaps/"
    dat_folder = "data/"

    # Overwrite past analysis?
    overw = 1

    # (Analysis restricted to only primary subhalos in given group)
    # Analyze subhalos within a given mass range:
    M_botlim = 100.0
    M_toplim = 1000.0

    # Analyze set # of subhalos?
    analyze_N = -1

    # JOB CONTROL:
    # Concurrent jobs?
    job_limit = 10
    # Time between job submission?
    wait_time = 10  # seconds

    ############################################################################################

    if not os.path.exists(base_dir):
        os.system("mkdir " + base_dir)

    if not os.path.exists(save_dir):
        os.system("mkdir " + save_dir)

    if not os.path.exists(subsnap_folder):
        os.system("mkdir " + subsnap_folder)

        # cat = readsubf.subfind_catalog(base,snap_num,masstab=True)
    cat = readsubfHDF5.subfind_catalog(base, snap_num)

    sub_ids = cat.group_firstsub
    M_subs = cat.sub_mass[sub_ids]

    ind = np.logical_and(M_subs >= M_botlim, M_subs < M_toplim)
    M_subs = M_subs[ind]
    sub_ids = sub_ids[ind]
    # N_subs = np.uint32(M_subs.size)

    # Can insert specific subhalos instead:
    # sub_ids = [1136, 1302, 1536]
    # sub_ids = [8037]
    # sub_ids = [4987]
    # sub_ids = [0]
    """	
	sub_ids = [4024,\
	4987,\
	5517,\
	5704,\
	5791,\
	6265,\
	6911,\
	7542,\
	7721,\
	7746,\
	8005,\
	8037]
	"""

    if analyze_N > 0.0:
        sub_ids = sub_ids[0:analyze_N]

    print "Will submit " + str(len(sub_ids)) + " jobs.\n"
    raw_input("Press Enter to continue...")

    #################################################################################################

    resnap = resnap_folder + base_id + "_" + "S" + str(snap_num).zfill(3) + ".resnap"

    # Write intermediate .snap files:
    subsnap_write(resnap_name=resnap, base=base, snap_num=snap_num, sub_ids=sub_ids, save_dir=save_dir)

    # Job group control:
    grp_name = "/bd_decomp"
    add_grp = "bgadd -L " + str(job_limit) + " " + grp_name
    mod_grp = "bgmod -L " + str(job_limit) + " " + grp_name

    os.system(add_grp)
    os.system(mod_grp)

    ################################################
    def submit_job(base_id, base, snap_num, sub_id, dat_list, dat_dir):
        if overw == 0:
            subsnap_name = dat_dir + str(sub_id).zfill(5) + ".dat"
            if subsnap_name in dat_list:
                print subsnap_name
                print "Done with this one!"
            else:
                print "Could not find ", subsnap_name, "... submitting job."
                # run_bd(base_id, base, snap_num, sub_id, grp_name)
                run_find_disks(base_id, base, snap_num, sub_id, grp_name)
                # run_J_profile(base_id, base, snap_num, sub_id, grp_name)

                print "Waiting ", str(wait_time), "seconds to submit next job..."
                time.sleep(wait_time)

        else:
            # run_bd(base_id, base, snap_num, sub_id, grp_name)
            run_find_disks(base_id, base, snap_num, sub_id, grp_name)
            # run_J_profile(base_id, base, snap_num, sub_id, grp_name)

            print "Waiting ", str(wait_time), "seconds to submit next job..."
            time.sleep(wait_time)
            ################################################

    dat_dir = save_dir + dat_folder
    dat_list = glob.glob(dat_dir + "*.dat")

    n_matches = len(sub_ids)
    for i in np.arange(n_matches):
        print str(i + 1) + " of " + str(n_matches)
        submit_job(base_id=base_id, base=base, snap_num=snap_num, sub_id=sub_ids[i], dat_list=dat_list, dat_dir=dat_dir)

    print "Job submission done!"
        critical_density *= Omega0 + atime**3 * OmegaLambda
        critical_density_gas = critical_density * baryonfraction

        print(filename3)
        #load particle indices and catalogs
        pGas = snapHDF5.read_block(filename4, "POS ", parttype=0)  #correct 4
        iGas2 = snapHDF5.read_block(filename4, "ID  ",
                                    parttype=0)  #correct 4 to filename3_032!
        mGas = snapHDF5.read_block(filename3, "MASS", parttype=0)
        eGas = snapHDF5.read_block(filename3, "U   ", parttype=0)
        dGas = snapHDF5.read_block(filename3, "RHO ", parttype=0)
        xHI = snapHDF5.read_block(filename3, "HI  ", parttype=0)
        if str(species) == 'H2':
            xH2I = snapHDF5.read_block(filename3, "H2I ", parttype=0)
        pDM = snapHDF5.read_block(filename3, "POS ", parttype=1)
        cat = readsubfHDF5.subfind_catalog(filename2, snapnum)
        r200 = cat.Group_R_Crit200
        print('loading catalog done')

        def find_adress(j):
            found_ad = np.where(iGas2 == j)
            if j not in iGas2:
                print('ID ' + str(j) + ' is not in snapshot number ' +
                      str(snapnum))
                #fp_result.write('ID ' + str(j) + ' is not in snapshot number ' + str(snapnum) + '\n')
            return found_ad

        for j in ID:
            if j in iGas2:
                adress = np.where(iGas2 == j)  # iGas2, not the undefined iGas
                print('ID ' + str(j) + ' address is ' + str(adress) + '\n')
Example #24
0
	def __init__(self, res, vel, snapnum):
		self.vel = vel
		self.res = res
		self.snapnum = int(snapnum)
		self.s_vel = vel.replace(".","")
		self.s_res = res.replace(".","")

		#File paths
		filename = "/n/hernquistfs3/mvogelsberger/GlobularClusters/InterfaceWArepo_All_" + self.res + '_' + self.vel  + "/output/"
		filename2 = filename +  "DM_FOF" #Used for readsubfHDF5
		filename3 = filename + "snap_" + str(self.snapnum).zfill(3) #Used for hdf5lib, snapHDF5
		#Read header information	
		header = snapHDF5.snapshot_header(filename3)
		with hdf5lib.OpenFile(filename3 + ".hdf5") as fs:
			red = hdf5lib.GetAttr(fs, "Header", "Redshift")
			atime = hdf5lib.GetAttr(fs, "Header", "Time")
			boxSize = hdf5lib.GetAttr(fs, "Header", "BoxSize")
			boxSize *= atime / hubbleparam #convert from ckpc/h to kpc
			Omega0 = hdf5lib.GetAttr(fs, "Header", "Omega0")
			OmegaLambda = hdf5lib.GetAttr(fs, "Header", "OmegaLambda")
		
		#Read halo catalog
		cat = readsubfHDF5.subfind_catalog(filename2, self.snapnum)	
		#critical_density *= 1. / (Omega0 + OmegaLambda * atime * atime * atime) #redshift correction
		r200 = cat.Group_R_Crit200
		r200 *= atime / hubbleparam #convert from ckpc/h to kpc
		m200 = cat.Group_M_Crit200
		m200 *= 1. / hubbleparam #convert to 10^10 M_sun
		haloCMvel = cat.GroupVel
		haloCMvel *= 1. / atime #convert from km/s/a to km/s
		haloPos = cat.GroupPos
		haloPos *= atime / hubbleparam #convert from ckpc/h to kpc

		#Initialize arrays
		spinparamTotal = np.zeros(np.size(r200))
		spinparamGas = np.zeros(np.size(r200))
		spinparamDM = np.zeros(np.size(r200))
		gasfrac = np.zeros(np.size(r200))
		costheta = np.zeros(np.size(r200)) #misalignment angle
		v200 = np.zeros(np.size(r200))	
		numGas = np.zeros(np.size(r200))
		numDM = np.zeros(np.size(r200)) 

		#Read in particles
		massgas = snapHDF5.read_block(filename3, "MASS", parttype=0)
		massdm = snapHDF5.read_block(filename3, "MASS", parttype=1)
		posgas = snapHDF5.read_block(filename3, "POS ", parttype=0)
		posdm = snapHDF5.read_block(filename3, "POS ", parttype=1)
		velgas = snapHDF5.read_block(filename3, "VEL ", parttype=0)
		veldm = snapHDF5.read_block(filename3, "VEL ", parttype=1)
		#redefine position units from ckpc/h to kpc
		posgas *= atime / hubbleparam
		posdm *= atime / hubbleparam
		#redefine velocity units from kmsqrt(a)/s to km/s
		velgas *= np.sqrt(atime)
		veldm *= np.sqrt(atime)

		#boxSize hubble flow correction for halo CM velocity subtraction
		boxSizeVel = boxSize * hubbleparam * .1 * np.sqrt(Omega0/atime/atime/atime + OmegaLambda)
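		# Hedged unit check for boxSizeVel: H(a) = 100*h*sqrt(Omega0/a**3 + OmegaLambda)
		# km/s/Mpc = 0.1*h*sqrt(...) km/s/kpc, so multiplying by boxSize (in physical
		# kpc) gives the Hubble-flow velocity across the box in km/s, the right scale
		# for wrapping velocity differences with dx_wrap below.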
		


		#load particle indices
		over300idx, indgas, inddm = np.load('particleindex_' + self.res + '_' + self.vel + '_' + str(self.snapnum) + '.npy')
		over300idx = over300idx.astype(int)
		over1 = []

		for i,j in enumerate(over300idx):
			#remove halo CM velocity
			tempvelgas = dx_wrap(velgas[indgas[i]] - haloCMvel[j],boxSizeVel)
			tempveldm = dx_wrap(veldm[inddm[i]] - haloCMvel[j],boxSizeVel)
			#redefine positions wrt COM
			tempposgas = dx_wrap(posgas[indgas[i]] - haloPos[j],boxSize)
			tempposdm = dx_wrap(posdm[inddm[i]] - haloPos[j],boxSize)
			numDM[j] = np.size(tempposdm)
			numGas[j] = np.size(tempposgas)
			#Calculating j200
			#j200 of all particles
			j200vecgas = np.sum(np.cross(tempposgas,tempvelgas)*massgas[indgas[i]][:, np.newaxis],axis=0)
			j200vecdm = np.sum(np.cross(tempposdm,tempveldm)*massdm[inddm[i]][:, np.newaxis],axis=0)
			if np.size(tempvelgas)!=0: #can be no gas particles!
				costheta[j] = np.dot(j200vecgas,j200vecdm)/np.linalg.norm(j200vecgas)/np.linalg.norm(j200vecdm)
			j200vec = j200vecgas + j200vecdm
			j200 = np.linalg.norm(j200vec)
			j200gas = np.linalg.norm(j200vecgas)
			j200dm = np.linalg.norm(j200vecdm)
			v200[j] = np.sqrt(GCONST*m200[j]/r200[j])
			
			#Bullock spin parameter
			totalmass = massgas[indgas[i]].sum(dtype='float64') + massdm[inddm[i]].sum(dtype='float64')
			spinparamTotal[j] = j200/np.sqrt(2)/v200[j]/r200[j]/totalmass
			if np.size(tempveldm)!=0: #tempveldm can be empty no dm particles!
				spinparamDM[j] = j200dm/np.sqrt(2)/v200[j]/r200[j]/massdm[inddm[i]].sum(dtype='float64')
			if np.size(tempvelgas)!=0: #tempvelgas can be empty no gas particles!
				spinparamGas[j] = j200gas/np.sqrt(2)/v200[j]/r200[j]/massgas[indgas[i]].sum(dtype='float64')
			gasfrac[j] = massgas[indgas[i]].sum(dtype='float64') / (massgas[indgas[i]].sum(dtype='float64') + massdm[inddm[i]].sum(dtype='float64'))
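			# (The spin parameters above follow the Bullock et al. 2001 definition
			# lambda' = J / (sqrt(2) * M * V200 * R200), evaluated here for the
			# total, DM-only, and gas-only angular momenta respectively.)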

		#Reindex over300idx to account for SO halos with DM particles >300
		over300idx2 = over300idx[numDM[over300idx] > 300]

		#Plotting
		#Redfine in terms of over300idx2
		self.spinparamTotal = spinparamTotal[over300idx2]
		self.spinparamGas = spinparamGas[over300idx2]
		self.spinparamDM = spinparamDM[over300idx2]
		self.gasfrac = gasfrac[over300idx2]	
		self.m200 = m200[over300idx2]
		self.m200 *= 10**10  #Convert to solar mass.
		self.costheta = costheta[over300idx2]
		self.gasfracCosTheta = self.gasfrac[self.costheta!=0.]
		self.m2002 = self.m200[self.costheta!=0.]
		self.costheta = self.costheta[self.costheta!=0.] #take out the 0 gas components
		self.thetadeg = np.arccos(self.costheta)*180./np.pi
Example #25
0
def find_MC_tracer_ids(snap_num,sub_id,base,scale_factor,gal_radfac,gas_pos,gas_ids,star_pos,star_ids,tracer_ids,parent_ids):
	print "Finding MC tracer IDs..."
	cat = readsubfHDF5.subfind_catalog(base, snap_num)
	
	# I assume that the subhalo is the primary subhalo in its group
	sub_list = cat.GroupFirstSub
	grp_id = np.argmin( np.abs(sub_id-sub_list) )

	sub_pos = cat.SubhaloPos[sub_id]
	sub_mass = cat.SubhaloMass[sub_id]
	sub_Rvir = cat.Group_R_Crit200[grp_id]

	gal_rad = sub_Rvir * gal_radfac # * 0.1

	#snapname = base + "snapdir_"+str(snap_num).zfill(3)+"/snap_"+str(snap_num).zfill(3)
	snapname = base + "snap_"+str(snap_num).zfill(3)

	#Find all gas and star particles within 0.1 Rvir of the subhalo
	# GAS #
	#gas_pos = readsnapHDF5.read_block(snapname,"POS ",parttype=0)
	#gas_ids = readsnapHDF5.read_block(snapname,"ID  ",parttype=0)
	#readsnapHDF5.list_blocks(snapname+".hdf5")

	gas_x = np.logical_and(gas_pos[:,0] > sub_pos[0]-gal_rad, gas_pos[:,0] < sub_pos[0]+gal_rad)
	gas_y = np.logical_and(gas_pos[:,1] > sub_pos[1]-gal_rad, gas_pos[:,1] < sub_pos[1]+gal_rad)
	gas_z = np.logical_and(gas_pos[:,2] > sub_pos[2]-gal_rad, gas_pos[:,2] < sub_pos[2]+gal_rad)

	gas_xyz_ind = np.logical_and(np.logical_and(gas_x,gas_y),gas_z)
	gas_pos = gas_pos[gas_xyz_ind]
	gas_pos = gas_pos - sub_pos

	gas_r_ind = np.sqrt(gas_pos[:,0]**2 + gas_pos[:,1]**2 + gas_pos[:,2]**2) < gal_rad
	gal_gas_ids = gas_ids[gas_xyz_ind][gas_r_ind]
	print "len(gal_gas_ids )",len(gal_gas_ids)

	# Clear gas data:
	gas_pos = 0.
	gas_ids = 0.
	gas_x = 0.
	gas_y = 0.
	gas_z = 0.
	gas_xyz_ind = 0.
	gas_r_ind = 0.


	# STARS #
	#star_pos = readsnapHDF5.read_block(snapname,"POS ",parttype=4)
	#star_ids = readsnapHDF5.read_block(snapname,"ID  ",parttype=4)
	#readsnapHDF5.list_blocks(snapname+".hdf5")

	star_x = np.logical_and(star_pos[:,0] > sub_pos[0]-gal_rad, star_pos[:,0] < sub_pos[0]+gal_rad)
	star_y = np.logical_and(star_pos[:,1] > sub_pos[1]-gal_rad, star_pos[:,1] < sub_pos[1]+gal_rad)
	star_z = np.logical_and(star_pos[:,2] > sub_pos[2]-gal_rad, star_pos[:,2] < sub_pos[2]+gal_rad)

	star_xyz_ind = np.logical_and(np.logical_and(star_x,star_y),star_z)
	star_pos = star_pos[star_xyz_ind]
	star_pos = star_pos - sub_pos

	star_r_ind = np.sqrt(star_pos[:,0]**2 + star_pos[:,1]**2 + star_pos[:,2]**2) < gal_rad
	gal_star_ids = star_ids[star_xyz_ind][star_r_ind]
	print "len(gal_star_ids )",len(gal_star_ids)

	# Clear star data:
	star_pos = 0.
	star_ids = 0.
	star_x = 0.
	star_y = 0.
	star_z = 0.
	star_xyz_ind = 0.
	star_r_ind = 0.


	# Now find all MC tracers associated with the gas and star particles we have selected:
	pre_in1d = time.time()
	temp1 = tracer_ids[np.in1d(parent_ids,gal_gas_ids)]
	temp2 = tracer_ids[np.in1d(parent_ids,gal_star_ids)]
	post_in1d = time.time()

	print "in1d time: ",post_in1d-pre_in1d

	# update arrays after each iteration?

	gal_MC_ids = np.append(temp1,temp2)
	
	print "Done finding MC tracer IDs..."
	return gal_MC_ids
Example #26
0
#Read header information
header = snapHDF5.snapshot_header(filename3)
red = header.redshift
atime = header.time
boxSize = header.boxsize
Omega0 = header.omega0
OmegaLambda = header.omegaL
massDMParticle = header.massarr[1]  #all DM particles have same mass

#redshift evolution of critical_density
critical_density *= Omega0 + atime**3 * OmegaLambda
critical_density_gas = critical_density * baryonfraction

#Read halo catalog
catGas = readsubfHDF5.subfind_catalog(filename + "GasOnly_FOF", snapnum)
catDM = readsubfHDF5.subfind_catalog(filename + "DM_FOF", snapnum)

#Get CM & R200 of all halos w/ >300 DM particles, >100 gas particles
GroupPos_Gas = catGas.GroupPos[catGas.GroupLenType[:, 0] > 100]
GroupPos_DM = catDM.GroupPos[catDM.GroupLenType[:, 1] > 300]
R200_DM = catDM.Group_R_Crit200[catDM.GroupLenType[:, 1] > 300]
M200_DM = catDM.Group_M_Crit200[catDM.GroupLenType[:, 1] > 300]

#Filter for nonzero R200
GroupPos_DM = GroupPos_DM[R200_DM != 0.]
M200_DM = M200_DM[R200_DM != 0.]
R200_DM = R200_DM[R200_DM != 0.]

#Allocate arrays
matchingHalos = []
Example #27
0
	redshift = readsnapHDF5.snapshot_header(snapname).redshift
	scale_factor = 1./(1.+redshift)
	gal_radfac =  scale_factor * 2.5

	gal_vt_ids = find_vt_tracer_ids(snap_num,sub_id,base,scale_factor,gal_radfac)
	gal_MC_ids = find_MC_tracer_ids(snap_num,sub_id,base,scale_factor,gal_radfac)
	n_vt = np.uint32(gal_vt_ids.size)
	print "n_vt ",n_vt

	n_MC = np.uint32(gal_MC_ids.size)
	print "n_MC ",n_MC

	MC_vt_ratio = float(n_MC)/float(n_vt)
	print "n_MC/n_vt ", MC_vt_ratio

	cat = readsubfHDF5.subfind_catalog(base, snap_num)

	# I assume that the subhalo is the primary subhalo in its group
	#sub_list = cat.GroupFirstSub
	#grp_id = np.argmin( np.abs(sub_id-sub_list) )
	grp_id = cat.SubhaloGrNr[sub_id]  # group index (SubhaloParent is a subhalo index, not a group number)

	sub_partIDs = get_subhalo_ids(base,snap_num,sub_id)
	sub_pos = cat.SubhaloPos[sub_id]
	sub_mass = cat.SubhaloMass[sub_id]
	sub_vel = cat.SubhaloVel[sub_id]
	sub_Rvir = cat.Group_R_Crit200[grp_id]



elif resume == 1:
Example #28
0
import readsubfHDF5
import numpy as np

Base1 = '/n/hernquistfs1/Illustris/Runs/Illustris-3/output/'
Base2 = '/n/hernquistfs1/Illustris/Runs/Illustris-Dark-3/output/'

MatchBase = "./output/"
snapnum = 100

fname = MatchBase + "/matchdata/sub_match_" + str(snapnum).zfill(3)

cat1 = readsubfHDF5.subfind_catalog(Base1,
                                    snapnum,
                                    keysel=["SubhaloPos", "SubhaloLenType"])
cat2 = readsubfHDF5.subfind_catalog(Base2,
                                    snapnum,
                                    keysel=["SubhaloPos", "SubhaloLenType"])

ch = 200
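# (Hedged note: match_halonr, used below, is assumed to be loaded from the
# match file `fname` defined above; the loading code is not part of this
# excerpt.)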

print cat1.SubhaloPos[ch, :]
print cat2.SubhaloPos[match_halonr[ch], :]
Example #29
0
scalefactors = illustris.load_scalefactors()
redshifts = 1.0 / scalefactors - 1.0
snapshots = np.arange(redshifts.shape[0])
# ============================================== #

start_time = time.time()
end_time = time.time()
count = 1

min_sf_density = 1000.0

for redshift in target_redshifts:
    diff = np.abs(redshift - redshifts)
    snap = snapshots[diff == diff.min()]
    if this_task == 0:
        cat = readsubfHDF5.subfind_catalog(
            dir, snap[0], keysel=['SubhaloMassType', 'SubhaloSFR'])
    else:
        cat = None
    cat = comm.bcast(cat, root=0)

    stellar_mass = cat.SubhaloMassType[:, 4] * 1e10 / little_h
    total_gas_mass = cat.SubhaloMassType[:, 0] * 1e10 / little_h
    sfr = cat.SubhaloSFR
    n_subs = cat.nsubs

    cold_gas_mass = np.zeros(n_subs)  # local array
    cold_gas_mass_fraction = np.zeros(n_subs)  # local array
    global_cold_gas_mass = np.zeros(n_subs)  # global array
    global_cold_gas_mass_fraction = np.zeros(n_subs)  # global array

    readhaloHDF5.reset()
Example #30
0
#Last updated: 1/18/15.
#importing libraries.  Got 1/5/15 version of readsnapHDF5 from the Vogelsberger repo.
import readsubfHDF5  
import numpy as np
import cosmo_const as cc

#switch variable: if this_task == 0, this process loads the halo catalog below
this_task = 0

#loading in catalog of halos
if this_task==0:
	cat = readsubfHDF5.subfind_catalog('/n/ghernquist/Illustris/Runs/Illustris-1/output/', 135, keysel=['Group_M_Crit200', 'Group_M_Mean200', 'Group_M_TopHat200', 'SubhaloMassType'] )
else:
	cat = None

massInsideData = np.transpose(np.load('./kdTree_complete_10-300kpc_dm_bigdata_illustris-1.npy'))
haloNum = np.array(massInsideData[0], dtype=int)   #used as indices below
groupNum = np.array(massInsideData[1], dtype=int)  #used as indices below
m_crit200 = np.empty(len(groupNum))
m_mean200 = np.empty(len(groupNum))
m_topHat200 = np.empty(len(groupNum))
m_star = np.empty(len(groupNum))
m_blackhole = np.empty(len(groupNum))
i = 0
while (i<len(groupNum)):
	m_crit200[i] = cat.Group_M_Crit200[groupNum[i]]*1e10*(cc.h_little**-1.)
	m_mean200[i] = cat.Group_M_Mean200[groupNum[i]]*1e10*(cc.h_little**-1.)
	m_topHat200[i] = cat.Group_M_TopHat200[groupNum[i]]*1e10*(cc.h_little**-1.)
	m_star[i] = (cat.SubhaloMassType[:,4])[haloNum[i]]*1e10*(cc.h_little**-1.)
	m_blackhole[i] = (cat.SubhaloMassType[:,5])[haloNum[i]]*1e10*(cc.h_little**-1.)
	i += 1  #(increment was missing; without it this loop never terminates)
Example #31
0
import readsubfHDF5
import numpy as np
import hdfsim

base = "/n/hernquistfs1/mvogelsberger/projects/GFM/Production/Cosmo/Cosmo0_V6/L25n512/output/"
snapnum = 68

halonum = 1

cat = readsubfHDF5.subfind_catalog(base,snapnum,long_ids=True)

GasFuzzOffset = cat.GroupFuzzOffsetType[:,0]

start = GasFuzzOffset[halonum]
end = GasFuzzOffset[halonum+1]-1


totngas = 0
for fnum in xrange(0,500):
    try:
        f=hdfsim.get_file(snapnum,base,fnum)
    except IOError:
        break

    NumPart_ThisFile = f["Header"].attrs["NumPart_ThisFile"][0]

    if totngas < start and totngas+NumPart_ThisFile > start:
        newstart = start - totngas
        newend = end - totngas

        bar=f["PartType0"]
Example #32
0
snap_spacing = 1

# Mass cutoff for halos selected at latest_snap
mass_thresh = 10.

# Total number of halos to run for (mainly for testing purposes)
N_tot = -1

# Minimum tracer ID:
tracer_MIN = np.uint64(1000000010)

############################
pre_setup = time.time()

# Read in full subfind catalog
cat = readsubfHDF5.subfind_catalog(base, latest_snap)

# Look only at primary halos
sub_list = cat.GroupFirstSub
#grp_ids = np.argmin( np.abs(sub_id-sub_list) )
#grp_id = cat.SubhaloParent[sub_list]

# Apply mass threshold:
sub_mass = cat.SubhaloMass[sub_list]
mass_cutoff_ind = sub_mass > mass_thresh
sub_list = sub_list[mass_cutoff_ind]

# If N_tot is set, limit sub_list to that size
if N_tot > 0: sub_list = sub_list[: N_tot]

Example #33
0
def readhalo(base, snapbase, num, block_name, parttype, fof_num, sub_num, long_ids=False, double_output=False, verbose=False):
	global FlagRead, cat, GroupOffset, HaloOffset, multiple, filename, Parttype, FileTypeNumbers, FileNum


	if (FlagRead==False) | ((parttype in Parttype) == False):	
		if (verbose):
			print("READHALO: INITIAL READ")

		#add parttype to list
		Parttype.append(parttype)

		if (verbose):
			print("READHALO: Parttype = ", Parttype)

		#read in catalog
	cat = readsubfHDF5.subfind_catalog(base, num, long_ids=long_ids, double_output=double_output, keysel=["GroupLenType","GroupMassType","GroupNsubs","GroupFirstSub","SubhaloLenType","SubhaloMassType"])  #GroupMassType added: the verbose fof branch below reads it

		if (cat.ngroups==0):
			if (verbose):
				print("READHALO: no groups in catalog... returning")
			return

		
		if (FlagRead==False):
			GroupOffset = np.zeros([cat.ngroups, 6], dtype="int64")
			HaloOffset = np.zeros([cat.nsubs, 6], dtype="int64")
	
			filename = base+"/"+snapbase+"_"+str(num).zfill(3)
			multiple=False
			if (os.path.exists(filename+".hdf5")==False):
				filename = base+"/snapdir_"+str(num).zfill(3)+"/"+snapbase+"_"+str(num).zfill(3)+"."+str(0)
				multiple=True
			if (os.path.exists(filename+".hdf5")==False):
				print("READHALO: [error] file not found : ", filename)
				sys.exit()

			FlagRead=True


		#construct offset tables
		k=0
		for i in range(0, cat.ngroups):
			if (i>0):
				GroupOffset[i, parttype] =  GroupOffset[i-1, parttype] + cat.GroupLenType[i-1, parttype]
			if (cat.GroupNsubs[i]>0):
				HaloOffset[k, parttype] = GroupOffset[i, parttype]
				k+=1
				for j in range(1, cat.GroupNsubs[i]):
					HaloOffset[k, parttype] =  HaloOffset[k-1, parttype] + cat.SubhaloLenType[k-1, parttype]
					k+=1
		if (k!=cat.nsubs):
			print("READHALO: problem with offset table", k, cat.nsubs)
			sys.exit()

		#construct file tables
		if (multiple):
			filename = base+"/snapdir_"+str(num).zfill(3)+"/"+snapbase+"_"+str(num).zfill(3)+"."+str(0)
		else:
			filename = base+"/"+snapbase+"_"+str(num).zfill(3)

		head = snapHDF5.snapshot_header(filename)
		FileNum = head.filenum+1

		FileTypeNumbers = np.zeros([FileNum, 6], dtype="int64") 
		cumcount = np.zeros(6, dtype="int64")

		for fnr in range(0, FileNum-1):
			if (multiple):
				filename = base+"/snapdir_"+str(num).zfill(3)+"/"+snapbase+"_"+str(num).zfill(3)+"."+str(fnr)
			else:
				filename = base+"/"+snapbase+"_"+str(num).zfill(3)

			if (verbose):
				print("READHALO: initial reading file :", filename)

			head = snapHDF5.snapshot_header(filename)
	
			cumcount[:] += head.npart[:]
			FileTypeNumbers[fnr+1, :] = cumcount[:]
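		# FileTypeNumbers[f, pt] now holds the cumulative count of type-pt
		# particles in files 0..f-1, i.e. the global index of the first
		# type-pt particle stored in file f.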
			
	
	if (sub_num>=0) & (fof_num < 0):
		off = HaloOffset[sub_num, parttype]
		left = cat.SubhaloLenType[sub_num, parttype]
		if (verbose):
			print("READHALO: nr / particle # / mass :", sub_num, cat.SubhaloLenType[sub_num, parttype], cat.SubhaloMassType[sub_num, parttype].astype("float64"))
	if (fof_num>=0) & (sub_num < 0):
		off = GroupOffset[fof_num, parttype]
		left = cat.GroupLenType[fof_num, parttype]
		if (verbose):
			print("READHALO: nr / particle # / mass :", fof_num, cat.GroupLenType[fof_num, parttype], cat.GroupMassType[fof_num, parttype].astype("float64"))
	if (sub_num>=0) & (fof_num>=0):
		real_sub_num = sub_num + cat.GroupFirstSub[fof_num]
		off = HaloOffset[real_sub_num, parttype]
		left = cat.SubhaloLenType[real_sub_num, parttype]
		if (verbose):
			print("READHALO: nr / particle # / mass :", real_sub_num, cat.SubhaloLenType[real_sub_num, parttype], cat.SubhaloMassType[real_sub_num, parttype].astype("float64"))
		

	if (left==0):
		if (verbose):
			print("READHALO: no particles of type... returning")
		return


	#get first file that contains particles of required halo/fof/etc
	findex = np.argmax(FileTypeNumbers[:, parttype] > off) - 1
	#in case we reached the end argmax returns 0
	if (findex == -1): 
		findex = FileNum - 1

	if (verbose):
		print("READHALO: first file that contains particles =", findex)

	for fnr in range(0, findex):
		off -= FileTypeNumbers[fnr+1, parttype] - FileTypeNumbers[fnr, parttype] 

	#read data from file
	first=True
	for fnr in range(findex, FileNum):
		if (multiple):
			filename = base+"/snapdir_"+str(num).zfill(3)+"/"+snapbase+"_"+str(num).zfill(3)+"."+str(fnr)
		else:
			filename = base+"/"+snapbase+"_"+str(num).zfill(3)
	
		if (verbose):
			print("READHALO: reading file :", filename)
	
		head = snapHDF5.snapshot_header(filename)
		nloc = head.npart[parttype]

		if (nloc > off):
			if (verbose):
				print("READHALO: data")
			start = off
			if (nloc - off > left):
				count = left	
			else:
				count = nloc - off

			if (first==True):	
				data = snapHDF5.read_block(filename, block_name, parttype, slab_start=start, slab_len=count)
				first=False
			else:
				data = np.append(data, snapHDF5.read_block(filename, block_name, parttype, slab_start=start, slab_len=count), axis=0)

			left -= count
			off += count
		if (left==0):
			break
		off -= nloc

	return data
Example #34
0
#prints arguments from initial command line
print sys.argv
print snap

ext = '000'+str(snap)
ext = ext[-3:]

#loading in txt file with all particle files listed
partFileArray = np.genfromtxt(partFileTxt, dtype='str')
print partFileArray


#loading in catalog of halos
if this_task==0:
	cat = readsubfHDF5.subfind_catalog('/n/ghernquist/Illustris/Runs/'+run+'/output/', snap, keysel=['SubhaloPos', 'SubhaloMass', 'GroupFirstSub', 'SubhaloGrNr', 'Group_M_Crit200'] )
else:
	cat = None
print "Loaded In Halo Catalog"

groupFirstSub = cat.GroupFirstSub[np.where(cat.GroupFirstSub != -1)]
all_subhalo_nrs = np.arange(cat.SubhaloMass.shape[0])	#making an array of values 0 to the number of halos - 1 (index number array)
subhalo_masses = cat.SubhaloMass * 1e10 * (cc.h_little**-1.)	#correcting array of halo masses to be in units of Msun
groupNum = cat.SubhaloGrNr
groupMcrit200 = cat.Group_M_Crit200[np.where(cat.GroupFirstSub != -1)]*(cc.h_little**-1.)*(10.**10.)


#finding primary halos within mass range (e11.5-e12.5)
#groupFirstSub = groupFirstSub[(subhalo_masses[groupFirstSub] > (10.**11.5)) & (subhalo_masses[groupFirstSub] < (10.**12.5))]
groupFirstSub = groupFirstSub[(groupMcrit200 > (10.**11.)) & (groupMcrit200 < (10.**13.))]
print groupFirstSub
def calculate_halo(sub_id): #submit job to queue

	# I assume that the subhalo is the primary subhalo in its group
	sub_list = cat.GroupFirstSub
	grp_id = np.argmin( np.abs(sub_id-sub_list) )
	#grp_id = cat.SubhaloParent[sub_id]

	sub_partIDs = get_subhalo_ids(base,snap_num,sub_id)
	sub_pos = cat.SubhaloPos[sub_id]
	sub_mass = cat.SubhaloMass[sub_id]
	sub_vel = cat.SubhaloVel[sub_id]
	sub_Rvir = cat.Group_R_Crit200[grp_id]




	frame_num = 0
	for current_snap in snap_nums:
		# Find the halo in this snapshot that corresponds to the halo in our original snapshot:
		print "on snapshot number ",current_snap


		old_match_flag = new_match_flag
		old_subid = new_subid
		old_partIDs = new_partIDs
		old_subpos = new_subpos
		old_submass = new_submass
		old_subvel = new_subvel

		#new_snapname = base + "snapdir_"+str(current_snap).zfill(3)+"/snap_"+str(current_snap).zfill(3)
		new_snapname = base + "/snap_"+str(current_snap).zfill(3)
		new_cat = readsubfHDF5.subfind_catalog(base, current_snap)

		redshift = readsnapHDF5.snapshot_header(new_snapname).redshift
		scale_factor = 1./(1.+redshift)

		#readsnapHDF5.list_blocks(new_snapname+".hdf5")

		# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
		# MC tracer information:
		MC_ids = readsnapHDF5.read_block(new_snapname,"TFID",parttype=3) # May want to fix this for very large # of tracers (read in 1 snapshot at a time)
		MC_ind = np.in1d(MC_ids,gal_MC_ids,assume_unique=True) #This step takes a long time!
		MC_ids = 0.
		print "MC_ind.sum() ",MC_ind.sum()
		# now match the tracers with their parent particles:
		parent_ids = readsnapHDF5.read_block(new_snapname,"TRPI",parttype=3)[MC_ind]
		#parent_ids = np.unique(parent_ids)

		parent_ids.sort()
		(unique_parent_ids,unique_arg) = np.unique(parent_ids,return_index=True)
		foo = parent_ids.size-unique_arg[-1] # the number of reps for the very last unique entry in parent_ids

		rep_count = np.append(np.diff(unique_arg),foo)
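		# Worked mini-example of the rep_count construction (hedged, illustrative
		# values): for sorted parent_ids = [3, 3, 3, 7, 9, 9], np.unique with
		# return_index=True gives unique_arg = [0, 3, 4]; np.diff(unique_arg) =
		# [3, 1] and foo = 6 - 4 = 2, so rep_count = [3, 1, 2] -- the
		# multiplicity of each unique parent ID.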

		gas_ids = readsnapHDF5.read_block(new_snapname,"ID  ",parttype=0)
		gas_ind = np.in1d(gas_ids,unique_parent_ids,assume_unique=True)
		gas_ids = 0.
		gas_pos = readsnapHDF5.read_block(new_snapname,"POS ",parttype=0)
		pos1 = gas_pos[gas_ind]
		gas_pos = 0.
		gas_vel = readsnapHDF5.read_block(new_snapname,"VEL ",parttype=0)
		vel1 = gas_vel[gas_ind]
		gas_vel = 0.

		# now find which ones are stars (and get their properties):
		star_ids = readsnapHDF5.read_block(new_snapname,"ID  ",parttype=4)
		star_ind = np.in1d(star_ids,unique_parent_ids,assume_unique=True)
		star_ids = 0.
		star_pos = readsnapHDF5.read_block(new_snapname,"POS ",parttype=4)
		pos2 = star_pos[star_ind]
		star_pos = 0.
		star_vel = readsnapHDF5.read_block(new_snapname,"VEL ",parttype=4)
		vel2 = star_vel[star_ind]
		star_vel = 0.

		pos = np.concatenate((pos1,pos2))
		pos1 = 0.
		pos2 = 0.
		vel = np.concatenate((vel1,vel2))
		vel1 = 0.
		vel2 = 0.

		gal_MC_pos = np.repeat(pos,rep_count,axis=0)
		gal_MC_vel = np.repeat(vel,rep_count,axis=0)

		print "len(gal_MC_pos) ",len(gal_MC_pos)
		print "len(gal_MC_vel) ",len(gal_MC_vel)
		# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


		# Generate image:
		#generate_tracer_image()
		print "not generating image "
		# Generate savefile: # save: snap_num, # of vts, gal_vt_ids at z=0, sub_id at that snap, 
		# subhalo_pos at that snap, Rvir at that time, 
		filename = save_dir + str(current_snap).zfill(3)+".dat"
		f = open(filename,'wb')

		primary_halo_flag = np.uint32(primary_halo_flag)
		gal_MC_posx = gal_MC_pos[:,0]
		gal_MC_posy = gal_MC_pos[:,1]
		gal_MC_posz = gal_MC_pos[:,2]
		gal_MC_velx = gal_MC_vel[:,0]
		gal_MC_vely = gal_MC_vel[:,1]
		gal_MC_velz = gal_MC_vel[:,2]

		print "len(gal_MC_posx) ",len(gal_MC_posx)
		print "len(gal_MC_velz) ",len(gal_MC_velz)


		# Saving stuff to file:
		# general stuff
		current_snap.astype("uint32").tofile(f)
		redshift.astype("float64").tofile(f)
		# MC tracer data
		n_MC.astype("uint32").tofile(f)
		gal_MC_ids.astype("uint64").tofile(f)
		gal_MC_posx.astype("float64").tofile(f)
		gal_MC_posy.astype("float64").tofile(f)
		gal_MC_posz.astype("float64").tofile(f)
		gal_MC_velx.astype("float64").tofile(f)
		gal_MC_vely.astype("float64").tofile(f)
		gal_MC_velz.astype("float64").tofile(f)
		# halo data
		primary_halo_flag.astype("uint32").tofile(f)
		new_subid.astype("uint32").tofile(f)
		new_subpos.astype("float64").tofile(f)
		new_subvel.astype("float64").tofile(f)
		new_subRvir.astype("float64").tofile(f)
		if False:
			if primary_halo_flag == 1: pass
			elif primary_halo_flag == 0: 
				primary_halo_pos.astype("float64").tofile(f)
				primary_halo_vel.astype("float64").tofile(f)		
		
		f.close()

		frame_num = frame_num + 1
		time4 = time.time()

	print "done!"
	return