def get_offsets(cat, part_types=(0, 1, 4, 5), snap=None, run=None):
    """Return (GroupOffset, HaloOffset) particle-index offset tables.

    If pretabulated offset files exist for ``run``/``snap`` (classic
    Illustris layout first, then the TNG layout) they are read and
    returned directly; otherwise the tables are rebuilt from the group
    catalogue ``cat``.

    Parameters
    ----------
    cat : subfind catalogue with ngroups, nsubs, GroupLenType,
        GroupNsubs and SubhaloLenType attributes.
    part_types : iterable of particle-type columns to fill (default 0,1,4,5;
        changed from a mutable list default to a tuple — iteration only,
        so behaviour is unchanged).
    snap, run : optional snapshot number / run name used to locate
        pretabulated offset files.

    Returns
    -------
    (GroupOffset, HaloOffset) : two int64 arrays of shape (n, 6).
    """
    if snap and run:
        # Classic Illustris layout: separate group / subhalo offset files.
        group_file = ("/n/ghernquist/Illustris/Runs/%s/postprocessing/"
                      "offsets/snap_offsets_group_%s.hdf5" % (run, snap))
        halo_file = ("/n/ghernquist/Illustris/Runs/%s/postprocessing/"
                     "offsets/snap_offsets_subhalo_%s.hdf5" % (run, snap))
        if os.path.isfile(group_file) and os.path.isfile(halo_file):
            print("READSUBF: found pretabulated offsets to read")
            f = hdf5lib.OpenFile(group_file)
            group_offsets = hdf5lib.GetData(f, "Offsets")[:]
            f.close()
            f = hdf5lib.OpenFile(halo_file)
            halo_offsets = hdf5lib.GetData(f, "Offsets")[:]
            f.close()
            return np.array(group_offsets), np.array(halo_offsets)
        else:
            # TNG layout: one combined offsets file per snapshot.
            group_file = ("/n/hernquistfs3/IllustrisTNG/Runs/%s/postprocessing/"
                          "offsets/offsets_%s.hdf5" % (run, str(snap).zfill(3)))
            if os.path.isfile(group_file):
                f = hdf5lib.OpenFile(group_file)
                group_offsets = np.copy(hdf5lib.GetData(f, "Group/SnapByType"))
                halo_offsets = np.copy(hdf5lib.GetData(f, "Subhalo/SnapByType"))
                f.close()  # bug fix: this handle was previously leaked
                return group_offsets, halo_offsets
    # Fallback: rebuild the offset tables from the catalogue lengths.
    GroupOffset = np.zeros((cat.ngroups, 6), dtype="int64")
    HaloOffset = np.zeros((cat.nsubs, 6), dtype="int64")
    for parttype in part_types:
        print("Calculating offsets for PartType: %d" % parttype)
        k = 0
        for i in range(0, cat.ngroups):
            if i > 0:
                GroupOffset[i, parttype] = (GroupOffset[i - 1, parttype]
                                            + cat.GroupLenType[i - 1, parttype])
            if cat.GroupNsubs[i] > 0:
                # First subhalo of a group starts at the group's offset.
                HaloOffset[k, parttype] = GroupOffset[i, parttype]
                k += 1
                for j in range(1, cat.GroupNsubs[i]):
                    HaloOffset[k, parttype] = (HaloOffset[k - 1, parttype]
                                               + cat.SubhaloLenType[k - 1, parttype])
                    k += 1
        if k != cat.nsubs:
            print("READHALO: problem with offset table", k, cat.nsubs)
            sys.exit()
    return np.array(GroupOffset), np.array(HaloOffset)
def return_tags(filename, parttype=-1, verbose=False):
    """Return, for each particle type 0-5 present in the snapshot, the
    list of datablock tags stored for it.

    Bug fixes: the resolved ``curfilename`` is now actually opened
    (previously the unresolved ``filename`` was opened), and the Python-2
    ``iterator.next()`` protocol is replaced by a plain ``for`` loop.
    """
    if os.path.exists(filename):
        curfilename = filename
    elif os.path.exists(filename + ".hdf5"):
        curfilename = filename + ".hdf5"
    elif os.path.exists(filename + ".0.hdf5"):
        curfilename = filename + ".0.hdf5"
    else:
        print("[error] file not found : {:s} in return_tags".format(filename))
        sys.stdout.flush()
        sys.exit()
    all_blocks = []
    f = hdf5lib.OpenFile(curfilename)  # bug fix: previously opened `filename`
    for ptype in range(0, 6):
        this_block = []
        part_name = 'PartType' + str(ptype)
        if hdf5lib.Contains(f, "", part_name):
            for tag in datablocks:
                if hdf5lib.Contains(f, part_name, datablocks[tag][0]):
                    sys.stdout.flush()
                    this_block.append(tag)
            all_blocks.append(this_block)
    f.close()
    gc.collect()
    return all_blocks
def contains_block(filename, tag, parttype=-1, verbose=False):
    """Return True if any PartType group in the snapshot holds a
    datablock whose key matches ``tag`` (substring match).

    Bug fixes: the resolved ``curfilename`` is now actually opened
    (previously the unresolved ``filename`` was opened), and the Python-2
    ``iterator.next()`` protocol is replaced by a plain ``for`` loop.
    """
    if os.path.exists(filename):
        curfilename = filename
    elif os.path.exists(filename + ".hdf5"):
        curfilename = filename + ".hdf5"
    elif os.path.exists(filename + ".0.hdf5"):
        curfilename = filename + ".0.hdf5"
    else:
        print("[error] file not found : {:s} in contains_block ".format(filename))
        sys.stdout.flush()
        sys.exit()
    contains_flag = False
    f = hdf5lib.OpenFile(curfilename)  # bug fix: previously opened `filename`
    for ptype in range(0, 6):
        part_name = 'PartType' + str(ptype)
        if hdf5lib.Contains(f, "", part_name):
            for key in datablocks:
                if verbose:
                    print("check ", key, datablocks[key][0])
                    sys.stdout.flush()
                if hdf5lib.Contains(f, part_name, datablocks[key][0]):
                    if key.find(tag) > -1:
                        contains_flag = True
    f.close()
    gc.collect()
    return contains_flag
def subhalo_gas_kinematics(base, snap=135, which="v_5"):
    """Return the ``Subhalo/<which>`` gas-kinematics dataset for one snapshot."""
    path = (base + '/postprocessing/gas_kinematics/gas_kinematic_info_'
            + str(snap) + '.hdf5')
    print(path)
    handle = hdf5lib.OpenFile(path, mode='r')
    result = np.array(hdf5lib.GetData(handle, "Subhalo/" + which)[:])
    handle.close()
    return result
def subhalo_gas_z_grad(base, snap=135, which="GradMetallicity_5"):
    """Return the ``Subhalo/<which>`` gas-metallicity-gradient dataset for one snapshot."""
    path = (base + '/postprocessing/gas_metallicity/gas_metallicity_info_'
            + str(snap) + '.hdf5')
    print(path)
    handle = hdf5lib.OpenFile(path, mode='r')
    result = np.array(hdf5lib.GetData(handle, "Subhalo/" + which)[:])
    handle.close()
    return result
def mass_from_mergers(base, snap=135):
    """Return the StellarMassFromMergers array from the StellarAssembly catalogue."""
    tag = ('000' + str(snap))[-3:]  # zero-padded 3-digit snapshot tag
    handle = hdf5lib.OpenFile(
        base + '/postprocessing/StellarAssembly/galaxies_' + tag + '.hdf5',
        mode='r')
    result = np.array(hdf5lib.GetData(handle, "StellarMassFromMergers")[:])
    handle.close()
    return result
def subhalo_overdensity(base, snap=135):
    """Return the local environment overdensity ``delta`` for each subhalo."""
    tag = ('000' + str(snap))[-3:]  # zero-padded 3-digit snapshot tag
    handle = hdf5lib.OpenFile(
        base + '/postprocessing/environment/environment_' + tag + '.hdf5',
        mode='r')
    result = np.array(hdf5lib.GetData(handle, "delta")[:])
    handle.close()
    return result
def subhalo_offsets(snap=135, run='Illustris-1'):
    """Return the pretabulated subhalo particle offsets for ``run`` at ``snap``."""
    path = ('/n/ghernquist/Illustris/Runs/' + run +
            '/postprocessing/offsets/snap_offsets_subhalo_' + str(snap) + '.hdf5')
    handle = hdf5lib.OpenFile(path, mode='r')
    result = np.array(hdf5lib.GetData(handle, "Offsets")[:])
    handle.close()
    return result
def number_of_minor_mergers(base, snap=135):
    """Return the cumulative minor-merger counts from the MergerHistory catalogue."""
    tag = ('000' + str(snap))[-3:]  # zero-padded 3-digit snapshot tag
    handle = hdf5lib.OpenFile(
        base + '/postprocessing/MergerHistory/merger_history_' + tag + '.hdf5',
        mode='r')
    result = np.array(hdf5lib.GetData(handle, "NumMinorMergersTotal")[:])
    handle.close()
    return result
def mass_from_minor_mergers(base, snap=135):
    """Return stellar mass acquired via minor mergers from the MergerHistory catalogue."""
    tag = ('000' + str(snap))[-3:]  # zero-padded 3-digit snapshot tag
    handle = hdf5lib.OpenFile(
        base + '/postprocessing/MergerHistory/merger_history_' + tag + '.hdf5',
        mode='r')
    result = np.array(hdf5lib.GetData(handle, "StellarMassFromMinorMergers")[:])
    handle.close()
    return result
def subhalo_insitu_fraction(snap=135):
    """Return the in-situ stellar fraction per subhalo for Illustris-1."""
    tag = ('000' + str(snap))[-3:]  # zero-padded 3-digit snapshot tag
    handle = hdf5lib.OpenFile(
        '/n/ghernquist/Illustris/Runs/Illustris-1/postprocessing/InSituFraction/insitu_stellar_fraction_'
        + tag + '.hdf5',
        mode='r')
    result = np.array(hdf5lib.GetData(handle, "InSitu")[:])
    handle.close()
    return result
def subhalo_circularities(base, snap=135):
    """Return the flattened CircAbove05Frac circularity fractions per subhalo."""
    tag = ('000' + str(snap))[-3:]  # zero-padded 3-digit snapshot tag
    path = base + '/postprocessing/circularities/circularities_' + tag + '.hdf5'
    print(path)
    handle = hdf5lib.OpenFile(path, mode='r')
    result = np.array(hdf5lib.GetData(handle, "CircAbove05Frac")[:]).reshape(-1)
    handle.close()
    return result
def subhalo_stellar_age(snap=135):
    """Return ``stellar_age_inrad`` from the Illustris-1 galprop catalogue,
    or None when the catalogue file does not exist."""
    tag = ('000' + str(snap))[-3:]  # zero-padded 3-digit snapshot tag
    path = ('/n/ghernquist/Illustris/Runs/Illustris-1/postprocessing/galprop/galprop_'
            + tag + '.hdf5')
    if not os.path.exists(path):
        return None
    handle = hdf5lib.OpenFile(path, mode='r')
    result = np.array(hdf5lib.GetData(handle, "stellar_age_inrad")[:])
    handle.close()
    return result
def subhalo_stellar_vel_disp(base, snap=135, which="StellarVelDisp_HalfMassRad"):
    """Return the ``Subhalo/<which>`` stellar velocity dispersion array."""
    path = (base + '/postprocessing/stellar_vel_disp/stellar_vel_disp_'
            + str(snap) + '.hdf5')
    print(path)
    handle = hdf5lib.OpenFile(path, mode='r')
    result = np.array(hdf5lib.GetData(handle, "Subhalo/" + which)[:])
    handle.close()
    return result
def subhalo_petrosian_radius(snap=135):
    """Return the per-subhalo petrosian radius, median over the four cameras,
    or None if the morphology file is missing.

    Bug fix: camera 0 was read into ``data0`` but never stored in the
    stacked array, so the median silently included a row of zeros.

    NOTE(review): the ``snap`` parameter is unused — the file path is
    hard-coded to snapshot 135; confirm whether other snapshots exist.
    """
    file = '/n/ghernquist/Illustris/Runs/Illustris-1/postprocessing/PhotometricMorphologies/nonparmorphs_iSDSS_135.hdf5'
    if os.path.exists(file):
        f = hdf5lib.OpenFile(file, mode='r')
        data0 = np.array(hdf5lib.GetData(f, "RP_cam0")[:])
        data = np.zeros((4, data0.shape[0]))
        data[0, :] = data0  # bug fix: row 0 was previously left all-zero
        data[1, :] = np.array(hdf5lib.GetData(f, "RP_cam1")[:])
        data[2, :] = np.array(hdf5lib.GetData(f, "RP_cam2")[:])
        data[3, :] = np.array(hdf5lib.GetData(f, "RP_cam3")[:])
        data = np.median(data, axis=0)
        f.close()
    else:
        data = None
    return data
def __init__(self, basedir, skipfac, snapnum, filenum=0, tree_start=-1,
             tree_num=-1, keysel=None):
    """Load merger trees from one tree file into ``self.trees``.

    Each entry of ``self.trees`` is a dict mapping datablock name to its
    array for that tree.  ``keysel`` restricts which datablocks are read;
    None means all blocks listed in ``mergertree_datablocks``.

    Bug fix: the original bound a local variable named ``list`` and then
    called the builtin ``list(...)`` on the keysel=None path, raising
    TypeError.  The local has been renamed.
    """
    self.filebase = basedir + "trees_sf" + str(skipfac) + "_" + str(snapnum).zfill(3)
    self.basedir = basedir
    self.filenum = filenum
    filename = self.filebase + "." + str(filenum) + ".hdf5"
    f = hdf5lib.OpenFile(filename)
    self.NtreesPerFile = hdf5lib.GetAttr(f, "Header", "NtreesPerFile")
    self.NumberOfOutputFiles = hdf5lib.GetAttr(f, "Header", "NumberOfOutputFiles")
    self.ParticleMass = hdf5lib.GetAttr(f, "Header", "ParticleMass")
    if self.ParticleMass == 0:
        print("WARNING: ParticleMass = 0, needed for merger rate calculation")
    self.TreeNHalos = hdf5lib.GetData(f, "Header/TreeNHalos")[:]
    self.TotNsubhalos = hdf5lib.GetData(f, "Header/TotNsubhalos")[:]
    self.Redshifts = hdf5lib.GetData(f, "Header/Redshifts")[:]
    if (tree_start == -1) | (tree_num == -1):
        tree_start = 0
        tree_num = self.NtreesPerFile
    self.trees = np.empty(tree_num - tree_start, dtype='object')
    self.tree_start = tree_start
    self.tree_num = tree_num
    for ntree in range(tree_start, tree_start + tree_num):
        blocks = []  # renamed from `list` (shadowed the builtin)
        if keysel is None:
            wanted = list(mergertree_datablocks.keys())
        else:
            # only read datablocks actually present in this tree
            wanted = [k for k in keysel
                      if hdf5lib.Contains(f, "Tree" + str(ntree), k)]
        for datablock in wanted:
            data = hdf5lib.GetData(f, "Tree" + str(ntree) + "/" + datablock)[:]
            blocks.append((datablock, data))
        self.trees[ntree - tree_start] = dict(blocks)
    f.close()
def list_blocks(self, parttype=-1, verbose=False):
    """Print every subfind datablock name, flagging the ones present in
    the Subhalo group of ``self.firstfile``.

    Py3 fix: replaces the Python-2 ``iterator.next()`` loop with a plain
    ``for`` over ``sub_datablocks``.
    """
    curfile = self.firstfile
    if not os.path.exists(curfile):
        print("file not found:", curfile)
        sys.exit()
    f = hdf5lib.OpenFile(curfile)
    for key in sub_datablocks:
        print(key)
        if hdf5lib.Contains(f, "Subhalo", key):
            print("Subhalo: " + key)
            sys.stdout.flush()
    f.close()
def __init__(self, basedir, snapnum, keysel=None, long_ids=False):
    """Load the requested galprop datasets as attributes of this object.

    ``keysel`` limits which datasets are read; None means every entry of
    ``galprop_datablocks``.  Prints a message when the catalogue file is
    absent.
    """
    id_type = np.uint64 if long_ids else np.uint32
    vardict = {}
    if keysel is None:
        keysel = galprop_datablocks.items()
    file = naming.return_galprop_file(basedir, snapnum)
    if os.path.exists(file):
        handle = hdf5lib.OpenFile(file, mode='r')
        for key in keysel:
            if hdf5lib.Contains(handle, "", key):
                spec = galprop_datablocks[key]
                type = spec[0]
                dim = spec[1]
                vars(self)[key] = np.array(hdf5lib.GetData(handle, key)[:])
        handle.close()
    else:
        print("Galprop File Not Found...")
def contains_block(filename, tag, parttype=-1, verbose=False):
    """Return True if any PartType group of ``filename`` holds a datablock
    whose key matches ``tag`` (substring match).

    Py3 fix: replaces the Python-2 ``iterator.next()`` loop with a plain
    ``for`` over ``datablocks``.
    """
    contains_flag = False
    f = hdf5lib.OpenFile(filename)
    for ptype in range(0, 6):
        part_name = 'PartType' + str(ptype)
        if hdf5lib.Contains(f, "", part_name):
            for key in datablocks:
                if verbose:
                    print("check ", key, datablocks[key][0])
                    sys.stdout.flush()
                if hdf5lib.Contains(f, part_name, datablocks[key][0]):
                    if key.find(tag) > -1:
                        contains_flag = True
    f.close()
    return contains_flag
def list_blocks(filename, parttype=-1, verbose=False):
    """Print the datablocks present for each particle type in a snapshot file.

    Bug fixes: the Contains() check referenced an undefined name
    ``part_name_`` (NameError); and the Python-2 ``iterator.next()`` loop
    is replaced by a plain ``for`` over ``datablocks``.
    """
    f = hdf5lib.OpenFile(filename)
    for ptype in range(0, 6):
        part_name = 'PartType' + str(ptype)
        if hdf5lib.Contains(f, "", part_name):  # bug fix: was `part_name_`
            print("Parttype contains : ", ptype)
            print("-------------------")
            sys.stdout.flush()
            for key in datablocks:
                if verbose:
                    print("check ", key, datablocks[key][0])
                    sys.stdout.flush()
                if hdf5lib.Contains(f, part_name, datablocks[key][0]):
                    print(key, datablocks[key][0])
                    sys.stdout.flush()
    f.close()
def __init__(self, basedir, snapnum, long_ids=False, double_output=False,
             grpcat=True, subcat=True, name="fof_subhalo_tab", keysel=None):
    """Load a SUBFIND group catalogue, concatenating all file chunks.

    Group and Subhalo datablocks become attributes of this object
    (``self.<key>``).  ``keysel`` restricts which datablocks are read;
    None means all entries of grp_datablocks / sub_datablocks.

    Fixes relative to the original:
      * py3: ``print`` statement replaced by the print() function.
      * Subhalo INT64 datablocks were allocated as int32 (copy/paste
        bug); they are now allocated as int64.
    """
    self.filebase = (basedir + "/groups_" + str(snapnum).zfill(3) + "/" +
                     name + "_" + str(snapnum).zfill(3) + ".")
    self.id_type = np.uint64 if long_ids else np.uint32
    self.double_type = np.float32 if double_output else np.float64

    # Catalogue type tag -> numpy dtype used for allocation.
    dtype_map = {'FLOAT': self.double_type, 'INT': np.int32,
                 'INT64': np.int64, 'ID': self.id_type}

    filenum = 0
    doneflag = False
    skip_gr = 0   # groups copied so far
    skip_sub = 0  # subhalos copied so far
    vardict = {}
    while not doneflag:
        curfile = self.filebase + str(filenum) + ".hdf5"
        if not os.path.exists(curfile):
            # Fall back to a single-file catalogue layout.
            self.filebase = basedir + "/" + name + "_" + str(snapnum).zfill(3)
            curfile = self.filebase + ".hdf5"
            if not os.path.exists(curfile):
                print("file not found:", curfile)  # py3 fix: was a print statement
                sys.exit()
        f = hdf5lib.OpenFile(curfile)
        ngroups = hdf5lib.GetAttr(f, "Header", "Ngroups_ThisFile")
        nsubs = hdf5lib.GetAttr(f, "Header", "Nsubgroups_ThisFile")
        nfiles = hdf5lib.GetAttr(f, "Header", "NumFiles")

        def _keys(blocks):
            # keysel=None means "every datablock in the table".
            return blocks.keys() if keysel is None else keysel

        def _alloc(hgroup, blocks, count):
            # Pre-allocate one attribute array per datablock present in the file.
            for key in _keys(blocks):
                if hdf5lib.Contains(f, hgroup, key):
                    type_tag = blocks[key][0]
                    dim = blocks[key][1]
                    vars(self)[key] = np.empty(
                        count, dtype=np.dtype((dtype_map[type_tag], dim)))
                    vardict[key] = vars(self)[key]

        def _read(hgroup, blocks, count, skip):
            # Copy this file's slice of every present datablock into place.
            for key in _keys(blocks):
                if hdf5lib.Contains(f, hgroup, key):
                    dim = blocks[key][1]
                    a = hdf5lib.GetData(f, hgroup + "/" + key)
                    if dim == 1:
                        vardict[key][skip:skip + count] = a[:]
                    else:
                        for d in range(0, dim):
                            vardict[key][skip:skip + count, d] = a[:, d]

        if filenum == 0:
            # Totals come from the first file's header; allocate once.
            self.ngroups = hdf5lib.GetAttr(f, "Header", "Ngroups_Total")
            self.nids = hdf5lib.GetAttr(f, "Header", "Nids_Total")
            self.nsubs = hdf5lib.GetAttr(f, "Header", "Nsubgroups_Total")
            if grpcat:
                _alloc("Group", grp_datablocks, self.ngroups)
            if subcat:
                _alloc("Subhalo", sub_datablocks, self.nsubs)

        if grpcat and ngroups > 0:
            _read("Group", grp_datablocks, ngroups, skip_gr)
            skip_gr += ngroups
        if subcat and nsubs > 0:
            _read("Subhalo", sub_datablocks, nsubs, skip_sub)
            skip_sub += nsubs

        f.close()
        filenum += 1
        if filenum == nfiles:
            doneflag = True
def openfile(filename, mode="w"):
    """Open ``filename`` through hdf5lib and return the handle (default mode 'w')."""
    return hdf5lib.OpenFile(filename, mode=mode)
def check_file(filename):
    """Open and immediately close ``filename`` to verify it is a readable HDF5 file."""
    hdf5lib.OpenFile(filename).close()
def read_block_single_file(filename, block_name, dim2, parttype=-1,
                           no_mass_replicate=False, fill_block_name="",
                           slab_start=-1, slab_len=-1, verbose=False,
                           ids=None):
    """Read one datablock from a single snapshot file.

    Returns ``[data, True]`` on success or ``[0, False]`` when the
    requested particle type has no particles in this file.

    Fixes / additions:
      * py3: ``long`` (removed in Python 3) replaced by ``int``.
      * new optional ``ids`` parameter (appended, so all existing
        positional calls still work): a sorted index array selecting a
        subset of particles; honoured only for parttype >= 0.  The
        caller in read_block already passed ``ids=...``, which previously
        raised TypeError.
    """
    if verbose:
        print("[single] reading file : ", filename)
        print("[single] reading : ", block_name)
        sys.stdout.flush()

    head = snapshot_header(filename)
    npart = head.npart
    massarr = head.massarr
    nall = head.nall
    filenum = head.filenum
    doubleflag = head.double  #GADGET-2 change
    #doubleflag = 0            #GADGET-2 change

    # Nothing to read when the requested type is empty.
    if parttype != -1:
        if head.npart[parttype] == 0:
            return [0, False]
    else:
        if head.npart.sum() == 0:
            return [0, False]
    del head

    # Partial access: an explicit index list wins over a slab; a slab
    # wins over a full read.
    if ids is not None and not isinstance(ids, int):
        data_slice = np.asarray(ids)
    elif (slab_start != -1) & (slab_len != -1):
        data_slice = slice(slab_start, slab_start + slab_len)
    else:
        data_slice = slice(None, None, 1)

    if verbose:
        print("[single] data slice: ", data_slice)
        sys.stdout.flush()

    f = hdf5lib.OpenFile(filename)

    # Read a specific particle type (parttype >= 0, non-default).
    if parttype >= 0:
        if verbose:
            print("[single] parttype : ", parttype)
            sys.stdout.flush()
        if (block_name == "Masses") & (npart[parttype] > 0) & (massarr[parttype] > 0):
            # Constant-mass type: replicate the header mass instead of reading.
            if verbose:
                print("[single] replicate mass block")
                sys.stdout.flush()
            ret_val = np.repeat(massarr[parttype], npart[parttype])[data_slice]
        else:
            part_name = 'PartType' + str(parttype)
            ret_val = hdf5lib.GetData(f, part_name + "/" + block_name)[data_slice]
        if verbose:
            print("[single] read particles (total) : ", ret_val.shape[0] / dim2)
            sys.stdout.flush()

    # Read all particle types (parttype = -1, default).
    if parttype == -1:
        first = True
        dim1 = int(0)  # py3 fix: was long(0)
        for parttype in range(0, 6):
            part_name = 'PartType' + str(parttype)
            if hdf5lib.Contains(f, "", part_name):
                if verbose:
                    print("[single] parttype : ", parttype)
                    print("[single] massarr : ", massarr)
                    print("[single] npart : ", npart)
                    sys.stdout.flush()
                # Replicate mass block per default (unless no_mass_replicate).
                if ((block_name == "Masses") & (npart[parttype] > 0) &
                        (massarr[parttype] > 0) & (no_mass_replicate == False)):
                    if verbose:
                        print("[single] replicate mass block")
                        sys.stdout.flush()
                    data = np.repeat(massarr[parttype], npart[parttype])
                    dim1 += int(data.shape[0])
                    if first:
                        ret_val = data
                        first = False
                    else:
                        ret_val = np.append(ret_val, data)
                    if verbose:
                        print("[single] read particles (total) : ", ret_val.shape[0] / dim2)
                        sys.stdout.flush()
                    if doubleflag == 0:
                        ret_val = ret_val.astype("float32")
                # Fill fill_block_name with zeros if requested, the particle
                # type is present, and the block is not stored in the file.
                if ((block_name == fill_block_name) & (block_name != "Masses") &
                        (npart[parttype] > 0) &
                        (hdf5lib.Contains(f, part_name, block_name) == False)):
                    if verbose:
                        print("[single] replicate block name", fill_block_name)
                        sys.stdout.flush()
                    data = np.repeat(0.0, npart[parttype] * dim2)
                    dim1 += int(data.shape[0])
                    if first:
                        ret_val = data
                        first = False
                    else:
                        ret_val = np.append(ret_val, data)
                    if verbose:
                        print("[single] read particles (total) : ", ret_val.shape[0] / dim2)
                        sys.stdout.flush()
                    if doubleflag == 0:
                        ret_val = ret_val.astype("float32")
                # Default: just read the block.
                if hdf5lib.Contains(f, part_name, block_name):
                    data = hdf5lib.GetData(f, part_name + "/" + block_name)[:]
                    dim1 += int(data.shape[0])
                    if first:
                        ret_val = data
                        first = False
                    else:
                        ret_val = np.append(ret_val, data)
                    if verbose:
                        print("[single] read particles (total) : ", ret_val.shape[0] / dim2)
                        sys.stdout.flush()
        if (dim1 > 0) & (dim2 > 1):
            ret_val = ret_val.reshape(dim1, dim2)
            if verbose:
                print("[single] reshape done: ", ret_val.shape)
                sys.stdout.flush()

    f.close()
    return [ret_val, True]
def __init__(self, *args, **kwargs):
    """Snapshot header: construct either from a file (one positional
    argument, a snapshot filename) or from keyword arguments.

    Bug fix: defaults for the keyword path were applied with ``== None``,
    which raises ValueError when the caller supplies a numpy array
    (elementwise comparison); ``is None`` is used instead.
    """
    if len(args) == 1:
        # --- read the header from an HDF5 snapshot file ---
        filename = args[0]
        if os.path.exists(filename):
            curfilename = filename
        elif os.path.exists(filename + ".hdf5"):
            curfilename = filename + ".hdf5"
        elif os.path.exists(filename + ".0.hdf5"):
            curfilename = filename + ".0.hdf5"
        else:
            print("[error] file not found : ", filename)
            sys.stdout.flush()
            sys.exit()
        f = hdf5lib.OpenFile(curfilename)
        self.npart = hdf5lib.GetAttr(f, "Header", "NumPart_ThisFile")
        self.nall = hdf5lib.GetAttr(f, "Header", "NumPart_Total")
        self.nall_highword = hdf5lib.GetAttr(f, "Header", "NumPart_Total_HighWord")
        self.massarr = hdf5lib.GetAttr(f, "Header", "MassTable")
        self.time = hdf5lib.GetAttr(f, "Header", "Time")
        self.redshift = hdf5lib.GetAttr(f, "Header", "Redshift")
        self.boxsize = hdf5lib.GetAttr(f, "Header", "BoxSize")
        self.filenum = hdf5lib.GetAttr(f, "Header", "NumFilesPerSnapshot")
        self.omega0 = hdf5lib.GetAttr(f, "Header", "Omega0")
        self.omegaL = hdf5lib.GetAttr(f, "Header", "OmegaLambda")
        self.hubble = hdf5lib.GetAttr(f, "Header", "HubbleParam")
        self.sfr = hdf5lib.GetAttr(f, "Header", "Flag_Sfr")
        self.cooling = hdf5lib.GetAttr(f, "Header", "Flag_Cooling")
        self.stellar_age = hdf5lib.GetAttr(f, "Header", "Flag_StellarAge")
        self.metals = hdf5lib.GetAttr(f, "Header", "Flag_Metals")
        self.feedback = hdf5lib.GetAttr(f, "Header", "Flag_Feedback")
        self.double = hdf5lib.GetAttr(f, "Header", "Flag_DoublePrecision")  #GADGET-2 change
        f.close()
    else:
        # --- construct from keyword arguments, filling in defaults ---
        defaults = {
            "npart": np.array([0, 0, 0, 0, 0, 0], dtype="int32"),
            "nall": np.array([0, 0, 0, 0, 0, 0], dtype="uint32"),
            "nall_highword": np.array([0, 0, 0, 0, 0, 0], dtype="uint32"),
            "massarr": np.array([0, 0, 0, 0, 0, 0], dtype="float64"),
            "time": np.array([0], dtype="float64"),
            "redshift": np.array([0], dtype="float64"),
            "boxsize": np.array([0], dtype="float64"),
            "filenum": np.array([1], dtype="int32"),
            "omega0": np.array([0], dtype="float64"),
            "omegaL": np.array([0], dtype="float64"),
            "hubble": np.array([0], dtype="float64"),
            "sfr": np.array([0], dtype="int32"),
            "cooling": np.array([0], dtype="int32"),
            "stellar_age": np.array([0], dtype="int32"),
            "metals": np.array([0], dtype="int32"),
            "feedback": np.array([0], dtype="int32"),
            "double": np.array([0], dtype="int32"),
        }
        for name, default in defaults.items():
            value = kwargs.get(name)
            if value is None:  # `is None` is array-safe, `== None` was not
                value = default
            setattr(self, name, value)
def read_insitu():
    """Return the InSitu flags for Illustris L75n1820FP stars at snapshot 135."""
    path = "/n/ghernquist/vrodrigu/StellarAssembly/output/Illustris/L75n1820FP/stars_135.hdf5"
    handle = hdf5lib.OpenFile(path, mode='r')
    result = np.array(hdf5lib.GetData(handle, "InSitu")[:])
    handle.close()
    return result
def read_block(filename, block, parttype=-1, no_mass_replicate=False,
               fill_block="", slab_start=-1, slab_len=-1, ids=-1,
               verbose=False, multiple_files=False):
    """Read a named datablock from a (possibly multi-file) snapshot.

    Supports three access modes: full read, slab (``slab_start``/
    ``slab_len``), and a sorted id list (``ids``); the latter two require
    a specific ``parttype``.

    Py3 fixes: ``dict.has_key`` -> ``in``; ``long`` -> ``int``; the no-op
    ``npart_all.astype(long)`` (result was discarded) removed.
    """
    # Resolve the actual file name; ".0.hdf5" implies a multi-file snapshot.
    if os.path.exists(filename):
        curfilename = filename
    elif os.path.exists(filename + ".hdf5"):
        curfilename = filename + ".hdf5"
    elif os.path.exists(filename + ".0.hdf5"):
        curfilename = filename + ".0.hdf5"
        multiple_files = True
    else:
        print("[error] file not found : {:s} in 'read_block' ".format(filename))
        sys.stdout.flush()
        sys.exit()

    if verbose:
        print("reading block : ", block)
        sys.stdout.flush()

    if parttype not in [-1, 0, 1, 2, 3, 4, 5]:
        print("[error] wrong parttype given")
        sys.stdout.flush()
        sys.exit()

    slabflag = False
    if (slab_start != -1) | (slab_len != -1):
        slabflag = True
        if parttype == -1:
            print("[error] slabs only supported for specific parttype")
            sys.stdout.flush()
            sys.exit()

    idsflag = False
    if type(ids) != int:  # a non-int `ids` means an index list was given
        idsflag = True
        ids = np.array(ids)
        if parttype == -1:
            print("[error] id list only supported for specific parttype")
            sys.stdout.flush()
            sys.exit()
        if np.sum(ids == np.sort(ids)) < len(ids):
            print("[error] input ids not sorted. must be in order!")
            return

    if verbose:
        print(curfilename)
    head = snapshot_header(curfilename)
    filenum = head.filenum
    npart_all = np.array(head.nall)
    highword = head.nall_highword

    # Determine the length of the output array.
    if idsflag:
        length = len(ids)
    if slabflag:
        length = slab_len
    if not idsflag and not slabflag:
        if parttype != -1:
            length = head.nall[parttype]
            if highword[parttype] == 1:
                # Counts above 2^32 carry the extra bit in nall_highword.
                length += 2**32
        else:
            length = head.nall.sum()
    if verbose:
        print("Length of data allocation:", length)

    if block in datablocks:  # py3 fix: was datablocks.has_key(block)
        block_name = datablocks[block][0]
        dim2 = datablocks[block][1]
        first = True
        if verbose:
            print("Reading HDF5 : ", block_name)
            print("Data dimension : ", dim2)
            print("Multiple file : ", multiple_files)
            print("Slab data : ", slabflag)
            sys.stdout.flush()
    else:
        print("[error] Block type ", block, "not known!")
        sys.stdout.flush()
        sys.exit()

    fill_block_name = ""
    if fill_block != "":
        if fill_block in datablocks:  # py3 fix: was has_key
            fill_block_name = datablocks[fill_block][0]
            dim2 = datablocks[fill_block][1]
            if verbose:
                print("Block filling active : ", fill_block_name)
                sys.stdout.flush()

    # Determine the element dtype by peeking at the first file holding the block.
    alloc_type = None
    if not multiple_files:
        filepaths = [curfilename]
    else:
        filepaths = [filename + "." + str(subfile) + ".hdf5"
                     for subfile in np.arange(filenum)]
    for filepath in filepaths:
        g = hdf5lib.OpenFile(filepath)
        if parttype == -1:
            for ptype in range(0, 6):
                try:
                    contains = hdf5lib.Contains(g, 'PartType' + str(ptype), block_name)
                except Exception:
                    contains = False
                if contains:
                    alloc_type = str(hdf5lib.GetData(
                        g, 'PartType' + str(ptype) + '/' + block_name)[0:1].dtype)
                    break
        else:
            try:
                contains = hdf5lib.Contains(g, 'PartType' + str(parttype), block_name)
            except Exception:
                contains = False
            if contains:
                alloc_type = str(hdf5lib.GetData(
                    g, 'PartType' + str(parttype) + '/' + block_name)[0:1].dtype)
        g.close()
        gc.collect()
        if contains == True:
            break

    # Block not stored anywhere: IDs and masses have known fallbacks.
    if alloc_type == None:
        if block == "ID ":
            alloc_type = np.uint32
        elif block == "MASS":
            alloc_type = np.float32  # default to float32 for MASS
        else:
            print("[error] block : ", block, "of parttype : ", parttype, "not found")
            sys.stdout.flush()
            sys.exit()

    if dim2 > 1:
        ret_val = np.ndarray((length, dim2), alloc_type)
    else:
        ret_val = np.ndarray((length,), alloc_type)
    if verbose:
        print("Allocated array")

    if multiple_files:
        if slabflag == False and idsflag == False:
            # Plain full read: concatenate every sub-file.
            first = True
            dim1 = int(0)  # py3 fix: was long(0)
            for num in range(0, filenum):
                curfilename = filename + "." + str(num) + ".hdf5"
                if verbose:
                    print("Reading file : ", num, curfilename)
                    sys.stdout.flush()
                data, succ = read_block_single_file(
                    curfilename, block_name, dim2, parttype, no_mass_replicate,
                    fill_block_name, slab_start, slab_len, verbose=False)
                if succ == True:
                    if dim2 > 1:
                        ret_val[dim1:dim1 + len(data), :] = data
                    else:
                        ret_val[dim1:dim1 + len(data)] = data
                    dim1 += int(data.shape[0])
                if verbose:
                    if succ:
                        print("Read particles (total) : ", ret_val.shape[0])
                    else:
                        print("Read particles (total) : none")
                    sys.stdout.flush()

        # Reading specified particle indices.
        if idsflag == True:
            dim1 = int(0)
            for num in range(0, filenum):
                curfilename = filename + "." + str(num) + ".hdf5"
                head = snapshot_header(curfilename)
                nloc = head.npart[parttype]
                low = ids[0]    # first particle to read
                high = ids[-1]  # last particle to read
                if nloc > low:  # something to read in this file
                    toread = ids[ids < nloc]  # subset of ids still in this file
                    if verbose:
                        print("Reading file : ", num, curfilename)
                        sys.stdout.flush()
                    data, succ = read_block_single_file(
                        curfilename, block_name, dim2, parttype,
                        no_mass_replicate, fill_block_name, ids=toread,
                        verbose=verbose)
                    if succ == True:
                        if dim2 > 1:
                            ret_val[dim1:dim1 + len(data), :] = data
                        else:
                            ret_val[dim1:dim1 + len(data)] = data
                        dim1 += data.shape[0]
                    if verbose:
                        if succ:
                            print("Read particles (total) : ", ret_val.shape[0])
                        else:
                            print("Read particles (total) : none")
                        sys.stdout.flush()
                # Shift ids into the next file's local index space.
                ids -= nloc
                ids = ids[ids >= 0]  # only keep particles not yet read
                if len(ids) == 0 or high < 0:
                    break

        # Reading a contiguous slab.
        if slabflag == True:
            off = slab_start
            left = slab_len
            first = True
            dim1 = int(0)
            for num in range(0, filenum):
                curfilename = filename + "." + str(num) + ".hdf5"
                head = snapshot_header(curfilename)
                nloc = head.npart[parttype]
                if nloc > off:
                    start = off
                    if nloc - off > left:
                        count = left
                    else:
                        count = nloc - off
                    if verbose:
                        print("Reading file : ", num, curfilename, start, count)
                        sys.stdout.flush()
                    data, succ = read_block_single_file(
                        curfilename, block_name, dim2, parttype,
                        no_mass_replicate, fill_block_name,
                        slab_start=start, slab_len=count, verbose=verbose)
                    if succ == True:
                        if dim2 > 1:
                            ret_val[dim1:dim1 + len(data), :] = data
                        else:
                            ret_val[dim1:dim1 + len(data)] = data
                        dim1 += data.shape[0]
                    if verbose:
                        if succ:
                            print("Read particles (total) : ", ret_val.shape[0])
                        else:
                            print("Read particles (total) : none")
                        sys.stdout.flush()
                    left -= count
                    off += count
                    if left == 0:
                        break
                off -= nloc

        if verbose:
            print("all partial files read in")
            sys.stdout.flush()
    else:
        ret_val, succ = read_block_single_file(
            curfilename, block_name, dim2, parttype, no_mass_replicate,
            fill_block_name, slab_start, slab_len, verbose)

    return ret_val
def __init__(self, res, vel, snapnum):
    """Compute ellipsoid-based angular-momentum (spin) diagnostics for FOF halos.

    Reads a GasOnly_FOF group-ordered snapshot for the run selected by
    (res, vel), then, for every halo with more than 100 gas particles,
    measures the gas and DM angular momentum inside a precomputed
    ellipsoid and normalizes it to a fiducial j200 (Barkana & Loeb 2001
    style).  Results are stored on self (j200gas, j200dm, j200, gasfrac,
    totmass, gasmass, rmax, rmin, closestm200, rclosest, R200dm).

    Parameters
    ----------
    res : str
        Box/resolution tag, one of "1.12Mpc", "1.4Mpc", "2.8Mpc"
        (selects the run directory and the *_s_res suffix of the globals).
    vel : str
        Velocity tag, "Sig0" or "11.8kms".
    snapnum : int or str
        Snapshot number; coerced with int() below.

    NOTE(review): this method reads module-level globals via exec string
    templates (cm_*, rotation_*, radii_*, mDM_*, DMindices_*, M200dm_*,
    Rmin_*, R200dm_*).  exec rebinding function locals only works under
    Python 2 (consistent with long()/iter.next() elsewhere in this file);
    under Python 3 the assignments would be silently lost.
    """
    #self.res = res
    #self.vel = vel
    #self.snapnum = snapnum
    # Map the human-readable tags onto the suffixes used by the exec'd
    # global-variable names below.  NOTE(review): an unrecognized res/vel
    # leaves s_res/s_vel unbound and the exec calls below will NameError.
    if res == "1.12Mpc":
        s_res = '112Mpc'
    elif res == "1.4Mpc":
        s_res = '14Mpc'
    if vel == "Sig0":
        s_vel = "Sig0"
    elif vel == "11.8kms":
        s_vel = '118kms'
    snapnum = int(snapnum)
    # Run output directory and the two snapshot bases we read from.
    filename = "/n/hernquistfs3/mvogelsberger/GlobularClusters/InterfaceWArepo_All_" + res + '_' + vel + "/output/"
    filename2 = filename + "GasOnly_FOF"  #Used for readsubfHDF5
    ########## CHANGED FILENAME3 TO GROUPORDERED IN GAS ONLY
    filename3 = filename2 + "/snap-groupordered_" + str(snapnum).zfill(
        3)  #Used for hdf5lib, snapHDF5
    #### Not sure if this works with change but don't care about 2.8
    if res == '2.8Mpc':
        filename3 = filename + "snapdir_" + str(snapnum).zfill(
            3) + "/snap_" + str(snapnum).zfill(3)

    #Units (Gadget/Arepo-style code units: kpc/h, 1e10 Msun/h, km/s)
    GRAVITY_cgs = 6.672e-8
    UnitLength_in_cm = 3.085678e21  # code length unit in cm/h
    UnitMass_in_g = 1.989e43  # code mass unit in g/h
    UnitVelocity_in_cm_per_s = 1.0e5
    UnitTime_in_s = UnitLength_in_cm / UnitVelocity_in_cm_per_s
    UnitDensity_in_cgs = UnitMass_in_g / np.power(UnitLength_in_cm, 3)
    UnitPressure_in_cgs = UnitMass_in_g / UnitLength_in_cm / np.power(
        UnitTime_in_s, 2)
    UnitEnergy_in_cgs = UnitMass_in_g * np.power(
        UnitLength_in_cm, 2) / np.power(UnitTime_in_s, 2)
    # Newton's constant in code units.
    GCONST = GRAVITY_cgs / np.power(
        UnitLength_in_cm, 3) * UnitMass_in_g * np.power(UnitTime_in_s, 2)
    # 3 H0^2 / (8 pi G) with H0 = 0.1 h km/s/kpc.
    critical_density = 3.0 * .1 * .1 / 8.0 / np.pi / GCONST  #.1 is for 1/Mpc to 1/kpc, also in units of h^2

    # Snapshot header + cosmology attributes.  'header' itself is unused
    # after this point; kept as in the original.
    header = snap.snapshot_header(filename3)
    if res == "2.8Mpc":
        fs = hdf5lib.OpenFile(filename3 + ".0.hdf5")
    else:
        fs = hdf5lib.OpenFile(filename3 + ".hdf5")
    red = hdf5lib.GetAttr(fs, "Header", "Redshift")
    atime = hdf5lib.GetAttr(fs, "Header", "Time")  # scale factor a
    boxSize = hdf5lib.GetAttr(fs, "Header", "BoxSize")
    boxSize *= atime  #convert from ckpc/h to kpc/h
    Omega0 = hdf5lib.GetAttr(fs, "Header", "Omega0")
    OmegaLambda = hdf5lib.GetAttr(fs, "Header", "OmegaLambda")
    fs.close()
    # SUBFIND catalog for the gas-only FOF run.
    cat = readsubfHDF5.subfind_catalog(filename2, snapnum)
    Omega_a = Omega0 / (Omega0 + OmegaLambda * atime * atime * atime)
    critical_density *= (Omega0 / Omega_a)
    r200 = cat.Group_R_Crit200
    r200 *= atime  #convert from ckpc/h to kpc/h
    m200 = cat.Group_M_Crit200
    haloCMvel = cat.GroupVel
    haloCMvel *= 1. / atime  #convert from km/s/a to km/s
    haloPos = cat.GroupPos
    haloPos *= atime  #convert from ckpc/h to kpc/h

    #Read in particles
    #read in all simulation masses to calculate cosmic baryon fraction
    massgassim = snap.read_block(filename + "snap_" + str(snapnum).zfill(3),
                                 "MASS",
                                 parttype=0)
    massdmsim = snap.read_block(filename + "snap_" + str(snapnum).zfill(3),
                                "MASS",
                                parttype=1)
    massgas = snap.read_block(filename3, "MASS", parttype=0)
    massdm = snap.read_block(filename3, "MASS", parttype=1)
    posgas = snap.read_block(filename3, "POS ", parttype=0)
    posdm = snap.read_block(filename3, "POS ", parttype=1)
    velgas = snap.read_block(filename3, "VEL ", parttype=0)
    veldm = snap.read_block(filename3, "VEL ", parttype=1)
    #redefine position units from ckpc/h to kpc/h
    posgas *= atime
    posdm *= atime
    #redefine velocity units from kmsqrt(a)/s to km/s
    velgas *= np.sqrt(atime)
    veldm *= np.sqrt(atime)

    # Cosmic baryon fraction from the full (non-FOF) snapshot.
    fb = massgassim.sum(dtype="float64") / (
        massgassim.sum(dtype="float64") + massdmsim.sum(dtype="float64"))
    gaslimit = .4  # Set the limit for gas fraction in plots

    #boxSize hubble flow correction for halo CM velocity subtraction
    boxSizeVel = boxSize * .1 * UnitLength_in_cm / UnitVelocity_in_cm_per_s * np.sqrt(
        Omega0 / atime / atime / atime + OmegaLambda)

    #load particle indices
    # NOTE(review): pGas/mGas duplicate the posgas/massgas reads above
    # (posgas was rescaled by atime, pGas was not — the ellipsoid test
    # below deliberately works in comoving coords via boxSize/atime).
    pGas = snap.read_block(filename3, "POS ", parttype=0)
    mGas = snap.read_block(filename3, "MASS", parttype=0)
    pDM = snap.read_block(filename3, "POS ", parttype=1)
    # Halos with more than 100 gas cells; group-ordered snapshot means a
    # halo's gas occupies the contiguous slice [startAllGas, endAllGas).
    halo100_indices = np.where(cat.GroupLenType[:, 0] > 100)[0]
    startAllGas = []
    endAllGas = []
    for i in halo100_indices:
        startAllGas += [np.sum(cat.GroupLenType[:i, 0])]
        endAllGas += [startAllGas[-1] + cat.GroupLenType[i, 0]]
    #Initialize arrays
    spinparam = np.zeros(np.size(halo100_indices))
    jsptotspinparam = np.zeros(np.size(halo100_indices))
    jspgasspinparam = np.zeros(np.size(halo100_indices))
    jspdmspinparam = np.zeros(np.size(halo100_indices))
    gasfrac = np.zeros(np.size(halo100_indices))
    costheta = np.zeros(np.size(halo100_indices))  #misalignment angle
    v200 = np.zeros(np.size(halo100_indices))
    velgasall = np.zeros(np.size(halo100_indices))
    veldmall = np.zeros(np.size(halo100_indices))
    virialratio = np.zeros(np.size(halo100_indices))
    numGas = np.zeros(np.size(halo100_indices))
    numDM = np.zeros(np.size(halo100_indices))
    j200gas = np.zeros(np.size(halo100_indices))
    j200dm = np.zeros(np.size(halo100_indices))
    j200 = np.zeros(np.size(halo100_indices))
    totmass = np.zeros(np.size(halo100_indices))
    gasmass = np.zeros(np.size(halo100_indices))
    DMmass = np.zeros(np.size(halo100_indices))
    rmax = np.zeros(np.size(halo100_indices))
    rmin = np.zeros(np.size(halo100_indices))
    j200gasNoNorm = np.zeros(np.size(halo100_indices))
    closestm200 = np.zeros(np.size(halo100_indices))
    #some radii are errors and negative, will have a value of 1 to be excluded
    negradii = np.zeros(np.size(halo100_indices))

    #Indexing for global variable works because halos are ordered from largest to smallest so <100 particles are at the end and not counted.
    for i in halo100_indices:
        # Pull this halo's precomputed ellipsoid fit from module-level
        # globals named e.g. cm_112Mpc_Sig0_<snapnum>.  Python-2-only:
        # exec rebinding locals (see class docstring note).
        exec("cm = cm_%s_%s_%d[0][i]" % (s_res, s_vel, snapnum))
        exec("rotation = rotation_%s_%s_%d[0][i]" % (s_res, s_vel, snapnum))
        exec("radii = radii_%s_%s_%d[0][i]" % (s_res, s_vel, snapnum))
        #some radii are errors and negative, will have a value of 1 to be excluded
        if radii[0] < 0.:
            negradii[i] = 1.
        else:
            maxrad = radii[2]
            maxrad *= atime  #convert from ckpc to kpc
            exec("mDM=mDM_%s_%s_%d[0][i]" % (s_res, s_vel, snapnum))
            exec("DMinEll=DMindices_%s_%s_%d[0][i]" % (s_res, s_vel, snapnum))
            exec("m200dm = M200dm_%s_%s[snapnum-10][i]" % (s_res, s_vel))
            #Check if CM is buggy
            if np.sum(cm == np.array([0., 0., 0.])) == 3:
                # it's probably an error; recompute the mass-weighted com
                totalGas = np.sum(mGas[startAllGas[i]:endAllGas[i]])
                cm = np.array([
                    np.sum(pGas[startAllGas[i]:endAllGas[i], j] *
                           mGas[startAllGas[i]:endAllGas[i]]) / totalGas
                    for j in range(3)
                ])
            # Get positions of gas particles
            P = pGas[startAllGas[i]:endAllGas[i]]
            # Shift coordinate system to center on the center of the ellipsoid
            Precentered = dx_wrap(P - cm, boxSize / atime)
            # Rotate coordinates so the axes point along x,y,z directions:
            Precentered = np.array(
                [np.dot(pp, rotation.T) for pp in Precentered])
            # Figure out which particles are inside the ellipsoid
            inEll = (Precentered[:, 0]**2. / radii[0]**2. +
                     Precentered[:, 1]**2. / radii[1]**2 +
                     Precentered[:, 2]**2. / radii[2]**2) <= 1.

            #remove halo CM velocity
            tempvelgas = dx_wrap(
                velgas[startAllGas[i]:endAllGas[i]][inEll] - haloCMvel[i],
                boxSizeVel)
            tempveldm = dx_wrap(veldm[DMinEll] - haloCMvel[i], boxSizeVel)
            #redefine positions wrt COM
            tempposgas = dx_wrap(
                posgas[startAllGas[i]:endAllGas[i]][inEll] - haloPos[i],
                boxSize)
            tempposdm = dx_wrap(posdm[DMinEll] - haloPos[i], boxSize)
            # NOTE(review): np.size on an (N,3) array returns 3N, so
            # numDM/numGas hold 3x the particle count; the goodidx cut of
            # numGas > 100 below therefore means ~33 particles — confirm
            # this is intended.
            numDM[i] = np.size(tempposdm)
            numGas[i] = np.size(tempposgas)
            #Calculating j200
            #j200 of all particles: mass-weighted sum of r x v
            j200vecgas = np.sum(
                np.cross(tempposgas, tempvelgas) *
                massgas[startAllGas[i]:endAllGas[i]][inEll][:, np.newaxis],
                axis=0)
            j200vecdm = np.sum(np.cross(tempposdm, tempveldm) *
                               massdm[DMinEll][:, np.newaxis],
                               axis=0)
            #if np.size(tempveldm)!=0: #can be no dm particles!
            #	costheta[i] = np.dot(j200vecgas,j200vecdm)/np.linalg.norm(j200vecgas)/np.linalg.norm(j200vecdm)
            j200vec = j200vecgas + j200vecdm
            j200[i] = np.linalg.norm(j200vec)
            j200dm[i] = np.linalg.norm(j200vecdm)
            j200gas[i] = np.linalg.norm(j200vecgas)
            j200gasNoNorm[i] = np.linalg.norm(j200vecgas)
            gasmass[i] = np.sum(
                massgas[startAllGas[i]:endAllGas[i]][inEll])
            totmass[i] = gasmass[i] + mDM
            DMmass[i] = mDM
            rmax[i] = radii[2]
            rmin[i] = radii[0]
            closestm200[i] = m200dm
            #using fiducial m200~6mgas
            #get r200 from analytic formula in Barkana,Loeb 01 review
            if gasmass[i] != 0.:  #Some ellipsoids fit nothing
                m200fid = 6. * gasmass[i]
                # Omega_m(z) for Omega_m=0.27, Omega_L=0.73 (hard-coded
                # here rather than using Omega0/OmegaLambda read above).
                omgz = .27 * atime**(-3.) / (.27 * atime**(-3.) + .73)
                dfact = omgz - 1.
                delc = 18. * np.pi**2. + 82 * dfact - 39. * dfact**2.
                r200fid = .784 * (m200fid * 100.)**(1. / 3.) * (
                    .27 / omgz * delc / 18. / np.pi**2)**(-1. / 3.) * 10 * atime
                v200fid = np.sqrt(GCONST * (m200fid) / r200fid)
                # Bullock-style spin normalization: j / (sqrt(2) M V R).
                j200gas[i] *= 1. / np.sqrt(2) / (
                    gasmass[i]) / v200fid / r200fid
                j200[i] *= 1. / np.sqrt(2) / (
                    totmass[i]) / v200fid / r200fid
                if mDM != 0.:
                    j200dm[i] *= 1. / np.sqrt(2) / mDM / v200fid / r200fid
                gasfrac[i] = gasmass[i] / totmass[i]

    #Reindex to account for shrunken ellipsoids with gas particles >100
    goodidx, = np.where(np.logical_and(numGas > 100, negradii == 0.))
    self.j200gas = j200gas[goodidx]
    self.j200dm = j200dm[goodidx]
    self.j200 = j200[goodidx]
    self.j200gasNoNorm = j200gasNoNorm[goodidx]
    self.gasfrac = gasfrac[goodidx]
    self.totmass = totmass[goodidx]
    self.totmass *= 10**10  # code mass (1e10 Msun/h) -> Msun/h
    #costheta = costheta[goodidx]
    self.rmax = rmax[goodidx]
    self.rmin = rmin[goodidx]
    #thetadeg = np.arccos(costheta)*180./np.pi
    self.gasmass = gasmass[goodidx]
    self.closestm200 = closestm200[goodidx]
    #Reindex the Rmin, R200_DM params (again from module-level globals)
    exec("self.rclosest = Rmin_%s_%s[snapnum-10][goodidx]" % (s_res, s_vel))
    exec("self.R200dm = R200dm_%s_%s[snapnum-10][goodidx]" % (s_res, s_vel))
'SubhaloMassType', 'SubhaloSFR', 'SubhaloGasMetallicity', 'SubhaloGasMetallicitySfr' ]) stellar_masses = cat.SubhaloMassType[:, 4] * 1e10 / little_h # units of M_solar sfr = cat.SubhaloSFR # units of M_solar / yr gas_z = cat.SubhaloGasMetallicity # unitless metallicity gas_sfr_z = cat.SubhaloGasMetallicitySfr # unitless metallicity star_z = readsubfHDF5.subhalo_stellar_metallicities(snap=snap[0]) # read this guy in manually... file = '/n/home01/ptorrey/ReviewPlots/cold_gas_masses_z' + str( redshift) + '.hdf5' f = hdf5lib.OpenFile(file, mode='r') cold_gas_mass = np.array( hdf5lib.GetData(f, "ColdGasMass") [:]) # should already be in units of M_solar (per cold_gas_catalog.py) f.close() output_cgm = open( './data/cold_gas_mass_vs_stellar_mass_z' + str(redshift) + '.txt', 'w') output_cgm.write( '# Dense gas mass vs stellar mass (note, I used "cold" to describe this data throughout,\n' ) output_cgm.write( '# but its really only a density cut!!! per Romeels email) \n') output_cgm.write('# col1 = stellar mass (in solar masses) \n') output_cgm.write('# col2 = dense gas mass (in solar masses)\n') output_cgm.write('# col3 = dense gas mass / stellar mass (unitless) \n')
def __init__(self,
             basedir,
             snapnum,
             long_ids=False,
             double_output=False,
             grpcat=True,
             subcat=True,
             name="fof_subhalo_tab",
             keysel=[]):
    """Load a SUBFIND group/subhalo catalog spread over multiple HDF5 files.

    Iterates over every catalog file of the snapshot, allocates one array
    per requested field on the first file (sized by the *_Total header
    counts), and fills the per-file slices as it goes.  Each field in
    keysel becomes an attribute of self (e.g. self.GroupPos).

    Parameters
    ----------
    basedir : str
        Base directory of the simulation output.
    snapnum : int
        Snapshot number.
    long_ids : bool
        Read particle IDs as uint64 instead of uint32.
    double_output : bool
        Selects the float dtype for FLOAT fields.
        NOTE(review): mapping looks inverted (True -> float32,
        False -> float64) — confirm against the files actually written.
    grpcat, subcat : bool
        Whether to read the Group / Subhalo sections.
    name : str
        Catalog file base name.
    keysel : list of str
        Field names to read.
        NOTE(review): the default is [] (read nothing), so the
        `if keysel is None` fallback below is unreachable unless a caller
        passes keysel=None explicitly — possibly meant to be the default.
    """
    # dtypes for ID and FLOAT fields, chosen by the flags above.
    if long_ids:
        self.id_type = np.uint64
    else:
        self.id_type = np.uint32
    if double_output:
        self.double_type = np.float32
    else:
        self.double_type = np.float64
    filenum = 0          # index of the catalog file currently being read
    doneflag = False
    skip_gr = 0          # running write offset into the group arrays
    skip_sub = 0         # running write offset into the subhalo arrays
    vardict = {}         # field name -> the array stored on self
    if keysel is None:
        # Fall back to every known group field (see NOTE in docstring:
        # subhalo-only fields would not be included by this fallback).
        keysel = grp_datablocks.items()
    while not doneflag:
        self.filebase, curfile = naming.return_subfind_filebase(
            basedir, snapnum, name, filenum)
        self.firstfile = curfile  # NOTE(review): overwritten every file
        f = hdf5lib.OpenFile(curfile)
        ngroups = hdf5lib.GetAttr(f, "Header", "Ngroups_ThisFile")
        nsubs = hdf5lib.GetAttr(f, "Header", "Nsubgroups_ThisFile")
        nfiles = hdf5lib.GetAttr(f, "Header", "NumFiles")
        if filenum == 0:
            # First file: record global totals and allocate the output
            # arrays once, sized for the whole catalog.
            self.ngroups = hdf5lib.GetAttr(f, "Header", "Ngroups_Total")
            self.nids = hdf5lib.GetAttr(f, "Header", "Nids_Total")
            self.nsubs = hdf5lib.GetAttr(f, "Header", "Nsubgroups_Total")
            #GROUPS
            if grpcat:
                for key in keysel:
                    if hdf5lib.Contains(f, "Group", key):
                        # (type, dim) metadata for this field; 'type' here
                        # shadows the builtin (kept as in the original).
                        val = grp_datablocks[key]
                        type = val[0]
                        dim = val[1]
                        if (type == 'FLOAT'):
                            vars(self)[key] = np.empty(
                                self.ngroups,
                                dtype=np.dtype((self.double_type, dim)))
                        if (type == 'INT'):
                            vars(self)[key] = np.empty(self.ngroups,
                                                       dtype=np.dtype(
                                                           (np.int32, dim)))
                        if (type == 'INT64'):
                            vars(self)[key] = np.empty(self.ngroups,
                                                       dtype=np.dtype(
                                                           (np.int64, dim)))
                        if (type == 'ID'):
                            vars(self)[key] = np.empty(self.ngroups,
                                                       dtype=np.dtype(
                                                           (self.id_type, dim)))
                        vardict[key] = vars(self)[key]
            #SUBHALOS
            if subcat:
                for key in keysel:
                    if hdf5lib.Contains(f, "Subhalo", key):
                        val = sub_datablocks[key]
                        type = val[0]
                        dim = val[1]
                        if (type == 'FLOAT'):
                            vars(self)[key] = np.empty(
                                self.nsubs,
                                dtype=np.dtype((self.double_type, dim)))
                        if (type == 'INT'):
                            vars(self)[key] = np.empty(self.nsubs,
                                                       dtype=np.dtype(
                                                           (np.int32, dim)))
                        if (type == 'INT64'):
                            vars(self)[key] = np.empty(self.nsubs,
                                                       dtype=np.dtype(
                                                           (np.int64, dim)))
                        if (type == 'ID'):
                            vars(self)[key] = np.empty(self.nsubs,
                                                       dtype=np.dtype(
                                                           (self.id_type, dim)))
                        vardict[key] = vars(self)[key]
        #GROUPS
        # Copy this file's group rows into the preallocated arrays at
        # offset skip_gr.
        if grpcat:
            if ngroups > 0:
                for key in keysel:
                    if hdf5lib.Contains(f, "Group", key):
                        val = grp_datablocks[key]
                        type = val[0]
                        dim = val[1]
                        a = hdf5lib.GetData(f, "Group/" + key)
                        if dim == 1:
                            vardict[key][skip_gr:skip_gr + ngroups] = a[:]
                        else:
                            # multi-dim fields copied column by column
                            for d in range(0, dim):
                                vardict[key][skip_gr:skip_gr + ngroups,
                                             d] = a[:, d]
                skip_gr += ngroups
        #SUBHALOS
        # Same per-file copy for subhalo rows at offset skip_sub.
        if subcat:
            if nsubs > 0:
                for key in keysel:
                    if hdf5lib.Contains(f, "Subhalo", key):
                        val = sub_datablocks[key]
                        type = val[0]
                        dim = val[1]
                        a = hdf5lib.GetData(f, "Subhalo/" + key)
                        if dim == 1:
                            vardict[key][skip_sub:skip_sub + nsubs] = a[:]
                        else:
                            for d in range(0, dim):
                                vardict[key][skip_sub:skip_sub + nsubs,
                                             d] = a[:, d]
                skip_sub += nsubs
        f.close()
        filenum += 1
        # Stop after the last file, per the NumFiles header attribute.
        if filenum == nfiles:
            doneflag = True