def return_tags(filename, parttype=-1, verbose=False):
    """Return, for each particle type present in the file, the list of
    datablock tags (keys of ``datablocks``) stored for that type.

    ``filename`` may omit the ".hdf5" / ".0.hdf5" suffix; it is resolved
    the same way as in the other readers.  Exits via sys.exit() if no
    matching file exists.  ``parttype`` and ``verbose`` are accepted for
    interface compatibility but unused.
    """
    if os.path.exists(filename):
        curfilename = filename
    elif os.path.exists(filename + ".hdf5"):
        curfilename = filename + ".hdf5"
    elif os.path.exists(filename + ".0.hdf5"):
        curfilename = filename + ".0.hdf5"
    else:
        print("[error] file not found : {:s} in return_tags".format(filename))
        sys.stdout.flush()
        sys.exit()

    all_blocks = []
    # was hdf5lib.OpenFile(filename): ignored the resolved curfilename above
    f = hdf5lib.OpenFile(curfilename)
    for ptype in range(0, 6):
        this_block = []
        part_name = 'PartType' + str(ptype)
        if hdf5lib.Contains(f, "", part_name):
            # plain dict iteration replaces the Python-2-only .next() loop
            for tag in datablocks:
                if hdf5lib.Contains(f, part_name, datablocks[tag][0]):
                    sys.stdout.flush()
                    this_block.append(tag)
            all_blocks.append(this_block)
    f.close()
    gc.collect()
    return all_blocks
def contains_block(filename, tag, parttype=-1, verbose=False):
    """Return True if any particle type in the file stores a datablock
    whose tag contains the substring ``tag``.

    ``filename`` may omit the ".hdf5" / ".0.hdf5" suffix.  Exits via
    sys.exit() if no matching file exists.  ``parttype`` is accepted for
    interface compatibility but unused.
    """
    if os.path.exists(filename):
        curfilename = filename
    elif os.path.exists(filename + ".hdf5"):
        curfilename = filename + ".hdf5"
    elif os.path.exists(filename + ".0.hdf5"):
        curfilename = filename + ".0.hdf5"
    else:
        print("[error] file not found : {:s} in contains_block ".format(filename))
        sys.stdout.flush()
        sys.exit()

    contains_flag = False
    # was hdf5lib.OpenFile(filename): ignored the resolved curfilename above
    f = hdf5lib.OpenFile(curfilename)
    for ptype in range(0, 6):
        part_name = 'PartType' + str(ptype)
        if hdf5lib.Contains(f, "", part_name):
            # plain dict iteration replaces the Python-2-only .next() loop
            for key in datablocks:
                if verbose:
                    print("check ", key, datablocks[key][0])
                    sys.stdout.flush()
                if hdf5lib.Contains(f, part_name, datablocks[key][0]):
                    if key.find(tag) > -1:
                        contains_flag = True
    f.close()
    gc.collect()
    return contains_flag
def contains_block(filename, tag, parttype=-1, verbose=False):
    """Return True if any particle type in ``filename`` stores a datablock
    whose tag contains the substring ``tag``.

    ``parttype`` is accepted for interface compatibility but unused.
    """
    contains_flag = False
    f = hdf5lib.OpenFile(filename)
    for ptype in range(0, 6):
        part_name = 'PartType' + str(ptype)
        if hdf5lib.Contains(f, "", part_name):
            # plain dict iteration replaces the Python-2-only .next() loop
            for key in datablocks:
                if verbose:
                    print("check ", key, datablocks[key][0])
                    sys.stdout.flush()
                if hdf5lib.Contains(f, part_name, datablocks[key][0]):
                    if key.find(tag) > -1:
                        contains_flag = True
    f.close()
    return contains_flag
def list_blocks(filename, parttype=-1, verbose=False):
    """Print, for each particle type present in ``filename``, the datablock
    tags and HDF5 dataset names stored for it.

    ``parttype`` is accepted for interface compatibility but unused.
    """
    f = hdf5lib.OpenFile(filename)
    for ptype in range(0, 6):
        part_name = 'PartType' + str(ptype)
        # was `part_name_` (undefined name -> NameError on any existing file)
        if hdf5lib.Contains(f, "", part_name):
            print("Parttype contains : ", ptype)
            print("-------------------")
            sys.stdout.flush()
            # plain dict iteration replaces the Python-2-only .next() loop
            for key in datablocks:
                if verbose:
                    print("check ", key, datablocks[key][0])
                    sys.stdout.flush()
                if hdf5lib.Contains(f, part_name, datablocks[key][0]):
                    print(key, datablocks[key][0])
                    sys.stdout.flush()
    f.close()
def __init__(self, basedir, skipfac, snapnum, filenum=0, tree_start=-1,
             tree_num=-1, keysel=None):
    """Load merger trees from one tree file.

    Reads the header attributes and, for trees in
    [tree_start, tree_start + tree_num), the datablocks named in ``keysel``
    (all blocks when keysel is None) into ``self.trees``, an object array
    of {datablock: data} dicts.
    """
    self.filebase = basedir + "trees_sf" + str(skipfac) + "_" + \
        str(snapnum).zfill(3)
    self.basedir = basedir
    self.filenum = filenum
    filename = self.filebase + "." + str(filenum) + ".hdf5"
    f = hdf5lib.OpenFile(filename)
    self.NtreesPerFile = hdf5lib.GetAttr(f, "Header", "NtreesPerFile")
    self.NumberOfOutputFiles = hdf5lib.GetAttr(f, "Header",
                                               "NumberOfOutputFiles")
    self.ParticleMass = hdf5lib.GetAttr(f, "Header", "ParticleMass")
    if self.ParticleMass == 0:
        print(
            "WARNING: ParticleMass = 0, needed for merger rate calculation"
        )
    self.TreeNHalos = hdf5lib.GetData(f, "Header/TreeNHalos")[:]
    self.TotNsubhalos = hdf5lib.GetData(f, "Header/TotNsubhalos")[:]
    self.Redshifts = hdf5lib.GetData(f, "Header/Redshifts")[:]
    if (tree_start == -1) | (tree_num == -1):
        # default: read every tree stored in this file
        tree_start = 0
        tree_num = self.NtreesPerFile
    self.trees = np.empty(tree_num - tree_start, dtype='object')
    self.tree_start = tree_start
    self.tree_num = tree_num
    for ntree in range(tree_start, tree_start + tree_num):
        # was named `list`, which shadowed the builtin and made the
        # subsequent list(mergertree_datablocks.keys()) call raise TypeError
        entries = []
        if keysel is None:
            for datablock in mergertree_datablocks.keys():
                data = hdf5lib.GetData(
                    f, "Tree" + str(ntree) + "/" + datablock)[:]
                entries.append((datablock, data))
        else:
            for datablock in keysel:
                if hdf5lib.Contains(f, "Tree" + str(ntree), datablock):
                    data = hdf5lib.GetData(
                        f, "Tree" + str(ntree) + "/" + datablock)[:]
                    entries.append((datablock, data))
        self.trees[ntree - tree_start] = dict(entries)
    f.close()
def write_block(f, block, parttype, data):
    """Write ``data`` as the I/O block with tag ``block`` for particle type
    ``parttype`` into the open HDF5 handle ``f``.

    Creates the PartTypeN group if needed, refuses to overwrite an existing
    dataset, and reports unknown block tags.
    """
    part_name = "PartType" + str(parttype)
    if not hdf5lib.Contains(f, "", part_name):
        group = hdf5lib.CreateGroup(f, part_name)
    else:
        group = hdf5lib.GetGroup(f, part_name)
    # `dict.has_key` was removed in Python 3; use the `in` operator
    if block in datablocks:
        block_name = datablocks[block][0]
        if not hdf5lib.ContainsGroup(group, block_name):
            hdf5lib.CreateArray(f, group, block_name, data)
        else:
            print("I/O block already written")
            sys.stdout.flush()
    else:
        print("Unknown I/O block")
        sys.stdout.flush()
def list_blocks(self, parttype=-1, verbose=False):
    """Print every known subhalo datablock name, flagging those present in
    the first catalogue file.

    ``parttype`` and ``verbose`` are accepted for interface compatibility
    but unused.
    """
    curfile = self.firstfile
    if not os.path.exists(curfile):
        print("file not found:", curfile)
        sys.exit()
    f = hdf5lib.OpenFile(curfile)
    # plain dict iteration replaces the Python-2-only .next() loop
    for key in sub_datablocks:
        print(key)
        if hdf5lib.Contains(f, "Subhalo", key):
            print("Subhalo: " + key)
            sys.stdout.flush()
    f.close()
def __init__(self, basedir, snapnum, keysel=None, long_ids=False):
    """Load galaxy-property (galprop) datablocks for one snapshot as
    attributes of this object.

    keysel   : iterable of datablock names to load; None loads every block
               listed in galprop_datablocks.
    long_ids : accepted for interface compatibility; ids are not read here.
    """
    if keysel is None:
        # was galprop_datablocks.items(): yielded (key, value) tuples, so
        # the Contains() checks below could never match a real dataset name
        keysel = list(galprop_datablocks.keys())
    # was `file = ...`: avoid shadowing the builtin
    filename = naming.return_galprop_file(basedir, snapnum)
    if os.path.exists(filename):
        f = hdf5lib.OpenFile(filename, mode='r')
        for key in keysel:
            if hdf5lib.Contains(f, "", key):
                vars(self)[key] = np.array(hdf5lib.GetData(f, key)[:])
        f.close()
    else:
        print("Galprop File Not Found...")
def write_block(f, block, parttype, data, data_name=None):
    """Write ``data`` as an I/O block for particle type ``parttype``.

    block     : datablock tag (key of ``datablocks``)
    data_name : optional HDF5 dataset name; when given, it overrides
                ``block`` via reverse lookup in ``datablocks``.
    """
    print("writing block!")
    part_name = "PartType" + str(parttype)
    if not hdf5lib.Contains(f, "", part_name):
        group = hdf5lib.CreateGroup(f, part_name)
    else:
        group = hdf5lib.GetGroup(f, part_name)
    if data_name is not None:
        # reverse lookup: find the tag whose HDF5 dataset name is data_name;
        # previously an unmatched data_name left `block` pointing at the last
        # table entry, silently writing under the wrong dataset name
        for candidate in datablocks:
            if datablocks[candidate][0] == data_name:
                block = candidate
                break
    # `dict.has_key` was removed in Python 3; use the `in` operator
    if block in datablocks:
        block_name = datablocks[block][0]
        if not hdf5lib.ContainsGroup(group, block_name):
            hdf5lib.CreateArray(f, group, block_name, data)
        else:
            print("I/O block already written")
            sys.stdout.flush()
    else:
        print("Unknown I/O block")
        sys.stdout.flush()
def __init__(self, basedir, snapnum, long_ids=False, double_output=False,
             grpcat=True, subcat=True, name="fof_subhalo_tab", keysel=None):
    """Read a SUBFIND group/subhalo catalogue, concatenating all file parts.

    basedir/snapnum locate the catalogue; ``name`` is the file prefix.
    long_ids selects 64-bit IDs; grpcat/subcat toggle reading the Group and
    Subhalo tables; keysel (iterable of datablock names) restricts which
    blocks are read, None reads everything.  Each selected block becomes an
    attribute of this object.
    """
    self.filebase = basedir + "/groups_" + str(snapnum).zfill(3) + \
        "/" + name + "_" + str(snapnum).zfill(3) + "."
    self.id_type = np.uint64 if long_ids else np.uint32
    # NOTE(review): historical quirk preserved — double_output=True selects
    # float32 and False selects float64; confirm this is intentional.
    self.double_type = np.float32 if double_output else np.float64

    vardict = {}

    def _dtype_for(type_tag, dim):
        # Map a datablock type tag to the numpy dtype used for allocation.
        if type_tag == 'FLOAT':
            base = self.double_type
        elif type_tag == 'INT':
            base = np.int32
        elif type_tag == 'INT64':
            # the subhalo branch previously allocated INT64 blocks as int32
            base = np.int64
        elif type_tag == 'ID':
            base = self.id_type
        else:
            return None
        return np.dtype((base, dim))

    def _selected(f, hdf_group, table):
        # Yield (key, (type, dim)) for each requested key present in `f`.
        keys = table.keys() if keysel is None else keysel
        for key in keys:
            if key in table and hdf5lib.Contains(f, hdf_group, key):
                yield key, table[key]

    def _alloc(f, hdf_group, table, total):
        # Pre-allocate one array per selected block, sized for the full run.
        for key, (type_tag, dim) in _selected(f, hdf_group, table):
            dtype = _dtype_for(type_tag, dim)
            if dtype is not None:
                vars(self)[key] = np.empty(total, dtype=dtype)
                vardict[key] = vars(self)[key]

    def _fill(f, hdf_group, table, skip, count):
        # Copy this file's slice of each selected block into the output.
        for key, (type_tag, dim) in _selected(f, hdf_group, table):
            a = hdf5lib.GetData(f, hdf_group + "/" + key)
            if dim == 1:
                vardict[key][skip:skip + count] = a[:]
            else:
                for d in range(0, dim):
                    vardict[key][skip:skip + count, d] = a[:, d]

    filenum = 0
    doneflag = False
    skip_gr = 0
    skip_sub = 0
    while not doneflag:
        curfile = self.filebase + str(filenum) + ".hdf5"
        if not os.path.exists(curfile):
            # fall back to the single-file naming convention
            self.filebase = basedir + "/" + name + "_" + str(snapnum).zfill(3)
            curfile = self.filebase + ".hdf5"
        if not os.path.exists(curfile):
            # was a Python 2 print statement (SyntaxError under Python 3)
            print("file not found:", curfile)
            sys.exit()
        f = hdf5lib.OpenFile(curfile)
        ngroups = hdf5lib.GetAttr(f, "Header", "Ngroups_ThisFile")
        nsubs = hdf5lib.GetAttr(f, "Header", "Nsubgroups_ThisFile")
        nfiles = hdf5lib.GetAttr(f, "Header", "NumFiles")
        if filenum == 0:
            self.ngroups = hdf5lib.GetAttr(f, "Header", "Ngroups_Total")
            self.nids = hdf5lib.GetAttr(f, "Header", "Nids_Total")
            self.nsubs = hdf5lib.GetAttr(f, "Header", "Nsubgroups_Total")
            if grpcat:
                _alloc(f, "Group", grp_datablocks, self.ngroups)
            if subcat:
                _alloc(f, "Subhalo", sub_datablocks, self.nsubs)
        if grpcat and ngroups > 0:
            _fill(f, "Group", grp_datablocks, skip_gr, ngroups)
            skip_gr += ngroups
        if subcat and nsubs > 0:
            _fill(f, "Subhalo", sub_datablocks, skip_sub, nsubs)
            skip_sub += nsubs
        f.close()
        filenum += 1
        if filenum == nfiles:
            doneflag = True
def read_block_single_file(filename, block_name, dim2, parttype=-1,
                           no_mass_replicate=False, fill_block_name="",
                           slab_start=-1, slab_len=-1, verbose=False,
                           ids=None):
    """Read one HDF5 datablock from a single snapshot file.

    Returns [data, True] on success, or [0, False] when the requested
    particle type (or the whole file, for parttype=-1) holds no particles.

    parttype >= 0 reads that type only; -1 concatenates all present types.
    slab_start/slab_len select a contiguous row range (parttype >= 0 only).
    ids selects sorted, file-local row indices (parttype >= 0 only); this
    parameter is appended last so existing positional callers are
    unaffected — previously read_block() passed ids= although no such
    parameter existed, raising TypeError.
    """
    if verbose:
        print("[single] reading file : ", filename)
        print("[single] reading : ", block_name)
        sys.stdout.flush()

    head = snapshot_header(filename)
    npart = head.npart
    massarr = head.massarr
    doubleflag = head.double    # GADGET-2 files may need doubleflag = 0

    # nothing to do if the requested selection is empty
    if parttype != -1:
        if head.npart[parttype] == 0:
            return [0, False]
    else:
        if head.npart.sum() == 0:
            return [0, False]
    del head

    # construct the slice for partial (slab) access
    if (slab_start != -1) and (slab_len != -1):
        data_slice = slice(slab_start, slab_start + slab_len)
    else:
        data_slice = slice(None, None, 1)
    if verbose:
        print("[single] data slice: ", data_slice)
        sys.stdout.flush()

    f = hdf5lib.OpenFile(filename)

    # --- read one specific particle type (parttype >= 0) ---
    if parttype >= 0:
        if verbose:
            print("[single] parttype : ", parttype)
            sys.stdout.flush()
        if (block_name == "Masses") and (npart[parttype] > 0) and (massarr[parttype] > 0):
            # constant-mass species: replicate the header mass entry
            if verbose:
                print("[single] replicate mass block")
                sys.stdout.flush()
            ret_val = np.repeat(massarr[parttype], npart[parttype])[data_slice]
        else:
            part_name = 'PartType' + str(parttype)
            ret_val = hdf5lib.GetData(f, part_name + "/" + block_name)[data_slice]
        if ids is not None:
            # keep only the requested (sorted, file-local) rows
            ret_val = ret_val[np.asarray(ids)]
        if verbose:
            print("[single] read particles (total) : ", ret_val.shape[0] / dim2)
            sys.stdout.flush()

    # --- read and concatenate all particle types (parttype == -1) ---
    if parttype == -1:
        first = True
        dim1 = 0    # was long(0): `long` does not exist in Python 3
        for ptype in range(0, 6):
            part_name = 'PartType' + str(ptype)
            if not hdf5lib.Contains(f, "", part_name):
                continue
            if verbose:
                print("[single] parttype : ", ptype)
                print("[single] massarr : ", massarr)
                print("[single] npart : ", npart)
                sys.stdout.flush()
            # 1) replicate constant mass block (unless no_mass_replicate)
            if ((block_name == "Masses") and (npart[ptype] > 0)
                    and (massarr[ptype] > 0) and (not no_mass_replicate)):
                if verbose:
                    print("[single] replicate mass block")
                    sys.stdout.flush()
                data = np.repeat(massarr[ptype], npart[ptype])
                dim1 += data.shape[0]
                ret_val = data if first else np.append(ret_val, data)
                first = False
                if verbose:
                    print("[single] read particles (total) : ", ret_val.shape[0] / dim2)
                    sys.stdout.flush()
                if doubleflag == 0:
                    ret_val = ret_val.astype("float32")
            # 2) fill a missing block with zeros when fill_block_name matches
            if ((block_name == fill_block_name) and (block_name != "Masses")
                    and (npart[ptype] > 0)
                    and (not hdf5lib.Contains(f, part_name, block_name))):
                if verbose:
                    print("[single] replicate block name", fill_block_name)
                    sys.stdout.flush()
                data = np.repeat(0.0, npart[ptype] * dim2)
                dim1 += data.shape[0]
                ret_val = data if first else np.append(ret_val, data)
                first = False
                if verbose:
                    print("[single] read particles (total) : ", ret_val.shape[0] / dim2)
                    sys.stdout.flush()
                if doubleflag == 0:
                    ret_val = ret_val.astype("float32")
            # 3) default: the block is present in the file, read it
            if hdf5lib.Contains(f, part_name, block_name):
                data = hdf5lib.GetData(f, part_name + "/" + block_name)[:]
                dim1 += data.shape[0]
                ret_val = data if first else np.append(ret_val, data)
                first = False
                if verbose:
                    print("[single] read particles (total) : ", ret_val.shape[0] / dim2)
                    sys.stdout.flush()
        if (dim1 > 0) and (dim2 > 1):
            # np.append flattened the data; restore the (rows, dim2) shape
            ret_val = ret_val.reshape(dim1, dim2)
            if verbose:
                print("[single] reshape done: ", ret_val.shape)
                sys.stdout.flush()

    f.close()
    return [ret_val, True]
def read_block(filename, block, parttype=-1, no_mass_replicate=False,
               fill_block="", slab_start=-1, slab_len=-1, ids=-1,
               verbose=False, multiple_files=False):
    """Read the datablock ``block`` from a snapshot, transparently handling
    multi-file snapshots, slab access and sorted id-list access.

    filename may omit the ".hdf5" / ".0.hdf5" suffix.
    parttype -1 reads all particle types (not allowed with slabs/ids).
    slab_start/slab_len select a contiguous global row range.
    ids is either the int -1 (disabled) or a sorted array of global indices.
    fill_block names a block to substitute with zeros where missing.
    Exits via sys.exit() on fatal errors (missing file, unknown block, ...).
    """
    if os.path.exists(filename):
        curfilename = filename
    elif os.path.exists(filename + ".hdf5"):
        curfilename = filename + ".hdf5"
    elif os.path.exists(filename + ".0.hdf5"):
        curfilename = filename + ".0.hdf5"
        multiple_files = True
    else:
        print("[error] file not found : {:s} in 'read_block' ".format(filename))
        sys.stdout.flush()
        sys.exit()

    if verbose:
        print("reading block : ", block)
        sys.stdout.flush()

    if parttype not in [-1, 0, 1, 2, 3, 4, 5]:
        print("[error] wrong parttype given")
        sys.stdout.flush()
        sys.exit()

    slabflag = False
    if (slab_start != -1) or (slab_len != -1):
        slabflag = True
        if parttype == -1:
            print("[error] slabs only supported for specific parttype")
            sys.stdout.flush()
            sys.exit()

    idsflag = False
    if not isinstance(ids, int):    # int sentinel -1 means "no id selection"
        idsflag = True
        ids = np.array(ids)
        if parttype == -1:
            print("[error] id list only supported for specific parttype")
            sys.stdout.flush()
            sys.exit()
        if np.sum(ids == np.sort(ids)) < len(ids):
            print("[error] input ids not sorted. must be in order!")
            return

    if verbose:
        print(curfilename)
    head = snapshot_header(curfilename)
    filenum = head.filenum
    npart_all = np.array(head.nall)
    highword = head.nall_highword
    # was `npart_all.astype(long)`: a no-op (result discarded) and `long`
    # does not exist in Python 3; widen in place to avoid overflow
    npart_all = npart_all.astype(np.int64)

    # determine the size of the output allocation
    if idsflag:
        length = len(ids)
    if slabflag:
        length = slab_len
    if not idsflag and not slabflag:
        if parttype != -1:
            length = head.nall[parttype]
            if highword[parttype] == 1:
                # counts above 2^32 carry their high bits separately
                length += 2**32
        else:
            length = head.nall.sum()
    if verbose:
        print("Length of data allocation:", length)

    # resolve the block tag (`dict.has_key` was removed in Python 3)
    if block in datablocks:
        block_name = datablocks[block][0]
        dim2 = datablocks[block][1]
        if verbose:
            print("Reading HDF5 : ", block_name)
            print("Data dimension : ", dim2)
            print("Multiple file : ", multiple_files)
            print("Slab data : ", slabflag)
            sys.stdout.flush()
    else:
        print("[error] Block type ", block, "not known!")
        sys.stdout.flush()
        sys.exit()

    fill_block_name = ""
    if fill_block != "":
        if fill_block in datablocks:
            fill_block_name = datablocks[fill_block][0]
            dim2 = datablocks[fill_block][1]
            if verbose:
                print("Block filling active : ", fill_block_name)
                sys.stdout.flush()

    # determine the allocation dtype by peeking at the first file that
    # actually contains the block
    alloc_type = None
    if not multiple_files:
        filepaths = [curfilename]
    else:
        filepaths = [filename + "." + str(subfile) + ".hdf5"
                     for subfile in range(filenum)]
    for filepath in filepaths:
        g = hdf5lib.OpenFile(filepath)
        contains = False
        if parttype == -1:
            for ptype in range(0, 6):
                try:
                    contains = hdf5lib.Contains(g, 'PartType' + str(ptype),
                                                block_name)
                except Exception:
                    contains = False
                if contains:
                    alloc_type = str(hdf5lib.GetData(
                        g, 'PartType' + str(ptype) + '/' + block_name)[0:1].dtype)
                    break
        else:
            try:
                contains = hdf5lib.Contains(g, 'PartType' + str(parttype),
                                            block_name)
            except Exception:
                contains = False
            if contains:
                alloc_type = str(hdf5lib.GetData(
                    g, 'PartType' + str(parttype) + '/' + block_name)[0:1].dtype)
        g.close()
        gc.collect()
        if contains:
            break

    if alloc_type is None:
        # block not stored anywhere: only ID/MASS have sensible defaults.
        # NOTE(review): tag may be "ID  " (4 chars) in the datablocks table;
        # confirm the exact tag spelling against the table definition
        if block == "ID ":
            alloc_type = np.uint32
        elif block == "MASS":
            alloc_type = np.float32    # default to float32 for MASS
        else:
            print("[error] block : ", block, "of parttype : ", parttype,
                  "not found")
            sys.stdout.flush()
            sys.exit()

    if dim2 > 1:
        ret_val = np.ndarray((length, dim2), alloc_type)
    else:
        ret_val = np.ndarray((length,), alloc_type)
    if verbose:
        print("Allocated array")

    if multiple_files:
        if not slabflag and not idsflag:
            # plain sequential concatenation over all file parts
            dim1 = 0    # was long(0)
            for num in range(0, filenum):
                curfilename = filename + "." + str(num) + ".hdf5"
                if verbose:
                    print("Reading file : ", num, curfilename)
                    sys.stdout.flush()
                data, succ = read_block_single_file(
                    curfilename, block_name, dim2, parttype,
                    no_mass_replicate, fill_block_name, slab_start, slab_len,
                    verbose=False)
                if succ:
                    if dim2 > 1:
                        ret_val[dim1:dim1 + len(data), :] = data
                    else:
                        ret_val[dim1:dim1 + len(data)] = data
                    dim1 += data.shape[0]
                if verbose:
                    if succ:
                        print("Read particles (total) : ", ret_val.shape[0])
                    else:
                        print("Read particles (total) : none")
                    sys.stdout.flush()
        if idsflag:
            # read only the (sorted) global indices in `ids`
            dim1 = 0
            for num in range(0, filenum):
                curfilename = filename + "." + str(num) + ".hdf5"
                head = snapshot_header(curfilename)
                nloc = head.npart[parttype]
                low = ids[0]      # first particle still wanted
                high = ids[-1]    # last particle still wanted
                if nloc > low:    # something to read in this file
                    toread = ids[ids < nloc]    # indices local to this file
                    if verbose:
                        print("Reading file : ", num, curfilename)
                        sys.stdout.flush()
                    data, succ = read_block_single_file(
                        curfilename, block_name, dim2, parttype,
                        no_mass_replicate, fill_block_name, ids=toread,
                        verbose=verbose)
                    if succ:
                        if dim2 > 1:
                            ret_val[dim1:dim1 + len(data), :] = data
                        else:
                            ret_val[dim1:dim1 + len(data)] = data
                        dim1 += data.shape[0]
                    if verbose:
                        if succ:
                            print("Read particles (total) : ", ret_val.shape[0])
                        else:
                            print("Read particles (total) : none")
                        sys.stdout.flush()
                ids -= nloc
                ids = ids[ids >= 0]    # keep only indices in later files
                if len(ids) == 0 or high < 0:
                    break
        if slabflag:
            # read a contiguous global slab spanning file boundaries
            off = slab_start
            left = slab_len
            dim1 = 0
            for num in range(0, filenum):
                curfilename = filename + "." + str(num) + ".hdf5"
                head = snapshot_header(curfilename)
                nloc = head.npart[parttype]
                if nloc > off:
                    start = off
                    if nloc - off > left:
                        count = left
                    else:
                        count = nloc - off
                    if verbose:
                        print("Reading file : ", num, curfilename, start, count)
                        sys.stdout.flush()
                    data, succ = read_block_single_file(
                        curfilename, block_name, dim2, parttype,
                        no_mass_replicate, fill_block_name,
                        slab_start=start, slab_len=count, verbose=verbose)
                    if succ:
                        if dim2 > 1:
                            ret_val[dim1:dim1 + len(data), :] = data
                        else:
                            ret_val[dim1:dim1 + len(data)] = data
                        dim1 += data.shape[0]
                    if verbose:
                        if succ:
                            print("Read particles (total) : ", ret_val.shape[0])
                        else:
                            print("Read particles (total) : none")
                        sys.stdout.flush()
                    left -= count
                    off += count
                if left == 0:
                    break
                off -= nloc
            if verbose:
                print("all partial files read in")
                sys.stdout.flush()
    else:
        ret_val, succ = read_block_single_file(
            curfilename, block_name, dim2, parttype, no_mass_replicate,
            fill_block_name, slab_start, slab_len, verbose)
    return ret_val
def __init__(self, basedir, snapnum, long_ids=False, double_output=False,
             grpcat=True, subcat=True, name="fof_subhalo_tab", keysel=[]):
    """Read a SUBFIND group/subhalo catalogue, concatenating all file parts.

    keysel : iterable of datablock names to read; the default [] reads
             nothing, None reads every known Group and Subhalo block.
    Each selected block becomes an attribute of this object.
    """
    self.id_type = np.uint64 if long_ids else np.uint32
    # NOTE(review): historical quirk preserved — double_output=True selects
    # float32 and False selects float64; confirm this is intentional.
    self.double_type = np.float32 if double_output else np.float64
    # NOTE(review): the mutable default [] is never mutated here, but it
    # makes the implicit default read nothing; confirm whether None was the
    # intended default.
    if keysel is None:
        # was grp_datablocks.items(): yielded (key, value) tuples, which can
        # never match a dataset name; use the key names of both tables
        keysel = list(grp_datablocks.keys()) + list(sub_datablocks.keys())

    vardict = {}

    def _dtype_for(type_tag, dim):
        # Map a datablock type tag to the numpy dtype used for allocation.
        if type_tag == 'FLOAT':
            base = self.double_type
        elif type_tag == 'INT':
            base = np.int32
        elif type_tag == 'INT64':
            base = np.int64
        elif type_tag == 'ID':
            base = self.id_type
        else:
            return None
        return np.dtype((base, dim))

    def _selected(f, hdf_group, table):
        # Yield (key, (type, dim)) for each requested key present in `f`.
        for key in keysel:
            if key in table and hdf5lib.Contains(f, hdf_group, key):
                yield key, table[key]

    def _alloc(f, hdf_group, table, total):
        # Pre-allocate one array per selected block, sized for the full run.
        for key, (type_tag, dim) in _selected(f, hdf_group, table):
            dtype = _dtype_for(type_tag, dim)
            if dtype is not None:
                vars(self)[key] = np.empty(total, dtype=dtype)
                vardict[key] = vars(self)[key]

    def _fill(f, hdf_group, table, skip, count):
        # Copy this file's slice of each selected block into the output.
        for key, (type_tag, dim) in _selected(f, hdf_group, table):
            a = hdf5lib.GetData(f, hdf_group + "/" + key)
            if dim == 1:
                vardict[key][skip:skip + count] = a[:]
            else:
                for d in range(0, dim):
                    vardict[key][skip:skip + count, d] = a[:, d]

    filenum = 0
    doneflag = False
    skip_gr = 0
    skip_sub = 0
    while not doneflag:
        self.filebase, curfile = naming.return_subfind_filebase(
            basedir, snapnum, name, filenum)
        if filenum == 0:
            # was assigned on every iteration, leaving the LAST file here
            self.firstfile = curfile
        f = hdf5lib.OpenFile(curfile)
        ngroups = hdf5lib.GetAttr(f, "Header", "Ngroups_ThisFile")
        nsubs = hdf5lib.GetAttr(f, "Header", "Nsubgroups_ThisFile")
        nfiles = hdf5lib.GetAttr(f, "Header", "NumFiles")
        if filenum == 0:
            self.ngroups = hdf5lib.GetAttr(f, "Header", "Ngroups_Total")
            self.nids = hdf5lib.GetAttr(f, "Header", "Nids_Total")
            self.nsubs = hdf5lib.GetAttr(f, "Header", "Nsubgroups_Total")
            if grpcat:
                _alloc(f, "Group", grp_datablocks, self.ngroups)
            if subcat:
                _alloc(f, "Subhalo", sub_datablocks, self.nsubs)
        if grpcat and ngroups > 0:
            _fill(f, "Group", grp_datablocks, skip_gr, ngroups)
            skip_gr += ngroups
        if subcat and nsubs > 0:
            _fill(f, "Subhalo", sub_datablocks, skip_sub, nsubs)
            skip_sub += nsubs
        f.close()
        filenum += 1
        if filenum == nfiles:
            doneflag = True