Ejemplo n.º 1
0
    def __init__(self, basedir, snapnum):
        """Load the subgroup tree-offset tables for one snapshot.

        basedir -- directory containing the offset file
        snapnum -- snapshot number encoded in the file name
        """
        self.snapnum = snapnum
        self.filename = (basedir + "tree_offsets_subgroup_" + str(snapnum)
                         + "_135.hdf5")

        handle = hdf5lib.OpenFile(self.filename)
        # Read each offset table into an attribute of the same name.
        for table in ("TreeFile", "TreeIndex", "TreeNum"):
            setattr(self, table, hdf5lib.GetData(handle, table)[:])
        handle.close()
Ejemplo n.º 2
0
def mass_from_mergers(base, snap=135):
    """Return the StellarMassFromMergers table for one snapshot.

    base -- run base directory
    snap -- snapshot number (default 135), zero-padded to three digits
    """
    tag = ('000' + str(snap))[-3:]
    path = base + '/postprocessing/StellarAssembly/galaxies_' + tag + '.hdf5'
    handle = hdf5lib.OpenFile(path, mode='r')
    masses = hdf5lib.GetData(handle, "StellarMassFromMergers")[:]
    handle.close()
    return np.array(masses)
Ejemplo n.º 3
0
def mass_from_minor_mergers(base, snap=135):
    """Return the StellarMassFromMinorMergers table for one snapshot.

    base -- run base directory
    snap -- snapshot number (default 135), zero-padded to three digits
    """
    tag = ('000' + str(snap))[-3:]
    path = base + '/postprocessing/MergerHistory/merger_history_' + tag + '.hdf5'
    handle = hdf5lib.OpenFile(path, mode='r')
    masses = hdf5lib.GetData(handle, "StellarMassFromMinorMergers")[:]
    handle.close()
    return np.array(masses)
Ejemplo n.º 4
0
def subhalo_overdensity(base, snap=135):
    """Return the local overdensity ("delta") table for one snapshot.

    base -- run base directory
    snap -- snapshot number (default 135), zero-padded to three digits
    """
    tag = ('000' + str(snap))[-3:]
    path = base + '/postprocessing/environment/environment_' + tag + '.hdf5'
    handle = hdf5lib.OpenFile(path, mode='r')
    overdensity = hdf5lib.GetData(handle, "delta")[:]
    handle.close()
    return np.array(overdensity)
Ejemplo n.º 5
0
def number_of_minor_mergers(base, snap=135):
    """Return the NumMinorMergersTotal table for one snapshot.

    base -- run base directory
    snap -- snapshot number (default 135), zero-padded to three digits
    """
    tag = ('000' + str(snap))[-3:]
    path = base + '/postprocessing/MergerHistory/merger_history_' + tag + '.hdf5'
    handle = hdf5lib.OpenFile(path, mode='r')
    counts = hdf5lib.GetData(handle, "NumMinorMergersTotal")[:]
    handle.close()
    return np.array(counts)
Ejemplo n.º 6
0
def subhalo_insitu_fraction(snap=135):
    """Return the "InSitu" stellar-fraction table (hard-coded Illustris-1 path).

    snap -- snapshot number (default 135), zero-padded to three digits
    """
    tag = ('000' + str(snap))[-3:]
    path = ('/n/ghernquist/Illustris/Runs/Illustris-1/postprocessing/'
            'InSituFraction/insitu_stellar_fraction_' + tag + '.hdf5')
    handle = hdf5lib.OpenFile(path, mode='r')
    fractions = hdf5lib.GetData(handle, "InSitu")[:]
    handle.close()
    return np.array(fractions)
Ejemplo n.º 7
0
    def __init__(self,
                 basedir,
                 skipfac,
                 snapnum,
                 filenum=0,
                 tree_start=-1,
                 tree_num=-1,
                 keysel=None):
        """Load merger trees from one tree file into ``self.trees``.

        basedir    -- directory holding the tree files
        skipfac    -- snapshot skip factor encoded in the file name
        snapnum    -- snapshot number encoded in the file name
        filenum    -- index of the file piece to read (default 0)
        tree_start -- first tree to load; -1 (together with tree_num == -1)
                      loads all trees in the file
        tree_num   -- number of trees to load; -1 loads all
        keysel     -- iterable of datablock names to read; None reads every
                      known datablock
        """
        self.filebase = basedir + "trees_sf" + str(skipfac) + "_" + str(
            snapnum).zfill(3)
        self.basedir = basedir
        self.filenum = filenum
        filename = self.filebase + "." + str(filenum) + ".hdf5"
        f = hdf5lib.OpenFile(filename)
        self.NtreesPerFile = hdf5lib.GetAttr(f, "Header", "NtreesPerFile")
        self.NumberOfOutputFiles = hdf5lib.GetAttr(f, "Header",
                                                   "NumberOfOutputFiles")
        self.ParticleMass = hdf5lib.GetAttr(f, "Header", "ParticleMass")
        if self.ParticleMass == 0:
            print("WARNING: ParticleMass = 0, needed for merger rate calculation")
        self.TreeNHalos = hdf5lib.GetData(f, "Header/TreeNHalos")[:]
        self.TotNsubhalos = hdf5lib.GetData(f, "Header/TotNsubhalos")[:]
        self.Redshifts = hdf5lib.GetData(f, "Header/Redshifts")[:]
        # Default: read every tree in the file (use logical 'or', not
        # the bitwise '|' of the original).
        if (tree_start == -1) or (tree_num == -1):
            tree_start = 0
            tree_num = self.NtreesPerFile
        self.trees = np.empty(tree_num, dtype='object')
        self.tree_start = tree_start
        self.tree_num = tree_num
        for ntree in range(tree_start, tree_start + tree_num):
            # FIX: renamed from 'list', which shadowed the builtin.
            blocks = []
            if keysel is None:
                for datablock in mergertree_datablocks.keys():
                    data = hdf5lib.GetData(
                        f, "Tree" + str(ntree) + "/" + datablock)[:]
                    blocks.append((datablock, data))
            else:
                for datablock in keysel:
                    if hdf5lib.Contains(f, "Tree" + str(ntree), datablock):
                        data = hdf5lib.GetData(
                            f, "Tree" + str(ntree) + "/" + datablock)[:]
                        blocks.append((datablock, data))
            # FIX: dropped a leftover per-tree debug print of
            # (ntree, tree_start) that spammed stdout for every tree.
            self.trees[ntree - tree_start] = dict(blocks)
        f.close()
Ejemplo n.º 8
0
def get_offsets(cat, part_types=(0, 1, 4, 5), snap=None, run=None):
    """Return per-group and per-subhalo particle offsets by particle type.

    Tries pretabulated offset files first (classic Illustris layout, then
    the TNG layout) when both ``snap`` and ``run`` are given; otherwise the
    offsets are rebuilt from the group catalogue ``cat`` (which must expose
    ngroups, nsubs, GroupLenType, GroupNsubs, SubhaloLenType).

    Returns (group_offsets, halo_offsets), each shaped (N, 6).
    """
    if snap and run:
        group_file = "/n/ghernquist/Illustris/Runs/%s/postprocessing/offsets/snap_offsets_group_%s.hdf5" % (run, snap)
        halo_file = "/n/ghernquist/Illustris/Runs/%s/postprocessing/offsets/snap_offsets_subhalo_%s.hdf5" % (run, snap)
        if os.path.isfile(group_file) and os.path.isfile(halo_file):
            print("READSUBF: found pretabulated offsets to read")
            f = hdf5lib.OpenFile(group_file)
            group_offsets = hdf5lib.GetData(f, "Offsets")[:]
            f.close()

            f = hdf5lib.OpenFile(halo_file)
            halo_offsets = hdf5lib.GetData(f, "Offsets")[:]
            f.close()

            return np.array(group_offsets), np.array(halo_offsets)
        else:
            # TNG layout, e.g.
            # /n/hernquistfs3/IllustrisTNG/Runs/L75n910TNG/postprocessing/offsets/
            group_file = "/n/hernquistfs3/IllustrisTNG/Runs/%s/postprocessing/offsets/offsets_%s.hdf5" % (run, str(snap).zfill(3))
            if os.path.isfile(group_file):
                f = hdf5lib.OpenFile(group_file)
                group_offsets = np.copy(hdf5lib.GetData(f, "Group/SnapByType"))
                halo_offsets = np.copy(hdf5lib.GetData(f, "Subhalo/SnapByType"))
                f.close()  # FIX: this handle was previously leaked
                return group_offsets, halo_offsets

    # Rebuild the offsets from the catalogue lengths.
    GroupOffset = np.zeros((cat.ngroups, 6), dtype="int64")
    HaloOffset = np.zeros((cat.nsubs, 6), dtype="int64")

    k = 0  # FIX: was undefined below when part_types is empty
    for parttype in part_types:
        print("Calculating offsets for PartType: %d" % parttype)
        k = 0
        for i in range(0, cat.ngroups):
            # Each group starts where the previous group's particles end.
            if i > 0:
                GroupOffset[i, parttype] = GroupOffset[i - 1, parttype] + cat.GroupLenType[i - 1, parttype]
            if cat.GroupNsubs[i] > 0:
                # First subhalo of a group starts at the group offset ...
                HaloOffset[k, parttype] = GroupOffset[i, parttype]
                k += 1
                # ... and the rest follow the previous subhalo's particles.
                for j in range(1, cat.GroupNsubs[i]):
                    HaloOffset[k, parttype] = HaloOffset[k - 1, parttype] + cat.SubhaloLenType[k - 1, parttype]
                    k += 1
    if k != cat.nsubs:
        print("READHALO: problem with offset table %s %s" % (k, cat.nsubs))
        sys.exit()

    return np.array(GroupOffset), np.array(HaloOffset)
Ejemplo n.º 9
0
def subhalo_gas_kinematics(base, snap=135, which="v_5"):
    """Return one gas-kinematics quantity ("Subhalo/<which>") per subhalo.

    base  -- run base directory
    snap  -- snapshot number (default 135)
    which -- dataset name under the "Subhalo" group (default "v_5")
    """
    path = (base + '/postprocessing/gas_kinematics/gas_kinematic_info_'
            + str(snap) + '.hdf5')
    print(path)
    handle = hdf5lib.OpenFile(path, mode='r')
    values = hdf5lib.GetData(handle, "Subhalo/" + which)[:]
    handle.close()
    return np.array(values)
Ejemplo n.º 10
0
def subhalo_gas_z_grad(base, snap=135, which="GradMetallicity_5"):
    """Return one gas-metallicity-gradient quantity ("Subhalo/<which>").

    base  -- run base directory
    snap  -- snapshot number (default 135)
    which -- dataset name under the "Subhalo" group
    """
    path = (base + '/postprocessing/gas_metallicity/gas_metallicity_info_'
            + str(snap) + '.hdf5')
    print(path)
    handle = hdf5lib.OpenFile(path, mode='r')
    gradients = hdf5lib.GetData(handle, "Subhalo/" + which)[:]
    handle.close()
    return np.array(gradients)
Ejemplo n.º 11
0
def subhalo_petrosian_radius(snap=135):
    """Return the per-subhalo median Petrosian radius over four cameras.

    Reads RP_cam0..RP_cam3 from the (hard-coded, snapshot-135) Illustris-1
    photometric-morphology file and returns their element-wise median, or
    None if the file is missing.

    NOTE(review): the ``snap`` argument is accepted but unused — the path
    is fixed to snapshot 135; kept for interface compatibility.
    """
    path = '/n/ghernquist/Illustris/Runs/Illustris-1/postprocessing/PhotometricMorphologies/nonparmorphs_iSDSS_135.hdf5'
    if os.path.exists(path):
        f = hdf5lib.OpenFile(path, mode='r')
        data0 = np.array(hdf5lib.GetData(f, "RP_cam0")[:])
        data = np.zeros((4, data0.shape[0]))

        # BUG FIX: camera 0 was read but never written into the stack,
        # so the median previously included a spurious row of zeros.
        data[0, :] = data0
        data[1, :] = np.array(hdf5lib.GetData(f, "RP_cam1")[:])
        data[2, :] = np.array(hdf5lib.GetData(f, "RP_cam2")[:])
        data[3, :] = np.array(hdf5lib.GetData(f, "RP_cam3")[:])
        f.close()

        data = np.median(data, axis=0)
    else:
        data = None
    return data
Ejemplo n.º 12
0
def subhalo_circularities(base, snap=135):
    """Return the flattened CircAbove05Frac table for one snapshot.

    base -- run base directory
    snap -- snapshot number (default 135), zero-padded to three digits
    """
    tag = ('000' + str(snap))[-3:]
    path = base + '/postprocessing/circularities/circularities_' + tag + '.hdf5'
    print(path)
    handle = hdf5lib.OpenFile(path, mode='r')
    circ = np.array(hdf5lib.GetData(handle, "CircAbove05Frac")[:])
    handle.close()
    return np.reshape(circ, -1)
Ejemplo n.º 13
0
def subhalo_stellar_vel_disp(base, snap=135, which="StellarVelDisp_HalfMassRad"):
    """Return one stellar velocity-dispersion measure ("Subhalo/<which>").

    base  -- run base directory
    snap  -- snapshot number (default 135, not zero-padded)
    which -- dataset name under the "Subhalo" group
    """
    path = (base + '/postprocessing/stellar_vel_disp/stellar_vel_disp_'
            + str(snap) + '.hdf5')
    print(path)
    handle = hdf5lib.OpenFile(path, mode='r')
    dispersions = hdf5lib.GetData(handle, "Subhalo/" + which)[:]
    handle.close()
    return np.array(dispersions)
Ejemplo n.º 14
0
def subhalo_stellar_age(snap=135):
    """Return the 'stellar_age_inrad' table from the Illustris-1 galprop file.

    snap -- snapshot number (default 135), zero-padded to three digits.
    Returns None when the galprop file does not exist.
    """
    tag = ('000' + str(snap))[-3:]
    path = '/n/ghernquist/Illustris/Runs/Illustris-1/postprocessing/galprop/galprop_' + tag + '.hdf5'
    if os.path.exists(path):
        # FIX: reuse the path computed above (the original repeated the
        # literal when opening, letting the two copies drift apart) and
        # stop shadowing the builtin 'file'.
        f = hdf5lib.OpenFile(path, mode='r')
        data = np.array(hdf5lib.GetData(f, "stellar_age_inrad")[:])
        f.close()
    else:
        data = None
    return data
Ejemplo n.º 15
0
    def __init__(self,
                 basedir,
                 snapnum,
                 long_ids=False,
                 name="fof_subhalo_tab",
                 verbose=False):
        """Read the particle IDs from every file piece of a group catalogue.

        basedir  -- base directory of the output (with or without a
                    per-snapshot "groups_NNN" subdirectory)
        snapnum  -- snapshot number
        long_ids -- IDs are 64-bit instead of 32-bit
        name     -- catalogue file prefix (default "fof_subhalo_tab")
        verbose  -- unused here; kept for interface compatibility
        """
        self.filebase = basedir + "/groups_" + str(snapnum).zfill(
            3) + "/" + name + "_" + str(snapnum).zfill(3) + "."

        if long_ids: self.id_type = np.uint64
        else: self.id_type = np.uint32

        filenum = 0
        doneflag = False
        skip_ids = 0

        while not doneflag:
            curfile = self.filebase + str(filenum) + ".hdf5"

            # Fall back to an unsplit single-file catalogue.
            if not os.path.exists(curfile):
                self.filebase = basedir + "/" + name + "_" + str(
                    snapnum).zfill(3)
                curfile = self.filebase + ".hdf5"

            if not os.path.exists(curfile):
                print("file not found: %s" % curfile)
                sys.exit()

            f = hdf5lib.OpenFile(curfile)
            # BUG FIX: NumFiles was stored only in a local 'nfiles', while
            # the loop condition below read 'self.nfiles', which raised
            # AttributeError as soon as the first file was processed.
            self.nfiles = hdf5lib.GetAttr(f, "Header", "NumFiles")
            nids = hdf5lib.GetAttr(f, "Header", "Nids_ThisFile")
            idlen = hdf5lib.GetAttr(f, "Header", "Nids_Total")

            if filenum == 0:
                # Allocate once using the total count from the header.
                self.IDs = np.empty(idlen, dtype=self.id_type)

            # TODO: This call seems to fail when FOF_STOREIDS is not
            # switched on in Config.sh, since the dataset IDs/ID
            # will not exist.
            self.IDs[skip_ids:skip_ids + nids] = hdf5lib.GetData(f, "IDs/ID")

            skip_ids += nids

            f.close()
            filenum += 1
            if filenum == self.nfiles: doneflag = True
Ejemplo n.º 16
0
 def __init__(self, basedir, snapnum, keysel=None, long_ids=False):
     """Load selected galprop fields as attributes of this object.

     basedir  -- run base directory (passed to naming.return_galprop_file)
     snapnum  -- snapshot number
     keysel   -- iterable of dataset names to load; None loads every field
                 known in galprop_datablocks
     long_ids -- accepted for interface compatibility; not used here
     """
     if keysel is None:
         # BUG FIX: this used galprop_datablocks.items(), which yields
         # (name, spec) tuples rather than dataset names, so the
         # Contains() check below could never match.
         keysel = galprop_datablocks.keys()

     path = naming.return_galprop_file(basedir, snapnum)
     if os.path.exists(path):
         f = hdf5lib.OpenFile(path, mode='r')
         for key in keysel:
             if hdf5lib.Contains(f, "", key):
                 # Store each dataset as an attribute named after it.
                 vars(self)[key] = np.array(hdf5lib.GetData(f, key)[:])
         f.close()
     else:
         print("Galprop File Not Found...")
Ejemplo n.º 17
0
def read_block_single_file(filename,
                           block_name,
                           dim2,
                           parttype=-1,
                           no_mass_replicate=False,
                           fill_block_name="",
                           slab_start=-1,
                           slab_len=-1,
                           ids=-1,
                           verbose=False):
    """Read one HDF5 data block from a single snapshot file.

    filename          -- snapshot file path (".hdf5" / ".0.hdf5" also tried)
    block_name        -- dataset name inside each PartTypeN group
    dim2              -- second dimension of the block (1 for scalar blocks)
    parttype          -- particle type to read, or -1 for all six types
    no_mass_replicate -- if True, do not expand the header mass table into a
                         per-particle Masses block (parttype == -1 path)
    fill_block_name   -- block to zero-fill for particle types that lack it
                         (parttype == -1 path only)
    slab_start        -- first element to read; -1 disables slab selection
    slab_len          -- number of elements to read; -1 disables
    ids               -- sorted ndarray of element indices to read, or -1
    verbose           -- print progress information

    Returns [data, True] on success, or [0, False] when the file contains
    no particles of the requested type.
    """

    # Resolve the file name: exact path, or with the common suffixes.
    if os.path.exists(filename):
        curfilename = filename
    elif os.path.exists(filename + ".hdf5"):
        curfilename = filename + ".hdf5"
    elif os.path.exists(filename + ".0.hdf5"):
        curfilename = filename + ".0.hdf5"
    else:
        print "[error] file not found : ", filename
        sys.stdout.flush()
        sys.exit()

    if verbose:
        print "[single] reading file           : ", curfilename
        print "[single] reading                : ", block_name
        sys.stdout.flush()

    # Per-file particle counts and the header mass table.
    head = snapshot_header(curfilename)
    npart = head.npart
    massarr = head.massarr
    nall = head.nall
    filenum = head.filenum
    doubleflag = head.double

    # Nothing of the requested type here: report failure, don't raise.
    if (parttype != -1):
        if (head.npart[parttype] == 0):
            return [0, False]
    else:
        if (head.npart.sum() == 0):
            return [0, False]
    del head

    #construct indices for partial access
    idsflag = False
    if (slab_start != -1) and (slab_len != -1):
        data_slice = slice(slab_start, (slab_start + slab_len))
    elif type(ids) == np.ndarray:
        idsflag = True
        data_slice = ids
    else:
        # Default: read the whole dataset.
        data_slice = slice(None, None, 1)

    if verbose:
        print "[single] data slice: ", data_slice
        sys.stdout.flush()

    f = hdf5lib.OpenFile(curfilename)

    #read specific particle type (parttype>=0, non-default)
    if parttype >= 0:
        if verbose:
            print "[single] parttype               : ", parttype
            sys.stdout.flush()
        # Masses may exist only in the header mass table; replicate the
        # table entry per particle when it is non-zero.
        if ((block_name == "Masses") and (npart[parttype] > 0)
                and (massarr[parttype] > 0)):
            if verbose:
                print "[single] replicate mass block"
                sys.stdout.flush()
            ret_val = np.repeat(massarr[parttype], npart[parttype])[data_slice]
        else:
            part_name = 'PartType' + str(parttype)
            if idsflag:
                # NOTE(review): [[data_slice]] wraps the index array in a
                # list, i.e. legacy numpy fancy indexing on the fully-read
                # array -- confirm against the numpy version in use.
                ret_val = hdf5lib.GetData(f, part_name + "/" +
                                          block_name)[:][[data_slice]]
            else:
                ret_val = hdf5lib.GetData(f, part_name + "/" +
                                          block_name)[data_slice]
        if verbose:
            print "[single] read particles (total) : ", ret_val.shape[0] / dim2
            sys.stdout.flush()

    #read all particle types (parttype=-1, default)
    if parttype == -1:
        # Accumulate flat data over all present particle types; dim1 tracks
        # the running element count for the final reshape.
        first = True
        dim1 = long(0)
        for parttype in range(0, 6):
            part_name = 'PartType' + str(parttype)
            if hdf5lib.Contains(f, "", part_name):
                if verbose:
                    print "[single] parttype               : ", parttype
                    print "[single] massarr                : ", massarr
                    print "[single] npart                  : ", npart
                    sys.stdout.flush()

                #replicate mass block per default (unless no_mass_replicate is set)
                if ((block_name == "Masses") and (npart[parttype] > 0)
                        and (massarr[parttype] > 0)
                        and (no_mass_replicate == False)):
                    if (verbose):
                        print "[single] replicate mass block"
                        sys.stdout.flush()
                    if (first):
                        data = np.repeat(massarr[parttype], npart[parttype])
                        dim1 += long(data.shape[0])
                        ret_val = data
                        first = False
                    else:
                        data = np.repeat(massarr[parttype], npart[parttype])
                        dim1 += long(data.shape[0])
                        ret_val = np.append(ret_val, data)
                    if (verbose):
                        print "[single] read particles (total) : ", ret_val.shape[
                            0] / dim2
                        sys.stdout.flush()
                    # Match single-precision snapshots.
                    if (doubleflag == 0):
                        ret_val = ret_val.astype("float32")

                #fill fill_block_name with zeros if fill_block_name is set and particle type is present and fill_block_name not already stored in file for that particle type
                if ((block_name == fill_block_name)
                        and (block_name != "Masses") and (npart[parttype] > 0)
                        and
                    (hdf5lib.Contains(f, part_name, block_name) == False)):
                    if (verbose):
                        print "[single] replicate block name", fill_block_name
                        sys.stdout.flush()
                    if (first):
                        data = np.repeat(0.0, npart[parttype] * dim2)
                        dim1 += long(data.shape[0])
                        ret_val = data
                        first = False
                    else:
                        data = np.repeat(0.0, npart[parttype] * dim2)
                        dim1 += long(data.shape[0])
                        ret_val = np.append(ret_val, data)
                    if (verbose):
                        print "[single] read particles (total) : ", ret_val.shape[
                            0] / dim2
                        sys.stdout.flush()
                    if (doubleflag == 0):
                        ret_val = ret_val.astype("float32")

                #default: just read the block
                if (hdf5lib.Contains(f, part_name, block_name)):
                    if (first):
                        data = hdf5lib.GetData(f,
                                               part_name + "/" + block_name)[:]
                        dim1 += long(data.shape[0])
                        ret_val = data
                        first = False
                    else:
                        data = hdf5lib.GetData(f,
                                               part_name + "/" + block_name)[:]
                        dim1 += long(data.shape[0])
                        ret_val = np.append(ret_val, data)
                    if (verbose):
                        print "[single] read particles (total) : ", ret_val.shape[
                            0] / dim2
                        sys.stdout.flush()

        # np.append flattens; restore the (dim1, dim2) shape for vectors.
        if ((dim1 > 0) & (dim2 > 1)):
            ret_val = ret_val.reshape(dim1, dim2)
            if (verbose):
                print "[single] reshape done: ", ret_val.shape
                sys.stdout.flush()

    f.close()
    gc.collect()

    return [ret_val, True]
Ejemplo n.º 18
0
def read_block(filename,
               block,
               parttype=-1,
               no_mass_replicate=False,
               fill_block="",
               slab_start=-1,
               slab_len=-1,
               ids=-1,
               verbose=False,
               multiple_files=False):
    """Read one named block from a (possibly multi-file) snapshot.

    filename          -- snapshot base path; ".hdf5" / ".N.hdf5" are tried
    block             -- short block tag, a key of the datablocks table
    parttype          -- particle type (0-5) or -1 for all types
    no_mass_replicate -- do not expand the header mass table into Masses
    fill_block        -- block tag to zero-fill where missing (see
                         read_block_single_file)
    slab_start        -- first element of a contiguous range to read
                         (requires a specific parttype)
    slab_len          -- length of that range
    ids               -- sorted particle indices to read (requires a
                         specific parttype)
    verbose           -- print progress information
    multiple_files    -- force the multi-file (".N.hdf5") reading path

    Returns the assembled numpy array for the requested block.
    """

    # Resolve the file name; a ".0.hdf5" hit implies a split snapshot.
    if os.path.exists(filename):
        curfilename = filename
    elif os.path.exists(filename + ".hdf5"):
        curfilename = filename + ".hdf5"
    elif os.path.exists(filename + ".0.hdf5"):
        curfilename = filename + ".0.hdf5"
        multiple_files = True
    else:
        print "[error] file not found : ", filename
        sys.stdout.flush()
        sys.exit()

    if (verbose):
        print "reading block          : ", block
        sys.stdout.flush()

    if parttype not in [-1, 0, 1, 2, 3, 4, 5]:
        print "[error] wrong parttype given"
        sys.stdout.flush()
        sys.exit()

    # Slab and id selections are only defined for a single particle type,
    # and the id list must arrive sorted.
    slabflag = False
    if ((slab_start != -1) | (slab_len != -1)):
        slabflag = True
        if (parttype == -1):
            print "[error] slabs only supported for specific parttype"
            sys.stdout.flush()
            sys.exit()
    idsflag = False
    if type(ids) != int:
        idsflag = True
        ids = np.array(ids)
        if parttype == -1:
            print "[error] id list only supported for specific parttype"
            sys.stdout.flush()
            sys.exit()
        if np.sum(ids == np.sort(ids)) < len(ids):
            print "[error] input ids not sorted. must be in order!"
            return

    if (verbose):
        print curfilename
    head = snapshot_header(curfilename)
    filenum = head.filenum
    npart_all = np.array(head.nall)
    highword = head.nall_highword

    # NOTE(review): astype returns a copy and the result is discarded;
    # npart_all is not used below -- this looks like dead code.
    npart_all.astype(long)

    # Need to determine size of array and data type to pre-allocate data.
    if idsflag == True:
        length = len(ids)
    if slabflag == True:
        length = slab_len
    if idsflag == False and slabflag == False:
        if parttype != -1:
            length = head.nall[parttype]
            if highword[parttype] == 1:
                # High word set: true particle count exceeds 32 bits.
                length += 2**32
        else:
            length = head.nall.sum()
    if (verbose):
        print "Length of data allocation:", length  #GREG

    # Translate the short tag into (dataset name, second dimension).
    if (datablocks.has_key(block)):
        block_name = datablocks[block][0]
        dim2 = datablocks[block][1]
        first = True
        if (verbose):
            print "Reading HDF5           : ", block_name
            print "Data dimension         : ", dim2
            print "Multiple file          : ", multiple_files
            print "Slab data              : ", slabflag
            sys.stdout.flush()
    else:
        print "[error] Block type ", block, "not known!"
        sys.stdout.flush()
        sys.exit()

    fill_block_name = ""
    if (fill_block != ""):
        if (datablocks.has_key(fill_block)):
            fill_block_name = datablocks[fill_block][0]
            dim2 = datablocks[fill_block][1]
            if (verbose):
                print "Block filling active   : ", fill_block_name
                sys.stdout.flush()

    # set alloc_type here. read in example item to determine data type.
    alloc_type = None

    # need to loop over all files until block is found, or no more files left.
    if not multiple_files:
        filepaths = [curfilename]
    else:
        filepaths = [
            filename + "." + str(subfile) + ".hdf5"
            for subfile in np.arange(filenum)
        ]
    for filepath in filepaths:
        g = hdf5lib.OpenFile(filepath)
        if parttype == -1:
            for ptype in range(0, 6):
                try:
                    contains = hdf5lib.Contains(g, 'PartType' + str(ptype),
                                                block_name)
                except:
                    contains = False
                if contains:
                    # Read a single element just to learn the dtype.
                    alloc_type = str(
                        hdf5lib.GetData(
                            g, 'PartType' + str(ptype) + '/' +
                            block_name)[0:1].dtype)
                    break
        else:
            try:
                contains = hdf5lib.Contains(g, 'PartType' + str(parttype),
                                            block_name)
            except:
                contains = False
            if contains:
                alloc_type = str(
                    hdf5lib.GetData(
                        g, 'PartType' + str(parttype) + '/' +
                        block_name)[0:1].dtype)
        g.close()
        gc.collect()
        if contains == True:
            break

    # if block does not exist
    if alloc_type == None:
        if block == "ID  ":
            alloc_type = np.uint32
        #else:
        #       alloc_type=np.float32
        elif block == "MASS":
            alloc_type = np.float32  #default to float32 for MASS
        else:
            print "[error] block : ", block, "of parttype : ", parttype, "not found"
            sys.stdout.flush()
            sys.exit()

    # Pre-allocate the full output array; pieces are copied in below.
    if dim2 > 1:
        ret_val = np.ndarray((length, dim2), alloc_type)
    else:
        ret_val = np.ndarray((length, ), alloc_type)
    if (verbose):
        print "Allocated array"

    if (multiple_files):
        # Full read: concatenate every file piece in order.
        if slabflag == False and idsflag == False:
            first = True
            dim1 = long(0)
            for num in range(0, filenum):
                curfilename = filename + "." + str(num) + ".hdf5"
                if (verbose):
                    print "Reading file           : ", num, curfilename
                    sys.stdout.flush()

                data, succ = read_block_single_file(curfilename,
                                                    block_name,
                                                    dim2,
                                                    parttype,
                                                    no_mass_replicate,
                                                    fill_block_name,
                                                    slab_start,
                                                    slab_len,
                                                    verbose=False)

                if succ == True:
                    if dim2 > 1:
                        ret_val[dim1:dim1 + len(data), :] = data
                    else:
                        ret_val[dim1:dim1 + len(data)] = data
                    dim1 += long(data.shape[0])

                if (verbose):
                    if (succ):
                        print "Read particles (total) : ", ret_val.shape[0]
                        sys.stdout.flush()
                    else:
                        print "Read particles (total) : none"
                        sys.stdout.flush()

        # Implementation of reading specified particle positions.
        if (idsflag == True):
            dim1 = long(0)
            for num in range(0, filenum):
                curfilename = filename + "." + str(num) + ".hdf5"
                head = snapshot_header(curfilename)
                nloc = head.npart[parttype]

                low = ids[0]  # First particle to read
                high = ids[-1]  # Last particle to read

                if (nloc > low):  # Something to read in this file
                    toread = ids[
                        ids <
                        nloc]  # Need to get subset of ids still in this file
                    if (verbose):
                        print "Reading file           : ", num, curfilename
                        sys.stdout.flush()
                    data, succ = read_block_single_file(curfilename,
                                                        block_name,
                                                        dim2,
                                                        parttype,
                                                        no_mass_replicate,
                                                        fill_block_name,
                                                        ids=toread,
                                                        verbose=verbose)
                    if (succ == True):
                        if dim2 > 1:
                            ret_val[dim1:dim1 + len(data), :] = data
                        else:
                            ret_val[dim1:dim1 + len(data)] = data
                        dim1 += data.shape[0]
                    if (verbose):
                        if (succ):
                            # No longer need to divide by dim2. append would flatten array, not now.
                            print "Read particles (total) : ", ret_val.shape[0]
                            sys.stdout.flush()
                        else:
                            print "Read particles (total) : none"
                            sys.stdout.flush()

                # Re-express the remaining ids relative to the next file.
                ids -= nloc
                ids = ids[ids >= 0]  # Only keep particles not yet read
                if (len(ids) == 0 or high < 0):
                    break

        if (slabflag == True):
            # Walk the files, reading the overlap of [off, off+left) with
            # each file's local particle range.
            off = slab_start
            left = slab_len
            first = True
            dim1 = long(0)
            for num in range(0, filenum):
                curfilename = filename + "." + str(num) + ".hdf5"
                head = snapshot_header(curfilename)
                nloc = head.npart[parttype]
                if (nloc > off):
                    start = off
                    if (nloc - off > left):
                        count = left
                    else:
                        count = nloc - off
                    if (verbose):
                        print "Reading file           : ", num, curfilename, start, count
                        sys.stdout.flush()
                    data, succ = read_block_single_file(curfilename,
                                                        block_name,
                                                        dim2,
                                                        parttype,
                                                        no_mass_replicate,
                                                        fill_block_name,
                                                        slab_start=start,
                                                        slab_len=count,
                                                        verbose=verbose)
                    if (succ == True):
                        if dim2 > 1:
                            ret_val[dim1:dim1 + len(data), :] = data
                        else:
                            ret_val[dim1:dim1 + len(data)] = data
                        dim1 += data.shape[0]
                    if (verbose):
                        if (succ):
                            # No longer need to divide by dim2, append would flatten array, not now.
                            print "Read particles (total) : ", ret_val.shape[0]
                            sys.stdout.flush()
                        else:
                            print "Read particles (total) : none"
                            sys.stdout.flush()

                    left -= count
                    off += count
                if (left == 0):
                    break
                off -= nloc

        if (verbose):
            print "all partial files read in"
            sys.stdout.flush()

    else:
        # Single-file snapshot: delegate directly.
        ret_val, succ = read_block_single_file(curfilename, block_name, dim2,
                                               parttype, no_mass_replicate,
                                               fill_block_name, slab_start,
                                               slab_len, verbose)

    return ret_val
Ejemplo n.º 19
0
def subhalo_offsets(snap=135, run='Illustris-1'):
    """Return the pretabulated per-subhalo particle offsets for one snapshot.

    snap -- snapshot number (default 135)
    run  -- run name inside /n/ghernquist/Illustris/Runs (default Illustris-1)
    """
    path = ('/n/ghernquist/Illustris/Runs/' + run +
            '/postprocessing/offsets/snap_offsets_subhalo_' + str(snap) + '.hdf5')
    handle = hdf5lib.OpenFile(path, mode='r')
    offsets = hdf5lib.GetData(handle, "Offsets")[:]
    handle.close()
    return np.array(offsets)
Ejemplo n.º 20
0
def read_insitu():
    """Return the "InSitu" table from the hard-coded L75n1820FP stars file."""
    path = "/n/ghernquist/vrodrigu/StellarAssembly/output/Illustris/L75n1820FP/stars_135.hdf5"
    handle = hdf5lib.OpenFile(path, mode='r')
    flags = hdf5lib.GetData(handle, "InSitu")[:]
    handle.close()
    return np.array(flags)
Ejemplo n.º 21
0
    def __init__(self, basedir, snapnum, long_ids=False, double_output=False, grpcat=True, subcat=True, name="fof_subhalo_tab", keysel=None):
        """Read a SUBFIND group catalogue spread over several file pieces.

        basedir       -- base directory of the output
        snapnum       -- snapshot number
        long_ids      -- IDs are 64-bit instead of 32-bit
        double_output -- selects the float dtype for FLOAT fields
        grpcat        -- load Group fields
        subcat        -- load Subhalo fields
        name          -- catalogue file prefix (default "fof_subhalo_tab")
        keysel        -- list of field names to load; None loads every field
                         known in grp_datablocks
        """
        if long_ids: self.id_type = np.uint64
        else: self.id_type = np.uint32
        # NOTE(review): this mapping looks inverted (double_output=True
        # selects float32) but is kept as in the original -- confirm intent.
        if double_output: self.double_type = np.float32
        else: self.double_type = np.float64

        # BUG FIX: the default used to be keysel=[] -- a mutable default
        # that also made this None-check dead, so by default no fields were
        # ever loaded; .items() additionally yielded (name, spec) tuples
        # instead of field names.
        if keysel is None:
            keysel = grp_datablocks.keys()

        def _alloc(count, spec):
            # Map a (typecode, dim) spec to an empty array of that shape.
            typecode, dim = spec[0], spec[1]
            dtype_map = {'FLOAT': self.double_type,
                         'INT': np.int32,
                         'INT64': np.int64,
                         'ID': self.id_type}
            return np.empty(count, dtype=np.dtype((dtype_map[typecode], dim)))

        filenum = 0
        doneflag = False
        skip_gr = 0
        skip_sub = 0
        vardict = {}

        while not doneflag:
            self.filebase, curfile = naming.return_subfind_filebase(basedir, snapnum, name, filenum)
            self.firstfile = curfile

            f = hdf5lib.OpenFile(curfile)
            ngroups = hdf5lib.GetAttr(f, "Header", "Ngroups_ThisFile")
            nsubs = hdf5lib.GetAttr(f, "Header", "Nsubgroups_ThisFile")
            nfiles = hdf5lib.GetAttr(f, "Header", "NumFiles")
            if filenum == 0:
                self.ngroups = hdf5lib.GetAttr(f, "Header", "Ngroups_Total")
                self.nids = hdf5lib.GetAttr(f, "Header", "Nids_Total")
                self.nsubs = hdf5lib.GetAttr(f, "Header", "Nsubgroups_Total")
                # Pre-allocate an output array per selected field.
                if grpcat:
                    for key in keysel:
                        if hdf5lib.Contains(f, "Group", key):
                            vars(self)[key] = _alloc(self.ngroups, grp_datablocks[key])
                            vardict[key] = vars(self)[key]
                if subcat:
                    for key in keysel:
                        if hdf5lib.Contains(f, "Subhalo", key):
                            vars(self)[key] = _alloc(self.nsubs, sub_datablocks[key])
                            vardict[key] = vars(self)[key]

            # Copy this file piece's slice into the preallocated arrays.
            if grpcat and ngroups > 0:
                for key in keysel:
                    if hdf5lib.Contains(f, "Group", key):
                        dim = grp_datablocks[key][1]
                        a = hdf5lib.GetData(f, "Group/" + key)
                        if dim == 1:
                            vardict[key][skip_gr:skip_gr + ngroups] = a[:]
                        else:
                            for d in range(0, dim):
                                vardict[key][skip_gr:skip_gr + ngroups, d] = a[:, d]
                skip_gr += ngroups
            if subcat and nsubs > 0:
                for key in keysel:
                    if hdf5lib.Contains(f, "Subhalo", key):
                        dim = sub_datablocks[key][1]
                        a = hdf5lib.GetData(f, "Subhalo/" + key)
                        if dim == 1:
                            vardict[key][skip_sub:skip_sub + nsubs] = a[:]
                        else:
                            for d in range(0, dim):
                                vardict[key][skip_sub:skip_sub + nsubs, d] = a[:, d]
                skip_sub += nsubs

            f.close()

            filenum += 1
            if filenum == nfiles: doneflag = True