Example #1
    def __init__(self, fname, *args):
        BinaryFile.__init__(self, fname, *args)

        self.add_dataset("Nsubgroups", np.int32)
        nsub = self["Nsubgroups"][...]
        self.add_dataset("SubMostBoundID", np.uint64, (nsub, ))
        self.add_dataset("Desc_FileNr", np.int32, (nsub, ))
        self.add_dataset("Desc_SubIndex", np.int32, (nsub, ))
Example #2
    def __init__(self, fname, id_bytes=8, float_bytes=4, *args):
        BinaryFile.__init__(self, fname, *args)

        # Get number of bytes used to store IDs
        if id_bytes == 4:
            self.id_type = np.uint32
        elif id_bytes == 8:
            self.id_type = np.uint64
        else:
            raise ValueError("id_bytes must be 4 or 8")

        self.add_dataset("Ngroups", np.int32)
        self.add_dataset("TotNgroups", np.int32)
        self.add_dataset("Nids", np.int32)
        self.add_dataset("TotNids", np.int64)
        self.add_dataset("NTask", np.int32)
        self.add_dataset("SendOffset", np.int32)

        # Establish endian-ness by sanity check on number of files
        nfiles = self["NTask"][...]
        if nfiles < 1 or nfiles > 65535:
            self.enable_byteswap(True)

        # Read header
        Nids = self["Nids"][...]

        # Add dataset with particle IDs
        self.add_dataset("GroupIDs", self.id_type, (Nids, ))
Example #3
    def __init__(self, fname, id_bytes=8, *args):
        BinaryFile.__init__(self, fname, *args)

        # Header
        self.add_dataset("Ngroups", np.int32)
        self.add_dataset("Nids", np.int32)
        self.add_dataset("TotNgroups", np.int32)
        self.add_dataset("NTask", np.int32)

        # Establish endian-ness by sanity check on number of files
        nfiles = self["NFiles"][...]
        if nfiles < 1 or nfiles > 65535:
            self.enable_byteswap(True)

        # We also need to know the data types used for particle IDs
        if id_bytes == 4:
            self.id_type = np.uint32
        elif id_bytes == 8:
            self.id_type = np.uint64
        else:
            raise ValueError("id_bytes must be 4 or 8")

        # Read header
        Nids = self["Nids"][...]

        # Particle IDs
        self.add_dataset("GroupIDs", self.id_type, (Nids, ))
Example #4
    def __init__(self, fname, *args):
        BinaryFile.__init__(self, fname, *args)

        self.add_dataset("TotNsubhalos", np.int32)
        TotNsubhalos = self["TotNsubhalos"][...]
        self.add_dataset("HaloIndex", np.int32, (TotNsubhalos, ))
        self.add_dataset("SnapNum", np.int32, (TotNsubhalos, ))
Example #5
    def __init__(self, fname, id_bytes=8, *args):
        BinaryFile.__init__(self, fname, *args)

        # We need to know the data type used for particle IDs
        if id_bytes == 4:
            self.id_type = np.uint32
        elif id_bytes == 8:
            self.id_type = np.uint64
        else:
            raise ValueError("id_bytes must be 4 or 8")

        # File header
        self.add_dataset("TotHalos", np.int32)
        self.add_dataset("Totunique", np.int32)
        self.add_dataset("Ntrees", np.int32)
        self.add_dataset("TotSnaps", np.int32)

        # Read header values we need
        TotHalos = self["TotHalos"][...]
        Totunique = self["Totunique"][...]
        Ntrees = self["Ntrees"][...]
        TotSnaps = self["TotSnaps"][...]

        # Indexing
        self.add_dataset("CountID_Snap", np.int32, (TotSnaps, ))
        self.add_dataset("OffsetID_Snap", np.int32, (TotSnaps, ))
        self.add_dataset("CountID_SnapTree", np.int32, (TotSnaps, Ntrees))
        self.add_dataset("OffsetID_SnapTree", np.int32, (TotSnaps, Ntrees))
        self.add_dataset("Nunique", np.int32, (TotHalos, ))
        self.add_dataset("OffsetID_Halo", np.int32, (TotHalos, ))

        # Particle data arrays
        self.add_dataset("IDs", self.id_type, (Totunique, ))
        self.add_dataset("Pos", np.float32, (Totunique, 3))
        self.add_dataset("Vel", np.float64, (Totunique, 3))
Example #6
    def __init__(self, fname, id_bytes=8, *args):
        BinaryFile.__init__(self, fname, *args)

        # We need to know the data type used for particle IDs
        if id_bytes == 4:
            self.id_type = np.uint32
        elif id_bytes == 8:
            self.id_type = np.uint64
        else:
            raise ValueError("id_bytes must be 4 or 8")

        # Define data blocks in the subhalo_tab file
        # Header
        self.add_dataset("Ngroups", np.int32)
        self.add_dataset("TotNgroups", np.int32)
        self.add_dataset("Nids", np.int32)
        self.add_dataset("TotNids", np.int64)
        self.add_dataset("NTask", np.int32)
        self.add_dataset("SendOffset", np.int32)

        # Establish endian-ness by sanity check on number of files
        nfiles = self["NTask"][...]
        if nfiles < 1 or nfiles > 65535:
            self.enable_byteswap(True)

        # Read header
        Nids = self["Nids"][...]

        # Add dataset with particle IDs
        self.add_dataset("GroupIDs", self.id_type, (Nids, ))
Example #7
    def __init__(self, fname, *args):
        BinaryFile.__init__(self, fname, *args)

        # Header
        self.start_fortran_record(auto_byteswap=True)
        self.add_attribute("Header/NumPart_ThisFile", np.uint32, (6, ))
        self.add_attribute("Header/MassTable", np.float64, (6, ))
        self.add_attribute("Header/Time", np.float64)
        self.add_attribute("Header/Redshift", np.float64)
        self.add_attribute("Header/Flag_Sfr", np.int32)
        self.add_attribute("Header/Flag_Feedback", np.int32)
        self.add_attribute("Header/NumPart_Total", np.uint32, (6, ))
        self.add_attribute("Header/Flag_Cooling", np.int32)
        self.add_attribute("Header/NumFilesPerSnapshot", np.int32)
        self.add_attribute("Header/BoxSize", np.float64)
        self.add_attribute("Header/Omega0", np.float64)
        self.add_attribute("Header/OmegaLambda", np.float64)
        self.add_attribute("Header/HubbleParam", np.float64)
        self.add_attribute("Header/Flag_StellarAge", np.int32)
        self.add_attribute("Header/Flag_Metals", np.int32)
        self.add_attribute("Header/HashTabSize", np.int32)
        self.add_attribute("Header/fill", np.uint8, (84, ))
        self.end_fortran_record()

        # Get number of particles in this file
        n = self["Header"].attrs["NumPart_ThisFile"][1]

        # Coordinates
        self.start_fortran_record()
        self.add_dataset("PartType1/Coordinates", np.float32, (n, 3))
        self.end_fortran_record()

        # Velocities
        self.start_fortran_record()
        self.add_dataset("PartType1/Velocities", np.float32, (n, 3))
        self.end_fortran_record()

        # IDs
        self.start_fortran_record()
        self.add_dataset("PartType1/ParticleIDs", np.uint64, (n, ))
        self.end_fortran_record()

        # Range of hash cells in this file
        self.start_fortran_record()
        self.add_dataset("first_hash_cell", np.int32)
        self.add_dataset("last_hash_cell", np.int32)
        self.end_fortran_record()

        # Calculate how many hash cells we have in this file
        nhash = self["last_hash_cell"][...] - self["first_hash_cell"][...] + 1

        # Location of first particle in each cell relative to start of file
        self.start_fortran_record()
        self.add_dataset("blockid", np.int32, (nhash, ))
        self.end_fortran_record()
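A usage sketch; HashedSnapshotFile and the file path are assumptions, while the dataset names follow the definitions above:

# Hypothetical usage; HashedSnapshotFile and the path are assumptions.
s = HashedSnapshotFile("snapdir_063/snap_063.0")
pos = s["PartType1/Coordinates"][...]    # (n, 3) positions
first = s["first_hash_cell"][...]
last = s["last_hash_cell"][...]
blockid = s["blockid"][...]              # one entry per hash cell in this file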
Example #8
    def __init__(self, fname, id_bytes=8, *args):
        BinaryFile.__init__(self, fname, *args)

        # Header
        self.add_dataset("Ngroups", np.int32)
        self.add_dataset("Nids", np.int32)
        self.add_dataset("TotNgroups", np.int32)
        self.add_dataset("NFiles", np.int32)
        self.add_dataset("Nsubhalos", np.int32)

        # Establish endian-ness by sanity check on number of files
        nfiles = self["NFiles"][...]
        if nfiles < 1 or nfiles > 65535:
            self.enable_byteswap(True)

        # We also need to know the data types used for particle IDs
        if id_bytes == 4:
            self.id_type = np.uint32
        elif id_bytes == 8:
            self.id_type = np.uint64
        else:
            raise ValueError("id_bytes must be 4 or 8")

        # Read header
        ngroups = self["Ngroups"][...]
        nids = self["Nids"][...]
        nfiles = self["NFiles"][...]
        nsubgroups = self["Nsubhalos"][...]

        # FoF group info
        self.add_dataset("NsubPerHalo", np.int32, (ngroups, ))
        self.add_dataset("FirstSubOfHalo", np.int32, (ngroups, ))

        # Subhalo info
        self.add_dataset("SubLen", np.int32, (nsubgroups, ))
        self.add_dataset("SubOffset", np.int32, (nsubgroups, ))
        self.add_dataset("SubParentHalo", np.int32, (nsubgroups, ))

        # Spherical overdensity masses and radii
        self.add_dataset("Halo_M_Mean200", np.float32, (ngroups, ))
        self.add_dataset("Halo_R_Mean200", np.float32, (ngroups, ))
        self.add_dataset("Halo_M_Crit200", np.float32, (ngroups, ))
        self.add_dataset("Halo_R_Crit200", np.float32, (ngroups, ))
        self.add_dataset("Halo_M_TopHat200", np.float32, (ngroups, ))
        self.add_dataset("Halo_R_TopHat200", np.float32, (ngroups, ))

        # Subhalo properties
        self.add_dataset("SubPos", np.float32, (nsubgroups, 3))
        self.add_dataset("SubVel", np.float32, (nsubgroups, 3))
        self.add_dataset("SubVelDisp", np.float32, (nsubgroups, ))
        self.add_dataset("SubVmax", np.float32, (nsubgroups, ))
        self.add_dataset("SubSpin", np.float32, (nsubgroups, 3))
        self.add_dataset("SubMostBoundID", self.id_type, (nsubgroups, ))
        self.add_dataset("SubHalfMass", np.float32, (nsubgroups, ))
Example #9
    def __init__(self, fname, id_bytes=8, float_bytes=4, *args):
        BinaryFile.__init__(self, fname, *args)

        # We need to know the data types used for particle IDs
        # and floating point subhalo properties (again, this can't be read from the file).
        if id_bytes == 4:
            self.id_type = np.uint32
        elif id_bytes == 8:
            self.id_type = np.uint64
        else:
            raise ValueError("id_bytes must be 4 or 8")
        if float_bytes == 4:
            self.float_type = np.float32
        elif float_bytes == 8:
            self.float_type = np.float64
        else:
            raise ValueError("float_bytes must be 4 or 8")

        # Define data blocks in the group_tab file
        # Header
        self.add_dataset("Ngroups", np.int32)
        self.add_dataset("TotNgroups", np.int32)
        self.add_dataset("Nids", np.int32)
        self.add_dataset("TotNids", np.int64)
        self.add_dataset("NTask", np.int32)

        # Establish endian-ness by sanity check on number of files
        nfiles = self["NTask"][...]
        if nfiles < 1 or nfiles > 65535:
            self.enable_byteswap(True)

        # FoF group information
        ngroups = self["Ngroups"][...]
        self.add_dataset("GroupLen", np.int32, (ngroups, ))
        self.add_dataset(
            "GroupOffset", np.uint32,
            (ngroups, ))  # Assume uint32 to avoid overflow in AqA1
        self.add_dataset("GroupMass", self.float_type, (ngroups, ))
        self.add_dataset("GroupCofM", self.float_type, (ngroups, 3))
        self.add_dataset("GroupVel", self.float_type, (ngroups, 3))
        self.add_dataset("GroupLenType", np.int32, (ngroups, 6))
        self.add_dataset("GroupMassType", self.float_type, (ngroups, 6))
Example #10
    def __init__(self, fname, *args):
        BinaryFile.__init__(self, fname, *args)

        # Header
        self.add_dataset("Ngroups", np.int32)
        self.add_dataset("Nids", np.int32)
        self.add_dataset("TotNgroups", np.int32)
        self.add_dataset("NTask", np.int32)

        # Establish endian-ness by sanity check on number of files
        nfiles = self["NTask"][...]
        if nfiles < 1 or nfiles > 65535:
            self.enable_byteswap(True)

        # FoF group info
        ngroups = self["Ngroups"][...]
        self.add_dataset("GroupLen", np.int32, (ngroups, ))
        self.add_dataset("GroupOffset", np.int32, (ngroups, ))
        self.add_dataset("GroupMinLen", np.int32)
        minlen = self["GroupMinLen"][...]
        self.add_dataset("Count", np.int32, (minlen, ))
Example #11
    def __init__(self, fname, id_bytes=8, SAVE_MASS_TAB=False, *args):
        BinaryFile.__init__(self, fname, *args)

        # We need to know the data type used for particle IDs
        if id_bytes == 4:
            self.id_type = np.uint32
        elif id_bytes == 8:
            self.id_type = np.uint64
        else:
            raise ValueError("id_bytes must be 4 or 8")

        # Define numpy record type corresponding to the halo_data struct
        fields = [("Descendant", np.int32), ("FirstProgenitor", np.int32),
                  ("NextProgenitor", np.int32),
                  ("FirstHaloInFOFgroup", np.int32),
                  ("NextHaloInFOFgroup", np.int32), ("Len", np.int32),
                  ("M_Mean200", np.float32), ("M_Crit200", np.float32),
                  ("M_TopHat", np.float32), ("Pos", np.float32, (3, )),
                  ("Vel", np.float32, (3, )), ("VelDisp", np.float32),
                  ("VMax", np.float32), ("Spin", np.float32, (3, )),
                  ("MostBoundID", self.id_type), ("SnapNum", np.int32),
                  ("FileNr", np.int32), ("SubhaloIndex", np.int32),
                  ("SubhalohalfMass", np.float32)]
        if SAVE_MASS_TAB:
            fields.append(("SubMassTab", np.float32, (6, )))
        self.halo_data = np.dtype(fields)

        # File header
        self.add_dataset("Ntrees", np.int32)
        self.add_dataset("Nhalos", np.int32)
        Ntrees = self["Ntrees"][...]
        Nhalos = self["Nhalos"][...]

        # Number of halos per tree
        self.add_dataset("NPerTree", np.int32, (Ntrees, ))

        # Array of halos
        self.add_dataset("HaloList", self.halo_data, (Nhalos, ))
Example #12
    def __init__(self, fname, *args):
        BinaryFile.__init__(self, fname, *args)

        # Read header and check if we need byte swapping
        irec = self.read_and_skip(np.int32)
        self.enable_byteswap(irec != 8)
        self.add_dataset("NTask", np.int32)
        self.add_dataset("BoxSize", np.float64)
        self.add_dataset("nn", np.int32)
        self.add_dataset("isw_slabs_per_task", np.int32)
        irec = self.read_and_skip(np.int32)

        # Set up the grid dataset
        nn = self["nn"][()]
        isw_slabs_per_task = self["isw_slabs_per_task"][()]
        irec = self.read_and_skip(np.int32)
        if irec != 4 * isw_slabs_per_task * nn * nn:
            raise IOError(
                "Start of grid record has wrong length in density field file")
        self.add_dataset("grid", np.float32, (isw_slabs_per_task, nn, nn))
        irec = self.read_and_skip(np.int32)
        if irec != 4 * isw_slabs_per_task * nn * nn:
            raise IOError(
                "End of grid record has wrong length in density field file")
Example #13
    def __init__(self,
                 fname,
                 SO_VEL_DISPERSIONS=False,
                 SO_BAR_INFO=False,
                 WRITE_SUB_IN_SNAP_FORMAT=False,
                 id_bytes=8,
                 float_bytes=4,
                 *args):
        BinaryFile.__init__(self, fname, *args)

        # Haven't implemented these
        if WRITE_SUB_IN_SNAP_FORMAT:
            raise NotImplementedError(
                "Subfind outputs in type 2 binary snapshot format are not implemented"
            )
        if SO_BAR_INFO:
            raise NotImplementedError(
                "Subfind outputs with SO_BAR_INFO set are not implemented")

        # These parameters, which correspond to macros in Gadget's Config.sh,
        # modify the file format. The file cannot be read correctly unless these
        # are known - their values are not stored in the output.
        self.WRITE_SUB_IN_SNAP_FORMAT = WRITE_SUB_IN_SNAP_FORMAT
        self.SO_VEL_DISPERSIONS = SO_VEL_DISPERSIONS
        self.SO_BAR_INFO = SO_BAR_INFO

        # We also need to know the data types used for particle IDs
        # and floating point subhalo properties (again, this can't be read from the file).
        if id_bytes == 4:
            self.id_type = np.uint32
        elif id_bytes == 8:
            self.id_type = np.uint64
        else:
            raise ValueError("id_bytes must be 4 or 8")
        if float_bytes == 4:
            self.float_type = np.float32
        elif float_bytes == 8:
            self.float_type = np.float64
        else:
            raise ValueError("float_bytes must be 4 or 8")

        # Define data blocks in the subhalo_tab file
        # Header
        self.add_dataset("Ngroups", np.int32)
        self.add_dataset("TotNgroups", np.int32)
        self.add_dataset("Nids", np.int32)
        self.add_dataset("TotNids", np.int64)
        self.add_dataset("NTask", np.int32)
        self.add_dataset("Nsubgroups", np.int32)
        self.add_dataset("TotNsubgroups", np.int32)

        # Establish endian-ness by sanity check on number of files
        nfiles = self["NTask"][...]
        if nfiles < 1 or nfiles > 65535:
            self.enable_byteswap(True)

        # FoF group information
        ngroups = self["Ngroups"][...]
        self.add_dataset("GroupLen", np.int32, (ngroups, ))
        self.add_dataset(
            "GroupOffset", np.uint32,
            (ngroups, ))  # Assume uint32 to avoid overflow in AqA1
        self.add_dataset("GroupMass", self.float_type, (ngroups, ))
        self.add_dataset("GroupPos", self.float_type, (ngroups, 3))
        self.add_dataset("Halo_M_Mean200", self.float_type, (ngroups, ))
        self.add_dataset("Halo_R_Mean200", self.float_type, (ngroups, ))
        self.add_dataset("Halo_M_Crit200", self.float_type, (ngroups, ))
        self.add_dataset("Halo_R_Crit200", self.float_type, (ngroups, ))
        self.add_dataset("Halo_M_TopHat200", self.float_type, (ngroups, ))
        self.add_dataset("Halo_R_TopHat200", self.float_type, (ngroups, ))

        # Optional extra FoF fields
        if SO_VEL_DISPERSIONS:
            self.add_dataset("VelDisp_Mean200", self.float_type, (ngroups, ))
            self.add_dataset("VelDisp_Crit200", self.float_type, (ngroups, ))
            self.add_dataset("VelDisp_TopHat200", self.float_type, (ngroups, ))

        # FoF contamination info
        self.add_dataset("ContaminationLen", np.int32, (ngroups, ))
        self.add_dataset("ContaminationMass", self.float_type, (ngroups, ))

        # Count and offset to subhalos in each FoF group
        self.add_dataset("Nsubs", np.int32, (ngroups, ))
        self.add_dataset("FirstSub", np.int32, (ngroups, ))

        # Subhalo properties
        nsubgroups = self["Nsubgroups"][...]
        self.add_dataset("SubLen", np.int32, (nsubgroups, ))
        self.add_dataset("SubOffset", np.int32, (nsubgroups, ))
        self.add_dataset("SubParent", np.int32, (nsubgroups, ))
        self.add_dataset("SubMass", self.float_type, (nsubgroups, ))
        self.add_dataset("SubPos", self.float_type, (nsubgroups, 3))
        self.add_dataset("SubVel", self.float_type, (nsubgroups, 3))
        self.add_dataset("SubCofM", self.float_type, (nsubgroups, 3))
        self.add_dataset("SubSpin", self.float_type, (nsubgroups, 3))
        self.add_dataset("SubVelDisp", self.float_type, (nsubgroups, ))
        self.add_dataset("SubVmax", self.float_type, (nsubgroups, ))
        self.add_dataset("SubRVmax", self.float_type, (nsubgroups, ))
        self.add_dataset("SubHalfMass", self.float_type, (nsubgroups, ))
        self.add_dataset("SubMostBoundID", self.id_type, (nsubgroups, ))
        self.add_dataset("SubGrNr", np.int32, (nsubgroups, ))
Example #14
    def __init__(self, fname, ikeepfof=0, 
                 GASON=False, STARON=False, BHON=False, HIGHRES=False, 
                 *args):
        BinaryFile.__init__(self, fname, *args)
        
        # Define header
        self.add_attribute("Header/ThisTask", np.int32)
        self.add_attribute("Header/NProcs",   np.int32)
        self.add_attribute("Header/ng",       np.uint64) # "unsigned long" in code - assuming 64 bit system here
        self.add_attribute("Header/ngtot",    np.uint64)
        self.add_attribute("Header/hsize",    np.int)
        hsize = self["Header"].attrs["hsize"]
        for i in range(hsize):
            self.add_attribute("Header/entry%03d" % i, np.dtype("S40"))

        # Define type to store each object
        type_list = [
            ("haloid",  np.uint64),
            ("ibound",  np.uint64),
            ("hostid",  np.uint64),
            ("numsubs", np.uint64),
            ("num",     np.uint64),
            ("stype",   np.uint32),
            ]
        if ikeepfof != 0:
            type_list += [("directhostid", np.uint64),
                          ("hostfofid",    np.uint64)]
        type_list += [
            ("gMvir",  np.float64),
            ("gcm",    np.float64, (3,)),
            ("gpos",   np.float64, (3,)),
            ("gcmvel", np.float64, (3,)),
            ("gvel",   np.float64, (3,)),
            ("gmass",  np.float64),
            ("gMFOF",  np.float64),
            ("gM200m", np.float64),
            ("gM200c", np.float64),
            ("gMvir_again",  np.float64),
            ("Efrac",  np.float64),
            ("gRvir",  np.float64),
            ("gsize",  np.float64),
            ("gR200m", np.float64),
            ("gR200c", np.float64),
            ("gRvir_again",  np.float64),
            ("gRhalfmass", np.float64),
            ("gRmaxvel",   np.float64),
            ("gmaxvel",    np.float64),
            ("gsigma_v",   np.float64),
            ("gveldisp",   np.float64, (3,3)),
            ("glambda_B",  np.float64),
            ("gJ",         np.float64, (3,)),
            ("gq",         np.float64),
            ("gs",         np.float64),
            ("geigvec",    np.float64, (3,3)),
            ("cNFW",       np.float64),
            ("Krot",       np.float64),
            ("T",          np.float64),
            ("Pot",        np.float64),
            ("RV_sigma_v", np.float64),
            ("RV_veldisp", np.float64, (3,3)),
            ("RV_lambda_B", np.float64),
            ("RV_J",       np.float64, (3,)),
            ("RV_q",       np.float64),
            ("RV_s",       np.float64),
            ("RV_eigvec",  np.float64, (3,3)),
            ]

        if GASON or STARON or BHON or HIGHRES:
            raise NotImplementedError("Runs with GASON?BHON/STARON/HIGHRES not supported!")

        halo_t = np.dtype(type_list, align=False)
        self.add_dataset("Halo", halo_t, (self["Header"].attrs["ng"],))
Example #15
    def __init__(self,
                 fname,
                 SO_VEL_DISPERSIONS=False,
                 SUB_SHAPES=False,
                 id_bytes=4,
                 float_bytes=4,
                 *args):
        BinaryFile.__init__(self, fname, *args)

        # These parameters, which correspond to macros in Gadget's Config.sh,
        # modify the file format. The file cannot be read correctly unless these
        # are known - their values are not stored in the output.
        self.SO_VEL_DISPERSIONS = SO_VEL_DISPERSIONS
        self.SUB_SHAPES = SUB_SHAPES

        # We also need to know the data types used for particle IDs
        # and floating point subhalo properties (again, this can't be read from the file).
        if id_bytes == 4:
            self.id_type = np.uint32
        elif id_bytes == 8:
            self.id_type = np.uint64
        else:
            raise ValueError("id_bytes must be 4 or 8")
        if float_bytes == 4:
            self.float_type = np.float32
        elif float_bytes == 8:
            self.float_type = np.float64
        else:
            raise ValueError("float_bytes must be 4 or 8")

        # Define data blocks in the subhalo_tab file
        # Header
        self.add_dataset("Ngroups", np.int32)
        self.add_dataset("TotNgroups", np.int64)
        self.add_dataset("Nids", np.int32)
        self.add_dataset("TotNids", np.int64)
        self.add_dataset("NTask", np.int32)
        self.add_dataset("Nsubgroups", np.int32)
        self.add_dataset("TotNsubgroups", np.int64)

        # Establish endian-ness by sanity check on number of files
        nfiles = self["NTask"][...]
        if nfiles < 1 or nfiles > 65535:
            self.enable_byteswap(True)

        # FoF group information
        ngroups = self["Ngroups"][...]
        self.add_dataset("GroupLen", np.int32, (ngroups, ))
        self.add_dataset("GroupOffset", np.int32, (ngroups, ))
        self.add_dataset("GroupNr", np.int64, (ngroups, ))
        self.add_dataset("GroupCofM", self.float_type, (ngroups, 3))
        self.add_dataset("GroupVel", self.float_type, (ngroups, 3))
        self.add_dataset("GroupPos", self.float_type, (ngroups, 3))
        self.add_dataset("Halo_M_Mean200", self.float_type, (ngroups, ))
        self.add_dataset("Halo_M_Crit200", self.float_type, (ngroups, ))
        self.add_dataset("Halo_M_TopHat200", self.float_type, (ngroups, ))
        self.add_dataset("GroupVelDisp", self.float_type, (ngroups, ))

        # Optional extra FoF fields
        if SO_VEL_DISPERSIONS:
            self.add_dataset("VelDisp_Mean200", self.float_type, (ngroups, ))
            self.add_dataset("VelDisp_Crit200", self.float_type, (ngroups, ))
            self.add_dataset("VelDisp_TopHat200", self.float_type, (ngroups, ))

        # Count and offset to subhalos in each FoF group
        self.add_dataset("Nsubs", np.int32, (ngroups, ))
        self.add_dataset("FirstSub", np.int32, (ngroups, ))

        # Subhalo properties
        nsubgroups = self["Nsubgroups"][...]
        self.add_dataset("SubLen", np.int32, (nsubgroups, ))
        self.add_dataset("SubOffset", np.int32, (nsubgroups, ))
        self.add_dataset("SubGrNr", np.int64, (nsubgroups, ))
        self.add_dataset("SubNr", np.int64, (nsubgroups, ))
        self.add_dataset("SubPos", self.float_type, (nsubgroups, 3))
        self.add_dataset("SubVel", self.float_type, (nsubgroups, 3))
        self.add_dataset("SubCofM", self.float_type, (nsubgroups, 3))
        self.add_dataset("SubSpin", self.float_type, (nsubgroups, 3))
        self.add_dataset("SubVelDisp", self.float_type, (nsubgroups, ))
        self.add_dataset("SubVmax", self.float_type, (nsubgroups, ))
        self.add_dataset("SubRVmax", self.float_type, (nsubgroups, ))
        self.add_dataset("SubHalfMass", self.float_type, (nsubgroups, ))
        if SUB_SHAPES:
            self.add_dataset("SubShape", self.float_type, (nsubgroups, 6))
        self.add_dataset("SubBindingEnergy", self.float_type, (nsubgroups, ))
        self.add_dataset("SubPotentialEnergy", self.float_type, (nsubgroups, ))
        self.add_dataset("SubProfile", self.float_type, (nsubgroups, 9))
Example #16
    def __init__(self, fname, id_bytes=8, *args):
        BinaryFile.__init__(self, fname, *args)

        # We need to know the data types used for particle IDs
        if id_bytes == 4:
            self.id_type = np.uint32
        elif id_bytes == 8:
            self.id_type = np.uint64
        else:
            raise ValueError("id_bytes must be 4 or 8")

        # Read file header and determine endian-ness
        self.start_fortran_record(auto_byteswap=True)
        self.add_dataset("Ngroups",       np.int32)
        self.add_dataset("Nsubgroups",    np.int32)
        self.add_dataset("Nids",          np.int32)
        self.add_dataset("TotNgroups",    np.int32)
        self.add_dataset("TotNsubgroups", np.int32)
        self.add_dataset("TotNids",       np.int32)
        self.add_dataset("NFiles",        np.int32)
        self.add_dataset("padding1",      np.int32)
        self.add_dataset("Time",        np.float64)
        self.add_dataset("Redshift",    np.float64)
        self.add_dataset("HubbleParam", np.float64)
        self.add_dataset("BoxSize",     np.float64)
        self.add_dataset("Omega0",      np.float64)
        self.add_dataset("OmegaLambda", np.float64)
        self.add_dataset("flag_dp",     np.int32)
        self.add_dataset("padding2",      np.int32)
        self.end_fortran_record()

        # Check if this output uses double precision floats
        flag_dp = self["flag_dp"][...]
        if flag_dp == 0:
            self.float_type = np.float32
        else:
            self.float_type = np.float64

        # Data blocks for FoF groups
        # These are Fortran records which are only present if ngroups > 0.
        ngroups = self["Ngroups"][...]
        if ngroups > 0:
            self.add_record("GroupLen",          np.int32,        (ngroups,))
            self.add_record("GroupMass",         self.float_type, (ngroups,))
            self.add_record("GroupPos",          self.float_type, (ngroups,3))
            self.add_record("GroupVel",          self.float_type, (ngroups,3))
            self.add_record("GroupLenType",      np.int32,        (ngroups,6))
            self.add_record("GroupMassType",     self.float_type, (ngroups,6))
            self.add_record("Halo_M_Mean200",    self.float_type, (ngroups,))
            self.add_record("Halo_R_Mean200",    self.float_type, (ngroups,))
            self.add_record("Halo_M_Crit200",    self.float_type, (ngroups,))
            self.add_record("Halo_R_Crit200",    self.float_type, (ngroups,))
            self.add_record("Halo_M_TopHat200",  self.float_type, (ngroups,))
            self.add_record("Halo_R_TopHat200",  self.float_type, (ngroups,))
            self.add_record("Nsubs",             np.int32,        (ngroups,))
            self.add_record("FirstSub",          np.int32,        (ngroups,))
            
        # Data blocks for Subfind groups
        # These are Fortran records which are only present if nsubgroups > 0.
        nsubgroups = self["Nsubgroups"][...]
        if nsubgroups > 0:
            self.add_record("SubLen",             np.int32,        (nsubgroups,))
            self.add_record("SubMass",            self.float_type, (nsubgroups,))
            self.add_record("SubPos",             self.float_type, (nsubgroups,3))
            self.add_record("SubVel",             self.float_type, (nsubgroups,3))
            self.add_record("SubLenType",         np.int32,        (nsubgroups,6))
            self.add_record("SubMassType",        self.float_type, (nsubgroups,6))
            self.add_record("SubCofM",            self.float_type, (nsubgroups,3))
            self.add_record("SubSpin",            self.float_type, (nsubgroups,3))
            self.add_record("SubVelDisp",         self.float_type, (nsubgroups,))
            self.add_record("SubVmax",            self.float_type, (nsubgroups,))
            self.add_record("SubRVmax",           self.float_type, (nsubgroups,))
            self.add_record("SubHalfMassRad",     self.float_type, (nsubgroups,))
            self.add_record("SubHalfMassRadType", self.float_type, (nsubgroups,6))
            self.add_record("SubMassInRad",       self.float_type, (nsubgroups,))
            self.add_record("SubMassInRadType",   self.float_type, (nsubgroups,6))
            self.add_record("SubMostBoundID",     self.id_type,    (nsubgroups,))
            self.add_record("SubGrNr",            np.int32,        (nsubgroups,))
            self.add_record("SubParent",          np.int32,        (nsubgroups,))
Example #17
    def __init__(self, fname, extra=None):
        BinaryFile.__init__(self, fname)

        # Read the header record marker and establish endian-ness
        irec = self.read_and_skip(np.uint32)
        if irec == 256:
            self.enable_byteswap(False)
        elif irec == 65536:
            self.enable_byteswap(True)
        else:
            raise IOError("Header record length is incorrect!")

        # Define header blocks
        self.add_attribute("Header/NumPart_ThisFile", np.int32, (6, ))
        self.add_attribute("Header/MassTable", np.float64, (6, ))
        self.add_attribute("Header/Time", np.float64)
        self.add_attribute("Header/Redshift", np.float64)
        self.add_attribute("Header/Flag_Sfr", np.int32)
        self.add_attribute("Header/Flag_Feedback", np.int32)
        self.add_attribute("Header/NumPart_Total", np.uint32, (6, ))
        self.add_attribute("Header/Flag_Cooling", np.int32)
        self.add_attribute("Header/NumFilesPerSnapshot", np.int32)
        self.add_attribute("Header/BoxSize", np.float64)
        self.add_attribute("Header/Omega0", np.float64)
        self.add_attribute("Header/OmegaLambda", np.float64)
        self.add_attribute("Header/HubbleParam", np.float64)
        self.add_attribute("Header/Flag_StellarAge", np.int32)
        self.add_attribute("Header/Flag_Metals", np.int32)
        self.add_attribute("Header/NumPart_Total_HighWord", np.uint32, (6, ))
        self.skip_bytes(256 + 4 - self.offset)

        # Get total number of particles in this file
        npart_type = self["Header"].attrs["NumPart_ThisFile"][...]
        masstable = self["Header"].attrs["MassTable"][...]
        npart = sum(npart_type)

        # Check end of header marker
        irec = self.read_and_skip(np.uint32)
        if irec != 256:
            raise IOError("Header end of record marker is incorrect!")

        # Make full list of datasets to read
        all_datasets = (
            ("Coordinates", "float", (3, ), (True, ) * 6),
            ("Velocities", "float", (3, ), (True, ) * 6),
            ("ParticleIDs", "int", (), (True, ) * 6),
            ("Masses", "float", (), masstable == 0),
            ("InternalEnergy", "float", (), (True, False, False, False, False,
                                             False)),
            ("Density", "float", (), (True, False, False, False, False,
                                      False)),
            ("SmoothingLength", "float", (), (True, False, False, False, False,
                                              False)),
        )
        # Add any user specified fields
        if extra is not None:
            all_datasets += extra

        # Determine what datasets are present in this file
        count_records = 0
        for (name, typestr, shape, ptypes) in all_datasets:

            # Calculate number of particles we expect in this dataset
                nextra = sum(npart_type[np.asarray(ptypes, dtype=bool)])
            if nextra > 0:

                # Calculate number of numbers per particle
                n_per_part = 1
                for s in shape:
                    n_per_part *= s

                # Check if there's another record in the file
                try:
                    irec = self.read_and_skip(np.uint32)
                except IOError:
                    if count_records < 3:
                        # pos, vel, ids should always be present
                        raise
                    else:
                        break
                else:
                    count_records += 1

                # Determine bytes per quantity
                nbytes = np.int64(irec) // (n_per_part * nextra)
                if (nbytes != 4 and
                        nbytes != 8) or nbytes * n_per_part * nextra != irec:
                    raise IOError("%s record has unexpected length!" % name)

                # Determine data type for this record
                if typestr == "int":
                    if nbytes == 4:
                        dtype = np.int32
                    else:
                        dtype = np.int64
                elif typestr == "float":
                    if nbytes == 4:
                        dtype = np.float32
                    else:
                        dtype = np.float64
                else:
                    raise ValueError(
                        "typestr parameter should be 'int' or 'float'")

                # Loop over particle types and add datasets
                for i in range(6):
                    if ptypes[i] and npart_type[i] > 0:
                        full_shape = (npart_type[i], ) + tuple(shape)
                        self.add_dataset("PartType%i/%s" % (i, name), dtype,
                                         full_shape)

                # Read end of record marker
                irec = self.read_and_skip(np.uint32)
                if irec != n_per_part * np.dtype(dtype).itemsize * nextra:
                    raise IOError("%s end of record marker is incorrect!" %
                                  name)