Example 1
 def fill(self, content, fields, selector):
     # Here we get a copy of the file, which we skip through and read the
     # bits we want.
     oct_handler = self.oct_handler
     all_fields = self.domain.ds.index.fluid_field_list
     fields = [f for ft, f in fields]
     tr = {}
     cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
     levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
         selector, self.domain_id, cell_count)
     for field in fields:
         tr[field] = np.zeros(cell_count, 'float64')
     for level, offset in enumerate(self.domain.hydro_offset):
         if offset == -1: continue
         content.seek(offset)
         nc = self.domain.level_count[level]
         temp = {}
         for field in all_fields:
             temp[field] = np.empty((nc, 8), dtype="float64")
         for i in range(8):
             for field in all_fields:
                 if field not in fields:
                     fpu.skip(content)
                 else:
                     temp[field][:, i] = fpu.read_vector(content,
                                                         'd')  # i-th cell
         oct_handler.fill_level(level, levels, cell_inds, file_inds, tr,
                                temp)
     return tr
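
Throughout these examples, fpu is yt's fortran_utils helper module and the files are Fortran unformatted records. As a minimal standalone sketch of what fpu.read_vector and fpu.skip evidently do here, assuming the usual 4-byte record-length markers (the real helpers handle more cases):

import struct
import numpy as np

def read_record(f, dtype, endian="="):
    # A Fortran unformatted record is framed by two identical 4-byte
    # integers, each holding the payload size in bytes.
    (n1,) = struct.unpack(endian + "i", f.read(4))
    payload = f.read(n1)
    (n2,) = struct.unpack(endian + "i", f.read(4))
    assert n1 == n2, "corrupt record framing"
    return np.frombuffer(payload, dtype=np.dtype(dtype).newbyteorder(endian))

def skip_record(f, n=1, endian="="):
    # Skip n records without decoding their payloads.
    for _ in range(n):
        (size,) = struct.unpack(endian + "i", f.read(4))
        f.seek(size + 4, 1)  # payload plus the trailing length marker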
Example 2
    def _count_art_octs(self, f, offset, MinLev, MaxLevelNow):
        level_oct_offsets = [0, ]
        level_child_offsets = [0, ]
        f.seek(offset)
        nchild, ntot = 8, 0
        Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
        iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
        iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
        for Lev in range(MinLev + 1, MaxLevelNow+1):
            level_oct_offsets.append(f.tell())

            # Get the info for this level, skip the rest
            # print "Reading oct tree data for level", Lev
            # print 'offset:',f.tell()
            Level[Lev], iNOLL[Lev], iHOLL[Lev] = fpu.read_vector(f, 'i', '>')
            # print 'Level %i : '%Lev, iNOLL
            # print 'offset after level record:',f.tell()
            nLevel = iNOLL[Lev]
            ntot = ntot + nLevel

            # Skip all the oct hierarchy data
            ns = fpu.peek_record_size(f, endian='>')
            size = struct.calcsize('>i') + ns + struct.calcsize('>i')
            f.seek(f.tell()+size * nLevel)

            level_child_offsets.append(f.tell())
            # Skip the child vars data
            ns = fpu.peek_record_size(f, endian='>')
            size = struct.calcsize('>i') + ns + struct.calcsize('>i')
            f.seek(f.tell()+size * nLevel*nchild)

            # find nhydrovars
            nhydrovars = 8+2
        f.seek(offset)
        return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
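
The seek arithmetic above works because every record in a level block has the same size: fpu.peek_record_size reads the leading length marker without consuming the record, and struct.calcsize('>i') + ns + struct.calcsize('>i') is the full on-disk footprint of one record. A standalone sketch of the same bulk skip, under the same fixed-record-size assumption:

import struct

def skip_uniform_records(f, count, endian=">"):
    # Peek at the first record's payload size, then hop over `count`
    # equally sized records with a single seek.
    pos = f.tell()
    (ns,) = struct.unpack(endian + "i", f.read(4))
    f.seek(pos)  # rewind: we only peeked at the length marker
    record = struct.calcsize(endian + "i") + ns + struct.calcsize(endian + "i")
    f.seek(record * count, 1)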
Example 3
    def fill(self, content, fields, selector, file_handler):
        # Here we get a copy of the file, which we skip through and read the
        # bits we want.
        oct_handler = self.oct_handler
        all_fields = [f for ft, f in file_handler.field_list]
        fields = [f for ft, f in fields]
        tr = {}
        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
            selector, self.domain_id, cell_count)
        # Initializing data container
        for field in fields:
            tr[field] = np.zeros(cell_count, 'float64')

        # Loop over levels
        for level, offset in enumerate(file_handler.offset):
            if offset == -1: continue
            content.seek(offset)
            nc = file_handler.level_count[level]
            tmp = {}
            # Initialize temporary data container for I/O
            for field in all_fields:
                tmp[field] = np.empty((nc, 8), dtype="float64")
            for i in range(8):
                # Read the selected fields
                for field in all_fields:
                    if field not in fields:
                        fpu.skip(content)
                    else:
                        tmp[field][:, i] = fpu.read_vector(content,
                                                           'd')  # i-th cell

            oct_handler.fill_level(level, levels, cell_inds, file_inds, tr,
                                   tmp)
        return tr
Example 4
 def fill(self, content, fields, selector):
     # Here we get a copy of the file, which we skip through and read the
     # bits we want.
     oct_handler = self.oct_handler
     all_fields = self.domain.ds.index.fluid_field_list
     fields = [f for ft, f in fields]
     tr = {}
     cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
     levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
         selector, self.domain_id, cell_count)
     for field in fields:
         tr[field] = np.zeros(cell_count, 'float64')
     for level, offset in enumerate(self.domain.hydro_offset):
         if offset == -1: continue
         content.seek(offset)
         nc = self.domain.level_count[level]
         temp = {}
         for field in all_fields:
             temp[field] = np.empty((nc, 8), dtype="float64")
         for i in range(8):
             for field in all_fields:
                 if field not in fields:
                     fpu.skip(content)
                 else:
                     temp[field][:, i] = fpu.read_vector(content, 'd')  # i-th cell
         oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, temp)
     return tr
Example 5
File: io.py Project: victorgabr/yt
 def _read_particle_subset(self, subset, fields):
     f = open(subset.domain.part_fn, "rb")
     foffsets = subset.domain.particle_field_offsets
     tr = {}
     # We do *all* conversion into boxlen here.
     # This means that no other conversions need to be applied to convert
     # positions into the same domain as the octs themselves.
     for field in sorted(fields, key=lambda a: foffsets[a]):
         f.seek(foffsets[field])
         dt = subset.domain.particle_field_types[field]
         tr[field] = fpu.read_vector(f, dt)
         if field[1].startswith("particle_position"):
             np.divide(tr[field], subset.domain.ds["boxlen"], tr[field])
         cosmo = subset.domain.ds.cosmological_simulation
         if cosmo == 1 and field[1] == "particle_age":
             tf = subset.domain.ds.t_frw
             dtau = subset.domain.ds.dtau
             tauf = subset.domain.ds.tau_frw
             tsim = subset.domain.ds.time_simu
             h100 = subset.domain.ds.hubble_constant
             nOver2 = subset.domain.ds.n_frw / 2
             t_scale = 1. / (h100 * 100 * cm_per_km /
                             cm_per_mpc) / subset.domain.ds['unit_t']
             ages = tr[field]
             tr[field] = get_ramses_ages(tf, tauf, dtau, tsim, t_scale,
                                         ages, nOver2, len(ages))
     return tr
Example 6
    def _read_particle_header(self):
        if not os.path.exists(self.part_fn):
            self.local_particle_count = 0
            self.particle_field_offsets = {}
            return
        f = open(self.part_fn, "rb")
        f.seek(0, os.SEEK_END)
        flen = f.tell()
        f.seek(0)
        hvals = {}
        attrs = ( ('ncpu', 1, 'I'),
                  ('ndim', 1, 'I'),
                  ('npart', 1, 'I') )
        hvals.update(fpu.read_attrs(f, attrs))
        fpu.read_vector(f, 'I')

        attrs = ( ('nstar_tot', 1, 'I'),
                  ('mstar_tot', 1, 'd'),
                  ('mstar_lost', 1, 'd'),
                  ('nsink', 1, 'I') )
        hvals.update(fpu.read_attrs(f, attrs))
        self.particle_header = hvals
        self.local_particle_count = hvals['npart']
        particle_fields = [
                ("particle_position_x", "d"),
                ("particle_position_y", "d"),
                ("particle_position_z", "d"),
                ("particle_velocity_x", "d"),
                ("particle_velocity_y", "d"),
                ("particle_velocity_z", "d"),
                ("particle_mass", "d"),
                ("particle_identifier", "I"),
                ("particle_refinement_level", "I")]
        if hvals["nstar_tot"] > 0:
            particle_fields += [("particle_age", "d"),
                                ("particle_metallicity", "d")]

        field_offsets = {}
        _pfields = {}
        for field, vtype in particle_fields:
            if f.tell() >= flen: break
            field_offsets["io", field] = f.tell()
            _pfields["io", field] = vtype
            fpu.skip(f, 1)
        self.particle_field_offsets = field_offsets
        self.particle_field_types = _pfields
        self.particle_types = self.particle_types_raw = ("io",)
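
The loop above builds an offset index: one pass over the file records where each field's record starts, so that later reads (as in _read_particle_subset in Example 5) can seek straight to a field. A minimal standalone sketch of the same idea, assuming one Fortran record per field:

import struct

def index_field_offsets(f, field_names, file_len, endian="="):
    # Remember where each field's record starts, then jump over the
    # payload and its trailing length marker without decoding it.
    offsets = {}
    for name in field_names:
        if f.tell() >= file_len:
            break  # file holds fewer records than candidate fields
        offsets[name] = f.tell()
        (size,) = struct.unpack(endian + "i", f.read(4))
        f.seek(size + 4, 1)
    return offsets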
Example 8
    def create_cooling_fields(self):
        num = os.path.basename(
            self.ds.parameter_filename).split(".")[0].split("_")[1]
        filename = "%s/cooling_%05i.out" % (os.path.dirname(
            self.ds.parameter_filename), int(num))

        if not os.path.exists(filename): return

        def _create_field(name, interp_object):
            def _func(field, data):
                shape = data["temperature"].shape
                d = {
                    'lognH': np.log10(_X * data["density"] / mh).ravel(),
                    'logT': np.log10(data["temperature"]).ravel()
                }
                rv = 10**interp_object(d).reshape(shape)
                # Return in 1/volume units, consistent with add_field below
                return data.ds.arr(rv, 'code_length**-3')

            self.add_field(name=name,
                           sampling_type="cell",
                           function=_func,
                           units="code_length**-3")

        avals = {}
        tvals = {}
        with open(filename, "rb") as f:
            n1, n2 = fpu.read_vector(f, 'i')
            n = n1 * n2
            for ax in _cool_axes:
                avals[ax] = fpu.read_vector(f, 'd')
            for tname in _cool_arrs:
                var = fpu.read_vector(f, 'd')
                if var.size == n1 * n2:
                    tvals[tname] = var.reshape((n1, n2), order='F')
                else:
                    var = var.reshape((n1, n2, var.size // (n1 * n2)),
                                      order='F')
                    for i in range(var.shape[-1]):
                        tvals[_cool_species[i]] = var[:, :, i]

        for n in tvals:
            interp = BilinearFieldInterpolator(tvals[n],
                                               (avals["lognH"], avals["logT"]),
                                               ["lognH", "logT"],
                                               truncate=True)
            _create_field(("gas", n), interp)
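
BilinearFieldInterpolator performs a bilinear lookup in (lognH, logT) on the log10-valued cooling tables, and _func exponentiates the result. For illustration only, the same kind of lookup can be reproduced with SciPy's RegularGridInterpolator; the axes and table below are made-up stand-ins for the file contents:

import numpy as np
from scipy.interpolate import RegularGridInterpolator

lognH = np.linspace(-6.0, 2.0, 9)  # hypothetical log10 n_H axis
logT = np.linspace(1.0, 9.0, 9)    # hypothetical log10 T axis
table = np.zeros((9, 9))           # stand-in for one tvals[...] array

lookup = RegularGridInterpolator((lognH, logT), table,
                                 bounds_error=False, fill_value=None)
# Evaluate at flattened (lognH, logT) points, mirroring _func's ravel():
pts = np.column_stack([np.full(4, -3.0), np.linspace(3.0, 5.0, 4)])
rates = 10.0 ** lookup(pts)        # undo the log10 storage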
Example 10
 def _read_hydro_header(self):
     # If no hydro file is found, return
     if not self._is_hydro():
         return
     if self.nvar > 0: return self.nvar
     # Read the number of hydro variables
     f = open(self.hydro_fn, "rb")
     fpu.skip(f, 1)
     self.nvar = fpu.read_vector(f, "i")[0]
Example 11
def read_star_field(file, field=None):
    data = {}
    with open(file, "rb") as fh:
        for dtype, variables in star_struct:
            found = (isinstance(variables, tuple)
                     and field in variables) or field == variables
            if found:
                data[field] = read_vector(fh, dtype[1], dtype[0])
            else:
                skip(fh, endian=">")
    return data.pop(field)
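
star_struct is a module-level table in yt's ART frontend; the loop implies each entry pairs an endian-plus-typecode string (dtype[0] is the endian, dtype[1] the type character) with either a single variable name or a tuple of names. A hypothetical entry, with invented names, just to show the shape:

# Hypothetical star_struct entries (names invented for illustration):
star_struct_example = [
    (">d", "tdum"),           # one scalar double record
    (">d", ("t1", "t2")),     # one record matched by several names
    (">i", "nstars"),         # one integer record
]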
Example 12
 def _read_particle_subset(self, subset, fields):
     f = open(subset.domain.part_fn, "rb")
     foffsets = subset.domain.particle_field_offsets
     tr = {}
     # We do *all* conversion into boxlen here.
     # This means that no other conversions need to be applied to convert
     # positions into the same domain as the octs themselves.
     for field in sorted(fields, key = lambda a: foffsets[a]):
         f.seek(foffsets[field])
         dt = subset.domain.particle_field_types[field]
         tr[field] = fpu.read_vector(f, dt)
         if field[1].startswith("particle_position"):
             np.divide(tr[field], subset.domain.ds["boxlen"], tr[field])
     return tr
Example 13
 def _read_amr_header(self):
     hvals = {}
     f = open(self.amr_fn, "rb")
     for header in ramses_header(hvals):
         hvals.update(fpu.read_attrs(f, header))
     # That's the header, now we skip a few.
     hvals['numbl'] = np.array(hvals['numbl']).reshape(
         (hvals['nlevelmax'], hvals['ncpu']))
     fpu.skip(f)
     if hvals['nboundary'] > 0:
         fpu.skip(f, 2)
         self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
     else:
         self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
     free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
     ordering = fpu.read_vector(f, 'c')
     fpu.skip(f, 4)
     # Now we're at the tree itself
     # Now we iterate over each level and each CPU.
     self.amr_header = hvals
     self.amr_offset = f.tell()
     self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
     self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
Example 14
 def _read_particle_subset(self, subset, fields):
     f = open(subset.domain.part_fn, "rb")
     foffsets = subset.domain.particle_field_offsets
     tr = {}
     # We do *all* conversion into boxlen here.
     # This means that no other conversions need to be applied to convert
     # positions into the same domain as the octs themselves.
     for field in sorted(fields, key=lambda a: foffsets[a]):
         f.seek(foffsets[field])
         dt = subset.domain.particle_field_types[field]
         tr[field] = fpu.read_vector(f, dt)
         if field[1].startswith("particle_position"):
             np.divide(tr[field], subset.domain.ds["boxlen"], tr[field])
     return tr
Example 15
 def _read_amr_header(self):
     hvals = {}
     f = open(self.amr_fn, "rb")
     for header in ramses_header(hvals):
         hvals.update(fpu.read_attrs(f, header))
     # That's the header, now we skip a few.
     hvals['numbl'] = np.array(hvals['numbl']).reshape(
         (hvals['nlevelmax'], hvals['ncpu']))
     fpu.skip(f)
     if hvals['nboundary'] > 0:
         fpu.skip(f, 2)
         self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
     else:
         self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
     free_mem = fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )  # NOQA
     ordering = fpu.read_vector(f, 'c')  # NOQA
     fpu.skip(f, 4)
     # Now we're at the tree itself
     # Now we iterate over each level and each CPU.
     self.amr_header = hvals
     self.amr_offset = f.tell()
     self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
     self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
Example 16
    def create_cooling_fields(self):
        num = os.path.basename(self.ds.parameter_filename).split("."
                )[0].split("_")[1]
        filename = "%s/cooling_%05i.out" % (
            os.path.dirname(self.ds.parameter_filename), int(num))

        if not os.path.exists(filename): return
        def _create_field(name, interp_object):
            def _func(field, data):
                shape = data["temperature"].shape
                d = {'lognH': np.log10(_X*data["density"]/mh).ravel(),
                     'logT' : np.log10(data["temperature"]).ravel()}
                rv = 10**interp_object(d).reshape(shape)
                return rv
            self.add_field(name = name, function=_func,
                                 units = "code_length**-3")
        avals = {}
        tvals = {}
        with open(filename, "rb") as f:
            n1, n2 = fpu.read_vector(f, 'i')
            n = n1 * n2
            for ax in _cool_axes:
                avals[ax] = fpu.read_vector(f, 'd')
            for tname in _cool_arrs:
                var = fpu.read_vector(f, 'd')
                if var.size == n1*n2:
                    tvals[tname] = var.reshape((n1, n2), order='F')
                else:
                    var = var.reshape((n1, n2, var.size // (n1*n2)), order='F')
                    for i in range(var.shape[-1]):
                        tvals[_cool_species[i]] = var[:,:,i]
        
        for n in tvals:
            interp = BilinearFieldInterpolator(tvals[n],
                        (avals["lognH"], avals["logT"]),
                        ["lognH", "logT"], truncate = True)
            _create_field(("gas", n), interp)
Example 17
File: io.py Project: vadsem/yt
def _ramses_particle_file_handler(fname, foffsets, data_types, subset, fields,
                                  count):
    '''General file handler, called by _read_particle_subset

    Parameters
    ----------
    fname : string
        filename to read from
    foffsets: dict
        Offsets in file of the fields
    data_types: dict
        Data type of the fields
    subset: ``RAMSESDomainSubset``
        A RAMSES domain subset object
    fields: list of tuple
        The fields to read
    count: integer
        The number of elements to read
    '''
    tr = {}
    with open(fname, "rb") as f:
        # We do *all* conversion into boxlen here.
        # This means that no other conversions need to be applied to convert
        # positions into the same domain as the octs themselves.
        for field in sorted(fields, key=lambda a: foffsets[a]):
            if count == 0:
                tr[field] = np.empty(0, dtype=data_types[field])
                continue
            f.seek(foffsets[field])
            dt = data_types[field]
            tr[field] = fpu.read_vector(f, dt)
            if field[1].startswith("particle_position"):
                np.divide(tr[field], subset.domain.ds["boxlen"], tr[field])
            cosmo = subset.domain.ds.cosmological_simulation
            if cosmo == 1 and field[1] == "particle_age":
                tf = subset.domain.ds.t_frw
                dtau = subset.domain.ds.dtau
                tauf = subset.domain.ds.tau_frw
                tsim = subset.domain.ds.time_simu
                h100 = subset.domain.ds.hubble_constant
                nOver2 = subset.domain.ds.n_frw / 2
                t_scale = 1. / (h100 * 100 * cm_per_km /
                                cm_per_mpc) / subset.domain.ds['unit_t']
                ages = tr[field]
                tr[field] = get_ramses_ages(tf, tauf, dtau, tsim, t_scale,
                                            ages, nOver2, len(ages))
    return tr
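
In the particle_age branch, t_scale converts the Hubble time into code time units: h100 * 100 * cm_per_km / cm_per_mpc is H0 in 1/s, so its reciprocal is the Hubble time in seconds, which is then divided by the code time unit. Illustrative arithmetic with made-up values:

cm_per_km, cm_per_mpc = 1.0e5, 3.0857e24   # cgs conversion factors
h100 = 0.7                                  # H0 in units of 100 km/s/Mpc
unit_t = 2.0e15                             # code time unit in s (made up)
H0 = h100 * 100 * cm_per_km / cm_per_mpc    # H0 in 1/s
t_scale = 1.0 / H0 / unit_t                 # Hubble time in code time units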
Example 18
    def _count_art_octs(self, f, offset, MinLev, MaxLevelNow):
        level_oct_offsets = [0, ]
        level_child_offsets = [0, ]
        f.seek(offset)
        nchild, ntot = 8, 0
        Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
        iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
        iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
        for Lev in range(MinLev + 1, MaxLevelNow+1):
            level_oct_offsets.append(f.tell())

            # Get the info for this level, skip the rest
            # print "Reading oct tree data for level", Lev
            # print 'offset:',f.tell()
            Level[Lev], iNOLL[Lev], iHOLL[Lev] = fpu.read_vector(f, 'i', '>')
            # print 'Level %i : '%Lev, iNOLL
            # print 'offset after level record:',f.tell()
            iOct = iHOLL[Lev] - 1
            nLevel = iNOLL[Lev]
            nLevCells = nLevel * nchild
            ntot = ntot + nLevel

            # Skip all the oct hierarchy data
            ns = fpu.peek_record_size(f, endian='>')
            size = struct.calcsize('>i') + ns + struct.calcsize('>i')
            f.seek(f.tell()+size * nLevel)

            level_child_offsets.append(f.tell())
            # Skip the child vars data
            ns = fpu.peek_record_size(f, endian='>')
            size = struct.calcsize('>i') + ns + struct.calcsize('>i')
            f.seek(f.tell()+size * nLevel*nchild)

            # find nhydrovars
            nhydrovars = 8+2
        f.seek(offset)
        return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
Example 19
    def _parse_parameter_file(self):
        """
        Get the various simulation parameters & constants.
        """
        self.dimensionality = 3
        self.refine_by = 2
        self.periodicity = (True, True, True)
        self.cosmological_simulation = True
        self.parameters = {}
        self.unique_identifier = \
            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
        self.parameters.update(constants)
        self.parameters['Time'] = 1.0
        # read the amr header
        with open(self._file_amr, 'rb') as f:
            amr_header_vals = fpu.read_attrs(f, amr_header_struct, '>')
            for to_skip in ['tl', 'dtl', 'tlold', 'dtlold', 'iSO']:
                skipped = fpu.skip(f, endian='>')
            (self.ncell) = fpu.read_vector(f, 'i', '>')[0]
            # Try to figure out the root grid dimensions
            est = int(np.rint(self.ncell**(1.0 / 3.0)))
            # Note here: this is the number of *cells* on the root grid.
            # This is not the same as the number of Octs.
            # domain dimensions is the number of root *cells*
            self.domain_dimensions = np.ones(3, dtype='int64') * est
            self.root_grid_mask_offset = f.tell()
            self.root_nocts = self.domain_dimensions.prod() // 8
            self.root_ncells = self.root_nocts * 8
            mylog.debug(
                "Estimating %i cells on a root grid side, %i root octs",
                est, self.root_nocts)
            self.root_iOctCh = fpu.read_vector(f, 'i', '>')[:self.root_ncells]
            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
                                                        order='F')
            self.root_grid_offset = f.tell()
            self.root_nhvar = fpu.skip(f, endian='>')
            self.root_nvar = fpu.skip(f, endian='>')
            # make sure that the number of root variables is a multiple of
            # rootcells
            assert self.root_nhvar % self.root_ncells == 0
            assert self.root_nvar % self.root_ncells == 0
            self.nhydro_variables = ((self.root_nhvar + self.root_nvar) /
                                     self.root_ncells)
            self.iOctFree, self.nOct = fpu.read_vector(f, 'i', '>')
            self.child_grid_offset = f.tell()
            self.parameters.update(amr_header_vals)
            amr_header_vals = None
            # estimate the root level
            float_center, fl, iocts, nocts, root_level = _read_art_level_info(
                f, [0, self.child_grid_offset],
                1,
                coarse_grid=self.domain_dimensions[0])
            del float_center, fl, iocts, nocts
            self.root_level = root_level
            mylog.info("Using root level of %02i", self.root_level)
        # read the particle header
        self.particle_types = []
        self.particle_types_raw = ()
        if not self.skip_particles and self._file_particle_header:
            with open(self._file_particle_header, "rb") as fh:
                particle_header_vals = fpu.read_attrs(fh,
                                                      particle_header_struct,
                                                      '>')
                fh.seek(seek_extras)
                n = particle_header_vals['Nspecies']
                wspecies = np.fromfile(fh, dtype='>f', count=10)
                lspecies = np.fromfile(fh, dtype='>i', count=10)
            self.parameters['wspecies'] = wspecies[:n]
            self.parameters['lspecies'] = lspecies[:n]
            for specie in range(n):
                self.particle_types.append("specie%i" % specie)
            self.particle_types_raw = tuple(self.particle_types)
            ls_nonzero = np.diff(lspecies)[:n - 1]
            ls_nonzero = np.append(lspecies[0], ls_nonzero)
            self.star_type = len(ls_nonzero)
            mylog.info("Discovered %i species of particles", len(ls_nonzero))
            mylog.info("Particle populations: " + '%9i ' * len(ls_nonzero),
                       *ls_nonzero)
            for k, v in particle_header_vals.items():
                if k in self.parameters.keys():
                    if not self.parameters[k] == v:
                        mylog.info("Inconsistent parameter %s %1.1e  %1.1e", k,
                                   v, self.parameters[k])
                else:
                    self.parameters[k] = v
            self.parameters_particles = particle_header_vals
            self.parameters.update(particle_header_vals)
            self.parameters['ng'] = self.parameters['Ngridc']
            self.parameters['ncell0'] = self.parameters['ng']**3

        # setup standard simulation params yt expects to see
        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
        self.omega_lambda = self.parameters['Oml0']
        self.omega_matter = self.parameters['Om0']
        self.hubble_constant = self.parameters['hubble']
        self.min_level = self.parameters['min_level']
        self.max_level = self.parameters['max_level']
        if self.limit_level is not None:
            self.max_level = min(self.limit_level,
                                 self.parameters['max_level'])
        if self.force_max_level is not None:
            self.max_level = self.force_max_level
        self.hubble_time = 1.0 / (self.hubble_constant * 100 / 3.08568025e19)
        self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')
        self.gamma = self.parameters["gamma"]
        mylog.info("Max level is %02i", self.max_level)
Example 20
    def _parse_parameter_file(self):
        """
        Get the various simulation parameters & constants.
        """
        self.dimensionality = 3
        self.refine_by = 2
        self.periodicity = (True, True, True)
        self.cosmological_simulation = True
        self.parameters = {}
        self.unique_identifier = \
            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
        self.parameters.update(constants)
        self.parameters['Time'] = 1.0
        # read the amr header
        with open(self._file_amr, 'rb') as f:
            amr_header_vals = fpu.read_attrs(f, amr_header_struct, '>')
            for to_skip in ['tl', 'dtl', 'tlold', 'dtlold', 'iSO']:
                skipped = fpu.skip(f, endian='>')
            (self.ncell) = fpu.read_vector(f, 'i', '>')[0]
            # Try to figure out the root grid dimensions
            est = int(np.rint(self.ncell**(1.0/3.0)))
            # Note here: this is the number of *cells* on the root grid.
            # This is not the same as the number of Octs.
            # domain dimensions is the number of root *cells*
            self.domain_dimensions = np.ones(3, dtype='int64')*est
            self.root_grid_mask_offset = f.tell()
            self.root_nocts = self.domain_dimensions.prod()//8
            self.root_ncells = self.root_nocts*8
            mylog.debug("Estimating %i cells on a root grid side," +
                        "%i root octs", est, self.root_nocts)
            self.root_iOctCh = fpu.read_vector(f, 'i', '>')[:self.root_ncells]
            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
                                                        order='F')
            self.root_grid_offset = f.tell()
            self.root_nhvar = fpu.skip(f, endian='>')
            self.root_nvar = fpu.skip(f, endian='>')
            # make sure that the number of root variables is a multiple of
            # rootcells
            assert self.root_nhvar % self.root_ncells == 0
            assert self.root_nvar % self.root_ncells == 0
            self.nhydro_variables = ((self.root_nhvar+self.root_nvar) /
                                     self.root_ncells)
            self.iOctFree, self.nOct = fpu.read_vector(f, 'i', '>')
            self.child_grid_offset = f.tell()
            self.parameters.update(amr_header_vals)
            amr_header_vals = None
            # estimate the root level
            float_center, fl, iocts, nocts, root_level = _read_art_level_info(
                f,
                [0, self.child_grid_offset], 1,
                coarse_grid=self.domain_dimensions[0])
            del float_center, fl, iocts, nocts
            self.root_level = root_level
            mylog.info("Using root level of %02i", self.root_level)
        # read the particle header
        self.particle_types = []
        self.particle_types_raw = ()
        if not self.skip_particles and self._file_particle_header:
            with open(self._file_particle_header, "rb") as fh:
                particle_header_vals = fpu.read_attrs(
                    fh, particle_header_struct, '>')
                fh.seek(seek_extras)
                n = particle_header_vals['Nspecies']
                wspecies = np.fromfile(fh, dtype='>f', count=10)
                lspecies = np.fromfile(fh, dtype='>i', count=10)
            self.parameters['wspecies'] = wspecies[:n]
            self.parameters['lspecies'] = lspecies[:n]
            for specie in range(n):
                self.particle_types.append("specie%i" % specie)
            self.particle_types_raw = tuple(
                self.particle_types)
            ls_nonzero = np.diff(lspecies)[:n-1]
            ls_nonzero = np.append(lspecies[0], ls_nonzero)
            self.star_type = len(ls_nonzero)
            mylog.info("Discovered %i species of particles", len(ls_nonzero))
            mylog.info("Particle populations: "+'%9i '*len(ls_nonzero),
                       *ls_nonzero)
            for k, v in particle_header_vals.items():
                if k in self.parameters.keys():
                    if not self.parameters[k] == v:
                        mylog.info(
                            "Inconsistent parameter %s %1.1e  %1.1e", k, v,
                            self.parameters[k])
                else:
                    self.parameters[k] = v
            self.parameters_particles = particle_header_vals
            self.parameters.update(particle_header_vals)
            self.parameters['ng'] = self.parameters['Ngridc']
            self.parameters['ncell0'] = self.parameters['ng']**3


        # setup standard simulation params yt expects to see
        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
        self.omega_lambda = self.parameters['Oml0']
        self.omega_matter = self.parameters['Om0']
        self.hubble_constant = self.parameters['hubble']
        self.min_level = self.parameters['min_level']
        self.max_level = self.parameters['max_level']
        if self.limit_level is not None:
            self.max_level = min(
                self.limit_level, self.parameters['max_level'])
        if self.force_max_level is not None:
            self.max_level = self.force_max_level
        self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)
        self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')
        self.gamma = self.parameters["gamma"]
        mylog.info("Max level is %02i", self.max_level)
Example 21
def _read_art_level_info(f,
                         level_oct_offsets,
                         level,
                         coarse_grid=128,
                         ncell0=None,
                         root_level=None):
    pos = f.tell()
    f.seek(level_oct_offsets[level])
    # Get the info for this level, skip the rest
    junk, nLevel, iOct = read_vector(f, "i", ">")

    # fortran indices start at 1

    # Skip all the oct index data
    le = np.zeros((nLevel, 3), dtype="int64")
    fl = np.ones((nLevel, 6), dtype="int64")
    iocts = np.zeros(nLevel + 1, dtype="int64")
    idxa, idxb = 0, 0
    chunk = int(1e6)  # this is ~111MB for 15 dimensional 64 bit arrays
    left = nLevel
    while left > 0:
        this_chunk = min(chunk, left)
        idxb = idxa + this_chunk
        data = np.fromfile(f, dtype=">i", count=this_chunk * 15)
        data = data.reshape(this_chunk, 15)
        left -= this_chunk
        le[idxa:idxb, :] = data[:, 1:4]
        fl[idxa:idxb, 1] = np.arange(idxa, idxb)
        # pad byte is last, LL2, then ioct right before it
        iocts[idxa:idxb] = data[:, -3]
        idxa = idxa + this_chunk
    del data

    # emulate fortran code
    #     do ic1 = 1 , nLevel
    #       read(19) (iOctPs(i,iOct),i=1,3),(iOctNb(i,iOct),i=1,6),
    # &                iOctPr(iOct), iOctLv(iOct), iOctLL1(iOct),
    # &                iOctLL2(iOct)
    #       iOct = iOctLL1(iOct)

    # ioct always represents the index of the next variable
    # not the current, so shift forward one index
    # the last index isn't used
    iocts[1:] = iocts[:-1]  # shift
    iocts = iocts[:nLevel]  # chop off the last, unused, index
    iocts[0] = iOct  # starting value

    # now correct iocts for fortran indices start @ 1
    iocts = iocts - 1

    assert np.unique(iocts).shape[0] == nLevel

    # left edges are expressed as if they were on
    # level 15, so no matter what level max(le)=2**15
    # correct to the yt convention
    # le = le/2**(root_level-1-level)-1

    # try to find the root_level first
    def cfc(root_level, level, le):
        d_x = 1.0 / (2.0**(root_level - level + 1))
        fc = (d_x * le) - 2**(level - 1)
        return fc

    if root_level is None:
        root_level = np.floor(np.log2(le.max() * 1.0 / coarse_grid))
        root_level = root_level.astype("int64")
        for _ in range(10):
            fc = cfc(root_level, level, le)
            go = np.diff(np.unique(fc)).min() < 1.1
            if go:
                break
            root_level += 1
    else:
        fc = cfc(root_level, level, le)
    unitary_center = fc / (coarse_grid * 2.0**(level - 1))
    assert np.all(unitary_center < 1.0)

    # again emulate the fortran code
    # This is all for calculating child oct locations
    # iC_ = iC + nbshift
    # iO = ishft ( iC_ , - ndim )
    # id = ishft ( 1, MaxLevel - iOctLv(iO) )
    # j  = iC_ + 1 - ishft( iO , ndim )
    # Posx   = d_x * (iOctPs(1,iO) + sign ( id , idelta(j,1) ))
    # Posy   = d_x * (iOctPs(2,iO) + sign ( id , idelta(j,2) ))
    # Posz   = d_x * (iOctPs(3,iO) + sign ( id , idelta(j,3) ))
    # idelta = [[-1,  1, -1,  1, -1,  1, -1,  1],
    #           [-1, -1,  1,  1, -1, -1,  1,  1],
    #           [-1, -1, -1, -1,  1,  1,  1,  1]]
    # idelta = np.array(idelta)
    # if ncell0 is None:
    #     ncell0 = coarse_grid**3
    # nchild = 8
    # ndim = 3
    # nshift = nchild -1
    # nbshift = nshift - ncell0
    # iC = iocts #+ nbshift
    # iO = iC >> ndim #possibly >>
    # id = 1 << (root_level - level)
    # j = iC + 1 - ( iO << 3)
    # delta = np.abs(id)*idelta[:,j-1]

    # try without the -1
    # le = le/2**(root_level+1-level)
    # now read the hvars and vars arrays
    # we are looking for iOctCh
    # we record if iOctCh is >0, in which it is subdivided
    # iOctCh  = np.zeros((nLevel+1,8),dtype='bool')
    f.seek(pos)
    return unitary_center, fl, iocts, nLevel, root_level
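
The while loop above streams the oct records through np.fromfile in bounded-memory chunks; each row is 15 big-endian 4-byte ints. The chunking idea in isolation:

import numpy as np

def read_rows_chunked(f, total_rows, width=15, chunk=int(1e6), dtype=">i"):
    # Read total_rows x width values without ever holding more than
    # `chunk` rows of raw input at once.
    out = np.empty((total_rows, width), dtype="int64")
    done = 0
    while done < total_rows:
        n = min(chunk, total_rows - done)
        data = np.fromfile(f, dtype=dtype, count=n * width)
        out[done:done + n, :] = data.reshape(n, width)
        done += n
    return out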
Example 22
    def _parse_parameter_file(self):
        """
        Get the various simulation parameters & constants.
        """
        self.domain_left_edge = np.zeros(3, dtype="float")
        self.domain_right_edge = np.zeros(3, dtype="float") + 1.0
        self.dimensionality = 3
        self.refine_by = 2
        self.periodicity = (True, True, True)
        self.cosmological_simulation = True
        self.parameters = {}
        self.parameters.update(constants)
        self.parameters["Time"] = 1.0
        # read the amr header
        with open(self._file_amr, "rb") as f:
            amr_header_vals = fpu.read_attrs(f, amr_header_struct, ">")
            n_to_skip = len(("tl", "dtl", "tlold", "dtlold", "iSO"))
            fpu.skip(f, n_to_skip, endian=">")
            (self.ncell) = fpu.read_vector(f, "i", ">")[0]
            # Try to figure out the root grid dimensions
            est = int(np.rint(self.ncell**(1.0 / 3.0)))
            # Note here: this is the number of *cells* on the root grid.
            # This is not the same as the number of Octs.
            # domain dimensions is the number of root *cells*
            self.domain_dimensions = np.ones(3, dtype="int64") * est
            self.root_grid_mask_offset = f.tell()
            self.root_nocts = self.domain_dimensions.prod() // 8
            self.root_ncells = self.root_nocts * 8
            mylog.debug(
                "Estimating %i cells on a root grid side, %i root octs",
                est,
                self.root_nocts,
            )
            self.root_iOctCh = fpu.read_vector(f, "i", ">")[:self.root_ncells]
            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
                                                        order="F")
            self.root_grid_offset = f.tell()
            self.root_nhvar = fpu.skip(f, endian=">")
            self.root_nvar = fpu.skip(f, endian=">")
            # make sure that the number of root variables is a multiple of
            # rootcells
            assert self.root_nhvar % self.root_ncells == 0
            assert self.root_nvar % self.root_ncells == 0
            self.nhydro_variables = (self.root_nhvar +
                                     self.root_nvar) / self.root_ncells
            self.iOctFree, self.nOct = fpu.read_vector(f, "i", ">")
            self.child_grid_offset = f.tell()
            # lextra needs to be loaded as a string, but it's actually
            # array values.  So pop it off here, and then re-insert.
            lextra = amr_header_vals.pop("lextra")
            amr_header_vals["lextra"] = np.fromstring(lextra, ">f4")
            self.parameters.update(amr_header_vals)
            amr_header_vals = None
            # estimate the root level
            float_center, fl, iocts, nocts, root_level = _read_art_level_info(
                f, [0, self.child_grid_offset],
                1,
                coarse_grid=self.domain_dimensions[0])
            del float_center, fl, iocts, nocts
            self.root_level = root_level
            mylog.info("Using root level of %02i", self.root_level)
        # read the particle header
        self.particle_types = []
        self.particle_types_raw = ()
        if not self.skip_particles and self._file_particle_header:
            with open(self._file_particle_header, "rb") as fh:
                particle_header_vals = fpu.read_attrs(fh,
                                                      particle_header_struct,
                                                      ">")
                fh.seek(seek_extras)
                n = particle_header_vals["Nspecies"]
                wspecies = np.fromfile(fh, dtype=">f", count=10)
                lspecies = np.fromfile(fh, dtype=">i", count=10)
                # extras needs to be loaded as a string, but it's actually
                # array values.  So pop it off here, and then re-insert.
                extras = particle_header_vals.pop("extras")
                particle_header_vals["extras"] = np.fromstring(extras, ">f4")
            self.parameters["wspecies"] = wspecies[:n]
            self.parameters["lspecies"] = lspecies[:n]
            for specie in range(n):
                self.particle_types.append("specie%i" % specie)
            self.particle_types_raw = tuple(self.particle_types)
            ls_nonzero = np.diff(lspecies)[:n - 1]
            ls_nonzero = np.append(lspecies[0], ls_nonzero)
            self.star_type = len(ls_nonzero)
            mylog.info("Discovered %i species of particles", len(ls_nonzero))
            info_str = "Particle populations: " + "%9i " * len(ls_nonzero)
            mylog.info(info_str, *ls_nonzero)
            self._particle_type_counts = dict(
                zip(self.particle_types_raw, ls_nonzero))
            for k, v in particle_header_vals.items():
                if k in self.parameters.keys():
                    if not self.parameters[k] == v:
                        mylog.info(
                            "Inconsistent parameter %s %1.1e  %1.1e",
                            k,
                            v,
                            self.parameters[k],
                        )
                else:
                    self.parameters[k] = v
            self.parameters_particles = particle_header_vals
            self.parameters.update(particle_header_vals)
            self.parameters["ng"] = self.parameters["Ngridc"]
            self.parameters["ncell0"] = self.parameters["ng"]**3

        # setup standard simulation params yt expects to see
        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
        self.omega_lambda = self.parameters["Oml0"]
        self.omega_matter = self.parameters["Om0"]
        self.hubble_constant = self.parameters["hubble"]
        self.min_level = self.parameters["min_level"]
        self.max_level = self.parameters["max_level"]
        if self.limit_level is not None:
            self.max_level = min(self.limit_level,
                                 self.parameters["max_level"])
        if self.force_max_level is not None:
            self.max_level = self.force_max_level
        self.hubble_time = 1.0 / (self.hubble_constant * 100 / 3.08568025e19)
        self.current_time = self.quan(b2t(self.parameters["t"]), "Gyr")
        self.gamma = self.parameters["gamma"]
        mylog.info("Max level is %02i", self.max_level)
Example 23
 def _read_amr(self):
      """Open the oct file, read in octs level-by-level.
         For each oct, only the position, index, level and domain
         are needed - its position in the octree is found automatically.
         The most important step is finding all the information to feed
         to oct_handler.add.
      """
     self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions/2,
             self.ds.domain_left_edge, self.ds.domain_right_edge)
     root_nodes = self.amr_header['numbl'][self.ds.min_level,:].sum()
     self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
     fb = open(self.amr_fn, "rb")
     fb.seek(self.amr_offset)
     f = BytesIO()
     f.write(fb.read())
     f.seek(0)
     mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)",
         self.domain_id, self.total_oct_count.sum(), self.ngridbound.sum())
     def _ng(c, l):
         if c < self.amr_header['ncpu']:
             ng = self.amr_header['numbl'][l, c]
         else:
             ng = self.ngridbound[c - self.amr_header['ncpu'] +
                             self.amr_header['nboundary']*l]
         return ng
     min_level = self.ds.min_level
     # yt max level is not the same as the RAMSES one.
     # yt max level is the maximum number of additional refinement levels
     # so for a uni grid run with no refinement, it would be 0. 
     # So we initially assume that.
     max_level = 0
     nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
     for level in range(self.amr_header['nlevelmax']):
         # Easier if do this 1-indexed
         for cpu in range(self.amr_header['nboundary'] + self.amr_header['ncpu']):
             #ng is the number of octs on this level on this domain
             ng = _ng(cpu, level)
             if ng == 0: continue
             ind = fpu.read_vector(f, "I").astype("int64")
             fpu.skip(f, 2)
             pos = np.empty((ng, 3), dtype='float64')
             pos[:,0] = fpu.read_vector(f, "d") - nx
             pos[:,1] = fpu.read_vector(f, "d") - ny
             pos[:,2] = fpu.read_vector(f, "d") - nz
             #pos *= self.ds.domain_width
             #pos += self.dataset.domain_left_edge
             fpu.skip(f, 31)
             #parents = fpu.read_vector(f, "I")
             #fpu.skip(f, 6)
             #children = np.empty((ng, 8), dtype='int64')
             #for i in range(8):
             #    children[:,i] = fpu.read_vector(f, "I")
             #cpu_map = np.empty((ng, 8), dtype="int64")
             #for i in range(8):
             #    cpu_map[:,i] = fpu.read_vector(f, "I")
             #rmap = np.empty((ng, 8), dtype="int64")
             #for i in range(8):
             #    rmap[:,i] = fpu.read_vector(f, "I")
             # We don't want duplicate grids.
             # Note that we're adding *grids*, not individual cells.
             if level >= min_level:
                 assert(pos.shape[0] == ng)
                 n = self.oct_handler.add(cpu + 1, level - min_level, pos,
                             count_boundary = 1)
                 self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
                 if n > 0: max_level = max(level - min_level, max_level)
     self.max_level = max_level
     self.oct_handler.finalize()
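
Note the buffering at the top of _read_amr: the AMR region is copied into a BytesIO so that the many small record reads that follow hit memory instead of disk. The trick in isolation:

from io import BytesIO

def buffer_file_region(path, offset, size=-1):
    # Copy a region of a file into memory; size=-1 reads to EOF.
    # The returned stream is seekable and starts at position 0.
    with open(path, "rb") as fb:
        fb.seek(offset)
        return BytesIO(fb.read(size))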
Example 24
    def _read_amr(self):
        """Open the oct file, read in octs level-by-level.
           For each oct, only the position, index, level and domain
           are needed - its position in the octree is found automatically.
           The most important step is finding all the information to feed
           to oct_handler.add.
        """
        self.oct_handler = RAMSESOctreeContainer(self.ds.domain_dimensions / 2,
                                                 self.ds.domain_left_edge,
                                                 self.ds.domain_right_edge)
        root_nodes = self.amr_header['numbl'][self.ds.min_level, :].sum()
        self.oct_handler.allocate_domains(self.total_oct_count, root_nodes)
        mylog.debug("Reading domain AMR % 4i (%0.3e, %0.3e)", self.domain_id,
                    self.total_oct_count.sum(), self.ngridbound.sum())

        f = self.amr_file

        f.seek(self.amr_offset)

        def _ng(c, l):
            if c < self.amr_header['ncpu']:
                ng = self.amr_header['numbl'][l, c]
            else:
                ng = self.ngridbound[c - self.amr_header['ncpu'] +
                                     self.amr_header['nboundary'] * l]
            return ng

        min_level = self.ds.min_level
        # yt max level is not the same as the RAMSES one.
        # yt max level is the maximum number of additional refinement levels
        # so for a uni grid run with no refinement, it would be 0.
        # So we initially assume that.
        max_level = 0
        nx, ny, nz = (((i - 1.0) / 2.0) for i in self.amr_header['nx'])
        for level in range(self.amr_header['nlevelmax']):
            # Easier if do this 1-indexed
            for cpu in range(self.amr_header['nboundary'] +
                             self.amr_header['ncpu']):
                #ng is the number of octs on this level on this domain
                ng = _ng(cpu, level)
                if ng == 0: continue
                ind = fpu.read_vector(f, "I").astype("int64")  # NOQA
                fpu.skip(f, 2)
                pos = np.empty((ng, 3), dtype='float64')
                pos[:, 0] = fpu.read_vector(f, "d") - nx
                pos[:, 1] = fpu.read_vector(f, "d") - ny
                pos[:, 2] = fpu.read_vector(f, "d") - nz
                #pos *= self.ds.domain_width
                #pos += self.dataset.domain_left_edge
                fpu.skip(f, 31)
                #parents = fpu.read_vector(f, "I")
                #fpu.skip(f, 6)
                #children = np.empty((ng, 8), dtype='int64')
                #for i in range(8):
                #    children[:,i] = fpu.read_vector(f, "I")
                #cpu_map = np.empty((ng, 8), dtype="int64")
                #for i in range(8):
                #    cpu_map[:,i] = fpu.read_vector(f, "I")
                #rmap = np.empty((ng, 8), dtype="int64")
                #for i in range(8):
                #    rmap[:,i] = fpu.read_vector(f, "I")
                # We don't want duplicate grids.
                # Note that we're adding *grids*, not individual cells.
                if level >= min_level:
                    assert (pos.shape[0] == ng)
                    n = self.oct_handler.add(cpu + 1,
                                             level - min_level,
                                             pos,
                                             count_boundary=1)
                    self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
                    if n > 0: max_level = max(level - min_level, max_level)
        self.max_level = max_level
        self.oct_handler.finalize()

        # Close AMR file
        f.close()
Example 25
    def _read_particle_header(self):
        if not os.path.exists(self.part_fn):
            self.local_particle_count = 0
            self.particle_field_offsets = {}
            return

        f = open(self.part_fn, "rb")
        f.seek(0, os.SEEK_END)
        flen = f.tell()
        f.seek(0)
        hvals = {}
        attrs = (('ncpu', 1, 'I'), ('ndim', 1, 'I'), ('npart', 1, 'I'))
        hvals.update(fpu.read_attrs(f, attrs))
        fpu.read_vector(f, 'I')

        attrs = (('nstar_tot', 1, 'I'), ('mstar_tot', 1, 'd'),
                 ('mstar_lost', 1, 'd'), ('nsink', 1, 'I'))
        hvals.update(fpu.read_attrs(f, attrs))
        self.particle_header = hvals
        self.local_particle_count = hvals['npart']

        # Try reading particle file descriptor
        if self._has_part_descriptor:
            particle_fields = (_read_part_file_descriptor(
                self._part_file_descriptor))
        else:
            particle_fields = [("particle_position_x", "d"),
                               ("particle_position_y", "d"),
                               ("particle_position_z", "d"),
                               ("particle_velocity_x", "d"),
                               ("particle_velocity_y", "d"),
                               ("particle_velocity_z", "d"),
                               ("particle_mass", "d"),
                               ("particle_identifier", "i"),
                               ("particle_refinement_level", "I")]

            if self.ds._extra_particle_fields is not None:
                particle_fields += self.ds._extra_particle_fields

        ptype = 'io'

        field_offsets = {}
        _pfields = {}

        # Read offsets
        for field, vtype in particle_fields:
            if f.tell() >= flen: break
            field_offsets[ptype, field] = f.tell()
            _pfields[ptype, field] = vtype
            fpu.skip(f, 1)

        iextra = 0
        while f.tell() < flen:
            iextra += 1
            field, vtype = ('particle_extra_field_%i' % iextra, 'd')
            particle_fields.append((field, vtype))

            field_offsets[ptype, field] = f.tell()
            _pfields[ptype, field] = vtype
            fpu.skip(f, 1)

        if iextra > 0 and not self.ds._warn_extra_fields:
            self.ds._warn_extra_fields = True
            w = ("Detected %s extra particle fields assuming kind "
                 "`double`. Consider using the `extra_particle_fields` "
                 "keyword argument if you have unexpected behavior.")
            mylog.warning(w % iextra)

        self.particle_field_offsets = field_offsets
        self.particle_field_types = _pfields

        # Register the particle type
        self._add_ptype(ptype)