コード例 #1
0
 def hydro_offset(self):
     """Compute and cache the byte offset of each level's hydro record.

     Scans the hydro file once and returns an int64 array of length
     ``nlevelmax - min_level`` with the file position of this domain's
     data per level (-1 where none was found).  Also fills
     ``self._level_count`` as a side effect.
     """
     if self._hydro_offset is not None: return self._hydro_offset
     # We now have to open the file and calculate it.  Use a context
     # manager so the handle is closed even if parsing fails
     # (the original leaked the open file).
     min_level = self.ds.min_level
     n_levels = self.amr_header['nlevelmax'] - min_level
     hydro_offset = np.zeros(n_levels, dtype='int64')
     hydro_offset -= 1  # -1 marks "no data for this level"
     level_count = np.zeros(n_levels, dtype='int64')
     skipped = []
     with open(self.hydro_fn, "rb") as f:
         fpu.skip(f, 6)
         # It goes: level, CPU, 8-variable
         for level in range(self.amr_header['nlevelmax']):
             for cpu in range(self.amr_header['nboundary'] +
                              self.amr_header['ncpu']):
                 header = ( ('file_ilevel', 1, 'I'),
                            ('file_ncache', 1, 'I') )
                 try:
                     hvals = fpu.read_attrs(f, header, "=")
                 except AssertionError:
                     print("You are running with the wrong number of fields.")
                     print("If you specified these in the load command, check the array length.")
                     print("In this file there are %s hydro fields." % skipped)
                     #print "The last set of field sizes was: %s" % skipped
                     raise
                 if hvals['file_ncache'] == 0: continue
                 assert(hvals['file_ilevel'] == level+1)
                 if cpu + 1 == self.domain_id and level >= min_level:
                     # This chunk belongs to our domain: remember its start.
                     hydro_offset[level - min_level] = f.tell()
                     level_count[level - min_level] = hvals['file_ncache']
                 skipped = fpu.skip(f, 8 * self.nvar)
     self._hydro_offset = hydro_offset
     self._level_count = level_count
     return self._hydro_offset
コード例 #2
0
    def _read_particle_header(self):
        """Parse the particle file header and record per-field byte offsets.

        Fills ``self.particle_header``, ``self.local_particle_count``,
        ``self.particle_field_offsets`` / ``self.particle_field_types``
        and registers the single ``"io"`` particle type.  If the particle
        file is absent, counts and offsets are zeroed out instead.
        """
        if not os.path.exists(self.part_fn):
            self.local_particle_count = 0
            self.particle_field_offsets = {}
            return
        # Context manager so the handle is closed even if a read fails
        # (the original leaked the open file).
        with open(self.part_fn, "rb") as f:
            f.seek(0, os.SEEK_END)
            flen = f.tell()
            f.seek(0)
            hvals = {}
            attrs = ( ('ncpu', 1, 'I'),
                      ('ndim', 1, 'I'),
                      ('npart', 1, 'I') )
            hvals.update(fpu.read_attrs(f, attrs))
            # Skip the localseed-style record between the two header blocks.
            fpu.read_vector(f, 'I')

            attrs = ( ('nstar_tot', 1, 'I'),
                      ('mstar_tot', 1, 'd'),
                      ('mstar_lost', 1, 'd'),
                      ('nsink', 1, 'I') )
            hvals.update(fpu.read_attrs(f, attrs))
            self.particle_header = hvals
            self.local_particle_count = hvals['npart']
            particle_fields = [
                    ("particle_position_x", "d"),
                    ("particle_position_y", "d"),
                    ("particle_position_z", "d"),
                    ("particle_velocity_x", "d"),
                    ("particle_velocity_y", "d"),
                    ("particle_velocity_z", "d"),
                    ("particle_mass", "d"),
                    ("particle_identifier", "I"),
                    ("particle_refinement_level", "I")]
            # Star fields only exist on disk when stars were formed.
            if hvals["nstar_tot"] > 0:
                particle_fields += [("particle_age", "d"),
                                    ("particle_metallicity", "d")]

            field_offsets = {}
            _pfields = {}
            for field, vtype in particle_fields:
                # Stop once we run past EOF: fields beyond this point are
                # simply not present in the file.
                if f.tell() >= flen: break
                field_offsets["io", field] = f.tell()
                _pfields["io", field] = vtype
                fpu.skip(f, 1)
        self.particle_field_offsets = field_offsets
        self.particle_field_types = _pfields
        self.particle_types = self.particle_types_raw = ("io",)
コード例 #3
0
    def detect_fields(cls, ds):
        """Detect the radiative-transfer fields available for *ds*.

        Reads the ``info_rt_*`` text header next to the dataset's info
        file, then the header of the first RT fluid file, and builds the
        ``Photon_density`` / ``Photon_flux_{x,y,z}`` field names for
        every photon group.  Results are cached on the class.
        """
        # Serve a previously detected field list if one is cached.
        cached = cls.get_detected_fields(ds)
        if cached:
            return cached

        fname = ds.parameter_filename.replace('info_', 'info_rt_')

        rheader = {}

        def parse_rhs(fh, cast):
            # Lines look like "name = value"; store the cast value.
            key, val = fh.readline().split("=")
            rheader[key.strip()] = cast(val)

        with open(fname, 'r') as f:
            # The header comes in four groups (4 ints, 2 floats,
            # 3 floats, 3 floats), separated by one line each.
            groups = ((4, int), (2, float), (3, float), (3, float))
            for position, (count, cast) in enumerate(groups):
                if position:
                    f.readline()
                for _ in range(count):
                    parse_rhs(f, cast)

            # Touchy part, we have to read the photon group properties
            mylog.debug('Not reading photon group properties')

            cls.rt_parameters = rheader

        ngroups = rheader['nGroups']

        iout = int(str(ds).split('_')[1])
        basedir = os.path.split(ds.parameter_filename)[0]
        fname = os.path.join(basedir, cls.fname.format(iout=iout, icpu=1))
        with open(fname, 'rb') as f:
            cls.parameters = fpu.read_attrs(f, cls.attrs)

        templates = ("Photon_density_%s", "Photon_flux_x_%s",
                     "Photon_flux_y_%s", "Photon_flux_z_%s")
        fields = [t % (group + 1)
                  for group in range(ngroups)
                  for t in templates]

        cls.field_list = [(cls.ftype, e) for e in fields]

        cls.set_detected_fields(ds, fields)
        return fields
コード例 #4
0
    def offset(self):
        '''
        Compute the offsets of the fields.

        By default, it skips the header (as defined by `cls.attrs`)
        and computes the offset at each level.

        It should be generic enough for most of the cases, but if the
        *structure* of your fluid file is non-canonical, change this.

        Returns an int64 array of length ``nlevelmax - min_level`` with
        the byte offset of each level's record for this domain (-1 where
        no record was found).  Also caches ``self._level_count`` as a
        side effect.
        '''

        # Return the cached result if the file was already scanned.
        if getattr(self, '_offset', None) is not None:
            return self._offset

        nvar = self.parameters['nvar']
        ndim = self.domain.ds.dimensionality
        twotondim = 2**ndim  # cells per oct

        with open(self.fname, 'rb') as f:
            # Skip headers
            nskip = len(self.attrs)
            fpu.skip(f, nskip)

            # It goes: level, CPU, 8-variable (1 cube)
            min_level = self.domain.ds.min_level
            n_levels = self.domain.amr_header['nlevelmax'] - min_level
            offset = np.zeros(n_levels, dtype='int64')
            offset -= 1  # -1 marks "no data found for this level"
            level_count = np.zeros(n_levels, dtype='int64')
            skipped = []
            amr_header = self.domain.amr_header
            for level in range(amr_header['nlevelmax']):
                for cpu in range(amr_header['nboundary'] + amr_header['ncpu']):
                    header = (('file_ilevel', 1, 'I'), ('file_ncache', 1, 'I'))
                    try:
                        hvals = fpu.read_attrs(f, header, "=")
                    except AssertionError:
                        # A mismatch between the expected and actual record
                        # sizes surfaces as an AssertionError in read_attrs.
                        mylog.error(
                            "You are running with the wrong number of fields. "
                            "If you specified these in the load command, check the array length. "
                            "In this file there are %s hydro fields." %
                            skipped)
                        raise
                    if hvals['file_ncache'] == 0: continue
                    assert (hvals['file_ilevel'] == level + 1)
                    if cpu + 1 == self.domain_id and level >= min_level:
                        # This chunk belongs to our domain: record its start.
                        offset[level - min_level] = f.tell()
                        level_count[level - min_level] = hvals['file_ncache']
                    skipped = fpu.skip(f, twotondim * nvar)
        self._offset = offset
        self._level_count = level_count
        return self._offset
コード例 #5
0
ファイル: data_structures.py プロジェクト: victorgabr/yt
 def _read_amr_header(self):
     """Read the AMR file header and record tree metadata.

     Fills ``self.amr_header``, ``self.ngridbound``, ``self.amr_offset``
     (file position where the oct tree starts) and the per-level oct
     counts for this domain.
     """
     hvals = {}
     # Context manager so the handle is closed even if a read fails
     # (the original leaked the open file).
     with open(self.amr_fn, "rb") as f:
         for header in ramses_header(hvals):
             hvals.update(fpu.read_attrs(f, header))
         # That's the header, now we skip a few.
         hvals['numbl'] = np.array(hvals['numbl']).reshape(
             (hvals['nlevelmax'], hvals['ncpu']))
         fpu.skip(f)
         if hvals['nboundary'] > 0:
             fpu.skip(f, 2)
             self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
         else:
             self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
         # Advance past the free-memory record and the ordering string;
         # only the resulting file position matters, not their contents.
         fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
         fpu.read_vector(f, 'c')
         fpu.skip(f, 4)
         # Now we're at the tree itself
         # Now we iterate over each level and each CPU.
         self.amr_header = hvals
         self.amr_offset = f.tell()
     self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
     self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
コード例 #6
0
    def read_header(self):
        """Read the sink file header, field list and per-field byte offsets.

        Fills ``self._header``, ``self.local_particle_count``,
        ``self.fields``, ``self.field_offsets`` and ``self.field_types``.
        Zeroes everything out when the file does not exist.
        """
        if not self.exists:
            self.field_offsets = {}
            self.field_types = {}
            self.local_particle_count = 0
            return
        # Context manager so the handle is closed even if a read fails
        # (the original leaked the open file).
        with open(self.fname, "rb") as f:
            f.seek(0, os.SEEK_END)
            flen = f.tell()
            f.seek(0)
            hvals = {}
            # Read the header of the file
            attrs = self.attrs

            hvals.update(fpu.read_attrs(f, attrs))
            self._header = hvals

            # This is somehow a trick here: we only want one domain to
            # be read, as ramses writes all the sinks in all the
            # domains. Here, we set the local_particle_count to 0 except
            # for the first domain to be read.
            if getattr(self.ds, '_sink_file_flag', False):
                self.local_particle_count = 0
            else:
                self.ds._sink_file_flag = True
                self.local_particle_count = hvals['nsink']

            # Read the fields + add the sink properties
            if self.has_part_descriptor:
                fields = (_read_part_file_descriptor(self.file_descriptor))
            else:
                fields = list(self.known_fields)

            # NOTE(review): range(max_level, min_level) is empty whenever
            # max_level >= min_level — confirm the intended bounds.
            for i in range(self.ds.dimensionality * 2 + 1):
                for j in range(self.ds.max_level, self.ds.min_level):
                    fields.append(("particle_prop_%s_%s" % (i, j), "d"))

            field_offsets = {}
            _pfields = {}

            # Fill the fields, offsets and types
            self.fields = []
            for field, vtype in fields:
                self.fields.append(field)
                # Fields past EOF are simply not present on disk.
                if f.tell() >= flen: break
                field_offsets[self.ptype, field] = f.tell()
                _pfields[self.ptype, field] = vtype
                fpu.skip(f, 1)
        self.field_offsets = field_offsets
        self.field_types = _pfields
コード例 #7
0
 def _read_amr_header(self):
     """Read the AMR file header and record tree metadata.

     Fills ``self.amr_header``, ``self.ngridbound``, ``self.amr_offset``
     (file position where the oct tree starts) and the per-level oct
     counts for this domain.
     """
     hvals = {}
     # Context manager so the handle is closed even if a read fails
     # (the original leaked the open file).
     with open(self.amr_fn, "rb") as f:
         for header in ramses_header(hvals):
             hvals.update(fpu.read_attrs(f, header))
         # That's the header, now we skip a few.
         hvals['numbl'] = np.array(hvals['numbl']).reshape(
             (hvals['nlevelmax'], hvals['ncpu']))
         fpu.skip(f)
         if hvals['nboundary'] > 0:
             fpu.skip(f, 2)
             self.ngridbound = fpu.read_vector(f, 'i').astype("int64")
         else:
             self.ngridbound = np.zeros(hvals['nlevelmax'], dtype='int64')
         # Read past the free-memory record and the ordering string; the
         # values themselves are unused, only the file position matters.
         fpu.read_attrs(f, (('free_mem', 5, 'i'), ) )
         fpu.read_vector(f, 'c')
         fpu.skip(f, 4)
         # Now we're at the tree itself
         # Now we iterate over each level and each CPU.
         self.amr_header = hvals
         self.amr_offset = f.tell()
     self.local_oct_count = hvals['numbl'][self.ds.min_level:, self.domain_id - 1].sum()
     self.total_oct_count = hvals['numbl'][self.ds.min_level:,:].sum(axis=0)
コード例 #8
0
 def _is_valid(self, *args, **kwargs):
     """
     Defined for the NMSU file naming scheme.
     This could differ for other formats.

     Returns True when the candidate file exists and its AMR header
     parses cleanly with the expected big-endian struct.
     """
     f = ("%s" % args[0])
     prefix, suffix = filename_pattern['amr']  # suffix unused in this variant
     if not os.path.isfile(f):
         return False
     with open(f, 'rb') as fh:
         try:
             # A parseable AMR header is what qualifies the file.
             fpu.read_attrs(fh, amr_header_struct, '>')
             return True
         except Exception:
             # Narrowed from a bare ``except:``, which also swallowed
             # KeyboardInterrupt/SystemExit.
             return False
コード例 #9
0
 def _is_valid(self, *args, **kwargs):
     """
     Defined for the NMSU file naming scheme.
     This could differ for other formats.

     Returns True when the candidate file exists, carries the expected
     AMR suffix, and its header parses cleanly as big-endian.
     """
     f = ("%s" % args[0])
     prefix, suffix = filename_pattern['amr']
     if not os.path.isfile(f):
         return False
     if not f.endswith(suffix):
         return False
     with open(f, 'rb') as fh:
         try:
             # The file qualifies if its AMR header parses cleanly.
             fpu.read_attrs(fh, amr_header_struct, '>')
             return True
         except Exception:
             # Narrowed from a bare ``except:``, which also swallowed
             # KeyboardInterrupt/SystemExit.
             return False
コード例 #10
0
ファイル: data_structures.py プロジェクト: egentry/yt
    def _read_sink_header(self):
        """Read the sink-particle file header and per-field byte offsets.

        Fills ``self.sink_header``, ``self.local_sink_count``,
        ``self.sink_field_offsets`` / ``self.sink_field_types`` and
        registers the ``sink`` particle type.  Zeroes the counts when no
        sink file is present.
        """
        if not self._has_sink:
            self.local_sink_count = 0
            self.sink_field_offsets = {}
            return
        # Context manager so the handle is closed even if a read fails
        # (the original leaked the open file).
        with open(self.sink_fn, "rb") as f:
            f.seek(0, os.SEEK_END)
            flen = f.tell()
            f.seek(0)
            hvals = {}
            attrs = (('nsink', 1, 'I'), ('nindsink', 1, 'I'))
            hvals.update(fpu.read_attrs(f, attrs))
            self.sink_header = hvals
            self.local_sink_count = hvals['nsink']

            sink_fields = [("particle_identifier", "i"), ("particle_mass", "d"),
                           ("particle_position_x", "d"),
                           ("particle_position_y", "d"),
                           ("particle_position_z", "d"),
                           ("particle_velocity_x", "d"),
                           ("particle_velocity_y", "d"),
                           ("particle_velocity_z", "d"), ("particle_age", "d"),
                           ("BH_real_accretion", "d"), ("BH_bondi_accretion", "d"),
                           ("BH_eddington_accretion", "d"), ("BH_esave", "d"),
                           ("gas_spin_x", "d"), ("gas_spin_y", "d"),
                           ("gas_spin_z", "d"), ("BH_spin_x", "d"),
                           ("BH_spin_y", "d"), ("BH_spin_z", "d"),
                           ("BH_spin", "d"), ("BH_efficiency", "d")]

            # NOTE(review): range(max_level, min_level) is empty whenever
            # max_level >= min_level — confirm the intended bounds.
            for i in range(self.ds.dimensionality * 2 + 1):
                for j in range(self.ds.max_level, self.ds.min_level):
                    sink_fields.append(("particle_prop_%s_%s" % (i, j), "d"))

            field_offsets = {}
            _pfields = {}
            for field, vtype in sink_fields:
                # Fields past EOF are simply not present on disk.
                if f.tell() >= flen: break
                field_offsets["sink", field] = f.tell()
                _pfields["sink", field] = vtype
                fpu.skip(f, 1)
        self.sink_field_offsets = field_offsets
        self.sink_field_types = _pfields

        self._add_ptype('sink')
コード例 #11
0
    def _parse_parameter_file(self):
        """
        Get the various simulation parameters & constants.

        Reads the AMR header (root grid dimensions, file offsets, root
        level) and, unless ``skip_particles`` is set, the particle
        header (species masses and counts), then derives the standard
        cosmology and level attributes yt expects.
        """
        self.dimensionality = 3
        self.refine_by = 2
        self.periodicity = (True, True, True)
        self.cosmological_simulation = True
        self.parameters = {}
        self.unique_identifier = \
            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
        self.parameters.update(constants)
        self.parameters['Time'] = 1.0
        # read the amr header
        with open(self._file_amr, 'rb') as f:
            amr_header_vals = fpu.read_attrs(f, amr_header_struct, '>')
            for to_skip in ['tl', 'dtl', 'tlold', 'dtlold', 'iSO']:
                skipped = fpu.skip(f, endian='>')
            (self.ncell) = fpu.read_vector(f, 'i', '>')[0]
            # Try to figure out the root grid dimensions
            est = int(np.rint(self.ncell**(1.0 / 3.0)))
            # Note here: this is the number of *cells* on the root grid.
            # This is not the same as the number of Octs.
            # domain dimensions is the number of root *cells*
            self.domain_dimensions = np.ones(3, dtype='int64') * est
            self.root_grid_mask_offset = f.tell()
            # Integer division: ``/`` yields a float under Python 3 and
            # breaks the ``[:self.root_ncells]`` slice below.
            self.root_nocts = self.domain_dimensions.prod() // 8
            self.root_ncells = self.root_nocts * 8
            # Single format string (the original concatenated two pieces
            # without a separating space).
            mylog.debug(
                "Estimating %i cells on a root grid side, %i root octs",
                est, self.root_nocts)
            self.root_iOctCh = fpu.read_vector(f, 'i', '>')[:self.root_ncells]
            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
                                                        order='F')
            self.root_grid_offset = f.tell()
            self.root_nhvar = fpu.skip(f, endian='>')
            self.root_nvar = fpu.skip(f, endian='>')
            # make sure that the number of root variables is a multiple of
            # rootcells
            assert self.root_nhvar % self.root_ncells == 0
            assert self.root_nvar % self.root_ncells == 0
            self.nhydro_variables = ((self.root_nhvar + self.root_nvar) /
                                     self.root_ncells)
            self.iOctFree, self.nOct = fpu.read_vector(f, 'i', '>')
            self.child_grid_offset = f.tell()
            self.parameters.update(amr_header_vals)
            amr_header_vals = None
            # estimate the root level
            float_center, fl, iocts, nocts, root_level = _read_art_level_info(
                f, [0, self.child_grid_offset],
                1,
                coarse_grid=self.domain_dimensions[0])
            del float_center, fl, iocts, nocts
            self.root_level = root_level
            mylog.info("Using root level of %02i", self.root_level)
        # read the particle header
        self.particle_types = []
        self.particle_types_raw = ()
        if not self.skip_particles and self._file_particle_header:
            with open(self._file_particle_header, "rb") as fh:
                particle_header_vals = fpu.read_attrs(fh,
                                                      particle_header_struct,
                                                      '>')
                fh.seek(seek_extras)
                n = particle_header_vals['Nspecies']
                wspecies = np.fromfile(fh, dtype='>f', count=10)
                lspecies = np.fromfile(fh, dtype='>i', count=10)
            self.parameters['wspecies'] = wspecies[:n]
            self.parameters['lspecies'] = lspecies[:n]
            for specie in range(n):
                self.particle_types.append("specie%i" % specie)
            self.particle_types_raw = tuple(self.particle_types)
            # Per-species particle counts (cumulative lspecies -> diffs).
            ls_nonzero = np.diff(lspecies)[:n - 1]
            ls_nonzero = np.append(lspecies[0], ls_nonzero)
            self.star_type = len(ls_nonzero)
            mylog.info("Discovered %i species of particles", len(ls_nonzero))
            mylog.info("Particle populations: " + '%9i ' * len(ls_nonzero),
                       *ls_nonzero)
            for k, v in particle_header_vals.items():
                if k in self.parameters.keys():
                    if not self.parameters[k] == v:
                        mylog.info("Inconsistent parameter %s %1.1e  %1.1e", k,
                                   v, self.parameters[k])
                else:
                    self.parameters[k] = v
            self.parameters_particles = particle_header_vals
            self.parameters.update(particle_header_vals)
            self.parameters['ng'] = self.parameters['Ngridc']
            self.parameters['ncell0'] = self.parameters['ng']**3

        # setup standard simulation params yt expects to see
        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
        self.omega_lambda = self.parameters['Oml0']
        self.omega_matter = self.parameters['Om0']
        self.hubble_constant = self.parameters['hubble']
        self.min_level = self.parameters['min_level']
        self.max_level = self.parameters['max_level']
        if self.limit_level is not None:
            self.max_level = min(self.limit_level,
                                 self.parameters['max_level'])
        if self.force_max_level is not None:
            self.max_level = self.force_max_level
        # 100 km/s/Mpc expressed in 1/s via 3.08568025e19 km per Mpc.
        self.hubble_time = 1.0 / (self.hubble_constant * 100 / 3.08568025e19)
        self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')
        self.gamma = self.parameters["gamma"]
        mylog.info("Max level is %02i", self.max_level)
コード例 #12
0
ファイル: data_structures.py プロジェクト: pshriwise/yt
    def _parse_parameter_file(self):
        """
        Get the various simulation parameters & constants.

        Reads the AMR header (root grid dimensions, file offsets, root
        level) and, unless ``skip_particles`` is set, the particle
        header (species masses and counts), then derives the standard
        cosmology and level attributes yt expects.
        """
        self.domain_left_edge = np.zeros(3, dtype="float")
        self.domain_right_edge = np.zeros(3, dtype="float") + 1.0
        self.dimensionality = 3
        self.refine_by = 2
        self.periodicity = (True, True, True)
        self.cosmological_simulation = True
        self.parameters = {}
        self.parameters.update(constants)
        self.parameters["Time"] = 1.0
        # read the amr header
        with open(self._file_amr, "rb") as f:
            amr_header_vals = fpu.read_attrs(f, amr_header_struct, ">")
            n_to_skip = len(("tl", "dtl", "tlold", "dtlold", "iSO"))
            fpu.skip(f, n_to_skip, endian=">")
            (self.ncell) = fpu.read_vector(f, "i", ">")[0]
            # Try to figure out the root grid dimensions
            est = int(np.rint(self.ncell**(1.0 / 3.0)))
            # Note here: this is the number of *cells* on the root grid.
            # This is not the same as the number of Octs.
            # domain dimensions is the number of root *cells*
            self.domain_dimensions = np.ones(3, dtype="int64") * est
            self.root_grid_mask_offset = f.tell()
            self.root_nocts = self.domain_dimensions.prod() // 8
            self.root_ncells = self.root_nocts * 8
            mylog.debug(
                "Estimating %i cells on a root grid side, %i root octs",
                est,
                self.root_nocts,
            )
            self.root_iOctCh = fpu.read_vector(f, "i", ">")[:self.root_ncells]
            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
                                                        order="F")
            self.root_grid_offset = f.tell()
            self.root_nhvar = fpu.skip(f, endian=">")
            self.root_nvar = fpu.skip(f, endian=">")
            # make sure that the number of root variables is a multiple of
            # rootcells
            assert self.root_nhvar % self.root_ncells == 0
            assert self.root_nvar % self.root_ncells == 0
            self.nhydro_variables = (self.root_nhvar +
                                     self.root_nvar) / self.root_ncells
            self.iOctFree, self.nOct = fpu.read_vector(f, "i", ">")
            self.child_grid_offset = f.tell()
            # lextra needs to be loaded as a string, but it's actually
            # array values.  So pop it off here, and then re-insert.
            # np.frombuffer replaces the deprecated np.fromstring.
            lextra = amr_header_vals.pop("lextra")
            amr_header_vals["lextra"] = np.frombuffer(lextra, ">f4")
            self.parameters.update(amr_header_vals)
            amr_header_vals = None
            # estimate the root level
            float_center, fl, iocts, nocts, root_level = _read_art_level_info(
                f, [0, self.child_grid_offset],
                1,
                coarse_grid=self.domain_dimensions[0])
            del float_center, fl, iocts, nocts
            self.root_level = root_level
            mylog.info("Using root level of %02i", self.root_level)
        # read the particle header
        self.particle_types = []
        self.particle_types_raw = ()
        if not self.skip_particles and self._file_particle_header:
            with open(self._file_particle_header, "rb") as fh:
                particle_header_vals = fpu.read_attrs(fh,
                                                      particle_header_struct,
                                                      ">")
                fh.seek(seek_extras)
                n = particle_header_vals["Nspecies"]
                wspecies = np.fromfile(fh, dtype=">f", count=10)
                lspecies = np.fromfile(fh, dtype=">i", count=10)
                # extras needs to be loaded as a string, but it's actually
                # array values.  So pop it off here, and then re-insert.
                # np.frombuffer replaces the deprecated np.fromstring.
                extras = particle_header_vals.pop("extras")
                particle_header_vals["extras"] = np.frombuffer(extras, ">f4")
            self.parameters["wspecies"] = wspecies[:n]
            self.parameters["lspecies"] = lspecies[:n]
            for specie in range(n):
                self.particle_types.append("specie%i" % specie)
            self.particle_types_raw = tuple(self.particle_types)
            # Per-species particle counts (cumulative lspecies -> diffs).
            ls_nonzero = np.diff(lspecies)[:n - 1]
            ls_nonzero = np.append(lspecies[0], ls_nonzero)
            self.star_type = len(ls_nonzero)
            mylog.info("Discovered %i species of particles", len(ls_nonzero))
            info_str = "Particle populations: " + "%9i " * len(ls_nonzero)
            mylog.info(info_str, *ls_nonzero)
            self._particle_type_counts = dict(
                zip(self.particle_types_raw, ls_nonzero))
            for k, v in particle_header_vals.items():
                if k in self.parameters.keys():
                    if not self.parameters[k] == v:
                        mylog.info(
                            "Inconsistent parameter %s %1.1e  %1.1e",
                            k,
                            v,
                            self.parameters[k],
                        )
                else:
                    self.parameters[k] = v
            self.parameters_particles = particle_header_vals
            self.parameters.update(particle_header_vals)
            self.parameters["ng"] = self.parameters["Ngridc"]
            self.parameters["ncell0"] = self.parameters["ng"]**3

        # setup standard simulation params yt expects to see
        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
        self.omega_lambda = self.parameters["Oml0"]
        self.omega_matter = self.parameters["Om0"]
        self.hubble_constant = self.parameters["hubble"]
        self.min_level = self.parameters["min_level"]
        self.max_level = self.parameters["max_level"]
        if self.limit_level is not None:
            self.max_level = min(self.limit_level,
                                 self.parameters["max_level"])
        if self.force_max_level is not None:
            self.max_level = self.force_max_level
        # 100 km/s/Mpc expressed in 1/s via 3.08568025e19 km per Mpc.
        self.hubble_time = 1.0 / (self.hubble_constant * 100 / 3.08568025e19)
        self.current_time = self.quan(b2t(self.parameters["t"]), "Gyr")
        self.gamma = self.parameters["gamma"]
        mylog.info("Max level is %02i", self.max_level)
コード例 #13
0
    def _setup_auto_fields(self):
        '''
        If no fluid fields are set, the code tries to set up a fluids array by hand

        Determines ``nvar`` from the first hydro domain file's header and
        picks the conventional field list (hydro, hydro+metals, or MHD),
        padding with generic ``varN`` names if extra variables remain.
        '''
        # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
        # Find nvar

        # TODO: copy/pasted from DomainFile; needs refactoring!
        num = os.path.basename(
            self._ds.parameter_filename).split(".")[0].split("_")[1]
        testdomain = 1  # Just pick the first domain file to read
        basename = "%s/%%s_%s.out%05i" % (os.path.abspath(
            os.path.dirname(self._ds.parameter_filename)), num, testdomain)
        hydro_fn = basename % "hydro"
        # Do we have a hydro file?
        if not os.path.exists(hydro_fn):
            self.fluid_field_list = []
            return
        # Read the number of hydro variables.  Context manager so the
        # handle is closed promptly (the original leaked it).
        hydro_header = (('ncpu', 1, 'i'), ('nvar', 1, 'i'), ('ndim', 1, 'i'),
                        ('nlevelmax', 1, 'i'), ('nboundary', 1, 'i'), ('gamma',
                                                                       1, 'd'))
        with open(hydro_fn, "rb") as f:
            hvals = fpu.read_attrs(f, hydro_header)
        self.ds.gamma = hvals['gamma']
        nvar = hvals['nvar']
        # OK, we got NVAR, now set up the arrays depending on what NVAR is
        # Allow some wiggle room for users to add too many variables
        if nvar < 5:
            # Supply nvar as a lazy logging argument (the original had two
            # %s placeholders but no arguments, logging them literally).
            mylog.debug(
                "nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES",
                nvar)
            raise ValueError
        # Basic hydro runs
        if nvar == 5:
            fields = [
                "Density", "x-velocity", "y-velocity", "z-velocity", "Pressure"
            ]
        if nvar > 5 and nvar < 11:
            fields = [
                "Density", "x-velocity", "y-velocity", "z-velocity",
                "Pressure", "Metallicity"
            ]
        # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
        if nvar == 11:
            fields = [
                "Density", "x-velocity", "y-velocity", "z-velocity",
                "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
                "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                "Pressure"
            ]
        if nvar > 11:
            fields = [
                "Density", "x-velocity", "y-velocity", "z-velocity",
                "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
                "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                "Pressure", "Metallicity"
            ]
        # Pad out any remaining, unnamed variables.
        while len(fields) < nvar:
            fields.append("var" + str(len(fields)))
        mylog.debug(
            "No fields specified by user; automatically setting fields array to %s",
            str(fields))
        self.fluid_field_list = fields
コード例 #14
0
    def read_header(self):
        """Read the particle file header, field list and byte offsets.

        Fills ``self.header``, ``self.local_particle_count``,
        ``self.field_offsets`` and ``self.field_types``.  Any data left
        on disk past the known fields is registered as extra double
        fields, with a one-time warning.  Zeroes everything out when the
        file does not exist.
        """
        if not self.exists:
            self.field_offsets = {}
            self.field_types = {}
            self.local_particle_count = 0
            return
        # Context manager so the handle is closed even if a read fails
        # (the original leaked the open file).
        with open(self.fname, "rb") as f:
            f.seek(0, os.SEEK_END)
            flen = f.tell()
            f.seek(0)
            hvals = {}
            attrs = self.attrs
            hvals.update(fpu.read_attrs(f, attrs))
            self.header = hvals
            self.local_particle_count = hvals['npart']
            extra_particle_fields = self.ds._extra_particle_fields

            if self.has_part_descriptor:
                particle_fields = (_read_part_file_descriptor(
                    self.file_descriptor))
            else:
                particle_fields = list(self.known_fields)

                if extra_particle_fields is not None:
                    particle_fields += extra_particle_fields

            # Star fields only exist on disk when stars were formed.
            if hvals["nstar_tot"] > 0 and extra_particle_fields is not None:
                particle_fields += [("particle_birth_time", "d"),
                                    ("particle_metallicity", "d")]

            field_offsets = {}
            _pfields = {}

            ptype = self.ptype

            # Read offsets
            for field, vtype in particle_fields:
                if f.tell() >= flen: break
                field_offsets[ptype, field] = f.tell()
                _pfields[ptype, field] = vtype
                fpu.skip(f, 1)

            # Whatever remains past the known fields is registered as
            # extra fields, assumed to be doubles.
            iextra = 0
            while f.tell() < flen:
                iextra += 1
                field, vtype = ('particle_extra_field_%i' % iextra, 'd')
                particle_fields.append((field, vtype))

                field_offsets[ptype, field] = f.tell()
                _pfields[ptype, field] = vtype
                fpu.skip(f, 1)

        # Warn only once per dataset about guessed extra fields.
        if iextra > 0 and not self.ds._warn_extra_fields:
            self.ds._warn_extra_fields = True
            w = ("Detected %s extra particle fields assuming kind "
                 "`double`. Consider using the `extra_particle_fields` "
                 "keyword argument if you have unexpected behavior.")
            mylog.warning(w % iextra)

        self.field_offsets = field_offsets
        self.field_types = _pfields
コード例 #15
0
ファイル: data_structures.py プロジェクト: egentry/yt
 def _setup_auto_fields(self):
     '''
     If no fluid fields are set, the code tries to set up a fluids array by hand

     Reads ``nvar`` (the number of conserved variables per cell) from the
     header of the first hydro domain file and maps it onto the
     conventional RAMSES field layouts (plain hydro, hydro+metals, MHD,
     and the RAMSES-RT variants).  Sets ``self.fluid_field_list`` (left
     empty when no hydro file exists).
     '''
     # TODO: copy/pasted from DomainFile; needs refactoring!
     num = os.path.basename(
         self.dataset.parameter_filename).split(".")[0].split("_")[1]
     testdomain = 1  # Just pick the first domain file to read
     basename = "%s/%%s_%s.out%05i" % (os.path.abspath(
         os.path.dirname(self.dataset.parameter_filename)), num, testdomain)
     hydro_fn = basename % "hydro"
     # Do we have a hydro file?
     if not os.path.exists(hydro_fn):
         self.fluid_field_list = []
         return
     # Read the number of hydro variables
     hydro_header = (('ncpu', 1, 'i'), ('nvar', 1, 'i'), ('ndim', 1, 'i'),
                     ('nlevelmax', 1, 'i'), ('nboundary', 1, 'i'),
                     ('gamma', 1, 'd'))
     # BUGFIX: the file handle was previously leaked; a context manager
     # closes it as soon as the header has been read.
     with open(hydro_fn, "rb") as f:
         hvals = fpu.read_attrs(f, hydro_header)
     self.ds.gamma = hvals['gamma']
     nvar = hvals['nvar']
     # OK, we got NVAR, now set up the arrays depending on what NVAR is
     # but first check for radiative transfer!
     foldername = os.path.abspath(
         os.path.dirname(self.ds.parameter_filename))
     rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
     if rt_flag:  # rt run
         if nvar < 10:
             mylog.info('Detected RAMSES-RT file WITHOUT IR trapping.')
             fields = [
                 "Density", "x-velocity", "y-velocity", "z-velocity",
                 "Pressure", "Metallicity", "HII", "HeII", "HeIII"
             ]
         else:
             mylog.info('Detected RAMSES-RT file WITH IR trapping.')
             fields = [
                 "Density", "x-velocity", "y-velocity", "z-velocity",
                 "Pres_IR", "Pressure", "Metallicity", "HII", "HeII",
                 "HeIII"
             ]
     else:
         if nvar < 5:
             # BUGFIX: the former log call had two unfilled "%s"
             # placeholders; raise a fully formatted error instead.
             raise ValueError(
                 "nvar=%s is too small! yt doesn't currently support "
                 "1D/2D runs in RAMSES." % nvar)
         # Basic hydro runs
         if nvar == 5:
             fields = [
                 "Density", "x-velocity", "y-velocity", "z-velocity",
                 "Pressure"
             ]
         if nvar > 5 and nvar < 11:
             fields = [
                 "Density", "x-velocity", "y-velocity", "z-velocity",
                 "Pressure", "Metallicity"
             ]
         # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
         if nvar == 11:
             fields = [
                 "Density", "x-velocity", "y-velocity", "z-velocity",
                 "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
                 "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                 "Pressure"
             ]
         if nvar > 11:
             fields = [
                 "Density", "x-velocity", "y-velocity", "z-velocity",
                 "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
                 "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                 "Pressure", "Metallicity"
             ]
     # Allow some wiggle room for users to add too many variables
     while len(fields) < nvar:
         fields.append("var" + str(len(fields)))
     mylog.debug(
         "No fields specified by user; automatically setting fields array to %s",
         str(fields))
     self.fluid_field_list = fields
コード例 #16
0
ファイル: data_structures.py プロジェクト: egentry/yt
    def _read_particle_header(self):
        """Read the particle file header and index the field records.

        Stores the particle count for this domain and, for each particle
        field, the byte offset of its record in the file so fields can be
        read lazily later.  Any unexpected trailing records are registered
        as ``particle_extra_field_N`` and assumed to hold doubles.
        """
        if not os.path.exists(self.part_fn):
            # No particle file for this domain.
            self.local_particle_count = 0
            self.particle_field_offsets = {}
            return

        # BUGFIX: the file handle was previously leaked; a context manager
        # closes it once all offsets have been recorded.
        with open(self.part_fn, "rb") as f:
            f.seek(0, os.SEEK_END)
            flen = f.tell()  # total file size, used to detect extra records
            f.seek(0)
            hvals = {}
            attrs = (('ncpu', 1, 'I'), ('ndim', 1, 'I'), ('npart', 1, 'I'))
            hvals.update(fpu.read_attrs(f, attrs))
            fpu.read_vector(f, 'I')  # skip one record; its content is unused here

            attrs = (('nstar_tot', 1, 'I'), ('mstar_tot', 1, 'd'),
                     ('mstar_lost', 1, 'd'), ('nsink', 1, 'I'))
            hvals.update(fpu.read_attrs(f, attrs))
            self.particle_header = hvals
            self.local_particle_count = hvals['npart']

            # Try reading particle file descriptor
            if self._has_part_descriptor:
                particle_fields = (_read_part_file_descriptor(
                    self._part_file_descriptor))
            else:
                # Default RAMSES particle field layout.
                particle_fields = [("particle_position_x", "d"),
                                   ("particle_position_y", "d"),
                                   ("particle_position_z", "d"),
                                   ("particle_velocity_x", "d"),
                                   ("particle_velocity_y", "d"),
                                   ("particle_velocity_z", "d"),
                                   ("particle_mass", "d"),
                                   ("particle_identifier", "i"),
                                   ("particle_refinement_level", "I")]

                if self.ds._extra_particle_fields is not None:
                    particle_fields += self.ds._extra_particle_fields

            ptype = 'io'

            field_offsets = {}
            _pfields = {}

            # Read offsets
            for field, vtype in particle_fields:
                if f.tell() >= flen: break
                field_offsets[ptype, field] = f.tell()
                _pfields[ptype, field] = vtype
                fpu.skip(f, 1)

            # Whatever remains past the known fields is registered as an
            # extra field of assumed kind double.
            iextra = 0
            while f.tell() < flen:
                iextra += 1
                field, vtype = ('particle_extra_field_%i' % iextra, 'd')
                particle_fields.append((field, vtype))

                field_offsets[ptype, field] = f.tell()
                _pfields[ptype, field] = vtype
                fpu.skip(f, 1)

        if iextra > 0 and not self.ds._warn_extra_fields:
            # Warn only once per dataset.
            self.ds._warn_extra_fields = True
            w = ("Detected %s extra particle fields assuming kind "
                 "`double`. Consider using the `extra_particle_fields` "
                 "keyword argument if you have unexpected behavior.")
            mylog.warning(w % iextra)

        self.particle_field_offsets = field_offsets
        self.particle_field_types = _pfields

        # Register the particle type
        self._add_ptype(ptype)
コード例 #17
0
    def _parse_parameter_file(self):
        """
        Get the various simulation parameters & constants.

        Reads the AMR header (and, when present, the particle header) of an
        ART output, populates ``self.parameters``, and derives the standard
        cosmology and level attributes yt expects.
        """
        self.dimensionality = 3
        self.refine_by = 2
        self.periodicity = (True, True, True)
        self.cosmological_simulation = True
        self.parameters = {}
        # The file creation time doubles as a unique dataset identifier.
        self.unique_identifier = \
            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
        self.parameters.update(constants)
        self.parameters['Time'] = 1.0
        # read the amr header
        with open(self._file_amr, 'rb') as f:
            amr_header_vals = fpu.read_attrs(f, amr_header_struct, '>')
            # Skip the per-level records; only the resulting file offsets
            # matter for the reads below.
            for to_skip in ['tl', 'dtl', 'tlold', 'dtlold', 'iSO']:
                fpu.skip(f, endian='>')
            (self.ncell) = fpu.read_vector(f, 'i', '>')[0]
            # Try to figure out the root grid dimensions
            est = int(np.rint(self.ncell**(1.0/3.0)))
            # Note here: this is the number of *cells* on the root grid.
            # This is not the same as the number of Octs.
            # domain dimensions is the number of root *cells*
            self.domain_dimensions = np.ones(3, dtype='int64')*est
            self.root_grid_mask_offset = f.tell()
            # BUGFIX: use floor division so the oct/cell counts stay
            # integers under Python 3 -- true division yields floats,
            # which break the integer slicing below.
            self.root_nocts = self.domain_dimensions.prod()//8
            self.root_ncells = self.root_nocts*8
            mylog.debug("Estimating %i cells on a root grid side, "
                        "%i root octs", est, self.root_nocts)
            self.root_iOctCh = fpu.read_vector(f, 'i', '>')[:self.root_ncells]
            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
                                                        order='F')
            self.root_grid_offset = f.tell()
            self.root_nhvar = fpu.skip(f, endian='>')
            self.root_nvar = fpu.skip(f, endian='>')
            # make sure that the number of root variables is a multiple of
            # rootcells
            assert self.root_nhvar % self.root_ncells == 0
            assert self.root_nvar % self.root_ncells == 0
            self.nhydro_variables = ((self.root_nhvar+self.root_nvar) //
                                     self.root_ncells)
            self.iOctFree, self.nOct = fpu.read_vector(f, 'i', '>')
            self.child_grid_offset = f.tell()
            self.parameters.update(amr_header_vals)
            amr_header_vals = None
            # estimate the root level
            float_center, fl, iocts, nocts, root_level = _read_art_level_info(
                f,
                [0, self.child_grid_offset], 1,
                coarse_grid=self.domain_dimensions[0])
            del float_center, fl, iocts, nocts
            self.root_level = root_level
            mylog.info("Using root level of %02i", self.root_level)
        # read the particle header
        self.particle_types = []
        self.particle_types_raw = ()
        if not self.skip_particles and self._file_particle_header:
            with open(self._file_particle_header, "rb") as fh:
                particle_header_vals = fpu.read_attrs(
                    fh, particle_header_struct, '>')
                fh.seek(seek_extras)
                n = particle_header_vals['Nspecies']
                wspecies = np.fromfile(fh, dtype='>f', count=10)
                lspecies = np.fromfile(fh, dtype='>i', count=10)
            self.parameters['wspecies'] = wspecies[:n]
            self.parameters['lspecies'] = lspecies[:n]
            for specie in range(n):
                self.particle_types.append("specie%i" % specie)
            self.particle_types_raw = tuple(
                self.particle_types)
            # lspecies is cumulative; diff recovers per-species counts.
            ls_nonzero = np.diff(lspecies)[:n-1]
            ls_nonzero = np.append(lspecies[0], ls_nonzero)
            self.star_type = len(ls_nonzero)
            mylog.info("Discovered %i species of particles", len(ls_nonzero))
            mylog.info("Particle populations: "+'%9i '*len(ls_nonzero),
                       *ls_nonzero)
            # Merge the particle header into the global parameters,
            # flagging (but not overwriting) inconsistent duplicates.
            for k, v in particle_header_vals.items():
                if k in self.parameters.keys():
                    if not self.parameters[k] == v:
                        mylog.info(
                            "Inconsistent parameter %s %1.1e  %1.1e", k, v,
                            self.parameters[k])
                else:
                    self.parameters[k] = v
            self.parameters_particles = particle_header_vals
            self.parameters.update(particle_header_vals)
            self.parameters['ng'] = self.parameters['Ngridc']
            self.parameters['ncell0'] = self.parameters['ng']**3

        # setup standard simulation params yt expects to see
        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
        self.omega_lambda = self.parameters['Oml0']
        self.omega_matter = self.parameters['Om0']
        self.hubble_constant = self.parameters['hubble']
        self.min_level = self.parameters['min_level']
        self.max_level = self.parameters['max_level']
        if self.limit_level is not None:
            self.max_level = min(
                self.limit_level, self.parameters['max_level'])
        if self.force_max_level is not None:
            self.max_level = self.force_max_level
        # Hubble time in Gyr-compatible seconds: 100 km/s/Mpc expressed
        # in 1/s via the Mpc->km conversion constant.
        self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)
        self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')
        self.gamma = self.parameters["gamma"]
        mylog.info("Max level is %02i", self.max_level)
コード例 #18
0
    def _setup_auto_fields(self):
        '''
        If no fluid fields are set, the code tries to set up a fluids array by hand

        Reads ``nvar`` from the header of the first hydro domain file and
        maps it onto the conventional RAMSES field layouts (plain hydro,
        hydro+metals, MHD).  Sets ``self.fluid_field_list`` (left empty
        when no hydro file exists).
        '''
        # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
        # TODO: copy/pasted from DomainFile; needs refactoring!
        num = os.path.basename(self.dataset.parameter_filename).split("."
                )[0].split("_")[1]
        testdomain = 1 # Just pick the first domain file to read
        basename = "%s/%%s_%s.out%05i" % (
            os.path.abspath(
              os.path.dirname(self.dataset.parameter_filename)),
            num, testdomain)
        hydro_fn = basename % "hydro"
        # Do we have a hydro file?
        if not os.path.exists(hydro_fn):
            self.fluid_field_list = []
            return
        # Read the number of hydro variables
        hydro_header = ( ('ncpu', 1, 'i'),
                         ('nvar', 1, 'i'),
                         ('ndim', 1, 'i'),
                         ('nlevelmax', 1, 'i'),
                         ('nboundary', 1, 'i'),
                         ('gamma', 1, 'd')
                         )
        # BUGFIX: the file handle was previously leaked; a context manager
        # closes it as soon as the header has been read.
        with open(hydro_fn, "rb") as f:
            hvals = fpu.read_attrs(f, hydro_header)
        self.ds.gamma = hvals['gamma']
        nvar = hvals['nvar']
        # OK, we got NVAR, now set up the arrays depending on what NVAR is
        if nvar < 5:
            # BUGFIX: the former log call had two unfilled "%s"
            # placeholders; raise a fully formatted error instead.
            raise ValueError(
                "nvar=%s is too small! yt doesn't currently support "
                "1D/2D runs in RAMSES." % nvar)
        # Basic hydro runs
        if nvar == 5:
            fields = ["Density",
                      "x-velocity", "y-velocity", "z-velocity",
                      "Pressure"]
        if nvar > 5 and nvar < 11:
            fields = ["Density",
                      "x-velocity", "y-velocity", "z-velocity",
                      "Pressure", "Metallicity"]
        # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
        if nvar == 11:
            fields = ["Density",
                      "x-velocity", "y-velocity", "z-velocity",
                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                      "Pressure"]
        if nvar > 11:
            fields = ["Density",
                      "x-velocity", "y-velocity", "z-velocity",
                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                      "Pressure","Metallicity"]
        # Allow some wiggle room for users to add too many variables
        while len(fields) < nvar:
            fields.append("var"+str(len(fields)))
        mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
        self.fluid_field_list = fields
コード例 #19
0
ファイル: load_bricks.py プロジェクト: cphyc/yt_treefiles
def load_brick(filename, bbox=None, center=None, return_ds=True):
    """Load a brick file as output by HaloFinder.

    Parameters
    ----------
    filename : str
        Path to the brick file.
    bbox : array-like, optional
        Bounding box (in Mpc) used to force the box size; otherwise the
        box is computed from the halo positions themselves.
    center : optional
        Currently unused.  # NOTE(review): accepted but never read -- confirm
    return_ds : bool
        When False, return the raw halo dictionary instead of building a
        yt particle dataset.
    """
    with open(filename, "rb") as f:
        # Load headers
        hvals = {}
        attrs = (('nbodies', 1, 'i'),
                 ('particle_mass', 1, 'f'),
                 ('aexp', 1, 'f'),
                 ('omega_t', 1, 'f'),
                 ('age_univ', 1, 'f'),
                 (('nhalos', 'nsubhalos'), 2, 'i'))

        hvals.update(fpu.read_attrs(f, attrs))

        # Load halo data: main halos first, then subhalos, keyed by id.
        halos = {}
        for _ in range(hvals['nhalos']):
            tmp = _read_halo(f)

            halo_id = tmp.get('particle_identifier')
            halos[halo_id] = tmp

        for _ in range(hvals['nsubhalos']):
            tmp = _read_halo(f)

            halo_id = tmp.get('particle_identifier')
            halos[halo_id] = tmp

    if not return_ds:
        return halos

    # Now converts everything into yt-aware quantities: each stored key
    # maps to a (scale factor, unit) pair.
    am_unit = (1, 'Msun*Mpc*km/s')
    unit_dict = {
        'particle_identifier': (1, '1'), 'particle_mass': (1e11, 'Msun'),
        'particle_position_x': (1, 'Mpc'), 'particle_position_y': (1, 'Mpc'),
        'particle_position_z': (1, 'Mpc'), 'particle_velocity_x': (1, 'km/s'),
        'particle_velocity_y': (1, 'km/s'), 'particle_velocity_z': (1, 'km/s'),
        'subhalo_level': (1, '1'), 'subhalo_hosthalo': (1, '1'),
        'subhalo_host': (1, '1'),
        'subhalo_number': (1, '1'), 'subhalo_next': (1, '1'),
        'particle_angular_momentum_x': am_unit,
        'particle_angular_momentum_y': am_unit,
        'particle_angular_momentum_z': am_unit,
        'virial_radius': (1, 'Mpc'), 'virial_mass': (1e11, 'Msun'),
        'virial_temp': (1, 'K'),
        'virial_vel': (1, 'km/s'),
        'particle_spin': (1, '1'), 'particle_radius': (1, 'Mpc'),
        'particle_axis_a': (1, 'Mpc'), 'particle_axis_b': (1, 'Mpc'),
        'particle_axis_c': (1, 'Mpc')}

    data = {}
    for key in unit_dict:
        intensity = unit_dict[key][0]
        unit = unit_dict[key][1]
        arr = np.array([halos[i][key] for i in halos]) * intensity
        data[key] = (arr, unit)

    n_ref = 1

    ppx, ppy, ppz = [data['particle_position_%s' % d][0]
                     for d in ('x', 'y', 'z')]

    if bbox is not None:
        try:
            bbox = np.array(bbox.to('Mpc'))
        except AttributeError:
            # BUGFIX: was a bare "except:".  Plain sequences/ndarrays have
            # no ``.to`` method, which is the only failure we want to
            # tolerate here.
            bbox = np.array(bbox)
        width = bbox[1] - bbox[0]
        left = -width / 2
        right = +width / 2
    else:
        left = np.array([min(ppx), min(ppy), min(ppz)])
        right = np.array([max(ppx), max(ppy), max(ppz)])

    # Shift positions so the domain's left edge sits at the origin.
    data['particle_position_x'] = (ppx - left[0]), 'Mpc'
    data['particle_position_y'] = (ppy - left[1]), 'Mpc'
    data['particle_position_z'] = (ppz - left[2]), 'Mpc'

    right -= left
    left -= left

    bbox = np.array([left, right]).T

    ds = yt.load_particles(data, length_unit=U.Mpc, mass_unit=1e11*U.Msun,
                           bbox=bbox, n_ref=n_ref)

    @yt.particle_filter('halos', requires=["particle_mass"],
                        filtered_type='all')
    def is_halo(pfilter, data):
        return data[(pfilter.filtered_type, "particle_mass")] > 0

    ds.add_particle_filter('halos')
    return ds
コード例 #20
0
    def detect_fields(cls, ds):
        """Detect the fluid fields present in a RAMSES output.

        The field list is taken from, in order of priority: the user's
        ``_fields_in_file`` override, the hydro file descriptor, or a
        guess based on ``nvar`` read from the first hydro domain file.
        Extra variables beyond the named ones are appended as ``varN``.
        Stores ``(ftype, name)`` pairs on ``cls.field_list`` and returns
        the plain field names.
        """
        num = os.path.basename(ds.parameter_filename).split("."
                )[0].split("_")[1]
        testdomain = 1 # Just pick the first domain file to read
        basepath = os.path.abspath(
              os.path.dirname(ds.parameter_filename))
        basename = "%s/%%s_%s.out%05i" % (
            basepath, num, testdomain)
        fname = basename % 'hydro'
        fname_desc = os.path.join(basepath, cls.file_descriptor)

        # BUGFIX: the file handle was previously leaked; close it as soon
        # as the header attributes have been read.
        with open(fname, 'rb') as f:
            attrs = cls.attrs
            hvals = fpu.read_attrs(f, attrs)
        cls.parameters = hvals

        # Store some metadata
        ds.gamma = hvals['gamma']
        nvar = hvals['nvar']

        ok = False
        if ds._fields_in_file is not None:
            fields = list(ds._fields_in_file)
            ok = True
        elif os.path.exists(fname_desc):
            mylog.debug('Reading hydro file descriptor.')
            # For now, we can only read double precision fields
            fields = [e[0] for e in _read_fluid_file_descriptor(fname_desc)]

            # We get no fields for old-style hydro file descriptor
            ok = len(fields) > 0

        if not ok:
            foldername  = os.path.abspath(os.path.dirname(ds.parameter_filename))
            rt_flag = any(glob.glob(os.sep.join([foldername, 'info_rt_*.txt'])))
            if rt_flag: # rt run
                if nvar < 10:
                    mylog.info('Detected RAMSES-RT file WITHOUT IR trapping.')
                    fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure",
                              "Metallicity", "HII", "HeII", "HeIII"]
                else:
                    mylog.info('Detected RAMSES-RT file WITH IR trapping.')
                    fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pres_IR",
                              "Pressure", "Metallicity", "HII", "HeII", "HeIII"]
            else:
                if nvar < 5:
                    # BUGFIX: the former log call had two unfilled "%s"
                    # placeholders; raise a fully formatted error instead.
                    raise ValueError(
                        "nvar=%s is too small! yt doesn't currently "
                        "support 1D/2D runs in RAMSES." % nvar)
                # Basic hydro runs
                if nvar == 5:
                    fields = ["Density",
                              "x-velocity", "y-velocity", "z-velocity",
                              "Pressure"]
                if nvar > 5 and nvar < 11:
                    fields = ["Density",
                              "x-velocity", "y-velocity", "z-velocity",
                              "Pressure", "Metallicity"]
                # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
                if nvar == 11:
                    fields = ["Density",
                              "x-velocity", "y-velocity", "z-velocity",
                              "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
                              "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                              "Pressure"]
                if nvar > 11:
                    fields = ["Density",
                              "x-velocity", "y-velocity", "z-velocity",
                              "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
                              "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                              "Pressure", "Metallicity"]
            # Lazy %-style arguments defer formatting until the record is
            # actually emitted.
            mylog.debug("No fields specified by user; automatically setting fields array to %s",
                        str(fields))

        # Allow some wiggle room for users to add too many variables
        count_extra = 0
        while len(fields) < nvar:
            fields.append("var"+str(len(fields)))
            count_extra += 1
        if count_extra > 0:
            mylog.debug('Detected %s extra fluid fields.', count_extra)
        cls.field_list = [(cls.ftype, e) for e in fields]

        return fields