Example #1
def validate_image_name(filename, suffix: Optional[str] = None) -> str:
    """
    Build a valid image filename with a specified extension (defaults to png).
    If the input filename already has a valid extension and no conflicting
    suffix is given, it is returned unchanged. Otherwise, suffix replaces any
    existing extension (with a warning if both were valid but different).
    """
    name, psuffix = os.path.splitext(filename)
    if psuffix in SUPPORTED_FORMATS:
        if suffix is not None:
            suffix = normalize_extension_string(suffix)
        if suffix in SUPPORTED_FORMATS and suffix != psuffix:
            mylog.warning(
                "Received two valid image formats '%s' (from `filename`) "
                "and '%s' (from `suffix`). The former is ignored.",
                psuffix,
                suffix,
            )
            return f"{name}{suffix}"
        return str(filename)

    if suffix is None:
        suffix = ".png"

    suffix = normalize_extension_string(suffix)

    if suffix not in SUPPORTED_FORMATS:
        raise ValueError(f"Unsupported file format '{suffix}'.")

    return f"{filename}{suffix}"
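
A quick usage sketch of the three branches above (hypothetical: it assumes SUPPORTED_FORMATS contains '.png' and '.jpg', and that normalize_extension_string('jpg') returns '.jpg'):

validate_image_name("frame")             # -> 'frame.png' (default suffix)
validate_image_name("frame", "jpg")      # -> 'frame.jpg'
validate_image_name("frame.png")         # -> 'frame.png' (kept as-is)
validate_image_name("frame.png", "jpg")  # -> 'frame.jpg', with a warning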
Example #2
def parse_orion_sinks(fn):
    '''
    Orion sink particles are stored in text files. This function
    is for figuring out which particle fields are present based on the
    number of entries per line in the *.sink file.
    '''

    # Figure out the format of the particle file
    with open(fn, 'r') as f:
        lines = f.readlines()

    try:
        line = lines[1]
    except IndexError:
        # a particle file exists, but there is only one line,
        # so no sinks have been created yet.
        index = {}
        return index

    # The basic fields that all sink particles have
    index = {'particle_mass': 0,
             'particle_position_x': 1,
             'particle_position_y': 2,
             'particle_position_z': 3,
             'particle_momentum_x': 4,
             'particle_momentum_y': 5,
             'particle_momentum_z': 6,
             'particle_angmomen_x': 7,
             'particle_angmomen_y': 8,
             'particle_angmomen_z': 9,
             'particle_id': -1}

    if len(line.strip().split()) == 11:
        # these are vanilla sinks, do nothing
        pass

    elif len(line.strip().split()) == 17:
        # these are old-style stars, add stellar model parameters
        index['particle_mlast']     = 10
        index['particle_r']         = 11
        index['particle_mdeut']     = 12
        index['particle_n']         = 13
        index['particle_mdot']      = 14
        index['particle_burnstate'] = 15

    elif (len(line.strip().split()) == 18 or len(line.strip().split()) == 19):
        # these are the newer style, add luminosity as well
        index['particle_mlast']     = 10
        index['particle_r']         = 11
        index['particle_mdeut']     = 12
        index['particle_n']         = 13
        index['particle_mdot']      = 14
        index['particle_burnstate'] = 15
        index['particle_luminosity']= 16
    else:
        # give a warning if none of the above apply:
        mylog.warning('Warning - could not figure out particle output file')
        mylog.warning('These results could be nonsense!')

    return index
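
A minimal self-check of the column-count dispatch above (a sketch; the file contents are made up):

import os
import tempfile

# A two-line fake *.sink file: a header line plus one 11-column particle,
# which should be treated as a vanilla sink.
with tempfile.NamedTemporaryFile("w", suffix=".sink", delete=False) as f:
    f.write("1\n")
    f.write(" ".join(["0.0"] * 11) + "\n")
fields = parse_orion_sinks(f.name)
assert fields["particle_mass"] == 0
assert "particle_mlast" not in fields  # only 17+ columns add stellar fields
os.unlink(f.name)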
Example #3
    def add_field(self,
                  name,
                  function,
                  sampling_type,
                  *,
                  force_override=False,
                  **kwargs):

        sampling_type = self._sanitize_sampling_type(sampling_type)

        if isinstance(name, str) or not is_sequence(name):
            if sampling_type == "particle":
                ftype = "all"
            else:
                ftype = "gas"
            name = (ftype, name)

        # Handle the case where the field has already been added.
        if not force_override and name in self:
            mylog.warning(
                "Field %s already exists. To override use `force_override=True`.",
                name,
            )

        return super().add_field(name,
                                 function,
                                 sampling_type,
                                 force_override=force_override,
                                 **kwargs)
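
Hypothetical call patterns against the guard above (`fields` and `_density` are stand-ins, not part of the snippet):

# String names are promoted to tuple keys based on sampling_type:
fields.add_field("density", _density, sampling_type="cell")
# -> registered as ("gas", "density")

# A second registration under the same key only logs the warning and still
# calls the base method, unless force_override=True is passed:
fields.add_field("density", _density, sampling_type="cell",
                 force_override=True)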
Example #4
    def _calculate_simulation_bounds(self):
        """
        Figure out the starting and stopping time and redshift for the simulation.
        """

        if 'StopCycle' in self.parameters:
            self.stop_cycle = self.parameters['StopCycle']

        # Convert initial/final redshifts to times.
        if self.cosmological_simulation:
            self.initial_time = self.cosmology.t_from_z(self.initial_redshift)
            self.initial_time.units.registry = self.unit_registry
            self.final_time = self.cosmology.t_from_z(self.final_redshift)
            self.final_time.units.registry = self.unit_registry

        # If not a cosmology simulation, figure out the stopping criteria.
        else:
            if 'InitialTime' in self.parameters:
                self.initial_time = self.quan(self.parameters['InitialTime'],
                                              "code_time")
            else:
                self.initial_time = self.quan(0., "code_time")

            if 'StopTime' in self.parameters:
                self.final_time = self.quan(self.parameters['StopTime'],
                                            "code_time")
            else:
                self.final_time = None
            if not ('StopTime' in self.parameters
                    or 'StopCycle' in self.parameters):
                raise NoStoppingCondition(self.parameter_filename)
            if self.final_time is None:
                mylog.warning(
                    "Simulation %s has no stop time set, stopping condition " +
                    "will be based only on cycles.", self.parameter_filename)
Example #5
 def _calculate_field_offsets(self, field_list, pcount,
                              offset, file_size = None):
     # field_list is (ftype, fname) but the blocks are ordered
     # (fname, ftype) in the file.
     pos = offset
     fs = self._field_size
     offsets = {}
     for field in self._fields:
         if not isinstance(field, str):
             field = field[0]
         if not any( (ptype, field) in field_list
                     for ptype in self._ptypes):
             continue
         pos += 4
         any_ptypes = False
         for ptype in self._ptypes:
             if field == "Mass" and ptype not in self.var_mass:
                 continue
             if (ptype, field) not in field_list:
                 continue
             offsets[(ptype, field)] = pos
             any_ptypes = True
             if field in self._vector_fields:
                 pos += self._vector_fields[field] * pcount[ptype] * fs
             else:
                 pos += pcount[ptype] * fs
         pos += 4
         if not any_ptypes: pos -= 8
     if file_size is not None:
         if file_size != pos:
             mylog.warning("Your Gadget-2 file may have extra " +
                           "columns or different precision!" +
                           " (%s file vs %s computed)",
                           file_size, pos)
     return offsets
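
The `pos += 4 ... pos += 4` bookkeeping follows the unformatted Fortran record layout assumed for Gadget-2 files: a 4-byte length marker on each side of every block. A small arithmetic sketch:

# One float32 vector field, 100 particles of a single matching ptype:
fs = 4                       # bytes per scalar, cf. self._field_size
payload = 3 * 100 * fs       # vector payload for this ptype
pos = 0
pos += 4                     # leading record marker
field_offset = pos           # what is stored in offsets[(ptype, field)]
pos += payload
pos += 4                     # trailing record marker
# If no ptype matched, both markers were counted for nothing,
# hence the `pos -= 8` correction in the code above.
assert pos == payload + 8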
Example #6
    def _calculate_cycle_outputs(self):
        """
        Calculate cycle outputs.
        """

        mylog.warning(
            'Calculating cycle outputs.  Dataset times will be unavailable.')

        if self.stop_cycle is None or \
            'CycleSkipDataDump' not in self.parameters or \
            self.parameters['CycleSkipDataDump'] <= 0.0:
            return []

        self.all_time_outputs = []
        index = 0
        for cycle in range(0, self.stop_cycle + 1,
                           self.parameters['CycleSkipDataDump']):
            filename = os.path.join(
                self.parameters['GlobalDir'],
                "%s%04d" % (self.parameters['DataDumpDir'], index),
                "%s%04d" % (self.parameters['DataDumpName'], index))

            output = {'index': index, 'filename': filename, 'cycle': cycle}
            self.all_time_outputs.append(output)
            index += 1
Example #7
 def preload(self, chunk, fields, max_size):
     if len(fields) == 0:
         yield self
         return
     old_cache_on = self._cache_on
     old_cached_fields = self._cached_fields
     self._cached_fields = cf = {}
     self._cache_on = True
     for gid in old_cached_fields:
         # Will not copy numpy arrays, which is good!
         cf[gid] = old_cached_fields[gid].copy() 
     self._hits = self._misses = 0
     self._cached_fields = self._read_chunk_data(chunk, fields)
     mylog.debug("(1st) Hits = % 10i Misses = % 10i",
         self._hits, self._misses)
     self._hits = self._misses = 0
     yield self
     mylog.debug("(2nd) Hits = % 10i Misses = % 10i",
         self._hits, self._misses)
     self._cached_fields = old_cached_fields
     self._cache_on = old_cache_on
     # Randomly remove some grids from the cache.  Note that we're doing
     # this on a grid basis, not a field basis.  Performance will be
     # slightly non-deterministic as a result of this, but it should roughly
     # be statistically alright, assuming (as we do) that this will get
     # called during largely unbalanced stuff.
     if len(self._cached_fields) > max_size:
         # list(): random.sample rejects dict views on Python 3
         to_remove = random.sample(list(self._cached_fields),
             len(self._cached_fields) - max_size)
         mylog.debug("Purging from cache %s", len(to_remove))
         for k in to_remove:
             self._cached_fields.pop(k)
     else:
         mylog.warning("Cache size % 10i (max % 10i)",
             len(self._cached_fields), max_size)
Example #8
    def __init__(self,
                 base_region,
                 ds,
                 oct_handler,
                 over_refine_factor=1,
                 num_ghost_zones=0):
        self._over_refine_factor = over_refine_factor
        self._num_zones = 1 << (over_refine_factor)
        self.field_data = YTFieldData()
        self.field_parameters = {}
        self.ds = ds
        self.oct_handler = oct_handler
        self._last_mask = None
        self._last_selector_id = None
        self._current_particle_type = "io"
        self._current_fluid_type = self.ds.default_fluid_type
        self.base_region = base_region
        self.base_selector = base_region.selector

        self._num_ghost_zones = num_ghost_zones

        if num_ghost_zones > 0:
            if not all(ds.periodicity):
                mylog.warning(
                    "Ghost zones will wrongly assume the domain to be periodic."
                )
            base_grid = StreamOctreeSubset(base_region, ds, oct_handler,
                                           over_refine_factor)
            self._base_grid = base_grid
Example #9
 def add_field(self, name, function=None, sampling_type=None, **kwargs):
     if not isinstance(name, tuple):
         if kwargs.setdefault('particle_type', False):
             name = ('all', name)
         else:
             name = ('gas', name)
     override = kwargs.get("force_override", False)
     # Handle the case where the field has already been added.
     if not override and name in self:
         mylog.warning(
             "Field %s already exists. To override use " +
             "force_override=True.", name)
     if kwargs.setdefault('particle_type', False):
         if sampling_type is not None and sampling_type != "particle":
             raise RuntimeError(
                 "Clashing definition of 'sampling_type' and "
                 "'particle_type'. Note that 'particle_type' is "
                 "deprecated. Please just use 'sampling_type'.")
         else:
             sampling_type = "particle"
     if sampling_type is None:
         warnings.warn("Because 'sampling_type' was not specified, yt will "
                       "assume a cell 'sampling_type'")
         sampling_type = "cell"
     return super(LocalFieldInfoContainer,
                  self).add_field(name, sampling_type, function, **kwargs)
Example #10
 def _setup_filenames(self):
     template = self.dataset.filename_template
     ndoms = self.dataset.file_count
     cls = self.dataset._file_class
     self.data_files = []
     fi = 0
     for i in range(int(ndoms)):
         start = 0
         if self.chunksize > 0:
             end = start + self.chunksize
         else:
             end = None
         while True:
             try:
                 _filename = template % {"num": i}
                 df = cls(self.dataset, self.io, _filename, fi,
                          (start, end))
             except FileNotFoundError:
                 mylog.warning(
                     "Failed to load '%s' (missing file or directory)",
                     _filename)
                 # `df` is unbound when the file is missing; move on
                 break
             if max(df.total_particles.values()) == 0:
                 break
             fi += 1
             self.data_files.append(df)
             if self.chunksize <= 0:
                 break
             start = end
             end += self.chunksize
Example #11
 def __init__(self,
              start_point,
              end_point,
              ds=None,
              field_parameters=None,
              data_source=None):
     validate_3d_array(start_point)
     validate_3d_array(end_point)
     validate_object(ds, Dataset)
     validate_object(field_parameters, dict)
     validate_object(data_source, YTSelectionContainer)
     super().__init__(ds, field_parameters, data_source)
     if isinstance(start_point, YTArray):
         self.start_point = self.ds.arr(start_point).to("code_length")
     else:
         self.start_point = self.ds.arr(start_point,
                                        "code_length",
                                        dtype="float64")
     if isinstance(end_point, YTArray):
         self.end_point = self.ds.arr(end_point).to("code_length")
     else:
         self.end_point = self.ds.arr(end_point,
                                      "code_length",
                                      dtype="float64")
     if (self.start_point < self.ds.domain_left_edge).any() or (
             self.end_point > self.ds.domain_right_edge).any():
         mylog.warning(
             "Ray start or end is outside the domain. "
             "Returned data will only be for the ray section inside the domain."
         )
     self.vec = self.end_point - self.start_point
     self._set_center(self.start_point)
     self.set_field_parameter("center", self.start_point)
     self._dts, self._ts = None, None
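
For context, this constructor is normally reached through the dataset's convenience wrapper; a hypothetical session (assuming a loaded dataset `ds`):

# Both endpoints in code_length; an endpoint outside the domain triggers
# the warning above, and only the in-domain segment is sampled.
ray = ds.ray([0.25, 0.25, 0.25], [0.75, 0.75, 0.75])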
Example #12
    def _calculate_cycle_outputs(self):
        """
        Calculate cycle outputs.
        """

        mylog.warning("Calculating cycle outputs.  Dataset times will be unavailable.")

        if (
            self.stop_cycle is None
            or "CycleSkipDataDump" not in self.parameters
            or self.parameters["CycleSkipDataDump"] <= 0.0
        ):
            return []

        self.all_time_outputs = []
        index = 0
        for cycle in range(
            0, self.stop_cycle + 1, self.parameters["CycleSkipDataDump"]
        ):
            filename = os.path.join(
                self.parameters["GlobalDir"],
                "%s%04d" % (self.parameters["DataDumpDir"], index),
                "%s%04d" % (self.parameters["DataDumpName"], index),
            )

            output = {"index": index, "filename": filename, "cycle": cycle}
            self.all_time_outputs.append(output)
            index += 1
Example #13
    def add_field(self,
                  name,
                  function,
                  sampling_type,
                  *,
                  force_override=False,
                  **kwargs):
        if isinstance(name, str) or not is_sequence(name):
            # the base method only accepts proper tuple field keys
            # and is only used internally, while this method is exposed to users
            # and is documented as usable with single strings as name
            if sampling_type == "particle":
                ftype = "all"
            else:
                ftype = "gas"
            name = (ftype, name)

        # Handle the case where the field has already been added.
        if not force_override and name in self:
            mylog.warning(
                "Field %s already exists. To override use `force_override=True`.",
                name,
            )

        return super().add_field(name,
                                 function,
                                 sampling_type,
                                 force_override=force_override,
                                 **kwargs)
Example #14
def get_image_suffix(name):
    suffix = os.path.splitext(name)[1]
    supported_suffixes = ['.png', '.eps', '.ps', '.pdf', '.jpg', '.jpeg']
    if suffix in supported_suffixes or suffix == '':
        return suffix
    else:
        mylog.warning('Unsupported image suffix requested (%s)' % suffix)
        return ''
Example #15
def get_image_suffix(name):
    suffix = os.path.splitext(name)[1]
    supported_suffixes = [".png", ".eps", ".ps", ".pdf", ".jpg", ".jpeg"]
    if suffix in supported_suffixes or suffix == "":
        return suffix
    else:
        mylog.warning("Unsupported image suffix requested (%s)", suffix)
        return ""
Example #16
    def _set_code_unit_attributes(self):
        """
        Generate the conversion to various physical units
        based on the parameter file.
        """

        # This should be improved.
        h5f = h5py.File(self.parameter_filename, mode="r")
        for field_name in h5f["/field_types"]:
            current_field = h5f[f"/field_types/{field_name}"]
            if "field_to_cgs" in current_field.attrs:
                field_conv = current_field.attrs["field_to_cgs"]
                self.field_units[field_name] = just_one(field_conv)
            elif "field_units" in current_field.attrs:
                field_units = current_field.attrs["field_units"]
                if isinstance(field_units, str):
                    current_field_units = current_field.attrs["field_units"]
                else:
                    current_field_units = just_one(
                        current_field.attrs["field_units"])
                self.field_units[field_name] = current_field_units.decode(
                    "utf8")
            else:
                self.field_units[field_name] = ""

        if "dataset_units" in h5f:
            for unit_name in h5f["/dataset_units"]:
                current_unit = h5f[f"/dataset_units/{unit_name}"]
                value = current_unit[()]
                unit = current_unit.attrs["unit"]
                # need to convert to a Unit object and check dimensions
                # because unit can be things like
                # 'dimensionless/dimensionless**3' so naive string
                # comparisons are insufficient
                unit = Unit(unit, registry=self.unit_registry)
                if unit_name.endswith(
                        "_unit") and unit.dimensions is sympy_one:
                    # Catch code units and if they are dimensionless,
                    # assign CGS units. setdefaultattr will catch code units
                    # which have already been set via units_override.
                    un = unit_name[:-5]
                    un = un.replace("magnetic", "magnetic_field_cgs", 1)
                    unit = unit_system_registry["cgs"][un]
                setdefaultattr(self, unit_name, self.quan(value, unit))
                if unit_name in h5f["/field_types"]:
                    if unit_name in self.field_units:
                        mylog.warning(
                            "'field_units' was overridden by 'dataset_units/%s'",
                            unit_name,
                        )
                    self.field_units[unit_name] = str(unit)
        else:
            setdefaultattr(self, "length_unit", self.quan(1.0, "cm"))
            setdefaultattr(self, "mass_unit", self.quan(1.0, "g"))
            setdefaultattr(self, "time_unit", self.quan(1.0, "s"))

        h5f.close()
Example #17
def enable_plugins(pluginfilename=None):
    """Forces a plugin file to be parsed.

    A plugin file is a means of creating custom fields, quantities,
    data objects, colormaps, and other code classes and objects to be used
    in yt scripts without modifying the yt source directly.

    If <pluginfilename> is omitted, this function will look for a plugin file at
    ``$HOME/.config/yt/my_plugins.py``, which is the preferred behaviour for a
    system-level configuration.

    Warning: a script using this function will only be reproducible if your plugin
    file is shared with it.
    """
    import yt
    from yt.config import CONFIG_DIR, ytcfg
    from yt.fields.my_plugin_fields import my_plugins_fields

    if pluginfilename is not None:
        _fn = pluginfilename
        if not os.path.isfile(_fn):
            raise FileNotFoundError(_fn)
    else:
        # Determine the global plugin location, in decreasing order of
        # priority:
        # - absolute path
        # - CONFIG_DIR
        # - obsolete config dir.
        my_plugin_name = ytcfg.get("yt", "pluginfilename")
        old_config_dir = os.path.join(os.path.expanduser("~"), ".yt")
        for base_prefix in ("", CONFIG_DIR, old_config_dir):
            if os.path.isfile(os.path.join(base_prefix, my_plugin_name)):
                _fn = os.path.join(base_prefix, my_plugin_name)
                break
        else:
            raise FileNotFoundError(
                "Could not find a global system plugin file.")

        if _fn.startswith(old_config_dir):
            mylog.warning(
                "Your plugin file is located in a deprecated directory. "
                "Please move it from %s to %s",
                os.path.join(old_config_dir, my_plugin_name),
                os.path.join(CONFIG_DIR, my_plugin_name),
            )

    mylog.info("Loading plugins from %s", _fn)
    ytdict = yt.__dict__
    execdict = ytdict.copy()
    execdict["add_field"] = my_plugins_fields.add_field
    with open(_fn) as f:
        code = compile(f.read(), _fn, "exec")
        exec(code, execdict, execdict)
    ytnamespace = list(ytdict.keys())
    for k in execdict.keys():
        if k not in ytnamespace:
            if callable(execdict[k]):
                setattr(yt, k, execdict[k])
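
Typical usage (yt's public API): call it once near the top of a script, after which plugin-defined names are reachable through the yt namespace:

import yt

yt.enable_plugins()  # parses $HOME/.config/yt/my_plugins.py by default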
Example #18
 def _load_info_records(self):
     """
     Return a parsed version of the info_records.
     """
     try:
         return load_info_records(self._vars['info_records'])
     except (KeyError, TypeError):
         mylog.warning("No info_records found")
         return []
Example #19
 def _load_info_records(self):
     """
     Return a parsed version of the info_records.
     """
     with self._handle.open_ds() as ds:
         try:
             return load_info_records(ds.variables["info_records"])
         except (KeyError, TypeError):
             mylog.warning("No info_records found")
             return []
Example #20
 def add_field(self, name, function=None, **kwargs):
     if not isinstance(name, tuple):
         name = ('gas', name)
     override = kwargs.get("force_override", False)
     # Handle the case where the field has already been added.
     if not override and name in self:
         mylog.warning("Field %s already exists. To override use " +
                       "force_override=True.", name)
     return super(LocalFieldInfoContainer,
                  self).add_field(name, function, **kwargs)
Example #21
 def add_field(self, name, function=None, **kwargs):
     if not isinstance(name, tuple):
         name = ('gas', name)
     override = kwargs.get("force_override", False)
     # Handle the case where the field has already been added.
     if not override and name in self:
         mylog.warning(
             "Field %s already exists. To override use " +
             "force_override=True.", name)
     return super(LocalFieldInfoContainer,
                  self).add_field(name, function, **kwargs)
Example #22
    def _set_code_unit_attributes(self):
        """
        Generate the conversion to various physical units
        based on the parameter file.
        """

        # This should be improved.
        h5f = h5py.File(self.parameter_filename, "r")
        for field_name in h5f["/field_types"]:
            current_field = h5f["/field_types/%s" % field_name]
            if 'field_to_cgs' in current_field.attrs:
                field_conv = current_field.attrs['field_to_cgs']
                self.field_units[field_name] = just_one(field_conv)
            elif 'field_units' in current_field.attrs:
                field_units = current_field.attrs['field_units']
                if isinstance(field_units, string_types):
                    current_field_units = current_field.attrs['field_units']
                else:
                    current_field_units = \
                        just_one(current_field.attrs['field_units'])
                self.field_units[field_name] = current_field_units.decode(
                    "utf8")
            else:
                self.field_units[field_name] = ""

        if "dataset_units" in h5f:
            for unit_name in h5f["/dataset_units"]:
                current_unit = h5f["/dataset_units/%s" % unit_name]
                value = current_unit.value
                unit = current_unit.attrs["unit"]
                # need to convert to a Unit object and check dimensions
                # because unit can be things like
                # 'dimensionless/dimensionless**3' so naive string
                # comparisons are insufficient
                unit = Unit(unit, registry=self.unit_registry)
                if unit_name.endswith(
                        '_unit') and unit.dimensions is sympy_one:
                    un = unit_name[:-5]
                    un = un.replace('magnetic', 'magnetic_field', 1)
                    unit = self.unit_system[un]
                setdefaultattr(self, unit_name, self.quan(value, unit))
                if unit_name in h5f["/field_types"]:
                    if unit_name in self.field_units:
                        mylog.warning(
                            "'field_units' was overridden by 'dataset_units/%s'"
                            % (unit_name))
                    self.field_units[unit_name] = str(unit)
        else:
            setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
            setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
            setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))

        h5f.close()
Example #23
 def _DeprecatedFieldFunc(field, data):
     # Only log a warning if we've already done
     # field detection
     if data.ds.fields_detected:
         args = [field.name, since, removal]
         msg = ("The Derived Field %s is deprecated as of yt v%s "
                "and will be removed in yt v%s. ")
         if ret_field != field.name:
             msg += "Use %s instead."
             args.append(ret_field)
         mylog.warning(msg, *args)
     return func(field, data)
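
`func`, `since`, `removal` and `ret_field` are closure variables here; a minimal sketch of the enclosing factory they presumably come from (names inferred from the snippet, not the actual yt API):

def _make_deprecated_field_func(func, since, removal, ret_field):
    def _DeprecatedFieldFunc(field, data):
        ...  # body as above
    return _DeprecatedFieldFunc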
Example #24
File: io.py Project: jisuoqing/yt
 def _generate_smoothing_length(self, index):
     data_files = index.data_files
     if not self.ds.gen_hsmls:
         return
     hsml_fn = data_files[0].filename.replace(".hdf5", ".hsml.hdf5")
     if os.path.exists(hsml_fn):
         with h5py.File(hsml_fn, mode="r") as f:
             file_hash = f.attrs["q"]
         if file_hash != self.ds._file_hash:
             mylog.warning("Replacing hsml files.")
             for data_file in data_files:
                 hfn = data_file.filename.replace(".hdf5", ".hsml.hdf5")
                 os.remove(hfn)
         else:
             return
     positions = []
     counts = defaultdict(int)
     for data_file in data_files:
         for _, ppos in self._yield_coordinates(
             data_file, needed_ptype=self.ds._sph_ptypes[0]
         ):
             counts[data_file.filename] += ppos.shape[0]
             positions.append(ppos)
     if not positions:
         return
     offsets = {}
     offset = 0
     for fn, count in counts.items():
         offsets[fn] = offset
         offset += count
     kdtree = index.kdtree
     positions = uconcatenate(positions)[kdtree.idx]
     hsml = generate_smoothing_length(
         positions.astype("float64"), kdtree, self.ds._num_neighbors
     )
     dtype = positions.dtype
     hsml = hsml[np.argsort(kdtree.idx)].astype(dtype)
     mylog.warning("Writing smoothing lengths to hsml files.")
     for i, data_file in enumerate(data_files):
         si, ei = data_file.start, data_file.end
         fn = data_file.filename
         hsml_fn = data_file.filename.replace(".hdf5", ".hsml.hdf5")
         with h5py.File(hsml_fn, mode="a") as f:
             if i == 0:
                 f.attrs["q"] = self.ds._file_hash
             g = f.require_group(self.ds._sph_ptypes[0])
             d = g.require_dataset(
                 "SmoothingLength", dtype=dtype, shape=(counts[fn],)
             )
             begin = si + offsets[fn]
             end = min(ei, d.size) + offsets[fn]
             d[si:ei] = hsml[begin:end]
Example #25
File: io.py Project: philchang/yt
    def _calculate_field_offsets(self,
                                 field_list,
                                 pcount,
                                 offset,
                                 file_size=None):
        # field_list is (ftype, fname) but the blocks are ordered
        # (fname, ftype) in the file.
        if self._format == 2:
            # Need to subtract offset due to extra header block
            pos = offset - SNAP_FORMAT_2_OFFSET
        else:
            pos = offset
        fs = self._field_size
        offsets = {}

        for field in self._fields:
            if not isinstance(field, string_types):
                field = field[0]
            if not any((ptype, field) in field_list for ptype in self._ptypes):
                continue
            if self._format == 2:
                pos += 20  # skip block header
            elif self._format == 1:
                pos += 4
            else:
                raise RuntimeError("incorrect Gadget format %s!" %
                                   str(self._format))
            any_ptypes = False
            for ptype in self._ptypes:
                if field == "Mass" and ptype not in self.var_mass:
                    continue
                if (ptype, field) not in field_list:
                    continue
                offsets[(ptype, field)] = pos
                any_ptypes = True
                if field in self._vector_fields:
                    pos += self._vector_fields[field] * pcount[ptype] * fs
                else:
                    pos += pcount[ptype] * fs
            pos += 4
            if not any_ptypes:
                pos -= 8
        if file_size is not None:
            if (file_size != pos) & (self._format
                                     == 1):  # ignore the rest of format 2
                mylog.warning(
                    "Your Gadget-2 file may have extra " +
                    "columns or different precision!" +
                    " (%s file vs %s computed)", file_size, pos)
        return offsets
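
The 20-byte skip for format 2 matches the SnapFormat=2 block layout described in the Gadget-2 manual (stated here as an assumption): every data block is preceded by a small header record carrying a 4-character block name.

# 4-byte record marker + 4-char block name + 4-byte block size
# + 4-byte record marker                      = 16-byte header record,
# + 4-byte leading marker of the data block   = 20 bytes skipped.
FORMAT_2_BLOCK_HEADER = 4 + 4 + 4 + 4 + 4
assert FORMAT_2_BLOCK_HEADER == 20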
Example #26
    def _get_nod_names(self):
        """

        Returns the names of the node vars, if available

        """

        with self._handle.open_ds() as ds:
            if "name_nod_var" not in ds.variables:
                mylog.warning("name_nod_var not found")
                return []
            else:
                return [sanitize_string(v.tostring()) for v in
                        ds.variables["name_nod_var"]]
Example #27
def get_canvas(name):
    suffix = get_image_suffix(name)

    if suffix == '':
        suffix = '.png'
    if suffix == ".png":
        canvas_cls = mpl.FigureCanvasAgg
    elif suffix == ".pdf":
        canvas_cls = mpl.FigureCanvasPdf
    elif suffix in (".eps", ".ps"):
        canvas_cls = mpl.FigureCanvasPS
    else:
        mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
        canvas_cls = mpl.FigureCanvasAgg
    return canvas_cls
Example #28
def get_canvas(name):
    suffix = get_image_suffix(name)
    
    if suffix == '':
        suffix = '.png'
    if suffix == ".png":
        canvas_cls = mpl.FigureCanvasAgg
    elif suffix == ".pdf":
        canvas_cls = mpl.FigureCanvasPdf
    elif suffix in (".eps", ".ps"):
        canvas_cls = mpl.FigureCanvasPS
    else:
        mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
        canvas_cls = mpl.FigureCanvasAgg
    return canvas_cls
Example #29
    def _get_nod_names(self):
        """

        Returns the names of the node vars, if available

        """

        if "name_nod_var" not in self._vars:
            mylog.warning("name_nod_var not found")
            return []
        else:
            return [
                sanitize_string(v.tostring())
                for v in self._vars["name_nod_var"]
            ]
Example #30
    def _is_valid(cls, filename, *args, **kwargs):
        # This accepts a filename or a set of arguments and returns True or
        # False depending on whether the file is of the requested type.

        warn_netcdf(filename)
        try:
            nc4_file = NetCDF4FileHandler(filename)
            with nc4_file.open_ds(keepweakref=True) as _handle:
                is_cm1_lofs = hasattr(_handle, "cm1_lofs_version")
                is_cm1 = hasattr(_handle, "cm1 version")  # not a typo, it is a space

                # ensure coordinates of each variable array exists in the dataset
                coords = _handle.dimensions  # get the dataset wide coordinates
                failed_vars = []  # list of failed variables
                for var in _handle.variables:  # iterate over the variables
                    # get the dims for the variable
                    vcoords = _handle[var].dimensions
                    ncoords = len(vcoords)  # number of coordinates in variable
                    # number of coordinates that pass for a variable
                    coordspassed = sum(vc in coords for vc in vcoords)
                    if coordspassed != ncoords:
                        failed_vars.append(var)

                if failed_vars:
                    mylog.warning(
                        "Trying to load a cm1_lofs netcdf file but the "
                        "coordinates of the following fields do not match the "
                        "coordinates of the dataset: %s",
                        failed_vars,
                    )
                    return False

            if not is_cm1_lofs:
                if is_cm1:
                    mylog.warning(
                        "It looks like you are trying to load a cm1 netcdf file, "
                        "but at present yt only supports cm1_lofs output. Until"
                        " support is added, you can likely use"
                        " yt.load_uniform_grid() to load your cm1 file manually."
                    )
                return False
        except (OSError, AttributeError, ImportError):
            return False

        return True
Example #31
    def _set_code_unit_attributes(self):
        if self.cosmological_simulation:
            k = cosmology_get_units(
                self.hubble_constant,
                self.omega_matter,
                self.parameters["CosmologyComovingBoxSize"],
                self.parameters["CosmologyInitialRedshift"],
                self.current_redshift,
            )
            # Now some CGS values
            box_size = self.parameters["CosmologyComovingBoxSize"]
            setdefaultattr(self, "length_unit", self.quan(box_size, "Mpccm/h"))
            setdefaultattr(
                self,
                "mass_unit",
                self.quan(k["urho"], "g/cm**3") *
                (self.length_unit.in_cgs())**3,
            )
            setdefaultattr(self, "time_unit", self.quan(k["utim"], "s"))
            setdefaultattr(self, "velocity_unit", self.quan(k["uvel"], "cm/s"))
        else:
            if "LengthUnits" in self.parameters:
                length_unit = self.parameters["LengthUnits"]
                mass_unit = self.parameters["DensityUnits"] * length_unit**3
                time_unit = self.parameters["TimeUnits"]
            elif "SimulationControl" in self.parameters:
                units = self.parameters["SimulationControl"]["Units"]
                length_unit = units["Length"]
                mass_unit = units["Density"] * length_unit**3
                time_unit = units["Time"]
            else:
                mylog.warning("Setting 1.0 in code units to be 1.0 cm")
                mylog.warning("Setting 1.0 in code units to be 1.0 s")
                length_unit = mass_unit = time_unit = 1.0

            setdefaultattr(self, "length_unit", self.quan(length_unit, "cm"))
            setdefaultattr(self, "mass_unit", self.quan(mass_unit, "g"))
            setdefaultattr(self, "time_unit", self.quan(time_unit, "s"))
            setdefaultattr(self, "velocity_unit",
                           self.length_unit / self.time_unit)

        density_unit = self.mass_unit / self.length_unit**3
        magnetic_unit = np.sqrt(4 * np.pi * density_unit) * self.velocity_unit
        magnetic_unit = np.float64(magnetic_unit.in_cgs())
        setdefaultattr(self, "magnetic_unit",
                       self.quan(magnetic_unit, "gauss"))
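
The magnetic unit here comes from the Gaussian-units Alfven speed, v_A = B / sqrt(4*pi*rho), solved for B; a quick numeric check:

import numpy as np

# With density_unit = 1 g/cm**3 and velocity_unit = 1 cm/s, the derived
# field unit is sqrt(4*pi) gauss.
density_unit, velocity_unit = 1.0, 1.0
magnetic_unit = np.sqrt(4 * np.pi * density_unit) * velocity_unit
assert np.isclose(magnetic_unit, 3.5449077018110318)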
Example #32
 def print_key_parameters(self):
     for a in [
             "current_time", "domain_dimensions", "domain_left_edge",
             "domain_right_edge", "cosmological_simulation"
     ]:
         v = getattr(self, a)
         if v is not None: mylog.info("Parameters: %-25s = %s", a, v)
     if hasattr(self, "cosmological_simulation") and \
        getattr(self, "cosmological_simulation"):
         for a in [
                 "current_redshift", "omega_lambda", "omega_matter",
                 "hubble_constant"
         ]:
             v = getattr(self, a)
             if v is not None: mylog.info("Parameters: %-25s = %s", a, v)
     mylog.warning(
         "Geometric data selection not available for this dataset type.")
Example #33
    def _set_code_unit_attributes(self):
        """
        Generate the conversion to various physical units
        based on the parameter file.
        """

        # This should be improved.
        h5f = h5py.File(self.parameter_filename, "r")
        for field_name in h5f["/field_types"]:
            current_field = h5f["/field_types/%s" % field_name]
            if 'field_to_cgs' in current_field.attrs:
                field_conv = current_field.attrs['field_to_cgs']
                self.field_units[field_name] = just_one(field_conv)
            elif 'field_units' in current_field.attrs:
                field_units = current_field.attrs['field_units']
                if isinstance(field_units, str):
                    current_field_units = current_field.attrs['field_units']
                else:
                    current_field_units = \
                        just_one(current_field.attrs['field_units'])
                self.field_units[field_name] = current_field_units
            else:
                self.field_units[field_name] = ""

        if "dataset_units" in h5f:
            for unit_name in h5f["/dataset_units"]:
                current_unit = h5f["/dataset_units/%s" % unit_name]
                value = current_unit.value
                unit = current_unit.attrs["unit"]
                setattr(self, unit_name, self.quan(value, unit))
                if unit_name in h5f["/field_types"]:
                    if unit_name in self.field_units:
                        mylog.warning("'field_units' was overridden by 'dataset_units/%s'"
                                      % (unit_name))
                    self.field_units[unit_name] = unit
        else:
            self.length_unit = self.quan(1.0, "cm")
            self.mass_unit = self.quan(1.0, "g")
            self.time_unit = self.quan(1.0, "s")

        h5f.close()
Example #34
    def _set_code_unit_attributes(self):
        if self.cosmological_simulation:
            k = self.cosmology_get_units()
            # Now some CGS values
            box_size = self.parameters.get("CosmologyComovingBoxSize", None)
            if box_size is None:
                box_size = self.parameters["Physics"]["Cosmology"]\
                    ["CosmologyComovingBoxSize"]
            self.length_unit = self.quan(box_size, "Mpccm/h")
            self.mass_unit = \
                self.quan(k['urho'], 'g/cm**3') * (self.length_unit.in_cgs())**3
            self.time_unit = self.quan(k['utim'], 's')
            self.velocity_unit = self.quan(k['uvel'], 'cm/s')
        else:
            if "LengthUnits" in self.parameters:
                length_unit = self.parameters["LengthUnits"]
                mass_unit = self.parameters["DensityUnits"] * length_unit**3
                time_unit = self.parameters["TimeUnits"]
            elif "SimulationControl" in self.parameters:
                units = self.parameters["SimulationControl"]["Units"]
                length_unit = units["Length"]
                mass_unit = units["Density"] * length_unit**3
                time_unit = units["Time"]
            else:
                mylog.warning("Setting 1.0 in code units to be 1.0 cm")
                mylog.warning("Setting 1.0 in code units to be 1.0 s")
                length_unit = mass_unit = time_unit = 1.0

            self.length_unit = self.quan(length_unit, "cm")
            self.mass_unit = self.quan(mass_unit, "g")
            self.time_unit = self.quan(time_unit, "s")
            self.velocity_unit = self.length_unit / self.time_unit

        magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
                                (self.time_unit**2 * self.length_unit))
        magnetic_unit = np.float64(magnetic_unit.in_cgs())
        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
Example #35
def parse_orion_sinks(fn):
    '''

    Orion sink particles are stored in text files. This function
    is for figuring out which particle fields are present based on the
    number of entries per line in the *.sink file.

    '''

    # Figure out the format of the particle file
    with open(fn, 'r') as f:
        lines = f.readlines()

    try:
        line = lines[1]
    except IndexError:
        # a particle file exists, but there is only one line,
        # so no sinks have been created yet.
        index = {}
        return index

    # The basic fields that all sink particles have
    index = {'particle_mass': 0,
             'particle_position_x': 1,
             'particle_position_y': 2,
             'particle_position_z': 3,
             'particle_momentum_x': 4,
             'particle_momentum_y': 5,
             'particle_momentum_z': 6,
             'particle_angmomen_x': 7,
             'particle_angmomen_y': 8,
             'particle_angmomen_z': 9,
             'particle_id': -1}

    if len(line.strip().split()) == 11:
        # these are vanilla sinks, do nothing
        pass

    elif len(line.strip().split()) == 17:
        # these are old-style stars, add stellar model parameters
        index['particle_mlast']     = 10
        index['particle_r']         = 11
        index['particle_mdeut']     = 12
        index['particle_n']         = 13
        index['particle_mdot']      = 14
        index['particle_burnstate'] = 15

    elif (len(line.strip().split()) == 18 or len(line.strip().split()) == 19):
        # these are the newer style, add luminosity as well
        index['particle_mlast']     = 10
        index['particle_r']         = 11
        index['particle_mdeut']     = 12
        index['particle_n']         = 13
        index['particle_mdot']      = 14
        index['particle_burnstate'] = 15
        index['particle_luminosity']= 16
    else:
        # give a warning if none of the above apply:
        mylog.warning('Warning - could not figure out particle output file')
        mylog.warning('These results could be nonsense!')

    return index
Example #36
    def _setup_plots(self):
        if self._plot_valid:
            return
        for f, data in self.profile.items():
            fig = None
            axes = None
            cax = None
            draw_colorbar = True
            draw_axes = True
            zlim = (None, None)
            if f in self.plots:
                draw_colorbar = self.plots[f]._draw_colorbar
                draw_axes = self.plots[f]._draw_axes
                zlim = (self.plots[f].zmin, self.plots[f].zmax)
                if self.plots[f].figure is not None:
                    fig = self.plots[f].figure
                    axes = self.plots[f].axes
                    cax = self.plots[f].cax

            x_scale, y_scale, z_scale = self._get_field_log(f, self.profile)
            x_title, y_title, z_title = self._get_field_title(f, self.profile)

            if zlim == (None, None):
                if z_scale == 'log':
                    positive_values = data[data > 0.0]
                    if len(positive_values) == 0:
                        mylog.warning("Profiled field %s has no positive "
                                      "values.  Max = %f." %
                                      (f, np.nanmax(data)))
                        mylog.warning("Switching to linear colorbar scaling.")
                        zmin = np.nanmin(data)
                        z_scale = 'linear'
                        self._field_transform[f] = linear_transform
                    else:
                        zmin = positive_values.min()
                        self._field_transform[f] = log_transform
                else:
                    zmin = np.nanmin(data)
                    self._field_transform[f] = linear_transform
                zlim = [zmin, np.nanmax(data)]

            font_size = self._font_properties.get_size()
            f = self.profile.data_source._determine_fields(f)[0]

            # if this is a Particle Phase Plot AND if we using a single color,
            # override the colorbar here.
            splat_color = getattr(self, "splat_color", None)
            if splat_color is not None:
                cmap = matplotlib.colors.ListedColormap(splat_color, 'dummy')
            else:
                cmap = self._colormaps[f]

            self.plots[f] = PhasePlotMPL(self.profile.x, self.profile.y, data,
                                         x_scale, y_scale, z_scale,
                                         cmap, zlim,
                                         self.figure_size, font_size,
                                         fig, axes, cax)

            self.plots[f]._toggle_axes(draw_axes)
            self.plots[f]._toggle_colorbar(draw_colorbar)

            self.plots[f].axes.xaxis.set_label_text(x_title)
            self.plots[f].axes.yaxis.set_label_text(y_title)
            self.plots[f].cax.yaxis.set_label_text(z_title)

            if f in self._plot_text:
                self.plots[f].axes.text(self._text_xpos[f], self._text_ypos[f],
                                        self._plot_text[f],
                                        fontproperties=self._font_properties,
                                        **self._text_kwargs[f])

            if f in self.plot_title:
                self.plots[f].axes.set_title(self.plot_title[f])

            # x-y axes minorticks
            if f not in self._minorticks:
                self._minorticks[f] = True
            if self._minorticks[f] is True:
                self.plots[f].axes.minorticks_on()
            else:
                self.plots[f].axes.minorticks_off()

            # colorbar minorticks
            if f not in self._cbar_minorticks:
                self._cbar_minorticks[f] = True
            if self._cbar_minorticks[f] is True:
                if self._field_transform[f] == linear_transform:
                    self.plots[f].cax.minorticks_on()
                else:
                    vmin = np.float64(self.plots[f].cb.norm.vmin)
                    vmax = np.float64(self.plots[f].cb.norm.vmax)
                    mticks = self.plots[f].image.norm(
                        get_log_minorticks(vmin, vmax))
                    self.plots[f].cax.yaxis.set_ticks(mticks, minor=True)
            else:
                self.plots[f].cax.minorticks_off()

        self._set_font_properties()

        # if this is a particle plot with one color only, hide the cbar here
        if hasattr(self, "use_cbar") and self.use_cbar is False:
            self.plots[f].hide_colorbar()

        self._plot_valid = True