def __getitem__(self, item):
    # At first, we will only implement this as accepting a slice that is
    # (optionally) unitful corresponding to a specific set of coordinates
    # that result in a rectangular prism or a slice.
    try:
        return self.all_data[item]
    except (TypeError, YTFieldNotParseable):
        pass

    if isinstance(item, slice):
        if obj_length(item.start) == 3 and obj_length(item.stop) == 3:
            # This is for a ray that is not orthogonal to an axis.
            # It's straightforward to do this, so we create a ray
            # and drop out here.
            return self._create_ray(item)
        else:
            # This is for the case where we give a slice as an index; one
            # possible use case of this would be where we supply something
            # like ds.r[::256j].  This would be expanded, implicitly, into
            # ds.r[::256j, ::256j, ::256j].  Other cases would be if we do
            # ds.r[0.1:0.9], where it will be expanded along all dimensions.
            item = tuple(item for _ in range(self.ds.dimensionality))

    if item is Ellipsis:
        item = (Ellipsis,)

    # From this point, item is implicitly assumed to be iterable.
    if Ellipsis in item:
        # Expand "..." into the appropriate number of ":".
        item = list(item)
        idx = item.index(Ellipsis)
        item.pop(idx)
        if Ellipsis in item:
            # This error mimics numpy's.
            raise IndexError("an index can only have a single ellipsis ('...')")
        while len(item) < self.ds.dimensionality:
            item.insert(idx, slice(None))

    if len(item) != self.ds.dimensionality:
        # Not the right specification, and we don't want to do anything
        # implicitly.  Note that this happens *after* the implicit expansion
        # of a single slice.
        raise YTDimensionalityError(len(item), self.ds.dimensionality)

    # OK, now we need to look at our slices.  How many are a specific
    # coordinate?
    nslices = sum(isinstance(v, slice) for v in item)
    if nslices == 0:
        return self._create_point(item)
    elif nslices == 1:
        return self._create_ortho_ray(item)
    elif nslices == 2:
        return self._create_slice(item)
    else:
        if all(s.start is s.stop is s.step is None for s in item):
            return self.all_data
        return self._create_region(item)
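
# Usage sketch (not part of the method above): a minimal illustration of how
# the dispatch in __getitem__ maps index shapes onto data objects. This
# assumes a yt version whose ds.r indexing supports Ellipsis expansion as
# implemented above, and uses yt.testing.fake_random_ds to build a small
# in-memory 3D dataset so no sample data is required.
from yt.testing import fake_random_ds

ds = fake_random_ds(16)
ad = ds.r[:, :, :]             # all-empty slices -> all_data
reg = ds.r[0.25:0.75, :, :]    # three slices -> rectangular region
slc = ds.r[0.5, :, :]          # two slices -> axis-aligned slice
ray = ds.r[0.5, 0.5, :]        # one slice -> orthogonal ray
pt = ds.r[0.5, 0.5, 0.5]       # no slices -> point
slc2 = ds.r[..., 0.5]          # Ellipsis expands to ds.r[:, :, 0.5]
dens = ds.r["gas", "density"]  # field lookup is forwarded to all_data
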
def __getitem__(self, item):
    # At first, we will only implement this as accepting a slice that is
    # (optionally) unitful corresponding to a specific set of coordinates
    # that result in a rectangular prism or a slice.
    if isinstance(item, string_types):
        # This is some field; we will instead pass this back to the
        # all_data object.
        return self.all_data[item]
    if isinstance(item, tuple) and isinstance(item[1], string_types):
        return self.all_data[item]
    if isinstance(item, slice):
        if obj_length(item.start) == 3 and obj_length(item.stop) == 3:
            # This is for a ray that is not orthogonal to an axis.
            # It's straightforward to do this, so we create a ray
            # and drop out here.
            return self._create_ray(item)
        else:
            # This is for the case where we give a slice as an index; one
            # possible use case of this would be where we supply something
            # like ds.r[::256j].  This would be expanded, implicitly, into
            # ds.r[::256j, ::256j, ::256j].  Other cases would be if we do
            # ds.r[0.1:0.9], where it will be expanded along three dimensions.
            item = (item, item, item)
    if len(item) != self.ds.dimensionality:
        # Not the right specification, and we don't want to do anything
        # implicitly.  Note that this happens *after* the implicit expansion
        # of a single slice.
        raise YTDimensionalityError(len(item), self.ds.dimensionality)
    if self.ds.dimensionality != 3:
        # We'll pass on this for the time being.
        raise RuntimeError
    # OK, now we need to look at our slices.  How many are a specific
    # coordinate?
    nslices = sum(isinstance(v, slice) for v in item)
    if nslices == 0:
        return self._create_point(item)
    elif nslices == 1:
        return self._create_ortho_ray(item)
    elif nslices == 2:
        return self._create_slice(item)
    else:
        if all(s.start is s.stop is s.step is None for s in item):
            return self.all_data
        return self._create_region(item)
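
# Usage sketch for the earlier variant above (illustrative only; string_types
# dates it to yt's six-based Python 2/3 compatibility era). Field lookups
# short-circuit to all_data before any geometric interpretation, a bare slice
# is expanded to three dimensions, and non-3D datasets raise RuntimeError.
# A hypothetical session, assuming `ds` is a loaded 3D dataset:
#
#     dens = ds.r["density"]         # bare field name -> all_data["density"]
#     dens = ds.r["gas", "density"]  # (ftype, fname) tuple -> all_data
#     reg = ds.r[0.1:0.9]            # -> ds.r[0.1:0.9, 0.1:0.9, 0.1:0.9]
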
def print_all_fields(fl):
    for fn in sorted(fl):
        df = fl[fn]
        f = df._function
        s = f"{df.name}"
        print(s)
        print("^" * len(s))
        print()
        if obj_length(df.units) > 0:
            # Most universal fields are in CGS, except for these special fields.
            if df.name[1] in [
                "particle_position",
                "particle_position_x",
                "particle_position_y",
                "particle_position_z",
                "entropy",
                "kT",
                "metallicity",
                "dx",
                "dy",
                "dz",
                "cell_volume",
                "x",
                "y",
                "z",
            ]:
                print(f"   * Units: :math:`{fix_units(df.units)}`")
            else:
                print(f"   * Units: :math:`{fix_units(df.units, in_cgs=True)}`")
        print(f"   * Sampling Method: {df.sampling_type}")
        print()
        print("**Field Source**")
        print()
        if f == NullFunc:
            print("No source available.")
            print()
            continue
        else:
            print(".. code-block:: python")
            print()
            for line in inspect.getsource(f).split("\n"):
                print("    " + line)
            print()
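
# Usage sketch (illustrative): print_all_fields accepts any mapping of field
# keys to DerivedField objects, such as a dataset's field_info container, and
# emits ReST-formatted documentation for each entry. It relies on obj_length,
# fix_units, NullFunc, and inspect being in scope from the surrounding helper
# script; assuming they are, it might be driven like this:
#
#     from yt.testing import fake_random_ds
#
#     ds = fake_random_ds(16)
#     ds.index  # ensure derived fields are set up
#     print_all_fields(ds.field_info)
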
def add_field(
    self,
    name: Tuple[str, str],
    function: Callable,
    sampling_type: str,
    *,
    alias: Optional[DerivedField] = None,
    force_override: bool = False,
    **kwargs,
) -> None:
    """
    Add a new field, along with supplemental metadata, to the list of
    available fields.  This respects a number of arguments, all of which
    are passed on to the constructor for
    :class:`~yt.data_objects.api.DerivedField`.

    Parameters
    ----------
    name : tuple[str, str]
       field (or particle) type, field name
    function : callable
       A function handle that defines the field.  Should accept
       arguments (field, data)
    sampling_type : str
       "cell" or "particle" or "local"
    force_override : bool
       If False (default), an error will be raised if a field of the
       same name already exists.
    alias : DerivedField, optional
       existing field to be aliased
    units : str
       A plain text string encoding the unit.  Powers must be in
       python syntax (** instead of ^). If set to "auto" the units
       will be inferred from the return value of the field function.
    take_log : bool
       Describes whether the field should be logged
    validators : list
       A list of :class:`FieldValidator` objects
    vector_field : bool
       Describes the dimensionality of the field.  Currently unused.
    display_name : str
       A name used in the plots
    """
    # Handle the case where the field has already been added.
    if not force_override and name in self:
        return

    kwargs.setdefault("ds", self.ds)

    if not isinstance(function, Callable):  # type: ignore [arg-type]
        # type-checking is disabled because of https://github.com/python/mypy/issues/11071
        # this is compatible with lambdas and functools.partial objects
        raise TypeError(
            f"Expected a callable object, got {function} with type {type(function)}"
        )

    # Look up parameters that do not have default values.
    fparams = inspect.signature(function).parameters
    nodefaults = tuple(p.name for p in fparams.values() if p.default is p.empty)
    if nodefaults != ("field", "data"):
        raise TypeError(
            f"Received field function {function} with invalid signature. "
            f"Expected exactly 2 positional parameters ('field', 'data'), "
            f"got {nodefaults!r}"
        )
    if any(
        fparams[name].kind == fparams[name].KEYWORD_ONLY
        for name in ("field", "data")
    ):
        raise TypeError(
            f"Received field function {function} with invalid signature. "
            "Parameters 'field' and 'data' must accept positional values "
            "(they cannot be keyword-only)"
        )

    sampling_type = self._sanitize_sampling_type(sampling_type)

    if (
        not isinstance(name, str)
        and obj_length(name) == 2
        and all(isinstance(e, str) for e in name)
    ):
        self[name] = DerivedField(name, sampling_type, function, alias=alias, **kwargs)
    else:
        raise ValueError(f"Expected name to be a tuple[str, str], got {name}")
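
# Usage sketch (illustrative; the field name and units below are made up, not
# part of the yt source). In practice the documented entry point is
# ds.add_field, which forwards to this method; the validation above is why the
# field function must take exactly ('field', 'data') as positional parameters.
from yt.testing import fake_random_ds

ds = fake_random_ds(16)

def _double_density(field, data):
    # Exactly two positional parameters, ('field', 'data'), as the
    # signature check requires.
    return 2 * data["gas", "density"]

ds.add_field(
    ("gas", "double_density"),
    function=_double_density,
    sampling_type="cell",
    units="g/cm**3",
)
print(ds.r["gas", "double_density"].max())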