def __init__(self, output_dir,
                 cparam_filename="inputs",
                 fparam_filename="probin",
                 dataset_type='boxlib_native',
                 storage_filename=None,
                 units_override=None):
        """
        Initialize a BoxLib dataset rooted at *output_dir*.

        The paramfile is usually called "inputs"
        and there may be a fortran inputs file usually called "probin"
        plotname here will be a directory name
        as per BoxLib, dataset_type will be Native (implemented here), IEEE (not
        yet implemented) or ASCII (not yet implemented.)
        """
        self.fluid_types += ("boxlib",)
        # Resolve "~" and relative paths up front so later lookups are
        # unambiguous.
        self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
        # _localize_check presumably resolves these names relative to
        # output_dir -- TODO confirm against its implementation.
        self.cparam_filename = self._localize_check(cparam_filename)
        self.fparam_filename = self._localize_check(fparam_filename)
        self.storage_filename = storage_filename

        Dataset.__init__(self, output_dir, dataset_type,
                         units_override=units_override)

        # These are still used in a few places.
        if "HydroMethod" not in self.parameters.keys():
            self.parameters["HydroMethod"] = 'boxlib'
        self.parameters["Time"] = 1.     # default unit is 1...
        self.parameters["EOSType"] = -1  # default
        # Fall back to 5/3 when the file does not specify a gamma.
        self.parameters["gamma"] = self.parameters.get(
            "materials.gamma", 1.6667)
Ejemplo n.º 2
0
 def __init__(self, filename, dataset_type='art',
              fields=None, storage_filename=None,
              skip_particles=False, skip_stars=False,
              limit_level=None, spread_age=True,
              force_max_level=None, file_particle_header=None,
              file_particle_data=None, file_particle_stars=None,
              units_override=None, unit_system="cgs"):
     """
     Initialize an ART dataset from the AMR file *filename*.

     Particle companion files may be passed explicitly; otherwise
     ``_find_files`` presumably locates them near the AMR file --
     TODO confirm against its implementation.
     """
     self.fluid_types += ("art", )
     # Fall back to the module-level default hydro field list.
     if fields is None:
         fields = fluid_fields
     filename = os.path.abspath(filename)
     self._fields_in_file = fields
     self._file_amr = filename
     self._file_particle_header = file_particle_header
     self._file_particle_data = file_particle_data
     self._file_particle_stars = file_particle_stars
     self._find_files(filename)
     self.parameter_filename = filename
     self.skip_particles = skip_particles
     self.skip_stars = skip_stars
     self.limit_level = limit_level
     # max_level mirrors limit_level; None means no cap.
     self.max_level = limit_level
     self.force_max_level = force_max_level
     self.spread_age = spread_age
     Dataset.__init__(self, filename, dataset_type,
                      units_override=units_override,
                      unit_system=unit_system)
     self.storage_filename = storage_filename
Ejemplo n.º 3
0
 def __init__(self,
              filename,
              dataset_type=None,
              file_style=None,
              parameter_override=None,
              conversion_override=None,
              storage_filename=None,
              units_override=None,
              unit_system="cgs"):
     """
     Read and parse *filename* without looking at the index.

     *dataset_type* is handed on to the index to pre-select the
     data-output style, though it is not strictly required.  Entries in
     *parameter_override* take precedence over anything in the parameter
     file, and *conversion_override* maps {fieldname :
     conversion_to_cgs}, overriding the #DataCGS values.
     """
     self.fluid_types += ("enzo", )
     # Accept either the parameter file itself or its ".hierarchy"
     # companion; strip the suffix in the latter case.
     if filename.endswith(".hierarchy"):
         filename = filename[:-10]
     self._parameter_override = parameter_override if parameter_override is not None else {}
     self._conversion_override = conversion_override if conversion_override is not None else {}
     self.storage_filename = storage_filename
     Dataset.__init__(self,
                      filename,
                      dataset_type,
                      file_style=file_style,
                      units_override=units_override,
                      unit_system=unit_system)
 def __init__(self, filename, dataset_type='art',
              fields=None, storage_filename=None,
              skip_particles=False, skip_stars=False,
              limit_level=None, spread_age=True,
              force_max_level=None, file_particle_header=None,
              file_particle_data=None, file_particle_stars=None,
              units_override=None):
     """
     Initialize an ART dataset whose domain is the unit cube.
     """
     self.fluid_types += ("art", )
     # Fall back to the module-level default hydro field list.
     if fields is None:
         fields = fluid_fields
     filename = os.path.abspath(filename)
     self._fields_in_file = fields
     self._file_amr = filename
     self._file_particle_header = file_particle_header
     self._file_particle_data = file_particle_data
     self._file_particle_stars = file_particle_stars
     # Presumably locates companion particle files -- TODO confirm.
     self._find_files(filename)
     self.parameter_filename = filename
     self.skip_particles = skip_particles
     self.skip_stars = skip_stars
     self.limit_level = limit_level
     # max_level mirrors limit_level; None means no cap.
     self.max_level = limit_level
     self.force_max_level = force_max_level
     self.spread_age = spread_age
     # The domain is hard-coded to the unit cube [0, 1]^3.
     self.domain_left_edge = np.zeros(3, dtype='float')
     self.domain_right_edge = np.zeros(3, dtype='float')+1.0
     Dataset.__init__(self, filename, dataset_type,
                      units_override=units_override)
     self.storage_filename = storage_filename
Ejemplo n.º 5
0
 def __init__(self,
              filename,
              dataset_type='athena_pp',
              storage_filename=None,
              parameters=None,
              units_override=None,
              unit_system="code"):
     """
     Initialize an Athena++ dataset from an HDF5 file.

     A logarithmic index class is selected whenever any of the root-grid
     ratios differs from 1.
     """
     self.fluid_types += ("athena_pp", )
     if parameters is None:
         parameters = {}
     self.specified_parameters = parameters
     if units_override is None:
         units_override = {}
     self._handle = HDF5FileHandler(filename)
     # Element [2] of each RootGridX* attribute appears to be the
     # per-axis spacing ratio -- TODO confirm against the Athena++ format.
     xrat = self._handle.attrs["RootGridX1"][2]
     yrat = self._handle.attrs["RootGridX2"][2]
     zrat = self._handle.attrs["RootGridX3"][2]
     if xrat != 1.0 or yrat != 1.0 or zrat != 1.0:
         # Non-uniform spacing along at least one axis.
         self._index_class = AthenaPPLogarithmicIndex
         self.logarithmic = True
     else:
         self._index_class = AthenaPPHierarchy
         self.logarithmic = False
     Dataset.__init__(self,
                      filename,
                      dataset_type,
                      units_override=units_override,
                      unit_system=unit_system)
     self.filename = filename
     # Default storage name derives from the file's basename.
     if storage_filename is None:
         storage_filename = '%s.yt' % filename.split('/')[-1]
     self.storage_filename = storage_filename
     self.backup_filename = self.filename[:-4] + "_backup.gdf"
Ejemplo n.º 6
0
 def __init__(self,
              filename,
              dataset_type="openPMD",
              storage_filename=None,
              units_override=None,
              unit_system="mks",
              **kwargs):
     """
     Initialize an openPMD dataset.

     Recognized ``kwargs``: ``open_pmd_virtual_gridsize`` (default
     ``10**9``) and ``iteration``.
     """
     self._handle = HDF5FileHandler(filename)
     self.gridsize = kwargs.pop("open_pmd_virtual_gridsize", 10**9)
     # The "openPMD" attribute stores the standard version as bytes.
     self.standard_version = StrictVersion(self._handle.attrs["openPMD"].decode())
     self.iteration = kwargs.pop("iteration", None)
     self._set_paths(self._handle, path.dirname(filename), self.iteration)
     Dataset.__init__(self,
                      filename,
                      dataset_type,
                      units_override=units_override,
                      unit_system=unit_system)
     self.storage_filename = storage_filename
     self.fluid_types += ("openPMD",)
     try:
         particles = tuple(str(c) for c in self._handle[self.base_path + self.particles_path].keys())
         if len(particles) > 1:
             # Only use on-disk particle names if there is more than one species
             self.particle_types = particles
         mylog.debug("self.particle_types: {}".format(self.particle_types))
         self.particle_types_raw = self.particle_types
         self.particle_types = tuple(self.particle_types)
     except(KeyError, TypeError, AttributeError):
         # Files without a particle group are acceptable; keep defaults.
         pass
    def __init__(self, filename, dataset_type='flash_hdf5',
                 storage_filename=None,
                 particle_filename=None,
                 units_override=None):
        """
        Initialize a FLASH dataset from an HDF5 plot file.

        Parameters
        ----------
        filename : str
            Path to the FLASH HDF5 output.
        particle_filename : str, optional
            Separate particle HDF5 file; when omitted, the main file
            handle is reused for particles.

        Raises
        ------
        IOError
            If *particle_filename* is given but cannot be opened.
        """
        self.fluid_types += ("flash",)
        # A pre-existing handle means initialization already ran once.
        if self._handle is not None:
            return
        self._handle = HDF5FileHandler(filename)

        self.particle_filename = particle_filename

        if self.particle_filename is None:
            self._particle_handle = self._handle
        else:
            try:
                self._particle_handle = HDF5FileHandler(self.particle_filename)
            except (IOError, OSError):
                # Narrowed from a bare ``except``: only file-access
                # failures should become IOError; programming errors
                # (e.g. KeyboardInterrupt, NameError) must propagate.
                raise IOError(self.particle_filename)
        # These should be explicitly obtained from the file, but for now that
        # will wait until a reorganization of the source tree and better
        # generalization.
        self.refine_by = 2

        Dataset.__init__(self, filename, dataset_type, units_override=units_override)
        self.storage_filename = storage_filename

        self.parameters["HydroMethod"] = 'flash' # always PPM DE
        self.parameters["Time"] = 1. # default unit is 1...
 def __init__(self, filename, dataset_type='skeleton',
              storage_filename=None,
              units_override=None):
     """Minimal frontend constructor: register the fluid type and
     delegate all parsing to the Dataset base class."""
     self.fluid_types = self.fluid_types + ('skeleton',)
     Dataset.__init__(
         self, filename, dataset_type, units_override=units_override)
     self.storage_filename = storage_filename
Ejemplo n.º 9
0
 def __init__(self,
              filename,
              dataset_type='ramses',
              fields=None,
              storage_filename=None,
              units_override=None,
              unit_system="cgs",
              extra_particle_fields=None,
              cosmological=None,
              bbox=None):
     """
     Initialize a RAMSES dataset.

     fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
             If set to None, will try a default set of fields
     extra_particle_fields: An array of extra particle variables in order of position in the particle_XXXXX.outYYYYY file.
     cosmological: If set to None, automatically detect cosmological simulation. If a boolean, force
                   its value.
     """
     # The parameter documentation above was previously a no-op string
     # expression buried after the first statements; it is now a real
     # docstring (visible via help()/__doc__).
     # Here we want to initiate a traceback, if the reader is not built.
     if isinstance(fields, string_types):
         fields = field_aliases[fields]
     self._fields_in_file = fields
     self._extra_particle_fields = extra_particle_fields
     # No extra-field warning has been issued yet.
     self._warn_extra_fields = False
     self.force_cosmological = cosmological
     self._bbox = bbox
     Dataset.__init__(self,
                      filename,
                      dataset_type,
                      units_override=units_override,
                      unit_system=unit_system)
     # Register every fluid type whose files exist on disk.
     for FH in get_field_handlers():
         if FH.any_exist(self):
             self.fluid_types += (FH.ftype, )
     self.storage_filename = storage_filename
Ejemplo n.º 10
0
    def __init__(
        self,
        filename,
        dataset_type="flash_hdf5",
        storage_filename=None,
        particle_filename=None,
        units_override=None,
        unit_system="cgs",
    ):
        """
        Initialize a FLASH dataset.

        When *particle_filename* is omitted, a companion particle file is
        guessed by substituting "part" for "plt_cnt" in *filename*; the
        guess is discarded if its recorded time disagrees with the plot
        file's.
        """
        self.fluid_types += ("flash", )
        # A pre-existing handle means initialization already ran once.
        if self._handle is not None:
            return
        self._handle = HDF5FileHandler(filename)

        self.particle_filename = particle_filename

        if self.particle_filename is None:
            # try to guess the particle filename
            try:
                self._particle_handle = HDF5FileHandler(
                    filename.replace("plt_cnt", "part"))
                self.particle_filename = filename.replace("plt_cnt", "part")
                mylog.info("Particle file found: %s",
                           self.particle_filename.split("/")[-1])
            except OSError:
                # No guessable particle file; reuse the plot-file handle.
                self._particle_handle = self._handle
        else:
            # particle_filename is specified by user
            self._particle_handle = HDF5FileHandler(self.particle_filename)

        # Check if the particle file has the same time
        if self._particle_handle != self._handle:
            # "real scalars"[0][1] appears to hold the simulation time --
            # TODO confirm against the FLASH file format.
            part_time = self._particle_handle.handle.get("real scalars")[0][1]
            plot_time = self._handle.handle.get("real scalars")[0][1]
            if not np.isclose(part_time, plot_time):
                # Mismatched times: fall back to the plot-file handle.
                self._particle_handle = self._handle
                mylog.warning(
                    "%s and %s are not at the same time. "
                    "This particle file will not be used.",
                    self.particle_filename,
                    filename,
                )

        # These should be explicitly obtained from the file, but for now that
        # will wait until a reorganization of the source tree and better
        # generalization.
        self.refine_by = 2

        Dataset.__init__(
            self,
            filename,
            dataset_type,
            units_override=units_override,
            unit_system=unit_system,
        )
        self.storage_filename = storage_filename

        self.parameters["HydroMethod"] = "flash"  # always PPM DE
        self.parameters["Time"] = 1.0  # default unit is 1...
 def __init__(
     self, filename, dataset_type="athena", storage_filename=None, parameters=None, units_override=None, nprocs=1
 ):
     """
     Initialize an Athena dataset.

     Legacy ``*_unit`` entries found in *parameters* are migrated into
     *units_override* with a one-time deprecation warning.  The index is
     instantiated eagerly so the domain edges are known.
     """
     self.fluid_types += ("athena",)
     self.nprocs = nprocs
     if parameters is None:
         parameters = {}
     self.specified_parameters = parameters.copy()
     if units_override is None:
         units_override = {}
     # This is for backwards-compatibility
     already_warned = False
     # Iterate over a snapshot of the items: popping from the dict while
     # iterating its live .items() view raises RuntimeError on Python 3.
     for k, v in list(self.specified_parameters.items()):
         if k.endswith("_unit") and k not in units_override:
             if not already_warned:
                 mylog.warning(
                     "Supplying unit conversions from the parameters dict is deprecated, "
                     + "and will be removed in a future release. Use units_override instead."
                 )
                 already_warned = True
             units_override[k] = self.specified_parameters.pop(k)
     Dataset.__init__(self, filename, dataset_type, units_override=units_override)
     self.filename = filename
     if storage_filename is None:
         storage_filename = "%s.yt" % filename.split("/")[-1]
     self.storage_filename = storage_filename
     self.backup_filename = self.filename[:-4] + "_backup.gdf"
     # Unfortunately we now have to mandate that the index gets
     # instantiated so that we can make sure we have the correct left
     # and right domain edges.
     self.index
Ejemplo n.º 12
0
 def __init__(
     self,
     filename,
     dataset_type="athena",
     storage_filename=None,
     parameters=None,
     units_override=None,
     nprocs=1,
     unit_system="cgs",
     default_species_fields=None,
 ):
     """
     Initialize an Athena dataset; the index is created eagerly so the
     domain edges are correct from the start.
     """
     self.fluid_types = self.fluid_types + ("athena",)
     self.nprocs = nprocs
     # Keep a private copy so caller mutations do not leak in.
     if parameters is None:
         self.specified_parameters = {}
     else:
         self.specified_parameters = parameters.copy()
     if units_override is None:
         units_override = {}
     Dataset.__init__(
         self,
         filename,
         dataset_type,
         units_override=units_override,
         unit_system=unit_system,
         default_species_fields=default_species_fields,
     )
     self.filename = filename
     if storage_filename is None:
         storage_filename = f"{filename.split('/')[-1]}.yt"
     self.storage_filename = storage_filename
     self.backup_filename = self.filename[:-4] + "_backup.gdf"
     # Unfortunately we now have to mandate that the index gets
     # instantiated so that we can make sure we have the correct left
     # and right domain edges.
     self.index
Ejemplo n.º 13
0
    def __init__(self, filename, dataset_type="chombo_hdf5",
                 storage_filename=None, ini_filename=None,
                 units_override=None, unit_system="cgs"):
        """Initialize a Chombo dataset; geometry is fixed to cartesian
        and the run is never treated as cosmological."""
        self.fluid_types = self.fluid_types + ("chombo",)
        self._handle = HDF5FileHandler(filename)
        self.dataset_type = dataset_type

        self.geometry = "cartesian"
        self.ini_filename = ini_filename
        self.fullplotdir = os.path.abspath(filename)
        Dataset.__init__(self, filename, self.dataset_type,
                         units_override=units_override,
                         unit_system=unit_system)
        self.storage_filename = storage_filename
        self.cosmological_simulation = False

        # These are parameters that I very much wish to get rid of.
        self.parameters["HydroMethod"] = "chombo"
        self.parameters["DualEnergyFormalism"] = 0
        self.parameters["EOSType"] = -1  # default
Ejemplo n.º 14
0
    def __init__(
        self,
        filename,
        dataset_type="artio",
        storage_filename=None,
        max_range=1024,
        units_override=None,
        unit_system="cgs",
        default_species_fields=None,
    ):
        """
        Initialize an ARTIO dataset.

        The fileset prefix is *filename* minus its 4-character extension;
        it is passed to ``artio_fileset``, encoded as bytes on Python 3.
        """
        # Compare version components numerically: the previous string
        # comparison (sys.version < "3") relied on lexicographic ordering
        # of the full version string and misclassifies e.g. Python 10.x.
        from sys import version_info

        # A pre-existing handle means initialization already ran once.
        if self._handle is not None:
            return
        self.max_range = max_range
        self.fluid_types += ("artio", )
        self._filename = filename
        self._fileset_prefix = filename[:-4]
        if version_info[0] < 3:
            self._handle = artio_fileset(self._fileset_prefix)
        else:
            self._handle = artio_fileset(bytes(self._fileset_prefix, "utf-8"))
        self.artio_parameters = self._handle.parameters
        # Here we want to initiate a traceback, if the reader is not built.
        Dataset.__init__(
            self,
            filename,
            dataset_type,
            units_override=units_override,
            unit_system=unit_system,
            default_species_fields=default_species_fields,
        )
        self.storage_filename = storage_filename
Ejemplo n.º 15
0
 def __init__(self, filename, dataset_type=None,
              file_style=None,
              parameter_override=None,
              conversion_override=None,
              storage_filename=None,
              units_override=None,
              unit_system="cgs"):
     """
     This class is a stripped down class that simply reads and parses
     *filename* without looking at the index.  *dataset_type* gets passed
     to the index to pre-determine the style of data-output.  However,
     it is not strictly necessary.  Optionally you may specify a
     *parameter_override* dictionary that will override anything in the
     parameter file and a *conversion_override* dictionary that consists
     of {fieldname : conversion_to_cgs} that will override the #DataCGS.
     """
     self.fluid_types += ("enzop",)
     # Default both override mappings to fresh empty dicts.
     self._parameter_override = parameter_override if parameter_override is not None else {}
     self._conversion_override = conversion_override if conversion_override is not None else {}
     self.storage_filename = storage_filename
     Dataset.__init__(self, filename, dataset_type, file_style=file_style,
                      units_override=units_override, unit_system=unit_system)
     warnings.warn(
         "The Enzo-P file format is still under development and may " +
         "change. If loading fails, simulation data will need to be " +
         "re-generated.")
Ejemplo n.º 16
0
    def __init__(self, filename, dataset_type="flash_particle_hdf5",
                 storage_filename=None, units_override=None,
                 index_order=None, index_filename=None,
                 unit_system="cgs"):
        """FLASH particle dataset: validates the requested index order and
        skips re-initialization when a handle already exists."""
        self.index_order = validate_index_order(index_order)
        self.index_filename = index_filename

        # A non-None handle means __init__ already ran once.
        if self._handle is not None:
            return
        self._handle = HDF5FileHandler(filename)
        self.refine_by = 2
        Dataset.__init__(self, filename, dataset_type,
                         units_override=units_override,
                         unit_system=unit_system)
        self.storage_filename = storage_filename
 def __init__(self, filename, dataset_type='grid_data_format',
              storage_filename=None, geometry=None,
              units_override=None):
     """GDF dataset: record the geometry and hand everything else to the
     base class."""
     self.geometry = geometry
     self.fluid_types = self.fluid_types + ("gdf",)
     Dataset.__init__(self, filename, dataset_type,
                      units_override=units_override)
     self.filename = filename
     self.storage_filename = storage_filename
Ejemplo n.º 18
0
    def __init__(self, parameter_override=None, conversion_override=None):
        """In-memory Enzo dataset: store the override mappings and
        initialize against a placeholder parameter file name."""
        self.fluid_types += ("enzo", )
        self._parameter_override = parameter_override if parameter_override is not None else {}
        self._conversion_override = conversion_override if conversion_override is not None else {}

        Dataset.__init__(self, "InMemoryParameterFile", self._dataset_type)
Ejemplo n.º 19
0
 def __init__(self, pyne_mesh, dataset_type='moab_hex8_pyne',
              storage_filename=None):
     """Wrap an in-memory PyNE mesh; a synthetic filename is derived from
     the mesh object's id() since there is no file on disk."""
     self.fluid_types = self.fluid_types + ("pyne",)
     self.pyne_mesh = pyne_mesh
     filename = "pyne_mesh_" + str(id(pyne_mesh))
     Dataset.__init__(self, str(filename), dataset_type)
     self.storage_filename = storage_filename
     self.filename = filename
 def __init__(self, filename, dataset_type='moab_hex8',
              storage_filename=None, units_override=None):
     """MOAB hex8 dataset backed by an HDF5 file."""
     self.fluid_types = self.fluid_types + ("moab",)
     Dataset.__init__(self, filename, dataset_type,
                      units_override=units_override)
     self.storage_filename = storage_filename
     self.filename = filename
     # The HDF5 handle is opened after base-class initialization.
     self._handle = HDF5FileHandler(filename)
    def __init__(self, parameter_override=None, conversion_override=None):
        """Construct the in-memory Enzo dataset, defaulting both override
        mappings to empty dicts."""
        self.fluid_types += ("enzo",)
        if parameter_override is None:
            parameter_override = {}
        if conversion_override is None:
            conversion_override = {}
        self._parameter_override = parameter_override
        self._conversion_override = conversion_override

        Dataset.__init__(self, "InMemoryParameterFile", self._dataset_type)
 def __init__(self, pyne_mesh, dataset_type='moab_hex8_pyne',
              storage_filename=None, units_override=None):
     """In-memory PyNE mesh dataset; the filename is synthesized from the
     mesh's id() because no file exists on disk."""
     self.fluid_types = self.fluid_types + ("pyne",)
     self.pyne_mesh = pyne_mesh
     filename = "pyne_mesh_" + str(id(pyne_mesh))
     Dataset.__init__(self, str(filename), dataset_type,
                      units_override=units_override)
     self.storage_filename = storage_filename
     self.filename = filename
Ejemplo n.º 23
0
    def __init__(self, filename, dataset_type='ramses',
                 fields=None, storage_filename=None,
                 units_override=None, unit_system="cgs",
                 extra_particle_fields=None, cosmological=None,
                 bbox=None):
        """
        Initialize a RAMSES dataset, detecting whether outputs are split
        into ``group_?????`` subdirectories.
        """
        # Here we want to initiate a traceback, if the reader is not built.
        if isinstance(fields, string_types):
            fields = field_aliases[fields]
        '''
        fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
                If set to None, will try a default set of fields
        extra_particle_fields: An array of extra particle variables in order of position in the particle_XXXXX.outYYYYY file.
        cosmological: If set to None, automatically detect cosmological simulation. If a boolean, force
                      its value.
        '''

        self._fields_in_file = fields
        # By default, extra fields have not triggered a warning
        self._warned_extra_fields = defaultdict(lambda: False)
        self._extra_particle_fields = extra_particle_fields
        self.force_cosmological = cosmological
        self._bbox = bbox

        # Infer if the output is organized in groups
        root_folder, group_folder = os.path.split(os.path.split(filename)[0])

        if group_folder == 'group_00001':
            # Count the number of groups
            # note: we exclude the unlikely event that one of the group is actually a file
            # instead of a folder
            self.num_groups = len(
                [_ for _ in glob(os.path.join(root_folder, 'group_?????'))
                 if os.path.isdir(_)])
            self.root_folder = root_folder
        else:
            # Flat layout: num_groups == 0 marks the ungrouped case.
            self.root_folder = os.path.split(filename)[0]
            self.num_groups = 0

        Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                         unit_system=unit_system)

        # Add the particle types
        ptypes = []
        for PH in get_particle_handlers():
            if PH.any_exist(self):
                ptypes.append(PH.ptype)

        ptypes = tuple(ptypes)
        self.particle_types = self.particle_types_raw = ptypes

        # Add the fluid types
        for FH in get_field_handlers():
            # Drop stale detection state before re-checking existence.
            FH.purge_detected_fields(self)
            if FH.any_exist(self):
                self.fluid_types += (FH.ftype, )

        self.storage_filename = storage_filename
Ejemplo n.º 24
0
    def __init__(self, filename,
                 dataset_type='fits',
                 auxiliary_files=[],
                 nprocs=None,
                 storage_filename=None,
                 nan_mask=None,
                 suppress_astropy_warnings=True,
                 parameters=None,
                 units_override=None,
                 unit_system="cgs"):
        """
        Initialize a FITS dataset from *filename* plus optional
        *auxiliary_files* (paths, HDU objects, or HDULists).

        *nan_mask* may be a float (applied to all fields under the key
        "all") or a dict mapping field names to mask values.
        """
        # NOTE(review): mutable default for auxiliary_files -- shared
        # across calls; safe only while never mutated in place.
        if parameters is None:
            parameters = {}
        parameters["nprocs"] = nprocs
        self.specified_parameters = parameters

        if suppress_astropy_warnings:
            warnings.filterwarnings('ignore', module="astropy", append=True)
        auxiliary_files = ensure_list(auxiliary_files)
        self.filenames = [filename] + auxiliary_files
        self.num_files = len(self.filenames)
        self.fluid_types += ("fits",)
        if nan_mask is None:
            self.nan_mask = {}
        elif isinstance(nan_mask, float):
            self.nan_mask = {"all":nan_mask}
        elif isinstance(nan_mask, dict):
            self.nan_mask = nan_mask
        self._handle = FITSFileHandler(self.filenames[0])
        # In-memory HDU objects get a synthetic filename.
        if (isinstance(self.filenames[0], _astropy.pyfits.hdu.image._ImageBaseHDU) or
            isinstance(self.filenames[0], _astropy.pyfits.HDUList)):
            fn = "InMemoryFITSFile_%s" % uuid.uuid4().hex
        else:
            fn = self.filenames[0]
        # NOTE(review): this appends the handler object to its own
        # _fits_files list -- possibly meant to append the primary file
        # instead; confirm before relying on _fits_files contents.
        self._handle._fits_files.append(self._handle)
        if self.num_files > 1:
            for fits_file in auxiliary_files:
                if isinstance(fits_file, _astropy.pyfits.hdu.image._ImageBaseHDU):
                    f = _astropy.pyfits.HDUList([fits_file])
                elif isinstance(fits_file, _astropy.pyfits.HDUList):
                    f = fits_file
                else:
                    # Bare names fall back to the configured test-data dir.
                    if os.path.exists(fits_file):
                        fn = fits_file
                    else:
                        fn = os.path.join(ytcfg.get("yt","test_data_dir"),fits_file)
                    f = _astropy.pyfits.open(fn, memmap=True,
                                             do_not_scale_image_data=True,
                                             ignore_blank=True)
                self._handle._fits_files.append(f)

        self.refine_by = 2

        Dataset.__init__(self, fn, dataset_type, units_override=units_override,
                         unit_system=unit_system)
        self.storage_filename = storage_filename
Ejemplo n.º 25
0
 def __init__(self,
              filename,
              dataset_type='grid_data_format',
              storage_filename=None,
              geometry='cartesian'):
     """GDF dataset variant defaulting to cartesian geometry, without
     unit-override support."""
     self.geometry = geometry
     self.fluid_types = self.fluid_types + ("gdf",)
     Dataset.__init__(self, filename, dataset_type)
     self.filename = filename
     self.storage_filename = storage_filename
Ejemplo n.º 26
0
 def __init__(self,
              filename,
              dataset_type='skeleton',
              storage_filename=None,
              units_override=None):
     """Skeleton frontend: everything beyond fluid-type registration is
     handled by the Dataset base class."""
     self.fluid_types += ('skeleton', )
     Dataset.__init__(self, filename, dataset_type,
                      units_override=units_override)
     self.storage_filename = storage_filename
Ejemplo n.º 27
0
    def __init__(self, filename, dataset_type='flash_particle_hdf5',
                 storage_filename=None,
                 units_override=None,
                 n_ref=64, unit_system="cgs"):
        """FLASH particle-file dataset; *n_ref* is the octree refinement
        threshold."""
        # A pre-existing handle means __init__ already ran once.
        if self._handle is not None:
            return
        self._handle = HDF5FileHandler(filename)
        self.n_ref = n_ref
        self.refine_by = 2
        Dataset.__init__(self, filename, dataset_type,
                         units_override=units_override,
                         unit_system=unit_system)
        self.storage_filename = storage_filename
Ejemplo n.º 28
0
 def __init__(self,
              filename,
              dataset_type='moab_hex8',
              storage_filename=None,
              units_override=None):
     """MOAB hex8 dataset; the HDF5 handle is opened after base-class
     initialization."""
     self.fluid_types += ("moab", )
     Dataset.__init__(self, filename, dataset_type,
                      units_override=units_override)
     self.storage_filename = storage_filename
     self.filename = filename
     self._handle = HDF5FileHandler(filename)
Ejemplo n.º 29
0
 def __init__(self, filename, dataset_type='artio',
              storage_filename=None, max_range=1024):
     """ARTIO dataset (legacy constructor without unit handling)."""
     # A live handle means initialization already happened.
     if self._handle is not None:
         return
     self.max_range = max_range
     self.fluid_types = self.fluid_types + ("artio",)
     self._filename = filename
     # The fileset prefix is the filename minus its 4-char extension.
     self._fileset_prefix = filename[:-4]
     self._handle = artio_fileset(self._fileset_prefix)
     self.artio_parameters = self._handle.parameters
     # Here we want to initiate a traceback, if the reader is not built.
     Dataset.__init__(self, filename, dataset_type)
     self.storage_filename = storage_filename
 def __init__(self, filename, dataset_type='ramses',
              fields=None, storage_filename=None,
              units_override=None):
     """
     Initialize a RAMSES dataset.

     fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
             If set to None, will try a default set of fields
     """
     # The parameter documentation above was previously a no-op string
     # expression after the first statements; it is now a real docstring.
     # Here we want to initiate a traceback, if the reader is not built.
     if isinstance(fields, str):
         fields = field_aliases[fields]
     self.fluid_types += ("ramses",)
     self._fields_in_file = fields
     Dataset.__init__(self, filename, dataset_type, units_override=units_override)
     self.storage_filename = storage_filename
Ejemplo n.º 31
0
 def __init__(self, filename, dataset_type='grid_data_format',
              storage_filename=None, geometry=None, units_override=None):
     """GDF dataset constructor with optional geometry and unit
     overrides."""
     self.geometry = geometry
     self.fluid_types += ("gdf", )
     Dataset.__init__(self, filename, dataset_type,
                      units_override=units_override)
     self.storage_filename = storage_filename
     self.filename = filename
Ejemplo n.º 32
0
 def __init__(self, filename, dataset_type='enzo-p',
              storage_filename=None, units_override=None):
     """Enzo-P dataset; the companion ``<filename>.parameters`` file
     holds the run parameters."""
     self.fluid_types += ('enzo-p', )
     self.parameter_filename = filename + ".parameters"
     Dataset.__init__(self, filename, dataset_type,
                      units_override=units_override)
     self.storage_filename = storage_filename
     # refinement factor between a grid and its subgrid
     self.refine_by = 2
Ejemplo n.º 33
0
    def __init__(self, filename, dataset_type='pluto_hdf5',
                 storage_filename=None, ini_filename=None):
        """PLUTO (Chombo-format HDF5) dataset; the current time is read
        directly from the file's 'time' attribute."""
        self._handle = HDF5FileHandler(filename)
        self.current_time = self._handle.attrs['time']
        self.ini_filename = ini_filename
        self.fullplotdir = os.path.abspath(filename)
        Dataset.__init__(self, filename, dataset_type)
        self.storage_filename = storage_filename
        self.cosmological_simulation = False

        # These are parameters that I very much wish to get rid of.
        self.parameters["HydroMethod"] = 'chombo' # always PPM DE
        self.parameters["DualEnergyFormalism"] = 0
        self.parameters["EOSType"] = -1 # default
Ejemplo n.º 34
0
 def __init__(self, filename, dataset_type='ramses',
              fields=None, storage_filename=None,
              units_override=None, unit_system="cgs"):
     """
     Initialize a RAMSES dataset.

     fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
             If set to None, will try a default set of fields
     """
     # The parameter documentation above was previously a no-op string
     # expression after the first statements; it is now a real docstring.
     # Here we want to initiate a traceback, if the reader is not built.
     if isinstance(fields, string_types):
         fields = field_aliases[fields]
     self.fluid_types += ("ramses",)
     self._fields_in_file = fields
     Dataset.__init__(self, filename, dataset_type, units_override=units_override,
                      unit_system=unit_system)
     self.storage_filename = storage_filename
Ejemplo n.º 35
0
    def __init__(self,
                 filename,
                 dataset_type='ramses',
                 fields=None,
                 storage_filename=None,
                 units_override=None,
                 unit_system="cgs",
                 extra_particle_fields=None,
                 cosmological=None,
                 bbox=None):
        """
        Initialize a RAMSES dataset and register the particle and fluid
        types whose files exist on disk.
        """
        # Here we want to initiate a traceback, if the reader is not built.
        if isinstance(fields, string_types):
            fields = field_aliases[fields]
        '''
        fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
                If set to None, will try a default set of fields
        extra_particle_fields: An array of extra particle variables in order of position in the particle_XXXXX.outYYYYY file.
        cosmological: If set to None, automatically detect cosmological simulation. If a boolean, force
                      its value.
        '''
        self._fields_in_file = fields
        # By default, extra fields have not triggered a warning
        self._warned_extra_fields = defaultdict(lambda: False)
        self._extra_particle_fields = extra_particle_fields
        self.force_cosmological = cosmological
        self._bbox = bbox
        Dataset.__init__(self,
                         filename,
                         dataset_type,
                         units_override=units_override,
                         unit_system=unit_system)

        # Add the particle types
        ptypes = []
        for PH in get_particle_handlers():
            if PH.any_exist(self):
                ptypes.append(PH.ptype)

        ptypes = tuple(ptypes)
        self.particle_types = self.particle_types_raw = ptypes

        # Add the fluid types
        for FH in get_field_handlers():
            # Drop stale detection state before re-checking existence.
            FH.purge_detected_fields(self)
            if FH.any_exist(self):
                self.fluid_types += (FH.ftype, )

        self.storage_filename = storage_filename
Ejemplo n.º 36
0
def test_schema_validation():
    """Every supported spelling of a unit override must sanitize to a
    quantity that mock_quan accepts and that converts to a length."""
    valid_schemas = [
        {"length_unit": 1.0},
        {"length_unit": [1.0]},
        {"length_unit": (1.0, )},
        {"length_unit": int(1.0)},
        {"length_unit": (1.0, "m")},
        {"length_unit": [1.0, "m"]},
        {"length_unit": YTQuantity(1.0, "m")},
    ]

    for schema in valid_schemas:
        sanitized = Dataset._sanitize_units_override(schema)
        for value in sanitized.values():
            # mock_quan must not raise (TypeError), and the result must
            # be convertible to a length unit.
            quantity = mock_quan(value)
            quantity.to("pc")
Ejemplo n.º 37
0
 def __init__(self, filename, dataset_type='athena',
              storage_filename=None, parameters=None):
     """Set up an Athena dataset and force the index to build so the
     domain edges are known up front."""
     self.fluid_types += ("athena",)
     self.specified_parameters = {} if parameters is None else parameters
     Dataset.__init__(self, filename, dataset_type)
     self.filename = filename
     if storage_filename is None:
         storage_filename = '%s.yt' % filename.split('/')[-1]
     self.storage_filename = storage_filename
     self.backup_filename = self.filename[:-4] + "_backup.gdf"
     # Unfortunately we now have to mandate that the index gets
     # instantiated so that we can make sure we have the correct left
     # and right domain edges.
     self.index
    def __init__(self, filename, dataset_type='openPMD',
                 storage_filename=None,
                 units_override=None):
        # BUGFIX: the original body mixed tabs and spaces for
        # indentation, which raises TabError on Python 3; indentation is
        # now uniform 4-space.
        # This defines which of the data sets are meshes (fields)
        self.fluid_types += ('openPMD',)
        # This defines which of the data sets are particles
        self.particle_types = ["electrons", "ions", "all"]
        self.particle_types = tuple(self.particle_types)
        self.particle_types_raw = self.particle_types

        # Opens a HDF5 file and stores its file handle in _handle
        # All _handle objects refers to the file
        self._handle = HDF5FileHandler(filename)
        Dataset.__init__(self, filename, dataset_type,
                         units_override=units_override)
        self.storage_filename = storage_filename
Ejemplo n.º 39
0
 def __init__(self,
              pyne_mesh,
              dataset_type='moab_hex8_pyne',
              storage_filename=None,
              units_override=None,
              unit_system="cgs"):
     """Wrap an in-memory PyNE mesh as a yt dataset."""
     self.fluid_types += ("pyne", )
     # There is no file on disk; synthesize a unique pseudo-filename
     # from the mesh object's identity.
     filename = "pyne_mesh_%s" % id(pyne_mesh)
     self.pyne_mesh = pyne_mesh
     Dataset.__init__(self, str(filename), dataset_type,
                      units_override=units_override,
                      unit_system=unit_system)
     self.storage_filename = storage_filename
     self.filename = filename
Ejemplo n.º 40
0
    def __init__(
        self,
        filename,
        dataset_type="gamer",
        storage_filename=None,
        particle_filename=None,
        units_override=None,
        unit_system="cgs",
    ):
        # Guard against re-initialization of an already-opened dataset.
        if self._handle is not None:
            return

        self.fluid_types += ("gamer", )
        self._handle = HDF5FileHandler(filename)
        self.particle_filename = particle_filename

        # to catch both the new and old data formats for the grid data
        try:
            self._group_grid = self._handle["GridData"]
        except KeyError:
            self._group_grid = self._handle["Data"]

        if "Particle" in self._handle:
            self._group_particle = self._handle["Particle"]

        if self.particle_filename is None:
            self._particle_handle = self._handle
        else:
            try:
                self._particle_handle = HDF5FileHandler(self.particle_filename)
            except Exception as err:
                # BUGFIX: chain the original exception instead of
                # discarding it, so the root cause stays visible.
                raise IOError(self.particle_filename) from err

        # currently GAMER only supports refinement by a factor of 2
        self.refine_by = 2

        Dataset.__init__(
            self,
            filename,
            dataset_type,
            units_override=units_override,
            unit_system=unit_system,
        )
        self.storage_filename = storage_filename
Ejemplo n.º 41
0
 def __init__(
     self,
     filename,
     dataset_type="moab_hex8",
     storage_filename=None,
     units_override=None,
     unit_system="cgs",
 ):
     """Dataset backed by a MOAB hex8 HDF5 file."""
     self.fluid_types += ("moab", )
     Dataset.__init__(self, filename, dataset_type,
                      units_override=units_override,
                      unit_system=unit_system)
     self.storage_filename = storage_filename
     # Keep an open handle on the underlying HDF5 file.
     self._handle = HDF5FileHandler(filename)
    def __init__(self, filename, dataset_type='chombo_hdf5',
                 storage_filename=None, ini_filename=None,
                 units_override=None):
        """Initialize a Chombo dataset from an HDF5 plot file."""
        self.fluid_types += ("chombo",)
        self._handle = HDF5FileHandler(filename)
        self.dataset_type = dataset_type

        self.geometry = "cartesian"
        self.ini_filename = ini_filename
        self.fullplotdir = os.path.abspath(filename)
        Dataset.__init__(self, filename, self.dataset_type,
                         units_override=units_override)
        self.storage_filename = storage_filename
        self.cosmological_simulation = False

        # Legacy parameters kept only for backward compatibility.
        self.parameters["HydroMethod"] = 'chombo'
        self.parameters["DualEnergyFormalism"] = 0
        self.parameters["EOSType"] = -1  # default
 def __init__(self, filename, dataset_type='artio',
              storage_filename=None, max_range=1024,
              units_override=None):
     # BUGFIX: the original compared the sys.version *string* against
     # '3', a fragile lexical test; use sys.version_info instead.
     from sys import version_info
     # Guard against re-initialization of an already-opened fileset.
     if self._handle is not None:
         return
     self.max_range = max_range
     self.fluid_types += ("artio",)
     self._filename = filename
     # Strip the ".art" extension to obtain the fileset prefix.
     self._fileset_prefix = filename[:-4]
     if version_info[0] < 3:
         self._handle = artio_fileset(self._fileset_prefix)
     else:
         # artio expects bytes on Python 3.
         self._handle = artio_fileset(bytes(self._fileset_prefix, 'utf-8'))
     self.artio_parameters = self._handle.parameters
     # Here we want to initiate a traceback, if the reader is not built.
     Dataset.__init__(self, filename, dataset_type,
                      units_override=units_override)
     self.storage_filename = storage_filename
 def __init__(self, filename, dataset_type='dm_art',
                       fields=None, storage_filename=None,
                       skip_particles=False, skip_stars=False,
              limit_level=None, spread_age=True,
              force_max_level=None, file_particle_header=None,
              file_particle_stars=None):
     self.over_refine_factor = 1
     self.n_ref = 64
     self.particle_types += ("all",)
     if fields is None:
         fields = particle_fields
     # BUGFIX: abspath was previously nested inside the `if fields is
     # None` branch, so explicit field lists skipped path normalization
     # (the parallel art frontend applies it unconditionally).
     filename = os.path.abspath(filename)
     self._fields_in_file = fields
     self._file_particle = filename
     self._file_particle_header = file_particle_header
     self._find_files(filename)
     self.parameter_filename = filename
     self.skip_stars = skip_stars
     self.spread_age = spread_age
     # Unit-cube domain by default.
     self.domain_left_edge = np.zeros(3, dtype='float')
     self.domain_right_edge = np.zeros(3, dtype='float')+1.0
     Dataset.__init__(self, filename, dataset_type)
     self.storage_filename = storage_filename
 def __init__(self, filename, dataset_type=None,
              file_style = None,
              parameter_override = None,
              conversion_override = None,
              storage_filename = None,
              units_override=None):
     """
     A stripped-down class that simply reads and parses *filename*
     without looking at the index.  *dataset_type* gets passed to the
     index to pre-determine the style of data-output, but is not
     strictly necessary.  Optionally, a *parameter_override* dictionary
     overrides anything in the parameter file, and a
     *conversion_override* dictionary of {fieldname :
     conversion_to_cgs} overrides the #DataCGS values.
     """
     self.fluid_types += ("enzo",)
     # A ".hierarchy" suffix points at the index file; trim it back to
     # the dataset name.
     if filename.endswith(".hierarchy"):
         filename = filename[:-10]
     self._parameter_override = {} if parameter_override is None else parameter_override
     self._conversion_override = {} if conversion_override is None else conversion_override
     self.storage_filename = storage_filename
     Dataset.__init__(self, filename, dataset_type, file_style=file_style,
                      units_override=units_override)
    def __init__(self, filename,
                 dataset_type='fits',
                 auxiliary_files=None,
                 nprocs=None,
                 storage_filename=None,
                 nan_mask=None,
                 spectral_factor=1.0,
                 z_axis_decomp=False,
                 suppress_astropy_warnings=True,
                 parameters=None,
                 units_override=None):

        # BUGFIX: auxiliary_files previously defaulted to a mutable list
        # ([]), which is shared across all calls; default to None and
        # build a fresh list per instance.
        if auxiliary_files is None:
            auxiliary_files = []

        if parameters is None:
            parameters = {}
        parameters["nprocs"] = nprocs
        self.specified_parameters = parameters

        self.z_axis_decomp = z_axis_decomp
        self.spectral_factor = spectral_factor

        if suppress_astropy_warnings:
            warnings.filterwarnings('ignore', module="astropy", append=True)
        auxiliary_files = ensure_list(auxiliary_files)
        self.filenames = [filename] + auxiliary_files
        self.num_files = len(self.filenames)
        self.fluid_types += ("fits",)
        # nan_mask may be a single float applied to all fields, or a
        # per-field dict.
        if nan_mask is None:
            self.nan_mask = {}
        elif isinstance(nan_mask, float):
            self.nan_mask = {"all": nan_mask}
        elif isinstance(nan_mask, dict):
            self.nan_mask = nan_mask
        self._handle = FITSFileHandler(self.filenames[0])
        # In-memory HDUs have no on-disk name; synthesize a unique one.
        if (isinstance(self.filenames[0], _astropy.pyfits.hdu.image._ImageBaseHDU) or
            isinstance(self.filenames[0], _astropy.pyfits.HDUList)):
            fn = "InMemoryFITSFile_%s" % uuid.uuid4().hex
        else:
            fn = self.filenames[0]
        self._handle._fits_files.append(self._handle)
        if self.num_files > 1:
            for fits_file in auxiliary_files:
                if isinstance(fits_file, _astropy.pyfits.hdu.image._ImageBaseHDU):
                    f = _astropy.pyfits.HDUList([fits_file])
                elif isinstance(fits_file, _astropy.pyfits.HDUList):
                    f = fits_file
                else:
                    if os.path.exists(fits_file):
                        fn = fits_file
                    else:
                        fn = os.path.join(ytcfg.get("yt","test_data_dir"),fits_file)
                    f = _astropy.pyfits.open(fn, memmap=True,
                                             do_not_scale_image_data=True,
                                             ignore_blank=True)
                self._handle._fits_files.append(f)

        # An "EVENTS" extension means X-ray event data rather than images.
        if len(self._handle) > 1 and self._handle[1].name == "EVENTS":
            self.events_data = True
            self.first_image = 1
            self.primary_header = self._handle[self.first_image].header
            self.naxis = 2
            self.wcs = _astropy.pywcs.WCS(naxis=2)
            self.events_info = {}
            for k,v in self.primary_header.items():
                if k.startswith("TTYP"):
                    if v.lower() in ["x","y"]:
                        num = k.strip("TTYPE")
                        self.events_info[v.lower()] = (self.primary_header["TLMIN"+num],
                                                       self.primary_header["TLMAX"+num],
                                                       self.primary_header["TCTYP"+num],
                                                       self.primary_header["TCRVL"+num],
                                                       self.primary_header["TCDLT"+num],
                                                       self.primary_header["TCRPX"+num])
                    elif v.lower() in ["energy","time"]:
                        num = k.strip("TTYPE")
                        unit = self.primary_header["TUNIT"+num].lower()
                        if unit.endswith("ev"): unit = unit.replace("ev","eV")
                        self.events_info[v.lower()] = unit
            self.axis_names = [self.events_info[ax][2] for ax in ["x","y"]]
            # Optional event rebinning factor from user parameters.
            self.reblock = 1
            if "reblock" in self.specified_parameters:
                self.reblock = self.specified_parameters["reblock"]
            self.wcs.wcs.cdelt = [self.events_info["x"][4]*self.reblock,
                                  self.events_info["y"][4]*self.reblock]
            self.wcs.wcs.crpix = [(self.events_info["x"][5]-0.5)/self.reblock+0.5,
                                  (self.events_info["y"][5]-0.5)/self.reblock+0.5]
            self.wcs.wcs.ctype = [self.events_info["x"][2],self.events_info["y"][2]]
            self.wcs.wcs.cunit = ["deg","deg"]
            self.wcs.wcs.crval = [self.events_info["x"][3],self.events_info["y"][3]]
            self.dims = [(self.events_info["x"][1]-self.events_info["x"][0])/self.reblock,
                         (self.events_info["y"][1]-self.events_info["y"][0])/self.reblock]
        else:
            self.events_data = False
            # Sometimes the primary hdu doesn't have an image
            if len(self._handle) > 1 and self._handle[0].header["naxis"] == 0:
                self.first_image = 1
            else:
                self.first_image = 0
            self.primary_header = self._handle[self.first_image].header
            self.naxis = self.primary_header["naxis"]
            self.axis_names = [self.primary_header.get("ctype%d" % (i+1),"LINEAR")
                               for i in range(self.naxis)]
            self.dims = [self.primary_header["naxis%d" % (i+1)]
                         for i in range(self.naxis)]
            wcs = _astropy.pywcs.WCS(header=self.primary_header)
            # Collapse a 4-axis WCS (e.g. with a degenerate Stokes axis)
            # down to three axes.
            if self.naxis == 4:
                self.wcs = _astropy.pywcs.WCS(naxis=3)
                self.wcs.wcs.crpix = wcs.wcs.crpix[:3]
                self.wcs.wcs.cdelt = wcs.wcs.cdelt[:3]
                self.wcs.wcs.crval = wcs.wcs.crval[:3]
                self.wcs.wcs.cunit = [str(unit) for unit in wcs.wcs.cunit][:3]
                self.wcs.wcs.ctype = [type for type in wcs.wcs.ctype][:3]
            else:
                self.wcs = wcs

        self.refine_by = 2

        Dataset.__init__(self, fn, dataset_type, units_override=units_override)
        self.storage_filename = storage_filename