Example #1
    def _parse_parameter_file(self):

        hvals = self._get_hvals()

        self.dimensionality = 3
        self.refine_by = 2
        self.parameters["HydroMethod"] = "sph"
        self.unique_identifier = \
            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
        # Set standard values

        # We may have an overridden bounding box.
        if self.domain_left_edge is None:
            self.domain_left_edge = np.zeros(3, "float64")
            self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
        nz = 1 << self.over_refine_factor
        self.domain_dimensions = np.ones(3, "int32") * nz
        self.periodicity = (True, True, True)

        self.cosmological_simulation = 1

        self.current_redshift = hvals["Redshift"]
        self.omega_lambda = hvals["OmegaLambda"]
        self.omega_matter = hvals["Omega0"]
        self.hubble_constant = hvals["HubbleParam"]
        # According to the Gadget manual, OmegaLambda will be zero for
        # non-cosmological datasets.  However, it may be the case that
        # individuals are running cosmological simulations *without* Lambda, in
        # which case we may be doing something incorrect here.
        # It may be possible to deduce whether ComovingIntegration is on
        # somehow, but opinions on this vary.
        if self.omega_lambda == 0.0:
            only_on_root(mylog.info, "Omega Lambda is 0.0, so we are turning off Cosmology.")
            self.hubble_constant = 1.0  # So that scaling comes out correct
            self.cosmological_simulation = 0
            self.current_redshift = 0.0
            # This may not be correct.
            self.current_time = hvals["Time"]
        else:
            # Now we calculate our time based on the cosmology, because in
            # ComovingIntegration hvals["Time"] will in fact be the expansion
            # factor, not the actual integration time, so we re-calculate
            # global time from our Cosmology.
            cosmo = Cosmology(self.hubble_constant,
                              self.omega_matter, self.omega_lambda)
            self.current_time = cosmo.hubble_time(self.current_redshift)
            only_on_root(mylog.info, "Calculating time from %0.3e to be %0.3e seconds",
                         hvals["Time"], self.current_time)
        self.parameters = hvals

        prefix = os.path.abspath(
            os.path.join(os.path.dirname(self.parameter_filename),
                         os.path.basename(self.parameter_filename).split(".", 1)[0]))

        if hvals["NumFiles"] > 1:
            self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix)
        else:
            self.filename_template = self.parameter_filename

        self.file_count = hvals["NumFiles"]
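
For context, a minimal sketch of the cosmological branch above, assuming the same yt Cosmology API the example itself calls (the header values are made up):

    from yt.utilities.cosmology import Cosmology

    # Illustrative header values, not read from a real snapshot.
    hubble_constant, omega_matter, omega_lambda = 0.7, 0.3, 0.7
    cosmo = Cosmology(hubble_constant, omega_matter, omega_lambda)
    # In ComovingIntegration runs hvals["Time"] is the expansion factor,
    # so the physical time is recomputed from the redshift instead.
    current_time = cosmo.hubble_time(0.5)  # a YTQuantity convertible to seconds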
Example #2
    def _find_outputs(self):
        """
        Search for directories matching the data dump keywords.
        If found, get dataset times by opening the ds.
        """

        # look for time outputs.
        potential_time_outputs = glob.glob(
            os.path.join(
                self.parameters["GlobalDir"], f"{self.parameters['DataDumpDir']}*"
            )
        )
        self.all_time_outputs = self._check_for_outputs(potential_time_outputs)
        self.all_time_outputs.sort(key=lambda obj: obj["time"])

        # look for redshift outputs.
        potential_redshift_outputs = glob.glob(
            os.path.join(
                self.parameters["GlobalDir"], f"{self.parameters['RedshiftDumpDir']}*"
            )
        )
        self.all_redshift_outputs = self._check_for_outputs(potential_redshift_outputs)
        self.all_redshift_outputs.sort(key=lambda obj: obj["time"])

        self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
        self.all_outputs.sort(key=lambda obj: obj["time"])
        only_on_root(mylog.info, "Located %d total outputs.", len(self.all_outputs))

        # manually set final time and redshift with last output
        if self.all_outputs:
            self.final_time = self.all_outputs[-1]["time"]
            if self.cosmological_simulation:
                self.final_redshift = self.all_outputs[-1]["redshift"]
Example #3
    def _check_for_outputs(self, potential_outputs):
        r"""
        Check a list of files to see if they are valid datasets.
        """

        only_on_root(mylog.info, "Checking %d potential outputs.", 
                     len(potential_outputs))

        my_outputs = {}
        llevel = mylog.level
        # suppress logging as we load every dataset, unless set to debug
        if llevel > 10 and llevel < 40:
            mylog.setLevel(40)
        for my_storage, output in parallel_objects(potential_outputs, 
                                                   storage=my_outputs):
            if os.path.exists(output):
                try:
                    ds = load(output)
                    if ds is not None:
                        my_storage.result = {"filename": output,
                                             "time": ds.current_time.in_units("s")}
                        if ds.cosmological_simulation:
                            my_storage.result["redshift"] = ds.current_redshift
                except YTUnidentifiedDataType:
                    mylog.error("Failed to load %s", output)
        mylog.setLevel(llevel)
        my_outputs = [my_output for my_output in my_outputs.values() \
                      if my_output is not None]
        return my_outputs
Example #4
    def _check_for_outputs(self, potential_outputs):
        r"""
        Check a list of files to see if they are valid datasets.
        """

        only_on_root(mylog.info, "Checking %d potential outputs.",
                     len(potential_outputs))

        my_outputs = {}
        for my_storage, output in parallel_objects(potential_outputs,
                                                   storage=my_outputs):
            try:
                ds = load(output)
            except (FileNotFoundError, YTUnidentifiedDataType):
                mylog.error("Failed to load %s", output)
                continue
            my_storage.result = {
                "filename": output,
                "time": ds.current_time.in_units("s"),
            }
            if ds.cosmological_simulation:
                my_storage.result["redshift"] = ds.current_redshift

        my_outputs = [
            my_output for my_output in my_outputs.values()
            if my_output is not None
        ]
        return my_outputs
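
Both variants above rely on the storage pattern of yt's parallel_objects helper; a minimal sketch, with squared integers as placeholder work:

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        parallel_objects

    results = {}
    for sto, item in parallel_objects(range(4), storage=results):
        # Each task stores its result; entries left unset stay None and
        # are filtered out afterwards, as in _check_for_outputs above.
        sto.result = item ** 2
    # After the loop, results maps task index -> stored value on all ranks.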
Example #5
    def _set_code_unit_attributes(self):
        """
        Sets the units from the SWIFT internal unit system.

        Currently sets length, mass, time, and temperature.

        SWIFT uses comoving coordinates without the usual h-factors.
        """
        units = self._get_info_attributes("Units")

        if self.cosmological_simulation == 1:
            msg = "Assuming length units are in comoving centimetres"
            only_on_root(mylog.info, msg)
            self.length_unit = self.quan(
                float(units["Unit length in cgs (U_L)"]), "cmcm")
        else:
            msg = "Assuming length units are in physical centimetres"
            only_on_root(mylog.info, msg)
            self.length_unit = self.quan(
                float(units["Unit length in cgs (U_L)"]), "cm")

        self.mass_unit = self.quan(float(units["Unit mass in cgs (U_M)"]), "g")
        self.time_unit = self.quan(float(units["Unit time in cgs (U_t)"]), "s")
        self.temperature_unit = self.quan(
            float(units["Unit temperature in cgs (U_T)"]), "K")

        return
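
In yt's unit system, "cmcm" denotes comoving centimetres while "cm" is physical; a hedged sketch of the distinction, assuming an already-loaded cosmological dataset ds with a registered scale factor:

    # Hypothetical usage; ds and its unit registry are assumptions.
    L_com = ds.quan(1.0, "cmcm")  # one comoving centimetre
    L_phys = L_com.to("cm")       # physical length, scaled by the expansion factor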
Example #6
    def _check_for_outputs(self, potential_outputs):
        r"""
        Check a list of files to see if they are valid datasets.
        """

        only_on_root(mylog.info, "Checking %d potential outputs.",
                     len(potential_outputs))

        my_outputs = {}
        for my_storage, output in parallel_objects(potential_outputs,
                                                   storage=my_outputs):
            if os.path.exists(output):
                try:
                    ds = load(output)
                    if ds is not None:
                        num_steps = ds.num_steps
                        my_storage.result = {
                            "filename": output,
                            "num_steps": num_steps
                        }
                except YTOutputNotIdentified:
                    mylog.error("Failed to load %s", output)
        my_outputs = [
            my_output for my_output in my_outputs.values()
            if my_output is not None
        ]
        return my_outputs
Example #7
 def load_all_plugins(self, ftype="gas"):
     loaded = []
     for n in sorted(field_plugins):
         loaded += self.load_plugin(n, ftype)
         only_on_root(mylog.info, "Loaded %s (%s new fields)",
                      n, len(loaded))
     self.find_dependencies(loaded)
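
only_on_root, used throughout these examples, simply invokes the wrapped callable on the root processor only, so log lines are not duplicated across MPI ranks. A minimal sketch, assuming yt's funcs module:

    from yt.funcs import mylog, only_on_root

    # Emitted once by the root rank rather than once per process.
    only_on_root(mylog.info, "Loaded %s (%s new fields)", "angular_momentum", 3)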
Example #8
    def _check_for_outputs(self, potential_outputs):
        r"""
        Check a list of files to see if they are valid datasets.
        """

        only_on_root(mylog.info, "Checking %d potential outputs.",
                     len(potential_outputs))

        my_outputs = {}
        for my_storage, output in parallel_objects(potential_outputs,
                                                   storage=my_outputs):
            if os.path.exists(output):
                try:
                    ds = load(output)
                    if ds is not None:
                        my_storage.result = {
                            "filename": output,
                            "time": ds.current_time.in_units("s")
                        }
                        if ds.cosmological_simulation:
                            my_storage.result["redshift"] = ds.current_redshift
                except YTOutputNotIdentified:
                    mylog.error("Failed to load %s", output)
        my_outputs = [my_output for my_output in my_outputs.values() \
                      if my_output is not None]
        return my_outputs
Example #9
    def _initialize_particle_handler(self):
        self._setup_data_io()
        self._setup_filenames()

        index_ptype = self.index_ptype
        if index_ptype == "all":
            self.total_particles = sum(
                sum(d.total_particles.values()) for d in self.data_files)
        else:
            self.total_particles = sum(d.total_particles[index_ptype]
                                       for d in self.data_files)
        ds = self.dataset
        self.oct_handler = ParticleOctreeContainer(
            [1, 1, 1],
            ds.domain_left_edge,
            ds.domain_right_edge,
            over_refine=ds.over_refine_factor)
        self.oct_handler.n_ref = ds.n_ref
        only_on_root(
            mylog.info, "Allocating for %0.3e particles "
            "(index particle type '%s')", self.total_particles, index_ptype)
        # No more than 256^3 in the region finder.
        N = min(len(self.data_files), 256)
        self.regions = ParticleRegions(ds.domain_left_edge,
                                       ds.domain_right_edge, [N, N, N],
                                       len(self.data_files))
        self._initialize_indices()
        self.oct_handler.finalize()
        self.max_level = self.oct_handler.max_level
        self.dataset.max_level = self.max_level
        tot = sum(self.oct_handler.recursively_count().values())
        only_on_root(mylog.info, "Identified %0.3e octs", tot)
Example #10
    def _set_code_unit_attributes(self):
        # Set a sane default for cosmological simulations.
        if self._unit_base is None and self.cosmological_simulation == 1:
            only_on_root(mylog.info,
                         "Assuming length units are in Mpc/h (comoving)")
            self._unit_base = dict(length=(1.0, "Mpccm/h"))
        # For the remaining units we fall back to the standard Gadget
        # defaults.
        unit_base = self._unit_base or {}

        if "length" in unit_base:
            length_unit = unit_base["length"]
        elif "UnitLength_in_cm" in unit_base:
            if self.cosmological_simulation == 0:
                length_unit = (unit_base["UnitLength_in_cm"], "cm")
            else:
                length_unit = (unit_base["UnitLength_in_cm"], "cmcm/h")
        else:
            raise RuntimeError
        length_unit = _fix_unit_ordering(length_unit)
        setdefaultattr(self, 'length_unit',
                       self.quan(length_unit[0], length_unit[1]))

        if "velocity" in unit_base:
            velocity_unit = unit_base["velocity"]
        elif "UnitVelocity_in_cm_per_s" in unit_base:
            velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
        else:
            if self.cosmological_simulation == 0:
                velocity_unit = (1e5, "cm/s")
            else:
                velocity_unit = (1e5, "cmcm/s")
        velocity_unit = _fix_unit_ordering(velocity_unit)
        setdefaultattr(self, 'velocity_unit',
                       self.quan(velocity_unit[0], velocity_unit[1]))

        # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
        # Default to 1e10 Msun/h if mass is not specified.
        if "mass" in unit_base:
            mass_unit = unit_base["mass"]
        elif "UnitMass_in_g" in unit_base:
            if self.cosmological_simulation == 0:
                mass_unit = (unit_base["UnitMass_in_g"], "g")
            else:
                mass_unit = (unit_base["UnitMass_in_g"], "g/h")
        else:
            # Sane default
            mass_unit = (1.0, "1e10*Msun/h")
        mass_unit = _fix_unit_ordering(mass_unit)
        setdefaultattr(self, 'mass_unit', self.quan(mass_unit[0],
                                                    mass_unit[1]))

        if "time" in unit_base:
            time_unit = unit_base["time"]
        elif "UnitTime_in_s" in unit_base:
            time_unit = (unit_base["UnitTime_in_s"], "s")
        else:
            time_unit = (1., "s")
        setdefaultattr(self, 'time_unit', self.quan(time_unit[0],
                                                    time_unit[1]))
Example #11
    def __init__(self, parameter_filename, simulation_type,
                 near_redshift, far_redshift,
                 observer_redshift=0.0,
                 use_minimum_datasets=True, deltaz_min=0.0,
                 minimum_coherent_box_fraction=0.0,
                 time_data=True, redshift_data=True,
                 find_outputs=False, set_parameters=None,
                 output_dir="LC", output_prefix="LightCone"):

        self.near_redshift = near_redshift
        self.far_redshift = far_redshift
        self.observer_redshift = observer_redshift
        self.use_minimum_datasets = use_minimum_datasets
        self.deltaz_min = deltaz_min
        self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
        if set_parameters is None:
            self.set_parameters = {}
        else:
            self.set_parameters = set_parameters
        self.output_dir = output_dir
        self.output_prefix = output_prefix

        # Create output directory.
        if not os.path.exists(self.output_dir):
            only_on_root(os.mkdir, self.output_dir)

        # Calculate light cone solution.
        CosmologySplice.__init__(self, parameter_filename, simulation_type,
                                 find_outputs=find_outputs)
        self.light_cone_solution = \
          self.create_cosmology_splice(self.near_redshift, self.far_redshift,
                                       minimal=self.use_minimum_datasets,
                                       deltaz_min=self.deltaz_min,
                                       time_data=time_data,
                                       redshift_data=redshift_data)
Example #12
 def load_all_plugins(self, ftype="gas"):
     loaded = []
     for n in sorted(field_plugins):
         loaded += self.load_plugin(n, ftype)
         only_on_root(mylog.debug, "Loaded %s (%s new fields)", n,
                      len(loaded))
     self.find_dependencies(loaded)
Example #13
 def load_all_plugins(self, ftype: Optional[str] = "gas"):
     if ftype is None:
         return
     mylog.debug("Loading field plugins for field type: %s.", ftype)
     loaded = []
     for n in sorted(field_plugins):
         loaded += self.load_plugin(n, ftype)
         only_on_root(mylog.debug, "Loaded %s (%s new fields)", n,
                      len(loaded))
     self.find_dependencies(loaded)
Example #14
    def _find_outputs(self):
        """
        Search for directories matching the data dump keywords.
        If found, get dataset times by opening the ds.
        """
        potential_outputs = glob.glob(self._snapshot_format())
        self.all_outputs = self._check_for_outputs(potential_outputs)
        self.all_outputs.sort(key=lambda obj: obj["time"])
        only_on_root(mylog.info, "Located %d total outputs.", len(self.all_outputs))

        # manually set final time and redshift with last output
        if self.all_outputs:
            self.final_time = self.all_outputs[-1]["time"]
            if self.cosmological_simulation:
                self.final_redshift = self.all_outputs[-1]["redshift"]
Example #15
    def _check_for_outputs(self, potential_outputs):
        """
        Check a list of files to see if they are valid datasets.
        """

        only_on_root(
            mylog.info, "Checking %d potential outputs.", len(potential_outputs)
        )

        my_outputs = {}
        llevel = mylog.level
        # suppress logging as we load every dataset, unless set to debug
        if llevel > 10 and llevel < 40:
            mylog.setLevel(40)
        for my_storage, output in parallel_objects(
            potential_outputs, storage=my_outputs
        ):
            if self.parameters["DataDumpDir"] in output:
                dir_key = self.parameters["DataDumpDir"]
                output_key = self.parameters["DataDumpName"]
            else:
                dir_key = self.parameters["RedshiftDumpDir"]
                output_key = self.parameters["RedshiftDumpName"]
            index = output[output.find(dir_key) + len(dir_key) :]
            filename = os.path.join(
                self.parameters["GlobalDir"],
                f"{dir_key}{index}",
                f"{output_key}{index}",
            )
            try:
                ds = load(filename)
            except (FileNotFoundError, YTUnidentifiedDataType):
                mylog.error("Failed to load %s", filename)
                continue
            my_storage.result = {
                "filename": filename,
                "time": ds.current_time.in_units("s"),
            }
            if ds.cosmological_simulation:
                my_storage.result["redshift"] = ds.current_redshift
        mylog.setLevel(llevel)
        my_outputs = [
            my_output for my_output in my_outputs.values() if my_output is not None
        ]

        return my_outputs
Example #16
    def __init__(self,
                 parameter_filename,
                 simulation_type,
                 near_redshift,
                 far_redshift,
                 observer_redshift=0.0,
                 use_minimum_datasets=True,
                 deltaz_min=0.0,
                 minimum_coherent_box_fraction=0.0,
                 time_data=True,
                 redshift_data=True,
                 find_outputs=False,
                 set_parameters=None,
                 output_dir="LC",
                 output_prefix="LightCone"):

        self.near_redshift = near_redshift
        self.far_redshift = far_redshift
        self.observer_redshift = observer_redshift
        self.use_minimum_datasets = use_minimum_datasets
        self.deltaz_min = deltaz_min
        self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
        if set_parameters is None:
            self.set_parameters = {}
        else:
            self.set_parameters = set_parameters
        self.output_dir = output_dir
        self.output_prefix = output_prefix

        # Create output directory.
        if not os.path.exists(self.output_dir):
            only_on_root(os.mkdir, self.output_dir)

        # Calculate light cone solution.
        CosmologySplice.__init__(self,
                                 parameter_filename,
                                 simulation_type,
                                 find_outputs=find_outputs)
        self.light_cone_solution = \
          self.create_cosmology_splice(self.near_redshift, self.far_redshift,
                                       minimal=self.use_minimum_datasets,
                                       deltaz_min=self.deltaz_min,
                                       time_data=time_data,
                                       redshift_data=redshift_data)
Example #17
    def _check_for_outputs(self, potential_outputs):
        """
        Check a list of files to see if they are valid datasets.
        """

        only_on_root(mylog.info, "Checking %d potential outputs.",
                     len(potential_outputs))

        my_outputs = {}
        llevel = mylog.level
        # suppress logging as we load every dataset, unless set to debug
        if llevel > 10 and llevel < 40:
            mylog.setLevel(40)
        for my_storage, output in parallel_objects(potential_outputs,
                                                   storage=my_outputs):
            if self.parameters['DataDumpDir'] in output:
                dir_key = self.parameters['DataDumpDir']
                output_key = self.parameters['DataDumpName']
            else:
                dir_key = self.parameters['RedshiftDumpDir']
                output_key = self.parameters['RedshiftDumpName']
            index = output[output.find(dir_key) + len(dir_key):]
            filename = os.path.join(self.parameters['GlobalDir'],
                                    "%s%s" % (dir_key, index),
                                    "%s%s" % (output_key, index))
            if os.path.exists(filename):
                try:
                    ds = load(filename)
                    if ds is not None:
                        my_storage.result = {
                            'filename': filename,
                            'time': ds.current_time.in_units("s")
                        }
                        if ds.cosmological_simulation:
                            my_storage.result['redshift'] = ds.current_redshift
                except YTOutputNotIdentified:
                    mylog.error('Failed to load %s', filename)
        mylog.setLevel(llevel)
        my_outputs = [my_output for my_output in my_outputs.values() \
                      if my_output is not None]

        return my_outputs
Example #18
    def __init__(self, filename=None):

        default_filename = False
        if filename is None:
            filename = _get_data_file()
            default_filename = True

        if not os.path.exists(filename):
            mylog.warning("File %s does not exist, will attempt to find it." % filename)
            filename = _get_data_file(data_file=filename)
        only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
        in_file = h5py.File(filename, "r")
        if "info" in in_file.attrs:
            only_on_root(mylog.info, in_file.attrs["info"])
        if default_filename and \
          in_file.attrs["version"] < xray_data_version:
            raise ObsoleteDataException()
        else:
            only_on_root(mylog.info, "X-ray emissivity data version: %s." % \
                         in_file.attrs["version"])

        for field in ["emissivity_primordial", "emissivity_metals",
                      "log_nH", "log_T", "log_E"]:
            if field in in_file:
                setattr(self, field, in_file[field][:])
        in_file.close()

        E_diff = np.diff(self.log_E)
        self.E_bins = YTArray(
            np.power(10, np.concatenate([
                self.log_E[:-1] - 0.5 * E_diff,
                [self.log_E[-1] - 0.5 * E_diff[-1],
                 self.log_E[-1] + 0.5 * E_diff[-1]]])),
            "keV")
        self.dnu = (np.diff(self.E_bins) / hcgs).in_units("Hz")
Example #19
    def __init__(self,
                 table_type,
                 redshift=0.0,
                 data_dir=None,
                 use_metals=True):

        filename = _get_data_file(table_type, data_dir=data_dir)
        only_on_root(mylog.info, "Loading emissivity data from %s", filename)
        in_file = h5py.File(filename, mode="r")
        if "info" in in_file.attrs:
            only_on_root(mylog.info, parse_h5_attr(in_file, "info"))
        if parse_h5_attr(in_file, "version") != data_version[table_type]:
            raise ObsoleteDataException(table_type)
        else:
            only_on_root(
                mylog.info,
                "X-ray '%s' emissivity data version: %s." %
                (table_type, parse_h5_attr(in_file, "version")),
            )

        self.log_T = in_file["log_T"][:]
        self.emissivity_primordial = in_file["emissivity_primordial"][:]
        if "log_nH" in in_file:
            self.log_nH = in_file["log_nH"][:]
        if use_metals:
            self.emissivity_metals = in_file["emissivity_metals"][:]
        self.ebin = YTArray(in_file["E"], "keV")
        in_file.close()
        self.dE = np.diff(self.ebin)
        self.emid = 0.5 * (self.ebin[1:] + self.ebin[:-1]).to("erg")
        self.redshift = redshift
Example #20
    def _check_for_outputs(self, potential_outputs):
        r"""
        Check a list of files to see if they are valid datasets.
        """

        only_on_root(mylog.info, "Checking %d potential outputs.",
                     len(potential_outputs))

        my_outputs = {}
        for my_storage, output in parallel_objects(potential_outputs,
                                                   storage=my_outputs):
            try:
                ds = load(output)
            except (FileNotFoundError, YTUnidentifiedDataType):
                mylog.error("Failed to load %s", output)
                continue
            my_storage.result = {"filename": output, "num_steps": ds.num_steps}

        my_outputs = [
            my_output for my_output in my_outputs.values()
            if my_output is not None
        ]
        return my_outputs
Example #21
    def __init__(self, filename=None):

        default_filename = False
        if filename is None:
            filename = _get_data_file()
            default_filename = True

        if not os.path.exists(filename):
            mylog.warning("File %s does not exist, will attempt to find it." %
                          filename)
            filename = _get_data_file(data_file=filename)
        only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
        in_file = h5py.File(filename, "r")
        if "info" in in_file.attrs:
            only_on_root(mylog.info, in_file.attrs["info"])
        if default_filename and \
          in_file.attrs["version"] < xray_data_version:
            raise ObsoleteDataException()
        else:
            only_on_root(mylog.info, "X-ray emissivity data version: %s." % \
                         in_file.attrs["version"])

        for field in [
                "emissivity_primordial", "emissivity_metals", "log_nH",
                "log_T", "log_E"
        ]:
            if field in in_file:
                setattr(self, field, in_file[field][:])
        in_file.close()

        E_diff = np.diff(self.log_E)
        self.E_bins = YTArray(
            np.power(10, np.concatenate([
                self.log_E[:-1] - 0.5 * E_diff,
                [self.log_E[-1] - 0.5 * E_diff[-1],
                 self.log_E[-1] + 0.5 * E_diff[-1]]])),
            "keV")
        self.dnu = (np.diff(self.E_bins) / hcgs).in_units("Hz")
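
The E_bins construction above recovers bin edges from log-spaced bin centers; the same idea in plain NumPy, with made-up centers:

    import numpy as np

    log_E = np.array([0.0, 0.5, 1.0])  # log10 of bin centers (illustrative)
    E_diff = np.diff(log_E)
    edges = np.concatenate([log_E[:-1] - 0.5 * E_diff,
                            [log_E[-1] - 0.5 * E_diff[-1],
                             log_E[-1] + 0.5 * E_diff[-1]]])
    E_bins = np.power(10, edges)  # len(log_E) + 1 edges bracketing each center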
Example #22
    def __init__(self, filename, dataset_type="gadget_binary",
                 additional_fields=(),
                 unit_base=None,
                 index_order=None,
                 index_filename=None,
                 kdtree_filename=None,
                 kernel_name=None,
                 bounding_box = None,
                 header_spec = "default",
                 field_spec = "default",
                 ptype_spec = "default",
                 long_ids = False,
                 units_override=None,
                 mean_molecular_weight=None,
                 header_offset = 0,
                 unit_system="cgs",
                 use_dark_factor = False,
                 w_0 = -1.0,
                 w_a = 0.0):
        if self._instantiated:
            return
        # Check if filename is a directory
        if os.path.isdir(filename):
            # Get the .0 snapshot file. We know there's only 1 and it's valid since we
            # came through _is_valid in load()
            for f in os.listdir(filename):
                fname = os.path.join(filename, f)
                if ('.0' in f) and ('.ewah' not in f) and os.path.isfile(fname):
                    filename = os.path.join(filename, f)
                    break
        self._header = GadgetBinaryHeader(filename, header_spec)
        header_size = self._header.size
        if header_size != [256]:
            only_on_root(
                mylog.warn,
                "Non-standard header size is detected! "
                "Gadget-2 standard header is 256 bytes, but yours is %s. "
                "Make sure a non-standard header is actually expected. "
                "Otherwise something is wrong, "
                "and you might want to check how the dataset is loaded. "
                "Futher information about header specification can be found in "
                "https://yt-project.org/docs/dev/examining/loading_data.html#header-specification.",
                header_size
            )
        self._field_spec = self._setup_binary_spec(
            field_spec, gadget_field_specs)
        self._ptype_spec = self._setup_binary_spec(
            ptype_spec, gadget_ptype_specs)
        self.storage_filename = None
        if long_ids:
            self._id_dtype = 'u8'
        else:
            self._id_dtype = 'u4'
        self.long_ids = long_ids
        self.header_offset = header_offset
        if unit_base is not None and "UnitLength_in_cm" in unit_base:
            # We assume this is comoving, because in the absence of comoving
            # integration the redshift will be zero.
            unit_base['cmcm'] = 1.0 / unit_base["UnitLength_in_cm"]
        self._unit_base = unit_base
        if bounding_box is not None:
            # This ensures that we know a bounding box has been applied
            self._domain_override = True
            bbox = np.array(bounding_box, dtype="float64")
            if bbox.shape == (2, 3):
                bbox = bbox.transpose()
            self.domain_left_edge = bbox[:, 0]
            self.domain_right_edge = bbox[:, 1]
        else:
            self.domain_left_edge = self.domain_right_edge = None
        if units_override is not None:
            raise RuntimeError("units_override is not supported for GadgetDataset. " +
                               "Use unit_base instead.")

        # Set dark energy parameters before cosmology object is created
        self.use_dark_factor = use_dark_factor
        self.w_0 = w_0
        self.w_a = w_a

        super(GadgetDataset, self).__init__(
            filename, dataset_type=dataset_type,
            unit_system=unit_system,
            index_order=index_order,
            index_filename=index_filename,
            kdtree_filename=kdtree_filename,
            kernel_name=kernel_name)
        if self.cosmological_simulation:
            self.time_unit.convert_to_units('s/h')
            self.length_unit.convert_to_units('kpccm/h')
            self.mass_unit.convert_to_units('g/h')
        else:
            self.time_unit.convert_to_units('s')
            self.length_unit.convert_to_units('kpc')
            self.mass_unit.convert_to_units('Msun')
        if mean_molecular_weight is None:
            self.mu = default_mu
        else:
            self.mu = mean_molecular_weight
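
A hedged usage sketch for the constructor above: unit_base carries the Gadget unit-system values, here with illustrative numbers and a placeholder path:

    import yt

    unit_base = {
        "UnitLength_in_cm": 3.09e21,      # ~1 kpc
        "UnitMass_in_g": 1.989e43,        # ~1e10 Msun
        "UnitVelocity_in_cm_per_s": 1e5,  # 1 km/s
    }
    ds = yt.load("snapshot_010", unit_base=unit_base)  # path is a placeholder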
Example #23
    def _set_code_unit_attributes(self):
        # If no units passed in by user, set a sane default (Gadget-2 users
        # guide).
        if self._unit_base is None:
            if self.cosmological_simulation == 1:
                only_on_root(
                    mylog.info, "Assuming length units are in kpc/h (comoving)")
                self._unit_base = dict(length=(1.0, "kpccm/h"))
            else:
                only_on_root(
                    mylog.info, "Assuming length units are in kpc (physical)")
                self._unit_base = dict(length=(1.0, "kpc"))

        # If units passed in by user, decide what to do about
        # co-moving and factors of h
        unit_base = self._unit_base or {}
        if "length" in unit_base:
            length_unit = unit_base["length"]
        elif "UnitLength_in_cm" in unit_base:
            if self.cosmological_simulation == 0:
                length_unit = (unit_base["UnitLength_in_cm"], "cm")
            else:
                length_unit = (unit_base["UnitLength_in_cm"], "cmcm/h")
        else:
            raise RuntimeError
        length_unit = _fix_unit_ordering(length_unit)
        self.length_unit = self.quan(length_unit[0], length_unit[1])

        unit_base = self._unit_base or {}

        if self.cosmological_simulation:
            # see http://www.mpa-garching.mpg.de/gadget/gadget-list/0113.html
            # for why we need to include a factor of square root of the
            # scale factor
            vel_units = "cm/s * sqrt(a)"
        else:
            vel_units = "cm/s"

        if "velocity" in unit_base:
            velocity_unit = unit_base["velocity"]
        elif "UnitVelocity_in_cm_per_s" in unit_base:
            velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], vel_units)
        else:
            velocity_unit = (1e5, vel_units)
        velocity_unit = _fix_unit_ordering(velocity_unit)
        self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])

        # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
        # Default to 1e10 Msun/h if mass is not specified.
        if "mass" in unit_base:
            mass_unit = unit_base["mass"]
        elif "UnitMass_in_g" in unit_base:
            if self.cosmological_simulation == 0:
                mass_unit = (unit_base["UnitMass_in_g"], "g")
            else:
                mass_unit = (unit_base["UnitMass_in_g"], "g/h")
        else:
            # Sane default
            mass_unit = (1e10, "Msun/h")
        mass_unit = _fix_unit_ordering(mass_unit)
        self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
        if self.cosmological_simulation:
            # self.velocity_unit is the unit to rescale on-disk velocities, The
            # actual internal velocity unit is really in comoving units
            # since the time unit is derived from the internal velocity unit, we
            # infer the internal velocity unit here and name it vel_unit
            #
            # see http://www.mpa-garching.mpg.de/gadget/gadget-list/0113.html
            if 'velocity' in unit_base:
                vel_unit = unit_base['velocity']
            elif "UnitVelocity_in_cm_per_s" in unit_base:
                vel_unit = (unit_base['UnitVelocity_in_cm_per_s'], 'cmcm/s')
            else:
                vel_unit = (1, 'kmcm/s')
            vel_unit = self.quan(*vel_unit)
        else:
            vel_unit = self.velocity_unit
        self.time_unit = self.length_unit / vel_unit

        if "specific_energy" in unit_base:
            specific_energy_unit = unit_base["specific_energy"]
        elif "UnitEnergy_in_cgs" in unit_base and "UnitMass_in_g" in unit_base:
            specific_energy_unit = \
                unit_base["UnitEnergy_in_cgs"] / unit_base["UnitMass_in_g"]
            specific_energy_unit = (specific_energy_unit, "(cm/s)**2")
        else:
            # Sane default
            specific_energy_unit = (1, "(km/s) ** 2")
        specific_energy_unit = _fix_unit_ordering(specific_energy_unit)
        self.specific_energy_unit = self.quan(*specific_energy_unit)
示例#24
0
    def __init__(self,
                 filename,
                 dataset_type="gadget_binary",
                 additional_fields=(),
                 unit_base=None,
                 n_ref=64,
                 over_refine_factor=1,
                 kernel_name=None,
                 index_ptype="all",
                 bounding_box=None,
                 header_spec="default",
                 field_spec="default",
                 ptype_spec="default",
                 units_override=None,
                 unit_system="cgs",
                 use_dark_factor=False,
                 w_0=-1.0,
                 w_a=0.0):
        if self._instantiated:
            return
        self._header = GadgetBinaryHeader(filename, header_spec)
        header_size = self._header.size
        if header_size != [256]:
            only_on_root(
                mylog.warn, "Non-standard header size is detected! "
                "Gadget-2 standard header is 256 bytes, but yours is %s. "
                "Make sure a non-standard header is actually expected. "
                "Otherwise something is wrong, "
                "and you might want to check how the dataset is loaded. "
                "Futher information about header specification can be found in "
                "https://yt-project.org/docs/dev/examining/loading_data.html#header-specification.",
                header_size)
        self._field_spec = self._setup_binary_spec(field_spec,
                                                   gadget_field_specs)
        self._ptype_spec = self._setup_binary_spec(ptype_spec,
                                                   gadget_ptype_specs)
        self.index_ptype = index_ptype
        self.storage_filename = None
        if unit_base is not None and "UnitLength_in_cm" in unit_base:
            # We assume this is comoving, because in the absence of comoving
            # integration the redshift will be zero.
            unit_base['cmcm'] = 1.0 / unit_base["UnitLength_in_cm"]
        self._unit_base = unit_base
        if bounding_box is not None:
            bbox = np.array(bounding_box, dtype="float64")
            if bbox.shape == (2, 3):
                bbox = bbox.transpose()
            self.domain_left_edge = bbox[:, 0]
            self.domain_right_edge = bbox[:, 1]
        else:
            self.domain_left_edge = self.domain_right_edge = None
        if units_override is not None:
            raise RuntimeError(
                "units_override is not supported for GadgetDataset. " +
                "Use unit_base instead.")

        # Set dark energy parameters before cosmology object is created
        self.use_dark_factor = use_dark_factor
        self.w_0 = w_0
        self.w_a = w_a

        super(GadgetDataset,
              self).__init__(filename,
                             dataset_type=dataset_type,
                             unit_system=unit_system,
                             n_ref=n_ref,
                             over_refine_factor=over_refine_factor,
                             kernel_name=kernel_name)
        if self.cosmological_simulation:
            self.time_unit.convert_to_units('s/h')
            self.length_unit.convert_to_units('kpccm/h')
            self.mass_unit.convert_to_units('g/h')
        else:
            self.time_unit.convert_to_units('s')
            self.length_unit.convert_to_units('kpc')
            self.mass_unit.convert_to_units('Msun')
Example #25
    def project_light_cone(self, field_of_view, image_resolution, field,
                           weight_field=None, photon_field=False,
                           save_stack=True, save_final_image=True,
                           save_slice_images=False,
                           cmap_name="algae",
                           njobs=1, dynamic=False):
        r"""Create projections for light cone, then add them together.

        Parameters
        ----------
        field_of_view : YTQuantity or tuple of (float, str)
            The field of view of the image and the units.
        image_resolution : YTQuantity or tuple of (float, str)
            The size of each image pixel and the units.
        field : string
            The projected field.
        weight_field : string
            the weight field of the projection.  This has the same meaning as
            in standard projections.
            Default: None.
        photon_field : bool
            if True, the projection data for each slice is divided by
            4 pi R^2, where R is the luminosity distance between the observer
            and the slice redshift.
            Default: False.
        save_stack : bool
            if True, the light cone data including each individual
            slice is written to an hdf5 file.
            Default: True.
        save_final_image : bool
            if True, save an image of the final light cone projection.
            Default: True.
        save_slice_images : bool
            save images for each individual projection slice.
            Default: False.
        cmap_name : string
            color map for images.
            Default: "algae".
        njobs : int
            The number of parallel jobs over which the light cone projection
            will be split.  Choose -1 for one processor per individual
            projection and 1 to have all processors work together on each
            projection.
            Default: 1.
        dynamic : bool
            If True, use dynamic load balancing to create the projections.
            Default: False.

        """

        if isinstance(field_of_view, tuple) and len(field_of_view) == 2:
            field_of_view = self.simulation.quan(field_of_view[0],
                                                 field_of_view[1])
        elif not isinstance(field_of_view, YTArray):
            raise RuntimeError("field_of_view argument must be either a YTQuantity " +
                               "or a tuple of type (float, str).")
        if isinstance(image_resolution, tuple) and len(image_resolution) == 2:
            image_resolution = self.simulation.quan(image_resolution[0],
                                                    image_resolution[1])
        elif not isinstance(image_resolution, YTArray):
            raise RuntimeError("image_resolution argument must be either a YTQuantity " +
                               "or a tuple of type (float, str).")

        # Calculate number of pixels on a side.
        pixels = (field_of_view / image_resolution).in_units("")

        # Clear projection stack.
        projection_stack = []
        projection_weight_stack = []
        if "object" in self.light_cone_solution[-1]:
            del self.light_cone_solution[-1]["object"]

        # for q, output in enumerate(self.light_cone_solution):
        all_storage = {}
        for my_storage, output in parallel_objects(self.light_cone_solution,
                                                   storage=all_storage,
                                                   dynamic=dynamic):
            output["object"] = load(output["filename"])
            output["object"].parameters.update(self.set_parameters)

            # Calculate fraction of box required for width corresponding to
            # requested image size.
            proper_box_size = self.simulation.box_size / \
              (1.0 + output["redshift"])
            output["box_width_fraction"] = (output["box_width_per_angle"] *
                                            field_of_view).in_units("")
            
            frb = _light_cone_projection(output, field, pixels,
                                         weight_field=weight_field)

            if photon_field:
                # Decrement the flux by the luminosity distance.
                # Assume field in frb is in erg/s/cm^2/Hz
                dL = self.cosmology.luminosity_distance(self.observer_redshift,
                                                        output["redshift"])
                proper_box_size = self.simulation.box_size / \
                  (1.0 + output["redshift"])
                pixel_area = (proper_box_size.in_cgs() / pixels)**2  # in proper cm^2
                factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
                mylog.info("Distance to slice = %s" % dL)
                frb[field] *= factor  # in erg/s/cm^2/Hz on the observer's image plane.

            if weight_field is None:
                my_storage.result = {"field": frb[field]}
            else:
                my_storage.result = {"field": (frb[field] *
                                               frb["weight_field"]),
                                     "weight_field": frb["weight_field"]}

            del output["object"]

        # Combine results from each slice.
        all_slices = list(all_storage.keys())
        all_slices.sort()
        for my_slice in all_slices:
            if save_slice_images:
                name = os.path.join(self.output_dir,
                                    "%s_%04d_%04d" %
                                    (self.output_prefix,
                                     my_slice, len(self.light_cone_solution)))
                if weight_field is None:
                    my_image = all_storage[my_slice]["field"]
                else:
                    my_image = all_storage[my_slice]["field"] / \
                      all_storage[my_slice]["weight_field"]
                only_on_root(write_image, np.log10(my_image),
                             "%s_%s.png" % (name, field), cmap_name=cmap_name)

            projection_stack.append(all_storage[my_slice]["field"])
            if weight_field is not None:
                projection_weight_stack.append(all_storage[my_slice]["weight_field"])

        projection_stack = self.simulation.arr(projection_stack)
        projection_weight_stack = self.simulation.arr(projection_weight_stack)
                
        # Add up slices to make light cone projection.
        if weight_field is None:
            light_cone_projection = projection_stack.sum(axis=0)
        else:
            light_cone_projection = \
              projection_stack.sum(axis=0) / \
              self.simulation.arr(projection_weight_stack).sum(axis=0)

        filename = os.path.join(self.output_dir, self.output_prefix)

        # Write image.
        if save_final_image:
            only_on_root(write_image, np.log10(light_cone_projection),
                         "%s_%s.png" % (filename, field), cmap_name=cmap_name)

        # Write stack to hdf5 file.
        if save_stack:
            self._save_light_cone_stack(field, weight_field,
                projection_stack, projection_weight_stack,
                filename=filename,
                attrs={"field_of_view": str(field_of_view),
                       "image_resolution": str(image_resolution)})
Example #26
    def project_light_cone(self,
                           field_of_view,
                           image_resolution,
                           field,
                           weight_field=None,
                           photon_field=False,
                           save_stack=True,
                           save_final_image=True,
                           save_slice_images=False,
                           cmap_name=None,
                           njobs=1,
                           dynamic=False):
        r"""Create projections for light cone, then add them together.

        Parameters
        ----------
        field_of_view : YTQuantity or tuple of (float, str)
            The field of view of the image and the units.
        image_resolution : YTQuantity or tuple of (float, str)
            The size of each image pixel and the units.
        field : string
            The projected field.
        weight_field : string
            the weight field of the projection.  This has the same meaning as
            in standard projections.
            Default: None.
        photon_field : bool
            if True, the projection data for each slice is divided by
            4 pi R^2, where R is the luminosity distance between the observer
            and the slice redshift.
            Default: False.
        save_stack : bool
            if True, the light cone data including each individual
            slice is written to an hdf5 file.
            Default: True.
        save_final_image : bool
            if True, save an image of the final light cone projection.
            Default: True.
        save_slice_images : bool
            save images for each individual projection slice.
            Default: False.
        cmap_name : string
            color map for images.
            Default: your default colormap.
        njobs : int
            The number of parallel jobs over which the light cone projection
            will be split.  Choose -1 for one processor per individual
            projection and 1 to have all processors work together on each
            projection.
            Default: 1.
        dynamic : bool
            If True, use dynamic load balancing to create the projections.
            Default: False.

        """

        if cmap_name is None:
            cmap_name = ytcfg.get("yt", "default_colormap")

        if isinstance(field_of_view, tuple) and len(field_of_view) == 2:
            field_of_view = self.simulation.quan(field_of_view[0],
                                                 field_of_view[1])
        elif not isinstance(field_of_view, YTArray):
            raise RuntimeError(
                "field_of_view argument must be either a YTQuantity " +
                "or a tuple of type (float, str).")
        if isinstance(image_resolution, tuple) and len(image_resolution) == 2:
            image_resolution = self.simulation.quan(image_resolution[0],
                                                    image_resolution[1])
        elif not isinstance(image_resolution, YTArray):
            raise RuntimeError(
                "image_resolution argument must be either a YTQuantity " +
                "or a tuple of type (float, str).")

        # Calculate number of pixels on a side.
        pixels = int((field_of_view / image_resolution).in_units(""))

        # Clear projection stack.
        projection_stack = []
        projection_weight_stack = []
        if "object" in self.light_cone_solution[-1]:
            del self.light_cone_solution[-1]["object"]

        # for q, output in enumerate(self.light_cone_solution):
        all_storage = {}
        for my_storage, output in parallel_objects(self.light_cone_solution,
                                                   storage=all_storage,
                                                   dynamic=dynamic):
            output["object"] = load(output["filename"])
            output["object"].parameters.update(self.set_parameters)

            # Calculate fraction of box required for width corresponding to
            # requested image size.
            proper_box_size = self.simulation.box_size / \
              (1.0 + output["redshift"])
            output["box_width_fraction"] = (output["box_width_per_angle"] *
                                            field_of_view).in_units("")

            frb = _light_cone_projection(output,
                                         field,
                                         pixels,
                                         weight_field=weight_field)

            if photon_field:
                # Decrement the flux by the luminosity distance.
                # Assume field in frb is in erg/s/cm^2/Hz
                dL = self.cosmology.luminosity_distance(
                    self.observer_redshift, output["redshift"])
                proper_box_size = self.simulation.box_size / \
                  (1.0 + output["redshift"])
                pixel_area = (proper_box_size.in_cgs() /
                              pixels)**2  #in proper cm^2
                factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
                mylog.info("Distance to slice = %s" % dL)
                frb[field] *= factor  # in erg/s/cm^2/Hz on the observer's image plane.

            if weight_field is None:
                my_storage.result = {"field": frb[field]}
            else:
                my_storage.result = {
                    "field": (frb[field] * frb["weight_field"]),
                    "weight_field": frb["weight_field"]
                }

            del output["object"]

        # Combine results from each slice.
        all_slices = list(all_storage.keys())
        all_slices.sort()
        for my_slice in all_slices:
            if save_slice_images:
                name = os.path.join(
                    self.output_dir,
                    "%s_%04d_%04d" % (self.output_prefix, my_slice,
                                      len(self.light_cone_solution)))
                if weight_field is None:
                    my_image = all_storage[my_slice]["field"]
                else:
                    my_image = all_storage[my_slice]["field"] / \
                      all_storage[my_slice]["weight_field"]
                only_on_root(write_image,
                             np.log10(my_image),
                             "%s_%s.png" % (name, field),
                             cmap_name=cmap_name)

            projection_stack.append(all_storage[my_slice]["field"])
            if weight_field is not None:
                projection_weight_stack.append(all_storage[my_slice]["weight_field"])

        projection_stack = self.simulation.arr(projection_stack)
        projection_weight_stack = self.simulation.arr(projection_weight_stack)

        # Add up slices to make light cone projection.
        if weight_field is None:
            light_cone_projection = projection_stack.sum(axis=0)
        else:
            light_cone_projection = \
              projection_stack.sum(axis=0) / \
              self.simulation.arr(projection_weight_stack).sum(axis=0)

        filename = os.path.join(self.output_dir, self.output_prefix)

        # Write image.
        if save_final_image:
            only_on_root(write_image,
                         np.log10(light_cone_projection),
                         "%s_%s.png" % (filename, field),
                         cmap_name=cmap_name)

        # Write stack to hdf5 file.
        if save_stack:
            self._save_light_cone_stack(field,
                                        weight_field,
                                        projection_stack,
                                        projection_weight_stack,
                                        filename=filename,
                                        attrs={
                                            "field_of_view":
                                            str(field_of_view),
                                            "image_resolution":
                                            str(image_resolution)
                                        })
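
A hedged end-to-end sketch of the light cone workflow above (the dataset path, seed, and field choice are placeholders, and the import path matches the yt version these examples come from):

    from yt.analysis_modules.cosmological_observation.api import LightCone

    lc = LightCone("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo",
                   near_redshift=0.0, far_redshift=0.1)
    lc.calculate_light_cone_solution(seed=123456789)
    lc.project_light_cone((600.0, "arcmin"), (60.0, "arcsec"), "density",
                          weight_field=None)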
Example #27
    def _initialize_index(self):
        ds = self.dataset
        only_on_root(
            mylog.info,
            "Allocating for %0.3e particles",
            self.total_particles,
            global_rootonly=True,
        )

        # if we have not yet set domain_left_edge and domain_right_edge then do
        # an I/O pass over the particle coordinates to determine a bounding box
        if self.ds.domain_left_edge is None:
            min_ppos = np.empty(3, dtype="float64")
            min_ppos[:] = np.nan
            max_ppos = np.empty(3, dtype="float64")
            max_ppos[:] = np.nan
            only_on_root(
                mylog.info,
                "Bounding box cannot be inferred from metadata, reading "
                "particle positions to infer bounding box",
            )
            for df in self.data_files:
                for _, ppos in self.io._yield_coordinates(df):
                    min_ppos = np.nanmin(np.vstack([min_ppos, ppos]), axis=0)
                    max_ppos = np.nanmax(np.vstack([max_ppos, ppos]), axis=0)
            only_on_root(
                mylog.info,
                "Load this dataset with bounding_box=[%s, %s] to avoid I/O "
                "overhead from inferring bounding_box." % (min_ppos, max_ppos),
            )
            ds.domain_left_edge = ds.arr(1.05 * min_ppos, "code_length")
            ds.domain_right_edge = ds.arr(1.05 * max_ppos, "code_length")
            ds.domain_width = ds.domain_right_edge - ds.domain_left_edge

        # use a trivial morton index for datasets containing a single chunk
        if len(self.data_files) == 1:
            order1 = 1
            order2 = 1
        else:
            order1 = ds.index_order[0]
            order2 = ds.index_order[1]

        if order1 == 1 and order2 == 1:
            dont_cache = True
        else:
            dont_cache = False

        # If we have applied a bounding box then we can't cache the
        # ParticleBitmap because it is domain dependent
        if getattr(ds, "_domain_override", False):
            dont_cache = True

        if not hasattr(self.ds, "_file_hash"):
            self.ds._file_hash = self._generate_hash()

        self.regions = ParticleBitmap(
            ds.domain_left_edge,
            ds.domain_right_edge,
            ds.periodicity,
            self.ds._file_hash,
            len(self.data_files),
            index_order1=order1,
            index_order2=order2,
        )

        # Load Morton index from file if provided
        if getattr(ds, "index_filename", None) is None:
            fname = ds.parameter_filename + ".index{}_{}.ewah".format(
                self.regions.index_order1, self.regions.index_order2
            )
        else:
            fname = ds.index_filename

        dont_load = dont_cache and not hasattr(ds, "index_filename")
        try:
            if dont_load:
                raise OSError
            rflag = self.regions.load_bitmasks(fname)
            rflag = self.regions.check_bitmasks()
            self._initialize_frontend_specific()
            if rflag == 0:
                raise OSError
        except (OSError, struct.error):
            self.regions.reset_bitmasks()
            self._initialize_coarse_index()
            self._initialize_refined_index()
            wdir = os.path.dirname(fname)
            if not dont_cache and os.access(wdir, os.W_OK):
                # Sometimes os mis-reports whether a directory is writable,
                # So pass if writing the bitmask file fails.
                try:
                    self.regions.save_bitmasks(fname)
                except OSError:
                    pass
            rflag = self.regions.check_bitmasks()
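
As the log message above suggests, supplying a bounding box at load time avoids the coordinate-reading pass; a hedged sketch with placeholder values:

    import yt

    # [[xmin, xmax], [ymin, ymax], [zmin, zmax]] in code units (illustrative).
    bbox = [[-1000.0, 1000.0], [-1000.0, 1000.0], [-1000.0, 1000.0]]
    ds = yt.load("snapshot_010.hdf5", bounding_box=bbox)  # path is a placeholder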
Example #28
    def _parse_parameter_file(self):

        hvals = self._get_hvals()

        self.dimensionality = 3
        self.refine_by = 2
        self.parameters["HydroMethod"] = "sph"
        # Set standard values

        # We may have an overridden bounding box.
        if self.domain_left_edge is None and hvals["BoxSize"] != 0:
            self.domain_left_edge = np.zeros(3, "float64")
            self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]

        self.domain_dimensions = np.ones(3, "int32")
        self.periodicity = (True, True, True)

        self.cosmological_simulation = 1

        try:
            self.current_redshift = hvals["Redshift"]
        except KeyError:
            # Probably not a cosmological dataset, we should just set
            # z = 0 and let the user know
            self.current_redshift = 0.0
            only_on_root(mylog.info, "Redshift is not set in Header. Assuming z=0.")

        try:
            self.omega_lambda = hvals["OmegaLambda"]
            self.omega_matter = hvals["Omega0"]
            self.hubble_constant = hvals["HubbleParam"]
        except KeyError:
            # If these are not set it is definitely not a cosmological dataset.
            self.omega_lambda = 0.0
            self.omega_matter = 1.0  # Just in case somebody asks for it.
            # Hubble is set below for Omega Lambda = 0.

        # According to the Gadget manual, OmegaLambda will be zero for
        # non-cosmological datasets.  However, it may be the case that
        # individuals are running cosmological simulations *without* Lambda, in
        # which case we may be doing something incorrect here.
        # It may be possible to deduce whether ComovingIntegration is on
        # somehow, but opinions on this vary.
        if self.omega_lambda == 0.0:
            only_on_root(
                mylog.info, "Omega Lambda is 0.0, so we are turning off Cosmology."
            )
            self.hubble_constant = 1.0  # So that scaling comes out correct
            self.cosmological_simulation = 0
            self.current_redshift = 0.0
            # This may not be correct.
            self.current_time = hvals["Time"]
        else:
            # Now we calculate our time based on the cosmology, because in
            # ComovingIntegration hvals["Time"] will in fact be the expansion
            # factor, not the actual integration time, so we re-calculate
            # global time from our Cosmology.
            cosmo = Cosmology(
                hubble_constant=self.hubble_constant,
                omega_matter=self.omega_matter,
                omega_lambda=self.omega_lambda,
            )
            self.current_time = cosmo.lookback_time(self.current_redshift, 1e6)
            only_on_root(
                mylog.info,
                "Calculating time from %0.3e to be %0.3e seconds",
                hvals["Time"],
                self.current_time,
            )
        self.parameters = hvals

        prefix = os.path.abspath(
            os.path.join(
                os.path.dirname(self.parameter_filename),
                os.path.basename(self.parameter_filename).split(".", 1)[0],
            )
        )

        if hvals["NumFiles"] > 1:
            self.filename_template = f"{prefix}.%(num)s{self._suffix}"
        else:
            self.filename_template = self.parameter_filename

        self.file_count = hvals["NumFiles"]