Example #1
def _ramses_particle_file_handler(fname, foffsets, data_types, subset, fields,
                                  count):
    '''General file handler, called by _read_particle_subset

    Parameters
    ----------
    fname : string
        filename to read from
    foffsets : dict
        Offsets in file of the fields
    data_types : dict
        Data type of the fields
    subset : ``RAMSESDomainSubset``
        A RAMSES domain subset object
    fields : list of tuple
        The fields to read
    count : integer
        The number of elements to read
    '''
    tr = {}
    ds = subset.domain.ds
    with FortranFile(fname) as fd:
        # We do *all* conversion into boxlen here.
        # This means that no other conversions need to be applied to convert
        # positions into the same domain as the octs themselves.
        for field in sorted(fields, key=lambda a: foffsets[a]):
            if count == 0:
                tr[field] = np.empty(0, dtype=data_types[field])
                continue
            fd.seek(foffsets[field])
            dt = data_types[field]
            tr[field] = fd.read_vector(dt)
            if field[1].startswith("particle_position"):
                np.divide(tr[field], ds["boxlen"], tr[field])
            if ds.cosmological_simulation and field[1] == "particle_birth_time":
                conformal_age = tr[field]
                tr[field] = convert_ramses_ages(ds, conformal_age)
                # arbitrarily set particles with zero conformal_age to zero
                # particle_age. This corresponds to DM particles.
                tr[field][conformal_age == 0] = 0
    return tr
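A self-contained sketch of the read pattern above: fields are visited in offset order so the file is traversed sequentially. Plain struct I/O stands in for FortranFile, and the file name, offsets, and field names are made up for illustration.

import struct

import numpy as np

# Toy file with two 4-element double "fields" at known byte offsets.
with open("toy_particles.bin", "wb") as f:
    f.write(struct.pack("4d", 1.0, 2.0, 3.0, 4.0))  # field "a" at offset 0
    f.write(struct.pack("4d", 5.0, 6.0, 7.0, 8.0))  # field "b" at offset 32

foffsets = {"b": 32, "a": 0}
tr = {}
with open("toy_particles.bin", "rb") as fd:
    # Sorting by offset keeps the reads sequential, as in the handler above.
    for field in sorted(foffsets, key=foffsets.get):
        fd.seek(foffsets[field])
        tr[field] = np.frombuffer(fd.read(4 * 8), dtype="f8")
print(tr["a"], tr["b"])  # [1. 2. 3. 4.] [5. 6. 7. 8.]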
Example #2
    def _get_particle_positions(self):
        """Read the particles and return them in code_units"""
        data = getattr(self, '_particle_positions', None)
        if data is not None:
            return data

        with FortranFile(self.ds.parameter_filename) as fpu:
            params = fpu.read_attrs(HEADER_ATTRIBUTES)

            todo = _todo_from_attributes(
                ('particle_identifier', 'raw_position_x', 'raw_position_y',
                 'raw_position_z'))

            nhalos = params['nhalos'] + params['nsubs']
            data = np.zeros((nhalos, 3))
            offset_map = np.zeros((nhalos, 2), dtype=int)
            for ihalo in range(nhalos):
                ipos = fpu.tell()
                for it in todo:
                    if isinstance(it, int):
                        fpu.skip(it)
                    elif it[0][0] != 'particle_identifier':
                        # Small optimisation here: we can read as vector
                        # dt = fpu.read_attrs(it)
                        # data[ihalo, 0] = dt['particle_position_x']
                        # data[ihalo, 1] = dt['particle_position_y']
                        # data[ihalo, 2] = dt['particle_position_z']
                        data[ihalo, :] = fpu.read_vector(it[0][-1])
                    else:
                        halo_id = fpu.read_int()
                        offset_map[ihalo, 0] = halo_id
                        offset_map[ihalo, 1] = ipos
        data = self.ds.arr(data, "code_length") + self.ds.domain_width / 2

        # Make sure halos are loaded in increasing halo_id order
        assert np.all(np.diff(offset_map[:, 0]) > 0)

        # Cache particle positions, since we do not expect a (very) large number of halos anyway
        self._particle_positions = data
        self._offsets = offset_map
        return data
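The getattr guard at the top implements a simple lazy cache; a minimal standalone sketch of that pattern, with a random array standing in for the Fortran read:

import numpy as np

class HaloCatalog:
    def _get_particle_positions(self):
        data = getattr(self, "_particle_positions", None)
        if data is not None:
            return data  # cache hit: skip the expensive file read
        data = np.random.random((4, 3))  # stands in for the FortranFile read
        self._particle_positions = data
        return data

cat = HaloCatalog()
# The second call returns the exact same array, without re-reading anything.
assert cat._get_particle_positions() is cat._get_particle_positions()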
Example #3
    def _read_fluid_selection(self, chunks, selector, fields, size):
        tr = defaultdict(list)

        # Set of field types
        ftypes = set(f[0] for f in fields)
        for chunk in chunks:
            # Gather fields by type to minimize i/o operations
            for ft in ftypes:
                # Get all the fields of the same type
                field_subs = list(filter(lambda f: f[0] == ft, fields))

                # Loop over subsets
                for subset in chunk.objs:
                    fname = None
                    for fh in subset.domain.field_handlers:
                        if fh.ftype == ft:
                            file_handler = fh
                            fname = fh.fname
                            break

                    if fname is None:
                        raise YTFieldTypeNotFound(ft)

                    # Now we read the entire thing
                    with FortranFile(fname) as fd:
                        # This contains the boundary information, so we skim through
                        # and pick off the right vectors
                        rv = subset.fill(fd, field_subs, selector,
                                         file_handler)
                    for ft, f in field_subs:
                        d = rv.pop(f)
                        mylog.debug(
                            "Filling %s with %s (%0.3e %0.3e) (%s zones)", f,
                            d.size, d.min(), d.max(), d.size)
                        tr[(ft, f)].append(d)
        d = {}
        for field in fields:
            d[field] = np.concatenate(tr.pop(field))

        return d
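A sketch of the group-by-field-type step, which is what lets each file be opened once per type rather than once per field (field names below are illustrative):

from collections import defaultdict

fields = [("gravity", "Potential"), ("ramses", "Density"),
          ("ramses", "Pressure")]
by_type = defaultdict(list)
for ftype, fname in fields:
    by_type[ftype].append((ftype, fname))
print(dict(by_type))
# {'gravity': [('gravity', 'Potential')],
#  'ramses': [('ramses', 'Density'), ('ramses', 'Pressure')]}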
Example #4
    def _parse_parameter_file(self):
        with FortranFile(self.parameter_filename) as fpu:
            params = fpu.read_attrs(HEADER_ATTRIBUTES)
        self.dimensionality = 3
        self.unique_identifier = int(
            os.stat(self.parameter_filename)[stat.ST_CTIME])
        # Domain related things
        self.filename_template = self.parameter_filename
        self.file_count = 1
        nz = 1 << self.over_refine_factor
        self.domain_dimensions = np.ones(3, "int32") * nz

        # Set things up
        self.cosmological_simulation = 1
        self.current_redshift = (1.0 / params["aexp"]) - 1.0
        self.omega_matter = params["omega_t"]
        self.current_time = self.quan(params["age"], "Gyr")
        self.omega_lambda = 0.724  # hard coded if not inferred from parent ds
        self.hubble_constant = 0.7  # hard coded if not inferred from parent ds
        self.periodicity = (True, True, True)
        self.particle_types = "halos"
        self.particle_types_raw = "halos"

        # Inherit stuff from parent ds -- if they exist
        for k in ("omega_lambda", "hubble_constant", "omega_matter",
                  "omega_radiation"):
            v = getattr(self.parent_ds, k, None)
            if v is not None:
                setattr(self, k, v)

        self.domain_left_edge = np.array([0.0, 0.0, 0.0])
        self.domain_right_edge = (
            self.parent_ds.domain_right_edge.to("Mpc").value *
            self._code_length_to_Mpc)

        self.parameters.update(params)
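A quick numeric check of the cosmology bookkeeping above: the expansion factor aexp read from the header maps to redshift via z = 1/aexp - 1 (the value below is illustrative).

aexp = 0.25  # illustrative expansion factor from the header
current_redshift = (1.0 / aexp) - 1.0
print(current_redshift)  # 3.0: an output written at aexp=0.25 is at z=3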
Example #5
    def read_header(self):
        if not self.exists:
            self.field_offsets = {}
            self.field_types = {}
            self.local_particle_count = 0
            return
        fd = FortranFile(self.fname)
        fd.seek(0, os.SEEK_END)
        flen = fd.tell()
        fd.seek(0)
        hvals = {}
        # Read the header of the file
        attrs = self.attrs

        hvals.update(fd.read_attrs(attrs))
        self._header = hvals

        # There is a subtlety here: we only want one domain to be
        # read, as RAMSES writes all the sinks in all the
        # domains. So we set local_particle_count to 0 except for
        # the first domain to be read.
        if getattr(self.ds, '_sink_file_flag', False):
            self.local_particle_count = 0
        else:
            self.ds._sink_file_flag = True
            self.local_particle_count = hvals['nsink']

        # Read the fields + add the sink properties
        if self.has_part_descriptor:
            fields = _read_part_file_descriptor(self.file_descriptor)
        else:
            fields = list(self.known_fields)

        for i in range(self.ds.dimensionality * 2 + 1):
            for j in range(self.ds.max_level, self.ds.min_level):
                fields.append(("particle_prop_%s_%s" % (i, j), "d"))

        field_offsets = {}
        _pfields = {}

        # Fill the fields, offsets and types
        self.fields = []
        for field, vtype in fields:
            self.fields.append(field)
            if fd.tell() >= flen:
                break
            field_offsets[self.ptype, field] = fd.tell()
            _pfields[self.ptype, field] = vtype
            fd.skip(1)
        self.field_offsets = field_offsets
        self.field_types = _pfields
        fd.close()
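A self-contained sketch of the offset-scanning loop above: each Fortran unformatted record is framed by 4-byte length markers, so recording fd.tell() and then fd.skip(1) per field builds an offset table without reading any payloads. Plain struct I/O stands in for FortranFile, with made-up field names.

import struct

def write_record(f, values):
    # Fortran unformatted record: length marker, payload, length marker.
    payload = struct.pack("%dd" % len(values), *values)
    marker = struct.pack("i", len(payload))
    f.write(marker + payload + marker)

with open("toy_records.bin", "wb") as f:
    for vec in ([1.0, 2.0], [3.0, 4.0], [5.0, 6.0]):
        write_record(f, vec)

field_offsets = {}
with open("toy_records.bin", "rb") as fd:
    for field in ["particle_position_x", "particle_position_y",
                  "particle_mass"]:
        field_offsets[field] = fd.tell()       # like field_offsets[ptype, field]
        (nbytes,) = struct.unpack("i", fd.read(4))
        fd.seek(nbytes + 4, 1)                 # like fd.skip(1)
print(field_offsets)
# {'particle_position_x': 0, 'particle_position_y': 24, 'particle_mass': 48}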
Example #6
    def read_header(self):
        if not self.exists:
            self.field_offsets = {}
            self.field_types = {}
            self.local_particle_count = 0
            return

        fd = FortranFile(self.fname)
        fd.seek(0, os.SEEK_END)
        flen = fd.tell()
        fd.seek(0)
        hvals = {}
        attrs = self.attrs
        hvals.update(fd.read_attrs(attrs))
        self.header = hvals
        self.local_particle_count = hvals['npart']
        extra_particle_fields = self.ds._extra_particle_fields

        if self.has_part_descriptor:
            particle_fields = _read_part_file_descriptor(self.file_descriptor)
        else:
            particle_fields = list(self.known_fields)

            if extra_particle_fields is not None:
                particle_fields += extra_particle_fields

        if hvals["nstar_tot"] > 0 and extra_particle_fields is not None:
            particle_fields += [("particle_birth_time", "d"),
                                ("particle_metallicity", "d")]

        field_offsets = {}
        _pfields = {}

        ptype = self.ptype

        # Read offsets
        for field, vtype in particle_fields:
            if fd.tell() >= flen:
                break
            field_offsets[ptype, field] = fd.tell()
            _pfields[ptype, field] = vtype
            fd.skip(1)

        iextra = 0
        while fd.tell() < flen:
            iextra += 1
            field, vtype = ('particle_extra_field_%i' % iextra, 'd')
            particle_fields.append((field, vtype))

            field_offsets[ptype, field] = fd.tell()
            _pfields[ptype, field] = vtype
            fd.skip(1)

        fd.close()

        if iextra > 0 and not self.ds._warned_extra_fields['io']:
            w = ("Detected %s extra particle fields assuming kind "
                 "`double`. Consider using the `extra_particle_fields` "
                 "keyword argument if you have unexpected behavior.")
            mylog.warning(w % iextra)
            self.ds._warned_extra_fields['io'] = True

        self.field_offsets = field_offsets
        self.field_types = _pfields
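The extra_particle_fields hook consumed above corresponds to a load-time option of the RAMSES frontend; a usage sketch (the output path is illustrative):

import yt

# Declare two extra double-precision columns stored after the standard
# particle fields; read_header will then register them with real names
# instead of falling back to particle_extra_field_N.
ds = yt.load(
    "output_00080/info_00080.txt",  # illustrative RAMSES output
    extra_particle_fields=[("particle_birth_time", "d"),
                           ("particle_metallicity", "d")],
)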
Example #7
    def create_cooling_fields(self):
        num = os.path.basename(
            self.ds.parameter_filename).split(".")[0].split("_")[1]
        filename = "%s/cooling_%05i.out" % (os.path.dirname(
            self.ds.parameter_filename), int(num))

        if not os.path.exists(filename):
            mylog.warning('This output has no cooling fields')
            return

        # Function to create the cooling fields
        def _create_field(name, interp_object, unit):
            def _func(field, data):
                shape = data["temperature"].shape
                d = {
                    'lognH': np.log10(_X * data["density"] / mh).ravel(),
                    'logT': np.log10(data["temperature"]).ravel()
                }
                rv = interp_object(d).reshape(shape)
                if name[-1] != 'mu':
                    rv = 10**interp_object(d).reshape(shape)
                cool = data.ds.arr(rv, unit)
                if 'metal' in name[-1].split('_'):
                    # RAMSES uses Zsolar = 0.02
                    cool = cool * data['metallicity'] / 0.02
                elif 'compton' in name[-1].split('_'):
                    # Compton cooling/heating is written to file in erg/s
                    cool = data.ds.arr(rv, unit + '/cm**3')
                    cool = cool / data['number_density']
                return cool

            self.add_field(name=name,
                           sampling_type="cell",
                           function=_func,
                           units=unit)

        # Load cooling files
        avals = {}
        tvals = {}
        with FortranFile(filename) as fd:
            n1, n2 = fd.read_vector('i')
            for ax in _cool_axes:
                avals[ax] = fd.read_vector('d')
            for i, (tname, unit) in enumerate(_cool_arrs):
                var = fd.read_vector('d')
                if var.size == n1 and i == 0:
                    # If this case occurs, the cooling files were produced
                    # pre-2010 in a format that is no longer supported
                    mylog.warning(
                        'This cooling file format is no longer supported. '
                        'Cooling field loading skipped.')
                    return
                if var.size == n1 * n2:
                    tvals[tname] = dict(data=var.reshape((n1, n2), order='F'),
                                        unit=unit)
                else:
                    var = var.reshape((n1, n2, var.size // (n1 * n2)),
                                      order='F')
                    for i in range(var.shape[-1]):
                        tvals[_cool_species[i]] = dict(data=var[:, :, i],
                                                       unit="1/cm**3")

        # Add the mu field first, as it is needed for the number density
        interp = BilinearFieldInterpolator(tvals['mu']['data'],
                                           (avals["lognH"], avals["logT"]),
                                           ["lognH", "logT"],
                                           truncate=True)
        _create_field(("gas", 'mu'), interp, tvals['mu']['unit'])

        # Add the number density field, based on mu
        def _number_density(field, data):
            return data[('gas', 'density')] / mp / data['mu']

        self.add_field(name=('gas', 'number_density'),
                       sampling_type="cell",
                       function=_number_density,
                       units=number_density_unit)

        # Add the cooling and heating fields, which need the number density field
        for key in tvals:
            if key != 'mu':
                interp = BilinearFieldInterpolator(
                    tvals[key]['data'], (avals["lognH"], avals["logT"]),
                    ["lognH", "logT"],
                    truncate=True)
                _create_field(("gas", key), interp, tvals[key]['unit'])

        # Add total cooling and heating fields
        def _all_cool(field, data):
            return data['cooling_primordial'] + data['cooling_metal'] + data[
                'cooling_compton']

        def _all_heat(field, data):
            return data['heating_primordial'] + data['heating_compton']

        self.add_field(name=('gas', 'cooling_total'),
                       sampling_type="cell",
                       function=_all_cool,
                       units=cooling_function_units)
        self.add_field(name=('gas', 'heating_total'),
                       sampling_type="cell",
                       function=_all_heat,
                       units=cooling_function_units)

        # Add net cooling fields
        def _net_cool(field, data):
            return data['cooling_total'] - data['heating_total']

        self.add_field(name=('gas', 'cooling_net'),
                       sampling_type="cell",
                       function=_net_cool,
                       units=cooling_function_units)
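Once a cooling_XXXXX.out file sits next to the info file, the fields registered above behave like any other derived field; a usage sketch (the path is illustrative):

import yt

ds = yt.load("output_00080/info_00080.txt")  # illustrative path
ad = ds.all_data()
net_cooling = ad["gas", "cooling_net"]  # cooling_total - heating_total
mean_mol_weight = ad["gas", "mu"]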
Example #8
    def detect_fields(cls, ds):
        # Try to get the detected fields
        detected_fields = cls.get_detected_fields(ds)
        if detected_fields:
            return detected_fields

        num = os.path.basename(
            ds.parameter_filename).split(".")[0].split("_")[1]
        testdomain = 1  # Just pick the first domain file to read
        basepath = os.path.abspath(os.path.dirname(ds.parameter_filename))
        basename = "%s/%%s_%s.out%05i" % (basepath, num, testdomain)
        fname = basename % 'hydro'
        fname_desc = os.path.join(basepath, cls.file_descriptor)

        attrs = cls.attrs
        with FortranFile(fname) as fd:
            hvals = fd.read_attrs(attrs)
        cls.parameters = hvals

        # Store some metadata
        ds.gamma = hvals['gamma']
        nvar = hvals['nvar']

        ok = False

        # Either the fields are given by dataset
        if ds._fields_in_file is not None:
            fields = list(ds._fields_in_file)
            ok = True
        elif os.path.exists(fname_desc):
            # Or there is a hydro file descriptor
            mylog.debug('Reading hydro file descriptor.')
            # For now, we can only read double precision fields
            fields = [e[0] for e in _read_fluid_file_descriptor(fname_desc)]

            # We get no fields for old-style hydro file descriptor
            ok = len(fields) > 0
        elif cls.config_field and ytcfg.has_section(cls.config_field):
            # Or this is given by the config
            cfg = ytcfg.get(cls.config_field, 'fields')
            fields = [line.strip() for line in cfg.split('\n')
                      if line.strip() != '']

            ok = True

        # Else, attempt autodetection
        if not ok:
            foldername = os.path.abspath(os.path.dirname(
                ds.parameter_filename))
            rt_flag = any(glob.glob(os.sep.join([foldername,
                                                 'info_rt_*.txt'])))
            if rt_flag:  # rt run
                if nvar < 10:
                    mylog.info('Detected RAMSES-RT file WITHOUT IR trapping.')

                    fields = [
                        "Density", "x-velocity", "y-velocity", "z-velocity",
                        "Pressure", "Metallicity", "HII", "HeII", "HeIII"
                    ]
                else:
                    mylog.info('Detected RAMSES-RT file WITH IR trapping.')

                    fields = [
                        "Density", "x-velocity", "y-velocity", "z-velocity",
                        "Pres_IR", "Pressure", "Metallicity", "HII", "HeII",
                        "HeIII"
                    ]
            else:
                if nvar < 5:
                    mylog.debug(
                        "nvar=%s is too small! yt doesn't currently "
                        "support 1D/2D runs in RAMSES", nvar)
                    raise ValueError("nvar=%s is too small" % nvar)
                # Basic hydro runs
                if nvar == 5:
                    fields = [
                        "Density", "x-velocity", "y-velocity", "z-velocity",
                        "Pressure"
                    ]
                if nvar > 5 and nvar < 11:
                    fields = [
                        "Density", "x-velocity", "y-velocity", "z-velocity",
                        "Pressure", "Metallicity"
                    ]
                # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
                if nvar == 11:
                    fields = [
                        "Density", "x-velocity", "y-velocity", "z-velocity",
                        "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
                        "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                        "Pressure"
                    ]
                if nvar > 11:
                    fields = [
                        "Density", "x-velocity", "y-velocity", "z-velocity",
                        "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
                        "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
                        "Pressure", "Metallicity"
                    ]
            mylog.debug(
                "No fields specified by user; automatically setting fields "
                "array to %s", fields)

        # Allow some wiggle room for users to add too many variables
        count_extra = 0
        while len(fields) < nvar:
            fields.append("var" + str(len(fields)))
            count_extra += 1
        if count_extra > 0:
            mylog.debug('Detected %s extra fluid fields.', count_extra)
        cls.field_list = [(cls.ftype, e) for e in fields]

        cls.set_detected_fields(ds, fields)

        return fields
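The first branch above (ds._fields_in_file) is fed by the fields keyword of yt.load; a usage sketch that bypasses autodetection for a plain nvar=5 hydro run (the path is illustrative):

import yt

ds = yt.load(
    "output_00080/info_00080.txt",  # illustrative path
    fields=["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure"],
)
print(ds.field_list)  # now includes the five names given above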
Example #9
    def detect_fields(cls, ds):
        # Try to get the detected fields
        detected_fields = cls.get_detected_fields(ds)
        if detected_fields:
            return detected_fields

        fname = ds.parameter_filename.replace("info_", "info_rt_")

        rheader = {}

        def read_rhs(cast):
            line = f.readline()
            p, v = line.split("=")
            rheader[p.strip()] = cast(v)

        with open(fname) as f:
            # Read nRTvar, nions, ngroups, iions
            for _ in range(4):
                read_rhs(int)
            f.readline()

            # Read X and Y fractions
            for _ in range(2):
                read_rhs(float)
            f.readline()

            # Read unit_np, unit_pfd
            for _ in range(2):
                read_rhs(float)

            # Read rt_c_frac
            # Note: when using a variable speed of light, this line contains
            # multiple values, one for the velocity at each level
            read_rhs(lambda line: [float(e) for e in line.split()])
            f.readline()

            # Read n star, t2star, g_star
            for _ in range(3):
                read_rhs(float)

            # Tricky part: the photon group properties follow here, but we
            # do not parse them for now
            mylog.debug("Not reading photon group properties")

            cls.rt_parameters = rheader

        ngroups = rheader["nGroups"]

        iout = int(str(ds).split("_")[1])
        basedir = os.path.split(ds.parameter_filename)[0]
        fname = os.path.join(basedir, cls.fname.format(iout=iout, icpu=1))
        with FortranFile(fname) as fd:
            cls.parameters = fd.read_attrs(cls.attrs)

        fields = cls.load_fields_from_yt_config()

        if not fields:
            fields = []
            tmp = [
                "Photon_density_%s",
                "Photon_flux_x_%s",
                "Photon_flux_y_%s",
                "Photon_flux_z_%s",
            ]
            for ng in range(ngroups):
                fields.extend([t % (ng + 1) for t in tmp])

        cls.field_list = [(cls.ftype, e) for e in fields]

        cls.set_detected_fields(ds, fields)
        return fields
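A self-contained sketch of the read_rhs helper on fake info_rt lines: each line is "name = value", split once at the equals sign and cast by the caller.

import io

f = io.StringIO("nRTvar      =  4\nX_fraction  =  0.76\n")
rheader = {}

def read_rhs(cast):
    p, v = f.readline().split("=")
    rheader[p.strip()] = cast(v)

read_rhs(int)    # integer parameter
read_rhs(float)  # float parameter
print(rheader)   # {'nRTvar': 4, 'X_fraction': 0.76}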
Example #10
    def detect_fields(cls, ds):
        # Try to get the detected fields
        detected_fields = cls.get_detected_fields(ds)
        if detected_fields:
            return detected_fields

        num = os.path.basename(
            ds.parameter_filename).split(".")[0].split("_")[1]
        testdomain = 1  # Just pick the first domain file to read
        basepath = os.path.abspath(os.path.dirname(ds.parameter_filename))
        basename = "%s/%%s_%s.out%05i" % (basepath, num, testdomain)
        fname = basename % "hydro"
        fname_desc = os.path.join(basepath, cls.file_descriptor)

        attrs = cls.attrs
        with FortranFile(fname) as fd:
            hvals = fd.read_attrs(attrs)
        cls.parameters = hvals

        # Store some metadata
        ds.gamma = hvals["gamma"]
        nvar = hvals["nvar"]

        ok = False

        if ds._fields_in_file is not None:
            # Case 1: fields are provided by users on construction of dataset
            fields = list(ds._fields_in_file)
            ok = True
        else:
            # Case 2: fields are provided by users in the config
            fields = cls.load_fields_from_yt_config()
            ok = len(fields) > 0

        if not ok and os.path.exists(fname_desc):
            # Case 3: there is a hydro file descriptor
            mylog.debug("Reading hydro file descriptor.")
            # For now, we can only read double precision fields
            fields = [e[0] for e in _read_fluid_file_descriptor(fname_desc)]

            # We get no fields for old-style hydro file descriptor
            ok = len(fields) > 0

        if not ok:
            # Case 4: attempt autodetection with usual fields
            foldername = os.path.abspath(os.path.dirname(
                ds.parameter_filename))
            rt_flag = any(glob.glob(os.sep.join([foldername,
                                                 "info_rt_*.txt"])))
            if rt_flag:  # rt run
                if nvar < 10:
                    mylog.info("Detected RAMSES-RT file WITHOUT IR trapping.")

                    fields = [
                        "Density",
                        "x-velocity",
                        "y-velocity",
                        "z-velocity",
                        "Pressure",
                        "Metallicity",
                        "HII",
                        "HeII",
                        "HeIII",
                    ]
                else:
                    mylog.info("Detected RAMSES-RT file WITH IR trapping.")

                    fields = [
                        "Density",
                        "x-velocity",
                        "y-velocity",
                        "z-velocity",
                        "Pres_IR",
                        "Pressure",
                        "Metallicity",
                        "HII",
                        "HeII",
                        "HeIII",
                    ]
            else:
                if nvar < 5:
                    mylog.debug("nvar=%s is too small! YT doesn't currently "
                                "support 1D/2D runs in RAMSES %s")
                    raise ValueError
                # Basic hydro runs
                if nvar == 5:
                    fields = [
                        "Density",
                        "x-velocity",
                        "y-velocity",
                        "z-velocity",
                        "Pressure",
                    ]
                if nvar > 5 and nvar < 11:
                    fields = [
                        "Density",
                        "x-velocity",
                        "y-velocity",
                        "z-velocity",
                        "Pressure",
                        "Metallicity",
                    ]
                # MHD runs - NOTE:
                # THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
                if nvar == 11:
                    fields = [
                        "Density",
                        "x-velocity",
                        "y-velocity",
                        "z-velocity",
                        "B_x_left",
                        "B_y_left",
                        "B_z_left",
                        "B_x_right",
                        "B_y_right",
                        "B_z_right",
                        "Pressure",
                    ]
                if nvar > 11:
                    fields = [
                        "Density",
                        "x-velocity",
                        "y-velocity",
                        "z-velocity",
                        "B_x_left",
                        "B_y_left",
                        "B_z_left",
                        "B_x_right",
                        "B_y_right",
                        "B_z_right",
                        "Pressure",
                        "Metallicity",
                    ]
            mylog.debug(
                "No fields specified by user; automatically setting fields array to %s",
                fields,
            )

        # Allow some wiggle room for users to add too many variables
        count_extra = 0
        while len(fields) < nvar:
            fields.append(f"var_{len(fields)}")
            count_extra += 1
        if count_extra > 0:
            mylog.debug("Detected %s extra fluid fields.", count_extra)
        cls.field_list = [(cls.ftype, e) for e in fields]

        cls.set_detected_fields(ds, fields)

        return fields
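A sketch of the "wiggle room" fallback at the end: any variables in the file beyond the detected list get placeholder names var_N.

fields = ["Density", "x-velocity", "y-velocity", "z-velocity", "Pressure"]
nvar = 7  # pretend the file carries two variables we know nothing about
while len(fields) < nvar:
    fields.append(f"var_{len(fields)}")
print(fields[-2:])  # ['var_5', 'var_6']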
Example #11
File: io.py Project: pshriwise/yt
    def _read_particle_fields(self, chunks, ptf, selector):
        # Now we have all the sizes, and we can allocate
        chunks = list(chunks)
        data_files = set()
        # Only support halo reading for now.
        assert len(ptf) == 1
        assert list(ptf.keys())[0] == "halos"
        for chunk in chunks:
            for obj in chunk.objs:
                data_files.update(obj.data_files)

        def iterate_over_attributes(attr_list):
            for attr, *_ in attr_list:
                if isinstance(attr, tuple):
                    for a in attr:
                        yield a
                else:
                    yield attr

        for data_file in sorted(data_files, key=attrgetter("filename")):
            pcount = (data_file.ds.parameters["nhalos"] +
                      data_file.ds.parameters["nsubs"])
            if pcount == 0:
                continue
            ptype = "halos"
            field_list0 = sorted(ptf[ptype], key=_find_attr_position)
            field_list_pos = [f"raw_position_{k}" for k in "xyz"]
            field_list = sorted(set(field_list0 + field_list_pos),
                                key=_find_attr_position)

            with FortranFile(self.ds.parameter_filename) as fpu:
                params = fpu.read_attrs(HEADER_ATTRIBUTES)

                todo = _todo_from_attributes(field_list)

                nhalos = params["nhalos"] + params["nsubs"]
                data = np.zeros((nhalos, len(field_list)))
                for ihalo in range(nhalos):
                    jj = 0
                    for it in todo:
                        if isinstance(it, int):
                            fpu.skip(it)
                        else:
                            tmp = fpu.read_attrs(it)
                            for key in iterate_over_attributes(it):
                                v = tmp[key]
                                if key not in field_list:
                                    continue
                                data[ihalo, jj] = v
                                jj += 1
            ipos = [field_list.index(k) for k in field_list_pos]
            w = self.ds.domain_width.to("code_length")[0].value / 2
            x, y, z = (data[:, i] + w for i in ipos)
            mask = selector.select_points(x, y, z, 0.0)
            del x, y, z

            if mask is None:
                continue
            for field in field_list0:
                i = field_list.index(field)
                yield (ptype, field), data[mask, i]
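A sketch of the mask-then-yield step at the end: positions are tested against the selector once, and every requested column reuses the same boolean mask (toy data, with a simple comparison standing in for selector.select_points).

import numpy as np

data = np.arange(12.0).reshape(4, 3)  # 4 halos x 3 columns (x, y, z)
x, y, z = data.T
mask = x > 2.0  # stands in for selector.select_points(x, y, z, 0.0)
for i, field in enumerate(["raw_position_x", "raw_position_y"]):
    print(field, data[mask, i])
# raw_position_x [3. 6. 9.]
# raw_position_y [4. 7. 10.]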
Example #12
    def create_cooling_fields(self):
        num = os.path.basename(
            self.ds.parameter_filename).split(".")[0].split("_")[1]
        filename = "%s/cooling_%05i.out" % (
            os.path.dirname(self.ds.parameter_filename),
            int(num),
        )

        if not os.path.exists(filename):
            mylog.warning("This output has no cooling fields")
            return

        # Function to create the cooling fields
        def _create_field(name, interp_object, unit):
            def _func(field, data):
                shape = data[("gas", "temperature")].shape
                d = {
                    "lognH":
                    np.log10(_X * data[("gas", "density")] / mh).ravel(),
                    "logT": np.log10(data[("gas", "temperature")]).ravel(),
                }
                rv = interp_object(d).reshape(shape)
                if name[-1] != "mu":
                    rv = 10**interp_object(d).reshape(shape)
                cool = data.ds.arr(rv, unit)
                if "metal" in name[-1].split("_"):
                    cool = (cool * data[("gas", "metallicity")] / 0.02
                            )  # Ramses uses Zsolar=0.02
                elif "compton" in name[-1].split("_"):
                    cool = data.ds.arr(rv, unit + "/cm**3")
                    cool = (
                        cool / data[("gas", "number_density")]
                    )  # Compton cooling/heating is written to file in erg/s
                return cool

            self.add_field(name=name,
                           sampling_type="cell",
                           function=_func,
                           units=unit)

        # Load cooling files
        avals = {}
        tvals = {}
        with FortranFile(filename) as fd:
            n1, n2 = fd.read_vector("i")
            for ax in _cool_axes:
                avals[ax] = fd.read_vector("d")
            for i, (tname, unit) in enumerate(_cool_arrs):
                var = fd.read_vector("d")
                if var.size == n1 and i == 0:
                    # If this case occurs, the cooling files were produced pre-2010 in
                    # a format that is no longer supported
                    mylog.warning(
                        "This cooling file format is no longer supported. "
                        "Cooling field loading skipped.")
                    return
                if var.size == n1 * n2:
                    tvals[tname] = dict(data=var.reshape((n1, n2), order="F"),
                                        unit=unit)
                else:
                    var = var.reshape((n1, n2, var.size // (n1 * n2)),
                                      order="F")
                    for i in range(var.shape[-1]):
                        tvals[_cool_species[i]] = dict(data=var[:, :, i],
                                                       unit="1/cm**3")

        # Add the mu field first, as it is needed for the number density
        interp = BilinearFieldInterpolator(
            tvals["mu"]["data"],
            (avals["lognH"], avals["logT"]),
            ["lognH", "logT"],
            truncate=True,
        )
        _create_field(("gas", "mu"), interp, tvals["mu"]["unit"])

        # Add the number density field, based on mu
        def _number_density(field, data):
            return data[("gas", "density")] / mp / data[("gas", "mu")]

        self.add_field(
            name=("gas", "number_density"),
            sampling_type="cell",
            function=_number_density,
            units=number_density_unit,
        )

        # Add the cooling and heating fields, which need the number density field
        for key in tvals:
            if key != "mu":
                interp = BilinearFieldInterpolator(
                    tvals[key]["data"],
                    (avals["lognH"], avals["logT"]),
                    ["lognH", "logT"],
                    truncate=True,
                )
                _create_field(("gas", key), interp, tvals[key]["unit"])

        # Add total cooling and heating fields
        def _all_cool(field, data):
            return (data[("gas", "cooling_primordial")] +
                    data[("gas", "cooling_metal")] +
                    data[("gas", "cooling_compton")])

        def _all_heat(field, data):
            return (data[("gas", "heating_primordial")] +
                    data[("gas", "heating_compton")])

        self.add_field(
            name=("gas", "cooling_total"),
            sampling_type="cell",
            function=_all_cool,
            units=cooling_function_units,
        )
        self.add_field(
            name=("gas", "heating_total"),
            sampling_type="cell",
            function=_all_heat,
            units=cooling_function_units,
        )

        # Add net cooling fields
        def _net_cool(field, data):
            return data[("gas", "cooling_total")] - data[("gas",
                                                          "heating_total")]

        self.add_field(
            name=("gas", "cooling_net"),
            sampling_type="cell",
            function=_net_cool,
            units=cooling_function_units,
        )
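A self-contained sketch of the bilinear table lookup underlying these cooling fields, using yt's interpolator on a toy additive table (grid values are illustrative, and the import path assumes yt's linear_interpolators module):

import numpy as np
from yt.utilities.linear_interpolators import BilinearFieldInterpolator

lognH = np.linspace(-6.0, 0.0, 7)
logT = np.linspace(1.0, 9.0, 9)
table = lognH[:, None] + logT[None, :]  # toy table, shape (n1, n2)
interp = BilinearFieldInterpolator(
    table, (lognH, logT), ["lognH", "logT"], truncate=True)
pts = {"lognH": np.array([-3.0]), "logT": np.array([5.0])}
print(interp(pts))  # [2.] since the toy table is just lognH + logT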