Example #1 (0) — File: io.py, Project: jisuoqing/yt
 def _generate_smoothing_length(self, index):
     """Generate per-particle smoothing lengths and write them to sidecar
     ``.hsml.hdf5`` files, one next to each data file.

     Does nothing when ``self.ds.gen_hsmls`` is false, or when valid
     sidecar files (hash-stamped for this dataset) already exist.
     """
     data_files = index.data_files
     if not self.ds.gen_hsmls:
         return
     # The first file's sidecar carries the dataset hash in attribute "q".
     hsml_fn = data_files[0].filename.replace(".hdf5", ".hsml.hdf5")
     if os.path.exists(hsml_fn):
         with h5py.File(hsml_fn, mode="r") as f:
             file_hash = f.attrs["q"]
         if file_hash != self.ds._file_hash:
             # Stale sidecars from another dataset: remove them all so
             # they are regenerated below.
             mylog.warning("Replacing hsml files.")
             for data_file in data_files:
                 hfn = data_file.filename.replace(".hdf5", ".hsml.hdf5")
                 os.remove(hfn)
         else:
             # Sidecars match this dataset; nothing to do.
             return
     # Gather all SPH particle positions, tracking how many particles
     # each data file contributes.
     positions = []
     counts = defaultdict(int)
     for data_file in data_files:
         for _, ppos in self._yield_coordinates(
             data_file, needed_ptype=self.ds._sph_ptypes[0]
         ):
             counts[data_file.filename] += ppos.shape[0]
             positions.append(ppos)
     if not positions:
         return
     # Cumulative offsets: where each file's particles start within the
     # concatenated position array.
     offsets = {}
     offset = 0
     for fn, count in counts.items():
         offsets[fn] = offset
         offset += count
     kdtree = index.kdtree
     # Reorder positions into the kd-tree's internal particle order.
     positions = uconcatenate(positions)[kdtree.idx]
     hsml = generate_smoothing_length(
         positions.astype("float64"), kdtree, self.ds._num_neighbors
     )
     dtype = positions.dtype
     # argsort(kdtree.idx) inverts the tree ordering so hsml lines up with
     # the original on-disk particle order.
     hsml = hsml[np.argsort(kdtree.idx)].astype(dtype)
     mylog.warning("Writing smoothing lengths to hsml files.")
     for i, data_file in enumerate(data_files):
         si, ei = data_file.start, data_file.end
         fn = data_file.filename
         hsml_fn = data_file.filename.replace(".hdf5", ".hsml.hdf5")
         with h5py.File(hsml_fn, mode="a") as f:
             if i == 0:
                 # Stamp the dataset hash on the first sidecar only.
                 f.attrs["q"] = self.ds._file_hash
             g = f.require_group(self.ds._sph_ptypes[0])
             d = g.require_dataset(
                 "SmoothingLength", dtype=dtype, shape=(counts[fn],)
             )
             # NOTE(review): d is sized counts[fn] but indexed with si/ei,
             # while the hsml slice adds this file's global offset — this
             # only matches if si/ei are file-local indices and
             # ei - si == end - begin. Confirm against data_file.start/end
             # semantics.
             begin = si + offsets[fn]
             end = min(ei, d.size) + offsets[fn]
             d[si:ei] = hsml[begin:end]
Example #2 (0)
 def _generate_smoothing_length(self, data_files, kdtree):
     """Compute SPH smoothing lengths and cache them in a sidecar file.

     The cache file (``self.hsml_filename``) starts with a packed ``"q"``
     dataset hash followed by the raw smoothing-length array bytes. When a
     cache with a matching hash already exists, nothing is done; a cache
     with a mismatched hash is removed and regenerated.

     Parameters
     ----------
     data_files : iterable
         Data files whose SPH particle positions are smoothed.
     kdtree : PyKDTree
         Pre-built tree over all particle positions; ``kdtree.idx`` gives
         the tree's particle ordering.
     """
     if os.path.exists(self.hsml_filename):
         with open(self.hsml_filename, "rb") as f:
             file_hash = struct.unpack("q", f.read(struct.calcsize("q")))[0]
         if file_hash != self.ds._file_hash:
             # Stale cache from a different dataset: drop and regenerate.
             os.remove(self.hsml_filename)
         else:
             return
     positions = []
     for data_file in data_files:
         for _, ppos in self._yield_coordinates(
             data_file, needed_ptype=self.ds._sph_ptypes[0]
         ):
             positions.append(ppos)
     if not positions:
         # No SPH particles found; nothing to cache.
         return
     # Sort positions into the kd-tree's particle order before smoothing.
     positions = np.concatenate(positions)[kdtree.idx]
     hsml = generate_smoothing_length(positions, kdtree, self.ds._num_neighbors)
     # Undo the tree ordering so the cache matches on-disk particle order.
     hsml = hsml[np.argsort(kdtree.idx)]
     # NOTE(review): assumes the "Gas" Coordinates dtype matches the
     # sph_ptype used above — confirm against self._pdtypes.
     dtype = self._pdtypes["Gas"]["Coordinates"][0]
     with open(self.hsml_filename, "wb") as f:
         f.write(struct.pack("q", self.ds._file_hash))
         # tobytes() replaces ndarray.tostring(), which was deprecated in
         # NumPy 1.19 and removed in NumPy 2.0.
         f.write(hsml.astype(dtype).tobytes())
Example #3 (0)
    def add_sph_fields(self, n_neighbors=32, kernel="cubic", sph_ptype="io"):
        """Attach SPH fields for the given particle type.

        For a particle type that already provides "particle_position" and
        "particle_mass", this adds "smoothing_length" (distance to the nth
        nearest neighbor) and "density" (SPH gather-smoothed mass). Fields
        that already exist are left untouched.

        Parameters
        ----------
        n_neighbors : int
            Number of neighbors used in the smoothing-length computation.
        kernel : str
            Kernel function used for the density estimate.
        sph_ptype : str
            The SPH particle type. A dataset has exactly one sph_ptype;
            calling this method overwrites any previously set value.

        """
        mylog.info("Generating SPH fields")

        # Work in code units throughout.
        length_unit = "code_length"
        mass_unit = "code_mass"
        density_unit = "code_mass / code_length**3"

        # Pull the raw position and mass arrays for this particle type.
        region = self.all_data()
        coords = region[sph_ptype, "particle_position"].to(length_unit).d
        masses = region[sph_ptype, "particle_mass"].to(mass_unit).d

        # Build a k-d tree over all particle positions.
        tree = PyKDTree(
            coords.astype("float64"),
            left_edge=self.domain_left_edge.to_value(length_unit),
            right_edge=self.domain_right_edge.to_value(length_unit),
            periodic=self.periodicity,
            leafsize=2 * int(n_neighbors),
        )
        # Permutation that undoes the tree's internal particle reordering.
        unsort = np.argsort(tree.idx)

        def already_present(fname):
            # Log whether the field must be generated or can be skipped.
            if (sph_ptype, fname) not in self.derived_field_list:
                mylog.info("Generating field ('%s','%s')", sph_ptype, fname)
                return False
            mylog.info("Field ('%s','%s') already exists. Skipping",
                       sph_ptype, fname)
            return True

        new_fields = {}

        # Smoothing length: distance to the n_neighbors-th nearest neighbor.
        fname = "smoothing_length"
        if already_present(fname):
            hsml = region[sph_ptype, fname].to(length_unit).d
        else:
            hsml = generate_smoothing_length(coords[tree.idx], tree,
                                             n_neighbors)
            hsml = hsml[unsort]
            new_fields[(sph_ptype, "smoothing_length")] = (hsml, length_unit)

        # Density: SPH gather estimate from masses and smoothing lengths.
        fname = "density"
        if not already_present(fname):
            dens = estimate_density(
                coords[tree.idx],
                masses[tree.idx],
                hsml[tree.idx],
                tree,
                kernel_name=kernel,
            )
            new_fields[(sph_ptype, "density")] = (dens[unsort], density_unit)

        # Register the new fields and record the SPH configuration.
        self._sph_ptypes = (sph_ptype, )
        self.index.update_data(new_fields)
        self.num_neighbors = n_neighbors