Example #1
def test_save_load_octree():
    np.random.seed(int(0x4d3d3d3))
    pos = np.random.normal(0.5, scale=0.05, size=(NPART,3)) * (DRE-DLE) + DLE
    octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE)
    octree.n_ref = 32
    for i in range(3):
        np.clip(pos[:,i], DLE[i], DRE[i], pos[:,i])
    # Convert to integers
    pos = np.floor((pos - DLE)/dx).astype("uint64")
    morton = get_morton_indices(pos)
    morton.sort()
    octree.add(morton)
    octree.finalize()
    saved = octree.save_octree()
    loaded = OctreeContainer.load_octree(saved)
    always = AlwaysSelector(None)
    ir1 = octree.ires(always)
    ir2 = loaded.ires(always)
    yield assert_equal, ir1, ir2

    fc1 = octree.fcoords(always)
    fc2 = loaded.fcoords(always)
    yield assert_equal, fc1, fc2

    fw1 = octree.fwidth(always)
    fw2 = loaded.fwidth(always)
    yield assert_equal, fw1, fw2
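The tests in this listing depend on module-level names (NPART, DLE, DRE, dx and several imports) that the extraction dropped. A minimal sketch of the assumed setup follows; the import paths target yt 3.x and the constant values, including the Morton-order exponent, are illustrative assumptions rather than the project's exact definitions.

# Hypothetical module-level setup assumed by the tests in this listing;
# values are illustrative, not the project's exact definitions.
import numpy as np

from yt.geometry.oct_container import OctreeContainer
from yt.geometry.particle_oct_container import ParticleOctreeContainer
from yt.geometry.selection_routines import AlwaysSelector
from yt.testing import assert_equal
from yt.utilities.lib.geometry_utils import get_morton_indices

NPART = 32**3                      # number of test particles (assumed)
DLE = np.array([0.0, 0.0, 0.0])    # domain left edge (assumed)
DRE = np.array([1.0, 1.0, 1.0])    # domain right edge (assumed)
# Cell width at the deepest Morton level; the exponent here is an
# assumption about ORDER_MAX in this version of yt.
dx = (DRE - DLE) / (2**20)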
Example #2
    def _initialize_particle_handler(self):
        self._setup_data_io()
        self._setup_filenames()

        index_ptype = self.index_ptype
        if index_ptype == "all":
            self.total_particles = sum(
                sum(d.total_particles.values()) for d in self.data_files)
        else:
            self.total_particles = sum(d.total_particles[index_ptype]
                                       for d in self.data_files)
        ds = self.dataset
        self.oct_handler = ParticleOctreeContainer(
            [1, 1, 1],
            ds.domain_left_edge,
            ds.domain_right_edge,
            over_refine=ds.over_refine_factor)
        self.oct_handler.n_ref = ds.n_ref
        only_on_root(
            mylog.info, "Allocating for %0.3e particles "
            "(index particle type '%s')", self.total_particles, index_ptype)
        # No more than 256^3 in the region finder.
        N = min(len(self.data_files), 256)
        self.regions = ParticleRegions(ds.domain_left_edge,
                                       ds.domain_right_edge, [N, N, N],
                                       len(self.data_files))
        self._initialize_indices()
        self.oct_handler.finalize()
        self.max_level = self.oct_handler.max_level
        self.dataset.max_level = self.max_level
        tot = sum(self.oct_handler.recursively_count().values())
        only_on_root(mylog.info, "Identified %0.3e octs", tot)
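The index_ptype branch above only changes how total_particles is accumulated; everything downstream is identical. A toy illustration of the two branches, with hypothetical per-file counts keyed by particle type:

# Illustrative only: mimics the two accumulation branches above with
# made-up per-file particle counts.
files = [{"PartType0": 100, "PartType1": 50},
         {"PartType0": 200, "PartType1": 25}]

total_all = sum(sum(counts.values()) for counts in files)    # 375
total_pt0 = sum(counts["PartType0"] for counts in files)     # 300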
Example #3
    def _initialize_particle_handler(self):
        self._setup_data_io()
        template = self.dataset.filename_template
        ndoms = self.dataset.file_count
        cls = self.dataset._file_class
        self.data_files = [
            cls(self.dataset, self.io, template % {'num': i}, i)
            for i in range(ndoms)
        ]
        self.total_particles = sum(
            sum(d.total_particles.values()) for d in self.data_files)
        ds = self.dataset
        self.oct_handler = ParticleOctreeContainer(
            [1, 1, 1],
            ds.domain_left_edge,
            ds.domain_right_edge,
            over_refine=ds.over_refine_factor)
        self.oct_handler.n_ref = ds.n_ref
        mylog.info("Allocating for %0.3e particles", self.total_particles)
        # No more than 256^3 in the region finder.
        N = min(len(self.data_files), 256)
        self.regions = ParticleRegions(ds.domain_left_edge,
                                       ds.domain_right_edge, [N, N, N],
                                       len(self.data_files))
        self._initialize_indices()
        self.oct_handler.finalize()
        self.max_level = self.oct_handler.max_level
        tot = sum(self.oct_handler.recursively_count().values())
        mylog.info("Identified %0.3e octs", tot)
Example #4
def test_add_particles_random():
    np.random.seed(int(0x4d3d3d3))
    pos = np.random.normal(0.5, scale=0.05,
                           size=(NPART, 3)) * (DRE - DLE) + DLE
    # Clip positions to the domain boundaries
    for i in range(3):
        np.clip(pos[:, i], DLE[i], DRE[i], pos[:, i])
    # Convert to integers
    pos = np.floor((pos - DLE) / dx).astype("uint64")
    morton = get_morton_indices(pos)
    morton.sort()
    for ndom in [1, 2, 4, 8]:
        octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE)
        octree.n_ref = 32
        for dom, split in enumerate(np.array_split(morton, ndom)):
            octree.add(split)
        octree.finalize()
        # This visits every oct.
        tc = octree.recursively_count()
        total_count = np.zeros(len(tc), dtype="int32")
        for i in sorted(tc):
            total_count[i] = tc[i]
        yield assert_equal, octree.nocts, total_count.sum()
        # This visits every cell -- including those covered by octs.
        #for dom in range(ndom):
        #    level_count += octree.count_levels(total_count.size-1, dom, mask)
        yield assert_equal, total_count, [1, 8, 64, 64, 256, 536, 1856, 1672]
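The outer loop checks that the finalized octree does not depend on how the sorted Morton array is split across domains; np.array_split simply partitions it into contiguous, nearly equal chunks. A self-contained illustration of that invariant:

# Toy check: np.array_split partitions a sorted array into contiguous
# chunks whose concatenation reproduces the original, which is all the
# per-domain add() calls above consume.
import numpy as np

morton = np.arange(10, dtype="uint64")
for ndom in [1, 2, 4]:
    chunks = np.array_split(morton, ndom)
    assert sum(len(c) for c in chunks) == morton.size
    assert np.array_equal(np.concatenate(chunks), morton)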
Example #7
    def to_octree(self, over_refine_factor=1, dims=(1, 1, 1), n_ref=64):
        mi = self.morton
        mi.sort()
        eps = np.finfo(self.dtype).eps
        LE = self.min(axis=0)
        LE -= np.abs(LE) * eps
        RE = self.max(axis=0)
        RE += np.abs(RE) * eps
        octree = ParticleOctreeContainer(dims, LE, RE,
                                         over_refine=over_refine_factor)
        octree.n_ref = n_ref
        octree.add(mi)
        octree.finalize()
        return octree
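A hedged usage sketch for to_octree: the receiver must expose morton, min, max, and dtype, so `particles` below stands in for whatever collection object defines this method.

# Hypothetical usage; `particles` is a stand-in for the defining object.
octree = particles.to_octree(over_refine_factor=1, dims=(1, 1, 1), n_ref=64)
print(octree.nocts)   # total octs after finalize(), cf. the tests above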
Example #9
File: octree_subset.py (project: vadsem/yt)
    def particle_operation(self,
                           positions,
                           fields=None,
                           method=None,
                           nneighbors=64,
                           kernel_name='cubic'):
        r"""Operate on particles, in a particle-against-particle fashion.

        This uses the octree indexing system to call a "smoothing" operation
        (defined in yt/geometry/particle_smooth.pyx) that expects to be called
        in a particle-by-particle fashion.  For instance, the canonical example
        of this would be to compute the Nth nearest neighbor, or to compute the
        density for a given particle based on some kernel operation.

        Many of the arguments to this are identical to those used in the smooth
        and deposit functions.  Note that the `fields` argument must not be
        empty, as these fields will be modified in place.

        Parameters
        ----------
        positions : array_like (Nx3)
            The positions of all of the particles to be examined.  A new
            indexed octree will be constructed on these particles.
        fields : list of arrays
            All the necessary fields for computing the particle operation.  For
            instance, this might include mass, velocity, etc.  One of these
            will likely be modified in place.
        method : string
            This is the "method name" which will be looked up in the
            `particle_smooth` namespace as `methodname_smooth`.
        nneighbors : int, default 64
            The number of neighbors to examine during the process.
        kernel_name : string, default 'cubic'
            This is the name of the smoothing kernel to use. Current supported
            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
            `wendland4`, and `wendland6`.

        Returns
        -------
        Nothing.

        """
        # Here we perform our particle deposition.
        positions.convert_to_units("code_length")
        morton = compute_morton(positions[:, 0], positions[:, 1],
                                positions[:, 2], self.ds.domain_left_edge,
                                self.ds.domain_right_edge)
        morton.sort()
        particle_octree = ParticleOctreeContainer([1, 1, 1],
                                                  self.ds.domain_left_edge,
                                                  self.ds.domain_right_edge,
                                                  over_refine=1)
        particle_octree.n_ref = nneighbors * 2
        particle_octree.add(morton)
        particle_octree.finalize()
        pdom_ind = particle_octree.domain_ind(self.selector)
        if fields is None: fields = []
        cls = getattr(particle_smooth, "%s_smooth" % method, None)
        if cls is None:
            raise YTParticleDepositionNotImplemented(method)
        nz = self.nz
        mdom_ind = self.domain_ind
        nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
        op = cls(nvals, len(fields), nneighbors, kernel_name)
        op.initialize()
        mylog.debug("Smoothing %s particles into %s Octs", positions.shape[0],
                    nvals[-1])
        op.process_particles(particle_octree, pdom_ind, positions, fields,
                             self.domain_id, self._domain_offset,
                             self.ds.periodicity, self.ds.geometry)
        vals = op.finalize()
        if vals is None: return
        if isinstance(vals, list):
            vals = [np.asfortranarray(v) for v in vals]
        else:
            vals = np.asfortranarray(vals)
        return vals
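A hedged sketch of calling particle_operation; `subset` stands in for one OctreeSubset (e.g. a single spatial chunk), and the particle type and output array are illustrative. The method name `nth_neighbor` matches the docstring's canonical nearest-neighbor example, assuming a smoother of that name exists in particle_smooth.

import numpy as np

# Hypothetical call pattern; names other than the keyword arguments are
# assumptions, and the output array is filled in place via `fields`.
pos = subset["PartType0", "particle_position"]
nn_dist = np.zeros(pos.shape[0], dtype="float64")
subset.particle_operation(pos, [nn_dist], method="nth_neighbor",
                          nneighbors=64, kernel_name="cubic")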
Example #10
File: octree_subset.py (project: vadsem/yt)
    def smooth(self,
               positions,
               fields=None,
               index_fields=None,
               method=None,
               create_octree=False,
               nneighbors=64,
               kernel_name='cubic'):
        r"""Operate on the mesh, in a particle-against-mesh fashion, with
        non-local input.

        This uses the octree indexing system to call a "smoothing" operation
        (defined in yt/geometry/particle_smooth.pyx) that can take input from
        several (non-local) particles and construct some value on the mesh.
        The canonical example is to conduct a smoothing kernel operation on the
        mesh.

        Parameters
        ----------
        positions : array_like (Nx3)
            The positions of all of the particles to be examined.  A new
            indexed octree will be constructed on these particles.
        fields : list of arrays
            All the necessary fields for computing the particle operation.  For
            instance, this might include mass, velocity, etc.
        index_fields : list of arrays
            All of the fields defined on the mesh that may be used as input to
            the operation.
        method : string
            This is the "method name" which will be looked up in the
            `particle_smooth` namespace as `methodname_smooth`.  Current
            methods include `volume_weighted`, `nearest`, `idw`,
            `nth_neighbor`, and `density`.
        create_octree : bool
            Should we construct a new octree for indexing the particles?  In
            cases where we are applying an operation on a subset of the
            particles used to construct the mesh octree, this will ensure that
            we are able to find and identify all relevant particles.
        nneighbors : int, default 64
            The number of neighbors to examine during the process.
        kernel_name : string, default 'cubic'
            This is the name of the smoothing kernel to use. Current supported
            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
            `wendland4`, and `wendland6`.

        Returns
        -------
        List of fortran-ordered, mesh-like arrays.
        """
        # Here we perform our particle deposition.
        positions.convert_to_units("code_length")
        if create_octree:
            morton = compute_morton(positions[:, 0], positions[:, 1],
                                    positions[:, 2], self.ds.domain_left_edge,
                                    self.ds.domain_right_edge)
            morton.sort()
            particle_octree = ParticleOctreeContainer(
                [1, 1, 1],
                self.ds.domain_left_edge,
                self.ds.domain_right_edge,
                over_refine=self._oref)
            # This should ensure we get everything within one neighbor of home.
            particle_octree.n_ref = nneighbors * 2
            particle_octree.add(morton)
            particle_octree.finalize()
            pdom_ind = particle_octree.domain_ind(self.selector)
        else:
            particle_octree = self.oct_handler
            pdom_ind = self.domain_ind
        if fields is None: fields = []
        if index_fields is None: index_fields = []
        cls = getattr(particle_smooth, "%s_smooth" % method, None)
        if cls is None:
            raise YTParticleDepositionNotImplemented(method)
        nz = self.nz
        mdom_ind = self.domain_ind
        nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
        op = cls(nvals, len(fields), nneighbors, kernel_name)
        op.initialize()
        mylog.debug("Smoothing %s particles into %s Octs", positions.shape[0],
                    nvals[-1])
        # Pointer operations within 'process_octree' require arrays to be
        # contiguous cf. https://bitbucket.org/yt_analysis/yt/issues/1079
        fields = [np.ascontiguousarray(f, dtype="float64") for f in fields]
        op.process_octree(self.oct_handler, mdom_ind, positions, self.fcoords,
                          fields, self.domain_id, self._domain_offset,
                          self.ds.periodicity, index_fields, particle_octree,
                          pdom_ind, self.ds.geometry)
        # If there are 0s in the smoothing field this will not throw an error,
        # but silently return nans for vals where dividing by 0
        # Same as what is currently occurring, but suppressing the div by zero
        # error.
        with np.errstate(invalid='ignore'):
            vals = op.finalize()
        if vals is None: return
        if isinstance(vals, list):
            vals = [np.asfortranarray(v) for v in vals]
        else:
            vals = np.asfortranarray(vals)
        return vals
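A hedged sketch of calling smooth with `volume_weighted`, one of the method names the docstring lists; the particle type and the field ordering are assumptions about what that smoothing operator expects as input.

# Hypothetical call pattern; `subset` again stands in for an OctreeSubset.
pos = subset["PartType0", "particle_position"]
args = [subset["PartType0", f] for f in ("particle_mass",
                                         "smoothing_length",
                                         "density", "temperature")]
vals = subset.smooth(pos, args, method="volume_weighted",
                     create_octree=True, nneighbors=64)
# vals: list of Fortran-ordered, mesh-like arrays (see Returns above).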
Example #12
class ParticleIndex(Index):
    """The Index subclass for particle datasets"""
    _global_mesh = False

    def __init__(self, ds, dataset_type):
        self.dataset_type = dataset_type
        self.dataset = weakref.proxy(ds)
        self.index_filename = self.dataset.parameter_filename
        self.directory = os.path.dirname(self.index_filename)
        self.float_type = np.float64
        super(ParticleIndex, self).__init__(ds, dataset_type)

    @property
    def index_ptype(self):
        if hasattr(self.dataset, "index_ptype"):
            return self.dataset.index_ptype
        else:
            return "all"

    def _setup_geometry(self):
        mylog.debug("Initializing Particle Geometry Handler.")
        self._initialize_particle_handler()

    def get_smallest_dx(self):
        """
        Returns (in code units) the smallest cell size in the simulation.
        """
        ML = self.oct_handler.max_level
        dx = 1.0 / (self.dataset.domain_dimensions * 2**ML)
        dx = dx * (self.dataset.domain_right_edge -
                   self.dataset.domain_left_edge)
        return dx.min()

    def _get_particle_type_counts(self):
        result = collections.defaultdict(lambda: 0)
        for df in self.data_files:
            for k in df.total_particles.keys():
                result[k] += df.total_particles[k]
        return dict(result)

    def convert(self, unit):
        return self.dataset.conversion_factors[unit]

    def _setup_filenames(self):
        template = self.dataset.filename_template
        ndoms = self.dataset.file_count
        cls = self.dataset._file_class
        self.data_files = \
          [cls(self.dataset, self.io, template % {'num':i}, i)
           for i in range(ndoms)]

    def _initialize_particle_handler(self):
        self._setup_data_io()
        self._setup_filenames()

        index_ptype = self.index_ptype
        if index_ptype == "all":
            self.total_particles = sum(
                sum(d.total_particles.values()) for d in self.data_files)
        else:
            self.total_particles = sum(d.total_particles[index_ptype]
                                       for d in self.data_files)
        ds = self.dataset
        self.oct_handler = ParticleOctreeContainer(
            [1, 1, 1],
            ds.domain_left_edge,
            ds.domain_right_edge,
            over_refine=ds.over_refine_factor)
        self.oct_handler.n_ref = ds.n_ref
        only_on_root(
            mylog.info, "Allocating for %0.3e particles "
            "(index particle type '%s')", self.total_particles, index_ptype)
        # No more than 256^3 in the region finder.
        N = min(len(self.data_files), 256)
        self.regions = ParticleRegions(ds.domain_left_edge,
                                       ds.domain_right_edge, [N, N, N],
                                       len(self.data_files))
        self._initialize_indices()
        self.oct_handler.finalize()
        self.max_level = self.oct_handler.max_level
        self.dataset.max_level = self.max_level
        tot = sum(self.oct_handler.recursively_count().values())
        only_on_root(mylog.info, "Identified %0.3e octs", tot)

    def _initialize_indices(self):
        # This will be replaced with a parallel-aware iteration step.
        # Roughly outlined, what we will do is:
        #   * Generate Morton indices on each set of files that belong to
        #     an individual processor
        #   * Create a global, accumulated histogram
        #   * Cut based on estimated load balancing
        #   * Pass particles to specific processors, along with NREF buffer
        #   * Broadcast back a serialized octree to join
        #
        # For now we will do this in serial.
        index_ptype = self.index_ptype
        # Set the index_ptype attribute of self.io dynamically here, so we don't
        # need to assume that the dataset has the attribute.
        self.io.index_ptype = index_ptype
        morton = np.empty(self.total_particles, dtype="uint64")
        ind = 0
        for data_file in self.data_files:
            if index_ptype == "all":
                npart = sum(data_file.total_particles.values())
            else:
                npart = data_file.total_particles[index_ptype]
            morton[ind:ind + npart] = \
                self.io._initialize_index(data_file, self.regions)
            ind += npart
        morton.sort()
        # Now we add them all at once.
        self.oct_handler.add(morton)

    def _detect_output_fields(self):
        # TODO: Add additional fields
        dsl = []
        units = {}
        for dom in self.data_files:
            fl, _units = self.io._identify_fields(dom)
            units.update(_units)
            dom._calculate_offsets(fl)
            for f in fl:
                if f not in dsl: dsl.append(f)
        self.field_list = dsl
        ds = self.dataset
        ds.particle_types = tuple(set(pt for pt, ds in dsl))
        # This is an attribute that means these particle types *actually*
        # exist.  As in, they are real, in the dataset.
        ds.field_units.update(units)
        ds.particle_types_raw = ds.particle_types

    def _identify_base_chunk(self, dobj):
        if getattr(dobj, "_chunk_info", None) is None:
            data_files = getattr(dobj, "data_files", None)
            if data_files is None:
                data_files = [
                    self.data_files[i]
                    for i in self.regions.identify_data_files(dobj.selector)
                ]
            base_region = getattr(dobj, "base_region", dobj)
            oref = self.dataset.over_refine_factor
            subset = [
                ParticleOctreeSubset(base_region,
                                     data_files,
                                     self.dataset,
                                     over_refine_factor=oref)
            ]
            dobj._chunk_info = subset
        dobj._current_chunk = list(self._chunk_all(dobj))[0]

    def _chunk_all(self, dobj):
        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
        yield YTDataChunk(dobj, "all", oobjs, None)

    def _chunk_spatial(self, dobj, ngz, sort=None, preload_fields=None):
        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
        # We actually do not really use the data files except as input to the
        # ParticleOctreeSubset.
        # This is where we will perform cutting of the Octree and
        # load-balancing.  That may require a specialized selector object to
        # cut based on some space-filling curve index.
        for i, og in enumerate(sobjs):
            if ngz > 0:
                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
            else:
                g = og
            yield YTDataChunk(dobj, "spatial", [g])

    def _chunk_io(self, dobj, cache=True, local_only=False):
        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
        for subset in oobjs:
            yield YTDataChunk(dobj, "io", [subset], None, cache=cache)
Example #13
class ParticleIndex(Index):
    """The Index subclass for particle datasets"""
    _global_mesh = False

    def __init__(self, ds, dataset_type):
        self.dataset_type = dataset_type
        self.dataset = weakref.proxy(ds)
        self.index_filename = self.dataset.parameter_filename
        self.directory = os.path.dirname(self.index_filename)
        self.float_type = np.float64
        super(ParticleIndex, self).__init__(ds, dataset_type)

    def _setup_geometry(self):
        mylog.debug("Initializing Particle Geometry Handler.")
        self._initialize_particle_handler()

    def get_smallest_dx(self):
        """
        Returns (in code units) the smallest cell size in the simulation.
        """
        ML = self.oct_handler.max_level
        dx = 1.0/(self.dataset.domain_dimensions*2**ML)
        dx = dx * (self.dataset.domain_right_edge -
                   self.dataset.domain_left_edge)
        return dx.min()

    def convert(self, unit):
        return self.dataset.conversion_factors[unit]

    def _initialize_particle_handler(self):
        self._setup_data_io()
        template = self.dataset.filename_template
        ndoms = self.dataset.file_count
        cls = self.dataset._file_class
        self.data_files = [cls(self.dataset, self.io, template % {'num':i}, i)
                           for i in range(ndoms)]
        self.total_particles = sum(
                sum(d.total_particles.values()) for d in self.data_files)
        ds = self.dataset
        self.oct_handler = ParticleOctreeContainer(
            [1, 1, 1], ds.domain_left_edge, ds.domain_right_edge,
            over_refine = ds.over_refine_factor)
        self.oct_handler.n_ref = ds.n_ref
        mylog.info("Allocating for %0.3e particles", self.total_particles)
        # No more than 256^3 in the region finder.
        N = min(len(self.data_files), 256) 
        self.regions = ParticleRegions(
                ds.domain_left_edge, ds.domain_right_edge,
                [N, N, N], len(self.data_files))
        self._initialize_indices()
        self.oct_handler.finalize()
        self.max_level = self.oct_handler.max_level
        tot = sum(self.oct_handler.recursively_count().values())
        mylog.info("Identified %0.3e octs", tot)

    def _initialize_indices(self):
        # This will be replaced with a parallel-aware iteration step.
        # Roughly outlined, what we will do is:
        #   * Generate Morton indices on each set of files that belong to
        #     an individual processor
        #   * Create a global, accumulated histogram
        #   * Cut based on estimated load balancing
        #   * Pass particles to specific processors, along with NREF buffer
        #   * Broadcast back a serialized octree to join
        #
        # For now we will do this in serial.
        morton = np.empty(self.total_particles, dtype="uint64")
        ind = 0
        for data_file in self.data_files:
            npart = sum(data_file.total_particles.values())
            morton[ind:ind + npart] = \
                self.io._initialize_index(data_file, self.regions)
            ind += npart
        morton.sort()
        # Now we add them all at once.
        self.oct_handler.add(morton)

    def _detect_output_fields(self):
        # TODO: Add additional fields
        dsl = []
        units = {}
        for dom in self.data_files:
            fl, _units = self.io._identify_fields(dom)
            units.update(_units)
            dom._calculate_offsets(fl)
            for f in fl:
                if f not in dsl: dsl.append(f)
        self.field_list = dsl
        ds = self.dataset
        ds.particle_types = tuple(set(pt for pt, ds in dsl))
        # This is an attribute that means these particle types *actually*
        # exist.  As in, they are real, in the dataset.
        ds.field_units.update(units)
        ds.particle_types_raw = ds.particle_types

    def _identify_base_chunk(self, dobj):
        if getattr(dobj, "_chunk_info", None) is None:
            data_files = getattr(dobj, "data_files", None)
            if data_files is None:
                data_files = [self.data_files[i] for i in
                              self.regions.identify_data_files(dobj.selector)]
            base_region = getattr(dobj, "base_region", dobj)
            oref = self.dataset.over_refine_factor
            subset = [ParticleOctreeSubset(base_region, data_files, 
                        self.dataset, over_refine_factor = oref)]
            dobj._chunk_info = subset
        dobj._current_chunk = list(self._chunk_all(dobj))[0]

    def _chunk_all(self, dobj):
        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
        yield YTDataChunk(dobj, "all", oobjs, None)

    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
        # We actually do not really use the data files except as input to the
        # ParticleOctreeSubset.
        # This is where we will perform cutting of the Octree and
        # load-balancing.  That may require a specialized selector object to
        # cut based on some space-filling curve index.
        for i,og in enumerate(sobjs):
            if ngz > 0:
                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
            else:
                g = og
            yield YTDataChunk(dobj, "spatial", [g])

    def _chunk_io(self, dobj, cache = True, local_only = False):
        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
        for subset in oobjs:
            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
Example #14
    def particle_operation(self, positions, fields = None,
            method = None, nneighbors = 64):
        r"""Operate on particles, in a particle-against-particle fashion.

        This uses the octree indexing system to call a "smoothing" operation
        (defined in yt/geometry/particle_smooth.pyx) that expects to be called
        in a particle-by-particle fashion.  For instance, the canonical example
        of this would be to compute the Nth nearest neighbor, or to compute the
        density for a given particle based on some kernel operation.

        Many of the arguments to this are identical to those used in the smooth
        and deposit functions.  Note that the `fields` argument must not be
        empty, as these fields will be modified in place.

        Parameters
        ----------
        positions : array_like (Nx3)
            The positions of all of the particles to be examined.  A new
            indexed octree will be constructed on these particles.
        fields : list of arrays
            All the necessary fields for computing the particle operation.  For
            instance, this might include mass, velocity, etc.  One of these
            will likely be modified in place.
        method : string
            This is the "method name" which will be looked up in the
            `particle_smooth` namespace as `methodname_smooth`.
        nneighbors : int, default 64
            The number of neighbors to examine during the process.

        Returns
        -------
        Nothing.

        """
        # Here we perform our particle deposition.
        positions.convert_to_units("code_length")
        morton = compute_morton(
            positions[:,0], positions[:,1], positions[:,2],
            self.ds.domain_left_edge,
            self.ds.domain_right_edge)
        morton.sort()
        particle_octree = ParticleOctreeContainer([1, 1, 1],
            self.ds.domain_left_edge,
            self.ds.domain_right_edge,
            over_refine = 1)
        particle_octree.n_ref = nneighbors * 2
        particle_octree.add(morton)
        particle_octree.finalize()
        pdom_ind = particle_octree.domain_ind(self.selector)
        if fields is None: fields = []
        cls = getattr(particle_smooth, "%s_smooth" % method, None)
        if cls is None:
            raise YTParticleDepositionNotImplemented(method)
        nz = self.nz
        mdom_ind = self.domain_ind
        nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
        op = cls(nvals, len(fields), nneighbors)
        op.initialize()
        mylog.debug("Smoothing %s particles into %s Octs",
            positions.shape[0], nvals[-1])
        op.process_particles(particle_octree, pdom_ind, positions, 
            fields, self.domain_id, self._domain_offset, self.ds.periodicity,
            self.ds.geometry)
        vals = op.finalize()
        if vals is None: return
        if isinstance(vals, list):
            vals = [np.asfortranarray(v) for v in vals]
        else:
            vals = np.asfortranarray(vals)
        return vals
Example #15
    def smooth(self, positions, fields = None, index_fields = None,
               method = None, create_octree = False, nneighbors = 64):
        r"""Operate on the mesh, in a particle-against-mesh fashion, with
        non-local input.

        This uses the octree indexing system to call a "smoothing" operation
        (defined in yt/geometry/particle_smooth.pyx) that can take input from
        several (non-local) particles and construct some value on the mesh.
        The canonical example is to conduct a smoothing kernel operation on the
        mesh.

        Parameters
        ----------
        positions : array_like (Nx3)
            The positions of all of the particles to be examined.  A new
            indexed octree will be constructed on these particles.
        fields : list of arrays
            All the necessary fields for computing the particle operation.  For
            instance, this might include mass, velocity, etc.  
        index_fields : list of arrays
            All of the fields defined on the mesh that may be used as input to
            the operation.
        method : string
            This is the "method name" which will be looked up in the
            `particle_smooth` namespace as `methodname_smooth`.  Current
            methods include `volume_weighted`, `nearest`, `idw`,
            `nth_neighbor`, and `density`.
        create_octree : bool
            Should we construct a new octree for indexing the particles?  In
            cases where we are applying an operation on a subset of the
            particles used to construct the mesh octree, this will ensure that
            we are able to find and identify all relevant particles.
        nneighbors : int, default 64
            The number of neighbors to examine during the process.

        Returns
        -------
        List of fortran-ordered, mesh-like arrays.
        """
        # Here we perform our particle deposition.
        positions.convert_to_units("code_length")
        if create_octree:
            morton = compute_morton(
                positions[:,0], positions[:,1], positions[:,2],
                self.ds.domain_left_edge,
                self.ds.domain_right_edge)
            morton.sort()
            particle_octree = ParticleOctreeContainer([1, 1, 1],
                self.ds.domain_left_edge,
                self.ds.domain_right_edge,
                over_refine = self._oref)
            # This should ensure we get everything within one neighbor of home.
            particle_octree.n_ref = nneighbors * 2
            particle_octree.add(morton)
            particle_octree.finalize()
            pdom_ind = particle_octree.domain_ind(self.selector)
        else:
            particle_octree = self.oct_handler
            pdom_ind = self.domain_ind
        if fields is None: fields = []
        if index_fields is None: index_fields = []
        cls = getattr(particle_smooth, "%s_smooth" % method, None)
        if cls is None:
            raise YTParticleDepositionNotImplemented(method)
        nz = self.nz
        mdom_ind = self.domain_ind
        nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
        op = cls(nvals, len(fields), nneighbors)
        op.initialize()
        mylog.debug("Smoothing %s particles into %s Octs",
            positions.shape[0], nvals[-1])
        op.process_octree(self.oct_handler, mdom_ind, positions, 
            self.fcoords, fields,
            self.domain_id, self._domain_offset, self.ds.periodicity,
            index_fields, particle_octree, pdom_ind, self.ds.geometry)
        # If there are 0s in the smoothing field this will not throw an error, 
        # but silently return nans for vals where dividing by 0
        # Same as what is currently occurring, but suppressing the div by zero
        # error.
        with np.errstate(invalid='ignore'):
            vals = op.finalize()
        if vals is None: return
        if isinstance(vals, list):
            vals = [np.asfortranarray(v) for v in vals]
        else:
            vals = np.asfortranarray(vals)
        return vals