Example #1
def test_psize_2d():
    procs = dec.get_psize(np.array([5, 1, 7]), 6)
    assert_array_equal(procs, np.array([3, 1, 2]))
    procs = dec.get_psize(np.array([1, 7, 5]), 6)
    assert_array_equal(procs, np.array([1, 2, 3]))
    procs = dec.get_psize(np.array([7, 5, 1]), 6)
    assert_array_equal(procs, np.array([2, 3, 1]))
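For context, a minimal usage sketch (assuming the test's `dec` alias refers to `yt.utilities.decompose`, as in yt's own test suite): `get_psize` factors the requested number of pieces across the axes so that their product equals that number, with degenerate axes receiving exactly one piece.

import numpy as np
from yt.utilities import decompose as dec  # assumed module path

# Factor 6 pieces across a pseudo-2D shape: the degenerate axis
# (size 1) gets one piece and the factors multiply to 6.
procs = dec.get_psize(np.array([5, 1, 7]), 6)
assert np.prod(procs) == 6 and procs[1] == 1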
Example #3
    def _domain_decomp(self):
        bbox = np.array([self.ds.domain_left_edge,
                         self.ds.domain_right_edge]).transpose()
        dims = self.ds.domain_dimensions
        # Factor num_grids across the domain axes, then split the array
        # into that many virtual grids.
        psize = get_psize(dims, self.num_grids)
        gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
        self.grid_left_edge = self.ds.arr(gle, "code_length")
        self.grid_right_edge = self.ds.arr(gre, "code_length")
        self.grid_dimensions = np.array(shapes, dtype="int32")
    def _parse_index(self):
        f = self._handle  # shortcut
        ds = self.dataset  # shortcut

        # If nprocs > 1, decompose the domain into virtual grids
        if self.num_grids > 1:
            if self.ds.z_axis_decomp:
                dz = ds.quan(1.0, "code_length") * ds.spectral_factor
                self.grid_dimensions[:, 2] = np.around(
                    float(ds.domain_dimensions[2]) /
                    self.num_grids).astype("int")
                self.grid_dimensions[-1, 2] += (ds.domain_dimensions[2] %
                                                self.num_grids)
                self.grid_left_edge[0, 2] = ds.domain_left_edge[2]
                self.grid_left_edge[1:, 2] = (
                    ds.domain_left_edge[2] +
                    np.cumsum(self.grid_dimensions[:-1, 2]) * dz)
                self.grid_right_edge[:, 2] = (self.grid_left_edge[:, 2] +
                                              self.grid_dimensions[:, 2] * dz)
                self.grid_left_edge[:, :2] = ds.domain_left_edge[:2]
                self.grid_right_edge[:, :2] = ds.domain_right_edge[:2]
                self.grid_dimensions[:, :2] = ds.domain_dimensions[:2]
            else:
                bbox = np.array([[le, re] for le, re in
                                 zip(ds.domain_left_edge, ds.domain_right_edge)])
                dims = np.array(ds.domain_dimensions)
                psize = get_psize(dims, self.num_grids)
                gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
                self.grid_left_edge = self.ds.arr(gle, "code_length")
                self.grid_right_edge = self.ds.arr(gre, "code_length")
                self.grid_dimensions = np.array(shapes, dtype="int32")
        else:
            self.grid_left_edge[0, :] = ds.domain_left_edge
            self.grid_right_edge[0, :] = ds.domain_right_edge
            self.grid_dimensions[0] = ds.domain_dimensions

        if ds.events_data:
            try:
                self.grid_particle_count[:] = ds.primary_header["naxis2"]
            except KeyError:
                self.grid_particle_count[:] = 0.0
            self._particle_indices = np.zeros(self.num_grids + 1,
                                              dtype='int64')
            self._particle_indices[1] = self.grid_particle_count.squeeze()

        self.grid_levels.flat[:] = 0
        self.grids = np.empty(self.num_grids, dtype='object')
        for i in range(self.num_grids):
            self.grids[i] = self.grid(i, self, self.grid_levels[i, 0])
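The `z_axis_decomp` branch above is an even 1-D slab split along the spectral axis: equal widths from rounding, remainder cells folded into the last slab, and left edges built from a cumulative sum. A standalone sketch of the same arithmetic in plain NumPy (hypothetical helper name):

import numpy as np

def z_slab_edges(nz, num_grids, z0=0.0, dz=1.0):
    # Mirror the branch above: rounded equal slab widths, with the
    # remainder cells appended to the last slab.
    widths = np.full(num_grids, round(nz / num_grids), dtype="int")
    widths[-1] += nz % num_grids
    left = z0 + np.concatenate(([0.0], np.cumsum(widths[:-1]) * dz))
    right = left + widths * dz
    return widths, left, right

widths, left, right = z_slab_edges(100, 8)
assert widths.sum() == 100 and right[-1] == 100.0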
Example #6
    def _parse_index(self):
        f = open(self.index_filename, "rb")
        grid = {}
        grid["read_field"] = None
        grid["read_type"] = None
        line = f.readline()
        while grid["read_field"] is None:
            parse_line(line, grid)
            if check_break(line):
                break
            line = f.readline()
        f.close()

        # It seems some datasets have a mismatch between ncells and
        # the actual grid dimensions.
        if np.prod(grid["dimensions"]) != grid["ncells"]:
            grid["dimensions"] -= 1
            grid["dimensions"][grid["dimensions"] == 0] = 1
        if np.prod(grid["dimensions"]) != grid["ncells"]:
            mylog.error(
                "product of dimensions %i not equal to number of cells %i",
                np.prod(grid["dimensions"]),
                grid["ncells"],
            )
            raise TypeError

        # Need to determine how many grids: self.num_grids
        dataset_dir = os.path.dirname(self.index_filename)
        dname = os.path.split(self.index_filename)[-1]
        if dataset_dir.endswith("id0"):
            dname = "id0/" + dname
            dataset_dir = dataset_dir[:-3]

        gridlistread = sglob(
            os.path.join(dataset_dir, f"id*/{dname[4:-9]}-id*{dname[-9:]}"))
        gridlistread.insert(0, self.index_filename)
        if "id0" in dname:
            gridlistread += sglob(
                os.path.join(dataset_dir,
                             f"id*/lev*/{dname[4:-9]}*-lev*{dname[-9:]}"))
        else:
            gridlistread += sglob(
                os.path.join(dataset_dir,
                             f"lev*/{dname[:-9]}*-lev*{dname[-9:]}"))
        ndots = dname.count(".")
        gridlistread = [
            fn for fn in gridlistread
            if os.path.basename(fn).count(".") == ndots
        ]
        self.num_grids = len(gridlistread)
        dxs = []
        levels = np.zeros(self.num_grids, dtype="int32")
        glis = np.empty((self.num_grids, 3), dtype="float64")
        gdds = np.empty((self.num_grids, 3), dtype="float64")
        gdims = np.ones_like(glis)
        j = 0
        self.grid_filenames = gridlistread
        while j < (self.num_grids):
            f = open(gridlistread[j], "rb")
            gridread = {}
            gridread["read_field"] = None
            gridread["read_type"] = None
            line = f.readline()
            while gridread["read_field"] is None:
                parse_line(line, gridread)
                splitup = line.strip().split()
                if chk23("X_COORDINATES") in splitup:
                    gridread["left_edge"] = np.zeros(3)
                    gridread["dds"] = np.zeros(3)
                    v = np.fromfile(f, dtype=">f8", count=2)
                    gridread["left_edge"][0] = v[0] - 0.5 * (v[1] - v[0])
                    gridread["dds"][0] = v[1] - v[0]
                if chk23("Y_COORDINATES") in splitup:
                    v = np.fromfile(f, dtype=">f8", count=2)
                    gridread["left_edge"][1] = v[0] - 0.5 * (v[1] - v[0])
                    gridread["dds"][1] = v[1] - v[0]
                if chk23("Z_COORDINATES") in splitup:
                    v = np.fromfile(f, dtype=">f8", count=2)
                    gridread["left_edge"][2] = v[0] - 0.5 * (v[1] - v[0])
                    gridread["dds"][2] = v[1] - v[0]
                if check_break(line):
                    break
                line = f.readline()
            f.close()
            levels[j] = gridread.get("level", 0)
            glis[j, 0] = gridread["left_edge"][0]
            glis[j, 1] = gridread["left_edge"][1]
            glis[j, 2] = gridread["left_edge"][2]
            # It seems some datasets have a mismatch between ncells and
            # the actual grid dimensions.
            if np.prod(gridread["dimensions"]) != gridread["ncells"]:
                gridread["dimensions"] -= 1
                gridread["dimensions"][gridread["dimensions"] == 0] = 1
            if np.prod(gridread["dimensions"]) != gridread["ncells"]:
                mylog.error(
                    "product of dimensions %i not equal to number of cells %i",
                    np.prod(gridread["dimensions"]),
                    gridread["ncells"],
                )
                raise TypeError
            gdims[j, 0] = gridread["dimensions"][0]
            gdims[j, 1] = gridread["dimensions"][1]
            gdims[j, 2] = gridread["dimensions"][2]
            # Setting dds=1 for non-active dimensions in 1D/2D datasets
            gridread["dds"][gridread["dimensions"] == 1] = 1.0
            gdds[j, :] = gridread["dds"]

            j = j + 1

        gres = glis + gdims * gdds
        # Now we convert the glis, which were left edges (floats), to indices
        # from the domain left edge.  Then we do a bunch of fixing now that we
        # know the extent of all the grids.
        glis = np.round((glis - self.dataset.domain_left_edge.ndarray_view()) /
                        gdds).astype("int")
        new_dre = np.max(gres, axis=0)
        dre_units = self.dataset.domain_right_edge.uq
        self.dataset.domain_right_edge = np.round(new_dre,
                                                  decimals=12) * dre_units
        self.dataset.domain_width = (self.dataset.domain_right_edge -
                                     self.dataset.domain_left_edge)
        self.dataset.domain_center = 0.5 * (self.dataset.domain_left_edge +
                                            self.dataset.domain_right_edge)
        self.dataset.domain_dimensions = np.round(self.dataset.domain_width /
                                                  gdds[0]).astype("int")

        if self.dataset.dimensionality <= 2:
            self.dataset.domain_dimensions[2] = 1
        if self.dataset.dimensionality == 1:
            self.dataset.domain_dimensions[1] = 1

        dle = self.dataset.domain_left_edge
        dre = self.dataset.domain_right_edge
        dx_root = (
            self.dataset.domain_right_edge -
            self.dataset.domain_left_edge) / self.dataset.domain_dimensions

        if self.dataset.nprocs > 1:
            gle_all = []
            gre_all = []
            shapes_all = []
            levels_all = []
            new_gridfilenames = []
            file_offsets = []
            read_dims = []
            for i in range(levels.shape[0]):
                dx = dx_root / self.dataset.refine_by**(levels[i])
                gle_orig = self.ds.arr(
                    np.round(dle + dx * glis[i], decimals=12), "code_length")
                gre_orig = self.ds.arr(
                    np.round(gle_orig + dx * gdims[i], decimals=12),
                    "code_length")
                bbox = np.array([[le, re]
                                 for le, re in zip(gle_orig, gre_orig)])
                psize = get_psize(self.ds.domain_dimensions, self.ds.nprocs)
                gle, gre, shapes, slices = decompose_array(
                    gdims[i], psize, bbox)
                gle_all += gle
                gre_all += gre
                shapes_all += shapes
                levels_all += [levels[i]] * self.dataset.nprocs
                new_gridfilenames += ([self.grid_filenames[i]] *
                                      self.dataset.nprocs)
                file_offsets += [[slc[0].start, slc[1].start, slc[2].start]
                                 for slc in slices]
                read_dims += [
                    np.array([gdims[i][0], gdims[i][1], shape[2]], dtype="int")
                    for shape in shapes
                ]
            self.num_grids *= self.dataset.nprocs
            self.grids = np.empty(self.num_grids, dtype="object")
            self.grid_filenames = new_gridfilenames
            self.grid_left_edge = self.ds.arr(gle_all, "code_length")
            self.grid_right_edge = self.ds.arr(gre_all, "code_length")
            self.grid_dimensions = np.array(shapes_all, dtype="int32")
            gdds = (self.grid_right_edge -
                    self.grid_left_edge) / self.grid_dimensions
            glis = np.round((self.grid_left_edge - self.ds.domain_left_edge) /
                            gdds).astype("int")
            for i in range(self.num_grids):
                self.grids[i] = self.grid(
                    i,
                    self,
                    levels_all[i],
                    glis[i],
                    shapes_all[i],
                    file_offsets[i],
                    read_dims[i],
                )
        else:
            self.grids = np.empty(self.num_grids, dtype="object")
            for i in range(levels.shape[0]):
                self.grids[i] = self.grid(i, self, levels[i], glis[i],
                                          gdims[i], [0] * 3, gdims[i])
                dx = dx_root / self.dataset.refine_by**(levels[i])
                dxs.append(dx)

            dx = self.ds.arr(dxs, "code_length")
            self.grid_left_edge = self.ds.arr(
                np.round(dle + dx * glis, decimals=12), "code_length")
            self.grid_dimensions = gdims.astype("int32")
            self.grid_right_edge = self.ds.arr(
                np.round(self.grid_left_edge + dx * self.grid_dimensions,
                         decimals=12),
                "code_length",
            )
        if self.dataset.dimensionality <= 2:
            self.grid_right_edge[:, 2] = dre[2]
        if self.dataset.dimensionality == 1:
            self.grid_right_edge[:, 1:] = dre[1:]
        self.grid_particle_count = np.zeros([self.num_grids, 1], dtype="int64")
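One detail worth noting in the coordinate-reading loop above: only the first two cell-center coordinates per axis are read, which is enough to recover both the uniform spacing and the left edge. As a standalone sketch:

import numpy as np

# Two consecutive cell centers determine the spacing and, from it,
# the left edge of the first cell (as in the *_COORDINATES blocks).
v = np.array([0.25, 0.75])       # hypothetical cell centers
dds = v[1] - v[0]                # uniform cell width: 0.5
left_edge = v[0] - 0.5 * dds     # left edge of cell 0: 0.0
assert dds == 0.5 and left_edge == 0.0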
Example #8
def test_psize_3d():
    procs = dec.get_psize(np.array([33, 35, 37]), 12)
    assert_array_equal(procs, np.array([3, 2, 2]))
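Building on the test, a hedged end-to-end sketch of `decompose_array` (same assumed `yt.utilities.decompose` module): the pieces it returns should tile the bounding box, one slice per process.

import numpy as np
from yt.utilities import decompose as dec  # assumed module path

dims = np.array([33, 35, 37])
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
psize = dec.get_psize(dims, 12)
gle, gre, shapes, slices = dec.decompose_array(dims, psize, bbox)
# One piece per process; the pieces' cells sum to the full grid.
assert len(slices) == 12
assert sum(int(np.prod(s)) for s in shapes) == int(np.prod(dims))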
Example #9
def load_uniform_grid(
    data,
    domain_dimensions,
    length_unit=None,
    bbox=None,
    nprocs=1,
    sim_time=0.0,
    mass_unit=None,
    time_unit=None,
    velocity_unit=None,
    magnetic_unit=None,
    periodicity=(True, True, True),
    geometry="cartesian",
    unit_system="cgs",
):
    r"""Load a uniform grid of data into yt as a
    :class:`~yt.frontends.stream.data_structures.StreamHandler`.

    This should allow a uniform grid of data to be loaded directly into yt and
    analyzed like any other dataset.  This comes with several caveats:

    * Units will be incorrect unless the unit system is explicitly
      specified.
    * Some functions may behave oddly, and parallelism will be
      disappointing or non-existent in most cases.
    * Particles may be difficult to integrate.

    Particle fields are detected as one-dimensional fields.

    Parameters
    ----------
    data : dict
        This is a dict of numpy arrays or (numpy array, unit spec) tuples.
        The keys are the field names.
    domain_dimensions : array_like
        This is the domain dimensions of the grid
    length_unit : string
        Unit to use for lengths.  Defaults to unitless.
    bbox : array_like (xdim:zdim, LE:RE), optional
        Size of computational domain in units specified by length_unit.
        Defaults to a cubic unit-length domain.
    nprocs : integer, optional
        If greater than 1, the data will be decomposed into this number of
        subarrays.
    sim_time : float, optional
        The simulation time in seconds
    mass_unit : string
        Unit to use for masses.  Defaults to unitless.
    time_unit : string
        Unit to use for times.  Defaults to unitless.
    velocity_unit : string
        Unit to use for velocities.  Defaults to unitless.
    magnetic_unit : string
        Unit to use for magnetic fields. Defaults to unitless.
    periodicity : tuple of booleans
        Determines whether the data will be treated as periodic along
        each axis
    geometry : string or tuple
        "cartesian", "cylindrical", "polar", "spherical", "geographic" or
        "spectral_cube".  Optionally, a tuple can be provided to specify the
        axis ordering -- for instance, to specify that the axis ordering should
        be z, x, y, this would be: ("cartesian", ("z", "x", "y")).  The same
        can be done for other coordinates, for instance:
        ("spherical", ("theta", "phi", "r")).

    Examples
    --------
    >>> np.random.seed(int(0x4D3D3D3))
    >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]])
    >>> arr = np.random.random((128, 128, 128))
    >>> data = dict(density=arr)
    >>> ds = load_uniform_grid(data, arr.shape, length_unit='cm',
    ...                        bbox=bbox, nprocs=12)
    >>> dd = ds.all_data()
    >>> dd['density']
    unyt_array([0.76017901, 0.96855994, 0.49205428, ..., 0.78798258,
                0.97569432, 0.99453904], 'g/cm**3')
    """
    from yt.frontends.stream.data_structures import (
        StreamDataset,
        StreamDictFieldHandler,
        StreamHandler,
    )
    from yt.frontends.stream.definitions import (
        assign_particle_data,
        process_data,
        set_particle_types,
    )

    domain_dimensions = np.array(domain_dimensions)
    if bbox is None:
        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64")
    domain_left_edge = np.array(bbox[:, 0], "float64")
    domain_right_edge = np.array(bbox[:, 1], "float64")
    grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1))
    # If someone included this, throw it away; it comes from the old API
    if "number_of_particles" in data:
        issue_deprecation_warning(
            "It is no longer necessary to include "
            "the number of particles in the data "
            "dict. The number of particles is "
            "determined from the sizes of the "
            "particle fields.",
            since="4.0.0",
            removal="4.1.0",
        )
        data.pop("number_of_particles")
    # First we fix our field names, apply units to data
    # and check for consistency of field shapes
    field_units, data, number_of_particles = process_data(
        data, grid_dims=tuple(domain_dimensions)
    )

    sfh = StreamDictFieldHandler()

    if number_of_particles > 0:
        particle_types = set_particle_types(data)
        # Used much further below.
        pdata = {"number_of_particles": number_of_particles}
        for key in list(data.keys()):
            if len(data[key].shape) == 1 or key[0] == "io":
                if not isinstance(key, tuple):
                    field = ("io", key)
                    mylog.debug("Reassigning '%s' to '%s'", key, field)
                else:
                    field = key
                sfh._additional_fields += (field,)
                pdata[field] = data.pop(key)
    else:
        particle_types = {}

    if nprocs > 1:
        temp = {}
        new_data = {}
        for key in data.keys():
            psize = get_psize(np.array(data[key].shape), nprocs)
            grid_left_edges, grid_right_edges, shapes, slices = decompose_array(
                data[key].shape, psize, bbox
            )
            grid_dimensions = np.array(shapes, dtype="int32")
            temp[key] = [data[key][slc] for slc in slices]
        for gid in range(nprocs):
            new_data[gid] = {}
            for key in temp.keys():
                new_data[gid].update({key: temp[key][gid]})
        sfh.update(new_data)
        del new_data, temp
    else:
        sfh.update({0: data})
        grid_left_edges = domain_left_edge
        grid_right_edges = domain_right_edge
        grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")

    if length_unit is None:
        length_unit = "code_length"
    if mass_unit is None:
        mass_unit = "code_mass"
    if time_unit is None:
        time_unit = "code_time"
    if velocity_unit is None:
        velocity_unit = "code_velocity"
    if magnetic_unit is None:
        magnetic_unit = "code_magnetic"

    handler = StreamHandler(
        grid_left_edges,
        grid_right_edges,
        grid_dimensions,
        grid_levels,
        -np.ones(nprocs, dtype="int64"),
        np.zeros(nprocs, dtype="int64").reshape(nprocs, 1),  # particle count
        np.zeros(nprocs).reshape((nprocs, 1)),
        sfh,
        field_units,
        (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
        particle_types=particle_types,
        periodicity=periodicity,
    )

    handler.name = "UniformGridData"
    handler.domain_left_edge = domain_left_edge
    handler.domain_right_edge = domain_right_edge
    handler.refine_by = 2
    if np.all(domain_dimensions[1:] == 1):
        dimensionality = 1
    elif domain_dimensions[2] == 1:
        dimensionality = 2
    else:
        dimensionality = 3
    handler.dimensionality = dimensionality
    handler.domain_dimensions = domain_dimensions
    handler.simulation_time = sim_time
    handler.cosmology_simulation = 0

    sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system)

    # Now figure out where the particles go
    if number_of_particles > 0:
        # This will update the stream handler too
        assign_particle_data(sds, pdata, bbox)

    return sds
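A short hedged usage sketch of the `nprocs` branch (assuming a standard yt install, where `load_uniform_grid` is exposed at the top level): with `nprocs > 1` the single in-memory array is decomposed into that many virtual grids.

import numpy as np
import yt

# Split a 64^3 in-memory array into 8 virtual grids; each grid holds
# its own slice of the data, as built in the nprocs > 1 branch above.
data = {"density": np.ones((64, 64, 64))}
ds = yt.load_uniform_grid(data, (64, 64, 64), length_unit="cm", nprocs=8)
print(ds.index.num_grids)  # expected: 8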