def _parse_parameter_file(self):
    """Read simulation parameters from the header of the primary VTK file.

    Parses coordinate lines to recover the domain left edge and root cell
    widths, then fills in the standard dataset attributes
    (``domain_left_edge``, ``domain_dimensions``, ``dimensionality``,
    ``current_time``, ``gamma``, ...).  The right edge is only provisional
    here (set to ``-domain_left_edge``) and is corrected later once all
    grid extents are known (see ``_parse_index``).
    """
    self._handle = open(self.parameter_filename, "rb")
    try:
        # Read the start of a grid to get simulation parameters.
        grid = {}
        grid["read_field"] = None
        line = self._handle.readline()
        while grid["read_field"] is None:
            parse_line(line, grid)
            splitup = line.strip().split()
            # Each *_COORDINATES record is followed by binary big-endian
            # doubles; two values suffice to get the cell width and the
            # (cell-centered -> edge-shifted) left edge along that axis.
            if chk23("X_COORDINATES") in splitup:
                grid["left_edge"] = np.zeros(3)
                grid["dds"] = np.zeros(3)
                v = np.fromfile(self._handle, dtype=">f8", count=2)
                grid["left_edge"][0] = v[0] - 0.5 * (v[1] - v[0])
                grid["dds"][0] = v[1] - v[0]
            if chk23("Y_COORDINATES") in splitup:
                v = np.fromfile(self._handle, dtype=">f8", count=2)
                grid["left_edge"][1] = v[0] - 0.5 * (v[1] - v[0])
                grid["dds"][1] = v[1] - v[0]
            if chk23("Z_COORDINATES") in splitup:
                v = np.fromfile(self._handle, dtype=">f8", count=2)
                grid["left_edge"][2] = v[0] - 0.5 * (v[1] - v[0])
                grid["dds"][2] = v[1] - v[0]
            if check_break(line):
                break
            line = self._handle.readline()

        self.domain_left_edge = grid["left_edge"]
        mylog.info(
            "Temporarily setting domain_right_edge = -domain_left_edge. "
            "This will be corrected automatically if it is not the case."
        )
        self.domain_right_edge = -self.domain_left_edge
        self.domain_width = self.domain_right_edge - self.domain_left_edge
        self.domain_dimensions = np.round(self.domain_width / grid["dds"]).astype(
            "int32"
        )
        # Athena outputs are always refined by a factor of 2 between levels.
        # (Previously written as a dead `refine_by = None` dance.)
        self.refine_by = 2

        # Collapse trailing unit-length axes into a lower dimensionality.
        dimensionality = 3
        if grid["dimensions"][2] == 1:
            dimensionality = 2
        if grid["dimensions"][1] == 1:
            dimensionality = 1
        if dimensionality <= 2:
            self.domain_dimensions[2] = np.int32(1)
        if dimensionality == 1:
            self.domain_dimensions[1] = np.int32(1)
        if dimensionality != 3 and self.nprocs > 1:
            raise RuntimeError("Virtual grids are only supported for 3D outputs!")
        self.dimensionality = dimensionality

        self.current_time = grid["time"]
        self.unique_identifier = hash(self.parameter_filename)
        self.cosmological_simulation = False
        self.num_ghost_zones = 0
        self.field_ordering = "fortran"
        self.boundary_conditions = [1] * 6
        self.periodicity = tuple(
            self.specified_parameters.get("periodicity", (True, True, True))
        )
        # Default to a monatomic ideal-gas adiabatic index.
        self.gamma = float(self.specified_parameters.get("gamma", 5.0 / 3.0))

        # Count sibling VTK files (other MPI ranks / refinement levels) so we
        # know how many pieces make up this output.
        dataset_dir = os.path.dirname(self.parameter_filename)
        dname = os.path.split(self.parameter_filename)[-1]
        if dataset_dir.endswith("id0"):
            dname = "id0/" + dname
            dataset_dir = dataset_dir[:-3]
        gridlistread = sglob(
            os.path.join(dataset_dir, f"id*/{dname[4:-9]}-id*{dname[-9:]}")
        )
        if "id0" in dname:
            gridlistread += sglob(
                os.path.join(dataset_dir, f"id*/lev*/{dname[4:-9]}*-lev*{dname[-9:]}")
            )
        else:
            gridlistread += sglob(
                os.path.join(dataset_dir, f"lev*/{dname[:-9]}*-lev*{dname[-9:]}")
            )
        # Filter out files whose basenames don't match our dot pattern
        # (e.g. restart dumps picked up by the glob).
        ndots = dname.count(".")
        gridlistread = [
            fn for fn in gridlistread if os.path.basename(fn).count(".") == ndots
        ]
        self.nvtk = len(gridlistread) + 1

        # Athena outputs are not cosmological; zero out all cosmology fields.
        self.current_redshift = (
            self.omega_lambda
        ) = self.omega_matter = self.hubble_constant = self.cosmological_simulation = 0.0
        self.parameters["Time"] = self.current_time  # Hardcode time conversion for now.
        # Hardcode for now until field staggering is supported.
        self.parameters["HydroMethod"] = 0
        self.parameters["Gamma"] = self.specified_parameters.get("gamma", 5.0 / 3.0)
        self.geometry = self.specified_parameters.get("geometry", "cartesian")
    finally:
        # Always release the header handle, even if parsing blows up above.
        self._handle.close()
    self.mu = self.specified_parameters.get("mu", default_mu)
def _parse_index(self):
    """Build the grid hierarchy by scanning every per-rank/per-level VTK file.

    Reads each grid's header for its level, left edge, cell widths, and
    dimensions; corrects the dataset's domain right edge/dimensions now that
    the true extent is known; and instantiates ``self.grids``.  When
    ``nprocs > 1`` each on-disk grid is further decomposed into virtual
    grids, one per process.

    Raises
    ------
    TypeError
        If a grid's dimensions cannot be reconciled with its cell count.
    """
    # Read the root grid header to validate dimensions vs. cell count.
    with open(self.index_filename, "rb") as f:
        grid = {}
        grid["read_field"] = None
        grid["read_type"] = None
        line = f.readline()
        while grid["read_field"] is None:
            parse_line(line, grid)
            if check_break(line):
                break
            line = f.readline()
    # It seems some datasets have a mismatch between ncells and
    # the actual grid dimensions.
    if np.prod(grid["dimensions"]) != grid["ncells"]:
        grid["dimensions"] -= 1
        grid["dimensions"][grid["dimensions"] == 0] = 1
    if np.prod(grid["dimensions"]) != grid["ncells"]:
        mylog.error(
            "product of dimensions %i not equal to number of cells %i",
            np.prod(grid["dimensions"]),
            grid["ncells"],
        )
        raise TypeError("grid dimensions inconsistent with cell count")

    # Need to determine how many grids: self.num_grids
    dataset_dir = os.path.dirname(self.index_filename)
    dname = os.path.split(self.index_filename)[-1]
    if dataset_dir.endswith("id0"):
        dname = "id0/" + dname
        dataset_dir = dataset_dir[:-3]

    gridlistread = sglob(
        os.path.join(dataset_dir, f"id*/{dname[4:-9]}-id*{dname[-9:]}")
    )
    gridlistread.insert(0, self.index_filename)
    if "id0" in dname:
        gridlistread += sglob(
            os.path.join(dataset_dir, f"id*/lev*/{dname[4:-9]}*-lev*{dname[-9:]}")
        )
    else:
        gridlistread += sglob(
            os.path.join(dataset_dir, f"lev*/{dname[:-9]}*-lev*{dname[-9:]}")
        )
    # Keep only files whose basenames match the expected dot pattern.
    ndots = dname.count(".")
    gridlistread = [
        fn for fn in gridlistread if os.path.basename(fn).count(".") == ndots
    ]
    self.num_grids = len(gridlistread)

    dxs = []
    levels = np.zeros(self.num_grids, dtype="int32")
    glis = np.empty((self.num_grids, 3), dtype="float64")  # grid left edges
    gdds = np.empty((self.num_grids, 3), dtype="float64")  # grid cell widths
    gdims = np.ones_like(glis)  # grid dimensions
    self.grid_filenames = gridlistread
    for j in range(self.num_grids):
        with open(gridlistread[j], "rb") as f:
            gridread = {}
            gridread["read_field"] = None
            gridread["read_type"] = None
            line = f.readline()
            while gridread["read_field"] is None:
                parse_line(line, gridread)
                splitup = line.strip().split()
                # Two big-endian doubles per axis give cell width and the
                # edge-shifted left edge (coordinates are cell-centered).
                if chk23("X_COORDINATES") in splitup:
                    gridread["left_edge"] = np.zeros(3)
                    gridread["dds"] = np.zeros(3)
                    v = np.fromfile(f, dtype=">f8", count=2)
                    gridread["left_edge"][0] = v[0] - 0.5 * (v[1] - v[0])
                    gridread["dds"][0] = v[1] - v[0]
                if chk23("Y_COORDINATES") in splitup:
                    v = np.fromfile(f, dtype=">f8", count=2)
                    gridread["left_edge"][1] = v[0] - 0.5 * (v[1] - v[0])
                    gridread["dds"][1] = v[1] - v[0]
                if chk23("Z_COORDINATES") in splitup:
                    v = np.fromfile(f, dtype=">f8", count=2)
                    gridread["left_edge"][2] = v[0] - 0.5 * (v[1] - v[0])
                    gridread["dds"][2] = v[1] - v[0]
                if check_break(line):
                    break
                line = f.readline()
        levels[j] = gridread.get("level", 0)
        glis[j, 0] = gridread["left_edge"][0]
        glis[j, 1] = gridread["left_edge"][1]
        glis[j, 2] = gridread["left_edge"][2]
        # It seems some datasets have a mismatch between ncells and
        # the actual grid dimensions.
        if np.prod(gridread["dimensions"]) != gridread["ncells"]:
            gridread["dimensions"] -= 1
            gridread["dimensions"][gridread["dimensions"] == 0] = 1
        if np.prod(gridread["dimensions"]) != gridread["ncells"]:
            mylog.error(
                "product of dimensions %i not equal to number of cells %i",
                np.prod(gridread["dimensions"]),
                gridread["ncells"],
            )
            raise TypeError("grid dimensions inconsistent with cell count")
        gdims[j, 0] = gridread["dimensions"][0]
        gdims[j, 1] = gridread["dimensions"][1]
        gdims[j, 2] = gridread["dimensions"][2]
        # Setting dds=1 for non-active dimensions in 1D/2D datasets
        gridread["dds"][gridread["dimensions"] == 1] = 1.0
        gdds[j, :] = gridread["dds"]

    gres = glis + gdims * gdds  # grid right edges
    # Now we convert the glis, which were left edges (floats), to indices
    # from the domain left edge.  Then we do a bunch of fixing now that we
    # know the extent of all the grids.
    glis = np.round(
        (glis - self.dataset.domain_left_edge.ndarray_view()) / gdds
    ).astype("int")
    new_dre = np.max(gres, axis=0)
    dre_units = self.dataset.domain_right_edge.uq
    self.dataset.domain_right_edge = np.round(new_dre, decimals=12) * dre_units
    self.dataset.domain_width = (
        self.dataset.domain_right_edge - self.dataset.domain_left_edge
    )
    self.dataset.domain_center = 0.5 * (
        self.dataset.domain_left_edge + self.dataset.domain_right_edge
    )
    self.dataset.domain_dimensions = np.round(
        self.dataset.domain_width / gdds[0]
    ).astype("int")

    # Collapse unused axes.  NOTE: was `np.int(1)`, which raises
    # AttributeError on NumPy >= 1.24 (the alias was removed); use np.int32
    # to match _parse_parameter_file.
    if self.dataset.dimensionality <= 2:
        self.dataset.domain_dimensions[2] = np.int32(1)
    if self.dataset.dimensionality == 1:
        self.dataset.domain_dimensions[1] = np.int32(1)
    dle = self.dataset.domain_left_edge
    dre = self.dataset.domain_right_edge
    dx_root = (
        self.dataset.domain_right_edge - self.dataset.domain_left_edge
    ) / self.dataset.domain_dimensions

    if self.dataset.nprocs > 1:
        # Decompose each on-disk grid into nprocs virtual grids.
        gle_all = []
        gre_all = []
        shapes_all = []
        levels_all = []
        new_gridfilenames = []
        file_offsets = []
        read_dims = []
        for i in range(levels.shape[0]):
            dx = dx_root / self.dataset.refine_by ** (levels[i])
            gle_orig = self.ds.arr(
                np.round(dle + dx * glis[i], decimals=12), "code_length"
            )
            gre_orig = self.ds.arr(
                np.round(gle_orig + dx * gdims[i], decimals=12), "code_length"
            )
            bbox = np.array([[le, re] for le, re in zip(gle_orig, gre_orig)])
            psize = get_psize(self.ds.domain_dimensions, self.ds.nprocs)
            gle, gre, shapes, slices = decompose_array(gdims[i], psize, bbox)
            gle_all += gle
            gre_all += gre
            shapes_all += shapes
            levels_all += [levels[i]] * self.dataset.nprocs
            new_gridfilenames += [self.grid_filenames[i]] * self.dataset.nprocs
            file_offsets += [
                [slc[0].start, slc[1].start, slc[2].start] for slc in slices
            ]
            read_dims += [
                np.array([gdims[i][0], gdims[i][1], shape[2]], dtype="int")
                for shape in shapes
            ]
        self.num_grids *= self.dataset.nprocs
        self.grids = np.empty(self.num_grids, dtype="object")
        self.grid_filenames = new_gridfilenames
        self.grid_left_edge = self.ds.arr(gle_all, "code_length")
        self.grid_right_edge = self.ds.arr(gre_all, "code_length")
        self.grid_dimensions = np.array(shapes_all, dtype="int32")
        gdds = (self.grid_right_edge - self.grid_left_edge) / self.grid_dimensions
        glis = np.round(
            (self.grid_left_edge - self.ds.domain_left_edge) / gdds
        ).astype("int")
        for i in range(self.num_grids):
            self.grids[i] = self.grid(
                i,
                self,
                levels_all[i],
                glis[i],
                shapes_all[i],
                file_offsets[i],
                read_dims[i],
            )
    else:
        self.grids = np.empty(self.num_grids, dtype="object")
        for i in range(levels.shape[0]):
            self.grids[i] = self.grid(
                i, self, levels[i], glis[i], gdims[i], [0] * 3, gdims[i]
            )
            dx = dx_root / self.dataset.refine_by ** (levels[i])
            dxs.append(dx)
        dx = self.ds.arr(dxs, "code_length")
        self.grid_left_edge = self.ds.arr(
            np.round(dle + dx * glis, decimals=12), "code_length"
        )
        self.grid_dimensions = gdims.astype("int32")
        self.grid_right_edge = self.ds.arr(
            np.round(self.grid_left_edge + dx * self.grid_dimensions, decimals=12),
            "code_length",
        )
    # Pin collapsed axes' right edges to the domain edge in 1D/2D.
    if self.dataset.dimensionality <= 2:
        self.grid_right_edge[:, 2] = dre[2]
    if self.dataset.dimensionality == 1:
        self.grid_right_edge[:, 1:] = dre[1:]
    self.grid_particle_count = np.zeros([self.num_grids, 1], dtype="int64")