def _initialize_index(self, data_file, regions):
    pcount = sum(data_file.total_particles.values())
    morton = np.empty(pcount, dtype='uint64')
    if pcount == 0:
        return morton
    mylog.debug("Initializing index % 5i (% 7i particles)",
                data_file.file_id, pcount)
    ind = 0
    with h5py.File(data_file.filename, mode="r") as f:
        if not f.keys():
            return None
        dx = np.finfo(f["FOF"]['CenterOfMass'].dtype).eps
        dx = 2.0 * self.ds.quan(dx, "code_length")
        for ptype in data_file.ds.particle_types_raw:
            if data_file.total_particles[ptype] == 0:
                continue
            pos = f[ptype]["CenterOfMass"][()].astype("float64")
            pos = np.resize(pos, (data_file.total_particles[ptype], 3))
            pos = data_file.ds.arr(pos, "code_length")
            # These are 32 bit numbers, so we give a little lee-way.
            # Otherwise, for big sets of particles, we often will bump into
            # the domain edges.  This helps alleviate that.
            np.clip(pos, self.ds.domain_left_edge + dx,
                    self.ds.domain_right_edge - dx, pos)
            if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
               np.any(pos.max(axis=0) > self.ds.domain_right_edge):
                raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                       self.ds.domain_left_edge,
                                       self.ds.domain_right_edge)
            regions.add_data_file(pos, data_file.file_id)
            morton[ind:ind + pos.shape[0]] = compute_morton(
                pos[:, 0], pos[:, 1], pos[:, 2],
                data_file.ds.domain_left_edge,
                data_file.ds.domain_right_edge)
            ind += pos.shape[0]
    return morton
def _initialize_index(self, data_file, regions):
    if self.index_ptype == "all":
        ptypes = self.ds.particle_types_raw
        pcount = sum(data_file.total_particles.values())
    else:
        ptypes = [self.index_ptype]
        pcount = data_file.total_particles[self.index_ptype]
    morton = np.empty(pcount, dtype='uint64')
    if pcount == 0:
        return morton
    mylog.debug("Initializing index % 5i (% 7i particles)",
                data_file.file_id, pcount)
    ind = 0
    with h5py.File(data_file.filename, "r") as f:
        if not f.keys():
            return None
        dx = np.finfo(f["Group"]["GroupPos"].dtype).eps
        dx = 2.0 * self.ds.quan(dx, "code_length")
        for ptype in ptypes:
            if data_file.total_particles[ptype] == 0:
                continue
            pos = data_file._get_particle_positions(ptype, f=f)
            pos = self.ds.arr(pos, "code_length")
            if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
               np.any(pos.max(axis=0) > self.ds.domain_right_edge):
                raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                       self.ds.domain_left_edge,
                                       self.ds.domain_right_edge)
            regions.add_data_file(pos, data_file.file_id)
            morton[ind:ind + pos.shape[0]] = compute_morton(
                pos[:, 0], pos[:, 1], pos[:, 2],
                self.ds.domain_left_edge,
                self.ds.domain_right_edge)
            ind += pos.shape[0]
    return morton
def _initialize_index(self, data_file, regions):
    pcount = data_file.header["num_halos"]
    morton = np.empty(pcount, dtype='uint64')
    mylog.debug("Initializing index % 5i (% 7i particles)",
                data_file.file_id, pcount)
    ind = 0
    with h5py.File(data_file.filename, "r") as f:
        if not f.keys():
            return None
        pos = np.empty((pcount, 3), dtype="float64")
        pos = data_file.ds.arr(pos, "code_length")
        dx = np.finfo(f['particle_position_x'].dtype).eps
        dx = 2.0 * self.ds.quan(dx, "code_length")
        pos[:, 0] = f["particle_position_x"].value
        pos[:, 1] = f["particle_position_y"].value
        pos[:, 2] = f["particle_position_z"].value
        # These are 32 bit numbers, so we give a little lee-way.
        # Otherwise, for big sets of particles, we often will bump into
        # the domain edges.  This helps alleviate that.
        np.clip(pos, self.ds.domain_left_edge + dx,
                self.ds.domain_right_edge - dx, pos)
        if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
           np.any(pos.max(axis=0) > self.ds.domain_right_edge):
            raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                   self.ds.domain_left_edge,
                                   self.ds.domain_right_edge)
        regions.add_data_file(pos, data_file.file_id)
        morton[ind:ind + pos.shape[0]] = compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge)
    return morton
def _initialize_index(self, data_file, regions):
    index_ptype = self.index_ptype
    f = h5py.File(data_file.filename, "r")
    if index_ptype == "all":
        pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
        keys = f.keys()
    else:
        pt = int(index_ptype[-1])
        pcount = f["/Header"].attrs["NumPart_ThisFile"][pt]
        keys = [index_ptype]
    morton = np.empty(pcount, dtype='uint64')
    ind = 0
    for key in keys:
        if not key.startswith("PartType"):
            continue
        if "Coordinates" not in f[key]:
            continue
        ds = f[key]["Coordinates"]
        dt = ds.dtype.newbyteorder("N")  # Native
        pos = np.empty(ds.shape, dtype=dt)
        pos[:] = ds
        regions.add_data_file(pos, data_file.file_id,
                              data_file.ds.filter_bbox)
        morton[ind:ind + pos.shape[0]] = compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge,
            data_file.ds.filter_bbox)
        ind += pos.shape[0]
    f.close()
    return morton
def _initialize_index(self, data_file, regions):
    pcount = data_file.header["num_halos"]
    morton = np.empty(pcount, dtype="uint64")
    mylog.debug("Initializing index % 5i (% 7i particles)",
                data_file.file_id, pcount)
    if pcount == 0:
        return morton
    ind = 0
    ptype = "halos"
    with open(data_file.filename, "rb") as f:
        pos = data_file._get_particle_positions(ptype, f=f)
        pos = data_file.ds.arr(pos, "code_length")
        if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or np.any(
                pos.max(axis=0) > self.ds.domain_right_edge):
            raise YTDomainOverflow(
                pos.min(axis=0), pos.max(axis=0),
                self.ds.domain_left_edge, self.ds.domain_right_edge,
            )
        regions.add_data_file(pos, data_file.file_id)
        morton[ind:ind + pos.shape[0]] = compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge,
        )
    return morton
def _initialize_index(self, data_file, regions):
    dle = self.ds.domain_left_edge.in_units("code_length").d
    dre = self.ds.domain_right_edge.in_units("code_length").d
    pcount = 0
    for dd in self.ds.midx.iter_bbox_data(dle, dre, ['x']):
        pcount += dd['x'].size
    morton = np.empty(pcount, dtype='uint64')
    ind = 0
    chunk_id = 0
    for dd in self.ds.midx.iter_bbox_data(dle, dre, ['x', 'y', 'z']):
        npart = dd['x'].size
        pos = np.empty((npart, 3), dtype=dd['x'].dtype)
        pos[:, 0] = dd['x']
        pos[:, 1] = dd['y']
        pos[:, 2] = dd['z']
        if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
           np.any(pos.max(axis=0) > self.ds.domain_right_edge):
            raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                   self.ds.domain_left_edge,
                                   self.ds.domain_right_edge)
        regions.add_data_file(pos, chunk_id)
        morton[ind:ind + npart] = compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge)
        ind += npart
    return morton
def _initialize_index(self, data_file, regions):
    pcount = data_file.header["num_halos"]
    morton = np.empty(pcount, dtype='uint64')
    mylog.debug("Initializing index % 5i (% 7i particles)",
                data_file.file_id, pcount)
    ind = 0
    with open(data_file.filename, "rb") as f:
        f.seek(data_file._position_offset, os.SEEK_SET)
        halos = np.fromfile(f, dtype=self._halo_dt, count=pcount)
        pos = np.empty((halos.size, 3), dtype="float64")
        # These positions are in Mpc, *not* "code" units
        pos = data_file.ds.arr(pos, "code_length")
        dx = np.finfo(halos['particle_position_x'].dtype).eps
        dx = 2.0 * self.ds.quan(dx, "code_length")
        pos[:, 0] = halos["particle_position_x"]
        pos[:, 1] = halos["particle_position_y"]
        pos[:, 2] = halos["particle_position_z"]
        # These are 32 bit numbers, so we give a little lee-way.
        # Otherwise, for big sets of particles, we often will bump into
        # the domain edges.  This helps alleviate that.
        np.clip(pos, self.ds.domain_left_edge + dx,
                self.ds.domain_right_edge - dx, pos)
        # del halos
        if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
           np.any(pos.max(axis=0) > self.ds.domain_right_edge):
            raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                   self.ds.domain_left_edge,
                                   self.ds.domain_right_edge)
        regions.add_data_file(pos, data_file.file_id)
        morton[ind:ind + pos.shape[0]] = compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge)
    return morton
def _initialize_index(self, data_file, regions):
    pcount = (data_file.ds.parameters["nhalos"] +
              data_file.ds.parameters["nsubs"])
    morton = np.empty(pcount, dtype="uint64")
    mylog.debug("Initializing index % 5i (% 7i particles)",
                data_file.file_id, pcount)
    if pcount == 0:
        return morton
    ind = 0
    pos = self._get_particle_positions()
    if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or np.any(
            pos.max(axis=0) > self.ds.domain_right_edge):
        raise YTDomainOverflow(
            pos.min(axis=0), pos.max(axis=0),
            self.ds.domain_left_edge, self.ds.domain_right_edge,
        )
    regions.add_data_file(pos, data_file.file_id)
    morton[ind:ind + pos.shape[0]] = compute_morton(
        pos[:, 0], pos[:, 1], pos[:, 2],
        data_file.ds.domain_left_edge,
        data_file.ds.domain_right_edge,
    )
    return morton
def _initialize_index(self, data_file, regions):
    halos = data_file.read_data(usecols=['ID', 'Xc', 'Yc', 'Zc'])
    pcount = len(halos['ID'])
    morton = np.empty(pcount, dtype='uint64')
    mylog.debug('Initializing index % 5i (% 7i particles)',
                data_file.file_id, pcount)
    if pcount == 0:
        return morton
    ind = 0
    pos = np.empty((pcount, 3), dtype='float64')
    pos = data_file.ds.arr(pos, 'code_length')
    dx = np.finfo(halos['Xc'].dtype).eps
    dx = 2.0 * self.ds.quan(dx, 'code_length')
    pos[:, 0] = halos['Xc']
    pos[:, 1] = halos['Yc']
    pos[:, 2] = halos['Zc']
    dle = self.ds.domain_left_edge
    dre = self.ds.domain_right_edge
    # These are 32 bit numbers, so we give a little lee-way.
    # Otherwise, for big sets of particles, we often will bump into
    # the domain edges.  This helps alleviate that.
    np.clip(pos, dle + dx, dre - dx, pos)
    if np.any(pos.min(axis=0) < dle) or np.any(pos.max(axis=0) > dre):
        raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0), dle, dre)
    regions.add_data_file(pos, data_file.file_id)
    morton[ind:ind + pos.shape[0]] = compute_morton(
        pos[:, 0], pos[:, 1], pos[:, 2], dle, dre)
    return morton
def _initialize_index(self, data_file, regions):
    dle = self.ds.domain_left_edge.in_units("code_length").d
    dre = self.ds.domain_right_edge.in_units("code_length").d
    pcount = 0
    for dd in self.ds.midx.iter_bbox_data(dle, dre, ["x"]):
        pcount += dd["x"].size
    morton = np.empty(pcount, dtype="uint64")
    ind = 0
    chunk_id = 0
    for dd in self.ds.midx.iter_bbox_data(dle, dre, ["x", "y", "z"]):
        npart = dd["x"].size
        pos = np.empty((npart, 3), dtype=dd["x"].dtype)
        pos[:, 0] = dd["x"]
        pos[:, 1] = dd["y"]
        pos[:, 2] = dd["z"]
        if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or np.any(
                pos.max(axis=0) > self.ds.domain_right_edge):
            raise YTDomainOverflow(
                pos.min(axis=0), pos.max(axis=0),
                self.ds.domain_left_edge, self.ds.domain_right_edge,
            )
        regions.add_data_file(pos, chunk_id)
        morton[ind:ind + npart] = compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge,
        )
        ind += npart
    return morton
def _initialize_index(self, data_file, regions):
    pcount = data_file.header["num_halos"]
    morton = np.empty(pcount, dtype='uint64')
    mylog.debug("Initializing index % 5i (% 7i particles)",
                data_file.file_id, pcount)
    ind = 0
    with h5py.File(data_file.filename, "r") as f:
        if not f.keys():
            return None
        pos = np.empty((pcount, 3), dtype="float64")
        pos = data_file.ds.arr(pos, "code_length")
        dx = np.finfo(f['particle_position_x'].dtype).eps
        dx = 2.0 * self.ds.quan(dx, "code_length")
        pos[:, 0] = f["particle_position_x"].value
        pos[:, 1] = f["particle_position_y"].value
        pos[:, 2] = f["particle_position_z"].value
        # These are 32 bit numbers, so we give a little lee-way.
        # Otherwise, for big sets of particles, we often will bump into
        # the domain edges.  This helps alleviate that.
        np.clip(pos, self.ds.domain_left_edge + dx,
                self.ds.domain_right_edge - dx, pos)
        if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
           np.any(pos.max(axis=0) > self.ds.domain_right_edge):
            raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                   self.ds.domain_left_edge,
                                   self.ds.domain_right_edge)
        regions.add_data_file(pos, data_file.file_id)
        morton[ind:ind + pos.shape[0]] = compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge)
    return morton
def morton(self):
    self.validate()
    eps = np.finfo(self.dtype).eps
    LE = self.min(axis=0)
    LE -= np.abs(LE) * eps
    RE = self.max(axis=0)
    RE += np.abs(RE) * eps
    morton = compute_morton(self[:, 0], self[:, 1], self[:, 2], LE, RE)
    return morton
def morton(self):
    self.validate()
    eps = np.finfo(self.dtype).eps
    LE = self.min(axis=0)
    LE -= np.abs(LE) * eps
    RE = self.max(axis=0)
    RE += np.abs(RE) * eps
    morton = compute_morton(
        self[:, 0], self[:, 1], self[:, 2], LE, RE)
    return morton
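# All of the snippets in this collection delegate the actual key generation
# to compute_morton.  As a point of reference only, the sketch below shows
# one conventional way to build a 63-bit Morton (z-order) key in pure NumPy:
# normalize positions to the domain, quantize each axis to 21 bits, and
# interleave the bits.  This is an illustration of the general technique,
# not yt's implementation; the order constant and helper names here are
# hypothetical and are not part of yt's API.

import numpy as np

_ORDER = 21  # bits per axis; 3 * 21 = 63 bits fit in a uint64


def _interleave_bits(q):
    """Spread the low 21 bits of ``q`` so consecutive bits land 3 apart."""
    q = q.astype(np.uint64) & np.uint64(0x1FFFFF)
    q = (q | (q << np.uint64(32))) & np.uint64(0x1F00000000FFFF)
    q = (q | (q << np.uint64(16))) & np.uint64(0x1F0000FF0000FF)
    q = (q | (q << np.uint64(8))) & np.uint64(0x100F00F00F00F00F)
    q = (q | (q << np.uint64(4))) & np.uint64(0x10C30C30C30C30C3)
    q = (q | (q << np.uint64(2))) & np.uint64(0x1249249249249249)
    return q


def morton_keys(pos, left_edge, right_edge):
    """Morton keys for an (N, 3) array of positions inside the domain."""
    left = np.asarray(left_edge, dtype="float64")
    right = np.asarray(right_edge, dtype="float64")
    # Normalize to [0, 1], then quantize to integers in [0, 2**_ORDER).
    u = np.clip((np.asarray(pos, dtype="float64") - left) / (right - left),
                0.0, 1.0)
    q = np.minimum((u * (1 << _ORDER)).astype(np.uint64),
                   np.uint64((1 << _ORDER) - 1))
    return (_interleave_bits(q[:, 0])
            | (_interleave_bits(q[:, 1]) << np.uint64(1))
            | (_interleave_bits(q[:, 2]) << np.uint64(2)))


# Example use:
#   keys = morton_keys(np.random.random((100, 3)), [0., 0., 0.], [1., 1., 1.])
# Nearby positions tend to get nearby keys, which is why sorting on them
# groups spatially local particles together.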
def _initialize_index(self, data_file, regions):
    count = sum(data_file.total_particles.values())
    DLE = data_file.ds.domain_left_edge
    DRE = data_file.ds.domain_right_edge
    with open(data_file.filename, "rb") as f:
        # We add an additional 4 for the first record.
        f.seek(data_file._position_offset + 4)
        # The first total_particles * 3 values are positions
        pp = np.fromfile(f, dtype='float32', count=count * 3)
        pp.shape = (count, 3)
        regions.add_data_file(pp, data_file.file_id,
                              data_file.ds.filter_bbox)
        morton = compute_morton(pp[:, 0], pp[:, 1], pp[:, 2], DLE, DRE,
                                data_file.ds.filter_bbox)
    return morton
def _get_morton_from_position(self, data_file, count, offset_count,
                              regions, DLE, DRE):
    with open(data_file.filename, "rb") as f:
        # We add an additional 4 for the first record.
        f.seek(data_file._position_offset + 4 + offset_count * 12)
        # The first total_particles * 3 values are positions
        pp = np.fromfile(f, dtype=self._endian + self._float_type,
                         count=count * 3)
        pp.shape = (count, 3)
        pp = pp.astype(self._float_type)
        regions.add_data_file(pp, data_file.file_id,
                              data_file.ds.filter_bbox)
        morton = compute_morton(pp[:, 0], pp[:, 1], pp[:, 2], DLE, DRE,
                                data_file.ds.filter_bbox)
    return morton
def _initialize_index(self, data_file, regions):
    totcount = 4096**2  # file is always this size
    count = data_file.ds.parameters['lspecies'][-1]
    DLE = data_file.ds.domain_left_edge
    DRE = data_file.ds.domain_right_edge
    dx = (DRE - DLE) / 2**_ORDER_MAX
    with open(data_file.filename, "rb") as f:
        # The first total_particles * 3 values are positions
        pp = np.fromfile(f, dtype='>f4', count=totcount * 3)
        pp.shape = (3, totcount)
        pp = pp[:, :count]  # remove zeros
        pp = np.transpose(pp).astype(np.float32)  # cast as float32 for compute_morton
        pp = (pp - 1.) / data_file.ds.parameters['ng']  # correct the dm particle units
        regions.add_data_file(pp, data_file.file_id)
        morton = compute_morton(pp[:, 0], pp[:, 1], pp[:, 2], DLE, DRE)
    return morton
def _initialize_index(self, data_file, regions):
    totcount = 4096**2  # file is always this size
    count = data_file.ds.parameters['lspecies'][-1]
    DLE = data_file.ds.domain_left_edge
    DRE = data_file.ds.domain_right_edge
    with open(data_file.filename, "rb") as f:
        # The first total_particles * 3 values are positions
        pp = np.fromfile(f, dtype='>f4', count=totcount * 3)
        pp.shape = (3, totcount)
        pp = pp[:, :count]  # remove zeros
        pp = np.transpose(pp).astype(np.float32)  # cast as float32 for compute_morton
        pp = (pp - 1.) / data_file.ds.parameters['ng']  # correct the dm particle units
        regions.add_data_file(pp, data_file.file_id)
        morton = compute_morton(pp[:, 0], pp[:, 1], pp[:, 2], DLE, DRE)
    return morton
def _initialize_index(self, data_file, regions):
    header = self.ds.parameters
    ptypes = header["particle_count"][data_file.file_id].keys()
    pcount = sum(header["particle_count"][data_file.file_id].values())
    morton = np.empty(pcount, dtype='uint64')
    ind = 0
    for ptype in ptypes:
        s = self._open_stream(data_file, (ptype, "Coordinates"))
        c = np.frombuffer(s, dtype="float64")
        # Three coordinates per particle; shape entries must be integers.
        c.shape = (c.shape[0] // 3, 3)
        regions.add_data_file(c, data_file.file_id,
                              data_file.ds.filter_bbox)
        morton[ind:ind + c.shape[0]] = compute_morton(
            c[:, 0], c[:, 1], c[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge,
            data_file.ds.filter_bbox)
        ind += c.shape[0]
    return morton
def _initialize_index(self, data_file, regions):
    x, y, z = (self._handle[ax] for ax in 'xyz')
    pcount = x.size
    morton = np.empty(pcount, dtype='uint64')
    ind = 0
    while ind < pcount:
        npart = min(CHUNKSIZE, pcount - ind)
        pos = np.empty((npart, 3), dtype=x.dtype)
        pos[:, 0] = x[ind:ind + npart]
        pos[:, 1] = y[ind:ind + npart]
        pos[:, 2] = z[ind:ind + npart]
        regions.add_data_file(pos, data_file.file_id)
        morton[ind:ind + npart] = compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge)
        ind += CHUNKSIZE
    return morton
def _morton_index(field, data):
    """This is the morton index, which is properly a uint64 field.

    Because we make some assumptions that the fields returned by derived
    fields are float64, this returns a "view" on the data that is float64.
    To get back the original uint64, you need to call .view("uint64") on it;
    however, it should be true that if you sort the uint64, you will get the
    same order as if you sort the float64 view.
    """
    eps = np.finfo("f8").eps
    uq = data.ds.domain_left_edge.uq
    LE = data.ds.domain_left_edge - eps * uq
    RE = data.ds.domain_right_edge + eps * uq
    # .ravel() only copies if it needs to
    morton = compute_morton(data["index", "x"].ravel(),
                            data["index", "y"].ravel(),
                            data["index", "z"].ravel(), LE, RE)
    morton.shape = data["index", "x"].shape
    return morton.view("f8")
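# A quick, standalone sanity check of the claim in the docstring above: for
# uint64 values below 2**63 (Morton keys use at most 3 * 21 = 63 bits), the
# sign bit of the reinterpreted float64 is zero, so bit-pattern order and
# float order agree and sorting the float64 view reproduces the uint64
# order.  This snippet is illustrative only and is not part of yt.

import numpy as np

rng = np.random.default_rng(0)
keys = rng.integers(0, 2**62, size=1000, dtype=np.uint64)
as_float = keys.view("float64")
assert np.array_equal(np.argsort(keys, kind="stable"),
                      np.argsort(as_float, kind="stable"))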
def _initialize_index(self, data_file, regions):
    halos = data_file.read_data(usecols=['ID'])
    pcount = len(halos['ID'])
    morton = np.empty(pcount, dtype='uint64')
    mylog.debug('Initializing index % 5i (% 7i particles)',
                data_file.file_id, pcount)
    if pcount == 0:
        return morton
    ind = 0
    pos = data_file._get_particle_positions('halos')
    pos = data_file.ds.arr(pos, 'code_length')
    dle = self.ds.domain_left_edge
    dre = self.ds.domain_right_edge
    if np.any(pos.min(axis=0) < dle) or np.any(pos.max(axis=0) > dre):
        raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0), dle, dre)
    regions.add_data_file(pos, data_file.file_id)
    morton[ind:ind + pos.shape[0]] = compute_morton(
        pos[:, 0], pos[:, 1], pos[:, 2], dle, dre)
    return morton
def _initialize_index(self, data_file, regions):
    x, y, z = (self._handle[ax] for ax in 'xyz')
    pcount = x.size
    morton = np.empty(pcount, dtype='uint64')
    ind = 0
    while ind < pcount:
        npart = min(CHUNKSIZE, pcount - ind)
        pos = np.empty((npart, 3), dtype=x.dtype)
        pos[:, 0] = x[ind:ind + npart]
        pos[:, 1] = y[ind:ind + npart]
        pos[:, 2] = z[ind:ind + npart]
        regions.add_data_file(pos, data_file.file_id)
        morton[ind:ind + npart] = compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge)
        ind += CHUNKSIZE
    return morton
def _initialize_index(self, data_file, regions):
    header = self.ds.parameters
    ptypes = header["particle_count"][data_file.file_id].keys()
    pcount = sum(header["particle_count"][data_file.file_id].values())
    morton = np.empty(pcount, dtype='uint64')
    ind = 0
    for ptype in ptypes:
        s = self._open_stream(data_file, (ptype, "Coordinates"))
        c = np.frombuffer(s, dtype="float64")
        # Three coordinates per particle; shape entries must be integers.
        c.shape = (c.shape[0] // 3, 3)
        regions.add_data_file(c, data_file.file_id,
                              data_file.ds.filter_bbox)
        morton[ind:ind + c.shape[0]] = compute_morton(
            c[:, 0], c[:, 1], c[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge,
            data_file.ds.filter_bbox)
        ind += c.shape[0]
    return morton
def _initialize_index(self, data_file, regions):
    p_fields = self._handle["/tracer particles"]
    px, py, pz = self._position_fields
    pcount = self._count_particles(data_file)["io"]
    morton = np.empty(pcount, dtype='uint64')
    ind = 0
    while ind < pcount:
        npart = min(self._chunksize, pcount - ind)
        pos = np.empty((npart, 3), dtype="=f8")
        pos[:, 0] = p_fields[ind:ind + npart, px]
        pos[:, 1] = p_fields[ind:ind + npart, py]
        pos[:, 2] = p_fields[ind:ind + npart, pz]
        regions.add_data_file(pos, data_file.file_id)
        morton[ind:ind + npart] = \
            compute_morton(pos[:, 0], pos[:, 1], pos[:, 2],
                           data_file.ds.domain_left_edge,
                           data_file.ds.domain_right_edge)
        ind += self._chunksize
    return morton
def _initialize_index(self, data_file, regions): all_count = self._count_particles(data_file) pcount = sum(all_count.values()) morton = np.empty(pcount, dtype="uint64") mylog.debug("Initializing index % 5i (% 7i particles)", data_file.file_id, pcount) ind = 0 with h5py.File(data_file.filename, mode="r") as f: for ptype in all_count: if ptype not in f or all_count[ptype] == 0: continue pos = np.empty((all_count[ptype], 3), dtype="float64") units = _get_position_array_units(ptype, f, "x") if ptype == "grid": dx = f["grid"]["dx"][()].min() dx = self.ds.quan(dx, parse_h5_attr(f["grid"]["dx"], "units")).to("code_length") else: dx = 2.0 * np.finfo( f[ptype]["particle_position_x"].dtype).eps dx = self.ds.quan(dx, units).to("code_length") pos[:, 0] = _get_position_array(ptype, f, "x") pos[:, 1] = _get_position_array(ptype, f, "y") pos[:, 2] = _get_position_array(ptype, f, "z") pos = self.ds.arr(pos, units).to("code_length") dle = self.ds.domain_left_edge.to("code_length") dre = self.ds.domain_right_edge.to("code_length") # These are 32 bit numbers, so we give a little lee-way. # Otherwise, for big sets of particles, we often will bump into the # domain edges. This helps alleviate that. np.clip(pos, dle + dx, dre - dx, pos) if np.any(pos.min(axis=0) < dle) or np.any( pos.max(axis=0) > dre): raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0), dle, dre) regions.add_data_file(pos, data_file.file_id) morton[ind:ind + pos.shape[0]] = compute_morton( pos[:, 0], pos[:, 1], pos[:, 2], dle, dre) ind += pos.shape[0] return morton
def _initialize_index(self, data_file, regions):
    # self.fields[g.id][fname] is the pattern here
    morton = []
    for ptype in self.ds.particle_types_raw:
        try:
            pos = np.column_stack(
                self.fields[data_file.filename][
                    (ptype, "particle_position_%s" % ax)]
                for ax in 'xyz')
        except KeyError:
            pos = self.fields[data_file.filename][
                ptype, "particle_position"]
        if np.any(pos.min(axis=0) < data_file.ds.domain_left_edge) or \
           np.any(pos.max(axis=0) > data_file.ds.domain_right_edge):
            raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                   data_file.ds.domain_left_edge,
                                   data_file.ds.domain_right_edge)
        regions.add_data_file(pos, data_file.file_id)
        morton.append(compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge))
    return np.concatenate(morton)
def _initialize_index(self, data_file, regions):
    # self.fields[g.id][fname] is the pattern here
    morton = []
    for ptype in self.ds.particle_types_raw:
        try:
            pos = np.column_stack(
                self.fields[data_file.filename][
                    (ptype, "particle_position_%s" % ax)]
                for ax in 'xyz')
        except KeyError:
            pos = self.fields[data_file.filename][
                ptype, "particle_position"]
        if np.any(pos.min(axis=0) < data_file.ds.domain_left_edge) or \
           np.any(pos.max(axis=0) > data_file.ds.domain_right_edge):
            raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                   data_file.ds.domain_left_edge,
                                   data_file.ds.domain_right_edge)
        regions.add_data_file(pos, data_file.file_id)
        morton.append(compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge))
    return np.concatenate(morton)
def _initialize_index(self, data_file, regions): ds = data_file.ds morton = np.empty(sum(data_file.total_particles.values()), dtype="uint64") ind = 0 DLE, DRE = ds.domain_left_edge, ds.domain_right_edge dx = (DRE - DLE) / (2**_ORDER_MAX) self.domain_left_edge = DLE.in_units("code_length").ndarray_view() self.domain_right_edge = DRE.in_units("code_length").ndarray_view() with open(data_file.filename, "rb") as f: f.seek(ds._header_offset) for iptype, ptype in enumerate(self._ptypes): # We'll just add the individual types separately count = data_file.total_particles[ptype] if count == 0: continue start, stop = ind, ind + count while ind < stop: c = min(CHUNKSIZE, stop - ind) pp = np.fromfile(f, dtype = self._pdtypes[ptype], count = c) mis = np.empty(3, dtype="float64") mas = np.empty(3, dtype="float64") for axi, ax in enumerate('xyz'): mi = pp["Coordinates"][ax].min() ma = pp["Coordinates"][ax].max() mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax) mis[axi] = mi mas[axi] = ma pos = np.empty((pp.size, 3), dtype="float64") for i, ax in enumerate("xyz"): eps = np.finfo(pp["Coordinates"][ax].dtype).eps pos[:,i] = pp["Coordinates"][ax] regions.add_data_file(pos, data_file.file_id, data_file.ds.filter_bbox) morton[ind:ind+c] = compute_morton( pos[:,0], pos[:,1], pos[:,2], DLE, DRE, data_file.ds.filter_bbox) ind += c mylog.info("Adding %0.3e particles", morton.size) return morton
def _initialize_index(self, data_file, regions): ds = data_file.ds morton = np.empty(sum(data_file.total_particles.values()), dtype="uint64") ind = 0 DLE, DRE = ds.domain_left_edge, ds.domain_right_edge dx = (DRE - DLE) / (2**_ORDER_MAX) self.domain_left_edge = DLE.in_units("code_length").ndarray_view() self.domain_right_edge = DRE.in_units("code_length").ndarray_view() with open(data_file.filename, "rb") as f: f.seek(ds._header_offset) for iptype, ptype in enumerate(self._ptypes): # We'll just add the individual types separately count = data_file.total_particles[ptype] if count == 0: continue start, stop = ind, ind + count while ind < stop: c = min(CHUNKSIZE, stop - ind) pp = np.fromfile(f, dtype=self._pdtypes[ptype], count=c) mis = np.empty(3, dtype="float64") mas = np.empty(3, dtype="float64") for axi, ax in enumerate('xyz'): mi = pp["Coordinates"][ax].min() ma = pp["Coordinates"][ax].max() mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax) mis[axi] = mi mas[axi] = ma pos = np.empty((pp.size, 3), dtype="float64") for i, ax in enumerate("xyz"): eps = np.finfo(pp["Coordinates"][ax].dtype).eps pos[:, i] = pp["Coordinates"][ax] regions.add_data_file(pos, data_file.file_id, data_file.ds.filter_bbox) morton[ind:ind + c] = compute_morton( pos[:, 0], pos[:, 1], pos[:, 2], DLE, DRE, data_file.ds.filter_bbox) ind += c mylog.info("Adding %0.3e particles", morton.size) return morton
def _initialize_index(self, data_file, regions):
    f = _get_h5_handle(data_file.filename)
    pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
    morton = np.empty(pcount, dtype='uint64')
    ind = 0
    for key in f.keys():
        if not key.startswith("PartType"):
            continue
        if "Coordinates" not in f[key]:
            continue
        ds = f[key]["Coordinates"]
        dt = ds.dtype.newbyteorder("N")  # Native
        pos = np.empty(ds.shape, dtype=dt)
        pos[:] = ds
        regions.add_data_file(pos, data_file.file_id,
                              data_file.ds.filter_bbox)
        morton[ind:ind + pos.shape[0]] = compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2],
            data_file.ds.domain_left_edge,
            data_file.ds.domain_right_edge,
            data_file.ds.filter_bbox)
        ind += pos.shape[0]
    f.close()
    return morton
def _initialize_index(self, data_file, regions):
    all_count = self._count_particles(data_file)
    pcount = sum(all_count.values())
    morton = np.empty(pcount, dtype='uint64')
    mylog.debug("Initializing index % 5i (% 7i particles)",
                data_file.file_id, pcount)
    ind = 0
    with h5py.File(data_file.filename, "r") as f:
        for ptype in all_count:
            if ptype not in f or all_count[ptype] == 0:
                continue
            pos = np.empty((all_count[ptype], 3), dtype="float64")
            pos = data_file.ds.arr(pos, "code_length")
            if ptype == "grid":
                dx = f["grid"]["pdx"].value.min()
            else:
                raise NotImplementedError
            dx = self.ds.quan(dx, "code_length")
            pos[:, 0] = _get_position_array(ptype, f, "px")
            pos[:, 1] = _get_position_array(ptype, f, "py")
            pos[:, 2] = np.zeros(all_count[ptype], dtype="float64") + \
                self.ds.domain_left_edge[2].in_cgs().d
            # These are 32 bit numbers, so we give a little lee-way.
            # Otherwise, for big sets of particles, we often will bump into
            # the domain edges.  This helps alleviate that.
            np.clip(pos, self.ds.domain_left_edge + dx,
                    self.ds.domain_right_edge - dx, pos)
            if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
               np.any(pos.max(axis=0) > self.ds.domain_right_edge):
                raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                       self.ds.domain_left_edge,
                                       self.ds.domain_right_edge)
            regions.add_data_file(pos, data_file.file_id)
            morton[ind:ind + pos.shape[0]] = compute_morton(
                pos[:, 0], pos[:, 1], pos[:, 2],
                data_file.ds.domain_left_edge,
                data_file.ds.domain_right_edge)
            ind += pos.shape[0]
    return morton
def _initialize_index(self, data_file, regions): pcount = data_file.header["num_halos"] morton = np.empty(pcount, dtype='uint64') mylog.debug("Initializing index % 5i (% 7i particles)", data_file.file_id, pcount) ind = 0 if pcount == 0: return None ptype = 'halos' with h5py.File(data_file.filename, "r") as f: if not f.keys(): return None units = parse_h5_attr(f["particle_position_x"], "units") pos = data_file._get_particle_positions(ptype, f=f) pos = data_file.ds.arr(pos, units).to("code_length") dle = self.ds.domain_left_edge.to("code_length") dre = self.ds.domain_right_edge.to("code_length") if np.any(pos.min(axis=0) < dle) or \ np.any(pos.max(axis=0) > dre): raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0), dle, dre) regions.add_data_file(pos, data_file.file_id) morton[ind:ind + pos.shape[0]] = compute_morton( pos[:, 0], pos[:, 1], pos[:, 2], dle, dre) return morton
def _initialize_index(self, data_file, regions): pcount = data_file.header["num_halos"] morton = np.empty(pcount, dtype='uint64') mylog.debug("Initializing index % 5i (% 7i particles)", data_file.file_id, pcount) if pcount == 0: return morton ind = 0 with open(data_file.filename, "rb") as f: f.seek(data_file._position_offset, os.SEEK_SET) halos = np.fromfile(f, dtype=self._halo_dt, count = pcount) pos = np.empty((halos.size, 3), dtype="float64") # These positions are in Mpc, *not* "code" units pos = data_file.ds.arr(pos, "code_length") dx = np.finfo(halos['particle_position_x'].dtype).eps dx = 2.0*self.ds.quan(dx, "code_length") pos[:,0] = halos["particle_position_x"] pos[:,1] = halos["particle_position_y"] pos[:,2] = halos["particle_position_z"] # These are 32 bit numbers, so we give a little lee-way. # Otherwise, for big sets of particles, we often will bump into the # domain edges. This helps alleviate that. np.clip(pos, self.ds.domain_left_edge + dx, self.ds.domain_right_edge - dx, pos) del halos if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \ np.any(pos.max(axis=0) > self.ds.domain_right_edge): raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0), self.ds.domain_left_edge, self.ds.domain_right_edge) regions.add_data_file(pos, data_file.file_id) morton[ind:ind+pos.shape[0]] = compute_morton( pos[:,0], pos[:,1], pos[:,2], data_file.ds.domain_left_edge, data_file.ds.domain_right_edge) return morton
def _initialize_index(self, data_file, regions): pcount = sum(data_file.total_particles.values()) morton = np.empty(pcount, dtype='uint64') if pcount == 0: return morton mylog.debug("Initializing index % 5i (% 7i particles)", data_file.file_id, pcount) ind = 0 with h5py.File(data_file.filename, "r") as f: if not f.keys(): return None dx = np.finfo(f["Group"]["GroupPos"].dtype).eps dx = 2.0*self.ds.quan(dx, "code_length") for ptype in data_file.ds.particle_types_raw: if data_file.total_particles[ptype] == 0: continue pos = f[ptype]["%sPos" % ptype].value.astype("float64") pos = np.resize(pos, (data_file.total_particles[ptype], 3)) pos = data_file.ds.arr(pos, "code_length") # These are 32 bit numbers, so we give a little lee-way. # Otherwise, for big sets of particles, we often will bump into the # domain edges. This helps alleviate that. np.clip(pos, self.ds.domain_left_edge + dx, self.ds.domain_right_edge - dx, pos) if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \ np.any(pos.max(axis=0) > self.ds.domain_right_edge): raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0), self.ds.domain_left_edge, self.ds.domain_right_edge) regions.add_data_file(pos, data_file.file_id) morton[ind:ind+pos.shape[0]] = compute_morton( pos[:,0], pos[:,1], pos[:,2], data_file.ds.domain_left_edge, data_file.ds.domain_right_edge) ind += pos.shape[0] return morton
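# All of the _initialize_index variants above share one skeleton: read the
# positions for each particle type, optionally nudge them just inside the
# domain, register them with the ``regions`` bitmap, and fill a preallocated
# uint64 array with Morton keys.  The condensed sketch below restates that
# skeleton using only calls already seen above (np.empty, np.clip,
# regions.add_data_file, compute_morton, YTDomainOverflow); it is a hedged
# summary, not a drop-in frontend implementation, and ``iter_positions`` is
# a hypothetical stand-in for whichever reader a given frontend uses.

def _initialize_index_sketch(self, data_file, regions):
    pcount = sum(data_file.total_particles.values())
    morton = np.empty(pcount, dtype="uint64")
    if pcount == 0:
        return morton
    dle = self.ds.domain_left_edge
    dre = self.ds.domain_right_edge
    ind = 0
    for pos in self.iter_positions(data_file):  # hypothetical reader
        pos = data_file.ds.arr(pos.astype("float64"), "code_length")
        # Give 32 bit inputs a little leeway off the domain edges, as the
        # snippets above do, then verify nothing lies outside the domain.
        dx = 2.0 * np.finfo("float32").eps * self.ds.quan(1.0, "code_length")
        np.clip(pos, dle + dx, dre - dx, pos)
        if np.any(pos.min(axis=0) < dle) or np.any(pos.max(axis=0) > dre):
            raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                   dle, dre)
        regions.add_data_file(pos, data_file.file_id)
        morton[ind:ind + pos.shape[0]] = compute_morton(
            pos[:, 0], pos[:, 1], pos[:, 2], dle, dre)
        ind += pos.shape[0]
    return morton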
def smooth(self, positions, fields=None, index_fields=None, method=None,
           create_octree=False, nneighbors=64):
    r"""Operate on the mesh, in a particle-against-mesh fashion, with
    non-local input.

    This uses the octree indexing system to call a "smoothing" operation
    (defined in yt/geometry/particle_smooth.pyx) that can take input from
    several (non-local) particles and construct some value on the mesh.
    The canonical example is to conduct a smoothing kernel operation on
    the mesh.

    Parameters
    ----------
    positions : array_like (Nx3)
        The positions of all of the particles to be examined.  A new
        indexed octree will be constructed on these particles.
    fields : list of arrays
        All the necessary fields for computing the particle operation.
        For instance, this might include mass, velocity, etc.
    index_fields : list of arrays
        All of the fields defined on the mesh that may be used as input
        to the operation.
    method : string
        This is the "method name" which will be looked up in the
        `particle_smooth` namespace as `methodname_smooth`.  Current
        methods include `volume_weighted`, `nearest`, `idw`,
        `nth_neighbor`, and `density`.
    create_octree : bool
        Should we construct a new octree for indexing the particles?  In
        cases where we are applying an operation on a subset of the
        particles used to construct the mesh octree, this will ensure
        that we are able to find and identify all relevant particles.
    nneighbors : int, default 64
        The number of neighbors to examine during the process.

    Returns
    -------
    List of fortran-ordered, mesh-like arrays.
    """
    # Here we perform our particle deposition.
    positions.convert_to_units("code_length")
    if create_octree:
        morton = compute_morton(
            positions[:, 0], positions[:, 1], positions[:, 2],
            self.ds.domain_left_edge, self.ds.domain_right_edge)
        morton.sort()
        particle_octree = ParticleOctreeContainer(
            [1, 1, 1],
            self.ds.domain_left_edge, self.ds.domain_right_edge,
            over_refine=self._oref)
        # This should ensure we get everything within one neighbor of home.
        particle_octree.n_ref = nneighbors * 2
        particle_octree.add(morton)
        particle_octree.finalize()
        pdom_ind = particle_octree.domain_ind(self.selector)
    else:
        particle_octree = self.oct_handler
        pdom_ind = self.domain_ind
    if fields is None:
        fields = []
    if index_fields is None:
        index_fields = []
    cls = getattr(particle_smooth, "%s_smooth" % method, None)
    if cls is None:
        raise YTParticleDepositionNotImplemented(method)
    nz = self.nz
    mdom_ind = self.domain_ind
    nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
    op = cls(nvals, len(fields), nneighbors)
    op.initialize()
    mylog.debug("Smoothing %s particles into %s Octs",
                positions.shape[0], nvals[-1])
    op.process_octree(self.oct_handler, mdom_ind, positions, self.fcoords,
                      fields, self.domain_id, self._domain_offset,
                      self.ds.periodicity, index_fields, particle_octree,
                      pdom_ind, self.ds.geometry)
    # If there are 0s in the smoothing field this will not throw an error,
    # but silently return nans for vals where dividing by 0.
    # Same as what is currently occurring, but suppressing the div by zero
    # error.
    with np.errstate(invalid='ignore'):
        vals = op.finalize()
    if vals is None:
        return
    if isinstance(vals, list):
        vals = [np.asfortranarray(v) for v in vals]
    else:
        vals = np.asfortranarray(vals)
    return vals
def particle_operation(self, positions, fields=None, method=None,
                       nneighbors=64, kernel_name='cubic'):
    r"""Operate on particles, in a particle-against-particle fashion.

    This uses the octree indexing system to call a "smoothing" operation
    (defined in yt/geometry/particle_smooth.pyx) that expects to be called
    in a particle-by-particle fashion.  For instance, the canonical example
    of this would be to compute the Nth nearest neighbor, or to compute the
    density for a given particle based on some kernel operation.

    Many of the arguments to this are identical to those used in the
    smooth and deposit functions.  Note that the `fields` argument must not
    be empty, as these fields will be modified in place.

    Parameters
    ----------
    positions : array_like (Nx3)
        The positions of all of the particles to be examined.  A new
        indexed octree will be constructed on these particles.
    fields : list of arrays
        All the necessary fields for computing the particle operation.
        For instance, this might include mass, velocity, etc.  One of
        these will likely be modified in place.
    method : string
        This is the "method name" which will be looked up in the
        `particle_smooth` namespace as `methodname_smooth`.
    nneighbors : int, default 64
        The number of neighbors to examine during the process.
    kernel_name : string, default 'cubic'
        This is the name of the smoothing kernel to use.  Current supported
        kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
        `wendland4`, and `wendland6`.

    Returns
    -------
    Nothing.
    """
    # Here we perform our particle deposition.
    positions.convert_to_units("code_length")
    morton = compute_morton(
        positions[:, 0], positions[:, 1], positions[:, 2],
        self.ds.domain_left_edge, self.ds.domain_right_edge)
    morton.sort()
    particle_octree = ParticleOctreeContainer(
        [1, 1, 1],
        self.ds.domain_left_edge, self.ds.domain_right_edge,
        over_refine=1)
    particle_octree.n_ref = nneighbors * 2
    particle_octree.add(morton)
    particle_octree.finalize()
    pdom_ind = particle_octree.domain_ind(self.selector)
    if fields is None:
        fields = []
    cls = getattr(particle_smooth, "%s_smooth" % method, None)
    if cls is None:
        raise YTParticleDepositionNotImplemented(method)
    nz = self.nz
    mdom_ind = self.domain_ind
    nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
    op = cls(nvals, len(fields), nneighbors, kernel_name)
    op.initialize()
    mylog.debug("Smoothing %s particles into %s Octs",
                positions.shape[0], nvals[-1])
    op.process_particles(particle_octree, pdom_ind, positions, fields,
                         self.domain_id, self._domain_offset,
                         self.ds.periodicity, self.ds.geometry)
    vals = op.finalize()
    if vals is None:
        return
    if isinstance(vals, list):
        vals = [np.asfortranarray(v) for v in vals]
    else:
        vals = np.asfortranarray(vals)
    return vals
def smooth(self, positions, fields=None, index_fields=None, method=None,
           create_octree=False, nneighbors=64, kernel_name='cubic'):
    r"""Operate on the mesh, in a particle-against-mesh fashion, with
    non-local input.

    This uses the octree indexing system to call a "smoothing" operation
    (defined in yt/geometry/particle_smooth.pyx) that can take input from
    several (non-local) particles and construct some value on the mesh.
    The canonical example is to conduct a smoothing kernel operation on
    the mesh.

    Parameters
    ----------
    positions : array_like (Nx3)
        The positions of all of the particles to be examined.  A new
        indexed octree will be constructed on these particles.
    fields : list of arrays
        All the necessary fields for computing the particle operation.
        For instance, this might include mass, velocity, etc.
    index_fields : list of arrays
        All of the fields defined on the mesh that may be used as input
        to the operation.
    method : string
        This is the "method name" which will be looked up in the
        `particle_smooth` namespace as `methodname_smooth`.  Current
        methods include `volume_weighted`, `nearest`, `idw`,
        `nth_neighbor`, and `density`.
    create_octree : bool
        Should we construct a new octree for indexing the particles?  In
        cases where we are applying an operation on a subset of the
        particles used to construct the mesh octree, this will ensure
        that we are able to find and identify all relevant particles.
    nneighbors : int, default 64
        The number of neighbors to examine during the process.
    kernel_name : string, default 'cubic'
        This is the name of the smoothing kernel to use.  Current supported
        kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
        `wendland4`, and `wendland6`.

    Returns
    -------
    List of fortran-ordered, mesh-like arrays.
    """
    # Here we perform our particle deposition.
    positions.convert_to_units("code_length")
    if create_octree:
        morton = compute_morton(
            positions[:, 0], positions[:, 1], positions[:, 2],
            self.ds.domain_left_edge, self.ds.domain_right_edge)
        morton.sort()
        particle_octree = ParticleOctreeContainer(
            [1, 1, 1],
            self.ds.domain_left_edge, self.ds.domain_right_edge,
            over_refine=self._oref)
        # This should ensure we get everything within one neighbor of home.
        particle_octree.n_ref = nneighbors * 2
        particle_octree.add(morton)
        particle_octree.finalize()
        pdom_ind = particle_octree.domain_ind(self.selector)
    else:
        particle_octree = self.oct_handler
        pdom_ind = self.domain_ind
    if fields is None:
        fields = []
    if index_fields is None:
        index_fields = []
    cls = getattr(particle_smooth, "%s_smooth" % method, None)
    if cls is None:
        raise YTParticleDepositionNotImplemented(method)
    nz = self.nz
    mdom_ind = self.domain_ind
    nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
    op = cls(nvals, len(fields), nneighbors, kernel_name)
    op.initialize()
    mylog.debug("Smoothing %s particles into %s Octs",
                positions.shape[0], nvals[-1])
    # Pointer operations within 'process_octree' require arrays to be
    # contiguous cf. https://bitbucket.org/yt_analysis/yt/issues/1079
    fields = [np.ascontiguousarray(f, dtype="float64") for f in fields]
    op.process_octree(self.oct_handler, mdom_ind, positions, self.fcoords,
                      fields, self.domain_id, self._domain_offset,
                      self.ds.periodicity, index_fields, particle_octree,
                      pdom_ind, self.ds.geometry)
    # If there are 0s in the smoothing field this will not throw an error,
    # but silently return nans for vals where dividing by 0.
    # Same as what is currently occurring, but suppressing the div by zero
    # error.
    with np.errstate(invalid='ignore'):
        vals = op.finalize()
    if vals is None:
        return
    if isinstance(vals, list):
        vals = [np.asfortranarray(v) for v in vals]
    else:
        vals = np.asfortranarray(vals)
    return vals
def particle_operation(self, positions, fields=None, method=None,
                       nneighbors=64):
    r"""Operate on particles, in a particle-against-particle fashion.

    This uses the octree indexing system to call a "smoothing" operation
    (defined in yt/geometry/particle_smooth.pyx) that expects to be called
    in a particle-by-particle fashion.  For instance, the canonical example
    of this would be to compute the Nth nearest neighbor, or to compute the
    density for a given particle based on some kernel operation.

    Many of the arguments to this are identical to those used in the
    smooth and deposit functions.  Note that the `fields` argument must not
    be empty, as these fields will be modified in place.

    Parameters
    ----------
    positions : array_like (Nx3)
        The positions of all of the particles to be examined.  A new
        indexed octree will be constructed on these particles.
    fields : list of arrays
        All the necessary fields for computing the particle operation.
        For instance, this might include mass, velocity, etc.  One of
        these will likely be modified in place.
    method : string
        This is the "method name" which will be looked up in the
        `particle_smooth` namespace as `methodname_smooth`.
    nneighbors : int, default 64
        The number of neighbors to examine during the process.

    Returns
    -------
    Nothing.
    """
    # Here we perform our particle deposition.
    positions.convert_to_units("code_length")
    morton = compute_morton(
        positions[:, 0], positions[:, 1], positions[:, 2],
        self.ds.domain_left_edge, self.ds.domain_right_edge)
    morton.sort()
    particle_octree = ParticleOctreeContainer(
        [1, 1, 1],
        self.ds.domain_left_edge, self.ds.domain_right_edge,
        over_refine=1)
    particle_octree.n_ref = nneighbors * 2
    particle_octree.add(morton)
    particle_octree.finalize()
    pdom_ind = particle_octree.domain_ind(self.selector)
    if fields is None:
        fields = []
    cls = getattr(particle_smooth, "%s_smooth" % method, None)
    if cls is None:
        raise YTParticleDepositionNotImplemented(method)
    nz = self.nz
    mdom_ind = self.domain_ind
    nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
    op = cls(nvals, len(fields), nneighbors)
    op.initialize()
    mylog.debug("Smoothing %s particles into %s Octs",
                positions.shape[0], nvals[-1])
    op.process_particles(particle_octree, pdom_ind, positions, fields,
                         self.domain_id, self._domain_offset,
                         self.ds.periodicity, self.ds.geometry)
    vals = op.finalize()
    if vals is None:
        return
    if isinstance(vals, list):
        vals = [np.asfortranarray(v) for v in vals]
    else:
        vals = np.asfortranarray(vals)
    return vals