def _read_fluid_selection(self, g, selector, fields):
    """Read *fields* for the single grid *g* from its HDF5 file.

    Only ``GridSelector`` selections are supported; any other selector
    raises ``RuntimeError``.  Returns a flat dict keyed by
    ``(ftype, fname)`` tuples.  When ``self._cache_on`` is set, the
    values read are cached per grid id and served on later calls
    (tracked via ``self._hits`` / ``self._misses``).
    """
    rv = {}
    if selector.__class__.__name__ != "GridSelector":
        raise RuntimeError(
            "Geometric selection not supported for non-spatial datasets.")
    # Serve as much as possible from the per-grid cache first.
    if g.id in self._cached_fields:
        rv.update(self._cached_fields[g.id])
        if len(rv) == len(fields):
            return rv
    f = h5py.File(u(g.filename), "r")
    try:
        gds = f["data"]
        for field in fields:
            if field in rv:
                self._hits += 1
                continue
            self._misses += 1
            ftype, fname = field
            rv[(ftype, fname)] = gds[fname].value
    finally:
        # Close the handle even if a dataset read raises.
        f.close()
    if self._cache_on:
        # BUG FIX: the cache is keyed by grid id.  The old code iterated
        # over rv's (ftype, fname) keys and called dict.update() on the
        # data *arrays*, which raises (or corrupts the cache) and never
        # stored anything under g.id.
        self._cached_fields.setdefault(g.id, {}).update(rv)
    return rv
def _read_fluid_selection(self, chunks, selector, fields, size):
    """Read the requested *fields* from every grid in *chunks*.

    Returns a flat dict mapping ``(ftype, fname)`` tuples to the data
    produced by ``self._read_data`` for each grid.  A ``GridSelector``
    (exactly one chunk holding exactly one grid) is special-cased.
    """
    results = {}
    chunk_list = list(chunks)
    if selector.__class__.__name__ == "GridSelector":
        # A grid selector must carry exactly one chunk with one grid.
        if not (len(chunk_list) == len(chunk_list[0].objs) == 1):
            raise RuntimeError
        grid = chunk_list[0].objs[0]
        handle = h5py.File(u(grid.filename), 'r')
        for ftype, fname in fields:
            # Each field is loaded through the _read_data helper.
            results[ftype, fname] = self._read_data(grid, fname)
        handle.close()
        return results
    if size is None:
        size = sum(g.count(selector)
                   for chunk in chunk_list for g in chunk.objs)
    # Pre-allocate one flat float64 buffer per requested field.
    for field in fields:
        results[field] = np.empty(size, dtype="float64")
    grid_total = sum(len(c.objs) for c in chunk_list)
    offset = 0
    for chunk in chunk_list:
        for grid in chunk.objs:
            handle = h5py.File(u(grid.filename), 'r')
            for ftype, fname in fields:
                # NOTE(review): this replaces the pre-allocated buffer
                # with whatever _read_data returns for the current grid,
                # exactly as the original implementation did.
                results[ftype, fname] = self._read_data(grid, fname)
            handle.close()
    return results
def _read_fluid_selection(self, chunks, selector, fields, size):
    # This needs to allocate a set of arrays inside a dictionary, where the
    # keys are the (ftype, fname) tuples and the values are arrays that
    # have been masked using whatever selector method is appropriate. The
    # dict gets returned at the end and it should be flat, with selected
    # data. Note that if you're reading grid data, you might need to
    # special-case a grid selector object.
    rv = {}
    chunks = list(chunks)
    if selector.__class__.__name__ == "GridSelector":
        # A GridSelector means exactly one chunk containing one grid.
        if not (len(chunks) == len(chunks[0].objs) == 1):
            raise RuntimeError
        g = chunks[0].objs[0]
        # Open the grid's HDF5 file for the duration of the reads.
        f = h5py.File(u(g.filename), "r")
        for ftype, fname in fields:
            # Data is loaded with the _read_data helper method.
            rv[ftype, fname] = self._read_data(g, fname)
        f.close()
        return rv
    # Non-grid selection: size cells in total across all grids.
    if size is None:
        size = sum((g.count(selector) for chunk in chunks for g in chunk.objs))
    for field in fields:
        ftype, fname = field
        fsize = size
        # Pre-allocate a flat output buffer per field.
        rv[field] = np.empty(fsize, dtype="float64")
    ng = sum(len(c.objs) for c in chunks)
    ind = 0
    for chunk in chunks:
        for g in chunk.objs:
            # Open each grid's file in turn.
            f = h5py.File(u(g.filename), "r")
            for ftype, fname in fields:
                # NOTE(review): this overwrites the buffer allocated above
                # with _read_data's result for the current grid; the
                # pre-allocation and `ind` counter are effectively unused.
                rv[ftype, fname] = self._read_data(g, fname)
            f.close()
    return rv
def _read_particle_fields(self, chunks, ptf, selector):
    # This gets called after the arrays have been allocated. It needs to
    # yield ((ptype, field), data) where data is the masked results of
    # reading ptype, field and applying the selector to the data read in.
    # Selector objects have a .select_points(x,y,z) that returns a mask, so
    # you need to do your masking here.
    chunks = list(chunks)
    # A chunk returns grid elements, each carrying a file name.
    for chunk in chunks:
        # One file handle is shared by all grids in the chunk that live
        # in the same file; it is opened lazily on the first named grid.
        f = None
        for g in chunk.objs:
            if g.filename is None:
                continue
            if f is None:
                # Open an HDF5 file to read from.
                f = h5py.File(u(g.filename), 'r')
            # The file's basePath attribute names the root data group.
            ds = f.get(f.attrs["basePath"])
            for ptype, field_list in sorted(ptf.items()):
                # ptf.items() returns a list of all known particle fields
                # and particle types.
                pds = ds.get("%s/%s/" % (f.attrs["particlesPath"], ptype))
                # The particle coordinates have to be loaded again.
                x, y, z = (np.asarray(pds.get("position/" + ax).value,
                                      dtype="=f8")
                           for ax in 'xyz')
                mask = selector.select_points(x, y, z, 0.0)
                if mask is None:
                    continue
                for field in field_list:
                    # Map e.g. "particle_position_x" -> "position/x",
                    # the HDF5 path layout for this dataset.
                    nfield = field.replace("particle_", "")
                    nfield = nfield.replace("_", "/")
                    # Load the field information out of the file.
                    # !!! Incomplete: mass and charge are attributes and
                    # the rest are arrays.
                    if field == "particle_mass" or field == "particle_charge":
                        # Scalar attribute broadcast to one value per particle.
                        data = np.full(x.shape[0],
                                       pds.get(nfield).attrs["value"], "=f8")
                    else:
                        data = np.asarray(pds.get(nfield), "=f8")
                    # NOTE(review): mass could be multiplied by a particle
                    # weighting here; left as in the original.
                    # Yields particle type, field name and the masked data.
                    yield (ptype, field), data[mask]
        if f:
            f.close()
def _read_particle_fields(self, chunks, ptf, selector):
    # This gets called after the arrays have been allocated. It needs to
    # yield ((ptype, field), data) where data is the masked results of
    # reading ptype, field and applying the selector to the data read in.
    # Selector objects have a .select_points(x,y,z) that returns a mask, so
    # you need to do your masking here.
    chunks = list(chunks)
    # A chunk returns grid elements, each carrying a file name.
    for chunk in chunks:
        # Lazily open a single shared handle for the chunk's grids.
        f = None
        for g in chunk.objs:
            if g.filename is None:
                continue
            if f is None:
                # Open an HDF5 file to read from.
                f = h5py.File(u(g.filename), "r")
            # basePath attribute names the root data group of the file.
            ds = f.get(f.attrs["basePath"])
            for ptype, field_list in sorted(ptf.items()):
                # ptf.items() returns a list of all known particle fields
                # and particle types.
                pds = ds.get("%s/%s/" % (f.attrs["particlesPath"], ptype))
                # The particle coordinates have to be loaded again.
                x, y, z = (np.asarray(pds.get("position/" + ax).value,
                                      dtype="=f8")
                           for ax in "xyz")
                mask = selector.select_points(x, y, z, 0.0)
                if mask is None:
                    continue
                for field in field_list:
                    # Map e.g. "particle_position_x" -> "position/x",
                    # matching the file's HDF5 group layout.
                    nfield = field.replace("particle_", "")
                    nfield = nfield.replace("_", "/")
                    # Load the field information out of the file.
                    # !!! Incomplete: mass and charge are attributes and
                    # the rest are arrays.
                    if field == "particle_mass" or field == "particle_charge":
                        # Scalar attribute broadcast to one value per particle.
                        data = np.full(x.shape[0],
                                       pds.get(nfield).attrs["value"], "=f8")
                    else:
                        data = np.asarray(pds.get(nfield), "=f8")
                    # NOTE(review): mass could be multiplied by a particle
                    # weighting here; left as in the original.
                    # Yields particle type, field name and the masked data.
                    yield (ptype, field), data[mask]
        if f:
            f.close()
def _read_fluid_selection(self, chunks, selector, fields, size):
    """Read *fields* from the ``/Grid%08i`` groups of the grids in *chunks*.

    Returns a flat dict mapping ``(ftype, fname)`` tuples to data
    selected by *selector*.  ``GridSelector`` selections read one whole
    grid; other selectors fill pre-allocated flat buffers grid by grid
    via the low-level h5py API.  Reads are cached per grid id when
    ``self._cache_on`` is set.
    """
    rv = {}
    # Now we have to do something unpleasant
    chunks = list(chunks)
    if selector.__class__.__name__ == "GridSelector":
        if not (len(chunks) == len(chunks[0].objs) == 1):
            raise RuntimeError
        g = chunks[0].objs[0]
        if g.id in self._cached_fields:
            gf = self._cached_fields[g.id]
            rv.update(gf)
        if len(rv) == len(fields):
            # BUG FIX: return before opening the file.  The old code
            # opened the HDF5 file first and leaked the handle on a
            # full cache hit.
            return rv
        f = h5py.File(u(g.filename), 'r')
        gds = f.get("/Grid%08i" % g.id)
        for field in fields:
            if field in rv:
                self._hits += 1
                continue
            self._misses += 1
            ftype, fname = field
            if fname in gds:
                # swapaxes converts the on-disk axis order to yt's.
                rv[(ftype, fname)] = gds.get(fname).value.swapaxes(0, 2)
            else:
                # Missing dataset: fall back to zeros of the grid's shape.
                rv[(ftype, fname)] = np.zeros(g.ActiveDimensions)
        if self._cache_on:
            # BUG FIX: the cache is keyed by grid id.  The old code
            # iterated over rv's (ftype, fname) keys and dict.update()'d
            # the data arrays, corrupting the cache.
            self._cached_fields.setdefault(g.id, {}).update(rv)
        f.close()
        return rv
    if size is None:
        size = sum((g.count(selector) for chunk in chunks for g in chunk.objs))
    for field in fields:
        ftype, fname = field
        fsize = size
        # Flat output buffer, filled incrementally at offset `ind`.
        rv[field] = np.empty(fsize, dtype="float64")
    ng = sum(len(c.objs) for c in chunks)
    mylog.debug("Reading %s cells of %s fields in %s grids",
                size, [f2 for f1, f2 in fields], ng)
    ind = 0
    h5_type = self._field_dtype
    for chunk in chunks:
        fid = None
        for g in chunk.objs:
            if g.filename is None:
                continue
            if fid is None:
                # Low-level open, shared by all grids of this chunk.
                fid = h5py.h5f.open(b(g.filename), h5py.h5f.ACC_RDONLY)
            gf = self._cached_fields.get(g.id, {})
            # Read into a transposed temp buffer; data_view presents it
            # in yt's axis order.
            data = np.empty(g.ActiveDimensions[::-1], dtype=h5_type)
            data_view = data.swapaxes(0, 2)
            nd = 0
            for field in fields:
                if field in gf:
                    nd = g.select(selector, gf[field], rv[field], ind)
                    self._hits += 1
                    continue
                self._misses += 1
                ftype, fname = field
                try:
                    node = "/Grid%08i/%s" % (g.id, fname)
                    dg = h5py.h5d.open(fid, b(node))
                except KeyError:
                    # Dark matter density may legitimately be absent.
                    if fname == "Dark_Matter_Density":
                        continue
                    raise
                dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
                if self._cache_on:
                    self._cached_fields.setdefault(g.id, {})
                    # Copy because it's a view into a reused temp array.
                    self._cached_fields[g.id][field] = data_view.copy()
                nd = g.select(selector, data_view, rv[field], ind)  # caches
            ind += nd
        if fid:
            fid.close()
    return rv
def _read_fluid_selection(self, chunks, selector, fields, size):
    """Read *fields* from the grids in *chunks*, applying *selector*.

    Returns a flat dict keyed by ``(ftype, fname)``.  ``GridSelector``
    selections read a whole grid from the ``self.ds.default_fluid_type``
    group; other selectors fill pre-allocated flat float64 buffers via
    ``g.select``.  Reads are cached per grid id when ``self._cache_on``
    is set, with hits/misses counted in ``self._hits``/``self._misses``.
    """
    rv = {}
    # Now we have to do something unpleasant
    chunks = list(chunks)
    if selector.__class__.__name__ == "GridSelector":
        if not (len(chunks) == len(chunks[0].objs) == 1):
            raise RuntimeError
        g = chunks[0].objs[0]
        if g.id in self._cached_fields:
            gf = self._cached_fields[g.id]
            rv.update(gf)
        if len(rv) == len(fields):
            return rv
        f = h5py.File(u(g.filename), "r")
        gds = f[self.ds.default_fluid_type]
        for field in fields:
            if field in rv:
                self._hits += 1
                continue
            self._misses += 1
            ftype, fname = field
            rv[(ftype, fname)] = gds[fname].value
        if self._cache_on:
            # BUG FIX: the cache is keyed by grid id.  The old code
            # iterated over rv's (ftype, fname) keys and dict.update()'d
            # the data arrays, which raises and never cached under g.id.
            self._cached_fields.setdefault(g.id, {}).update(rv)
        f.close()
        return rv
    if size is None:
        size = sum(
            (g.count(selector) for chunk in chunks for g in chunk.objs))
    for field in fields:
        ftype, fname = field
        fsize = size
        # Flat output buffer, filled incrementally at offset `ind`.
        rv[field] = np.empty(fsize, dtype="float64")
    ng = sum(len(c.objs) for c in chunks)
    mylog.debug("Reading %s cells of %s fields in %s grids",
                size, [f2 for f1, f2 in fields], ng)
    ind = 0
    for chunk in chunks:
        # One handle shared by all grids of the chunk, opened lazily.
        f = None
        for g in chunk.objs:
            if g.filename is None:
                continue
            if f is None:
                # Consistency: open through u() like every other open
                # in this module.
                f = h5py.File(u(g.filename), "r")
            gf = self._cached_fields.get(g.id, {})
            nd = 0
            for field in fields:
                if field in gf:
                    nd = g.select(selector, gf[field], rv[field], ind)
                    self._hits += 1
                    continue
                self._misses += 1
                ftype, fname = field
                # Add extra dimensions to make the data 3D so g.select
                # can apply the spatial mask.
                data = f[ftype][fname].value.astype(self._field_dtype)
                for dim in range(len(data.shape), 3):
                    data = np.expand_dims(data, dim)
                if self._cache_on:
                    self._cached_fields.setdefault(g.id, {})
                    self._cached_fields[g.id][field] = data
                nd = g.select(selector, data, rv[field], ind)  # caches
            ind += nd
        if f:
            f.close()
    return rv