def _check_consistency(self):
     for gi in range(self.connectivity_indices.shape[0]):
         ind = self.connectivity_indices[gi, :] - self._index_offset
         coords = self.connectivity_coords[ind, :]
         for i in range(3):
             assert(np.unique(coords[:,i]).size == 2)
     mylog.debug("Connectivity is consistent.")
Example #2
 def _initialize_index(self, data_file, regions):
     pcount = data_file.header["num_halos"]
     morton = np.empty(pcount, dtype='uint64')
     mylog.debug("Initializing index % 5i (% 7i particles)",
                 data_file.file_id, pcount)
     ind = 0
     with h5py.File(data_file.filename, "r") as f:
         if not f.keys(): return None
         pos = np.empty((pcount, 3), dtype="float64")
         pos = data_file.ds.arr(pos, "code_length")
         dx = np.finfo(f['particle_position_x'].dtype).eps
         dx = 2.0*self.ds.quan(dx, "code_length")
         pos[:,0] = f["particle_position_x"].value
         pos[:,1] = f["particle_position_y"].value
         pos[:,2] = f["particle_position_z"].value
         # These are 32 bit numbers, so we give a little lee-way.
         # Otherwise, for big sets of particles, we often will bump into the
         # domain edges.  This helps alleviate that.
         np.clip(pos, self.ds.domain_left_edge + dx,
                      self.ds.domain_right_edge - dx, pos)
         if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
            np.any(pos.max(axis=0) > self.ds.domain_right_edge):
             raise YTDomainOverflow(pos.min(axis=0),
                                    pos.max(axis=0),
                                    self.ds.domain_left_edge,
                                    self.ds.domain_right_edge)
         regions.add_data_file(pos, data_file.file_id)
         morton[ind:ind+pos.shape[0]] = compute_morton(
             pos[:,0], pos[:,1], pos[:,2],
             data_file.ds.domain_left_edge,
             data_file.ds.domain_right_edge)
     return morton
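The clip-then-check idiom above, nudging 32-bit positions inward by a couple of machine epsilons before testing the domain bounds, is easy to demonstrate in isolation. The sketch below is illustrative only; the unit-cube edges and sample positions are made up and nothing in it comes from yt itself.

import numpy as np

# Minimal sketch of the clip-then-check pattern, assuming a unit-cube domain.
domain_left = np.zeros(3)
domain_right = np.ones(3)

# Positions as if read from 32-bit data; some sit exactly on the domain edge.
pos = np.array([[0.00, 0.50, 1.00],
                [0.25, 0.75, 0.50]], dtype="float64")

# Give a little lee-way: twice the 32-bit epsilon, as in the example above.
dx = 2.0 * np.finfo(np.float32).eps
np.clip(pos, domain_left + dx, domain_right - dx, out=pos)

# After clipping, the overflow check can no longer trip on edge particles.
assert not (np.any(pos.min(axis=0) < domain_left) or
            np.any(pos.max(axis=0) > domain_right))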
Example #3
    def _read_fluid_selection(self, chunks, selector, fields, size):
        chunks = list(chunks)
        assert(len(chunks) == 1)
        tags = {}
        rv = {}
        pyne_mesh = self.ds.pyne_mesh
        mesh = pyne_mesh.mesh
        for field in fields:
            rv[field] = np.empty(size, dtype="float64")
        ngrids = sum(len(chunk.objs) for chunk in chunks)
        mylog.debug("Reading %s cells of %s fields in %s blocks",
                    size, [fname for ftype, fname in fields], ngrids)
        for field in fields:
            ftype, fname = field
            if pyne_mesh.structured:
                tag = pyne_mesh.mesh.getTagHandle('idx')
                indices = [tag[ent] for ent in pyne_mesh.structured_iterate_hex()]
            else:
                indices = slice(None)
            ds = np.asarray(getattr(pyne_mesh, fname)[indices], 'float64')

            ind = 0
            for chunk in chunks:
                for g in chunk.objs:
                    ind += g.select(selector, ds, rv[field], ind) # caches
        return rv
Example #4
    def filter_sphere(self, center, radius, myiter):
        """
        Filter data by masking out data outside of a sphere defined
        by a center and radius. Account for periodicity of data, allowing
        left/right to be outside of the domain.
        """

        # Get left/right for periodicity considerations
        left = center - radius
        right = center + radius
        for data in myiter:
            pos = np.array([data['x'].copy(), data['y'].copy(), data['z'].copy()]).T

            DW = self.true_domain_width
            _shift_periodic(pos, left, right, DW)

            # Now get all particles that are within the sphere 
            mask = ((pos-center)**2).sum(axis=1)**0.5 < radius

            mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))

            if not np.any(mask):
                continue

            filtered = {ax: pos[:, i][mask] for i, ax in enumerate('xyz')}
            for f in data.keys():
                if f in 'xyz':
                    continue
                filtered[f] = data[f][mask]

            yield filtered
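The `_shift_periodic` helper used above is not reproduced in these examples. As a rough illustration of the same idea, the sketch below filters points to a sphere across a periodic boundary using the minimum-image convention instead; the domain width, center, and radius are arbitrary stand-ins.

import numpy as np

# Sketch: select points within a sphere in a periodic cubic domain of width DW,
# using the minimum-image convention (not yt's _shift_periodic helper).
DW = np.array([1.0, 1.0, 1.0])
center = np.array([0.05, 0.5, 0.5])   # sphere pokes out of the left x face
radius = 0.1

rng = np.random.default_rng(0)
pos = rng.random((1000, 3)) * DW

# Displacement to the center, wrapped into [-DW/2, DW/2) along each axis.
d = pos - center
d -= DW * np.rint(d / DW)

mask = np.sqrt((d ** 2).sum(axis=1)) < radius
print("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))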
Example #5
 def get_key_data(self, key, fields):
     if key > self._max_key:
         raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (key, self._max_key))
     base = self.indexdata['base'][key]
     length = self.indexdata['len'][key] - base
     if length > 0:
         mylog.debug('Getting contiguous chunk of size %i starting at %i' % (length, base))
     return self.get_data(slice(base, base + length), fields)
Example #6
    def iter_sphere_data(self, center, radius, fields):
        """
        Iterate over all data within some sphere defined by a center and
        a radius.
        """
        _ensure_xyz_fields(fields)
        mylog.debug('MIDX Loading spherical region %s to %s' %(center, radius))
        inds = self.get_bbox(center-radius, center+radius)

        for dd in self.filter_sphere(
            center, radius,
            self.iter_data(inds, fields)):
            yield dd
Example #7
    def __init__(self, ds, domain_id):
        self.ds = ds
        self.domain_id = domain_id

        num = os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1]
        rootdir = ds.root_folder
        basedir = os.path.abspath(os.path.dirname(ds.parameter_filename))
        basename = "%s/%%s_%s.out%05i" % (basedir, num, domain_id)
        part_file_descriptor = f"{basedir}/part_file_descriptor.txt"
        if ds.num_groups > 0:
            igroup = ((domain_id - 1) // ds.group_size) + 1
            basename = "%s/group_%05i/%%s_%s.out%05i" % (
                rootdir,
                igroup,
                num,
                domain_id,
            )
        else:
            basename = "%s/%%s_%s.out%05i" % (basedir, num, domain_id)
        for t in ["grav", "amr"]:
            setattr(self, f"{t}_fn", basename % t)
        self._part_file_descriptor = part_file_descriptor
        self._read_amr_header()

        # Autodetect field files
        field_handlers = [FH(self) for FH in get_field_handlers() if FH.any_exist(ds)]
        self.field_handlers = field_handlers
        for fh in field_handlers:
            mylog.debug("Detected fluid type %s in domain_id=%s", fh.ftype, domain_id)
            fh.detect_fields(ds)
            # self._add_ftype(fh.ftype)

        # Autodetect particle files
        particle_handlers = [
            PH(self) for PH in get_particle_handlers() if PH.any_exist(ds)
        ]
        self.particle_handlers = particle_handlers
        for ph in particle_handlers:
            mylog.debug(
                "Detected particle type %s in domain_id=%s", ph.ptype, domain_id
            )
            ph.read_header()
            # self._add_ptype(ph.ptype)

        # Load the AMR structure
        self._read_amr()
Example #8
 def _identify_base_chunk(self, dobj):
     if getattr(dobj, "_chunk_info", None) is None:
         domains = [dom for dom in self.domains if dom.included(dobj.selector)]
         base_region = getattr(dobj, "base_region", dobj)
         if len(domains) > 1:
             mylog.debug("Identified %s intersecting domains", len(domains))
         subsets = [
             RAMSESDomainSubset(
                 base_region,
                 domain,
                 self.dataset,
                 num_ghost_zones=dobj._num_ghost_zones,
             )
             for domain in domains
         ]
         dobj._chunk_info = subsets
     dobj._current_chunk = list(self._chunk_all(dobj))[0]
Example #9
    def iter_bbox_data(self, left, right, fields):
        """
        Iterate over all data within a bounding box defined by a left
        and a right.
        """
        _ensure_xyz_fields(fields)
        mylog.debug('MIDX Loading region from %s to %s' % (left, right))
        inds = self.get_bbox(left, right)
        # Need to put left/right in float32 to avoid fp roundoff errors
        # in the bbox later.
        #left = left.astype('float32')
        #right = right.astype('float32')

        #my_filter = bbox_filter(left, right, self.true_domain_width)
        data = []
        for dd in self.filter_bbox(left, right, self.iter_data(inds, fields)):
            yield dd
Example #10
 def _identify_base_chunk(self, dobj):
     """
     Take the passed in data source dobj, and use its embedded selector
     to calculate the domain mask, build the reduced domain
     subsets and oct counts. Attach this information to dobj.
     """
     if getattr(dobj, "_chunk_info", None) is None:
         # Get all octs within this oct handler
         domains = [dom for dom in self.domains if
                    dom.included(dobj.selector)]
         base_region = getattr(dobj, "base_region", dobj)
         if len(domains) > 1:
             mylog.debug("Identified %s intersecting domains", len(domains))
         subsets = [ARTDomainSubset(base_region, domain, self.dataset)
                    for domain in domains]
         dobj._chunk_info = subsets
     dobj._current_chunk = list(self._chunk_all(dobj))[0]
Example #11
 def _read_amr_root(self, oct_handler):
     self.level_offsets
     # add the root *cell* not *oct* mesh
     root_octs_side = self.ds.domain_dimensions[0] / 2
     NX = np.ones(3) * root_octs_side
     LE = np.array([0.0, 0.0, 0.0], dtype='float64')
     RE = np.array([1.0, 1.0, 1.0], dtype='float64')
     root_dx = (RE - LE) / NX
     LL = LE + root_dx / 2.0
     RL = RE - root_dx / 2.0
     # compute floating point centers of root octs
     root_fc = np.mgrid[LL[0]:RL[0]:NX[0] * 1j, LL[1]:RL[1]:NX[1] * 1j,
                        LL[2]:RL[2]:NX[2] * 1j]
     root_fc = np.vstack([p.ravel() for p in root_fc]).T
     oct_handler.add(self.domain_id, 0, root_fc)
     assert (oct_handler.nocts == root_fc.shape[0])
     mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
                 root_octs_side**3, 0, oct_handler.nocts)
Example #12
File: io.py Project: cgyurgyik/yt
    def _initialize_index(self, data_file, regions):
        if self.index_ptype == "all":
            ptypes = self.ds.particle_types_raw
            pcount = sum(data_file.total_particles.values())
        else:
            ptypes = [self.index_ptype]
            pcount = data_file.total_particles[self.index_ptype]
        morton = np.empty(pcount, dtype="uint64")
        if pcount == 0:
            return morton
        mylog.debug("Initializing index % 5i (% 7i particles)",
                    data_file.file_id, pcount)
        ind = 0
        with h5py.File(data_file.filename, mode="r") as f:
            if not f.keys():
                return None
            dx = np.finfo(f["Group"]["GroupPos"].dtype).eps
            dx = 2.0 * self.ds.quan(dx, "code_length")

            for ptype in ptypes:
                if data_file.total_particles[ptype] == 0:
                    continue
                pos = data_file._get_particle_positions(ptype, f=f)
                pos = self.ds.arr(pos, "code_length")

                if np.any(
                        pos.min(axis=0) < self.ds.domain_left_edge) or np.any(
                            pos.max(axis=0) > self.ds.domain_right_edge):
                    raise YTDomainOverflow(
                        pos.min(axis=0),
                        pos.max(axis=0),
                        self.ds.domain_left_edge,
                        self.ds.domain_right_edge,
                    )
                regions.add_data_file(pos, data_file.file_id)
                morton[ind:ind + pos.shape[0]] = compute_morton(
                    pos[:, 0],
                    pos[:, 1],
                    pos[:, 2],
                    self.ds.domain_left_edge,
                    self.ds.domain_right_edge,
                )
                ind += pos.shape[0]
        return morton
Example #13
 def calculate_parentage_fractions(self, other_catalog, radius = 0.10):
     parentage_fractions = {}
     if self.halo_positions is None or other_catalog.halo_positions is None:
         return parentage_fractions
     mylog.debug("Ball-tree query with radius %0.3e", radius)
     all_nearest = self.halo_kdtree.query_ball_tree(
         other_catalog.halo_kdtree, radius)
     pbar = get_pbar("Halo Mergers", self.halo_positions.shape[0])
     for hid1, nearest in enumerate(all_nearest):
         pbar.update(hid1)
         parentage_fractions[hid1] = {}
         HPL1 = self.read_particle_ids(hid1)
         for hid2 in sorted(nearest):
             HPL2 = other_catalog.read_particle_ids(hid2)
             p1, p2 = HPL1.find_relative_parentage(HPL2)
             parentage_fractions[hid1][hid2] = (p1, p2, HPL2.number_of_particles)
         parentage_fractions[hid1]["NumberOfParticles"] = HPL1.number_of_particles
     pbar.finish()
     return parentage_fractions
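The `query_ball_tree` call above comes from SciPy's KD-tree interface; a minimal sketch of the same catalog-matching query with random stand-in positions looks like this (the radius and catalog sizes are arbitrary).

import numpy as np
from scipy.spatial import cKDTree

# Sketch: for every halo in catalog 1, find all catalog-2 halos within a
# fixed radius, as in calculate_parentage_fractions above.
rng = np.random.default_rng(1)
pos1 = rng.random((50, 3))   # stand-in halo positions, catalog 1
pos2 = rng.random((60, 3))   # stand-in halo positions, catalog 2

tree1, tree2 = cKDTree(pos1), cKDTree(pos2)
all_nearest = tree1.query_ball_tree(tree2, r=0.10)

for hid1, nearest in enumerate(all_nearest[:3]):
    print(hid1, sorted(nearest))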
Example #14
 def _read_fluid_selection(self, chunks, selector, fields, size):
     chunks = list(chunks)
     assert(len(chunks) == 1)
     fhandle = self._handle
     rv = {}
     for field in fields:
         ftype, fname = field
         rv[field] = np.empty(size, dtype=fhandle[field_dname(fname)].dtype)
     ngrids = sum(len(chunk.objs) for chunk in chunks)
     mylog.debug("Reading %s cells of %s fields in %s blocks",
                 size, [fname for ftype, fname in fields], ngrids)
     for field in fields:
         ftype, fname = field
         ds = np.array(fhandle[field_dname(fname)][:], dtype="float64")
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
                 ind += g.select(selector, ds, rv[field], ind) # caches
     return rv
Example #15
    def iter_bbox_data(self, left, right, fields):
        """
        Iterate over all data within a bounding box defined by a left
        and a right.
        """
        _ensure_xyz_fields(fields)
        mylog.debug('MIDX Loading region from %s to %s' %(left, right))
        inds = self.get_bbox(left, right)
        # Need to put left/right in float32 to avoid fp roundoff errors
        # in the bbox later.
        #left = left.astype('float32')
        #right = right.astype('float32')

        #my_filter = bbox_filter(left, right, self.true_domain_width)
        data = []
        for dd in self.filter_bbox(
            left, right,
            self.iter_data(inds, fields)):
            yield dd
Example #16
 def _reconstruct_parent_child(self):
     mask = np.empty(len(self.grids), dtype="int32")
     mylog.debug("First pass; identifying child grids")
     for i, grid in enumerate(self.grids):
         get_box_grids_level(
             self.grid_left_edge[i, :],
             self.grid_right_edge[i, :],
             self.grid_levels[i] + 1,
             self.grid_left_edge,
             self.grid_right_edge,
             self.grid_levels,
             mask,
         )
         ids = np.where(mask.astype("bool"))  # where is a tuple
         grid._children_ids = ids[0] + grid._id_offset
     mylog.debug("Second pass; identifying parents")
     for i, grid in enumerate(self.grids):  # Second pass
         for child in grid.Children:
             child._parent_id.append(i + grid._id_offset)
Example #17
 def _read_fluid_selection(self, chunks, selector, fields, size):
     chunks = list(chunks)
     assert (len(chunks) == 1)
     fhandle = self._handle
     rv = {}
     for field in fields:
         ftype, fname = field
         rv[field] = np.empty(size, dtype=fhandle[field_dname(fname)].dtype)
     ngrids = sum(len(chunk.objs) for chunk in chunks)
     mylog.debug("Reading %s cells of %s fields in %s blocks", size,
                 [fname for ftype, fname in fields], ngrids)
     for field in fields:
         ftype, fname = field
         ds = np.array(fhandle[field_dname(fname)][:], dtype="float64")
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
                 ind += g.select(selector, ds, rv[field], ind)  # caches
     return rv
Example #18
 def _initialize_index(self, data_file, regions):
     halos = data_file.read_data(usecols=['ID'])
     pcount = len(halos['ID'])
     morton = np.empty(pcount, dtype='uint64')
     mylog.debug('Initializing index % 5i (% 7i particles)',
                 data_file.file_id, pcount)
     if pcount == 0:
         return morton
     ind = 0
     pos = data_file._get_particle_positions('halos')
     pos = data_file.ds.arr(pos, 'code_length')
     dle = self.ds.domain_left_edge
     dre = self.ds.domain_right_edge
     if np.any(pos.min(axis=0) < dle) or np.any(pos.max(axis=0) > dre):
         raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0), dle, dre)
     regions.add_data_file(pos, data_file.file_id)
     morton[ind:ind + pos.shape[0]] = compute_morton(
         pos[:, 0], pos[:, 1], pos[:, 2], dle, dre)
     return morton
Example #19
    def _guess_headers_from_file(self, filename) -> None:
        with FortranFile(filename) as fpu:
            ok = False
            for dp, longint in product((True, False), (True, False)):
                fpu.seek(0)
                try:
                    header_attributes = HEADER_ATTRIBUTES(double=dp,
                                                          longint=longint)
                    fpu.read_attrs(header_attributes)
                    ok = True
                    break
                except (ValueError, OSError):
                    pass

            if not ok:
                raise OSError("Could not read headers from file %s" % filename)

            istart = fpu.tell()
            fpu.seek(0, 2)
            iend = fpu.tell()

            # Try different templates
            ok = False
            for name, cls in ADAPTAHOP_TEMPLATES.items():
                fpu.seek(istart)
                attributes = cls(longint, dp).HALO_ATTRIBUTES
                mylog.debug("Trying %s(longint=%s, dp=%s)", name, longint, dp)
                try:
                    # Try to read two halos to be sure
                    fpu.read_attrs(attributes)
                    if fpu.tell() < iend:
                        fpu.read_attrs(attributes)
                    ok = True
                    break
                except (ValueError, OSError):
                    continue

        if not ok:
            raise OSError("Could not guess fields from file %s" % filename)

        self._header_attributes = header_attributes
        self._halo_attributes = attributes
Example #20
    def write_input(self, **kwargs):
        outputs = self.outputs
        ds = self.datasets[-1]

        # Halo finder only supports flat lambda-CDM models for which
        # omega_lambda + omega_matter = 1
        omega_lambda = 1 - ds.omega_matter

        # Compute last expansion factor
        config = dict(
            af=1 / (ds.current_redshift + 1),
            lbox=ds.domain_width.in_units('Mpccm').value[0],  # in Mpc
            H_f=ds.hubble_constant * 100,  # in km/s/Mpc
            omega_f=ds.omega_matter,
            lambda_f=omega_lambda,
            npart=100,
            method='MSM',
            cdm=False,
            b=0.2,
            nvoisins=20,
            nhop=20,
            rhot=80.,
            fudge=4.,  # parameter for adaptahop (usually 4.)
            fudgepsilon=0.001,  # parameter for adaptahop (usually 0.05)
            alphap=1.0,  # parameter for adaptahop (usually 1.)
            verbose=False,  # verbose parameter for both halo finder
            megaverbose=False,  # parameter for adaptahop
            nsteps=len(outputs),  # Number of time steps to analyse
            FlagPeriod=1,  # Periodic boundaries (1:True)
            DPMMC=False,  # Activate the densest point in the most massive cell
            SC=True,  # Activate the com in concentric spheres
            dcell_min=3.05e-3  # smallest possible cell size in Mpc (levelmax=15)
        )

        config.update(kwargs)

        res = dict2conf(config)
        self.input_file = path.join(self.prefix, 'input_HaloMaker.dat')
        mylog.info('Writing input parameters to %s' % self.input_file)
        with open(self.input_file, 'w') as f:
            mylog.debug(res)
            f.write(res)
Example #21
File: io.py Project: pshriwise/yt
    def _initialize_index(self, data_file, regions):
        all_count = self._count_particles(data_file)
        pcount = sum(all_count.values())
        morton = np.empty(pcount, dtype="uint64")
        mylog.debug("Initializing index % 5i (% 7i particles)",
                    data_file.file_id, pcount)
        ind = 0
        with h5py.File(data_file.filename, mode="r") as f:
            for ptype in all_count:
                if ptype not in f or all_count[ptype] == 0:
                    continue
                pos = np.empty((all_count[ptype], 3), dtype="float64")
                units = _get_position_array_units(ptype, f, "x")
                if ptype == "grid":
                    dx = f["grid"]["dx"][()].min()
                    dx = self.ds.quan(dx,
                                      parse_h5_attr(f["grid"]["dx"],
                                                    "units")).to("code_length")
                else:
                    dx = 2.0 * np.finfo(
                        f[ptype]["particle_position_x"].dtype).eps
                    dx = self.ds.quan(dx, units).to("code_length")
                pos[:, 0] = _get_position_array(ptype, f, "x")
                pos[:, 1] = _get_position_array(ptype, f, "y")
                pos[:, 2] = _get_position_array(ptype, f, "z")
                pos = self.ds.arr(pos, units).to("code_length")
                dle = self.ds.domain_left_edge.to("code_length")
                dre = self.ds.domain_right_edge.to("code_length")

                # These are 32 bit numbers, so we give a little lee-way.
                # Otherwise, for big sets of particles, we often will bump into the
                # domain edges.  This helps alleviate that.
                np.clip(pos, dle + dx, dre - dx, pos)
                if np.any(pos.min(axis=0) < dle) or np.any(
                        pos.max(axis=0) > dre):
                    raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                           dle, dre)
                regions.add_data_file(pos, data_file.file_id)
                morton[ind:ind + pos.shape[0]] = compute_morton(
                    pos[:, 0], pos[:, 1], pos[:, 2], dle, dre)
                ind += pos.shape[0]
        return morton
Example #22
 def _read_fluid_selection(self, chunks, selector, fields, size):
     chunks = list(chunks)
     assert (len(chunks) == 1)
     tags = {}
     rv = {}
     pyne_mesh = self.ds.pyne_mesh
     mesh = pyne_mesh.mesh
     for field in fields:
         rv[field] = np.empty(size, dtype="float64")
     ngrids = sum(len(chunk.objs) for chunk in chunks)
     mylog.debug("Reading %s cells of %s fields in %s blocks", size,
                 [fname for ftype, fname in fields], ngrids)
     for field in fields:
         ftype, fname = field
         ds = np.asarray(getattr(pyne_mesh, fname)[:], 'float64')
         ind = 0
         for chunk in chunks:
             for g in chunk.objs:
                 ind += g.select(selector, ds, rv[field], ind)  # caches
     return rv
Example #23
 def _read_fluid_selection(self, chunks, selector, fields, size):
     chunks = list(chunks)
     if any((ftype != "boxlib" for ftype, fname in fields)):
         raise NotImplementedError
     rv = {}
     for field in fields:
         rv[field] = np.empty(size, dtype="float64")
     ng = sum(len(c.objs) for c in chunks)
     mylog.debug("Reading %s cells of %s fields in %s grids",
                 size, [f2 for f1, f2 in fields], ng)
     ind = 0
     for chunk in chunks:
         data = self._read_chunk_data(chunk, fields)
         for g in chunk.objs:
             for field in fields:
                 ds = data[g.id].pop(field)
                 nd = g.select(selector, ds, rv[field], ind) # caches
             ind += nd
             data.pop(g.id)
     return rv
Example #24
 def _reconstruct_parent_child(self):
     mask = np.empty(len(self.grids), dtype="int32")
     mylog.debug("First pass; identifying child grids")
     for i, grid in enumerate(self.grids):
         get_box_grids_level(
             self.grid_left_edge[i, :],
             self.grid_right_edge[i, :],
             self.grid_levels[i] + 1,
             self.grid_left_edge,
             self.grid_right_edge,
             self.grid_levels,
             mask,
         )
         grid.Children = [
             g for g in self.grids[mask.astype("bool")] if g.Level == grid.Level + 1
         ]
     mylog.debug("Second pass; identifying parents")
     for i, grid in enumerate(self.grids):  # Second pass
         for child in grid.Children:
             child.Parent.append(grid)
Example #25
    def get_contiguous_chunk(self, left_key, right_key, fields):

        lbase=0
        if left_key > self._max_key:
            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % \
                               (left_key, self._max_key))
        right_key = min(right_key, self._max_key)

        left_key = self.get_next_nonzero_chunk(left_key, right_key-1)
        right_key = self.get_previous_nonzero_chunk(right_key, left_key)

        lbase = self.indexdata['base'][left_key]

        rbase = self.indexdata['base'][right_key]
        rlen = self.indexdata['len'][right_key]

        length = rbase + rlen - lbase
        if length > 0:
            mylog.debug('Getting contiguous chunk of size %i starting at %i' % (length, lbase))
        return self.get_data(slice(lbase, lbase + length), fields)
Example #26
 def deposit(self, positions, fields = None, method = None,
             kernel_name = 'cubic'):
     # Here we perform our particle deposition.
     if fields is None: fields = []
     cls = getattr(particle_deposit, "deposit_%s" % method, None)
     if cls is None:
         raise YTParticleDepositionNotImplemented(method)
     nz = self.nz
     nvals = (nz, nz, nz, self.ires.size)
     # We allocate number of zones, not number of octs
     op = cls(nvals, kernel_name)
     op.initialize()
     mylog.debug("Depositing %s (%s^3) particles into %s Root Mesh",
         positions.shape[0], positions.shape[0]**0.3333333, nvals[-1])
     pos = np.array(positions, dtype="float64")
     f64 = [np.array(f, dtype="float64") for f in fields]
     self.oct_handler.deposit(op, self.base_selector, pos, f64)
     vals = op.finalize()
     if vals is None: return
     return np.asfortranarray(vals)
Example #27
 def _read_fluid_selection(self, chunks, selector, fields, size):
     chunks = list(chunks)
     if any((ftype != "boxlib" for ftype, fname in fields)):
         raise NotImplementedError
     rv = {}
     for field in fields:
         rv[field] = np.empty(size, dtype="float64")
     ng = sum(len(c.objs) for c in chunks)
     mylog.debug("Reading %s cells of %s fields in %s grids",
                 size, [f2 for f1, f2 in fields], ng)
     ind = 0
     for chunk in chunks:
         data = self._read_chunk_data(chunk, fields)
         for g in chunk.objs:
             for field in fields:
                 ds = data[g.id].pop(field)
                 nd = g.select(selector, ds, rv[field], ind) # caches
             ind += nd
             data.pop(g.id)
     return rv
Example #28
 def _read_amr_level(self, oct_handler):
     """Open the oct file, read in octs level-by-level.
        For each oct, only the position, index, level and domain
        are needed - its position in the octree is found automatically.
        The most important is finding all the information to feed
        oct_handler.add
     """
     self.level_offsets
     f = open(self.ds._file_amr, "rb")
     for level in range(1, self.ds.max_level + 1):
         unitary_center, fl, iocts, nocts, root_level = \
             _read_art_level_info( f,
                 self._level_oct_offsets, level,
                 coarse_grid=self.ds.domain_dimensions[0],
                 root_level=self.ds.root_level)
         nocts_check = oct_handler.add(self.domain_id, level,
                                       unitary_center)
         assert (nocts_check == nocts)
         mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
                     nocts, level, oct_handler.nocts)
Example #29
    def get_cell_data(self, level, cell_iarr, fields):
        """
        Get data from requested cell

        This uses the raw cell index, and doesn't account for periodicity or
        an expanded domain (non-power of 2).

        level: int
            Requested level
        cell_iarr: array-like, length 3
            Requested cell from given level.
        fields: list
            Requested fields

        Returns:
            cell_data: dict
                Dictionary of field_name, field_data
        """
        cell_iarr = np.array(cell_iarr, dtype="int64")
        lk, rk = self.get_key_bounds(level, cell_iarr)
        mylog.debug("Reading contiguous chunk from %i to %i" % (lk, rk))
        return self.get_contiguous_chunk(lk, rk, fields)
Example #30
 def find_min(self, field):
     """
     Returns (value, center) of location of minimum for a given field
     """
     gI = np.where(self.grid_levels >= 0) # Slow but pedantic
     minVal = HUGE
     for grid in self.grids[gI[0]]:
         mylog.debug("Checking %s (level %s)", grid.id, grid.Level)
         val, coord = grid.find_min(field)
         if val < minVal:
             minCoord = coord
             minVal = val
             minGrid = grid
     mc = np.array(minCoord)
     pos=minGrid.get_position(mc)
     mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s", \
           minVal, pos[0], pos[1], pos[2], minGrid, minGrid.Level)
     self.center = pos
     self.parameters["Min%sValue" % (field)] = minVal
     self.parameters["Min%sPos" % (field)] = "%s" % (pos)
     return minVal, pos
Example #31
    def check_tree(self):
        for node in self.trunk.depth_traverse():
            if node.grid == -1:
                continue
            grid = self.ds.index.grids[node.grid - self._id_offset]
            dds = grid.dds
            gle = grid.LeftEdge
            nle = self.ds.arr(node.get_left_edge(), input_units="code_length")
            nre = self.ds.arr(node.get_right_edge(), input_units="code_length")
            li = np.rint((nle-gle)/dds).astype('int32')
            ri = np.rint((nre-gle)/dds).astype('int32')
            dims = (ri - li).astype('int32')
            assert(np.all(grid.LeftEdge <= nle))
            assert(np.all(grid.RightEdge >= nre))
            assert(np.all(dims > 0))
            # print grid, dims, li, ri

        # Calculate the Volume
        vol = self.trunk.kd_sum_volume()
        mylog.debug('AMRKDTree volume = %e' % vol)
        self.trunk.kd_node_check()
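The index arithmetic in `check_tree` maps node edges to integer cell offsets inside the host grid by dividing the edge offsets by the grid cell width and rounding. A tiny standalone sketch, with made-up grid and node edges, shows the same computation and sanity checks.

import numpy as np

# Sketch of the node-to-grid index mapping used in check_tree above;
# the grid and node edges here are invented for illustration.
gle = np.array([0.0, 0.0, 0.0])      # grid left edge
gre = np.array([1.0, 1.0, 1.0])      # grid right edge
dds = np.array([0.25, 0.25, 0.25])   # grid cell width

nle = np.array([0.25, 0.50, 0.00])   # kd-tree node left edge
nre = np.array([0.75, 1.00, 0.50])   # kd-tree node right edge

li = np.rint((nle - gle) / dds).astype("int32")
ri = np.rint((nre - gle) / dds).astype("int32")
dims = ri - li

assert np.all(gle <= nle) and np.all(gre >= nre)
assert np.all(dims > 0)
print(li, ri, dims)   # [1 2 0] [3 4 2] [2 2 2]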
Example #32
    def get_cell_data(self, level, cell_iarr, fields):
        """
        Get data from requested cell

        This uses the raw cell index, and doesn't account for periodicity or
        an expanded domain (non-power of 2).

        level: int
            Requested level
        cell_iarr: array-like, length 3
            Requested cell from given level.
        fields: list
            Requested fields

        Returns:
            cell_data: dict
                Dictionary of field_name, field_data
        """
        cell_iarr = np.array(cell_iarr, dtype="int64")
        lk, rk =self.get_key_bounds(level, cell_iarr)
        mylog.debug("Reading contiguous chunk from %i to %i" % (lk, rk))
        return self.get_contiguous_chunk(lk, rk, fields)
Example #33
    def iter_data(self, inds, fields):
        num_inds = len(inds)
        num_reads = 0
        mylog.debug('MIDX Reading %i chunks' % num_inds)
        i = 0
        while (i < num_inds):
            ind = inds[i]
            base = self.indexdata['base'][ind]
            length = self.indexdata['len'][ind]
            # Concatenate aligned reads
            nexti = i+1
            combined = 0
            while nexti < num_inds:
                nextind = inds[nexti]
                #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
                if combined < 1024 and base + length == self.indexdata['base'][nextind]:
                    length += self.indexdata['len'][nextind]
                    i += 1
                    nexti += 1
                    combined += 1
                else:
                    break

            chunk = slice(base, base+length)
            mylog.debug('Reading chunk %i of length %i after catting %i starting at %i' % (i, length, combined, ind))
            num_reads += 1
            if length > 0:
                data = self.get_data(chunk, fields)
                yield data
                del data
            i += 1
        mylog.debug('Read %i chunks, batched into %i reads' % (num_inds, num_reads))
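The inner loop of `iter_data` batches index entries whose on-disk extents are back-to-back into one larger read. The standalone sketch below reproduces just that coalescing step with invented `base`/`len` arrays.

import numpy as np

# Sketch of coalescing contiguous (base, length) index entries into single
# reads, as in iter_data above; the index values are invented.
base = np.array([0, 10, 20, 50, 60])
length = np.array([10, 10, 5, 10, 10])
inds = [0, 1, 2, 3, 4]

reads = []
i = 0
while i < len(inds):
    b, l = int(base[inds[i]]), int(length[inds[i]])
    # Keep extending while the next entry starts exactly where this one ends.
    while i + 1 < len(inds) and b + l == base[inds[i + 1]]:
        l += int(length[inds[i + 1]])
        i += 1
    reads.append(slice(b, b + l))
    i += 1

print(reads)   # two batched reads: records 0..25 and 50..70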
Example #34
    def iter_data(self, inds, fields):
        num_inds = len(inds)
        num_reads = 0
        mylog.debug('MIDX Reading %i chunks' % num_inds)
        i = 0
        while (i < num_inds):
            ind = inds[i]
            base = self.indexdata['base'][ind]
            length = self.indexdata['len'][ind]
            # Concatenate aligned reads
            nexti = i+1
            combined = 0
            while nexti < num_inds:
                nextind = inds[nexti]
                #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
                if combined < 1024 and base + length == self.indexdata['base'][nextind]:
                    length += self.indexdata['len'][nextind]
                    i += 1
                    nexti += 1
                    combined += 1
                else:
                    break

            chunk = slice(base, base+length)
            mylog.debug('Reading chunk %i of length %i after catting %i starting at %i' % (i, length, combined, ind))
            num_reads += 1
            if length > 0:
                data = self.get_data(chunk, fields)
                yield data
                del data
            i += 1
        mylog.debug('Read %i chunks, batched into %i reads' % (num_inds, num_reads))
Example #35
 def check_derived_fields(self, fields_to_check=None):
     deps = {}
     unavailable = []
     fields_to_check = fields_to_check or self.keys()
     for field in fields_to_check:
         mylog.debug("Checking %s", field)
         if field not in self: raise RuntimeError
         fi = self[field]
         try:
             fd = fi.get_dependencies(ds=self.ds)
         except Exception as e:
             if field in self._show_field_errors:
                 raise
             if type(e) != YTFieldNotFound:
                 mylog.debug("Raises %s during field %s detection.",
                             str(type(e)), field)
             continue
         # This next bit checks that we can't somehow generate everything.
         # We also manually update the 'requested' attribute
         missing = not all(f in self.field_list for f in fd.requested)
         if missing:
             self.pop(field)
             unavailable.append(field)
             continue
         fd.requested = set(fd.requested)
         deps[field] = fd
         mylog.debug("Succeeded with %s (needs %s)", field, fd.requested)
     dfl = set(self.ds.derived_field_list).union(deps.keys())
     self.ds.derived_field_list = list(sorted(dfl))
     return deps, unavailable
Example #36
 def check_derived_fields(self, fields_to_check = None):
     deps = {}
     unavailable = []
     fields_to_check = fields_to_check or list(self.keys())
     for field in fields_to_check:
         mylog.debug("Checking %s", field)
         if field not in self: raise RuntimeError
         fi = self[field]
         try:
             fd = fi.get_dependencies(ds = self.ds)
         except Exception as e:
             if field in self._show_field_errors:
                 raise
             if type(e) != YTFieldNotFound:
                 mylog.debug("Raises %s during field %s detection.",
                             str(type(e)), field)
             self.pop(field)
             continue
         # This next bit checks that we can't somehow generate everything.
         # We also manually update the 'requested' attribute
         missing = not all(f in self.field_list for f in fd.requested)
         if missing:
             self.pop(field)
             unavailable.append(field)
             continue
         fd.requested = set(fd.requested)
         deps[field] = fd
         mylog.debug("Succeeded with %s (needs %s)", field, fd.requested)
     dfl = set(self.ds.derived_field_list).union(deps.keys())
     self.ds.derived_field_list = list(sorted(dfl))
     return deps, unavailable
Example #37
    def __init__(self, ds, domain_id):
        self.ds = ds
        self.domain_id = domain_id

        num = os.path.basename(
            ds.parameter_filename).split(".")[0].split("_")[1]
        basedir = os.path.abspath(os.path.dirname(ds.parameter_filename))
        basename = "%s/%%s_%s.out%05i" % (basedir, num, domain_id)
        part_file_descriptor = "%s/part_file_descriptor.txt" % basedir
        for t in ['grav', 'amr']:
            setattr(self, "%s_fn" % t, basename % t)
        self._part_file_descriptor = part_file_descriptor
        self._read_amr_header()
        # self._read_hydro_header()

        # Autodetect field files
        field_handlers = [
            FH(self) for FH in get_field_handlers() if FH.any_exist(ds)
        ]
        self.field_handlers = field_handlers
        for fh in field_handlers:
            mylog.debug('Detected fluid type %s in domain_id=%s' %
                        (fh.ftype, domain_id))
            fh.detect_fields(ds)
            # self._add_ftype(fh.ftype)

        # Autodetect particle files
        particle_handlers = [
            PH(ds, domain_id) for PH in get_particle_handlers()
            if PH.any_exist(ds)
        ]
        self.particle_handlers = particle_handlers
        for ph in particle_handlers:
            mylog.debug('Detected particle type %s in domain_id=%s' %
                        (ph.ptype, domain_id))
            ph.read_header()
            # self._add_ptype(ph.ptype)

        # Load the AMR structure
        self._read_amr()
Example #38
    def _initialize_index(self, data_file, regions):
        if self.index_ptype == "all":
            ptypes = self.ds.particle_types_raw
            pcount = sum(data_file.total_particles.values())
        else:
            ptypes = [self.index_ptype]
            pcount = data_file.total_particles[self.index_ptype]
        morton = np.empty(pcount, dtype='uint64')
        if pcount == 0: return morton
        mylog.debug("Initializing index % 5i (% 7i particles)",
                    data_file.file_id, pcount)
        ind = 0
        with h5py.File(data_file.filename, "r") as f:
            if not f.keys(): return None
            dx = np.finfo(f["Group"]["GroupPos"].dtype).eps
            dx = 2.0 * self.ds.quan(dx, "code_length")

            for ptype in ptypes:
                if data_file.total_particles[ptype] == 0: continue
                pos = f[ptype]["%sPos" % ptype].value.astype("float64")
                pos = np.resize(pos, (data_file.total_particles[ptype], 3))
                pos = data_file.ds.arr(pos, "code_length")

                # These are 32 bit numbers, so we give a little lee-way.
                # Otherwise, for big sets of particles, we often will bump into the
                # domain edges.  This helps alleviate that.
                np.clip(pos, self.ds.domain_left_edge + dx,
                        self.ds.domain_right_edge - dx, pos)
                if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
                   np.any(pos.max(axis=0) > self.ds.domain_right_edge):
                    raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                           self.ds.domain_left_edge,
                                           self.ds.domain_right_edge)
                regions.add_data_file(pos, data_file.file_id)
                morton[ind:ind + pos.shape[0]] = compute_morton(
                    pos[:, 0], pos[:, 1], pos[:, 2],
                    data_file.ds.domain_left_edge,
                    data_file.ds.domain_right_edge)
                ind += pos.shape[0]
        return morton
Example #39
    def filter_bbox(self, left, right, myiter):
        """
        Filter data by masking out data outside of a bbox defined
        by left/right. Account for periodicity of data, allowing left/right
        to be outside of the domain.
        """

        for data in myiter:
            # mask = np.zeros_like(data, dtype='bool')
            pos = np.array([data["x"].copy(), data["y"].copy(), data["z"].copy()]).T

            DW = self.true_domain_width
            # This hurts, but is useful for periodicity. Probably should check first
            # if it is even needed for a given left/right
            _shift_periodic(pos, left, right, DW)

            # Now get all particles that are within the bbox
            mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
            # print('Mask shape, sum:', mask.shape, mask.sum())

            mylog.debug(
                "Filtering particles, returning %i out of %i"
                % (mask.sum(), mask.shape[0])
            )

            if not np.any(mask):
                continue

            filtered = {ax: pos[:, i][mask] for i, ax in enumerate("xyz")}
            for f in data.keys():
                if f in "xyz":
                    continue
                filtered[f] = data[f][mask]

            # for i, ax in enumerate('xyz'):
            #    #print(left, right)
            #    assert np.all(filtered[ax] >= left[i])
            #    assert np.all(filtered[ax] < right[i])

            yield filtered
Example #40
    def _initialize_oct_handler(self):
        """
        Just count the number of octs per domain and
        allocate the requisite memory in the oct tree
        """
        nv = len(self.fluid_field_list)
        self.oct_handler = ARTOctreeContainer(
            self.dataset.domain_dimensions / 2,  # dd is # of root cells
            self.dataset.domain_left_edge,
            self.dataset.domain_right_edge,
            1)
        # The 1 here refers to domain_id == 1 always for ARTIO.
        self.domains = [ARTDomainFile(self.dataset, nv, self.oct_handler, 1)]
        self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]

        self.total_octs = sum(self.octs_per_domain)
        mylog.debug("Allocating %s octs", self.total_octs)
        self.oct_handler.allocate_domains(self.octs_per_domain)
        domain = self.domains[0]
        domain._read_amr_root(self.oct_handler)
        domain._read_amr_level(self.oct_handler)
        self.oct_handler.finalize()
Example #41
    def __init__(self, points, ds=None, field_parameters=None, data_source=None):
        validate_object(ds, Dataset)
        validate_object(field_parameters, dict)
        validate_object(data_source, YTSelectionContainer)
        validate_object(points, YTArray)

        points = fix_length(points, ds)
        if len(points) < 2:
            raise YTException(
                f"Not enough points. Expected at least 2, got {len(points)}"
            )
        mylog.debug("Building minimal sphere around points.")
        mb = _miniball.Miniball(points)
        if not mb.is_valid():
            raise YTException("Could not build valid sphere around points.")

        center = ds.arr(mb.center(), points.units)
        radius = ds.quan(np.sqrt(mb.squared_radius()), points.units)
        super(YTMinimalSphere, self).__init__(center, ds, field_parameters, data_source)
        self.set_field_parameter("radius", radius)
        self.set_field_parameter("center", self.center)
        self.radius = radius
Example #42
 def _initialize_index(self, data_file, regions):
     pcount = data_file.header["num_halos"]
     morton = np.empty(pcount, dtype='uint64')
     mylog.debug("Initializing index % 5i (% 7i particles)",
                 data_file.file_id, pcount)
     if pcount == 0:
         return morton
     ind = 0
     ptype = 'halos'
     with open(data_file.filename, "rb") as f:
         pos = data_file._get_particle_positions(ptype, f=f)
         pos = data_file.ds.arr(pos, "code_length")
         if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
            np.any(pos.max(axis=0) > self.ds.domain_right_edge):
             raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                    self.ds.domain_left_edge,
                                    self.ds.domain_right_edge)
         regions.add_data_file(pos, data_file.file_id)
         morton[ind:ind + pos.shape[0]] = compute_morton(
             pos[:, 0], pos[:, 1], pos[:, 2], data_file.ds.domain_left_edge,
             data_file.ds.domain_right_edge)
     return morton
Example #43
    def filter_bbox(self, left, right, myiter):
        """
        Filter data by masking out data outside of a bbox defined
        by left/right. Account for periodicity of data, allowing left/right
        to be outside of the domain.
        """

        for data in myiter:
            #mask = np.zeros_like(data, dtype='bool')
            pos = np.array([data['x'].copy(), data['y'].copy(), data['z'].copy()]).T

            DW = self.true_domain_width
            # This hurts, but is useful for periodicity. Probably should check first
            # if it is even needed for a given left/right
            _shift_periodic(pos, left, right, DW)

            # Now get all particles that are within the bbox
            mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
            #print 'Mask shape, sum:', mask.shape, mask.sum()

            mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))

            if not np.any(mask):
                continue

            filtered = {ax: pos[:, i][mask] for i, ax in enumerate('xyz')}
            for f in data.keys():
                if f in 'xyz':
                    continue
                filtered[f] = data[f][mask]

            #for i, ax in enumerate('xyz'):
            #    #print left, right
            #    assert np.all(filtered[ax] >= left[i])
            #    assert np.all(filtered[ax] < right[i])

            yield filtered
Example #44
 def _initialize_index(self, data_file, regions):
     pcount = data_file.header["num_halos"]
     morton = np.empty(pcount, dtype='uint64')
     mylog.debug("Initializing index % 5i (% 7i particles)",
                 data_file.file_id, pcount)
     if pcount == 0: return morton
     ind = 0
     with open(data_file.filename, "rb") as f:
         f.seek(data_file._position_offset, os.SEEK_SET)
         halos = np.fromfile(f, dtype=self._halo_dt, count = pcount)
         pos = np.empty((halos.size, 3), dtype="float64")
         # These positions are in Mpc, *not* "code" units
         pos = data_file.ds.arr(pos, "code_length")
         dx = np.finfo(halos['particle_position_x'].dtype).eps
         dx = 2.0*self.ds.quan(dx, "code_length")
         pos[:,0] = halos["particle_position_x"]
         pos[:,1] = halos["particle_position_y"]
         pos[:,2] = halos["particle_position_z"]
         # These are 32 bit numbers, so we give a little lee-way.
         # Otherwise, for big sets of particles, we often will bump into the
         # domain edges.  This helps alleviate that.
         np.clip(pos, self.ds.domain_left_edge + dx,
                      self.ds.domain_right_edge - dx, pos)
         del halos
         if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
            np.any(pos.max(axis=0) > self.ds.domain_right_edge):
             raise YTDomainOverflow(pos.min(axis=0),
                                    pos.max(axis=0),
                                    self.ds.domain_left_edge,
                                    self.ds.domain_right_edge)
         regions.add_data_file(pos, data_file.file_id)
         morton[ind:ind+pos.shape[0]] = compute_morton(
             pos[:,0], pos[:,1], pos[:,2],
             data_file.ds.domain_left_edge,
             data_file.ds.domain_right_edge)
     return morton
Example #45
    def get_contiguous_chunk(self, left_key, right_key, fields):
        liarr = self.get_ind_from_key(left_key)
        riarr = self.get_ind_from_key(right_key)

        lbase=0
        llen = 0
        if left_key > self._max_key:
            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % \
                               (left_key, self._max_key))
        right_key = min(right_key, self._max_key)

        left_key = self.get_next_nonzero_chunk(left_key, right_key-1)
        right_key = self.get_previous_nonzero_chunk(right_key, left_key)

        lbase = self.indexdata['base'][left_key]
        llen = self.indexdata['len'][left_key]

        rbase = self.indexdata['base'][right_key]
        rlen = self.indexdata['len'][right_key]

        length = rbase + rlen - lbase
        if length > 0:
            mylog.debug('Getting contiguous chunk of size %i starting at %i' % (length, lbase))
        return self.get_data(slice(lbase, lbase + length), fields)
Example #46
    def _initialize_index(self, data_file, regions):
        pcount = sum(data_file.total_particles.values())
        morton = np.empty(pcount, dtype='uint64')
        if pcount == 0: return morton
        mylog.debug("Initializing index % 5i (% 7i particles)",
                    data_file.file_id, pcount)
        ind = 0
        with h5py.File(data_file.filename, "r") as f:
            if not f.keys(): return None
            dx = np.finfo(f["Group"]["GroupPos"].dtype).eps
            dx = 2.0*self.ds.quan(dx, "code_length")

            for ptype in data_file.ds.particle_types_raw:
                if data_file.total_particles[ptype] == 0: continue
                pos = f[ptype]["%sPos" % ptype].value.astype("float64")
                pos = np.resize(pos, (data_file.total_particles[ptype], 3))
                pos = data_file.ds.arr(pos, "code_length")
                
                # These are 32 bit numbers, so we give a little lee-way.
                # Otherwise, for big sets of particles, we often will bump into the
                # domain edges.  This helps alleviate that.
                np.clip(pos, self.ds.domain_left_edge + dx,
                             self.ds.domain_right_edge - dx, pos)
                if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
                   np.any(pos.max(axis=0) > self.ds.domain_right_edge):
                    raise YTDomainOverflow(pos.min(axis=0),
                                           pos.max(axis=0),
                                           self.ds.domain_left_edge,
                                           self.ds.domain_right_edge)
                regions.add_data_file(pos, data_file.file_id)
                morton[ind:ind+pos.shape[0]] = compute_morton(
                    pos[:,0], pos[:,1], pos[:,2],
                    data_file.ds.domain_left_edge,
                    data_file.ds.domain_right_edge)
                ind += pos.shape[0]
        return morton
Example #47
    def iter_filtered_bbox_fields(self, left, right, data,
                                  pos_fields, fields):
        """
        This function should be destroyed, as it will only work with units.
        """

        kpcuq = left.in_units('kpccm').uq
        mpcuq = left.in_units('Mpccm/h').uq
        DW = (self.true_domain_width * kpcuq).in_units('Mpc/h')
        if pos_fields is None:
            pos_fields = 'x','y','z'
        xf, yf, zf = pos_fields
        mylog.debug("Using position fields: %s" % pos_fields)

        # I'm sorry.
        pos = mpcuq * np.array([data[xf].in_units('Mpccm/h'), data[yf].in_units('Mpccm/h'), data[zf].in_units('Mpccm/h')]).T

        # This hurts, but is useful for periodicity. Probably should check first
        # if it is even needed for a given left/right
        _shift_periodic(pos, left, right, DW)

        mylog.debug("Periodic filtering, %s %s %s %s" % (left, right, pos.min(axis=0), pos.max(axis=0)))
        # Now get all particles that are within the bbox
        mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)

        mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))

        if np.any(mask):
            for i,f in enumerate(pos_fields):
                yield f, pos[:, i][mask]

            for f in fields:
                if f in pos_fields:
                    continue
                # print 'yielding nonpos field', f
                yield f, data[f][mask]
Example #48
    def _read_fluid_selection(self, chunks, selector, fields, size):
        from sys import version
        rv = {}
        chunks = list(chunks)

        if selector.__class__.__name__ == "GridSelector":
            if not (len(chunks) == len(chunks[0].objs) == 1):
                raise RuntimeError
            grid = chunks[0].objs[0]
            h5f = h5py.File(grid.filename, 'r')
            gds = h5f.get(_grid_dname(grid.id))
            for ftype, fname in fields:
                if self.ds.field_ordering == 1:
                    rv[(ftype, fname)] = gds.get(fname).value.swapaxes(0, 2)
                else:
                    rv[(ftype, fname)] = gds.get(fname).value
            h5f.close()
            return rv
        if size is None:
            size = sum((grid.count(selector) for chunk in chunks
                        for grid in chunk.objs))

        if any((ftype != "gdf" for ftype, fname in fields)):
            raise NotImplementedError

        for field in fields:
            ftype, fname = field
            fsize = size
            # check the dtype instead
            rv[field] = np.empty(fsize, dtype="float64")
        ngrids = sum(len(chunk.objs) for chunk in chunks)
        mylog.debug("Reading %s cells of %s fields in %s blocks",
                    size, [fname for ftype, fname in fields], ngrids)
        ind = 0
        for chunk in chunks:
            fid = None
            for grid in chunk.objs:
                if grid.filename is None:
                    continue
                if fid is None:
                    if version < '3':
                        fid = h5py.h5f.open(grid.filename,h5py.h5f.ACC_RDONLY)
                    else:
                        fid = h5py.h5f.open(bytes(grid.filename,'utf-8'),h5py.h5f.ACC_RDONLY)
                if self.ds.field_ordering == 1:
                    # check the dtype instead
                    data = np.empty(grid.ActiveDimensions[::-1],
                                    dtype="float64")
                    data_view = data.swapaxes(0, 2)
                else:
                    # check the dtype instead
                    data_view = data = np.empty(grid.ActiveDimensions,
                                                dtype="float64")
                for field in fields:
                    ftype, fname = field
                    if version < '3':
                        dg = h5py.h5d.open(fid, _field_dname(grid.id, fname))
                    else:
                        dg = h5py.h5d.open(fid, bytes(_field_dname(grid.id, fname),'utf-8'))
                    dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
                    # caches
                    nd = grid.select(selector, data_view, rv[field], ind)
                ind += nd    # I don't get that part, only last nd is added
            if fid is not None:
                fid.close()
        return rv
Example #49
 def iter_ibbox_data(self, left, right, fields):
     mylog.debug('MIDX Loading region from %s to %s' %(left, right))
     inds = self.get_ibbox(left, right)
     return self.iter_data(inds, fields)
Example #50
    def set_bounds(self):
        if ('x_min' in self.sdfdata.parameters and 'x_max' in self.sdfdata.parameters) or \
           ('theta_min' in self.sdfdata.parameters and 'theta_max' in self.sdfdata.parameters):
            if 'x_min' in self.sdfdata.parameters:
                rmin = np.array([self.sdfdata.parameters['x_min'],
                                 self.sdfdata.parameters['y_min'],
                                 self.sdfdata.parameters['z_min']])
                rmax = np.array([self.sdfdata.parameters['x_max'],
                                 self.sdfdata.parameters['y_max'],
                                 self.sdfdata.parameters['z_max']])
            elif 'theta_min' in self.sdfdata.parameters:
                rmin = np.array([self.sdfdata.parameters['r_min'],
                                 self.sdfdata.parameters['theta_min'],
                                 self.sdfdata.parameters['phi_min']])
                rmax = np.array([self.sdfdata.parameters['r_max'],
                                 self.sdfdata.parameters['theta_max'],
                                 self.sdfdata.parameters['phi_max']])
            self._fix_rexact(rmin, rmax)
            self.true_domain_left = self.rmin.copy()
            self.true_domain_right = self.rmax.copy()
            self.true_domain_width = self.rmax - self.rmin
            self.domain_width = self.rmax - self.rmin
            self.domain_dims = 1 << self.level
            self.domain_buffer = 0
            self.domain_active_dims = self.domain_dims
        else:
            mylog.debug("Setting up older data")
            rx = self.sdfdata.parameters.get('Rx')
            ry = self.sdfdata.parameters.get('Ry')
            rz = self.sdfdata.parameters.get('Rz')
            a =  self.sdfdata.parameters.get("a", 1.0)
            rmin = -a * np.array([rx, ry, rz])
            rmax = a * np.array([rx, ry, rz])
            self.true_domain_left = rmin.copy()
            self.true_domain_right = rmax.copy()
            self.true_domain_width = rmax - rmin

            expand_root = 0.0
            morton_xyz = self.sdfdata.parameters.get("morton_xyz", False)
            if not morton_xyz:
                mylog.debug("Accounting for wandering particles")
                self.wandering_particles = True
                ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
                # Expand root for non power-of-2
                if ic_Nmesh != 0:
                    f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
                    if (f2 != ic_Nmesh):
                        expand_root = 1.0*f2/ic_Nmesh - 1.0;
                        mylog.debug("Expanding: %s, %s, %s" % (f2, ic_Nmesh, expand_root))
                        rmin *= (1.0 + expand_root)
                        rmax *= (1.0 + expand_root)

            self._fix_rexact(rmin, rmax)
            self.domain_width = self.rmax - self.rmin
            self.domain_dims = 1 << self.level
            self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
            self.domain_active_dims = self.domain_dims - 2*self.domain_buffer

        mylog.debug("MIDX rmin: %s, rmax: %s" % (self.rmin, self.rmax))
        mylog.debug("MIDX: domain_width: %s, domain_dims: %s, domain_active_dims: %s " %
                    (self.domain_width, self.domain_dims, self.domain_active_dims))
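In the older-data branch above, `1 << int(np.log2(ic_Nmesh - 1) + 1)` rounds the initial-conditions mesh up to the next power of two, and `expand_root` is the fractional growth that implies. A quick sketch with sample mesh sizes (illustrative values only):

import numpy as np

# Round a mesh size up to the next power of two and compute the implied
# root expansion, as in set_bounds above; the sample sizes are arbitrary.
for ic_Nmesh in (64, 100, 1000):
    f2 = 1 << int(np.log2(ic_Nmesh - 1) + 1)
    expand_root = 1.0 * f2 / ic_Nmesh - 1.0 if f2 != ic_Nmesh else 0.0
    print(ic_Nmesh, f2, round(expand_root, 4))
# 64 stays 64; 100 -> 128 (expand 0.28); 1000 -> 1024 (expand 0.024)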
Example #51
    def get_ibbox(self, ileft, iright):
        """
        Given left and right indicies, return a mask and
        set of offsets+lengths into the sdf data.
        """
        #print 'Getting data from ileft to iright:',  ileft, iright

        ix, iy, iz = (iright-ileft)*1j
        mylog.debug('MIDX IBBOX: %s %s %s %s %s' % (ileft, iright, ix, iy, iz))

        # plus 1 that is sliced, plus a bit since mgrid is not inclusive
        Z, Y, X = np.mgrid[ileft[2]:iright[2]+1.01,
                           ileft[1]:iright[1]+1.01,
                           ileft[0]:iright[0]+1.01]

        mask = slice(0, -1, None)
        X = X[mask, mask, mask].astype('int64').ravel()
        Y = Y[mask, mask, mask].astype('int64').ravel()
        Z = Z[mask, mask, mask].astype('int64').ravel()

        if self.wandering_particles:
            # Need to get padded bbox around the border to catch
            # wandering particles.
            dmask = X < self.domain_buffer
            dmask += Y < self.domain_buffer
            dmask += Z < self.domain_buffer
            dmask += X >= self.domain_dims
            dmask += Y >= self.domain_dims
            dmask += Z >= self.domain_dims
            dinds = self.get_keyv([X[dmask], Y[dmask], Z[dmask]])
            dinds = dinds[dinds < self._max_key]
            dinds = dinds[self.indexdata['len'][dinds] > 0]
            #print 'Getting boundary layers for wanderers, cells: %i' % dinds.size

        # Correct For periodicity
        X[X < self.domain_buffer] += self.domain_active_dims
        Y[Y < self.domain_buffer] += self.domain_active_dims
        Z[Z < self.domain_buffer] += self.domain_active_dims
        X[X >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims
        Y[Y >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims
        Z[Z >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims

        #print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()

        indices = self.get_keyv([X, Y, Z])
        # Only mask out if we are actually getting data rather than getting indices into
        # a space.
        if self.valid_indexdata:
            indices = indices[indices < self._max_key]
            #indices = indices[self.indexdata['len'][indices] > 0]
            # Faster for sparse lookups. Need better heuristic.
            new_indices = []
            for ind in indices:
                if self.indexdata['len'][ind] > 0:
                    new_indices.append(ind)
            indices = np.array(new_indices, dtype="int64")

        #indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
        # Here we sort the indices to batch consecutive reads together.
        if self.wandering_particles:
            indices = np.sort(np.append(indices, dinds))
        else:
            indices = np.sort(indices)
        return indices