Example #1
def _initialize_oct_handler(self):
    """
    Just count the number of octs per domain and
    allocate the requisite memory in the oct tree
    """
    nv = len(self.fluid_field_list)
    self.oct_handler = ARTOctreeContainer(
        self.dataset.domain_dimensions / 2,  # dd is # of root cells
        self.dataset.domain_left_edge,
        self.dataset.domain_right_edge,
        1)
    # The 1 here refers to domain_id == 1 always for ARTIO.
    self.domains = [ARTDomainFile(self.dataset, nv, self.oct_handler, 1)]
    self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]
    self.total_octs = sum(self.octs_per_domain)
    mylog.debug("Allocating %s octs", self.total_octs)
    self.oct_handler.allocate_domains(self.octs_per_domain)
    domain = self.domains[0]
    domain._read_amr_root(self.oct_handler)
    domain._read_amr_level(self.oct_handler)
    self.oct_handler.finalize()
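
The snippet above, and the full ARTIndex class below, rely on several names
defined elsewhere in yt. A hedged sketch of the imports they presume (module
paths follow yt's 3.x layout and may differ between versions; ARTDomainFile
and ARTDomainSubset are defined in the same frontend module,
yt/frontends/art/data_structures.py):

import os
import weakref

import numpy as np

from yt.funcs import mylog
from yt.geometry.geometry_handler import YTDataChunk
from yt.geometry.oct_container import ARTOctreeContainer
from yt.geometry.oct_geometry_handler import OctreeIndex
from yt.frontends.art.definitions import fluid_fields, particle_fields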
class ARTIndex(OctreeIndex):
    def __init__(self, ds, dataset_type="art"):
        self.fluid_field_list = fluid_fields
        self.dataset_type = dataset_type
        self.dataset = weakref.proxy(ds)
        self.index_filename = self.dataset.parameter_filename
        self.directory = os.path.dirname(self.index_filename)
        self.max_level = ds.max_level
        self.float_type = np.float64
        super(ARTIndex, self).__init__(ds, dataset_type)

    def get_smallest_dx(self):
        """
        Returns (in code units) the smallest cell size in the simulation.
        """
        # Overloaded
        ds = self.dataset
        return (1.0 / ds.domain_dimensions.astype('f8') /
                (2**self.max_level)).min()
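    # Worked example (illustrative numbers, not from any particular dataset):
    # a 128**3 root grid with max_level = 5 gives
    # 1.0 / 128 / 2**5 = 1 / 4096, roughly 2.44e-4 in code units.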

    def _initialize_oct_handler(self):
        """
        Just count the number of octs per domain and
        allocate the requisite memory in the oct tree
        """
        nv = len(self.fluid_field_list)
        self.oct_handler = ARTOctreeContainer(
            self.dataset.domain_dimensions / 2,  # dd is # of root cells
            self.dataset.domain_left_edge,
            self.dataset.domain_right_edge,
            1)
        # The 1 here refers to domain_id == 1 always for ARTIO.
        self.domains = [ARTDomainFile(self.dataset, nv, self.oct_handler, 1)]
        self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]

        self.total_octs = sum(self.octs_per_domain)
        mylog.debug("Allocating %s octs", self.total_octs)
        self.oct_handler.allocate_domains(self.octs_per_domain)
        domain = self.domains[0]
        domain._read_amr_root(self.oct_handler)
        domain._read_amr_level(self.oct_handler)
        self.oct_handler.finalize()

    def _detect_output_fields(self):
        self.particle_field_list = [f for f in particle_fields]
        self.field_list = [("art", f) for f in fluid_fields]
        # now generate all of the possible particle fields
        for ptype in self.dataset.particle_types_raw:
            for pfield in self.particle_field_list:
                pfn = (ptype, pfield)
                self.field_list.append(pfn)
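        # Illustrative outcome (field names hedged; they come from yt's ART
        # definitions module): field_list ends up holding fluid tuples such
        # as ("art", "Density") plus one tuple per raw particle type and
        # particle field, e.g. ("stars", "particle_position_x").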

    def _identify_base_chunk(self, dobj):
        """
        Take the passed in data source dobj, and use its embedded selector
        to calculate the domain mask, build the reduced domain
        subsets and oct counts. Attach this information to dobj.
        """
        if getattr(dobj, "_chunk_info", None) is None:
            # Get all octs within this oct handler
            domains = [
                dom for dom in self.domains if dom.included(dobj.selector)
            ]
            base_region = getattr(dobj, "base_region", dobj)
            if len(domains) > 1:
                mylog.debug("Identified %s intersecting domains", len(domains))
            subsets = [
                ARTDomainSubset(base_region, domain, self.dataset)
                for domain in domains
            ]
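            # Cache the subsets on the data object so later reads reuse this
            # domain selection instead of re-evaluating the selector.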
            dobj._chunk_info = subsets
        dobj._current_chunk = list(self._chunk_all(dobj))[0]

    def _chunk_all(self, dobj):
        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
        # We pass the chunk both the current chunk and list of chunks,
        # as well as the referring data source
        yield YTDataChunk(dobj, "all", oobjs, None)

    def _chunk_spatial(self, dobj, ngz, sort=None, preload_fields=None):
        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
        for i, og in enumerate(sobjs):
            if ngz > 0:
                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
            else:
                g = og
            yield YTDataChunk(dobj, "spatial", [g], None)

    def _chunk_io(self, dobj, cache=True, local_only=False):
        """
        Since subsets are calculated per domain,
        i.e. per file, yield each domain at a time to
        organize by IO. We will eventually chunk out NMSU ART
        to be level-by-level.
        """
        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
        for subset in oobjs:
            yield YTDataChunk(dobj, "io", [subset], None, cache=cache)
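
For orientation, a minimal usage sketch (hedged): ARTIndex is not normally
instantiated by hand; yt.load builds it when an NMSU-ART output is opened,
and reading data from a data object drives _identify_base_chunk and
_chunk_io above. The file name below is illustrative only.

import yt

# Illustrative NMSU-ART output path; substitute a real one.
ds = yt.load("10MpcBox_csf512_a0.300.d")
ad = ds.all_data()

# Reading a fluid field exercises the chunking machinery defined above.
print(ad["art", "Density"].size)

# Smallest cell size in code units, as computed by get_smallest_dx().
print(ds.index.get_smallest_dx())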