Example #1
 def _create_subtree_from_compositors(self, dataset_key, query):
     try:
         node = self._find_compositor(dataset_key, query)
         LOG.trace("Found composite:\n\tRequested: {}\n\tFound: {}".format(dataset_key, node and node.name))
     except KeyError:
         LOG.trace("Composite not found: {}".format(dataset_key))
         raise MissingDependencies({dataset_key})
     return node
Example #2
 def _get_subtree_for_existing_key(self, dsq):
     try:
         node = self.getitem(dsq)
         LOG.trace("Found exact dataset already loaded: {}".format(node.name))
         return node
     except KeyError:
         LOG.trace("Exact dataset {} isn't loaded, will try reader...".format(dsq))
         raise MissingDependencies({dsq})
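
Examples #1 and #2 share the same idea: a plain KeyError from the underlying lookup is translated into MissingDependencies so callers can treat "not found here" as a signal to try the next strategy. A minimal, self-contained sketch of that translation pattern (SimpleTree and this simplified MissingDependencies are illustrative stand-ins, not satpy's implementation):

class MissingDependencies(Exception):
    """Simplified stand-in: carries the keys that could not be resolved."""

    def __init__(self, missing):
        super().__init__(missing)
        self.missing_dependencies = set(missing)


class SimpleTree:
    def __init__(self, nodes):
        self._nodes = nodes  # mapping of dataset key -> node

    def get_existing_node(self, key):
        try:
            node = self._nodes[key]  # plain dict lookup, may raise KeyError
        except KeyError:
            # translate the generic KeyError into the tree-specific error
            raise MissingDependencies({key})
        return node


tree = SimpleTree({"ir_108": "node:ir_108"})
print(tree.get_existing_node("ir_108"))        # -> node:ir_108
try:
    tree.get_existing_node("natural_color")
except MissingDependencies as err:
    print("missing:", err.missing_dependencies)  # -> missing: {'natural_color'}
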
Example #3
    def _find_compositor(self, dataset_key, query):
        """Find the compositor object for the given dataset_key."""
        # NOTE: This function can not find a modifier that performs
        # one or more modifications; if the dataset_key has modifiers,
        # see if we can find the unmodified version first

        if dataset_key.is_modified():
            implicit_dependency_node = self._create_implicit_dependency_subtree(
                dataset_key, query)
            dataset_key = self._promote_query_to_modified_dataid(
                dataset_key, implicit_dependency_node.name)
            try:
                compositor = self.get_modifier(dataset_key)
            except KeyError:
                raise KeyError("Can't find anything called {}".format(
                    str(dataset_key)))
            compositor.attrs['prerequisites'] = [
                implicit_dependency_node
            ] + list(compositor.attrs['prerequisites'])
        else:
            try:
                compositor = self.get_compositor(dataset_key)
            except KeyError:
                raise KeyError("Can't find anything called {}".format(
                    str(dataset_key)))

        root = CompositorNode(compositor)
        composite_id = root.name

        prerequisite_filter = composite_id.create_filter_query_without_required_fields(
            dataset_key)

        # Get the prerequisites
        LOG.trace(
            "Looking for composite prerequisites for: {}".format(dataset_key))
        prereqs = [
            create_filtered_query(prereq, prerequisite_filter)
            if not isinstance(prereq, Node) else prereq
            for prereq in compositor.attrs['prerequisites']
        ]
        prereqs = self._create_required_subtrees(root, prereqs, query=query)
        root.add_required_nodes(prereqs)

        # Get the optionals
        LOG.trace(
            "Looking for optional prerequisites for: {}".format(dataset_key))
        optionals = [
            create_filtered_query(prereq, prerequisite_filter)
            if not isinstance(prereq, Node) else prereq
            for prereq in compositor.attrs['optional_prerequisites']
        ]
        optionals = self._create_optional_subtrees(root,
                                                   optionals,
                                                   query=query)
        root.add_optional_nodes(optionals)

        return root
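
A pattern worth noting in Example #3: prerequisites that are not already Node objects are wrapped with create_filtered_query so they inherit filter fields (e.g. resolution) from the parent request. The sketch below imitates that list comprehension with plain dicts; this create_filtered_query and Node are simplified stand-ins for satpy's versions.

class Node:
    """Minimal placeholder for an already-resolved dependency node."""

    def __init__(self, name):
        self.name = name


def create_filtered_query(prereq, filter_fields):
    # stand-in: combine the prerequisite name with fields inherited
    # from the parent composite request
    query = {"name": prereq}
    query.update(filter_fields)
    return query


prerequisite_filter = {"resolution": 1000}
prerequisites = ["ir_108", Node("already_resolved"), "ir_120"]

prereqs = [
    create_filtered_query(prereq, prerequisite_filter)
    if not isinstance(prereq, Node) else prereq
    for prereq in prerequisites
]
print(prereqs[0])       # {'name': 'ir_108', 'resolution': 1000}
print(prereqs[1].name)  # already_resolved
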
Example #4
 def _create_subtree_from_reader(self, dataset_key, query):
     try:
         node = self._find_reader_node(dataset_key, query)
     except MissingDependencies:
         LOG.trace("Could not find dataset in reader: {}".format(dataset_key))
         raise
     else:
         LOG.trace("Found reader provided dataset:\n\tRequested: {}\n\tFound: {}".format(dataset_key, node.name))
         return node
Example #5
 def _get_subtree_for_existing_name(self, dsq):
     try:
         # assume that there is no such thing as a "better" composite
         # version, so if we find any DataIDs already loaded then
         # we want to use them
         node = self[dsq]
         LOG.trace("Composite already loaded:\n\tRequested: {}\n\tFound: {}".format(dsq, node.name))
         return node
     except KeyError:
         # composite hasn't been loaded yet, let's load it below
         LOG.trace("Composite hasn't been loaded yet, will load: {}".format(dsq))
         raise MissingDependencies({dsq})
Example #6
 def _find_matching_ids_in_readers(self, dataset_key):
     matching_ids = {}
     for reader_name, reader_instance in self.readers.items():
         matching_ids[reader_name] = []
         try:
             ds_ids = reader_instance.get_dataset_key(dataset_key, available_only=self._available_only,
                                                      num_results=0, best=False)
         except KeyError:
             LOG.trace("Can't find dataset %s in reader %s", str(dataset_key), reader_name)
             continue
         matching_ids[reader_name].extend(ds_ids)
     return matching_ids
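
Example #6 asks every configured reader for the key and simply skips readers that raise KeyError, so a reader that does not provide the dataset never aborts the search. A small standalone version of that collect-and-skip loop (FakeReader is only for demonstration):

class FakeReader:
    """Toy reader that only knows a fixed set of dataset names."""

    def __init__(self, known):
        self._known = known

    def get_dataset_key(self, key):
        if key not in self._known:
            raise KeyError(key)
        return [key]


readers = {"seviri_l1b": FakeReader({"IR_108"}), "abi_l1b": FakeReader({"C13"})}

matching_ids = {}
for reader_name, reader_instance in readers.items():
    matching_ids[reader_name] = []
    try:
        ds_ids = reader_instance.get_dataset_key("IR_108")
    except KeyError:
        # this reader can't provide the dataset; move on to the next one
        continue
    matching_ids[reader_name].extend(ds_ids)

print(matching_ids)  # {'seviri_l1b': ['IR_108'], 'abi_l1b': []}
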
Example #7
    def _create_subtree_for_key(self, dataset_key, query=None):
        """Find the dependencies for *dataset_key*.

        Args:
            dataset_key (str, float, DataID, DataQuery): Dataset identifier to locate
                                                         and find any additional
                                                         dependencies for.
            query (DataQuery): Additional filter parameters. See
                               `satpy.readers.get_key` for more details.

        """
        # 0 check if the *exact* dataset is already loaded
        try:
            node = self._get_subtree_for_existing_key(dataset_key)
        except MissingDependencies:
            # exact dataset isn't loaded, let's load it below
            pass
        else:
            return node

        # 1 try to get *best* dataset from reader
        try:
            node = self._create_subtree_from_reader(dataset_key, query)
        except TooManyResults:
            LOG.warning("Too many possible datasets to load for {}".format(
                dataset_key))
            raise MissingDependencies({dataset_key})
        except MissingDependencies:
            pass
        else:
            return node

        # 2 try to find a composite by name (any version of it is good enough)
        try:
            node = self._get_subtree_for_existing_name(dataset_key)
        except MissingDependencies:
            pass
        else:
            return node

        # 3 try to find a composite that matches
        try:
            node = self._create_subtree_from_compositors(dataset_key, query)
        except MissingDependencies:
            raise
        else:
            return node
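
Example #7 ties the previous helpers together: each strategy is tried in order, MissingDependencies means "fall through to the next one", and the else branch returns on the first success. The same try/except/else chain can be written generically as below (the strategy functions are illustrative placeholders, not satpy code):

class MissingDependencies(Exception):
    """Simplified stand-in used as the 'try the next strategy' signal."""


def from_cache(key):
    raise MissingDependencies({key})        # pretend nothing is loaded yet


def from_reader(key):
    if key == "IR_108":
        return "reader-node:IR_108"
    raise MissingDependencies({key})


def from_compositors(key):
    return "compositor-node:" + key


def resolve(key):
    for strategy in (from_cache, from_reader, from_compositors):
        try:
            node = strategy(key)
        except MissingDependencies:
            continue                        # strategy failed, try the next one
        else:
            return node
    raise MissingDependencies({key})


print(resolve("IR_108"))          # satisfied by the reader strategy
print(resolve("natural_color"))   # falls through to the compositor strategy
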
Example #8
    def _create_optional_subtrees(self, parent, prereqs, query=None):
        """Determine optional prerequisite Nodes for a composite.

        Args:
            parent (Node): Compositor node to add these prerequisites under
            prereqs (sequence): Strings (names), floats (wavelengths), or
                                DataQuerys to analyze.

        """
        prereq_nodes, unknown_datasets = self._create_prerequisite_subtrees(parent, prereqs, query)

        for prereq, unknowns in unknown_datasets.items():
            u_str = ", ".join([str(x) for x in unknowns])
            LOG.debug('Skipping optional %s: Unknown dataset %s',
                      str(prereq), u_str)

        return prereq_nodes
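
Example #8 handles optional prerequisites leniently: the shared helper returns both the resolved nodes and the prerequisites it could not resolve, and the unresolved ones are only logged and skipped rather than raised. A rough sketch of that behaviour (resolve_one and the surrounding names are made up for illustration):

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger("deps")

KNOWN = {"ir_108", "ir_120"}


def resolve_one(prereq):
    if prereq in KNOWN:
        return "node:" + prereq
    raise KeyError(prereq)


def create_optional_subtrees(prereqs):
    prereq_nodes = []
    unknown_datasets = {}
    for prereq in prereqs:
        try:
            prereq_nodes.append(resolve_one(prereq))
        except KeyError:
            unknown_datasets[prereq] = [prereq]

    # optional prerequisites that can't be resolved are logged, not raised
    for prereq, unknowns in unknown_datasets.items():
        u_str = ", ".join(str(x) for x in unknowns)
        LOG.debug('Skipping optional %s: Unknown dataset %s', prereq, u_str)

    return prereq_nodes


print(create_optional_subtrees(["ir_108", "vis_006"]))  # ['node:ir_108']
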
Example #9
 def _get_unique_matching_id(self, matching_ids, dataset_key, query):
     """Get unique matching id from `matching_ids`, for a given `dataset_key` and some optional `query`."""
     all_ids = sum(matching_ids.values(), [])
     if len(all_ids) == 0:
         raise MissingDependencies({dataset_key})
     elif len(all_ids) == 1:
         result = all_ids[0]
     else:
         sorted_ids, distances = dataset_key.sort_dataids_with_preference(all_ids, query)
         try:
             result = self._get_unique_id_from_sorted_ids(sorted_ids, distances)
         except TooManyResults:
             LOG.trace("Too many datasets matching key {} in readers {}".format(dataset_key, matching_ids.keys()))
             raise TooManyResults("Too many keys matching: {}".format(dataset_key))
         except MissingDependencies:
             raise MissingDependencies({dataset_key})
     return result
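
In Example #9, sum(matching_ids.values(), []) flattens the per-reader lists into a single candidate list before deciding whether the match is missing, unique, or ambiguous. A standalone demonstration of that flattening and the three-way branch (the preference-sorting step from the original is left out):

matching_ids = {"seviri_l1b": ["IR_108"], "abi_l1b": [], "ahi_hsd": ["B13"]}

# flatten the per-reader lists into one list of candidate IDs
all_ids = sum(matching_ids.values(), [])
print(all_ids)  # ['IR_108', 'B13']

if len(all_ids) == 0:
    print("no reader provides this dataset")
elif len(all_ids) == 1:
    print("unique match:", all_ids[0])
else:
    # more than one candidate: the original code sorts them by how well
    # they match the query and only accepts a unique best match
    print("ambiguous, candidates:", all_ids)
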