Example #1
0
    def _find_compositor(self, dataset_key, query):
        """Find the compositor object for the given dataset_key.

        If *dataset_key* carries modifiers, an implicit dependency subtree
        for the (possibly unmodified) input is built first and prepended to
        the modifier's prerequisites; otherwise a plain compositor lookup
        is performed.

        Args:
            dataset_key: Identifier of the composite to resolve. Must
                support ``is_modified()`` (DataQuery/DataID-like).
            query: Additional filter parameters forwarded to subtree
                creation.

        Returns:
            CompositorNode: Root node for the compositor, with required and
            optional prerequisite subtrees attached.

        Raises:
            KeyError: If no modifier/compositor is known for *dataset_key*.

        """
        if dataset_key.is_modified():
            implicit_dependency_node = self._create_implicit_dependency_subtree(
                dataset_key, query)
            dataset_key = self._promote_query_to_modified_dataid(
                dataset_key, implicit_dependency_node.name)
            try:
                compositor = self.get_modifier(dataset_key)
            except KeyError as err:
                # Chain the original lookup failure for easier debugging.
                raise KeyError("Can't find anything called {}".format(
                    str(dataset_key))) from err
            # The implicit (input) dependency must be produced first.
            compositor.attrs['prerequisites'] = [
                implicit_dependency_node
            ] + list(compositor.attrs['prerequisites'])
        else:
            try:
                compositor = self.get_compositor(dataset_key)
            except KeyError as err:
                raise KeyError("Can't find anything called {}".format(
                    str(dataset_key))) from err

        root = CompositorNode(compositor)
        composite_id = root.name

        prerequisite_filter = composite_id.create_filter_query_without_required_fields(
            dataset_key)

        def _as_filtered_queries(prereqs):
            # Wrap plain prerequisites in filtered queries; Node instances
            # are already resolved and pass through untouched.
            return [
                prereq if isinstance(prereq, Node)
                else create_filtered_query(prereq, prerequisite_filter)
                for prereq in prereqs
            ]

        # Get the prerequisites
        LOG.trace(
            "Looking for composite prerequisites for: {}".format(dataset_key))
        prereqs = _as_filtered_queries(compositor.attrs['prerequisites'])
        prereqs = self._create_required_subtrees(root, prereqs, query=query)
        root.add_required_nodes(prereqs)

        # Get the optionals
        LOG.trace(
            "Looking for optional prerequisites for: {}".format(dataset_key))
        optionals = _as_filtered_queries(
            compositor.attrs['optional_prerequisites'])
        optionals = self._create_optional_subtrees(root,
                                                   optionals,
                                                   query=query)
        root.add_optional_nodes(optionals)

        return root
Example #2
0
    def populate_with_keys(self, dataset_keys: set, query=None):
        """Populate the dependency tree for the requested *dataset_keys*.

        On return, *dataset_keys* has been mutated in place: the original
        string/DataQuery entries are replaced by the resolved DataID names
        of the subtree roots that were attached to the tree.

        Args:
            dataset_keys (set): Strings, DataIDs, DataQuerys to find dependencies for
            query (DataQuery): Additional filter parameters. See
                              `satpy.readers.get_key` for more details.

        Raises:
            MissingDependencies: If dependencies for one or more keys could
                not be resolved. All failures are collected before raising,
                so every key is attempted.

        """
        unknown_datasets = []
        known_nodes = []
        # Iterate over a copy: the set itself is rewritten below.
        for key in dataset_keys.copy():
            try:
                dsq = create_filtered_query(key, query)
                node = self._create_subtree_for_key(dsq, query)
            except MissingDependencies as unknown:
                unknown_datasets.append(unknown.missing_dependencies)
            else:
                known_nodes.append(node)
                self.add_child(self._root, node)

        # Replace the requested keys with the fully-resolved node names.
        dataset_keys.clear()
        for node in known_nodes:
            dataset_keys.add(node.name)
        if unknown_datasets:
            raise MissingDependencies(unknown_datasets, "Unknown datasets:")
Example #3
0
def get_key(key, key_container, num_results=1, best=True, query=None, **kwargs):
    """Get the fully-specified key best matching the provided key.

    Only the best match is returned if `best` is `True` (default); see
    `get_best_dataset_key` for how the "best" result is chosen. `query`
    is a convenience to filter by multiple parameters at once without
    having to encode them all in `key`.

    Args:
        key (DataID): DataID of query parameters to use for searching.
            Any parameter that is `None` is a wild card and any match
            is accepted.
        key_container (dict or set): Container of DataID objects that
            uses hashing to quickly access items.
        num_results (int): Number of results to return. `0` returns all
            matches; `1` (the default) returns the single matching key
            itself rather than a one-element list.
        best (bool): Sort results so the "best" result comes first
            (default: True). See `get_best_dataset_key` for details.
        query (DataQuery): filter for the key, which may contain e.g.:
            resolution (float, int, or list): Resolution of the dataset
                in dataset units (typically meters), or a list of them.
            calibration (str or list): Dataset calibration
                (ex. 'reflectance'), or a list of such strings.
            polarization (str or list): Dataset polarization (ex. 'V'),
                or a list of such strings.
            level (number or list): Dataset level (ex. 100), or a list
                of such numbers.
            modifiers (list): Modifiers applied to the dataset. Unlike
                resolution and calibration this is the exact desired
                list of modifiers for one dataset, not a list of
                possible modifiers.

    Returns (list or DataID): Matching key(s)

    Raises: KeyError if no matching results or if more than one result is
            found when `num_results` is `1`.

    """
    key = create_filtered_query(key, query)

    matches = filter_keys_by_dataset_query(key, key_container)
    if not matches:
        raise KeyError("No dataset matching '{}' found".format(str(key)))

    if best:
        matches = get_best_dataset_key(key, matches)

    if num_results == 1:
        # Exactly one unique result is required in this mode.
        if not matches:
            raise KeyError("No dataset matching '{}' found".format(str(key)))
        if len(matches) != 1:
            raise TooManyResults("No unique dataset matching {}".format(str(key)))
        return matches[0]
    if num_results == 0:
        return matches
    return matches[:num_results]
Example #4
0
File: node.py  Project: duncanwp/satpy
    def _find_dependencies(self, dataset_key, query=None):
        """Find the dependencies for *dataset_key*.

        Tries, in order: the exact already-loaded dataset, the best
        reader-provided dataset, an already-loaded composite, and finally
        a newly built composite subtree.

        Args:
            dataset_key (str, float, DataID, DataQuery): Dataset identifier to locate
                                                         and find any additional
                                                         dependencies for.
            query (DataQuery): Additional filter parameters. See
                               `satpy.readers.get_key` for more details.

        Returns:
            (Node or None, set): The resolved node (or None) and the set
            of unknown dataset keys (empty on success).

        """
        # Special case: No required dependencies for this composite
        if dataset_key is None:
            return self.empty_node, set()
        dsq = dataset_key if query is None else create_filtered_query(dataset_key, query)

        # 0 check if the *exact* dataset is already loaded
        try:
            match = self.getitem(dsq)
        except KeyError:
            # exact dataset isn't loaded, let's load it below
            LOG.trace(
                "Exact dataset {} isn't loaded, will try reader...".format(
                    dataset_key))
        else:
            LOG.trace("Found exact dataset already loaded: {}".format(
                match.name))
            return match, set()

        # 1 try to get *best* dataset from reader
        try:
            match = self._find_reader_dataset(dsq)
        except TooManyResults:
            LOG.warning("Too many possible datasets to load for {}".format(
                dataset_key))
            return None, {dataset_key}
        if match is not None:
            LOG.trace(
                "Found reader provided dataset:\n\tRequested: {}\n\tFound: {}".
                format(dataset_key, match.name))
            return match, set()
        LOG.trace("Could not find dataset in reader: {}".format(dataset_key))

        # 2 try to find a composite by name (any version of it is good enough)
        try:
            # assume that there is no such thing as a "better" composite
            # version so if we find any DataIDs already loaded then
            # we want to use them
            match = self[dsq]
        except KeyError:
            # composite hasn't been loaded yet, let's load it below
            LOG.trace("Composite hasn't been loaded yet, will load: {}".format(
                dataset_key))
        else:
            LOG.trace(
                "Composite already loaded:\n\tRequested: {}\n\tFound: {}".
                format(dataset_key, match.name))
            return match, set()

        # 3 try to find a composite that matches
        try:
            match, unknowns = self._find_compositor(dsq)
        except KeyError:
            match, unknowns = None, {dataset_key}
            LOG.trace("Composite not found: {}".format(dataset_key))
        else:
            LOG.trace("Found composite:\n\tRequested: {}\n\tFound: {}".format(
                dataset_key, match and match.name))

        return match, unknowns