Example No. 1
    def _find_similar_hypothesis(
        self,
        new_meaning: PerceptionGraph,
        candidates: Iterable["AbstractCrossSituationalLearner.Hypothesis"],
    ) -> Optional[Tuple[PartialMatchRatio,
                        "AbstractCrossSituationalLearner.Hypothesis"]]:
        """
        Finds the hypothesis in candidates most similar to new_meaning and returns it
        together with the match ratio.

        Returns None if no candidate can be found that is sufficiently similar to new_meaning. A candidate is
        sufficiently similar if and only if its match ratio with new_meaning is at least
        _graph_match_confirmation_threshold.
        """
        candidates_iter = iter(candidates)
        match = None
        while match is None:
            try:
                existing_hypothesis = next(candidates_iter)
            except StopIteration:
                return None

            try:
                match = compute_match_ratio(
                    existing_hypothesis.pattern_template,
                    new_meaning,
                    ontology=self._ontology,
                )
            except RuntimeError:
                # Occurs when no matches of the pattern are found in the graph. This seems to
                # indicate some full matches and some matches with no intersection at all.
                pass

        # Scan the remaining candidates from the same iterator so that each
        # candidate is scored at most once.
        for candidate in candidates_iter:
            try:
                new_match = compute_match_ratio(candidate.pattern_template,
                                                new_meaning,
                                                ontology=self._ontology)
            except RuntimeError:
                # Occurs when no matches of the pattern are found in the graph. This seems to
                # indicate some full matches and some matches with no intersection at all.
                new_match = None
            if new_match and new_match.match_ratio > match.match_ratio:
                match = new_match
                existing_hypothesis = candidate
        if (match.match_ratio >= self._graph_match_confirmation_threshold
                and match.matching_subgraph and existing_hypothesis):
            return match, existing_hypothesis
        else:
            return None
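
For orientation, here is a minimal, self-contained sketch of the selection logic above: score every candidate, keep the one with the highest ratio, and accept it only if it clears a confirmation threshold. The MatchResult type, the find_best helper, and the 0.8 threshold are illustrative stand-ins rather than ADAM's real PartialMatchRatio API.

from typing import Iterable, NamedTuple, Optional, Tuple

class MatchResult(NamedTuple):
    # Hypothetical stand-in for PartialMatchRatio.
    match_ratio: float
    matching_subgraph: Optional[str]

def find_best(
    scored_candidates: Iterable[Tuple[str, MatchResult]],
    threshold: float = 0.8,  # plays the role of _graph_match_confirmation_threshold
) -> Optional[Tuple[MatchResult, str]]:
    """Return the highest-ratio candidate if it clears the threshold, else None."""
    best: Optional[Tuple[MatchResult, str]] = None
    for name, result in scored_candidates:
        if best is None or result.match_ratio > best[0].match_ratio:
            best = (result, name)
    if best and best[0].match_ratio >= threshold and best[0].matching_subgraph:
        return best
    return None

print(find_best([("ball", MatchResult(0.9, "matched-subgraph")),
                 ("box", MatchResult(0.6, None))]))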
Example No. 2
    # Nested helper from the cross-situational learner: it closes over
    # language_perception_semantic_alignment, meanings_from_perception,
    # meanings_to_pattern_template, and concepts_to_remove in the enclosing
    # method's scope.
    def check_and_remove_meaning(
        other_concept: Concept,
        hypothesis: "AbstractCrossSituationalLearner.Hypothesis",
        *,
        ontology: Ontology,
    ) -> None:
        match = compute_match_ratio(
            hypothesis.pattern_template,
            language_perception_semantic_alignment.perception_semantic_alignment.perception_graph,
            ontology=ontology,
        )
        if match and match.matching_subgraph:
            for meaning in meanings_from_perception:
                if match.matching_subgraph.check_isomorphism(
                    meanings_to_pattern_template[meaning].graph_pattern
                ):
                    concepts_to_remove.add(other_concept)
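
As a rough sketch of the pruning idea in this helper (drop a concept whose matched meaning duplicates a meaning already perceived in the scene), using plain hashable stand-ins for graph patterns; isomorphism is approximated here by equality, which is only a placeholder for ADAM's check_isomorphism.

from typing import Dict, FrozenSet, Set

def concepts_to_prune(
    matched_subgraph_by_concept: Dict[str, FrozenSet[str]],
    meanings_from_perception: Set[FrozenSet[str]],
) -> Set[str]:
    # A concept is pruned when its matched subgraph coincides with a perceived meaning.
    return {
        concept
        for concept, subgraph in matched_subgraph_by_concept.items()
        if subgraph in meanings_from_perception
    }

print(concepts_to_prune(
    {"dog": frozenset({"animal", "furry"}), "cup": frozenset({"container"})},
    {frozenset({"animal", "furry"})},
))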
Example No. 3
    def _match_template(
        self,
        *,
        concept: Concept,
        pattern: PerceptionGraphTemplate,
        perception_graph: PerceptionGraph,
    ) -> Iterable[Tuple[PerceptionGraphPatternMatch, SemanticNode]]:
        """
        Try to match our model of the semantics to the perception graph
        """
        partial_match = compute_match_ratio(
            pattern,
            perception_graph,
            ontology=self._ontology,
            graph_logger=self._graph_logger,
            debug_callback=self._debug_callback,
        )

        if (partial_match.match_ratio >=
                self._graph_match_confirmation_threshold
                and partial_match.matching_subgraph):
            # There is a match above our minimum match ratio, so use that subgraph
            # to find the corresponding match in the scene. There should be one.
            # TODO: This currently means we match to the graph multiple times. Reduce this?
            matcher = partial_match.matching_subgraph.matcher(
                perception_graph,
                match_mode=MatchMode.NON_OBJECT,
                debug_callback=self._debug_callback,
            )
            found_match = False
            for match in matcher.matches(use_lookahead_pruning=True):
                found_match = True
                semantic_node_for_match = pattern_match_to_semantic_node(
                    concept=concept, pattern=pattern, match=match)
                yield match, semantic_node_for_match
            # We raise an error if we find a partial match but don't manage to match it to the scene
            if not found_match:
                raise RuntimeError(
                    f"Partial match found for {concept} above the match-ratio threshold, "
                    f"but the pattern subgraph was unable to match the perception graph.\n"
                    f"Partial match: {partial_match}\n"
                    f"Perception graph: {perception_graph}"
                )
Example No. 4
    def _learning_step(
        self,
        language_perception_semantic_alignment:
        LanguagePerceptionSemanticAlignment,
        bound_surface_template: SurfaceTemplateBoundToSemanticNodes,
    ) -> None:
        """
        Try to learn the semantics of a `SurfaceTemplate` given the assumption
        that its argument slots (if any) are bound to objects according to
        *bound_surface_template*.

        For example, "try to learn the meaning of 'red' given the language 'red car'
        and an alignment of 'car' to particular perceptions in the perception graph."
        """
        concept = None
        # If we have seen this template before and already have a concept for it,
        # we attempt to verify our already-picked concept.
        if bound_surface_template.surface_template in self._surface_template_to_concept:
            # We don't directly associate surface templates with perceptions.
            # Instead we mediate the relationship with "concept" objects.
            # These don't matter now, but the split might be helpful in the future
            # when we might have multiple ways of expressing the same idea.
            concept = self._surface_template_to_concept[
                bound_surface_template.surface_template]

            # What are our current hypotheses about what this template might mean?
            pattern_hypotheses = self._concept_to_hypotheses[concept]

            # We have a hypothesis, now we check if the current scene can verify it
            # So we try to match our hypothesis to the scene accepting a partial match
            partial_match = compute_match_ratio(
                first(pattern_hypotheses),
                language_perception_semantic_alignment.
                perception_semantic_alignment.perception_graph,
                ontology=self._ontology,
                graph_logger=self._graph_logger,
                debug_callback=self._debug_callback,
            )

            # Now we want to see if our hypothesis is confirmed. We do this by checking whether
            # the *match_ratio* is at least the required value.
            if partial_match.match_ratio >= self._minimum_match_ratio:
                logging.debug(
                    f"Hypothesis for {concept} is confirmed with ratio: {partial_match.match_ratio}"
                )
                # We've verified our hypothesis, so we don't need to learn anything from this scene.
                # Note: we currently don't do any generalizing of our hypothesis space,
                # which might be needed for anything other than objects. Alternatively, the
                # `minimum_match_ratio` value should be re-evaluated for those cases, as the
                # values from the pursuit paper are focused on objects.
                return

        # If we either haven't seen this surface template before or our hypothesis
        # wasn't confirmed, we initialize a new concept (if needed) and replace its hypotheses.
        if not concept:
            concept = self._new_concept(
                debug_string=bound_surface_template.surface_template.to_short_string()
            )
        self._surface_template_to_concept[
            bound_surface_template.surface_template] = concept
        self._concept_to_surface_template[
            concept] = bound_surface_template.surface_template

        self._concept_to_hypotheses[concept] = immutableset(
            self._hypotheses_from_perception(
                language_perception_semantic_alignment,
                bound_surface_template))
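
The bookkeeping in this step reduces to: try to confirm an existing hypothesis against the scene, and otherwise keep (or mint) a concept and replace its hypotheses with ones proposed from the current perception. A dictionary-based sketch; confirm_ratio and propose_hypotheses are hypothetical callables, not ADAM APIs.

from typing import Callable, Dict, FrozenSet

def learning_step_sketch(
    template: str,
    template_to_concept: Dict[str, str],
    concept_to_hypotheses: Dict[str, FrozenSet[str]],
    confirm_ratio: Callable[[str], float],             # hypothetical: score the stored hypothesis against the scene
    propose_hypotheses: Callable[[], FrozenSet[str]],  # hypothetical: hypotheses from the current perception
    minimum_match_ratio: float = 0.8,                  # illustrative threshold
) -> None:
    concept = template_to_concept.get(template)
    if concept is not None and confirm_ratio(concept) >= minimum_match_ratio:
        return  # hypothesis confirmed; nothing to learn from this scene
    if concept is None:
        concept = f"concept-for-{template}"  # mint a new concept for an unseen template
        template_to_concept[template] = concept
    concept_to_hypotheses[concept] = propose_hypotheses()

templates: Dict[str, str] = {}
hypotheses: Dict[str, FrozenSet[str]] = {}
learning_step_sketch("red", templates, hypotheses,
                     confirm_ratio=lambda c: 0.0,
                     propose_hypotheses=lambda: frozenset({"color-pattern"}))
print(templates, hypotheses)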