Example #1
    def describe(
        self, perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame]
    ) -> Mapping[LinguisticDescription, float]:

        perception_graph = self._extract_perception_graph(perception)

        cur_description_state = PerceptionSemanticAlignment.create_unaligned(
            perception_graph
        )

        for sub_learner in [
            self.object_learner,
            self.attribute_learner,
            self.relation_learner,
        ]:
            if sub_learner:
                cur_description_state = sub_learner.enrich_during_description(
                    cur_description_state
                )

        if perception.is_dynamic() and self.action_learner:
            cur_description_state = self.action_learner.enrich_during_description(
                cur_description_state
            )

            if self.functional_learner:
                cur_description_state = self.functional_learner.enrich_during_description(
                    cur_description_state
                )
        return self._linguistic_descriptions_from_semantics(cur_description_state)
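
The describe() method above threads a single alignment state through a chain of optional sub-learners. That pattern can be shown in isolation; the following is a minimal, runnable sketch in which State, Enricher, and run_pipeline are hypothetical stand-ins, not part of the ADAM API:

from typing import Callable, Optional, Sequence

# Hypothetical stand-ins for PerceptionSemanticAlignment and the sub-learners.
State = str
Enricher = Callable[[State], State]

def run_pipeline(state: State, enrichers: Sequence[Optional[Enricher]]) -> State:
    # Each configured stage sees the output of the previous one;
    # unconfigured (None) stages are skipped, as in describe() above.
    for enrich in enrichers:
        if enrich:
            state = enrich(state)
    return state

print(run_pipeline("scene", [lambda s: s + "+objects", None, lambda s: s + "+relations"]))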
Example #2
    def _preprocess_scene(
        self, perception_semantic_alignment: PerceptionSemanticAlignment
    ) -> PerceptionSemanticAlignment:
        # Avoid accidentally identifying a word with the learner itself.
        return perception_semantic_alignment.copy_with_updated_graph_and_added_nodes(
            new_graph=graph_without_learner(
                perception_semantic_alignment.perception_graph
            ),
            new_nodes=[],
        )
Example #3
    def match_objects_old(
        self, perception_graph: PerceptionGraph
    ) -> PerceptionGraphFromObjectRecognizer:
        new_style_input = PerceptionSemanticAlignment(
            perception_graph=perception_graph, semantic_nodes=[]
        )
        new_style_output = self.match_objects(new_style_input)
        return PerceptionGraphFromObjectRecognizer(
            perception_graph=new_style_output[0].perception_graph,
            description_to_matched_object_node=new_style_output[1],
        )
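
match_objects_old (and match_objects_with_language_old in the next example) is a thin adapter: it wraps its old-style input in an empty new-style alignment, delegates to the new API, and unpacks the result. A minimal sketch of that shape, with all names as hypothetical stand-ins:

from typing import List, Tuple

def match_objects_new(state: Tuple[str, List[str]]) -> Tuple[str, List[str]]:
    # Stand-in for the new-style API, which operates on (graph, nodes) pairs.
    graph, nodes = state
    return graph + "|matched", nodes + ["object-node"]

def match_objects_old_style(graph: str) -> Tuple[str, List[str]]:
    # Old-style adapter: wrap the bare graph in an empty alignment,
    # delegate, then unpack the pieces the old callers expect.
    return match_objects_new((graph, []))

print(match_objects_old_style("scene-graph"))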
Example #4
    def match_objects_with_language_old(
        self, language_aligned_perception: LanguageAlignedPerception
    ) -> LanguageAlignedPerception:
        if language_aligned_perception.node_to_language_span:
            raise RuntimeError(
                "Don't know how to handle a non-empty node-to-language-span"
            )
        new_style_input = LanguagePerceptionSemanticAlignment(
            language_concept_alignment=LanguageConceptAlignment(
                language_aligned_perception.language, node_to_language_span=[]
            ),
            perception_semantic_alignment=PerceptionSemanticAlignment(
                perception_graph=language_aligned_perception.perception_graph,
                semantic_nodes=[],
            ),
        )
        new_style_output = self.match_objects_with_language(new_style_input)
        return LanguageAlignedPerception(
            language=new_style_output.language_concept_alignment.language,
            perception_graph=new_style_output.perception_semantic_alignment.perception_graph,
            node_to_language_span=new_style_output.language_concept_alignment.node_to_language_span,
        )
Example #5
    def _preprocess_scene(
        self, perception_semantic_alignment: PerceptionSemanticAlignment
    ) -> PerceptionSemanticAlignment:
        # Attach a count node (TWO or MANY) to every object whose concept
        # appears more than once in the scene.
        object_nodes = [
            node
            for node in perception_semantic_alignment.semantic_nodes
            if isinstance(node, ObjectSemanticNode)
        ]
        counts = collections.Counter(node.concept for node in object_nodes)
        digraph = perception_semantic_alignment.perception_graph.copy_as_digraph()
        for node in object_nodes:
            count = counts[node.concept]
            if count > 1:
                count_node = TWO if count == 2 else MANY
                digraph.add_node(count_node)
                digraph.add_edge(node, count_node, label=HAS_COUNT)
        graph_with_counts = PerceptionGraph(
            digraph, dynamic=perception_semantic_alignment.perception_graph.dynamic
        )
        return PerceptionSemanticAlignment(
            graph_with_counts, perception_semantic_alignment.semantic_nodes
        )
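
The count annotation above reduces to a Counter over recognized object concepts with a TWO/MANY cutoff. A tiny runnable sketch of just that logic (the concept strings are hypothetical):

import collections

concepts = ["ball", "ball", "box", "cup", "cup", "cup"]

counts = collections.Counter(concepts)
for concept, count in sorted(counts.items()):
    if count > 1:
        # Mirror the TWO/MANY distinction from _preprocess_scene above.
        print(concept, "->", "TWO" if count == 2 else "MANY")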
Example #6
    def _enrich_common(
        self, perception_semantic_alignment: PerceptionSemanticAlignment
    ) -> Tuple[PerceptionSemanticAlignment, AbstractSet[SemanticNode]]:
        """
        Shared code between `enrich_during_learning` and `enrich_during_description`.
        """
        preprocessing_result = self._preprocess_scene(perception_semantic_alignment)

        preprocessed_perception_graph = preprocessing_result.perception_graph

        # This accumulates our output.
        match_to_score: List[Tuple[SemanticNode, float]] = []

        # In the case of objects only, we alter the perception graph once they
        # are recognized by replacing the matched portion of the graph with the
        # ObjectSemanticNodes.  We gather them as we match and do the replacement below.
        matched_objects: List[Tuple[SemanticNode, PerceptionGraphPatternMatch]] = []

        # We pull this out into a function because we do matching in two passes:
        # first against templates whose meanings we are sure of (=have lexicalized)
        # and then, if no match has been found, against those we are still learning.
        def match_template(
            *, concept: Concept, pattern: PerceptionGraphTemplate, score: float
        ) -> None:
            # Try to see if (our model of) this concept's semantics is present in the situation.
            matcher = pattern.graph_pattern.matcher(
                preprocessed_perception_graph,
                match_mode=MatchMode.NON_OBJECT,
                # debug_callback=self._debug_callback,
            )
            for match in matcher.matches(use_lookahead_pruning=True):
                # if there is a match, use that match to describe the situation.
                semantic_node_for_match = pattern_match_to_semantic_node(
                    concept=concept, pattern=pattern, match=match
                )
                match_to_score.append((semantic_node_for_match, score))
                # We want to replace object matches with their semantic nodes,
                # but we don't want to alter the graph while matching it,
                # so we accumulate these to replace later.
                if isinstance(concept, ObjectConcept):
                    matched_objects.append((semantic_node_for_match, match))
                # A template only has to match once; we don't care about finding additional matches.
                return

        # For each template whose semantics we are certain of (=have been added to the lexicon)
        for (concept, graph_pattern, score) in self._primary_templates():
            check_state(isinstance(graph_pattern, PerceptionGraphTemplate))
            if (
                preprocessed_perception_graph.dynamic
                == graph_pattern.graph_pattern.dynamic
            ):
                match_template(concept=concept, pattern=graph_pattern, score=score)
            else:
                logging.debug(
                    f"Cannot match {concept} against {preprocessed_perception_graph} "
                    f"because the pattern and the perception graph must both be "
                    f"static or both be dynamic"
                )
        if not match_to_score:
            # Try to match against patterns being learned
            # only if no lexicalized pattern was matched.
            for (concept, graph_pattern, score) in self._fallback_templates():
                # We may have multiple pattern hypotheses for a single concept,
                # in which case we only want to identify the concept once.
                if not any(m[0].concept == concept for m in match_to_score):
                    match_template(concept=concept, pattern=graph_pattern, score=score)

        perception_graph_after_matching = perception_semantic_alignment.perception_graph

        # Replace any objects found
        def by_pattern_complexity(pair):
            _, pattern_match = pair
            return len(pattern_match.matched_pattern)

        matched_objects.sort(key=by_pattern_complexity, reverse=True)
        already_replaced: Set[ObjectPerception] = set()
        new_nodes: List[SemanticNode] = []
        for (matched_object_node, pattern_match) in matched_objects:
            root: ObjectPerception = _get_root_object_perception(
                pattern_match.matched_sub_graph._graph,  # pylint:disable=protected-access
                immutableset(
                    pattern_match.matched_sub_graph._graph.nodes,  # pylint:disable=protected-access
                    disable_order_check=True,
                ),
            )
            if root not in already_replaced:
                perception_graph_after_matching = replace_match_root_with_object_semantic_node(
                    object_semantic_node=cast(ObjectSemanticNode, matched_object_node),
                    current_perception=perception_graph_after_matching,
                    pattern_match=pattern_match,
                )
                already_replaced.add(root)
                new_nodes.append(matched_object_node)
            else:
                logging.info(
                    f"Matched pattern for {matched_object_node} "
                    f"but root object {root} already replaced."
                )
        if matched_objects:
            immutable_new_nodes = immutableset(new_nodes)
        else:
            immutable_new_nodes = immutableset(node for (node, _) in match_to_score)

        (
            perception_graph_after_post_processing,
            nodes_after_post_processing,
        ) = self._enrich_post_process(
            perception_graph_after_matching, immutable_new_nodes
        )

        return (
            perception_semantic_alignment.copy_with_updated_graph_and_added_nodes(
                new_graph=perception_graph_after_post_processing,
                new_nodes=nodes_after_post_processing,
            ),
            nodes_after_post_processing,
        )
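
The two-pass strategy in _enrich_common, matching lexicalized templates first and falling back to in-progress hypotheses only when nothing matched, can be sketched independently. Template, match_two_pass, and the string matchers below are hypothetical simplifications of the graph-pattern machinery:

from typing import Callable, List, Tuple

# (concept, matcher, score); the matcher stands in for a graph-pattern match.
Template = Tuple[str, Callable[[str], bool], float]

def match_two_pass(
    scene: str, primary: List[Template], fallback: List[Template]
) -> List[Tuple[str, float]]:
    matches: List[Tuple[str, float]] = []
    # First pass: templates whose meanings are certain (lexicalized).
    for concept, matcher, score in primary:
        if matcher(scene):
            matches.append((concept, score))
    # Second pass, only if nothing matched: hypotheses still being learned,
    # identifying each concept at most once.
    if not matches:
        for concept, matcher, score in fallback:
            if matcher(scene) and all(c != concept for c, _ in matches):
                matches.append((concept, score))
    return matches

print(match_two_pass("a red ball", [], [("ball", lambda s: "ball" in s, 0.5)]))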
Example #7
    def observe(
        self,
        learning_example: LearningExample[
            DevelopmentalPrimitivePerceptionFrame, LinguisticDescription
        ],
        observation_num: int = -1,
    ) -> None:
        if observation_num >= 0:
            logging.info(
                "Observation %s: %s",
                observation_num,
                learning_example.linguistic_description.as_token_string(),
            )
        else:
            logging.info(
                "Observation %s: %s",
                self._observation_num,
                learning_example.linguistic_description.as_token_string(),
            )

        self._observation_num += 1

        # We need to track the alignment between perceived objects
        # and portions of the input language, so internally we operate
        # over LanguagePerceptionSemanticAlignments.
        current_learner_state = LanguagePerceptionSemanticAlignment(
            language_concept_alignment=LanguageConceptAlignment.create_unaligned(
                language=learning_example.linguistic_description
            ),
            perception_semantic_alignment=PerceptionSemanticAlignment(
                perception_graph=self._extract_perception_graph(
                    learning_example.perception
                ),
                semantic_nodes=[],
            ),
        )

        # We iteratively let each "layer" of semantic analysis attempt
        # to learn from the perception,
        # and then to annotate the perception with any semantic alignments it knows.
        for sub_learner in [
            self.object_learner,
            self.attribute_learner,
            self.relation_learner,
        ]:
            if sub_learner:
                # Currently we do not attempt to learn static things from dynamic situations
                # because the static learners do not know how to deal with the temporal
                # perception graph edge wrappers.
                # See https://github.com/isi-vista/adam/issues/792 .
                if not learning_example.perception.is_dynamic():
                    sub_learner.learn_from(
                        current_learner_state, observation_num=observation_num
                    )
                current_learner_state = sub_learner.enrich_during_learning(
                    current_learner_state
                )
        if learning_example.perception.is_dynamic() and self.action_learner:
            self.action_learner.learn_from(current_learner_state)
            current_learner_state = self.action_learner.enrich_during_learning(
                current_learner_state
            )

            if self.functional_learner:
                self.functional_learner.learn_from(
                    current_learner_state, observation_num=observation_num
                )
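
observe() interleaves learning and enrichment per layer: each sub-learner first updates its own hypotheses from the current state, then annotates the state for the layers after it. A minimal sketch with a hypothetical StubLearner (not the real sub-learner interface):

from typing import List, Optional

class StubLearner:
    def __init__(self, name: str) -> None:
        self.name = name

    def learn_from(self, state: str) -> None:
        # Update internal hypotheses from the current alignment state.
        pass

    def enrich_during_learning(self, state: str) -> str:
        # Annotate the state so later layers can build on this one's output.
        return state + "+" + self.name

def observe(state: str, layers: List[Optional[StubLearner]]) -> str:
    for layer in layers:
        if layer:
            layer.learn_from(state)
            state = layer.enrich_during_learning(state)
    return state

print(observe("scene", [StubLearner("objects"), None, StubLearner("relations")]))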
Example #8
    def observe(
        self,
        learning_example: LearningExample[
            DevelopmentalPrimitivePerceptionFrame, LinguisticDescription
        ],
        offset: int = 0,
    ) -> None:

        logging.info(
            "Observation %s: %s",
            self._observation_num + offset,
            learning_example.linguistic_description.as_token_string(),
        )

        self._observation_num += 1

        # We need to track the alignment between perceived objects
        # and portions of the input language, so internally we operate
        # over LanguagePerceptionSemanticAlignments.
        current_learner_state = LanguagePerceptionSemanticAlignment(
            language_concept_alignment=LanguageConceptAlignment.create_unaligned(
                language=learning_example.linguistic_description
            ),
            perception_semantic_alignment=PerceptionSemanticAlignment(
                perception_graph=self._extract_perception_graph(
                    learning_example.perception
                ),
                semantic_nodes=[],
            ),
        )

        # We iteratively let each "layer" of semantic analysis attempt
        # to learn from the perception,
        # and then to annotate the perception with any semantic alignments it knows.
        for sub_learner in [
            self.object_learner,
            self.attribute_learner,
            self.plural_learner,
            self.relation_learner,
        ]:
            if sub_learner:
                # Currently we do not attempt to learn static things from dynamic situations
                # because the static learners do not know how to deal with the temporal
                # perception graph edge wrappers.
                # See https://github.com/isi-vista/adam/issues/792 .
                if not learning_example.perception.is_dynamic():
                    # For more details on the try/excepts below
                    # See: https://github.com/isi-vista/adam/issues/1008
                    try:
                        sub_learner.learn_from(current_learner_state, offset=offset)
                    except (RuntimeError, KeyError) as e:
                        logging.warning(
                            f"Sub-learner ({sub_learner}) was unable to learn from "
                            f"instance number {self._observation_num}.\n"
                            f"Instance: {current_learner_state}.\n"
                            f"Full error information: {e}"
                        )
                        if not self._suppress_error:
                            raise
                current_learner_state = sub_learner.enrich_during_learning(
                    current_learner_state
                )
                # Check definiteness after recognizing objects
                if sub_learner == self.object_learner:
                    self.learn_definiteness_markers(current_learner_state)

        if learning_example.perception.is_dynamic() and self.action_learner:
            try:
                self.action_learner.learn_from(current_learner_state)
            except (RuntimeError, KeyError) as e:
                logging.warning(
                    f"Action learner ({self.action_learner}) was unable to learn from "
                    f"instance number {self._observation_num}.\n"
                    f"Instance: {current_learner_state}.\n"
                    f"Full error information: {e}"
                )
                if not self._suppress_error:
                    raise

            current_learner_state = self.action_learner.enrich_during_learning(
                current_learner_state
            )

            if self.functional_learner:
                self.functional_learner.learn_from(
                    current_learner_state, offset=offset
                )

        # Engage the generics learner if the utterance is indefinite.
        if self.generics_learner and not self.is_definite(current_learner_state):
            # Lack of definiteness could be marking a generic statement.
            # Check whether the known descriptions match the utterance.
            descs = self._linguistic_descriptions_from_semantics(
                current_learner_state.perception_semantic_alignment
            )
            # If the statement isn't a recognized sentence, run the learner.
            if learning_example.linguistic_description.as_token_sequence() not in [
                desc.as_token_sequence() for desc in descs
            ]:
                # Pass plural markers to the generics learner before learning
                # from the statement.
                if isinstance(self.plural_learner, SubsetPluralLearnerNew):
                    self.generics_learner.plural_markers = list(  # pylint: disable=assigning-non-slot
                        self.plural_learner.potential_plural_markers.keys()
                    )
                self.generics_learner.learn_from(current_learner_state)

        # Update concept semantics
        self.update_concept_semantics(current_learner_state)
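
This variant of observe() also wraps each sub-learner call in a try/except guard controlled by a suppress-errors flag (see https://github.com/isi-vista/adam/issues/1008). The guard in isolation, with hypothetical names:

import logging

def learn_guarded(name, learn, state, suppress_errors):
    # Log the failure and, unless errors are suppressed, re-raise it,
    # as the guards in observe() above do.
    try:
        learn(state)
    except (RuntimeError, KeyError) as e:
        logging.warning("%s failed on %r: %s", name, state, e)
        if not suppress_errors:
            raise

def flaky_learner(state):
    raise RuntimeError("unsupported perception")

learn_guarded("action learner", flaky_learner, "scene", suppress_errors=True)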