Example #1
 def _hypothesis_from_perception(
     self, preprocessed_input: LanguageAlignedPerception
 ) -> PerceptionGraphTemplate:
     return PerceptionGraphTemplate.from_graph(
         preprocessed_input.perception_graph,
         template_variable_to_matched_object_node=immutabledict(
             zip(STANDARD_SLOT_VARIABLES,
                 preprocessed_input.aligned_nodes)),
     )
Example #2
 def _hypotheses_from_perception(
     self,
     learning_state: LanguagePerceptionSemanticAlignment,
     bound_surface_template: SurfaceTemplateBoundToSemanticNodes,
 ) -> AbstractSet[PerceptionGraphTemplate]:
     # For the subset learner, our hypothesis is the entire graph.
     return immutableset([
         PerceptionGraphTemplate.from_graph(
             learning_state.perception_semantic_alignment.perception_graph,
              template_variable_to_matched_object_node=(
                  bound_surface_template.slot_to_semantic_node
              ),
         )
     ])
Example #3
 def _hypotheses_from_perception(
     self,
     learning_state: LanguagePerceptionSemanticAlignment,
     bound_surface_template: SurfaceTemplateBoundToSemanticNodes,
 ) -> AbstractSet[PerceptionGraphTemplate]:
     # This makes a hypothesis for the whole graph, with the wildcard slot
     # at each recognized object.
     return immutableset([
         PerceptionGraphTemplate.from_graph(
             learning_state.perception_semantic_alignment.perception_graph,
              template_variable_to_matched_object_node=(
                  bound_surface_template.slot_to_semantic_node
              ),
         )
     ])
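
Examples #2 and #3 lean on a property of subset learning: later observations only ever narrow a hypothesis by intersection, so it is safe to start from the entire perception graph. Below is a minimal sketch of that intersection idea using plain Python sets of made-up feature labels, not ADAM's PerceptionGraphTemplate machinery:

from typing import AbstractSet, FrozenSet


def initial_hypothesis(observation: AbstractSet[str]) -> FrozenSet[str]:
    # First observation: hypothesize that every perceived feature is relevant.
    return frozenset(observation)


def refine(hypothesis: FrozenSet[str], observation: AbstractSet[str]) -> FrozenSet[str]:
    # Each later observation keeps only the features that have appeared every time.
    return hypothesis & frozenset(observation)


hypothesis = initial_hypothesis({"red", "round", "on-table"})  # whole first scene
hypothesis = refine(hypothesis, {"red", "round", "in-hand"})   # second scene narrows it
assert hypothesis == frozenset({"red", "round"})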
Example #4
 def _hypotheses_from_perception(
     self,
     learning_state: LanguagePerceptionSemanticAlignment,
     bound_surface_template: SurfaceTemplateBoundToSemanticNodes,
 ) -> AbstractSet[PerceptionGraphTemplate]:
     return immutableset(
         PerceptionGraphTemplate.from_graph(
             perception_graph=candidate_relation_meaning,
             template_variable_to_matched_object_node=(
                 bound_surface_template.slot_to_semantic_node
             ),
         )
         for candidate_relation_meaning in _extract_candidate_relations(
             learning_state.perception_semantic_alignment.perception_graph,
             bound_surface_template.slot_to_semantic_node[SLOT1],
             bound_surface_template.slot_to_semantic_node[SLOT2],
         )
     )
Example #5
    def _hypothesis_from_perception(
        self, preprocessed_input: LanguageAlignedPerception
    ) -> PerceptionGraphTemplate:
        num_nodes_aligned_to_language = len(preprocessed_input.aligned_nodes)
        if num_nodes_aligned_to_language != 1:
            raise RuntimeError(
                f"Attribute learner can work only with a single aligned node,"
                f"but got {num_nodes_aligned_to_language}. Language is "
                f"{preprocessed_input.language.as_token_string()}"
            )

        return PerceptionGraphTemplate.from_graph(
            preprocessed_input.perception_graph,
            template_variable_to_matched_object_node=immutabledict(
                zip(STANDARD_SLOT_VARIABLES, preprocessed_input.aligned_nodes)
            ),
        )
Example #6
    def _learning_step(
        self,
        language_perception_semantic_alignment: LanguagePerceptionSemanticAlignment,
        bound_surface_template: SurfaceTemplateBoundToSemanticNodes,
    ) -> None:
        """
        Try to learn the semantics of a `SurfaceTemplate` given the assumption
        that its argument slots (if any) are bound to objects according to
        *bound_surface_template*.

        For example, "try to learn the meaning of 'red' given the language 'red car'
        and an alignment of 'car' to particular perceptions in the perception graph.
        """
        # Generate all possible meanings from the Graph
        meanings_from_perception = immutableset(
            self._hypotheses_from_perception(
                language_perception_semantic_alignment,
                bound_surface_template))
        meanings_to_pattern_template: Mapping[
            PerceptionGraph, PerceptionGraphTemplate] = immutabledict(
                (meaning,
                 PerceptionGraphTemplate.from_graph(meaning, immutabledict()))
                for meaning in meanings_from_perception)

        # We check for meanings that are described by lexicalized concepts
        # and don't try to learn those lexicalized concepts further.
        # jac: Not mentioned in the part of the paper I read. New?
        concepts_to_remove: Set[Concept] = set()

        def check_and_remove_meaning(
            other_concept: Concept,
            hypothesis: "AbstractCrossSituationalLearner.Hypothesis",
            *,
            ontology: Ontology,
        ) -> None:
            match = compute_match_ratio(
                hypothesis.pattern_template,
                language_perception_semantic_alignment
                .perception_semantic_alignment.perception_graph,
                ontology=ontology,
            )
            if match and match.matching_subgraph:
                for meaning in meanings_from_perception:
                    if match.matching_subgraph.check_isomorphism(
                            meanings_to_pattern_template[meaning].graph_pattern
                    ):
                        concepts_to_remove.add(other_concept)

        for (other_concept, hypotheses) in self._concept_to_hypotheses.items():
            for hypothesis in hypotheses:
                if hypothesis.probability > self._lexicon_entry_threshold:
                    check_and_remove_meaning(other_concept,
                                             hypothesis,
                                             ontology=self._ontology)

        # We have seen this template before and already have a concept for it
        # So we attempt to verify our already picked concept
        if bound_surface_template.surface_template in self._surface_template_to_concept:
            # We don't directly associate surface templates with perceptions.
            # Instead we mediate the relationship with "concept" objects.
            # These don't matter now, but the split might be helpful in the future
            # when we might have multiple ways of expressing the same idea.
            concept = self._surface_template_to_concept[
                bound_surface_template.surface_template]
        else:
            concept = self._new_concept(
                debug_string=bound_surface_template.surface_template.to_short_string()
            )
        self._surface_template_to_concept[
            bound_surface_template.surface_template] = concept
        self._concept_to_surface_template[
            concept] = bound_surface_template.surface_template

        concepts_after_preprocessing = immutableset([
            concept for concept in self._concepts_in_utterance
            if concept not in concepts_to_remove
            # TODO Does it make sense to include a dummy concept/"word"? The paper has one so I
            #  am including it for now.
        ] + [self._dummy_concept])

        # Step 0. Update priors for any meanings as-yet unobserved.

        # Step 1. Compute alignment probabilities (pp. 1029)
        # We have an identified "word" (concept) from U(t)
        # and a collection of meanings from the scene S(t).
        # We now want to calculate the alignment probabilities,
        # which will be used to update this concept's association scores, assoc(w|m, U(t), S(t)),
        # and meaning probabilities, p(m|w).
        alignment_probabilities = self._get_alignment_probabilities(
            concepts_after_preprocessing, meanings_from_perception)

        # We have an identified "word" (concept) from U(t)
        # and a collection of meanings from the scene S(t).
        # We now want to update p(.|w), which means calculating the probabilities.
        new_hypotheses = self._updated_meaning_probabilities(
            concept,
            meanings_from_perception,
            meanings_to_pattern_template,
            alignment_probabilities,
        )

        # Finally, update our hypotheses for this concept
        self._updated_hypotheses[concept] = new_hypotheses
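
The two numbered steps in Example #6 have the shape of a standard cross-situational associative update: each meaning in the scene distributes its probability mass over the concepts in the utterance (alignment probabilities), those alignments accumulate into association scores assoc(w, m), and smoothed, normalized associations give the updated meaning probabilities p(m|w). The sketch below shows that arithmetic with plain dicts; the smoothing constants, the word and meaning labels, and the omission of the dummy concept and the prior-update step are assumptions made for illustration and may differ from what _get_alignment_probabilities and _updated_meaning_probabilities actually do:

from collections import defaultdict
from typing import Dict, Iterable, Set, Tuple

# Running association scores assoc(w, m) and the meanings seen so far.
assoc: Dict[Tuple[str, str], float] = defaultdict(float)
seen_meanings: Set[str] = set()
LAMBDA = 0.001  # smoothing constant (assumed value)
BETA = 100.0    # rough number of meaning types (assumed value)


def meaning_probability(meaning: str, word: str) -> float:
    # p(m|w): smoothed, normalized association score.
    total = sum(assoc[(word, m)] for m in seen_meanings)
    return (assoc[(word, meaning)] + LAMBDA) / (total + BETA * LAMBDA)


def observe(words_in_utterance: Iterable[str], meanings_in_scene: Iterable[str]) -> None:
    words = list(words_in_utterance)
    meanings = list(meanings_in_scene)
    seen_meanings.update(meanings)
    # Step 1: alignment probability a(w|m, U, S) is word w's share of p(m|.),
    # computed from the previous meaning probabilities.
    alignments = {}
    for m in meanings:
        denominator = sum(meaning_probability(m, w) for w in words)
        for w in words:
            alignments[(w, m)] = meaning_probability(m, w) / denominator
    # Step 2: alignments accumulate into assoc(w, m), which determines the new p(m|w).
    for (w, m), alignment in alignments.items():
        assoc[(w, m)] += alignment


observe(["red", "ball"], ["color-red", "shape-sphere"])
observe(["red", "car"], ["color-red", "shape-sedan"])
# "red" co-occurs with color-red in both scenes, so p(color-red | "red") grows fastest.
print(meaning_probability("color-red", "red"))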
Example #7
def preposition_hypothesis_from_perception(
    scene_aligned_perception: LanguageAlignedPerception,
    template_variables_to_object_match_nodes: Mapping[SyntaxSemanticsVariable,
                                                      ObjectSemanticNode],
) -> PerceptionGraphTemplate:
    """
        Create a hypothesis for the semantics of a preposition based on the observed scene.

        Our current implementation is to just include the content
        on the path between the recognized object nodes
        and one hop away from that path.
        """

    # The directions of edges in the perception graph are not necessarily meaningful
    # from the point-of-view of hypothesis generation, so we need an undirected copy
    # of the graph.
    perception_digraph = scene_aligned_perception.perception_graph.copy_as_digraph()
    perception_graph_undirected = perception_digraph.to_undirected(
        # as_view=True loses determinism
        as_view=False)

    if {SLOT1, SLOT2} != set(template_variables_to_object_match_nodes.keys()):
        raise RuntimeError(
            "Can only make a preposition hypothesis if the recognized "
            "objects are aligned to SurfaceTemplateVariables SLOT1 and SLOT2")

    slot1_object = template_variables_to_object_match_nodes[SLOT1]
    slot2_object = template_variables_to_object_match_nodes[SLOT2]

    # The core of our hypothesis for the semantics of a preposition is all nodes
    # along the shortest path between the two objects involved in the perception graph.
    hypothesis_spine_nodes: ImmutableSet[PerceptionGraphNode] = immutableset(
        flatten(
            # if there are multiple paths between the object match nodes,
            # we aren't sure which are relevant, so we include them all in our hypothesis
            # and figure we can trim out irrelevant stuff as we make more observations.
            all_shortest_paths(perception_graph_undirected, slot2_object,
                               slot1_object)))

    # Along the core of our hypothesis we also want to collect the predecessors and successors
    hypothesis_nodes_mutable = []
    for node in hypothesis_spine_nodes:
        if node not in {slot1_object, slot2_object}:
            for successor in perception_digraph.successors(node):
                if not isinstance(successor, ObjectPerception):
                    hypothesis_nodes_mutable.append(successor)
            for predecessor in perception_digraph.predecessors(node):
                if not isinstance(predecessor, ObjectPerception):
                    hypothesis_nodes_mutable.append(predecessor)

    hypothesis_nodes_mutable.extend(hypothesis_spine_nodes)

    # We wrap the nodes in an immutable set to remove duplicates
    # while preserving iteration determinism.
    hypothesis_nodes = immutableset(hypothesis_nodes_mutable)

    preposition_sub_graph = PerceptionGraph(
        digraph_with_nodes_sorted_by(
            subgraph(perception_digraph, hypothesis_nodes), _graph_node_order))

    return PerceptionGraphTemplate.from_graph(
        preposition_sub_graph, template_variables_to_object_match_nodes)
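
The path-plus-one-hop extraction in Example #7 can be illustrated with plain networkx, independent of ADAM's wrapper types (and without the ObjectPerception filtering). The toy graph and its node labels below are made up for illustration:

import networkx as nx

# A toy perception-like digraph (node labels are invented for this sketch).
digraph = nx.DiGraph()
digraph.add_edges_from([
    ("ball", "region-above-table"),        # slot1 object -> spatial region
    ("region-above-table", "table"),       # spatial region -> slot2 object
    ("region-above-table", "distance-0"),  # one hop off the path: collected
    ("table", "color-brown"),              # hangs off a slot object: not collected
])

slot1_object, slot2_object = "ball", "table"

# Path search ignores edge direction, as in the learner above.
undirected = digraph.to_undirected(as_view=False)

# The "spine": every node on any shortest path between the two slot objects.
spine = {
    node
    for path in nx.all_shortest_paths(undirected, slot2_object, slot1_object)
    for node in path
}

# Plus everything one directed hop away from each non-slot spine node.
hypothesis_nodes = set(spine)
for node in spine - {slot1_object, slot2_object}:
    hypothesis_nodes.update(digraph.successors(node))
    hypothesis_nodes.update(digraph.predecessors(node))

hypothesis_subgraph = digraph.subgraph(hypothesis_nodes)
print(sorted(hypothesis_subgraph.nodes))
# ['ball', 'distance-0', 'region-above-table', 'table']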