Example #1
def test_matching_static_vs_dynamic_graphs():
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)
    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)
    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])

    perception_pattern = PerceptionGraphPattern.from_graph(
        perception_graph).perception_graph_pattern

    temporal_perception_pattern = perception_pattern.copy_with_temporal_scopes(
        required_temporal_scopes=[TemporalScope.AFTER])

    # Test runtime error for matching static pattern against dynamic graph and vice versa

    with pytest.raises(RuntimeError):
        perception_pattern.matcher(temporal_perception_graph,
                                   match_mode=MatchMode.NON_OBJECT)

    with pytest.raises(RuntimeError):
        temporal_perception_pattern.matcher(perception_graph,
                                            match_mode=MatchMode.NON_OBJECT)
Example #2
    def from_graph(
        perception_graph: PerceptionGraph,
        template_variable_to_matched_object_node: Mapping[
            SyntaxSemanticsVariable, ObjectSemanticNode
        ],
    ) -> "PerceptionGraphTemplate":
        # It is possible the perception graph has additional recognized objects
        # which are not aligned to surface template slots.
        # We assume these are not arguments of the verb and remove them from the perception
        # before creating a pattern.
        pattern_from_graph = PerceptionGraphPattern.from_graph(perception_graph)
        pattern_graph = pattern_from_graph.perception_graph_pattern
        matched_object_to_matched_predicate = (
            pattern_from_graph.perception_graph_node_to_pattern_node
        )

        template_variable_to_pattern_node: List[Any] = []

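        # Keep only the template variables whose matched object node has a corresponding pattern node.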
        for (
            template_variable,
            object_node,
        ) in template_variable_to_matched_object_node.items():
            if object_node in matched_object_to_matched_predicate:
                template_variable_to_pattern_node.append(
                    (template_variable, matched_object_to_matched_predicate[object_node])
                )

        return PerceptionGraphTemplate(
            graph_pattern=pattern_graph,
            template_variable_to_pattern_node=template_variable_to_pattern_node,
        )
Example #3
    def _enrich_post_process(
        perception_graph_after_matching: PerceptionGraph,
        immutable_new_nodes: AbstractSet[SemanticNode],
    ) -> Tuple[PerceptionGraph, AbstractSet[SemanticNode]]:
        new_nodes = []
        perception_graph_after_processing = perception_graph_after_matching
        for candidate_object_graph in extract_candidate_objects(
            perception_graph_after_matching, sort_by_increasing_size=False
        ):
            fake_pattern_graph = PerceptionGraphPattern.from_graph(
                candidate_object_graph
            )
            fake_object_semantic_node = ObjectSemanticNode(
                concept=FunctionalObjectConcept("unknown_object")
            )
            perception_graph_after_processing = replace_match_with_object_graph_node(
                matched_object_node=fake_object_semantic_node,
                current_perception=perception_graph_after_processing,
                pattern_match=PerceptionGraphPatternMatch(
                    matched_pattern=fake_pattern_graph.perception_graph_pattern,
                    graph_matched_against=perception_graph_after_processing,
                    matched_sub_graph=candidate_object_graph,
                    pattern_node_to_matched_graph_node=fake_pattern_graph.perception_graph_node_to_pattern_node,
                ),
            ).perception_graph_after_replacement
            new_nodes.append(fake_object_semantic_node)

        return (
            perception_graph_after_processing,
            immutableset(chain(immutable_new_nodes, new_nodes)),
        )
Example #4
def test_successfully_extending_partial_match():
    """
    Tests whether we can match a perception pattern against a perception graph
    when initializing the search from a partial match.
    """

    target_object = BOX
    # Create a training template for the target object
    train_obj_object = object_variable("obj-with-color", target_object)

    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    # Original perception graph
    perception = PerceptionGraph.from_frame(
        perceptual_representation.frames[0])

    # Create a perception pattern for the whole thing
    # and also a perception pattern for a subset of the whole pattern
    whole_perception_pattern = PerceptionGraphPattern.from_graph(
        perception).perception_graph_pattern

    partial_digraph = whole_perception_pattern.copy_as_digraph()
    partial_digraph.remove_nodes_from([
        node for node in partial_digraph.nodes
        if isinstance(node, IsColorNodePredicate)
    ])
    partial_perception_pattern = PerceptionGraphPattern(partial_digraph)

    # get our initial match by matching the partial pattern
    matcher = partial_perception_pattern.matcher(
        perception, match_mode=MatchMode.NON_OBJECT)

    partial_match: PerceptionGraphPatternMatch = first(
        matcher.matches(use_lookahead_pruning=True))
    partial_mapping = partial_match.pattern_node_to_matched_graph_node

    # Try to extend the partial mapping, to create a complete mapping
    matcher_2 = whole_perception_pattern.matcher(
        perception, match_mode=MatchMode.NON_OBJECT)
    complete_match: PerceptionGraphPatternMatch = first(
        matcher_2.matches(initial_partial_match=partial_mapping,
                          use_lookahead_pruning=True),
        None,
    )
    complete_mapping = complete_match.pattern_node_to_matched_graph_node
    assert len(complete_mapping) == len(perception.copy_as_digraph().nodes)
    assert len(complete_mapping) == len(
        whole_perception_pattern.copy_as_digraph().nodes)
Example #5
    def _hypothesis_from_perception(
        self, preprocessed_input: LanguageAlignedPerception
    ) -> PerceptionGraphTemplate:
        # Build a pattern from the entire aligned perception graph; no template variables are bound.
        new_hypothesis = PerceptionGraphPattern.from_graph(
            preprocessed_input.perception_graph
        ).perception_graph_pattern
        return PerceptionGraphTemplate(
            graph_pattern=new_hypothesis,
            template_variable_to_pattern_node=immutabledict(),
        )
Example #6
def candidate_object_hypotheses(
    language_perception_semantic_alignment: LanguagePerceptionSemanticAlignment
) -> Sequence[PerceptionGraphTemplate]:
    """
    Given a learning input, returns all possible meaning hypotheses.
    """
    return [
        PerceptionGraphTemplate(
            graph_pattern=PerceptionGraphPattern.from_graph(
                object_
            ).perception_graph_pattern
        )
        for object_ in get_objects_from_perception(
            language_perception_semantic_alignment.perception_semantic_alignment.perception_graph
        )
    ]
Example #7
    def _hypotheses_from_perception(
        self,
        learning_state: LanguagePerceptionSemanticAlignment,
        bound_surface_template: SurfaceTemplateBoundToSemanticNodes,
    ) -> AbstractSet[PerceptionGraphTemplate]:
        if bound_surface_template.slot_to_semantic_node:
            raise RuntimeError(
                "Object learner should not have slot to semantic node alignments!"
            )

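        # Each candidate object extracted from the perception graph yields one hypothesis pattern.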
        return immutableset(
            PerceptionGraphTemplate(
                graph_pattern=PerceptionGraphPattern.from_graph(
                    candidate_object
                ).perception_graph_pattern,
                template_variable_to_pattern_node=immutabledict(),
            )
            for candidate_object in extract_candidate_objects(
                learning_state.perception_semantic_alignment.perception_graph
            )
        )
Example #8
    def _enrich_post_process(
        self,
        perception_graph_after_matching: PerceptionGraph,
        immutable_new_nodes: AbstractSet[SemanticNode],
    ) -> Tuple[PerceptionGraph, AbstractSet[SemanticNode]]:
        object_root_nodes = immutableset(
            node
            for node in perception_graph_after_matching._graph.nodes  # pylint:disable=protected-access
            if isinstance(node, ObjectPerception)
        )
        new_nodes = []
        perception_graph_after_processing = perception_graph_after_matching
        for object_root_node in object_root_nodes:
            fake_subgraph = subgraph(
                perception_graph_after_matching._graph,  # pylint:disable=protected-access
                [object_root_node],
            )
            fake_perception_graph = PerceptionGraph(
                graph=fake_subgraph,
                dynamic=perception_graph_after_matching.dynamic)
            fake_pattern_graph = PerceptionGraphPattern.from_graph(
                fake_perception_graph)
            fake_object_semantic_node = ObjectSemanticNode(
                concept=FunctionalObjectConcept("unknown_object"))
            # perception_graph_after_processing = replace_match_root_with_object_semantic_node(
            #     object_semantic_node=fake_object_semantic_node,
            perception_graph_after_processing = replace_match_with_object_graph_node(
                matched_object_node=fake_object_semantic_node,
                current_perception=perception_graph_after_processing,
                pattern_match=PerceptionGraphPatternMatch(
                    matched_pattern=fake_pattern_graph.perception_graph_pattern,
                    graph_matched_against=perception_graph_after_matching,
                    matched_sub_graph=fake_perception_graph,
                    pattern_node_to_matched_graph_node=fake_pattern_graph.perception_graph_node_to_pattern_node,
                ),
            ).perception_graph_after_replacement
            new_nodes.append(fake_object_semantic_node)

        return (
            perception_graph_after_processing,
            immutableset(chain(immutable_new_nodes, new_nodes)),
        )
Example #9
def test_copy_with_temporal_scope_pattern_content():
    """
    Tests whether copy_with_temporal_scopes converts patterns to dynamic ones as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    perception_pattern = PerceptionGraphPattern.from_graph(
        perception_graph).perception_graph_pattern

    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    temporal_perception_pattern = perception_pattern.copy_with_temporal_scopes(
        required_temporal_scopes=TemporalScope.AFTER)

    # Exception while applying to dynamic pattern
    with pytest.raises(RuntimeError):
        temporal_perception_pattern.copy_with_temporal_scopes(
            required_temporal_scopes=TemporalScope.AFTER)

    for (source, target) in perception_pattern.copy_as_digraph().edges():
        assert not isinstance(
            perception_pattern.copy_as_digraph()[source][target]["predicate"],
            HoldsAtTemporalScopePredicate,
        )
    for (source, target) in temporal_perception_pattern.copy_as_digraph().edges():
        # Check type, and then the content
        predicate = temporal_perception_pattern.copy_as_digraph()[source][target]["predicate"]
        # Test HoldsAtTemporalScope dot label, matches predicate
        assert isinstance(predicate.dot_label(), str)
        assert predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          predicate.temporal_scopes))
        assert not predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          [TemporalScope.BEFORE]))
        assert isinstance(predicate, HoldsAtTemporalScopePredicate)
        assert (
            predicate.wrapped_edge_predicate
            == perception_pattern.copy_as_digraph()[source][target]["predicate"]
        )
        assert len(predicate.temporal_scopes) == 1
        assert only(predicate.temporal_scopes) == TemporalScope.AFTER

    # Test normal matching behavior
    temporal_matcher = temporal_perception_pattern.matcher(
        temporal_perception_graph, match_mode=MatchMode.NON_OBJECT)
    first(temporal_matcher.matches(use_lookahead_pruning=True))

    # Test HoldsAtTemporalScopePredicate
    for (source, target) in perception_graph.copy_as_digraph().edges():
        label = "test edge label"
        edge_predicate = AnyEdgePredicate()
        temporal_predicate = HoldsAtTemporalScopePredicate(
            edge_predicate, [TemporalScope.AFTER])

        temporal_edge_label = TemporallyScopedEdgeLabel(
            label, [TemporalScope.AFTER])
        assert temporal_predicate(source, temporal_edge_label, target)
        # Non temporal edge exception
        with pytest.raises(RuntimeError):
            temporal_predicate(source, label, target)
Example #10
def test_allowed_matches_with_bad_partial_match():
    """
    Tests whether PatternMatching's allowed_matches functionality works as intended when a bad
    partial match is specified.
    """
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    pattern1: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) == "box_0"
        })).perception_graph_pattern

    pattern2: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) in {"box_0", "the ground"}
        })).perception_graph_pattern

    pattern1_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern1._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_ground: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "the ground"),
    )

    matcher = PatternMatching(
        pattern=pattern1,
        graph_to_match_against=pattern2,
        matching_pattern_against_pattern=True,
        match_mode=MatchMode.OBJECT,
        allowed_matches=immutablesetmultidict([(pattern1_box, pattern2_box)]),
    )
    with pytest.raises(RuntimeError):
        first(
            matcher.matches(
                initial_partial_match={pattern1_box: pattern2_ground},
                use_lookahead_pruning=True,
            ),
            None,
        )
Example #11
def test_syntactically_infeasible_partial_match():
    """
    Tests whether syntactic feasibility works as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    # Create an altered perception graph by adding extra edges to the color node
    altered_perception_digraph = perception.copy_as_digraph()
    nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, remember it so we can attach extra edges to it below
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            nodes.append(node)

    # Add extra edges to and from each color node
    for node in nodes:
        random_node = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(node, random_node, label=PART_OF)
        random_node_2 = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(random_node_2, node, label=PART_OF)

    altered_perception_perception_graph = PerceptionGraph(
        altered_perception_digraph)
    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        altered_perception_perception_graph).perception_graph_pattern

    # Start the matching process, get a partial match
    first_matcher = altered_perception_pattern.matcher(
        altered_perception_perception_graph, match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        first_matcher.matches(use_lookahead_pruning=True), None)
    partial_mapping = partial_match.pattern_node_to_matched_graph_node
    # Try to extend the partial mapping; we expect a syntactic infeasibility runtime error
    second_matcher = altered_perception_pattern.matcher(
        perception, match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from the first matcher against the altered graph)
    # is syntactically incompatible with the original graph used by the second matcher
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
Example #12
def test_semantically_infeasible_partial_match():
    """
    Tests whether semantic feasibility works as intended
    """

    target_object = BOX
    # Create a training template for the target object
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    whole_perception_pattern = PerceptionGraphPattern.from_graph(
        perception).perception_graph_pattern

    # Create an altered perception graph where the color node is replaced with a black one
    altered_perception_digraph = perception.copy_as_digraph()
    nodes_to_remove = []
    edges = []
    different_nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, we make it black
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            new_node = (RgbColorPerception(0, 0, 0), node[1])
            # Get edge information
            for edge in perception.copy_as_digraph().edges(data=True):
                if edge[0] == node:
                    edges.append((new_node, edge[1], edge[2]))
                if edge[1] == node:
                    edges.append((edge[0], new_node, edge[2]))
            nodes_to_remove.append(node)
            different_nodes.append(new_node)

    # remove original node
    altered_perception_digraph.remove_nodes_from(nodes_to_remove)

    # add new nodes
    for node in different_nodes:
        altered_perception_digraph.add_node(node)
    # add edge information
    for edge in edges:
        altered_perception_digraph.add_edge(edge[0], edge[1])
        for k, v in edge[2].items():
            altered_perception_digraph[edge[0]][edge[1]][k] = v

    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        PerceptionGraph(altered_perception_digraph)).perception_graph_pattern

    partial_digraph = altered_perception_pattern.copy_as_digraph()
    partial_digraph.remove_nodes_from([
        node for node in partial_digraph.nodes
        if isinstance(node, IsColorNodePredicate)
    ])

    # Start the matching process, get a partial match
    matcher = whole_perception_pattern.matcher(perception,
                                               match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        matcher.matches(use_lookahead_pruning=True))
    partial_mapping = partial_match.pattern_node_to_matched_graph_node

    # Try to extend the partial mapping; we expect a semantic infeasibility runtime error
    second_matcher = whole_perception_pattern.matcher(
        PerceptionGraph(altered_perception_digraph),
        match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from first matcher with original perception graph)
    # semantically doesn't match the one in the altered version (second matcher with altered graph)
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
Example #13
def test_last_failed_pattern_node():
    """
    Tests whether `MatchFailure` can find the correct node.
    """

    target_object = BOX
    # Create a training template for the target object
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    for (_, _, perceptual_representation) in train_curriculum.instances():
        # Original perception graph
        perception = graph_without_learner(
            PerceptionGraph.from_frame(perceptual_representation.frames[0]))

        # Original perception pattern
        whole_perception_pattern = PerceptionGraphPattern.from_graph(
            perception).perception_graph_pattern
        # Create an altered perception graph where we replace the color node
        altered_perception_digraph = perception.copy_as_digraph()
        nodes_to_remove = []
        edges = []
        different_nodes = []
        for node in perception.copy_as_digraph().nodes:
            # If we find a color node, we make it black
            if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
                new_node = (RgbColorPerception(0, 0, 0), 42)
                # Get edge information
                for edge in perception.copy_as_digraph().edges(data=True):
                    if edge[0] == node:
                        edges.append((new_node, edge[1], edge[2]))
                    if edge[1] == node:
                        edges.append((edge[0], new_node, edge[2]))
                nodes_to_remove.append(node)
                different_nodes.append(new_node)

        # add new nodes
        for node in different_nodes:
            altered_perception_digraph.add_node(node)
        # add edge information
        for edge in edges:
            altered_perception_digraph.add_edge(edge[0], edge[1])
            for k, v in edge[2].items():
                altered_perception_digraph[edge[0]][edge[1]][k] = v
        # remove original node
        altered_perception_digraph.remove_nodes_from(nodes_to_remove)

        # Start the matching process
        matcher = whole_perception_pattern.matcher(
            PerceptionGraph(altered_perception_digraph),
            match_mode=MatchMode.NON_OBJECT)
        match_or_failure = matcher.first_match_or_failure_info()
        assert isinstance(match_or_failure, PatternMatching.MatchFailure)
        assert isinstance(match_or_failure.last_failed_pattern_node,
                          IsColorNodePredicate)
Example #14
    def _hypothesis_from_perception(
        self, perception: PerceptionGraph
    ) -> PerceptionGraphTemplate:
        # Build a pattern covering the entire perception graph.
        return PerceptionGraphTemplate(
            graph_pattern=PerceptionGraphPattern.from_graph(
                perception
            ).perception_graph_pattern
        )