Example #1
def test_dynamic_perception_graph_instantiation():
    ball = ObjectPerception("ball", _BALL_SCHEMA.geon.copy())
    table = ObjectPerception("table", axes=_TABLE_SCHEMA.axes.copy())

    first_frame = DevelopmentalPrimitivePerceptionFrame(
        perceived_objects=[ball, table],
        relations=[
            above(ball, table),
            Relation(IN_REGION, ball,
                     Region(table, distance=EXTERIOR_BUT_IN_CONTACT)),
            Relation(IN_REGION, table,
                     Region(ball, distance=EXTERIOR_BUT_IN_CONTACT)),
        ],
    )

    second_frame = DevelopmentalPrimitivePerceptionFrame(
        perceived_objects=[ball, table],
        relations=[Relation(IN_REGION, ball, Region(table, distance=DISTAL))],
    )

    perception_graph = PerceptionGraph.from_dynamic_perceptual_representation(
        PerceptualRepresentation(frames=[first_frame, second_frame]))
    assert perception_graph.dynamic

    # Ensure we don't attempt to handle more than two frames yet.
    with pytest.raises(ValueError):
        PerceptionGraph.from_dynamic_perceptual_representation(
            PerceptualRepresentation(
                frames=[first_frame, second_frame, second_frame]))
Example #2
 def _extract_perception_graph(
     self, perception: PerceptualRepresentation[DevelopmentalPrimitivePerceptionFrame]
 ) -> PerceptionGraph:
     if perception.is_dynamic():
         return PerceptionGraph.from_dynamic_perceptual_representation(perception)
     else:
         return PerceptionGraph.from_frame(perception.frames[0])
Example #3
def _extract_candidate_attributes(
    whole_scene_perception_graph: PerceptionGraph,
    object_with_attribute: ObjectSemanticNode,
) -> Sequence[PerceptionGraph]:
    perception_digraph = whole_scene_perception_graph.copy_as_digraph()
    # For now, we assume all attributes are based on properties.
    properties = immutableset(
        [
            node
            for _, node, label in perception_digraph.out_edges(
                object_with_attribute, data="label"
            )
            if label == HAS_PROPERTY_LABEL
        ]
    )
    # Furthermore, we limit the search space to the even smaller set of hypotheses
    # where we consider only single properties as possible attributes.
    # Otherwise there are too many hypotheses for the pursuit learner to search through
    # and it's unlikely to converge on the correct hypothesis
    # in any reasonable amount of time or number of examples.
    candidate_attribute_subgraph_node_sets = [
        immutableset([object_with_attribute, property]) for property in properties
    ]
    return immutableset(
        [
            whole_scene_perception_graph.subgraph_by_nodes(
                candidate_attribute_subgraph_nodes
            )
            for candidate_attribute_subgraph_nodes in candidate_attribute_subgraph_node_sets
        ]
    )
Example #4
def test_trivial_dynamic_situation_with_schemaless_object(language_mode):
    dad_situation_object = SituationObject.instantiate_ontology_node(
        ontology_node=DAD, ontology=GAILA_PHASE_1_ONTOLOGY)
    situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY,
        salient_objects=[dad_situation_object])
    perception_generator = HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator(
        GAILA_PHASE_1_ONTOLOGY)
    # We explicitly exclude ground in perception generation

    # this generates a static perception...
    perception = perception_generator.generate_perception(
        situation, chooser=RandomChooser.for_seed(0), include_ground=False)

    # so we need to construct a dynamic one by hand from two identical scenes
    dynamic_perception = PerceptualRepresentation(
        frames=[perception.frames[0], perception.frames[0]])

    perception_graph = PerceptionGraph.from_dynamic_perceptual_representation(
        dynamic_perception)
    perception_semantic_alignment = PerceptionSemanticAlignment.create_unaligned(
        perception_graph)
    (_, description_to_matched_semantic_node
     ) = LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[language_mode].match_objects(
         perception_semantic_alignment)
    assert len(description_to_matched_semantic_node) == 1
    assert (language_mode == LanguageMode.ENGLISH and
            ("Dad", ) in description_to_matched_semantic_node) or (
                language_mode == LanguageMode.CHINESE and
                ("ba4 ba4", ) in description_to_matched_semantic_node)
Example #5
def test_matching_static_vs_dynamic_graphs():
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)
    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)
    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])

    perception_pattern = PerceptionGraphPattern.from_graph(
        perception_graph).perception_graph_pattern

    temporal_perception_pattern = perception_pattern.copy_with_temporal_scopes(
        required_temporal_scopes=[TemporalScope.AFTER])

    # Test runtime error for matching static pattern against dynamic graph and vice versa

    with pytest.raises(RuntimeError):
        perception_pattern.matcher(temporal_perception_graph,
                                   match_mode=MatchMode.NON_OBJECT)

    with pytest.raises(RuntimeError):
        temporal_perception_pattern.matcher(perception_graph,
                                            match_mode=MatchMode.NON_OBJECT)
Example #6
def _extract_candidate_relations(
    whole_scene_perception_graph: PerceptionGraph,
    relation_object_1: ObjectSemanticNode,
    relation_object_2: ObjectSemanticNode,
) -> Sequence[PerceptionGraph]:
    # The directions of edges in the perception graph are not necessarily meaningful
    # from the point-of-view of hypothesis generation, so we need an undirected copy
    # of the graph.
    perception_digraph = whole_scene_perception_graph.copy_as_digraph()
    perception_graph_undirected = perception_digraph.to_undirected(
        # as_view=True loses determinism
        as_view=False)

    output_graphs = []

    # The core of our hypothesis for the semantics of a preposition is all nodes
    # along the shortest path between the two objects involved in the perception graph.
    for hypothesis_spine_nodes in all_shortest_paths(
            perception_graph_undirected, relation_object_2, relation_object_1):
        # Along the core of our hypothesis we also want to collect the predecessors and successors
        hypothesis_nodes_mutable = []
        for node in hypothesis_spine_nodes:
            if node not in {relation_object_1, relation_object_2}:
                for successor in perception_digraph.successors(node):
                    if not (isinstance(successor, ObjectPerception)
                            or isinstance(successor, ObjectSemanticNode)):
                        hypothesis_nodes_mutable.append(successor)
                for predecessor in perception_digraph.predecessors(node):
                    if not (isinstance(predecessor, ObjectPerception)
                            or isinstance(predecessor, ObjectSemanticNode)):
                        hypothesis_nodes_mutable.append(predecessor)

        hypothesis_nodes_mutable.extend(hypothesis_spine_nodes)

        # We wrap the nodes in an immutable set to remove duplicates
        # while preserving iteration determinism.
        hypothesis_nodes = immutableset(hypothesis_nodes_mutable)

        output_graphs.append(
            PerceptionGraph(
                digraph_with_nodes_sorted_by(
                    subgraph(perception_digraph, hypothesis_nodes),
                    _graph_node_order)))

    return output_graphs
Example #7
def test_successfully_extending_partial_match():
    """
    Tests whether we can match a perception pattern against a perception graph
    when initializing the search from a partial match.
    """

    target_object = BOX
    # Create train and test templates for the target objects
    train_obj_object = object_variable("obj-with-color", target_object)

    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    # Original perception graph
    perception = PerceptionGraph.from_frame(
        perceptual_representation.frames[0])

    # Create a perception pattern for the whole thing
    # and also a perception pattern for a subset of the whole pattern
    whole_perception_pattern = PerceptionGraphPattern.from_graph(
        perception).perception_graph_pattern

    partial_digraph = whole_perception_pattern.copy_as_digraph()
    partial_digraph.remove_nodes_from([
        node for node in partial_digraph.nodes
        if isinstance(node, IsColorNodePredicate)
    ])
    partial_perception_pattern = PerceptionGraphPattern(partial_digraph)

    # get our initial match by matching the partial pattern
    matcher = partial_perception_pattern.matcher(
        perception, match_mode=MatchMode.NON_OBJECT)

    partial_match: PerceptionGraphPatternMatch = first(
        matcher.matches(use_lookahead_pruning=True))
    partial_mapping = partial_match.pattern_node_to_matched_graph_node

    # Try to extend the partial mapping, to create a complete mapping
    matcher_2 = whole_perception_pattern.matcher(
        perception, match_mode=MatchMode.NON_OBJECT)
    complete_match: PerceptionGraphPatternMatch = first(
        matcher_2.matches(initial_partial_match=partial_mapping,
                          use_lookahead_pruning=True),
        None,
    )
    complete_mapping = complete_match.pattern_node_to_matched_graph_node
    assert len(complete_mapping) == len(perception.copy_as_digraph().nodes)
    assert len(complete_mapping) == len(
        whole_perception_pattern.copy_as_digraph().nodes)
Example #8
def test_perception_graph_post_init_edge_cases():
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)
    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)
    perceptual_representation = only(train_curriculum.instances())[2]
    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    temporal_digraph = temporal_perception_graph.copy_as_digraph()
    # Test valid edge label
    # The only feasible test seems to be instantiation,
    # since creating a corrupt instance throws the same RuntimeError.
    with pytest.raises(RuntimeError):
        TemporallyScopedEdgeLabel(None)

    # In a dynamic graph, all edge labels must be wrapped in TemporallyScopedEdgeLabel
    new_graph = DiGraph()
    for (source, target) in temporal_digraph.edges():
        new_graph.add_edge(source, target)
        new_graph[source][target]["label"] = None
    with pytest.raises(RuntimeError):
        PerceptionGraph(new_graph, dynamic=True)

    # TemporallyScopedEdgeLabels may not appear in a static graph
    new_graph = DiGraph()
    for (source, target) in temporal_digraph.edges():
        new_graph.add_edge(source, target)
        new_graph[source][target]["label"] = TemporallyScopedEdgeLabel(
            "attribute", [TemporalScope.AFTER])
    with pytest.raises(RuntimeError):
        PerceptionGraph(new_graph)

    # Every edge in a PerceptionGraph must have a 'label'
    new_graph = DiGraph()
    for (source, target) in temporal_digraph.edges():
        new_graph.add_edge(source, target)
    with pytest.raises(RuntimeError):
        PerceptionGraph(new_graph)
Example #9
def graph_without_learner(
        perception_graph: PerceptionGraph) -> PerceptionGraph:
    """ Helper function to return a `PerceptionGraph`
    without a ground object and its related nodes."""
    graph = perception_graph.copy_as_digraph()
    # Get the learner node
    learner_node_candidates = [
        node for node in graph.nodes() if isinstance(node, ObjectPerception)
        and node.debug_handle == LEARNER.handle
    ]
    if len(learner_node_candidates) > 1:
        raise RuntimeError("More than one learners in perception.")
    elif len(learner_node_candidates) == 1:
        learner_node = first(learner_node_candidates)
        # Remove learner
        graph.remove_node(learner_node)
        # remove remaining islands
        islands = list(isolates(graph))
        graph.remove_nodes_from(islands)
    return PerceptionGraph(graph, dynamic=perception_graph.dynamic)
Example #10
def test_recognize_in_transfer_of_possession(language_mode):
    dad = object_variable("person_0", DAD)
    baby = object_variable("person_1", BABY)
    chair = object_variable("give_object_0", CHAIR)

    giving_template = Phase1SituationTemplate(
        "dad-transfer-of-possession",
        salient_object_variables=[dad, baby, chair],
        actions=[
            Action(
                GIVE,
                argument_roles_to_fillers=[(AGENT, dad), (GOAL, baby),
                                           (THEME, chair)],
            )
        ],
        syntax_hints=[PREFER_DITRANSITIVE],
    )

    (_, _, perception) = first(
        phase1_instances(
            "foo",
            sampled(
                giving_template,
                max_to_sample=1,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                block_multiple_of_the_same_type=True,
            ),
        ).instances())

    perception_graph = PerceptionGraph.from_dynamic_perceptual_representation(
        perception)
    perception_semantic_alignment = PerceptionSemanticAlignment.create_unaligned(
        perception_graph)
    (_, description_to_matched_semantic_node
     ) = LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[language_mode].match_objects(
         perception_semantic_alignment)
    assert len(description_to_matched_semantic_node) == 4
    assert (language_mode == LanguageMode.ENGLISH and
            ("Dad", ) in description_to_matched_semantic_node) or (
                language_mode == LanguageMode.CHINESE and
                ("ba4 ba4", ) in description_to_matched_semantic_node)
Example #11
    def _enrich_post_process(
        self,
        perception_graph_after_matching: PerceptionGraph,
        immutable_new_nodes: AbstractSet[SemanticNode],
    ) -> Tuple[PerceptionGraph, AbstractSet[SemanticNode]]:
        object_root_nodes = immutableset(  # pylint:disable=protected-access
            node for node in perception_graph_after_matching._graph.nodes  # pylint:disable=protected-access
            if isinstance(node, ObjectPerception))
        new_nodes = []
        perception_graph_after_processing = perception_graph_after_matching
        for object_root_node in object_root_nodes:
            fake_subgraph = subgraph(  # pylint:disable=protected-access
                perception_graph_after_matching._graph,  # pylint:disable=protected-access
                [object_root_node],
            )
            fake_perception_graph = PerceptionGraph(
                graph=fake_subgraph,
                dynamic=perception_graph_after_matching.dynamic)
            fake_pattern_graph = PerceptionGraphPattern.from_graph(
                fake_perception_graph)
            fake_object_semantic_node = ObjectSemanticNode(
                concept=FunctionalObjectConcept("unknown_object"))
            # perception_graph_after_processing = replace_match_root_with_object_semantic_node(
            #     object_semantic_node=fake_object_semantic_node,
            perception_graph_after_processing = replace_match_with_object_graph_node(
                matched_object_node=fake_object_semantic_node,
                current_perception=perception_graph_after_processing,
                pattern_match=PerceptionGraphPatternMatch(
                    matched_pattern=fake_pattern_graph.
                    perception_graph_pattern,
                    graph_matched_against=perception_graph_after_matching,
                    matched_sub_graph=fake_perception_graph,
                    pattern_node_to_matched_graph_node=fake_pattern_graph.
                    perception_graph_node_to_pattern_node,
                ),
            ).perception_graph_after_replacement
            new_nodes.append(fake_object_semantic_node)

        return (
            perception_graph_after_processing,
            immutableset(chain(immutable_new_nodes, new_nodes)),
        )
Example #12
def test_copy_with_temporal_scopes_content():
    """
    Tests whether copy_with_temporal_scopes converts graphs to be dynamic as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    for (source, target) in perception_graph.copy_as_digraph().edges():
        assert not isinstance(
            perception_graph.copy_as_digraph()[source][target]["label"],
            TemporallyScopedEdgeLabel,
        )
    for (source,
         target) in temporal_perception_graph.copy_as_digraph().edges():
        # Check type, and then the content
        label = temporal_perception_graph.copy_as_digraph(
        )[source][target]["label"]
        assert isinstance(label, TemporallyScopedEdgeLabel)
        assert (label.attribute == perception_graph.copy_as_digraph()[source]
                [target]["label"])
        assert all(specifier in [TemporalScope.AFTER]
                   for specifier in label.temporal_specifiers)
Example #13
 def _preprocess_scene(
     self, perception_semantic_alignment: PerceptionSemanticAlignment
 ) -> PerceptionSemanticAlignment:
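     # Gather the object semantic nodes recognized so far and count each concept.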
     nodes = [
         s for s in perception_semantic_alignment.semantic_nodes
         if isinstance(s, ObjectSemanticNode)
     ]
     counts = collections.Counter([s.concept for s in nodes])
     digraph = perception_semantic_alignment.perception_graph.copy_as_digraph(
     )
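     # Attach a count node to any concept occurring more than once: TWO for exactly two, MANY otherwise.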
     for node in nodes:
         count = counts[node.concept]
         if count > 1:
             if count == 2:
                 count_node = TWO
             else:
                 count_node = MANY
             digraph.add_node(count_node)
             digraph.add_edge(node, count_node, label=HAS_COUNT)
     graph_with_counts = PerceptionGraph(
         digraph,
         dynamic=perception_semantic_alignment.perception_graph.dynamic)
     return PerceptionSemanticAlignment(
         graph_with_counts, perception_semantic_alignment.semantic_nodes)
Example #14
def test_subset_learner_subobject():
    mom = SituationObject.instantiate_ontology_node(
        ontology_node=MOM, ontology=GAILA_PHASE_1_ONTOLOGY)
    head = SituationObject.instantiate_ontology_node(
        ontology_node=HEAD, ontology=GAILA_PHASE_1_ONTOLOGY)
    hand = SituationObject.instantiate_ontology_node(
        ontology_node=HAND, ontology=GAILA_PHASE_1_ONTOLOGY)
    ball = SituationObject.instantiate_ontology_node(
        ontology_node=BALL, ontology=GAILA_PHASE_1_ONTOLOGY)
    house = SituationObject.instantiate_ontology_node(
        ontology_node=HOUSE, ontology=GAILA_PHASE_1_ONTOLOGY)
    ground = SituationObject.instantiate_ontology_node(
        ontology_node=GROUND, ontology=GAILA_PHASE_1_ONTOLOGY)

    mom_situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=immutableset([mom]))

    floating_head_situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY,
        salient_objects=immutableset([head]),
        other_objects=immutableset([ground]),
        always_relations=flatten_relations(negate(on(head, ground))),
    )

    # Need to include some extra situations so that the learner will prune its semantics for 'a'
    # away and not recognize it as an object.
    floating_hand_situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY,
        salient_objects=immutableset([hand]),
        other_objects=immutableset([ground]),
        always_relations=flatten_relations(negate(on(hand, ground))),
    )

    floating_ball_situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY,
        salient_objects=immutableset([ball]),
        other_objects=immutableset([ground]),
        always_relations=flatten_relations(negate(on(ball, ground))),
    )

    floating_house_situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY,
        salient_objects=immutableset([house]),
        other_objects=immutableset([ground]),
        always_relations=flatten_relations(negate(on(house, ground))),
    )

    object_learner = SubsetObjectLearnerNew(ontology=GAILA_PHASE_1_ONTOLOGY,
                                            beam_size=5,
                                            language_mode=LanguageMode.ENGLISH)

    for situation in [
            mom_situation,
            floating_head_situation,
            floating_hand_situation,
            floating_ball_situation,
            floating_house_situation,
    ]:
        perceptual_representation = GAILA_PHASE_1_PERCEPTION_GENERATOR.generate_perception(
            situation, chooser=RandomChooser.for_seed(0))
        for linguistic_description in GAILA_PHASE_1_LANGUAGE_GENERATOR.generate_language(
                situation, chooser=RandomChooser.for_seed(0)):
            perception_graph = PerceptionGraph.from_frame(
                perceptual_representation.frames[0])

            object_learner.learn_from(
                LanguagePerceptionSemanticAlignment(
                    language_concept_alignment=LanguageConceptAlignment.
                    create_unaligned(language=linguistic_description),
                    perception_semantic_alignment=PerceptionSemanticAlignment(
                        perception_graph=perception_graph, semantic_nodes=[]),
                ))

    mom_perceptual_representation = GAILA_PHASE_1_PERCEPTION_GENERATOR.generate_perception(
        mom_situation, chooser=RandomChooser.for_seed(0))
    perception_graph = PerceptionGraph.from_frame(
        mom_perceptual_representation.frames[0])
    enriched = object_learner.enrich_during_description(
        PerceptionSemanticAlignment.create_unaligned(perception_graph))

    semantic_node_types_and_debug_strings = {
        (type(semantic_node), semantic_node.concept.debug_string)
        for semantic_node in enriched.semantic_nodes
    }
    assert (ObjectSemanticNode, "Mom") in semantic_node_types_and_debug_strings
    assert (ObjectSemanticNode,
            "head") in semantic_node_types_and_debug_strings
    assert (ObjectSemanticNode,
            "hand") in semantic_node_types_and_debug_strings
Example #15
def test_copy_with_temporal_scope_pattern_content():
    """
    Tests whether copy_with_temporal_scopes converts patterns to be dynamic as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    perception_pattern = PerceptionGraphPattern.from_graph(
        perception_graph).perception_graph_pattern

    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    temporal_perception_pattern = perception_pattern.copy_with_temporal_scopes(
        required_temporal_scopes=TemporalScope.AFTER)

    # Exception while applying to dynamic pattern
    with pytest.raises(RuntimeError):
        temporal_perception_pattern.copy_with_temporal_scopes(
            required_temporal_scopes=TemporalScope.AFTER)

    for (source, target) in perception_pattern.copy_as_digraph().edges():
        assert not isinstance(
            perception_pattern.copy_as_digraph()[source][target]["predicate"],
            HoldsAtTemporalScopePredicate,
        )
    for (source,
         target) in temporal_perception_pattern.copy_as_digraph().edges():
        # Check type, and then the content
        predicate = temporal_perception_pattern.copy_as_digraph(
        )[source][target]["predicate"]
        # Test that HoldsAtTemporalScopePredicate's dot_label is a string
        # and that matches_predicate behaves as expected
        assert isinstance(predicate.dot_label(), str)
        assert predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          predicate.temporal_scopes))
        assert not predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          [TemporalScope.BEFORE]))
        assert isinstance(predicate, HoldsAtTemporalScopePredicate)
        assert (predicate.wrapped_edge_predicate == perception_pattern.
                copy_as_digraph()[source][target]["predicate"])
        assert len(predicate.temporal_scopes) == 1
        assert only(predicate.temporal_scopes) == TemporalScope.AFTER

    # Test normal matching behavior
    temporal_matcher = temporal_perception_pattern.matcher(
        temporal_perception_graph, match_mode=MatchMode.NON_OBJECT)
    first(temporal_matcher.matches(use_lookahead_pruning=True))

    # Test HoldsAtTemporalScopePredicate
    for (source, target) in perception_graph.copy_as_digraph().edges():
        label = "test edge label"
        edge_predicate = AnyEdgePredicate()
        temporal_predicate = HoldsAtTemporalScopePredicate(
            edge_predicate, [TemporalScope.AFTER])

        temporal_edge_label = TemporallyScopedEdgeLabel(
            label, [TemporalScope.AFTER])
        assert temporal_predicate(source, temporal_edge_label, target)
        # A non-temporal edge label raises an exception
        with pytest.raises(RuntimeError):
            temporal_predicate(source, label, target)
Example #16
def replace_match_with_object_graph_node(
    matched_object_node: ObjectSemanticNode,
    current_perception: PerceptionGraph,
    pattern_match: PerceptionGraphPatternMatch,
) -> PerceptionGraphWithReplacedObjectResult:
    """
    Internal function to replace the nodes of the perception matched by the object pattern
    with an `ObjectSemanticNode`.

    Any external relationships those nodes had are inherited by the `ObjectSemanticNode`.
    """
    perception_digraph = current_perception.copy_as_digraph()
    perception_digraph.add_node(matched_object_node)

    matched_subgraph_nodes: ImmutableSet[PerceptionGraphNode] = immutableset(
        [
            node for node in pattern_match.matched_sub_graph._graph.nodes  # pylint:disable=protected-access
            if node in perception_digraph.nodes
        ],
        disable_order_check=True,
    )

    # Multiple sub-objects of a matched object may link to the same property
    # (for example, to a color shared by all the parts).
    # In this case, we want the shared object node to link to this property only once.
    external_properties: Set[Union[OntologyNode, ObjectSemanticNode]] = set()
    duplicate_nodes_to_remove: List[PerceptionGraphNode] = []

    for matched_subgraph_node in matched_subgraph_nodes:
        if isinstance(matched_subgraph_node, ObjectSemanticNode):
            raise RuntimeError(
                f"We do not currently allow object recognitions to themselves "
                f"operate over other object recognitions, but got match "
                f"{pattern_match.matched_sub_graph}")

        # A pattern might refer to shared parts of the world like the learner
        # or the ground, and we don't want the replacement root to inherit the
        # shared world item's relationships.
        if matched_subgraph_node in SHARED_WORLD_ITEMS:
            continue

        # If there is an edge from the matched sub-graph to a node outside it,
        # also add an edge from the object match node to that node.
        for matched_subgraph_node_successor in perception_digraph.successors(
                matched_subgraph_node):
            edge_label = _get_edge_label(perception_digraph,
                                         matched_subgraph_node,
                                         matched_subgraph_node_successor)

            # don't want to add edges which are internal to the matched sub-graph
            if matched_subgraph_node_successor not in matched_subgraph_nodes:
                if edge_equals_ignoring_temporal_scope(edge_label,
                                                       HAS_PROPERTY_LABEL):
                    # Prevent multiple `has-property` assertions to the same color node
                    # on a recognized object.
                    # Also prevent size relations from being inherited on the root object.
                    if matched_subgraph_node_successor[
                            0] in external_properties or matched_subgraph_node_successor[
                                0] in {
                                    SMALLER_THAN,
                                    BIGGER_THAN,
                                    ABOUT_THE_SAME_SIZE_AS_LEARNER,
                                }:
                        if (perception_digraph.degree(
                                matched_subgraph_node_successor) != 1):
                            raise_graph_exception(
                                f"Node {matched_subgraph_node_successor} "
                                f"appears to be a duplicate property node, "
                                f"but has degree != 1",
                                current_perception,
                            )
                        duplicate_nodes_to_remove.append(
                            matched_subgraph_node_successor)
                        continue
                    else:
                        external_properties.add(
                            matched_subgraph_node_successor[0])

                perception_digraph.add_edge(matched_object_node,
                                            matched_subgraph_node_successor,
                                            label=edge_label)

        # If there is an edge to the matched sub-graph from a node outside it,
        # also add an edge to the object match node from that node.
        for matched_subgraph_node_predecessor in perception_digraph.predecessors(
                matched_subgraph_node):
            edge_label = _get_edge_label(
                perception_digraph,
                matched_subgraph_node_predecessor,
                matched_subgraph_node,
            )

            # don't want to add edges which are internal to the matched sub-graph
            if matched_subgraph_node_predecessor not in matched_subgraph_nodes:
                if edge_equals_ignoring_temporal_scope(edge_label,
                                                       HAS_PROPERTY_LABEL):
                    # Prevent multiple `has-property` assertions to the same color node
                    # on a recognized object.
                    if isinstance(matched_subgraph_node_predecessor,
                                  ObjectSemanticNode):
                        prop = matched_subgraph_node_predecessor
                    else:
                        prop = matched_subgraph_node_predecessor[0]
                    if prop in external_properties:
                        if (perception_digraph.degree(
                                matched_subgraph_node_predecessor) != 1):
                            raise_graph_exception(
                                f"Node {matched_subgraph_node_predecessor} "
                                f"appears to be a duplicate property node, "
                                f"but has degree != 1",
                                current_perception,
                            )
                        duplicate_nodes_to_remove.append(
                            matched_subgraph_node_predecessor)
                        continue
                    else:
                        external_properties.add(prop)

                perception_digraph.add_edge(
                    matched_subgraph_node_predecessor,
                    matched_object_node,
                    label=edge_label,
                )

    # Remove all matched nodes which are not shared world items (e.g. gravity, the learner)
    to_remove = immutableset([
        matched_node for matched_node in matched_subgraph_nodes
        if matched_node not in SHARED_WORLD_ITEMS
    ] + duplicate_nodes_to_remove)
    perception_digraph.remove_nodes_from(to_remove)

    # We want to re-add any properties linked directly to the root node of an object.
    # Example: water is a liquid
    # These may be relevant to learning verb semantics
    # (e.g. you can only drink a liquid)
    _add_external_properties_linked_to_root_object_perception(
        original_graph=current_perception.copy_as_digraph(),
        output_graph=perception_digraph,
        matched_nodes=matched_subgraph_nodes,
        matched_object_node=matched_object_node,
    )
    # We also want to re-add any relationships directly linked to the root node of an object.
    # These may have been overwritten when iterating over the matched nodes.
    # Example: ball is behind table
    _add_external_relationships_linked_to_root_object_perception(
        original_graph=current_perception.copy_as_digraph(),
        output_graph=perception_digraph,
        matched_nodes=matched_subgraph_nodes,
        matched_object_node=matched_object_node,
    )

    return PerceptionGraphWithReplacedObjectResult(
        PerceptionGraph(perception_digraph,
                        dynamic=current_perception.dynamic), to_remove)
Example #17
def test_cannot_make_dynamic_copy_of_a_dynamic_graph():
    graph = PerceptionGraph(graph=DiGraph(), dynamic=True)
    with pytest.raises(RuntimeError):
        graph.copy_with_temporal_scopes([TemporalScope.BEFORE])
Example #18
def get_objects_from_perception(
    observed_perception_graph: PerceptionGraph
) -> List[PerceptionGraph]:
    """
    Utility function to get a list of `PerceptionGraph`\ s representing the independent objects in the scene
    """
    perception_as_digraph = observed_perception_graph.copy_as_digraph()
    perception_as_graph = perception_as_digraph.to_undirected()

    meanings = []

    # 1) Take all of the object perceptions that don't have part-of relationships with anything else
    root_object_perception_nodes = []
    for node in perception_as_graph.nodes:
        if isinstance(node, ObjectPerception) and node.debug_handle != "the ground":
            if not any(
                [
                    u == node and str(data["label"]) == "partOf"
                    for u, v, data in perception_as_digraph.edges.data()
                ]
            ):
                root_object_perception_nodes.append(node)

    # 2) For each of these, walk along the part-of relationships backwards,
    # i.e. find all of the subparts of the root object
    for root_object_perception_node in root_object_perception_nodes:
        # Iteratively get all other object perceptions that connect to a root with a part of
        # relation
        all_object_perception_nodes = [root_object_perception_node]
        frontier = [root_object_perception_node]
        updated = True
        while updated:
            updated = False
            new_frontier = []
            for frontier_node in frontier:
                for node in perception_as_graph.neighbors(frontier_node):
                    edge_data = perception_as_digraph.get_edge_data(
                        node, frontier_node, default=-1
                    )
                    if edge_data != -1 and str(edge_data["label"]) == "partOf":
                        new_frontier.append(node)

            if new_frontier:
                all_object_perception_nodes.extend(new_frontier)
                updated = True
                frontier = new_frontier

        # Now we have a list of all perceptions that are connected.
        # 3) For each of these objects, including the root object, get the axes, properties,
        # and the relations and regions which hold between these internal object perceptions.
        other_nodes = []
        for node in all_object_perception_nodes:
            for neighbor in perception_as_graph.neighbors(node):
                # Filter out regions that don't have a reference in all object perception nodes
                # TODO: We currently remove colors to achieve a match - otherwise finding
                #  patterns fails.
                if (
                    isinstance(neighbor, Region)
                    and neighbor.reference_object not in all_object_perception_nodes
                    or isinstance(neighbor, RgbColorPerception)
                ):
                    continue
                # Append all other non-object nodes to be kept in the subgraph
                if not isinstance(neighbor, ObjectPerception):
                    other_nodes.append(neighbor)

        generated_subgraph = networkx_utils.subgraph(
            perception_as_digraph, all_object_perception_nodes + other_nodes
        )
        meanings.append(PerceptionGraph(generated_subgraph))

    logging.info(f"Got {len(meanings)} candidate meanings")
    return meanings
Example #19
def extract_candidate_objects(
    whole_scene_perception_graph: PerceptionGraph
) -> Sequence[PerceptionGraph]:
    """
    Pulls out distinct objects from a scene.

    We will attempt to recognize only these and will ignore other parts of the scene.
    """
    scene_digraph = whole_scene_perception_graph.copy_as_digraph()

    def is_part_of_label(label) -> bool:
        return label == PART_OF or (isinstance(
            label, TemporallyScopedEdgeLabel) and label.attribute == PART_OF)

    # We first identify root object nodes, which are object nodes with no part-of
    # relationship with other object nodes.
    def is_root_object_node(node) -> bool:
        if isinstance(node, ObjectPerception):
            for (_, _, edge_label) in scene_digraph.out_edges(node,
                                                              data="label"):
                if is_part_of_label(edge_label):
                    # This object node is part of another object and cannot be a root.
                    return False
            return True
        return False

    candidate_object_root_nodes = [
        node for node in scene_digraph.nodes
        if is_root_object_node(node) and node not in (GROUND_PERCEPTION,
                                                      LEARNER_PERCEPTION)
    ]

    candidate_objects: List[PerceptionGraph] = []
    for root_object_node in candidate_object_root_nodes:
        # Having identified the root nodes of the candidate objects,
        # we now gather all sub-object nodes.
        object_nodes_in_object_list = []
        nodes_to_examine = [root_object_node]

        # This would be clearer recursively
        # but I'm betting this implementation is a bit faster in Python.
        # (A recursive sketch of this step follows the example.)

        nodes_visited: Set[PerceptionGraphNode] = set()
        while nodes_to_examine:
            node_to_examine = nodes_to_examine.pop()
            if node_to_examine in nodes_visited:
                continue
            nodes_visited.add(node_to_examine)
            object_nodes_in_object_list.append(node_to_examine)
            for (next_node, _,
                 edge_label) in scene_digraph.in_edges(node_to_examine,
                                                       data="label"):
                if is_part_of_label(edge_label):
                    nodes_to_examine.append(next_node)
        object_nodes_in_object = immutableset(object_nodes_in_object_list)

        # Now we know all object nodes for this candidate object.
        # Finally, we find the sub-graph to match against which could possibly correspond
        # to this candidate object
        # by performing a BFS over the graph
        # but *stopping whenever we encounter an object node
        # which is not part of this candidate object*.
        # This is a little more generous than we need to be, but it's simple.
        nodes_to_examine = [root_object_node]
        candidate_subgraph_nodes = []
        nodes_visited.clear()
        while nodes_to_examine:
            node_to_examine = nodes_to_examine.pop()
            is_allowable_node = (
                not isinstance(node_to_examine,
                               (ObjectPerception, ObjectSemanticNode))
                or node_to_examine in object_nodes_in_object)
            if node_to_examine not in nodes_visited and is_allowable_node:
                nodes_visited.add(node_to_examine)
                candidate_subgraph_nodes.append(node_to_examine)
                nodes_to_examine.extend(
                    out_neighbor
                    for (_, out_neighbor
                         ) in scene_digraph.out_edges(node_to_examine)
                    if not (isinstance(out_neighbor, tuple) and isinstance(
                        out_neighbor[0], Region) and out_neighbor[0].
                            reference_object == GROUND_PERCEPTION))
                nodes_to_examine.extend(
                    in_neighbor
                    for (in_neighbor,
                         _) in scene_digraph.in_edges(node_to_examine)
                    # Avoid in-edges from Regions, as they can come from other objects' regions (e.g. the ground).
                    if not (
                        # isinstance(node_to_examine, GeonAxis)
                        isinstance(in_neighbor, tuple)
                        and isinstance(in_neighbor[0], Region)))
        candidate_objects.append(
            whole_scene_perception_graph.subgraph_by_nodes(
                immutableset(candidate_subgraph_nodes)))
    return candidate_objects
Example #20
def test_last_failed_pattern_node():
    """
    Tests whether `MatchFailure` can find the correct node.
    """

    target_object = BOX
    # Create train and test templates for the target objects
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    for (_, _, perceptual_representation) in train_curriculum.instances():
        # Original perception graph
        perception = graph_without_learner(
            PerceptionGraph.from_frame(perceptual_representation.frames[0]))

        # Original perception pattern
        whole_perception_pattern = PerceptionGraphPattern.from_graph(
            perception).perception_graph_pattern
        # Create an altered perception graph in which we replace the color node
        altered_perception_digraph = perception.copy_as_digraph()
        nodes_to_remove = []
        edges = []
        different_nodes = []
        for node in perception.copy_as_digraph().nodes:
            # If we find a color node, we make it black
            if isinstance(node, tuple) and isinstance(node[0],
                                                      RgbColorPerception):
                new_node = (RgbColorPerception(0, 0, 0), 42)
                # Get edge information
                for edge in perception.copy_as_digraph().edges(data=True):
                    if edge[0] == node:
                        edges.append((new_node, edge[1], edge[2]))
                    if edge[1] == node:
                        edges.append((edge[0], new_node, edge[2]))
                nodes_to_remove.append(node)
                different_nodes.append(new_node)

        # add new nodes
        for node in different_nodes:
            altered_perception_digraph.add_node(node)
        # add edge information
        for edge in edges:
            altered_perception_digraph.add_edge(edge[0], edge[1])
            for k, v in edge[2].items():
                altered_perception_digraph[edge[0]][edge[1]][k] = v
        # remove original node
        altered_perception_digraph.remove_nodes_from(nodes_to_remove)

        # Start the matching process
        matcher = whole_perception_pattern.matcher(
            PerceptionGraph(altered_perception_digraph),
            match_mode=MatchMode.NON_OBJECT)
        match_or_failure = matcher.first_match_or_failure_info()
        assert isinstance(match_or_failure, PatternMatching.MatchFailure)
        assert isinstance(match_or_failure.last_failed_pattern_node,
                          IsColorNodePredicate)
Example #21
def do_object_on_table_test(
    object_type_to_match: OntologyNode,
    object_schema: ObjectStructuralSchema,
    negative_object_ontology_node: OntologyNode,
):
    """
    Tests the `PerceptionGraphMatcher` can match simple objects.
    """
    # we create four situations:
    # a object_to_match above or under a table with color red or blue
    color = color_variable("color")
    object_to_match = object_variable(
        debug_handle=object_type_to_match.handle,
        root_node=object_type_to_match,
        added_properties=[color],
    )
    table = standard_object("table_0", TABLE)

    object_on_table_template = Phase1SituationTemplate(
        "object_to_match-on-table",
        salient_object_variables=[object_to_match, table],
        asserted_always_relations=[
            bigger_than(table, object_to_match),
            on(object_to_match, table),
        ],
    )

    object_under_table_template = Phase1SituationTemplate(
        "object_to_match-under-table",
        salient_object_variables=[object_to_match, table],
        asserted_always_relations=[
            bigger_than(table, object_to_match),
            above(table, object_to_match),
        ],
    )

    # We test that a perceptual pattern for "object_to_match" matches in all four cases.
    object_to_match_pattern = PerceptionGraphPattern.from_schema(
        object_schema, perception_generator=GAILA_PHASE_1_PERCEPTION_GENERATOR)

    situations_with_object_to_match = chain(
        all_possible_test(object_on_table_template),
        all_possible_test(object_under_table_template),
    )

    for (_,
         situation_with_object) in enumerate(situations_with_object_to_match):
        perception = GAILA_PHASE_1_PERCEPTION_GENERATOR.generate_perception(
            situation_with_object, chooser=RandomChooser.for_seed(0))
        perception_graph = PerceptionGraph.from_frame(perception.frames[0])
        # perception_graph.render_to_file(f"object_to_match {idx}", out_dir / f"object_to_match
        # -{idx}.pdf")
        # object_to_match_pattern.render_to_file(f"object_to_match pattern", out_dir /
        # "object_to_match_pattern.pdf")
        matcher = object_to_match_pattern.matcher(perception_graph,
                                                  match_mode=MatchMode.OBJECT)
        # debug_matching = matcher.debug_matching(
        #    use_lookahead_pruning=False, render_match_to=Path("/Users/gabbard/tmp")
        # )
        result = any(matcher.matches(use_lookahead_pruning=False))
        if not result:
            return False

    # Now let's create the same situations, but substitute a negative_object for an object_to_match.
    negative_object = object_variable(
        debug_handle=negative_object_ontology_node.handle,
        root_node=negative_object_ontology_node,
        added_properties=[color],
    )
    negative_object_on_table_template = Phase1SituationTemplate(
        "negative_object-on-table",
        salient_object_variables=[negative_object, table],
        asserted_always_relations=[
            bigger_than(table, negative_object),
            on(negative_object, table),
        ],
    )

    negative_object_under_table_template = Phase1SituationTemplate(
        "negative_object-under-table",
        salient_object_variables=[negative_object, table],
        asserted_always_relations=[
            bigger_than(table, negative_object),
            above(table, negative_object),
        ],
    )

    situations_with_negative_object = chain(
        all_possible_test(negative_object_on_table_template),
        all_possible_test(negative_object_under_table_template),
    )

    # The pattern should now fail to match.
    for situation_with_negative_object in situations_with_negative_object:
        perception = GAILA_PHASE_1_PERCEPTION_GENERATOR.generate_perception(
            situation_with_negative_object, chooser=RandomChooser.for_seed(0))
        perception_graph = PerceptionGraph.from_frame(perception.frames[0])
        if any(
                object_to_match_pattern.matcher(
                    perception_graph, match_mode=MatchMode.OBJECT).matches(
                        use_lookahead_pruning=True)):
            return False
    return True
Example #22
def preposition_hypothesis_from_perception(
    scene_aligned_perception: LanguageAlignedPerception,
    template_variables_to_object_match_nodes: Mapping[SyntaxSemanticsVariable,
                                                      ObjectSemanticNode],
) -> PerceptionGraphTemplate:
    """
    Create a hypothesis for the semantics of a preposition based on the observed scene.

    Our current implementation is to just include the content
    on the path between the recognized object nodes
    and one hop away from that path.
    """

    # The directions of edges in the perception graph are not necessarily meaningful
    # from the point-of-view of hypothesis generation, so we need an undirected copy
    # of the graph.
    perception_digraph = scene_aligned_perception.perception_graph.copy_as_digraph(
    )
    perception_graph_undirected = perception_digraph.to_undirected(
        # as_view=True loses determinism
        as_view=False)

    if {SLOT1, SLOT2} != set(template_variables_to_object_match_nodes.keys()):
        raise RuntimeError(
            "Can only make a preposition hypothesis if the recognized "
            "objects are aligned to SurfaceTemplateVariables SLOT1 and SLOT2")

    slot1_object = template_variables_to_object_match_nodes[SLOT1]
    slot2_object = template_variables_to_object_match_nodes[SLOT2]

    # The core of our hypothesis for the semantics of a preposition is all nodes
    # along the shortest path between the two objects involved in the perception graph.
    hypothesis_spine_nodes: ImmutableSet[PerceptionGraphNode] = immutableset(
        flatten(
            # if there are multiple paths between the object match nodes,
            # we aren't sure which are relevant, so we include them all in our hypothesis
            # and figure we can trim out irrelevant stuff as we make more observations.
            all_shortest_paths(perception_graph_undirected, slot2_object,
                               slot1_object)))

    # Along the core of our hypothesis we also want to collect the predecessors and successors
    hypothesis_nodes_mutable = []
    for node in hypothesis_spine_nodes:
        if node not in {slot1_object, slot2_object}:
            for successor in perception_digraph.successors(node):
                if not isinstance(successor, ObjectPerception):
                    hypothesis_nodes_mutable.append(successor)
            for predecessor in perception_digraph.predecessors(node):
                if not isinstance(predecessor, ObjectPerception):
                    hypothesis_nodes_mutable.append(predecessor)

    hypothesis_nodes_mutable.extend(hypothesis_spine_nodes)

    # We wrap the nodes in an immutable set to remove duplicates
    # while preserving iteration determinism.
    hypothesis_nodes = immutableset(hypothesis_nodes_mutable)

    preposition_sub_graph = PerceptionGraph(
        digraph_with_nodes_sorted_by(
            subgraph(perception_digraph, hypothesis_nodes), _graph_node_order))

    return PerceptionGraphTemplate.from_graph(
        preposition_sub_graph, template_variables_to_object_match_nodes)
Example #23
 def _extract_perception_graph(
     self, perception: PerceptualRepresentation[
         DevelopmentalPrimitivePerceptionFrame]
 ) -> PerceptionGraph:
     return PerceptionGraph.from_frame(perception.frames[0])
Example #24
    def match_objects(
        self,
        perception_semantic_alignment: PerceptionSemanticAlignment,
        *,
        post_process: Callable[[PerceptionGraph, AbstractSet[SemanticNode]],
                               Tuple[PerceptionGraph,
                                     AbstractSet[SemanticNode]],
                               ] = default_post_process_enrichment,
    ) -> Tuple[PerceptionSemanticAlignment, Mapping[Tuple[str, ...],
                                                    ObjectSemanticNode]]:
        r"""
        Recognize known objects in a `PerceptionGraph`.

        The matched portions of the graph will be replaced with `ObjectSemanticNode`\ s
        which will inherit all relationships of any nodes internal to the matched portions
        with any external nodes.

        This is useful as a pre-processing step
        before prepositional and verbal learning experiments.
        """

        # pylint: disable=global-statement,invalid-name
        global cumulative_millis_in_successful_matches_ms
        global cumulative_millis_in_failed_matches_ms

        object_nodes: List[Tuple[Tuple[str, ...], ObjectSemanticNode]] = []
        perception_graph = perception_semantic_alignment.perception_graph
        is_dynamic = perception_semantic_alignment.perception_graph.dynamic

        if is_dynamic:
            concepts_to_patterns = self._concepts_to_dynamic_patterns
        else:
            concepts_to_patterns = self._concepts_to_static_patterns

        # We special-case handling of the ground perception
        # because we don't want to remove it from the graph; we just want to use its
        # object node as a recognized object. The situation "a box on the ground"
        # prompted the need to recognize the ground.
        graph_to_return = perception_graph
        for node in graph_to_return._graph.nodes:  # pylint:disable=protected-access
            if node == GROUND_PERCEPTION:
                matched_object_node = ObjectSemanticNode(GROUND_OBJECT_CONCEPT)
                if LanguageMode.ENGLISH == self._language_mode:
                    object_nodes.append(
                        ((f"{GROUND_OBJECT_CONCEPT.debug_string}", ),
                         matched_object_node))
                elif LanguageMode.CHINESE == self._language_mode:
                    object_nodes.append((("di4 myan4", ), matched_object_node))
                else:
                    raise RuntimeError("Invalid language_generator")
                # We construct a fake match which is only the ground perception node
                subgraph_of_root = subgraph(perception_graph.copy_as_digraph(),
                                            [node])
                pattern_match = PerceptionGraphPatternMatch(
                    matched_pattern=PerceptionGraphPattern(
                        graph=subgraph_of_root,
                        dynamic=perception_graph.dynamic),
                    graph_matched_against=perception_graph,
                    matched_sub_graph=PerceptionGraph(
                        graph=subgraph_of_root,
                        dynamic=perception_graph.dynamic),
                    pattern_node_to_matched_graph_node=immutabledict(),
                )
                graph_to_return = replace_match_with_object_graph_node(
                    matched_object_node, graph_to_return, pattern_match)

        candidate_object_subgraphs = extract_candidate_objects(
            perception_graph)

        for candidate_object_graph in candidate_object_subgraphs:
            num_object_nodes = candidate_object_graph.count_nodes_matching(
                lambda node: isinstance(node, ObjectPerception))

            for (concept, pattern) in concepts_to_patterns.items():
                # As an optimization, we count how many sub-object nodes
                # are in the graph and the pattern.
                # If they aren't the same, the match is impossible
                # and we can bail out early.
                if num_object_nodes != self._concept_to_num_subobjects[concept]:
                    continue

                with Timer(factor=1000) as t:
                    matcher = pattern.matcher(candidate_object_graph,
                                              match_mode=MatchMode.OBJECT)
                    pattern_match = first(
                        matcher.matches(use_lookahead_pruning=True), None)
                if pattern_match:
                    cumulative_millis_in_successful_matches_ms += t.elapsed
                    matched_object_node = ObjectSemanticNode(concept)

                    # We wrap the concept in a tuple because it could in theory
                    # be multiple tokens, even though currently it never is.
                    if self._language_mode == LanguageMode.ENGLISH:
                        object_nodes.append(
                            ((concept.debug_string, ), matched_object_node))
                    elif self._language_mode == LanguageMode.CHINESE:
                        if concept.debug_string == "me":
                            object_nodes.append(
                                (("wo3", ), matched_object_node))
                        elif concept.debug_string == "you":
                            object_nodes.append(
                                (("ni3", ), matched_object_node))
                        mappings = (
                            GAILA_PHASE_1_CHINESE_LEXICON.
                            _ontology_node_to_word  # pylint:disable=protected-access
                        )
                        for k, v in mappings.items():
                            if k.handle == concept.debug_string:
                                debug_string = str(v.base_form)
                                object_nodes.append(
                                    ((debug_string, ), matched_object_node))
                    graph_to_return = replace_match_with_object_graph_node(
                        matched_object_node, graph_to_return, pattern_match)
                    # We match each candidate object against only one object type.
                    # See https://github.com/isi-vista/adam/issues/627
                    break
                else:
                    cumulative_millis_in_failed_matches_ms += t.elapsed
        if object_nodes:
            logging.info(
                "Object recognizer recognized: %s",
                [concept for (concept, _) in object_nodes],
            )
        logging.info(
            "object matching: ms in success: %s, ms in failed: %s",
            cumulative_millis_in_successful_matches_ms,
            cumulative_millis_in_failed_matches_ms,
        )
        semantic_object_nodes = immutableset(node
                                             for (_, node) in object_nodes)

        post_process_graph, post_process_nodes = post_process(
            graph_to_return, semantic_object_nodes)

        return (
            perception_semantic_alignment.
            copy_with_updated_graph_and_added_nodes(
                new_graph=post_process_graph, new_nodes=post_process_nodes),
            immutabledict(object_nodes),
        )
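
The sub-object counting in the loop above is a cheap filter applied before the expensive pattern match. Here is a minimal sketch of the same pruning idea, assuming networkx subgraph isomorphism in place of ADAM's pattern matcher (the `kind` attribute and all names are stand-ins):

import networkx as nx
from networkx.algorithms import isomorphism

def count_object_nodes(graph: nx.DiGraph) -> int:
    # Stand-in for counting ObjectPerception nodes in a candidate or pattern.
    return sum(1 for _, data in graph.nodes(data=True) if data.get("kind") == "object")

def recognize(candidate: nx.DiGraph, concept_to_pattern: dict) -> str:
    num_objects = count_object_nodes(candidate)
    for concept, pattern in concept_to_pattern.items():
        # Cheap rejection: a pattern with a different number of object nodes
        # can never match, so skip the worst-case-exponential matching step.
        if count_object_nodes(pattern) != num_objects:
            continue
        if isomorphism.DiGraphMatcher(candidate, pattern).subgraph_is_isomorphic():
            return concept
    return "unrecognized"

Counting nodes is linear while subgraph matching is worst-case exponential, so this filter pays for itself as the pattern library grows.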
Example no. 25
def test_semantically_infeasible_partial_match():
    """
    Tests whether semantic feasibility works as intended
    """

    target_object = BOX
    # Create train and test templates for the target objects
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    whole_perception_pattern = PerceptionGraphPattern.from_graph(
        perception).perception_graph_pattern

    # Create an altered perception graph in which each color node is replaced with black
    altered_perception_digraph = perception.copy_as_digraph()
    nodes_to_remove = []
    edges = []
    different_nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, we make it black
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            new_node = (RgbColorPerception(0, 0, 0), node[1])
            # Get edge information
            for edge in perception.copy_as_digraph().edges(data=True):
                if edge[0] == node:
                    edges.append((new_node, edge[1], edge[2]))
                if edge[1] == node:
                    edges.append((edge[0], new_node, edge[2]))
            nodes_to_remove.append(node)
            different_nodes.append(new_node)

    # remove original node
    altered_perception_digraph.remove_nodes_from(nodes_to_remove)

    # add new nodes
    for node in different_nodes:
        altered_perception_digraph.add_node(node)
    # add edge information
    for edge in edges:
        altered_perception_digraph.add_edge(edge[0], edge[1])
        for k, v in edge[2].items():
            altered_perception_digraph[edge[0]][edge[1]][k] = v

    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        PerceptionGraph(altered_perception_digraph)).perception_graph_pattern

    partial_digraph = altered_perception_pattern.copy_as_digraph()
    partial_digraph.remove_nodes_from([
        node for node in partial_digraph.nodes
        if isinstance(node, IsColorNodePredicate)
    ])

    # Start the matching process, get a partial match
    matcher = whole_perception_pattern.matcher(perception,
                                               match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        matcher.matches(use_lookahead_pruning=True))
    partial_mapping = partial_match.pattern_node_to_matched_graph_node

    # Try to extend the partial mapping; we expect a semantic-infeasibility RuntimeError
    second_matcher = whole_perception_pattern.matcher(
        PerceptionGraph(altered_perception_digraph),
        match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from first matcher with original perception graph)
    # semantically doesn't match the one in the altered version (second matcher with altered graph)
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
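
The semantic feasibility this test exercises, that a node pair must agree on content and not merely structure, has a close analogue in networkx's `node_match` hook. A minimal sketch (the color values and node names are stand-ins):

import networkx as nx
from networkx.algorithms import isomorphism

red_scene = nx.Graph()
red_scene.add_node("obj", color=(255, 0, 0))

black_scene = nx.Graph()
black_scene.add_node("obj", color=(0, 0, 0))

def same_color(node_a_attrs, node_b_attrs):
    return node_a_attrs["color"] == node_b_attrs["color"]

# Structurally the two graphs are identical...
print(isomorphism.GraphMatcher(red_scene, black_scene).is_isomorphic())  # True
# ...but requiring semantic agreement on color rules the match out.
print(isomorphism.GraphMatcher(red_scene, black_scene, node_match=same_color).is_isomorphic())  # False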
Example no. 26
def replace_match_root_with_object_semantic_node(
    object_semantic_node: ObjectSemanticNode,
    current_perception: PerceptionGraph,
    pattern_match: PerceptionGraphPatternMatch,
) -> PerceptionGraph:
    """
    Internal function to replace the root node of the perception matched by the object pattern
    with an `ObjectSemanticNode`.

    The `ObjectSemanticNode` inherits both the root node's internal relationships and all external
    relationships involving either the root node or its children.
    """
    perception_digraph = current_perception.copy_as_digraph()
    perception_digraph.add_node(object_semantic_node)

    matched_subgraph_nodes: ImmutableSet[PerceptionGraphNode] = immutableset(
        pattern_match.matched_sub_graph._graph.nodes,  # pylint:disable=protected-access
        disable_order_check=True,
    )

    root = _get_root_object_perception(perception_digraph,
                                       matched_subgraph_nodes)

    # Multiple sub-objects of a matched object may link to the same property
    # (for example, to a color shared by all the parts).
    # In this case, we want the shared object node to link to this property only once.
    external_properties: Set[Union[OntologyNode, ObjectSemanticNode]] = set()

    for matched_subgraph_node in matched_subgraph_nodes:
        if isinstance(matched_subgraph_node, ObjectSemanticNode):
            raise RuntimeError(
                f"We do not currently allow object recognitions to themselves "
                f"operate over other object recognitions, but got match "
                f"{pattern_match.matched_sub_graph}")

        # A pattern might refer to shared parts of the world like the learner
        # or the ground, and we don't want the replacement root to inherit the
        # shared world item's relationships.
        if matched_subgraph_node in SHARED_WORLD_ITEMS:
            continue

        # If there is an edge from the matched sub-graph to a node outside it,
        # also add an edge from the object match node to that node.
        for matched_subgraph_node_successor in perception_digraph.successors(
                matched_subgraph_node):
            edge_label = _get_edge_label(perception_digraph,
                                         matched_subgraph_node,
                                         matched_subgraph_node_successor)

            # don't want to add edges which are internal to the matched sub-graph
            if matched_subgraph_node_successor not in matched_subgraph_nodes:
                if edge_equals_ignoring_temporal_scope(edge_label,
                                                       HAS_PROPERTY_LABEL):
                    # Prevent multiple `has-property` assertions to the same
                    # color node on a recognized object.
                    if matched_subgraph_node_successor[0] in external_properties or (
                            matched_subgraph_node_successor[0]
                            in {SMALLER_THAN, BIGGER_THAN}):
                        if (perception_digraph.degree(
                                matched_subgraph_node_successor) != 1):
                            raise_graph_exception(
                                f"Node {matched_subgraph_node_successor} "
                                f"appears to be a duplicate property node, "
                                f"but has degree != 1",
                                current_perception,
                            )
                        continue
                    else:
                        external_properties.add(
                            matched_subgraph_node_successor[0])

                perception_digraph.add_edge(
                    object_semantic_node,
                    matched_subgraph_node_successor,
                    label=edge_label,
                )

        # If there is an edge to the matched sub-graph from a node outside it,
        # also add an edge to the object match node from that node.
        for matched_subgraph_node_predecessor in perception_digraph.predecessors(
                matched_subgraph_node):
            edge_label = _get_edge_label(
                perception_digraph,
                matched_subgraph_node_predecessor,
                matched_subgraph_node,
            )

            # don't want to add edges which are internal to the matched sub-graph
            if matched_subgraph_node_predecessor not in matched_subgraph_nodes:
                if edge_equals_ignoring_temporal_scope(edge_label,
                                                       HAS_PROPERTY_LABEL):
                    # Prevent multiple `has-property` assertions to the same
                    # color node on a recognized object.
                    if isinstance(matched_subgraph_node_predecessor,
                                  ObjectSemanticNode):
                        prop = matched_subgraph_node_predecessor
                    else:
                        prop = matched_subgraph_node_predecessor[0]
                    if prop in external_properties:
                        if (perception_digraph.degree(
                                matched_subgraph_node_predecessor) != 1):
                            raise_graph_exception(
                                f"Node {matched_subgraph_node_predecessor} "
                                f"appears to be a duplicate property node, "
                                f"but has degree != 1",
                                current_perception,
                            )
                        continue
                    else:
                        external_properties.add(prop)

                perception_digraph.add_edge(
                    matched_subgraph_node_predecessor,
                    object_semantic_node,
                    label=edge_label,
                )

    if root in SHARED_WORLD_ITEMS:
        raise RuntimeError(
            f"Pattern match root cannot be a shared world item, "
            f"but got match {pattern_match}")

    perception_digraph.remove_node(root)

    # We want to re-add any relationships linked directly to the root node of an object.
    # Example: water is a liquid
    # Example: this hand is a part of this person
    # These may be relevant to learning verb semantics
    # (e.g. you can only drink a liquid)
    _add_relationships_linked_to_root_object_perception(
        original_graph=current_perception.copy_as_digraph(),
        output_graph=perception_digraph,
        matched_nodes=matched_subgraph_nodes,
        matched_object_node=object_semantic_node,
    )

    return PerceptionGraph(perception_digraph,
                           dynamic=current_perception.dynamic)
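
A simplified version of the edge rewiring above, collapsing all matched nodes into one replacement and copying boundary-crossing edges, fits in a few lines of networkx. Note that the real function is more careful: it removes only the root, skips shared world items, and deduplicates property edges. This is only a sketch of the core rewiring step:

import networkx as nx

def collapse_matched_subgraph(graph: nx.DiGraph, matched_nodes: set, replacement) -> nx.DiGraph:
    """Replace matched_nodes with a single node, rewiring edges that cross the boundary."""
    out = graph.copy()
    out.add_node(replacement)
    for node in matched_nodes:
        for succ in graph.successors(node):
            if succ not in matched_nodes:
                out.add_edge(replacement, succ, **graph.edges[node, succ])
        for pred in graph.predecessors(node):
            if pred not in matched_nodes:
                out.add_edge(pred, replacement, **graph.edges[pred, node])
    out.remove_nodes_from(matched_nodes)
    return out

g = nx.DiGraph()
g.add_edge("hand", "person", label="part-of")
g.add_edge("person", "ground", label="on")
collapsed = collapse_matched_subgraph(g, {"hand", "person"}, "PERSON-NODE")
print(list(collapsed.edges(data=True)))  # [('PERSON-NODE', 'ground', {'label': 'on'})]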
Example no. 27
def test_syntactically_infeasible_partial_match():
    """
    Tests whether syntactic feasibility works as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    # Create an altered perception graph by adding extra edges to the color nodes
    altered_perception_digraph = perception.copy_as_digraph()
    nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, remember it so we can attach extra edges below
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            nodes.append(node)

    # add the extra edges
    for node in nodes:
        random_node = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(node, random_node, label=PART_OF)
        random_node_2 = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(random_node_2, node, label=PART_OF)

    altered_perception_perception_graph = PerceptionGraph(
        altered_perception_digraph)
    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        altered_perception_perception_graph).perception_graph_pattern

    # Start the matching process, get a partial match
    first_matcher = altered_perception_pattern.matcher(
        altered_perception_perception_graph, match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        first_matcher.matches(use_lookahead_pruning=True), None)
    partial_mapping = partial_match.pattern_node_to_matched_graph_node
    # Try to extend the partial mapping; we expect a syntactic-infeasibility RuntimeError
    second_matcher = altered_perception_pattern.matcher(
        perception, match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from first matcher with original perception graph)
    # syntactically doesn't match the one in the altered version (second matcher with altered graph)
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
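
Syntactic (structural) infeasibility needs no node semantics at all: an extra edge in the pattern can make it impossible to embed in the unchanged graph. A minimal sketch, independent of ADAM's matcher:

import networkx as nx
from networkx.algorithms import isomorphism

target = nx.DiGraph([("x", "y")])
pattern = nx.DiGraph([("a", "b")])

# The one-edge pattern embeds in the target...
print(isomorphism.DiGraphMatcher(target, pattern).subgraph_is_isomorphic())  # True

# ...but adding a back-edge makes embedding structurally impossible,
# regardless of what the nodes mean.
pattern.add_edge("b", "a")
print(isomorphism.DiGraphMatcher(target, pattern).subgraph_is_isomorphic())  # False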
Example no. 28
def test_allowed_matches_with_bad_partial_match():
    """
    Tests whether PatternMatching's allowed_matches functionality works as intended when a bad
    partial match is specified.
    """
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    pattern1: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) == "box_0"
        })).perception_graph_pattern

    pattern2: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) in {"box_0", "the ground"}
        })).perception_graph_pattern

    pattern1_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern1._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_ground: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "the ground"),
    )

    matcher = PatternMatching(
        pattern=pattern1,
        graph_to_match_against=pattern2,
        matching_pattern_against_pattern=True,
        match_mode=MatchMode.OBJECT,
        allowed_matches=immutablesetmultidict([(pattern1_box, pattern2_box)]),
    )
    with pytest.raises(RuntimeError):
        first(
            matcher.matches(
                initial_partial_match={pattern1_box: pattern2_ground},
                use_lookahead_pruning=True,
            ),
            None,
        )
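
The `allowed_matches` constraint, pinning a pattern node to an explicit whitelist of graph nodes, can be approximated in networkx by storing node identities as attributes and consulting the whitelist inside `node_match`. All names here are stand-ins for ADAM's pattern and perception nodes:

import networkx as nx
from networkx.algorithms import isomorphism

graph = nx.Graph()
graph.add_node("g_box", name="g_box")
graph.add_node("g_ground", name="g_ground")
graph.add_edge("g_box", "g_ground")

pattern = nx.Graph()
pattern.add_node("p_box", name="p_box")
pattern.add_node("p_other", name="p_other")
pattern.add_edge("p_box", "p_other")

# p_box may only ever match g_box; unlisted pattern nodes are unconstrained.
allowed = {"p_box": {"g_box"}}

def node_match(graph_attrs, pattern_attrs):
    permitted = allowed.get(pattern_attrs["name"])
    return permitted is None or graph_attrs["name"] in permitted

gm = isomorphism.GraphMatcher(graph, pattern, node_match=node_match)
print(gm.subgraph_is_isomorphic())  # True: the only surviving mapping sends p_box to g_box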
Example no. 29
 def _extract_perception_graph(
     self, perception: PerceptualRepresentation[
         DevelopmentalPrimitivePerceptionFrame]
 ) -> PerceptionGraph:
     return PerceptionGraph.from_dynamic_perceptual_representation(
         perception)