Example #1
def pattern_remove_incomplete_region_or_spatial_path(
    perception_graph: PerceptionGraphPattern
) -> PerceptionGraphPattern:
    """
    Helper function to return a `PerceptionGraphPattern` in which every region
    and spatial path perception has a reference object; nodes lacking one are removed.
    """
    graph = perception_graph.copy_as_digraph()
    region_and_path_nodes: ImmutableSet[NodePredicate] = immutableset(
        node
        for node in graph.nodes
        if isinstance(node, (IsPathPredicate, RegionPredicate))
    )
    nodes_without_reference: List[NodePredicate] = []
    for node in region_and_path_nodes:
        has_reference_edge: bool = False
        for successor in graph.successors(node):
            predicate = graph.edges[node, successor]["predicate"]
            if isinstance(predicate, RelationTypeIsPredicate):
                if predicate.relation_type in [
                    REFERENCE_OBJECT_LABEL,
                    REFERENCE_OBJECT_DESTINATION_LABEL,
                    REFERENCE_OBJECT_SOURCE_LABEL,
                ]:
                    has_reference_edge = True
                    break
        if not has_reference_edge:
            nodes_without_reference.append(node)

    logging.info(
        f"Removing incomplete regions and paths. "
        f"Removing nodes: {nodes_without_reference}"
    )
    graph.remove_nodes_from(nodes_without_reference)

    def sort_by_num_nodes(g: DiGraph) -> int:
        return len(g.nodes)

    # We should consider doing this a different way,
    # as this approach brute-forces the problem rather than solving it methodically.
    if number_weakly_connected_components(graph) > 1:
        components = [
            subgraph(graph, component)
            for component in weakly_connected_components(graph)
        ]
        components.sort(key=sort_by_num_nodes, reverse=True)
        computed_graph = subgraph(graph, components[0].nodes)
        removed_nodes: List[NodePredicate] = []
        for i in range(1, len(components)):
            removed_nodes.extend(components[i].nodes)
        logging.info(f"Cleanup disconnected elements. Removing: {removed_nodes}")
    else:
        computed_graph = graph

    return PerceptionGraphPattern(computed_graph, dynamic=perception_graph.dynamic)
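A minimal usage sketch (hypothetical; it assumes a `perception` PerceptionGraph built as in the test examples further down this page):

pattern = PerceptionGraphPattern.from_graph(perception).perception_graph_pattern
# Drop region/path pattern nodes that lack a reference object and keep only
# the largest weakly connected component of what remains.
cleaned_pattern = pattern_remove_incomplete_region_or_spatial_path(pattern)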
Example #2
def test_successfully_extending_partial_match():
    """
    Tests whether we can match a perception pattern against a perception graph
    when initializing the search from a partial match.
    """

    target_object = BOX
    # Create a training template for the target object
    train_obj_object = object_variable("obj-with-color", target_object)

    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    # Original perception graph
    perception = PerceptionGraph.from_frame(
        perceptual_representation.frames[0])

    # Create a perception pattern for the whole thing
    # and also a perception pattern for a subset of the whole pattern
    whole_perception_pattern = PerceptionGraphPattern.from_graph(
        perception).perception_graph_pattern

    partial_digraph = whole_perception_pattern.copy_as_digraph()
    partial_digraph.remove_nodes_from([
        node for node in partial_digraph.nodes
        if isinstance(node, IsColorNodePredicate)
    ])
    partial_perception_pattern = PerceptionGraphPattern(partial_digraph)

    # get our initial match by matching the partial pattern
    matcher = partial_perception_pattern.matcher(
        perception, match_mode=MatchMode.NON_OBJECT)

    partial_match: PerceptionGraphPatternMatch = first(
        matcher.matches(use_lookahead_pruning=True))
    partial_mapping = partial_match.pattern_node_to_matched_graph_node

    # Try to extend the partial mapping, to create a complete mapping
    matcher_2 = whole_perception_pattern.matcher(
        perception, match_mode=MatchMode.NON_OBJECT)
    complete_match: PerceptionGraphPatternMatch = first(
        matcher_2.matches(initial_partial_match=partial_mapping,
                          use_lookahead_pruning=True),
        None,
    )
    complete_mapping = complete_match.pattern_node_to_matched_graph_node
    assert len(complete_mapping) == len(perception.copy_as_digraph().nodes)
    assert len(complete_mapping) == len(
        whole_perception_pattern.copy_as_digraph().nodes)
Example #3
def get_largest_matching_pattern(
    pattern: PerceptionGraphPattern,
    graph: PerceptionGraph,
    *,
    debug_callback: Optional[DebugCallableType] = None,
    graph_logger: Optional[GraphLogger] = None,
    ontology: Ontology,
    match_ratio: Optional[float] = None,
    match_mode: MatchMode,
    trim_after_match: Optional[Callable[[PerceptionGraphPattern],
                                        PerceptionGraphPattern]] = None,
    allowed_matches: ImmutableSetMultiDict[Any, Any] = immutablesetmultidict(),
) -> Optional[PerceptionGraphPattern]:
    """ Helper function to return the largest matching `PerceptionGraphPattern`
    for the learner from a perception pattern and graph pair."""
    matching = pattern.matcher(
        graph,
        debug_callback=debug_callback,
        match_mode=match_mode,
        allowed_matches=allowed_matches,
    )
    return matching.relax_pattern_until_it_matches(
        graph_logger=graph_logger,
        ontology=ontology,
        min_ratio=match_ratio,
        trim_after_match=trim_after_match,
    )
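A hypothetical usage sketch; `pattern` and `graph` are assumed to be a previously built PerceptionGraphPattern and PerceptionGraph, and the 0.3 ratio is illustrative only:

largest_match = get_largest_matching_pattern(
    pattern,
    graph,
    ontology=GAILA_PHASE_1_ONTOLOGY,
    match_ratio=0.3,
    match_mode=MatchMode.NON_OBJECT,
)
if largest_match is None:
    logging.info("No sufficiently large matching pattern was found")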
Example #4
    def for_ontology_types(
        ontology_types: Iterable[OntologyNode],
        determiners: Iterable[str],
        ontology: Ontology,
        language_mode: LanguageMode,
        *,
        perception_generator:
        HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator,
    ) -> "ObjectRecognizer":
        ontology_types_to_concepts = {
            obj_type: ObjectConcept(obj_type.handle)
            for obj_type in ontology_types
        }

        return ObjectRecognizer(
            concepts_to_static_patterns=_sort_mapping_by_pattern_complexity(
                immutabledict((
                    concept,
                    PerceptionGraphPattern.from_ontology_node(
                        obj_type,
                        ontology,
                        perception_generator=perception_generator),
                ) for (obj_type,
                       concept) in ontology_types_to_concepts.items())),
            determiners=determiners,
            concepts_to_names={
                concept: obj_type.handle
                for obj_type, concept in ontology_types_to_concepts.items()
            },
            language_mode=language_mode,
        )
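A hypothetical construction sketch; the determiner list is illustrative, while the remaining arguments reuse names appearing elsewhere on this page:

recognizer = ObjectRecognizer.for_ontology_types(
    PHASE_1_CURRICULUM_OBJECTS,
    determiners=["a", "the"],
    ontology=GAILA_PHASE_1_ONTOLOGY,
    language_mode=LanguageMode.ENGLISH,
    perception_generator=GAILA_PHASE_1_PERCEPTION_GENERATOR,
)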
Example #5
    def from_graph(
        perception_graph: PerceptionGraph,
        template_variable_to_matched_object_node: Mapping[
            SyntaxSemanticsVariable, ObjectSemanticNode
        ],
    ) -> "PerceptionGraphTemplate":
        # It is possible the perception graph has additional recognized objects
        # which are not aligned to surface template slots.
        # We assume these are not arguments of the verb and remove them from the perception
        # before creating a pattern.
        pattern_from_graph = PerceptionGraphPattern.from_graph(perception_graph)
        pattern_graph = pattern_from_graph.perception_graph_pattern
        matched_object_to_matched_predicate = (
            pattern_from_graph.perception_graph_node_to_pattern_node
        )

        template_variable_to_pattern_node: List[Any] = []

        for (
            template_variable,
            object_node,
        ) in template_variable_to_matched_object_node.items():
            if object_node in matched_object_to_matched_predicate:
                template_variable_to_pattern_node.append(
                    (template_variable, matched_object_to_matched_predicate[object_node])
                )

        return PerceptionGraphTemplate(
            graph_pattern=pattern_graph,
            template_variable_to_pattern_node=template_variable_to_pattern_node,
        )
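A hypothetical usage sketch; `slot` and `box_node` stand in for a real SyntaxSemanticsVariable and an ObjectSemanticNode produced during language-perception alignment:

template = PerceptionGraphTemplate.from_graph(
    perception_graph,
    template_variable_to_matched_object_node={slot: box_node},
)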
Example #6
    def _enrich_post_process(
        perception_graph_after_matching: PerceptionGraph,
        immutable_new_nodes: AbstractSet[SemanticNode],
    ) -> Tuple[PerceptionGraph, AbstractSet[SemanticNode]]:
        new_nodes = []
        perception_graph_after_processing = perception_graph_after_matching
        for candidate_object_graph in extract_candidate_objects(
                perception_graph_after_matching,
                sort_by_increasing_size=False):
            fake_pattern_graph = PerceptionGraphPattern.from_graph(
                candidate_object_graph)
            fake_object_semantic_node = ObjectSemanticNode(
                concept=FunctionalObjectConcept("unknown_object"))
            perception_graph_after_processing = replace_match_with_object_graph_node(
                matched_object_node=fake_object_semantic_node,
                current_perception=perception_graph_after_processing,
                pattern_match=PerceptionGraphPatternMatch(
                    matched_pattern=fake_pattern_graph.
                    perception_graph_pattern,
                    graph_matched_against=perception_graph_after_processing,
                    matched_sub_graph=candidate_object_graph,
                    pattern_node_to_matched_graph_node=fake_pattern_graph.
                    perception_graph_node_to_pattern_node,
                ),
            ).perception_graph_after_replacement
            new_nodes.append(fake_object_semantic_node)

        return (
            perception_graph_after_processing,
            immutableset(chain(immutable_new_nodes, new_nodes)),
        )
Example #7
def test_matching_static_vs_dynamic_graphs():
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)
    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)
    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])

    perception_pattern = PerceptionGraphPattern.from_graph(
        perception_graph).perception_graph_pattern

    temporal_perception_pattern = perception_pattern.copy_with_temporal_scopes(
        required_temporal_scopes=[TemporalScope.AFTER])

    # Test runtime error for matching static pattern against dynamic graph and vice versa

    with pytest.raises(RuntimeError):
        perception_pattern.matcher(temporal_perception_graph,
                                   match_mode=MatchMode.NON_OBJECT)

    with pytest.raises(RuntimeError):
        temporal_perception_pattern.matcher(perception_graph,
                                            match_mode=MatchMode.NON_OBJECT)
Example #8
    def _hypothesis_from_perception(
        self, preprocessed_input: LanguageAlignedPerception
    ) -> PerceptionGraphTemplate:
        new_hypothesis = PerceptionGraphPattern.from_graph(
            preprocessed_input.perception_graph).perception_graph_pattern
        return PerceptionGraphTemplate(
            graph_pattern=new_hypothesis,
            template_variable_to_pattern_node=immutabledict(),
        )
Example #9
def candidate_object_hypotheses(
    language_perception_semantic_alignment: LanguagePerceptionSemanticAlignment
) -> Sequence[PerceptionGraphTemplate]:
    """
    Given a learning input, returns all possible meaning hypotheses.
    """
    return [
        PerceptionGraphTemplate(
            graph_pattern=PerceptionGraphPattern.from_graph(
                object_
            ).perception_graph_pattern
        )
        for object_ in get_objects_from_perception(
            language_perception_semantic_alignment.perception_semantic_alignment.perception_graph
        )
    ]
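A usage sketch (hypothetical): `alignment` is assumed to be the LanguagePerceptionSemanticAlignment for the current learning step:

for hypothesis in candidate_object_hypotheses(alignment):
    logging.info("Candidate meaning hypothesis: %s", hypothesis.graph_pattern)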
Example #10
File: objects.py, Project: gabbard/adam
    def _hypotheses_from_perception(
        self,
        learning_state: LanguagePerceptionSemanticAlignment,
        bound_surface_template: SurfaceTemplateBoundToSemanticNodes,
    ) -> AbstractSet[PerceptionGraphTemplate]:
        if bound_surface_template.slot_to_semantic_node:
            raise RuntimeError(
                "Object learner should not have slot to semantic node alignments!"
            )

        return immutableset(
            PerceptionGraphTemplate(
                graph_pattern=PerceptionGraphPattern.from_graph(
                    candidate_object).perception_graph_pattern,
                template_variable_to_pattern_node=immutabledict(),
            ) for candidate_object in extract_candidate_objects(
                learning_state.perception_semantic_alignment.perception_graph))
Example #11
    def _enrich_post_process(
        self,
        perception_graph_after_matching: PerceptionGraph,
        immutable_new_nodes: AbstractSet[SemanticNode],
    ) -> Tuple[PerceptionGraph, AbstractSet[SemanticNode]]:
        object_root_nodes = immutableset(  # pylint:disable=protected-access
            node for node in perception_graph_after_matching._graph.nodes  # pylint:disable=protected-access
            if isinstance(node, ObjectPerception))
        new_nodes = []
        perception_graph_after_processing = perception_graph_after_matching
        for object_root_node in object_root_nodes:
            fake_subgraph = subgraph(  # pylint:disable=protected-access
                perception_graph_after_matching._graph,  # pylint:disable=protected-access
                [object_root_node],
            )
            fake_perception_graph = PerceptionGraph(
                graph=fake_subgraph,
                dynamic=perception_graph_after_matching.dynamic)
            fake_pattern_graph = PerceptionGraphPattern.from_graph(
                fake_perception_graph)
            fake_object_semantic_node = ObjectSemanticNode(
                concept=FunctionalObjectConcept("unknown_object"))
            # perception_graph_after_processing = replace_match_root_with_object_semantic_node(
            #     object_semantic_node=fake_object_semantic_node,
            perception_graph_after_processing = replace_match_with_object_graph_node(
                matched_object_node=fake_object_semantic_node,
                current_perception=perception_graph_after_processing,
                pattern_match=PerceptionGraphPatternMatch(
                    matched_pattern=fake_pattern_graph.
                    perception_graph_pattern,
                    graph_matched_against=perception_graph_after_matching,
                    matched_sub_graph=fake_perception_graph,
                    pattern_node_to_matched_graph_node=fake_pattern_graph.
                    perception_graph_node_to_pattern_node,
                ),
            ).perception_graph_after_replacement
            new_nodes.append(fake_object_semantic_node)

        return (
            perception_graph_after_processing,
            immutableset(chain(immutable_new_nodes, new_nodes)),
        )
Example #12
def test_syntactically_infeasible_partial_match():
    """
    Tests whether syntactic feasibility works as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    # Create an altered perception graph where we add extra edges to the color node
    altered_perception_digraph = perception.copy_as_digraph()
    nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, we add an extra edge to it
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            nodes.append(node)

    # change edge information
    for node in nodes:
        random_node = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(node, random_node, label=PART_OF)
        random_node_2 = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(random_node_2, node, label=PART_OF)

    altered_perception_perception_graph = PerceptionGraph(
        altered_perception_digraph)
    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        altered_perception_perception_graph).perception_graph_pattern

    # Start the matching process, get a partial match
    first_matcher = altered_perception_pattern.matcher(
        altered_perception_perception_graph, match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        first_matcher.matches(use_lookahead_pruning=True), None)
    partial_mapping = partial_match.pattern_node_to_matched_graph_node
    # Try to extend the partial mapping; we expect a syntactic infeasibility runtime error
    second_matcher = altered_perception_pattern.matcher(
        perception, match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from first matcher with original perception graph)
    # syntactically doesn't match the one in the altered version (second matcher with altered graph)
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
Example #13
    def _hypothesis_from_perception(
            self, perception: PerceptionGraph) -> PerceptionGraphTemplate:
        return PerceptionGraphTemplate(
            graph_pattern=PerceptionGraphPattern.from_graph(
                perception).perception_graph_pattern)
Example #14
    def match_objects(
        self,
        perception_semantic_alignment: PerceptionSemanticAlignment,
        *,
        post_process: Callable[[PerceptionGraph, AbstractSet[SemanticNode]],
                               Tuple[PerceptionGraph,
                                     AbstractSet[SemanticNode]],
                               ] = default_post_process_enrichment,
    ) -> Tuple[PerceptionSemanticAlignment, Mapping[Tuple[str, ...],
                                                    ObjectSemanticNode]]:
        r"""
        Recognize known objects in a `PerceptionGraph`.

        Each matched portion of the graph will be replaced with an `ObjectSemanticNode`,
        which will inherit all relationships between any nodes internal to the matched
        portion and any external nodes.

        This is useful as a pre-processing step
        before prepositional and verbal learning experiments.
        """

        # pylint: disable=global-statement,invalid-name
        global cumulative_millis_in_successful_matches_ms
        global cumulative_millis_in_failed_matches_ms

        object_nodes: List[Tuple[Tuple[str, ...], ObjectSemanticNode]] = []
        perception_graph = perception_semantic_alignment.perception_graph
        is_dynamic = perception_semantic_alignment.perception_graph.dynamic

        if is_dynamic:
            concepts_to_patterns = self._concepts_to_dynamic_patterns
        else:
            concepts_to_patterns = self._concepts_to_static_patterns

        # We special-case handling of the ground perception
        # because we don't want to remove it from the graph; we just want to use its
        # object node as a recognized object. The situation "a box on the ground"
        # prompted the need to recognize the ground.
        graph_to_return = perception_graph
        for node in graph_to_return._graph.nodes:  # pylint:disable=protected-access
            if node == GROUND_PERCEPTION:
                matched_object_node = ObjectSemanticNode(GROUND_OBJECT_CONCEPT)
                if LanguageMode.ENGLISH == self._language_mode:
                    object_nodes.append(
                        ((f"{GROUND_OBJECT_CONCEPT.debug_string}", ),
                         matched_object_node))
                elif LanguageMode.CHINESE == self._language_mode:
                    object_nodes.append((("di4 myan4", ), matched_object_node))
                else:
                    raise RuntimeError("Invalid language_generator")
                # We construct a fake match which is only the ground perception node
                subgraph_of_root = subgraph(perception_graph.copy_as_digraph(),
                                            [node])
                pattern_match = PerceptionGraphPatternMatch(
                    matched_pattern=PerceptionGraphPattern(
                        graph=subgraph_of_root,
                        dynamic=perception_graph.dynamic),
                    graph_matched_against=perception_graph,
                    matched_sub_graph=PerceptionGraph(
                        graph=subgraph_of_root,
                        dynamic=perception_graph.dynamic),
                    pattern_node_to_matched_graph_node=immutabledict(),
                )
                graph_to_return = replace_match_with_object_graph_node(
                    matched_object_node, graph_to_return, pattern_match)

        candidate_object_subgraphs = extract_candidate_objects(
            perception_graph)

        for candidate_object_graph in candidate_object_subgraphs:
            num_object_nodes = candidate_object_graph.count_nodes_matching(
                lambda node: isinstance(node, ObjectPerception))

            for (concept, pattern) in concepts_to_patterns.items():
                # As an optimization, we count how many sub-object nodes
                # are in the graph and the pattern.
                # If they aren't the same, the match is impossible
                # and we can bail out early.
                if num_object_nodes != self._concept_to_num_subobjects[concept]:
                    continue

                with Timer(factor=1000) as t:
                    matcher = pattern.matcher(candidate_object_graph,
                                              match_mode=MatchMode.OBJECT)
                    pattern_match = first(
                        matcher.matches(use_lookahead_pruning=True), None)
                if pattern_match:
                    cumulative_millis_in_successful_matches_ms += t.elapsed
                    matched_object_node = ObjectSemanticNode(concept)

                    # We wrap the concept in a tuple because it could in theory
                    # be multiple tokens, even though currently it never is.
                    if self._language_mode == LanguageMode.ENGLISH:
                        object_nodes.append(
                            ((concept.debug_string, ), matched_object_node))
                    elif self._language_mode == LanguageMode.CHINESE:
                        if concept.debug_string == "me":
                            object_nodes.append(
                                (("wo3", ), matched_object_node))
                        elif concept.debug_string == "you":
                            object_nodes.append(
                                (("ni3", ), matched_object_node))
                        mappings = (
                            GAILA_PHASE_1_CHINESE_LEXICON.
                            _ontology_node_to_word  # pylint:disable=protected-access
                        )
                        for k, v in mappings.items():
                            if k.handle == concept.debug_string:
                                debug_string = str(v.base_form)
                                object_nodes.append(
                                    ((debug_string, ), matched_object_node))
                    graph_to_return = replace_match_with_object_graph_node(
                        matched_object_node, graph_to_return, pattern_match)
                    # We match each candidate object against only one object type.
                    # See https://github.com/isi-vista/adam/issues/627
                    break
                else:
                    cumulative_millis_in_failed_matches_ms += t.elapsed
        if object_nodes:
            logging.info(
                "Object recognizer recognized: %s",
                [concept for (concept, _) in object_nodes],
            )
        logging.info(
            "object matching: ms in success: %s, ms in failed: %s",
            cumulative_millis_in_successful_matches_ms,
            cumulative_millis_in_failed_matches_ms,
        )
        semantic_object_nodes = immutableset(node
                                             for (_, node) in object_nodes)

        post_process_graph, post_process_nodes = post_process(
            graph_to_return, semantic_object_nodes)

        return (
            perception_semantic_alignment.
            copy_with_updated_graph_and_added_nodes(
                new_graph=post_process_graph, new_nodes=post_process_nodes),
            immutabledict(object_nodes),
        )
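A hypothetical usage sketch; `recognizer` is assumed to be an ObjectRecognizer and `alignment` a PerceptionSemanticAlignment produced by earlier preprocessing:

updated_alignment, description_to_object_node = recognizer.match_objects(alignment)
logging.info("Recognized %s known objects", len(description_to_object_node))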
Example #15
def do_object_on_table_test(
    object_type_to_match: OntologyNode,
    object_schema: ObjectStructuralSchema,
    negative_object_ontology_node: OntologyNode,
):
    """
    Tests that the `PerceptionGraphMatcher` can match simple objects.
    """
    # We create four situations:
    # an object_to_match above or under a table, with color red or blue
    color = color_variable("color")
    object_to_match = object_variable(
        debug_handle=object_type_to_match.handle,
        root_node=object_type_to_match,
        added_properties=[color],
    )
    table = standard_object("table_0", TABLE)

    object_on_table_template = Phase1SituationTemplate(
        "object_to_match-on-table",
        salient_object_variables=[object_to_match, table],
        asserted_always_relations=[
            bigger_than(table, object_to_match),
            on(object_to_match, table),
        ],
    )

    object_under_table_template = Phase1SituationTemplate(
        "object_to_match-under-table",
        salient_object_variables=[object_to_match, table],
        asserted_always_relations=[
            bigger_than(table, object_to_match),
            above(table, object_to_match),
        ],
    )

    # We test that a perceptual pattern for "object_to_match" matches in all four cases.
    object_to_match_pattern = PerceptionGraphPattern.from_schema(
        object_schema, perception_generator=GAILA_PHASE_1_PERCEPTION_GENERATOR)

    situations_with_object_to_match = chain(
        all_possible_test(object_on_table_template),
        all_possible_test(object_under_table_template),
    )

    for situation_with_object in situations_with_object_to_match:
        perception = GAILA_PHASE_1_PERCEPTION_GENERATOR.generate_perception(
            situation_with_object, chooser=RandomChooser.for_seed(0))
        perception_graph = PerceptionGraph.from_frame(perception.frames[0])
        # perception_graph.render_to_file(f"object_to_match {idx}", out_dir / f"object_to_match
        # -{idx}.pdf")
        # object_to_match_pattern.render_to_file(f"object_to_match pattern", out_dir /
        # "object_to_match_pattern.pdf")
        matcher = object_to_match_pattern.matcher(perception_graph,
                                                  match_mode=MatchMode.OBJECT)
        # debug_matching = matcher.debug_matching(
        #    use_lookahead_pruning=False, render_match_to=Path("/Users/gabbard/tmp")
        # )
        result = any(matcher.matches(use_lookahead_pruning=False))
        if not result:
            return False

    # Now let's create the same situations, but substitute a negative_object for a object_to_match.
    negative_object = object_variable(
        debug_handle=negative_object_ontology_node.handle,
        root_node=negative_object_ontology_node,
        added_properties=[color],
    )
    negative_object_on_table_template = Phase1SituationTemplate(
        "negative_object-on-table",
        salient_object_variables=[negative_object, table],
        asserted_always_relations=[
            bigger_than(table, negative_object),
            on(negative_object, table),
        ],
    )

    negative_object_under_table_template = Phase1SituationTemplate(
        "negative_object-under-table",
        salient_object_variables=[negative_object, table],
        asserted_always_relations=[
            bigger_than(table, negative_object),
            above(table, negative_object),
        ],
    )

    situations_with_negative_object = chain(
        all_possible_test(negative_object_on_table_template),
        all_possible_test(negative_object_under_table_template),
    )

    # The pattern should now fail to match.
    for situation_with_negative_object in situations_with_negative_object:
        perception = GAILA_PHASE_1_PERCEPTION_GENERATOR.generate_perception(
            situation_with_negative_object, chooser=RandomChooser.for_seed(0))
        perception_graph = PerceptionGraph.from_frame(perception.frames[0])
        if any(
                object_to_match_pattern.matcher(
                    perception_graph, match_mode=MatchMode.OBJECT).matches(
                        use_lookahead_pruning=True)):
            return False
    return True
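A hypothetical invocation; BALL is assumed to be an OntologyNode like BOX, with its structural schema looked up as in Example #16 below:

assert do_object_on_table_test(
    BALL,
    first(GAILA_PHASE_1_ONTOLOGY.structural_schemata(BALL)),
    BOX,
)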
Example #16
    ObjectSemanticNode,
    GROUND_OBJECT_CONCEPT,
    SemanticNode,
)
from adam.utils.networkx_utils import subgraph
from attr import attrib, attrs
from attr.validators import deep_iterable, deep_mapping, instance_of
from immutablecollections import ImmutableDict, ImmutableSet, immutabledict, immutableset
from immutablecollections.converter_utils import _to_immutabledict, _to_immutableset
from vistautils.span import Span

_LIST_OF_PERCEIVED_PATTERNS = immutableset(
    (
        node.handle,
        PerceptionGraphPattern.from_schema(
            first(GAILA_PHASE_1_ONTOLOGY.structural_schemata(node)),
            perception_generator=GAILA_PHASE_1_PERCEPTION_GENERATOR,
        ),
    ) for node in PHASE_1_CURRICULUM_OBJECTS
    if node in GAILA_PHASE_1_ONTOLOGY._structural_schemata.keys()  # pylint:disable=protected-access
)


@attrs(frozen=True, slots=True, auto_attribs=True)
class PerceptionGraphFromObjectRecognizer:
    """
    See `ObjectRecognizer.match_objects`
    """

    perception_graph: PerceptionGraph
    description_to_matched_object_node: ImmutableDict[Tuple[
        str, ...], ObjectSemanticNode] = attrib(converter=_to_immutabledict)
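A construction sketch (hypothetical): `graph` is assumed to be a PerceptionGraph and `description_to_object_node` a mapping from token tuples to ObjectSemanticNodes, such as the one produced by ObjectRecognizer.match_objects in Example #14:

enriched = PerceptionGraphFromObjectRecognizer(
    perception_graph=graph,
    description_to_matched_object_node=description_to_object_node,
)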
Example #17
def test_last_failed_pattern_node():
    """
    Tests whether `MatchFailure` can find the correct node.
    """

    target_object = BOX
    # Create a training template for the target object
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    for (_, _, perceptual_representation) in train_curriculum.instances():
        # Original perception graph
        perception = graph_without_learner(
            PerceptionGraph.from_frame(perceptual_representation.frames[0]))

        # Original perception pattern
        whole_perception_pattern = PerceptionGraphPattern.from_graph(
            perception).perception_graph_pattern
        # Create an altered perception graph where we replace the color node
        altered_perception_digraph = perception.copy_as_digraph()
        nodes_to_remove = []
        edges = []
        different_nodes = []
        for node in perception.copy_as_digraph().nodes:
            # If we find a color node, we make it black
            if isinstance(node, tuple) and isinstance(node[0],
                                                      RgbColorPerception):
                new_node = (RgbColorPerception(0, 0, 0), 42)
                # Get edge information
                for edge in perception.copy_as_digraph().edges(data=True):
                    if edge[0] == node:
                        edges.append((new_node, edge[1], edge[2]))
                    if edge[1] == node:
                        edges.append((edge[0], new_node, edge[2]))
                nodes_to_remove.append(node)
                different_nodes.append(new_node)

        # add new nodes
        for node in different_nodes:
            altered_perception_digraph.add_node(node)
        # add edge information
        for edge in edges:
            altered_perception_digraph.add_edge(edge[0], edge[1])
            for k, v in edge[2].items():
                altered_perception_digraph[edge[0]][edge[1]][k] = v
        # remove original node
        altered_perception_digraph.remove_nodes_from(nodes_to_remove)

        # Start the matching process
        matcher = whole_perception_pattern.matcher(
            PerceptionGraph(altered_perception_digraph),
            match_mode=MatchMode.NON_OBJECT)
        match_or_failure = matcher.first_match_or_failure_info()
        assert isinstance(match_or_failure, PatternMatching.MatchFailure)
        assert isinstance(match_or_failure.last_failed_pattern_node,
                          IsColorNodePredicate)
Example #18
def test_copy_with_temporal_scope_pattern_content():
    """
    Tests whether copy_with_temporal_scopes converts patterns to be dynamic as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    perception_pattern = PerceptionGraphPattern.from_graph(
        perception_graph).perception_graph_pattern

    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    temporal_perception_pattern = perception_pattern.copy_with_temporal_scopes(
        required_temporal_scopes=TemporalScope.AFTER)

    # Applying temporal scopes to an already-dynamic pattern should raise an exception
    with pytest.raises(RuntimeError):
        temporal_perception_pattern.copy_with_temporal_scopes(
            required_temporal_scopes=TemporalScope.AFTER)

    for (source, target) in perception_pattern.copy_as_digraph().edges():
        assert not isinstance(
            perception_pattern.copy_as_digraph()[source][target]["predicate"],
            HoldsAtTemporalScopePredicate,
        )
    for (source,
         target) in temporal_perception_pattern.copy_as_digraph().edges():
        # Check type, and then the content
        predicate = temporal_perception_pattern.copy_as_digraph(
        )[source][target]["predicate"]
        # Test the HoldsAtTemporalScopePredicate dot label and matches_predicate behavior
        assert isinstance(predicate.dot_label(), str)
        assert predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          predicate.temporal_scopes))
        assert not predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          [TemporalScope.BEFORE]))
        assert isinstance(predicate, HoldsAtTemporalScopePredicate)
        assert (predicate.wrapped_edge_predicate == perception_pattern.
                copy_as_digraph()[source][target]["predicate"])
        assert len(predicate.temporal_scopes) == 1
        assert only(predicate.temporal_scopes) == TemporalScope.AFTER

    # Test normal matching behavior
    temporal_matcher = temporal_perception_pattern.matcher(
        temporal_perception_graph, match_mode=MatchMode.NON_OBJECT)
    first(temporal_matcher.matches(use_lookahead_pruning=True))

    # Test HoldsAtTemporalScopePredicate
    for (source, target) in perception_graph.copy_as_digraph().edges():
        label = "test edge label"
        edge_predicate = AnyEdgePredicate()
        temporal_predicate = HoldsAtTemporalScopePredicate(
            edge_predicate, [TemporalScope.AFTER])

        temporal_edge_label = TemporallyScopedEdgeLabel(
            label, [TemporalScope.AFTER])
        assert temporal_predicate(source, temporal_edge_label, target)
        # A non-temporal edge label should raise an exception
        with pytest.raises(RuntimeError):
            temporal_predicate(source, label, target)
Example #19
def test_semantically_infeasible_partial_match():
    """
    Tests whether semantic feasibility works as intended
    """

    target_object = BOX
    # Create a training template for the target object
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    whole_perception_pattern = PerceptionGraphPattern.from_graph(
        perception).perception_graph_pattern

    # Create an altered perception graph where we replace the color node with a black one
    altered_perception_digraph = perception.copy_as_digraph()
    nodes_to_remove = []
    edges = []
    different_nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, we make it black
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            new_node = (RgbColorPerception(0, 0, 0), node[1])
            # Get edge information
            for edge in perception.copy_as_digraph().edges(data=True):
                if edge[0] == node:
                    edges.append((new_node, edge[1], edge[2]))
                if edge[1] == node:
                    edges.append((edge[0], new_node, edge[2]))
            nodes_to_remove.append(node)
            different_nodes.append(new_node)

    # remove original node
    altered_perception_digraph.remove_nodes_from(nodes_to_remove)

    # add new nodes
    for node in different_nodes:
        altered_perception_digraph.add_node(node)
    # add edge information
    for edge in edges:
        altered_perception_digraph.add_edge(edge[0], edge[1])
        for k, v in edge[2].items():
            altered_perception_digraph[edge[0]][edge[1]][k] = v

    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        PerceptionGraph(altered_perception_digraph)).perception_graph_pattern

    partial_digraph = altered_perception_pattern.copy_as_digraph()
    partial_digraph.remove_nodes_from([
        node for node in partial_digraph.nodes
        if isinstance(node, IsColorNodePredicate)
    ])

    # Start the matching process, get a partial match
    matcher = whole_perception_pattern.matcher(perception,
                                               match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        matcher.matches(use_lookahead_pruning=True))
    partial_mapping = partial_match.pattern_node_to_matched_graph_node

    # Try to extend the partial mapping, we expect a semantic infeasibility runtime error
    second_matcher = whole_perception_pattern.matcher(
        PerceptionGraph(altered_perception_digraph),
        match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from first matcher with original perception graph)
    # semantically doesn't match the one in the altered version (second matcher with altered graph)
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
Example #20
def test_allowed_matches_with_bad_partial_match():
    """
    Tests whether PatternMatching's allowed_matches functionality works as intended when a bad
    partial match is specified.
    """
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    pattern1: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) == "box_0"
        })).perception_graph_pattern

    pattern2: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) in {"box_0", "the ground"}
        })).perception_graph_pattern

    pattern1_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern1._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_ground: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "the ground"),
    )

    matcher = PatternMatching(
        pattern=pattern1,
        graph_to_match_against=pattern2,
        matching_pattern_against_pattern=True,
        match_mode=MatchMode.OBJECT,
        allowed_matches=immutablesetmultidict([(pattern1_box, pattern2_box)]),
    )
    with pytest.raises(RuntimeError):
        first(
            matcher.matches(
                initial_partial_match={pattern1_box: pattern2_ground},
                use_lookahead_pruning=True,
            ),
            None,
        )