Example #1
def pattern_remove_incomplete_region_or_spatial_path(
    perception_graph: PerceptionGraphPattern
) -> PerceptionGraphPattern:
    """
    Helper function returning a `PerceptionGraphPattern` in which any region
    or spatial path perception lacking a reference object has been removed.
    """
    graph = perception_graph.copy_as_digraph()
    region_and_path_nodes: ImmutableSet[NodePredicate] = immutableset(
        node
        for node in graph.nodes
        if isinstance(node, (IsPathPredicate, RegionPredicate))
    )
    nodes_without_reference: List[NodePredicate] = []
    for node in region_and_path_nodes:
        has_reference_edge: bool = False
        for successor in graph.successors(node):
            predicate = graph.edges[node, successor]["predicate"]
            if isinstance(predicate, RelationTypeIsPredicate):
                if predicate.relation_type in [
                    REFERENCE_OBJECT_LABEL,
                    REFERENCE_OBJECT_DESTINATION_LABEL,
                    REFERENCE_OBJECT_SOURCE_LABEL,
                ]:
                    has_reference_edge = True
                    break
        if not has_reference_edge:
            nodes_without_reference.append(node)

    logging.info(
        f"Removing incomplete regions and paths. "
        f"Removing nodes: {nodes_without_reference}"
    )
    graph.remove_nodes_from(nodes_without_reference)

    def sort_by_num_nodes(g: DiGraph) -> int:
        return len(g.nodes)

    # TODO: Consider a more principled approach here; for now we brute-force
    # the problem by keeping only the largest connected component.
    if number_weakly_connected_components(graph) > 1:
        components = [
            subgraph(graph, component)
            for component in weakly_connected_components(graph)
        ]
        components.sort(key=sort_by_num_nodes, reverse=True)
        computed_graph = subgraph(graph, components[0].nodes)
        removed_nodes: List[NodePredicate] = [
            node for component in components[1:] for node in component.nodes
        ]
        logging.info(f"Cleaning up disconnected elements. Removing: {removed_nodes}")
    else:
        computed_graph = graph

    return PerceptionGraphPattern(computed_graph, dynamic=perception_graph.dynamic)
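
The component cleanup at the end of Example #1 amounts to keeping only the largest weakly connected component. Below is a minimal sketch of just that step in plain networkx; the toy graph and node names are hypothetical and not from the adam codebase:

import networkx as nx

def keep_largest_weakly_connected_component(graph: nx.DiGraph) -> nx.DiGraph:
    # weakly_connected_components yields node sets; pick the largest by node count.
    components = sorted(nx.weakly_connected_components(graph), key=len, reverse=True)
    if len(components) <= 1:
        return graph
    # subgraph() returns a frozen view; copy it so the result stays independently usable.
    return graph.subgraph(components[0]).copy()

toy = nx.DiGraph([("a", "b"), ("b", "c"), ("x", "y")])
assert set(keep_largest_weakly_connected_component(toy).nodes) == {"a", "b", "c"}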
Example #2
def _extract_candidate_relations(
    whole_scene_perception_graph: PerceptionGraph,
    relation_object_1: ObjectSemanticNode,
    relation_object_2: ObjectSemanticNode,
) -> Sequence[PerceptionGraph]:
    # The directions of edges in the perception graph are not necessarily meaningful
    # from the point-of-view of hypothesis generation, so we need an undirected copy
    # of the graph.
    perception_digraph = whole_scene_perception_graph.copy_as_digraph()
    perception_graph_undirected = perception_digraph.to_undirected(
        # as_view=True loses determinism
        as_view=False)

    output_graphs = []

    # The core of our hypothesis for the semantics of a preposition is all nodes
    # along the shortest path between the two objects involved in the perception graph.
    for hypothesis_spine_nodes in all_shortest_paths(
            perception_graph_undirected, relation_object_2, relation_object_1):
        # Along the spine of our hypothesis we also want to collect
        # each node's non-object predecessors and successors.
        hypothesis_nodes_mutable = []
        for node in hypothesis_spine_nodes:
            if node not in {relation_object_1, relation_object_2}:
                for successor in perception_digraph.successors(node):
                    if not isinstance(successor, (ObjectPerception, ObjectSemanticNode)):
                        hypothesis_nodes_mutable.append(successor)
                for predecessor in perception_digraph.predecessors(node):
                    if not isinstance(predecessor, (ObjectPerception, ObjectSemanticNode)):
                        hypothesis_nodes_mutable.append(predecessor)

        hypothesis_nodes_mutable.extend(hypothesis_spine_nodes)

        # We wrap the nodes in an immutable set to remove duplicates
        # while preserving iteration determinism.
        hypothesis_nodes = immutableset(hypothesis_nodes_mutable)

        output_graphs.append(
            PerceptionGraph(
                digraph_with_nodes_sorted_by(
                    subgraph(perception_digraph, hypothesis_nodes),
                    _graph_node_order)))

    return output_graphs
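
A self-contained illustration of the spine extraction in Example #2, using plain networkx; the toy perception graph and its node names are hypothetical:

import networkx as nx

digraph = nx.DiGraph([("obj1", "region"), ("region", "obj2"), ("region", "axis")])
# Edge direction carries no meaning for hypothesis extraction, so search on a
# real undirected copy (not a view, which would lose iteration determinism).
undirected = digraph.to_undirected(as_view=False)
for spine in nx.all_shortest_paths(undirected, source="obj2", target="obj1"):
    print(spine)  # ['obj2', 'region', 'obj1']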
Example #3
    def _enrich_post_process(
        self,
        perception_graph_after_matching: PerceptionGraph,
        immutable_new_nodes: AbstractSet[SemanticNode],
    ) -> Tuple[PerceptionGraph, AbstractSet[SemanticNode]]:
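        r"""
        Wrap each `ObjectPerception` node remaining after matching in a fake
        `ObjectSemanticNode` with an "unknown_object" `FunctionalObjectConcept`.
        """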
        object_root_nodes = immutableset(
            node
            for node in perception_graph_after_matching._graph.nodes  # pylint:disable=protected-access
            if isinstance(node, ObjectPerception)
        )
        new_nodes = []
        perception_graph_after_processing = perception_graph_after_matching
        for object_root_node in object_root_nodes:
            fake_subgraph = subgraph(
                perception_graph_after_matching._graph,  # pylint:disable=protected-access
                [object_root_node],
            )
            fake_perception_graph = PerceptionGraph(
                graph=fake_subgraph,
                dynamic=perception_graph_after_matching.dynamic)
            fake_pattern_graph = PerceptionGraphPattern.from_graph(
                fake_perception_graph)
            fake_object_semantic_node = ObjectSemanticNode(
                concept=FunctionalObjectConcept("unknown_object"))
            perception_graph_after_processing = replace_match_with_object_graph_node(
                matched_object_node=fake_object_semantic_node,
                current_perception=perception_graph_after_processing,
                pattern_match=PerceptionGraphPatternMatch(
                    matched_pattern=fake_pattern_graph.perception_graph_pattern,
                    graph_matched_against=perception_graph_after_matching,
                    matched_sub_graph=fake_perception_graph,
                    pattern_node_to_matched_graph_node=fake_pattern_graph.perception_graph_node_to_pattern_node,
                ),
            ).perception_graph_after_replacement
            new_nodes.append(fake_object_semantic_node)

        return (
            perception_graph_after_processing,
            immutableset(chain(immutable_new_nodes, new_nodes)),
        )
Example #4
def get_objects_from_perception(
    observed_perception_graph: PerceptionGraph
) -> List[PerceptionGraph]:
    """
    Utility function to get a list with one `PerceptionGraph` per independent object in the scene.
    """
    perception_as_digraph = observed_perception_graph.copy_as_digraph()
    perception_as_graph = perception_as_digraph.to_undirected()

    meanings = []

    # 1) Take all of the object perceptions that don't have partOf
    # relationships with anything else.
    root_object_perception_nodes = []
    for node in perception_as_graph.nodes:
        if isinstance(node, ObjectPerception) and node.debug_handle != "the ground":
            if not any(
                u == node and str(data["label"]) == "partOf"
                for u, _v, data in perception_as_digraph.edges.data()
            ):
                root_object_perception_nodes.append(node)

    # 2) For each of these, walk along the partOf relationships backwards,
    # i.e., find all of the subparts of the root object.
    for root_object_perception_node in root_object_perception_nodes:
        # Iteratively get all other object perceptions that connect to the root
        # via a partOf relation.
        all_object_perception_nodes = [root_object_perception_node]
        frontier = [root_object_perception_node]
        while frontier:
            new_frontier = []
            for frontier_node in frontier:
                for node in perception_as_graph.neighbors(frontier_node):
                    edge_data = perception_as_digraph.get_edge_data(
                        node, frontier_node, default=-1
                    )
                    if edge_data != -1 and str(edge_data["label"]) == "partOf":
                        new_frontier.append(node)
            all_object_perception_nodes.extend(new_frontier)
            frontier = new_frontier

        # Now we have a list of all perceptions that are connected.
        # 3) For each of these objects, including the root object, get the axes,
        # properties, relations, and regions among these internal object perceptions.
        other_nodes = []
        for node in all_object_perception_nodes:
            for neighbor in perception_as_graph.neighbors(node):
                # Filter out regions whose reference object is not among the
                # collected object perception nodes.
                # TODO: We currently remove colors to achieve a match - otherwise finding
                #  patterns fails.
                if (
                    isinstance(neighbor, Region)
                    and neighbor.reference_object not in all_object_perception_nodes
                ) or isinstance(neighbor, RgbColorPerception):
                    continue
                # Append all other non-object nodes to be kept in the subgraph.
                if not isinstance(neighbor, ObjectPerception):
                    other_nodes.append(neighbor)

        generated_subgraph = networkx_utils.subgraph(
            perception_as_digraph, all_object_perception_nodes + other_nodes
        )
        meanings.append(PerceptionGraph(generated_subgraph))

    logging.info(f"Got {len(meanings)} candidate meanings")
    return meanings
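
Step 2 above is a breadth-first traversal over partOf edges. Here is a minimal standalone sketch of the same frontier expansion in networkx; the toy part-of edges are hypothetical:

import networkx as nx

# Toy part-of structure: edges point from part to whole.
digraph = nx.DiGraph()
digraph.add_edge("leg", "table", label="partOf")
digraph.add_edge("tabletop", "table", label="partOf")

def collect_parts(graph: nx.DiGraph, root: str) -> list:
    # Gather the root plus everything reachable backwards over partOf edges.
    parts = [root]
    frontier = [root]
    while frontier:
        new_frontier = [
            part
            for node in frontier
            for part, _whole, data in graph.in_edges(node, data=True)
            if data["label"] == "partOf"
        ]
        parts.extend(new_frontier)
        frontier = new_frontier
    return parts

assert collect_parts(digraph, "table") == ["table", "leg", "tabletop"]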
Example #5
    def match_objects(
        self,
        perception_semantic_alignment: PerceptionSemanticAlignment,
        *,
        post_process: Callable[[PerceptionGraph, AbstractSet[SemanticNode]],
                               Tuple[PerceptionGraph,
                                     AbstractSet[SemanticNode]],
                               ] = default_post_process_enrichment,
    ) -> Tuple[PerceptionSemanticAlignment, Mapping[Tuple[str, ...],
                                                    ObjectSemanticNode]]:
        r"""
        Recognize known objects in a `PerceptionGraph`.

        Each matched portion of the graph will be replaced with an `ObjectSemanticNode`
        which inherits all relationships between nodes internal to the matched portion
        and any external nodes.

        This is useful as a pre-processing step
        before prepositional and verbal learning experiments.
        """

        # pylint: disable=global-statement,invalid-name
        global cumulative_millis_in_successful_matches_ms
        global cumulative_millis_in_failed_matches_ms

        object_nodes: List[Tuple[Tuple[str, ...], ObjectSemanticNode]] = []
        perception_graph = perception_semantic_alignment.perception_graph
        is_dynamic = perception_semantic_alignment.perception_graph.dynamic

        if is_dynamic:
            concepts_to_patterns = self._concepts_to_dynamic_patterns
        else:
            concepts_to_patterns = self._concepts_to_static_patterns

        # We special-case handling of the ground perception because we don't want
        # to remove it from the graph; we just want to use its object node as a
        # recognized object. The situation "a box on the ground" prompted the need
        # to recognize the ground.
        graph_to_return = perception_graph
        for node in graph_to_return._graph.nodes:  # pylint:disable=protected-access
            if node == GROUND_PERCEPTION:
                matched_object_node = ObjectSemanticNode(GROUND_OBJECT_CONCEPT)
                if LanguageMode.ENGLISH == self._language_mode:
                    object_nodes.append(
                        ((GROUND_OBJECT_CONCEPT.debug_string, ),
                         matched_object_node))
                elif LanguageMode.CHINESE == self._language_mode:
                    object_nodes.append((("di4 myan4", ), matched_object_node))
                else:
                    raise RuntimeError("Invalid language_mode")
                # We construct a fake match which is only the ground perception node
                subgraph_of_root = subgraph(perception_graph.copy_as_digraph(),
                                            [node])
                pattern_match = PerceptionGraphPatternMatch(
                    matched_pattern=PerceptionGraphPattern(
                        graph=subgraph_of_root,
                        dynamic=perception_graph.dynamic),
                    graph_matched_against=perception_graph,
                    matched_sub_graph=PerceptionGraph(
                        graph=subgraph_of_root,
                        dynamic=perception_graph.dynamic),
                    pattern_node_to_matched_graph_node=immutabledict(),
                )
                graph_to_return = replace_match_with_object_graph_node(
                    matched_object_node, graph_to_return, pattern_match)

        candidate_object_subgraphs = extract_candidate_objects(
            perception_graph)

        for candidate_object_graph in candidate_object_subgraphs:
            num_object_nodes = candidate_object_graph.count_nodes_matching(
                lambda node: isinstance(node, ObjectPerception))

            for (concept, pattern) in concepts_to_patterns.items():
                # As an optimization, we count how many sub-object nodes
                # are in the graph and the pattern.
                # If they aren't the same, the match is impossible
                # and we can bail out early.
                if num_object_nodes != self._concept_to_num_subobjects[concept]:
                    continue

                with Timer(factor=1000) as t:
                    matcher = pattern.matcher(candidate_object_graph,
                                              match_mode=MatchMode.OBJECT)
                    pattern_match = first(
                        matcher.matches(use_lookahead_pruning=True), None)
                if pattern_match:
                    cumulative_millis_in_successful_matches_ms += t.elapsed
                    matched_object_node = ObjectSemanticNode(concept)

                    # We wrap the concept in a tuple because it could in theory be
                    # multiple tokens, even though currently it never is.
                    if self._language_mode == LanguageMode.ENGLISH:
                        object_nodes.append(
                            ((concept.debug_string, ), matched_object_node))
                    elif self._language_mode == LanguageMode.CHINESE:
                        if concept.debug_string == "me":
                            object_nodes.append(
                                (("wo3", ), matched_object_node))
                        elif concept.debug_string == "you":
                            object_nodes.append(
                                (("ni3", ), matched_object_node))
                        mappings = (
                            GAILA_PHASE_1_CHINESE_LEXICON.
                            _ontology_node_to_word  # pylint:disable=protected-access
                        )
                        for k, v in mappings.items():
                            if k.handle == concept.debug_string:
                                debug_string = str(v.base_form)
                                object_nodes.append(
                                    ((debug_string, ), matched_object_node))
                    graph_to_return = replace_match_with_object_graph_node(
                        matched_object_node, graph_to_return, pattern_match)
                    # We match each candidate object against only one object type.
                    # See https://github.com/isi-vista/adam/issues/627
                    break
                else:
                    cumulative_millis_in_failed_matches_ms += t.elapsed
        if object_nodes:
            logging.info(
                "Object recognizer recognized: %s",
                [concept for (concept, _) in object_nodes],
            )
        logging.info(
            "object matching: ms in success: %s, ms in failed: %s",
            cumulative_millis_in_successful_matches_ms,
            cumulative_millis_in_failed_matches_ms,
        )
        semantic_object_nodes = immutableset(node
                                             for (_, node) in object_nodes)

        post_process_graph, post_process_nodes = post_process(
            graph_to_return, semantic_object_nodes)

        return (
            perception_semantic_alignment.copy_with_updated_graph_and_added_nodes(
                new_graph=post_process_graph, new_nodes=post_process_nodes
            ),
            immutabledict(object_nodes),
        )
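
The sub-object count check in Example #5 is a cheap pruning step performed before an expensive graph match. Below is a minimal sketch of the same idea over plain networkx digraphs; networkx's DiGraphMatcher stands in for adam's pattern matcher, which this sketch does not reproduce:

import networkx as nx
from networkx.algorithms import isomorphism

def first_matching_concept(candidate: nx.DiGraph, concept_to_pattern: dict):
    # Return (concept, node mapping) for the first matching pattern, else None.
    for concept, pattern in concept_to_pattern.items():
        # Cheap pruning: if the node counts differ, a full match is impossible,
        # so skip the expensive isomorphism test entirely.
        if len(pattern) != len(candidate):
            continue
        matcher = isomorphism.DiGraphMatcher(candidate, pattern)
        match = next(matcher.subgraph_isomorphisms_iter(), None)
        if match is not None:
            return concept, match
    return None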