Example #1
def test_two_objects():
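    # Build a template pairing a person with a toy vehicle; all_possible should
    # enumerate every grounding of the two variables in the test ontology.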
    two_object_template = Phase1SituationTemplate(
        "two-objects",
        salient_object_variables=[
            object_variable("person", root_node=_PERSON),
            object_variable("toy_vehicle", required_properties=[_TOY_VEHICLE]),
        ],
    )

    reference_object_sets = {
        immutableset(["mom", "toy_truck"]),
        immutableset(["dad", "toy_truck"]),
        immutableset(["learner", "toy_truck"]),
        immutableset(["mom", "toy_car"]),
        immutableset(["dad", "toy_car"]),
        immutableset(["learner", "toy_car"]),
    }

    generated_object_sets = set(
        immutableset(situation_object.ontology_node.handle
                     for situation_object in situation.salient_objects)
        for situation in all_possible(
            two_object_template,
            ontology=_TESTING_ONTOLOGY,
            chooser=RandomChooser.for_seed(0),
            default_addressee_node=_LEARNER,
        ))

    assert generated_object_sets == reference_object_sets
Example #2
def _make_in_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = object_variable("water", WATER)
    figure_1 = object_variable("juice", JUICE)
    ground_0 = standard_object("box", BOX)
    ground_1 = standard_object("cup", CUP)

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Training In",
        chain(*[
            sampled(
                _in_template(
                    figure,
                    ground,
                    make_noise_objects(noise_objects),
                    is_training=True,
                ),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 5,
            ) for figure in figures for ground in grounds
        ]),
        language_generator=language_generator,
    )
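
A hedged sketch of calling the builder above; with num_samples=None, the body falls back to 5 samples per figure/ground pairing. The phase1_language_generator call is an assumption that mirrors the other examples here.

# Illustrative only: construct the "in" training curriculum for English.
in_training = _make_in_training(
    num_samples=None,
    noise_objects=2,
    language_generator=phase1_language_generator(LanguageMode.ENGLISH),
)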
Example #3
def test_before_after_relations_asserted():
    ball = object_variable("ball", root_node=BALL)
    box = object_variable("box", root_node=BOX)
    ground = object_variable("ground", root_node=GROUND)

    template_action = Phase1SituationTemplate(
        "Before/After Relation",
        salient_object_variables=[ball, box],
        background_object_variables=[ground],
        actions=[
            Action(
                ROLL,
                argument_roles_to_fillers=[(AGENT, ball)],
                auxiliary_variable_bindings=[(ROLL_SURFACE_AUXILIARY, ground)],
            )
        ],
        before_action_relations=flatten_relations([on(ball, box)]),
        after_action_relations=flatten_relations([far(ball, box)]),
    )

    situation_with_relations = tuple(
        sampled(
            template_action,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=RandomChooser.for_seed(0),
            max_to_sample=1,
        ))

    assert situation_with_relations[0].before_action_relations
    assert situation_with_relations[0].after_action_relations
Example #4
def test_matching_static_vs_dynamic_graphs():
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)
    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)
    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])

    perception_pattern = PerceptionGraphPattern.from_graph(
        perception_graph).perception_graph_pattern

    temporal_perception_pattern = perception_pattern.copy_with_temporal_scopes(
        required_temporal_scopes=[TemporalScope.AFTER])

    # Test runtime error for matching static pattern against dynamic graph and vice versa

    with pytest.raises(RuntimeError):
        perception_pattern.matcher(temporal_perception_graph,
                                   match_mode=MatchMode.NON_OBJECT)

    with pytest.raises(RuntimeError):
        temporal_perception_pattern.matcher(perception_graph,
                                            match_mode=MatchMode.NON_OBJECT)
Example #5
def test_subset_preposition_in(language_mode, learner):
    water = object_variable("water", WATER)
    cup = standard_object("cup", CUP)

    run_preposition_test(
        learner(language_mode),
        _in_template(water, cup, immutableset(), is_training=True),
        language_generator=phase1_language_generator(language_mode),
    )
Example #6
def test_successfully_extending_partial_match():
    """
    Tests whether we can match a perception pattern against a perception graph
    when initializing the search from a partial match.
    """

    target_object = BOX
    # Create train and test templates for the target objects
    train_obj_object = object_variable("obj-with-color", target_object)

    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    # Original perception graph
    perception = PerceptionGraph.from_frame(
        perceptual_representation.frames[0])

    # Create a perception pattern for the whole thing
    # and also a perception pattern for a subset of the whole pattern
    whole_perception_pattern = PerceptionGraphPattern.from_graph(
        perception).perception_graph_pattern

    partial_digraph = whole_perception_pattern.copy_as_digraph()
    partial_digraph.remove_nodes_from([
        node for node in partial_digraph.nodes
        if isinstance(node, IsColorNodePredicate)
    ])
    partial_perception_pattern = PerceptionGraphPattern(partial_digraph)

    # get our initial match by matching the partial pattern
    matcher = partial_perception_pattern.matcher(
        perception, match_mode=MatchMode.NON_OBJECT)

    partial_match: PerceptionGraphPatternMatch = first(
        matcher.matches(use_lookahead_pruning=True))
    partial_mapping = partial_match.pattern_node_to_matched_graph_node

    # Try to extend the partial mapping, to create a complete mapping
    matcher_2 = whole_perception_pattern.matcher(
        perception, match_mode=MatchMode.NON_OBJECT)
    complete_match: PerceptionGraphPatternMatch = first(
        matcher_2.matches(initial_partial_match=partial_mapping,
                          use_lookahead_pruning=True),
        None,
    )
    complete_mapping = complete_match.pattern_node_to_matched_graph_node
    assert len(complete_mapping) == len(perception.copy_as_digraph().nodes)
    assert len(complete_mapping) == len(
        whole_perception_pattern.copy_as_digraph().nodes)
Example #7
def test_pursuit_preposition_in_learner(language_mode):
    rng = random.Random()
    rng.seed(0)
    learner = PrepositionPursuitLearner(
        learning_factor=0.5,
        graph_match_confirmation_threshold=0.7,
        lexicon_entry_threshold=0.7,
        rng=rng,
        smoothing_parameter=0.001,
        ontology=GAILA_PHASE_1_ONTOLOGY,
        object_recognizer=LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[language_mode],
        language_mode=language_mode,
    )  # type: ignore
    water = object_variable("water", WATER)
    cup = standard_object("cup", CUP)
    language_generator = phase1_language_generator(language_mode)
    in_train_curriculum = phase1_instances(
        "Preposition In Unit Train",
        situations=sampled(
            _in_template(water, cup, immutableset(), is_training=True),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=10,
        ),
        language_generator=language_generator,
    )
    in_test_curriculum = phase1_instances(
        "Preposition In Unit Test",
        situations=sampled(
            _in_template(water, cup, immutableset(), is_training=False),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
        ),
        language_generator=language_generator,
    )
    for (
            _,
            linguistic_description,
            perceptual_representation,
    ) in in_train_curriculum.instances():
        learner.observe(
            LearningExample(perceptual_representation, linguistic_description))

    for (
            _,
            test_linguistic_description,
            test_perceptual_representation,
    ) in in_test_curriculum.instances():
        descriptions_from_learner = learner.describe(
            test_perceptual_representation)
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ][0] == gold
Example #8
def _make_drink_cups_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:

    templates = []
    for cup in [CUP, CUP_2, CUP_3, CUP_4]:
        cup_obj = standard_object("cup", cup)
        liquid_0 = object_variable("liquid_0", required_properties=[LIQUID])
        person_0 = standard_object(
            "person_0", PERSON, banned_properties=[IS_SPEAKER, IS_ADDRESSEE]
        )

        templates.append(
            Phase1SituationTemplate(
                "drink-cup",
                salient_object_variables=[liquid_0, person_0, cup_obj],
                background_object_variables=make_noise_objects(noise_objects),
                actions=[
                    Action(
                        DRINK,
                        argument_roles_to_fillers=[(AGENT, person_0), (THEME, liquid_0)],
                        auxiliary_variable_bindings=[(DRINK_CONTAINER_AUX, cup_obj)],
                    )
                ],
                asserted_always_relations=[inside(liquid_0, cup_obj)],
            )
        )

    return phase2_instances(
        "drink - cup",
        chain(
            *[
                sampled(
                    cup_template,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_2_ONTOLOGY,
                    max_to_sample=num_samples,
                    block_multiple_of_the_same_type=True,
                )
                if num_samples
                else all_possible(
                    cup_template,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_2_ONTOLOGY,
                )
                for cup_template in templates
            ]
        ),
        perception_generator=GAILA_PHASE_2_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
Example #9
def test_recognize_in_transfer_of_possession(language_mode):
    dad = object_variable("person_0", DAD)
    baby = object_variable("person_1", BABY)
    chair = object_variable("give_object_0", CHAIR)

    giving_template = Phase1SituationTemplate(
        "dad-transfer-of-possession",
        salient_object_variables=[dad, baby, chair],
        actions=[
            Action(
                GIVE,
                argument_roles_to_fillers=[(AGENT, dad), (GOAL, baby),
                                           (THEME, chair)],
            )
        ],
        syntax_hints=[PREFER_DITRANSITIVE],
    )

    (_, _, perception) = first(
        phase1_instances(
            "foo",
            sampled(
                giving_template,
                max_to_sample=1,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                block_multiple_of_the_same_type=True,
            ),
        ).instances())

    perception_graph = PerceptionGraph.from_dynamic_perceptual_representation(
        perception)
    perception_semantic_alignment = PerceptionSemanticAlignment.create_unaligned(
        perception_graph)
    (_, description_to_matched_semantic_node
     ) = LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[language_mode].match_objects(
         perception_semantic_alignment)
    assert len(description_to_matched_semantic_node) == 4
    assert (language_mode == LanguageMode.ENGLISH and
            ("Dad", ) in description_to_matched_semantic_node) or (
                language_mode == LanguageMode.CHINESE and
                ("ba4 ba4", ) in description_to_matched_semantic_node)
Example #10
def drink_test_template():
    object_0 = standard_object(
        "object_0",
        required_properties=[HOLLOW, PERSON_CAN_HAVE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    liquid_0 = object_variable("liquid_0", required_properties=[LIQUID])
    person_0 = standard_object("person_0",
                               PERSON,
                               banned_properties=[IS_SPEAKER, IS_ADDRESSEE])
    return make_drink_template(person_0, liquid_0, object_0, None)
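
A short sketch of consuming the template above with the sampling helpers used throughout these examples (values are illustrative):

# Draw a single concrete drinking situation from the template.
drink_situations = tuple(
    sampled(
        drink_test_template(),
        ontology=GAILA_PHASE_1_ONTOLOGY,
        chooser=PHASE1_CHOOSER_FACTORY(),
        max_to_sample=1,
    ))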
Example #11
def test_pursuit_preposition_in_learner(language_mode, learner):
    water = object_variable("water", WATER)
    cup = standard_object("cup", CUP)
    language_generator = phase1_language_generator(language_mode)
    in_train_curriculum = phase1_instances(
        "Preposition In Unit Train",
        situations=sampled(
            _in_template(water, cup, immutableset(), is_training=True),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=10,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )
    in_test_curriculum = phase1_instances(
        "Preposition In Unit Test",
        situations=sampled(
            _in_template(water, cup, immutableset(), is_training=False),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )

    processing_learner = learner(language_mode)

    for (
            _,
            linguistic_description,
            perceptual_representation,
    ) in in_train_curriculum.instances():
        processing_learner.observe(
            LearningExample(perceptual_representation, linguistic_description))

    for (
            _,
            test_linguistic_description,
            test_perceptual_representation,
    ) in in_test_curriculum.instances():
        descriptions_from_learner = processing_learner.describe(
            test_perceptual_representation)
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert gold in [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ]
Example #12
def _make_in_tests(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = object_variable(
        "figure_0",
        THING,
        banned_properties=[IS_BODY_PART, IS_SPEAKER, IS_ADDRESSEE])
    figure_1 = standard_object(
        "figure_1",
        THING,
        banned_properties=[IS_BODY_PART, IS_SPEAKER, IS_ADDRESSEE])
    ground_0 = standard_object(
        "ground_0",
        THING,
        required_properties=[HOLLOW],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    ground_1 = standard_object(
        "ground_1",
        THING,
        required_properties=[HOLLOW],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Testing In",
        chain(*[
            sampled(
                _in_template(
                    figure,
                    ground,
                    make_noise_objects(noise_objects),
                    is_training=False,
                ),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 5,
                block_multiple_of_the_same_type=True,
            ) for figure in figures for ground in grounds
        ]),
        language_generator=language_generator,
    )
Example #13
def test_perception_graph_post_init_edge_cases():
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)
    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)
    perceptual_representation = only(train_curriculum.instances())[2]
    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    temporal_digraph = temporal_perception_graph.copy_as_digraph()
    # Test valid edge label
    # The only feasible test seems to be instantiation, since creating a corrupt instance throws the same RuntimeError
    with pytest.raises(RuntimeError):
        TemporallyScopedEdgeLabel(None)

    # In a dynamic graph, all edge labels must be wrapped in TemporallyScopedEdgeLabel
    new_graph = DiGraph()
    for (source, target) in temporal_digraph.edges():
        new_graph.add_edge(source, target)
        new_graph[source][target]["label"] = None
    with pytest.raises(RuntimeError):
        PerceptionGraph(new_graph, dynamic=True)

    # TemporallyScopedEdgeLabels may not appear in a static graph
    new_graph = DiGraph()
    for (source, target) in temporal_digraph.edges():
        new_graph.add_edge(source, target)
        new_graph[source][target]["label"] = TemporallyScopedEdgeLabel(
            "attribute", [TemporalScope.AFTER])
    with pytest.raises(RuntimeError):
        PerceptionGraph(new_graph)

    # Every edge in a PerceptionGraph must have a 'label'
    new_graph = DiGraph()
    for (source, target) in temporal_digraph.edges():
        new_graph.add_edge(source, target)
    with pytest.raises(RuntimeError):
        PerceptionGraph(new_graph)
Example #14
def body_part_object(
    debug_handle: str,
    root_node: OntologyNode = THING,
    *,
    required_properties: Iterable[OntologyNode] = tuple(),
    banned_properties: Iterable[OntologyNode] = immutableset(),
    added_properties: Iterable[Union[
        OntologyNode, TemplatePropertyVariable]] = immutableset(),
) -> TemplateObjectVariable:
    """
    Method for generating template objects that are body parts.
    """
    required_properties_final = [IS_BODY_PART]
    required_properties_final.extend(required_properties)
    return object_variable(
        debug_handle=debug_handle,
        root_node=root_node,
        banned_properties=banned_properties,
        required_properties=required_properties_final,
        added_properties=added_properties,
    )
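
A minimal usage sketch for body_part_object; the HAND ontology node below is an assumption, not taken from the examples above. IS_BODY_PART is added to the required properties automatically.

# Hypothetical: HAND is assumed to be an OntologyNode for a body part.
hand_0 = body_part_object("hand_0", root_node=HAND)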
Example #15
def test_copy_with_temporal_scopes_content():
    """
    Tests whether copy_with_temporal_scopes converts graphs to be dynamic as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    for (source, target) in perception_graph.copy_as_digraph().edges():
        assert not isinstance(
            perception_graph.copy_as_digraph()[source][target]["label"],
            TemporallyScopedEdgeLabel,
        )
    for (source,
         target) in temporal_perception_graph.copy_as_digraph().edges():
        # Check type, and then the content
        label = temporal_perception_graph.copy_as_digraph(
        )[source][target]["label"]
        assert isinstance(label, TemporallyScopedEdgeLabel)
        assert (label.attribute == perception_graph.copy_as_digraph()[source]
                [target]["label"])
        assert all(specifier in [TemporalScope.AFTER]
                   for specifier in label.temporal_specifiers)
Example #16
def standard_object(
    debug_handle: str,
    root_node: OntologyNode = INANIMATE_OBJECT,
    *,
    required_properties: Iterable[OntologyNode] = tuple(),
    banned_properties: Iterable[OntologyNode] = immutableset(),
    added_properties: Iterable[Union[
        OntologyNode, TemplatePropertyVariable]] = immutableset(),
    banned_ontology_types: Iterable[OntologyNode] = immutableset(),
) -> TemplateObjectVariable:
    """
    Preferred method of generating template objects, as this automatically prevents liquids and
    body parts from object selection.
    """
    banned_properties_final = [IS_BODY_PART, LIQUID]
    banned_properties_final.extend(banned_properties)
    return object_variable(
        debug_handle=debug_handle,
        root_node=root_node,
        banned_properties=banned_properties_final,
        required_properties=required_properties,
        added_properties=added_properties,
        banned_ontology_types=banned_ontology_types,
    )
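
A usage sketch for standard_object mirroring calls that appear in the examples above; IS_BODY_PART and LIQUID are banned automatically on top of whatever is passed in.

# A cup that additionally may not be the speaker or the addressee.
cup = standard_object("cup", CUP, banned_properties=[IS_SPEAKER, IS_ADDRESSEE])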
Example #17
def test_syntactically_infeasible_partial_match():
    """
    Tests whether syntactic feasibility works as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    # Create an altered perception graph by adding extra edges to the color nodes
    altered_perception_digraph = perception.copy_as_digraph()
    nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, we add an extra edge to it
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            nodes.append(node)

    # change edge information
    for node in nodes:
        random_node = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(node, random_node, label=PART_OF)
        random_node_2 = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(random_node_2, node, label=PART_OF)

    altered_perception_perception_graph = PerceptionGraph(
        altered_perception_digraph)
    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        altered_perception_perception_graph).perception_graph_pattern

    # Start the matching process, get a partial match
    first_matcher = altered_perception_pattern.matcher(
        altered_perception_perception_graph, match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        first_matcher.matches(use_lookahead_pruning=True), None)
    partial_mapping = partial_match.pattern_node_to_matched_graph_node
    # Try to extend the partial mapping; we expect a syntactic infeasibility runtime error
    second_matcher = altered_perception_pattern.matcher(
        perception, match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from the first matcher against the altered graph)
    # syntactically doesn't fit the second matcher's target, the original graph
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
Example #18
def make_simple_pursuit_curriculum(
    num_instances: Optional[int],
    num_noise_instances: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
    *,
    target_objects=[BALL, CHAIR, MOM, DAD, BABY, TABLE, DOG, BIRD, BOX],
    num_objects_in_instance: int = 3,
    perception_generator: HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator = GAILA_PHASE_2_PERCEPTION_GENERATOR,
    add_gaze: bool = False,
) -> Phase1InstanceGroup:
    """
    Creates a Pursuit-learning curriculum for a set of standard objects. Each instance in the
    curriculum is a set of *num_objects_in_instance* objects paired with a word.
    We say an instance is non-noisy if the word refers to one of the objects in the set,
    and noisy if none of the objects corresponds to the word.
    For each type of object of interest, we generate *num_instances* instances,
    of which *num_noise_instances* will be noisy.
    """
    if not num_instances:
        num_instances = 10
    if not num_noise_instances:
        num_noise_instances = 0
    if num_noise_instances > num_instances:
        raise RuntimeError("Cannot have more noise than regular exemplars")

    noise_object_variables = [
        standard_object("obj-" + str(idx), banned_properties=[IS_SPEAKER, IS_ADDRESSEE])
        for idx in range(num_objects_in_instance)
    ]

    # A template that is used to replace situations and perceptions (not linguistic description) in noise instances
    noise_template = Phase1SituationTemplate(
        "simple_pursuit-noise",
        salient_object_variables=[noise_object_variables[0]],
        background_object_variables=noise_object_variables[1:],
    )

    all_instances = []
    # Generate phase_1 instance groups for each template (i.e each target word)
    for target_object in target_objects:
        target_object_variable = object_variable(
            target_object.handle + "-target", target_object
        )
        # For each target object, create a template with a specific target object to generate learning instances.
        # The target object (e.g. Ball) is constant across all instances while the other objects vary. Hence, the target object is
        # a salient object (used for the linguistic description) while the remaining objects are background objects.
        object_is_present_template = Phase1SituationTemplate(
            "simple_pursuit",
            salient_object_variables=[target_object_variable],
            background_object_variables=noise_object_variables[:-1],
            gazed_objects=[target_object_variable] if add_gaze else [],
        )
        non_noise_instances = list(
            phase1_instances(
                "simple_pursuit_curriculum",
                sampled(
                    object_is_present_template,
                    max_to_sample=num_instances - num_noise_instances,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_2_ONTOLOGY,
                ),
                perception_generator=perception_generator,
                language_generator=language_generator,
            ).instances()
        )

        # Filter out instances in which the target is present more than once, to ensure "a ball" instead of "the balls"
        for instance in non_noise_instances:
            # If the target appears exactly once (does not appear in background objects) keep using this instance
            situation = instance[0]
            if situation and not any(
                [obj.ontology_node == target_object for obj in situation.other_objects]
            ):
                all_instances.append(instance)

        # Create instances for noise
        noise_instances = phase1_instances(
            "simple_pursuit_curriculum",
            sampled(
                noise_template,
                max_to_sample=num_noise_instances,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_2_ONTOLOGY,
            ),
            perception_generator=perception_generator,
            language_generator=language_generator,
        ).instances()
        # [1] is the index of the linguistic description in an instance
        # It doesn't matter which non-noise instance is chosen
        # because they all have the object type name as their linguistic description.
        target_object_linguistic_description = all_instances[-1][1]
        for (situation, _, perception) in noise_instances:
            # A noise instance needs to have the word for our target object
            # while not actually having our target object be present.
            # However, our language generator can't generate irrelevant language for a situation.
            # Therefore, we generate the instance as normal above,
            # but here we replace its linguistic description with the word for the target object.

            # Skip the noise instance if the target object appears in the noise data
            if situation and not any(
                [obj.ontology_node == target_object for obj in situation.all_objects]
            ):
                all_instances.append(
                    (situation, target_object_linguistic_description, perception)
                )

    description = (
        f"simple_pursuit_curriculum_examples-{num_instances}_objects-{num_objects_in_instance}_noise-"
        f"{num_noise_instances} "
    )
    rng = random.Random()
    rng.seed(0)
    random.shuffle(all_instances, rng.random)
    final_instance_group: Phase1InstanceGroup = ExplicitWithSituationInstanceGroup(
        description, all_instances
    )
    return final_instance_group
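
A hedged usage sketch for the curriculum factory above; the parameter values are illustrative, and the language-generator construction mirrors the other examples here.

# Illustrative: 30 instances per target object, 3 of them noisy.
curriculum = make_simple_pursuit_curriculum(
    num_instances=30,
    num_noise_instances=3,
    language_generator=phase1_language_generator(LanguageMode.ENGLISH),
    num_objects_in_instance=3,
    add_gaze=False,
)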
Example #19
def test_learner_as_default_addressee():
    learner = object_variable("learner", root_node=LEARNER)
    ball = object_variable("ball", root_node=BALL)
    template_with_learner = Phase1SituationTemplate(
        "template with learner",
        salient_object_variables=[learner, ball],
        asserted_always_relations=[near(learner, ball)],
    )

    template_with_out_learner = Phase1SituationTemplate(
        "template with out learner",
        salient_object_variables=[object_variable("ball", root_node=BALL)],
    )

    template_with_addressee = Phase1SituationTemplate(
        "template with addressee",
        salient_object_variables=[
            object_variable("mom",
                            root_node=MOM,
                            added_properties=[IS_ADDRESSEE])
        ],
    )

    situation_with_learner = tuple(
        sampled(
            template_with_learner,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=RandomChooser.for_seed(0),
            max_to_sample=1,
        ))

    situation_with_out_learner = tuple(
        sampled(
            template_with_out_learner,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=RandomChooser.for_seed(0),
            max_to_sample=1,
        ))

    situation_with_addressee = tuple(
        sampled(
            template_with_addressee,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=RandomChooser.for_seed(0),
            max_to_sample=1,
        ))

    for object_ in situation_with_learner[0].all_objects:
        if object_.ontology_node == LEARNER:
            assert IS_ADDRESSEE in object_.properties
            break

    assert situation_with_learner[0].axis_info
    assert situation_with_learner[0].axis_info.addressee

    assert len(situation_with_out_learner[0].all_objects) == 2

    for object_ in situation_with_out_learner[0].all_objects:
        if object_.ontology_node == LEARNER:
            assert IS_ADDRESSEE in object_.properties
            break

    assert situation_with_out_learner[0].axis_info
    assert situation_with_out_learner[0].axis_info.addressee

    for object_ in situation_with_addressee[0].all_objects:
        if object_.ontology_node == LEARNER:
            assert False

    assert situation_with_addressee[0].axis_info
    assert situation_with_addressee[0].axis_info.addressee
Example #20
from adam.perception.developmental_primitive_perception import (
    DevelopmentalPrimitivePerceptionFrame, )
from adam.perception.high_level_semantics_situation_to_developmental_primitive_perception import (
    GAILA_PHASE_1_PERCEPTION_GENERATOR,
    HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator,
)
from adam.random_utils import RandomChooser
from adam.situation.high_level_semantics_situation import HighLevelSemanticsSituation
from adam.situation.templates.phase1_templates import (
    object_variable,
    TemplatePropertyVariable,
    TemplateObjectVariable,
)

GROUND_OBJECT_TEMPLATE = object_variable("ground", GROUND)
PHASE1_CHOOSER_FACTORY = lambda: RandomChooser.for_seed(0)  # noqa: E731
PHASE1_TEST_CHOOSER_FACTORY = lambda: RandomChooser.for_seed(1)  # noqa: E731
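# Each factory call returns a fresh RandomChooser with a fixed seed, so curriculum
# sampling is deterministic across runs; the test factory uses a different seed so
# that test samples differ from training samples.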
Phase1InstanceGroup = InstanceGroup[  # pylint:disable=invalid-name
    HighLevelSemanticsSituation, LinearizedDependencyTree,
    DevelopmentalPrimitivePerceptionFrame, ]


def standard_object(
    debug_handle: str,
    root_node: OntologyNode = INANIMATE_OBJECT,
    *,
    required_properties: Iterable[OntologyNode] = tuple(),
    banned_properties: Iterable[OntologyNode] = immutableset(),
    added_properties: Iterable[Union[
        OntologyNode, TemplatePropertyVariable]] = immutableset(),
    banned_ontology_types: Iterable[OntologyNode] = immutableset(),
) -> TemplateObjectVariable:
    """
    Preferred method of generating template objects, as this automatically prevents liquids and
    body parts from object selection.
    """
    banned_properties_final = [IS_BODY_PART, LIQUID]
    banned_properties_final.extend(banned_properties)
    return object_variable(
        debug_handle=debug_handle,
        root_node=root_node,
        banned_properties=banned_properties_final,
        required_properties=required_properties,
        added_properties=added_properties,
        banned_ontology_types=banned_ontology_types,
    )
Example #21
def run_subset_learner_for_object(
    nodes: Iterable[OntologyNode], *, learner,
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree]):
    colored_obj_objects = [
        object_variable("obj-with-color",
                        node,
                        added_properties=[color_variable("color")])
        for node in nodes
    ]

    obj_templates = [
        Phase1SituationTemplate(
            "colored-obj-object",
            salient_object_variables=[colored_obj_object],
            syntax_hints=[IGNORE_COLORS],
        ) for colored_obj_object in colored_obj_objects
    ]

    obj_curriculum = phase1_instances(
        "all obj situations",
        flatten([
            all_possible(
                obj_template,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
            ) for obj_template in obj_templates
        ]),
        language_generator=language_generator,
    )

    test_obj_curriculum = phase1_instances(
        "obj test",
        situations=sampled(
            obj_templates[0],
            chooser=PHASE1_TEST_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
        ),
        language_generator=language_generator,
    )

    for training_stage in [obj_curriculum]:
        for (
                _,
                linguistic_description,
                perceptual_representation,
        ) in training_stage.instances():
            learner.observe(
                LearningExample(perceptual_representation,
                                linguistic_description))

    for test_instance_group in [test_obj_curriculum]:
        for (
                _,
                test_instance_language,
                test_instance_perception,
        ) in test_instance_group.instances():
            descriptions_from_learner = learner.describe(
                test_instance_perception)
            gold = test_instance_language.as_token_sequence()
            assert gold in [
                desc.as_token_sequence() for desc in descriptions_from_learner
            ]
Example #22
def test_pursuit_object_learner_with_gaze(language_mode):
    target_objects = [
        BALL,
        # PERSON,
        # CHAIR,
        # TABLE,
        DOG,
        # BIRD,
        BOX,
    ]

    language_generator = phase1_language_generator(language_mode)

    target_test_templates = []
    for obj in target_objects:
        # Create train and test templates for the target objects
        test_obj_object = object_variable("obj-with-color", obj)
        test_template = Phase1SituationTemplate(
            "colored-obj-object",
            salient_object_variables=[test_obj_object],
            syntax_hints=[IGNORE_COLORS],
            gazed_objects=[test_obj_object],
        )
        target_test_templates.extend(
            all_possible(
                test_template,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
            ))
    rng = random.Random()
    rng.seed(0)

    # We can use this to generate the actual pursuit curriculum
    train_curriculum = make_simple_pursuit_curriculum(
        target_objects=target_objects,
        num_instances=30,
        num_objects_in_instance=3,
        num_noise_instances=0,
        language_generator=language_generator,
        add_gaze=True,
    )

    test_obj_curriculum = phase1_instances(
        "obj test",
        situations=target_test_templates,
        language_generator=language_generator,
    )

    # All parameters should be in the range 0-1.
    # The learning factor works better when kept < 0.5.
    # The graph-matching threshold doesn't seem to matter much, as the match is usually
    # either complete or very small.
    # The lexicon threshold works better between 0.07 and 0.3, but we need to tune it because
    # otherwise we fail to lexicalize items sufficiently, as lexicon probabilities diminish over training.
    rng = random.Random()
    rng.seed(0)
    learner = IntegratedTemplateLearner(object_learner=PursuitObjectLearnerNew(
        learning_factor=0.05,
        graph_match_confirmation_threshold=0.7,
        lexicon_entry_threshold=0.7,
        rng=rng,
        smoothing_parameter=0.002,
        ontology=GAILA_PHASE_1_ONTOLOGY,
        language_mode=language_mode,
        rank_gaze_higher=True,
    ))
    for training_stage in [train_curriculum]:
        for (
                _,
                linguistic_description,
                perceptual_representation,
        ) in training_stage.instances():
            learner.observe(
                LearningExample(perceptual_representation,
                                linguistic_description))

    for test_instance_group in [test_obj_curriculum]:
        for (
                _,
                test_instance_language,
                test_instance_perception,
        ) in test_instance_group.instances():
            logging.info("lang: %s", test_instance_language)
            descriptions_from_learner = learner.describe(
                test_instance_perception)
            gold = test_instance_language.as_token_sequence()
            assert gold in [
                desc.as_token_sequence() for desc in descriptions_from_learner
            ]
Example #23
def test_copy_with_temporal_scope_pattern_content():
    """
    Tests whether copy_with_temporal_scopes converts patterns to be dynamic as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    perception_pattern = PerceptionGraphPattern.from_graph(
        perception_graph).perception_graph_pattern

    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    temporal_perception_pattern = perception_pattern.copy_with_temporal_scopes(
        required_temporal_scopes=TemporalScope.AFTER)

    # Exception while applying to dynamic pattern
    with pytest.raises(RuntimeError):
        temporal_perception_pattern.copy_with_temporal_scopes(
            required_temporal_scopes=TemporalScope.AFTER)

    for (source, target) in perception_pattern.copy_as_digraph().edges():
        assert not isinstance(
            perception_pattern.copy_as_digraph()[source][target]["predicate"],
            HoldsAtTemporalScopePredicate,
        )
    for (source,
         target) in temporal_perception_pattern.copy_as_digraph().edges():
        # Check type, and then the content
        predicate = temporal_perception_pattern.copy_as_digraph(
        )[source][target]["predicate"]
        # Test the HoldsAtTemporalScopePredicate dot label and predicate matching
        assert isinstance(predicate.dot_label(), str)
        assert predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          predicate.temporal_scopes))
        assert not predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          [TemporalScope.BEFORE]))
        assert isinstance(predicate, HoldsAtTemporalScopePredicate)
        assert (predicate.wrapped_edge_predicate == perception_pattern.
                copy_as_digraph()[source][target]["predicate"])
        assert len(predicate.temporal_scopes) == 1
        assert only(predicate.temporal_scopes) == TemporalScope.AFTER

    # Test normal matching behavior
    temporal_matcher = temporal_perception_pattern.matcher(
        temporal_perception_graph, match_mode=MatchMode.NON_OBJECT)
    first(temporal_matcher.matches(use_lookahead_pruning=True))

    # Test HoldsAtTemporalScopePredicate
    for (source, target) in perception_graph.copy_as_digraph().edges():
        label = "test edge label"
        edge_predicate = AnyEdgePredicate()
        temporal_predicate = HoldsAtTemporalScopePredicate(
            edge_predicate, [TemporalScope.AFTER])

        temporal_edge_label = TemporallyScopedEdgeLabel(
            label, [TemporalScope.AFTER])
        assert temporal_predicate(source, temporal_edge_label, target)
        # Non temporal edge exception
        with pytest.raises(RuntimeError):
            temporal_predicate(source, label, target)
Example #24
def test_allowed_matches_with_bad_partial_match():
    """
    Tests whether `PatternMatching`'s allowed_matches functionality works as intended when a bad
    partial match is specified.
    """
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    pattern1: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) == "box_0"
        })).perception_graph_pattern

    pattern2: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) in {"box_0", "the ground"}
        })).perception_graph_pattern

    pattern1_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern1._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_ground: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "the ground"),
    )

    matcher = PatternMatching(
        pattern=pattern1,
        graph_to_match_against=pattern2,
        matching_pattern_against_pattern=True,
        match_mode=MatchMode.OBJECT,
        allowed_matches=immutablesetmultidict([(pattern1_box, pattern2_box)]),
    )
    with pytest.raises(RuntimeError):
        first(
            matcher.matches(
                initial_partial_match={pattern1_box: pattern2_ground},
                use_lookahead_pruning=True,
            ),
            None,
        )
Example #25
def do_object_on_table_test(
    object_type_to_match: OntologyNode,
    object_schema: ObjectStructuralSchema,
    negative_object_ontology_node: OntologyNode,
):
    """
    Tests that the `PerceptionGraphMatcher` can match simple objects.
    """
    # We create four situations:
    # an object_to_match above or under a table, colored red or blue
    color = color_variable("color")
    object_to_match = object_variable(
        debug_handle=object_type_to_match.handle,
        root_node=object_type_to_match,
        added_properties=[color],
    )
    table = standard_object("table_0", TABLE)

    object_on_table_template = Phase1SituationTemplate(
        "object_to_match-on-table",
        salient_object_variables=[object_to_match, table],
        asserted_always_relations=[
            bigger_than(table, object_to_match),
            on(object_to_match, table),
        ],
    )

    object_under_table_template = Phase1SituationTemplate(
        "object_to_match-under-table",
        salient_object_variables=[object_to_match, table],
        asserted_always_relations=[
            bigger_than(table, object_to_match),
            above(table, object_to_match),
        ],
    )

    # We test that a perceptual pattern for "object_to_match" matches in all four cases.
    object_to_match_pattern = PerceptionGraphPattern.from_schema(
        object_schema, perception_generator=GAILA_PHASE_1_PERCEPTION_GENERATOR)

    situations_with_object_to_match = chain(
        all_possible_test(object_on_table_template),
        all_possible_test(object_under_table_template),
    )

    for (_,
         situation_with_object) in enumerate(situations_with_object_to_match):
        perception = GAILA_PHASE_1_PERCEPTION_GENERATOR.generate_perception(
            situation_with_object, chooser=RandomChooser.for_seed(0))
        perception_graph = PerceptionGraph.from_frame(perception.frames[0])
        # perception_graph.render_to_file(f"object_to_match {idx}", out_dir / f"object_to_match
        # -{idx}.pdf")
        # object_to_match_pattern.render_to_file(f"object_to_match pattern", out_dir /
        # "object_to_match_pattern.pdf")
        matcher = object_to_match_pattern.matcher(perception_graph,
                                                  match_mode=MatchMode.OBJECT)
        # debug_matching = matcher.debug_matching(
        #    use_lookahead_pruning=False, render_match_to=Path("/Users/gabbard/tmp")
        # )
        result = any(matcher.matches(use_lookahead_pruning=False))
        if not result:
            return False

    # Now let's create the same situations, but substitute a negative_object for an object_to_match.
    negative_object = object_variable(
        debug_handle=negative_object_ontology_node.handle,
        root_node=negative_object_ontology_node,
        added_properties=[color],
    )
    negative_object_on_table_template = Phase1SituationTemplate(
        "negative_object-on-table",
        salient_object_variables=[negative_object, table],
        asserted_always_relations=[
            bigger_than(table, negative_object),
            on(negative_object, table),
        ],
    )

    negative_object_under_table_template = Phase1SituationTemplate(
        "negative_object-under-table",
        salient_object_variables=[negative_object, table],
        asserted_always_relations=[
            bigger_than(table, negative_object),
            above(table, negative_object),
        ],
    )

    situations_with_negative_object = chain(
        all_possible_test(negative_object_on_table_template),
        all_possible_test(negative_object_under_table_template),
    )

    # The pattern should now fail to match.
    for situation_with_negative_object in situations_with_negative_object:
        perception = GAILA_PHASE_1_PERCEPTION_GENERATOR.generate_perception(
            situation_with_negative_object, chooser=RandomChooser.for_seed(0))
        perception_graph = PerceptionGraph.from_frame(perception.frames[0])
        if any(
                object_to_match_pattern.matcher(
                    perception_graph, match_mode=MatchMode.OBJECT).matches(
                        use_lookahead_pruning=True)):
            return False
    return True
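
A hypothetical invocation of the helper above; the _BALL_SCHEMA name is a placeholder for however a ball's ObjectStructuralSchema would be obtained, which these examples do not show.

# Hypothetical: the ball pattern should match balls on/under tables but not cups.
assert do_object_on_table_test(BALL, _BALL_SCHEMA, negative_object_ontology_node=CUP)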
Example #26
def test_semantically_infeasible_partial_match():
    """
    Tests whether semantic feasibility works as intended
    """

    target_object = BOX
    # Create train and test templates for the target objects
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    whole_perception_pattern = PerceptionGraphPattern.from_graph(
        perception).perception_graph_pattern

    # Create an altered perception graph in which we replace each color node with a black one
    altered_perception_digraph = perception.copy_as_digraph()
    nodes_to_remove = []
    edges = []
    different_nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, we make it black
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            new_node = (RgbColorPerception(0, 0, 0), node[1])
            # Get edge information
            for edge in perception.copy_as_digraph().edges(data=True):
                if edge[0] == node:
                    edges.append((new_node, edge[1], edge[2]))
                if edge[1] == node:
                    edges.append((edge[0], new_node, edge[2]))
            nodes_to_remove.append(node)
            different_nodes.append(new_node)

    # remove original node
    altered_perception_digraph.remove_nodes_from(nodes_to_remove)

    # add new nodes
    for node in different_nodes:
        altered_perception_digraph.add_node(node)
    # add edge information
    for edge in edges:
        altered_perception_digraph.add_edge(edge[0], edge[1])
        for k, v in edge[2].items():
            altered_perception_digraph[edge[0]][edge[1]][k] = v

    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        PerceptionGraph(altered_perception_digraph)).perception_graph_pattern

    partial_digraph = altered_perception_pattern.copy_as_digraph()
    partial_digraph.remove_nodes_from([
        node for node in partial_digraph.nodes
        if isinstance(node, IsColorNodePredicate)
    ])

    # Start the matching process, get a partial match
    matcher = whole_perception_pattern.matcher(perception,
                                               match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        matcher.matches(use_lookahead_pruning=True))
    partial_mapping = partial_match.pattern_node_to_matched_graph_node

    # Try to extend the partial mapping, we expect a semantic infeasibility runtime error
    second_matcher = whole_perception_pattern.matcher(
        PerceptionGraph(altered_perception_digraph),
        match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from first matcher with original perception graph)
    # semantically doesn't match the one in the altered version (second matcher with altered graph)
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
Example #27
def test_last_failed_pattern_node():
    """
    Tests whether `MatchFailure` can find the correct node.
    """

    target_object = BOX
    # Create train and test templates for the target objects
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    for (_, _, perceptual_representation) in train_curriculum.instances():
        # Original perception graph
        perception = graph_without_learner(
            PerceptionGraph.from_frame(perceptual_representation.frames[0]))

        # Original perception pattern
        whole_perception_pattern = PerceptionGraphPattern.from_graph(
            perception).perception_graph_pattern
        # Create an altered perception graph in which we replace the color node
        altered_perception_digraph = perception.copy_as_digraph()
        nodes_to_remove = []
        edges = []
        different_nodes = []
        for node in perception.copy_as_digraph().nodes:
            # If we find a color node, we make it black
            if isinstance(node, tuple) and isinstance(node[0],
                                                      RgbColorPerception):
                new_node = (RgbColorPerception(0, 0, 0), 42)
                # Get edge information
                for edge in perception.copy_as_digraph().edges(data=True):
                    if edge[0] == node:
                        edges.append((new_node, edge[1], edge[2]))
                    if edge[1] == node:
                        edges.append((edge[0], new_node, edge[2]))
                nodes_to_remove.append(node)
                different_nodes.append(new_node)

        # add new nodes
        for node in different_nodes:
            altered_perception_digraph.add_node(node)
        # add edge information
        for edge in edges:
            altered_perception_digraph.add_edge(edge[0], edge[1])
            for k, v in edge[2].items():
                altered_perception_digraph[edge[0]][edge[1]][k] = v
        # remove original node
        altered_perception_digraph.remove_nodes_from(nodes_to_remove)

        # Start the matching process
        matcher = whole_perception_pattern.matcher(
            PerceptionGraph(altered_perception_digraph),
            match_mode=MatchMode.NON_OBJECT)
        match_or_failure = matcher.first_match_or_failure_info()
        assert isinstance(match_or_failure, PatternMatching.MatchFailure)
        assert isinstance(match_or_failure.last_failed_pattern_node,
                          IsColorNodePredicate)