Example No. 1
def _make_drink_cups_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:

    templates = []
    for cup in [CUP, CUP_2, CUP_3, CUP_4]:
        cup_obj = standard_object("cup", cup)
        liquid_0 = object_variable("liquid_0", required_properties=[LIQUID])
        person_0 = standard_object(
            "person_0", PERSON, banned_properties=[IS_SPEAKER, IS_ADDRESSEE])

        templates.append(
            Phase1SituationTemplate(
                "drink-cup",
                salient_object_variables=[liquid_0, person_0, cup_obj],
                background_object_variables=make_noise_objects(noise_objects),
                actions=[
                    Action(
                        DRINK,
                        argument_roles_to_fillers=[(AGENT, person_0),
                                                   (THEME, liquid_0)],
                        auxiliary_variable_bindings=[(DRINK_CONTAINER_AUX,
                                                      cup_obj)],
                    )
                ],
                asserted_always_relations=[inside(liquid_0, cup_obj)],
            ))

    return phase2_instances(
        "drink - cup",
        chain(*[
            sampled(
                cup_template,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_2_ONTOLOGY,
                max_to_sample=num_samples,
            ) if num_samples else all_possible(
                cup_template,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_2_ONTOLOGY,
            ) for cup_template in templates
        ]),
        perception_generator=GAILA_PHASE_2_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
Example No. 2
def _near_template(
    figure: TemplateObjectVariable,
    ground: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
    *,
    is_training: bool,
) -> Phase1SituationTemplate:
    handle = "training" if is_training else "testing"
    return Phase1SituationTemplate(
        f"preposition-{handle}-{figure.handle}-near-{ground.handle}",
        salient_object_variables=[figure, ground],
        background_object_variables=background,
        asserted_always_relations=[near(figure, ground)],
        gazed_objects=[figure],
        syntax_hints=[USE_NEAR],
    )
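The template above only defines a situation schema; as in the other examples here, it still has to be expanded into concrete instances with a chooser and an ontology. A minimal usage sketch follows, assuming the helpers used elsewhere in these examples (the particular figure/ground objects and sample count are illustrative assumptions, not part of the original function):

# Hypothetical expansion of _near_template into an instance group.
# The concrete objects and max_to_sample value are assumptions for illustration.
figure = standard_object("ball_0", BALL)
ground = standard_object("table_0", TABLE)

near_instances = phase1_instances(
    "ball-near-table",
    situations=sampled(
        _near_template(figure, ground, background=immutableset(), is_training=True),
        chooser=PHASE1_CHOOSER_FACTORY(),
        ontology=GAILA_PHASE_1_ONTOLOGY,
        max_to_sample=5,
    ),
)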
Example No. 3
def object_with_color(
    target_with_color: TemplateObjectVariable,
    *,
    add_noise: bool = False,  # whether to include the background noise objects/relations
    background_objects: Iterable[TemplateObjectVariable] = immutableset(),
    background_relations: Iterable[Relation[Any]] = immutableset(),
) -> Phase1SituationTemplate:
    return Phase1SituationTemplate(
        name=f"single-attribute-color-{target_with_color.handle}",
        salient_object_variables=[target_with_color],
        background_object_variables=background_objects
        if add_noise
        else immutableset(),
        asserted_always_relations=background_relations
        if add_noise
        else immutableset(),
    )
Example No. 4
def _on_template(
    figure: TemplateObjectVariable,
    ground: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
    *,
    is_training: bool,
    background_relations: Iterable[Relation[Any]] = immutableset(),
) -> Phase1SituationTemplate:
    handle = "training" if is_training else "testing"
    relations = [on(figure, ground)]
    relations.extend(background_relations)  # type: ignore
    return Phase1SituationTemplate(
        f"preposition-{handle}-{figure.handle}-on-{ground.handle}",
        salient_object_variables=[figure, ground],
        background_object_variables=background,
        asserted_always_relations=flatten_relations(relations),
        gazed_objects=[figure],
    )
Example No. 5
def test_perception_graph_post_init_edge_cases():
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)
    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)
    perceptual_representation = only(train_curriculum.instances())[2]
    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    temporal_digraph = temporal_perception_graph.copy_as_digraph()
    # Test valid edge label
    # The only feasible test here seems to be instantiation, since creating a corrupt instance throws the same RuntimeError
    with pytest.raises(RuntimeError):
        TemporallyScopedEdgeLabel(None)

    # In a dynamic graph, all edge labels must be wrapped in TemporallyScopedEdgeLabel
    new_graph = DiGraph()
    for (source, target) in temporal_digraph.edges():
        new_graph.add_edge(source, target)
        new_graph[source][target]["label"] = None
    with pytest.raises(RuntimeError):
        PerceptionGraph(new_graph, dynamic=True)

    # TemporallyScopedEdgeLabels may not appear in a static graph
    new_graph = DiGraph()
    for (source, target) in temporal_digraph.edges():
        new_graph.add_edge(source, target)
        new_graph[source][target]["label"] = TemporallyScopedEdgeLabel(
            "attribute", [TemporalScope.AFTER])
    with pytest.raises(RuntimeError):
        PerceptionGraph(new_graph)

    # Every edge in a PerceptionGraph must have a 'label'
    new_graph = DiGraph()
    for (source, target) in temporal_digraph.edges():
        new_graph.add_edge(source, target)
    with pytest.raises(RuntimeError):
        PerceptionGraph(new_graph)
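For contrast with the failure cases above, the same setup implies the valid construction: a dynamic graph in which every edge label is wrapped in a TemporallyScopedEdgeLabel. A minimal sketch, reusing the temporal_digraph built in the test above (the label value is an arbitrary assumption):

    # Sketch of the valid counterpart to the failure cases above:
    # every edge label is wrapped before constructing a dynamic PerceptionGraph.
    valid_graph = DiGraph()
    for (source, target) in temporal_digraph.edges():
        valid_graph.add_edge(source, target)
        valid_graph[source][target]["label"] = TemporallyScopedEdgeLabel(
            "attribute", [TemporalScope.AFTER])
    # Expected to construct without raising, since the graph is dynamic and all labels are scoped.
    PerceptionGraph(valid_graph, dynamic=True)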
Example No. 6
def test_recognize_in_transfer_of_possession(language_mode):
    dad = object_variable("person_0", DAD)
    baby = object_variable("person_1", BABY)
    chair = object_variable("give_object_0", CHAIR)

    giving_template = Phase1SituationTemplate(
        "dad-transfer-of-possession",
        salient_object_variables=[dad, baby, chair],
        actions=[
            Action(
                GIVE,
                argument_roles_to_fillers=[(AGENT, dad), (GOAL, baby),
                                           (THEME, chair)],
            )
        ],
        syntax_hints=[PREFER_DITRANSITIVE],
    )

    (_, _, perception) = first(
        phase1_instances(
            "foo",
            sampled(
                giving_template,
                max_to_sample=1,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                block_multiple_of_the_same_type=True,
            ),
        ).instances())

    perception_graph = PerceptionGraph.from_dynamic_perceptual_representation(
        perception)
    perception_semantic_alignment = PerceptionSemanticAlignment.create_unaligned(
        perception_graph)
    (_, description_to_matched_semantic_node
     ) = LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[language_mode].match_objects(
         perception_semantic_alignment)
    assert len(description_to_matched_semantic_node) == 4
    assert (language_mode == LanguageMode.ENGLISH and
            ("Dad", ) in description_to_matched_semantic_node) or (
                language_mode == LanguageMode.CHINESE and
                ("ba4 ba4", ) in description_to_matched_semantic_node)
Example No. 7
def _fly_over_template(
    # A bird flies over a ball
    agent: TemplateObjectVariable,
    object_in_path: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
) -> Phase1SituationTemplate:
    return Phase1SituationTemplate(
        f"{agent.handle}-flies-over-{object_in_path.handle}",
        salient_object_variables=[agent, object_in_path],
        background_object_variables=background,
        actions=[
            Action(
                FLY,
                argument_roles_to_fillers=[(AGENT, agent)],
                during=DuringAction(
                    at_some_point=flatten_relations(strictly_above(agent, object_in_path))
                ),
            )
        ],
    )
Example No. 8
def _go_to_template(
    agent: TemplateObjectVariable,
    goal_object: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
) -> Phase1SituationTemplate:
    return Phase1SituationTemplate(
        f"go_to-{agent.handle}-to-{goal_object.handle}",
        salient_object_variables=[agent, goal_object],
        background_object_variables=background,
        actions=[
            Action(
                GO,
                argument_roles_to_fillers=[
                    (AGENT, agent),
                    (GOAL, Region(goal_object, distance=PROXIMAL)),
                ],
            )
        ],
        after_action_relations=[near(agent, goal_object)],
        gazed_objects=[agent],
    )
Example No. 9
def _go_in_template(
    agent: TemplateObjectVariable,
    goal_object: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
) -> Phase1SituationTemplate:
    return Phase1SituationTemplate(
        f"go_in-{agent.handle}-in-{goal_object.handle}",
        salient_object_variables=[agent, goal_object],
        background_object_variables=background,
        actions=[
            Action(
                GO,
                argument_roles_to_fillers=[
                    (AGENT, agent),
                    (GOAL, Region(goal_object, distance=INTERIOR)),
                ],
            )
        ],
        constraining_relations=flatten_relations(bigger_than(goal_object, agent)),
        after_action_relations=[inside(agent, goal_object)],
    )
Example No. 10
def _jump_over_template(
    # "Mom jumps over a ball"
    agent: TemplateObjectVariable,
    object_in_path: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
) -> Phase1SituationTemplate:
    return Phase1SituationTemplate(
        f"{agent.handle}-jumps-over-{object_in_path.handle}",
        salient_object_variables=[agent, object_in_path],
        background_object_variables=background,
        actions=[
            Action(
                JUMP,
                argument_roles_to_fillers=[(AGENT, agent)],
                during=DuringAction(
                    at_some_point=flatten_relations(
                        strictly_above(agent, object_in_path)
                    ),
                    objects_to_paths=[
                        (
                            agent,
                            SpatialPath(
                                operator=VIA,
                                reference_source_object=Region(
                                    object_in_path,
                                    direction=GRAVITATIONAL_UP,
                                    distance=DISTAL,
                                ),
                                reference_destination_object=GROUND_OBJECT_TEMPLATE,
                            ),
                        )
                    ],
                ),
                auxiliary_variable_bindings=[
                    (JUMP_INITIAL_SUPPORTER_AUX, GROUND_OBJECT_TEMPLATE)
                ],
            )
        ],
        asserted_always_relations=[negate(on(agent, GROUND_OBJECT_TEMPLATE))],
    )
Example No. 11
def _put_in_template(
    agent: TemplateObjectVariable,
    theme: TemplateObjectVariable,
    goal_reference: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
) -> Phase1SituationTemplate:
    return Phase1SituationTemplate(
        f"{agent.handle}-puts-{theme.handle}-in-{goal_reference.handle}",
        salient_object_variables=[agent, theme, goal_reference],
        background_object_variables=background,
        actions=[
            Action(
                PUT,
                argument_roles_to_fillers=[
                    (AGENT, agent),
                    (THEME, theme),
                    (GOAL, Region(goal_reference, distance=INTERIOR)),
                ],
            )
        ],
        constraining_relations=flatten_relations(bigger_than(goal_reference, theme)),
    )
Example No. 12
def test_copy_with_temporal_scopes_content():
    """
    Tests whether copy_with_temporal_scopes converts graphs to be dynamic as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    for (source, target) in perception_graph.copy_as_digraph().edges():
        assert not isinstance(
            perception_graph.copy_as_digraph()[source][target]["label"],
            TemporallyScopedEdgeLabel,
        )
    for (source,
         target) in temporal_perception_graph.copy_as_digraph().edges():
        # Check type, and then the content
        label = temporal_perception_graph.copy_as_digraph(
        )[source][target]["label"]
        assert isinstance(label, TemporallyScopedEdgeLabel)
        assert (label.attribute == perception_graph.copy_as_digraph()[source]
                [target]["label"])
        assert all(specifier in [TemporalScope.AFTER]
                   for specifier in label.temporal_specifiers)
Example No. 13
def _over_template(
    figure: TemplateObjectVariable,
    ground: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
    *,
    is_training: bool,
    is_distal: bool,
    syntax_hints: Iterable[str] = [],
) -> Phase1SituationTemplate:
    handle = "training" if is_training else "testing"
    # TODO: currently this hack keeps the old implementation for English, which hasn't solved
    # https://github.com/isi-vista/adam/issues/802, and returns the new implementation for Chinese, which does solve it
    return Phase1SituationTemplate(
        f"preposition-{handle}-{figure.handle}-over-{ground.handle}",
        salient_object_variables=[figure, ground],
        background_object_variables=background,
        asserted_always_relations=[
            strictly_over(figure,
                          ground,
                          dist=DISTAL if is_distal else PROXIMAL)
        ],
        gazed_objects=[figure],
        syntax_hints=syntax_hints,
    )
Example No. 14
def _over_template(
    figure: TemplateObjectVariable,
    ground: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
    *,
    is_training: bool,
    is_distal: bool,
    syntax_hints: Iterable[str] = immutableset(),
    background_relations: Iterable[Relation[Any]] = immutableset(),
) -> Phase1SituationTemplate:
    handle = "training" if is_training else "testing"
    relations = [
        negate(on(figure, GROUND_OBJECT_TEMPLATE)),
        strictly_over(figure, ground, dist=DISTAL if is_distal else PROXIMAL),
    ]
    relations.extend(background_relations)  # type: ignore
    return Phase1SituationTemplate(
        f"preposition-{handle}-{figure.handle}-over-{ground.handle}",
        salient_object_variables=[figure, ground],
        background_object_variables=background,
        asserted_always_relations=flatten_relations(relations),
        gazed_objects=[figure],
        syntax_hints=syntax_hints,
    )
Example No. 15
def _fly_under_template(
    # A bird flies under a chair
    agent: TemplateObjectVariable,
    object_in_path: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
) -> Phase1SituationTemplate:
    return Phase1SituationTemplate(
        f"{agent.handle}-flies-under-{object_in_path.handle}",
        salient_object_variables=[agent, object_in_path],
        background_object_variables=background,
        actions=[
            Action(
                FLY,
                argument_roles_to_fillers=[(AGENT, agent)],
                during=DuringAction(
                    at_some_point=flatten_relations(strictly_above(object_in_path, agent))
                ),
            )
        ],
        asserted_always_relations=[negate(on(object_in_path, GROUND_OBJECT_TEMPLATE))],
        before_action_relations=[negate(on(object_in_path, GROUND_OBJECT_TEMPLATE))],
        after_action_relations=[negate(on(object_in_path, GROUND_OBJECT_TEMPLATE))],
        constraining_relations=flatten_relations(bigger_than(object_in_path, agent)),
    )
Example No. 16
def _in_front_template(
    figure: TemplateObjectVariable,
    ground: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
    *,
    is_training: bool,
    is_near: bool,
    background_relations: Iterable[Relation[Any]] = immutableset(),
) -> Phase1SituationTemplate:
    handle = "training" if is_training else "testing"
    direction = Direction(positive=True,
                          relative_to_axis=FacingAddresseeAxis(ground))
    relations = [
        near(figure, ground, direction=direction)
        if is_near else far(figure, ground, direction=direction)
    ]
    relations.extend(background_relations)  # type: ignore
    return Phase1SituationTemplate(
        f"preposition-{handle}-{figure.handle}-behind-{ground.handle}",
        salient_object_variables=[figure, ground],
        background_object_variables=background,
        asserted_always_relations=flatten_relations(relations),
        gazed_objects=[figure],
    )
Example No. 17
def do_object_on_table_test(
    object_type_to_match: OntologyNode,
    object_schema: ObjectStructuralSchema,
    negative_object_ontology_node: OntologyNode,
):
    """
    Tests that the `PerceptionGraphMatcher` can match simple objects.
    """
    # We create four situations:
    # an object_to_match above or under a table, colored red or blue
    color = color_variable("color")
    object_to_match = object_variable(
        debug_handle=object_type_to_match.handle,
        root_node=object_type_to_match,
        added_properties=[color],
    )
    table = standard_object("table_0", TABLE)

    object_on_table_template = Phase1SituationTemplate(
        "object_to_match-on-table",
        salient_object_variables=[object_to_match, table],
        asserted_always_relations=[
            bigger_than(table, object_to_match),
            on(object_to_match, table),
        ],
    )

    object_under_table_template = Phase1SituationTemplate(
        "object_to_match-under-table",
        salient_object_variables=[object_to_match, table],
        asserted_always_relations=[
            bigger_than(table, object_to_match),
            above(table, object_to_match),
        ],
    )

    # We test that a perceptual pattern for "object_to_match" matches in all four cases.
    object_to_match_pattern = PerceptionGraphPattern.from_schema(
        object_schema, perception_generator=GAILA_PHASE_1_PERCEPTION_GENERATOR)

    situations_with_object_to_match = chain(
        all_possible_test(object_on_table_template),
        all_possible_test(object_under_table_template),
    )

    for (_,
         situation_with_object) in enumerate(situations_with_object_to_match):
        perception = GAILA_PHASE_1_PERCEPTION_GENERATOR.generate_perception(
            situation_with_object, chooser=RandomChooser.for_seed(0))
        perception_graph = PerceptionGraph.from_frame(perception.frames[0])
        # perception_graph.render_to_file(f"object_to_match {idx}", out_dir / f"object_to_match
        # -{idx}.pdf")
        # object_to_match_pattern.render_to_file(f"object_to_match pattern", out_dir /
        # "object_to_match_pattern.pdf")
        matcher = object_to_match_pattern.matcher(perception_graph,
                                                  match_mode=MatchMode.OBJECT)
        # debug_matching = matcher.debug_matching(
        #    use_lookahead_pruning=False, render_match_to=Path("/Users/gabbard/tmp")
        # )
        result = any(matcher.matches(use_lookahead_pruning=False))
        if not result:
            return False

    # Now let's create the same situations, but substitute a negative_object for an object_to_match.
    negative_object = object_variable(
        debug_handle=negative_object_ontology_node.handle,
        root_node=negative_object_ontology_node,
        added_properties=[color],
    )
    negative_object_on_table_template = Phase1SituationTemplate(
        "negative_object-on-table",
        salient_object_variables=[negative_object, table],
        asserted_always_relations=[
            bigger_than(table, negative_object),
            on(negative_object, table),
        ],
    )

    negative_object_under_table_template = Phase1SituationTemplate(
        "negative_object-under-table",
        salient_object_variables=[negative_object, table],
        asserted_always_relations=[
            bigger_than(table, negative_object),
            above(table, negative_object),
        ],
    )

    situations_with_negative_object = chain(
        all_possible_test(negative_object_on_table_template),
        all_possible_test(negative_object_under_table_template),
    )

    # The pattern should now fail to match.
    for situation_with_negative_object in situations_with_negative_object:
        perception = GAILA_PHASE_1_PERCEPTION_GENERATOR.generate_perception(
            situation_with_negative_object, chooser=RandomChooser.for_seed(0))
        perception_graph = PerceptionGraph.from_frame(perception.frames[0])
        if any(
                object_to_match_pattern.matcher(
                    perception_graph, match_mode=MatchMode.OBJECT).matches(
                        use_lookahead_pruning=True)):
            return False
    return True
Example No. 18
def run_subset_learner_for_object(
    nodes: Iterable[OntologyNode], *, learner,
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree]):
    colored_obj_objects = [
        object_variable("obj-with-color",
                        node,
                        added_properties=[color_variable("color")])
        for node in nodes
    ]

    obj_templates = [
        Phase1SituationTemplate(
            "colored-obj-object",
            salient_object_variables=[colored_obj_object],
            syntax_hints=[IGNORE_COLORS],
        ) for colored_obj_object in colored_obj_objects
    ]

    obj_curriculum = phase1_instances(
        "all obj situations",
        flatten([
            all_possible(
                obj_template,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
            ) for obj_template in obj_templates
        ]),
        language_generator=language_generator,
    )

    test_obj_curriculum = phase1_instances(
        "obj test",
        situations=sampled(
            obj_templates[0],
            chooser=PHASE1_TEST_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
        ),
        language_generator=language_generator,
    )

    for training_stage in [obj_curriculum]:
        for (
                _,
                linguistic_description,
                perceptual_representation,
        ) in training_stage.instances():
            learner.observe(
                LearningExample(perceptual_representation,
                                linguistic_description))

    for test_instance_group in [test_obj_curriculum]:
        for (
                _,
                test_instance_language,
                test_instance_perception,
        ) in test_instance_group.instances():
            descriptions_from_learner = learner.describe(
                test_instance_perception)
            gold = test_instance_language.as_token_sequence()
            assert gold in [
                desc.as_token_sequence() for desc in descriptions_from_learner
            ]
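A hedged sketch of how this helper might be driven; the node list is an illustrative assumption, and any learner exposing observe/describe (for instance the integrated learner built in Example No. 19) could be passed in:

# Hypothetical invocation of run_subset_learner_for_object; values are assumptions.
run_subset_learner_for_object(
    [BALL, BOX, DOG],
    learner=learner,  # assumed to be constructed elsewhere, e.g. as in Example No. 19
    language_generator=phase1_language_generator(LanguageMode.ENGLISH),
)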
Example No. 19
def test_pursuit_object_learner_with_gaze(language_mode):
    target_objects = [
        BALL,
        # PERSON,
        # CHAIR,
        # TABLE,
        DOG,
        # BIRD,
        BOX,
    ]

    language_generator = phase1_language_generator(language_mode)

    target_test_templates = []
    for obj in target_objects:
        # Create train and test templates for the target objects
        test_obj_object = object_variable("obj-with-color", obj)
        test_template = Phase1SituationTemplate(
            "colored-obj-object",
            salient_object_variables=[test_obj_object],
            syntax_hints=[IGNORE_COLORS],
            gazed_objects=[test_obj_object],
        )
        target_test_templates.extend(
            all_possible(
                test_template,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
            ))
    rng = random.Random()
    rng.seed(0)

    # We can use this to generate the actual pursuit curriculum
    train_curriculum = make_simple_pursuit_curriculum(
        target_objects=target_objects,
        num_instances=30,
        num_objects_in_instance=3,
        num_noise_instances=0,
        language_generator=language_generator,
        add_gaze=True,
    )

    test_obj_curriculum = phase1_instances(
        "obj test",
        situations=target_test_templates,
        language_generator=language_generator,
    )

    # All parameters should be in the range 0-1.
    # The learning factor works better when kept < 0.5.
    # The graph matching threshold doesn't seem to matter that much, as the match often seems to be
    # either complete or very small.
    # The lexicon threshold works better between 0.07 and 0.3, but we need to play around with it because
    # otherwise we end up not lexicalizing items sufficiently, since the lexicon probability diminishes through training.
    rng = random.Random()
    rng.seed(0)
    learner = IntegratedTemplateLearner(object_learner=PursuitObjectLearnerNew(
        learning_factor=0.05,
        graph_match_confirmation_threshold=0.7,
        lexicon_entry_threshold=0.7,
        rng=rng,
        smoothing_parameter=0.002,
        ontology=GAILA_PHASE_1_ONTOLOGY,
        language_mode=language_mode,
        rank_gaze_higher=True,
    ))
    for training_stage in [train_curriculum]:
        for (
                _,
                linguistic_description,
                perceptual_representation,
        ) in training_stage.instances():
            learner.observe(
                LearningExample(perceptual_representation,
                                linguistic_description))

    for test_instance_group in [test_obj_curriculum]:
        for (
                _,
                test_instance_language,
                test_instance_perception,
        ) in test_instance_group.instances():
            logging.info("lang: %s", test_instance_language)
            descriptions_from_learner = learner.describe(
                test_instance_perception)
            gold = test_instance_language.as_token_sequence()
            assert gold in [
                desc.as_token_sequence() for desc in descriptions_from_learner
            ]
Example No. 20
def test_copy_with_temporal_scope_pattern_content():
    """
    Tests whether copy_with_temporal_scope converts patterns to be dynamic as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    perception_pattern = PerceptionGraphPattern.from_graph(
        perception_graph).perception_graph_pattern

    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    temporal_perception_pattern = perception_pattern.copy_with_temporal_scopes(
        required_temporal_scopes=TemporalScope.AFTER)

    # Applying copy_with_temporal_scopes to an already-dynamic pattern raises an exception
    with pytest.raises(RuntimeError):
        temporal_perception_pattern.copy_with_temporal_scopes(
            required_temporal_scopes=TemporalScope.AFTER)

    for (source, target) in perception_pattern.copy_as_digraph().edges():
        assert not isinstance(
            perception_pattern.copy_as_digraph()[source][target]["predicate"],
            HoldsAtTemporalScopePredicate,
        )
    for (source,
         target) in temporal_perception_pattern.copy_as_digraph().edges():
        # Check type, and then the content
        predicate = temporal_perception_pattern.copy_as_digraph(
        )[source][target]["predicate"]
        # Test the HoldsAtTemporalScopePredicate dot label and matches_predicate behavior
        assert isinstance(predicate.dot_label(), str)
        assert predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          predicate.temporal_scopes))
        assert not predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          [TemporalScope.BEFORE]))
        assert isinstance(predicate, HoldsAtTemporalScopePredicate)
        assert (predicate.wrapped_edge_predicate == perception_pattern.
                copy_as_digraph()[source][target]["predicate"])
        assert len(predicate.temporal_scopes) == 1
        assert only(predicate.temporal_scopes) == TemporalScope.AFTER

    # Test normal matching behavior
    temporal_matcher = temporal_perception_pattern.matcher(
        temporal_perception_graph, match_mode=MatchMode.NON_OBJECT)
    first(temporal_matcher.matches(use_lookahead_pruning=True))

    # Test HoldsAtTemporalScopePredicate
    for (source, target) in perception_graph.copy_as_digraph().edges():
        label = "test edge label"
        edge_predicate = AnyEdgePredicate()
        temporal_predicate = HoldsAtTemporalScopePredicate(
            edge_predicate, [TemporalScope.AFTER])

        temporal_edge_label = TemporallyScopedEdgeLabel(
            label, [TemporalScope.AFTER])
        assert temporal_predicate(source, temporal_edge_label, target)
        # Non temporal edge exception
        with pytest.raises(RuntimeError):
            temporal_predicate(source, label, target)
Example No. 21
def test_allowed_matches_with_bad_partial_match():
    """
    Tests whether PatternMatching's allowed_matches functionality works as intended when a bad
    partial match is specified.
    """
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    pattern1: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) == "box_0"
        })).perception_graph_pattern

    pattern2: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) in {"box_0", "the ground"}
        })).perception_graph_pattern

    pattern1_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern1._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_ground: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "the ground"),
    )

    matcher = PatternMatching(
        pattern=pattern1,
        graph_to_match_against=pattern2,
        matching_pattern_against_pattern=True,
        match_mode=MatchMode.OBJECT,
        allowed_matches=immutablesetmultidict([(pattern1_box, pattern2_box)]),
    )
    with pytest.raises(RuntimeError):
        first(
            matcher.matches(
                initial_partial_match={pattern1_box: pattern2_ground},
                use_lookahead_pruning=True,
            ),
            None,
        )
Example No. 22
def test_syntactically_infeasible_partial_match():
    """
    Tests whether syntactic feasibility works as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    # Create an altered perception graph by adding extra edges to any color nodes
    altered_perception_digraph = perception.copy_as_digraph()
    nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, we add an extra edge to it
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            nodes.append(node)

    # change edge information
    for node in nodes:
        random_node = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(node, random_node, label=PART_OF)
        random_node_2 = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(random_node_2, node, label=PART_OF)

    altered_perception_perception_graph = PerceptionGraph(
        altered_perception_digraph)
    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        altered_perception_perception_graph).perception_graph_pattern

    # Start the matching process, get a partial match
    first_matcher = altered_perception_pattern.matcher(
        altered_perception_perception_graph, match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        first_matcher.matches(use_lookahead_pruning=True), None)
    partial_mapping = partial_match.pattern_node_to_matched_graph_node
    # Try to extend the partial mapping; we expect a syntactic infeasibility RuntimeError
    second_matcher = altered_perception_pattern.matcher(
        perception, match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from first matcher with original perception graph)
    # syntactically doesn't match the one in the altered version (second matcher with altered graph)
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
Example No. 23
def test_semantically_infeasible_partial_match():
    """
    Tests whether semantic feasibility works as intended
    """

    target_object = BOX
    # Create train and test templates for the target objects
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    whole_perception_pattern = PerceptionGraphPattern.from_graph(
        perception).perception_graph_pattern

    # Create an altered perception graph in which we replace the color node with a black one
    altered_perception_digraph = perception.copy_as_digraph()
    nodes_to_remove = []
    edges = []
    different_nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, we make it black
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            new_node = (RgbColorPerception(0, 0, 0), node[1])
            # Get edge information
            for edge in perception.copy_as_digraph().edges(data=True):
                if edge[0] == node:
                    edges.append((new_node, edge[1], edge[2]))
                if edge[1] == node:
                    edges.append((edge[0], new_node, edge[2]))
            nodes_to_remove.append(node)
            different_nodes.append(new_node)

    # remove original node
    altered_perception_digraph.remove_nodes_from(nodes_to_remove)

    # add new nodes
    for node in different_nodes:
        altered_perception_digraph.add_node(node)
    # add edge information
    for edge in edges:
        altered_perception_digraph.add_edge(edge[0], edge[1])
        for k, v in edge[2].items():
            altered_perception_digraph[edge[0]][edge[1]][k] = v

    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        PerceptionGraph(altered_perception_digraph)).perception_graph_pattern

    partial_digraph = altered_perception_pattern.copy_as_digraph()
    partial_digraph.remove_nodes_from([
        node for node in partial_digraph.nodes
        if isinstance(node, IsColorNodePredicate)
    ])

    # Start the matching process, get a partial match
    matcher = whole_perception_pattern.matcher(perception,
                                               match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        matcher.matches(use_lookahead_pruning=True))
    partial_mapping = partial_match.pattern_node_to_matched_graph_node

    # Try to extend the partial mapping; we expect a semantic infeasibility RuntimeError
    second_matcher = whole_perception_pattern.matcher(
        PerceptionGraph(altered_perception_digraph),
        match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from first matcher with original perception graph)
    # semantically doesn't match the one in the altered version (second matcher with altered graph)
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
Example No. 24
def test_learner_as_default_addressee():
    learner = object_variable("learner", root_node=LEARNER)
    ball = object_variable("ball", root_node=BALL)
    template_with_learner = Phase1SituationTemplate(
        "template with learner",
        salient_object_variables=[learner, ball],
        asserted_always_relations=[near(learner, ball)],
    )

    template_with_out_learner = Phase1SituationTemplate(
        "template with out learner",
        salient_object_variables=[object_variable("ball", root_node=BALL)],
    )

    template_with_addressee = Phase1SituationTemplate(
        "template with addressee",
        salient_object_variables=[
            object_variable("mom",
                            root_node=MOM,
                            added_properties=[IS_ADDRESSEE])
        ],
    )

    situation_with_learner = tuple(
        sampled(
            template_with_learner,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=RandomChooser.for_seed(0),
            max_to_sample=1,
        ))

    situation_with_out_learner = tuple(
        sampled(
            template_with_out_learner,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=RandomChooser.for_seed(0),
            max_to_sample=1,
        ))

    situation_with_addressee = tuple(
        sampled(
            template_with_addressee,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=RandomChooser.for_seed(0),
            max_to_sample=1,
        ))

    for object_ in situation_with_learner[0].all_objects:
        if object_.ontology_node == LEARNER:
            assert IS_ADDRESSEE in object_.properties
            break

    assert situation_with_learner[0].axis_info
    assert situation_with_learner[0].axis_info.addressee

    assert len(situation_with_out_learner[0].all_objects) == 2

    for object_ in situation_with_out_learner[0].all_objects:
        if object_.ontology_node == LEARNER:
            assert IS_ADDRESSEE in object_.properties
            break

    assert situation_with_out_learner[0].axis_info
    assert situation_with_out_learner[0].axis_info.addressee

    for object_ in situation_with_addressee[0].all_objects:
        if object_.ontology_node == LEARNER:
            assert False

    assert situation_with_addressee[0].axis_info
    assert situation_with_addressee[0].axis_info.addressee
Example No. 25
def test_last_failed_pattern_node():
    """
    Tests whether `MatchFailure` can find the correct node.
    """

    target_object = BOX
    # Create train and test templates for the target objects
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    for (_, _, perceptual_representation) in train_curriculum.instances():
        # Original perception graph
        perception = graph_without_learner(
            PerceptionGraph.from_frame(perceptual_representation.frames[0]))

        # Original perception pattern
        whole_perception_pattern = PerceptionGraphPattern.from_graph(
            perception).perception_graph_pattern
        # Create an altered perception graph in which we replace the color node
        altered_perception_digraph = perception.copy_as_digraph()
        nodes_to_remove = []
        edges = []
        different_nodes = []
        for node in perception.copy_as_digraph().nodes:
            # If we find a color node, we make it black
            if isinstance(node, tuple) and isinstance(node[0],
                                                      RgbColorPerception):
                new_node = (RgbColorPerception(0, 0, 0), 42)
                # Get edge information
                for edge in perception.copy_as_digraph().edges(data=True):
                    if edge[0] == node:
                        edges.append((new_node, edge[1], edge[2]))
                    if edge[1] == node:
                        edges.append((edge[0], new_node, edge[2]))
                nodes_to_remove.append(node)
                different_nodes.append(new_node)

        # add new nodes
        for node in different_nodes:
            altered_perception_digraph.add_node(node)
        # add edge information
        for edge in edges:
            altered_perception_digraph.add_edge(edge[0], edge[1])
            for k, v in edge[2].items():
                altered_perception_digraph[edge[0]][edge[1]][k] = v
        # remove original node
        altered_perception_digraph.remove_nodes_from(nodes_to_remove)

        # Start the matching process
        matcher = whole_perception_pattern.matcher(
            PerceptionGraph(altered_perception_digraph),
            match_mode=MatchMode.NON_OBJECT)
        match_or_failure = matcher.first_match_or_failure_info()
        assert isinstance(match_or_failure, PatternMatching.MatchFailure)
        assert isinstance(match_or_failure.last_failed_pattern_node,
                          IsColorNodePredicate)
Example No. 26
def make_simple_pursuit_curriculum(
    num_instances: Optional[int],
    num_noise_instances: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
    *,
    target_objects=[BALL, CHAIR, MOM, DAD, BABY, TABLE, DOG, BIRD, BOX],
    num_objects_in_instance: int = 3,
    perception_generator: HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator = GAILA_PHASE_2_PERCEPTION_GENERATOR,
    add_gaze: bool = False,
) -> Phase1InstanceGroup:
    """
    Creates a Pursuit-learning curriculum with for a set of standard objects. Each instance in the curriculum is a set
    of *num_objects_in_instance* objects paired with a word.
    We say an instance is non-noisy if the word refers to one of the objects in the set.
    An instance is noisy if none of the objects correspond to the word.
    For each type of object of interest, we will generate *num_instances_per_object_type* instances,
    of which *num_noise_instances_per_object_type* will be noisy.
    """
    if not num_instances:
        num_instances = 10
    if not num_noise_instances:
        num_noise_instances = 0
    if num_noise_instances > num_instances:
        raise RuntimeError("Cannot have more noise than regular exemplars")

    noise_object_variables = [
        standard_object("obj-" + str(idx), banned_properties=[IS_SPEAKER, IS_ADDRESSEE])
        for idx in range(num_objects_in_instance)
    ]

    # A template that is used to replace situations and perceptions (not linguistic description) in noise instances
    noise_template = Phase1SituationTemplate(
        "simple_pursuit-noise",
        salient_object_variables=[noise_object_variables[0]],
        background_object_variables=noise_object_variables[1:],
    )

    all_instances = []
    # Generate phase_1 instance groups for each template (i.e each target word)
    for target_object in target_objects:
        target_object_variable = object_variable(
            target_object.handle + "-target", target_object
        )
        # For each target object, create a template with a specific target object in each instance to create learning instances.
        # There is one constant object (e.g. a ball) across all instances while the other objects vary. Hence, the target object is
        # a salient object (used for the linguistic description) while the remaining objects are background objects.
        object_is_present_template = Phase1SituationTemplate(
            "simple_pursuit",
            salient_object_variables=[target_object_variable],
            background_object_variables=noise_object_variables[:-1],
            gazed_objects=[target_object_variable] if add_gaze else [],
        )
        non_noise_instances = list(
            phase1_instances(
                "simple_pursuit_curriculum",
                sampled(
                    object_is_present_template,
                    max_to_sample=num_instances - num_noise_instances,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_2_ONTOLOGY,
                ),
                perception_generator=perception_generator,
                language_generator=language_generator,
            ).instances()
        )

        # Filter out instances in which the target is present more than once, to ensure "a ball" instead of "the balls"
        for instance in non_noise_instances:
            # If the target appears exactly once (does not appear in background objects) keep using this instance
            situation = instance[0]
            if situation and not any(
                [obj.ontology_node == target_object for obj in situation.other_objects]
            ):
                all_instances.append(instance)

        # Create instances for noise
        noise_instances = phase1_instances(
            "simple_pursuit_curriculum",
            sampled(
                noise_template,
                max_to_sample=num_noise_instances,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_2_ONTOLOGY,
            ),
            perception_generator=perception_generator,
            language_generator=language_generator,
        ).instances()
        # [1] is the index of the linguistic description in an instance
        # It doesn't matter which non-noise instance is chosen
        # because they all have the object type name as their linguistic description.
        target_object_linguistic_description = all_instances[-1][1]
        for (situation, _, perception) in noise_instances:
            # A noise instance needs to have the word for our target object
            # while not actually having our target object be present.
            # However, our language generator can't generate irrelevant language for a situation.
            # Therefore, we generate the instance as normal above,
            # but here we replace its linguistic description with the word for the target object.

            # Skip the noise instance if the target object appears in the noise data
            if situation and not any(
                [obj.ontology_node == target_object for obj in situation.all_objects]
            ):
                all_instances.append(
                    (situation, target_object_linguistic_description, perception)
                )

    description = (
        f"simple_pursuit_curriculum_examples-{num_instances}_objects-{num_objects_in_instance}_noise-"
        f"{num_noise_instances} "
    )
    rng = random.Random()
    rng.seed(0)
    rng.shuffle(all_instances)
    final_instance_group: Phase1InstanceGroup = ExplicitWithSituationInstanceGroup(
        description, all_instances
    )
    return final_instance_group
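A hedged sketch of how this factory is typically consumed, mirroring the call in Example No. 19 (the particular counts, target objects, learner, and language_mode are assumptions for illustration):

# Illustrative usage only; parameter values and the surrounding learner/language_mode are assumptions.
curriculum = make_simple_pursuit_curriculum(
    num_instances=30,
    num_noise_instances=0,
    language_generator=phase1_language_generator(language_mode),
    target_objects=[BALL, DOG, BOX],
    num_objects_in_instance=3,
)
for (_, linguistic_description, perceptual_representation) in curriculum.instances():
    learner.observe(
        LearningExample(perceptual_representation, linguistic_description))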