Example No. 1
def _make_sit_on_curriculum(
    num_samples: Optional[int],
    num_noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    sitter = standard_object(
        "sitter_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    seat = standard_object("sitting-surface",
                           INANIMATE_OBJECT,
                           required_properties=[CAN_BE_SAT_ON_BY_PEOPLE])
    return phase1_instances(
        "sit_on",
        chain(*[
            sampled(
                make_sit_template_intransitive(sitter,
                                               seat,
                                               num_noise_objects,
                                               surface=False,
                                               syntax_hints=False),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 25,
                block_multiple_of_the_same_type=True,
            ),
            sampled(
                make_sit_transitive(sitter,
                                    seat,
                                    num_noise_objects,
                                    surface=False,
                                    syntax_hints=False),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 25,
                block_multiple_of_the_same_type=True,
            ),
        ]),
        language_generator=language_generator,
    )
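A minimal usage sketch for a curriculum builder like the one above (hypothetical driver code; `phase1_language_generator` and `LanguageMode` are the helpers used in the later examples, and each instance yielded by `.instances()` is a (situation, linguistic description, perception) triple):

curriculum = _make_sit_on_curriculum(
    num_samples=10,
    num_noise_objects=0,
    language_generator=phase1_language_generator(LanguageMode.ENGLISH),
)
for (_, linguistic_description, _) in curriculum.instances():
    # Each description linearizes to a tuple of tokens
    print(linguistic_description.as_token_sequence())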
Example No. 2
def test_recognize_in_transfer_of_possession(language_mode):
    dad = object_variable("person_0", DAD)
    baby = object_variable("person_1", BABY)
    chair = object_variable("give_object_0", CHAIR)

    giving_template = Phase1SituationTemplate(
        "dad-transfer-of-possession",
        salient_object_variables=[dad, baby, chair],
        actions=[
            Action(
                GIVE,
                argument_roles_to_fillers=[(AGENT, dad), (GOAL, baby),
                                           (THEME, chair)],
            )
        ],
        syntax_hints=[PREFER_DITRANSITIVE],
    )

    (_, _, perception) = first(
        phase1_instances(
            "foo",
            sampled(
                giving_template,
                max_to_sample=1,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                block_multiple_of_the_same_type=True,
            ),
        ).instances())

    perception_graph = PerceptionGraph.from_dynamic_perceptual_representation(
        perception)
    perception_semantic_alignment = PerceptionSemanticAlignment.create_unaligned(
        perception_graph)
    (_, description_to_matched_semantic_node
     ) = LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[language_mode].match_objects(
         perception_semantic_alignment)
    assert len(description_to_matched_semantic_node) == 4
    assert (language_mode == LanguageMode.ENGLISH and
            ("Dad", ) in description_to_matched_semantic_node) or (
                language_mode == LanguageMode.CHINESE and
                ("ba4 ba4", ) in description_to_matched_semantic_node)
Example No. 3
def make_animal_eat_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    object_to_eat = standard_object("object_0", required_properties=[EDIBLE])
    animal = standard_object("eater_0", NONHUMAN_ANIMAL)
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "Animal-Eat-Curriculum",
        # Fressen (German: "to eat", of animals)
        sampled(
            make_eat_template(animal, object_to_eat, background),
            max_to_sample=num_samples if num_samples else 5,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=PHASE1_CHOOSER_FACTORY(),
        ),
        language_generator=language_generator,
    )
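The `num_samples if num_samples else 5` fallback recurs throughout these builders (Example No. 1 uses 25). A hypothetical helper that would make the default explicit, sketched here rather than taken from the original codebase:

def _sample_count(num_samples: Optional[int], default: int = 5) -> int:
    # Mirrors the inline pattern exactly, so 0 also falls back to the default.
    return num_samples if num_samples else default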
Example No. 4
def _make_under_tests(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object("figure_0", THING, banned_properties=[HOLLOW])
    figure_1 = standard_object("figure_1", THING, banned_properties=[HOLLOW])
    ground_0 = standard_object(
        "ground_0",
        THING,
        required_properties=[HAS_SPACE_UNDER],
        banned_properties=[HOLLOW],
    )

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0])

    return phase1_instances(
        "Preposition Testing Under",
        chain(*[
            sampled(
                _under_template(
                    figure,
                    ground,
                    make_noise_objects(noise_objects),
                    is_training=False,
                    is_distal=use_above_below,
                    syntax_hints=[USE_ABOVE_BELOW] if use_above_below else [],
                ),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 5,
                block_multiple_of_the_same_type=True,
            ) for figure in figures for ground in grounds
            # for distance in BOOL_SET
            for use_above_below in BOOL_SET
        ]),
        language_generator=language_generator,
    )
Example No. 5
def make_pass_toss_subtle_verb_distinction(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    tosser = standard_object("tosser_passer_0",
                             THING,
                             required_properties=[ANIMATE])
    tossee = standard_object("tossee_passee_0",
                             THING,
                             required_properties=[INANIMATE])
    goal = standard_object("move-goal-reference",
                           THING,
                           required_properties=[INANIMATE])
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "tossing_passing",
        chain(
            flatten([
                sampled(
                    make_pass_template(
                        tosser,
                        tossee,
                        goal,
                        use_adverbial_path_modifier=use_adverbial_path_modifier,
                        operator=operator,
                        spatial_properties=[HARD_FORCE]
                        if hard_force else [SOFT_FORCE],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for use_adverbial_path_modifier in BOOL_SET
                for hard_force in BOOL_SET for operator in [TOWARD, AWAY_FROM]
            ])),
        language_generator=language_generator,
    )
Example No. 6
def _make_behind_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object("ball", BALL)
    figure_1 = standard_object("book", BOOK)
    figure_2 = standard_object("dad", DAD)
    ground_0 = standard_object("cookie", COOKIE)
    ground_1 = standard_object("table", TABLE)
    ground_2 = standard_object("person",
                               PERSON,
                               banned_properties=[IS_SPEAKER, IS_ADDRESSEE])

    figures = immutableset([figure_0, figure_1, figure_2])
    grounds = immutableset([ground_0, ground_1, ground_2])

    return phase1_instances(
        "Preposition Training Behind",
        chain(*[
            flatten([
                sampled(
                    _behind_template(
                        figure,
                        ground,
                        make_noise_objects(noise_objects),
                        is_training=True,
                        is_near=close,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for figure in figures for ground in grounds
                for close in BOOL_SET
            ])
        ]),
        language_generator=language_generator,
    )
Example No. 7
def test_copy_with_temporal_scopes_content():
    """
    Tests whether copy_with_temporal_scopes converts graphs to be dynamic as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    for (source, target) in perception_graph.copy_as_digraph().edges():
        assert not isinstance(
            perception_graph.copy_as_digraph()[source][target]["label"],
            TemporallyScopedEdgeLabel,
        )
    for (source,
         target) in temporal_perception_graph.copy_as_digraph().edges():
        # Check type, and then the content
        label = temporal_perception_graph.copy_as_digraph(
        )[source][target]["label"]
        assert isinstance(label, TemporallyScopedEdgeLabel)
        assert (label.attribute == perception_graph.copy_as_digraph()[source]
                [target]["label"])
        assert all(specifier in [TemporalScope.AFTER]
                   for specifier in label.temporal_specifiers)
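In miniature, the property this test checks: copy_with_temporal_scopes wraps each edge label in a TemporallyScopedEdgeLabel that keeps the original label as its attribute. A sketch assuming the constructor used in Example No. 15 below:

label = TemporallyScopedEdgeLabel("some edge label", [TemporalScope.AFTER])
assert isinstance(label, TemporallyScopedEdgeLabel)
assert label.attribute == "some edge label"
assert all(scope in [TemporalScope.AFTER] for scope in label.temporal_specifiers)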
Example No. 8
def _make_beside_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object("ball", BALL)
    figure_1 = standard_object("book", BOOK)
    figure_2 = standard_object("mom", MOM)
    ground_0 = standard_object("cookie", COOKIE)
    ground_1 = standard_object("table", TABLE)
    ground_2 = standard_object("dad", DAD)

    figures = immutableset([figure_0, figure_1, figure_2])
    grounds = immutableset([ground_0, ground_1, ground_2])

    return phase1_instances(
        "Preposition Training Beside",
        chain(*[
            flatten([
                sampled(
                    _beside_template(
                        figure,
                        ground,
                        make_noise_objects(noise_objects),
                        is_right=True,
                        is_training=True,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                ) for figure in figures for ground in grounds
                # for direction in BOOL_SET
            ])
        ]),
        language_generator=language_generator,
    )
Example No. 9
def make_human_eat_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    object_to_eat = standard_object("object_0", required_properties=[EDIBLE])
    human = standard_object("eater_0",
                            PERSON,
                            banned_properties=[IS_SPEAKER, IS_ADDRESSEE])
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "Human-Eat-Curriculum",
        # Essen (German: "to eat", of humans)
        sampled(
            make_eat_template(human, object_to_eat, background),
            max_to_sample=num_samples if num_samples else 5,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=PHASE1_CHOOSER_FACTORY(),
        ),
        language_generator=language_generator,
    )
Example No. 10
def make_walk_run_subtle_verb_distinction(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:

    agent = standard_object(
        "walker_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "walking-running",
        chain(
            flatten([
                sampled(
                    make_walk_run_template(
                        agent,
                        use_adverbial_path_modifier=use_adverbial_path_modifier,
                        operator=operator,
                        spatial_properties=[HARD_FORCE]
                        if hard_force else [SOFT_FORCE],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for use_adverbial_path_modifier in BOOL_SET
                for hard_force in BOOL_SET for operator in [AWAY_FROM, TOWARD]
            ])),
        language_generator=language_generator,
    )
Example No. 11
def make_jump_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:

    jumper = standard_object(
        "jumper_0",
        THING,
        required_properties=[CAN_JUMP],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )

    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "jumping",
        chain(
            flatten([
                sampled(
                    # "A person jumps"
                    make_jump_template(
                        jumper,
                        use_adverbial_path_modifier=use_adverbial_path_modifier,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for use_adverbial_path_modifier in (True, False)
                for is_fast in BOOL_SET
            ])),
        language_generator=language_generator,
    )
Example No. 12
def _make_over_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object("ball", BALL)
    figure_1 = standard_object("book", BOOK)
    figure_2 = standard_object("mom", MOM)
    ground_0 = standard_object("cookie", COOKIE)
    ground_1 = standard_object("table", TABLE)

    figures = immutableset([figure_0, figure_1, figure_2])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Training Over",
        chain(*[
            sampled(
                _over_template(
                    figure,
                    ground,
                    make_noise_objects(noise_objects),
                    is_training=True,
                    is_distal=use_above_below,
                    syntax_hints=[USE_ABOVE_BELOW] if use_above_below else [],
                ),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 5,
            ) for figure in figures for ground in grounds
            # for distance in BOOL_SET
            for use_above_below in BOOL_SET
        ]),
        language_generator=language_generator,
    )
Example No. 13
def make_simple_pursuit_curriculum(
    num_instances: Optional[int],
    num_noise_instances: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
    *,
    target_objects=[BALL, CHAIR, MOM, DAD, BABY, TABLE, DOG, BIRD, BOX],
    num_objects_in_instance: int = 3,
    perception_generator: HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator = GAILA_PHASE_2_PERCEPTION_GENERATOR,
    add_gaze: bool = False,
) -> Phase1InstanceGroup:
    """
    Creates a Pursuit-learning curriculum with for a set of standard objects. Each instance in the curriculum is a set
    of *num_objects_in_instance* objects paired with a word.
    We say an instance is non-noisy if the word refers to one of the objects in the set.
    An instance is noisy if none of the objects correspond to the word.
    For each type of object of interest, we will generate *num_instances_per_object_type* instances,
    of which *num_noise_instances_per_object_type* will be noisy.
    """
    if not num_instances:
        num_instances = 10
    if not num_noise_instances:
        num_noise_instances = 0
    if num_noise_instances > num_instances:
        raise RuntimeError("Cannot have more noise than regular exemplars")

    noise_object_variables = [
        standard_object("obj-" + str(idx), banned_properties=[IS_SPEAKER, IS_ADDRESSEE])
        for idx in range(num_objects_in_instance)
    ]

    # A template used to generate the situations and perceptions for noise instances
    # (their linguistic descriptions are replaced below)
    noise_template = Phase1SituationTemplate(
        "simple_pursuit-noise",
        salient_object_variables=[noise_object_variables[0]],
        background_object_variables=noise_object_variables[1:],
    )

    all_instances = []
    # Generate phase_1 instance groups for each template (i.e. each target word)
    for target_object in target_objects:
        target_object_variable = object_variable(
            target_object.handle + "-target", target_object
        )
        # For each target object, create a template with a specific target object in each to create learning instances.
        # There is one object (e.g. Ball) across all instances while the other objects vary. Hence, the target object is
        # a salient object (used for the linguistic description) while the remaining objects are background objects.
        object_is_present_template = Phase1SituationTemplate(
            "simple_pursuit",
            salient_object_variables=[target_object_variable],
            background_object_variables=noise_object_variables[:-1],
            gazed_objects=[target_object_variable] if add_gaze else [],
        )
        non_noise_instances = list(
            phase1_instances(
                "simple_pursuit_curriculum",
                sampled(
                    object_is_present_template,
                    max_to_sample=num_instances - num_noise_instances,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_2_ONTOLOGY,
                ),
                perception_generator=perception_generator,
                language_generator=language_generator,
            ).instances()
        )

        # Filter out instances in which the target is present more than once, to ensure "a ball" instead of "the balls"
        for instance in non_noise_instances:
            # Keep this instance only if the target appears exactly once
            # (i.e. it does not also appear among the background objects)
            situation = instance[0]
            if situation and not any(
                obj.ontology_node == target_object for obj in situation.other_objects
            ):
                all_instances.append(instance)

        # Create instances for noise
        noise_instances = phase1_instances(
            "simple_pursuit_curriculum",
            sampled(
                noise_template,
                max_to_sample=num_noise_instances,
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_2_ONTOLOGY,
            ),
            perception_generator=perception_generator,
            language_generator=language_generator,
        ).instances()
        # [1] is the index of the linguistic description in an instance
        # It doesn't matter which non-noise instance is chosen
        # because they all have the object type name as their linguistic description.
        target_object_linguistic_description = all_instances[-1][1]
        for (situation, _, perception) in noise_instances:
            # A noise instance needs to have the word for our target object
            # while not actually having our target object be present.
            # However, our language generator can't generate irrelevant language for a situation.
            # Therefore, we generate the instance as normal above,
            # but here we replace its linguistic description with the word for the target object.

            # Skip the noise instance if the target object appears in the noise data
            if situation and not any(
                obj.ontology_node == target_object for obj in situation.all_objects
            ):
                all_instances.append(
                    (situation, target_object_linguistic_description, perception)
                )

    description = (
        f"simple_pursuit_curriculum_examples-{num_instances}_objects-{num_objects_in_instance}_noise-"
        f"{num_noise_instances} "
    )
    rng = random.Random()
    rng.seed(0)
    # Shuffle with the seeded RNG directly; random.shuffle's second argument
    # is deprecated and was removed in Python 3.11.
    rng.shuffle(all_instances)
    final_instance_group: Phase1InstanceGroup = ExplicitWithSituationInstanceGroup(
        description, all_instances
    )
    return final_instance_group
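A hypothetical driver for this curriculum (a sketch; the seeded shuffle above makes the instance order deterministic):

pursuit_curriculum = make_simple_pursuit_curriculum(
    num_instances=10,
    num_noise_instances=2,
    language_generator=phase1_language_generator(LanguageMode.ENGLISH),
)
# Roughly 2 of every 10 instances per target word pair that word
# with objects it does not describe.
for (_, description, _) in pursuit_curriculum.instances():
    print(description.as_token_sequence())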
Example No. 14
def test_pursuit_preposition_in_front_learner(language_mode):
    rng = random.Random()
    rng.seed(0)
    learner = PrepositionPursuitLearner(
        learning_factor=0.5,
        graph_match_confirmation_threshold=0.7,
        lexicon_entry_threshold=0.7,
        rng=rng,
        smoothing_parameter=0.001,
        ontology=GAILA_PHASE_1_ONTOLOGY,
        object_recognizer=LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[language_mode],
        language_mode=language_mode,
    )  # type: ignore
    ball = standard_object("ball", BALL)
    table = standard_object("table", TABLE)
    language_generator = phase1_language_generator(language_mode)
    in_front_train_curriculum = phase1_instances(
        "Preposition In Front Unit Train",
        situations=sampled(
            _in_front_template(
                ball,
                table,
                immutableset(),
                is_training=True,
                is_near=True,
                speaker_root_node=MOM,
            ),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=10,
        ),
        language_generator=language_generator,
    )
    in_front_test_curriculum = phase1_instances(
        "Preposition In Front Unit Test",
        situations=sampled(
            _in_front_template(
                ball,
                table,
                immutableset(),
                is_training=False,
                is_near=True,
                speaker_root_node=MOM,
            ),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
        ),
        language_generator=language_generator,
    )

    for (
            _,
            linguistic_description,
            perceptual_representation,
    ) in in_front_train_curriculum.instances():
        learner.observe(
            LearningExample(perceptual_representation, linguistic_description))

    for (
            _,
            test_linguistic_description,
            test_perceptual_representation,
    ) in in_front_test_curriculum.instances():
        descriptions_from_learner = learner.describe(
            test_perceptual_representation)
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ][0] == gold
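The observe-then-describe loop above reappears in nearly every learner test in this section. A sketch of a helper that would factor it out, assuming only the learner and curriculum interfaces already used above (hypothetical, not part of the original test suite):

def _train_and_check(learner, train_curriculum, test_curriculum) -> None:
    # Train: feed each (perception, description) pair to the learner.
    for (_, description, perception) in train_curriculum.instances():
        learner.observe(LearningExample(perception, description))
    # Test: the learner's top-ranked description must match the gold token sequence.
    for (_, gold_description, perception) in test_curriculum.instances():
        descriptions_from_learner = learner.describe(perception)
        assert descriptions_from_learner
        gold = gold_description.as_token_sequence()
        assert [d.as_token_sequence() for d in descriptions_from_learner][0] == gold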
Example No. 15
def test_copy_with_temporal_scope_pattern_content():
    """
    Tests whether copy_with_temporal_scopes converts patterns to be dynamic as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    perception_graph = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    perception_pattern = PerceptionGraphPattern.from_graph(
        perception_graph).perception_graph_pattern

    temporal_perception_graph = perception_graph.copy_with_temporal_scopes(
        temporal_scopes=[TemporalScope.AFTER])
    temporal_perception_pattern = perception_pattern.copy_with_temporal_scopes(
        required_temporal_scopes=TemporalScope.AFTER)

    # Exception while applying to dynamic pattern
    with pytest.raises(RuntimeError):
        temporal_perception_pattern.copy_with_temporal_scopes(
            required_temporal_scopes=TemporalScope.AFTER)

    for (source, target) in perception_pattern.copy_as_digraph().edges():
        assert not isinstance(
            perception_pattern.copy_as_digraph()[source][target]["predicate"],
            HoldsAtTemporalScopePredicate,
        )
    for (source,
         target) in temporal_perception_pattern.copy_as_digraph().edges():
        # Check type, and then the content
        predicate = temporal_perception_pattern.copy_as_digraph(
        )[source][target]["predicate"]
        # Test HoldsAtTemporalScope dot label, matches predicate
        assert isinstance(predicate.dot_label(), str)
        assert predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          predicate.temporal_scopes))
        assert not predicate.matches_predicate(
            HoldsAtTemporalScopePredicate(predicate.wrapped_edge_predicate,
                                          [TemporalScope.BEFORE]))
        assert isinstance(predicate, HoldsAtTemporalScopePredicate)
        assert (predicate.wrapped_edge_predicate == perception_pattern.
                copy_as_digraph()[source][target]["predicate"])
        assert len(predicate.temporal_scopes) == 1
        assert only(predicate.temporal_scopes) == TemporalScope.AFTER

    # Test normal matching behavior
    temporal_matcher = temporal_perception_pattern.matcher(
        temporal_perception_graph, match_mode=MatchMode.NON_OBJECT)
    first(temporal_matcher.matches(use_lookahead_pruning=True))

    # Test HoldsAtTemporalScopePredicate
    for (source, target) in perception_graph.copy_as_digraph().edges():
        label = "test edge label"
        edge_predicate = AnyEdgePredicate()
        temporal_predicate = HoldsAtTemporalScopePredicate(
            edge_predicate, [TemporalScope.AFTER])

        temporal_edge_label = TemporallyScopedEdgeLabel(
            label, [TemporalScope.AFTER])
        assert temporal_predicate(source, temporal_edge_label, target)
        # Non temporal edge exception
        with pytest.raises(RuntimeError):
            temporal_predicate(source, label, target)
Example No. 16
def test_syntactically_infeasible_partial_match():
    """
    Tests whether syntactic feasibility works as intended
    """

    # We use a situation to generate the perceptual representation
    # for a box with color.
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]

    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    # Create an altered perception graph in which we add extra edges to the color node
    altered_perception_digraph = perception.copy_as_digraph()
    nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, collect it; extra edges are attached to it below
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            nodes.append(node)

    # change edge information
    for node in nodes:
        random_node = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(node, random_node, label=PART_OF)
        random_node_2 = r.choice(list(altered_perception_digraph.nodes))
        altered_perception_digraph.add_edge(random_node_2, node, label=PART_OF)

    altered_perception_perception_graph = PerceptionGraph(
        altered_perception_digraph)
    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        altered_perception_perception_graph).perception_graph_pattern

    # Start the matching process, get a partial match
    first_matcher = altered_perception_pattern.matcher(
        altered_perception_perception_graph, match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        first_matcher.matches(use_lookahead_pruning=True), None)
    partial_mapping = partial_match.pattern_node_to_matched_graph_node
    # Try to extend the partial mapping; we expect a syntactic infeasibility runtime error
    second_matcher = altered_perception_pattern.matcher(
        perception, match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from first matcher with original perception graph)
    # syntactically doesn't match the one in the altered version (second matcher with altered graph)
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
Example No. 17
def test_allowed_matches_with_bad_partial_match():
    """
    Tests whether PatternMatching's allowed_matches functionality works as intended when a bad
    partial match is specified.
    """
    target_object = BOX
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))

    pattern1: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) == "box_0"
        })).perception_graph_pattern

    pattern2: PerceptionGraphPattern = PerceptionGraphPattern.from_graph(
        perception.subgraph_by_nodes({
            cast(PerceptionGraphNode, node)
            for node in perception._graph.nodes  # pylint: disable=protected-access
            if getattr(node, "debug_handle", None) in {"box_0", "the ground"}
        })).perception_graph_pattern

    pattern1_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern1._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_box: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "box_0"),
    )
    pattern2_ground: AnyObjectPerception = cast(
        AnyObjectPerception,
        only(node for node in pattern2._graph  # pylint: disable=protected-access
             if getattr(node, "debug_handle", None) == "the ground"),
    )

    matcher = PatternMatching(
        pattern=pattern1,
        graph_to_match_against=pattern2,
        matching_pattern_against_pattern=True,
        match_mode=MatchMode.OBJECT,
        allowed_matches=immutablesetmultidict([(pattern1_box, pattern2_box)]),
    )
    with pytest.raises(RuntimeError):
        first(
            matcher.matches(
                initial_partial_match={pattern1_box: pattern2_ground},
                use_lookahead_pruning=True,
            ),
            None,
        )
Example No. 18
def test_last_failed_pattern_node():
    """
    Tests whether `MatchFailure` can find the correct node.
    """

    target_object = BOX
    # Create train and test templates for the target objects
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    for (_, _, perceptual_representation) in train_curriculum.instances():
        # Original perception graph
        perception = graph_without_learner(
            PerceptionGraph.from_frame(perceptual_representation.frames[0]))

        # Original perception pattern
        whole_perception_pattern = PerceptionGraphPattern.from_graph(
            perception).perception_graph_pattern
        # Create an altered perception graph in which we replace the color node
        altered_perception_digraph = perception.copy_as_digraph()
        nodes_to_remove = []
        edges = []
        different_nodes = []
        for node in perception.copy_as_digraph().nodes:
            # If we find a color node, we make it black
            if isinstance(node, tuple) and isinstance(node[0],
                                                      RgbColorPerception):
                new_node = (RgbColorPerception(0, 0, 0), 42)
                # Get edge information
                for edge in perception.copy_as_digraph().edges(data=True):
                    if edge[0] == node:
                        edges.append((new_node, edge[1], edge[2]))
                    if edge[1] == node:
                        edges.append((edge[0], new_node, edge[2]))
                nodes_to_remove.append(node)
                different_nodes.append(new_node)

        # add new nodes
        for node in different_nodes:
            altered_perception_digraph.add_node(node)
        # add edge information
        for edge in edges:
            altered_perception_digraph.add_edge(edge[0], edge[1])
            for k, v in edge[2].items():
                altered_perception_digraph[edge[0]][edge[1]][k] = v
        # remove original node
        altered_perception_digraph.remove_nodes_from(nodes_to_remove)

        # Start the matching process
        matcher = whole_perception_pattern.matcher(
            PerceptionGraph(altered_perception_digraph),
            match_mode=MatchMode.NON_OBJECT)
        match_or_failure = matcher.first_match_or_failure_info()
        assert isinstance(match_or_failure, PatternMatching.MatchFailure)
        assert isinstance(match_or_failure.last_failed_pattern_node,
                          IsColorNodePredicate)
Example No. 19
def test_semantically_infeasible_partial_match():
    """
    Tests whether semantic feasibility works as intended
    """

    target_object = BOX
    # Create train and test templates for the target objects
    train_obj_object = object_variable("obj-with-color", target_object)
    obj_template = Phase1SituationTemplate(
        "colored-obj-object", salient_object_variables=[train_obj_object])
    template = all_possible(obj_template,
                            chooser=PHASE1_CHOOSER_FACTORY(),
                            ontology=GAILA_PHASE_1_ONTOLOGY)

    train_curriculum = phase1_instances("all obj situations",
                                        situations=template)

    perceptual_representation = only(train_curriculum.instances())[2]
    # Original perception graph
    perception = graph_without_learner(
        PerceptionGraph.from_frame(perceptual_representation.frames[0]))
    whole_perception_pattern = PerceptionGraphPattern.from_graph(
        perception).perception_graph_pattern

    # Create an altered perception graph in which we replace the color node with black
    altered_perception_digraph = perception.copy_as_digraph()
    nodes_to_remove = []
    edges = []
    different_nodes = []
    for node in perception.copy_as_digraph().nodes:
        # If we find a color node, we make it black
        if isinstance(node, tuple) and isinstance(node[0], RgbColorPerception):
            new_node = (RgbColorPerception(0, 0, 0), node[1])
            # Get edge information
            for edge in perception.copy_as_digraph().edges(data=True):
                if edge[0] == node:
                    edges.append((new_node, edge[1], edge[2]))
                if edge[1] == node:
                    edges.append((edge[0], new_node, edge[2]))
            nodes_to_remove.append(node)
            different_nodes.append(new_node)

    # remove original node
    altered_perception_digraph.remove_nodes_from(nodes_to_remove)

    # add new nodes
    for node in different_nodes:
        altered_perception_digraph.add_node(node)
    # add edge information
    for edge in edges:
        altered_perception_digraph.add_edge(edge[0], edge[1])
        for k, v in edge[2].items():
            altered_perception_digraph[edge[0]][edge[1]][k] = v

    altered_perception_pattern = PerceptionGraphPattern.from_graph(
        PerceptionGraph(altered_perception_digraph)).perception_graph_pattern

    partial_digraph = altered_perception_pattern.copy_as_digraph()
    partial_digraph.remove_nodes_from([
        node for node in partial_digraph.nodes
        if isinstance(node, IsColorNodePredicate)
    ])

    # Start the matching process, get a partial match
    matcher = whole_perception_pattern.matcher(perception,
                                               match_mode=MatchMode.OBJECT)
    partial_match: PerceptionGraphPatternMatch = first(
        matcher.matches(use_lookahead_pruning=True))
    partial_mapping = partial_match.pattern_node_to_matched_graph_node

    # Try to extend the partial mapping; we expect a semantic infeasibility runtime error
    second_matcher = whole_perception_pattern.matcher(
        PerceptionGraph(altered_perception_digraph),
        match_mode=MatchMode.OBJECT)
    # The partial mapping (obtained from first matcher with original perception graph)
    # semantically doesn't match the one in the altered version (second matcher with altered graph)
    with pytest.raises(RuntimeError):
        first(
            second_matcher.matches(initial_partial_match=partial_mapping,
                                   use_lookahead_pruning=True),
            None,
        )
Example No. 20
def test_pursuit_preposition_has_learner(language_mode):
    person = standard_object("person", PERSON)
    inanimate_object = standard_object("inanimate-object",
                                       INANIMATE_OBJECT,
                                       required_properties=[PERSON_CAN_HAVE])
    ball = standard_object("ball", BALL)

    language_generator = phase1_language_generator(language_mode)

    has_train_curriculum = phase1_instances(
        "Has Unit Train",
        situations=sampled(
            _x_has_y_template(person, inanimate_object),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=2,
        ),
        language_generator=language_generator,
    )

    has_test_curriculum = phase1_instances(
        "Has Unit Test",
        situations=sampled(
            _x_has_y_template(person, ball),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
        ),
        language_generator=language_generator,
    )

    rng = random.Random()
    rng.seed(0)
    learner = PrepositionPursuitLearner(
        learning_factor=0.5,
        graph_match_confirmation_threshold=0.7,
        lexicon_entry_threshold=0.7,
        rng=rng,
        smoothing_parameter=0.001,
        ontology=GAILA_PHASE_1_ONTOLOGY,
        object_recognizer=LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[language_mode],
        language_mode=language_mode,
    )  # type: ignore

    for (
            _,
            linguistic_description,
            perceptual_representation,
    ) in has_train_curriculum.instances():
        learner.observe(
            LearningExample(perceptual_representation, linguistic_description))

    for (
            _,
            test_linguistic_description,
            test_perceptual_representation,
    ) in has_test_curriculum.instances():
        descriptions_from_learner = learner.describe(
            test_perceptual_representation)
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ][0] == gold
Example No. 21
def test_subset_preposition_has(language_mode, learner):
    person = standard_object("person", PERSON)
    cup = standard_object("cup", CUP)
    book = standard_object("book", BOOK)
    ball = standard_object("ball", BALL)

    language_generator = phase1_language_generator(language_mode)

    has_train_curriculum = []
    has_train_curriculum.extend(
        phase1_instances(
            "Has Unit Train",
            language_generator=language_generator,
            situations=sampled(
                _x_has_y_template(person, cup),
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                max_to_sample=1,
                block_multiple_of_the_same_type=True,
            ),
        ).instances())
    has_train_curriculum.extend(
        phase1_instances(
            "Has Unit Train",
            language_generator=language_generator,
            situations=sampled(
                _x_has_y_template(person, book),
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                max_to_sample=1,
                block_multiple_of_the_same_type=True,
            ),
        ).instances())

    has_test_curriculum = phase1_instances(
        "Has Unit Test",
        situations=sampled(
            _x_has_y_template(person, ball),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )

    process_learner = learner(language_mode)
    for (_, linguistic_description,
         perceptual_representation) in has_train_curriculum:
        process_learner.observe(
            LearningExample(perceptual_representation, linguistic_description))

    for (
            _,
            test_linguistic_description,
            test_perceptual_representation,
    ) in has_test_curriculum.instances():
        descriptions_from_learner = process_learner.describe(
            test_perceptual_representation)
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert gold in [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ]
Example No. 22
def make_eat_big_small_curriculum(  # pylint: disable=unused-argument
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    # "Mom eats a big cookie"
    # We generate situations directly since templates fail to generate plurals.

    learner = SituationObject.instantiate_ontology_node(
        ontology_node=LEARNER,
        debug_handle=LEARNER.handle,
        ontology=GAILA_PHASE_1_ONTOLOGY,
    )
    situations = []

    for eater_ontology_node in [MOM, DAD, BABY, DOG]:
        eater = SituationObject.instantiate_ontology_node(
            ontology_node=eater_ontology_node,
            debug_handle=eater_ontology_node.handle,
            ontology=GAILA_PHASE_1_ONTOLOGY,
        )
        for _object in [COOKIE, WATERMELON]:
            object_to_eat = SituationObject.instantiate_ontology_node(
                ontology_node=_object,
                debug_handle=_object.handle,
                ontology=GAILA_PHASE_1_ONTOLOGY,
            )
            other_edibles = [
                SituationObject.instantiate_ontology_node(
                    ontology_node=_object,
                    debug_handle=_object.handle + f"_{i}",
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                ) for i in range(3)
            ]
            computed_background = [learner]
            computed_background.extend(other_edibles)

            # Big
            for relation_list in [
                [
                    bigger_than(object_to_eat, learner),
                    bigger_than(object_to_eat, other_edibles),
                ],
                [
                    bigger_than(learner, object_to_eat),
                    bigger_than(other_edibles, object_to_eat),
                ],
            ]:
                situations.append(
                    HighLevelSemanticsSituation(
                        ontology=GAILA_PHASE_1_ONTOLOGY,
                        salient_objects=[eater, object_to_eat],
                        other_objects=computed_background,
                        actions=[
                            Action(
                                EAT,
                                argument_roles_to_fillers=[
                                    (AGENT, eater),
                                    (PATIENT, object_to_eat),
                                ],
                            )
                        ],
                        always_relations=relation_list,
                    ))

    return phase1_instances("Big - Small Curriculum",
                            situations,
                            language_generator=language_generator)
Example No. 23
def make_spin_tall_short_curriculum(  # pylint: disable=unused-argument
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    # "Mom spins a tall chair"
    # We generate situations directly since templates fail to generate plurals.

    learner = SituationObject.instantiate_ontology_node(
        ontology_node=LEARNER,
        debug_handle=LEARNER.handle,
        ontology=GAILA_PHASE_1_ONTOLOGY,
    )
    situations = []
    for agent_ontology_node in [MOM, DAD, BABY, DOG]:
        agent = SituationObject.instantiate_ontology_node(
            ontology_node=agent_ontology_node,
            debug_handle=agent_ontology_node.handle,
            ontology=GAILA_PHASE_1_ONTOLOGY,
        )
        for _object in [CHAIR, TABLE]:
            theme = SituationObject.instantiate_ontology_node(
                ontology_node=_object,
                debug_handle=_object.handle,
                ontology=GAILA_PHASE_1_ONTOLOGY,
            )
            other_objs = [
                SituationObject.instantiate_ontology_node(
                    ontology_node=_object,
                    debug_handle=_object.handle + f"_{i}",
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                ) for i in range(3)
            ]
            computed_background = [learner]
            computed_background.extend(other_objs)

            # Tall and short
            for relation_list in [
                [bigger_than(learner, theme),
                 bigger_than(other_objs, theme)],
                [bigger_than(theme, learner),
                 bigger_than(theme, other_objs)],
            ]:
                situations.append(
                    HighLevelSemanticsSituation(
                        ontology=GAILA_PHASE_1_ONTOLOGY,
                        salient_objects=[agent, theme],
                        other_objects=computed_background,
                        actions=[
                            Action(
                                SPIN,
                                argument_roles_to_fillers=[
                                    (AGENT, agent),
                                    (THEME, theme),
                                ],
                            )
                        ],
                        always_relations=relation_list,
                        syntax_hints=[USE_VERTICAL_MODIFIERS],
                    ))

    return phase1_instances("Tall - Short Curriculum",
                            situations,
                            language_generator=language_generator)
Example No. 24
def test_your_attribute_learner(language_mode, learner):
    person_0 = standard_object(
        "speaker",
        PERSON,
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
        added_properties=[IS_SPEAKER],
    )
    person_1 = standard_object(
        "addressee",
        PERSON,
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
        added_properties=[IS_ADDRESSEE],
    )
    inanimate_object = standard_object(
        "object", INANIMATE_OBJECT, required_properties=[PERSON_CAN_HAVE]
    )

    language_generator = phase1_language_generator(language_mode)

    your_train_curriculum = phase1_instances(
        "your-train",
        situations=sampled(
            _x_has_y_template(
                person_1,
                inanimate_object,
                background=[person_0],
                syntax_hints=[IGNORE_HAS_AS_VERB],
            ),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=PHASE1_CHOOSER_FACTORY(),
            max_to_sample=5,
        ),
        language_generator=language_generator,
    )

    your_test_curriculum = phase1_instances(
        "your-test",
        situations=sampled(
            _x_has_y_template(
                person_1,
                inanimate_object,
                background=[person_0],
                syntax_hints=[IGNORE_HAS_AS_VERB],
            ),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=PHASE1_TEST_CHOOSER_FACTORY(),
            max_to_sample=1,
        ),
        language_generator=language_generator,
    )

    process_learner = learner(language_mode)

    for (
        _,
        linguistic_description,
        perceptual_representation,
    ) in your_train_curriculum.instances():
        process_learner.observe(
            LearningExample(perceptual_representation, linguistic_description)
        )

    for (
        _,
        test_linguistic_description,
        test_perceptual_representation,
    ) in your_test_curriculum.instances():
        descriptions_from_learner = process_learner.describe(
            test_perceptual_representation
        )
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert gold in [desc.as_token_sequence() for desc in descriptions_from_learner]
Example No. 25
def make_throw_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    thrower = standard_object(
        "thrower_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    catcher = standard_object(
        "catcher_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    object_thrown = standard_object("object_0",
                                    required_properties=[INANIMATE])
    implicit_goal_reference = standard_object("implicit_throw_goal_object",
                                              BOX)
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "throwing-with-temporal-descriptions",
        chain(
            # Throw on Ground
            flatten(
                sampled(
                    throw_on_ground_template(
                        thrower,
                        object_thrown,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
            # Throw
            flatten(
                sampled(
                    throw_template(
                        thrower,
                        object_thrown,
                        implicit_goal_reference,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
            # Throw up, down
            flatten(
                sampled(
                    throw_up_down_template(
                        thrower,
                        object_thrown,
                        implicit_goal_reference,
                        is_up=is_up,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET for is_up in BOOL_SET),
            # Throw To
            flatten(
                sampled(
                    throw_to_template(
                        thrower,
                        object_thrown,
                        catcher,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
        ),
        language_generator=language_generator,
    )
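
A minimal usage sketch for the factory above; `phase1_language_generator` and `LanguageMode` are assumed importable, as in the test examples further down in this file. It builds the curriculum once and streams the generated instances:

def demo_throw_curriculum() -> None:
    # Sketch only: num_samples=None falls back to 5 samples per template
    # (see the max_to_sample defaults above).
    curriculum = make_throw_imprecise_temporal_descriptions(
        num_samples=None,
        noise_objects=0,
        language_generator=phase1_language_generator(LanguageMode.ENGLISH),
    )
    # Each instance is a (situation, linguistic_description, perception) triple.
    for _, linguistic_description, _perception in curriculum.instances():
        print(linguistic_description.as_token_sequence())
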
def make_roll_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    animate_0 = standard_object("object_0",
                                THING,
                                required_properties=[ANIMATE])
    rollable_0 = standard_object("object_1", required_properties=[ROLLABLE])
    rolling_surface = standard_object(
        "surface",
        THING,
        required_properties=[CAN_HAVE_THINGS_RESTING_ON_THEM])
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "roll-imprecise-temporal-descriptions",
        chain(
            # rolls intransitively
            flatten(
                sampled(
                    intransitive_roll(
                        animate_0,
                        rolling_surface,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
            # rolls transitively
            flatten(
                sampled(
                    transitive_roll(
                        animate_0,
                        rollable_0,
                        rolling_surface,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
            # rolls on a surface
            flatten(
                sampled(
                    transitive_roll_with_surface(
                        animate_0,
                        rollable_0,
                        rolling_surface,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
        ),
        language_generator=language_generator,
    )
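
Both factories above lean on the same idiom: flatten(sampled(...) for is_fast in BOOL_SET) cross-multiplies each template over the FAST/SLOW manner flag and chains the results into one stream. A standalone sketch of that idiom with plain data, assuming (as in ADAM) that BOOL_SET is simply the two booleans and that flatten comes from more_itertools:

from more_itertools import flatten

BOOL_SET = (True, False)  # stand-in for ADAM's boolean set

# One inner list per flag value; flatten chains them into a single
# sequence, just as the curricula above interleave FAST and SLOW samples.
samples = flatten(
    [f"{'fast' if is_fast else 'slow'}_roll_{i}" for i in range(2)]
    for is_fast in BOOL_SET
)
print(list(samples))
# ['fast_roll_0', 'fast_roll_1', 'slow_roll_0', 'slow_roll_1']
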
Example No. 27
def test_pursuit_color_attribute(color_node, object_0_node, object_1_node,
                                 language_mode, learner):
    color = property_variable(color_node.handle, color_node)
    object_0 = standard_object(object_0_node.handle,
                               object_0_node,
                               added_properties=[color])
    object_1 = standard_object(object_1_node.handle,
                               object_1_node,
                               added_properties=[color])

    color_object_template = _object_with_color_template(object_0, None)

    templates_with_n_samples = [
        (color_object_template, 2),
        (_object_with_color_template(object_1, None), 4),
    ]

    language_generator = phase1_language_generator(language_mode)

    color_train_curriculum = phase1_instances(
        f"{color.handle} Color Train",
        language_generator=language_generator,
        situations=chain(*[
            flatten([
                sampled(
                    template,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    max_to_sample=n_samples,
                    block_multiple_of_the_same_type=True,
                ) for template, n_samples in templates_with_n_samples
            ])
        ]),
    )

    color_test_curriculum = phase1_instances(
        f"{color.handle} Color Test",
        situations=sampled(
            color_object_template,
            chooser=PHASE1_TEST_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )
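    # Note: training samples above come from PHASE1_CHOOSER_FACTORY while the
    # test situation comes from PHASE1_TEST_CHOOSER_FACTORY, presumably seeded
    # differently so the held-out example is not a verbatim training repeat.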

    processing_learner = learner(language_mode)

    for (
            _,
            linguistic_description,
            perceptual_representation,
    ) in color_train_curriculum.instances():
        processing_learner.observe(
            LearningExample(perceptual_representation, linguistic_description))

    for (
            _,
            test_linguistics_description,
            test_perceptual_representation,
    ) in color_test_curriculum.instances():
        descriptions_from_learner = processing_learner.describe(
            test_perceptual_representation)
        gold = test_linguistics_description.as_token_sequence()
        assert descriptions_from_learner
        assert gold in [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ]
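
The train-then-evaluate loop above (observe every training instance, then require the gold token sequence among the learner's descriptions of each test instance) recurs throughout these tests. A hypothetical refactoring sketch, not part of ADAM, that this example and the previous one could share:

def observe_then_check_descriptions(learner, train_curriculum, test_curriculum):
    # Hypothetical helper. Phase 1: show every example to the learner.
    for _, linguistic_description, perceptual_representation in train_curriculum.instances():
        learner.observe(
            LearningExample(perceptual_representation, linguistic_description)
        )
    # Phase 2: the gold tokens must appear among the learner's descriptions.
    for _, gold_description, test_perception in test_curriculum.instances():
        descriptions_from_learner = learner.describe(test_perception)
        assert descriptions_from_learner
        gold = gold_description.as_token_sequence()
        assert gold in [
            description.as_token_sequence()
            for description in descriptions_from_learner
        ]
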
Example No. 28
def run_generics_test(learner, language_mode):
    def build_object_multiples_situations(
            ontology: Ontology,
            *,
            samples_per_object: int = 3,
            chooser: RandomChooser) -> Iterable[HighLevelSemanticsSituation]:
        for object_type in PHASE_1_CURRICULUM_OBJECTS:
            # Exclude slow objects for now
            if object_type.handle in ["bird", "dog", "truck"]:
                continue
            is_liquid = ontology.has_all_properties(object_type, [LIQUID])
            # don't want multiples of named people
            if not is_recognized_particular(ontology,
                                            object_type) and not is_liquid:
                for _ in range(samples_per_object):
                    num_objects = chooser.choice(range(2, 4))  # 2 or 3 copies
                    yield HighLevelSemanticsSituation(
                        ontology=GAILA_PHASE_1_ONTOLOGY,
                        salient_objects=[
                            SituationObject.instantiate_ontology_node(
                                ontology_node=object_type,
                                debug_handle=object_type.handle + f"_{idx}",
                                ontology=GAILA_PHASE_1_ONTOLOGY,
                            ) for idx in range(num_objects)
                        ],
                        axis_info=AxesInfo(),
                    )

    language_generator = phase2_language_generator(language_mode)
    # Teach plurals
    plurals = phase1_instances(
        "plurals pretraining",
        build_object_multiples_situations(ontology=GAILA_PHASE_1_ONTOLOGY,
                                          chooser=PHASE1_CHOOSER_FACTORY()),
        language_generator=language_generator,
    )

    curricula = [
        # Actions - verbs in generics
        _make_eat_curriculum(10, 0, language_generator),
        # Plurals
        plurals,
        # Color attributes
        _make_objects_with_colors_curriculum(None, None, language_generator),
        # Predicates
        _make_colour_predicates_curriculum(None, None, language_generator),
        _make_kind_predicates_curriculum(None, None, language_generator),
        # Generics
        _make_generic_statements_curriculum(
            num_samples=3,
            noise_objects=0,
            language_generator=language_generator),
    ]

    for curriculum in curricula:
        for (
                _,
                linguistic_description,
                perceptual_representation,
        ) in curriculum.instances():
            # Get the object matches first - preposition learner can't learn without already recognized objects
            learner.observe(
                LearningExample(perceptual_representation,
                                linguistic_description))
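
run_generics_test expects an already-constructed learner; a sketch of how it might be driven from pytest. The parametrization and the `learner` fixture (a factory taking a LanguageMode, as in the color test above) are assumptions, not from the source:

import pytest

@pytest.mark.parametrize(
    "language_mode", [LanguageMode.ENGLISH, LanguageMode.CHINESE]
)
def test_generics(language_mode, learner):
    # Hypothetical wrapper: construct the learner for this language mode
    # and run the shared generics scenario.
    run_generics_test(learner(language_mode), language_mode)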