Example 1
def _make_on_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object("ball", BALL)
    figure_1 = standard_object("book", BOOK)
    figure_2 = standard_object("mom", MOM)
    ground_0 = standard_object("chair", CHAIR)
    ground_1 = standard_object("table", TABLE)

    figures = immutableset([figure_0, figure_1, figure_2])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Training On",
        chain(*[
            flatten([
                sampled(
                    _on_template(
                        figure,
                        ground,
                        make_noise_objects(noise_objects),
                        is_training=True,
                    ),
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    max_to_sample=num_samples if num_samples else 5,
                ) for figure in figures for ground in grounds
            ])
        ]),
        language_generator=language_generator,
    )
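Usage sketch (not part of the original example, and assuming the observe loop shown in the pursuit-learner tests further down): a curriculum factory like _make_on_training is built with a language generator and then iterated via .instances(). The learner object and LanguageMode.ENGLISH below are placeholders.

# Hedged sketch only: my_learner and LanguageMode.ENGLISH are assumptions for illustration.
curriculum = _make_on_training(
    num_samples=None,  # falls back to 5 samples per figure/ground pair
    noise_objects=2,
    language_generator=phase1_language_generator(LanguageMode.ENGLISH),
)
for _, linguistic_description, perceptual_representation in curriculum.instances():
    my_learner.observe(
        LearningExample(perceptual_representation, linguistic_description)
    )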
Example 2
def _in_front_template(
    figure: TemplateObjectVariable,
    ground: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
    *,
    is_training: bool,
    is_near: bool,
    speaker_root_node: OntologyNode = PERSON,
) -> Phase1SituationTemplate:
    handle = "training" if is_training else "testing"
    direction = Direction(positive=True,
                          relative_to_axis=FacingAddresseeAxis(ground))
    speaker = standard_object("speaker",
                              speaker_root_node,
                              added_properties=[IS_SPEAKER])
    addressee = standard_object("addressee",
                                LEARNER,
                                added_properties=[IS_ADDRESSEE])
    computed_background = [speaker, addressee]
    computed_background.extend(background)
    return Phase1SituationTemplate(
        f"preposition-{handle}-{figure.handle}-behind-{ground.handle}",
        salient_object_variables=[figure, ground],
        background_object_variables=computed_background,
        asserted_always_relations=[
            near(figure, ground, direction=direction)
            if is_near else far(figure, ground, direction=direction)
        ],
        gazed_objects=[figure],
    )
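For orientation (a sketch restating a pattern used throughout these examples, not code from the source): a template such as _in_front_template is expanded into concrete situations by sampled(...); every name below appears elsewhere in this collection.

# Hedged sketch: instantiate the template for concrete objects and sample situations.
ball = standard_object("ball", BALL)
table = standard_object("table", TABLE)
situations = sampled(
    _in_front_template(ball, table, immutableset(), is_training=True, is_near=True),
    ontology=GAILA_PHASE_1_ONTOLOGY,
    chooser=PHASE1_CHOOSER_FACTORY(),
    max_to_sample=5,
)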
Example 3
def _make_under_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object("ball", BALL)
    figure_1 = standard_object("book", BOOK)
    figure_2 = standard_object("mom", MOM)
    ground_0 = standard_object("table", TABLE)

    figures = immutableset([figure_0, figure_1, figure_2])
    grounds = immutableset([ground_0])

    return phase1_instances(
        "Preposition Training Under",
        chain(*[
            sampled(
                _under_template(
                    figure,
                    ground,
                    make_noise_objects(noise_objects),
                    is_training=True,
                    is_distal=use_above_below,
                    syntax_hints=[USE_ABOVE_BELOW] if use_above_below else [],
                ),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 5,
            ) for figure in figures for ground in grounds
            # for distance in BOOL_SET
            for use_above_below in BOOL_SET
        ]),
        language_generator=language_generator,
    )
Example 4
def _make_put_in_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    agent = standard_object(
        "agent",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    theme = standard_object("theme", INANIMATE_OBJECT)
    goal_in = standard_object("goal_in", INANIMATE_OBJECT, required_properties=[HOLLOW])

    return phase1_instances(
        "Capabilities - Put in",
        sampled(
            _put_in_template(agent, theme, goal_in, make_noise_objects(noise_objects)),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=PHASE1_CHOOSER_FACTORY(),
            max_to_sample=num_samples if num_samples else 20,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )
Example 5
def _make_in_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = object_variable("water", WATER)
    figure_1 = object_variable("juice", JUICE)
    ground_0 = standard_object("box", BOX)
    ground_1 = standard_object("cup", CUP)

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Training In",
        chain(*[
            sampled(
                _in_template(
                    figure,
                    ground,
                    make_noise_objects(noise_objects),
                    is_training=True,
                ),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 5,
            ) for figure in figures for ground in grounds
        ]),
        language_generator=language_generator,
    )
Example 6
def make_take_grab_subtle_verb_distinction(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    taker = standard_object("tosser_passer_0",
                            THING,
                            required_properties=[ANIMATE])
    takee = standard_object("tossee_passee_0",
                            THING,
                            required_properties=[INANIMATE])
    background = make_noise_objects(noise_objects)
    return phase1_instances(
        "taking-grabbing",
        chain(
            flatten([
                sampled(
                    make_take_template(
                        taker,
                        takee,
                        use_adverbial_path_modifier=use_adverbial_path_modifier,
                        operator=operator,
                        spatial_properties=[HARD_FORCE]
                        if hard_force else [SOFT_FORCE],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for use_adverbial_path_modifier in BOOL_SET
                for hard_force in BOOL_SET for operator in [TOWARD, AWAY_FROM]
            ])),
        language_generator=language_generator,
    )
Example 7
def make_multiple_object_situation(
    num_samples: Optional[int],
    num_noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:

    target_object = standard_object("target_object")
    noise_object_variables = [
        standard_object("obj-" + str(idx), banned_properties=[IS_SPEAKER, IS_ADDRESSEE])
        for idx in range(num_noise_objects if num_noise_objects else 0)
    ]

    return phase1_instances(
        "Multiple Objects",
        sampled(
            _make_multiple_object_template(target_object, noise_object_variables),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=PHASE1_CHOOSER_FACTORY(),
            max_to_sample=num_samples if num_samples else 20,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )
Example 8
def make_imprecise_size_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    theme_0 = standard_object("theme",
                              banned_properties=[IS_SPEAKER, IS_ADDRESSEE])
    theme_1 = standard_object("theme-thing",
                              THING,
                              banned_properties=[IS_SPEAKER, IS_ADDRESSEE])

    return phase1_instances(
        "Imprecise Size",
        chain(
            flatten([
                sampled(
                    template(theme, noise_objects),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for template in [
                    _big_x_template,
                    _little_x_template,
                    _tall_x_template,
                    _short_x_template,
                ] for theme in [theme_0, theme_1]
            ])),
        language_generator=language_generator,
    )
Example 9
def make_german_eat_test_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:

    object_to_eat = standard_object("object_0", required_properties=[EDIBLE])
    eater = standard_object(
        "eater_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "german-eating",
        chain(*[
            sampled(
                make_eat_template(eater, object_to_eat, background),
                max_to_sample=num_samples if num_samples else 5,
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
            )
        ]),
        language_generator=language_generator,
    )
Example 10
def _make_m6_in_front_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    learner_object = standard_object("learner",
                                     LEARNER,
                                     added_properties=[IS_ADDRESSEE])
    mom = standard_object("mom", MOM, added_properties=[IS_SPEAKER])
    background = [learner_object, mom]
    background.extend(make_noise_objects(noise_objects))

    return phase1_instances(
        "Preposition behind",
        situations=chain(*[
            sampled(
                _in_front_template(object_1,
                                   object_2,
                                   background,
                                   is_training=True,
                                   is_near=True),
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                max_to_sample=num_samples if num_samples else 1,
                block_multiple_of_the_same_type=True,
            ) for object_1 in r.sample(SMALL_OBJECT_VARS, 3)
            for object_2 in r.sample(LARGE_OBJECT_VARS, 3)
        ]),
        perception_generator=GAILA_M6_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
Example 11
def test_jump(language_mode, learner):

    jumper = standard_object(
        "jumper_0",
        THING,
        required_properties=[CAN_JUMP],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    jumped_over = standard_object("jumped_over",
                                  banned_properties=[IS_SPEAKER, IS_ADDRESSEE])

    for situation_template in make_jump_templates(None):

        run_verb_test(
            learner(language_mode),
            situation_template,
            language_generator=phase1_language_generator(language_mode),
        )
    for situation_template in [
            _jump_over_template(jumper, jumped_over, immutableset())
    ]:
        run_verb_test(
            learner(language_mode),
            situation_template,
            language_generator=phase1_language_generator(language_mode),
        )
Example 12
def test_subset_preposition_on(language_mode, learner):
    ball = standard_object("ball", BALL)
    table = standard_object("table", TABLE)

    run_preposition_test(
        learner(language_mode),
        _on_template(ball, table, immutableset(), is_training=True),
        language_generator=phase1_language_generator(language_mode),
    )
Example 13
def make_move_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    self_mover_0 = standard_object(
        "self-mover_0",
        THING,
        required_properties=[SELF_MOVING],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )

    other_mover_0 = standard_object("mover_0",
                                    THING,
                                    required_properties=[ANIMATE])
    movee_0 = standard_object("movee_0",
                              THING,
                              required_properties=[INANIMATE])
    move_goal_reference = standard_object("move-goal-reference",
                                          THING,
                                          required_properties=[INANIMATE])
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "move-with-temporal-descriptions",
        chain(
            # bare move (e.g. "a box moves") is about half of uses in child speech
            flatten(
                sampled(
                    bare_move_template(
                        self_mover_0,
                        move_goal_reference,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
            # Transitive Move
            flatten(
                sampled(
                    transitive_move_template(
                        other_mover_0,
                        movee_0,
                        move_goal_reference,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
        ),
        language_generator=language_generator,
    )
Example 14
def test_pursuit_preposition_on_learner(language_mode):
    rng = random.Random()
    rng.seed(0)
    learner = PrepositionPursuitLearner(
        learning_factor=0.5,
        graph_match_confirmation_threshold=0.7,
        lexicon_entry_threshold=0.7,
        rng=rng,
        smoothing_parameter=0.001,
        ontology=GAILA_PHASE_1_ONTOLOGY,
        object_recognizer=LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[language_mode],
        language_mode=language_mode,
    )  # type: ignore
    ball = standard_object("ball", BALL)
    table = standard_object("table", TABLE)
    language_generator = phase1_language_generator(language_mode)
    on_train_curriculum = phase1_instances(
        "Preposition Unit Train",
        situations=sampled(
            _on_template(ball, table, immutableset(), is_training=True),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=10,
        ),
        language_generator=language_generator,
    )
    on_test_curriculum = phase1_instances(
        "Preposition Unit Test",
        situations=sampled(
            _on_template(ball, table, immutableset(), is_training=False),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
        ),
        language_generator=language_generator,
    )

    for (
            _,
            linguistic_description,
            perceptual_representation,
    ) in on_train_curriculum.instances():
        # Get the object matches first - preposition learner can't learn without already recognized objects
        learner.observe(
            LearningExample(perceptual_representation, linguistic_description))
    for (
            _,
            test_linguistic_description,
            test_perceptual_representation,
    ) in on_test_curriculum.instances():
        descriptions_from_learner = learner.describe(
            test_perceptual_representation)
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ][0] == gold
Example 15
def test_pursuit_preposition_has_learner(language_mode, learner):
    person = standard_object("person",
                             PERSON,
                             banned_properties=[IS_SPEAKER, IS_ADDRESSEE])
    inanimate_object = standard_object("inanimate-object",
                                       INANIMATE_OBJECT,
                                       required_properties=[PERSON_CAN_HAVE])
    ball = standard_object("ball", BALL)

    language_generator = phase1_language_generator(language_mode)

    has_train_curriculum = phase1_instances(
        "Has Unit Train",
        situations=sampled(
            _x_has_y_template(person, inanimate_object),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=2,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )

    has_test_curriculum = phase1_instances(
        "Has Unit Test",
        situations=sampled(
            _x_has_y_template(person, ball),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )

    processing_learner = learner(language_mode)

    for (
            _,
            linguistic_description,
            perceptual_representation,
    ) in has_train_curriculum.instances():
        processing_learner.observe(
            LearningExample(perceptual_representation, linguistic_description))

    for (
            _,
            test_linguistic_description,
            test_perceptual_representation,
    ) in has_test_curriculum.instances():
        descriptions_from_learner = processing_learner.describe(
            test_perceptual_representation)
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert gold in [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ]
Example 16
def test_pursuit_preposition_over_learner(language_mode, learner):
    ball = standard_object("ball", BALL)
    table = standard_object("table", TABLE)
    language_generator = phase1_language_generator(language_mode)
    over_train_curriculum = phase1_instances(
        "Preposition Over Unit Train",
        situations=sampled(
            _over_template(ball,
                           table,
                           immutableset(),
                           is_training=True,
                           is_distal=True),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=10,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )
    over_test_curriculum = phase1_instances(
        "Preposition Over Unit Test",
        situations=sampled(
            _over_template(ball,
                           table,
                           immutableset(),
                           is_training=False,
                           is_distal=True),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )

    processing_learner = learner(language_mode)

    for (
            _,
            linguistic_description,
            perceptual_representation,
    ) in over_train_curriculum.instances():
        processing_learner.observe(
            LearningExample(perceptual_representation, linguistic_description))

    for (
            _,
            test_linguistic_description,
            test_perceptual_representation,
    ) in over_test_curriculum.instances():
        descriptions_from_learner = processing_learner.describe(
            test_perceptual_representation)
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert gold in [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ]
Example 17
def test_come(language_mode, learner):
    movee = standard_object(
        "movee",
        required_properties=[SELF_MOVING],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    learner_obj = standard_object("learner_0", LEARNER)
    speaker = standard_object(
        "speaker",
        PERSON,
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
        added_properties=[IS_SPEAKER],
    )
    object_ = standard_object("object_0",
                              THING,
                              banned_properties=[IS_SPEAKER, IS_ADDRESSEE])
    ground = standard_object("ground", root_node=GROUND)

    come_to_speaker = Phase1SituationTemplate(
        "come-to-speaker",
        salient_object_variables=[movee, speaker],
        actions=[
            Action(COME,
                   argument_roles_to_fillers=[(AGENT, movee), (GOAL, speaker)])
        ],
    )
    come_to_learner = Phase1SituationTemplate(
        "come-to-leaner",
        salient_object_variables=[movee],
        actions=[
            Action(COME,
                   argument_roles_to_fillers=[(AGENT, movee),
                                              (GOAL, learner_obj)])
        ],
    )
    come_to_object = Phase1SituationTemplate(
        "come-to-object",
        salient_object_variables=[movee, object_],
        actions=[
            Action(COME,
                   argument_roles_to_fillers=[(AGENT, movee), (GOAL, object_)])
        ],
    )
    for situation_template in [
            _make_come_down_template(movee, object_, speaker, ground,
                                     immutableset()),
            come_to_speaker,
            come_to_learner,
            come_to_object,
    ]:
        run_verb_test(
            learner(language_mode),
            situation_template,
            language_generator=phase1_language_generator(language_mode),
        )
Example 18
def _make_drink_cups_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:

    templates = []
    for cup in [CUP, CUP_2, CUP_3, CUP_4]:
        cup_obj = standard_object("cup", cup)
        liquid_0 = object_variable("liquid_0", required_properties=[LIQUID])
        person_0 = standard_object(
            "person_0", PERSON, banned_properties=[IS_SPEAKER, IS_ADDRESSEE]
        )

        templates.append(
            Phase1SituationTemplate(
                "drink-cup",
                salient_object_variables=[liquid_0, person_0, cup_obj],
                background_object_variables=make_noise_objects(noise_objects),
                actions=[
                    Action(
                        DRINK,
                        argument_roles_to_fillers=[(AGENT, person_0), (THEME, liquid_0)],
                        auxiliary_variable_bindings=[(DRINK_CONTAINER_AUX, cup_obj)],
                    )
                ],
                asserted_always_relations=[inside(liquid_0, cup_obj)],
            )
        )

    return phase2_instances(
        "drink - cup",
        chain(
            *[
                sampled(
                    cup_template,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_2_ONTOLOGY,
                    max_to_sample=num_samples,
                    block_multiple_of_the_same_type=True,
                )
                if num_samples
                else all_possible(
                    cup_template,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_2_ONTOLOGY,
                )
                for cup_template in templates
            ]
        ),
        perception_generator=GAILA_PHASE_2_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
Example 19
def drink_test_template():
    object_0 = standard_object(
        "object_0",
        required_properties=[HOLLOW, PERSON_CAN_HAVE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    liquid_0 = object_variable("liquid_0", required_properties=[LIQUID])
    person_0 = standard_object("person_0",
                               PERSON,
                               banned_properties=[IS_SPEAKER, IS_ADDRESSEE])
    return make_drink_template(person_0, liquid_0, object_0, None)
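A hedged sketch (an assumption, not in the source): the template returned by drink_test_template() could be exercised like the other verb tests in this collection, with learner and language_mode as the usual test fixtures.

# Hedged sketch mirroring test_eat_simple; learner and language_mode are assumed fixtures.
def test_drink(language_mode, learner):
    run_verb_test(
        learner(language_mode),
        drink_test_template(),
        language_generator=phase1_language_generator(language_mode),
    )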
Example 20
def test_subset_preposition_in_front(language_mode, learner):
    ball = standard_object("ball", BALL)
    table = standard_object("table", TABLE)
    speaker = standard_object("speaker", MOM, added_properties=[IS_SPEAKER])

    run_preposition_test(
        learner(language_mode),
        _in_front_template(ball,
                           table, [speaker],
                           is_training=True,
                           is_near=True),
        language_generator=phase1_language_generator(language_mode),
    )
Example 21
def test_eat_simple(language_mode, learner):
    object_to_eat = standard_object("object_0", required_properties=[EDIBLE])
    eater = standard_object(
        "eater_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    run_verb_test(
        learner(language_mode),
        make_eat_template(eater, object_to_eat),
        language_generator=phase1_language_generator(language_mode),
    )
Example 22
def _make_sit_on_chair_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:

    templates = []
    for chair_type in [CHAIR, CHAIR_2, CHAIR_3, CHAIR_4, CHAIR_5]:
        sitter = standard_object(
            "sitter_0",
            THING,
            required_properties=[ANIMATE],
            banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
        )
        seat = standard_object("chair", chair_type)
        templates.append(
            make_sit_transitive(
                sitter, seat, noise_objects, surface=False, syntax_hints=False
            )
        )
        templates.append(
            make_sit_template_intransitive(
                sitter, seat, noise_objects, surface=False, syntax_hints=False
            )
        )

    return phase2_instances(
        "sit on chair",
        chain(
            *[
                sampled(
                    template,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_2_ONTOLOGY,
                    max_to_sample=num_samples,
                    block_multiple_of_the_same_type=True,
                )
                if num_samples
                else all_possible(
                    template,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_2_ONTOLOGY,
                )
                for template in templates
            ]
        ),
        perception_generator=GAILA_PHASE_2_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
Example 23
def make_push_shove_subtle_verb_distinctions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    pusher = standard_object(
        "pusher_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    pushee = standard_object("pushee_0",
                             THING,
                             required_properties=[INANIMATE])
    push_surface = standard_object("push_surface_0",
                                   THING,
                                   required_properties=[INANIMATE])
    push_goal = standard_object("push_goal_0",
                                THING,
                                required_properties=[INANIMATE])
    background = make_noise_objects(noise_objects)
    # get all possible templates
    templates = flatten([
        make_push_templates(
            pusher,
            pushee,
            push_surface,
            push_goal,
            use_adverbial_path_modifier=use_adverbial_path_modifier,
            operator=operator,
            spatial_properties=[HARD_FORCE] if hard_force else [SOFT_FORCE],
            background=background,
        ) for hard_force in BOOL_SET
        for use_adverbial_path_modifier in BOOL_SET
        for operator in [TOWARD, AWAY_FROM]
    ])
    return phase1_instances(
        "pushing-shoving",
        chain(
            flatten([
                sampled(
                    template,
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for template in templates
            ])),
        language_generator=language_generator,
    )
Example 24
def _make_on_tests(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object(
        "figure_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    figure_1 = standard_object(
        "figure_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_0 = standard_object(
        "ground_0",
        THING,
        required_properties=[CAN_HAVE_THINGS_RESTING_ON_THEM],
        banned_properties=[HOLLOW],
    )
    ground_1 = standard_object(
        "ground_1",
        THING,
        required_properties=[CAN_HAVE_THINGS_RESTING_ON_THEM],
        banned_properties=[HOLLOW],
    )

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Testing On",
        chain(*[
            flatten([
                sampled(
                    _on_template(
                        figure,
                        ground,
                        make_noise_objects(noise_objects),
                        is_training=False,
                    ),
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                ) for figure in figures for ground in grounds
            ])
        ]),
        language_generator=language_generator,
    )
Example 25
def _make_over_tests(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object(
        "figure_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    figure_1 = standard_object(
        "figure_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_0 = standard_object(
        "ground_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_1 = standard_object(
        "ground_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Testing Over",
        chain(*[
            sampled(
                _over_template(
                    figure,
                    ground,
                    make_noise_objects(noise_objects),
                    is_training=False,
                    is_distal=use_above_below,
                    syntax_hints=[USE_ABOVE_BELOW] if use_above_below else [],
                ),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 5,
                block_multiple_of_the_same_type=True,
            ) for figure in figures for ground in grounds
            # for distance in BOOL_SET
            for use_above_below in BOOL_SET
        ]),
        language_generator=language_generator,
    )
Example 26
def test_take(language_mode, learner):
    run_verb_test(
        learner(language_mode),
        make_take_template(
            agent=standard_object(
                "taker_0",
                THING,
                required_properties=[ANIMATE],
                banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
            ),
            theme=standard_object("object_taken_0",
                                  required_properties=[INANIMATE]),
            use_adverbial_path_modifier=False,
        ),
        language_generator=phase1_language_generator(language_mode),
    )
Example 27
def make_fly_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    bird = standard_object("bird_0", BIRD)
    syntax_hints_options = ([], [USE_ADVERBIAL_PATH_MODIFIER])  # type: ignore
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "fly-imprecise-temporal-descripttions",
        chain(
            # Bare Fly
            flatten(
                sampled(
                    bare_fly(
                        bird,
                        up=is_up,
                        syntax_hints=syntax_hints,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_up in BOOL_SET
                for syntax_hints in syntax_hints_options
                for is_fast in BOOL_SET)),
        language_generator=language_generator,
    )
Example 28
def test_subset_preposition_behind(language_mode, learner):
    ball = standard_object("ball", BALL)
    table = standard_object("table", TABLE)

    run_preposition_test(
        learner(language_mode),
        _behind_template(
            ball,
            table,
            immutableset(),
            is_training=True,
            is_near=True,
            speaker_root_node=MOM,
        ),
        language_generator=phase1_language_generator(language_mode),
    )
Example 29
def _make_in_front_tests(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object(
        "figure_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    figure_1 = standard_object(
        "figure_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_0 = standard_object(
        "ground_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_1 = standard_object(
        "ground_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Testing In Front",
        chain(*[
            flatten([
                sampled(
                    _in_front_template(
                        figure,
                        ground,
                        make_noise_objects(noise_objects),
                        is_training=False,
                        is_near=close,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for figure in figures for ground in grounds
                for close in BOOL_SET
            ])
        ]),
        language_generator=language_generator,
    )
Example 30
def test_subset_preposition_in(language_mode, learner):
    water = object_variable("water", WATER)
    cup = standard_object("cup", CUP)

    run_preposition_test(
        learner(language_mode),
        _in_template(water, cup, immutableset(), is_training=True),
        language_generator=phase1_language_generator(language_mode),
    )