Example #1
def _make_m6_beside_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
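    """Training curriculum for the preposition "beside" (M6 milestone): small
    objects are placed to the right of larger ones and rendered with the M6
    perception generator. (``r`` is assumed to be a module-level
    ``random.Random`` instance in the source file.)"""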
    return phase1_instances(
        "Preposition on",
        situations=chain(*[
            sampled(
                _beside_template(
                    object_1,
                    object_2,
                    make_noise_objects(noise_objects),
                    is_training=True,
                    is_right=True,
                ),
                chooser=PHASE1_CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                max_to_sample=num_samples if num_samples else 1,
            ) for object_1 in r.sample(SMALL_OBJECT_VARS, 3)
            for object_2 in r.sample(LARGE_OBJECT_VARS, 3)
        ]),
        perception_generator=GAILA_M6_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
Example #2
def _make_in_front_tests(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
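    """Test instances for the preposition "in front of": every figure/ground
    pairing is sampled at both near and far distances, with the speaker and
    addressee kept in the background of each scene."""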
    figure_0 = standard_object(
        "figure_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    figure_1 = standard_object(
        "figure_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_0 = standard_object(
        "ground_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_1 = standard_object(
        "ground_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    speaker = standard_object("speaker", PERSON, added_properties=[IS_SPEAKER])
    addressee = standard_object("addressee",
                                LEARNER,
                                added_properties=[IS_ADDRESSEE])
    computed_background = [speaker, addressee]

    return phase1_instances(
        "Preposition Testing In Front",
        chain(*[
            flatten([
                sampled(
                    _in_front_template(
                        figure,
                        ground,
                        flatten([
                            make_noise_objects(noise_objects),
                            computed_background,
                        ]),
                        is_training=False,
                        is_near=close,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                ) for figure in figures for ground in grounds
                for close in BOOL_SET
            ])
        ]),
        language_generator=language_generator,
    )
Example #3
def make_move_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
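    """Curriculum pairing "move" with imprecise temporal descriptions: bare
    (self-moving) and transitive move templates, each sampled with both FAST
    and SLOW spatial properties."""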
    self_mover_0 = standard_object(
        "self-mover_0",
        THING,
        required_properties=[SELF_MOVING],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )

    other_mover_0 = standard_object("mover_0",
                                    THING,
                                    required_properties=[ANIMATE])
    movee_0 = standard_object("movee_0",
                              THING,
                              required_properties=[INANIMATE])
    move_goal_reference = standard_object("move-goal-reference",
                                          THING,
                                          required_properties=[INANIMATE])
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "move-with-temporal-descriptions",
        chain(
            # bare move (e.g. "a box moves") is about half of uses in child speed
            flatten(
                sampled(
                    bare_move_template(
                        self_mover_0,
                        move_goal_reference,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
            # Transitive Move
            flatten(
                sampled(
                    transitive_move_template(
                        other_mover_0,
                        movee_0,
                        move_goal_reference,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
        ),
        language_generator=language_generator,
    )
Example #4
def _make_drink_cups_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
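    """Phase-2 drinking curriculum: for each cup variant, a person drinks a
    liquid that is asserted to be inside that cup. Exhaustively enumerates
    situations when ``num_samples`` is not given."""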

    templates = []
    for cup in [CUP, CUP_2, CUP_3, CUP_4]:
        cup_obj = standard_object("cup", cup)
        liquid_0 = object_variable("liquid_0", required_properties=[LIQUID])
        person_0 = standard_object(
            "person_0", PERSON, banned_properties=[IS_SPEAKER, IS_ADDRESSEE]
        )

        templates.append(
            Phase1SituationTemplate(
                "drink-cup",
                salient_object_variables=[liquid_0, person_0, cup_obj],
                background_object_variables=make_noise_objects(noise_objects),
                actions=[
                    Action(
                        DRINK,
                        argument_roles_to_fillers=[(AGENT, person_0), (THEME, liquid_0)],
                        auxiliary_variable_bindings=[(DRINK_CONTAINER_AUX, cup_obj)],
                    )
                ],
                asserted_always_relations=[inside(liquid_0, cup_obj)],
            )
        )

    return phase2_instances(
        "drink - cup",
        chain(
            *[
                sampled(
                    cup_template,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_2_ONTOLOGY,
                    max_to_sample=num_samples,
                    block_multiple_of_the_same_type=True,
                )
                if num_samples
                else all_possible(
                    cup_template,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_2_ONTOLOGY,
                )
                for cup_template in templates
            ]
        ),
        perception_generator=GAILA_PHASE_2_PERCEPTION_GENERATOR,
        language_generator=language_generator,
    )
Example #5
def _big_x_template(theme: TemplateObjectVariable,
                    noise_objects: Optional[int]) -> Phase1SituationTemplate:
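    """Template describing a theme as "big": the theme is asserted to be bigger
    than the learner, who serves as the background size reference."""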
    learner = learner_template_factory()
    computed_background = [learner]
    computed_background.extend(make_noise_objects(noise_objects))
    return Phase1SituationTemplate(
        f"big-{theme.handle}",
        salient_object_variables=[theme],
        background_object_variables=computed_background,
        asserted_always_relations=[bigger_than(theme, learner)],
    )
Example #6
def _make_on_tests(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
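    """Test instances for the preposition "on": figures are sampled resting on
    grounds that can have things resting on them."""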
    figure_0 = standard_object(
        "figure_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    figure_1 = standard_object(
        "figure_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_0 = standard_object(
        "ground_0",
        THING,
        required_properties=[CAN_HAVE_THINGS_RESTING_ON_THEM],
        banned_properties=[HOLLOW],
    )
    ground_1 = standard_object(
        "ground_1",
        THING,
        required_properties=[CAN_HAVE_THINGS_RESTING_ON_THEM],
        banned_properties=[HOLLOW],
    )

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Testing On",
        chain(*[
            flatten([
                sampled(
                    _on_template(
                        figure,
                        ground,
                        make_noise_objects(noise_objects),
                        is_training=False,
                    ),
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                ) for figure in figures for ground in grounds
            ])
        ]),
        language_generator=language_generator,
    )
Example #7
def make_push_shove_subtle_verb_distinctions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
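    """Curriculum contrasting "push" and "shove": push templates crossed with
    HARD_FORCE vs. SOFT_FORCE, optional adverbial path modifiers, and the
    TOWARD / AWAY_FROM operators."""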
    pusher = standard_object(
        "pusher_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    pushee = standard_object("pushee_0",
                             THING,
                             required_properties=[INANIMATE])
    push_surface = standard_object("push_surface_0",
                                   THING,
                                   required_properties=[INANIMATE])
    push_goal = standard_object("push_goal_0",
                                THING,
                                required_properties=[INANIMATE])
    background = make_noise_objects(noise_objects)
    # get all possible templates
    templates = flatten([
        make_push_templates(
            pusher,
            pushee,
            push_surface,
            push_goal,
            use_adverbial_path_modifier=use_adverbial_path_modifier,
            operator=operator,
            spatial_properties=[HARD_FORCE] if hard_force else [SOFT_FORCE],
            background=background,
        ) for hard_force in BOOL_SET
        for use_adverbial_path_modifier in BOOL_SET
        for operator in [TOWARD, AWAY_FROM]
    ])
    return phase1_instances(
        "pushing-shoving",
        chain(
            flatten([
                sampled(
                    template,
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for template in templates
            ])),
        language_generator=language_generator,
    )
Example #8
def _short_x_template(theme: TemplateObjectVariable,
                      noise_objects: Optional[int]) -> Phase1SituationTemplate:
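    """Template describing a theme as "short": the learner is asserted to be
    bigger than the theme, and vertical modifiers are requested in the
    generated language."""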
    learner = learner_template_factory()
    computed_background = [learner]
    computed_background.extend(make_noise_objects(noise_objects))

    # TODO: This difference should be an axis size but we can't yet
    # implement that. See: https://github.com/isi-vista/adam/issues/832
    return Phase1SituationTemplate(
        f"tall-{theme.handle}",
        salient_object_variables=[theme],
        background_object_variables=computed_background,
        asserted_always_relations=[bigger_than(learner, theme)],
        syntax_hints=[USE_VERTICAL_MODIFIERS],
    )
Example #9
def _make_over_tests(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
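    """Test instances for the preposition "over", sampled both with and without
    the above/below phrasing."""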
    figure_0 = standard_object(
        "figure_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    figure_1 = standard_object(
        "figure_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_0 = standard_object(
        "ground_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_1 = standard_object(
        "ground_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Testing Over",
        chain(*[
            sampled(
                _over_template(
                    figure,
                    ground,
                    make_noise_objects(noise_objects),
                    is_training=False,
                    is_distal=use_above_below,
                    syntax_hints=[USE_ABOVE_BELOW] if use_above_below else [],
                ),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 5,
                block_multiple_of_the_same_type=True,
            ) for figure in figures for ground in grounds
            # for distance in BOOL_SET
            for use_above_below in BOOL_SET
        ]),
        language_generator=language_generator,
    )
Example #10
def _make_behind_tests(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
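    """Test instances for the preposition "behind": every figure/ground pairing
    is sampled at both near and far distances."""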
    figure_0 = standard_object(
        "figure_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    figure_1 = standard_object(
        "figure_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_0 = standard_object(
        "ground_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_1 = standard_object(
        "ground_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Testing Behind",
        chain(*[
            flatten([
                sampled(
                    _behind_template(
                        figure,
                        ground,
                        make_noise_objects(noise_objects),
                        is_training=False,
                        is_near=close,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for figure in figures for ground in grounds
                for close in BOOL_SET
            ])
        ]),
        language_generator=language_generator,
    )
Example #11
def make_fall_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
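    """Curriculum pairing "fall" with imprecise temporal descriptions: generic
    falling and fall-on-ground templates, each sampled with both FAST and SLOW
    spatial properties."""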
    arbitary_object = standard_object("object_0", THING)
    syntax_hints_options = ([], [USE_ADVERBIAL_PATH_MODIFIER])  # type: ignore
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        f"fall-imprecise-temporal-description",
        chain(
            # Any Object Falling
            flatten(
                sampled(
                    falling_template(
                        arbitrary_object,
                        lands_on_ground=object_ends_up_on_ground,
                        syntax_hints=syntax_hints,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for object_ends_up_on_ground in BOOL_SET
                for syntax_hints in syntax_hints_options
                for is_fast in BOOL_SET),
            # Fall on Ground
            flatten(
                sampled(
                    fall_on_ground_template(
                        arbitrary_object,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
        ),
        language_generator=language_generator,
    )
Example #12
def make_animal_eat_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
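    """Curriculum in which a non-human animal eats an edible object."""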
    object_to_eat = standard_object("object_0", required_properties=[EDIBLE])
    animal = standard_object("eater_0", NONHUMAN_ANIMAL)
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "Animal-Eat-Curriculum",
        # Fressen: German for eating by an animal
        sampled(
            make_eat_template(animal, object_to_eat, background),
            max_to_sample=num_samples if num_samples else 5,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=PHASE1_CHOOSER_FACTORY(),
        ),
        language_generator=language_generator,
    )
Example #13
def make_pass_toss_subtle_verb_distinction(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
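    """Curriculum contrasting "toss" and "pass": pass templates crossed with
    HARD_FORCE vs. SOFT_FORCE, optional adverbial path modifiers, and the
    TOWARD / AWAY_FROM operators."""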
    tosser = standard_object("tosser_passer_0",
                             THING,
                             required_properties=[ANIMATE])
    tossee = standard_object("tossee_passee_0",
                             THING,
                             required_properties=[INANIMATE])
    goal = standard_object("move-goal-reference",
                           THING,
                           required_properties=[INANIMATE])
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "tossing_passing",
        chain(
            flatten([
                sampled(
                    make_pass_template(
                        tosser,
                        tossee,
                        goal,
                        use_adverbial_path_modifier=use_adverbial_path_modifier,
                        operator=operator,
                        spatial_properties=[HARD_FORCE]
                        if hard_force else [SOFT_FORCE],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for use_adverbial_path_modifier in BOOL_SET
                for hard_force in BOOL_SET for operator in [TOWARD, AWAY_FROM]
            ])),
        language_generator=language_generator,
    )
Example #14
def _make_behind_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
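    """Training instances for the preposition "behind", using fixed figure and
    ground object types at both near and far distances."""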
    figure_0 = standard_object("ball", BALL)
    figure_1 = standard_object("book", BOOK)
    figure_2 = standard_object("dad", DAD)
    ground_0 = standard_object("cookie", COOKIE)
    ground_1 = standard_object("table", TABLE)
    ground_2 = standard_object("person",
                               PERSON,
                               banned_properties=[IS_SPEAKER, IS_ADDRESSEE])

    figures = immutableset([figure_0, figure_1, figure_2])
    grounds = immutableset([ground_0, ground_1, ground_2])

    return phase1_instances(
        "Preposition Training Behind",
        chain(*[
            flatten([
                sampled(
                    _behind_template(
                        figure,
                        ground,
                        make_noise_objects(noise_objects),
                        is_training=True,
                        is_near=close,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for figure in figures for ground in grounds
                for close in BOOL_SET
            ])
        ]),
        language_generator=language_generator,
    )
Example #15
def _make_beside_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
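    """Training instances for the preposition "beside", using fixed figure and
    ground object types with the figure always placed to the right."""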
    figure_0 = standard_object("ball", BALL)
    figure_1 = standard_object("book", BOOK)
    figure_2 = standard_object("mom", MOM)
    ground_0 = standard_object("cookie", COOKIE)
    ground_1 = standard_object("table", TABLE)
    ground_2 = standard_object("dad", DAD)

    figures = immutableset([figure_0, figure_1, figure_2])
    grounds = immutableset([ground_0, ground_1, ground_2])

    return phase1_instances(
        "Preposition Training Beside",
        chain(*[
            flatten([
                sampled(
                    _beside_template(
                        figure,
                        ground,
                        make_noise_objects(noise_objects),
                        is_right=True,
                        is_training=True,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                ) for figure in figures for ground in grounds
                # for direction in BOOL_SET
            ])
        ]),
        language_generator=language_generator,
    )
Example #16
def make_human_eat_curriculum(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
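    """Curriculum in which a person eats an edible object."""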
    object_to_eat = standard_object("object_0", required_properties=[EDIBLE])
    human = standard_object("eater_0",
                            PERSON,
                            banned_properties=[IS_SPEAKER, IS_ADDRESSEE])
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "Human-Eat-Curriculum",
        # Essen: German for eating by a person
        sampled(
            make_eat_template(human, object_to_eat, background),
            max_to_sample=num_samples if num_samples else 5,
            ontology=GAILA_PHASE_1_ONTOLOGY,
            chooser=PHASE1_CHOOSER_FACTORY(),
        ),
        language_generator=language_generator,
    )
Example #17
def _make_over_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
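    """Training instances for the preposition "over", sampled both with and
    without the above/below phrasing."""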
    figure_0 = standard_object("ball", BALL)
    figure_1 = standard_object("book", BOOK)
    figure_2 = standard_object("mom", MOM)
    ground_0 = standard_object("cookie", COOKIE)
    ground_1 = standard_object("table", TABLE)

    figures = immutableset([figure_0, figure_1, figure_2])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Training Over",
        chain(*[
            sampled(
                _over_template(
                    figure,
                    ground,
                    make_noise_objects(noise_objects),
                    is_training=True,
                    is_distal=use_above_below,
                    syntax_hints=[USE_ABOVE_BELOW] if use_above_below else [],
                ),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 5,
            ) for figure in figures for ground in grounds
            # for distance in BOOL_SET
            for use_above_below in BOOL_SET
        ]),
        language_generator=language_generator,
    )
Example #18
def make_walk_run_subtle_verb_distinction(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
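    """Curriculum contrasting "walk" and "run": walk/run templates crossed with
    HARD_FORCE vs. SOFT_FORCE, optional adverbial path modifiers, and the
    AWAY_FROM / TOWARD operators."""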

    agent = standard_object(
        "walker_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "walking-running",
        chain(
            flatten([
                sampled(
                    make_walk_run_template(
                        agent,
                        use_adverbial_path_modifier=use_adverbial_path_modifier,
                        operator=operator,
                        spatial_properties=[HARD_FORCE]
                        if hard_force else [SOFT_FORCE],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for use_adverbial_path_modifier in BOOL_SET
                for hard_force in BOOL_SET for operator in [AWAY_FROM, TOWARD]
            ])),
        language_generator=language_generator,
    )
Example #19
def make_jump_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
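    """Curriculum pairing "jump" with imprecise temporal descriptions: jump
    templates sampled with both FAST and SLOW, with and without an adverbial
    path modifier."""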

    jumper = standard_object(
        "jumper_0",
        THING,
        required_properties=[CAN_JUMP],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )

    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "jumping",
        chain(
            flatten([
                sampled(
                    # "A person jumps"
                    make_jump_template(
                        jumper,
                        use_adverbial_path_modifier=use_adverbial_path_modifier,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for use_adverbial_path_modifier in BOOL_SET
                for is_fast in BOOL_SET
            ])),
        language_generator=language_generator,
    )
Example #20
def make_roll_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
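    """Curriculum pairing "roll" with imprecise temporal descriptions:
    intransitive, transitive, and rolling-on-a-surface templates, each sampled
    with both FAST and SLOW spatial properties."""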
    animate_0 = standard_object("object_0",
                                THING,
                                required_properties=[ANIMATE])
    rollable_0 = standard_object("object_1", required_properties=[ROLLABLE])
    rolling_surface = standard_object(
        "surface",
        THING,
        required_properties=[CAN_HAVE_THINGS_RESTING_ON_THEM])
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "roll-imprecise-temporal-descriptions",
        chain(
            # rolls intransitively
            flatten(
                sampled(
                    intransitive_roll(
                        animate_0,
                        rolling_surface,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
            # rolls transitively
            flatten(
                sampled(
                    transitive_roll(
                        animate_0,
                        rollable_0,
                        rolling_surface,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
            # rolls on a surface
            flatten(
                sampled(
                    transitive_roll_with_surface(
                        animate_0,
                        rollable_0,
                        rolling_surface,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
        ),
        language_generator=language_generator,
    )
Example #21
def make_throw_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
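    """Curriculum pairing "throw" with imprecise temporal descriptions:
    throw-on-ground, basic throw, throw up/down, and throw-to templates, each
    sampled with both FAST and SLOW spatial properties."""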
    thrower = standard_object(
        "thrower_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    catcher = standard_object(
        "catcher_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    object_thrown = standard_object("object_0",
                                    required_properties=[INANIMATE])
    implicit_goal_reference = standard_object("implicit_throw_goal_object",
                                              BOX)
    background = make_noise_objects(noise_objects)

    return phase1_instances(
        "throwing-with-temporal-descriptions",
        chain(
            # Throw on Ground
            flatten(
                sampled(
                    throw_on_ground_template(
                        thrower,
                        object_thrown,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
            # Throw
            flatten(
                sampled(
                    throw_template(
                        thrower,
                        object_thrown,
                        implicit_goal_reference,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
            # Throw up, down
            flatten(
                sampled(
                    throw_up_down_template(
                        thrower,
                        object_thrown,
                        implicit_goal_reference,
                        is_up=is_up,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET for is_up in BOOL_SET),
            # Throw To
            flatten(
                sampled(
                    throw_to_template(
                        thrower,
                        object_thrown,
                        catcher,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                ) for is_fast in BOOL_SET),
        ),
        language_generator=language_generator,
    )