def test_choose_first():
    generator = ChooseFirstLanguageGenerator(DummyLanguageGenerator())
    # PyCharm fails to recognize the converter
    # noinspection PyTypeChecker
    situation = LocatedObjectSituation([(situation_object(BALL),
                                         Point(0, 0, 0))])

    generated_descriptions = generator.generate_language(
        situation, RandomChooser.for_seed(0))
    assert len(generated_descriptions) == 1
    assert generated_descriptions[0].as_token_sequence() == ("hello", "world")
Example #2
import attr
from typing import List


@attr.s(auto_attribs=True)
class SceneNode:
    """
    Node type used for creating a graph structure from a Perception of a scene.
    This kind of hierarchical grouping of objects within the scene is helpful for adjusting
    the positions of the objects within the rendering engine.
    """

    name: str = attr.ib()
    perceived_obj: ObjectPerception = attr.ib()
    children: List["SceneNode"] = attr.ib(factory=list)
    parent: "SceneNode" = attr.ib(default=None)
    position: Point = attr.ib(default=Point(0, 0, 0))
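The `children` and `parent` fields above are what give the scene its hierarchy, so walking the tree is the usual first step before repositioning objects. The helper below is a minimal illustrative sketch, not part of the library: `iter_scene_nodes` is a hypothetical name, and it relies only on the `children` field declared in `SceneNode`.

from typing import Iterator


def iter_scene_nodes(root: "SceneNode") -> Iterator["SceneNode"]:
    # Hypothetical helper: yield `root` and all of its descendants, depth-first.
    stack = [root]
    while stack:
        node = stack.pop()
        yield node
        # Visit each node's children after the node itself.
        stack.extend(node.children)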
def test_single_object_generator():
    # warning due to PyCharm bug
    # noinspection PyTypeChecker
    situation = LocatedObjectSituation(
        objects_to_locations=((situation_object(BALL), Point(0, 0, 0)), ))

    single_obj_generator = SingleObjectLanguageGenerator(
        GAILA_PHASE_1_ENGLISH_LEXICON)

    languages_for_situation = single_obj_generator.generate_language(
        situation, RandomChooser.for_seed(0))
    assert len(languages_for_situation) == 1
    assert languages_for_situation[0].as_token_sequence() == ("ball", )
Example #4
def test_simple_experiment():
    language_generator = SingleObjectLanguageGenerator(
        GAILA_PHASE_1_ENGLISH_LEXICON)
    perception_generator = DummyVisualPerceptionGenerator()

    only_show_ball = GeneratedFromSituationsInstanceGroup(
        name="only-ball",
        situations=[
            LocatedObjectSituation([(
                SituationObject.instantiate_ontology_node(
                    BALL, ontology=GAILA_PHASE_1_ONTOLOGY),
                Point(0.0, 0.0, 0.0),
            )])
        ],
        language_generator=language_generator,
        perception_generator=perception_generator,
        chooser=RandomChooser.for_seed(0),
    )

    pre_acc = CandidateAccuracyObserver("pre")
    post_acc = CandidateAccuracyObserver("post")
    test_acc = CandidateAccuracyObserver("test")

    experiment = Experiment(
        name="simple",
        training_stages=[only_show_ball],
        learner_factory=MemorizingLanguageLearner,
        pre_example_training_observers=[
            TopChoiceExactMatchObserver("pre"), pre_acc
        ],
        post_example_training_observers=[
            TopChoiceExactMatchObserver("post"), post_acc
        ],
        warm_up_test_instance_groups=[only_show_ball],
        test_instance_groups=[only_show_ball],
        test_observers=[TopChoiceExactMatchObserver("test"), test_acc],
        sequence_chooser=RandomChooser.for_seed(0),
    )

    execute_experiment(experiment)

    assert pre_acc.accuracy() == 0.0
    assert post_acc.accuracy() == 1.0
    assert test_acc.accuracy() == 1.0
Example #5
import sys


def _locations_in_a_line_1m_apart():
    for x_coordinate in range(0, sys.maxsize):
        yield Point(float(x_coordinate), 0.0, 0.0)
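`_locations_in_a_line_1m_apart` yields an unbounded stream of points spaced one meter apart along the x-axis, so callers are expected to slice off only as many locations as they need. A minimal usage sketch, assuming only the generator above and the standard library (`first_three` is an illustrative variable name):

from itertools import islice

# Take the first three locations: (0, 0, 0), (1, 0, 0), (2, 0, 0).
first_three = list(islice(_locations_in_a_line_1m_apart(), 3))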
Example #6
def test_dummy_visual_perception():
    located_truck = DummyVisualPerceptionFrame.SingleObjectPerception(
        "truck", location=Point(1.0, 2.0, 3.0)
    )
    assert located_truck.tag == "truck"
    assert located_truck.location == Point(1.0, 2.0, 3.0)