Example #1
def _make_under_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object("ball", BALL)
    figure_1 = standard_object("book", BOOK)
    figure_2 = standard_object("mom", MOM)
    ground_0 = standard_object("table", TABLE)

    figures = immutableset([figure_0, figure_1, figure_2])
    grounds = immutableset([ground_0])

    return phase1_instances(
        "Preposition Training Under",
        chain(*[
            sampled(
                _under_template(
                    figure,
                    ground,
                    make_noise_objects(noise_objects),
                    is_training=True,
                    is_distal=use_above_below,
                    syntax_hints=[USE_ABOVE_BELOW] if use_above_below else [],
                ),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 5,
            ) for figure in figures for ground in grounds
            # for distance in BOOL_SET
            for use_above_below in BOOL_SET
        ]),
        language_generator=language_generator,
    )
Example #2
def _extract_candidate_attributes(
    whole_scene_perception_graph: PerceptionGraph,
    object_with_attribute: ObjectSemanticNode,
) -> Sequence[PerceptionGraph]:
    perception_digraph = whole_scene_perception_graph.copy_as_digraph()
    # For now, we assume all attributes are based on properties.
    properties = immutableset(
        [
            node
            for _, node, label in perception_digraph.out_edges(
                object_with_attribute, data="label"
            )
            if label == HAS_PROPERTY_LABEL
        ]
    )
    # Furthermore, we limit the search space to the even smaller set of hypotheses
    # where we consider only single properties as possible attributes.
    # Otherwise there are too many hypotheses for the pursuit learner to search through
    # and it's unlikely to converge on the correct hypothesis
    # in any reasonable amount of time or number of examples.
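    # (With p candidate properties there are only p single-property hypotheses,
    # versus 2**p - 1 non-empty property subsets if arbitrary combinations were allowed.)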
    candidate_attribute_subgraph_node_sets = [
        immutableset([object_with_attribute, property]) for property in properties
    ]
    return immutableset(
        [
            whole_scene_perception_graph.subgraph_by_nodes(
                candidate_attribute_subgraph_nodes
            )
            for candidate_attribute_subgraph_nodes in candidate_attribute_subgraph_node_sets
        ]
    )
Example #3
    def test_disjoint_index(self) -> None:
        overlapping_items = (
            Foo(Span(0, 10)),
            Foo(Span(5, 25)),
            Foo(Span(20, 30)),
            Bar(Span(20, 30)),
        )
        with self.assertRaisesRegex(
                ValueError, "Some range keys are connected or overlapping"):
            HasSpanIndex.index_disjoint(overlapping_items)

        s1, s2, s3 = (Span(0, 3), Span(5, 25), Span(25, 30))
        s2_within = Span(5, 10)
        s4_contains = Span(5, 30)
        fs1, fs2, fs3 = Foo(s1), Foo(s2), Foo(s3)
        index = HasSpanIndex.index_disjoint((fs1, fs2, fs3))

        self.assertIsNone(index.get_exactly_matching(s2_within))
        self.assertEqual(fs3, index.get_exactly_matching(s3))
        self.assertEqual(immutableset(), index.get_overlapping(Span(35, 40)))
        self.assertEqual(immutableset([fs3]),
                         index.get_overlapping(Span(28, 35)))
        self.assertEqual(immutableset([fs1, fs2]),
                         index.get_overlapping(Span(2, 7)))
        self.assertEqual(immutableset(), index.get_contained(s2_within))
        self.assertEqual(immutableset([fs2, fs3]),
                         index.get_contained(s4_contains))
        self.assertIsNone(index.get_containing(s4_contains))
        self.assertEqual(fs2, index.get_containing(s2_within))
Example #4
def _under_template(
    figure: TemplateObjectVariable,
    ground: TemplateObjectVariable,
    background: Iterable[TemplateObjectVariable],
    *,
    is_training: bool,
    is_distal: bool,
    syntax_hints: Iterable[str] = immutableset(),
    background_relations: Iterable[TemplateObjectVariable] = immutableset(),
) -> Phase1SituationTemplate:
    handle = "training" if is_training else "testing"
    relations = [
        negate(on(figure, GROUND_OBJECT_TEMPLATE)),
        strictly_under(ground, figure, dist=DISTAL if is_distal else PROXIMAL),
    ]
    relations.extend(background_relations)  # type: ignore
    return Phase1SituationTemplate(
        f"preposition-{handle}-{figure.handle}-under-{ground.handle}",
        salient_object_variables=[figure, ground],
        background_object_variables=background,
        asserted_always_relations=flatten_relations(relations),
        constraining_relations=[bigger_than(ground, figure)],
        gazed_objects=[figure],
        syntax_hints=syntax_hints,
    )
Example #5
def _make_in_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = object_variable("water", WATER)
    figure_1 = object_variable("juice", JUICE)
    ground_0 = standard_object("box", BOX)
    ground_1 = standard_object("cup", CUP)

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Training In",
        chain(*[
            sampled(
                _in_template(
                    figure,
                    ground,
                    make_noise_objects(noise_objects),
                    is_training=True,
                ),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                chooser=PHASE1_CHOOSER_FACTORY(),
                max_to_sample=num_samples if num_samples else 5,
            ) for figure in figures for ground in grounds
        ]),
        language_generator=language_generator,
    )
Example #6
    def forward(self):  # pylint: disable=arguments-differ
        self._update_subobject_positions()

        collision_penalty = sum(
            self.collision_penalty(box1, box2)
            for (box1,
                 box2) in combinations(self.included_and_excluded_boxes, 2))
        below_ground_penalty = sum(
            self.below_ground_penalty(box)
            for box in self.object_bounding_boxes)
        weak_gravity_penalty = sum(
            self.weak_gravity_penalty(
                bounding_box,
                immutableset(self.in_region_relations[object_perception]))
            for object_perception, bounding_box in
            self.object_perception_to_bounding_box.items()
            if object_perception in self.in_region_relations)
        in_region_penalty = sum(
            self.in_region_penalty(
                object_perception,
                immutableset(self.in_region_relations[object_perception]),
            ) for object_perception in self.object_perception_to_bounding_box
            if object_perception in self.in_region_relations)
        # print(
        #     f"collision penalty: {collision_penalty}"
        #     f"\nout of bounds penalty: {below_ground_penalty}"
        #     f"\ngravity penalty: {weak_gravity_penalty}"
        #     f"\nin-region penalty: {in_region_penalty}"
        # )
        return (collision_penalty + below_ground_penalty +
                weak_gravity_penalty + in_region_penalty)
Example #7
def test_object_not_on_ground():
    """
    Intended to test that one can specify an object is not on the ground
    """
    table = situation_object(TABLE)
    ground = situation_object(GROUND)
    perception = _PERCEPTION_GENERATOR.generate_perception(
        HighLevelSemanticsSituation(
            ontology=GAILA_PHASE_1_ONTOLOGY,
            salient_objects=immutableset([table]),
            other_objects=immutableset([ground]),
            always_relations=flatten_relations(negate(on(table, ground))),
        ),
        chooser=RandomChooser.for_seed(0),
    )
    frame = perception.frames[0]
    relations = frame.relations
    table_perception = perception_with_handle(frame, "**table_0")
    ground_perception = perception_with_handle(frame, "the ground")
    assert not any(
        relation.relation_type == IN_REGION
        and relation.first_slot == table_perception
        and isinstance(relation.second_slot, Region)
        and relation.second_slot.reference_object == ground_perception
        and relation.second_slot.distance == EXTERIOR_BUT_IN_CONTACT
        for relation in relations
    )
Example #8
 def from_job(job: Job,
              output_files: Optional[Iterable[File]]) -> "DependencyNode":
     return DependencyNode(
         job=job,
         output_files=immutableset(output_files)
         if output_files else immutableset(),
     )
Example #9
 def aligned_object_nodes(
     num_arguments: int,
     num_arguments_to_alignments_sets: Dict[
         int, ImmutableSet[Tuple[SemanticNodeWithSpan, ...]]
     ],
     language_concept_alignment: LanguageConceptAlignment,
 ) -> ImmutableSet[Tuple[SemanticNodeWithSpan, ...]]:
     if num_arguments not in num_arguments_to_alignments_sets.keys():
         # we haven't seen a request for this number of arguments before so we need to generate all the valid options
         semantic_nodes_with_spans = immutableset(
             SemanticNodeWithSpan(node=node, span=span)
             for (
                 node,
                 span,
             ) in language_concept_alignment.node_to_language_span.items()
             if isinstance(node, ObjectSemanticNode)
         )
         num_arguments_to_alignments_sets[num_arguments] = immutableset(
             ordered_semantic_nodes
             for ordered_semantic_nodes in itertools.product(
                 semantic_nodes_with_spans, repeat=num_arguments
             )
             if in_left_to_right_order(ordered_semantic_nodes)
         )
     return num_arguments_to_alignments_sets[num_arguments]
Example #10
 def templates_for_concept(
         self, concept: Concept) -> ImmutableSet[SurfaceTemplate]:
     if self._language_mode == LanguageMode.ENGLISH:
         return self._concepts_to_templates[concept]
     elif self._language_mode == LanguageMode.CHINESE:
         if concept.debug_string == "you":
             return immutableset([
                 SurfaceTemplate.for_object_name(
                     "ni3", language_mode=self._language_mode)
             ])
         if concept.debug_string == "me":
             return immutableset([
                 SurfaceTemplate.for_object_name(
                     "wo3", language_mode=self._language_mode)
             ])
         mappings = (
             GAILA_PHASE_1_CHINESE_LEXICON._ontology_node_to_word  # pylint:disable=protected-access
         )
         for k, v in mappings.items():
             if k.handle == concept.debug_string:
                 return immutableset([
                     SurfaceTemplate.for_object_name(
                         v.base_form, language_mode=self._language_mode)
                 ])
     # FunctionalObjectConcepts mean we have recognized an object but don't have
     # knowledge of what the lexicalization is, so we just return an empty set.
     if isinstance(concept, FunctionalObjectConcept):
         return immutableset()
     raise RuntimeError(f"Invalid concept {concept}")
Example #11
    def _candidate_templates(
        self, language_perception_semantic_alignment:
        LanguagePerceptionSemanticAlignment
    ) -> AbstractSet[SurfaceTemplateBoundToSemanticNodes]:
        # We can only learn single words for objects at the moment.
        # See https://github.com/isi-vista/adam/issues/793 .

        # Attempt to align every unaligned token to some object in the scene.
        language_alignment = (
            language_perception_semantic_alignment.language_concept_alignment)
        ret = immutableset(
            SurfaceTemplateBoundToSemanticNodes(
                SurfaceTemplate.for_object_name(
                    token, language_mode=self._language_mode),
                slot_to_semantic_node={},
            ) for (tok_idx, token) in enumerate(
                language_alignment.language.as_token_sequence())
            if not language_alignment.token_index_is_aligned(tok_idx)
            # ignore determiners
            and token not in DETERMINERS)

        return immutableset(
            bound_surface_template for bound_surface_template in ret
            # For now, we require templates to account for the entire utterance.
            # See https://github.com/isi-vista/adam/issues/789
            if covers_entire_utterance(bound_surface_template,
                                       language_alignment,
                                       ignore_determiners=True))
Example #12
    def required_action_description(
            self, action_type: OntologyNode,
            semantic_roles: Iterable[OntologyNode]) -> ActionDescription:
        semantic_roles_set = immutableset(semantic_roles)
        descriptions_for_action_type = self.action_to_description[action_type]
        matching_descriptions = immutableset(
            description for description in descriptions_for_action_type
            if description.frame.semantic_roles == semantic_roles_set)

        if matching_descriptions:
            if len(matching_descriptions) == 1:
                return only(matching_descriptions)
            else:
                raise RuntimeError(
                    f"Multiple action descriptions match action type "
                    f"{action_type} and roles {semantic_roles_set}")
        else:
            available_frames: Any = [
                immutableset(description.frame.roles_to_variables.keys())
                for description in descriptions_for_action_type
            ]
            raise RuntimeError(
                f"No action descriptions match action type "
                f"{action_type} and roles {semantic_roles_set}. "
                f"Known frames for {action_type} are "
                f"{available_frames}")
Example #13
def _make_on_training(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object("ball", BALL)
    figure_1 = standard_object("book", BOOK)
    figure_2 = standard_object("mom", MOM)
    ground_0 = standard_object("chair", CHAIR)
    ground_1 = standard_object("table", TABLE)

    figures = immutableset([figure_0, figure_1, figure_2])
    grounds = immutableset([ground_0, ground_1])

    return phase1_instances(
        "Preposition Training On",
        chain(*[
            flatten([
                sampled(
                    _on_template(
                        figure,
                        ground,
                        make_noise_objects(noise_objects),
                        is_training=True,
                    ),
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    max_to_sample=num_samples if num_samples else 5,
                ) for figure in figures for ground in grounds
            ])
        ]),
        language_generator=language_generator,
    )
Example #14
        def candidate_relation_templates(
        ) -> Iterable[Tuple[AlignmentSlots, ...]]:
            # This function returns the candidate relation templates.
            # Terminology: (A)rgument - a noun; (F)ixedString - a collection of string tokens
            # that can be a preposition, localiser/coverb, etc.

            # Now, handle two arguments with one function string (e.g. a ball on a table)
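            # Since the two Argument slots are equal, wrapping the permutations in
            # immutableset keeps only the three distinct orderings:
            # (A, A, F), (A, F, A), and (F, A, A).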
            for output in immutableset(
                    itertools.permutations(
                        [
                            AlignmentSlots.Argument,
                            AlignmentSlots.Argument,
                            AlignmentSlots.FixedString,
                        ],
                        3,
                    )):
                yield output

            # Now, handle two arguments with two function strings (e.g. chyuou dzai zhouzi shang)
            for output in immutableset(
                    itertools.permutations(
                        [
                            AlignmentSlots.Argument,
                            AlignmentSlots.Argument,
                            AlignmentSlots.FixedString,
                            AlignmentSlots.FixedString,
                        ],
                        4,
                    )):
                yield output
Example #15
 def test_empty_singleton(self):
     empty1 = immutableset()
     empty2 = immutableset()
     self.assertIs(empty1, empty2)
     empty3 = ImmutableSet.builder().build()
     self.assertIs(empty1, empty3)
     empty4 = immutableset([])
     self.assertIs(empty1, empty4)
Example #16
 def test_symmetric_difference(self):
     s1 = immutableset([1, 2, 3, 8])
     s2 = immutableset([1, 2, 3, 4, 5, 6, 7])
     s3 = immutableset([4, 5, 6, 7, 8])
     self.assertEqual(s1, s2 ^ s3)
     self.assertEqual(s1, s3 ^ s2)
     self.assertEqual(s1, s2.symmetric_difference(s3))
     self.assertEqual(s1, s3.symmetric_difference(s2))
Example #17
    def run_python_on_args(
        self,
        job_name: Locator,
        python_module_or_path: Any,
        set_args: str,
        *,
        depends_on,
        resource_request: Optional[ResourceRequest] = None,
        override_conda_config: Optional[CondaConfiguration] = None,
        category: Optional[str] = None,
        use_pypy: bool = False,
        job_is_stageable: bool = False,
        job_bypass_staging: bool = False,
        pre_job_bash: str = "",
        post_job_bash: str = "",
        times_to_retry_job: int = 0,
        container: Optional[Container] = None,
        job_profiles: Iterable[PegasusProfile] = immutableset(),
        input_file_paths: Union[Iterable[Union[Path, str]], Path,
                                str] = immutableset(),
        output_file_paths: Union[Iterable[Union[Path, str]], Path,
                                 str] = immutableset(),
    ) -> DependencyNode:
        """
        Schedule a job to run the given *python_script* with the given *set_args*.

        If this job requires other jobs to be executed first,
        include them in *depends_on*.

        This method returns a `DependencyNode` which can be used in *depends_on*
        for future jobs.

        `pre_job_bash` and `post_job_bash` are not intended as editable fields
        for appending an additional job onto this Python job; scoring, post-processing,
        etc. should be their own jobs. They are provided to allow for cases like
        'export PYTHONPATH={path}' where a job expects environment variables to be set.
        """
        return self._run_python_job(
            job_name,
            python_module_or_path,
            set_args,
            depends_on=depends_on,
            resource_request=resource_request,
            override_conda_config=override_conda_config,
            category=category,
            use_pypy=use_pypy,
            container=container,
            pre_job_bash=pre_job_bash,
            post_job_bash=post_job_bash,
            job_is_stageable=job_is_stageable,
            job_bypass_staging=job_bypass_staging,
            times_to_retry_job=times_to_retry_job,
            job_profiles=job_profiles,
            treat_params_as_cmd_args=True,
            input_file_paths=input_file_paths,
            output_file_paths=output_file_paths,
        )
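The docstring above notes that the returned `DependencyNode` feeds the *depends_on* of
later jobs. A minimal sketch of that chaining (a hypothetical illustration, not from the
source): it assumes a workflow object exposing `run_python_on_args` as defined above and
`Locator` values built elsewhere; the module paths and arguments are placeholders.

first_node = workflow.run_python_on_args(
    preprocess_locator,                     # a Locator built elsewhere (hypothetical)
    "my_project.preprocess",                # hypothetical module path
    "--input raw.txt --output clean.txt",
    depends_on=[],                          # no prerequisite jobs
)
score_node = workflow.run_python_on_args(
    score_locator,                          # hypothetical Locator
    "my_project.score",                     # hypothetical module path
    "--input clean.txt",
    depends_on=[first_node],                # runs only after the preprocessing job
)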
Example #18
class ActionDescription:
    frame: ActionDescriptionFrame = attrib(
        validator=instance_of(ActionDescriptionFrame), kw_only=True
    )
    # nested generic in optional seems to be confusing mypy
    during: Optional[DuringAction[ActionDescriptionVariable]] = attrib(  # type: ignore
        validator=optional(instance_of(DuringAction)), default=None, kw_only=True
    )
    # conditions which hold both before and after the action
    enduring_conditions: ImmutableSet[Relation[ActionDescriptionVariable]] = attrib(
        converter=flatten_relations, default=immutableset(), kw_only=True
    )
    # Preconditions
    preconditions: ImmutableSet[Relation[ActionDescriptionVariable]] = attrib(
        converter=flatten_relations, default=immutableset(), kw_only=True
    )
    # Postconditions
    postconditions: ImmutableSet[Relation[ActionDescriptionVariable]] = attrib(
        converter=flatten_relations, default=immutableset(), kw_only=True
    )
    # Asserted properties of objects in action
    asserted_properties: ImmutableSetMultiDict[
        ActionDescriptionVariable, OntologyNode
    ] = attrib(
        converter=_to_immutablesetmultidict, default=immutablesetmultidict(), kw_only=True
    )
    auxiliary_variables: ImmutableSet[ActionDescriptionVariable] = attrib(init=False)
    """
    These are variables which do not occupy semantic roles
    but are still referred to by conditions, paths, etc.
    An example would be the container for liquid for a "drink" action.
    """

    def __attrs_post_init__(self) -> None:
        for relation in chain(
            self.enduring_conditions, self.preconditions, self.postconditions
        ):
            if not isinstance(relation, Relation):
                raise RuntimeError(
                    f"All conditions on an action description ought to be Relations "
                    f"but got {relation}"
                )

    @auxiliary_variables.default
    def _init_auxiliary_variables(self):
        auxiliary_variables: List[ActionDescriptionVariable] = []
        if self.during:
            self.during.accumulate_referenced_objects(auxiliary_variables)
        for relation in chain(
            self.enduring_conditions, self.preconditions, self.postconditions
        ):
            relation.accumulate_referenced_objects(auxiliary_variables)
        return immutableset(
            variable
            for variable in auxiliary_variables
            if variable not in self.frame.variables_to_roles
        )
Example #19
def test_pursuit_preposition_on_learner(language_mode):
    rng = random.Random()
    rng.seed(0)
    learner = PrepositionPursuitLearner(
        learning_factor=0.5,
        graph_match_confirmation_threshold=0.7,
        lexicon_entry_threshold=0.7,
        rng=rng,
        smoothing_parameter=0.001,
        ontology=GAILA_PHASE_1_ONTOLOGY,
        object_recognizer=LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[language_mode],
        language_mode=language_mode,
    )  # type: ignore
    ball = standard_object("ball", BALL)
    table = standard_object("table", TABLE)
    language_generator = phase1_language_generator(language_mode)
    on_train_curriculum = phase1_instances(
        "Preposition Unit Train",
        situations=sampled(
            _on_template(ball, table, immutableset(), is_training=True),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=10,
        ),
        language_generator=language_generator,
    )
    on_test_curriculum = phase1_instances(
        "Preposition Unit Test",
        situations=sampled(
            _on_template(ball, table, immutableset(), is_training=False),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
        ),
        language_generator=language_generator,
    )

    for (
            _,
            linguistic_description,
            perceptual_representation,
    ) in on_train_curriculum.instances():
        # Get the object matches first - preposition learner can't learn without already recognized objects
        learner.observe(
            LearningExample(perceptual_representation, linguistic_description))
    for (
            _,
            test_linguistic_description,
            test_perceptual_representation,
    ) in on_test_curriculum.instances():
        descriptions_from_learner = learner.describe(
            test_perceptual_representation)
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ][0] == gold
Example #20
def _make_in_front_tests(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[HighLevelSemanticsSituation,
                                          LinearizedDependencyTree],
) -> Phase1InstanceGroup:
    figure_0 = standard_object(
        "figure_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    figure_1 = standard_object(
        "figure_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_0 = standard_object(
        "ground_0",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])
    ground_1 = standard_object(
        "ground_1",
        THING,
        banned_properties=[HOLLOW, IS_SPEAKER, IS_ADDRESSEE])

    figures = immutableset([figure_0, figure_1])
    grounds = immutableset([ground_0, ground_1])

    speaker = standard_object("speaker", PERSON, added_properties=[IS_SPEAKER])
    addressee = standard_object("addressee",
                                LEARNER,
                                added_properties=[IS_ADDRESSEE])
    computed_background = [speaker, addressee]

    return phase1_instances(
        "Preposition Testing In Front",
        chain(*[
            flatten([
                sampled(
                    _in_front_template(
                        figure,
                        ground,
                        flatten([
                            make_noise_objects(noise_objects),
                            computed_background,
                        ]),
                        is_training=False,
                        is_near=close,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=PHASE1_CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                ) for figure in figures for ground in grounds
                for close in BOOL_SET
            ])
        ]),
        language_generator=language_generator,
    )
Example #21
 def test_empty(self):
     empty = immutableset()
     self.assertEqual(0, len(empty))
     empty2 = immutableset([])
     self.assertEqual(0, len(empty2))
     self.assertEqual(empty, empty2)
     empty3 = ImmutableSet.builder().build()
     self.assertEqual(0, len(empty3))
     self.assertEqual(empty, empty3)
Example #22
 def generate_language(
         self, situation: SituationT,
         chooser: SequenceChooser) -> ImmutableSet[LinguisticDescriptionT]:
     wrapped_result = self._wrapped_generator.generate_language(
         situation, chooser)
     if wrapped_result:
         return immutableset([wrapped_result[0]])
     else:
         return immutableset()
Example #23
 def test_immutable(self):
     source = [1, 2, 3]
     set1 = immutableset(source)
     with self.assertRaises(AttributeError):
         # noinspection PyUnresolvedReferences
         set1.add(4)
     # Update doesn't affect original
     source.append(4)
     self.assertNotEqual(immutableset(source), set1)
Example #24
def test_pursuit_preposition_over_learner(language_mode, learner):
    ball = standard_object("ball", BALL)
    table = standard_object("table", TABLE)
    language_generator = phase1_language_generator(language_mode)
    over_train_curriculum = phase1_instances(
        "Preposition Over Unit Train",
        situations=sampled(
            _over_template(ball,
                           table,
                           immutableset(),
                           is_training=True,
                           is_distal=True),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=10,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )
    over_test_curriculum = phase1_instances(
        "Preposition Over Unit Test",
        situations=sampled(
            _over_template(ball,
                           table,
                           immutableset(),
                           is_training=False,
                           is_distal=True),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
            block_multiple_of_the_same_type=True,
        ),
        language_generator=language_generator,
    )

    processing_learner = learner(language_mode)

    for (
            _,
            linguistic_description,
            perceptual_representation,
    ) in over_train_curriculum.instances():
        processing_learner.observe(
            LearningExample(perceptual_representation, linguistic_description))

    for (
            _,
            test_linguistic_description,
            test_perceptual_representation,
    ) in over_test_curriculum.instances():
        descriptions_from_learner = processing_learner.describe(
            test_perceptual_representation)
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert gold in [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ]
Example #25
def test_pursuit_preposition_in_learner(language_mode):
    rng = random.Random()
    rng.seed(0)
    learner = PrepositionPursuitLearner(
        learning_factor=0.5,
        graph_match_confirmation_threshold=0.7,
        lexicon_entry_threshold=0.7,
        rng=rng,
        smoothing_parameter=0.001,
        ontology=GAILA_PHASE_1_ONTOLOGY,
        object_recognizer=LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[language_mode],
        language_mode=language_mode,
    )  # type: ignore
    water = object_variable("water", WATER)
    cup = standard_object("cup", CUP)
    language_generator = phase1_language_generator(language_mode)
    in_train_curriculum = phase1_instances(
        "Preposition In Unit Train",
        situations=sampled(
            _in_template(water, cup, immutableset(), is_training=True),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=10,
        ),
        language_generator=language_generator,
    )
    in_test_curriculum = phase1_instances(
        "Preposition In Unit Test",
        situations=sampled(
            _in_template(water, cup, immutableset(), is_training=False),
            chooser=PHASE1_CHOOSER_FACTORY(),
            ontology=GAILA_PHASE_1_ONTOLOGY,
            max_to_sample=1,
        ),
        language_generator=language_generator,
    )
    for (
            _,
            linguistic_description,
            perceptual_representation,
    ) in in_train_curriculum.instances():
        learner.observe(
            LearningExample(perceptual_representation, linguistic_description))

    for (
            _,
            test_linguistic_description,
            test_perceptual_representation,
    ) in in_test_curriculum.instances():
        descriptions_from_learner = learner.describe(
            test_perceptual_representation)
        gold = test_linguistic_description.as_token_sequence()
        assert descriptions_from_learner
        assert [
            desc.as_token_sequence() for desc in descriptions_from_learner
        ][0] == gold
Example #26
def _read_keys_from_keys_file(zip_file: ZipFile) -> Optional[AbstractSet[str]]:
    try:
        keys_data = zip_file.read("__keys")
        if keys_data:
            return immutableset(keys_data.decode("utf-8").split("\n"))
        else:
            # If keys_data is empty, the "split" above will return [''], which is wrong.
            return immutableset()
    except KeyError:
        return None
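A note on the empty-data branch above: str.split never returns an empty list, so splitting
empty key data would produce a spurious empty-string key. A tiny self-contained check
(plain Python, not from the source):

assert "".split("\n") == [""]             # would wrongly look like one empty key
assert "a\nb".split("\n") == ["a", "b"]   # normal case: one key per line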
Example #27
def make_noise_objects(
    noise_objects: Optional[int],
    banned_ontology_types: Iterable[OntologyNode] = immutableset(),
) -> Iterable[TemplateObjectVariable]:
    return immutableset(
        standard_object(
            f"noise_object_{x}",
            banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
            banned_ontology_types=banned_ontology_types,
        ) for x in range(noise_objects if noise_objects else 0))
Example #28
 def test_issuperset(self):
     empty = immutableset()
     s1 = immutableset([1, 2, 3, 3, 2, 1])
     s2 = immutableset([1, 2, 3, 4])
     self.assertFalse(s1.issuperset(s2))
     self.assertTrue(s2.issuperset(s1))
     self.assertFalse(empty.issuperset(s1))
     # issuperset is >=, not >
     self.assertTrue(empty.issuperset(empty))
     self.assertTrue(s1.issuperset(empty))
Example #29
 def generate_language(
         self, situation: SituationT,
         chooser: SequenceChooser) -> ImmutableSet[LinguisticDescriptionT]:
     wrapped_result = self._wrapped_generator.generate_language(
         situation, chooser)
     if wrapped_result:
         # noinspection PyTypeChecker
         return immutableset(
             [self._sequence_chooser.choice(wrapped_result)])
     else:
         return immutableset()
Example #30
class ObjectStructuralSchema(HasAxes, MaybeHasGeon):
    r"""
    A hierarchical representation of the internal structure of some type of object.

    An `ObjectStructuralSchema` represents the general pattern of the structure of an object,
    rather than the structure of any particular object
    (e.g. people in general, rather than a particular person).

    For example, a person's body is made up of a head, torso, left arm, right arm, left leg, and
    right leg. These sub-objects have various relations to one another
    (e.g. the head is above and supported by the torso).

    Declaring an `ObjectStructuralSchema` can be verbose;
     see `Relation`\ s for additional tips on how to make this more compact.
    """

    ontology_node: OntologyNode = attrib(validator=instance_of(OntologyNode))
    """
    The `OntologyNode` this `ObjectStructuralSchema` represents the structure of.
    """
    sub_objects: ImmutableSet["SubObject"] = attrib(converter=_to_immutableset,
                                                    default=immutableset())
    r"""
    The component parts which make up an object of the type *parent_object*.
    
    These `SubObject`\ s themselves wrap `ObjectStructuralSchema`\ s 
    and can therefore themselves have complex internal structure.
    """
    sub_object_relations: ImmutableSet[Relation["SubObject"]] = attrib(
        converter=_to_immutableset, default=immutableset())
    r"""
    A set of `Relation`\ s which define how the `SubObject`\ s relate to one another. 
    """
    geon: Optional[Geon] = attrib(validator=optional(instance_of(Geon)),
                                  default=None,
                                  kw_only=True)
    axes: Axes = attrib(validator=instance_of(Axes), kw_only=True)

    @axes.default
    def _init_axes(self) -> Axes:
        if self.geon:
            return self.geon.axes
        else:
            raise RuntimeError(
                "If a geon is not give for a structural schema, "
                "then axes must be explicitly specified")

    def __attrs_post_init__(self) -> None:
        for sub_object in self.sub_objects:
            if sub_object.schema.axes == self.axes:
                raise RuntimeError(
                    "Don't share axes between schemata.  If the axes should be "
                    "similar, consider using .copy()")