Code example #1
import numpy as np
import sklearn.linear_model as sklm

# RecognizedObject, param (concept dictionaries and choice lists), softmax and the
# random_*_sentences / representation-checking helpers are provided by the
# surrounding project modules.
def possible_recognized_object_for_predicate(predicate,
                                             fill_unkown_fields=True):
    """
       From a predicate using words from sentence, returns an object from vision module
       corresponding to the situation described by the predicate (in french), using grounded concept
       (in english)
    """

    if predicate is None:

        if fill_unkown_fields:
            return random_recognized_object()

        return RecognizedObject(None, None, None)

    if fill_unkown_fields:
        default_category = np.random.choice(param.CATEGORIES)
        default_position = np.random.choice(param.POSITIONS)
        default_color = np.random.choice(param.COLORS)
    else:
        default_category = None
        default_position = None
        default_color = None

    seen_category = param.OBJ_NAME_TO_CONCEPT.get(predicate.object,
                                                  default_category)
    seen_position = param.POSITION_NAME_TO_CONCEPT.get(predicate.action,
                                                       default_position)
    seen_color = param.COLOR_NAME_TO_CONCEPT.get(predicate.color,
                                                 default_color)

    return RecognizedObject(seen_category, seen_position, seen_color)
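The function above boils down to dictionary lookups with a configurable fallback. The following minimal sketch illustrates that pattern with hypothetical stand-in mappings and a namedtuple in place of the project's RecognizedObject; the real dictionaries and choice lists live in the param module.

import collections
import numpy as np

RecognizedObjectStub = collections.namedtuple("RecognizedObjectStub",
                                              ["category", "position", "color"])

# Hypothetical French-word-to-concept mappings standing in for param's dictionaries.
OBJ_NAME_TO_CONCEPT = {"balle": "ball", "cube": "cube"}
COLOR_NAME_TO_CONCEPT = {"rouge": "red", "bleu": "blue"}

def lookup_or_default(mapping, word, choices, fill_unknown):
    # Unknown or missing words fall back to a random concept when
    # fill_unknown is True, otherwise to None.
    default = np.random.choice(choices) if fill_unknown else None
    return mapping.get(word, default)

category = lookup_or_default(OBJ_NAME_TO_CONCEPT, "balle", ["ball", "cube"], True)
color = lookup_or_default(COLOR_NAME_TO_CONCEPT, None, ["red", "blue"], False)
print(RecognizedObjectStub(category, "left", color))
# -> RecognizedObjectStub(category='ball', position='left', color=None)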
Code example #2
def random_recognized_object(category_choices=param.CATEGORIES,
                             position_choices=param.POSITIONS,
                             color_choices=param.COLORS):
    """
        Returns a random object. One half of the time,
        an empty object is returned.
    """

    if np.random.rand() < 0.5:
        random_category = np.random.choice(category_choices)
        random_position = np.random.choice(position_choices)
        random_color = np.random.choice(color_choices)
        return RecognizedObject(random_category, random_position, random_color)

    return RecognizedObject(None, None, None)
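A quick self-contained check of the 50/50 split described in the docstring, using a toy stand-in for RecognizedObject and toy choice lists (both hypothetical):

import collections
import numpy as np

RecognizedObjectStub = collections.namedtuple("RecognizedObjectStub",
                                              ["category", "position", "color"])

def random_recognized_object_stub(categories=("cube", "ball"),
                                  positions=("left", "right"),
                                  colors=("red", "blue")):
    if np.random.rand() < 0.5:
        return RecognizedObjectStub(np.random.choice(categories),
                                    np.random.choice(positions),
                                    np.random.choice(colors))
    return RecognizedObjectStub(None, None, None)

samples = [random_recognized_object_stub() for _ in range(10000)]
empty_ratio = sum(obj.category is None for obj in samples) / len(samples)
print("empty ratio:", round(empty_ratio, 2))  # close to 0.5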
Code example #3
def sentence_grounding_network_error(
        nw,
        nb_tests,
        evaluation_method=is_a_valid_representation,
        train_sentences=[]):
    """
        Computes and returns error percentage made on nb_tests randomly picked sentences
        not apart of the training sentences. Evaluation method, which determines if
        an imagined vision matches or not a predicate list, can be chosen 
        (either is_a_valid_representation or is_an_exact_representation)
    """

    test_sentences, test_predicates = random_test_sentences(
        nb_tests, train_sentences)

    nb_bad = 0.

    for i, sentence in enumerate(test_sentences):

        predicates = test_predicates[i]
        imagined_vision = [
            RecognizedObject(*caracterics)
            for caracterics in nw.ground_sentence(sentence)
        ]

        if not evaluation_method(predicates, imagined_vision):
            nb_bad += 1.

    return (nb_bad / nb_tests) * 100.
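The error measure itself is just a failure count over the test cases. The toy sketch below isolates that computation; toy_eval and the test pairs are hypothetical stand-ins, while the real code relies on random_test_sentences and on the project's evaluation functions.

def error_percentage(test_cases, evaluation_method):
    # Count the cases rejected by the evaluation method and return a percentage.
    nb_bad = sum(1 for expected, produced in test_cases
                 if not evaluation_method(expected, produced))
    return 100.0 * nb_bad / len(test_cases)

def toy_eval(expected, produced):
    return expected == produced

cases = [("red cube left", "red cube left"),
         ("blue ball right", "green ball right"),
         ("red cube right", "red cube right"),
         ("blue ball left", "blue ball left")]
print(error_percentage(cases, toy_eval))  # 25.0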
Code example #4
def output_to_vision(output, nb_concepts, factor):
    """
        Converts the raw network output vector into a list of RecognizedObject.
        The output contains one block of nb_concepts values per object; within a
        block, the most activated concept of each group (object, position, color)
        is kept only if its softmax probability is at least `factor` times the
        uniform probability, otherwise the field is left to None.
    """

    global concepts_delimitations
    global output_id_to_concept_dict
    objs = []
    nb_objects = 2
    for j in range(nb_objects):
        cat = [None] * 3
        offset = j * nb_concepts

        for i in range(len(concepts_delimitations)):
            cat_activations = output[offset +
                                     concepts_delimitations[i][0]:offset +
                                     concepts_delimitations[i][1]]
            #print(output_id_to_concept_dict[i], cat_activations, output, concepts_delimitations)

            proba = softmax(cat_activations)

            #print(proba)
            concept = np.argmax(proba)

            # The acceptance threshold depends on the number of concepts to choose
            # from: the winner must be `factor` times more likely than a uniform guess.
            threshold = factor / (concepts_delimitations[i][1] -
                                  concepts_delimitations[i][0])

            if proba[concept] < threshold:
                cat[i] = None
            else:
                cat[i] = output_id_to_concept_dict[concepts_delimitations[i][0]
                                                   + concept]

        objs.append(RecognizedObject(cat[0], cat[1],
                                     cat[2]))  #object, position, color

    return objs
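The acceptance rule above keeps a concept only when its softmax probability beats factor times the uniform probability. The sketch below shows that rule in isolation, assuming scipy's softmax (the project may use its own helper):

import numpy as np
from scipy.special import softmax

def accepted_concept(activations, factor):
    # Softmax over the activations of one concept group.
    proba = softmax(activations)
    best = int(np.argmax(proba))
    # Accept the winner only if it is `factor` times more likely than a uniform guess.
    threshold = factor / len(activations)
    return best if proba[best] >= threshold else None

print(accepted_concept(np.array([0.1, 0.2, 3.0]), factor=2.0))  # 2: clear winner
print(accepted_concept(np.array([0.1, 0.2, 0.3]), factor=2.0))  # None: too close to uniform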
Code example #5
def train_sentence_grounding_network(nw,
                                     nb_trainings,
                                     verbose=False,
                                     learn_individual_words=False,
                                     continuous_learning=False):
    """
        Trains the network nw by randomly picking nb_training sentences
        If learn_individual_words is True, the network learns also the meaning of individual words
        contained in the sentence
    """

    if learn_individual_words and continuous_learning:
        print(
            "ERROR: learn_individual_words and continuous_learning can't both be true"
        )
        return

    train_sentences, train_predicates = random_train_sentences_and_predicates(
        nb_trainings)

    for i in range(nb_trainings):

        sentence = train_sentences[i]
        predicates = train_predicates[i]

        # A vision corresponding to the sentence description is created
        provided_vision = list(
            map(possible_recognized_object_for_predicate, predicates))
        caracteristics_list_to_learn = [[x.category, x.position, x.color]
                                        for x in provided_vision]

        if verbose:
            print(sentence)
            imagined_caracteristics_list = nw.ground_sentence(sentence)
            imagined_vision = [
                RecognizedObject(*caracterics)
                for caracterics in imagined_caracteristics_list
            ]
            print("Imagined:", imagined_vision)
            print("Is a valid representation:",
                  is_a_valid_representation(predicates, imagined_vision))
            print("Is an exact representation:",
                  is_an_exact_representation(predicates, imagined_vision))
            print("Provided vision for learning:", provided_vision)
            print('---------------------------------')

        nw.cross_situational_learning(sentence,
                                      caracteristics_list_to_learn,
                                      continuous_learning=continuous_learning)

        if learn_individual_words:
            # Then every word meaning is also learned separately
            for w in nw.split_sentence(sentence):
                nw.cross_situational_learning(w, caracteristics_list_to_learn)

    return train_sentences
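When learn_individual_words is True, every word of a sentence is paired with the same characteristics list as the full sentence. A small stand-alone sketch of those extra training pairs (the toy sentence and the plain whitespace split are assumptions; the project uses nw.split_sentence):

# Toy French sentence and a toy characteristics list (category, position, color).
sentence = "la balle rouge est a gauche"
caracteristics_list_to_learn = [["ball", "left", "red"]]

# One pair for the full sentence, plus one pair per individual word.
pairs = [(sentence, caracteristics_list_to_learn)]
pairs += [(w, caracteristics_list_to_learn) for w in sentence.split()]

for text, target in pairs:
    print(text, "->", target)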
Code example #6
def train_sentence_grounding_network_offline(nw,
                                             nb_trainings,
                                             offline_ridge,
                                             verbose=False,
                                             learn_individual_words=False,
                                             continuous_learning=False):
    """
        Trains with offline method the network nw by randomly picking nb_training sentences
        If learn_individual_words is True, the network learns also the meaning of individual words
        contained in the sentence
    """

    if learn_individual_words:
        print(
            "ERROR: learn_individual_words is not supported by the offline method")
        return

    nw.linear_model = sklm.Ridge(offline_ridge)

    train_sentences, train_predicates = random_train_sentences_and_predicates(
        nb_trainings)

    for sentence in train_sentences:
        for w in nw.split_sentence(sentence):
            nw.add_word_id_if_unkown(w)

    input_teachers = []
    output_teachers = []

    for i in range(nb_trainings):

        sentence = train_sentences[i]
        predicates = train_predicates[i]

        # A vision corresponding to the sentence description is created
        provided_vision = list(
            map(possible_recognized_object_for_predicate, predicates))
        caracteristics_list_to_learn = [[x.category, x.position, x.color]
                                        for x in provided_vision]

        if verbose:
            print(sentence)
            imagined_caracteristics_list = nw.ground_sentence(sentence)
            imagined_vision = [
                RecognizedObject(*caracterics)
                for caracterics in imagined_caracteristics_list
            ]
            print("Imagined:", imagined_vision)
            print("Is a valid representation:",
                  is_a_valid_representation(predicates, imagined_vision))
            print("Is an exact representation:",
                  is_an_exact_representation(predicates, imagined_vision))
            print("Provided vision for learning:", provided_vision)
            print('---------------------------------')

        input_teachers.append(
            nw.one_hot_encoding_inputs_for_sentence(sentence))
        # The output teacher is zero on every timestep except the last two,
        # which carry the encoded characteristics to reproduce.
        output_teachers.append((len(input_teachers[-1]) - 2) *
                               [np.zeros_like(nw.output_values)] + 2 * [
                                   nw.caracteristics_to_output_teacher(
                                       caracteristics_list_to_learn)
                               ])

    if continuous_learning:
        nb_washing_list = 0
    else:
        nb_washing_list = [len(inpt) - 1 for inpt in input_teachers]

    nw.learn_series(input_teachers,
                    output_teachers,
                    nb_washing_list,
                    reset_memory=True)

    return train_sentences
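The offline variant stacks one teacher series per sentence: the input teacher is the one-hot encoded sentence, and the output teacher is zero everywhere except on the last two timesteps, which carry the target encoding. A toy illustration of that layout (vector sizes and values are stand-ins, not the project's encodings):

import numpy as np

# One one-hot input vector per word of a 4-word toy sentence.
sentence_inputs = [np.eye(5)[i] for i in [0, 3, 2, 4]]
# Toy target encoding of the grounded characteristics.
target = np.array([0.0, 1.0, 0.0, 0.0, 1.0, 0.0])

# Zeros for every timestep except the last two, as in the code above.
output_teacher = (len(sentence_inputs) - 2) * [np.zeros_like(target)] + 2 * [target]

for step, (inp, out) in enumerate(zip(sentence_inputs, output_teacher)):
    print(step, inp.astype(int), out)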