Example #1
from time import ctime  # stdlib; 'utils', 'dc', and 'lm' are project-local modules

def test_example(cv_image, message, jointModelObject, print_message):
    # Convert the cv image into the processing format.
    # TODO: correct this step; we do not read a file, we convert from one
    # format to the other. (Old file-based call: image = utils.imageRead(imageFile))
    print("Test an Example...")
    if cv_image is None:
        print_message("Unable to capture an Image.")
        return

    image = cv_image

    # Extract the color and shape of the object in the image.
    cnt = utils.objectIdentification(cv_image)        # contour of the detected object
    x, y, w, h = utils.boundingRectangle(cnt)         # bounding box of the contour
    pixNp = dc.findAllPixels(image, cnt, x, y, w, h)  # pixels inside the contour
    pixNp = dc.findUniquePixels(pixNp)                # de-duplicated RGB values

    # store image data as dictionary
    imageData = {'color': pixNp, 'shape': cnt}
    print("Printing the size of RGB value " + str(len(pixNp)))

    # extract keywords from message
    languageObject = lm(message)
    positiveLanguageData, negativeLanguageData = languageObject.process_content()

    print(ctime())  # timestamp before scoring
    # Now perform the test.
    totalScore, wordRanks, wordScoreDictionary = jointModelObject.associate_words_example(
        positiveLanguageData, negativeLanguageData, imageData)
    print_message("Score: " + str(totalScore))
    print_message(str(wordRanks))
    print_message(str(wordScoreDictionary))
    print(ctime())  # timestamp after scoring
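
# A minimal invocation sketch (an assumption, not part of the original node
# wiring): grab a frame with OpenCV and score it against a sentence.
# 'jointModelObject' is assumed to be an already-trained joint model instance.
import cv2

capture = cv2.VideoCapture(0)
ok, frame = capture.read()
capture.release()
test_example(frame if ok else None, "this is the red ball", jointModelObject, print)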
Example #2
    def processLanguage(self, message):
        # Extract keywords from the message and split them into positive
        # and negative keyword lists.
        languageObject = lm(message)
        positiveLanguageData, negativeLanguageData = languageObject.process_content()

        # Return the two lists of processed words.
        return [positiveLanguageData, negativeLanguageData]
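
# A minimal usage sketch (hypothetical caller names): assuming this method
# lives on the node class that owns the joint model, e.g.
#   positive, negative = node.processLanguage("this is a red ball, not a cup")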
Example #3
import pickle  # stdlib; 'utils', 'dc', and 'lm' are project-local modules

def add_example(cv_image, message, jointModelObject, print_message, example_count):
    # Convert the cv image into the processing format.
    # TODO: correct this step; we do not read a file, we convert from one
    # format to the other. (Old file-based call: image = utils.imageRead(imageFile))
    print("Adding an Example...")
    if cv_image is None:
        print_message("Unable to capture an Image.")
        return

    image = cv_image

    # Extract the color and shape of the object in the image.
    cnt = utils.objectIdentification(cv_image)        # contour of the detected object
    x, y, w, h = utils.boundingRectangle(cnt)         # bounding box of the contour
    pixNp = dc.findAllPixels(image, cnt, x, y, w, h)  # pixels inside the contour
    pixNp = dc.findUniquePixels(pixNp)                # de-duplicated RGB values

    # store image data as dictionary
    imageData = {'color': pixNp, 'shape': cnt}
    print("Printing the size of RGB value " + str(len(pixNp)))

    # extract keywords from message
    languageObject = lm(message)
    positiveLanguageData, negativeLanguageData = languageObject.process_content()

    # Add each (keyword, image) pair to the joint model, labelled as a
    # positive or negative example.
    for keyword in positiveLanguageData:
        jointModelObject.add_word_example_pair(keyword, imageData, "+")

    for keyword in negativeLanguageData:
        jointModelObject.add_word_example_pair(keyword, imageData, "-")

    # Send an ACK to the output node that the concept has been added.
    # Pickle the model to persist training state: store_size is the number of
    # examples after which the model is written to a pickle file.
    store_size = 20
    if example_count % store_size == 0:
        with open('data/pickle/passive_jointModelObject.pickle', 'wb') as handle:
            pickle.dump(jointModelObject, handle)

    print_message("Example Object - Concept Added. Number of Examples Added: " + str(example_count))