Example #1
import numpy as np
import util
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
import visual_words
import visual_recog
import skimage.io

if __name__ == '__main__':

    num_cores = util.get_num_CPU()

    path_img = "../data/laundromat/sun_aiyluzcowlbwxmdb.jpg"
    image = skimage.io.imread(path_img)
    image = image.astype('float') / 255
    #filter_responses = visual_words.extract_filter_responses(image)
    #util.display_filter_responses(filter_responses)

    #visual_words.compute_dictionary(num_workers=num_cores)

    dictionary = np.load('dictionary.npy')
    wordmap = visual_words.get_visual_words(image, dictionary)
    filename = 'figure3'
    util.save_wordmap(wordmap, filename)
    #visual_recog.build_recognition_system(num_workers=num_cores)

    #conf, accuracy = visual_recog.evaluate_recognition_system(num_workers=num_cores)
    #print(conf)
    #print(np.diag(conf).sum()/conf.sum())
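
The wordmap saved above comes from visual_words.get_visual_words. A minimal sketch of how such a lookup is typically implemented (assign every pixel to its nearest dictionary entry via scipy's cdist) is shown below; the name get_visual_words_sketch and the (K, feature_dim) dictionary layout are assumptions, not taken from the course modules.

import numpy as np
import scipy.spatial.distance
import visual_words

def get_visual_words_sketch(image, dictionary):
    # filter responses per pixel: (H, W, F) -> (H*W, F)
    responses = visual_words.extract_filter_responses(image)
    H, W, F = responses.shape
    features = responses.reshape(-1, F)
    # distance from every pixel's response vector to every dictionary word
    dists = scipy.spatial.distance.cdist(features, dictionary)
    # label each pixel with the index of its nearest visual word
    return np.argmin(dists, axis=1).reshape(H, W)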
Example #2
import numpy as np
import util
import visual_words
import visual_recog
import skimage.io

import time

if __name__ == '__main__':
    start = time.time()  #ctl
    num_cores = util.get_num_CPU()
    # path_img = "../data/kitchen/sun_avuzlcqxzrzteyvc.jpg"
    path_img = "../data/aquarium/sun_aztvjgubyrgvirup.jpg"
    image = skimage.io.imread(path_img)
    image = image.astype('float') / 255
    filter_responses = visual_words.extract_filter_responses(image)
    # util.display_filter_responses(filter_responses)
    # visual_words.compute_dictionary(num_workers=num_cores)

    dictionary = np.load('dictionary.npy')
    wordmap = visual_words.get_visual_words(image, dictionary)
    util.save_wordmap(wordmap, "wordmap_n")
    # visual_recog.get_feature_from_wordmap(wordmap,dictionary.shape[0])  #ctl
    # visual_recog.get_feature_from_wordmap_SPM(wordmap,3,dictionary.shape[0])  #ctl
    visual_recog.build_recognition_system(
        num_workers=num_cores
    )  # approx 12 min for 100 files, about 2 hr for 1000 files

    conf, accuracy = visual_recog.evaluate_recognition_system(
        num_workers=num_cores)  # approx 1 hr for 577 files
    # print(conf)
    # print(np.diag(conf).sum()/conf.sum())
    end = time.time()  #ctl
    print("Time: ", end - start)  #ctl
Example #3
import numpy as np
import util
import matplotlib
from matplotlib import pyplot as plt
import visual_words
import visual_recog
import skimage.io

if __name__ == '__main__':

    num_cores = util.get_num_CPU()

    path_img = "../data/waterfall/sun_bolfhwtizbvyjmem.jpg"
    image = skimage.io.imread(path_img)
    image = image.astype('float') / 255
    filter_responses = visual_words.extract_filter_responses(image)
    util.display_filter_responses(filter_responses)
    visual_words.compute_dictionary(num_workers=num_cores)
    dictionary = np.load('dictionary.npy')
    wordmap = visual_words.get_visual_words(image, dictionary)
    util.save_wordmap(wordmap, "waterfall_3.jpg")
    visual_recog.build_recognition_system(num_workers=num_cores)
    conf, accuracy = visual_recog.evaluate_recognition_system(
        num_workers=num_cores)
    print(conf)
    print(np.diag(conf).sum() / conf.sum())
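
extract_filter_responses, whose output is displayed above, is usually a small multi-scale filter bank applied to each Lab channel. The sketch below is an assumption about that bank (Gaussian, Laplacian of Gaussian, and first Gaussian derivatives at four scales), not the module's actual code.

import numpy as np
import scipy.ndimage
import skimage.color

def extract_filter_responses_sketch(image, scales=(1, 2, 4, 8)):
    lab = skimage.color.rgb2lab(image)          # work in Lab color space
    responses = []
    for sigma in scales:
        for c in range(3):                      # filter every channel
            channel = lab[:, :, c]
            responses.append(scipy.ndimage.gaussian_filter(channel, sigma))
            responses.append(scipy.ndimage.gaussian_laplace(channel, sigma))
            responses.append(scipy.ndimage.gaussian_filter(channel, sigma, order=(0, 1)))  # d/dx
            responses.append(scipy.ndimage.gaussian_filter(channel, sigma, order=(1, 0)))  # d/dy
    return np.stack(responses, axis=-1)         # (H, W, 4 filters * 4 scales * 3 channels)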
Example #4
import numpy as np
import util
import visual_words
import visual_recog
import deep_recog
import torchvision
import skimage.io

if __name__ == '__main__':
    num_cores = util.get_num_CPU()

    path_img = "../data/park/labelme_vtvrcfujsukawzl.jpg"
    image = skimage.io.imread(path_img)
    image = np.array(image) / 255
    filter_responses = visual_words.extract_filter_responses(image)
    util.display_filter_responses(filter_responses)

    visual_words.compute_dictionary(num_workers=num_cores)

    dictionary = np.load('dictionary.npy')
    img = visual_words.get_visual_words(image, dictionary)
    util.save_wordmap(img, '../results/wordmap_3.png')
    visual_recog.build_recognition_system(num_workers=num_cores)

    conf, accuracy = visual_recog.evaluate_recognition_system(
        num_workers=num_cores)
    print(conf)
    print(np.diag(conf).sum() / conf.sum())
    vgg16 = torchvision.models.vgg16(pretrained=True).double()
    vgg16.eval()
    deep_recog.build_recognition_system(vgg16, num_workers=num_cores // 2)
    conf = deep_recog.evaluate_recognition_system(vgg16,
                                                  num_workers=num_cores // 2)
    print(conf)
    print(np.diag(conf).sum() / conf.sum())
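
The deep_recog calls above pass a full VGG16; a common way to expose fc7 features for such a pipeline (and likely what util.vgg16_fc7 in Example #6 does) is to truncate the classifier after its second fully connected layer. The sketch below is only an assumption about that helper.

import torch
import torchvision

def vgg16_fc7_sketch():
    vgg16 = torchvision.models.vgg16(pretrained=True).double()
    # keep classifier layers 0..4 (up to and including the fc7 ReLU),
    # dropping the final dropout and the 1000-way fc8 layer
    vgg16.classifier = torch.nn.Sequential(*list(vgg16.classifier.children())[:5])
    vgg16.eval()
    return vgg16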
Example #5
import numpy as np
import util
import visual_words
import visual_recog
import deep_recog
import torchvision
import skimage.io

if __name__ == '__main__':
    num_cores = util.get_num_CPU()

    path_img = "../data/kitchen/sun_aasmevtpkslccptd.jpg"
    image = skimage.io.imread(path_img)

    image = image.astype('float') / 255
    filter_responses = visual_words.extract_filter_responses(image)
    util.display_filter_responses(filter_responses)

    visual_words.compute_dictionary(num_workers=num_cores)

    dictionary = np.load('dictionary.npy')
    filename = "test.jpg"
    img = visual_words.get_visual_words(image, dictionary)
    util.save_wordmap(img, filename)
    visual_recog.build_recognition_system(num_workers=num_cores)

    conf, accuracy = visual_recog.evaluate_recognition_system(
        num_workers=num_cores)
    print(conf)
    print(np.diag(conf).sum() / conf.sum())

    vgg16 = torchvision.models.vgg16(pretrained=True).double()
    vgg16.eval()
    deep_recog.build_recognition_system(vgg16, num_workers=num_cores)
    conf, accuracy = deep_recog.evaluate_recognition_system(
        vgg16, num_workers=num_cores)
    print(conf)
    print(np.diag(conf).sum() / conf.sum())
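
compute_dictionary is called above before 'dictionary.npy' is loaded back. A minimal sketch of that step under the usual bag-of-visual-words assumptions: sample a few hundred filter responses per training image, cluster them with k-means, and save the cluster centers. The argument names, the alpha and K values, and the use of scikit-learn are assumptions, not read from the module.

import numpy as np
import sklearn.cluster
import skimage.io
import visual_words

def compute_dictionary_sketch(image_paths, alpha=250, K=200):
    samples = []
    for path in image_paths:
        image = skimage.io.imread(path).astype('float') / 255
        responses = visual_words.extract_filter_responses(image)
        flat = responses.reshape(-1, responses.shape[-1])
        # keep alpha random pixel responses per image
        idx = np.random.choice(flat.shape[0], alpha, replace=False)
        samples.append(flat[idx])
    samples = np.concatenate(samples, axis=0)
    kmeans = sklearn.cluster.KMeans(n_clusters=K).fit(samples)
    np.save('dictionary.npy', kmeans.cluster_centers_)

In the actual module the image list and parallelism presumably come from the training split and num_workers, which this sketch ignores.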
Example #6
    #path_img = "../data/auditorium/sun_aflgfyywvxbpeyxl.jpg"
    #path_img = "../data/baseball_field/sun_aalztykafqwxrspj.jpg"
    path_img = "../data/kitchen/sun_aasmevtpkslccptd.jpg"
    #path_img = "../data/highway/sun_acpvugnkzrliaqir.jpg"
    image = skimage.io.imread(path_img)
    image = image.astype('float') / 255

    filter_responses = visual_words.extract_filter_responses(image)
    util.display_filter_responses(filter_responses)

    visual_words.compute_dictionary(num_workers=num_cores)

    dictionary = np.load('dictionary.npy')
    wordmap = visual_words.get_visual_words(image, dictionary)
    util.save_wordmap(wordmap, 'word_map.jpg')

    visual_recog.build_recognition_system(num_workers=num_cores)

    conf, accuracy = visual_recog.evaluate_recognition_system(
        num_workers=num_cores)
    print(conf)
    print(accuracy)

    #vgg16 = torchvision.models.vgg16(pretrained=True).double()
    #vgg16.eval()
    vgg16 = util.vgg16_fc7()

    #deep_recog.build_recognition_system(vgg16,num_workers=num_cores//2)
    conf, accuracy = deep_recog.evaluate_recognition_system(
        vgg16, num_workers=num_cores // 2)
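
evaluate_recognition_system above compares each test feature against the stored training features; histogram intersection is a common similarity for these normalized bag-of-words histograms. The sketch below is only an assumption about how visual_recog might rank training examples, not its actual code.

import numpy as np

def distance_to_set_sketch(word_hist, histograms):
    # histogram intersection between one (N,) feature and a (T, N) stack
    similarity = np.minimum(word_hist, histograms).sum(axis=1)
    # histograms sum to 1, so 1 - similarity behaves like a distance
    return 1 - similarity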