# Example 1
def main():
    """Run the full scene-recognition pipeline.

    Steps: Q1.1 visualize filter-bank responses, Q1.2 build the visual-word
    dictionary, Q1.3 visualize a wordmap, Q2.1-2.4 build the recognition
    system, Q2.5 evaluate it. The confusion matrix and accuracy are written
    to ``opts.out_dir`` as confmat.csv and accuracy.txt.
    """
    opts = get_opts()
    # The CPU count is invariant for the whole run; query it once instead
    # of once per stage as before.
    n_cpu = util.get_num_CPU()

    # Q1.1 -- filter-bank responses for one sample image, scaled to [0, 1].
    img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    img = np.array(Image.open(img_path)).astype(np.float32) / 255
    filter_responses = visual_words.extract_filter_responses(opts, img)
    util.display_filter_responses(opts, filter_responses)

    # Q1.2 -- build the visual-word dictionary (saved as dictionary.npy).
    visual_words.compute_dictionary(opts, n_worker=n_cpu)

    # Q1.3 -- map a second image onto the dictionary and show the wordmap.
    img_path = join(opts.data_dir, 'windmill/sun_bsngeuxxmgmcsesp.jpg')
    img = np.array(Image.open(img_path)).astype(np.float32) / 255
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(wordmap)

    # Q2.1-2.4 -- build the recognition system from the training set.
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    # Q2.5 -- evaluate on the test set.
    conf, accuracy = visual_recog.evaluate_recognition_system(opts,
                                                              n_worker=n_cpu)

    print(conf)
    print(accuracy)

    np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
# Example 2
def main():
    """Q1.1 demo: display the filter-bank responses for a sample image."""
    opts = get_opts()

    # Load the sample kitchen image and scale pixel values into [0, 1].
    sample_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    sample = np.array(Image.open(sample_path), dtype=np.float32) / 255

    # Run the filter bank and show the resulting response maps.
    responses = visual_words.extract_filter_responses(opts, sample)
    util.display_filter_responses(opts, responses)
# Example 3
def main():
    """Run the assignment pipeline end to end.

    Q1.1 filter responses, Q1.2 dictionary construction, Q1.3 wordmap
    visualization for a selectable sample image, Q2 build and evaluate the
    recognition system. Writes confmat.csv and accuracy.txt to
    ``opts.out_dir``.
    """
    opts = get_opts()
    # Worker count does not change during the run; query it once.
    n_cpu = util.get_num_CPU()

    # Q1.1 -- filter-bank responses for one sample image, scaled to [0, 1].
    img_path = join(opts.data_dir, 'aquarium/sun_aztvjgubyrgvirup.jpg')
    img = np.array(Image.open(img_path)).astype(np.float32) / 255
    filter_responses = visual_words.extract_filter_responses(opts, img)
    util.display_filter_responses(opts, filter_responses)

    # Q1.2 -- build the visual-word dictionary.
    visual_words.compute_dictionary(opts, n_worker=n_cpu)

    # Q1.3 -- wordmap for one of three candidate images (toggle below).
    ### Uncomment for picture 1 ###
    img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    ### Uncomment for picture 2 ###
    # img_path = join(opts.data_dir, 'aquarium/sun_acrxheaggpuqwdwm.jpg')
    ### Uncomment for picture 3 ###
    # img_path = join(opts.data_dir, 'desert/sun_banypouestzeimab.jpg')
    ####################################################################
    img = np.array(Image.open(img_path)).astype(np.float32) / 255
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(wordmap)

    # Q2.1-2.4 -- build the recognition system.
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    # Q2.5 -- evaluate on the test set.
    conf, accuracy = visual_recog.evaluate_recognition_system(opts,
                                                              n_worker=n_cpu)

    # Q3.2 -- custom-system variant, disabled by default.
    # custom.build_recognition_system(opts, n_worker=n_cpu)
    # conf, accuracy = custom.evaluate_recognition_system(opts, n_worker=n_cpu)

    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'),
               conf,
               fmt='%d',
               delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
import matplotlib.pyplot as plt
import numpy as np
import skimage
import torchvision

import deep_recog
import util
import visual_recog
import visual_words

if __name__ == '__main__':
    # Run the whole pipeline as a script: filter responses, dictionary,
    # wordmap, recognition system, and finally a pretrained VGG-16 for the
    # deep-feature section.
    num_cores = util.get_num_CPU()

    # Q1.1 -- filter responses for a sample image, scaled to [0, 1] floats.
    path_img = "../data/kitchen/sun_aasmevtpkslccptd.jpg"
    image = skimage.io.imread(path_img)
    image = image.astype('float') / 255
    filter_responses = visual_words.extract_filter_responses(image)
    util.display_filter_responses(filter_responses)

    # Q1.2 -- build the dictionary (writes dictionary.npy to the cwd).
    visual_words.compute_dictionary(num_workers=num_cores)

    # Q1.3 -- map the image onto visual words. Bound to `wordmap` (was
    # `img`) so the save helper below can be re-enabled without renaming.
    dictionary = np.load('dictionary.npy')
    wordmap = visual_words.get_visual_words(image, dictionary)
    #util.save_wordmap(wordmap, filename)

    # Q2 -- build and evaluate the recognition system.
    visual_recog.build_recognition_system(num_workers=num_cores)
    conf, accuracy = visual_recog.evaluate_recognition_system(
        num_workers=num_cores)
    print(conf)
    print(np.diag(conf).sum() / conf.sum())

    # Deep-feature section: pretrained VGG-16 in double precision,
    # switched to eval mode for inference.
    vgg16 = torchvision.models.vgg16(pretrained=True).double()
    vgg16.eval()
# Example 5
def _show_image_and_wordmap(opts, dictionary, rel_path):
    """Load ``data_dir/rel_path`` as [0, 1] float32, then display the raw
    image followed by its visual-word map."""
    img = np.array(Image.open(join(opts.data_dir, rel_path))).astype(
        np.float32) / 255
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(img)
    util.visualize_wordmap(wordmap)


def main():
    """Run the pipeline: Q1.1 filter responses, Q1.2 dictionary, Q1.3
    wordmap visualizations for several sample images, Q2 build/evaluate the
    recognition system. Writes confmat.csv and accuracy.txt to
    ``opts.out_dir``.
    """
    opts = get_opts()
    print('L is', opts.L)
    print('K is', opts.K)
    print('alpha is', opts.alpha)
    print()

    # Q1.1 -- filter-bank responses for a sample image.
    img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    img = np.array(Image.open(img_path)).astype(np.float32) / 255
    filter_responses = visual_words.extract_filter_responses(opts, img)
    util.visualize_wordmap(img)
    util.display_filter_responses(opts, filter_responses)

    # Q1.2 -- build the dictionary once; it is reused for every wordmap
    # below and for the recognition system.
    n_cpu = util.get_num_CPU()
    visual_words.compute_dictionary(opts, n_worker=n_cpu)
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))

    # Q1.3 -- identical visualize sequence for four images, factored into
    # one helper instead of four copy-pasted blocks.
    for rel_path in ('kitchen/sun_aasmevtpkslccptd.jpg',
                     'waterfall/sun_bbeqjdnienanmmif.jpg',
                     'windmill/sun_bratfupeyvlazpba.jpg',
                     'desert/sun_adjlepvuitklskrz.jpg'):
        _show_image_and_wordmap(opts, dictionary, rel_path)

    # Q2.1-2.4 -- build the recognition system.
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    # Q2.5 -- evaluate on the test set.
    conf, accuracy = visual_recog.evaluate_recognition_system(opts,
                                                              n_worker=n_cpu)

    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'),
               conf,
               fmt='%d',
               delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
    for i in range(len(temp_data_list)):
        filter_responses.append(
            np.load(temp_data_path + '/' + temp_data_list[i]))

    all_filter_response = np.concatenate(filter_responses)
    kmeans = sklearn.cluster.KMeans(n_clusters=K).fit(all_filter_response)
    dictionary = kmeans.cluster_centers_
    np.save('dictionary.npy', dictionary)


def removeTmpFiles(path):
    """Recursively delete every '.DS_Store' file under ``path``.

    ``path`` may be a single file -- removed only when it is named
    '.DS_Store' -- or a directory, which is walked depth-first. Any other
    file is left untouched. Uses os.path helpers instead of manual '/'
    string splitting so the traversal is portable across platforms.
    """
    if os.path.basename(path) == '.DS_Store':
        os.remove(path)
    elif os.path.isdir(path):
        for filename in os.listdir(path):
            removeTmpFiles(os.path.join(path, filename))


if __name__ == "__main__":

    image_path = '../data/aquarium/sun_aztvjgubyrgvirup.jpg'
    # image=imageio.imread(image_path)
    image = cv2.imread(image_path)
    print(image.shape)
    response_map = extract_filter_responses(image)
    # dictionary = np.load("dictionary.npy")
    # wordmap=get_visual_words(image,dictionary)
    util.display_filter_responses(response_map)

    # util.save_wordmap(wordmap,'labelme_aacpgupgzvdjapw_wordmap.jpeg')