Пример #1
0
def main():
    """Run a hyperparameter sweep over the recognition pipeline.

    Iterates over the module-level parallel lists ``L``, ``K`` and ``alpha``
    (assumed equal length — TODO confirm), rebuilding the visual-word
    dictionary and the recognition system for each setting, then saving the
    confusion matrix and accuracy for that run into ``opts.out_dir``.
    """
    # Iterate the parallel hyperparameter lists together instead of
    # indexing each one by position (range(len(...)) anti-pattern).
    for l_val, k_val, a_val in zip(L, K, alpha):
        start = time.time()
        print('Started at: ', start)
        opts = get_opts()
        opts.L = l_val
        opts.K = k_val
        opts.alpha = a_val
        print('filter_scales', opts.filter_scales)
        print('L is', opts.L)
        print('K is', opts.K)
        print('alpha is', opts.alpha)

        # CPU count is invariant across the loop body; query it once per run.
        n_cpu = util.get_num_CPU()
        visual_words.compute_dictionary(opts, n_worker=n_cpu)
        # NOTE: the original also loaded dictionary.npy here into an unused
        # local; the load had no effect and was removed.

        # Q2.1-2.4
        visual_recog.build_recognition_system(opts, n_worker=n_cpu)

        ## Q2.5
        conf, accuracy = visual_recog.evaluate_recognition_system(opts, n_worker=n_cpu)

        print(conf)
        print(accuracy)
        np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
        np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
        # Sample the clock once so the "Finished at" print and the elapsed
        # time are computed from the same instant.
        end = time.time()
        print('Finished at: ', end)
        print('It took', ((end - start) / 60), 'minutes to execute the iteration.')
Пример #2
0
def main():
    """Exercise the visual-words pipeline end to end.

    Extracts filter responses for an aquarium image, builds the dictionary,
    computes a wordmap for a kitchen image, then trains and evaluates the
    recognition system, writing the confusion matrix and accuracy to disk.
    """
    opts = get_opts()

    def load_float_image(rel_path):
        # Read an image and normalize it to float32 in [0, 1].
        return np.array(Image.open(join(opts.data_dir, rel_path))).astype(np.float32) / 255

    aquarium = load_float_image('aquarium/sun_aztvjgubyrgvirup.jpg')
    filter_responses = visual_words.extract_filter_responses(opts, aquarium)

    n_cpu = util.get_num_CPU()
    # NOTE: dictionary construction deliberately runs with a single worker here.
    visual_words.compute_dictionary(opts, n_worker=1)

    kitchen = load_float_image('kitchen/sun_aaqhazmhbhefhakh.jpg')
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, kitchen, dictionary)

    n_cpu = util.get_num_CPU()
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    n_cpu = util.get_num_CPU()
    conf, accuracy, incorrect = visual_recog.evaluate_recognition_system(
        opts, n_worker=n_cpu)

    print(conf)
    print(accuracy)

    np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
Пример #3
0
def main():
    """Run questions Q1.1–Q2.5 of the scene-recognition assignment in order."""
    opts = get_opts()

    ## Q1.1 — visualize filter responses for a sample kitchen image
    kitchen_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    kitchen_img = np.array(Image.open(kitchen_path)).astype(np.float32) / 255
    responses = visual_words.extract_filter_responses(opts, kitchen_img)
    util.display_filter_responses(opts, responses)

    ## Q1.2 — build the visual-word dictionary in parallel
    n_cpu = util.get_num_CPU()
    visual_words.compute_dictionary(opts, n_worker=n_cpu)

    ## Q1.3 — map a windmill image onto visual words and show the wordmap
    windmill_path = join(opts.data_dir, 'windmill/sun_bsngeuxxmgmcsesp.jpg')
    windmill_img = np.array(Image.open(windmill_path)).astype(np.float32) / 255
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, windmill_img, dictionary)
    util.visualize_wordmap(wordmap)

    ## Q2.1-2.4 — train the recognition system
    n_cpu = util.get_num_CPU()
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    ## Q2.5 — evaluate and persist the results
    n_cpu = util.get_num_CPU()
    conf, accuracy = visual_recog.evaluate_recognition_system(opts, n_worker=n_cpu)

    print(conf)
    print(accuracy)

    np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
Пример #4
0
def main():
    """Drive assignment questions Q1.1–Q2.5 end to end.

    Displays filter responses for a sample image, builds the visual-word
    dictionary, visualizes a wordmap for one of three selectable images
    (chosen by commenting/uncommenting the path lines below), then trains
    and evaluates the recognition system, writing the confusion matrix and
    accuracy into ``opts.out_dir``.
    """
    opts = get_opts()

    # Q1.1
    img_path = join(opts.data_dir, 'aquarium/sun_aztvjgubyrgvirup.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    filter_responses = visual_words.extract_filter_responses(opts, img)
    util.display_filter_responses(opts, filter_responses)

    # Q1.2
    n_cpu = util.get_num_CPU()
    visual_words.compute_dictionary(opts, n_worker=n_cpu)

    # Q1.3
    ### Uncomment for picture 1 ###
    img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    ###  Uncomment for picture 2 ###
    # img_path = join(opts.data_dir, 'aquarium/sun_acrxheaggpuqwdwm.jpg')
    ### Uncomment for picture 3 ###
    # img_path = join(opts.data_dir, 'desert/sun_banypouestzeimab.jpg')
    ####################################################################
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(wordmap)

    # Q2.1-2.4
    n_cpu = util.get_num_CPU()
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    # Q2.5
    n_cpu = util.get_num_CPU()
    conf, accuracy = visual_recog.evaluate_recognition_system(opts,
                                                              n_worker=n_cpu)

    # Q3.2 — alternate "custom" system, currently disabled
    # n_cpu = util.get_num_CPU()
    # custom.build_recognition_system(opts, n_worker=n_cpu)
    # n_cpu = util.get_num_CPU()
    # conf, accuracy = custom.evaluate_recognition_system(opts, n_worker=n_cpu)

    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'),
               conf,
               fmt='%d',
               delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
def build_recognition_system(vgg16,num_workers=2):
	'''
	Creates a trained recognition system by generating training features from all training images.

	[input]
	* vgg16: prebuilt VGG-16 network; used as given. A pretrained torchvision
	  VGG-16 is loaded as a fallback only when None is passed.
	* num_workers: number of workers to process in parallel (currently
	  unused — feature extraction below is sequential)

	[returned]
	* features: numpy.ndarray of shape (N,K)
	* labels: numpy.ndarray of shape (N)
	'''
	# ----- TODO -----
	# Honor the caller-supplied network. The original clobbered both
	# parameters: it overwrote num_workers with util.get_num_CPU() and
	# discarded the passed vgg16 by re-downloading a pretrained model.
	if vgg16 is None:
		vgg16 = torchvision.models.vgg16(pretrained=True)

	train_data = np.load("../data/train_data.npz")
	train_name, labels = train_data['image_names'], train_data['labels']

	features = []
	for i in range(len(train_name)):
		image_path = os.path.join('../data/', train_name[i][0])
		args = [i, image_path, vgg16]
		features.append(get_image_feature(args))
	features = np.asarray(features)
	# Persisting to disk is disabled; features/labels are returned instead.
	#np.savez('../code/trained_system_deep.npz', features=features, labels=labels)
	print('saved train_system_deep.npz')
	return features, labels
Пример #6
0
def main():
    """Compute a wordmap for a desert image and run the recognition pipeline.

    Q1.1/Q1.2 are left commented out, so an existing dictionary in
    ``opts.out_dir`` is required; Q2.1–Q2.5 then train and evaluate the
    system and write the results to disk.
    """
    opts = get_opts()

    ## Q1.1 (disabled)
    #img_path = join(opts.data_dir, 'aquarium/sun_aztvjgubyrgvirup.jpg')
    #img = Image.open(img_path)
    #img = np.array(img).astype(np.float32)/255.0
    #filter_responses = visual_words.extract_filter_responses(opts, img)
    #util.display_filter_responses(opts, filter_responses)

    ## Q1.2 (disabled)
    # n_cpu = util.get_num_CPU()
    #visual_words.compute_dictionary(opts, n_worker=n_cpu)

    ## Q1.3 — normalize to float32 in [0, 1] and map onto visual words
    desert_img = np.array(
        Image.open(join(opts.data_dir, 'desert/sun_acrqldhmwdraspza.jpg'))
    ).astype(np.float32) / 255
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, desert_img, dictionary)
    #util.visualize_wordmap(wordmap)

    ## Q2.1-2.4
    workers = util.get_num_CPU()
    visual_recog.get_feature_from_wordmap(opts, wordmap)
    visual_recog.build_recognition_system(opts, n_worker=workers)

    ## Q2.5
    workers = util.get_num_CPU()
    conf, accuracy = visual_recog.evaluate_recognition_system(opts, n_worker=workers)

    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'), conf, fmt='%d', delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
Пример #7
0
def main():
    """Evaluate the already-trained recognition system (only Q2.5 is active).

    Stages Q1.1–Q2.4 are kept as commented-out code so individual steps can
    be re-enabled one at a time; as written, a dictionary and trained system
    must already exist in ``opts.out_dir``. Results are printed and saved.
    """
    opts = get_opts()

    ## Q1.1
    # # img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    # img_path = join(opts.data_dir, 'aquarium/sun_aztvjgubyrgvirup.jpg')
    # img = Image.open(img_path)
    # img = np.array(img).astype(np.float32)/255
    # filter_responses = visual_words.extract_filter_responses(opts, img)
    # util.display_filter_responses(opts, filter_responses)

    ## Q1.2
    # n_cpu = util.get_num_CPU()
    # visual_words.compute_dictionary(opts, n_worker=n_cpu)
    #
    #
    ## Q1.3

    #
    # img_path = join(opts.data_dir, 'aquarium/sun_acusadxqppxaqouk.jpg')
    # img = Image.open(img_path)
    # img = np.array(img).astype(np.float32)/255
    # dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    # wordmap = visual_words.get_visual_words(opts, img, dictionary)
    # util.visualize_wordmap(wordmap)

    ## Q2.1-2.4
    # n_cpu = util.get_num_CPU()
    # visual_recog.build_recognition_system(opts, n_worker=n_cpu)
    #
    # #Q2.5
    n_cpu = util.get_num_CPU()
    conf, accuracy = visual_recog.evaluate_recognition_system(opts,
                                                              n_worker=n_cpu)
    # conf, accuracy = custom.evaluate_recognition_system(opts, n_worker=n_cpu)

    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'),
               conf,
               fmt='%d',
               delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
Пример #8
0
def main():
    """Visualize the wordmap for a sample aquarium image (Q1.3).

    Assumes a dictionary has already been computed into ``opts.out_dir``;
    the filter-response and dictionary-building steps are left commented out.
    """
    opts = get_opts()

    ## Q1.1
    # img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    img_path = join(opts.data_dir, 'aquarium/sun_aztvjgubyrgvirup.jpg')
    # img = Image.open(img_path)
    # img = np.array(img).astype(np.float32)/255
    # filter_responses = visual_words.extract_filter_responses(opts, img)
    # util.display_filter_responses(opts, filter_responses)

    ## Q1.2
    # n_cpu currently unused — the dictionary-building calls below are disabled.
    n_cpu = util.get_num_CPU()
    # visual_words.compute_dictionary_one_image(opts, img)
    # visual_words.compute_dictionary(opts, n_worker=n_cpu)

    ## Q1.3
    # img_path = join(opts.data_dir, 'desert/sun_aaqyzvrweabdxjzo.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(wordmap)
import numpy as np
import torchvision
import util
import matplotlib.pyplot as plt
import visual_words
import visual_recog
import deep_recog
import skimage

if __name__ == '__main__':
    num_cores = util.get_num_CPU()

    # Filter responses for one sample image, scaled to floats in [0, 1].
    path_img = "../data/kitchen/sun_aasmevtpkslccptd.jpg"
    image = skimage.io.imread(path_img)
    # NOTE(review): only `import skimage` appears above — confirm skimage.io
    # is importable without an explicit `import skimage.io` in this setup.

    image = image.astype('float') / 255
    filter_responses = visual_words.extract_filter_responses(image)
    util.display_filter_responses(filter_responses)

    # Build the visual-word dictionary with one worker per CPU core.
    visual_words.compute_dictionary(num_workers=num_cores)

    dictionary = np.load('dictionary.npy')
    img = visual_words.get_visual_words(image, dictionary)
    #util.save_wordmap(wordmap, filename)
    visual_recog.build_recognition_system(num_workers=num_cores)

    conf, accuracy = visual_recog.evaluate_recognition_system(
        num_workers=num_cores)
    print(conf)
    # Overall accuracy = trace / total of the confusion matrix.
    print(np.diag(conf).sum() / conf.sum())
Пример #10
0
	for n in range(0,N):
		dist[n]= distance.euclidean(feature,train_features[n])
	return dist
	pass

## to call my own extract feature function
def process(train_files,label,vgg16_weights,i):
	"""Extract a deep feature for one training image and cache it to disk.

	The 1000-d VGG feature fills slots [0:1000] and the numeric label is
	appended at index 1000; the combined vector is written to
	../deep_features/<i>.npy and also returned (without the save suffix).
	"""
	img_file = os.path.join('../data/', train_files[0])
	img = imageio.imread(img_file).astype('double')
	img = skimage.transform.resize(img, (224, 224, 3))

	combined = np.zeros((1001))
	combined[:1000] = network_layers.extract_deep_feature(img, vgg16_weights)
	combined[1000] = label  # the label rides along at the end of the vector
	np.save('../deep_features/{}.npy'.format(i), combined)
	print('get deep features:', i, 'shape:', combined.shape)
	return combined
pass


if __name__ == '__main__':
	# Load the pretrained VGG-16 in double precision and switch to eval mode
	# (disables dropout/batch-norm updates during feature extraction).
	vgg16 = torchvision.models.vgg16(pretrained=True).double()
	vgg16.eval()
#	build_recognition_system(vgg16,8)
	num_workers=util.get_num_CPU()
	evaluate_recognition_system(vgg16,num_workers)
	print('all finished')
Пример #11
0
def main():
    opts = get_opts()
    print('L is', opts.L)
    print('K is', opts.K)
    print('alpha is', opts.alpha)
    print()
    #     Q1.1

    img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    filter_responses = visual_words.extract_filter_responses(opts, img)
    #    imageio.imsave('../results/filter_responses.jpg',filter_responses)
    util.visualize_wordmap(img)
    util.display_filter_responses(opts, filter_responses)

    ##    # Q1.2
    n_cpu = util.get_num_CPU()
    visual_words.compute_dictionary(opts, n_worker=n_cpu)
    dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    ###
    #    ## Q1.3
    img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(img)
    util.visualize_wordmap(wordmap)
    #
    img_path = join(opts.data_dir, 'waterfall/sun_bbeqjdnienanmmif.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(img)
    util.visualize_wordmap(wordmap)
    #
    img_path = join(opts.data_dir, 'windmill/sun_bratfupeyvlazpba.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(img)
    util.visualize_wordmap(wordmap)

    img_path = join(opts.data_dir, 'desert/sun_adjlepvuitklskrz.jpg')
    img = Image.open(img_path)
    img = np.array(img).astype(np.float32) / 255
    wordmap = visual_words.get_visual_words(opts, img, dictionary)
    util.visualize_wordmap(img)
    util.visualize_wordmap(wordmap)
    #

    # Q2.1-2.4
    n_cpu = util.get_num_CPU()
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)

    ## Q2.5
    n_cpu = util.get_num_CPU()
    conf, accuracy = visual_recog.evaluate_recognition_system(opts,
                                                              n_worker=n_cpu)

    print(conf)
    print(accuracy)
    np.savetxt(join(opts.out_dir, 'confmat.csv'),
               conf,
               fmt='%d',
               delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [accuracy], fmt='%g')
# In-built modules
import os
import multiprocessing

# External modules
import imageio
import numpy as np

# Local python modules
import util
import visual_words

# Globals
PROGRESS = 0  # shared progress counter, updated under PROGRESS_LOCK
PROGRESS_LOCK = multiprocessing.Lock()  # guards updates to PROGRESS
NPROC = util.get_num_CPU()  # default number of worker processes

# Map from integer class label to scene-category name.
label_to_string = {
    0: 'auditorium',
    1: 'baseball_field',
    2: 'desert',
    3: 'highway',
    4: 'kitchen',
    5: 'laundromat',
    6: 'waterfall',
    7: 'windmill'
}


def build_recognition_system(num_workers=2):
    '''
Пример #13
0
def tune(alpha, filter_scales, K, L, D):
  """
    Tunes the recognition system over a full hyperparameter grid.
    [input]
    * alpha: list of numbers of sampled pixels
    * filter_scales: list of filter-scale lists to extract responses with
    * K: list of visual-word counts
    * L: list of SPM layer counts
    * D: list of values tried for opts.D
    [saves]
    * dictionary, trained model and results per test, each in its own
      sub-directory of the original opts.out_dir
  """
  opts = get_opts()
  n_cpu = util.get_num_CPU()
  OUT_DIR = opts.out_dir

  # Grid size, used only for progress reporting.
  test = 0
  total_tests = len(L) * len(K) * len(filter_scales) \
              * len(alpha) * len(D)

  for d in D:
    opts.D = d
    for fs in filter_scales:
      opts.filter_scales = fs
      for a in alpha:
        opts.alpha = a
        for k in K:
          opts.K = k
          for l in L:
            opts.L = l
            test += 1

            # Create the per-test directory. makedirs(exist_ok=True) also
            # creates missing parents and avoids the exists()/mkdir race
            # that could crash the sweep mid-run.
            opts.out_dir = OUT_DIR + f"/test_l-{l}_k-{k}_fs-{len(fs)}_a-{a}_d-{d}"
            os.makedirs(opts.out_dir, exist_ok=True)

            print(f"TEST [{test}/{total_tests}]: L-{l}_K-{k}_fs-{len(fs)}_a-{a}_d-{d}")

            # Dictionary — stages whose artifacts already exist are skipped
            # so an interrupted sweep can be resumed.
            if not os.path.exists(join(opts.out_dir, "dictionary.npy")):
              print("\tBuilding Dictionary")
              start = time()
              visual_words.compute_dictionary(opts, n_worker=n_cpu)
              print(f"Time  {(time() - start) / 60.0}")
            else:
              print("\tDictionary exists")

            # Train
            if not os.path.exists(join(opts.out_dir, "trained_system.npz")):
              print("\tBuilding Recognition System")
              start = time()
              visual_recog.build_recognition_system(opts, n_worker=n_cpu)
              print(f"Time  {(time() - start) / 60.0}")
            else:
              print("\tRecognition system exists")

            # Test
            if not os.path.exists(join(opts.out_dir, "accuracy.txt")) or \
              not os.path.exists(join(opts.out_dir, "confmat.csv")) or \
              not os.path.exists(join(opts.out_dir, "model.npz")):
              print("\tEvaluation")
              start = time()
              conf, acc = visual_recog.evaluate_recognition_system(opts,
                                                                   n_worker=n_cpu)
              print(f"Time  {(time() - start) / 60.0}")

              # Persist results plus the hyperparameters that produced them.
              print(f"Confusion Matrix\n{conf}\n Accuracy: {acc}")
              np.savetxt(join(opts.out_dir, 'confmat.csv'), conf,
                        fmt='%d', delimiter=',')
              np.savetxt(join(opts.out_dir, 'accuracy.txt'), [acc], fmt='%g')
              np.savez_compressed(join(opts.out_dir, "model.npz"),
                filter_scales=fs, K=k, L=l, alpha=a, acc=acc, conf_mat=conf)
            else:
              print("\tEvaluation exists")
Пример #14
0
def main():
    """Build the dictionary, train and evaluate the recognition system.

    Only Q1.2 (dictionary), Q2.3-2.4 (training) and Q2.5 (evaluation) are
    active; the visualization and histogram experiments (Q1.1, Q1.3,
    Q2.1-Q2.2) are kept commented out for selective re-runs. Each active
    stage prints its elapsed wall-clock time in minutes.
    """
    opts = get_opts()

    ## Q1.1 - Filter responses
    # img_path = join(opts.data_dir, 'aquarium/sun_aztvjgubyrgvirup.jpg')
    # img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    # img_path = join(opts.data_dir, 'desert/sun_aaqyzvrweabdxjzo.jpg')
    # print("Q1.1.2 - Extract filter responses")
    # img = Image.open(img_path)
    # img.show()
    # img = np.array(img).astype(np.float32)/255
    # start = time()
    # filter_responses = visual_words.extract_filter_responses(opts, img)
    # print(f"Time:  {(time() - start) / 60.0}")
    # util.display_filter_responses(opts, filter_responses)

    ## Q1.2 - Dictionary
    print("Q1.2 - Building dictionary")
    start = time()
    n_cpu = util.get_num_CPU()
    visual_words.compute_dictionary(opts, n_worker=n_cpu)
    print(f"Time: {(time() - start) / 60.0}")

    ## Q1.3 - Wordmaps
    # img_path = join(opts.data_dir, 'park/labelme_aumetbzppbkuwju.jpg')
    # img_path = join(opts.data_dir, 'laundromat/sun_aaxufyiupegixznm.jpg')
    # img_path = join(opts.data_dir, 'highway/sun_beakjawckqywuhzw.jpg')
    # img_path = join(opts.data_dir, 'desert/sun_bfyksyxmxcrgvlqw.jpg')
    # img_path = join(opts.data_dir, "desert/sun_bqljzuxtzgthsjqt.jpg")
    # img_path = join(opts.data_dir, 'kitchen/sun_anmauekjnmmqhigr.jpg')
    # print("Q1.3 - Visual words")
    # start = time()
    # img = Image.open(img_path)
    # img.show()
    # img = np.array(img).astype(np.float32)/255
    # dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    # wordmap = visual_words.get_visual_words(opts, img, dictionary)
    # print(f"Time: {(time() - start) / 60.0}")
    # util.visualize_wordmap(wordmap)

    ## Q2.1-Q2.2 - Histograms
    # img_path = join(opts.data_dir, 'kitchen/sun_aasmevtpkslccptd.jpg')
    # dictionary = np.load(join(opts.out_dir, 'dictionary.npy'))
    # img = Image.open(img_path)
    # img = np.array(img).astype(np.float32)/255
    # wordmap = visual_words.get_visual_words(opts, img, dictionary)
    # print("Q2.1 - Feature from wordmap")
    # start = time()
    # hist = visual_recog.get_feature_from_wordmap(opts, wordmap)
    # print(np.sum(hist))
    # print(f"Time:  {(time() - start) / 60.0}")
    # print("Q2.2 - Feature from wordmap SPM")
    # hist = visual_recog.get_feature_from_wordmap_SPM(opts, wordmap)
    # print(f"Time:  {(time() - start) / 60.0}")
    # print(np.sum(hist))

    ## Q2.3 - 2.4 - Recognition System
    print("Q2.3-2.4 - Building recognition system")
    start = time()
    n_cpu = util.get_num_CPU()
    visual_recog.build_recognition_system(opts, n_worker=n_cpu)
    print(f"Time  {(time() - start) / 60.0}")

    ## Q2.5 - Evaluation
    print("Q2.5 - Evaluating recognition system")
    start = time()
    n_cpu = util.get_num_CPU()
    conf, acc = visual_recog.evaluate_recognition_system(opts, n_worker=n_cpu)
    print(f"Time  {(time() - start) / 60.0}")
    print(f"Confusion Matrix\n{conf}\n Accuracy: {acc}")
    np.savetxt(join(opts.out_dir, 'confmat.csv'),
               conf,
               fmt='%d',
               delimiter=',')
    np.savetxt(join(opts.out_dir, 'accuracy.txt'), [acc], fmt='%g')