Example 1
def worker_to_visual_words(wind):
    # Worker for one process: handles every num_cores-th image starting at
    # offset `wind`. Relies on all_imagenames, num_cores, dictionary,
    # filterBank and point_method from the enclosing scope (see Example 2).
    for j in range(math.ceil(len(all_imagenames) / num_cores)):
        img_ind = j * num_cores + wind
        if img_ind < len(all_imagenames):
            img_name = all_imagenames[img_ind]
            print('converting %d-th image %s to visual words' %
                  (img_ind, img_name))
            image = cv.imread('../data/%s' % img_name)
            # convert the image from BGR (OpenCV's default) to RGB
            image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
            wordMap = get_visual_words(image, dictionary, filterBank)
            pickle.dump(
                wordMap,
                open('../data/%s_%s.pkl' % (img_name[:-4], point_method), 'wb'))
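Below is a minimal dispatch sketch showing how this worker could be launched in parallel; it mirrors the multiprocessing calls that appear (commented out) in Example 2 and assumes worker_to_visual_words is defined inside a function that also provides num_cores, all_imagenames, dictionary, filterBank and point_method.

import multiprocessing

workers = []
for i in range(num_cores):
    # worker i handles images i, i + num_cores, i + 2 * num_cores, ...
    # (passing a nested function as target relies on the fork start method, e.g. Linux)
    workers.append(multiprocessing.Process(target=worker_to_visual_words, args=(i,)))
for worker in workers:
    worker.start()
for worker in workers:
    worker.join()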
Example 2
def batch_to_visual_words(num_cores, point_method):

    print('using %d threads for getting visual words' % num_cores)

    meta = pickle.load(open('../data/traintest.pkl', 'rb'))
    all_imagenames = meta['all_imagenames']

    # dictionary = pickle.load(open('dictionary%s.pkl' % point_method, 'rb'))
    # NOTE: hardcoded absolute path kept from the original; the relative form
    # commented out above is the more portable choice.
    dictionary = pickle.load(open('/Users/justindulay/Downloads/scene_classification/python/randomWords.pkl', 'rb'))

    filterBank = create_filterbank()

    # Single-process variant: the per-worker striding (img_ind = j * num_cores + wind)
    # was removed, so iterate over every image directly.
    for img_ind in range(len(all_imagenames)):
        img_name = all_imagenames[img_ind]
        print('converting %d-th image %s to visual words' % (img_ind, img_name))
        image = cv2.imread('../data/%s' % img_name)
        # image stays in OpenCV's standard BGR format here
        wordMap = get_visual_words(image, dictionary, filterBank)

        pickle.dump(wordMap, open('../data/%s_%s.pkl' % (img_name[:-4], point_method), 'wb'))

        # histogram is computed but not stored in this example
        histogram = get_image_features(wordMap, len(dictionary))

    # Multiprocessing version kept for reference:
    # workers = []
    # for i in range(num_cores):
    #     workers.append(multiprocessing.Process(target=worker_to_visual_words, args=(i,)))
    # for worker in workers:
    #     worker.start()
    # for worker in workers:
    #     worker.join()

    print('batch to visual words done!')
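A minimal usage sketch, assuming the helpers above (create_filterbank, get_visual_words, get_image_features) are importable from this module; the point_method value 'Random' below is an assumption standing in for whatever suffix the dictionary and word-map .pkl files actually use.

if __name__ == '__main__':
    # 'Random' is a hypothetical tag; substitute the suffix your
    # dictionary/word-map pickle files were saved with.
    batch_to_visual_words(num_cores=4, point_method='Random')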
Example 3
# -----fill in your implementation here --------
meta = pickle.load(open('visionRandom.pkl', 'rb'))
trainHist = meta['trainFeatures']
trainLabels = meta['trainLabels']
filterBank = meta['filterBank']
randomWordsDictionary = meta['dictionary']

all_accuracies = []
# evaluate k-nearest neighbours for k = 1..40
for k in range(1, 41):
    testHist = []
    predLabels = []
    correct = 0
    for i in range(len(test_imagenames)):
        img_name = test_imagenames[i]
        img = cv2.imread('../data/%s' % img_name)
        wordMap = get_visual_words(img, randomWordsDictionary, filterBank)
        testFts = get_image_features(wordMap, len(randomWordsDictionary))
        testHist.append(testFts)

        # keep the k smallest chi-squared distances seen so far
        kminDist = [10000000000 for _ in range(k)]
        predLabel = None
        min_train_label_indices = []
        for j in range(len(trainHist)):
            dst = get_image_distance(trainHist[j], testFts, method='chi')
            if dst < max(kminDist):
                kminDist.append(dst)
                # drop the current largest so only the k best distances remain
                kminDist.remove(max(kminDist))
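The example is cut off here. For reference, a compact sketch of the same k-nearest-neighbour prediction step (not the original author's remaining code): sort all distances and majority-vote over the k closest training labels, assuming trainLabels is indexable by training-image index.

import numpy as np
from collections import Counter

# distances from this test histogram to every training histogram
distances = [get_image_distance(trainHist[j], testFts, method='chi')
             for j in range(len(trainHist))]
nearest = np.argsort(distances)[:k]        # indices of the k closest training images
votes = Counter(trainLabels[j] for j in nearest)
predLabel = votes.most_common(1)[0][0]     # majority label among the k neighbours
predLabels.append(predLabel)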