def findWord(dict_vocab, path, grid_m, grid_n):
    """Compute visual-word histograms for every image file under *path*.

    Parameters:
        dict_vocab: list of vocabularies; only dict_vocab[0] is consulted.
        path: directory holding the image files. It is joined to each
              filename by plain string concatenation, so it must end with
              a path separator.
        grid_m, grid_n: grid dimensions used when splitting each image's
              descriptors.

    Returns:
        word_hist: one entry per file, each a list of grid_m * grid_n
        per-cell histograms.
    """
    files = [f for f in listdir(path) if isfile(join(path, f))]
    word_hist = []
    for f in files:
        # Keypoints are not needed here; only the per-grid descriptors are.
        keypoints, file_desc = test_feature_detector(path + f, grid_n)
        line_hist = []
        for i in range(0, grid_m):
            for j in range(0, grid_n):
                # NOTE(review): file_desc is indexed only by j, so the outer
                # i loop repeats the same grid_n histograms grid_m times.
                # Confirm whether the intended index was (i * grid_n + j).
                desc = array(file_desc[j])
                hist = buildWordHist(desc, dict_vocab[0])
                line_hist.append(hist)
        word_hist.append(line_hist)
    return word_hist
def findWord(dict_vocab, path, grid_m, grid_n):
    """Compute visual-word histograms for every image file under *path*.

    NOTE(review): this re-definition duplicates the earlier findWord and
    shadows it at import time — confirm which copy is meant to survive.

    Parameters:
        dict_vocab: list of vocabularies; only dict_vocab[0] is consulted.
        path: directory holding the image files. It is joined to each
              filename by plain string concatenation, so it must end with
              a path separator.
        grid_m, grid_n: grid dimensions used when splitting each image's
              descriptors.

    Returns:
        word_hist: one entry per file, each a list of grid_m * grid_n
        per-cell histograms.
    """
    files = [f for f in listdir(path) if isfile(join(path, f))]
    word_hist = []
    for f in files:
        # Keypoints are not needed here; only the per-grid descriptors are.
        keypoints, file_desc = test_feature_detector(path + f, grid_n)
        line_hist = []
        for i in range(0, grid_m):
            for j in range(0, grid_n):
                # NOTE(review): file_desc is indexed only by j, so the outer
                # i loop repeats the same grid_n histograms grid_m times.
                # Confirm whether the intended index was (i * grid_n + j).
                desc = array(file_desc[j])
                hist = buildWordHist(desc, dict_vocab[0])
                line_hist.append(hist)
        word_hist.append(line_hist)
    return word_hist
def buildVocabulary(path,k,grid_m,grid_n): files = [ f for f in listdir(path) if isfile(join(path,f)) ] total_desc = [] dict_vocab = [] database_keypoints = [] database_file_desc = [] for f in files: print f keypoints,file_desc = test_feature_detector(path+f, grid_n) database_keypoints.append(keypoints) database_file_desc.append(file_desc) for i in range(0,grid_m): for j in range(0,grid_n): if len(total_desc) < 1: total_desc.append(file_desc[j]) else: temp = total_desc[0] total_desc[0] = np.vstack((temp,file_desc[j])) t1 = time.time() vocab,dist = kmeans(total_desc[0],k) # k is the seed number t2 = time.time() print 'Kmeans in grid[',j,'] takes',t2-t1 dict_vocab.append(vocab) word_hist = [] for fidx in range(0,len(files)): keypoints = database_keypoints[fidx] file_desc = database_file_desc[fidx] line_hist = [] for i in range(0,grid_m): for j in range(0,grid_n): desc = array(file_desc[j]) hist = buildWordHist(desc,dict_vocab[0]) #if len(line_hist) == 0: # line_hist = hist #else: # line_hist = np.hstack((line_hist,hist)) line_hist.append(hist) word_hist.append(line_hist) return dict_vocab,word_hist
def buildVocabulary(path, k, grid_m, grid_n): files = [f for f in listdir(path) if isfile(join(path, f))] total_desc = [] dict_vocab = [] database_keypoints = [] database_file_desc = [] for f in files: print f keypoints, file_desc = test_feature_detector(path + f, grid_n) database_keypoints.append(keypoints) database_file_desc.append(file_desc) for i in range(0, grid_m): for j in range(0, grid_n): if len(total_desc) < 1: total_desc.append(file_desc[j]) else: temp = total_desc[0] total_desc[0] = np.vstack((temp, file_desc[j])) t1 = time.time() vocab, dist = kmeans(total_desc[0], k) # k is the seed number t2 = time.time() print 'Kmeans in grid[', j, '] takes', t2 - t1 dict_vocab.append(vocab) word_hist = [] for fidx in range(0, len(files)): keypoints = database_keypoints[fidx] file_desc = database_file_desc[fidx] line_hist = [] for i in range(0, grid_m): for j in range(0, grid_n): desc = array(file_desc[j]) hist = buildWordHist(desc, dict_vocab[0]) #if len(line_hist) == 0: # line_hist = hist #else: # line_hist = np.hstack((line_hist,hist)) line_hist.append(hist) word_hist.append(line_hist) return dict_vocab, word_hist