Example #1
#---------------------------------------------------------------
# make a codebook

import pickle

x_train = pickle.load(open('./datasets/train_img.npy', 'rb'))
x_test = pickle.load(open('./datasets/test_img.npy', 'rb'))
y_train = pickle.load(open('./datasets/train_label.txt', 'rb'))
y_test = pickle.load(open('./datasets/test_label.txt', 'rb'))

strong_des = sift.dense_sift_each()  # dense SIFT

# weak_des = sift.weak_des_whole()      # original SIFT
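
# A minimal sketch of what a dense-SIFT extractor can look like, assuming
# OpenCV (cv2) is installed; the repo's sift.dense_sift_each() may differ.
# Descriptors are computed on a regular grid of keypoints instead of at
# detected interest points.
def dense_sift_sketch(gray, step=8, size=16):
    import cv2
    extractor = cv2.SIFT_create()
    keypoints = [cv2.KeyPoint(float(x), float(y), float(size))
                 for y in range(0, gray.shape[0], step)
                 for x in range(0, gray.shape[1], step)]
    _, des = extractor.compute(gray, keypoints)   # des: (n_keypoints, 128)
    return des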

codebook_path = './codebook/km_center_dense_200_caltech'

K_means.clustering(strong_des, codebook_path, n_cluster=200)
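
# A rough sketch of the clustering step, assuming scikit-learn's
# MiniBatchKMeans; the repo's K_means.clustering() may work differently.
# All descriptors are stacked, clustered into n_cluster visual words, and
# the cluster centres are pickled as the codebook.
def clustering_sketch(descriptor_list, out_path, n_cluster=200):
    import numpy as np
    import pickle
    from sklearn.cluster import MiniBatchKMeans

    all_des = np.vstack(descriptor_list)                 # (total_keypoints, 128)
    km = MiniBatchKMeans(n_clusters=n_cluster, random_state=0).fit(all_des)
    with open(out_path, 'wb') as f:
        pickle.dump(km.cluster_centers_, f)              # the codebook = cluster centres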

#---------------------------------------------------------------
# save the level 0, 1, 2 PHOW (pyramid histogram of words) for the train and test sets

codebooks = codebook.load_codebook(codebook_path)

# cal_train / cal_test are assumed to be the train / test image sets loaded above
cal_train, cal_test = x_train, x_test

tr_sl_0 = single_level(cal_train, 0, codebooks)
tr_sl_1 = single_level(cal_train, 1, codebooks)
tr_sl_2 = single_level(cal_train, 2, codebooks)

ts_sl_0 = single_level(cal_test, 0, codebooks)
ts_sl_1 = single_level(cal_test, 1, codebooks)
ts_sl_2 = single_level(cal_test, 2, codebooks)
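
# A hedged sketch of what single_level() computes for one image: at level L
# the image is divided into a 2^L x 2^L grid, each descriptor is assigned to
# its nearest codeword, and one codeword histogram is built per cell; the
# cell histograms are then concatenated. The argument names and shapes here
# are assumptions, not the repo's exact interface.
def single_level_sketch(des, positions, img_shape, level, codebooks):
    import numpy as np
    from scipy.spatial.distance import cdist

    n_words = codebooks.shape[0]
    cells = 2 ** level
    words = cdist(des, codebooks).argmin(axis=1)          # nearest codeword per descriptor
    h, w = img_shape[:2]
    hist = np.zeros((cells, cells, n_words))
    for (x, y), word in zip(positions, words):
        cx = min(int(x * cells / w), cells - 1)           # grid column of the keypoint
        cy = min(int(y * cells / h), cells - 1)           # grid row of the keypoint
        hist[cy, cx, word] += 1
    return hist.ravel()                                   # length 4^level * n_words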

tr_pyramid_L0 = tr_sl_0  # append levels to build the pyramid
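
# For reference, the full L=2 pyramid is usually the concatenation of the
# single-level histograms with the standard SPM weights (1/4, 1/4, 1/2 for
# levels 0, 1, 2); whether this repo uses exactly these weights is an
# assumption here.
#
#   tr_pyramid_L2 = np.hstack([0.25 * tr_sl_0, 0.25 * tr_sl_1, 0.5 * tr_sl_2])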