Example #1
def parallel_testing(test_image, test_label, codebook, svm, scaler, pca):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.dense(gray)
    labels = np.array([test_label] * des.shape[0])
    ind = np.array([0] * des.shape[0])
    vis_word, _ = bovw.visual_words(des, labels, ind, codebook)
    prediction_prob = classification.predict_svm(vis_word, svm, std_scaler=scaler, pca=pca)
    predicted_class = svm.classes_[np.argmax(prediction_prob)]
    return predicted_class == test_label, predicted_class, np.ravel(prediction_prob)
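Given the name, each of these parallel_testing variants is presumably fanned out over the test set from a worker pool. A minimal sketch of such a driver, assuming joblib, the codebook/svm/scaler/pca objects from this example, and the test set loaded as in Example #7:

from joblib import Parallel, delayed

# Hypothetical driver: score every test image on all available cores.
results = Parallel(n_jobs=-1)(
    delayed(parallel_testing)(filename, label, codebook, svm, scaler, pca)
    for filename, label in zip(test_images_filenames, test_labels))
# The first element of each returned tuple is the hit/miss flag.
accuracy = sum(hit for hit, _, _ in results) / len(results)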
Example #2
def parallel_testing(test_image, test_label, lin_svm, std_scaler):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.sift(gray)
    predictions = classification.predict_svm(des,
                                             lin_svm,
                                             std_scaler=std_scaler)
    values, counts = np.unique(predictions, return_counts=True)
    predicted_class = values[np.argmax(counts)]
    return predicted_class == test_label
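The last three lines are a hard majority vote over the per-descriptor predictions. A toy run of just that step (np.unique returns the sorted unique labels together with their counts):

import numpy as np

predictions = np.array(['coast', 'mountain', 'mountain', 'forest', 'mountain'])
values, counts = np.unique(predictions, return_counts=True)
print(values[np.argmax(counts)])  # -> 'mountain' (3 of 5 votes)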
Example #3
def parallel_testing(test_image, test_label, codebook, svm, scaler, pca):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.dense(gray)
    # Keypoint coordinates, presumably meant for the spatial pyramid binning,
    # although this snippet never passes kpt_pos on.
    kpt_pos = np.array([kp.pt for kp in kpt], dtype=np.float64)
    labels = np.array([test_label] * des.shape[0])
    ind = np.array([0] * des.shape[0])
    vis_word, _ = bovw.visual_words(des, labels, ind, codebook, spatial_pyramid=True)
    prediction_prob = classification.predict_svm(vis_word, svm, std_scaler=scaler, pca=pca)
    predicted_class = svm.classes_[np.argmax(prediction_prob)]
    return predicted_class == test_label, predicted_class, np.ravel(prediction_prob)
Example #4
def parallel_testing(test_image, test_label, svm, scaler, gmm):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.sift(gray)
    labels = np.array([test_label] * des.shape[0])
    ind = np.array([0] * des.shape[0])

    fisher, _ = bovw.fisher_vectors(des, labels, ind, gmm)

    prediction_prob = classification.predict_svm(fisher, svm, std_scaler=scaler)
    predicted_class = svm.classes_[np.argmax(prediction_prob)]
    return predicted_class == test_label, predicted_class, np.ravel(prediction_prob)
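bovw.fisher_vectors is project-specific, but the encoding it names is standard. A simplified sketch that keeps only the gradient with respect to the GMM means (a full Fisher vector also stacks the weight and covariance gradients), assuming a diagonal-covariance scikit-learn GaussianMixture as the gmm argument:

import numpy as np
from sklearn.mixture import GaussianMixture

def fisher_vector_means(des, gmm):
    # des: (N, D) descriptors; gmm fitted with covariance_type='diag'.
    q = gmm.predict_proba(des)                        # (N, K) posteriors
    diff = des[:, None, :] - gmm.means_[None, :, :]   # (N, K, D)
    sigma = np.sqrt(gmm.covariances_)                 # (K, D) std devs
    grad = (q[:, :, None] * diff / sigma[None, :, :]).sum(axis=0)
    grad /= des.shape[0] * np.sqrt(gmm.weights_)[:, None]
    return grad.ravel()                               # (K * D,) encoding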
Example #5
def parallel_testing(test_image, test_label, svm, std_scaler, pca):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.sift(gray)
    predictions = classification.predict_svm(des,
                                             svm,
                                             std_scaler=std_scaler,
                                             pca=pca,
                                             probability=True)
    probabilities = np.sum(predictions, axis=0)
    predicted_class = svm.classes_[np.argmax(probabilities)]

    return predicted_class == test_label, predicted_class, test_label
Example #6
def parallel_testing(test_image, test_label, lin_svm, std_scaler, pca):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.sift(gray)
    predictions = classification.predict_svm(des,
                                             lin_svm,
                                             std_scaler=std_scaler,
                                             pca=pca,
                                             probability=True)
    # Soft vote: sum the per-descriptor class probabilities; the class count
    # now comes from the model instead of a hard-coded eight-element list.
    probs = np.sum(predictions, axis=0)
    predicted_class = lin_svm.classes_[np.argmax(probs)]

    return predicted_class == test_label, predicted_class, test_label
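Examples #5 and #6 do soft voting (summing per-descriptor class probabilities), whereas Example #2 does hard voting on the predicted labels. A toy contrast showing that the two can disagree:

import numpy as np

probs = np.array([[0.6, 0.4],   # descriptor 1 slightly favours class 0
                  [0.6, 0.4],   # descriptor 2 slightly favours class 0
                  [0.1, 0.9]])  # descriptor 3 is confident about class 1
hard = np.argmax(np.bincount(np.argmax(probs, axis=1)))  # -> 0 (two votes to one)
soft = np.argmax(probs.sum(axis=0))                      # -> 1 (1.7 vs 1.3)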
Example #7
# Read the train set (io.load_training_set is assumed here, mirroring
# io.load_test_set below; the original snippet starts after the load)
train_images_filenames, train_labels = io.load_training_set()
print('Loaded {} training images filenames with classes {}.'.format(
    len(train_images_filenames), set(train_labels)))

# Read the test set
test_images_filenames, test_labels = io.load_test_set()
print('Loaded {} testing images filenames with classes {}.'.format(
    len(test_images_filenames), set(test_labels)))

# Extract features
Train_descriptors = []
Train_label_per_descriptor = []

for i, filename in enumerate(train_images_filenames):
    if Train_label_per_descriptor.count(train_labels[i]) < 30:
        print('Reading image {}'.format(filename))
        gray = io.load_grayscale_image(filename)
        kpt, des = feature_extraction.brisk(gray)
        Train_descriptors.append(des)
        Train_label_per_descriptor.append(train_labels[i])
        print('{} extracted keypoints and descriptors'.format(len(kpt)))

# Transform everything to numpy arrays: stack all descriptor matrices at
# once and repeat each image's label once per descriptor row (equivalent
# to an incremental vstack/hstack loop, without the quadratic copying)
D = np.vstack(Train_descriptors)
L = np.repeat(Train_label_per_descriptor,
              [des.shape[0] for des in Train_descriptors])
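The selection loop above calls list.count on every image, which makes that pass quadratic. A hypothetical refactor of just the 30-per-class cap using collections.Counter:

from collections import Counter

per_class = Counter()
selected = []
for filename, label in zip(train_images_filenames, train_labels):
    if per_class[label] < 30:        # same cap as above, but an O(1) lookup
        per_class[label] += 1
        selected.append((filename, label))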