Example no. 1
def parallel_testing(test_image, test_label, lin_svm, std_scaler):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.sift(gray)
    predictions = classification.predict_svm(des,
                                             lin_svm,
                                             std_scaler=std_scaler)
    values, counts = np.unique(predictions, return_counts=True)
    predicted_class = values[np.argmax(counts)]
    return predicted_class == test_label
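Each parallel_testing variant classifies a single test image, so a driver is needed to fan the calls out over the whole test set. A minimal sketch with joblib is below; joblib itself and the test_images_filenames / test_labels names are assumptions, not part of the original code.

from joblib import Parallel, delayed
import numpy as np

# Run the per-image test in parallel and average the boolean results (hypothetical driver).
results = Parallel(n_jobs=4)(
    delayed(parallel_testing)(fn, lbl, lin_svm, std_scaler)
    for fn, lbl in zip(test_images_filenames, test_labels))
accuracy = np.mean(results)
print('Accuracy: ' + str(accuracy))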
Example no. 2
def parallel_testing(test_image, test_label, svm, scaler, gmm):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.sift(gray)
    labels = np.array([test_label] * des.shape[0])
    ind = np.array([0] * des.shape[0])

    fisher, _ = bovw.fisher_vectors(des, labels, ind, gmm)

    prediction_prob = classification.predict_svm(fisher, svm, std_scaler=scaler)
    predicted_class = svm.classes_[np.argmax(prediction_prob)]
    return predicted_class == test_label, predicted_class, np.ravel(prediction_prob)
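The Fisher-vector variant returns a (correct, predicted_class, probabilities) tuple per image, which makes it easy to build a confusion matrix once all results have been collected. A hedged sketch, assuming a driver like the one above stored the tuples in results and that test_labels is the matching list of ground-truth labels:

import numpy as np
from sklearn.metrics import confusion_matrix

# Unpack the per-image tuples returned by parallel_testing.
correct, predicted, probs = zip(*results)
accuracy = np.mean(correct)
cm = confusion_matrix(test_labels, predicted, labels=svm.classes_)
print('Accuracy: ' + str(accuracy))
print(cm)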
Example no. 3
def parallel_testing(test_image, test_label, svm, std_scaler, pca):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.sift(gray)
    predictions = classification.predict_svm(des,
                                             svm,
                                             std_scaler=std_scaler,
                                             pca=pca,
                                             probability=True)
    probabilities = np.sum(predictions, axis=0)
    predicted_class = svm.classes_[np.argmax(probabilities)]

    return predicted_class == test_label, predicted_class, test_label
Example no. 4
def seq_testing(test_image, test_label, codebook, svm, scaler, pca):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.sift(gray)
    labels = np.array([test_label] * des.shape[0])
    ind = np.array([0] * des.shape[0])
    vis_word, _ = bovw.visual_words(des, labels, ind, codebook)
    prediction_prob = classification.predict_svm(vis_word,
                                                 svm,
                                                 std_scaler=scaler,
                                                 pca=pca)
    predicted_class = svm.classes_[np.argmax(prediction_prob)]
    return predicted_class == test_label, predicted_class, np.ravel(
        prediction_prob)
Example no. 5
def parallel_testing(test_image, test_label, lin_svm, std_scaler, pca):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.sift(gray)
    predictions = classification.predict_svm(des,
                                             lin_svm,
                                             std_scaler=std_scaler,
                                             pca=pca,
                                             probability=True)
    # Sum the per-descriptor class probabilities (one row per local SIFT descriptor),
    # instead of accumulating into a hard-coded 8-element list.
    probs = np.sum(predictions, axis=0)
    predicted_class = lin_svm.classes_[np.argmax(probs)]

    return predicted_class == test_label, predicted_class, test_label
Example no. 6
def parallel_testing(test_image, test_label, codebook, svm, scaler, pca):
    gray = io.load_grayscale_image(test_image)
    kpt, des = feature_extraction.sift(gray)
    kpt_pos = np.array([kpt[i].pt for i in range(0, len(kpt))],
                       dtype=np.float64)
    labels = np.array([test_label] * des.shape[0])
    ind = np.array([0] * des.shape[0])
    vis_word, _ = bovw.visual_words(des,
                                    labels,
                                    ind,
                                    codebook,
                                    spatial_pyramid=True)
    prediction_prob = classification.predict_svm(vis_word,
                                                 svm,
                                                 std_scaler=scaler,
                                                 pca=pca)
    predicted_class = svm.classes_[np.argmax(prediction_prob)]
    return predicted_class == test_label, predicted_class, np.ravel(
        prediction_prob)
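Example no. 6 enables spatial_pyramid=True: the visual-word histogram is computed per image region and the per-region histograms are concatenated, so coarse spatial layout is preserved. How bovw.visual_words does this internally is not shown here; the standalone sketch below only illustrates the idea for a two-level (1x1 + 2x2) pyramid, assuming word_ids holds the codebook index of each descriptor and positions the matching keypoint coordinates.

import numpy as np

def pyramid_histogram(word_ids, positions, image_shape, n_words):
    """Concatenate visual-word histograms over a 1x1 and a 2x2 grid (illustrative sketch)."""
    h, w = image_shape[:2]
    hists = []
    for level in (1, 2):                      # 1x1 grid, then 2x2 grid
        for row in range(level):
            for col in range(level):
                in_cell = ((positions[:, 0] >= col * w / float(level)) &
                           (positions[:, 0] < (col + 1) * w / float(level)) &
                           (positions[:, 1] >= row * h / float(level)) &
                           (positions[:, 1] < (row + 1) * h / float(level)))
                hists.append(np.bincount(word_ids[in_cell], minlength=n_words))
    return np.concatenate(hists)              # length n_words * (1 + 4)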
Example no. 7
for sigma1 in range(10, 30, 1):
    sigma = float(sigma1) / 10
    for ct1 in range(1, 9):
        ct = float(ct1) / 100
        for et in range(28, 88, 10):
            # Restart the descriptor lists for every (sigma, ct, et) combination;
            # otherwise the 30-per-class check below blocks all later combinations.
            Train_descriptors = []
            Train_label_per_descriptor = []
            for i in range(len(train_images_filenames)):
                filename = train_images_filenames[i]
                filename = "../." + filename
                if Train_label_per_descriptor.count(train_labels[i]) < 30:
                    print 'Reading image ' + filename
                    ima = cv2.imread(filename)
                    gray = cv2.cvtColor(ima, cv2.COLOR_BGR2GRAY)
                    ##kpt, des = SIFT_detector.detectAndCompute(gray, None)
                    kpt, des = feature_extraction.sift(gray, sigma, ct, et)
                    Train_descriptors.append(des)
                    Train_label_per_descriptor.append(train_labels[i])
                    print str(
                        len(kpt)) + ' extracted keypoints and descriptors'

            # Transform everything to numpy arrays

            D = Train_descriptors[0]
            L = np.array([Train_label_per_descriptor[0]] *
                         Train_descriptors[0].shape[0])
            for i in range(1, len(Train_descriptors)):
                D = np.vstack((D, Train_descriptors[i]))
                L = np.hstack((L,
                               np.array([Train_label_per_descriptor[i]] *
                                        Train_descriptors[i].shape[0])))
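The three nested parameter loops above sweep the SIFT sigma, contrast threshold and edge threshold. The same sweep can be written more flatly with itertools.product; extract_descriptors below is a hypothetical helper wrapping the per-image reading loop, shown only to make the grid explicit.

from itertools import product

sigmas = [s / 10.0 for s in range(10, 30)]               # 1.0, 1.1, ..., 2.9
contrast_thresholds = [c / 100.0 for c in range(1, 9)]   # 0.01, 0.02, ..., 0.08
edge_thresholds = list(range(28, 88, 10))                # 28, 38, ..., 78

for sigma, ct, et in product(sigmas, contrast_thresholds, edge_thresholds):
    # hypothetical helper: read up to 30 images per class and extract SIFT with these parameters
    Train_descriptors, Train_label_per_descriptor = extract_descriptors(sigma, ct, et)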
Example no. 8
# read just 30 train images per class
# extract sift keypoints and descriptors
# store descriptors in a python list of numpy arrays

Train_descriptors = []
Train_label_per_descriptor = []

for i in range(len(train_images_filenames)):
    filename = train_images_filenames[i]
    filename = "../." + filename
    if Train_label_per_descriptor.count(train_labels[i]) < 30:
        print 'Reading image ' + filename
        ima = cv2.imread(filename)
        gray = cv2.cvtColor(ima, cv2.COLOR_BGR2GRAY)
        ##kpt, des = SIFT_detector.detectAndCompute(gray, None)
        kpt, des = feature_extraction.sift(gray)
        Train_descriptors.append(des)
        Train_label_per_descriptor.append(train_labels[i])
        print str(len(kpt)) + ' extracted keypoints and descriptors'

# Transform everything to numpy arrays

D = Train_descriptors[0]
L = np.array([Train_label_per_descriptor[0]] * Train_descriptors[0].shape[0])

for i in range(1, len(Train_descriptors)):
    D = np.vstack((D, Train_descriptors[i]))
    L = np.hstack((L,
                   np.array([Train_label_per_descriptor[i]] *
                            Train_descriptors[i].shape[0])))
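Stacking inside the loop re-allocates D on every iteration. An equivalent, shorter form (assuming the same Train_descriptors / Train_label_per_descriptor lists built above) is to pass the whole list to a single vstack/hstack call:

import numpy as np

# Stack all descriptor matrices at once, and repeat each label once per descriptor row.
D = np.vstack(Train_descriptors)
L = np.hstack([np.array([label] * descriptors.shape[0])
               for label, descriptors in zip(Train_label_per_descriptor,
                                             Train_descriptors)])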