Example #1
import datetime

import numpy as np

from cs231n.classifiers import KNearestNeighbor


def main():
    X_train, y_train, X_test, y_test = gen_train_test(5000, 500)
    num_test = y_test.shape[0]
    classifier = KNearestNeighbor()
    classifier.train(X_train, y_train)

    # Time the partially vectorized (one-loop) distance computation.
    starttime = datetime.datetime.now()
    dists = classifier.compute_distances_one_loop(X_test)
    endtime = datetime.datetime.now()
    print((endtime - starttime).seconds)
    print(dists.shape)

    y_test_pred = classifier.predict_labels(dists, k=5)

    # Compute and print the fraction of correctly predicted examples
    num_correct = np.sum(y_test_pred == y_test)
    accuracy = float(num_correct) / num_test
    print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test,
                                                   accuracy))
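The predict_labels method itself isn't shown in any of these snippets. Below is a minimal sketch of what a k-nearest-neighbor majority vote could look like, assuming dists has shape (num_test, num_train) and self.y_train holds non-negative integer labels (assumptions for illustration, not the cs231n solution):

import numpy as np

def predict_labels_sketch(self, dists, k=1):
    # dists[i, j] is the distance between the i-th test point and the
    # j-th training point; take the k closest training labels and vote.
    num_test = dists.shape[0]
    y_pred = np.zeros(num_test, dtype=self.y_train.dtype)
    for i in range(num_test):
        closest_y = self.y_train[np.argsort(dists[i])[:k]]
        # np.bincount + argmax is a majority vote, with ties going to
        # the smaller label value.
        y_pred[i] = np.argmax(np.bincount(closest_y))
    return y_pred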
Example #2
# compute and print the fraction of correctly predicted examples

num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))

# Second, set k = 5.
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Using k=5, got %d / %d correct => accuracy: %f' %
      (num_correct, num_test, accuracy))

# Now let's speed up the distance matrix computation by using partial
# vectorization with one loop.
dists_one = classifier.compute_distances_one_loop(X_test)
# Compute the difference between the two methods.
difference = np.linalg.norm(dists - dists_one, ord='fro')
if difference < 0.001:
    print('Good, the two methods give the same results.')
else:
    print('The distance matrices are different.')
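# For reference, a minimal sketch of what compute_distances_one_loop could
# look like, assuming L2 (Euclidean) distances and a stored self.X_train as
# in the cs231n assignment (an illustration, not the actual solution):
def compute_distances_one_loop_sketch(self, X):
    # One loop over test points; each row of dists is filled by a single
    # vectorized broadcast against all training points.
    num_test, num_train = X.shape[0], self.X_train.shape[0]
    dists = np.zeros((num_test, num_train))
    for i in range(num_test):
        dists[i] = np.sqrt(np.sum((self.X_train - X[i]) ** 2, axis=1))
    return dists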

# Now we use the method without any loops.
dists_non = classifier.compute_distances_no_loops(X_test)
# Compute the difference between the two methods.
difference = np.linalg.norm(dists - dists_non, ord='fro')
if difference < 0.001:
    print('Good, the difference is %f' % difference)
else:
    print('The distance matrices are different.')
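A fully vectorized version is possible via the expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, which turns the cross term into a single matrix multiply. A minimal sketch under the same assumptions as above (hypothetical, not the assignment's solution):

import numpy as np

def compute_distances_no_loops_sketch(self, X):
    # Squared norms of every test and training point, plus one matrix
    # multiply for the cross term; no explicit Python loops.
    test_sq = np.sum(X ** 2, axis=1, keepdims=True)    # (num_test, 1)
    train_sq = np.sum(self.X_train ** 2, axis=1)       # (num_train,)
    cross = X.dot(self.X_train.T)                      # (num_test, num_train)
    # Clamp at zero to guard against tiny negative values from round-off.
    return np.sqrt(np.maximum(test_sq - 2 * cross + train_sq, 0))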
Example #3
import matplotlib.pyplot as plt

# Visualize the distance matrix: each row shows one test example's
# distances to every training example.
plt.imshow(dists, interpolation='none')
plt.show()

# Now implement the function predict_labels and run the code below:
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)

# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))

# Now let's speed up distance matrix computation by using partial vectorization
# with one loop. Implement the function compute_distances_one_loop and run the
# code below:
dists_one = classifier.compute_distances_one_loop(X_test)

# To ensure that our vectorized implementation is correct, we make sure that it
# agrees with the naive implementation. There are many ways to decide whether
# two matrices are similar; one of the simplest is the Frobenius norm. In case
# you haven't seen it before, the Frobenius norm of the difference of two
# matrices is the square root of the sum of squared differences of all
# elements; in other words, reshape the matrices into vectors and compute the
# Euclidean distance between them.

difference = np.linalg.norm(dists - dists_one, ord='fro')
print('Difference was: %f' % (difference, ))
if difference < 0.001:
    print('Good! The distance matrices are the same')
else:
    print('Uh-oh! The distance matrices are different')
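To illustrate the comment above: the Frobenius norm of a difference matrix is exactly the Euclidean norm of the same matrix flattened into a vector. A self-contained check with made-up data:

import numpy as np

A = np.arange(6, dtype=float).reshape(2, 3)
B = np.ones((2, 3))

fro = np.linalg.norm(A - B, ord='fro')   # Frobenius norm of the difference
euc = np.linalg.norm((A - B).ravel())    # Euclidean norm of the flattened difference
assert np.isclose(fro, euc)
print('Frobenius %.6f == Euclidean %.6f' % (fro, euc))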
Example #4
import numpy as np

from cs231n.classifiers import KNearestNeighbor

classifier = KNearestNeighbor()  # __init__ runs automatically; no explicit call needed
print('Imports succeeded.')

a = np.arange(2000).reshape(100, 20)   # 100 training points, 20 features each
a_y = np.ones(100, dtype=int)          # one label per training point
b = np.arange(20).reshape(1, 20)
c = a - b                              # broadcasting subtracts b from every row of a
print(a_y)
classifier.train(a, a_y)
classifier.compute_distances_one_loop(a)
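For context, "training" a kNN classifier is just memorization; all the work happens at prediction time. A minimal sketch of what train presumably does inside KNearestNeighbor (an assumption about the class internals, consistent with how it is used above):

class KNearestNeighborSketch:
    def train(self, X, y):
        # kNN is a lazy learner: store the data verbatim and defer all
        # computation to compute_distances_* / predict_labels.
        self.X_train = X
        self.y_train = y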