def countSupportVectors(eyeData, targets, k, C, gamma, kernel):
    """Fit an SVM on PCA-projected eye data and return the number of
    support vectors the trained classifier keeps."""
    # Center the samples (no variance scaling) and derive the PCA basis
    # from the covariance of the centered data.
    centered, mean, variance = featureNormalize(eyeData, doScale=False)
    u, s, v = np.linalg.svd(getCovarianceMatrix(centered))

    # Project onto the top-k principal components, then train.
    reduced = projectData(centered, u, k)
    model = svm.classifier(reduced, targets, C, gamma, kernel)

    return model.support_vectors_.shape[0]
def validate(eyeData, people, targets, testPerson, k, C, gamma, kernel):
    """Leave-one-person-out validation.

    Trains on every person except ``testPerson``, predicts that person's
    samples, and returns the fraction of correct predictions.  When ``k``
    is None the PCA step is skipped entirely.
    """
    # Partition rows by whether they belong to the held-out person.
    keptRows = np.nonzero(people != testPerson)
    heldRows = np.nonzero(people == testPerson)

    trainX, trainY = eyeData[keptRows], targets[keptRows]
    testX, testY = eyeData[heldRows], targets[heldRows]

    # Center the training data; shift the test data by the SAME mean so
    # both sets live in the same coordinate frame.
    trainCentered, mean, variance = featureNormalize(trainX, doScale=False)
    testCentered = testX - mean

    if k is None:
        # No dimensionality reduction requested.
        trainProj, testProj = trainCentered, testCentered
    else:
        # PCA basis is computed from the training data only, then both
        # sets are projected onto the top-k components.
        u, s, v = np.linalg.svd(getCovarianceMatrix(trainCentered))
        trainProj = projectData(trainCentered, u, k)
        testProj = projectData(testCentered, u, k)

    # Fit on the (projected) training data and score the held-out person.
    model = svm.classifier(trainProj, trainY, C, gamma, kernel)
    predictions = model.predict(testProj)

    # Alternative left/right-only metric, kept for reference:
    # correct = np.sum((testY-1)/2 == (predictions-1)/2) / float(len(predictions))

    # Accuracy over the four direction classes.
    correct = np.sum(testY == predictions) / float(len(predictions))

    return correct
def validate(eyeData, people, targets, testPerson, k, C, gamma, kernel):
    """Leave-one-person-out validation that also collects misclassified
    samples.

    Returns ``(classifier, accuracy, misses)`` where ``misses`` is a list
    of dicts describing every wrongly-classified test sample.

    NOTE(review): this redefines ``validate`` from above with a different
    return signature — at import time callers see this later definition.
    """
    keptRows = np.nonzero(people != testPerson)
    heldRows = np.nonzero(people == testPerson)

    trainX = eyeData[keptRows]
    trainY = targets[keptRows]
    testX = eyeData[heldRows]
    testY = targets[heldRows]

    # Center training data; apply the same mean shift to the test data.
    trainCentered, mean, variance = featureNormalize(trainX, doScale=False)
    testCentered = testX - mean

    # PCA basis from the training data only; project both sets onto the
    # top-k principal components.
    u, s, v = np.linalg.svd(getCovarianceMatrix(trainCentered))
    trainProj = projectData(trainCentered, u, k)
    testProj = projectData(testCentered, u, k)

    # Train on the projected training data, predict the held-out person.
    model = svm.classifier(trainProj, trainY, C, gamma, kernel)
    predictions = model.predict(testProj)

    # Record every misclassified test sample for later inspection.
    miss = [
        {'eye_data': testX[i],
         'target': testY[i],
         'classification': label,
         'testPerson': testPerson}
        for i, label in enumerate(predictions)
        if testY[i] != label
    ]

    # Alternative left/right-only metric, kept for reference:
    # correct = np.sum((testY-1)/2 == (predictions-1)/2) / float(len(predictions))
    correct = np.sum(testY == predictions) / float(len(predictions))

    return model, correct, miss
def plotDecisionBoundary(eyeData, targets, k, C, gamma, kernel):
    """Train an SVM on PCA-projected eye data and plot its decision
    regions over the first two principal components, with the training
    points overlaid.  Assumes targets take values 1..4 (four classes).
    """
    # Center the data, compute the PCA basis, project, and train.
    (normalizedTraining, mean, variance) = featureNormalize(eyeData, doScale=False)
    covarianceMatrix = getCovarianceMatrix(normalizedTraining)
    (u, s, v) = np.linalg.svd(covarianceMatrix)
    projectedTraining = projectData(normalizedTraining, u, k)
    classifier = svm.classifier(projectedTraining, targets, C, gamma, kernel)

    # Build a 600x400 evaluation grid spanning the projected training
    # data's bounding box in the first two components.
    minx = np.min(projectedTraining[:, 0])
    maxx = np.max(projectedTraining[:, 0])
    stepx = (maxx - minx) / 600.0
    xx = np.array(np.arange(minx, maxx, stepx))
    miny = np.min(projectedTraining[:, 1])
    maxy = np.max(projectedTraining[:, 1])
    stepy = (maxy - miny) / 400.0
    yy = np.array(np.arange(miny, maxy, stepy))

    # results[i, j] holds the predicted class at grid point (xx[i], yy[j]).
    results = np.zeros((len(xx), len(yy)))

    for i, x in enumerate(xx):
        for j, y in enumerate(yy):
            results[i, j] = classifier.predict(np.array([[x, y]]))[0]

    # Pixel markers for the decision regions, circles for the samples.
    params = ("b,", "r,", "g,", "y,")
    params2 = ("bo", "ro", "go", "yo")
    # NOTE(review): plt.hold was removed in matplotlib >= 3.0 — this code
    # targets an older matplotlib.
    plt.hold("on")

    # Paint each class's decision region; ii is (row idx, col idx) pairs
    # of grid cells predicted as that class.
    for target in range(4):
        ii = np.nonzero(results == target + 1)
        plt.plot(xx[ii[0]].flatten(), yy[ii[1]].flatten(), params[target])

    # Overlay the actual training samples, colored by true label.
    for target in range(4):
        ii2 = np.nonzero(targets == target + 1)[0]
        plt.plot(projectedTraining[ii2, 0].flatten(), projectedTraining[ii2, 1].flatten(), params2[target])

    plt.show()
# ----- Example #5 (scraped page marker: "示例#5", vote count 0) -----
# Plot the two training classes with different markers/shades.
pl.figure()
pl.plot(train0[:, 0], train0[:, 1], "o", color="0.75")
pl.plot(train1[:, 0], train1[:, 1], "s", color="0.25")

import svm
reload(svm)

# Build the classifier; alternative kernels kept for reference.
# NOTE(review): this rebinds the name `svm` from the module to an
# instance, shadowing the import above.
svm = svm.svm(kernel='linear', C=0.1)
#svm = svm.svm(kernel='rbf')
#svm = svm.svm(kernel='poly',C=0.1,degree=4)

print np.shape(train), np.shape(labeltrain)
svm.train_svm(train, labeltrain)
# Highlight the support vectors found during training.
pl.scatter(svm.X[:, 0], svm.X[:, 1], s=200, color='k')

# Hard-label predictions on the held-out test set, then report accuracy.
predict = svm.classifier(test, soft=False)
correct = np.sum(predict == labeltest)
print correct, np.shape(predict)
print float(correct) / np.shape(predict)[0] * 100., "test accuracy"

# Classify points over 2D space to fit contour
# Dense 50x50 grid over [-6, 6]^2; soft outputs let us contour the
# decision boundary (0) and, below, the margins (+/-1).
x, y = np.meshgrid(np.linspace(-6, 6, 50), np.linspace(-6, 6, 50))
xx = np.reshape(np.ravel(x), (2500, 1))
yy = np.reshape(np.ravel(y), (2500, 1))
points = np.concatenate((xx, yy), axis=1)
outpoints = svm.classifier(points, soft=True).reshape(np.shape(x))
pl.contour(x, y, outpoints, [0.0], colors='k', linewidths=1, origin='lower')
pl.contour(x,
           y,
           outpoints + 1, [0.0],
           colors='grey',
# NOTE(review): this example was truncated by the source page — the call
# above is never closed; the complete version appears in Example #6 below.
# ----- Example #6 (scraped page marker: "示例#6", vote count 0) -----
pl.figure()
pl.plot(train0[:,0], train0[:,1], "o",color="0.75")
pl.plot(train1[:,0], train1[:,1], "s",color="0.25")

import svm
reload(svm)

svm = svm.svm(kernel='linear',C=0.1)
#svm = svm.svm(kernel='rbf')
#svm = svm.svm(kernel='poly',C=0.1,degree=4)

print np.shape(train), np.shape(labeltrain)
svm.train_svm(train, labeltrain)
pl.scatter(svm.X[:,0], svm.X[:,1], s=200,color= 'k')

predict = svm.classifier(test,soft=False)
correct = np.sum(predict == labeltest)
print correct, np.shape(predict)
print float(correct)/np.shape(predict)[0]*100., "test accuracy"

# Classify points over 2D space to fit contour
x,y = np.meshgrid(np.linspace(-6,6,50), np.linspace(-6,6,50))
xx = np.reshape(np.ravel(x),(2500,1))
yy = np.reshape(np.ravel(y),(2500,1))
points = np.concatenate((xx,yy),axis=1)
outpoints = svm.classifier(points,soft=True).reshape(np.shape(x))
pl.contour(x, y, outpoints, [0.0], colors='k', linewidths=1, origin='lower')
pl.contour(x, y, outpoints + 1, [0.0], colors='grey', linewidths=1, origin='lower')
pl.contour(x, y, outpoints - 1, [0.0], colors='grey', linewidths=1, origin='lower')

pl.axis("tight")