Example #1
def SVM_Classify(trainDataPath, trainLabelPath, testDataPath, testLabelPath, kernelType):
    trainData = np.array(utils.loadDataFromFile(trainDataPath))
    trainLabels = utils.loadDataFromFile(trainLabelPath)

    testData = np.array(utils.loadDataFromFile(testDataPath))
    testLabels = utils.loadDataFromFile(testLabelPath)


    if kernelType == "HI":

        gramMatrix = histogramIntersection(trainData, trainData)
        clf = SVC(kernel='precomputed')
        clf.fit(gramMatrix, trainLabels)

        predictMatrix = histogramIntersection(testData, trainData)
        SVMResults = clf.predict(predictMatrix)
        correct = sum(1.0 * (SVMResults == testLabels))
        accuracy = correct / len(testLabels)
        print "SVM (Histogram Intersection): " +str(accuracy)+ " (" +str(int(correct))+ "/" +str(len(testLabels))+ ")"

    else:
        clf = SVC(kernel=kernelType)
        clf.fit(trainData, trainLabels)
        SVMResults = clf.predict(testData)

        correct = sum(1.0 * (SVMResults == testLabels))
        accuracy = correct / len(testLabels)
        print "SVM (" +kernelType+"): " +str(accuracy)+ " (" +str(int(correct))+ "/" +str(len(testLabels))+ ")"
Example #2
def SVM_Classify(trainDataPath, trainLabelPath, testDataPath, testLabelPath,
                 kernelType):
    trainData = np.array(utils.loadDataFromFile(trainDataPath))
    trainLabels = utils.loadDataFromFile(trainLabelPath)

    testData = np.array(utils.loadDataFromFile(testDataPath))
    testLabels = utils.loadDataFromFile(testLabelPath)

    if kernelType == "HI":

        gramMatrix = histogramIntersection(trainData, trainData)
        clf = SVC(kernel='precomputed')
        clf.fit(gramMatrix, trainLabels)

        predictMatrix = histogramIntersection(testData, trainData)
        SVMResults = clf.predict(predictMatrix)
        correct = sum(1.0 * (SVMResults == testLabels))
        accuracy = correct / len(testLabels)
        print "SVM (Histogram Intersection): " + str(accuracy) + " (" + str(
            int(correct)) + "/" + str(len(testLabels)) + ")"

    else:
        clf = SVC(kernel=kernelType)
        clf.fit(trainData, trainLabels)
        SVMResults = clf.predict(testData)

        correct = sum(1.0 * (SVMResults == testLabels))
        accuracy = correct / len(testLabels)
        print "SVM (" + kernelType + "): " + str(accuracy) + " (" + str(
            int(correct)) + "/" + str(len(testLabels)) + ")"
Example #3
def KNN_Classify(trainDataPath, trainLabelPath, testDataPath, testLabelPath):

    trainData = np.array(utils.loadDataFromFile(trainDataPath))
    trainLabels = utils.loadDataFromFile(trainLabelPath)

    testData = np.array(utils.loadDataFromFile(testDataPath))
    testLabels = utils.loadDataFromFile(testLabelPath)

    KNN = KNeighborsClassifier()
    KNN.fit(trainData, trainLabels)
    KNN_testLabels = KNN.predict(testData)

    correct = sum(1.0 * (KNN_testLabels == testLabels))
    accuracy = correct / len(testLabels)
    print "KNN: " +str(accuracy)+ " (" +str(int(correct))+ "/" +str(len(testLabels))+ ")"
Example #4
def KNN_Classify(trainDataPath, trainLabelPath, testDataPath, testLabelPath):

    trainData = np.array(utils.loadDataFromFile(trainDataPath))
    trainLabels = utils.loadDataFromFile(trainLabelPath)

    testData = np.array(utils.loadDataFromFile(testDataPath))
    testLabels = utils.loadDataFromFile(testLabelPath)

    KNN = KNeighborsClassifier()
    KNN.fit(trainData, trainLabels)
    KNN_testLabels = KNN.predict(testData)

    correct = sum(1.0 * (KNN_testLabels == testLabels))
    accuracy = correct / len(testLabels)
    print "KNN: " + str(accuracy) + " (" + str(int(correct)) + "/" + str(
        len(testLabels)) + ")"
Example #5
import argparse

import eel
import numpy as np

import utils
from network import NNOneHidden

ARG_TRAINED_MODEL_IN = "model"

argParser = argparse.ArgumentParser()
argParser.add_argument("-m",
                       "--{}".format(ARG_TRAINED_MODEL_IN),
                       required=True,
                       help="path to trained model")
args = vars(argParser.parse_args())

trainedModelFileName = args[ARG_TRAINED_MODEL_IN]

print("Loading model from '{}' ...".format(trainedModelFileName))
W, b = utils.loadDataFromFile(trainedModelFileName)

print("Fitting model...")

eel.init("web")


@eel.expose
def classify_image(X):
    pixels = list(map(int, X.split(" ")))
    #inputData = np.array([1-x for x in pixels])
    inputData = np.array(pixels).reshape(784, 1)
    # plt.imshow(inputData.reshape(28, 28), cmap=matplotlib.cm.binary)
    # plt.axis("off")
    # plt.show()
    # utils.display(inputData)
Example #6
                       required=True,
                       help="path to training data")
argParser.add_argument("-s",
                       "--{}".format(ARG_TRAINED_MODEL_OUT),
                       required=True,
                       help="path to save trained model")
argParser.add_argument("-b",
                       "--{}".format(ARG_BATCH_TRAIN),
                       required=True,
                       help="train with batch or not")
args = vars(argParser.parse_args())

trainDataFileName = args[ARG_TRAIN_DATA]

print("Getting training data from '{}' ...".format(trainDataFileName))
X_train, y_train = utils.loadDataFromFile(trainDataFileName)
X_test, y_test = utils.loadDataFromFile("data/mnist.data.test")

print(X_train.shape, y_train.shape)

print("Training model batch={}...".format(args[ARG_BATCH_TRAIN]))
nnet = NNOneHidden()
epochs = 1000
hidden_units = 50
if args[ARG_BATCH_TRAIN] == "true":
    W, b, train_history, test_history = nnet.mini_batch_train(
        X_train,
        y_train,
        X_test,
        y_test,
        epochs=epochs,
Example #7
import argparse

import numpy as np

import utils
from network import NNOneHidden

# command-line arguments for this script
ARG_TRAINED_MODEL_IN = "model"
ARG_TEST_DATA = "test_data"

argParser = argparse.ArgumentParser()
argParser.add_argument("-m", "--{}".format(ARG_TRAINED_MODEL_IN), required=True, help="path to trained model")
argParser.add_argument("-t", "--{}".format(ARG_TEST_DATA), required=True, help="path to test data")
args = vars(argParser.parse_args())

trainedModelFileName = args[ARG_TRAINED_MODEL_IN]
testDataFileName = args[ARG_TEST_DATA]

print("Loading model from '{}' ...".format(trainedModelFileName))
W, b = utils.loadDataFromFile(trainedModelFileName)

print("Loading test data from '{}' ...".format(testDataFileName))
X_test, y_test = utils.loadDataFromFile(testDataFileName)
print("Test data shapes", X_test.shape, y_test.shape)

print("Fitting model and getting predictions...")
nnet = NNOneHidden()
nnet.fit(W, b)

classification = nnet.classify(X_test)
predictions = np.argmax(classification, axis=0)
labels = np.argmax(y_test, axis=0)

correct_predictions = np.sum(predictions == labels) #compute how many predictions were correct
print("Performance : {}/{} correct".format(correct_predictions, predictions.shape[0]))