import random

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

import NNModel  # project-local module providing Model and the label/one-hot/accuracy helpers


def buildSmallExampleNet():
    # Build model.
    mModel = NNModel.Model()
    mModel.add(layer_size=2, learning_rate=1, isInput=True)
    mModel.add(layer_size=3, learning_rate=1, momentum_factor=.3)
    mModel.add(layer_size=2, learning_rate=1, momentum_factor=.3)
    print("Created Model.")

    # Train model.
    trainData = np.array([[1, 1]])
    labelData = np.array([[1, 0]])
    mModel.train(trainData, labelData, epochs=10000)
    # Predict on the single training example.
    output = mModel.predict(trainData[0])
    print("Model output is: ")
    print(output)
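
# NNModel's internals are not shown in this snippet. Purely as an illustrative
# sketch (assuming sigmoid units; the actual activation and loss used by
# NNModel may differ), a single forward pass through the 2-3-2 fully connected
# network built above could be written in plain numpy as follows.
def forwardPassSketch(x, w_hidden, b_hidden, w_out, b_out):
    """Illustrative only: x has shape (2,); weights/biases match a 2-3-2 layout."""
    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))
    hidden = sigmoid(w_hidden @ x + b_hidden)  # hidden activations, shape (3,)
    return sigmoid(w_out @ hidden + b_out)     # output activations, shape (2,)
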
def runNetTrial():
    # Build model.
    mModel = NNModel.Model()
    mModel.add(layer_size=784, learning_rate=.01, isInput=True)
    mModel.add(layer_size=200, learning_rate=.005, momentum_factor=0)
    #mModel.add(layer_size=100, learning_rate=.005, momentum_factor=0)
    mModel.add(layer_size=10, learning_rate=.005, momentum_factor=0)
    print("Created Model.")

    # Read data from file.
    xData = pd.read_csv('./MNISTnumImages5000.txt', sep='\t', header=None)
    yData = pd.read_csv('./MNISTnumLabels5000.txt',
                        header=None,
                        names=['labels'])
    xData['labels'] = yData.values
    # Break data into train and test sets.
    trainSet, testSet = train_test_split(xData,
                                         test_size=0.2,
                                         random_state=random.randint(
                                             0, 100000))

    # Separate the label column from the features and one-hot encode the labels.
    originalTrainLabels = trainSet['labels'].values
    originalTestLabels = testSet['labels'].values
    trainLabels = NNModel.labelToOneHotEncoding(originalTrainLabels)
    testLabels = NNModel.labelToOneHotEncoding(originalTestLabels)
    trainData = trainSet[trainSet.columns[:-1]].values
    testData = testSet[testSet.columns[:-1]].values

    print("Starting training.")
    trialWiseErrorList = mModel.train(trainData,
                                      trainLabels,
                                      validation_data_set=testData,
                                      validation_label_set=originalTestLabels,
                                      epochs=60)
    print("Training finished.")

    # Evaluate accuracy on the test set.
    predictedLabels = mModel.predictAll(testData)
    predictedLabels = NNModel.oneHotEncodingToLabels(predictedLabels)
    accuracy = NNModel.calculateAccuracy(predictedLabels, originalTestLabels)
    #testSetMetrics = calculateMetrics(predictedLabels, originalTestLabels)
    testSetMetrics = {}
    testSetMetrics["accuracy"] = accuracy

    # Evaluate accuracy on the training set.
    predictedLabels = mModel.predictAll(trainData)
    predictedLabels = NNModel.oneHotEncodingToLabels(predictedLabels)
    accuracy = NNModel.calculateAccuracy(predictedLabels, originalTrainLabels)
    #trainSetMetrics = calculateMetrics(predictedLabels, originalTrainLabels)
    trainSetMetrics = {}
    trainSetMetrics["accuracy"] = accuracy
    trainSetMetrics["accuracyList"] = trialWiseErrorList

    print("Orig labels: ")
    print(originalTrainLabels[0:20])
    print("Pred labels: ")
    print(predictedLabels[0:20])

    return mModel, trainSetMetrics, testSetMetrics
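
# NNModel.labelToOneHotEncoding, NNModel.oneHotEncodingToLabels and
# NNModel.calculateAccuracy live in the NNModel module, which is not shown
# here. The sketches below are only assumptions consistent with how they are
# used above (integer class labels in, one output row per sample); the real
# implementations may differ.
def labelToOneHotEncodingSketch(labels):
    labels = np.asarray(labels, dtype=int).ravel()
    oneHot = np.zeros((labels.size, labels.max() + 1))
    oneHot[np.arange(labels.size), labels] = 1
    return oneHot


def oneHotEncodingToLabelsSketch(outputs):
    # Pick the index of the largest activation in each output row.
    return np.argmax(np.asarray(outputs), axis=1)


def calculateAccuracySketch(predictedLabels, actualLabels):
    predictedLabels = np.asarray(predictedLabels).ravel()
    actualLabels = np.asarray(actualLabels).ravel()
    return float(np.mean(predictedLabels == actualLabels))
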
# Example #3

def runNetTrial():
    # Build model.
    mModel = NNModel.Model()
    mModel.add(layer_size=2, learning_rate=.05, isInput=True)
    mModel.add(layer_size=20, learning_rate=.05)
    mModel.add(layer_size=2, learning_rate=.05)
    print("Created Model.")

    data = pd.read_table('./hw2_dataProblem.txt', sep=" +", engine='python')
    # Range scale (min-max normalize) the P column to [0, 1].
    data["P"] = (data["P"] - data["P"].min()) / (data["P"].max() - data["P"].min())
    # Range scale (min-max normalize) the L column to [0, 1].
    data["L"] = (data["L"] - data["L"].min()) / (data["L"].max() - data["L"].min())

    # Split the data into training and test sets, stratified by class D.
    train0, test0 = train_test_split(data[data.D == 0].values,
                                     test_size=0.2,
                                     random_state=random.randint(0, 100000))
    train1, test1 = train_test_split(data[data.D == 1].values,
                                     test_size=0.2,
                                     random_state=random.randint(0, 100000))

    # Combine and shuffle the test and train examples.
    testSet = np.vstack((test0, test1))
    np.random.shuffle(testSet)
    trainSet = np.vstack((train0, train1))
    #trainSet = np.vstack((trainSet, train0))
    np.random.shuffle(trainSet)

    testSetData = testSet[:, 0:2]
    testSetLabels = NNModel.labelToOneHotEncoding(testSet[:, 2])
    trainSetData = trainSet[:, 0:2]
    trainSetLabels = NNModel.labelToOneHotEncoding(trainSet[:, 2])

    print("Starting training.")
    trialWiseErrorList = mModel.train(trainSetData, trainSetLabels, epochs=200)
    print("Training finished.")

    # Evaluate accuracy and classification metrics on the test set.
    predictedLabels = mModel.predictAll(testSetData)
    predictedLabels = NNModel.oneHotEncodingToLabels(predictedLabels)
    accuracy = calculateAccuracy(predictedLabels, testSet[:, 2].reshape(
        (len(testSet), 1)))
    testSetMetrics = calculateMetrics(predictedLabels, testSet[:, 2].reshape(
        (len(testSet), 1)))
    testSetMetrics["accuracy"] = accuracy

    # Evaluate accuracy and classification metrics on the training set.
    predictedLabels = mModel.predictAll(trainSetData)
    predictedLabels = NNModel.oneHotEncodingToLabels(predictedLabels)
    accuracy = calculateAccuracy(predictedLabels, trainSet[:, 2].reshape(
        (len(trainSet), 1)))
    trainSetMetrics = calculateMetrics(
        predictedLabels, trainSet[:, 2].reshape((len(trainSet), 1)))
    trainSetMetrics["accuracy"] = accuracy
    trainSetMetrics["accuracyList"] = trialWiseErrorList

    # Print model metrics.
    # print("Predicted Labels:")
    # print(predictedLabels)
    # print("Accuracy on test set is: " + str(accuracy))
    # print("Sensitivity: " + str(metrics["sensitivity"]))
    # print("Specificity: " + str(metrics["specificity"]))
    # print("ppv: " + str(metrics["ppv"]))
    # print("npv: " + str(metrics["npv"]))

    return mModel, trainSetMetrics, testSetMetrics
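
# calculateAccuracy and calculateMetrics are referenced above without a module
# prefix, so they are presumably defined elsewhere in the original file. The
# sketch below is only an assumption consistent with the metric keys read in
# the commented-out prints (binary 0/1 labels, with 1 as the positive class);
# the real implementation may differ.
def calculateMetricsSketch(predictedLabels, actualLabels):
    predictedLabels = np.asarray(predictedLabels).ravel()
    actualLabels = np.asarray(actualLabels).ravel()
    tp = np.sum((predictedLabels == 1) & (actualLabels == 1))
    tn = np.sum((predictedLabels == 0) & (actualLabels == 0))
    fp = np.sum((predictedLabels == 1) & (actualLabels == 0))
    fn = np.sum((predictedLabels == 0) & (actualLabels == 1))
    return {
        "sensitivity": tp / (tp + fn) if (tp + fn) else 0.0,  # true positive rate
        "specificity": tn / (tn + fp) if (tn + fp) else 0.0,  # true negative rate
        "ppv": tp / (tp + fp) if (tp + fp) else 0.0,          # positive predictive value
        "npv": tn / (tn + fn) if (tn + fn) else 0.0,          # negative predictive value
    }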