def overfittingTest():
    # Data
    # X = (hours sleeping, hours studying), y = score on test
    # Training Data
    trainX_org = np.array(([3, 5], [5, 1], [10, 2], [6, 1.5]), dtype=float)
    trainY_org = np.array(([75], [83], [93], [70]), dtype=float)

    # Testing Data
    testX = np.array(([4, 5.5], [4.5, 1], [9, 2.5], [6, 2]), dtype=float)
    testY = np.array(([70], [89], [85], [75]), dtype=float)

    # Normalize
    trainX = trainX_org / np.amax(trainX_org, axis=0)
    trainY = trainY_org / 100

    # Note: the test set is scaled by its own column maxima here; scaling by the
    # training maxima would be more consistent.
    testX = testX / np.amax(testX, axis=0)
    testY = testY / 100

    # Network
    inputSize = 2
    layerCount = 2
    networkConf = nnConf.NeuralNetworkConf(inputSize, layerCount)
    networkConf.weightInitializerMethod = "random"  # Options: random, he, xavier, zeros, ones
    networkConf.layerConf[0].neuronCount = 3
    networkConf.layerConf[0].activationFn = "sigmoid"
    networkConf.layerConf[1].neuronCount = 1
    networkConf.layerConf[1].activationFn = "sigmoid"
    #networkConf.Lambda = 0.0001

    NN = nn.NeuralNetwork(networkConf)
    numgrad = NN.computeNumericalGradient(trainX, trainY)
    print("****************************")
    grad = NN.computeGradient(trainX, trainY)
    print("\nnumGrad: ", numgrad)
    print("\ngrad: ", grad)

    # Quantify the agreement between numgrad and grad (relative error should be < 1e-8)
    modelCorrectness = np.linalg.norm(grad - numgrad) / np.linalg.norm(grad +
                                                                       numgrad)
    print("\nModel Correctness: ", modelCorrectness)

    # Train network with new data:
    T = nn.trainer(NN)
    T.maxIter = 1000
    T.batchSize = 1
    T.learningRate = .5
    #    T.train(trainX, trainY, testX, testY)
    T.train_GD(trainX, trainY, testX, testY)

    plt.plot(T.J)
    plt.plot(T.testJ)
    plt.grid(1)
    plt.xlabel("Iterations")
    plt.ylabel("Cost")
    plt.legend(["Training", "Testing"])
    plt.show()

    print("Final Training cost: ", T.J[-1])
    print("Final Test cost: ", T.testJ[-1])
    print("Number of iterations: ", len(T.J))
Example #2
def trainAndPredict(trainX, trainY, testX, PassengerId, netConf, weightsBias):
    nn.setGlobalConf(weightsBias[0], weightsBias[1])
    NN = nn.NeuralNetwork(netConf, usePrevWt=True)

    # Train network with new data:
    T = nn.trainer(NN)
    T.maxIter = netConf.maxIter
    T.train(trainX, trainY, None, None)
    #    T.train_GD(trainX, trainY, testX, testY)

    print("Final Training cost: ", T.J[-1])
    print("Number of iterations: ", len(T.J))

    testYhat = NN.forward(testX)

    # Map values of 0.5 or above to 1 and values below 0.5 to 0
    DBFunc = np.vectorize(lambda x: 0 if x < 0.5 else 1)
    testYAns = DBFunc(testYhat)
    #    print(np.concatenate((PassengerId, testYAns), axis=1))

    testOutput = pd.DataFrame({
        "PassengerId": np.array(PassengerId).ravel(),
        "Survived": np.array(testYAns).ravel()
    })
    #    print(testOutput)
    path = os.path.dirname(__file__)
    resultUrl = os.path.join(path, "titanic", "result.csv")

    testOutput.to_csv(resultUrl, index=False)
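
# Equivalent, fully vectorized thresholding: it matches DBFunc above without the
# per-element Python call that np.vectorize makes (0.5 itself maps to 1 in both).
def threshold(yhat, cutoff=0.5):
    return (yhat >= cutoff).astype(int)
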
def titanicTest():
    # Data
    trainX, trainY, testX, PassengerId = getDataTest()

    # Network
    inputSize = 7
    layerCount = 2
    networkConf = nnConf.NeuralNetworkConf(inputSize, layerCount)
    networkConf.layerConf[0].neuronCount = 20
    networkConf.layerConf[0].activationFn = "relu"
    networkConf.layerConf[0].weightInitializerMethod = "random"
    networkConf.layerConf[1].neuronCount = 1
    networkConf.layerConf[1].activationFn = "sigmoid"
    networkConf.layerConf[1].weightInitializerMethod = "random"
    networkConf.Lambda = 0.00009
    networkConf.maxIter = 500

    NN = nn.NeuralNetwork(networkConf)

    # Train network with new data:
    T = nn.trainer(NN)
    T.maxIter = networkConf.maxIter
    T.train(trainX, trainY, None, None)
#    T.train_GD(trainX, trainY, testX, testY)

    print("Final Training cost: ", T.J[-1])
    print("Number of iterations: ", len(T.J))

    testYhat = NN.forward(testX)

    # Map values of 0.5 or above to 1 and values below 0.5 to 0
    DBFunc = np.vectorize(lambda x: 0 if x < 0.5 else 1)
    testYAns = DBFunc(testYhat)
#    testYAns = np.int(testYAns)
#    print(np.shape(testYAns))
#    print(np.concatenate((PassengerId, testYAns), axis=1))

    testOutput = pd.DataFrame({"PassengerId": np.array(PassengerId).ravel(),
                               "Survived": np.array(testYAns).ravel()})
    print(testOutput)
    path = os.path.dirname(__file__)
    resultUrl = os.path.join(path, "titanic", "result.csv")

    testOutput.to_csv(resultUrl, index=False)
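
# Sketch of what the weight-initializer options named above ("random", "he",
# "xavier", "zeros", "ones") typically compute. The exact formulas inside nnConf
# are not shown in this file; these are the standard textbook definitions.
def initWeights(method, fanIn, fanOut):
    if method == "he":                    # scaled for relu layers
        return np.random.randn(fanIn, fanOut) * np.sqrt(2.0 / fanIn)
    if method == "xavier":                # scaled for sigmoid/tanh layers
        return np.random.randn(fanIn, fanOut) * np.sqrt(1.0 / fanIn)
    if method == "zeros":
        return np.zeros((fanIn, fanOut))
    if method == "ones":
        return np.ones((fanIn, fanOut))
    return np.random.randn(fanIn, fanOut)  # "random": unscaled Gaussian
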
from scipy.signal import butter, filtfilt
import glob
import speech_recognition as sr
from nn import trainer
from nn import neuralNet
import main
import pickle
import thread
from PyQt4 import QtCore, QtGui



Fs = 16000
eps = 0.00000001
lowcut = 0
highcut = 0
neuralNetwork = neuralNet("test.nn")
emoTrainer = trainer(neuralNetwork)
maxlist = [-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9,-9]
minlist = [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]
denom = []


def guiWrite(gui, text):
    gui.ui.featureText.append(text)
    return


def butter_bandpass(lowcut, highcut, Fs, order):
    # The bandpass design is commented out; a fixed low-pass filter with a
    # normalized cutoff of 0.1 is returned instead, so lowcut and highcut are
    # currently ignored.
    #nyq = 0.5 * Fs
    #low = lowcut / nyq
    #high = highcut / nyq
    filtb, filta = butter(order, 0.1, btype='low')
    return filtb, filta
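
# How the filter above would typically be applied to an audio frame. filtfilt
# (imported above) gives zero-phase filtering; "frame" is a hypothetical 1-D
# numpy array of samples recorded at Fs.
# filtb, filta = butter_bandpass(lowcut, highcut, Fs, order=6)
# filtered = filtfilt(filtb, filta, frame)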
Example #5
def validateNN(dataX,
               dataY,
               netConf,
               n_splits,
               showLearning=True,
               wtReuse=False,
               wtAndBias=[]):

    print("\n*********************************************")
    accuracyList = []

    # Build the network with fresh weights
    NN = nn.NeuralNetwork(netConf, usePrevWt=False)

    # Validate the network with a numerical gradient check
    numgrad = NN.computeNumericalGradient(dataX, dataY)
    grad = NN.computeGradient(dataX, dataY)

    # Quantify the agreement between numgrad and grad (relative error should be < 1e-8)
    modelCorrectness = np.linalg.norm(grad - numgrad) / np.linalg.norm(grad +
                                                                       numgrad)
    print("\nModel Correctness: ", modelCorrectness)

    if wtReuse is True:
        nn.setGlobalConf(wtAndBias[0], wtAndBias[1])

    # Break the data into K training/test folds
    kf = KFold(n_splits=n_splits, random_state=None)
    for train_index, test_index in kf.split(dataX):
        # Split data into training and test set
        trainX, testX = dataX[train_index], dataX[test_index]
        trainY, testY = dataY[train_index], dataY[test_index]

        NN = nn.NeuralNetwork(netConf, usePrevWt=wtReuse)
        wtReuse = True  # reuse the trained weights on the remaining folds
        '''# Validate Network
        numgrad = NN.computeNumericalGradient(trainX, trainY)
        grad = NN.computeGradient(trainX, trainY)

        # Quantize numgrad and grad comparison(This should be < 1e-8)
        modelCorrectness = np.linalg.norm(grad-numgrad)/np.linalg.norm(grad+numgrad)
        print("\nModel Correctness: ", modelCorrectness)'''

        # Train network with new data:
        T = nn.trainer(NN)
        T.maxIter = netConf.maxIter
        T.train(trainX, trainY, testX, testY)
        #    T.train_GD(trainX, trainY, testX, testY)

        # Show Learning for training and test dataset
        if showLearning is True:
            plt.plot(T.J)
            plt.plot(T.testJ)
            plt.grid(1)
            plt.xlabel("Iterations")
            plt.ylabel("Cost")
            plt.legend(["Training", "Testing"])
            plt.show()

        print("Final Training cost: ", T.J[-1])
        print("Final Test cost: ", T.testJ[-1])
        print("Number of iterations: ", len(T.J))

        testYhat = NN.forward(testX)

        # Map values of 0.5 or above to 1 and values below 0.5 to 0
        DBFunc = np.vectorize(lambda x: 0 if x < 0.5 else 1)
        testYAns = DBFunc(testYhat)
        #    print(np.concatenate((testY, testYAns, testYhat, (testY == testYAns)), axis=1))

        accuracy = np.count_nonzero(testY == testYAns) / testY.shape[0]
        print("Accuracy: ", accuracy * 100, "%")
        accuracyList.append(accuracy)
        print("*********************************************\n")

    return accuracyList
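
# Hypothetical driver for validateNN: 5-fold cross-validation on the features
# loaded by getDataTest; the variable names and the averaging step are
# illustrative assumptions, not code from this project.
# dataX, dataY, _, _ = getDataTest()
# accuracies = validateNN(dataX, dataY, networkConf, n_splits=5,
#                         showLearning=False)
# print("Mean CV accuracy:", 100 * np.mean(accuracies), "%")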