# Imports assumed by these examples: standard library, numpy/pandas/
# matplotlib, and the project's local modules (referenced as nn, nnConf,
# nnUtils throughout the code below).
import os
import json

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import nn
import nnConf
import nnUtils


def overfittingTest():
    # Data
    # X = (hours sleeping, hours studying), y = score on test
    # Training Data
    trainX_org = np.array(([3, 5], [5, 1], [10, 2], [6, 1.5]), dtype=float)
    trainY_org = np.array(([75], [83], [93], [70]), dtype=float)

    # Testing Data
    testX = np.array(([4, 5.5], [4.5, 1], [9, 2.5], [6, 2]), dtype=float)
    testY = np.array(([70], [89], [85], [75]), dtype=float)

    # Normalize
    trainX = trainX_org / np.amax(trainX_org, axis=0)
    trainY = trainY_org / 100

    testX = testX / np.amax(testX, axis=0)
    testY = testY / 100
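    # Note: for simplicity the test inputs are scaled by their own column
    # maxima; strictly one would reuse the training-set maxima so both
    # sets share the same scaling.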

    # Network
    inputSize = 2
    layerCount = 2
    networkConf = nnConf.NeuralNetworkConf(inputSize, layerCount)
    networkConf.weightInitializerMethod = "random"  # Options: random, he, xavier, zeros, ones
    networkConf.layerConf[0].neuronCount = 3
    networkConf.layerConf[0].activationFn = "sigmoid"
    networkConf.layerConf[1].neuronCount = 1
    networkConf.layerConf[1].activationFn = "sigmoid"
    #networkConf.Lambda = 0.0001

    NN = nn.NeuralNetwork(networkConf)
    numgrad = NN.computeNumericalGradient(trainX, trainY)
    print("****************************")
    grad = NN.computeGradient(trainX, trainY)
    print("\nnumGrad: ", numgrad)
    print("\ngrad: ", grad)

    # Quantify the agreement between grad and numgrad via the relative
    # error ||grad - numgrad|| / ||grad + numgrad|| (should be < 1e-8)
    modelCorrectness = (np.linalg.norm(grad - numgrad) /
                        np.linalg.norm(grad + numgrad))
    print("\nModel Correctness: ", modelCorrectness)

    # Train the network on the training data:
    T = nn.trainer(NN)
    T.maxIter = 1000
    T.batchSize = 1
    T.learningRate = 0.5
    #    T.train(trainX, trainY, testX, testY)
    T.train_GD(trainX, trainY, testX, testY)

    plt.plot(T.J)
    plt.plot(T.testJ)
    plt.grid(1)
    plt.xlabel("Iterations")
    plt.ylabel("Cost")
    plt.legend(["Training", "Testing"])
    plt.show()

    print("Final Training cost: ", T.J[-1])
    print("Final Test cost: ", T.testJ[-1])
    print("Number of iterations: ", len(T.J))
def restoreConfig(confFile):
    # Open and load the JSON file
    path = os.path.dirname(__file__)
    confUrl = os.path.join(path, "titanic", confFile)
    with open(confUrl, "r") as fp:
        conf = json.load(fp)

    # Create a nnConf object
    netConf = nnConf.NeuralNetworkConf(conf["inputSize"], conf["layerCount"])

    netConf.inputSize = conf["inputSize"]
    netConf.layerCount = conf["layerCount"]
    netConf.Lambda = conf["Lambda"]
    netConf.maxIter = conf["maxIter"]
    netConf.learningRate = conf["learningRate"]
    netConf.batchSize = conf["batchSize"]
    netConf.accuracy = conf["accuracy"]
    netConf.enableBias = conf["enableBias"]

    # Restore layer-wise configurations
    for i in range(netConf.layerCount):
        netConf.layerConf[i].neuronCount = conf["W" + str(i)]["neuronCount"]
        netConf.layerConf[i].activationFn = conf["W" + str(i)]["activationFn"]
        netConf.layerConf[i].weightInitializerMethod = conf[
            "W" + str(i)]["weightInitializerMethod"]

    # Restore the Weights and biases
    wrightsBias = (convertListToNdarray(conf["wrightsBias"]["weight"]),
                   convertListToNdarray(conf["wrightsBias"]["bias"]))

    #    print("While Loading", wrightsBias)

    return (netConf, wrightsBias)
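
# restoreConfig calls a convertListToNdarray helper that is not shown above.
# JSON cannot hold numpy arrays, so saveConfig presumably serializes each
# layer's weight/bias matrix as nested lists; a minimal sketch of the
# inverse conversion, under that assumption:
def convertListToNdarray(listOfMatrices):
    return [np.array(m, dtype=float) for m in listOfMatrices]
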
def runAndMeasure():
    # Data
    X, Y = getDataValidation()

    # Network
    inputSize = 7
    layerCount = 2
    netConf = nnConf.NeuralNetworkConf(inputSize, layerCount)
    netConf.layerConf[0].neuronCount = 20
    netConf.layerConf[0].activationFn = "relu"
    netConf.layerConf[0].weightInitializerMethod = "random"
    netConf.layerConf[1].neuronCount = 1
    netConf.layerConf[1].activationFn = "sigmoid"
    netConf.layerConf[1].weightInitializerMethod = "random"
    netConf.Lambda = 0.00009      # 0.0001
    netConf.maxIter = 500

    accuracyList = nnUtils.validateNN(X, Y, netConf, 5,
                                      showLearning=False)

    netConf.accuracy = np.mean(accuracyList)
    print(accuracyList)
    print("Mean Accuracy: ", netConf.accuracy)

    return (netConf, nn.getGlobalConf())
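
# nnUtils.validateNN(X, Y, netConf, 5) is assumed to run 5-fold
# cross-validation. A rough sketch of that idea, reusing this project's
# nn API as seen above (illustrative only; the real implementation may
# shuffle, stratify, or differ in other details):
def kFoldAccuracySketch(X, Y, netConf, k=5):
    foldSize = len(X) // k
    accuracies = []
    for i in range(k):
        lo, hi = i * foldSize, (i + 1) * foldSize
        # Hold out fold i for validation, train on the rest
        trainX = np.concatenate((X[:lo], X[hi:]))
        trainY = np.concatenate((Y[:lo], Y[hi:]))
        NN = nn.NeuralNetwork(netConf)
        T = nn.trainer(NN)
        T.maxIter = netConf.maxIter
        T.train(trainX, trainY, None, None)
        pred = (NN.forward(X[lo:hi]) >= 0.5).astype(int)
        accuracies.append(float(np.mean(pred == Y[lo:hi])))
    return accuracies
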
def validateTitanic():
    # Data
    X, Y = getDataValidation()

    # Network
    inputSize = 7
    layerCount = 2
    networkConf = nnConf.NeuralNetworkConf(inputSize, layerCount)
    networkConf.layerConf[0].neuronCount = 20
    networkConf.layerConf[0].activationFn = "relu"
    networkConf.layerConf[0].weightInitializerMethod = "random"
    networkConf.layerConf[1].neuronCount = 1
    networkConf.layerConf[1].activationFn = "sigmoid"
    networkConf.layerConf[1].weightInitializerMethod = "random"
    networkConf.Lambda = 0.00009      # 0.0001
    networkConf.maxIter = 500

    accuracyList = nnUtils.validateNN(X, Y, networkConf, 5, showLearning=False)

    print(accuracyList)
    print("Mean Accuracy: ", np.mean(accuracyList))

    # Store Neural network settings and state
#    print(NN.getGlobalConf())
#    print(networkConf)
    nnUtils.saveConfig(networkConf, nn.getGlobalConf())

    nnUtils.restoreConfig()

    return accuracyList
def getInitialNetConf():
    # Network
    inputSize = 7
    layerCount = 2
    netConf = nnConf.NeuralNetworkConf(inputSize, layerCount)
    netConf.layerConf[0].neuronCount = 20
    netConf.layerConf[0].activationFn = "relu"
    netConf.layerConf[0].weightInitializerMethod = "random"
    netConf.layerConf[1].neuronCount = 1
    netConf.layerConf[1].activationFn = "sigmoid"
    netConf.layerConf[1].weightInitializerMethod = "random"
    netConf.Lambda = 0.00009  # 0.0001
    netConf.maxIter = 500

    return netConf
def titanicTest():
    # Data
    trainX, trainY, testX, PassengerId = getDataTest()

    # Network
    inputSize = 7
    layerCount = 2
    networkConf = nnConf.NeuralNetworkConf(inputSize, layerCount)
    networkConf.layerConf[0].neuronCount = 20
    networkConf.layerConf[0].activationFn = "relu"
    networkConf.layerConf[0].weightInitializerMethod = "random"
    networkConf.layerConf[1].neuronCount = 1
    networkConf.layerConf[1].activationFn = "sigmoid"
    networkConf.layerConf[1].weightInitializerMethod = "random"
    networkConf.Lambda = 0.00009
    networkConf.maxIter = 500

    NN = nn.NeuralNetwork(networkConf)

    # Train network with new data:
    T = nn.trainer(NN)
    T.maxIter = networkConf.maxIter
    T.train(trainX, trainY, None, None)
#    T.train_GD(trainX, trainY, testX, testY)

    print("Final Training cost: ", T.J[-1])
    print("Number of iterations: ", len(T.J))

    testYhat = NN.forward(testX)

    # Threshold the outputs: values >= 0.5 become 1, values below 0.5 become 0
    DBFunc = np.vectorize(lambda x: 0 if x < 0.5 else 1)
    testYAns = DBFunc(testYhat)
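    # Equivalent, more idiomatic thresholding without np.vectorize:
    #   testYAns = (testYhat >= 0.5).astype(int)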
#    testYAns = np.int(testYAns)
#    print(np.shape(testYAns))
#    print(np.concatenate((PassengerId, testYAns), axis=1))

    testOutput = pd.DataFrame({"PassengerId": np.array(PassengerId).ravel(),
                               "Survived": np.array(testYAns).ravel()})
    print(testOutput)
    path = os.path.dirname(__file__)
    resultUrl = os.path.join(path, "titanic", "result.csv")

    testOutput.to_csv(resultUrl, index=False)
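    # result.csv follows the Kaggle Titanic submission format: a header row
    # "PassengerId,Survived" followed by one 0/1 prediction per passenger.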