Example #1
def taskMnist():
	XTrain, YTrain, XVal, YVal, XTest, YTest = readMNIST()
	# Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
	# nn1 = nn.NeuralNetwork(out_nodes, alpha, batchSize, epochs)	
	# Add layers to neural network corresponding to inputs and outputs of given data
	# Eg. nn1.addLayer(FullyConnectedLayer(x,y))
	###############################################
	# TASK 2.3 - YOUR CODE HERE
	#raise NotImplementedError	
	nn1 = nn.NeuralNetwork(10, 0.09, 32, 20)
	nn1.addLayer(FullyConnectedLayer(784, 10))
	nn1.addLayer(FullyConnectedLayer(10, 10))
	###############################################
	nn1.train(XTrain, YTrain, XVal, YVal, False, True)
	pred, acc  = nn1.validate(XTest, YTest)
	print('Test Accuracy ', acc)
	return nn1, XTest, YTest
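Example #2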
def taskCircle():
	XTrain, YTrain, XVal, YVal, XTest, YTest = readCircle()
	# Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
	# nn1 = nn.NeuralNetwork(inputSize, outputSize, numHiddenLayers, hiddenLayerSizes, alpha, batchSize, epochs)
	
	###############################################
	# TASK 2.3 - YOUR CODE HERE
	nn1 = nn.NeuralNetwork(len(XTrain[0]), len(YTrain[0]), 1, [6], 0.1, 8, 10)
	
	###############################################

	nn1.train(XTrain, YTrain, XVal, YVal, False, True)
	pred, acc = nn1.validate(XTest, YTest)
	print('Test Accuracy ', acc)
	# Run script visualizeTruth.py to visualize ground truth. Run command 'python3 visualizeTruth.py 3'
	# Use drawCircle(XTest, pred) to visualize YOUR predictions.
	drawCircle(XTest, pred)
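Example #3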
def titanicTest():
    # Data
    trainX, trainY, testX, PassengerId = getDataTest()

    # Network
    inputSize = 7
    layerCount = 2
    networkConf = nnConf.NeuralNetworkConf(inputSize, layerCount)
    networkConf.layerConf[0].neuronCount = 20
    networkConf.layerConf[0].activationFn = "relu"
    networkConf.layerConf[0].weightInitializerMethod = "random"
    networkConf.layerConf[1].neuronCount = 1
    networkConf.layerConf[1].activationFn = "sigmoid"
    networkConf.layerConf[1].weightInitializerMethod = "random"
    networkConf.Lambda = 0.00009
    networkConf.maxIter = 500

    NN = nn.NeuralNetwork(networkConf)

    # Train network with new data:
    T = nn.trainer(NN)
    T.maxIter = networkConf.maxIter
    T.train(trainX, trainY, None, None)
#    T.train_GD(trainX, trainY, testX, testY)

    print("Final Training cost: ", T.J[-1])
    print("Number of iterations: ", len(T.J))

    testYhat = NN.forward(testX)

    # Consider values above .5 as 1 and values less than .5 as 0
    DBFunc = np.vectorize(lambda x: 0 if x < 0.5 else 1)
    testYAns = DBFunc(testYhat)
#    testYAns = np.int(testYAns)
#    print(np.shape(testYAns))
#    print(np.concatenate((PassengerId, testYAns), axis=1))

    testOutput = pd.DataFrame({"PassengerId": np.array(PassengerId).ravel(),
                               "Survived": np.array(testYAns).ravel()})
    print(testOutput)
    path = os.path.dirname(__file__)
    resultUrl = os.path.join(path, "titanic", "result.csv")

    testOutput.to_csv(resultUrl, index=False)
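Example #4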
def check_all_layers():
    XTrain = np.random.randn(10, 3, 32, 32)
    YTrain = np.random.randn(10, 10)

    nn1 = nn.NeuralNetwork(10, 1)

    nn1.addLayer(ConvolutionLayer([3, 32, 32], [3, 3], 4, 1, 'relu'))
    nn1.addLayer(AvgPoolingLayer([4, 30, 30], [2, 2], 2))
    nn1.addLayer(ConvolutionLayer([4, 15, 15], [4, 4], 4, 1, 'relu'))
    nn1.addLayer(MaxPoolingLayer([4, 12, 12], [2, 2], 2))
    nn1.addLayer(FlattenLayer())
    nn1.addLayer(FullyConnectedLayer(144, 10, 'softmax'))

    delta = 1e-7
    size = nn1.layers[0].weights.shape
    num_grad = np.zeros(size)

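    # Forward-difference check: nudge each weight of the first layer by delta
    # and read the numerical gradient off the change in loss.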
    for a in range(size[0]):
        for b in range(size[1]):
            for i in range(size[2]):
                for j in range(size[3]):
                    activations = nn1.feedforward(XTrain)
                    loss1 = nn1.computeLoss(YTrain, activations)
                    nn1.layers[0].weights[a, b, i, j] += delta
                    activations = nn1.feedforward(XTrain)
                    loss2 = nn1.computeLoss(YTrain, activations)
                    num_grad_ij = (loss2 - loss1) / delta
                    num_grad[a, b, i, j] = num_grad_ij
                    nn1.layers[0].weights[a, b, i, j] -= delta

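    # The network was created with alpha = 1, so a single backpropagate() call
    # shifts each weight by one gradient step; saved - new then recovers the
    # analytic gradient.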
    saved = nn1.layers[0].weights[:, :, :, :].copy()
    activations = nn1.feedforward(XTrain)
    nn1.backpropagate(activations, YTrain)
    new = nn1.layers[0].weights[:, :, :, :]
    ana_grad = saved - new

    print(np.linalg.norm(num_grad - ana_grad))
    try:
        assert np.linalg.norm(num_grad - ana_grad) < 1e-4
        print("Gradient Test Passed for all layers!")
        return 5
    except AssertionError:
        print("Gradient Test Failed for all Layers :(")
        return 0
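Example #5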
def taskSemiCircle(draw):
    XTrain, YTrain, XVal, YVal, XTest, YTest = readSemiCircle()
    # Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
    # nn1 = nn.NeuralNetwork(out_nodes, alpha, batchSize, epochs)
    # Add layers to neural network corresponding to inputs and outputs of given data
    # Eg. nn1.addLayer(FullyConnectedLayer(x,y))
    ###############################################
    nn1 = nn.NeuralNetwork(2, .1, 5, 15)
    nn1.addLayer(FullyConnectedLayer(2, 2))
    nn1.addLayer(FullyConnectedLayer(2, 2))
    ###############################################
    nn1.train(XTrain, YTrain, XVal, YVal, False, True)
    pred, acc = nn1.validate(XTest, YTest)
    print('Test Accuracy ', acc)
    # Run script visualizeTruth.py to visualize ground truth. Run command 'python3 visualizeTruth.py 4'
    # Use drawSemiCircle(XTest, pred) to visualize YOUR predictions.
    if draw:
        drawSemiCircle(XTest, pred)
    return nn1, XTest, YTest
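Example #6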
def taskMnist():
    XTrain, YTrain, XVal, YVal, XTest, YTest = readMNIST()
    # Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
    # nn1 = nn.NeuralNetwork(out_nodes, alpha, batchSize, epochs)
    # Add layers to neural network corresponding to inputs and outputs of given data
    # Eg. nn1.addLayer(FullyConnectedLayer(x,y))

    out_nodes = 10
    alpha = 0.1
    batchSize = 20
    epochs = 10
    nn1 = nn.NeuralNetwork(out_nodes, alpha, batchSize, epochs)
    nn1.addLayer(FullyConnectedLayer(XTrain.shape[1], 50))
    nn1.addLayer(FullyConnectedLayer(50, out_nodes))
    ###############################################
    nn1.train(XTrain, YTrain, XVal, YVal, False, True)
    pred, acc = nn1.validate(XTest, YTest)
    print('Test Accuracy ', acc)
    return nn1, XTest, YTest
Example #7
def taskCifar10():
    XTrain, YTrain, XVal, YVal, XTest, YTest = readCIFAR10()

    XTrain = XTrain[0:5000, :, :, :]
    XVal = XVal[0:1000, :, :, :]
    XTest = XTest[0:1000, :, :, :]
    YVal = YVal[0:1000, :]
    YTest = YTest[0:1000, :]
    YTrain = YTrain[0:5000, :]

    modelName = 'model.npy'
    # # Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
    # # nn1 = nn.NeuralNetwork(out_nodes, alpha, batchSize, epochs)
    # # Add layers to neural network corresponding to inputs and outputs of given data
    # # Eg. nn1.addLayer(FullyConnectedLayer(x,y))
    # ###############################################
    # # TASK 2.4 - YOUR CODE HERE
    nn2 = nn.NeuralNetwork(10, 0.01, 100, 30)
    # nn2.addLayer(ConvolutionLayer([3,32,32], [10,10], 5, 2))
    # nn2.addLayer(ConvolutionLayer([5,12,12], [4,4], 5, 2))
    # nn2.addLayer(FlattenLayer())
    # nn2.addLayer(FullyConnectedLayer(125,10))
    # nn2 = nn.NeuralNetwork(10, 0.01, 100, 30)
    nn2.addLayer(ConvolutionLayer([3, 32, 32], [10, 10], 10, 2))
    nn2.addLayer(AvgPoolingLayer([10, 12, 12], [4, 4], 4))
    nn2.addLayer(FlattenLayer())
    nn2.addLayer(FullyConnectedLayer(90, 10))
    # nn2.addLayer(FullyConnectedLayer(100,10))

    ###################################################
    return nn2, XTest, YTest, modelName  # UNCOMMENT THIS LINE WHILE SUBMISSION

    nn2.train(XTrain,
              YTrain,
              XVal,
              YVal,
              True,
              True,
              loadModel=True,
              saveModel=True,
              modelName=modelName)
    pred, acc = nn2.validate(XTest, YTest)
    print('Test Accuracy ', acc)
Example #8
    def select_best_model(self,
                          X_design,
                          y_design,
                          X_va=None,
                          y_va=None,
                          fname=None):
        """
        This function retrains the model with the best hyperparameters'
        configuration.

        Parameters
        ----------
        X_design: numpy.ndarray
            the design matrix

        y_design: numpy.ndarray
            the target column vector

        X_va: numpy.ndarray
            the validation design matrix

        y_va: numpy.ndarray
            the validation target column vector

        fname: str
            the path to the file which contains the results for the best
            hyperparameters' search phase

        Returns
        -------
        The model trained with the best hyperparameters' configuration.
        """
        if fname is None:
            fname = self.fname

        best = self.select_best_hyperparams(top=1, fname=fname)
        best_hyperparams = best[0]['hyperparams']

        neural_net = nn.NeuralNetwork(X_design, y_design, **best_hyperparams)
        neural_net.train(X_design, y_design, X_va, y_va)

        return neural_net
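
A minimal usage sketch (an assumption, not code from the source: the selector object, the results filename, and X_test are hypothetical names; predict() mirrors the NeuralNetwork usage in Example #27):

    # Retrain on the winning configuration found by a previous search phase,
    # then predict on held-out data.
    best_net = selector.select_best_model(X_design, y_design, X_va, y_va,
                                          fname='results.json.gz')
    y_pred = best_net.predict(X_test)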
Example #9
def taskSquare(draw):
	XTrain, YTrain, XVal, YVal, XTest, YTest = readSquare()
	# Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
	# nn1 = nn.NeuralNetwork(out_nodes, alpha, batchSize, epochs)	
	# Add layers to neural network corresponding to inputs and outputs of given data
	# Eg. nn1.addLayer(FullyConnectedLayer(x,y))
	###############################################
	# TASK 2.1 - YOUR CODE HERE
	# raise NotImplementedError
	nn1 = nn.NeuralNetwork(2, 0.1, 20, 50)
	nn1.addLayer(FullyConnectedLayer(2, 4))  # seed is 61 for 4 nodes
	nn1.addLayer(FullyConnectedLayer(4, 2))
	###############################################
	nn1.train(XTrain, YTrain, XVal, YVal, False, True)
	pred, acc = nn1.validate(XTest, YTest)
	print('Test Accuracy ', acc)
	# Run script visualizeTruth.py to visualize ground truth. Run command 'python3 visualizeTruth.py 2'
	# Use drawSquare(XTest, pred) to visualize YOUR predictions.
	if draw:
		drawSquare(XTest, pred)
	return nn1, XTest, YTest
Example #10
def taskMnist():
    XTrain, YTrain, XVal, YVal, XTest, _ = loadMnist()
    # Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
    # nn1 = nn.NeuralNetwork(lr, batchSize, epochs)
    # Add layers to neural network corresponding to inputs and outputs of given data
    # Eg. nn1.addLayer(FullyConnectedLayer(x,y))
    ###############################################
    # TASK 3b (Marks 13) - YOUR CODE HERE
    nn1 = nn.NeuralNetwork(0.01, 1, 100)
    nn1.addLayer(nn.FullyConnectedLayer(784, 41, 'relu'))
    nn1.addLayer(nn.FullyConnectedLayer(41, 10, 'softmax'))
    # raise NotImplementedError
    ###############################################
    nn1.train(XTrain, YTrain, XVal, YVal)
    pred, _ = nn1.validate(XTest, None)
    with open("predictionsMnist.csv", 'w') as file:
        writer = csv.writer(file)
        writer.writerow(["id", "prediction"])
        for i, p in enumerate(pred):
            writer.writerow([i, p])
    return nn1
Example #11
def taskCifar10():
    XTrain, YTrain, XVal, YVal, XTest, YTest = readCIFAR10()
    np.random.seed(42)
    idx = np.random.choice(40000, 1500)
    XTrain = XTrain[idx, :, :, :]
    XVal = XVal[0:1000, :, :, :]
    XTest = XTest[0:1000, :, :, :]
    YVal = YVal[0:1000, :]
    YTest = YTest[0:1000, :]
    YTrain = YTrain[idx, :]

    modelName = 'model.npy'
    # # Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
    # # nn1 = nn.NeuralNetwork(out_nodes, alpha, batchSize, epochs)
    # # Add layers to neural network corresponding to inputs and outputs of given data
    # # Eg. nn1.addLayer(FullyConnectedLayer(x,y))
    # ###############################################
    # # TASK 2.4 - YOUR CODE HERE
    nn1 = nn.NeuralNetwork(10, 0.01, 8, 24)
    nn1.addLayer(ConvolutionLayer([3, 32, 32], [4, 4], 8, 2, 'relu'))
    nn1.addLayer(AvgPoolingLayer([8, 15, 15], [3, 3], 2))
    nn1.addLayer(FlattenLayer())
    nn1.addLayer(FullyConnectedLayer(7 * 7 * 8, 20, 'relu'))
    nn1.addLayer(FullyConnectedLayer(20, 10, 'softmax'))

    ###################################################
    return nn1, XTest, YTest, modelName  # UNCOMMENT THIS LINE WHILE SUBMISSION

    print("HERE")
    nn1.train(XTrain,
              YTrain,
              XVal,
              YVal,
              True,
              True,
              loadModel=True,
              saveModel=True,
              modelName=modelName)
    pred, acc = nn1.validate(XTest, YTest)
    print('Test Accuracy ', acc)
Example #12
def taskMnist():
	XTrain, YTrain, XVal, YVal, XTest, _ = loadMnist()
	# Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
	nn1 = nn.NeuralNetwork(0.01, 20, 60)
	# Add layers to neural network corresponding to inputs and outputs of given data
	nn1.addLayer(nn.FullyConnectedLayer(XTrain.shape[1], 140, activation='relu'))
	#nn1.addLayer(nn.FullyConnectedLayer(128,64, activation='relu'))
	nn1.addLayer(nn.FullyConnectedLayer(140, YTrain.shape[1], activation='softmax'))
	###############################################
	# TASK 3b (Marks 13) - YOUR CODE HERE
	#raise NotImplementedError
	###############################################
	nn1.train(XTrain, YTrain, XVal, YVal)
	pred, _ = nn1.validate(XTest, None)
	_, acc = nn1.validate(XVal, YVal)
	with open("predictionsMnist.csv", 'w') as file:
		writer = csv.writer(file)
		writer.writerow(["id", "prediction"])
		for i, p in enumerate(pred):
			writer.writerow([i, p])
	print('Validation Accuracy', acc)
	return nn1
Example #13
def check_fully_connected():
    # XTrain = np.random.randn(20, 100)
    # YTrain = np.random.randn(20, 10)

    # nn1 = nn.NeuralNetwork(10, 1)
    # nn1.addLayer(FullyConnectedLayer(100, 50, 'relu'))
    # nn1.addLayer(FullyConnectedLayer(50, 30, 'relu'))
    # nn1.addLayer(FullyConnectedLayer(30, 20, 'relu'))
    # nn1.addLayer(FullyConnectedLayer(20, 10, 'softmax'))
    XTrain = np.random.randn(10, 100)
    YTrain = np.random.randn(10, 10)

    nn1 = nn.NeuralNetwork(10, 1)
    nn1.addLayer(FullyConnectedLayer(100, 10, 'softmax'))

    delta = 1e-7
    size = nn1.layers[0].weights.shape
    num_grad = np.zeros(size)

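    # Forward-difference numerical gradient: perturb each weight by delta and
    # divide the change in loss by delta.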
    for i in range(size[0]):
        for j in range(size[1]):
            activations = nn1.feedforward(XTrain)
            loss1 = nn1.computeLoss(YTrain, activations)
            nn1.layers[0].weights[i, j] += delta
            activations = nn1.feedforward(XTrain)
            loss2 = nn1.computeLoss(YTrain, activations)
            num_grad_ij = (loss2 - loss1) / delta
            num_grad[i, j] = num_grad_ij
            nn1.layers[0].weights[i, j] -= delta

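    # With alpha = 1 (see the constructor above), one backpropagate() call
    # moves the weights by exactly one gradient, so saved - new recovers the
    # analytic gradient.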
    saved = nn1.layers[0].weights[:, :].copy()
    activations = nn1.feedforward(XTrain)
    nn1.backpropagate(activations, YTrain)
    new = nn1.layers[0].weights[:, :]
    ana_grad = saved - new

    print(np.linalg.norm(num_grad - ana_grad))
    assert np.linalg.norm(num_grad - ana_grad) < 1e-5
    print("Gradient Test Passed for Fully Connected Layer!")
Example #14
def taskMnist():
    print("Training: MNIST...")
    XTrain, YTrain, XVal, YVal, XTest, _ = loadMnist()
    # Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
    nn1 = nn.NeuralNetwork(lr=0.005, batchSize=8, epochs=50)
    # Add layers to neural network corresponding to inputs and outputs of given data
    nn1.addLayer(nn.FullyConnectedLayer(28 * 28, 256, 'relu'))
    #nn1.addLayer(nn.FullyConnectedLayer(128,256,'relu'))
    nn1.addLayer(nn.FullyConnectedLayer(256, 10, 'softmax'))
    ###############################################
    # TASK 3b (Marks 13) - YOUR CODE HERE
    ###############################################
    nn1.train(XTrain, YTrain, XVal, YVal)
    pred, acc = nn1.validate(XVal, YVal)
    print(acc)
    pred, _ = nn1.validate(XTest, None)
    with open("predictionsMnist.csv", 'w') as file:
        writer = csv.writer(file)
        writer.writerow(["id", "prediction"])
        for i, p in enumerate(pred):
            writer.writerow([i, p])
    return nn1
Example #15
    def __init__(self,
                 shape=[27, 64, 64, 27],
                 hidden_activation=nn.relu,
                 output_activation=nn.softmax,
                 filename=None):
        """
        Initialise a NNPlayer with a Neural Network brain

            Parameters:
                shape (list): The shape of the internal neural network
                hidden_activation (function): The function to be used for the hidden layers
                output_activation (function): The function to be used for the output layer
            Returns:
                None
        """
        self.brain = nn.NeuralNetwork(shape, hidden_activation,
                                      output_activation)
        if filename is not None:
            self.brain.load(filename)
        self.fitness = 0
        self.games_won = 0
        self.scores = []
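
A minimal construction sketch (the class name NNPlayer comes from the docstring; the saved-network filename is hypothetical):

    # Fresh player with the default 27-64-64-27 topology.
    player = NNPlayer()

    # Player whose brain is restored from a previously saved network.
    veteran = NNPlayer(filename='saved_brain.npz')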
Example #16
def main():

    cli()

    mnist = tf.keras.datasets.mnist

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_train, X_test = X_train / 255.0, X_test / 255.0

    z = nn.NeuralNetwork()

    z.fit(X_train, y_train)

    C, RC, N, RN = z.get_layer_output(X_test)

    out = z.predict(X_test)

    prediction_0 = np.argmax(out[0])

    weights_1 = z.get_weights(1)
    weights_3 = z.get_weights(3)
Example #17
def taskCifar10():
    XTrain, YTrain, XVal, YVal, XTest, YTest = readCIFAR10()

    XTrain = XTrain[0:5000, :, :, :]
    XVal = XVal[0:1000, :, :, :]
    XTest = XTest[0:1000, :, :, :]
    YVal = YVal[0:1000, :]
    YTest = YTest[0:1000, :]
    YTrain = YTrain[0:5000, :]

    modelName = 'model.npy'
    # Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
    # nn1 = nn.NeuralNetwork(out_nodes, alpha, batchSize, epochs)
    # Add layers to neural network corresponding to inputs and outputs of given data
    # Eg. nn1.addLayer(FullyConnectedLayer(x,y))
    ###############################################
    # TASK 2.4 - YOUR CODE HERE

    nn1 = nn.NeuralNetwork(10, 0.1, 20, 10)
    nn1.addLayer(ConvolutionLayer([3, 32, 32], [8, 8], 6, 3))
    nn1.addLayer(FlattenLayer())
    nn1.addLayer(FullyConnectedLayer(486, 70))
    nn1.addLayer(FullyConnectedLayer(70, 10))

    # raise NotImplementedError
    ###################################################
    return nn1, XTest, YTest, modelName  # UNCOMMENT THIS LINE WHILE SUBMISSION

    nn1.train(XTrain,
              YTrain,
              XVal,
              YVal,
              True,
              True,
              loadModel=False,
              saveModel=True,
              modelName=modelName)
    pred, acc = nn1.validate(XTest, YTest)
    print('Test Accuracy ', acc)
Example #18
def taskXor():
    print("Training XOR...")
    XTrain, YTrain, XVal, YVal, XTest, YTest = loadXor()
    # Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
    nn1 = nn.NeuralNetwork(lr=0.005, batchSize=8, epochs=50)
    # Add layers to neural network corresponding to inputs and outputs of given data
    nn1.addLayer(nn.FullyConnectedLayer(2, 12, 'relu'))
    #nn1.addLayer(nn.FullyConnectedLayer(12,64,'relu'))
    nn1.addLayer(nn.FullyConnectedLayer(12, 2, 'softmax'))
    ###############################################
    # TASK 3a (Marks 7) - YOUR CODE HERE

    ###############################################
    nn1.train(XTrain, YTrain, XVal, YVal)
    pred, acc = nn1.validate(XTest, YTest)
    with open("predictionsXor.csv", 'w') as file:
        writer = csv.writer(file)
        writer.writerow(["id", "prediction"])
        for i, p in enumerate(pred):
            writer.writerow([i, p])
    print('Test Accuracy', acc)
    return nn1
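Example #19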
def check_fully_connected():
    print("-------------------------")
    print("Grading Forward pass and backward pass")
    XTrain = np.random.randn(10, 100)
    YTrain = np.random.randn(10, 10)

    nn1 = nn.NeuralNetwork(10, 1)
    nn1.addLayer(FullyConnectedLayer(100, 10, 'softmax'))

    delta = 1e-7
    size = nn1.layers[0].weights.shape
    num_grad = np.zeros(size)

    for i in range(size[0]):
        for j in range(size[1]):
            activations = nn1.feedforward(XTrain)
            loss1 = nn1.computeLoss(YTrain, activations)
            nn1.layers[0].weights[i, j] += delta
            activations = nn1.feedforward(XTrain)
            loss2 = nn1.computeLoss(YTrain, activations)
            num_grad_ij = (loss2 - loss1) / delta
            num_grad[i, j] = num_grad_ij
            nn1.layers[0].weights[i, j] -= delta

    saved = nn1.layers[0].weights[:, :].copy()
    activations = nn1.feedforward(XTrain)
    nn1.backpropagate(activations, YTrain)
    new = nn1.layers[0].weights[:, :]
    ana_grad = saved - new

    # print(np.linalg.norm(num_grad - ana_grad))
    try:
        assert np.linalg.norm(num_grad - ana_grad) < 1e-4
        print("Gradient Test Passed for Fully Connected Layer! Marks: 5")
        return 5
    except AssertionError:
        print("Gradient Test Failed for Fully Connected Layer :(")
        return 0
Example #20
def fs():
    input_length = 100
    hidden_cnt = 50
    data = get_test_data(input_length)

    rnn_nn = nn.NeuralNetwork(nn=rnn.RNN(input_length, hidden_cnt,
                                         data.x.shape[2], data.y.shape[1]),
                              validation_split=0.2,
                              batch_size=256,
                              nb_epoch=10,
                              show_accuracy=True)

    features, results = rnn_nn.feature_selection(data)
    print("Selected features: {0}".format(features))
    print(results)

    feature_selection = {
        "features": features,
        "results": results,
        "count": data.x.shape[2]
    }
    output = open('../../results/RNN_features', 'wb')
    pickle.dump(feature_selection, output)
    output.close()
Example #21
def taskXor():
    XTrain, YTrain, XVal, YVal, XTest, YTest = loadXor()
    # Create a NeuralNetwork object 'nn1' as follows with optimal parameters. For parameter definition, refer to nn.py file.
    # nn1 = nn.NeuralNetwork(lr, batchSize, epochs)
    # Add layers to neural network corresponding to inputs and outputs of given data
    # Eg. nn1.addLayer(FullyConnectedLayer(x,y))
    ###############################################
    # TASK 3a (Marks 7) - YOUR CODE HERE
    nn1 = nn.NeuralNetwork(lr=0.1, batchSize=40, epochs=50)
    nn1.addLayer(
        nn.FullyConnectedLayer(in_nodes=2, out_nodes=3, activation='relu'))
    nn1.addLayer(
        nn.FullyConnectedLayer(in_nodes=3, out_nodes=2, activation='softmax'))
    #raise NotImplementedError
    ###############################################
    nn1.train(XTrain, YTrain, XVal, YVal)
    pred, acc = nn1.validate(XTest, YTest)
    with open("predictionsXor.csv", 'w') as file:
        writer = csv.writer(file)
        writer.writerow(["id", "prediction"])
        for i, p in enumerate(pred):
            writer.writerow([i, p])
    print('Test Accuracy', acc)
    return nn1
Example #22
    def __init__(self, screen, brain=None):
        self.screen = screen
        self.height = screen.get_height()
        self.width = screen.get_width()

        self.y = int(self.height / 2)
        self.x = 25

        self.gravity = 0.2
        self.velocity = 0
        self.lift = -5

        # AI stuff
        self.score = 0
        # probability to be picked in the next generation
        self.fitness = 0

        if brain is not None:
            self.brain = brain.copy()
        else:
            # if Bird is initialised without neural network, make a new NN
            # inputs - 4 (y of bird; x of pipe's left edge, y of bottom pipe, y of top pipe)
            # hidden layer - 4 (random), output - 1 (number between 0 and 1; more than 0.5 => jump)
            self.brain = nn.NeuralNetwork(4, 4, 1)
Example #23
def init():
    for i in range(v.total_models):
        out_nodes = 1
        alpha = 0.01
        batchSize = 1
        epochs = 1
        nn1 = nn.NeuralNetwork(out_nodes, alpha, batchSize, epochs)
        nn1.addLayer(FullyConnectedLayer(3, 7, 'sigmoid'))
        nn1.addLayer(FullyConnectedLayer(7, 1, 'sigmoid'))
        v.current_pool.append(nn1)
        v.fitness.append(-100)

    if v.load_saved_pool:
        for i in range(v.total_models):
            file_name = "Trained_model/" + str(i) + ".pkl"
            with open(file_name, "rb") as f:
                to_load = pickle.load(f)
                v.current_pool[i].setweight(to_load)
    if v.load_best:
        for i in range(v.total_models):
            file_name = "Trained_model_best/" + str(i) + ".pkl"
            with open(file_name, "rb") as f:
                to_load = pickle.load(f)
                v.current_pool[i].setweight(to_load)
Example #24
    def __init__(self, dataset_name):
        self.save_model = False
        if dataset_name == 'MNIST':
            self.XTrain, self.YTrain, self.XVal, self.YVal, self.XTest, self.YTest = readMNIST(
            )
            # Add your network topology along with other hyperparameters here
            self.batch_size = 64
            self.epochs = 20
            # self.lr = 0.001
            self.nn = nn.NeuralNetwork(out_nodes=10, lr=0.001)
            self.nn.addLayer(FullyConnectedLayer(784, 10,
                                                 activation='softmax'))

        if dataset_name == 'CIFAR10':
            self.XTrain, self.YTrain, self.XVal, self.YVal, self.XTest, self.YTest = readCIFAR10(
            )
            self.XTrain = self.XTrain[0:5000, :, :, :]
            self.XVal = self.XVal[0:1000, :, :, :]
            self.XTest = self.XTest[0:1000, :, :, :]
            self.YVal = self.YVal[0:1000, :]
            self.YTest = self.YTest[0:1000, :]
            self.YTrain = self.YTrain[0:5000, :]

            self.save_model = True
            self.model_name = "model.npy"

            # Add your network topology along with other hyperparameters here
            self.batch_size = 16
            self.epochs = 10
            self.lr = 1e-2
            # self.nn =
            # nn.addLayer()
            self.nn = nn.NeuralNetwork(10, 1e-2)
            self.nn.addLayer(
                ConvolutionLayer([3, 32, 32], [4, 4], 6, 4, 'relu'))
            self.nn.addLayer(AvgPoolingLayer([6, 8, 8], [2, 2], 2))
            self.nn.addLayer(FlattenLayer())
            self.nn.addLayer(FullyConnectedLayer(96, 10, 'softmax'))

        if dataset_name == 'XOR':
            self.XTrain, self.YTrain, self.XVal, self.YVal, self.XTest, self.YTest = readXOR(
            )

            # self.save_model = True
            self.model_name = "model.npy"

            # Add your network topology along with other hyperparameters here
            self.batch_size = 10
            self.epochs = 15
            self.lr = 1e-3
            self.nn = nn.NeuralNetwork(out_nodes=2, lr=self.lr)
            # nn.addLayer()
            self.nn.addLayer(FullyConnectedLayer(2, 4, 'relu'))
            self.nn.addLayer(FullyConnectedLayer(4, 2, 'softmax'))

        if dataset_name == 'square':
            self.XTrain, self.YTrain, self.XVal, self.YVal, self.XTest, self.YTest = readSquare(
            )
            # Add your network topology along with other hyperparameters here
            self.batch_size = 10
            self.epochs = 20
            self.lr = 1e-3
            self.nn = nn.NeuralNetwork(out_nodes=2, lr=self.lr)
            # nn.addLayer()
            self.nn.addLayer(FullyConnectedLayer(2, 3, 'relu'))
            self.nn.addLayer(FullyConnectedLayer(3, 2, 'softmax'))

        if dataset_name == 'circle':
            self.XTrain, self.YTrain, self.XVal, self.YVal, self.XTest, self.YTest = readCircle(
            )
            # Add your network topology along with other hyperparameters here
            self.batch_size = 10
            self.epochs = 20
            self.lr = 1e-3
            self.nn = nn.NeuralNetwork(out_nodes=2, lr=self.lr)
            # nn.addLayer()
            self.nn.addLayer(FullyConnectedLayer(2, 2, 'relu'))
            self.nn.addLayer(FullyConnectedLayer(2, 2, 'softmax'))
Example #25
training_data = [
    {
        "inputs": [0, 0],
        "targets": [0]
    },
    {
        "inputs": [0, 1],
        "targets": [1]
    },
    {
        "inputs": [1, 0],
        "targets": [1]
    },
    {
        "inputs": [1, 1],
        "targets": [0]
    },
]

neural_network = nn.NeuralNetwork(2, 4, 1)

resolution = 10
cols = screen.get_width() // resolution
rows = screen.get_height() // resolution

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

    for i in range(100):
        data = random.choice(training_data)
        neural_network.train(data['inputs'], data['targets'])
    color = 10
Example #26
import random
import nn
import numpy as np
import csv
iteration = 10000
letters = "ABCDEFGHHIJKLMNOPQRSTUVWXYZ"
a = nn.NeuralNetwork(784, 200, len(letters))


def inverter(data):
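    # Invert 8-bit grayscale pixels: each value x becomes 255 - x.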
    data = np.asfarray(data)
    for i in range(len(data)):
        data[i] = 255 - data[i]
    return data


def listmaker(temp):
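    # Soft one-hot target: 0.99 at the true class index, 0.01 elsewhere.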
    target = []
    for i in range(len(letters)):
        if i == temp:
            target.append(0.99)
        else:
            target.append(0.01)
    return target


with open("data2.csv", "r") as f:
    datareader = csv.reader(f)
    main_list = list(datareader)

print "starting"
Example #27
np.random.shuffle(dataset)

split = int(X.shape[0] * 0.9)

train = dataset[:split, :]
validation = dataset[split:, :]

X_train, y_train = np.hsplit(train, [X.shape[1]])
X_va, y_va = np.hsplit(validation, [X.shape[1]])

nn = NN.NeuralNetwork(X_train,
                      y_train,
                      eta=0.4,
                      alpha=0.1,
                      hidden_sizes=[3],
                      reg_method='l2',
                      reg_lambda=0.0,
                      epochs=1000,
                      batch_size=100,
                      activation='sigmoid',
                      task='classifier',
                      w_par=6)
nn.train(X_train, y_train, X_va, y_va)
u.plot_learning_curve(nn)

y_pred = nn.predict(X)

np.abs((np.round(y_pred, 0) - y)).sum()

nn.error_per_epochs_va
plt.close()
Example #28
    lambda_reg = 1

    FEATURES = 6
    INTERVAL = 5
    EX_PER_STOCK = 60

    requested_stocks = 'all'  #['STL', 'REC', 'SDRL', 'SAS-NOK', 'ATLA-NOK', 'BIONOR', 'HAVI', 'SBX', 'FUNCOM', 'TEL', 'ARCHER', 'NAVA', 'EMGS', 'MHG', 'FRO', 'PRS', 'ODFB', 'AKVA', 'SONG', 'NAS']

    OSEBX = StockMarket('OSEBX', requested_stocks)
    my_portfolio = stock_algorithms.Portfolio()

    TS = nn.TrainingSet(OSEBX, FEATURES, EX_PER_STOCK, INTERVAL)

    X, y, X_cv, y_cv, X_test, y_test, X_p, stocks = TS.get_data_set()

    NN = nn.NeuralNetwork(FEATURES * INTERVAL, 12, 12)

    NN.input_data(X, y, X_cv, y_cv, X_test, y_test)
    NN.minimize_cost(X, y, lambda_reg)

    NN.test_nn(X_test, y_test, lambda_reg, theta=None, type="test")
    run_no = 0

    while run_no < 24:

        X_p, stocks, y_p, prediction = NN.predict(X_p, stocks)

        promising_stocks = [stock for stock, prob in prediction]

        costs_train, costs_cv, costs_test, accuracies_train, f1s_train, accuracies_cv, f1s_cv, accuracies_test, f1s_test = NN.get_mod_ver_data(
        )
Example #29
             'alpha': params['hyperparameters']['alpha']}
        params['hyperparameters'].pop('momentum_type')
        params['hyperparameters'].pop('alpha')
    else:
        if plot_time and epochs is None:
            params['hyperparameters']['max_epochs'] = None

    params['hyperparameters'].pop('activation')
    params['hyperparameters'].pop('topology')

    for trial in tqdm(range(ntrials),
                      desc='TESTING DATASET {}'.format(ds + 1)):

        neural_net = nn.NeuralNetwork(X_designs[ds],
                                      y_designs[ds],
                                      hidden_sizes=hidden_sizes,
                                      activation='sigmoid',
                                      task='classifier')

        # ipdb.set_trace()

        neural_net.train(X_designs[ds],
                         y_designs[ds],
                         opt,
                         epochs=epochs,
                         X_va=X_tests[ds],
                         y_va=y_tests[ds],
                         **params['hyperparameters'])

        mse_tr.append(neural_net.optimizer.error_per_epochs[-1])
        mse_ts.append(neural_net.optimizer.error_per_epochs_va[-1])
Example #30
    def search(self,
               X_design,
               y_design,
               nfolds=3,
               ntrials=7,
               save_results=True,
               fname=None,
               **kwargs):
        """
        This function searches for the best hyperparameters' configuration
        through a search space of hyperparameters.

        Parameters
        ----------
        X_design: numpy.ndarray
            the design matrix

        y_design: numpy.ndarray
            the column target vector

        nfolds: int
            the number of folds to be applied in the algorithm
            (Default value = 3)

        ntrials: int
            the number of times the search for the hyperparameters has to be
            repeated in order to obtain different network initializations
            (Default value = 7)

        save_results: bool
            whether or not to save the results as a JSON file
            (Default value = True)

        fname: str
            where to save the results obtained at the end of the searching
            phase
            (Default value = '../data/model_selection_results.json.gz')

        Returns
        -------
        """
        self.n_iter = self.repetitions * len(self.grid)

        if fname is None:
            fname = self.fname

        if save_results:
            with gzip.open(fname, 'w') as f:
                f.write('{"out": [')

        i = 0

        for rep in tqdm(range(self.repetitions),
                        desc="CROSS VALIDATION'S REPETITION PROGRESS"):
            dataset = np.hstack((X_design, y_design))
            np.random.shuffle(dataset)
            X_design, y_design = np.hsplit(dataset, [X_design.shape[1]])

            for hyperparams in tqdm(
                    self.grid,
                    desc='GRID SEARCH {}'.format(
                        kwargs['par_name'] if 'par_name' in kwargs else '')):
                # instantiate neural network
                i += 1
                for trial in tqdm(range(ntrials), desc="TRIALS"):
                    # repeated initialization of the net
                    neural_net = nn.NeuralNetwork(X_design, y_design,
                                                  **hyperparams)
                    cross_val = KFoldCrossValidation(X_design,
                                                     y_design,
                                                     neural_net,
                                                     nfolds=nfolds,
                                                     shuffle=False)

                    out = dict()
                    out['hyperparams'] = neural_net.get_params()
                    out['errors'] = cross_val.aggregated_results
                    out['fold_results'] = cross_val.fold_results
                    out['id_grid'] = i
                    out['id_trial'] = trial
                    # fold results
                    for res in out['fold_results']:
                        res['id_grid'] = i
                        res['id_trial'] = trial

                    if save_results:
                        with gzip.open(fname, 'a') as f:
                            json.dump(out, f, indent=4)
                            if i != self.n_iter:
                                f.write(',\n')
                            else:
                                f.write('\n ]}')