Exemple #1
0
 def test_layers(self):
     """Exercise the layer chain of a 1-1-1 network: layer typing,
     weight/bias accessors, forward feed, error backpropagation and the
     weight/bias update step, all with fixed weights so the numeric
     expectations are deterministic."""
     nn = NeuralNetwork.NeuralNetwork([1, 1, 1])
     first = nn.firstLayer
     second = first.nextLayer
     self.assertIs(type(first), NeuronLayer.NeuronLayer)
     self.assertIs(type(second), OutputLayer.OutputLayer)
     # Pin every weight and bias so the assertions below are reproducible.
     first.neurons[0].setWeight(0, 0.5)
     first.neurons[0].setBias(1)
     second.neurons[0].setWeight(0, 0.5)
     second.neurons[0].setBias(1)
     self.assertEqual(first.getNeurons()[0].getWeights(), [0.5])
     self.assertEqual(first.getNeurons()[0].getBias(), 1)
     # The first layer has no predecessor; its successor is the output layer.
     self.assertIsNone(first.getPreviousLayer())
     self.assertIs(type(first.getNextLayer()), OutputLayer.OutputLayer)
     # Forward pass on a single input.
     first.forwardFeed([1])
     self.assertAlmostEqual(first.getOutput()[0], 0.8, 1)
     self.assertAlmostEqual(first.getNextLayer().getOutput()[0], 0.8, 1)
     self.assertIs(type(first.getOutputLayer()), OutputLayer.OutputLayer)
     # Backward pass from an expected output of 1.5.
     first.getOutputLayer().backwardPropagateError([1.5])
     self.assertAlmostEqual(first.getNeurons()[0].getDelta(), 0.008, 3)
     self.assertAlmostEqual(
         first.getNextLayer().getNeurons()[0].getDelta(), 0.1, 1)
     # Update step with input [1] and learning rate 2.5.
     first.updateWeightsAndBias([1], 2.5)
     self.assertAlmostEqual(first.getNeurons()[0].getWeights()[0], 0.52, 2)
     self.assertAlmostEqual(first.getNeurons()[0].getBias(), 1.02, 2)
     self.assertAlmostEqual(
         first.getNextLayer().getNeurons()[0].getWeights()[0], 0.72, 2)
     self.assertAlmostEqual(
         first.getNextLayer().getNeurons()[0].getBias(), 1.27, 2)
Exemple #2
0
 def test_XOR_neural_network(self):
     """Train a 2-3-1 network on the XOR truth table and check that each of
     the four inputs is classified to within 0.5 of its expected output."""
     # Renamed from `set`, which shadowed the builtin of the same name.
     dataset = [[[1, 1], [0]], [[1, 0], [1]], [[0, 1], [1]], [[0, 0], [0]]]
     epochs = 2000
     learningRate = 0.5
     nn = NeuralNetwork.NeuralNetwork([2, 3, 1])
     for _ in range(epochs):
         for sample, expected in dataset:
             nn.train(sample, expected, learningRate)
     # places=0 means "within 0.5", i.e. the output rounds to the XOR value.
     nn.forwardFeed([1, 1])
     self.assertAlmostEqual(0.0, nn.getOutput()[0], 0)
     nn.forwardFeed([1, 0])
     self.assertAlmostEqual(1.0, nn.getOutput()[0], 0)
     nn.forwardFeed([0, 1])
     self.assertAlmostEqual(1.0, nn.getOutput()[0], 0)
     nn.forwardFeed([0, 0])
     self.assertAlmostEqual(0.0, nn.getOutput()[0], 0)
Exemple #3
0
def neural_network(x_train, x_test, y_train, y_test, x_pca, x_ica, x_kpca,
                   x_rp, x_kmeans, x_gmm, **kwargs):
    """Perform neural network experiment.

    Runs a fresh network on the original dataset, on each dimensionality-
    reduced dataset (PCA, ICA, KPCA, RP) and on the original dataset
    augmented with cluster labels (k-Means, Gaussian Mixture Models).

        Args:
           x_train (ndarray): training data.
           x_test (ndarray): test data.
           y_train (ndarray): training labels.
           y_test (ndarray): test labels.
           x_pca (ndarray): reduced dataset by PCA.
           x_ica (ndarray): reduced dataset by ICA.
           x_kpca (ndarray): reduced dataset by KPCA.
           x_rp (ndarray): reduced dataset by RP.
           x_kmeans (ndarray): clusters produced by k-Means.
           x_gmm (ndarray): clusters produced by Gaussian Mixture Models.
           kwargs (dict): additional arguments to pass:
                    - layer1_nodes (int): number of neurons in first layer.
                    - layer2_nodes (int): number of neurons in second layer.
                    - learning_rate (float): learning rate.

        Returns:
           None.
        """

    def _build_nn():
        """Create a fresh NeuralNetwork from the kwargs hyperparameters."""
        return NeuralNetwork(layer1_nodes=kwargs['layer1_nodes'],
                             layer2_nodes=kwargs['layer2_nodes'],
                             learning_rate=kwargs['learning_rate'])

    def _print_header(title):
        """Print the banner that separates one experiment from the next."""
        print('\n--------------------------')
        print(title)
        print('--------------------------')

    def _with_cluster_feature(x, clusters):
        """Return x with the z-score-normalized cluster labels appended as
        one extra feature column."""
        normalized = (clusters - np.mean(clusters)) / np.std(clusters)
        return np.append(x, np.expand_dims(normalized, axis=1), axis=1)

    # Experiment on the original dataset.
    _print_header('NN')
    _build_nn().experiment(x_train, x_test, y_train, y_test)

    # Experiments on each reduced dataset: element [0] is the reduced
    # training split, element [1] the reduced test split.
    for title, reduced in [('PCA + NN', x_pca), ('ICA + NN', x_ica),
                           ('KPCA + NN', x_kpca), ('RP+ NN', x_rp)]:
        _print_header(title)
        _build_nn().experiment(reduced[0], reduced[1], y_train, y_test)

    # Experiments on the original dataset augmented with cluster labels:
    # element [0] holds the training-split clusters, element [1] the
    # test-split clusters.
    for title, clusters in [('KMEANS+ NN', x_kmeans), ('GMM+ NN', x_gmm)]:
        _print_header(title)
        _build_nn().experiment(_with_cluster_feature(x_train, clusters[0]),
                               _with_cluster_feature(x_test, clusters[1]),
                               y_train, y_test)
Exemple #4
0
def xor_network():
    """Build a network with learning rate 0.05 and a 2-4-1 topology, train it
    on the module-level `xor` dataset for 10000 passes, and return it."""
    network = NeuralNetwork(0.05, 2, 4, 1)
    for _ in range(10000):
        network.train(xor)
    return network
Exemple #5
0
# Parameters.
epochs = 2000
learningRate = 0.1
# Layer sizes — presumably [input, hidden, output]; confirm against
# NeuralNetwork's constructor.
neuralNetworkRepresentation = [7, 5, 3]
dataSetFileName = 'seeds.txt'
dataSetNumberOfParameters = 8

# Neural network construction.
nn = NeuralNetwork.NeuralNetwork(neuralNetworkRepresentation)

# Adapted Data Set.
parser = DataSetParser.DataSetParser(dataSetFileName, dataSetNumberOfParameters)
parser.adaptData()
adaptedData = parser.getAdaptedData()
# Shuffle so training does not see the samples in file (likely class) order.
random.shuffle(adaptedData)

# Learning process, timed with wall-clock time.
start_time = time.time()
nn.learn(adaptedData, epochs, learningRate)
elapsed_time = time.time() - start_time
print('Learning elapsed time: '+str(elapsed_time) +' seconds.')
"""

# Model parameters for a 1-1-1 network. Each entry appears to encode a layer
# as a list of [weights, bias] pairs, one per neuron — TODO confirm against
# setModelParameters.
NeuronsArray = [[[[1, 1], 1], [[1, 2], 1], [[1, 3], 1]], [[[4, 4, 4], 0.5]]]
print("init")
NN = NeuralNetwork.NeuralNetwork([1, 1, 1])
NN.setModelParameters(NeuronsArray)
# Second network built only with its topology (no preset parameters).
NN2 = NeuralNetwork.NeuralNetwork([2, 3, 1])
print("initialized!")
# Dump the neurons of the last hidden layer and of the output layer.
print(NN.getOutputLayer().getPreviousLayer().getNeurons())
print(NN.getOutputLayer().getNeurons())
 def randomSample(self):
     """Generate a neural network with random weights and bias according to
     the specified architecture."""
     architecture = self.getNeuralNetworkArchitecture()
     return NeuralNetwork.NeuralNetwork(architecture)
Exemple #7
0
# Split the one-hot labels into mini-batches of (roughly) batch_size rows.
y_train_batches = np.array_split(one_hot_encoded_y_train,
                                 len(one_hot_encoded_y_train) // batch_size)

# # training data
# x_train = np.array([[[0,0]], [[0,1]], [[1,0]], [[1,1]]])
# one_hot_encoded_y_train = np.array([[[1, 0]], [[0, 1]], [[0, 1]], [[0, 1]]])
# y_train =[0, 1, 1, 1]

# this line is used to catch the errors arising from numpy.
# (seterr(all='raise') turns numpy warnings such as overflow/invalid into
# exceptions instead of silent warnings.)
np.seterr(all='raise')

# assumes x_train is shaped (samples, 1, features) — TODO confirm upstream.
input_number = x_train.shape[2]
output_number = 6
size_of_hidden_layer = 10

# Network: FC -> swish -> FC -> softmax, trained with cross-entropy loss.
neural_network = NeuralNetwork(cross_entropy, cross_entropy_prime)
neural_network.add_layer(
    FCLayer(input_number, size_of_hidden_layer, diminishing_factor=10))
neural_network.add_layer(ActivationLayer(swish, swish_prime))
neural_network.add_layer(FCLayer(size_of_hidden_layer, output_number))
neural_network.add_layer(ActivationLayer(softmax, softmax_prime))

# Train with a decaying learning rate, then evaluate on the training set.
neural_network.fit(x_train,
                   one_hot_encoded_y_train,
                   epoch_number=10,
                   initial_learning_rate=0.5,
                   decay=0.01)
out = neural_network.predict(x_train)

predictions = argmax(out)
print("confusion matrix:", confusion_matrix(y_train, predictions), sep="\n")
Exemple #8
0
 def constructNeuralNetwork(self, individualPos):
     """Build a NeuralNetwork for the individual at *individualPos* in the
     population.

     NOTE(review): as pasted, both `individual` and `NN` are unused and
     nothing is returned — the snippet appears truncated here.
     """
     individual = self.getPopulation()[individualPos]
     NN = NeuralNetwork.NeuralNetwork()
Exemple #9
0
def main():
    """Set up the neural network, pygame assets and sounds, then loop forever
    through welcome screen -> game -> game-over screen."""
    global SCREEN, FPSCLOCK

    global NN, ITER, userControl
    userControl = True
    print("User control: " + str(userControl))
    ITER = 0
    #----------------NEURAL NETWORKS------------------#
    # 52 inputs, 6 hidden neurons, 1 output — presumably the flap/no-flap
    # decision; confirm the input encoding against the game loop.
    NN = NeuralNetwork.NeuralNetwork([52, 6, 1])
    #NN = NeuralNetwork.NeuralNetwork([2, 6, 1])
    #--------------------------------------------------#

    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
    pygame.display.set_caption('Flappy Bird')

    # numbers sprites for score display
    IMAGES['numbers'] = (
        pygame.image.load('assets/sprites/0.png').convert_alpha(),
        pygame.image.load('assets/sprites/1.png').convert_alpha(),
        pygame.image.load('assets/sprites/2.png').convert_alpha(),
        pygame.image.load('assets/sprites/3.png').convert_alpha(),
        pygame.image.load('assets/sprites/4.png').convert_alpha(),
        pygame.image.load('assets/sprites/5.png').convert_alpha(),
        pygame.image.load('assets/sprites/6.png').convert_alpha(),
        pygame.image.load('assets/sprites/7.png').convert_alpha(),
        pygame.image.load('assets/sprites/8.png').convert_alpha(),
        pygame.image.load('assets/sprites/9.png').convert_alpha())

    # game over sprite
    IMAGES['gameover'] = pygame.image.load(
        'assets/sprites/gameover.png').convert_alpha()
    # message sprite for welcome screen
    IMAGES['message'] = pygame.image.load(
        'assets/sprites/message.png').convert_alpha()
    # base (ground) sprite
    IMAGES['base'] = pygame.image.load(
        'assets/sprites/base.png').convert_alpha()

    # sounds
    # NOTE(review): 'win' in sys.platform also matches 'darwin' (macOS), so
    # macOS gets .wav here — confirm this is intended.
    if 'win' in sys.platform:
        soundExt = '.wav'
    else:
        soundExt = '.ogg'

    SOUNDS['die'] = pygame.mixer.Sound('assets/audio/die' + soundExt)
    SOUNDS['hit'] = pygame.mixer.Sound('assets/audio/hit' + soundExt)
    SOUNDS['point'] = pygame.mixer.Sound('assets/audio/point' + soundExt)
    SOUNDS['swoosh'] = pygame.mixer.Sound('assets/audio/swoosh' + soundExt)
    SOUNDS['wing'] = pygame.mixer.Sound('assets/audio/wing' + soundExt)

    # Main loop: each iteration is one full play session with freshly
    # randomized background, player and pipe sprites.
    while True:
        # select random background sprites
        randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)
        IMAGES['background'] = pygame.image.load(
            BACKGROUNDS_LIST[randBg]).convert()

        # select random player sprites
        randPlayer = random.randint(0, len(PLAYERS_LIST) - 1)
        IMAGES['player'] = (
            pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
            pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
            pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
        )

        # select random pipe sprites (upper pipe is the lower one rotated 180)
        pipeindex = random.randint(0, len(PIPES_LIST) - 1)
        IMAGES['pipe'] = (
            pygame.transform.rotate(
                pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180),
            pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
        )

        # hitmask for pipes
        HITMASKS['pipe'] = (
            getHitmask(IMAGES['pipe'][0]),
            getHitmask(IMAGES['pipe'][1]),
        )

        # hitmask for player
        HITMASKS['player'] = (
            getHitmask(IMAGES['player'][0]),
            getHitmask(IMAGES['player'][1]),
            getHitmask(IMAGES['player'][2]),
        )

        movementInfo = showWelcomeAnimation()
        crashInfo = mainGame(movementInfo)
        showGameOverScreen(crashInfo)
Exemple #10
0
size = 857
filepath = os.path.dirname(os.getcwd()) + DEFAULT_DIR + DEFAULT_NAME

# NOTE(review): fp is never closed (or flushed) in the visible span — confirm
# a close/with exists later in the file.
fp = open(filepath, "w")

# NOTE(review): config is written per row but never incremented in the
# visible span — confirm it is updated elsewhere.
config = 0

# Grid search over epochs x learning rates x regularizations x momentums,
# scoring each combination with k-fold cross-validation.
for epoch in epochs:
    for lr in learning_rates:
        for reg in regularizations:
            for alpha in momentums:
                mean_loss = 0
                mean_validation = 0

                for i in range(k):
                    # Fresh 10-50-30-2 network per fold.
                    model = NeuralNetwork()
                    model.add(InputLayer(10))
                    model.add(DenseLayer(50, fanin=10))
                    model.add(DenseLayer(30, fanin=50))
                    model.add(OutputLayer(2, fanin=30))
                    # lr is scaled by the dataset size — presumably to turn a
                    # batch learning rate into a per-sample one; confirm.
                    model.compile(size, epoch, lr / size, None, reg, alpha,
                                  "mean_squared_error")
                    (train, val) = data.kfolds(index=i, k=k)
                    # fit(...)[-1] is the loss of the last epoch.
                    mean_loss = mean_loss + model.fit(train[0], train[1])[-1]
                    mean_validation = mean_validation + model.evaluate(
                        val[0], val[1])

                # One CSV row per hyperparameter combination with fold means.
                fp.write("{}, {}, {}, {}, {}, {}, {}\n".format(
                    config, epoch, lr, reg, alpha, mean_loss / k,
                    mean_validation / k))
Exemple #11
0
 def plug(self, brain):
     """Replace this object's brain with a NeuralNetwork built from *brain*.

     NOTE(review): the second constructor argument is False here — presumably
     a copy/randomize flag; confirm against NeuralNetwork.__init__.
     """
     replacement = NeuralNetwork(brain, False)
     self.brain = replacement
Exemple #12
0
 def __init__(self, location, lenght, brain):
     """Create a SmartSnake at *location* driven by a NeuralNetwork built
     from *brain*, with a Sight sensor and a pre-death penalty hook.

     Note: the parameter spelling `lenght` [sic] is kept for interface
     compatibility with existing callers.
     """
     super(SmartSnake, self).__init__(location, lenght)
     self.sight = Sight()
     self.brain = NeuralNetwork(brain)
     self.bind_predeath_event(self.punish_for_suicide)
Exemple #13
0
# coding: utf-8

import bottle
import numpy as np

from neural_networks import NeuralNetwork

app = bottle.default_app()
# Request counter incremented by the color_get route below.
app.iteration = 0
# Shared network instance — arguments are presumably (inputs, hidden,
# outputs) for the 3 RGB inputs and 2 output classes; confirm against
# NeuralNetwork.__init__.
app.nn = NeuralNetwork(3, 3, 2)


@bottle.route('/')
def index():
    """Serve the front-end entry page from the ./front/ directory."""
    front_root = "./front/"
    return bottle.static_file('index.html', root=front_root)


@bottle.route('/json/color.get')
def color_get():
    """Pick a random RGB color, run the shared network on it, and build the
    JSON response context.

    NOTE(review): this function is truncated in this paste — the `ctx` dict
    is never closed or returned here.
    """
    # NOTE(review): np.random.randint's upper bound is exclusive, so 255 is
    # never produced — confirm whether (0, 256) was intended.
    r, g, b = np.random.randint(0, 255, size=3)
    app.iteration += 1

    # The network's two outputs are compared to pick the contrast label.
    guess = app.nn.predict(np.array([[r, g, b]]))[0]
    if guess[0] < guess[1]:
        guess_label = "white"
    else:
        guess_label = "black"

    ctx = {
        'color': "#%02x%02x%02x" % (r, g, b),
        'iteration': app.iteration,
Exemple #14
0
    """ Method which generates sequence of numbers """
    X = np.zeros([nums, 10, 20], dtype=float)
    y = np.zeros([nums, 10, 20], dtype=float)
    for i in range(nums):
        start = np.random.randint(0, 10)
        num_seq = np.arange(start, start + 10)
        X[i] = to_categorical(num_seq, n_col=20)
        y[i] = np.roll(X[i], -1, axis=0)
    y[:, -1, 1] = 1  # Mark endpoint as 1
    return X, y


if __name__ == '__main__':
    # Build the synthetic number-series dataset and split it 60/40.
    X, y = gen_mult_ser(3000)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
    clf = NeuralNetwork(optimizer=optimizer, loss=CrossEntropy)
    # NOTE(review): input_shape=(10, 61) disagrees with the generator above,
    # which one-hot encodes with n_col=20 (samples shaped (10, 20)) — confirm
    # which encoding width is intended.
    clf.add(RNN(10, activation="tanh", bptt_trunc=5, input_shape=(10, 61)))
    clf.add(Activation('softmax'))

    # Decode one training sample back to integers for a human-readable demo.
    tmp_X = np.argmax(X_train[0], axis=1)
    tmp_y = np.argmax(y_train[0], axis=1)
    print("Number Series Problem:")
    print("X = [" + " ".join(tmp_X.astype("str")) + "]")
    print("y = [" + " ".join(tmp_y.astype("str")) + "]")
    print()
    train_err, _ = clf.fit(X_train, y_train, n_epochs=500, batch_size=512)
    # Collapse one-hot predictions and labels to class indices per timestep.
    y_pred = np.argmax(clf.predict(X_test), axis=2)
    y_test = np.argmax(y_test, axis=2)
    accuracy = np.mean(accuracy_score(y_test, y_pred))
    print(accuracy)
Exemple #15
0
import random, time
from neural_networks import NeuralNetwork, DataSetParser

# Parameters.
epochs = 2000
learningRate = 0.1
# Layer sizes — presumably [input, hidden, output]; confirm against
# NeuralNetwork's constructor.
neuralNetworkRepresentation = [7, 5, 3]
dataSetFileName = 'seeds.txt'
dataSetNumberOfParameters = 8

# Neural network construction.
nn = NeuralNetwork.NeuralNetwork(neuralNetworkRepresentation)

# Adapted Data Set.
parser = DataSetParser.DataSetParser(dataSetFileName,
                                     dataSetNumberOfParameters)
parser.adaptData()
adaptedData = parser.getAdaptedData()
# Shuffle so training does not see the samples in file (likely class) order.
random.shuffle(adaptedData)

# Learning process, timed with wall-clock time.
start_time = time.time()
nn.learn(adaptedData, epochs, learningRate)
elapsed_time = time.time() - start_time
print('Learning elapsed time: ' + str(elapsed_time) + ' seconds.')
def experiment(x_train, x_test, y_train, y_test):
    """Run the full battery of classifier experiments (KNN, SVM, Decision
    Tree, AdaBoost, Neural Network) on the given train/test split.

        Args:
           x_train (ndarray): training data.
           x_test (ndarray): test data.
           y_train (ndarray): training labels.
           y_test (ndarray): test labels.

        Returns:
           None.
        """

    # Training-set sizes over which each learning curve is plotted.
    training_sizes = np.arange(20, int(len(x_train) * 0.9), 10)

    # Banner printed between experiments.
    separator = '\n--------------------------'

    # K-Nearest Neighbors.
    print(separator)
    knn = KNN(k=1, weights='uniform', p=2)
    knn.experiment(x_train, x_test, y_train, y_test,
                   cv=10, y_lim=0.3,
                   n_neighbors_range=np.arange(1, 50, 2),
                   p_range=np.arange(1, 20),
                   weight_functions=['uniform', 'distance'],
                   train_sizes=training_sizes)

    # Support Vector Machines.
    print(separator)
    svm = SVM(c=1., kernel='rbf', degree=3, gamma=0.001, random_state=42)
    svm.experiment(x_train, x_test, y_train, y_test,
                   cv=10, y_lim=0.2,
                   C_range=[1, 5] + list(range(10, 100, 20)) +
                   list(range(100, 1000, 50)),
                   kernels=['linear', 'poly', 'rbf'],
                   gamma_range=np.logspace(-7, 0, 50),
                   poly_degrees=[2, 3, 4],
                   train_sizes=training_sizes)

    # Decision Trees.
    print(separator)
    dt = DecisionTree(max_depth=1, min_samples_leaf=1, random_state=42)
    dt.experiment(x_train, x_test, y_train, y_test,
                  cv=10, y_lim=0.1,
                  max_depth_range=list(range(1, 50)),
                  min_samples_leaf_range=list(range(1, 30)),
                  train_sizes=training_sizes)

    # AdaBoost over decision trees.
    print(separator)
    boosted_dt = AdaBoost(n_estimators=50, learning_rate=1.,
                          max_depth=3, random_state=42)
    boosted_dt.experiment(x_train, x_test, y_train, y_test,
                          cv=10, y_lim=0.2,
                          max_depth_range=list(range(1, 30)),
                          n_estimators_range=[1, 3, 5, 8] +
                          list(range(10, 100, 5)) +
                          list(range(100, 1000, 50)),
                          learning_rate_range=np.logspace(-6, 1, 50),
                          train_sizes=training_sizes)

    # Neural Networks.
    print(separator)
    nn = NeuralNetwork(alpha=0.01, layer1_nodes=50, layer2_nodes=30,
                       learning_rate=0.001, max_iter=100)
    nn.experiment(x_train, x_test, y_train, y_test,
                  cv=10, y_lim=0.1,
                  alpha_range=np.logspace(-5, 1, 30),
                  learning_rate_range=np.logspace(-4, 0, 50),
                  train_sizes=training_sizes)