def test_conections(self):
    """Connections retrieved from the genome come back in sorted order."""
    genes = [(2, 4, 0, True), (1, 3, 0, True),
             (2, 3, 0, True), (1, 4, 0, True)]
    network = NeuralNetwork()
    network.generate_network(GenomeMock(genes, 2, 2))
    expected = [(1, 3, 0, True), (1, 4, 0, True),
                (2, 3, 0, True), (2, 4, 0, True)]
    self.assertEqual(network._genome.get_connections(), expected)
def test_connections_creation(self):
    """generate_network should map each (in, out) pair to its weight."""
    genes = [(2, 4, 0, True), (1, 3, -3, True),
             (2, 3, 4, True), (1, 4, 22, True)]
    network = NeuralNetwork()
    network.generate_network(GenomeMock(genes, 2, 2))
    self.assertEqual(
        network._connections,
        {(2, 4): 0, (1, 3): -3, (2, 3): 4, (1, 4): 22})
def test_node_creation(self):
    """Every node id referenced by a connection becomes a neuron, and
    input/output membership follows the genome's declared counts.

    Uses assertIn/assertNotIn instead of assertEqual(x in y, True) so a
    failure reports which id was missing or misplaced.
    """
    genome = GenomeMock([(2, 4, 0, True), (1, 3, -3, True),
                         (2, 3, 4, True), (1, 4, 22, True),
                         (1, 5, 3, True)], 2, 2)
    nn = NeuralNetwork()
    nn.generate_network(genome)
    # every referenced id becomes a neuron
    for node_id in (1, 2, 3, 4, 5):
        self.assertIn(node_id, nn._neurons)
    # ids 1-2 are the declared inputs; nothing else is
    for node_id in (1, 2):
        self.assertIn(node_id, nn._input_neurons)
    for node_id in (3, 4, 5):
        self.assertNotIn(node_id, nn._input_neurons)
    # ids 3-4 are the declared outputs; 5 is hidden
    for node_id in (3, 4):
        self.assertIn(node_id, nn._output_neurons)
    for node_id in (1, 2, 5):
        self.assertNotIn(node_id, nn._output_neurons)
def test_input_neurons_have_input_signal(self):
    """After forward(), each input neuron holds exactly its raw input."""
    genes = [(2, 4, 0, True), (1, 3, 0, True),
             (2, 3, 0, True), (1, 4, 0, True)]
    network = NeuralNetwork()
    network.generate_network(GenomeMock(genes, 2, 2))
    network.forward([3, 22])
    self.assertEqual(network._input_neurons[1]._input_signals, [3])
    self.assertEqual(network._input_neurons[2]._input_signals, [22])
def test_input_length_exception(self):
    """forward() must reject input vectors of the wrong length."""
    genes = [(2, 4, 0, True), (1, 3, 0, True),
             (2, 3, 0, True), (1, 4, 0, True)]
    network = NeuralNetwork()
    network.generate_network(GenomeMock(genes, 2, 2))
    with self.assertRaises(Exception) as ctx:
        network.forward([1])
    self.assertEqual(str(ctx.exception),
                     "Expected 2 inputs, got 1 instead")
def test_hard_forward_propagation(self):
    """Forward propagation through hidden nodes; expected outputs
    were computed by hand for these fixed weights."""
    nn = NeuralNetwork()

    # hidden nodes 5 and 6 between inputs {1, 2, 3} and output 4
    genes = [(1, 5, 3, True), (2, 5, -2, True), (1, 6, -1, True),
             (5, 6, -3.4, True), (3, 6, 4, True), (6, 4, 5, True)]
    nn.generate_network(GenomeMock(genes, 3, 1))
    outputs = nn.forward([0.2, 2, -0.02])
    self.assertAlmostEqual(outputs[0], 0.5144, places=4)

    # deeper chain: 5 feeds 7, 7 feeds 6, 6 feeds the output 4
    genes = [(1, 5, 3, True), (2, 5, -2, True), (1, 7, -1, True),
             (5, 7, -3.4, True), (3, 7, 4, True), (7, 6, 2, True),
             (6, 4, 0.3, True)]
    nn.generate_network(GenomeMock(genes, 3, 1))
    outputs = nn.forward([0.2, 2, -0.02])
    self.assertAlmostEqual(outputs[0], 0.6778, places=4)
# load the MNIST-style digits dataset bundled with scikit-learn
dataset = datasets.load_digits()

# apply min/max scaling to scale the
# pixel intensity values to the range [0, 1]
data = dataset.data.astype("float")
data = (data - data.min()) / (data.max() - data.min())
print("[INFO] samples: {}, dim: {}".format(data.shape[0], data.shape[1]))
labels = dataset.target

# split training: 75%, testing: 25%
(trainX, testX, trainY, testY) = train_test_split(data, labels,
                                                  test_size=0.25,
                                                  random_state=42)

# one-hot encode the labels; fit the binarizer on the training labels
# only and reuse that mapping for the test labels (calling
# fit_transform on testY would refit and could reorder the classes)
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# train the network
print("[INFO] training network ...")
model = NeuralNetwork([trainX.shape[1], 32, 16, 10], alpha=0.5)
print("[INFO] {}".format(model))
model.fit(trainX, trainY, epochs=1000)

# evaluate network
print("[INFO] evaluating network...")
preds = model.predict(testX)
print(classification_report(testY.argmax(axis=1), preds.argmax(axis=1)))
    # (continuation: the enclosing argument-parsing function's `def`
    # line is above this chunk)
    parser.add_argument('--model_path', '-m', type=str, required=True,
                        nargs=1, dest='model_path',
                        help='path to trained model')
    return vars(parser.parse_args(argv))


if __name__ == '__main__':
    # parse command line arguments
    args = parameter_parse(sys.argv[1:])
    # load data and model
    nn = NeuralNetwork.load(args['model_path'][0])
    test = load_data(args['data_path'][0])
    # clean the data: scale pixels by 255 and one-hot encode labels
    test_x = [normalize(x, 255) for x in test['x']]
    test_y = output2binary(test['y'][0])
    # make prediction over the whole test set
    nn.bulk_predict(test_x, test_y)
    # get results (hit_count is accumulated by bulk_predict)
    print("Accuracy: %f" % ((nn.hit_count / len(test_x)) * 100))
    print("Hit count: %d" % nn.hit_count)
                required=True,
                help='Enter type of bitwise dataset to apply the perceptron')
ap.add_argument('-e', '--epochs', type=int, default=20000,
                help='# of epochs to fit dataset')
args = vars(ap.parse_args())

# truth tables for the supported bitwise operators, keyed by CLI choice
datasets = {
    'and': [[0], [0], [0], [1]],
    'or': [[0], [1], [1], [1]],
    'xor': [[0], [1], [1], [0]]
}

# construct the dataset: every 2-bit input combination
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array(datasets[args['dataset']])

# define our 2-2-1 neural network and train it
print(f'[INFO] training Neural Network for {args["epochs"]} epochs')
nn = NeuralNetwork([2, 2, 1], alpha=0.5)
nn.fit(X, y, epochs=args['epochs'])

print('[INFO] testing the Neural Network')
for x, target in zip(X, y):
    # make a prediction on the data point and display the result
    # to our console; threshold the raw output at 0.5
    pred = nn.predict(x)
    step = 1 if pred > 0.5 else 0
    print(f'[INFO] data={x}, ground-truth={target[0]}, pred={pred[0][0]:.4f}, step={step}')
# import the necessary packages
from nn.neuralnetwork import NeuralNetwork
import numpy as np
import matplotlib.pyplot as plt

# construct the XOR dataset
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
epochs = 20000

# define our 2-2-1 neural network and train it.
# FIX: the original built NeuralNetwork([2, 1]) — a single-layer net
# with no hidden units, which cannot represent XOR and contradicts the
# "2-2-1" comment; a 2-unit hidden layer matches the stated intent.
nn = NeuralNetwork([2, 2, 1], alpha=0.5)
losses = nn.fit(X, y, epochs=epochs)

# evaluate our model
print("[INFO] evaluating...")
# now that our network is trained, loop over the XOR data points
for (x, target) in zip(X, y):
    # make a prediction on the data point and display the result to our console
    pred = nn.predict(x)[0][0]
    step = 1 if pred > 0.5 else 0
    print("[INFO] data={}, ground-truth={}, pred={:.4f}, step={}".format(
        x, target[0], pred, step))

# construct a figure that plots the loss over time
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, len(losses)), losses)
plt.title("Training Loss")
plt.xlabel("Epoch #")
def create_phenotypes(self):
    """Build a NeuralNetwork phenotype for every genome in every group."""
    self.phenotypes.extend(
        NeuralNetwork(genome)
        for group in self.groups.values()
        for genome in group.genomes
    )
#!/usr/local/bin/python3.6
"""Train a 2-2-1 network on XOR and print its thresholded predictions."""
import os
import sys

sys.path.append("./nn")
from nn.neuralnetwork import NeuralNetwork
import numpy as np
import matplotlib.pyplot as plt

# XOR truth table
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
Epochs = 20000

nn = NeuralNetwork([2, 2, 1], alpha=0.8)
displayLoss = nn.fit(X, y, epochs=Epochs)

# run every sample back through the trained network
print("[INFO] Predicting on XOR...")
for (x, target) in zip(X, y):
    pred = nn.predict(x)[0][0]  # predict returns a 2-d array
    # threshold the continuous output into a 0/1 label
    pred_label = 1 if pred > 0.5 else 0
    print("[INFO] data={}, gt={}, pred={}, pred_label={}".format(
        x, target, pred, pred_label))

# plot learning curve
plt.figure()
def test_simple_xor(self):
    """Evolve a NEAT population on 2-input XOR for 150 generations,
    print the best network's outputs, and save plots of the run."""
    print("testing simple xor")
    # reset global id counters so repeated test runs start from scratch
    Group._GROUP_ID = 0
    Generation._GENERATION_ID = 0
    specie = Group()
    # pre-register connection genes for a fully connected 16 -> 4
    # topology (objects are discarded; presumably this seeds global
    # innovation bookkeeping — TODO confirm against ConnectionGene)
    for i in range(16):
        for j in range(17, 21):
            ConnectionGene(i + 1, j, enabled=True)
    connection_list = []
    z = 0
    for i in range(16):
        for j in range(17, 21):
            connection_list.append([
                i + 1, j,
                random.normalvariate(mu=0.0, sigma=1.0), True, z
            ])
            z += 1
    print(connection_list)
    # NOTE(review): connection_list is built and printed but never used
    # below — the genomes are seeded with a minimal 2-in/1-out topology.
    for i in range(10):
        specie.add_genome(
            Genome(
                [[1, 3, random.normalvariate(mu=0.0, sigma=1.0), True, 0],
                 [2, 3, random.normalvariate(mu=0.0, sigma=1.0), True, 1]],
                2, 1))
    mutation_coefficients = {
        'add_connection': 0.5,
        'split_connection': 0.2,
        'change_weight': 0.8,
        'new_connection_abs_max_weight': 1.0,
        'max_weight_mutation': 0.5
    }
    compatibility_coefficients = {
        'excess_factor': 1.0,
        'disjoint_factor': 1.0,
        'weight_difference_factor': 2.0
    }
    log = Logger()
    gen = Generation([specie],
                     mutation_coefficients=mutation_coefficients,
                     compatibility_coefficients=compatibility_coefficients,
                     logger=log)
    # evolve for 149 further generations
    i = 1
    while i < 150:
        print(i)
        gen = gen.create_new_generation()
        i += 1
    # evaluate the best evolved genome on all four XOR cases
    best_nn = NeuralNetwork(Generation.best_genome)
    a = (best_nn.forward([0.0, 0.0]))
    b = (best_nn.forward([0.0, 1.0]))
    c = (best_nn.forward([1.0, 0.0]))
    d = (best_nn.forward([1.0, 1.0]))
    print(a)
    print(b)
    print(c)
    print(d)
    # XOR fitness: 4 minus the squared error on each of the four cases
    print(4.0 - (a[0] - 0)**2 - (b[0] - 1)**2 - (c[0] - 1)**2
          - (d[0] - 0)**2)
    print(best_nn._genome.fitness)
    # plot the number of groups per generation
    groups_count = []
    for generation_log in gen.logger.log.values():
        groups_count.append(len(generation_log.groups_log))
    plt.plot(list(gen.logger.log.keys()), groups_count)
    plt.xlabel("Generation")
    plt.ylabel("Number of groups")
    plt.title("Groups amount change over evolution")
    plt.savefig("plot of gen count")
    plt.clf()
    # plot adjusted fitness of each group in the final generation
    last_gen_groups_fitness = []
    for fit in gen.logger.log[gen.id - 1].groups_fitness_scores_log.values():
        last_gen_groups_fitness.append(fit[0][2])
    plt.plot(list(gen.logger.log[gen.id - 1].groups_log.keys()),
             last_gen_groups_fitness, 'ro')
    plt.xlabel("Group")
    plt.ylabel("Group adjusted fitness")
    plt.title("Adjusted fitness of groups in last generation")
    plt.savefig("plot of last gen fitness")
    plt.clf()
    # plot best-fitness progression over the whole run
    plt.plot(list(Generation.best_fitnesses.keys()),
             list(Generation.best_fitnesses.values()))
    plt.xlabel("Generation")
    plt.ylabel("Fitness score")
    plt.title("Fitness score progression")
    plt.savefig("plot of fitness")
# prepare training data train = load_data(args['data_path']) train_x = [normalize(x, 255) for x in train['x']] train_y = output2binary(train['y'][0]) # load validation data validate = None if VALIDATE_DATA: try: validate = load_data(args['validation_path']) except FileNotFoundError: print('Validation file not found on given path, omitting\n') VALIDATE_DATA = False # build model from template NN = NeuralNetwork.create(args['template_path']) if len(NN.network) > 2: # if not single layer network do not visualize even if flag is set VISUALIZE = False # log the data for graph losses = [] accuracies = [] validation_accuracies = [] for epoch in range(NN.epochs): NN.dropout_probability = NN.default_dropout_chance NN.fit(train_x, train_y)
# Train a small MLP on scikit-learn's digits dataset and report metrics.
from nn.neuralnetwork import NeuralNetwork
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn import datasets

print('[INFO] loading MNST (sample) dataset...')
digits = datasets.load_digits()
# min/max scale the pixel intensities into [0, 1]
data = digits.data.astype('float')
data = (data - data.min()) / (data.max() - data.min())
print(f'[INFO] samples: {data.shape[0]}, dim: {data.shape[1]}')

(trainX, testX, trainY, testY) = train_test_split(data,
                                                  digits.target,
                                                  test_size=0.25)
# one-hot encode labels; fit the binarizer on the training labels only
# and reuse the same mapping for the test labels (the original fit a
# second binarizer on testY, which can learn a different class order)
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

print('[INFO] training network...')
nn = NeuralNetwork([trainX.shape[1], 32, 16, 10])
print(f'[INFO] {nn}')
nn.fit(trainX, trainY, epochs=1000)

print('[INFO] evaluating network...')
predictions = nn.predict(testX)
predictions = predictions.argmax(axis=1)
print(classification_report(testY.argmax(axis=1), predictions))
def test_easy_forward_propagation(self):
    """Forward propagation on 2-in/2-out genomes; sigmoid outputs
    were verified by hand for these fixed weights."""
    net = NeuralNetwork()

    # all-zero weights: both outputs sit at the zero-input activation
    net.generate_network(GenomeMock([(2, 4, 0, True), (1, 3, 0, True),
                                     (2, 3, 0, True), (1, 4, 0, True)],
                                    2, 2))
    outputs = net.forward([3, 22])
    self.assertEqual(len(outputs), 2)
    self.assertEqual(outputs, [0.5, 0.5])

    # remaining cases only vary node 4's incoming weights; node 3
    # always stays at 0.5
    cases = [
        ([(2, 4, 1, True), (1, 3, 0, True),
          (2, 3, 0, True), (1, 4, 0, True)], 0.9926085),
        ([(2, 4, 1, True), (1, 3, 0, True),
          (2, 3, 0, True), (1, 4, -2, True)], 0.00739157),
        # disabled connections must be ignored by the network
        ([(2, 4, 1, True), (1, 3, 0, False),
          (2, 3, 0, False), (1, 4, -2, True)], 0.00739157),
    ]
    for genes, expected in cases:
        net.generate_network(GenomeMock(genes, 2, 2))
        outputs = net.forward([3, 22])
        self.assertEqual(outputs[0], 0.5)
        self.assertAlmostEqual(outputs[1], expected)
"""Train a 2-2-1 network on XOR and print thresholded predictions."""
from nn.neuralnetwork import NeuralNetwork
import numpy as np

# XOR truth table
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

net = NeuralNetwork([2, 2, 1], alpha=0.5)
net.fit(X, y, epochs=20000)

# report the trained network's output for each sample
for sample, target in zip(X, y):
    prediction = net.predict(sample)[0][0]
    label = 1 if prediction > 0.5 else 0
    print("[INFO] data={}, ground-truth={}, pred={:.4f}, step={}".format(
        sample, target[0], prediction, label))
from nn.neuralnetwork import NeuralNetwork
from nn.dataset import Dataset
from nn.layer import Layer
from nn.utils import kfold_cv_generator
import numpy as np

if __name__ == '__main__':
    # load the iris dataset; `data` is the feature table and `attr` is
    # presumably the target attribute — verify against Dataset
    dataset = Dataset('iris.csv')
    data = dataset.data
    output_real = dataset.attr
    # 4-input / 32-relu / 3-sigmoid classifier
    nn = NeuralNetwork()
    nn.add(Layer(4))
    nn.add(Layer(32, activation='relu'))
    nn.add(Layer(3, activation='sigmoid'))
    # 8-fold cross-validation indices
    id_train, id_test = kfold_cv_generator(data, n_splits=8)
    kf = 1
    acc = []
    for train_idx, test_idx in zip(id_train, id_test):
        train = data.iloc[train_idx]
        test = data.iloc[test_idx]
        print("#FOLD: ", kf)
        # NOTE(review): this call continues beyond the visible chunk
        score = nn.learn(train, test, output_real, kf, epochs=100,
def test_evolve_xor(self):
    """Evolve an 8-input/4-output population for 7 generations on a
    replicated-XOR task and print the best network's outputs.

    Fixes: the original spelled out 33 ConnectionGene assignments
    (c1..c33) and assigned c31 twice while c32 was missing — a
    shadowing typo.  The same call sequence (one (1, 3) gene followed
    by 32 (2, 3) genes) is reproduced without the unused names, and
    the 32-entry genome list is built by an order-identical
    comprehension, so random-draw order is unchanged.
    """
    print("testing advanced xor")
    # reset global id counters so repeated test runs are independent
    Group._GROUP_ID = 0
    Generation._GENERATION_ID = 0
    specie = Group()
    # pre-register connection genes (objects are discarded; presumably
    # this seeds innovation bookkeeping — TODO confirm)
    ConnectionGene(1, 3, enabled=True)
    for _ in range(32):
        ConnectionGene(2, 3, enabled=True)
    # seed 10 genomes, each fully connecting inputs 1-8 to outputs
    # 9-12 with independent N(0, 1) weights
    for _ in range(10):
        genes = [[i, j, random.normalvariate(mu=0.0, sigma=1.0), True]
                 for i in range(1, 9)
                 for j in range(9, 13)]
        specie.add_genome(Genome(genes, 8, 4))
    mutation_coefficients = {
        'add_connection': 0.5,
        'split_connection': 0.2,
        'change_weight': 0.8,
        'new_connection_abs_max_weight': 1.0,
        'max_weight_mutation': 0.25
    }
    compatibility_coefficients = {
        'excess_factor': 1.0,
        'disjoint_factor': 1.0,
        'weight_difference_factor': 0.5
    }
    log = Logger()
    gen = Generation([specie],
                     mutation_coefficients=mutation_coefficients,
                     compatibility_coefficients=compatibility_coefficients,
                     logger=log)
    # evolve for 7 further generations
    for i in range(1, 8):
        print(i)
        gen = gen.create_new_generation()
    # evaluate the best evolved genome on one representative input
    best_nn = NeuralNetwork(Generation.best_genome)
    print(best_nn.forward([0, 1, 1, 0, 0, 1, 1, 0]))
    print(str(best_nn._genome.fitness) + "/" + str(256 * 4))
# min-max normalization of pixel intensities into [0, 1]
data = (data - data.min()) / (data.max() - data.min())
print("[INFO] samples = {}, dim = {}".format(data.shape[0], data.shape[1]))

## split datasets 75%-25%
trainX, testX, trainY, testY = train_test_split(data, digits.target,
                                                test_size=0.25)
# One-hot encoding targets: fit on the training labels only and reuse
# the fitted mapping for test (the original refit a second binarizer
# on testY, which can learn a different class ordering)
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

## train the model
print("[INFO] training network....")
nn = NeuralNetwork([trainX.shape[1], 32, 16, 16, 10], alpha=0.5)
print("[INFO] {}".format(nn))
displayLoss = nn.fit(trainX, trainY, epochs=1000)

## test
# classification_report expects (y_true, y_pred); the original passed
# the predictions first, which swaps precision and recall per class
print("[INFO] evaluating...")
pred_probs = nn.predict(testX)
pred_labels = pred_probs.argmax(axis=1)
print(classification_report(testY.argmax(axis=1), pred_labels))

# plot learning curve
plt.figure()
# NOTE(review): assumes nn.fit records one loss value per 100 epochs
# (11 points for epochs=1000) — confirm against NeuralNetwork.fit
plt.plot(np.arange(0, 1100, 100), displayLoss)
plt.title("Loss on MNIST samples")  # original had a no-op .format(nn)
plt.xlabel("epoch #")
plt.ylabel("loss")