Example #1
 def __init__(self, initial_population, generations):
     """
     A genetic algorithm is used to learn the weights and bias of a topology
     fixed network.
     """
     super().__init__(initial_population)
     #self.expected_precision = expected_precision
     self.generation_span = generations
     self.precision = 0
     self.epoch = 0
     self.num_inputs = 4
     self.neurons_per_layer = [self.num_inputs, 4, 3]
     # Build Fixed Neural Network, with 4 inputs
     self.neural_network = NeuralNetwork(self.num_inputs)
     # The network has 3 layers with 4, 4 and 3 neurons respectively
     self.neural_network.buildFixed(self.neurons_per_layer)
     self.test_values = 20
     # Parse data set
     file_manager = FileManager()
     file_manager.load_file("../Datasets/iris.data")
     self.train_data = file_manager.get_train_data()
     self.test_data = file_manager.get_test_data()
     self.neurons_position = []
     self.x_plot = []
     self.y_plot = []
Example #2
def main():
    # ===================================
    # Settings
    # ===================================
    csv_filename = "data/creditcard.csv"
    hidden_layers = [5]
    eta = 0.1
    n_epochs = 500
    n_folds = 3

    X, y, n_classes = utils.read_csv(csv_filename, target_name="Class")
    N, d = X.shape
    print(" -> X.shape = {}, y.shape = {}, n_classes = {}\n".format(X.shape, y.shape, n_classes))

    print("Running")
    idx_all = np.arange(0, N)
    idx_folds = utils.crossval_folds(N, n_folds, seed=1)

    acc_train, acc_valid = list(), list()
    print("Cross-validation")
    for i, idx_valid in enumerate(idx_folds):
        idx_train = np.delete(idx_all, idx_valid)
        X_train, y_train = X[idx_train], y[idx_train]
        X_valid, y_valid = X[idx_valid], y[idx_valid]

        model = NeuralNetwork(input_dim=d, output_dim=n_classes,
                              hidden_layers=hidden_layers, seed=1)
        model.train(X_train, y_train, eta=eta, n_epochs=n_epochs)

        ypred_train = model.predict(X_train)
        ypred_valid = model.predict(X_valid)

        acc_train.append(100 * np.sum(y_train == ypred_train) / len(y_train))
        acc_valid.append(100 * np.sum(y_valid == ypred_valid) / len(y_valid))
        print("TP: " + str(np.sum((y_valid == ypred_valid) & (y_valid == 1))))
        print("TN: " + str(np.sum((y_valid == ypred_valid) & (y_valid == 0))))
        print("FP: " + str(np.sum((y_valid != ypred_valid) & (y_valid == 1))))
        print("FN: " + str(np.sum((y_valid != ypred_valid) & (y_valid == 0))))
        TP = np.sum((y_valid == ypred_valid) & (y_valid == 1))
        TN = np.sum((y_valid == ypred_valid) & (y_valid == 0))
        FP = np.sum((y_valid != ypred_valid) & (y_valid == 1))
        FN = np.sum((y_valid != ypred_valid) & (y_valid == 0))
        precision = calculate_precision(TP, FP)
        recall = calculate_recall(TP, FN)

        print(str(f1_score(recall, precision)))
        print(" Fold {}/{}: acc_train = {:.2f}%, acc_valid = {:.2f}% (n_train = {}, n_valid = {})".format(
            i + 1, n_folds, acc_train[-1], acc_valid[-1], len(X_train), len(X_valid)))

    print("  -> acc_train_avg = {:.2f}%, acc_valid_avg = {:.2f}%".format(
        sum(acc_train) / float(len(acc_train)), sum(acc_valid) / float(len(acc_valid))))
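The calculate_precision, calculate_recall and f1_score helpers called above are not shown in this listing; a minimal sketch of the standard definitions they presumably implement (names and argument order taken from the calls above):

def calculate_precision(TP, FP):
    # Precision = TP / (TP + FP); guard against a fold with no positive predictions
    return TP / (TP + FP) if (TP + FP) > 0 else 0.0

def calculate_recall(TP, FN):
    # Recall = TP / (TP + FN); guard against a fold with no positive labels
    return TP / (TP + FN) if (TP + FN) > 0 else 0.0

def f1_score(recall, precision):
    # Harmonic mean of precision and recall
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)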
Example #3
    def build_network(self):
        network = NeuralNetwork(2)
        neuron_1 = SigmoidNeuron()
        neuron_2 = SigmoidNeuron()
        neuron_3 = SigmoidNeuron()
        neuron_1.weights = [0.3, 0.6]
        neuron_2.weights = [1.2, -0.6]
        neuron_3.weights = [0.7, 0.6]

        neuron_4 = SigmoidNeuron()
        neuron_5 = SigmoidNeuron()
        neuron_4.weights = [0.5, 0.2, 1.1]
        neuron_5.weights = [-0.5, 0.5, 0.5]

        first_layer = FirstNeuralLayer()
        first_layer.neuron_array = [neuron_1, neuron_2, neuron_3]
        output_layer = LastNeuralLayer()
        output_layer.neuron_array = [neuron_4, neuron_5]
        first_layer.setNextLayer(output_layer)
        output_layer.setPreviousLayer(first_layer)
        network.first_layer = first_layer
        network.output_layer = output_layer
        return network
Example #4
    def test_recognize(self):
        # Load one image which contains trees
        img_path = "../images/test_images/test_completa1_2.png"
        img = cv.imread(img_path)
        coordinates = (None, None)

        # Load retinanet
        retinanet = NeuralNetwork("../src/model/model.h5")

        # Run the detection of trees
        tree_detector = TreeDetector(retinanet)
        trees = tree_detector.recognize(img, coordinates)

        # If at least one 'Tree' object is detected, the neural network is working correctly.
        self.assertGreater(len(trees), 0)
Example #5
    def test_network_one_epoch(self):
        first_layer = FirstNeuralLayer()
        output_layer = LastNeuralLayer()
        hidden1 = SigmoidNeuron()
        hidden1.weights = [0.1, 0.2]
        hidden1.setBias(0.1)
        hidden2 = SigmoidNeuron()
        hidden2.weights = [0.2, 0.3]
        hidden2.setBias(0.1)
        hidden1.setC(0.5)
        hidden2.setC(0.5)

        out = SigmoidNeuron()
        out.weights = [0.3, 0.4]
        out.setBias(0.1)
        out.setC(0.5)
        first_layer.neuron_array = [hidden1, hidden2]
        output_layer.neuron_array = [out]

        first_layer.setNextLayer(output_layer)
        output_layer.setPreviousLayer(first_layer)

        neural_network = NeuralNetwork(2)
        neural_network.first_layer = first_layer
        neural_network.output_layer = output_layer

        neural_network.train(1, [[0.9, 0.8, [1]]])
        print(hidden1.output)

        #Network weights
        self.assertEqual(hidden1.weights[0], 0.1028369848921488)
        self.assertEqual(hidden1.weights[1], 0.20252176434857672)
        self.assertEqual(hidden2.weights[0], 0.20364750008778296)
        self.assertEqual(hidden2.weights[1], 0.3032422223002515)
        self.assertEqual(out.weights[0], 0.3254179929201722)
        self.assertEqual(out.weights[1], 0.42717415579920276)

        #Network biases
        self.assertEqual(hidden1.bias, 0.10315220543572089)
        self.assertEqual(out.bias, 0.14332974979557217)

        #Network deltas
        self.assertEqual(hidden1.delta, 0.006304410871441775)
        self.assertEqual(hidden2.delta, 0.008105555750628777)
        self.assertEqual(out.delta, 0.08665949959114436)

        #Network outputs
        self.assertEqual(hidden1.output, 0.5866175789173301)
        self.assertEqual(hidden2.output, 0.6271477663131956)
        self.assertEqual(out.output, 0.6287468135085144)
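For reference, the asserted values above are consistent with a standard sigmoid forward/backward pass with learning rate C = 0.5; a minimal, standalone check of the arithmetic (plain Python, independent of the classes under test):

import math

sigmoid = lambda z: 1.0 / (1.0 + math.exp(-z))

h1 = sigmoid(0.9 * 0.1 + 0.8 * 0.2 + 0.1)   # ~0.58662, hidden1.output
h2 = sigmoid(0.9 * 0.2 + 0.8 * 0.3 + 0.1)   # ~0.62715, hidden2.output
o = sigmoid(h1 * 0.3 + h2 * 0.4 + 0.1)      # ~0.62875, out.output

d_o = o * (1 - o) * (1 - o)                 # ~0.08666, out.delta (target = 1)
d_h1 = h1 * (1 - h1) * (0.3 * d_o)          # ~0.00630, hidden1.delta
d_h2 = h2 * (1 - h2) * (0.4 * d_o)          # ~0.00811, hidden2.delta

# Updates follow w += C * delta * input and bias += C * delta, e.g.:
w_h1_0 = 0.1 + 0.5 * d_h1 * 0.9             # ~0.10284, hidden1.weights[0]
b_h1 = 0.1 + 0.5 * d_h1                     # ~0.10315, hidden1.bias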
Example #6
def plot_hidden_layers_vs_precision_rate(train_data, test_data):
    hidden_layers = []
    precision_rates = []
    for i in range(20):
        hidden_layers.append(i)
        # Build Neural Network
        neural_network = NeuralNetwork(4)
        neural_network.setRandomLayers(i, 8, 8, 3)

        # Train Network
        neural_network.train(1000, train_data)

        precision_rates.append(getPrecision(neural_network, test_data))

    # Plot
    plt.figure()
    plt.title("Hidden Layers v/s Precision Rate", fontsize=20)
    plt.xlabel('Hidden Layers')
    plt.ylabel('Precision Rate')
    plt.plot(hidden_layers, precision_rates)
    plt.show()
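The getPrecision helper used here is not included in the listing; a minimal sketch of what it presumably computes, assuming the feed/interp API and the row layout (features first, class label last) seen in Example #14's evaluate_fitness:

def getPrecision(neural_network, test_data):
    # Fraction of test rows whose interpreted network output matches the label
    hits = 0
    for row in test_data:
        expected = row[-1]
        guess = neural_network.interp(neural_network.feed(row[0:-1]))
        if guess == expected:
            hits += 1
    return hits / len(test_data)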
Example #7
def build_network():
    # Build Neural Network
    neural_network = NeuralNetwork(4)
    neural_network.setRandomLayers(1, 4, 4, 3)
    return neural_network
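A hypothetical way to use this builder, mirroring the train/evaluate calls from Example #6; train_data, test_data and getPrecision are assumed to be provided by the surrounding project:

neural_network = build_network()
neural_network.train(1000, train_data)          # 1000 training epochs
print(getPrecision(neural_network, test_data))  # precision rate on held-out rows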
Example #8
def main():
    # ===================================
    # Settings
    # ===================================
    csv_filename = "data/Leeds02.csv"
    hidden_layers = [5] # number of nodes in hidden layers i.e. [layer1, layer2, ...]
    eta = 0.1 # learning rate
    n_epochs = 400 # number of training epochs
    n_folds = 4 # number of folds for cross-validation
    seed_crossval = 1 # seed for cross-validation
    seed_weights = 1 # seed for NN weight initialization

    # ===================================
    # Read csv data + normalize features
    # ===================================
    print("Reading '{}'...".format(csv_filename))
    X, y, n_classes = utils.read_csv(csv_filename, target_name="y", normalize=True)
    N, d = X.shape
    print(" -> X.shape = {}, y.shape = {}, n_classes = {}\n".format(X.shape, y.shape, n_classes))

    print("Neural network model:")
    print(" input_dim = {}".format(d))
    print(" hidden_layers = {}".format(hidden_layers))
    print(" output_dim = {}".format(n_classes))
    print(" eta = {}".format(eta))
    print(" n_epochs = {}".format(n_epochs))
    print(" n_folds = {}".format(n_folds))
    print(" seed_crossval = {}".format(seed_crossval))
    print(" seed_weights = {}\n".format(seed_weights))

    # ===================================
    # Create cross-validation folds
    # ===================================
    idx_all = np.arange(0, N)
    idx_folds = utils.crossval_folds(N, n_folds, seed=seed_crossval) # list of list of fold indices

    # ===================================
    # Train/evaluate the model on each fold
    # ===================================
    acc_train, acc_valid = list(), list()  # training/test accuracy score
    print("Cross-validating with {} folds...".format(len(idx_folds)))
    for i, idx_valid in enumerate(idx_folds):

        # Collect training and test data from folds
        idx_train = np.delete(idx_all, idx_valid)
        X_train, y_train = X[idx_train], y[idx_train]
        X_valid, y_valid = X[idx_valid], y[idx_valid]

        # Build neural network classifier model and train
        model = NeuralNetwork(input_dim=d, output_dim=n_classes,
                              hidden_layers=hidden_layers, seed=seed_weights)
        model.train(X_train, y_train, eta=eta, n_epochs=n_epochs)

        # Make predictions for training and test data
        ypred_train = model.predict(X_train)
        ypred_valid = model.predict(X_valid)

        # Compute training/test accuracy score from predicted values
        acc_train.append(100*np.sum(y_train==ypred_train)/len(y_train))
        acc_valid.append(100*np.sum(y_valid==ypred_valid)/len(y_valid))

        # Print cross-validation result
        print(" Fold {}/{}: acc_train = {:.2f}%, acc_valid = {:.2f}% (n_train = {}, n_valid = {})".format(
            i+1, n_folds, acc_train[-1], acc_valid[-1], len(X_train), len(X_valid)))

    # ===================================
    # Print results
    # ===================================
    print("  -> acc_train_avg = {:.2f}%, acc_valid_avg = {:.2f}%".format(
        sum(acc_train)/float(len(acc_train)), sum(acc_valid)/float(len(acc_valid))))
Example #9
import tensorflow as tf
from src.NeuralNetwork import NeuralNetwork

# config
config_proto = tf.ConfigProto()
config_proto.gpu_options.allow_growth = True
config_proto.allow_soft_placement = True
config_proto.log_device_placement = False
my_config = {}
my_config['train_filename'] = 'data/train.arff'
my_config['test_filename'] = 'data/test.arff'
my_config['batch_size'] = 1000
my_config['lr'] = 1e-5
my_config['iterations'] = 20000
my_config['snapshot_frequency'] = 20000
my_config['print_frequency'] = 500
my_config['validation_frequency'] = 1000
my_config['num_processes'] = 6
my_config['num_folds'] = 10
my_config['gpu'] = 0
my_config['cost_matrix'] = [[0.0, 5.0], [50.0, 0.0]]

net = NeuralNetwork(config={'tf': config_proto, 'user': my_config})
net.run_train()
Example #10
import math

def init_network(architecture):
    # Logistic sigmoid used as the network's activation/normalization function
    normalize = lambda num: 1 / (1 + math.e**-num)
    return NeuralNetwork(architecture, normalize)
Example #11
6. take single genome then duplicate with mutations
'''

if __name__ == '__main__':
    x = [[0, 0], [0, 1], [1, 1], [1, 0]]
    y = [[0], [1], [0], [1]]

    g = GenomeFactory.create_genome(2, 1)
    pop = create_population(g)

    results = []
    generations = 5

    for gen in range(0, generations):
        for genome in pop:
            results.append(sum(NeuralNetwork.feedforward(genome=genome, x_input=x, y_out=y, fitness_fnc=sum)))
        print("Finished feedforward")
        species_dict = sort_species(pop)
        print(len(species_dict))
        print("Finished speciation")

        new_pop = []
        for index, species in species_dict.items():
            # crossover the two best genomes from each species
            new_num_species = calculate_new_number_of_species(species)
            best_genomes = get_best_genomes(species)
            new_species = crossover_species(best_genomes, new_num_species)
            new_pop.extend(new_species)
        pop = new_pop

    best_genome = get_best_genomes(pop, amount=1)
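    # Hypothetical follow-up (not in the original excerpt): score the selected
    # genome, assuming get_best_genomes(..., amount=1) returns a list and
    # reusing the NeuralNetwork.feedforward call from the loop above.
    best_score = sum(NeuralNetwork.feedforward(genome=best_genome[0], x_input=x,
                                               y_out=y, fitness_fnc=sum))
    print("Best genome fitness:", best_score)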
Example #12
import cgi
import json
import socketserver
from http.server import BaseHTTPRequestHandler

import cv2
import cv2 as cv
import numpy as np
import time

from src.NeuralNetwork import NeuralNetwork
from src.TreeDetector import TreeDetector
from src.TreePainter import TreePainter

# Load the model once at module level so it is not reloaded on every request
retinanet = NeuralNetwork("./src/model/model.h5")


class Server(BaseHTTPRequestHandler):
    """ Works like Server HTTP """
    def _set_headers(self):
        """ Write headers of responses """
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()

    def do_POST(self):
        """ Precess POSTed data (image)  """
        message = self._extract_msg()

        # Extract the image from the JSON payload, which must be Base64-encoded
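A hypothetical entry point for the handler above, using the socketserver module it imports (host and port are placeholders):

if __name__ == "__main__":
    # Bind the Server handler to a local port and serve requests forever
    httpd = socketserver.TCPServer(("0.0.0.0", 8000), Server)
    httpd.serve_forever()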
Example #13
    num_iters = data.num_iters(batch_size)
    loss = []
    avg_loss = 0.
    avg_acc = 0.
    for epoch in range(1, n_epochs + 1):
        for _ in range(num_iters):
            batch_x, batch_y = data.next_batch(batch_size)
            batch_y = to_categorical(batch_y)

            avg_loss += model.fit(batch_x, batch_y)
            avg_acc += model.accuracy()
        if epoch % 10 == 0:
            n = 10. * num_iters
            print "Epoch: {}; Loss: {}".format(epoch, avg_loss / n)
            print "Acc: {}".format(avg_acc / n)
            loss.append(avg_loss)
            avg_loss = 0.
            avg_acc = 0.

    test_x, test_y = data.test
    pred = model.predict(test_x)
    test_acc = np.mean(test_y == pred)
    print "Test accuracy: {}".format(test_acc)


if __name__ == '__main__':
    data = Dataset(DATA_DIR)
    inp_shape, num_classes = data.inp_shape(), data.num_classes()
    model = NeuralNetwork(inp_shape, num_classes, hidden_units=64)
    train(data, model)
Example #14
class GeneticFixedTopology(AbstractGeneticAlgorithm):
    def __init__(self, initial_population, generations):
        """
        A genetic algorithm is used to learn the weights and bias of a topology
        fixed network.
        """
        super().__init__(initial_population)
        #self.expected_precision = expected_precision
        self.generation_span = generations
        self.precision = 0
        self.epoch = 0
        self.num_inputs = 4
        self.neurons_per_layer = [self.num_inputs, 4, 3]
        # Build Fixed Neural Network, with 4 inputs
        self.neural_network = NeuralNetwork(self.num_inputs)
        # The network has 3 layers with 4, 4 and 3 neurons respectively
        self.neural_network.buildFixed(self.neurons_per_layer)
        self.test_values = 20
        # Parse data set
        file_manager = FileManager()
        file_manager.load_file("../Datasets/iris.data")
        self.train_data = file_manager.get_train_data()
        self.test_data = file_manager.get_test_data()
        self.neurons_position = []
        self.x_plot = []
        self.y_plot = []

    def run(self):
        """
        Execute the genetic algorithm to find the best weights and bias
        for fixed neural network.
        """
        self.initialize_population(self.population_size)
        while self.generation < self.generation_span:
            self.precision = 0
            self.generation += 1
            self.x_plot.append(self.generation)
            child_population = []
            while len(child_population) < self.population_size:
                father = self.selection()
                mother = self.selection()
                first_child, second_child = self.cross_over(father, mother)
                self.mutate(first_child)
                self.mutate(second_child)
                father_fitness = self.evaluate_fitness(father)
                mother_fitness = self.evaluate_fitness(mother)
                fitness_first_child = self.evaluate_fitness(first_child)
                fitness_second_child = self.evaluate_fitness(second_child)

                if fitness_first_child >= fitness_second_child:
                    if fitness_first_child >= father_fitness and fitness_first_child >= mother_fitness:
                        child_population.append(first_child)
                        self.set_best_new_population_fitness(
                            fitness_first_child)
                    else:
                        if father_fitness > mother_fitness:
                            child_population.append(father)
                            self.set_best_new_population_fitness(
                                father_fitness)
                        else:
                            child_population.append(mother)
                            self.set_best_new_population_fitness(
                                mother_fitness)

                else:
                    if fitness_second_child >= father_fitness and fitness_second_child >= mother_fitness:
                        child_population.append(second_child)
                        self.set_best_new_population_fitness(
                            fitness_second_child)
                    else:
                        if father_fitness > mother_fitness:
                            child_population.append(father)
                            self.set_best_new_population_fitness(
                                father_fitness)
                        else:
                            child_population.append(mother)
                            self.set_best_new_population_fitness(
                                mother_fitness)

            self.y_plot.append(self.precision)
            self.population = child_population

        return self.get_best_neural_network()

    def set_best_new_population_fitness(self, fitness):
        ratio = float(fitness / self.test_values)
        if ratio > self.precision:
            self.precision = ratio

    def create_random_bias(self):
        return random.uniform(1.0, 3.0)

    def create_random_weight(self):
        return random.uniform(0.0, 2.0)

    def initialize_population(self, number_of_individuals):
        """
        Creates a fixed number of individuals with the same properties,
        but different weights and bias.
        """
        num_inputs = self.num_inputs
        position = 0
        #Input layer
        for initial_neuron in range(self.num_inputs):
            self.neurons_position.append(position)
            position += 2

        i = 1
        for num_neurons in self.neurons_per_layer[1:]:
            for j in range(num_neurons):
                self.neurons_position.append(position)
                position += (self.neurons_per_layer[i - 1] + 1)
            i += 1
        self.number_genes = position
        super().initialize_population(number_of_individuals)

    def evaluate_fitness(self, individual):
        """
        Returns the fitness value of an individual.
        """
        fitness = 0
        if individual.fitness is None:
            self.neural_network.load_network(individual.serialized_values)
            for i in range(self.test_values):
                data = self.test_data[random.randint(0,
                                                     len(self.test_data) - 1)]
                correct_result = data[-1]
                raw_result = self.neural_network.feed(data[0:-1])
                guess_result = self.neural_network.interp(raw_result)
                if correct_result == guess_result:
                    fitness += 1
            individual.fitness = fitness
        return individual.fitness

    def get_best_neural_network(self):
        best_individual = self.population[0]
        if best_individual.fitness is None:
            self.evaluate_fitness(best_individual)
        for individual in self.population:
            if individual.fitness is None:
                self.evaluate_fitness(individual)
            if best_individual.fitness < individual.fitness:
                best_individual = individual
        return self.neural_network.load_network(
            best_individual.serialized_values)

    def mutate(self, individual):
        for index in range(len(self.neurons_position)):
            if self.mutation_rate > random.uniform(0, 1):
                if index != len(self.neurons_position) - 1:
                    dif = self.neurons_position[
                        index + 1] - self.neurons_position[index]
                else:
                    dif = len(individual.serialized_values
                              ) - 1 - self.neurons_position[index]
                for j in range(dif):
                    individual.serialized_values[index + j] = random.uniform(
                        -15.0, 15.0)

    def plot_results(self):
        plt.figure()
        plt.title("Precisión", fontsize=20)
        plt.xlabel('genercación')
        plt.ylabel('precisión')
        plt.plot(self.x_plot, self.y_plot)
        plt.show()
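A hypothetical driver for this class, using only the constructor, run() and plot_results() shown above (population size and generation count are placeholder values):

if __name__ == "__main__":
    algorithm = GeneticFixedTopology(initial_population=50, generations=100)
    best_network = algorithm.run()
    algorithm.plot_results()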