import copy
import random

import numpy as np


def crossover(self):
    # Introduce crossover: breed new agents until the next generation is full.
    while len(self.new_generation) < self.number_of_agents:
        net = NeuralNetwork()
        layer1 = Layer(210, 10)
        layer2 = Layer(10, 10)
        layer3 = Layer(10, 4)
        net.layers.append(layer1)
        net.layers.append(layer2)
        net.layers.append(layer3)
        # Cross over the weights of each layer from the two parent nets.
        for i, layer in enumerate(net.layers):
            random_place_in_genome = random.randint(1, len(layer.weights) - 1)
            layer.weights = np.concatenate(
                (self.new_generation[0].layers[i].weights[:random_place_in_genome],
                 self.new_generation[1].layers[i].weights[random_place_in_genome:]))
        self.new_generation.append(copy.deepcopy(net))
    print(f"\nNumber of agents for next generation: {len(self.new_generation)}")
    self.list_of_neural_nets = copy.deepcopy(self.new_generation)
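# A minimal mutation sketch to pair with crossover() above. The mutation_rate
# argument and the Gaussian perturbation are assumptions, not part of the
# original code; the sketch only assumes each layer stores its weights as a
# NumPy array, as the slicing in crossover() implies.
def mutate(self, net, mutation_rate=0.05):
    for layer in net.layers:
        if random.random() < mutation_rate:
            # Nudge the whole weight matrix with small Gaussian noise.
            layer.weights += np.random.normal(0.0, 0.1, size=layer.weights.shape)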
def __init__(self, *layers, output_type='classification', inner_activation='ReLU'):
    '''
    Construct an artificial neural network.

    The user specifies the number of neurons per layer (sequential, including
    the input and output layers). The user can choose classification or
    regression; for classification, the number of output neurons determines
    whether the output layer uses a sigmoid (one neuron) or a softmax
    (several neurons).

    The inner activation is ReLU by default, but the user can specify another
    function; currently supported are 'none', 'sigmoid' and 'ReLU'.
    '''
    self.labels = None
    self.encoded = False
    network = []
    prev = Layer(layers[0])
    network.append(prev)
    for l in layers[1:-1]:
        curr = Layer(l, prev, activation=inner_activation)
        network.append(curr)
        prev = curr
    if output_type == 'classification':
        # Flag the network for potential label encoding later.
        self.labels = True
        out = 'sigmoid' if layers[-1] == 1 else 'softmax'
        network.append(Layer(layers[-1], prev, activation=out))
    else:
        network.append(Layer(layers[-1], prev, activation='none'))
    self.network = network
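# Hypothetical usage of the constructor above, assuming it belongs to a class
# named NeuralNetwork; the layer sizes here are illustrative only.
clf = NeuralNetwork(4, 8, 3, output_type='classification')   # softmax output
reg = NeuralNetwork(4, 8, 1, output_type='regression')       # 'none' (linear) output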
def populate_list_of_neural_nets(self):
    # Build the initial population of agents, each with a fixed 210-10-10-4 topology.
    for _ in range(self.number_of_agents):
        net = NeuralNetwork()
        layer1 = Layer(210, 10)
        layer2 = Layer(10, 10)
        layer3 = Layer(10, 4)
        net.layers.append(layer1)
        net.layers.append(layer2)
        net.layers.append(layer3)
        self.list_of_neural_nets.append(net)
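# A hedged sketch of the selection step that crossover() presupposes: it reads
# parents from self.new_generation[0] and [1], so something must seed that list
# with the fittest agents first. The fitness_of() helper is hypothetical; the
# original code does not show how fitness is stored.
import copy


def selection(self, number_of_parents=2):
    ranked = sorted(self.list_of_neural_nets, key=fitness_of, reverse=True)
    self.new_generation = copy.deepcopy(ranked[:number_of_parents])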
def test_model_output_shape(self):
    """
    If the input has shape (m, i) and the last layer has n neurons,
    the output must have shape (m, n).
    """
    model = Model([])
    add_many_layers_to_model(model, Layer)  # pass the Layer class as an argument
    last_layer_neurons = 3
    model.add(Layer(last_layer_neurons))
    assert testing_model_outputs(
        model,
        # X.shape[0] is the number of examples in the input
        condition=lambda X, Y: Y.shape == (X.shape[0], last_layer_neurons))
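# Hedged sketches of the two helpers the test above assumes; their real
# implementations are not shown in the original. The hidden-layer sizes, the
# input width of 5, and the model.predict() call are assumptions made for this
# sketch only.
import numpy as np


def add_many_layers_to_model(model, layer_class, sizes=(5, 16, 8)):
    # Stack a few hidden layers of the given class onto the model.
    for size in sizes:
        model.add(layer_class(size))


def testing_model_outputs(model, condition, n_examples=10, n_features=5):
    # Feed random inputs through the model and check the given condition.
    X = np.random.rand(n_examples, n_features)
    Y = model.predict(X)
    return condition(X, Y)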
import numpy as np

import Layer


def __init__(self, list_objective_functions, list_of_dimensions, learning_rate):
    self.input_dim = list_of_dimensions[0]
    self.learning_rate = learning_rate
    if list_objective_functions.shape[0] != list_of_dimensions.shape[0] - 1:
        raise ValueError(
            'The number of objective functions must be one less than '
            'the number of dimensions')
    list_of_layers = []
    for i in range(list_of_dimensions.shape[0] - 1):
        input_dimension = list_of_dimensions[i]
        output_dimension = list_of_dimensions[i + 1]
        new_layer = Layer.Layer_3(input_dimension, output_dimension,
                                  list_objective_functions[i])
        list_of_layers.append(new_layer)
    self.layer_list = np.array(list_of_layers)
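# Illustrative construction of the network above. Both list arguments must be
# NumPy arrays (the constructor reads .shape[0]); the class name Network and
# the string names of the per-layer functions are assumptions for this example
# only (the real code may pass callables instead).
net = Network(
    list_objective_functions=np.array(['ReLU', 'ReLU', 'sigmoid']),
    list_of_dimensions=np.array([8, 16, 16, 1]),   # 3 layers => 3 functions
    learning_rate=0.01,
)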
""" Created on Sun May 31 16:52:42 2020 @author: radekrehacek """ from neural_network import Layer, NeuralNetwork grid = [[(255, 0, 0) for n in range(10)] for i in range(20)] net = NeuralNetwork() net.read_inputs(grid) #Layer(inputs, neurons) layer1 = Layer(210, 10) layer2 = Layer(10, 10) layer3 = Layer(10, 4) net.layers.append(layer1) net.layers.append(layer2) net.layers.append(layer3) layer1.forward(net.inputs) layer1.activation_sigmoid(layer1.output) print(layer1.output) print() layer2.forward(layer1.output) layer2.activation_sigmoid(layer2.output)