def _initialize_hidden_layers(self, neural_network):
    """Initialize hidden layers with random sizes plus a final one-neuron layer.

    Draws the number of layers from [init_minimum_layers, self.layers].
    Every layer except the last receives a random neuron count in
    [1, init_maximum_neurons_per_layer]; the final hidden layer always
    holds a single neuron with a random non-linear activation function.

    Args:
        neural_network: network whose bias sensor the new neurons connect to.

    Returns:
        list of hidden layers, each a list of neurons.
    """
    number_of_layers = randint(self.init_minimum_layers, self.layers)
    # "- 1" because the final single-neuron layer is appended separately below.
    neurons_per_layer = [
        randint(1, self.init_maximum_neurons_per_layer)
        for _ in range(number_of_layers - 1)
    ]
    hidden_layers = [
        [
            create_neuron(
                None,
                neural_network.bias,
                maximum_bias_connection_weight=self.maximum_bias_connection_weight)
            for _ in range(size)
        ]
        for size in neurons_per_layer
    ]
    # Final hidden layer: delegate to the existing helper instead of
    # duplicating its random-activation logic inline.
    hidden_layers.append([
        self._create_final_hidden_neuron(
            neural_network.bias,
            maximum_bias_connection_weight=self.maximum_bias_connection_weight)
    ])
    return hidden_layers
def _create_final_hidden_neuron(self, bias, maximum_bias_connection_weight=None):
    """Create the single final hidden neuron with a random non-linear activation.

    Args:
        bias: bias sensor the neuron is connected to (may be falsy for none).
        maximum_bias_connection_weight: bound for the random bias weight.

    Returns:
        The newly created neuron.
    """
    chosen_activation = choice(list(_NON_LINEAR_ACTIVATION_FUNCTIONS.keys()))
    return create_neuron(
        chosen_activation,
        bias,
        maximum_bias_connection_weight=maximum_bias_connection_weight)
def create_neuron(activation_function=None, bias=None, maximum_bias_connection_weight=1.0):
    """Create a neuron, optionally connected to *bias* with a random weight.

    Args:
        activation_function: activation name; when falsy, a non-linear
            activation function is picked at random.
        bias: bias sensor to connect to (skipped when falsy).
        maximum_bias_connection_weight: bound for the uniform bias weight.

    Returns:
        The newly created neuron.
    """
    chosen = activation_function or choice(
        list(_NON_LINEAR_ACTIVATION_FUNCTIONS.keys()))
    neuron = Neuron(array([]), list(), chosen)
    if bias:
        # Wire the bias in via a connection carrying a uniform random weight.
        weight = uniform(-maximum_bias_connection_weight,
                         maximum_bias_connection_weight)
        Connection(bias, neuron, weight)
    return neuron
def create_cnn_neuron(activation_function=None):
    """Create a CNN neuron: convolutional or pooling with equal probability."""
    # NOTE(review): this definition is shadowed by the later create_cnn_neuron
    # redefinition in this module (the one taking conv_prob), so it is dead
    # code as written — consider removing it or merging the two.
    # If no activation function is supplied, pick a non-linear one at random.
    if not activation_function:
        activation_function = choice(
            list(_NON_LINEAR_ACTIVATION_FUNCTIONS.keys()))
    # Coin flip (p = 0.5 each way) between the two neuron kinds.
    what_layer = np.random.rand()
    if what_layer >= 0.5:
        neuron = ConvNeuron(np.array([]), list(), activation_function)
    else:
        neuron = PoolNeuron(np.array([]), list(), activation_function)
    return neuron
def create_cnn_neuron(activation_function=None, conv_prob=0.9):
    """Create a CNN neuron: convolutional with probability *conv_prob*, else pooling.

    Args:
        activation_function: activation name; when falsy, a non-linear
            activation function is picked at random.
        conv_prob: probability of creating a ConvNeuron rather than a PoolNeuron.

    Returns:
        A ConvNeuron or PoolNeuron.
    """
    if not activation_function:
        activation_function = choice(
            list(_NON_LINEAR_ACTIVATION_FUNCTIONS.keys()))
    # TODO(Konrad): conv_prob was added for flexibility — confirm the 0.9 default.
    # (The original left this note as a bare string statement, which is a
    # no-op expression at runtime; it is now a proper comment.)
    if np.random.rand() < conv_prob:
        return ConvNeuron(np.array([]), list(), activation_function)
    return PoolNeuron(np.array([]), list(), activation_function)
def create_network_from_topology(topology):
    """Create a neural network whose hidden layers match *topology*.

    Args:
        topology: iterable of layer sizes; each entry becomes one hidden
            layer with that many neurons. Must contain at least one entry.

    Returns:
        NeuralNetwork with a bias sensor, the hidden layers, and a single
        'identity' output neuron.
    """
    # Bias sensor with a constant input of 1.
    bias = Sensor(array([1]))
    # NOTE(review): one activation function is drawn once and shared by every
    # hidden neuron — confirm this is intended rather than a per-neuron draw.
    activation_function = choice(list(_NON_LINEAR_ACTIVATION_FUNCTIONS.keys()))
    hidden_layers = [
        [create_neuron(activation_function, bias) for _ in range(layer_size)]
        for layer_size in topology
    ]
    output_neuron = create_neuron('identity', bias)
    # Wire consecutive hidden layers together, then the last hidden layer to
    # the output neuron. This single pass replaces the original two-branch
    # if/else wiring and behaves identically for one or more layers.
    for previous_layer, next_layer in zip(hidden_layers, hidden_layers[1:]):
        _connect_nodes(previous_layer, next_layer)
    _connect_nodes(hidden_layers[-1], [output_neuron])
    return NeuralNetwork(None, bias, hidden_layers, output_neuron)