def __init__(self, genome):
    """Build a phenotype network from *genome* (NEAT-style).

    Creates input and output neurons at their fixed id slots, then wires
    in every enabled gene, creating hidden neurons on demand.

    Relies on module-level ``Inputs``, ``Outputs`` and ``MaxNodes``
    constants being defined elsewhere in the module.
    """
    self.neurons = {}
    # Fixed input slots 1..Inputs.
    for i in range(1, Inputs + 1):
        self.neurons[i] = Neuron()
    # Output slots are offset by MaxNodes so they never collide with
    # hidden-neuron ids.
    for o in range(1, Outputs + 1):
        self.neurons[MaxNodes + o] = Neuron()
    # Process genes ordered by their target neuron id.
    genome.genes = sorted(genome.genes, key=lambda k: k.out)
    for gene in genome.genes:
        if gene.enabled:
            # Create endpoints lazily: a gene may reference a hidden
            # neuron that has no slot yet.
            self.neurons.setdefault(gene.out, Neuron())
            self.neurons[gene.out].incoming.append(gene)
            self.neurons.setdefault(gene.into, Neuron())
def __init__(self, num_neurons, previous_layer, inputs_per_neuron,
             outputs_per_neuron, learning_rate, ml_lambda, layer_type):
    """Build one layer of the network.

    layer_type selects the wiring:
      - "input":  a bias neuron plus num_neurons input neurons.
      - "output": num_neurons neurons, each fed by the matching column of
                  the previous layer's outgoing weights.
      - anything else (hidden): like "output" but with a leading bias neuron.
    """
    self.output_layer = False
    self.learning_rate = learning_rate
    self.ml_lambda = ml_lambda
    self.neurons = []

    def incoming_weights(index):
        # Column *index* of the previous layer's outgoing weights — the
        # weights feeding neuron *index* of this layer. (Shared between
        # the output and hidden branches, which previously duplicated it.)
        return [neuron.weights_out[index] for neuron in previous_layer.neurons]

    if layer_type == "input":
        self.neurons.append(Neuron(0, outputs_per_neuron, 0, "bias"))
        for _ in range(num_neurons):
            self.neurons.append(
                Neuron(inputs_per_neuron, outputs_per_neuron, 0, "input"))
    elif layer_type == "output":
        self.output_layer = True
        for pointer in range(num_neurons):
            weights_in = incoming_weights(pointer)
            self.neurons.append(
                Neuron(len(weights_in), outputs_per_neuron, weights_in,
                       "from_outputs"))
    else:  # hidden layer
        self.neurons.append(Neuron(0, outputs_per_neuron, 0, "bias"))
        for x in range(num_neurons):
            weights_in = incoming_weights(x)
            self.neurons.append(
                Neuron(len(weights_in), outputs_per_neuron, weights_in,
                       "hidden"))
def __init__(self, numInputNeurons, numHiddenLayers):
    """Wire a fully-connected network ending in a single output neuron.

    Every hidden layer has the same width as the input layer. With zero
    hidden layers the inputs connect straight to the output neuron.
    """
    self.inputLayer = [Neuron() for _ in range(numInputNeurons)]
    self.numHiddenLayers = numHiddenLayers
    self.hiddenLayers = [
        [Neuron() for _ in range(numInputNeurons)]
        for _ in range(numHiddenLayers)
    ]
    self.outputNeuron = Neuron()

    if numHiddenLayers == 0:
        # No hidden layers: inputs feed the output directly.
        for src in self.inputLayer:
            Edge(src, self.outputNeuron)
        return

    # Inputs -> first hidden layer.
    for src in self.inputLayer:
        for dst in self.hiddenLayers[0]:
            Edge(src, dst)
    # Each hidden layer -> the next one.
    for idx in range(self.numHiddenLayers - 1):
        for src in self.hiddenLayers[idx]:
            for dst in self.hiddenLayers[idx + 1]:
                Edge(src, dst)
    # Last hidden layer -> output neuron.
    for src in self.hiddenLayers[-1]:
        Edge(src, self.outputNeuron)
def __init__(self, params):
    """Build the network described by *params* and set up live plotting.

    Topology: num_inputs input neurons -> one hidden neuron ("0") ->
    num_outputs output neurons, laid out left-to-right in the graph via
    per-node "pos" attributes.
    """
    self.p = params
    self.neurons = []
    self.input_neurons = []
    self.firing_threshold = self.p["initial_firing_threshold"]
    self.graph = nx.DiGraph()
    self.node_colors = []
    # Interactive plotting so later draw calls update in place.
    plt.ion()
    plt.show()
    self.neuron_params = {
        "learning_rate": self.p["learning_rate"],
        "synapse_activity_discount": self.p["synapse_activity_discount"],
        "initial_firing_threshold": self.p["initial_firing_threshold"],
        "initial_weight": self.p["initial_weight"]
    }
    """Input neurons"""
    for i in range(self.p["num_inputs"]):
        self.input_neurons.append(InputNeuron(name="Input" + str(i),
                                              graph=self.graph))
        # Graph.node was removed in networkx 2.4; Graph.nodes is the
        # supported node-attribute mapping (available since 2.0).
        self.graph.nodes[self.input_neurons[i].name]["pos"] = (0, i)
    """Hidden neuron"""
    self.neurons.append(Neuron(self.neuron_params, "0", self.graph))
    self.graph.nodes[self.neurons[0].name]["pos"] = (1, 0)
    for input_neuron in range(self.p["num_inputs"]):
        self.neurons[0].add_input(self.input_neurons[input_neuron])
    """Output neurons"""
    self.output_neuron_index_range = range(1, self.p["num_outputs"] + 1)
    for output_neuron in self.output_neuron_index_range:
        name = "Output" + str(output_neuron)
        self.neurons.append(Neuron(self.neuron_params, name, self.graph))
        self.neurons[output_neuron].add_input(self.neurons[0])
        self.graph.nodes[self.neurons[output_neuron].name]["pos"] = (5, 0)
def __init__(self):
    """Two hidden neurons feeding one output; all three share the same
    weight vector [0, 1] and a zero bias."""
    shared_weights = np.array([0, 1])
    shared_bias = 0
    self.h1 = Neuron(shared_weights, shared_bias)
    self.h2 = Neuron(shared_weights, shared_bias)
    self.o1 = Neuron(shared_weights, shared_bias)
def __init__(self, inputLayerSize, hiddenLayersSize, outputLayerSize,
             epochs, learningStep=0.5, biasNeuron=False):
    """Build an MLP with the given layer sizes.

    hiddenLayersSize is a list with one entry per hidden layer. It is no
    longer consumed with pop(): the original mutated the caller's list;
    we now work on a copy so the argument is left untouched.
    """
    self.learningStep = learningStep
    self.bias = biasNeuron
    if biasNeuron:
        self.biasNeuron = InputNeuron(1)
    self.inputLayer = [InputNeuron() for _ in range(inputLayerSize)]
    hiddenSizes = list(hiddenLayersSize)  # copy: don't mutate caller's list
    self.hiddenLayers = []
    # First hidden layer is fed by the input layer (plus optional bias).
    self.hiddenLayers.append([
        Neuron(inputLayerSize + int(self.bias))
        for _ in range(hiddenSizes[0])
    ])
    # Remaining hidden layers are each fed by the previous hidden layer.
    for idx, hiddenLayerSize in enumerate(hiddenSizes[1:]):
        self.hiddenLayers.append([
            Neuron(len(self.hiddenLayers[idx]) + int(self.bias))
            for _ in range(hiddenLayerSize)
        ])
    self.outputLayer = [
        Neuron(len(self.hiddenLayers[-1]) + int(self.bias))
        for _ in range(outputLayerSize)
    ]
    self.layers = [self.inputLayer, *self.hiddenLayers, self.outputLayer]
    self.epochs = epochs
def Neuron_creation(self):
    """Populate self.neurons with excitatory then inhibitory neurons.

    The split is controlled by self.ex_in_ratio (fraction excitatory).
    NOTE(review): both populations receive I=self.I_ex; if a separate
    inhibitory current (e.g. I_in) exists, confirm this is intentional.
    """
    n_excitatory = int(self.size * self.ex_in_ratio)
    n_inhibitory = self.size - n_excitatory
    for count, kind in ((n_excitatory, 1), (n_inhibitory, -1)):
        for _ in range(count):
            self.neurons.append(Neuron(I=self.I_ex, neuron_type=kind))
def __init__(self, input_number, layers, neuron_per_layer, output_number):
    """Feed-forward net: input signals -> `layers` equal-width layers ->
    output neurons; each neuron is wired to the whole previous layer."""
    self.inputs = [InputSignal(0) for _ in range(input_number)]
    self.layers = []
    self.fit = 0  # fitness accumulator, updated elsewhere
    previous = self.inputs
    for _ in range(layers):
        layer = [Neuron(previous) for _ in range(neuron_per_layer)]
        self.layers.append(layer)
        previous = layer
    self.outputs = [Neuron(previous) for _ in range(output_number)]
def add_layer(self, tipo, qtd_neurons=None):
    """Append a layer to the network.

    tipo: "hidden" adds qtd_neurons neurons sized by the input patterns;
    "output" adds one neuron per component of the expected output vector.
    """
    kind = tipo.lower()
    if kind == "hidden":
        # Each hidden neuron sees every feature of a pattern.
        for _ in range(qtd_neurons):
            self.hidden_layer.append(
                Neuron(len(self.padroes[0]), len(self.padroes)))
    elif kind == "output":
        # Each output neuron is fed by the whole hidden layer.
        for _ in range(len(self.correct_outputs[0])):
            self.output_layer.append(
                Neuron(len(self.hidden_layer), len(self.padroes)))
def __init__(self):
    # Container for neuron layers; only the first (1000-neuron) layer is
    # actually stored below.
    self.layouts = []
    neurons = []
    for i in range(1000):
        neurons.append(Neuron(6, self.f_unipolarna))
    self.layouts.append(neurons)
    self.beta = -1
    neurons = []
    # NOTE(review): these 24 neurons are built but never appended to
    # self.layouts — the list is discarded when __init__ returns.
    # Confirm whether a second self.layouts.append(neurons) is missing.
    for i in range(24):
        neurons.append(Neuron(6, self.f_unipolarna))
def netInitialisation(noInputs, noOutputs, noHiddenNeurons):
    """Create a two-layer network: one hidden layer, one output layer.

    Every neuron gets uniformly random weights, one per input plus one
    extra for the bias. Returns a list of layers (lists of Neuron).
    """
    hiddenLayer = [
        Neuron([random() for _ in range(noInputs + 1)])  # +1: bias weight
        for _ in range(noHiddenNeurons)
    ]
    outputLayer = [
        Neuron([random() for _ in range(noHiddenNeurons + 1)])
        for _ in range(noOutputs)
    ]
    return [hiddenLayer, outputLayer]
def __init__(self, topology):
    """Build one layer per entry of *topology*.

    Neurons of the first layer have no inputs; later layers are fed by
    the previous layer. Every layer additionally ends with a bias neuron
    whose output is pinned to 1.
    """
    self.layers = []
    for numNeuron in topology:
        previous = self.layers[-1] if self.layers else None
        layer = [Neuron(previous) for _ in range(numNeuron)]
        bias = Neuron(None)  # bias neuron: no inputs
        bias.setOutput(1)    # constant output of 1
        layer.append(bias)
        self.layers.append(layer)
def setUp(self):
    """Prepares a Halfadder Neuron Network."""
    # Hidden layer (layer 1): three sigmoid neurons.
    hidden = NeuronLayer([
        Neuron([12, 12], Sigmoid().activate, bias=-18),
        Neuron([-12, -12], Sigmoid().activate, bias=6),
        Neuron([12, 12], Sigmoid().activate, bias=-18),
    ])
    # Output layer (layer 2): two sigmoid neurons (sum and carry).
    output = NeuronLayer([
        Neuron([-12, -12, 0], Sigmoid().activate, bias=6),
        Neuron([0, 0, 12], Sigmoid().activate, bias=-6),
    ])
    self.ntwrk = NeuronNetwork([hidden, output])
def __init__(self, topology):
    """topology: [input_size, hidden_sizes..., output_size].

    Builds one layer per entry; first-layer neurons take no inputs,
    later neurons are fed by the previous layer, and every layer gets a
    trailing bias neuron whose output is fixed at 1.
    """
    self.layers = []
    for size in topology:
        source = None if not self.layers else self.layers[-1]
        layer = [Neuron(source) for _ in range(size)]
        # Bias neuron: no inputs, constant output of 1.
        layer.append(Neuron(None))
        layer[-1].setOutput(1)
        self.layers.append(layer)
def __init__(self):
    # Hard-wired demo network. Constructor argument order for every
    # neuron class: nr(id), weights, layer, bias.
    weights = np.array([0, 1])  # shared by all neurons below
    bias = 0
    # Layer 1: two input neurons (ids 1-2).
    self.inputNeuron1 = InputNeuron(1, weights, 1, bias)
    self.inputNeuron2 = InputNeuron(2, weights, 1, bias)
    # Layer 2: three interneurons (ids 3-5).
    self.interNeuron1 = InterNeuron(3, weights, 2, bias)
    self.interNeuron2 = InterNeuron(4, weights, 2, bias)
    self.interNeuron3 = InterNeuron(5, weights, 2, bias)
    # Layer 3: single output neuron (id restarts at 1 for this class).
    self.outputNeuron = OutputNeuron(1, weights, 3, bias)
    # Plain Neuron instances in layer 0; ids are independent of the above.
    self.h1 = Neuron(1, weights, 0, bias)
    self.h2 = Neuron(2, weights, 0, bias)
    self.o1 = Neuron(3, weights, 0, bias)
def __init__(self, numOfNeurons, activation, input_num, lr, weights=None,
             name=None):
    """A fully-connected layer of numOfNeurons neurons.

    When *weights* is given it is a pair (W, b): column i of W together
    with b[i] initialises neuron i. Otherwise each neuron initialises
    its own weights.
    """
    self.input_num = input_num
    self.lr = lr
    self.name = name
    self.activation = activation
    self.num_neurons = numOfNeurons
    if weights is None:
        self.neurons = [Neuron(activation, input_num, lr)
                        for _ in range(numOfNeurons)]
    else:
        self.neurons = [
            Neuron(activation, input_num, lr,
                   [weights[0][:, i], weights[1][i]])
            for i in range(numOfNeurons)
        ]
    self.outputShape = numOfNeurons
    self.update_weights()
def build_network():
    """Return three neurons, each with two weights drawn uniformly from
    [-1/8, 1/8]. Draw order matches the original hand-unrolled version."""
    bound = 1 / 8
    network = []
    for _ in range(3):
        first_weight = random.uniform(-bound, bound)
        second_weight = random.uniform(-bound, bound)
        network.append(Neuron(first_weight, second_weight))
    return network
def _get_item(self, index):
    """Load sample *index* as (geometry, morphology), memoised in self.cache.

    geometry: node locations transposed to (n, 3) and cut to self.npoints
    rows. morphology: 1-based parent indices cut to self.npoints, with
    the root entry forced to 0. The per-node diameter is computed and
    cached alongside but not returned.

    Debug prints and commented-out exploratory code were removed; the
    loading logic is unchanged.
    """
    if index in self.cache:
        geometry, morphology, diameter = self.cache[index]
    else:
        fn = self.datapath[index]
        neuron = Neuron(file_format='swc', input_file=fn)
        # Each node carries: xyz, r, parent, type, children.
        point_set = neuron.nodes_list
        # Soma-first coordinates, transposed to row-per-node.
        geometry = np.transpose(neuron.location)
        diameter = np.array([node.r for node in point_set])
        # parent_index is 0-based; shift so ids start at 1, then pin root.
        morphology = neuron.parent_index + 1
        morphology = morphology[0:self.npoints]
        morphology[0] = 0
        geometry = geometry[0:self.npoints, :]
    if len(self.cache) < self.cache_size:
        self.cache[index] = (geometry, morphology, diameter)
    return geometry, morphology
def addN(self, who):
    """Insert new neurons at the positions listed in *who*.

    Each new neuron copies the input width (Nvars) and activation type
    of the current first neuron. Insert positions are offset by how many
    neurons have already been inserted, so the requested indices refer
    to the original list.
    """
    # len(who[:]) made a pointless copy of the sequence; len(who) is enough.
    self.Nneurons += len(who)
    for i, pos in enumerate(who):
        self.neurons.insert(
            pos + i,
            Neuron(self.neurons[0].Nvars, self.neurons[0].afunType))
def __init__(self, layers_topology, activacion_func_topology, momentum=0.2,
             learning_rate=0.1, bias=1, epoches=1000,
             error_measure_frequency=10):
    """Build a layered network; layer i holds layers_topology[i] neurons,
    each sized by the previous layer's width.

    NOTE(review): for i == 0 the input width is layers_topology[-1]
    (Python index wrap-around) — confirm the first layer is really meant
    to be sized by the last topology entry.
    """
    self.momentum = momentum
    self.learning_rate = learning_rate
    self.bias = bias
    self.epoches = epoches
    self.error_measure_frequency = error_measure_frequency
    # Running error bookkeeping, filled in during training.
    self.error = 0
    self.error_sum = 0
    self.error_max = 0
    self.error_X = []
    self.error_Y = []
    self.correct_Y = []
    self.predicted = []
    self.expected = []
    self.layers = []
    for i in range(len(layers_topology)):
        layer = []
        for neuron_number in range(layers_topology[i]):
            layer.append(
                Neuron(layers_topology[i - 1], neuron_number + 1,
                       activacion_func_topology[i], self.momentum,
                       self.learning_rate, self.bias))
        self.layers.append(layer)
def __init__(self, neurons=None, synapses=None, layer_type=LayerType.INPUT,
             neuron_count=-1, synapse_count=-1, weight_per_synapse=-1):
    """Build a layer either from explicit neurons/synapses or by count.

    *neurons* and *synapses* now default to None instead of the mutable
    [] defaults (shared-default pitfall); passing an explicit empty list
    behaves exactly as before.
    """
    neurons = neurons if neurons is not None else []
    synapses = synapses if synapses is not None else []
    self.neurons = []
    self.synapses = []
    if len(neurons) > 0:
        Log.i("Using custom neurons.")
        self.neurons = neurons
    elif neuron_count > 0:
        Log.i("Creating Layer with %d neurons." % neuron_count)
        for i in range(0, neuron_count):
            self.neurons.append(Neuron())
        Log.d('debug neuron count %s' % len(self.neurons))
    self.layer_type = layer_type
    if len(synapses) > 0:
        self.synapses = synapses
    elif synapse_count > 0 and weight_per_synapse > 0:
        # Fresh random-weight synapses when none were supplied.
        for i in range(0, synapse_count):
            self.synapses.append(
                Synapse(weights=[], weight_count=weight_per_synapse,
                        random_weight=True))
def __init__(self, size):
    """A layer of *size* neurons, each initialised with activation 0.00."""
    self.size = size
    self.Neurons = [Neuron(0.00) for _ in range(size)]
def __init__(self, _nodes, _classes, weights, obs, _metaLayers, trace, _Random):
    # NOTE(review): this constructor (re)binds module-level globals, so at
    # most one network can exist at a time; statement order matters below.
    # The *trace* parameter is accepted but unused here.
    global perceptrons
    perceptrons = []
    global hiddenLayer
    hiddenLayer = []
    global classes
    classes = np.arange(_classes)  # class labels 0.._classes-1
    global lastObs
    lastObs = obs[:]  # defensive copy of the observation vector
    global avgObs
    avgObs = obs[:]
    global metaLayers
    metaLayers = _metaLayers
    global allowRandom
    allowRandom = _Random
    global discount
    discount = .75
    global gradientCount
    gradientCount = 0
    # Total node budget spread across the meta layers.
    _nodes = _nodes * _metaLayers
    for k in range(len(classes)):
        p = Neuron(
            classes[k], classes[k], _nodes
        )  # important to set perceptrons here for each new learning rate
        perceptrons.append(p)
    for i in range(_nodes):
        h = HiddenNetwork(
            i * 1000 + i, i % len(classes), weights
        )  # important to set perceptrons here for each new learning rate
        hiddenLayer.append(h)
def __init__(self, format, p):
    """
    Creates a NeuralNetwork object with random coefficients and a given
    size for each layer
    ----
    input:
        format: array of integer of size C -> (n_c)_c
        p: int -> the number of columns used as input of the network
    ----
    output: void
    """
    self.format = format
    C = len(format)
    # Layer l has format[l] neurons, each fed by input_sizes[l] values.
    input_sizes = [p] + format
    self.neuron_layers = [
        [Neuron(input_sizes[layer]) for _ in range(format[layer])]
        for layer in range(C)
    ]
    # Pre-activation accumulators, one zero per neuron per layer.
    self.Z_layers = [
        np.zeros(format[layer], dtype=np.double) for layer in range(C)
    ]
    self.learning_Rate = 0.5  # arbitrary default
    self.current_input = [0] * p
    self.errors = [0]
    # d(loss)/d(weight) placeholders, mirroring the weight shapes.
    self.derivatives = [
        [[0] * input_sizes[layer] for _ in range(format[layer])]
        for layer in range(C)
    ]
def gen_result_R_neurons(self, n):
    """Return *n* result neurons, each built from the current weights and
    the R bias."""
    return [Neuron(self.get_weights(), self.b_R) for _ in range(n)]
def __init__(self, data, all_y_trues, neurons_in_hl=[0]):
    """Build hidden layers sized by *neurons_in_hl*, then one output neuron.

    Each neuron receives normally-distributed random weights — one per
    input from the previous layer — and a random bias.
    """
    self.data = data
    self.all_y_trues = all_y_trues
    self.hidden_layers = []
    # Width of the layer feeding the one being built.
    fan_in = len(self.data[0])
    for size in neurons_in_hl:
        hl = HiddenLayer(size)
        for neuron in hl.neurons():
            neuron.changeProps(
                [np.random.normal() for _ in range(fan_in)],
                np.random.normal())
        fan_in = size
        self.hidden_layers.append(hl)
    # Output neuron: one weight per neuron of the last hidden layer.
    self.o1 = Neuron(
        [np.random.normal() for _ in range(fan_in)],
        np.random.normal())
def __init__(self, n_syn=10, max_spikes=50, shape=[2, 5, 1]):
    """Spiking network; *shape* lists layer sizes. The input layer is not
    stored — only hidden layers (grouped per layer) and the output layer.

    Each neuron gets one synapse per neuron in the preceding layer.
    NOTE(review): the n_syn argument is overridden by shape-derived
    values below and only its default is ever visible.
    """
    self.shape = shape
    self.max_spikes = max_spikes
    self.hidden_layer = [
        [Neuron(n_syn=self.shape[i], max_spikes=self.max_spikes)
         for _ in range(self.shape[i + 1])]
        for i in range(len(shape) - 2)
    ]
    self.output_layer = [
        Neuron(n_syn=self.shape[-2], max_spikes=self.max_spikes)
        for _ in range(self.shape[-1])
    ]
def __init__(self, num_neurons, function):
    """A layer of *num_neurons* default-constructed neurons.

    function: name of the activation, e.g. "sigmoid", "relu", "tanh".
    """
    self.num_neurons = num_neurons
    self.neurons = [Neuron() for _ in range(num_neurons)]
    self.function = function
    print(self.__repr__() + " --initialized")
def imageTopology(self, inXY, netXY):
    # Build a two-layer topology for image input: an input grid (L0) of
    # inXY[0] x inXY[1] neurons fully connected to a network grid (L1) of
    # netXY[0] x netXY[1] neurons. Neuron addresses are "LL,JJ,II" strings.
    if (not self.inXY):
        self.inXY, self.netXY = inXY, netXY
    # Per-L1-neuron digit statistics, every counter starting at zero.
    self.stats = {'01,{0:02d},{1:02d}'.format(j, i): {str(k): 0 for k in range(10)}
                  for j in range(self.netXY[0]) for i in range(self.netXY[1])}
    # Addresses of every L1 neuron; used as axon targets below.
    inAxonTerminals = ['01,{0:02d},{1:02d}'.format(j,i) for j in range(netXY[0]) for i in range(netXY[1])]
    for j in range(inXY[0]): # Input Layer (L0)
        for i in range(inXY[1]):
            address = '00,{0:02d},{1:02d}'.format(j,i)
            # Input neurons have no dendrites and project to every L1 neuron.
            self.input[address] = Neuron(address, dict(), inAxonTerminals)
    for j in range(netXY[0]): # Excitatory and Inhibitory Layer (L1)
        for i in range(netXY[1]):
            address = '01,{0:02d},{1:02d}'.format(j,i)
            # One dendrite per input neuron with a random initial weight.
            # The comprehension's j, i shadow the outer loop variables only
            # inside the comprehension's own scope.
            netDendrites = {'00,{0:02d},{1:02d}'.format(j,i): np.random.uniform(0.3, 0.7) for j in range(inXY[0]) for i in range(inXY[1])}
            # Lateral connections to every other L1 neuron, excluding itself.
            netAxonTerm = list(inAxonTerminals)
            netAxonTerm.remove(address)
            self.network[address] = Neuron(address, netDendrites, netAxonTerm)
    self.saveBrain(0)
def __init__(self, numNeurons):
    """A layer of *numNeurons* neurons, each with three uniform-random
    weights drawn from [0, 1]."""
    self.neurons = [
        Neuron(weights=[random.uniform(0, 1) for _ in range(3)])
        for _ in range(numNeurons)
    ]