def __init__(self, params):
    self.p = params
    self.neurons = []
    self.input_neurons = []
    self.firing_threshold = self.p["initial_firing_threshold"]
    self.graph = nx.DiGraph()
    self.node_colors = []
    plt.ion()
    plt.show()
    self.neuron_params = {
        "learning_rate": self.p["learning_rate"],
        "synapse_activity_discount": self.p["synapse_activity_discount"],
        "initial_firing_threshold": self.p["initial_firing_threshold"],
        "initial_weight": self.p["initial_weight"]
    }

    # Input neurons
    for i in range(self.p["num_inputs"]):
        self.input_neurons.append(InputNeuron(name="Input" + str(i), graph=self.graph))
        self.graph.nodes[self.input_neurons[i].name]["pos"] = (0, i)

    # Hidden neuron
    self.neurons.append(Neuron(self.neuron_params, "0", self.graph))
    self.graph.nodes[self.neurons[0].name]["pos"] = (1, 0)
    for input_neuron in range(self.p["num_inputs"]):
        self.neurons[0].add_input(self.input_neurons[input_neuron])

    # Output neurons
    self.output_neuron_index_range = range(1, self.p["num_outputs"] + 1)
    for output_neuron in self.output_neuron_index_range:
        name = "Output" + str(output_neuron)
        self.neurons.append(Neuron(self.neuron_params, name, self.graph))
        self.neurons[output_neuron].add_input(self.neurons[0])
        self.graph.nodes[self.neurons[output_neuron].name]["pos"] = (5, 0)
def __init__(self, genome):
    self.neurons = {}
    for i in range(1, Inputs + 1):
        self.neurons[i] = Neuron()
    for o in range(1, Outputs + 1):
        self.neurons[MaxNodes + o] = Neuron()
    # Process genes in order of their output node.
    genome.genes = sorted(genome.genes, key=lambda k: k.out)
    for gene in genome.genes:
        if gene.enabled:
            self.neurons.setdefault(gene.out, Neuron())
            self.neurons[gene.out].incoming.append(gene)
            self.neurons.setdefault(gene.into, Neuron())
def __init__(self, num_neurons, previous_layer, inputs_per_neuron,
             outputs_per_neuron, learning_rate, ml_lambda, layer_type):
    self.output_layer = False
    self.learning_rate = learning_rate
    self.ml_lambda = ml_lambda
    if layer_type == "input":
        self.neurons = [Neuron(0, outputs_per_neuron, 0, "bias")]
        for x in range(num_neurons):
            self.neurons.append(Neuron(inputs_per_neuron, outputs_per_neuron, 0, "input"))
    elif layer_type == "output":
        self.output_layer = True
        self.neurons = []
        for pointer in range(num_neurons):
            # Collect the outgoing weights of the previous layer that feed this neuron.
            weights_in = [neuron.weights_out[pointer] for neuron in previous_layer.neurons]
            self.neurons.append(Neuron(len(weights_in), outputs_per_neuron, weights_in, "from_outputs"))
    else:  # hidden layer
        self.neurons = [Neuron(0, outputs_per_neuron, 0, "bias")]
        for x in range(num_neurons):
            weights_in = [neuron.weights_out[x] for neuron in previous_layer.neurons]
            self.neurons.append(Neuron(len(weights_in), outputs_per_neuron, weights_in, "hidden"))
def __init__(self, numInputNeurons, numHiddenLayers):
    # Create layers
    self.inputLayer = [Neuron() for _ in range(numInputNeurons)]
    self.numHiddenLayers = numHiddenLayers
    self.hiddenLayers = [[Neuron() for _ in range(numInputNeurons)]
                         for _ in range(numHiddenLayers)]
    self.outputNeuron = Neuron()

    if numHiddenLayers > 0:
        # Create edges to connect input neurons to the first hidden layer
        for i_neuron in self.inputLayer:
            for h_neuron in self.hiddenLayers[0]:
                Edge(i_neuron, h_neuron)
        # Create edges to connect consecutive hidden layers to each other
        for h_layer1, h_layer2 in zip(self.hiddenLayers, self.hiddenLayers[1:]):
            for h_neuron1 in h_layer1:
                for h_neuron2 in h_layer2:
                    Edge(h_neuron1, h_neuron2)
        # Create edges to connect the last hidden layer to the output neuron
        for h_neuron in self.hiddenLayers[-1]:
            Edge(h_neuron, self.outputNeuron)
    else:
        # No hidden layers: connect input neurons directly to the output neuron
        for i_neuron in self.inputLayer:
            Edge(i_neuron, self.outputNeuron)
def __init__(self, data, all_y_trues, neurons_in_hl=[0]):
    self.data = data
    self.all_y_trues = all_y_trues
    self.hidden_layers = []

    # Build each hidden layer with randomly initialized weights and biases;
    # each layer's weight count matches the size of the layer before it.
    amount_of_weights = len(self.data[0])
    for hl_count in range(len(neurons_in_hl)):
        hl = HiddenLayer(neurons_in_hl[hl_count])
        for neuron in hl.neurons():
            neuron.changeProps(
                [np.random.normal() for _ in range(amount_of_weights)],
                np.random.normal())
        amount_of_weights = neurons_in_hl[hl_count]
        self.hidden_layers.append(hl)

    # Output neuron
    self.o1 = Neuron([np.random.normal() for _ in range(amount_of_weights)],
                     np.random.normal())
def __init__(self, inputLayerSize, hiddenLayersSize, outputLayerSize, epochs,
             learningStep=0.5, biasNeuron=False):
    self.learningStep = learningStep
    self.bias = biasNeuron
    if biasNeuron:
        self.biasNeuron = InputNeuron(1)
    self.inputLayer = [InputNeuron() for _ in range(inputLayerSize)]
    self.hiddenLayers = []
    # Populate the first hidden layer from the input layer size.
    self.hiddenLayers.append([
        Neuron(inputLayerSize + int(self.bias))
        for _ in range(hiddenLayersSize.pop(0))
    ])
    # Multiple hidden layers are allowed; each is fed by the previous layer.
    for idx, hiddenLayerSize in enumerate(hiddenLayersSize):
        self.hiddenLayers.append([
            Neuron(len(self.hiddenLayers[idx]) + int(self.bias))
            for _ in range(hiddenLayerSize)
        ])
    self.outputLayer = [
        Neuron(len(self.hiddenLayers[-1]) + int(self.bias))
        for _ in range(outputLayerSize)
    ]
    self.layers = [self.inputLayer, *self.hiddenLayers, self.outputLayer]
    self.epochs = epochs
def Neuron_creation(self):
    # Split the population into excitatory and inhibitory neurons
    # according to the configured ratio.
    exci_number = int(self.size * self.ex_in_ratio)
    inhi_number = self.size - exci_number
    for i in range(exci_number):
        self.neurons.append(Neuron(I=self.I_ex, neuron_type=1))
    for i in range(inhi_number):
        self.neurons.append(Neuron(I=self.I_ex, neuron_type=-1))
def ketchup():
    neuron = Neuron([50, 50, 50], [2, 5, 3], 0, 850, 1 / 35, NeuronTypes.LINEAR)
    for i in range(100):
        print('RESULT STEP=%s WEIGHTS=%s DW=%s' % (i, neuron.weights, neuron.dw))
        # Apply the weight deltas computed by the neuron.
        neuron.weights = [x + y for x, y in zip(neuron.weights, neuron.dw)]
def __init__(self, input_number, layers, neuron_per_layer, output_number):
    self.inputs = [InputSignal(0) for x in range(input_number)]
    self.layers = []
    prev_layer = self.inputs
    self.fit = 0
    # Chain fully connected layers, each fed by the previous one.
    for i in range(layers):
        new_layer = [Neuron(prev_layer) for j in range(neuron_per_layer)]
        self.layers.append(new_layer)
        prev_layer = new_layer
    self.outputs = [Neuron(prev_layer) for i in range(output_number)]
def add_layer(self, tipo, qtd_neurons=None):
    if tipo.lower() == "hidden":
        for i in range(qtd_neurons):
            self.hidden_layer.append(
                Neuron(len(self.padroes[0]), len(self.padroes)))
    elif tipo.lower() == "output":
        # One output neuron per component of the expected output vector.
        for i in range(len(self.correct_outputs[0])):
            self.output_layer.append(
                Neuron(len(self.hidden_layer), len(self.padroes)))
def __init__(self):
    self.layouts = []
    neurons = []
    for i in range(1000):
        neurons.append(Neuron(6, self.f_unipolarna))
    self.layouts.append(neurons)
    self.beta = -1
    neurons = []
    for i in range(24):
        neurons.append(Neuron(6, self.f_unipolarna))
def netInitialisation(noInputs, noOutputs, noHiddenNeurons):
    net = []
    hiddenLayer = []
    for h in range(noHiddenNeurons):
        # One weight per input, plus one for the bias.
        weights = [random() for i in range(noInputs + 1)]
        neuron = Neuron(weights)
        hiddenLayer.append(neuron)
    net.append(hiddenLayer)
    outputLayer = [Neuron([random() for i in range(noHiddenNeurons + 1)])
                   for o in range(noOutputs)]
    net.append(outputLayer)
    return net
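# A minimal usage sketch for netInitialisation (hypothetical, not from the
# original source). It assumes `random` comes from the stdlib random module
# and that the project's Neuron class takes a weight list.
from random import random

net = netInitialisation(noInputs=2, noOutputs=1, noHiddenNeurons=3)
hiddenLayer, outputLayer = net  # net is [hiddenLayer, outputLayer]
assert len(hiddenLayer) == 3 and len(outputLayer) == 1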
def __init__(self, topology):
    self.layers = []
    for numNeuron in topology:
        layer = []
        for i in range(numNeuron):
            if len(self.layers) == 0:
                layer.append(Neuron(None))
            else:
                layer.append(Neuron(self.layers[-1]))
        layer.append(Neuron(None))  # the bias neuron
        layer[-1].setOutput(1)      # bias output is fixed at 1
        self.layers.append(layer)
def layoutNeurons(self):
    self.neurons = {}
    layerCount = 6.0
    amountPerLayer = self.size / layerCount
    currentAmount = 0
    currentLayer = 0
    for i in range(self.size):
        neuron = Neuron(network=self, id=i)
        self.neurons[i] = neuron
        # Advance to the next layer once this one is full.
        if i >= currentAmount:
            currentLayer += 1.0 / layerCount
            currentAmount += amountPerLayer
        neuron.position[1] = currentLayer
def __init__(self, topology):
    # topology is a list: [input_layer, hidden_layers, output_layer]
    self.layers = []
    for numNeuron in topology:
        layer = []
        for i in range(numNeuron):
            if not self.layers:
                # No previous layer yet: this is the input layer.
                layer.append(Neuron(None))
            else:
                layer.append(Neuron(self.layers[-1]))
        layer.append(Neuron(None))  # this is our bias neuron
        layer[-1].setOutput(1)      # our bias output will be 1
        self.layers.append(layer)
def fromDict(self, d):
    self.nNeurons = d['nNeurons']
    self.neurons = []
    for i in range(len(self.nNeurons) - 1):
        currLayer = []
        for dN in d['l' + str(i + 1)]:
            n = Neuron(0)
            n.fromDict(dN)
            currLayer.append(n)
        self.neurons.append(currLayer)
def setUp(self):
    """Prepares a half-adder neuron network."""
    # Layer 1 neurons:
    n1 = Neuron([12, 12], Sigmoid().activate, bias=-18)
    n2 = Neuron([-12, -12], Sigmoid().activate, bias=6)
    n3 = Neuron([12, 12], Sigmoid().activate, bias=-18)
    # Layer 2 neurons:
    n4 = Neuron([-12, -12, 0], Sigmoid().activate, bias=6)
    n5 = Neuron([0, 0, 12], Sigmoid().activate, bias=-6)
    # Layers
    l1 = NeuronLayer([n1, n2, n3])
    l2 = NeuronLayer([n4, n5])
    self.ntwrk = NeuronNetwork([l1, l2])
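# A hypothetical companion test (not part of the original suite): it assumes
# NeuronNetwork exposes a feed_forward method returning [sum, carry]. For a
# half adder, input [1, 1] should yield a sum close to 0 and a carry close to 1.
def test_halfadder_one_one(self):
    sum_out, carry_out = self.ntwrk.feed_forward([1, 1])
    self.assertAlmostEqual(sum_out, 0, 2)
    self.assertAlmostEqual(carry_out, 1, 2)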
def populate(self, nb_neurons, nb_inputs):
    """
    Populate the Layer with a set of randomly weighted Neurons.

    Each Neuron is initialized with nb_inputs inputs, each given a random
    clamped value.

    @param nb_neurons: Number of neurons.
    @param nb_inputs: Number of inputs.
    @return void
    """
    self.neurons = []
    for i in range(nb_neurons):
        n = Neuron(self.neuro_options)
        n.populate(nb_inputs)
        self.neurons.append(n)
def AddRandomNeuron(self):
    # Add the neuron anywhere but the input or output layers.
    Layer = random.randint(1, len(self.LayerList) - 2)
    NeuronID = max(list(self.LayerNodes.keys())) + 1
    NewNeuron = Neuron(NeuronID)

    # Archive the new neuron.
    self.LayerNodes[NeuronID] = NewNeuron
    self.LayerList[Layer][NeuronID] = NewNeuron

    # Wire a random neuron from the previous layer into the new neuron.
    PreviousNeuronID = random.choice(list(self.LayerList[Layer - 1].keys()))
    self.LayerList[Layer - 1][PreviousNeuronID].ForwardPropagatorNodes[NeuronID] = NewNeuron
    self.LayerList[Layer - 1][PreviousNeuronID].ForwardPropagatorWeights[NeuronID] = random.randint(-2, 2)

    # Wire the new neuron into a random neuron of the next layer.
    ForwardNeuronID = random.choice(list(self.LayerList[Layer + 1].keys()))
    NewNeuron.ForwardPropagatorNodes[ForwardNeuronID] = self.LayerList[Layer + 1][ForwardNeuronID]
    NewNeuron.ForwardPropagatorWeights[ForwardNeuronID] = random.randint(-2, 2)
def __init__(self, numOfNeurons, activation, input_num, lr, weights=None, name=None):
    self.input_num = input_num
    self.lr = lr
    self.name = name
    self.activation = activation
    self.num_neurons = numOfNeurons
    if weights is None:
        self.neurons = [Neuron(activation, input_num, lr)
                        for i in range(numOfNeurons)]
    else:
        # Column i of the weight matrix and bias i belong to neuron i.
        self.neurons = [Neuron(activation, input_num, lr,
                               [weights[0][:, i], weights[1][i]])
                        for i in range(numOfNeurons)]
    self.outputShape = numOfNeurons
    self.update_weights()
def initialise(self):
    # Setting the values shared with the Neuron class
    Neuron.dimensions = self.dimensions
    Neuron.length = self.length
    Neuron.width = self.width

    # Initialising neuron weights
    for i in range(self.length):
        innerlist = []
        for j in range(self.width):
            temp = Neuron(i, j)
            temp.weights = np.asarray(
                [random.uniform(0, 1) for _ in range(self.dimensions)],
                dtype=np.float64)
            innerlist.append(temp)
        self.nMap.append(innerlist)
class Neuron_INVERT(unittest.TestCase):
    """Tests the Neuron class by building an INVERT logic gate Neuron."""

    def setUp(self):
        """Prepares an INVERT-type Neuron."""
        self.INVERT_Neuron = Neuron([-12], Sigmoid().activate, bias=6)

    def test_INVERT_high(self):
        """Tests a scenario in which a Neuron, designed to be an INVERT
        gate, returns an output as close as possible to high (1)."""
        outcome = self.INVERT_Neuron.activate([0])
        self.assertAlmostEqual(outcome, 1, 2)

    def test_INVERT_low(self):
        """Tests a scenario in which a Neuron, designed to be an INVERT
        gate, returns an output as close as possible to low (0)."""
        outcome = self.INVERT_Neuron.activate([1])
        self.assertAlmostEqual(outcome, 0, 2)
def build_network():
    # Three neurons, each with two small random input weights.
    neurons = []
    for _ in range(3):
        w1 = random.uniform(-1 / 8, 1 / 8)
        w2 = random.uniform(-1 / 8, 1 / 8)
        neurons.append(Neuron(w1, w2))
    return neurons
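# A hypothetical caller for build_network (not from the original source); it
# assumes only the stdlib `random` module the function already relies on and
# the project's Neuron class.
import random

random.seed(0)             # make the random weights reproducible for the sketch
network = build_network()  # three Neurons, each with weights in [-1/8, 1/8]
assert len(network) == 3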
def __init__(self, size):
    self.size = size
    self.Neurons = []
    for i in range(size):
        self.Neurons.append(Neuron(0.00))
def __init__(self, layers_topology, activacion_func_topology, momentum=0.2,
             learning_rate=0.1, bias=1, epoches=1000, error_measure_frequency=10):
    self.momentum = momentum
    self.learning_rate = learning_rate
    self.bias = bias
    self.epoches = epoches
    self.error_measure_frequency = error_measure_frequency
    self.error = 0
    self.error_sum = 0
    self.error_max = 0
    self.error_X = []
    self.error_Y = []
    self.correct_Y = []
    self.predicted = []
    self.expected = []
    # One list of neurons per layer; each neuron is sized by layers_topology[i - 1].
    self.layers = [[
        Neuron(layers_topology[i - 1], neuron_number + 1,
               activacion_func_topology[i], self.momentum,
               self.learning_rate, self.bias)
        for neuron_number in range(layers_topology[i])
    ] for i in range(len(layers_topology))]
def gen_result_R_neurons(self, n):
    return [Neuron(self.get_weights(), self.b_R) for _ in range(n)]
def addN(self, who):
    self.Nneurons += len(who)
    for i, pos in enumerate(who):
        # Offset by i because each insert shifts subsequent positions.
        self.neurons.insert(
            pos + i,
            Neuron(self.neurons[0].Nvars, self.neurons[0].afunType))
def _get_item(self, index):
    if index in self.cache:
        geometry, morphology, diameter = self.cache[index]
    else:
        fn = self.datapath[index]
        neuron = Neuron(file_format='swc', input_file=fn)
        point_set = neuron.nodes_list  # each node carries xyz, r, parent, type, children

        # Geometry: per-node coordinates (soma moved to the origin) and radii.
        geometry = np.transpose(neuron.location)
        diameter = np.array([node.r for node in point_set])

        # Morphology: parent index per node; parent_index is 0-based, so shift by 1.
        morphology = neuron.parent_index + 1
        morphology = morphology[0:self.npoints]  # cut to the sample size
        morphology[0] = 0  # the soma has no parent
        geometry = geometry[0:self.npoints, :]

    if len(self.cache) < self.cache_size:
        self.cache[index] = (geometry, morphology, diameter)
    return geometry, morphology
def __init__(self, neurons=None, synapses=None, layer_type=LayerType.INPUT,
             neuron_count=-1, synapse_count=-1, weight_per_synapse=-1):
    # Avoid mutable default arguments.
    neurons = neurons if neurons is not None else []
    synapses = synapses if synapses is not None else []

    self.neurons = []
    self.synapses = []
    if len(neurons) > 0:
        Log.i("Using custom neurons.")
        self.neurons = neurons
    elif neuron_count > 0:
        Log.i("Creating Layer with %d neurons." % neuron_count)
        for i in range(neuron_count):
            self.neurons.append(Neuron())
        Log.d('debug neuron count %s' % len(self.neurons))
    self.layer_type = layer_type

    if len(synapses) > 0:
        self.synapses = synapses
    elif synapse_count > 0 and weight_per_synapse > 0:
        for i in range(synapse_count):
            self.synapses.append(
                Synapse(weights=[], weight_count=weight_per_synapse,
                        random_weight=True))
def __init__(self, _nodes, _classes, weights, obs, _metaLayers, trace, _Random):
    global perceptrons
    perceptrons = []
    global hiddenLayer
    hiddenLayer = []
    global classes
    classes = np.arange(_classes)
    global lastObs
    lastObs = obs[:]
    global avgObs
    avgObs = obs[:]
    global metaLayers
    metaLayers = _metaLayers
    global allowRandom
    allowRandom = _Random
    global discount
    discount = .75
    global gradientCount
    gradientCount = 0

    _nodes = _nodes * _metaLayers
    # Important to set perceptrons here for each new learning rate.
    for k in range(len(classes)):
        p = Neuron(classes[k], classes[k], _nodes)
        perceptrons.append(p)
    for i in range(_nodes):
        h = HiddenNetwork(i * 1000 + i, i % len(classes), weights)
        hiddenLayer.append(h)
def __init__(self, n_syn=10, max_spikes=50, shape=[2, 5, 1]):
    # Create the network; note that the input layer is not included.
    self.shape = shape
    self.hidden_layer = []
    self.output_layer = []
    self.max_spikes = max_spikes
    # The hidden layers are grouped layer by layer.
    for i in range(len(shape) - 2):
        self.hidden_layer.append([
            Neuron(n_syn=self.shape[i], max_spikes=self.max_spikes)
            for t in range(self.shape[i + 1])
        ])
    for i in range(self.shape[-1]):
        self.output_layer.append(
            Neuron(n_syn=self.shape[-2], max_spikes=self.max_spikes))
def test_train(self):
    test_neuron = Neuron(0.01, 0, 2, 1.0)
    test_neuron.set_weights([0.5, 0.5, 0.5])
    # Force a known activation so the weight update is deterministic.
    test_neuron.compute = MagicMock(return_value=1.0)
    test_neuron.train([1, 1, 1], "oja")
    updated = test_neuron.get_weights()
    self.assertEqual(updated, ["0.505", "0.505", "0.505"])
def __init__(self, rate, sigmoid, hidden, examples, variables, layers, rule, dropout):
    """
    Feed-forward Hebbian network for learning boolean functions
    with threshold gates.

    Keyword arguments:
    rate -- learning rate (float)
    sigmoid -- sigmoid function for weights if rule is basic hebbian (int)
    hidden -- number of hidden units; 0 removes the hidden layer (int)
    examples -- number of random boolean examples to present (int)
    variables -- number of boolean input variables (int)
    layers -- number of hidden layers, 1 to N (int)
    rule -- learning rule, "hebbian" or "oja" (str)
    dropout -- dropout parameter passed through to each Neuron

    Initializes layers, weights, connections, variables for Network class.
    """
    self.rate = rate
    self.sigmoid = sigmoid
    self.inputs = variables
    self.vis_layer = []
    self.hidden_layers = []
    self.hidden = hidden
    self.variables = variables
    self.data = BOOLEAN(examples, self.variables)
    self.layers = layers - 1
    self.rule = rule
    self.dropout = dropout
    self.length = int(math.pow(2, self.variables))
    # The +1 on each input count accounts for a bias input.
    for _ in range(self.hidden):
        self.vis_layer.append(Neuron(self.rate, self.sigmoid, self.inputs + 1, dropout))
    for layer in range(self.layers):
        self.hidden_layers.append([])
        for _ in range(self.hidden):
            self.hidden_layers[layer].append(
                Neuron(self.rate, self.sigmoid, self.hidden + 1, dropout))
    if self.hidden > 0:
        self.output_neuron = Neuron(self.rate, self.sigmoid, self.hidden + 1, dropout)
    else:
        self.output_neuron = Neuron(self.rate, self.sigmoid, self.inputs + 1, dropout)
# -*- coding: utf-8 -*-
import sys

from InputNeuron import InputNeuron
from Logger import Logger
from Neuron import Neuron
from Synapse import Synapse

version = (3, 0)
if sys.version_info < version:
    print("Python >= {0}.{1} is required to launch this program".format(version[0], version[1]))
    sys.exit(1)

# Create the neurons
n1 = Neuron("and")
n2 = Neuron("or")

# Input values used to initialize the InputNeurons
inputs1 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
inputs2 = [0, 0, 0, 0, 0, 1]

for input in inputs1:
    input_neuron = InputNeuron(input)
    synapse = Synapse(input_neuron, n1)
for input in inputs2:
    input_neuron = InputNeuron(input)
    synapse = Synapse(input_neuron, n2)

# Link n1 to n2
Synapse(n1, n2)
""" This Program Makes A Neuron Object. """ """ This Exercise Is Not Complete ! """ from Neuron import Neuron x = Neuron("neuron") x.calculate_values() x.print_matrix() x.plot_graph()
class Network(object):
    """Network class"""

    def __init__(self, rate, sigmoid, hidden, examples, variables, layers, rule, dropout):
        """
        Feed-forward Hebbian network for learning boolean functions
        with threshold gates.

        Keyword arguments:
        rate -- learning rate (float)
        sigmoid -- sigmoid function for weights if rule is basic hebbian (int)
        hidden -- number of hidden units; 0 removes the hidden layer (int)
        examples -- number of random boolean examples to present (int)
        variables -- number of boolean input variables (int)
        layers -- number of hidden layers, 1 to N (int)
        rule -- learning rule, "hebbian" or "oja" (str)
        dropout -- dropout parameter passed through to each Neuron

        Initializes layers, weights, connections, variables for Network class.
        """
        self.rate = rate
        self.sigmoid = sigmoid
        self.inputs = variables
        self.vis_layer = []
        self.hidden_layers = []
        self.hidden = hidden
        self.variables = variables
        self.data = BOOLEAN(examples, self.variables)
        self.layers = layers - 1
        self.rule = rule
        self.dropout = dropout
        self.length = int(math.pow(2, self.variables))
        for _ in range(self.hidden):
            self.vis_layer.append(Neuron(self.rate, self.sigmoid, self.inputs + 1, dropout))
        for layer in range(self.layers):
            self.hidden_layers.append([])
            for _ in range(self.hidden):
                self.hidden_layers[layer].append(
                    Neuron(self.rate, self.sigmoid, self.hidden + 1, dropout))
        if self.hidden > 0:
            self.output_neuron = Neuron(self.rate, self.sigmoid, self.hidden + 1, dropout)
        else:
            self.output_neuron = Neuron(self.rate, self.sigmoid, self.inputs + 1, dropout)

    @staticmethod
    def threshold(activation):
        """
        Thresholds output neuron activation to 1 or 0.

        Keyword arguments:
        activation -- output neuron activation (float)

        returns 1 or 0
        """
        if activation >= 0.0:
            return 1
        else:
            return 0

    def load(self, filename):
        """
        Loads a model file.

        Keyword arguments:
        filename -- name of model file, e.g. hebb1.txt (str)

        initializes network to weights in file
        """
        hebbian_weights = open(filename, "r").read().split('\n')
        for i in range(self.hidden):
            weights = hebbian_weights[i].split('\t')
            self.vis_layer[i].set_weights(weights)
        for i in range(self.layers):
            for j in range(self.hidden):
                weights = hebbian_weights[((i + 1) * self.hidden) + j].split('\t')
                self.hidden_layers[i][j].set_weights(weights)
        weights = hebbian_weights[-2].split('\t')
        self.output_neuron.set_weights(weights)

    def save(self, filename):
        """
        Saves a model into a file.

        Keyword arguments:
        filename -- name of model file, e.g. hebb1.txt (str)

        saves current model weights to file
        """
        hebbian_weights = open(filename, "w")
        for i in range(self.hidden):
            hebbian_weights.write("\t".join(self.vis_layer[i].get_weights()) + '\n')
        for i in range(self.layers):
            for j in range(self.hidden):
                hebbian_weights.write("\t".join(self.hidden_layers[i][j].get_weights()) + '\n')
        hebbian_weights.write("\t".join(self.output_neuron.get_weights()) + '\n')
        hebbian_weights.close()

    def compute(self, example):
        """
        Computes output of model given an example.

        Keyword arguments:
        example -- list of 1 and -1 (list)

        returns threshold value of output neuron.
""" activations = [] if self.hidden > 0: for i in xrange(self.hidden): output = self.vis_layer[i].compute(example) activations.append(output) activations.append(1.0) for layer in xrange(self.layers): hidden_activations = [] for i in xrange(self.hidden): hidden_activations.append(self.hidden_layers[layer][i].compute(activations)) hidden_activations.append(1.0) activations = hidden_activations output = self.output_neuron.compute(activations) else: output = self.output_neuron.compute(example) return Network.threshold(output) def index(self, example): """ Finds expected output of example for target function Args: example (list): list of 1 and -1 returns index in current truthtable for given example. """ for i in xrange(self.length): binary = bin(i).lstrip('0b') for i in xrange(self.variables-len(binary)): binary = '0'+binary for j in xrange(self.variables): index = True if example[j] == -1: example[j] = 0 if int(binary[j]) != example[j]: index = False if index: return i return False def train(self, table): """ Trains model given rule for given examples on a single truth table Args: table (list): truth table of 1, -1 returns true if learned and false if not """ training = self.data.load_training() for example in training: activations = [] if self.hidden > 0: for i in xrange(self.hidden): output = self.vis_layer[i].train(example, self.rule) activations.append(output) activations.append(1.0) for layer in xrange(self.layers): hidden_activations = [] for i in xrange(self.hidden): hidden_activations.append(self.hidden_layers[layer][i].train(activations, self.rule)) hidden_activations.append(1.0) activations = hidden_activations self.output_neuron.clamp(activations, table[self.index(example)], self.rule) else: self.output_neuron.clamp(example, table[self.index(example)], self.rule) learned_table = self.truthtable() learned = True for i in xrange(self.length): if learned_table[i] != table[i]: learned = False if learned == True: break return learned def truthtable(self): """Builds the truth table for the current model and returns it""" table = [] for i in xrange(self.length): inputs = [] binary = bin(i).lstrip('0b') for i in xrange(len(binary)): inputs.append(int(binary[i])) inputs.append(1) table.append(self.compute(inputs)) return table def test(self, load_file): """Loads a model from file and prints the table that model produces""" self.load(load_file) table = self.truthtable() print " ..... Test: Model Computes:", table def increase_learning(self, factor): """Increase the learning rate to by 'factor'""" pass def noise(self, stddev): """Add gaussian noise to all weights with stddev""" #add noise to weights pass
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
# vim: set foldmethod=marker commentstring=\ \ #\ %s :
#
# Author: Taishi Matsumura
# Created: 2016-07-26
#
# Copyright (C) 2016 Taishi Matsumura
#
from Neuron import Neuron
from Panel import Panel, IstepSlider
from matplotlib.pyplot import close, figure, subplots_adjust

close('all')

neuron = Neuron(1)

fig = figure()
subplots_adjust(left=0.15, bottom=0.25)
sl = IstepSlider()
Vm_panel = Panel(211, neuron.get_t(), neuron.get_V(),
                 ylabel='Vm [mV]', ylim=(-80, 50))
Gates_panel = Panel(413, neuron.get_t(), neuron.get_m(),
                    ylabel='Gate vars [-]')
Istep_panel = Panel(414, neuron.get_t(), sl.get_value(),
                    ylabel='Istep [uA]', ylim=(sl.minval, sl.maxval))
fig.add_axes(sl.position)
fig.add_subplot(Vm_panel.getPanel())
fig.add_subplot(Gates_panel.getPanel())
fig.add_subplot(Istep_panel.getPanel())

# ----------------------------------------------------------------------------
# Main loop
# ----------------------------------------------------------------------------
while True:
def test_clamp(self):
    test_neuron = Neuron(0.01, 0, 2, 1.0)
    test_neuron.set_weights([0.5, 0.5, 0.5])
    test_neuron.clamp([1, 1, 1], 1.0, "oja")
    updated = test_neuron.get_weights()
    self.assertEqual(updated, ["0.505", "0.505", "0.505"])
def test_compute(self):
    test_neuron = Neuron(0.01, 0, 2, 1.0)
    test_neuron.set_weights([0.75, -0.5, -0.5])
    linear_activation = test_neuron.compute([1, 1, 1])
    self.assertEqual(linear_activation, -1.0)
# -*- coding: utf-8 -*-
"""
Created on Fri Jun  3 23:28:26 2016

@author: Guanhao Wu
"""
from Neuron import Neuron
from NeuronParameters import NeuronParameters
import matplotlib.pyplot as plt
import numpy as np

T = 60000
V = []
N = Neuron()
P = NeuronParameters()
P.set_RS(N)  # apply the RS parameter set to the neuron

# Step the simulation, switching on an input current after t = 50.
for t in range(T):
    if t > 50:
        N.clamp_input(85)
    N.timestep()
    V.append(N.V)

plt.figure()
plt.plot(np.linspace(0, T * N.dt, T), V, 'r')
plt.show()