def test_predict(self):
    target = Neuron(tf.constant([1, 2, 3]), tf.constant(4))
    prediction = target.predict(
        tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]))
    tf.debugging.assert_equal(prediction, tf.constant([18, 36, 54, 72]))
class OurNeuralNetwork:
    '''
    A neural network with:
      - 2 inputs
      - a hidden layer with 2 neurons (h1, h2)
      - an output layer with 1 neuron (o1)
    Each neuron has the same weights and bias:
      - w = [0, 1]
      - b = 0
    '''

    def __init__(self):
        weights = np.array([0, 1])
        bias = 0

        # The Neuron class here is from the previous section
        self.h1 = Neuron(weights, bias)
        self.h2 = Neuron(weights, bias)
        self.o1 = Neuron(weights, bias)

    def feedforward(self, x):
        out_h1 = self.h1.feedforward(x)
        out_h2 = self.h2.feedforward(x)

        # The inputs for o1 are the outputs from h1 and h2
        out_o1 = self.o1.feedforward(np.array([out_h1, out_h2]))

        return out_o1
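# A minimal usage sketch (not part of the original snippet), assuming the
# Neuron class from the previous section computes sigmoid(w . x + b).
# With w = [0, 1] and b = 0: h1 = h2 = sigmoid(3) ~ 0.9526, and
# o1 = sigmoid(0.9526) ~ 0.7216.
network = OurNeuralNetwork()
x = np.array([2, 3])
print(network.feedforward(x))  # ~0.7216 under the sigmoid assumption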
def test_integrator(self):
    neuron = Neuron(
        weights=[1, 2, 3],
        transfer_function=BaseTransferFunction,  # does nothing
    )
    self.assertEqual(neuron.integrator([1, 1, 1]), 6)
class Perceptron(object):

    def __init__(self, input_size, lrn_rate=1):
        """'input_size' is the length of the input.
        'lrn_rate' is the learning rate.
        """
        self.neuron = Neuron([0] * input_size, 0, signal)
        self.lrn_rate = lrn_rate
        self.fire = self.neuron.fire

    def training(self, examples):
        epochs = 0
        while True:
            epochs = epochs + 1
            error_count = 0
            for (input_vector, desired_output) in examples:
                actual_output = self.neuron.fire(input_vector)
                error = desired_output - actual_output
                if error != 0:
                    learned = self.lrn_rate * error
                    self.neuron.update(input_vector, learned)
                    error_count = error_count + 1
            if error_count == 0:
                break
        return epochs

    def __str__(self):
        ret = 'lrn_rate: %s' % self.lrn_rate
        ret = '%s\n%s' % (ret, str(self.neuron))
        return ret
def __init__(self, map_dimensions, data_dimension, kernel_func,
             nb_clusters=0, verbose=False):
    self._verbose = verbose
    self._map_dimensions = list(map_dimensions)
    self._data_dimension = data_dimension
    self._dataset = None
    self._kernel = kernel_func
    self._nb_clusters = nb_clusters
    self._neurons = []
    self._clusters = []
    for position in generate_positions(map_dimensions):
        # Initialize neuron positions in the map
        neuron = Neuron(position=position)
        # Initialize neuron coordinates with random values
        coords = [random.random() * 10 for _ in range(data_dimension)]
        neuron.set_data(coords)
        self._neurons.append(neuron)
def test_step_false(self):
    neuron = Neuron(
        weights=[1, 2, 3],
        transfer_function=StepTransferFunction,
        function=lambda p: p >= 7,  # any function can be used here
    )
    self.assertEqual(neuron.run([1, 1, 1]), 0)
def testSinglePreviousEvaluate(self):
    previousNeuron = InputNeuron()
    previousNeuron.setValue(1)
    previousRow = [previousNeuron]
    neuron = Neuron(previousRow)
    self.assertGreater(neuron.evaluate(), 1 / 2)
def __init__(self, layerName, inputArr, neuronCount=0, prevNeuronCount=0,
             activation="relu", mode="init_rand"):
    self.layerName = layerName
    self.inputArr = inputArr
    self.activation = activation
    self.mode = mode
    self.neurons = []
    if mode == "init_rand":
        self.neuronCount = neuronCount
        self.prevNeuronCount = prevNeuronCount
        for i in range(self.neuronCount):
            neuron = Neuron(input=self.inputArr,
                            weight=np.random.rand(self.prevNeuronCount),
                            bias=np.random.randint(-5, 5))
            self.neurons.append(neuron)
    elif mode == "init_read":
        # Restore a previously saved layer from "<layerName>.txt";
        # the context manager ensures the file handle is closed.
        with open(self.layerName + ".txt", "r") as f:
            data = json.load(f)
        self.neuronCount = data["neuronCount"]
        self.prevNeuronCount = data["prevNeuronCount"]
        for i in range(self.neuronCount):
            weightArr = data["weight" + str(i + 1)]
            biasVal = data["bias" + str(i + 1)]
            neuron = Neuron(self.inputArr, weightArr, biasVal)
            self.neurons.append(neuron)
def play(self, hand, opponent_card_weight, who_made_first):
    self.who_made_first = who_made_first
    cards_in_hand_weights = [k[2] for k in self.cards_in_hand]
    cards_in_hand_mean = sum(cards_in_hand_weights) / len(cards_in_hand_weights)
    neuron_entries = [[who_made_first, opponent_card_weight,
                       cards_in_hand_mean, hand]]
    neuron_value = Neuron(neuron_entries).exec()
    have_major_card = False
    for card in self.cards_in_hand:
        if card[2] >= opponent_card_weight:
            have_major_card = True
    if not have_major_card:
        return cards_in_hand_weights.index(min(cards_in_hand_weights))
    if round(neuron_value) == 1:
        if self.can_trucar:
            return self.trucar()
    # Play the first card whose choice neuron rounds to 1
    for k, card_in_hand in enumerate(self.cards_in_hand):
        entries_choice = [[who_made_first,
                           self.cards_weight(self.opponent_cards_played),
                           card_in_hand[2], hand]]
        choice_neuron = Neuron(entries_choice).exec()
        if round(choice_neuron) == 1:
            return k
    return 0
def build_layer(weights_filename, biases_filename, prev_layer_size):
    new_layer = []
    # Build neurons with starting weights
    with open(weights_filename, 'r') as weights_file:
        reader = csv.reader(weights_file)
        # Iterate CSV rows / neurons
        for row in reader:
            new_neuron = Neuron()
            weights = []
            # Iterate weights
            for i in range(prev_layer_size):
                weights.append(float(row[i]))
            new_neuron.set_weights(weights)
            new_layer.append(new_neuron)
    # Add starting biases
    with open(biases_filename, 'r') as biases_file:
        reader = csv.reader(biases_file)
        # Iterate CSV rows / neurons
        for index, row in enumerate(reader):
            for bias in row:
                new_layer[index].set_bias(float(bias))
    return new_layer
def half_adder_train(inputs, user_iteration, learning_rate):
    targets = [[0, 0], [0, 1], [0, 1], [1, 0]]
    half_adder = NeuronNetwork([
        NeuronLayer([
            Neuron("1", [randint(-1, 1), randint(-1, 1)], randint(-1, 1)),
            Neuron("2", [randint(-1, 1), randint(-1, 1)], randint(-1, 1))
        ]),
        NeuronLayer([
            Neuron("3", [randint(-1, 1), randint(-1, 1)], randint(-1, 1)),
            Neuron("4", [randint(-1, 1), randint(-1, 1)], randint(-1, 1))
        ])
    ])
    iterations, errors, outputs = half_adder.train(inputs, targets,
                                                   learning_rate, user_iteration)
    print(f"============ | Half adder | ============\n"
          f"After {iterations} iterations:\n"
          f"Errors: {errors}\n"
          f"Inputs: {inputs}\n"
          f"Outputs: {outputs}\n"
          f"Targets: {targets}\n"
          f"Weights:", end=' ')
    layer = half_adder.layers[-1]
    print([neuron.weights for neuron in layer.neurons])
    print(f"Bias: {[neuron.bias for neuron in layer.neurons]}\n\n")
class Merge(threading.Thread):
    URI = ""
    eqc = 0  # earthquake counter

    def __init__(self, uri):
        threading.Thread.__init__(self)
        self.URI = uri
        self.n = Neuron()
        self.n.start()
        self.eqc = 0

    def run(self):
        while True:
            for entry in measEntries:
                self.n.add(entry.Weight)
            measEntries.clear()
            if self.n.FireState:
                self.eqc = self.eqc + 1
                if self.eqc > 20:
                    self.eqc = 20
            else:
                if self.eqc > 0:
                    self.eqc = self.eqc - 1
            if self.eqc > 10:
                print("EARTHQUAKE!!!")
                requests.get(self.URI + "/warning?description=Earthquake")
            elif self.eqc < 5:
                requests.get(self.URI + "/warning?description=ok")
            time.sleep(0.1)
def test_set_error_output_layer(self):
    neuron = Neuron(0, 0, [0.05, 0.05], [1, 1])
    neuron.output = 0.518979
    neuron.is_output_layer = True
    neuron.set_output_layer_error(0)
    self.assertEqual(-0.12955, round_to(neuron.delta_val, 5))
def add_layer(self, neurons, layer_number):
    """
    :param neurons: list of Neuron objects making up the new layer
    :param layer_number: index of the layer within the network
    :return: None
    """
    bias_value = random.randint(1, 10)
    bias = Neuron(activation_func=lambda x: 0,
                  activation_prime=lambda x: 0,
                  isBias=True)
    bias.y_output = bias_value
    neurons.append(bias)
    self.layers[layer_number] = neurons
    if layer_number == 0:
        return
    if layer_number > self.max_layer:
        self.max_layer = layer_number
    # Fully connect the previous layer to the new one; bias neurons
    # get input references but no weighted connections
    for input in self.layers[layer_number - 1]:
        for output in neurons:
            output.add_input_reference(input)
            if not output.isBias:
                weight = self.randomize_weight()
                input.add_output_connection(output, weight)
def __init__(self, joysize, painsize):
    defaultweight = 0.3
    # create neurons
    self.joyneurons = [
        Neuron('amygdala' + str(i), 'joyneurons', actthreshold)
        for i in range(joysize)
    ]
    self.painneurons = [
        Neuron('amygdala' + str(i + joysize), 'painneurons', actthreshold)
        for i in range(painsize)
    ]
    self.neurons = self.joyneurons + self.painneurons
    self.size = joysize + painsize
    self.joysize = joysize
    self.painsize = painsize
    # connect every neuron to every other neuron
    for neuron1 in self.neurons:
        for neuron2 in self.neurons:
            if neuron1 is not neuron2:
                neuron1.add_inconnects([neuron2, defaultweight])
                neuron2.add_outconnects([neuron1, defaultweight])
def reset(self):
    """Resets the state of the neuron"""
    Neuron.reset(self)
    self.voltage.set_value(numpy.zeros(self.size).astype('float32'))
    self.refractory_time.set_value(numpy.zeros(self.size).astype('float32'))
def test_update_weights(self):
    neuron = Neuron(0, 0, [0.05, 0.05], [0.519053, 1])
    neuron.delta_val = -0.1295578
    neuron.update_weights(0.001)
    self.assertEqual(0.0499327526, round(neuron.weights[0], 10))
    self.assertEqual(0.0498704, round(neuron.weights[1], 7))
def Adder():
    A = 0
    B = 0
    weights = -0.5
    thresholds = -0.5
    # With weights and threshold of -0.5, each gate computes NAND (assuming
    # calc() fires when the weighted sum reaches the threshold); five NANDs
    # in this arrangement form the classic half adder.
    gate1 = Neuron([[A, weights], [B, weights]], threshold=thresholds).calc()
    gate2 = Neuron([[A, weights], [gate1, weights]], threshold=thresholds).calc()
    gate3 = Neuron([[gate1, weights], [B, weights]], threshold=thresholds).calc()
    gate4 = Neuron([[gate2, weights], [gate3, weights]], threshold=thresholds).calc()
    gate5 = Neuron([[gate1, weights], [gate1, weights]], threshold=thresholds).calc()
    carry = gate5
    output = gate4
    print("Carry : " + str(carry))
    print("Output: " + str(output))
def __init__(self, nodes, initial_weights=None, bias=None, neuron_type="linear"):
    self._ntype = neuron_type
    self._neurons = Neuron(neuron_type)
    assert len(nodes) == 2, "param nodes is a tuple/list of length 2"
    assert type(nodes[0]) == int
    assert type(nodes[1]) == int
    self._nodes = nodes
    self._init_weights = initial_weights
    self._bias = bias
    grad_fn = {
        "linear": self._linear_layer_grad,
        "logistic": self._logistic_layer_grad
    }
    self.grad = grad_fn[neuron_type]
    if self._init_weights is None:
        self._init_weights = np.random.rand(nodes[0], nodes[1])
    if self._bias is None:
        self._bias = np.ones(self._nodes[1])
    assert len(self._init_weights.T) == self._nodes[1]
    assert len(self._bias) == self._nodes[1]
    self._state = np.zeros(self._nodes[1])
    self._bias_weights = np.ones(self._nodes[1])
    self._weights = self._init_weights
def test_step_true(self):
    neuron = Neuron(
        weights=[1, 2, 3],
        transfer_function=StepTransferFunction,
    )
    self.assertEqual(neuron.run([1, 2, 3]), 1)
def xor_port_train(inputs, user_iteration, learning_rate):
    targets = [0, 1, 1, 0]
    xor_port = NeuronNetwork([
        NeuronLayer([
            Neuron("Nor gate", [randint(-1, 1), randint(-1, 1)], randint(-1, 1)),
            Neuron("And gate", [randint(-1, 1), randint(-1, 1)], randint(-1, 1))
        ]),
        NeuronLayer([
            Neuron("Nor gate", [randint(-1, 1), randint(-1, 1)], randint(-1, 1))
        ])
    ])
    iterations, errors, outputs = xor_port.train(inputs, targets,
                                                 learning_rate, user_iteration)
    print(f"============ | Xor gate | ============\n"
          f"After {iterations} iterations:\n"
          f"Errors: {errors}\n"
          f"Inputs: {inputs}\n"
          f"Outputs: {outputs}\n"
          f"Targets: {targets}\n"
          f"Weights:", end=' ')
    layer = xor_port.layers[-1]
    print([neuron.weights for neuron in layer.neurons])
    print(f"Bias: {[neuron.bias for neuron in layer.neurons]}\n\n")
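# A hypothetical invocation (argument layout inferred from the function body,
# not from the original code): the four two-bit XOR input patterns, an
# iteration budget, and a learning rate; the values are illustrative only.
if __name__ == "__main__":
    xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
    xor_port_train(xor_inputs, user_iteration=10000, learning_rate=0.5)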
def test_updateBias(self):
    bias1 = tf.constant(7)
    target = Neuron([4, 5, 6], bias1)
    self.assertEqual(target._bias, bias1)
    bias2 = tf.constant(8)
    target.updateBias(bias2)
    self.assertEqual(target._bias, bias2)
def loadmat(self):
    """Load neurons from a single .mat file"""
    self.header = MATHeader()
    nrecs = self.header.read(self.path)
    for nrec in nrecs:
        neuron = Neuron(self.path, sort=self)
        neuron.loadmat(nrec)
        self.alln[neuron.id] = neuron  # save it
def __init__(self, input_size, size, weights=None):
    if weights is None:
        # Random weights in [-0.5, 0.5); the extra slot is presumably
        # the bias weight
        self.neurons = [Neuron(np.random.rand(input_size + 1) - 0.5)
                        for _ in range(size)]
    else:
        self.neurons = [Neuron(weights[i]) for i in range(size)]
def setUp(self):
    # List of Neuron objects
    neur_1 = Neuron()
    neur_2 = Neuron()
    self.neurons = [neur_1, neur_2]
    # Weight matrix: the input feeds neuron 1, neuron 1 feeds neuron 2,
    # and neuron 2 feeds the output
    self.weights = np.array([[1, 0, 0], [0, 1, 0]])
def __init__(self, size, dt=0.001, t_rc=0.02, t_ref=0.002):
    Neuron.__init__(self, size, dt)
    self.t_rc = t_rc
    self.t_ref = t_ref
    # internal variables
    self.voltage = theano.shared(numpy.zeros(size).astype('float32'))
    self.refractory_time = theano.shared(numpy.zeros(size).astype('float32'))
def create_layer(self, n_num, last_layer=False):
    self.layers.append([
        Neuron(None if len(self.layers) == 0 else self.layers[-1])
        for i in range(0, n_num)
    ])
    # create a bias neuron for every layer except the last
    if not last_layer:
        self.layers[-1].append(Neuron(None, bias_neuron=True))
def test_sigmoid(self):
    neuron = Neuron(
        weights=[1, 2, 3],
        transfer_function=SigmoidTransferFunction,
    )
    v = neuron.run([0, 0, 0])
    self.assertEqual(v, 0.5)
def load(path):
    """
    Loads a neural network from a json file
    @param (String) path - The path to load the neural network from
    @returns (Network) - The neural network that was loaded
    """
    network = Network()
    try:
        with open(path, "r") as f:
            network_data = f.read()
        network_json = json.loads(network_data)
        layers = network_json["layers"]
        # For every layer in the network ...
        for layer in layers:
            neurons = []
            # For every neuron in the layer ...
            for neuron in layer["neurons"]:
                weights = neuron["weights"]
                bias = neuron["bias"]
                activation = neuron["activation"]
                # Choose the proper activation function and corresponding derivative
                activation_func = None
                derivative_func = None
                if activation == Network.LINEAR:
                    activation_func = Network.ACTIVATION_LINEAR
                    derivative_func = Network.DERIVATIVE_LINEAR
                elif activation == Network.SIGMOID:
                    activation_func = Network.ACTIVATION_SIGMOID
                    derivative_func = Network.DERIVATIVE_SIGMOID
                elif activation == Network.TANH:
                    activation_func = Network.ACTIVATION_TANH
                    derivative_func = Network.DERIVATIVE_TANH
                elif activation == Network.STEP:
                    activation_func = Network.ACTIVATION_STEP
                    derivative_func = Network.DERIVATIVE_STEP
                # Create a neuron with the desired info
                neuron = Neuron(0, activation_func, derivative_func)
                neuron.weights = weights
                neuron.bias = bias
                # Add the processed neuron to the collection
                neurons.append(neuron)
            # Create a layer with the desired neurons
            layer = Layer(0, 0, None, None)
            layer.neurons = neurons
            # Add the processed layer to the collection
            network.layers.append(layer)
    except Exception:
        raise Exception("Invalid Neural Network File @ {}!".format(path))
    return network
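# For reference, a sketch of the JSON layout that load() expects, inferred
# from the parsing code above; the field values are illustrative only:
#
# {
#     "layers": [
#         {
#             "neurons": [
#                 {"weights": [0.1, -0.4], "bias": 0.2, "activation": "sigmoid"}
#             ]
#         }
#     ]
# }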
def test_updateWeights(self):
    weights1 = tf.constant([4, 5, 6])
    target = Neuron(weights1, 7)
    tf.debugging.assert_equal(target._weights, weights1)
    weights2 = tf.constant([8, 9, 10])
    target.updateWeights(weights2)
    tf.debugging.assert_equal(target._weights, weights2)
def __init__(self, num_inputs, num_hidden_layers, num_neurons):
    self.fitness = None
    self.output = Neuron()
    self.inputs = [Neuron() for _ in range(num_inputs)]
    self.hidden_layers = [[Neuron() for _ in range(num_neurons)]
                          for _ in range(num_hidden_layers)]
    self.synapses = [[] for _ in range(num_hidden_layers + 1)]
    self.num_hidden_layers = num_hidden_layers
    self.init_synapses()
def __init__(self, size, *pargs):
    self.base = []
    if pargs:
        layer, rand = pargs
        for i in range(size):
            self.base.append(Neuron(layer, rand))
    else:
        for i in range(size):
            self.base.append(Neuron())
def __init__(self, inputs, weights, bias):
    Neuron.__init__(self, inputs)
    # NOTE: The weights and bias properties here are not
    # numbers, but rather references to other neurons.
    # The weight and bias values are stored within the
    # respective neurons.
    self.weights = weights
    self.bias = bias
def test_neuron():
    print('Testing a neuron...')
    activations = ActivationFunctions()
    weights = np.array([3, 1])  # w1 = 3, w2 = 1
    bias = -1                   # b = -1
    n = Neuron(weights, bias, activations.sigmoid)
    x = np.array([4, 1])        # x1 = 4, x2 = 1
    print(n.show_configuration())
    # sigmoid(3*4 + 1*1 - 1) = sigmoid(12) ~ 0.9999938558253978
    print('Input: {} --> Result: {}'.format(x, n.feedforward(x)))
def generation_crossover(self, datas):
    new_gen = []
    for i in range(0, self.count_individuals):
        should_mutate = random() < self.mutation_prob
        if should_mutate:
            self.root_neurons[i].mutate()
        should_struct_mutate = random() < self.mutation_struct_prob
        if should_struct_mutate:
            self.root_neurons[i].mutate_struct()
    # sorted() returns a new list; the original discarded the result
    datas = sorted(datas, key=lambda d: d.score, reverse=True)
    for i in range(0, len(datas)):
        for j in range(0, len(datas)):
            if i == j:
                continue
            root_synapses = self.root_neurons[i].synapses
            for syn in root_synapses:
                syn.weight = (syn.weight +
                              self.root_neurons[j].synapses[root_synapses.index(syn)].weight) / 2
            root_neuron = Neuron(0, root_synapses)
            f_genes, s_genes = [], []
            self.root_neurons[i].iterate_children_recursive(
                lambda neuron, next_neuron, syn: f_genes.append((neuron, next_neuron, syn)))
            self.root_neurons[j].iterate_children_recursive(
                lambda neuron, next_neuron, syn: s_genes.append((neuron, next_neuron, syn)))
            for gene in f_genes:
                for o_gene in s_genes:
                    if (gene[0].id == o_gene[0].id and gene[1].id == o_gene[1].id) \
                            or random() < self.crossover_unique_gene_transfer_prob:
                        match_neuron = (root_neuron.find_child(lambda n: n.id == o_gene[0].id)
                                        if o_gene[0].id != 0 else None)
                        ancestor_genes = root_neuron
                        new_syn = Synapse(o_gene[2].label,
                                          (gene[2].input + o_gene[2].input) / 2,
                                          (gene[2].weight + o_gene[2].weight) / 2,
                                          [])
                        if match_neuron is None:
                            match_neuron = Neuron(o_gene[0].id, [new_syn])
                        match_ancestor_genes = [g for g in s_genes
                                                if g[1].id == match_neuron.id]
                        if len(match_ancestor_genes) > 0:
                            ancestor_genes = match_ancestor_genes[0]
                        # NOTE: the original id offset iterated over match_neuron
                        # itself, which is not iterable; counting its synapses is
                        # the closest working interpretation.
                        new_neuron = Neuron(
                            o_gene[1].id + len(match_neuron.synapses) + 1,
                            [new_syn])
                        if ancestor_genes is root_neuron:
                            root_neuron.synapses.append(
                                Synapse(new_syn.label, new_syn.input,
                                        new_syn.weight, [new_neuron]))
                        else:
                            ancestor_genes[2].next_neurons.append(new_neuron)
            # NOTE: the original never added root_neuron to new_gen, which would
            # leave the next generation empty; appending here appears to be the intent.
            new_gen.append(root_neuron)
            if len(new_gen) == self.count_individuals:
                break
    self.root_neurons = new_gen
    self.adjust_weights(datas)
def loadptcs(self):
    """Load neurons from a single .ptcs file"""
    self.header = PTCSHeader()
    with open(self.path, 'rb') as f:
        self.header.read(f)
        for i in range(self.header.nneurons):
            neuron = Neuron(self.path, sort=self)
            neuron.loadptcs(f, self.header)
            self.alln[neuron.id] = neuron  # save it
        assert eof(f), 'File %s has unexpected length' % self.path
def init(self, neurons_num=3, inputs=3, activation_function="sigmoid"):
    self.inputs_num = inputs
    self.neurons_num = neurons_num
    self.activation_function = activation_function
    self.neurons = []
    for i in range(self.neurons_num):
        neuron = Neuron()
        neuron.init(self.inputs_num, self.activation_function)
        self.neurons.append(neuron)
def __init__(self):
    self.rs = RS_232(wrk_dir='/tmp/Cortix', filename='ir_7040')
    self.neuron = Neuron(task=self.rs.ir_7040, wrk_dir='/tmp/Cortix',
                         name='ir_7040')
    self.socket_list = []
    self.init_port = 60000
    self.port = 60001
    # self.host = str(socket.gethostbyname(socket.gethostname()))
    self.host = '10.253.90.99'
    print(self.host)
def test_3(steps):
    weights = 1
    print("Target converse {0}, {1} steps".format(weights, steps))
    neuron = Neuron(weights, sigm, sigmp, error)
    errors = []
    for i in range(steps):
        inputs = [random.random() for r in range(weights)]
        target = 1.0 - inputs[0]
        neuron.learn_1(inputs, target)
        errors.append(neuron.last_error)
    print(report(errors))
def test_feed_forward(self):
    neuron = Neuron(0, 0, [0.05, 0.05], [1, 1])
    next_node1 = Neuron(0, 0, [0.05, 0.05], [0, 0])
    next_node2 = Neuron(0, 0, [0.05, 0.05], [0, 0])
    nodes = [next_node1, next_node2]
    neuron.feed_forward(nodes)
    self.assertEqual(0.524, round_to(nodes[0].inputs[0], 3))
    self.assertEqual(0, round_to(nodes[0].inputs[1], 3))
    self.assertEqual(0.524, round_to(nodes[1].inputs[0], 3))
    self.assertEqual(0, round_to(nodes[1].inputs[1], 3))
def __init__(self, size, dt=0.001, tau_rc=0.02, tau_ref=0.002):
    """Constructor for a set of LIF rate neurons

    :param int size: number of neurons in set
    :param float dt: timestep for neuron update function
    :param float tau_rc: the RC time constant
    :param float tau_ref: refractory period length (s)
    """
    Neuron.__init__(self, size, dt)
    self.tau_rc = tau_rc
    self.tau_ref = tau_ref
def test_1(steps):
    weights = 3
    print("Linear combination of weights {0}, {1} steps".format(weights, steps))
    neuron = Neuron(weights, sigm, sigmp, error)
    errors = []
    for i in range(steps):
        inputs = [random.random() for r in range(weights)]
        target = 2 * inputs[0] + 0.3 * inputs[1] - 0.7 * inputs[2]
        neuron.learn_1(inputs, target)
        errors.append(neuron.last_error)
    print(report(errors))
class ProgramLogic:

    def __init__(self):
        parser.parseFile()
        self.neuron = Neuron(self.inputCount())
        self.neuron.randomize(-1.0, 1.0)
        self.teachingStep = 0
        self.prevResponse = 0
        self.prevError = 0
        self.curResponse = 0
        self.curError = 0

    def inputCount(self):
        return parser.counts[parser.inputCount]

    def performTeaching(self, teachingRatio):
        resultPrev = self.neuron.learn(self.currentNormalizedInputs(),
                                       self.currentExpectedOutput(),
                                       teachingRatio)
        self.prevResponse = resultPrev[Neuron.prevResponse]
        self.prevError = resultPrev[Neuron.prevError]
        self.curResponse = self.neuron(self.currentNormalizedInputs())
        # currentExpectedOutput is a method and must be called
        self.curError = self.currentExpectedOutput() - self.curResponse
        self.teachingStep += 1

    def currentComment(self):
        return parser.elements[self.realIndex()][parser.comment]

    def currentExpectedOutput(self):
        return parser.elements[self.realIndex()][parser.expectedOutputs][0]

    def currentInputs(self):
        return parser.elements[self.realIndex()][parser.inputs]

    def currentNormalizedInputs(self):
        return Neuron.normalize(self.currentInputs())

    def currentPrevWeights(self):
        return self.neuron.weights

    def currentPrevResponse(self):
        return self.prevResponse

    def currentPrevError(self):
        return self.prevError

    def currentResponse(self):
        return self.curResponse

    def currentError(self):
        return self.curError

    def realIndex(self):
        return self.teachingStep % len(parser.inputs)
def test_5(steps):
    weights = 40
    print("Target sqrt(avg) {0}, {1} steps".format(weights, steps))
    neuron = Neuron(weights, sigm, sigmp, error)
    errors = []
    for i in range(steps):
        inputs = [random.random() for r in range(weights)]
        avg = sum(inputs) / len(inputs)
        target = math.sqrt(avg)
        neuron.learn_1(inputs, target)
        errors.append(neuron.last_error)
    print(report(errors))
def test_4(steps):
    weights = 40
    print("Target max - min {0}, {1} steps".format(weights, steps))
    neuron = Neuron(weights, sigm, sigmp, error)
    errors = []
    for i in range(steps):
        inputs = [random.random() for r in range(weights)]
        imax = max(inputs)
        imin = min(inputs)
        target = imax - imin
        neuron.learn_1(inputs, target)
        errors.append(neuron.last_error)
    print(report(errors))
def __init__(self, size, dt=0.001, tau_rc=0.02, tau_ref=0.002):
    """Constructor for a set of LIF rate neurons

    :param int size: number of neurons in set
    :param float dt: timestep for neuron update function
    :param float tau_rc: the RC time constant
    :param float tau_ref: refractory period length (s)
    """
    Neuron.__init__(self, size, dt)
    self.tau_rc = tau_rc
    self.tau_ref = tau_ref
    # internal variables
    self.voltage = theano.shared(numpy.zeros(size).astype('float32'))
    self.refractory_time = theano.shared(numpy.zeros(size).astype('float32'))
def main(train, test, out):
    TRAIN_FILE = train
    TEST_FILE = test
    OUT_FILE = out
    img = Image.open(TRAIN_FILE)  # read double moons image from .png file
    pixels = img.load()           # generate pixel map
    width = img.size[0]
    height = img.size[1]
    training_set = dict()
    for i in range(width):
        for j in range(height):
            if pixels[i, j] == BLUE:       # if pixel is blue
                training_set[i, j] = BOT   # set value to bottom
            elif pixels[i, j] == RED:      # if pixel is red
                training_set[i, j] = TOP   # set value to top

    # create neuron with 2 input nodes
    n = Neuron(2)  # x-input, y-input
    print("Neuron created.")

    # training
    print("Training...")
    counter = 0
    while True:
        errors = 0
        for p in training_set:
            errors += n.train_step(p, training_set[p])
        counter += 1
        print("=====")
        if errors < n.get_margin() * len(training_set):
            break
    print("Length of training set: " + str(len(training_set)))
    print("Iterations: " + str(counter))

    # test cases
    img = Image.open(TEST_FILE)
    pixels = img.load()
    width = img.size[0]
    height = img.size[1]
    for i in range(width):
        for j in range(height):
            if pixels[i, j] == BLACK:
                n.set_input(0, i)
                n.set_input(1, j)
                n.activate()
                ans = n.get_output()
                if ans == TOP:
                    pixels[i, j] = RED
                elif ans == BOT:
                    pixels[i, j] = BLUE
    img.save(OUT_FILE, "PNG")
def add_neuron(self):
    if len(self.neurons) >= MAX_NUM_OF_NEURONS:
        return False
    new = Neuron()
    self.neurons.append(new)
    self.alpha_neuron.connect(new)
    for i in self.next_layer:
        for a in i.neurons:
            new.connect(a)
    for k in self.prev_layer:
        for b in k.neurons:
            b.connect(new)
    self.history.append([1, repr(new)])
    return True
def __init__(self, inputs, hiddentotal):
    self.learning_constant = 0.5
    self.InputNeurons = []
    for i in range(inputs):
        self.InputNeurons.append(Neuron())
    self.InputNeurons.append(Neuron(1))  # adding bias neuron
    self.HiddenNeurons = []
    for i in range(hiddentotal):
        self.HiddenNeurons.append(Neuron())
    self.HiddenNeurons.append(Neuron(1))  # adding bias neuron
    self.OutputNeuron = Neuron()
    # connecting everything
    for ind_in, val_in in enumerate(self.InputNeurons):
        for ind_hid, val_hid in enumerate(self.HiddenNeurons):
            c = Connection(val_in, val_hid)
            self.InputNeurons[ind_in].addConnection(c)
            self.HiddenNeurons[ind_hid].addConnection(c)
    for ind, val in enumerate(self.HiddenNeurons):
        c = Connection(val, self.OutputNeuron)
        self.HiddenNeurons[ind].addConnection(c)
        self.OutputNeuron.addConnection(c)
def _create_neurons(self, neuron_count_vec, weight_counts):
    neuron_count_vec_length = len(neuron_count_vec)
    for i in range(0, neuron_count_vec_length):
        self.neurons.append([])
        for j in range(0, neuron_count_vec[i]):
            # One extra weight for the bias; its matching input is fixed at 1
            weights = Network.get_initial_neuron_weights(weight_counts[i] + 1)
            inputs = [0] * (len(weights) - 1)
            inputs.append(1)
            neuron = Neuron(i, j, weights, inputs)
            if i == neuron_count_vec_length - 1:
                neuron.is_output_layer = True
            self.neurons[i].append(neuron)
def updateResult(self):
    for i in range(len(self.neuron.weights)):
        self.neuron.weights[i] = \
            self.widgetExperiment.tableWidget.cellWidget(i, 1).value()
    inputs = [self.widgetExperiment.tableWidget.cellWidget(i, 2).value()
              for i in range(len(self.neuron.weights))]
    response = self.neuron.response(inputs)
    signalStrength = Neuron.strength(inputs, Neuron.StrenghtNormEuclidean)
    memStrength = self.neuron.memoryTraceStrength(Neuron.StrenghtNormEuclidean)
    self.widgetExperiment.signalEdit.setText(str(signalStrength))
    self.widgetExperiment.memoryEdit.setText(str(memStrength))
    self.widgetExperiment.outputEdit.setText(str(response))
class Perceptron(object):
    """Online learning Perceptron."""

    def __init__(self, input_size, lrn_rate=1, activation=signal):
        """'input_size' is the length of the input.
        'lrn_rate' is the learning rate.
        """
        self.neuron = Neuron([0] * input_size, 0, activation)
        self.lrn_rate = lrn_rate
        self.fire = self.neuron.fire

    def training(self, inputs_vector, outputs, max_epochs):
        """Not checking if inputs_vector and outputs have the same size."""
        epochs = 0
        while True:
            epochs = epochs + 1
            error_count = 0
            for (inputs, output) in zip(inputs_vector, outputs):
                actual_output = self.fire(inputs)
                error = output - actual_output
                if error != 0:
                    learned = self.lrn_rate * error
                    self.neuron.update(inputs, learned)
                    error_count = error_count + 1
            if error_count == 0:
                break
            elif max_epochs and (epochs > max_epochs):
                return False
        return epochs

    def __str__(self):
        ret = 'lrn_rate: %s' % self.lrn_rate
        ret = '%s\n%s' % (ret, str(self.neuron))
        return ret
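# A minimal training sketch (assumes the Neuron and signal helpers defined
# alongside this class, with signal acting as a 0/1 step activation): AND is
# linearly separable, so by the perceptron convergence theorem the training
# loop should terminate.
p = Perceptron(input_size=2)
inputs_vector = [[0, 0], [0, 1], [1, 0], [1, 1]]
outputs = [0, 0, 0, 1]
epochs = p.training(inputs_vector, outputs, max_epochs=100)
print(epochs if epochs else 'did not converge within max_epochs')
print(p)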