def link_field_to_neuron(entry_fields: dict, name: str, neuron: neurons.Neuron, notify_neuron: Callable):
    """Link the fields associated with a button to its neuron.

    Registers ``notify_neuron`` as a write-trace observer on the named
    entry field's textvariable and records ``name`` as an event on the
    neuron.

    Args:
        entry_fields: Mapping of field names to EntryField instances.
        name: Name of the field being linked to the neuron.
        neuron: The neuron that should track changes to this field.
        notify_neuron: Callback invoked on every write to the field.
    """
    field = entry_fields[name]
    field.textvariable.trace_add('write', notify_neuron)
    neuron.register_event(name)
def calibrate_current(self, ttfs):
    """Find the weakest stimulus current whose first spike lands at ``ttfs``.

    Sweeps currents 0.0 .. 999.9 in 0.1 steps, simulating a fresh Neuron
    for each, and returns the first current whose time-to-first-spike is
    exactly ``ttfs``. Implicitly returns None when no current matches.

    Args:
        ttfs: Desired time-to-first-spike (simulation ticks).
    """
    for raw in range(10000):
        amps = raw / 10.
        cell = Neuron()
        for tick in range(ttfs + 1):
            cell.time_step(amps, tick)
            if cell.has_spiked():
                break
        if cell.last_spike == ttfs:
            return amps
def neuron_linker(self, internal_name: str, neuron: neurons.Neuron,
                  neuron_callback: Callable, initial_state: bool = False):
    """Attach a change-notification link between a widget and a neuron.

    ``neuron_callback`` acts as a factory: it is called with the widget
    name and the neuron, and the callable it returns becomes the
    write-trace observer on the widget's textvariable.

    Args:
        internal_name: Name of the widget; the neuron is notified whenever
            this widget is changed by the user.
        neuron: The neuron to be notified.
        neuron_callback: Factory producing the trace_add observer.
        initial_state: Initial boolean state registered for this event.
    """
    observer = neuron_callback(internal_name, neuron)
    self.entry_fields[internal_name].textvariable.trace_add('write', observer)
    neuron.register_event(internal_name, initial_state)
def sum_epsps(self, time, neuron):
    """Sum the post-synaptic potentials arriving at ``neuron`` at ``time``.

    Walks every neuron in the previous layer that has spiked and adds an
    exponentially decaying contribution weighted by the connecting
    synapse. Inhibitory presynaptic neurons feeding layer 2 contribute
    with inverted sign.

    Args:
        time: Current simulation time step.
        neuron: Post-synaptic neuron receiving the summed current.

    Returns:
        Total input current for ``neuron`` at ``time``.
    """
    prev = neuron.i_layer - 1
    # Hoisted out of the loop: the original built a throwaway Neuron()
    # on every iteration just to read the constant v_max.
    v_max = Neuron().v_max
    current = 0
    for pre in self.layers[prev]:
        if not pre.has_spiked():
            continue
        wt = self.synapses[prev][pre][neuron].weight
        if neuron.i_layer == 2 and pre.type == 'inhibitory':
            wt = -wt
        current += wt * v_max * math.e ** (-(time - pre.last_spike) / self.time_const)
    return current
def __init__(self, n_layers):
    """Build a layered spiking network.

    Args:
        n_layers: Sequence of layer widths; one Neuron per unit and one
            random weight matrix per adjacent pair of layers.
    """
    self.time_const = 3.
    self.learn_rate = 7.
    self.excit_ratio = 10
    self.t_window = 50
    self.trained = defaultdict()
    self.layers = [[Neuron() for _ in range(width)] for width in n_layers]
    # One (pre x post) random weight matrix per consecutive layer pair.
    self.synapses = [np.random.rand(n_pre, n_post)
                     for n_pre, n_post in pairwise(n_layers)]
def calibrate_current(self, rate):
    """Find the stimulus current that yields exactly ``rate`` spikes.

    Rate must be less than ``self.t_window``. Sweeps currents 0.0 .. 99.9
    in 0.1 steps, counting spikes over one full time window per current.
    Stops early once a current saturates the window (one spike per tick),
    since stronger currents only spike more. Returns -1 when no current
    produces exactly ``rate`` spikes.

    NOTE(review): reconstructed from a whitespace-mangled source; the
    rate/saturation checks are taken as post-window checks — confirm.
    """
    for raw in range(1000):
        amps = raw / 10.
        cell = Neuron()
        fired = 0
        for tick in range(1, self.t_window + 1):
            if cell.time_step(amps, tick) == 1:
                fired += 1
        if fired == rate:
            return amps
        if fired == self.t_window:
            break
    return -1
def __init__(self, n_inputs):
    """Initialize a single-output spiking unit over ``n_inputs`` inputs.

    Synaptic weights are drawn uniformly from [-1, 1] and redrawn until
    fewer than sqrt(n_inputs) of them are inhibitory (negative).

    Args:
        n_inputs: Number of input neurons / synaptic weights.
    """
    s = self
    s.lambduh = .25       # learning rate
    s.on_ttfs = 4         # ms when 'on' neurons should spike
    s.t_threshold = 25    # time to wait for output to spike
    s.V_rest, s.V_th = 0, n_inputs
    s.neurons = [Neuron(i_neuron=i) for i in range(n_inputs)]
    s.synapses = [random.uniform(-1, 1) for _ in s.neurons]
    # Bug fix: len(filter(...)) raises TypeError on Python 3 because
    # filter() returns an iterator; count with a generator expression.
    while sum(1 for w in s.synapses if w < 0) >= math.sqrt(n_inputs):
        s.synapses = [random.uniform(-1, 1) for _ in s.neurons]  # limit inhibs
def __init__(self, n_inputs):
    """Initialize a single-output spiking unit over ``n_inputs`` inputs.

    Weights start uniform in [-1, 1]; every weight from index
    sqrt(n_inputs) onward is then redrawn from [0, 1], so only a limited
    number of synapses can be inhibitory.

    Args:
        n_inputs: Number of input neurons / synaptic weights.
    """
    s = self
    s.lambduh = .25       # learning rate
    s.on_ttfs = 3         # ms when 'on' neurons should spike
    s.t_threshold = 10    # time to wait for output to spike
    s.V_rest, s.V_th = 0, n_inputs
    s.neurons = [Neuron(i_neuron=i) for i in range(n_inputs)]
    s.synapses = [random.uniform(-1, 1) for _ in s.neurons]
    inhib_cutoff = math.sqrt(n_inputs)
    for i, _ in enumerate(s.neurons):
        if i >= inhib_cutoff:
            s.synapses[i] = random.uniform(0, 1)  # limit inhibs
def sum_epsps(self, c_time, i_c_layer, i_c_neuron):
    """Sum the post-synaptic potentials arriving at one neuron.

    Args:
        c_time: Current simulation time step.
        i_c_layer: Index of the layer holding the target neuron.
        i_c_neuron: Index of the target neuron within its layer.

    Returns:
        Total decayed, weighted input current from every presynaptic
        neuron that has spiked (last_spike != -1).
    """
    p_layer = self.layers[i_c_layer - 1]
    c_synapse = self.synapses[i_c_layer - 1]
    # Hoisted: the original constructed a throwaway Neuron() on every
    # iteration just to read the constant v_max. Also dropped an unused
    # c_layer local and a commented-out debug print.
    v_max = Neuron().v_max
    current = 0
    for i_p_neuron, p_neuron in enumerate(p_layer):
        p_spike = p_neuron.last_spike
        if p_spike != -1:  # presynaptic neuron has a time-to-first-spike
            weight = c_synapse[i_p_neuron][i_c_neuron]
            current += weight * v_max * math.e ** (
                -(c_time - p_spike) / self.time_const)
    return current
def __init__(self, n_layers):
    """Build a layered spiking network with positional neuron labels.

    Args:
        n_layers: Sequence of layer widths; synapse layers are generated
            between each consecutive pair via ``gen_synapse_layer``.
    """
    s = self
    s.time_const = 3.
    s.learn_rate = 7.
    s.excit_ratio = 2
    s.trained = defaultdict()
    s.layers = [
        [Neuron(i_layer=depth, i_neuron=pos) for pos in range(width)]
        for depth, width in enumerate(n_layers)
    ]
    s.synapses = [s.gen_synapse_layer(pre, post)
                  for pre, post in pairwise(s.layers)]
    # Mark every neuron in the leading ceil(len/excit_ratio) layers as
    # inhibitory. NOTE(review): the original comment said "hidden layer"
    # but this slice starts at layer 0 — confirm which layers were meant.
    n_inhib = int(math.ceil(len(s.layers) / s.excit_ratio))
    for layer in s.layers[:n_inhib]:
        for cell in layer:
            cell.type = 'inhibitory'
def __init__(self, num_neurons, bias):
    """Create a layer of ``num_neurons`` neurons sharing one bias.

    Args:
        num_neurons: How many Neuron instances to create.
        bias: Bias value stored on the layer and passed to each Neuron.
    """
    self.bias = bias
    # Idiom: build the list with a comprehension instead of an append loop.
    self.neurons = [Neuron(self.bias) for _ in range(num_neurons)]
from neurons import Neuron, InputNeuron, OutputNeuron

# Demo: hand-wired three-input network trained with the delta rule.
i1 = InputNeuron(0)
i2 = InputNeuron(1)
i3 = InputNeuron(0)
# learning rate, thresholds of connected neurons, connected neurons, initial connection weights
h1 = Neuron(0.1, 0.5, [i1], [1])
h2 = Neuron(0.1, 0.3, [i2], [1])
h3 = Neuron(0.1, 0.8, [i3], [1])
h4 = Neuron(0.1, 0.6, [h1, h2, h3], [0.3, -0.4, 0.7])
o = OutputNeuron(0.1, [h4], [0.3])
# Output before training.
print(o.fire())
# Repeatedly adjust h4's incoming weights via the delta rule.
# NOTE(review): the meaning of calc_error_delta_rule's two arguments is
# not visible here — presumably (target, input) — confirm in neurons.py.
for _ in range(100):
    h4.calc_error_delta_rule(1, 1)
# Output after training.
print(o.fire())
from neurons import Neuron, Network, Synapse, InhibitorySynapse, ActivityPattern

# Demo 1: exercise a single Neuron's state transitions.
n = Neuron()
print(n.state.dendrites)
n.update()
print(n.state.axon)
n.excite()
print(n.state.axon)
n.inhibit()
print(n.state.axon)

# Demo 2: a two-neuron Network and each neuron's axon state.
N = Network()
N.add_neuron()
N.add_neuron()
print(N.neurons[0].state.axon)
print(N.neurons[1].state.axon)

# Demo 3: history tracing on a fresh network.
# NOTE(review): ntrace() presumably enables recording into N.history —
# confirm against neurons.py; only the calls themselves are visible here.
N = Network()
n1 = N.add_neuron()
print(N.history)
N.ntrace()
print(N.history)
n1.excite()
print(N.history)
def generate(self, inputs):
    """Materialize this layer's neurons.

    NOTE(review): on entry ``self.neurons`` is a count (int); on exit it
    is a list of Neuron objects — the attribute is deliberately re-typed,
    which callers should be aware of.

    Args:
        inputs: Inputs handed to each new Neuron alongside the layer's
            activation.
    """
    # Idiom fix: iterate range(count) instead of a throwaway [1] * count
    # list, and build the result with a comprehension.
    self.neurons = [Neuron(inputs, self.activation) for _ in range(self.neurons)]