def convert_dense(self, model, pre_layer, input_shape, index, onnx_model_graph):
    onnx_model_graph_node = onnx_model_graph.node
    node_info = onnx_model_graph_node[index]
    dense_num = self.get_dense_num(node_info, onnx_model_graph)
    neuron_type = self.get_neuronType(index, onnx_model_graph_node)
    with model:
        x = nengo_dl.tensor_layer(pre_layer, tf.layers.dense, units=dense_num)
        if neuron_type != "softmax":
            if neuron_type == "lif":
                x = nengo_dl.tensor_layer(
                    x, nengo.LIF(amplitude=self.amplitude))
            elif neuron_type == "lifrate":
                x = nengo_dl.tensor_layer(
                    x, nengo.LIFRate(amplitude=self.amplitude))
            elif neuron_type == "adaptivelif":
                x = nengo_dl.tensor_layer(
                    x, nengo.AdaptiveLIF(amplitude=self.amplitude))
            elif neuron_type == "adaptivelifrate":
                x = nengo_dl.tensor_layer(
                    x, nengo.AdaptiveLIFRate(amplitude=self.amplitude))
            elif neuron_type == "izhikevich":
                x = nengo_dl.tensor_layer(
                    x, nengo.Izhikevich(amplitude=self.amplitude))
            elif neuron_type == "softlifrate":
                x = nengo_dl.tensor_layer(
                    x, nengo_dl.neurons.SoftLIFRate(amplitude=self.amplitude))
            elif neuron_type is None:  # default neuron type is LIF
                x = nengo_dl.tensor_layer(
                    x, nengo.LIF(amplitude=self.amplitude))
    output_shape = [dense_num, 1]
    return model, output_shape, x
def test_neuron_slicing(Simulator, plt, seed, rng, allclose):
    N = 6
    sa = slice(None, None, 2)
    sb = slice(None, None, -2)

    x = np.array([-1, -0.25, 1])
    with nengo.Network(seed=seed) as m:
        m.config[nengo.Ensemble].neuron_type = nengo.LIFRate()
        u = nengo.Node(output=x)
        a = nengo.Ensemble(N, dimensions=3, radius=1.7)
        b = nengo.Ensemble(N, dimensions=3, radius=1.7)
        nengo.Connection(u, a)
        c = nengo.Connection(a.neurons[sa], b.neurons[sb])
        c.transform = rng.normal(scale=1e-3, size=(c.size_out, c.size_in))
        ap = nengo.Probe(a.neurons, synapse=0.03)
        bp = nengo.Probe(b.neurons, synapse=0.03)

    with Simulator(m) as sim:
        sim.run(0.2)

    t = sim.trange()
    x = sim.data[ap]
    y = np.zeros((len(t), b.n_neurons))
    y[:, sb] = np.dot(x[:, sa], c.transform.init.T)
    y = b.neuron_type.rates(y, sim.data[b].gain, sim.data[b].bias)

    plt.plot(t, y, "k--")
    plt.plot(t, sim.data[bp])

    assert allclose(y[-10:], sim.data[bp][-10:], atol=3.0, rtol=0.0)
def test_lif_rate(ctx, blockify):
    """Test the `lif_rate` nonlinearity"""
    rng = np.random
    dt = 1e-3

    n_neurons = [123459, 23456, 34567]
    J = RA([rng.normal(loc=1, scale=10, size=n) for n in n_neurons])
    R = RA([np.zeros(n) for n in n_neurons])

    ref = 2e-3
    taus = list(rng.uniform(low=15e-3, high=80e-3, size=len(n_neurons)))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clR = CLRA(queue, R)
    clTaus = CLRA(queue, RA([t * np.ones(n) for t, n in zip(taus, n_neurons)]))

    # simulate host
    nls = [nengo.LIFRate(tau_ref=ref, tau_rc=taus[i])
           for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        nl.step_math(dt, J[i], R[i])

    # simulate device
    plan = plan_lif_rate(queue, dt, clJ, clR, ref, clTaus, blockify=blockify)
    plan()

    rate_sum = np.sum([np.sum(r) for r in R])
    if rate_sum < 1.0:
        logger.warning("LIF rate was not tested above the firing threshold!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(R, clR.to_host())
def build(self, testY):
    self.count = 0

    def update(x):
        """Kalman filter update: X_k = A * X_{k-1} + B * Y_k"""
        Externalmat = np.mat(x[2:4]).T
        Inputmat = np.mat(x[0:2]).T
        Controlmat = np.matrix([[x[4], x[5]], [x[6], x[7]]])
        next_state = np.squeeze(
            np.asarray(Controlmat * Inputmat + Externalmat))
        return next_state

    with self.model:
        Dir_Neurons = nengo.Ensemble(1, dimensions=2 + 2 + 4,
                                     neuron_type=nengo.Direct())
        LIF_Neurons = nengo.Ensemble(
            self.N_A,
            dimensions=2,
            intercepts=Uniform(-1, 1),
            max_rates=Uniform(self.rate_A[0], self.rate_A[1]),
            neuron_type=nengo.LIFRate(tau_rc=self.t_rc, tau_ref=self.t_ref))

        state_func = Piecewise({
            0.0: [0.0, 0.0],
            self.dt: np.squeeze(np.asarray(np.mat([testY[0], testY[1]]).T)),
            2 * self.dt: [0.0, 0.0],
        })
        state = nengo.Node(output=state_func)
        # state_probe = nengo.Probe(state)
        external_input = nengo.Node(output=lambda t: self.data(t))
        # external_input_probe = nengo.Probe(external_input)
        control_signal = nengo.Node(output=lambda t: self.control(t))

        conn0 = nengo.Connection(state, Dir_Neurons[0:2])
        # conn1 = nengo.Connection(external_input, Dir_Neurons[2:4])
        conn2 = nengo.Connection(control_signal, Dir_Neurons[4:8])
        conn3 = nengo.Connection(Dir_Neurons, LIF_Neurons[0:2],
                                 function=update, synapse=self.tau)
        conn4 = nengo.Connection(LIF_Neurons[0:2], Dir_Neurons[0:2])
        self.output = nengo.Probe(LIF_Neurons[0:2])

    self.sim = nengo.Simulator(self.model, dt=self.dt)
def test_scalar_rate(Simulator, seed):
    _test_RLS_network(Simulator, seed, dims=1, lrate=1,
                      neuron_type=nengo.LIFRate(), tau=None,
                      T_train=1, T_test=0.5,
                      tols=[0.02, 1e-3, 0.02, 0.3])
def _test_temporal_solver(plt, Simulator, seed, neuron_type, tau, f, solver):
    dt = 0.002

    # we are cheating a bit here because we'll use the same training data as
    # test data. this makes the unit testing a bit simpler since it's more
    # obvious what will happen when comparing temporal to default
    t = np.arange(0, 0.2, dt)
    stim = np.sin(2 * np.pi * 10 * t)
    function = (f(stim) if tau is None
                else nengo.Lowpass(tau).filt(f(stim), dt=dt))

    with Network(seed=seed) as model:
        u = nengo.Node(output=nengo.processes.PresentInput(stim, dt))
        x = nengo.Ensemble(100, 1, neuron_type=neuron_type)
        output_ideal = nengo.Node(size_in=1)

        post = dict(n_neurons=500, dimensions=1,
                    neuron_type=nengo.LIFRate(), seed=seed + 1)
        output_temporal = nengo.Ensemble(**post)
        output_default = nengo.Ensemble(**post)

        nengo.Connection(u, output_ideal, synapse=tau, function=f)
        nengo.Connection(u, x, synapse=None)
        nengo.Connection(x, output_temporal, synapse=tau,
                         eval_points=stim[:, None],
                         function=function[:, None],
                         solver=Temporal(synapse=tau, solver=solver))
        nengo.Connection(x, output_default, synapse=tau,
                         eval_points=stim[:, None], function=f,
                         solver=solver)

        p_ideal = nengo.Probe(output_ideal, synapse=None)
        p_temporal = nengo.Probe(output_temporal, synapse=None)
        p_default = nengo.Probe(output_default, synapse=None)

    with Simulator(model, dt) as sim:
        sim.run(t[-1])

    plt.plot(sim.trange(), sim.data[p_ideal] - sim.data[p_default],
             label="Default")
    plt.plot(sim.trange(), sim.data[p_ideal] - sim.data[p_temporal],
             label="Temporal")
    plt.legend()

    return (nrmse(sim.data[p_default], target=sim.data[p_ideal]) /
            nrmse(sim.data[p_temporal], target=sim.data[p_ideal]))
def _test_rates(Simulator, rates, name=None):
    if name is None:
        name = rates.__name__

    n = 100
    max_rates = 50 * np.ones(n)
    # max_rates = 200 * np.ones(n)
    intercepts = np.linspace(-0.99, 0.99, n)
    encoders = np.ones((n, 1))
    nparams = dict(n_neurons=n)
    eparams = dict(
        max_rates=max_rates, intercepts=intercepts, encoders=encoders)

    model = nengo.Network()
    with model:
        u = nengo.Node(output=whitenoise(1, 5, seed=8393))
        a = nengo.Ensemble(nengo.LIFRate(**nparams), 1, **eparams)
        b = nengo.Ensemble(nengo.LIF(**nparams), 1, **eparams)
        nengo.Connection(u, a, synapse=0)
        nengo.Connection(u, b, synapse=0)
        up = nengo.Probe(u)
        ap = nengo.Probe(a.neurons, "output", synapse=None)
        bp = nengo.Probe(b.neurons, "output", synapse=None)

    dt = 1e-3
    sim = Simulator(model, dt=dt)
    sim.run(2.)

    t = sim.trange()
    x = sim.data[up]
    a_rates = sim.data[ap] / dt
    spikes = sim.data[bp]
    b_rates = rates(t, spikes)

    with Plotter(Simulator) as plt:
        ax = plt.subplot(411)
        plt.plot(t, x)
        ax = plt.subplot(412)
        implot(plt, t, intercepts, a_rates.T, ax=ax)
        ax.set_ylabel('intercept')
        ax = plt.subplot(413)
        implot(plt, t, intercepts, b_rates.T, ax=ax)
        ax.set_ylabel('intercept')
        ax = plt.subplot(414)
        implot(plt, t, intercepts, (b_rates - a_rates).T, ax=ax)
        ax.set_xlabel('time [s]')
        ax.set_ylabel('intercept')
        plt.savefig('utils.test_neurons.test_rates.%s.pdf' % name)
        plt.close()

    tmask = (t > 0.1) & (t < 1.9)
    relative_rmse = rms(b_rates[tmask] - a_rates[tmask]) / rms(a_rates[tmask])
    return relative_rmse
def convert_conv2d(self, model, pre_layer, input_shape, index, onnx_model_graph):
    onnx_model_graph_node = onnx_model_graph.node
    node_info = onnx_model_graph_node[index]
    neuron_type = self.get_neuronType(index, onnx_model_graph_node)
    filters = self.get_filterNum(node_info, onnx_model_graph)
    # use a separate loop variable so the node index parameter is not shadowed
    for attr_index in range(len(node_info.attribute)):
        if node_info.attribute[attr_index].name == "kernel_shape":
            kernel_size = node_info.attribute[attr_index].ints[0]
        elif node_info.attribute[attr_index].name == "strides":
            strides = node_info.attribute[attr_index].ints[0]
        elif node_info.attribute[attr_index].name == "auto_pad":
            padding = node_info.attribute[attr_index].s.decode('ascii').lower()
            if padding != "valid":
                padding = "same"

    if padding == "same":
        output_shape = [input_shape[0], input_shape[1], filters]
    else:
        output_shape = [
            int((input_shape[0] - kernel_size) / strides + 1),
            int((input_shape[1] - kernel_size) / strides + 1),
            filters,
        ]

    with model:
        x = nengo_dl.tensor_layer(
            pre_layer, tf.layers.conv2d,
            shape_in=(input_shape[0], input_shape[1], input_shape[2]),
            filters=filters, kernel_size=kernel_size, padding=padding)
        if neuron_type == "lif":
            x = nengo_dl.tensor_layer(x, nengo.LIF(amplitude=self.amplitude))
        elif neuron_type == "lifrate":
            x = nengo_dl.tensor_layer(
                x, nengo.LIFRate(amplitude=self.amplitude))
        elif neuron_type == "adaptivelif":
            x = nengo_dl.tensor_layer(
                x, nengo.AdaptiveLIF(amplitude=self.amplitude))
        elif neuron_type == "adaptivelifrate":
            x = nengo_dl.tensor_layer(
                x, nengo.AdaptiveLIFRate(amplitude=self.amplitude))
        elif neuron_type == "izhikevich":
            x = nengo_dl.tensor_layer(
                x, nengo.Izhikevich(amplitude=self.amplitude))
        elif neuron_type == "softlifrate":
            x = nengo_dl.tensor_layer(
                x, nengo_dl.neurons.SoftLIFRate(amplitude=self.amplitude))
        elif neuron_type is None:  # default neuron type is LIF
            x = nengo_dl.tensor_layer(x, nengo.LIF(amplitude=self.amplitude))
    return model, output_shape, x
def get_numpy_fn(kind, params):
    if kind == 'lif':
        lif = nengo.LIFRate(tau_rc=params['tau_rc'],
                            tau_ref=params['tau_ref'])
        return lambda x: (lif.rates(x, params['gain'], params['bias'])
                          * params['amp'])
    elif kind == 'softlif':
        softlif = SoftLIFRate(tau_rc=params['tau_rc'],
                              tau_ref=params['tau_ref'],
                              sigma=params['sigma'])
        return lambda x: (softlif.rates(x, params['gain'], params['bias'])
                          * params['amp'])
    else:
        raise ValueError("Unknown neuron type '%s'" % kind)
def _test_rates(Simulator, rates, plt, seed):
    n = 100
    intercepts = np.linspace(-0.99, 0.99, n)

    model = nengo.Network(seed=seed)
    with model:
        model.config[nengo.Ensemble].max_rates = nengo.dists.Choice([50])
        model.config[nengo.Ensemble].encoders = nengo.dists.Choice([[1]])
        u = nengo.Node(output=nengo.processes.WhiteSignal(2, high=5))
        a = nengo.Ensemble(n, 1, intercepts=intercepts,
                           neuron_type=nengo.LIFRate())
        b = nengo.Ensemble(n, 1, intercepts=intercepts,
                           neuron_type=nengo.LIF())
        nengo.Connection(u, a, synapse=0)
        nengo.Connection(u, b, synapse=0)
        up = nengo.Probe(u)
        ap = nengo.Probe(a.neurons)
        bp = nengo.Probe(b.neurons)

    with Simulator(model, seed=seed + 1) as sim:
        sim.run(2.)

    t = sim.trange()
    x = sim.data[up]
    a_rates = sim.data[ap]
    spikes = sim.data[bp]
    b_rates = rates(t, spikes)

    if plt is not None:
        ax = plt.subplot(411)
        plt.plot(t, x)
        ax = plt.subplot(412)
        implot(plt, t, intercepts, a_rates.T, ax=ax)
        ax.set_ylabel('intercept')
        ax = plt.subplot(413)
        implot(plt, t, intercepts, b_rates.T, ax=ax)
        ax.set_ylabel('intercept')
        ax = plt.subplot(414)
        implot(plt, t, intercepts, (b_rates - a_rates).T, ax=ax)
        ax.set_xlabel('time [s]')
        ax.set_ylabel('intercept')

    tmask = (t > 0.1) & (t < 1.9)
    relative_rmse = rms(b_rates[tmask] - a_rates[tmask]) / rms(a_rates[tmask])
    return relative_rmse
def _test_rates(Simulator, rates, plt, seed, name=None):
    if name is None:
        name = rates.__name__

    n = 100
    intercepts = np.linspace(-0.99, 0.99, n)

    model = nengo.Network(seed=seed)
    with model:
        model.config[nengo.Ensemble].max_rates = Choice([50])
        model.config[nengo.Ensemble].encoders = Choice([[1]])
        u = nengo.Node(output=WhiteNoise(2., 5).f(
            rng=np.random.RandomState(seed=seed)))
        a = nengo.Ensemble(n, 1, intercepts=intercepts,
                           neuron_type=nengo.LIFRate())
        b = nengo.Ensemble(n, 1, intercepts=intercepts,
                           neuron_type=nengo.LIF())
        nengo.Connection(u, a, synapse=0)
        nengo.Connection(u, b, synapse=0)
        up = nengo.Probe(u)
        ap = nengo.Probe(a.neurons)
        bp = nengo.Probe(b.neurons)

    sim = Simulator(model)
    sim.run(2.)

    t = sim.trange()
    x = sim.data[up]
    a_rates = sim.data[ap]
    spikes = sim.data[bp]
    b_rates = rates(t, spikes)

    ax = plt.subplot(411)
    plt.plot(t, x)
    ax = plt.subplot(412)
    implot(plt, t, intercepts, a_rates.T, ax=ax)
    ax.set_ylabel('intercept')
    ax = plt.subplot(413)
    implot(plt, t, intercepts, b_rates.T, ax=ax)
    ax.set_ylabel('intercept')
    ax = plt.subplot(414)
    implot(plt, t, intercepts, (b_rates - a_rates).T, ax=ax)
    ax.set_xlabel('time [s]')
    ax.set_ylabel('intercept')
    plt.saveas = 'utils.test_neurons.test_rates.%s.pdf' % name

    tmask = (t > 0.1) & (t < 1.9)
    relative_rmse = rms(b_rates[tmask] - a_rates[tmask]) / rms(a_rates[tmask])
    return relative_rmse
def __init__(self, botnet):
    super(Grabbed, self).__init__()
    with self:
        self.has_grabbed = nengo.Ensemble(n_neurons=50, dimensions=1,
                                          neuron_type=nengo.LIFRate())

        def state(x):
            if x < 0.5:
                return 0
            else:
                return 1

        nengo.Connection(self.has_grabbed, self.has_grabbed, synapse=0.1)

        def opened_gripper(x):
            if x > -0.1:
                return -1
            else:
                return 0

        nengo.Connection(botnet.arm[3], self.has_grabbed,
                         function=opened_gripper)
def _add_neuron_layer(self, layer):
    inputs = [self._get_input(layer)]
    neuron = layer["neuron"]
    ntype = neuron["type"]
    n = layer["outputs"]

    gain = 1.0
    bias = 0.0
    amplitude = 1.0
    if ntype == "ident":
        neuron_type = nengo.Direct()
    elif ntype == "relu":
        neuron_type = nengo.RectifiedLinear()
    elif ntype == "logistic":
        neuron_type = nengo.Sigmoid()
    elif ntype == "softlif":
        tau_ref, tau_rc, alpha, amp, sigma = [
            neuron["params"][k] for k in ["t", "r", "a", "m", "g"]
        ]
        lif_type = self.lif_type.lower()
        if lif_type == "lif":
            neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
        elif lif_type == "lifrate":
            neuron_type = nengo.LIFRate(tau_rc=tau_rc, tau_ref=tau_ref)
        elif lif_type == "softlifrate":
            neuron_type = SoftLIFRate(sigma=sigma, tau_rc=tau_rc,
                                      tau_ref=tau_ref)
        else:
            raise KeyError("Unrecognized LIF type %r" % self.lif_type)
        gain = alpha
        bias = 1.0
        amplitude = amp
    else:
        raise NotImplementedError("Neuron type %r" % ntype)

    return self.add_neuron_layer(
        n,
        inputs=inputs,
        neuron_type=neuron_type,
        synapse=self.synapse,
        gain=gain,
        bias=bias,
        amplitude=amplitude,
        name=layer["name"],
    )
def test_softlifrate_rates(plt):
    gain = 0.9
    bias = 1.7
    tau_rc = 0.03
    tau_ref = 0.002

    lif = nengo.LIFRate(tau_rc=tau_rc, tau_ref=tau_ref)
    softlif = SoftLIFRate(tau_rc=tau_rc, tau_ref=tau_ref, sigma=0.00001)

    x = np.linspace(-2, 2, 301)
    lif_r = lif.rates(x, gain, bias)
    softlif_r = softlif.rates(x, gain, bias)

    plt.plot(x, lif_r)
    plt.plot(x, softlif_r)

    assert np.allclose(softlif_r, lif_r, atol=1e-3, rtol=1e-3)
def test_neuron_build_errors(Simulator):
    # unsupported neuron type
    with nengo.Network() as net:
        nengo.Ensemble(5, 1, neuron_type=nengo.neurons.Sigmoid(tau_ref=0.005))
    with pytest.raises(BuildError, match="type 'Sigmoid' cannot be simulated"):
        with Simulator(net):
            pass

    # unsupported RegularSpiking type
    with nengo.Network() as net:
        nengo.Ensemble(5, 1, neuron_type=nengo.RegularSpiking(
            nengo.Sigmoid(tau_ref=0.005)))
    with pytest.raises(BuildError,
                       match="RegularSpiking.*'Sigmoid'.*cannot be simu"):
        with Simulator(net):
            pass

    # amplitude with RegularSpiking base type
    with nengo.Network() as net:
        nengo.Ensemble(5, 1, neuron_type=nengo.RegularSpiking(
            nengo.LIFRate(amplitude=0.5)))
    with pytest.raises(BuildError,
                       match="Amplitude is not supported on RegularSpikin"):
        with Simulator(net):
            pass

    # non-zero initial voltage warning
    with nengo.Network() as net:
        nengo.Ensemble(
            5, 1,
            neuron_type=nengo.LIF(
                initial_state={"voltage": nengo.dists.Uniform(0, 1)}),
        )
    with pytest.warns(Warning,
                      match="initial values for 'voltage' being non-zero"):
        with Simulator(net):
            pass
def test_softlifrate_rates(plt, allclose):
    gain = 0.9
    bias = 1.7
    tau_rc = 0.03
    tau_ref = 0.002

    lif = nengo.LIFRate(tau_rc=tau_rc, tau_ref=tau_ref)
    softlif = SoftLIFRate(tau_rc=tau_rc, tau_ref=tau_ref, sigma=0.00001)

    x = np.linspace(-2, 2, 301)
    lif_r = lif.rates(x, gain, bias)
    softlif_r = softlif.rates(x, gain, bias)

    plt.plot(x, lif_r, label="LIF")
    plt.plot(x, softlif_r, label="SoftLIF")
    plt.legend(loc="best")

    assert allclose(softlif_r, lif_r, atol=1e-3, rtol=1e-3)
def _add_softlif_layer(self, layer):
    from .neurons import SoftLIFRate

    taus = dict(tau_rc=layer.tau_rc, tau_ref=layer.tau_ref)
    lif_type = self.lif_type.lower()
    if lif_type == 'lif':
        neuron_type = nengo.LIF(**taus)
    elif lif_type == 'lifrate':
        neuron_type = nengo.LIFRate(**taus)
    elif lif_type == 'softlifrate':
        neuron_type = SoftLIFRate(sigma=layer.sigma, **taus)
    else:
        raise KeyError("Unrecognized LIF type %r" % self.lif_type)

    n = np.prod(layer.input_shape[1:])
    return self.add_neuron_layer(
        n, neuron_type=neuron_type, synapse=self.synapse,
        gain=1, bias=1, amplitude=layer.amplitude, name=layer.name)
def test_minibatch(Simulator, seed):
    with nengo.Network(seed=seed) as net:
        inp = [
            nengo.Node(output=[0.5]),
            nengo.Node(output=np.sin),
            nengo.Node(output=nengo.processes.WhiteSignal(5, 0.5, seed=seed)),
        ]

        ens = [
            nengo.Ensemble(10, 1, neuron_type=nengo.AdaptiveLIF()),
            nengo.Ensemble(10, 1, neuron_type=nengo.LIFRate()),
            nengo.Ensemble(10, 2, noise=nengo.processes.WhiteNoise(seed=seed)),
        ]

        nengo.Connection(inp[0], ens[0])
        nengo.Connection(inp[1], ens[1], synapse=None)
        nengo.Connection(inp[2], ens[2], synapse=nengo.Alpha(0.1),
                         transform=[[1], [1]])

        conn = nengo.Connection(ens[0], ens[1],
                                learning_rule_type=nengo.PES())
        nengo.Connection(inp[0], conn.learning_rule)

        ps = [nengo.Probe(e) for e in ens]

    with Simulator(net, minibatch_size=None) as sim:
        probe_data = [[] for _ in ps]
        for i in range(5):
            sim.run_steps(100)
            for j, p in enumerate(ps):
                probe_data[j] += [sim.data[p]]
            sim.reset()
        probe_data = [np.stack(x, axis=0) for x in probe_data]

    with Simulator(net, minibatch_size=5) as sim:
        sim.run_steps(100)

    assert np.allclose(sim.data[ps[0]], probe_data[0], atol=1e-6)
    assert np.allclose(sim.data[ps[1]], probe_data[1], atol=1e-6)
    assert np.allclose(sim.data[ps[2]], probe_data[2], atol=1e-6)
def test_neural_accuracy(Simulator, seed, rng, dims, neurons_per_product=128):
    a = rng.normal(scale=np.sqrt(1.0 / dims), size=dims)
    b = rng.normal(scale=np.sqrt(1.0 / dims), size=dims)
    result = circconv(a, b)

    model = nengo.Network(label="circular conv", seed=seed)
    model.config[nengo.Ensemble].neuron_type = nengo.LIFRate()
    with model:
        input_a = nengo.Node(a)
        input_b = nengo.Node(b)
        cconv = nengo.networks.CircularConvolution(neurons_per_product,
                                                   dimensions=dims)
        nengo.Connection(input_a, cconv.input_a, synapse=None)
        nengo.Connection(input_b, cconv.input_b, synapse=None)
        res_p = nengo.Probe(cconv.output)

    with Simulator(model) as sim:
        sim.run(0.01)

    error = rms(result - sim.data[res_p][-1])
    assert error < 0.1
def _add_neuron_layer(self, layer):
    neuron = layer['neuron']
    ntype = neuron['type']
    n = layer['outputs']

    e = nengo.Ensemble(n, 1, label='%s_neurons' % layer['name'])
    e.gain = np.ones(n)
    e.bias = np.zeros(n)
    transform = 1.

    if ntype == 'ident':
        e.neuron_type = nengo.Direct()
    elif ntype == 'relu':
        e.neuron_type = nengo.RectifiedLinear()
    elif ntype == 'logistic':
        e.neuron_type = nengo.Sigmoid()
    elif ntype == 'softlif':
        from .neurons import SoftLIFRate
        tau_ref, tau_rc, alpha, amp, sigma, noise = [
            neuron['params'][k] for k in ['t', 'r', 'a', 'm', 'g', 'n']]
        lif_type = self.lif_type.lower()
        if lif_type == 'lif':
            e.neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
        elif lif_type == 'lifrate':
            e.neuron_type = nengo.LIFRate(tau_rc=tau_rc, tau_ref=tau_ref)
        elif lif_type == 'softlifrate':
            e.neuron_type = SoftLIFRate(
                sigma=sigma, tau_rc=tau_rc, tau_ref=tau_ref)
        else:
            raise KeyError("Unrecognized LIF type %r" % self.lif_type)
        e.gain = alpha * np.ones(n)
        e.bias = np.ones(n)
        transform = amp
    else:
        raise NotImplementedError("Neuron type %r" % ntype)

    node = nengo.Node(size_in=n, label=layer['name'])
    nengo.Connection(self._get_input(layer), e.neurons, synapse=None)
    nengo.Connection(
        e.neurons, node, transform=transform, synapse=self.synapse)
    return node
def test_temporal_solver(plt, Simulator, seed):
    plt.subplot(3, 1, 1)
    for weights in (False, True):
        assert 1.2 < _test_temporal_solver(  # 1.5153... at dev time
            plt, Simulator, seed, nengo.LIF(), 0.005, lambda x: x,
            nengo.solvers.LstsqL2(weights=weights))

    # LIFRate has no internal dynamics, and so the two solvers
    # are actually numerically equivalent
    plt.subplot(3, 1, 2)
    assert np.allclose(
        1, _test_temporal_solver(plt, Simulator, seed, nengo.LIFRate(), None,
                                 lambda x: 1 - 2 * x**2,
                                 nengo.solvers.LstsqL2()))

    # We'll need to overfit slightly (small reg) to see the improvement for
    # AdaptiveLIF (see thesis for a more principled way to improve)
    plt.subplot(3, 1, 3)
    assert 2.0 < _test_temporal_solver(  # 2.2838... at dev time
        plt, Simulator, seed, nengo.AdaptiveLIF(), 0.1, np.sin,
        nengo.solvers.LstsqL2(reg=1e-5))
def test_learning_rate_schedule(Simulator):
    with nengo.Network() as net:
        a = nengo.Node([0])
        b = nengo.Ensemble(10, 1, neuron_type=nengo.LIFRate())
        nengo.Connection(a, b)
        p = nengo.Probe(b)

    with Simulator(net) as sim:
        vals = [1.0, 0.1, 0.001]
        with tf.device("/cpu:0"):
            l_rate = tf.train.piecewise_constant(
                sim.training_step,
                [tf.constant(4, dtype=tf.int64),
                 tf.constant(9, dtype=tf.int64)],
                vals)
        opt = tf.train.GradientDescentOptimizer(l_rate)

        for i in range(3):
            assert np.allclose(sim.sess.run(l_rate), vals[i])
            sim.train({a: np.zeros((1, 10, 1))}, {p: np.zeros((1, 10, 1))},
                      opt, n_epochs=5)
def make_vision_system(images, outputs, n_neurons=1000,
                       AIT_V1_strength=0.06848695023305285,
                       V1_r_transform=0.11090645719111913,
                       AIT_r_transform=0.8079719992231219):
    # represents the currently attended item
    vision_system = nengo.Network(label='vision_system')
    with vision_system:
        presentation_node = nengo.Node(None, size_in=images.shape[1],
                                       label='presentation_node')
        vision_system.presentation_node = presentation_node

        rng = np.random.RandomState(9)
        # Gabor encoders (11x11 patches) work better here than random encoders
        encoders = Gabor().generate(n_neurons, (11, 11), rng=rng)
        encoders = Mask((14, 90)).populate(encoders, rng=rng, flatten=True)

        # n_neurons neurons; dimensions = number of pixels
        V1 = nengo.Ensemble(n_neurons, images.shape[1],
                            eval_points=images,
                            neuron_type=nengo.LIFRate(),
                            intercepts=nengo.dists.Choice([-0.5]),  # can switch these off
                            max_rates=nengo.dists.Choice([100]),
                            encoders=encoders,
                            label='V1')
        # visual_representation = nengo.Node(size_in=Dmid)  # output, in this case 466 outputs
        AIT = nengo.Ensemble(n_neurons, dimensions=outputs.shape[1],
                             label='AIT')  # output, in this case 466 outputs

        visconn = nengo.Connection(V1, AIT, synapse=0.005,
                                   eval_points=images, function=outputs,
                                   solver=nengo.solvers.LstsqL2(reg=0.01))
        # the transform makes this connection much weaker than the forward connection
        Ait_V1_backwardsconn = nengo.Connection(
            AIT, V1, synapse=0.005, eval_points=outputs, function=images,
            solver=nengo.solvers.LstsqL2(reg=0.01),
            transform=AIT_V1_strength)

        nengo.Connection(presentation_node, V1, synapse=None)
        nengo.Connection(AIT, AIT, synapse=0.1, transform=AIT_r_transform)
        nengo.Connection(V1, V1, synapse=0.1, transform=V1_r_transform)

        # display the attended item (shows the input)
        display_node = nengo.Node(display_func,
                                  size_in=presentation_node.size_out,
                                  label='display_node')
        nengo.Connection(presentation_node, display_node, synapse=None)

    # expose the ensembles so callers can connect to them
    vision_system.AIT = AIT
    vision_system.V1 = V1
    return vision_system
def test_io(tmpdir):
    tmpfile = str(tmpdir.join("model.pkl"))
    m1 = nengo.Network()
    with m1:
        sin = nengo.Node(output=np.sin)
        cons = nengo.Node(output=-.5)
        factors = nengo.Ensemble(nengo.LIF(20), dimensions=2, radius=1.5)
        factors.encoders = np.tile(
            [[1, 1], [-1, 1], [1, -1], [-1, -1]],
            (factors.n_neurons // 4, 1))
        product = nengo.Ensemble(nengo.LIFRate(10), dimensions=1)
        nengo.Connection(sin, factors[0])
        nengo.Connection(cons, factors[1])
        factors_p = nengo.Probe(
            factors, 'decoded_output', sample_every=.01, synapse=.01)
        assert factors_p  # to suppress F841
        product_p = nengo.Probe(
            product, 'decoded_output', sample_every=.01, synapse=.01)
        assert product_p  # to suppress F841

    m1.save(tmpfile)
    m2 = nengo.Network.load(tmpfile)
    assert m1 == m2
def _add_neuron_layer(self, layer):
    inputs = [self._get_input(layer)]
    neuron = layer['neuron']
    ntype = neuron['type']
    n = layer['outputs']

    gain = 1.
    bias = 0.
    amplitude = 1.
    if ntype == 'ident':
        neuron_type = nengo.Direct()
    elif ntype == 'relu':
        neuron_type = nengo.RectifiedLinear()
    elif ntype == 'logistic':
        neuron_type = nengo.Sigmoid()
    elif ntype == 'softlif':
        from .neurons import SoftLIFRate
        tau_ref, tau_rc, alpha, amp, sigma, noise = [
            neuron['params'][k] for k in ['t', 'r', 'a', 'm', 'g', 'n']]
        lif_type = self.lif_type.lower()
        if lif_type == 'lif':
            neuron_type = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)
        elif lif_type == 'lifrate':
            neuron_type = nengo.LIFRate(tau_rc=tau_rc, tau_ref=tau_ref)
        elif lif_type == 'softlifrate':
            neuron_type = SoftLIFRate(
                sigma=sigma, tau_rc=tau_rc, tau_ref=tau_ref)
        else:
            raise KeyError("Unrecognized LIF type %r" % self.lif_type)
        gain = alpha
        bias = 1.
        amplitude = amp
    else:
        raise NotImplementedError("Neuron type %r" % ntype)

    return self.add_neuron_layer(
        n, inputs=inputs, neuron_type=neuron_type, synapse=self.synapse,
        gain=gain, bias=bias, amplitude=amplitude, name=layer['name'])
def test_input_magnitude(Simulator, seed, rng, dims=16, magnitude=10):
    """Test to make sure the magnitude scaling works.

    Builds two different CircularConvolution networks, one with the correct
    magnitude and one with 1.0 as the input_magnitude.
    """
    neurons_per_product = 128

    a = rng.normal(scale=np.sqrt(1. / dims), size=dims) * magnitude
    b = rng.normal(scale=np.sqrt(1. / dims), size=dims) * magnitude
    result = circconv(a, b)

    model = nengo.Network(label="circular conv", seed=seed)
    model.config[nengo.Ensemble].neuron_type = nengo.LIFRate()
    with model:
        inputA = nengo.Node(a)
        inputB = nengo.Node(b)
        cconv = nengo.networks.CircularConvolution(
            neurons_per_product, dimensions=dims, input_magnitude=magnitude)
        nengo.Connection(inputA, cconv.A, synapse=None)
        nengo.Connection(inputB, cconv.B, synapse=None)
        res_p = nengo.Probe(cconv.output)

        cconv_bad = nengo.networks.CircularConvolution(
            neurons_per_product, dimensions=dims,
            input_magnitude=1)  # incorrect magnitude
        nengo.Connection(inputA, cconv_bad.A, synapse=None)
        nengo.Connection(inputB, cconv_bad.B, synapse=None)
        res_p_bad = nengo.Probe(cconv_bad.output)

    sim = Simulator(model)
    sim.run(0.01)

    error = rmse(result, sim.data[res_p][-1]) / (magnitude ** 2)
    error_bad = rmse(result, sim.data[res_p_bad][-1]) / (magnitude ** 2)

    assert error < 0.1
    assert error_bad > 0.1
def test_connection(Simulator, seed, d):
    with Network(seed=seed) as model:
        stim = nengo.Node(output=lambda t: np.sin(t * 2 * np.pi), size_out=d)
        x = nengo.Ensemble(1, d, intercepts=[-1], neuron_type=nengo.LIFRate())
        default = nengo.Node(size_in=d)
        improved = nengo.Node(size_in=d)

        stim_conn = Connection(stim, x, synapse=None)
        default_conn = nengo.Connection(x, default)
        improved_conn = Connection(x, improved)

        p_default = nengo.Probe(default)
        p_improved = nengo.Probe(improved)
        p_stim = nengo.Probe(stim, synapse=0.005)

    assert not isinstance(stim_conn.solver, BiasedSolver)
    assert not isinstance(default_conn.solver, BiasedSolver)
    assert isinstance(improved_conn.solver, BiasedSolver)

    with Simulator(model) as sim:
        sim.run(1.0)

    assert (rmse(sim.data[p_default], sim.data[p_stim]) >
            rmse(sim.data[p_improved], sim.data[p_stim]))
def convert_dense(self, model, pre_layer, input_shape, index, onnx_model_graph):
    onnx_model_graph_node = onnx_model_graph.node
    node_info = onnx_model_graph_node[index]
    dense_num = self.get_dense_num(node_info, onnx_model_graph)
    # walk the graph nodes to determine which op_type the neuron is
    neuron_type = self.get_neuronType(index, onnx_model_graph_node)
    with model:
        x = nengo_dl.Layer(
            tf.keras.layers.Dense(units=dense_num))(pre_layer)
        if neuron_type != "softmax":
            if neuron_type == "lif":
                x = nengo_dl.Layer(nengo.LIF(amplitude=self.amplitude))(x)
            elif neuron_type == "lifrate":
                x = nengo_dl.Layer(
                    nengo.LIFRate(amplitude=self.amplitude))(x)
            elif neuron_type == "adaptivelif":
                x = nengo_dl.Layer(
                    nengo.AdaptiveLIF(amplitude=self.amplitude))(x)
            elif neuron_type == "adaptivelifrate":
                x = nengo_dl.Layer(
                    nengo.AdaptiveLIFRate(amplitude=self.amplitude))(x)
            elif neuron_type == "izhikevich":
                x = nengo_dl.Layer(
                    nengo.Izhikevich(amplitude=self.amplitude))(x)
            elif neuron_type == "softlifrate":
                x = nengo_dl.Layer(
                    nengo_dl.neurons.SoftLIFRate(
                        amplitude=self.amplitude))(x)
            elif neuron_type is None:  # default neuron type = LIF
                x = nengo_dl.Layer(nengo.LIF(amplitude=self.amplitude))(x)
    output_shape = [dense_num, 1]
    print('convert Dense finish')
    # return x so the caller keeps stacking layers onto the model
    return model, output_shape, x
test_targets = one_hot(y_test, 10)

# --- set up network parameters
n_vis = X_train.shape[1]
n_out = train_targets.shape[1]
# n_hid = 300
n_hid = 1000
# n_hid = 3000

# encoders = rng.normal(size=(n_hid, 11, 11))
encoders = Gabor().generate(n_hid, (11, 11), rng=rng)
encoders = Mask((28, 28)).populate(encoders, rng=rng, flatten=True)

ens_params = dict(
    eval_points=X_train,
    neuron_type=nengo.LIFRate(),
    intercepts=nengo.dists.Choice([-0.5]),
    max_rates=nengo.dists.Choice([100]),
    encoders=encoders,
)

solver = nengo.solvers.LstsqL2(reg=0.01)
# solver = nengo.solvers.LstsqL2(reg=0.0001)

with nengo.Network(seed=3) as model:
    a = nengo.Ensemble(n_hid, n_vis, **ens_params)
    v = nengo.Node(size_in=n_out)
    conn = nengo.Connection(
        a, v, synapse=None,
        eval_points=X_train, function=train_targets, solver=solver)
def __init__(self, kp=0, kd=0, neural=False, adapt=False, num_motors=4,
             neuron_model=False, pes_learning_rate=1e-4):
    self.kp = kp
    self.kd = kd
    self.prev_time = time.time()
    self.output = np.zeros(num_motors)
    self.adapt = adapt
    self.pes_learning_rate = pes_learning_rate
    self.neuron_model = neuron_model

    if neural:
        model = nengo.Network(label="Adaptive Controller")
        tau_rc = 0.02  # membrane time constant; nengo time constants are in seconds
        tau_ref = 0.002  # refractory period, also in seconds
        if self.neuron_model == "RELU":
            cur_model = nengo.RectifiedLinear()
        elif self.neuron_model == "LIF":
            cur_model = nengo.LIF(tau_rc=tau_rc, tau_ref=tau_ref)  # LIF model object
        elif self.neuron_model == "LIFRate":
            cur_model = nengo.LIFRate(tau_rc=tau_rc, tau_ref=tau_ref)  # LIF rate model object

        def output_func(t, x):
            self.output = np.copy(x)

        def input_func_q(t, x):
            return self.q

        def input_func_dq(t, x):
            return self.dq

        def input_func_target(t, x):
            return self.target

        def input_func_d_target(t, x):
            return self.d_target

        with model:
            output = nengo.Node(output_func, size_in=num_motors, size_out=0)
            input_q = nengo.Node(input_func_q, size_in=num_motors,
                                 size_out=num_motors)
            input_dq = nengo.Node(input_func_dq, size_in=num_motors,
                                  size_out=num_motors)
            input_target = nengo.Node(input_func_target, size_in=num_motors,
                                      size_out=num_motors)
            input_d_target = nengo.Node(input_func_d_target,
                                        size_in=num_motors,
                                        size_out=num_motors)

            # adaptive component; use the learning rate passed in rather than
            # re-hardcoding it here
            if self.adapt:
                adapt_ens = nengo.Ensemble(
                    n_neurons=1000, dimensions=num_motors, radius=1.5,
                    neuron_type=cur_model)
                learn_conn = nengo.Connection(
                    adapt_ens, output,
                    learning_rule_type=nengo.PES(self.pes_learning_rate))

            for i in range(num_motors):
                inverter = nengo.Ensemble(500, dimensions=2, radius=1.5,
                                          neuron_type=cur_model)
                proportional = nengo.Ensemble(500, dimensions=1, radius=1.5,
                                              neuron_type=cur_model)
                derivative = nengo.Ensemble(500, dimensions=1, radius=1.5,
                                            neuron_type=cur_model)
                control_signal = nengo.Ensemble(500, dimensions=1, radius=1.5,
                                                neuron_type=cur_model)

                # invert terms that will be subtracted
                nengo.Connection(input_q[i], inverter[0], synapse=None,
                                 function=lambda x: x * -1)
                nengo.Connection(input_dq[i], inverter[1], synapse=None,
                                 function=lambda x: x * -1)

                # calculate the proportional part
                nengo.Connection(inverter[0], proportional, synapse=None,
                                 function=lambda x: x * kp)
                nengo.Connection(input_target[i], proportional, synapse=None,
                                 function=lambda x: x * kp)

                # calculate the derivative part
                nengo.Connection(inverter[1], derivative, synapse=None,
                                 function=lambda x: x * kd)
                nengo.Connection(input_d_target[i], derivative, synapse=None,
                                 function=lambda x: x * kd)

                # output
                nengo.Connection(proportional, control_signal)
                nengo.Connection(derivative, control_signal)
                nengo.Connection(control_signal, output[i])

                if self.adapt:
                    # adapt connections
                    nengo.Connection(input_q[i], adapt_ens,
                                     function=lambda x: np.zeros(num_motors),
                                     synapse=None)
                    nengo.Connection(control_signal,
                                     learn_conn.learning_rule[i],
                                     transform=-1, synapse=None)

                # Nodes to access PID components.
                # PID_Ens.append({"inverter": inverter,
                #                 "proportional": proportional,
                #                 "derivative": derivative,  # TODO: remove all except control_signal
                #                 "control_signal": control_signal})
            # control_signal_p = nengo.Probe(PID_Ens[0]["control_signal"], synapse=.01)

        self.sim = nengo.Simulator(model)