def __init__(self, target_func, cid=None, decoder_solver=None, noise_std=None):
    gain_matrix, adaptive_filter, task_to_rotor = gain_sets.get_gain_matrices(
        gain_set='hybrid_fast')

    self.model = nengo.Network(label='V-REP Adaptive Quadcopter', seed=13)
    with self.model:
        # Sensors and Actuators
        self.copter_node = Quadcopter(target_func=target_func, cid=cid,
                                      noise_std=noise_std)
        copter = nengo.Node(self.copter_node, size_in=4, size_out=12)

        # State Error Population
        state = nengo.Ensemble(n_neurons=1, dimensions=12,
                               neuron_type=nengo.Direct())

        # Contains the rotor speeds
        motor = nengo.Ensemble(n_neurons=1, dimensions=4,
                               neuron_type=nengo.Direct())

        # Command in 'task' space (up/down, forward/back, left/right, rotate)
        task = nengo.Ensemble(n_neurons=1, dimensions=4,
                              neuron_type=nengo.Direct())

        adaptation = nengo.Ensemble(n_neurons=1000, dimensions=12)

        nengo.Connection(state, adaptation, synapse=None)
        if decoder_solver is None:
            self.a_conn = nengo.Connection(
                adaptation, task, function=lambda x: [0, 0, 0, 0],
                learning_rule_type=nengo.PES(learning_rate=1e-4))
        else:
            self.a_conn = nengo.Connection(
                adaptation, task, function=lambda x: [0, 0, 0, 0],
                solver=decoder_solver[0],
                learning_rule_type=nengo.PES(learning_rate=1e-4))

        # Sign of the error changed in newer versions of Nengo since this work
        error_conn = nengo.Connection(state, self.a_conn.learning_rule,
                                      transform=-1 * gain_matrix)

        nengo.Connection(state, task, transform=gain_matrix)
        nengo.Connection(task, motor, transform=task_to_rotor)
        nengo.Connection(copter, state, synapse=None)
        nengo.Connection(motor, copter, synapse=0.001)

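# --- A minimal, self-contained sketch of the adaptive pattern above: a PES
#     connection learns a corrective task-space signal from the state error,
#     in parallel with a fixed gain-matrix path. Everything here (names,
#     sizes, the absence of the V-REP plant) is an illustrative assumption,
#     not the project's actual controller.
import numpy as np
import nengo

def build_adaptive_loop(gain_matrix, seed=13):
    task_dims, state_dims = gain_matrix.shape
    with nengo.Network(seed=seed) as net:
        state = nengo.Node(size_in=state_dims)
        task = nengo.Node(size_in=task_dims)
        adaptation = nengo.Ensemble(n_neurons=1000, dimensions=state_dims)

        nengo.Connection(state, adaptation, synapse=None)
        a_conn = nengo.Connection(
            adaptation, task, function=lambda x: np.zeros(task_dims),
            learning_rule_type=nengo.PES(learning_rate=1e-4))
        # Gain-weighted state error drives the learning rule; note the sign
        # convention flipped in newer Nengo versions, as the comment above warns.
        nengo.Connection(state, a_conn.learning_rule, transform=-gain_matrix)
        nengo.Connection(state, task, transform=gain_matrix)  # fixed path
    return net

# e.g. net = build_adaptive_loop(np.ones((4, 12)) * 0.1)
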
def test_pes_multidim_error(Simulator, rng):
    """Test that PES works on error connections mapping from N to 1 dims.

    Note that the transform is applied before the learning rule, so the
    error signal should be 1-dimensional.
    """
    with nengo.Network() as net:
        err = nengo.Node(output=[0])
        ens1 = nengo.Ensemble(20, 3)
        ens2 = nengo.Ensemble(10, 1)

        # Case 1: ens -> ens, weights=False
        conn = nengo.Connection(ens1, ens2,
                                transform=np.ones((1, 3)),
                                solver=nengo.solvers.LstsqL2(weights=False),
                                learning_rule_type={"pes": nengo.PES()})
        nengo.Connection(err, conn.learning_rule["pes"])

        # Case 2: ens -> ens, weights=True
        conn = nengo.Connection(ens1, ens2,
                                transform=np.ones((1, 3)),
                                solver=nengo.solvers.LstsqL2(weights=True),
                                learning_rule_type={"pes": nengo.PES()})
        nengo.Connection(err, conn.learning_rule["pes"])

        # Case 3: neurons -> ens
        conn = nengo.Connection(ens1.neurons, ens2,
                                transform=np.ones((1, ens1.n_neurons)),
                                learning_rule_type={"pes": nengo.PES()})
        nengo.Connection(err, conn.learning_rule["pes"])

    with Simulator(net) as sim:
        sim.run(0.01)

def test_null_error():
    with nengo.Network():
        a = nengo.Ensemble(1, 1)
        b = nengo.Ensemble(1, 1)

        # works with a decoded connection (since we'll be generating weights
        # as part of the decoding process)
        nengo.Connection(a, b, learning_rule_type=nengo.PES(), transform=None)

        # error on neuron connection for decoder learning rule
        with pytest.raises(ValidationError, match="does not have weights"):
            nengo.Connection(
                a.neurons, b, learning_rule_type=nengo.PES(), transform=None
            )

        # works for decoded connection with solver.weights=True
        nengo.Connection(
            a, b,
            solver=nengo.solvers.LstsqL2(weights=True),
            learning_rule_type=nengo.BCM(),
            transform=None,
        )

        # error on neuron connection for weights learning rule
        with pytest.raises(ValidationError, match="does not have weights"):
            nengo.Connection(
                a.neurons, b.neurons, learning_rule_type=nengo.BCM(),
                transform=None
            )

        # works with encoder learning rules (since they don't require a transform)
        nengo.Connection(a.neurons, b, learning_rule_type=nengo.Voja(),
                         transform=None)

def test_split_host_to_learning_rule():
    with nengo.Network() as net:
        add_params(net)
        pre = nengo.Ensemble(10, 1, label="pre")
        post = nengo.Ensemble(10, 1, label="post")
        err_onchip = nengo.Ensemble(10, 1, label="err_onchip")
        err_offchip = nengo.Ensemble(10, 1, label="err_offchip")
        net.config[err_offchip].on_chip = False

        ens_conn = nengo.Connection(pre, post, learning_rule_type=nengo.PES())
        neurons_conn = nengo.Connection(pre.neurons, post.neurons,
                                        learning_rule_type=nengo.PES())

        nengo.Connection(err_onchip, ens_conn.learning_rule)
        nengo.Connection(err_onchip, neurons_conn.learning_rule)
        nengo.Connection(err_offchip, ens_conn.learning_rule)
        nengo.Connection(err_offchip, neurons_conn.learning_rule)

    split = Split(net)
    assert split.on_chip(pre)
    assert not split.on_chip(post)
    assert not split.on_chip(err_onchip)
    assert not split.on_chip(err_offchip)

def test_chip_learning_errors():
    with nengo.Network() as net:
        add_params(net)
        a = nengo.Ensemble(100, 1)
        b = nengo.Ensemble(100, 1)
        net.config[b].on_chip = True

        nengo.Connection(a, b, learning_rule_type=nengo.PES())

    with pytest.raises(BuildError, match="Post ensemble"):
        Split(net)

    with nengo.Network() as net:
        add_params(net)
        a = nengo.Ensemble(100, 1)
        b = nengo.Ensemble(100, 1)
        error = nengo.Ensemble(100, 1)
        net.config[error].on_chip = True

        conn = nengo.Connection(a, b, learning_rule_type=nengo.PES())
        nengo.Connection(error, conn.learning_rule)

    with pytest.raises(BuildError, match="Pre ensemble"):
        Split(net)

def test_learning_post_error():
    with nengo.Network():
        ens = nengo.Ensemble(10, 1)
        conn = nengo.Connection(ens, ens, learning_rule_type=nengo.PES())
        with pytest.raises(ValidationError,
                           match="'post' must.*'Ensemble', 'Neurons"):
            nengo.Connection(ens, conn.learning_rule,
                             learning_rule_type=nengo.PES())

def test_learning_rule_equality():
    with nengo.Network():
        ens = nengo.Ensemble(10, 1)
        lr = nengo.PES()
        conn0 = nengo.Connection(ens, ens, learning_rule_type=lr)
        assert conn0.learning_rule == conn0.learning_rule
        assert conn0.learning_rule != lr

        conn1 = nengo.Connection(ens, ens, learning_rule_type=[lr, nengo.PES()])
        assert conn0.learning_rule[0] != conn1.learning_rule
        assert conn1.learning_rule_type[0] == conn0.learning_rule_type
        assert conn1.learning_rule[0] == conn1.learning_rule[1]

def test_set_learning_rule():
    with nengo.Network():
        a = nengo.Ensemble(10, 2)
        b = nengo.Ensemble(10, 2)
        nengo.Connection(a, b, learning_rule_type=nengo.PES())
        nengo.Connection(a, b, learning_rule_type=nengo.PES(),
                         solver=nengo.solvers.LstsqL2(weights=True))
        nengo.Connection(a.neurons, b.neurons, learning_rule_type=nengo.PES())
        nengo.Connection(a.neurons, b.neurons, learning_rule_type=nengo.Oja())

        n = nengo.Node(output=lambda t, x: t * x, size_in=2)
        with pytest.raises(ValueError):
            nengo.Connection(n, a, learning_rule_type=nengo.PES())

def AND_3bit():
    with nengo.Network() as AND:
        AND.I = nengo.Ensemble(1800, dimensions=3, radius=1)
        AND.O = nengo.Ensemble(200, dimensions=1, radius=1)
        AND.O01 = nengo.Ensemble(200, dimensions=1, radius=1)
        AND.I12 = nengo.Ensemble(200, dimensions=2, radius=1)

        def and_func(x):
            return 1 if (x[0] > 0) and (x[1] > 0) else -1

        nengo.Connection(AND.O01, AND.I12[0])
        nengo.Connection(AND.I[2], AND.I12[1])
        nengo.Connection(AND.I[0:2], AND.O01, function=and_func,
                         learning_rule_type=nengo.PES(learning_rate=2e-4))
        nengo.Connection(AND.I12, AND.O, function=and_func,
                         learning_rule_type=nengo.PES(learning_rate=2e-4))
    return AND

def test_multiple_pes(allclose, plt, seed, Simulator):
    n_errors = 5
    targets = np.linspace(-0.9, 0.9, n_errors)
    with nengo.Network(seed=seed) as model:
        pre_ea = nengo.networks.EnsembleArray(200, n_ensembles=n_errors)
        output = nengo.Node(size_in=n_errors)
        target = nengo.Node(targets)
        for i in range(n_errors):
            conn = nengo.Connection(
                pre_ea.ea_ensembles[i],
                output[i],
                learning_rule_type=nengo.PES(learning_rate=5e-4),
            )
            nengo.Connection(target[i], conn.learning_rule, transform=-1)
            nengo.Connection(output[i], conn.learning_rule)
        probe = nengo.Probe(output, synapse=0.1)

    with Simulator(model) as sim:
        sim.run(1.0)
    t = sim.trange()

    plt.plot(t, sim.data[probe])
    for target, style in zip(targets, plt.rcParams["axes.prop_cycle"]):
        plt.axhline(target, **style)

    for i, target in enumerate(targets):
        assert allclose(sim.data[probe][t > 0.8, i], target, atol=0.1)

def test_learning_seed(Simulator, request, seed):
    def set_srun_options(options):
        # TODO: the SRUN_OPTIONS environment variable will be read in a future
        # version of NxSDK. Until that's released, this test is expected to fail.
        os.environ["SRUN_OPTIONS"] = options

    request.addfinalizer(lambda: set_srun_options(""))
    set_srun_options("-p loihi -x ncl-ext-ghrd-02")

    n_per_dim = 120
    dims = 1
    tau = 0.005
    simtime = 0.2
    model, probes = pes_network(
        n_per_dim,
        dims,
        seed,
        learn_synapse=tau,
        learning_rule_type=nengo.PES(learning_rate=1e-2),
        period=simtime / 2,
    )

    sim_args = dict(hardware_options={"allocator": RoundRobin()})
    with Simulator(model, seed=seed, **sim_args) as sim:
        sim.run(simtime)

    with Simulator(model, seed=seed, **sim_args) as sim1:
        sim1.run(simtime)

    with Simulator(model, seed=seed + 1, **sim_args) as sim2:
        sim2.run(simtime)

    assert np.allclose(sim1.data[probes["post"]], sim.data[probes["post"]])
    assert not np.allclose(sim2.data[probes["post"]], sim.data[probes["post"]])

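# --- `pes_network` is a shared helper that is not defined in this file; it
#     is also assumed by the error-clip, seed, and comm-channel tests below.
#     The sketch here is reconstructed from the call sites (argument names,
#     probe keys) and is an assumption, not the suite's actual helper.
import numpy as np
import nengo

def pes_network(n_per_dim, dims, seed, learn_synapse, learning_rule_type,
                period, input_scale=None, error_scale=1.0):
    if input_scale is None:
        input_scale = np.ones(dims)

    with nengo.Network(seed=seed) as model:
        # Sinusoidal stimulus with the requested period
        stim = nengo.Node(
            lambda t: input_scale * np.sin(2 * np.pi * t / period))
        pre = nengo.Ensemble(n_per_dim * dims, dims)
        post = nengo.Node(size_in=dims)

        nengo.Connection(stim, pre, synapse=None)
        conn = nengo.Connection(
            pre, post, function=lambda x: np.zeros(dims),
            synapse=learn_synapse, learning_rule_type=learning_rule_type)

        # error = scale * (actual - target), fed into the learning rule
        error = nengo.Node(size_in=dims)
        nengo.Connection(post, error, transform=error_scale)
        nengo.Connection(stim, error, transform=-error_scale)
        nengo.Connection(error, conn.learning_rule)

        probes = dict(
            stim=nengo.Probe(stim),
            pre=nengo.Probe(pre, synapse=learn_synapse),
            post=nengo.Probe(post, synapse=learn_synapse),
        )
    return model, probes
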
def test_place_ensembles():
    with nengo.Network() as net:
        add_params(net)
        offchip = nengo.Ensemble(10, 1, label="offchip")
        net.config[offchip].on_chip = False
        direct = nengo.Ensemble(1, 1, neuron_type=nengo.Direct(), label="direct")
        with nengo.Network():
            onchip = nengo.Ensemble(20, 1, label="onchip")
        pre = nengo.Ensemble(10, 1, label="pre")
        post = nengo.Ensemble(10, 1, label="post")
        error = nengo.Ensemble(10, 1, label="error")
        conn = nengo.Connection(pre, post, learning_rule_type=nengo.PES())
        nengo.Connection(error, conn.learning_rule)

    networks = SplitNetworks(net, node_neurons=default_node_neurons)
    place_ensembles(networks)
    assert networks.moves[offchip] == "host"
    assert networks.moves[direct] == "host"
    assert networks.moves[onchip] == "chip"
    assert networks.moves[pre] == "chip"
    assert networks.moves[post] == "host"
    assert networks.moves[error] == "host"

def test_multiple_pes(init_function, request, allclose, plt, seed, Simulator):
    n_errors = 5
    targets = np.linspace(-0.9, 0.9, n_errors)
    with nengo.Network(seed=seed) as model:
        pre_ea = nengo.networks.EnsembleArray(200, n_ensembles=n_errors)
        output = nengo.Node(size_in=n_errors)
        target = nengo.Node(targets)
        for i in range(n_errors):
            conn = nengo.Connection(
                pre_ea.ea_ensembles[i],
                output[i],
                function=init_function,
                learning_rule_type=nengo.PES(learning_rate=1e-2),
            )
            nengo.Connection(target[i], conn.learning_rule, transform=-1)
            nengo.Connection(output[i], conn.learning_rule)
        probe = nengo.Probe(output, synapse=0.1)

    simtime = 0.6
    with Simulator(model, hardware_options={"allocator": RoundRobin()}) as sim:
        sim.run(simtime)

    t = sim.trange()
    tmask = t > simtime * 0.85

    plt.plot(t, sim.data[probe])
    for target, style in zip(targets, plt.rcParams["axes.prop_cycle"]):
        plt.axhline(target, **style)

    for i, target in enumerate(targets):
        assert allclose(sim.data[probe][tmask, i], target,
                        atol=0.1, rtol=0.1), "Target %d not close" % i

def test_pes_learning_initial_weights(Simulator, nl_nodirect):
    # NOTE: this test uses the legacy (pre-2.1) Nengo learning API, in which
    # the error signal was supplied via a modulatory connection passed to
    # PES, rather than by connecting into conn.learning_rule.
    n = 200
    learned_vector = [0.5, -0.5]

    m = nengo.Network(seed=3902)
    with m:
        m.config[nengo.Ensemble].neuron_type = nl_nodirect()
        u = nengo.Node(output=learned_vector)
        a = nengo.Ensemble(n, dimensions=2)
        u_learned = nengo.Ensemble(n, dimensions=2)
        e = nengo.Ensemble(n, dimensions=2)

        initial_weights = np.random.random((a.n_neurons, u_learned.n_neurons))

        nengo.Connection(u, a)
        err_conn = nengo.Connection(e, u_learned, modulatory=True)
        nengo.Connection(a.neurons, u_learned.neurons,
                         transform=initial_weights,
                         learning_rule=nengo.PES(err_conn, 10))
        nengo.Connection(u_learned, e, transform=-1)
        nengo.Connection(u, e)

        u_learned_p = nengo.Probe(u_learned, synapse=0.1)
        e_p = nengo.Probe(e, synapse=0.1)

    sim = Simulator(m)
    sim.run(1.)

    assert np.allclose(sim.data[u_learned_p][-1], learned_vector, atol=0.05)
    assert np.allclose(sim.data[e_p][-1], np.zeros(len(learned_vector)),
                       atol=0.05)

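# --- For reference: equivalent wiring under the modern (>= 2.1) Nengo API,
#     where the error connects into conn.learning_rule with the sign
#     convention (actual - target). The learning rate below is an
#     illustrative assumption, not a translation of the legacy value.
import numpy as np
import nengo

def pes_initial_weights_modern(n=200, learned_vector=(0.5, -0.5)):
    with nengo.Network(seed=3902) as m:
        u = nengo.Node(output=learned_vector)
        a = nengo.Ensemble(n, dimensions=2)
        u_learned = nengo.Ensemble(n, dimensions=2)
        e = nengo.Ensemble(n, dimensions=2)

        # modern transform shape is (post.n_neurons, pre.n_neurons)
        initial_weights = np.random.random((u_learned.n_neurons, a.n_neurons))

        nengo.Connection(u, a)
        conn = nengo.Connection(
            a.neurons, u_learned.neurons, transform=initial_weights,
            learning_rule_type=nengo.PES(learning_rate=1e-4))
        nengo.Connection(u_learned, e, transform=-1)
        nengo.Connection(u, e)  # e represents target - actual
        nengo.Connection(e, conn.learning_rule, transform=-1)  # actual - target
    return m
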
def test_online_learning_reset(Simulator, tmpdir, seed):
    with nengo.Network(seed=seed) as net:
        inp = nengo.Ensemble(10, 1)
        out = nengo.Node(size_in=1)
        conn = nengo.Connection(inp, out, learning_rule_type=nengo.PES(1))
        nengo.Connection(nengo.Node([1]), conn.learning_rule)

    with Simulator(net) as sim:
        w0 = np.array(sim.data[conn].weights)
        sim.run(0.1, stateful=False)
        w1 = np.array(sim.data[conn].weights)

        sim.save_params(str(tmpdir.join("tmp")))

        # test that learning has changed weights
        assert not np.allclose(w0, w1)

        # test that include_trainable=False does NOT reset the online
        # learning weights
        sim.reset(include_trainable=False)
        assert np.allclose(w1, sim.data[conn].weights)

        # test that full reset DOES reset the online learning weights
        sim.reset(include_trainable=True)
        assert np.allclose(w0, sim.data[conn].weights)

    # test that weights load correctly
    with Simulator(net) as sim:
        assert not np.allclose(w1, sim.data[conn].weights)
        sim.load_params(str(tmpdir.join("tmp")))
        assert np.allclose(w1, sim.data[conn].weights)

def test_pes_error_clip(plt, seed, Simulator):
    dims = 2
    n_per_dim = 120
    tau = 0.01
    error_scale = 5.  # scale up error signal so it clips
    simtime = 0.3

    model, probes = pes_network(
        n_per_dim, dims, seed,
        learn_synapse=tau,
        learning_rule_type=nengo.PES(learning_rate=1e-2 / error_scale),
        input_scale=np.array([1., -1.]),
        error_scale=error_scale,
        period=simtime)

    with pytest.warns(UserWarning, match=r'.*PES error.*Clipping.'):
        with Simulator(model) as loihi_sim:
            loihi_sim.run(simtime)

    t = loihi_sim.trange()
    post_tmask = t > simtime - 1.0

    dec_tau = loihi_sim.model.decode_tau
    y = loihi_sim.data[probes['stim']]
    y_dpre = nengo.Lowpass(dec_tau).filt(y)
    y_dpost = nengo.Lowpass(tau).combine(nengo.Lowpass(dec_tau)).filt(y_dpre)
    y_loihi = loihi_sim.data[probes['post']]

    plt.plot(t, y_dpost, 'k', label='target')
    plt.plot(t, y_loihi, 'g', label='loihi')

    # --- assert that we've learned something, but not everything
    error = (rms(y_loihi[post_tmask] - y_dpost[post_tmask])
             / rms(y_dpost[post_tmask]))
    assert error < 0.5
    assert error > 0.05

def model(self, p):
    model = nengo.Network()
    with model:
        def stim_func(t):
            return [np.sin(t)] * p.D_in

        stim = nengo.Node(stim_func)

        ens = nengo.Ensemble(n_neurons=p.n_neurons, dimensions=p.D_in)

        self.times = []

        def output_func(t, x):
            self.times.append(t)

        output = nengo.Node(output_func, size_in=p.D_out)

        nengo.Connection(stim, ens)
        c = nengo.Connection(ens, output,
                             function=lambda x: [0] * p.D_out,
                             learning_rule_type=nengo.PES())

        def error_func(t):
            return [np.sin(t)] * p.D_out

        error = nengo.Node(error_func)
        nengo.Connection(error, c.learning_rule)

        self.error = error
        self.stim = stim
        self.output = output
    return model

def test_pes_pre_post_varieties(Simulator, pre_neurons, post_neurons,
                                weight_solver, pre_slice, post_slice):
    with nengo.Network() as net:
        pre = nengo.Ensemble(10, 12)
        post = nengo.Ensemble(20, 22)
        pre_size = pre.n_neurons if pre_neurons else pre.dimensions
        post_size = post.n_neurons if post_neurons else post.dimensions
        if pre_slice:
            pre_size //= 2
            pre_slice = slice(0, pre_size)
        else:
            pre_slice = slice(None)
        if post_slice:
            post_size //= 2
            post_slice = slice(0, post_size)
        else:
            post_slice = slice(None)

        nengo.Connection(
            (pre.neurons if pre_neurons else pre)[pre_slice],
            (post.neurons if post_neurons else post)[post_slice],
            solver=nengo.solvers.LstsqL2(weights=weight_solver),
            learning_rule_type=nengo.PES(),
            transform=np.ones((post_size, pre_size)),
        )

    apply_encoders = post_neurons or (
        not pre_neurons and not post_neurons and weight_solver)
    with Simulator(net) as sim:
        assert (any(op.tag == "PES:encode" for op in sim.model.operators)
                == apply_encoders)
        sim.step()

def test_learning_seed(Simulator, seed):
    n_per_dim = 120
    dims = 1
    tau = 0.005
    simtime = 0.2
    model, probes = pes_network(
        n_per_dim,
        dims,
        seed,
        learn_synapse=tau,
        learning_rule_type=nengo.PES(learning_rate=1e-2),
        period=simtime / 2,
    )

    sim_args = dict(hardware_options={"allocator": RoundRobin()})
    with Simulator(model, seed=seed, **sim_args) as sim:
        sim.run(simtime)

    with Simulator(model, seed=seed, **sim_args) as sim1:
        sim1.run(simtime)

    with Simulator(model, seed=seed + 1, **sim_args) as sim2:
        sim2.run(simtime)

    assert np.allclose(sim1.data[probes["post"]], sim.data[probes["post"]])
    assert not np.allclose(sim2.data[probes["post"]], sim.data[probes["post"]])

def test_pes_learning_rate(Simulator, plt, seed):
    n = 50
    dt = 0.0005
    T = 1.0
    initial = 0.7
    desired = -0.9
    epsilon = 1e-3  # shrink the error to epsilon within T seconds

    # Get activity vector and initial decoders
    with nengo.Network(seed=seed) as model:
        x = nengo.Ensemble(n, 1, seed=seed,
                           neuron_type=nengo.neurons.LIFRate())
        y = nengo.Node(size_in=1)
        conn = nengo.Connection(x, y, synapse=None)

    with Simulator(model, dt=dt) as sim:
        a = get_activities(sim.model, x, [initial])
        d = sim.data[conn].weights
        assert np.any(a > 0)

    # Use util function to calculate learning_rate
    init_error = float(desired - np.dot(d, a))
    learning_rate, gamma = pes_learning_rate(
        epsilon / abs(init_error), a, T, dt)

    # Build model with no filtering on any connections
    with model:
        stim = nengo.Node(output=initial)
        ystar = nengo.Node(output=desired)

        conn.learning_rule_type = nengo.PES(
            pre_tau=1e-15, learning_rate=learning_rate)
        nengo.Connection(stim, x, synapse=None)
        nengo.Connection(ystar, conn.learning_rule, synapse=0, transform=-1)
        nengo.Connection(y, conn.learning_rule, synapse=0)

        p = nengo.Probe(y, synapse=None)
        decoders = nengo.Probe(conn, 'weights', synapse=None)

    with Simulator(model, dt=dt) as sim:
        sim.run(T)

    # Check that the final error is exactly epsilon
    assert np.allclose(abs(desired - sim.data[p][-1]), epsilon)

    # Check that all of the errors are given exactly by gamma**k
    k = np.arange(len(sim.trange()))
    error = init_error * gamma**k
    assert np.allclose(sim.data[p].flatten(), desired - error)

    # Check that all of the decoders are equal to their analytical solution
    dk = d.T + init_error * a.T[:, None] * (1 - gamma**k) / np.dot(a, a.T)
    assert np.allclose(dk, np.squeeze(sim.data[decoders].T))

    plt.figure()
    plt.plot(sim.trange(), sim.data[p], lw=5, alpha=0.5)
    plt.plot(sim.trange(), desired - error, linestyle='--', lw=5, alpha=0.5)

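# --- `get_activities` and `pes_learning_rate` are external helpers not
#     defined in this file. A hedged sketch of the contraction factor the
#     learning-rate utility presumably inverts: with rate neurons, constant
#     input, and no synaptic filtering, Nengo's PES update
#         d <- d - (learning_rate * dt / n) * error * a,  error = actual - target
#     shrinks the representational error by a fixed factor gamma each step.
import numpy as np

def pes_gamma(activities, learning_rate, dt):
    """Per-step error contraction for PES: e_{k+1} = gamma * e_k."""
    a = np.asarray(activities).ravel()
    return 1.0 - (learning_rate * dt / a.size) * float(np.dot(a, a))
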
def test_conditional_update(Simulator, use_loop, caplog):
    caplog.set_level(logging.INFO)

    with nengo.Network() as net:
        config.configure_settings(stateful=False, use_loop=use_loop)
        a = nengo.Ensemble(10, 1)
        b = nengo.Node(size_in=1)
        conn = nengo.Connection(a, b)

    with Simulator(net):
        pass
    assert "Number of state updates: 0" in caplog.text

    caplog.clear()

    conn.learning_rule_type = nengo.PES()
    with Simulator(net):
        pass
    assert "Number of state updates: 1" in caplog.text

    caplog.clear()

    with net:
        config.configure_settings(trainable=True)
    with Simulator(net):
        pass
    assert "Number of state updates: 1" in caplog.text

def test_pes_recurrent_slice(Simulator, seed, weights, allclose):
    """Test that PES works on recurrent connections from N to 1 dims."""
    with nengo.Network(seed=seed) as net:
        err = nengo.Node(output=[-1])
        stim = nengo.Node(output=[0, 0])
        post = nengo.Ensemble(50, 2, radius=2)
        nengo.Connection(stim, post)

        conn = nengo.Connection(
            post,
            post[1],
            function=lambda x: 0.0,
            solver=nengo.solvers.LstsqL2(weights=weights),
            learning_rule_type=nengo.PES(learning_rate=5e-4),
        )
        nengo.Connection(err, conn.learning_rule)

        p = nengo.Probe(post, synapse=0.025)

    with Simulator(net) as sim:
        sim.run(0.2)

    # Learning rule should drive second dimension high, but not first
    assert allclose(sim.data[p][-10:, 0], 0, atol=0.2)
    assert np.all(sim.data[p][-10:, 1] > 0.8)

def test_pes_transform(Simulator, seed, allclose):
    """Test behaviour of PES when function and transform both defined."""
    n = 200
    # error must be with respect to transformed vector (conn.size_out)
    T = np.asarray([[0.5], [-0.5]])  # transform to output

    m = nengo.Network(seed=seed)
    with m:
        u = nengo.Node(output=[1])
        a = nengo.Ensemble(n, dimensions=1)
        b = nengo.Node(size_in=2)
        e = nengo.Node(size_in=1)

        nengo.Connection(u, a)
        learned_conn = nengo.Connection(
            a,
            b,
            function=lambda x: [0],
            transform=T,
            learning_rule_type=nengo.PES(learning_rate=1e-3),
        )
        assert T.shape[0] == learned_conn.size_out
        assert T.shape[1] == learned_conn.size_mid

        nengo.Connection(b[0], e, synapse=None)
        nengo.Connection(nengo.Node(output=-1), e)
        nengo.Connection(e, learned_conn.learning_rule,
                         transform=T, synapse=None)

        p_b = nengo.Probe(b, synapse=0.05)

    with Simulator(m) as sim:
        sim.run(1.0)

    tend = sim.trange() > 0.7
    assert allclose(sim.data[p_b][tend], [1, -1], atol=1e-2)

def test_probeable():
    net = nengo.Network()

    def check_learning_rule(learning_rule_type, expected, net=net):
        assert learning_rule_type.probeable == expected

        post = net.e if isinstance(learning_rule_type, nengo.Voja) else net.n
        transform = (np.ones((1, 10))
                     if isinstance(learning_rule_type, nengo.Voja) else 1.0)
        conn = nengo.Connection(net.n, post, transform=transform,
                                learning_rule_type=learning_rule_type)
        assert conn.learning_rule.probeable == expected

    with net:
        net.e = nengo.Ensemble(10, 1)
        net.n = net.e.neurons
        check_learning_rule(nengo.PES(), ("error", "activities", "delta"))
        check_learning_rule(
            nengo.RLS(), ("pre_filtered", "error", "delta", "inv_gamma"))
        check_learning_rule(
            nengo.BCM(), ("theta", "pre_filtered", "post_filtered", "delta"))
        check_learning_rule(
            nengo.Oja(), ("pre_filtered", "post_filtered", "delta"))
        check_learning_rule(
            nengo.Voja(), ("post_filtered", "scaled_encoders", "delta"))

def create_model():
    dimensions = 4

    model = nengo.Network()
    with model:
        num_neurons = dimensions * 30
        inp = nengo.Node(nengo.processes.WhiteSignal(num_neurons, high=5),
                         size_out=dimensions, label="white_noise")
        pre = nengo.Ensemble(num_neurons, dimensions=dimensions, label="pre")
        nengo.Connection(inp, pre)
        post = nengo.Ensemble(num_neurons, dimensions=dimensions, label="post")
        conn = nengo.Connection(
            pre, post, function=lambda x: np.random.random(dimensions))

        inp_p = nengo.Probe(inp, label="inp_p")
        pre_p = nengo.Probe(pre, synapse=0.01, label="pre_p")
        post_p = nengo.Probe(post, synapse=0.01, label="post_p")

        error = nengo.Ensemble(num_neurons, dimensions=dimensions,
                               label="error")
        error_p = nengo.Probe(error, synapse=0.03, label="error_p")

        # Error = actual - target = post - pre
        nengo.Connection(post, error)
        nengo.Connection(pre, error, transform=-1)

        # Add the learning rule to the connection
        conn.learning_rule_type = nengo.PES()

        # Connect the error into the learning rule
        nengo.Connection(error, conn.learning_rule)

    return model, list(), dict()

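# --- Hedged usage sketch for create_model above: build and run the network.
#     The empty list/dict return values are kept opaque here, since their
#     meaning comes from the surrounding harness; run length is illustrative.
model, args, kwargs = create_model()
with nengo.Simulator(model) as sim:
    sim.run(1.0)
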
def test_place_ensembles():
    # builder will move the learning stuff onto the host
    with nengo.Network() as net:
        add_params(net)
        offchip = nengo.Ensemble(10, 1, label="offchip")
        net.config[offchip].on_chip = False
        direct = nengo.Ensemble(
            1, 1, neuron_type=nengo.Direct(), label="direct")
        with nengo.Network():
            onchip = nengo.Ensemble(20, 1, label="onchip")
        pre = nengo.Ensemble(10, 1, label="pre")
        post = nengo.Ensemble(10, 1, label="post")
        error = nengo.Ensemble(10, 1, label="error")
        conn = nengo.Connection(pre, post, learning_rule_type=nengo.PES())
        nengo.Connection(error, conn.learning_rule)

    split = Split(net)
    assert not split.on_chip(offchip)
    assert not split.on_chip(direct)
    assert split.on_chip(onchip)
    assert split.on_chip(pre)
    assert not split.on_chip(post)
    assert not split.on_chip(error)

    for obj in net.all_ensembles + net.all_nodes:
        assert not split.is_precomputable(obj)

    with pytest.raises(BuildError, match="Locations are only established"):
        split.on_chip(conn)

def AND_OPERATION():
    with nengo.Network() as AND:
        AND.I = nengo.Ensemble(800, dimensions=2, radius=1.6)
        AND.O = nengo.Ensemble(400, dimensions=1, radius=1.1)
        AND.PROD = nengo.Ensemble(800, dimensions=1, radius=1)
        AND.MIN = nengo.Ensemble(800, dimensions=1, radius=1)

        # Fuzzy-set AND operations. To try a different AND, replace prod_func
        # in the connection below with min_func or bdif_func.
        def prod_func(x):
            return np.prod(np.array(x))

        def min_func(x):
            return np.min(np.array(x))

        def bdif_func(x):
            # bounded difference: max(0, x0 + x1 - 1)
            return np.maximum(0, x[0] + x[1] - 1)

        nengo.Connection(AND.I, AND.O, function=prod_func,
                         learning_rule_type=nengo.PES(learning_rate=2e-4))
    return AND

def test_precompute_host_to_learning_rule_unsupported():
    with nengo.Network() as net:
        pre = nengo.Ensemble(10, 1, label="pre")
        post = nengo.Ensemble(10, 1, label="post")
        nengo.Connection(pre, post, learning_rule_type=nengo.PES())

    with pytest.raises(BuildError, match="learning rules"):
        Split(net, precompute=True)

def test_pes_direct_errors():
    """Test that applying a learning rule to a direct ensemble errors."""
    with nengo.Network():
        pre = nengo.Ensemble(10, 1, neuron_type=nengo.Direct())
        post = nengo.Ensemble(10, 1)
        conn = nengo.Connection(pre, post)
        with pytest.raises(ValidationError):
            conn.learning_rule_type = nengo.PES()

def test_pes_comm_channel(dims, allclose, plt, seed, Simulator):
    n_per_dim = 120
    tau = 0.01
    simtime = 1.5
    model, probes = pes_network(
        n_per_dim, dims, seed,
        learn_synapse=tau,
        learning_rule_type=nengo.PES(learning_rate=1e-2),
        period=simtime / 2)

    with nengo.Simulator(model) as nengo_sim:
        nengo_sim.run(simtime)

    with Simulator(model) as loihi_sim:
        loihi_sim.run(simtime)

    with Simulator(model, target='simreal') as real_sim:
        real_sim.run(simtime)

    t = nengo_sim.trange()
    pre_tmask = t > 0.1
    post_tmask = t > simtime / 2

    dec_tau = loihi_sim.model.decode_tau
    y = nengo_sim.data[probes['stim']]
    y_dpre = nengo.Lowpass(dec_tau).filt(y)
    y_dpost = nengo.Lowpass(tau).combine(nengo.Lowpass(dec_tau)).filt(y_dpre)
    y_nengo = nengo_sim.data[probes['post']]
    y_loihi = loihi_sim.data[probes['post']]
    y_real = real_sim.data[probes['post']]

    plt.subplot(211)
    plt.plot(t, y_dpost, 'k', label='target')
    plt.plot(t, y_nengo, 'b', label='nengo')
    plt.plot(t, y_loihi, 'g', label='loihi')
    plt.plot(t, y_real, 'r:', label='real')
    plt.legend()

    plt.subplot(212)
    plt.plot(t[post_tmask], y_loihi[post_tmask] - y_dpost[post_tmask], 'k')
    plt.plot(t[post_tmask], y_loihi[post_tmask] - y_nengo[post_tmask], 'b')

    x_loihi = loihi_sim.data[probes['pre']]
    assert allclose(x_loihi[pre_tmask], y_dpre[pre_tmask],
                    atol=0.1, rtol=0.05)

    assert allclose(y_loihi[post_tmask], y_dpost[post_tmask],
                    atol=0.1, rtol=0.05)
    assert allclose(y_loihi, y_nengo, atol=0.2, rtol=0.2)

    assert allclose(y_real[post_tmask], y_dpost[post_tmask],
                    atol=0.1, rtol=0.05)
    assert allclose(y_real, y_nengo, atol=0.2, rtol=0.2)