# (fragment: ctrl, conn, read_pos, target, link, and m1 are defined above
# this excerpt)

def learning_rule_gate_fn(t, x):
    # Pass the error signal through, but zero it whenever it saturates
    # (|x| > 1) so that PES pauses during large transients
    if np.abs(x) > 1.0:
        return 0
    else:
        return x

learning_rule_gate = nengo.Node(learning_rule_gate_fn, size_in=1)
nengo.Connection(ctrl, learning_rule_gate, synapse=None)
nengo.Connection(learning_rule_gate, conn.learning_rule,
                 synapse=None, transform=-1)

import nengo_learning_display
learn = nengo_learning_display.Plot1D(conn, np.linspace(-1, 1, 50),
                                      range=(-2, 2))

# Show the measured position and the target side by side
compare = nengo.Node(None, size_in=2)
nengo.Connection(read_pos, compare[0], synapse=None)
nengo.Connection(target, compare[1], synapse=None)

def on_close(sim):
    # Set the motor duty cycle to zero when the simulation is closed
    link.write(m1 + 'duty_cycle_sp', '0')

def on_step(sim):
    learn.update(sim)
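
# --- Added sketch (not in the original): the same error-gating pattern in a
# self-contained toy network, so it can be run without the robot hardware.
# All names below (demo_stim, demo_pre, demo_post, demo_conn) are stand-ins.
import nengo
import numpy as np

with nengo.Network() as demo:
    demo_stim = nengo.Node(lambda t: np.sin(t))
    demo_pre = nengo.Ensemble(n_neurons=100, dimensions=1)
    demo_post = nengo.Ensemble(n_neurons=100, dimensions=1)
    nengo.Connection(demo_stim, demo_pre)
    demo_conn = nengo.Connection(demo_pre, demo_post, function=lambda x: 0,
                                 learning_rule_type=nengo.PES())

    def demo_gate_fn(t, x):
        # identical gate: suppress learning when the error saturates
        return 0 if np.abs(x) > 1.0 else x

    demo_gate = nengo.Node(demo_gate_fn, size_in=1)
    # error = post - stim, passed through the gate into the learning rule
    nengo.Connection(demo_post, demo_gate, synapse=0.01)
    nengo.Connection(demo_stim, demo_gate, synapse=0.01, transform=-1)
    nengo.Connection(demo_gate, demo_conn.learning_rule, synapse=None)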

# (fragment: these keyword arguments close the nengo.Connection call that
# creates `conn`; the start of the call is above this excerpt)
    learning_rule_type=nengo.PES(learning_rate=1e-4, pre_tau=0.01),
    function=lambda x: [0] * D)

syn2 = 0.01
# Error signal: student output minus teacher output
error = nengo.Node(None, size_in=D)
nengo.Connection(student, error, synapse=syn2)
nengo.Connection(teacher, error, synapse=syn2, transform=-1)
#nengo.Connection(ideal_teacher, error, synapse=syn2, transform=-1)
nengo.Connection(error, conn.learning_rule, synapse=None)

import nengo_learning_display
S = 30
# Sample along input dimension 0 only, holding the other dimensions at zero
domain = np.zeros((D, S))
domain[0, :] = np.linspace(-radius, radius, S)
teach_x = nengo_learning_display.Plot1D(teach_conn, domain=domain.T,
                                        range=(-radius, radius))
learn_x = nengo_learning_display.Plot1D(conn, domain=domain.T,
                                        range=(-radius, radius))

def on_step(sim):
    if sim is None:
        return
    # The teacher's decoded function never changes, so draw it only once
    if sim.n_steps < 2:
        teach_x.update(sim)
    learn_x.update(sim)
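
# --- Added shape note (not in the original): Plot1D's domain here is an
# (S, D) array — S sample points in the D-dimensional input space, varying
# only along dimension 0, so the display shows a 1-D slice of the learned
# function. Standalone check with stand-in values:
import numpy as np
demo_D, demo_S, demo_radius = 3, 30, 1.0
demo_domain = np.zeros((demo_D, demo_S))
demo_domain[0, :] = np.linspace(-demo_radius, demo_radius, demo_S)
print(demo_domain.T.shape)   # (30, 3): one D-dimensional sample per row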

import nengo
import nengo_learning_display
import numpy as np

model = nengo.Network()
with model:
    stim = nengo.Node(lambda t: np.sin(10 * t))
    pre = nengo.Ensemble(n_neurons=100, dimensions=1)
    post = nengo.Ensemble(n_neurons=100, dimensions=3, radius=2)
    # Start from the zero function and let PES learn the mapping
    c = nengo.Connection(pre, post, function=lambda x: [0, 0, 0],
                         learning_rule_type=nengo.PES())

    def func(x):
        # Target function: post should come to represent (x, -x, x**2)
        return x, -x, x**2

    # Error = post - func(stim), fed into the learning rule
    nengo.Connection(post, c.learning_rule)
    nengo.Connection(stim, c.learning_rule, function=func, transform=-1)
    nengo.Connection(stim, pre)

    plot = nengo_learning_display.Plot1D(c, domain=np.linspace(-2, 2, 30),
                                         range=(-1.5, 1.5))

def on_step(sim):
    plot.update(sim)
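
# --- Added usage sketch (not in the original): the model above is complete,
# so it can also be run headless with the plain nengo.Simulator instead of
# the GUI. The probes and the 20 s run length are illustrative choices.
with model:
    p_stim = nengo.Probe(stim, synapse=0.02)
    p_post = nengo.Probe(post, synapse=0.02)

with nengo.Simulator(model) as sim:
    sim.run(20.0)

t = sim.trange()
target = np.array([func(x) for x in sim.data[p_stim][:, 0]])
for window, name in [(t < 1.0, 'first second'), (t > 19.0, 'last second')]:
    rmse = np.sqrt(np.mean((sim.data[p_post][window] - target[window]) ** 2))
    print(name, 'RMSE:', rmse)   # should drop as PES converges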

# (fragment: q_diff and the q_target -> q_diff connection are defined above
# this excerpt)

# Proportional term: u += Kp * (q_target - q)
nengo.Connection(env.q, q_diff, synapse=None, transform=-1)
Kp = 1.0
nengo.Connection(q_diff, env.u, transform=Kp, synapse=None)

# Derivative term: u += Kd * (dq_target - dq)
dq_diff = nengo.Ensemble(n_neurons=100, dimensions=1)
nengo.Connection(dq_target, dq_diff, synapse=None)
nengo.Connection(env.dq, dq_diff, synapse=None, transform=-1)
Kd = 0.2
nengo.Connection(dq_diff, env.u, transform=Kd, synapse=None)

# Adaptive term: learn an extra control signal as a function of position
context = nengo.Ensemble(n_neurons=100, dimensions=1)
nengo.Connection(env.q, context, synapse=None)

def initial_function(x):
    return 0

c = nengo.Connection(context, env.u_extra, synapse=None,
                     function=initial_function,
                     learning_rule_type=nengo.PES(learning_rate=1e-4))
# The negated PD output is the learning signal, so the learned term grows
# until it supplies the effort the PD controller was providing
nengo.Connection(env.u, c.learning_rule, transform=-1)

import nengo_learning_display
learned = nengo_learning_display.Plot1D(c, domain=np.linspace(-1, 1, 30),
                                        range=(-1, 1))

def on_step(sim):
    learned.update(sim)
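
# --- Added toy illustration (an assumption-laden sketch, not the model
# above): with error = -u, PES acts like integral action. As the learned
# term u_extra grows, the PD output shrinks, so learning slows and stops
# once u_extra supplies the effort the plant needs.
lr = 0.1
u_needed = 0.8                 # constant effort the plant demands (stand-in)
u_extra = 0.0
for _ in range(100):
    u_pd = u_needed - u_extra  # PD covers whatever is still missing
    u_extra += lr * u_pd       # PES step with error = -u_pd
print(round(u_extra, 3))       # -> ~0.8: the learned term has taken over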

    # (fragment: continues the body of move_prey; dy is computed above this
    # excerpt)
    dx = prey.x - body.x
    dist2 = dx**2 + dy**2
    # The prey counts as caught within 0.5 units (0.25 = 0.5**2); move it
    # to the next scripted position until it is out of reach again
    while dist2 < 0.25:
        prey.x, prey.y = positions[pos_counter]
        pos_counter = (pos_counter + 1) % len(positions)
        #prey.x = np.random.uniform(1, world.width-2)
        #prey.y = np.random.uniform(1, world.height-2)
        dy = prey.y - body.y
        dx = prey.x - body.x
        dist2 = dx**2 + dy**2

move_prey = nengo.Node(move_prey)

import nengo_learning_display
# Sample the learned utilities around a circle of input directions, at two
# input magnitudes
theta = np.linspace(-np.pi, np.pi, 30)
domain = np.array([np.sin(theta), np.cos(theta)]).T
learned_far = nengo_learning_display.Plot1D(conn, domain=domain * 0.3,
                                            range=(-1.0, 1.0))
learned_near = nengo_learning_display.Plot1D(conn, domain=domain * 1.0,
                                             range=(-1.0, 1.0))
learned_far.label = ' Learned Action Utilities (far target)'
learned_near.label = ' Learned Action Utilities (near target)'

def on_step(sim):
    learned_far.update(sim)
    learned_near.update(sim)

import nengo_learning_display
import nengo
import numpy as np

model = nengo.Network()
with model:
    stim = nengo.Node(lambda t: (np.sin(10 * t), np.cos(10 * t)))
    pre = nengo.Ensemble(n_neurons=100, dimensions=2)
    post = nengo.Ensemble(n_neurons=100, dimensions=2)
    # Start from the zero function; PES learns to make post follow stim
    c = nengo.Connection(pre, post, function=lambda x: [0, 0],
                         learning_rule_type=nengo.PES())
    # Error = post - stim
    nengo.Connection(post, c.learning_rule)
    nengo.Connection(stim, c.learning_rule, transform=-1)
    nengo.Connection(stim, pre)

    # Plot along the unit circle, which is where the (sin, cos) stimulus
    # actually lives
    theta = np.linspace(-np.pi, np.pi, 30)
    domain = np.array([np.cos(theta), np.sin(theta)]).T
    plot = nengo_learning_display.Plot1D(c, domain=domain, range=(-1.5, 1.5))

def on_step(sim):
    plot.update(sim)
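
# --- Added headless variant (the probe and durations are assumptions, not
# in the original): probing the learned connection's decoders shows their
# magnitudes grow from near zero (the seed function is 0) as PES runs.
with model:
    p_dec = nengo.Probe(c, 'weights', sample_every=1.0)

with nengo.Simulator(model) as sim:
    sim.run(5.0)

dec = sim.data[p_dec]          # one decoder snapshot per simulated second
print(np.abs(dec[0]).mean(), np.abs(dec[-1]).mean())   # should increase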

# (fragment: context and the q_target -> context[0] connection are defined
# above this excerpt)
nengo.Connection(env.q, context[1], synapse=None)
nengo.Connection(dq_target, context[2], synapse=None, transform=1)
nengo.Connection(env.dq, context[2], synapse=None, transform=-1)

def pd(x):
    # Hand-coded PD controller on the context (q_target, q, dq_diff)
    q_target, q, dq_diff = x
    Kp = 1.0
    Kd = 0.2
    return Kp * (q_target - q) + Kd * dq_diff

# Seed the learned connection with the PD function, then let PES adapt it
c = nengo.Connection(context, env.u, function=pd, synapse=None,
                     learning_rule_type=nengo.PES(learning_rate=1e-4))
# Feed the negated PD output, computed on the same context, into the
# learning rule as the error signal
nengo.Connection(context, c.learning_rule, function=pd, transform=-1)

import nengo_learning_display
domain = np.zeros((30, 3))
domain[:, 0] = np.linspace(-1, 1, 30)
domain[:, 1] = np.linspace(-1, 1, 30)
learned = nengo_learning_display.Plot1D(c, domain=domain, range=(-1, 1))

def on_step(sim):
    learned.update(sim)
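
# --- Added sanity check (the sample points are hypothetical, not from the
# original): evaluating the hand-coded pd() that seeds the learned
# connection at a few (q_target, q, dq_diff) contexts.
import numpy as np
for x in [np.array([0.5, 0.0, 0.0]),   # position error only -> Kp * 0.5
          np.array([0.0, 0.5, 0.0]),   # overshoot -> -Kp * 0.5
          np.array([0.0, 0.0, 1.0])]:  # velocity error only -> Kd * 1.0
    print(x, '->', pd(x))
# expected: 0.5, -0.5, 0.2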