def test_simple_pyfunc(self):
    dt = 0.001
    m = nengo.Model("test_simple_pyfunc")

    time = Signal(np.zeros(1), name='time')
    sig = Signal(np.zeros(1), name='sig')
    pop = nengo.PythonFunction(fn=lambda t, x: np.sin(x), n_in=1)

    m.operators = []
    b = Builder()
    b.model = m
    b.build_pyfunc(pop)
    m.operators += [
        ProdUpdate(Signal(dt), Signal(1), Signal(1), time),
        DotInc(Signal([[1.0]]), time, pop.input_signal),
        ProdUpdate(Signal([[1.0]]), pop.output_signal, Signal(0), sig),
    ]

    sim = self.Simulator(m, dt=dt, builder=testbuilder)
    sim.step()
    for i in range(5):
        sim.step()

        t = (i + 2) * dt
        self.assertTrue(np.allclose(sim.signals[time], t),
                        msg='%s != %s' % (sim.signals[time], t))
        self.assertTrue(
            np.allclose(sim.signals[sig], np.sin(t - dt * 2)),
            msg='%s != %s' % (sim.signals[sig], np.sin(t - dt * 2)))
def _test_lif_base(self, cls=nengo.LIF):
    """Test that the dynamic model approximately matches the rates"""
    rng = np.random.RandomState(85243)

    dt = 0.001
    d = 1
    n = 5000

    m = nengo.Model("")
    ins = Signal(0.5 * np.ones(d), name='ins')
    lif = cls(n)
    lif.set_gain_bias(max_rates=rng.uniform(low=10, high=200, size=n),
                      intercepts=rng.uniform(low=-1, high=1, size=n))

    m.operators = []
    b = Builder()
    b.model = m
    b._builders[cls](lif)
    m.operators += [DotInc(Signal(np.ones((n, d))), ins, lif.input_signal)]

    sim = self.Simulator(m, dt=dt, builder=testbuilder)

    t_final = 1.0
    spikes = np.zeros(n)
    for i in range(int(np.round(t_final / dt))):
        sim.step()
        spikes += sim.signals[lif.output_signal]

    math_rates = lif.rates(sim.signals[lif.input_signal] - lif.bias)
    sim_rates = spikes / t_final
    logger.debug("ME = %f", (sim_rates - math_rates).mean())
    logger.debug("RMSE = %f",
                 rms(sim_rates - math_rates) / (rms(math_rates) + 1e-20))
    self.assertTrue(np.sum(math_rates > 0) > 0.5 * n,
                    "At least 50% of neurons must fire")
    self.assertTrue(np.allclose(sim_rates, math_rates, atol=1, rtol=0.02))
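# For reference, a minimal sketch of the analytic rate curve this test compares
# against. It assumes `lif.rates` implements the standard LIF steady-state rate;
# the helper below is hypothetical (not part of the original file), and the
# tau_rc/tau_ref defaults are assumptions:
#
#     r(J) = 1 / (tau_ref - tau_rc * ln(1 - 1/J))   for J = gain * x + bias > 1
#     r(J) = 0                                       otherwise
def _lif_rate_sketch(x, gain, bias, tau_rc=0.02, tau_ref=0.002):
    J = gain * x + bias                    # effective input current (array)
    rates = np.zeros_like(J, dtype=float)  # neurons with J <= 1 stay silent
    active = J > 1
    # log1p(-1/J) == ln(1 - 1/J), which is negative, so the denominator > 0
    rates[active] = 1.0 / (tau_ref - tau_rc * np.log1p(-1.0 / J[active]))
    return rates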
def test_encoder_decoder_pathway(self):
    """Verify, essentially by hand, that the simulator does the right
    things in the right order."""
    m = nengo.Model("")
    dt = 0.001
    foo = Signal([1.0], name='foo')
    pop = nengo.PythonFunction(fn=lambda t, x: x + 1, n_in=2, label='pop')
    decoders = np.asarray([.2, .1])
    decs = Signal(decoders * 0.5)

    m.operators = []
    b = Builder()
    b.model = m
    b.build_pyfunc(pop)
    m.operators += [
        DotInc(Signal([[1.0], [2.0]]), foo, pop.input_signal),
        ProdUpdate(decs, pop.output_signal, Signal(0.2), foo)
    ]

    def check(sig, target):
        self.assertTrue(np.allclose(sim.signals[sig], target),
                        "%s: value %s is not close to target %s" %
                        (sig, sim.signals[sig], target))

    sim = self.Simulator(m, dt=dt, builder=testbuilder)

    check(foo, 1.0)
    check(pop.input_signal, 0)
    check(pop.output_signal, 0)

    sim.step()
    # DotInc to pop.input_signal (input=[1.0, 2.0])
    # ProdUpdate updates foo (foo=[0.2])
    # pop updates pop.output_signal (output=[2, 3])
    check(pop.input_signal, [1, 2])
    check(pop.output_signal, [2, 3])
    check(foo, .2)
    check(decs, [.1, .05])

    sim.step()
    # DotInc to pop.input_signal (input=[0.2, 0.4])
    #   (note that pop resets its own input signal each timestep)
    # ProdUpdate updates foo (foo=[0.39]): 0.2*0.5*2 + 0.1*0.5*3 + 0.2*0.2
    # pop updates pop.output_signal (output=[1.2, 1.4])
    check(decs, [.1, .05])
    check(pop.input_signal, [0.2, 0.4])
    check(pop.output_signal, [1.2, 1.4])
    # -- foo is computed as a ProdUpdate of the *previous* output signal
    #    foo <- .2 * foo + dot(decoders * .5, output_signal)
    #           .2 * .2  + dot([.2, .1] * .5, [2, 3])
    #           .04      + (.2 + .15)
    #        <- .39
    check(foo, .39)
def test_simple_direct_mode(self):
    dt = 0.001
    m = nengo.Model("test_simple_direct_mode")

    time = Signal(n=1, name='time')
    sig = Signal(n=1, name='sig')
    pop = Direct(n_in=1, n_out=1, fn=np.sin)

    m.signals = [sig, time]
    m.operators = []
    Builder().build_direct(pop, m, dt)
    m.operators += [
        ProdUpdate(Constant(dt), Constant(1), Constant(1), time),
        DotInc(Constant([[1.0]]), time, pop.input_signal),
        ProdUpdate(Constant([[1.0]]), pop.output_signal, Constant(0), sig)
    ]

    sim = m.simulator(sim_class=self.Simulator, dt=dt, builder=testbuilder)
    sim.step()
    for i in range(5):
        sim.step()

        t = (i + 2) * dt
        self.assertTrue(np.allclose(sim.signals[time], t),
                        msg='%s != %s' % (sim.signals[time], t))
        self.assertTrue(np.allclose(sim.signals[sig], np.sin(t - dt * 2)),
                        msg='%s != %s' % (sim.signals[sig], np.sin(t - dt * 2)))
def test_encoder_decoder_with_views(self):
    m = nengo.Model("")
    dt = 0.001
    foo = Signal([1.0], name='foo')
    pop = nengo.PythonFunction(fn=lambda t, x: x + 1, n_in=2, label='pop')
    decoders = np.asarray([.2, .1])

    m.operators = []
    b = Builder()
    b.model = m
    b.build_pyfunc(pop)
    m.operators += [
        DotInc(Signal([[1.0], [2.0]]), foo[:], pop.input_signal),
        ProdUpdate(
            Signal(decoders * 0.5), pop.output_signal, Signal(0.2), foo[:])
    ]

    def check(sig, target):
        self.assertTrue(np.allclose(sim.signals[sig], target),
                        "%s: value %s is not close to target %s" %
                        (sig, sim.signals[sig], target))

    sim = self.Simulator(m, dt=dt, builder=testbuilder)

    # pop.input_signal = [0, 0]
    # pop.output_signal = [0, 0]

    sim.step()
    # DotInc to pop.input_signal (input=[1.0, 2.0])
    # ProdUpdate updates foo (foo=[0.2])
    # pop updates pop.output_signal (output=[2, 3])
    check(foo, .2)
    check(pop.input_signal, [1, 2])
    check(pop.output_signal, [2, 3])

    sim.step()
    # DotInc to pop.input_signal (input=[0.2, 0.4])
    #   (note that pop resets its own input signal each timestep)
    # ProdUpdate updates foo (foo=[0.39]): 0.2*0.5*2 + 0.1*0.5*3 + 0.2*0.2
    # pop updates pop.output_signal (output=[1.2, 1.4])
    check(foo, .39)
    check(pop.input_signal, [0.2, 0.4])
    check(pop.output_signal, [1.2, 1.4])
def test_pyfunc(self):
    """Test Python Function nonlinearity"""
    dt = 0.001
    d = 3
    n_steps = 3
    n_trials = 3

    rng = np.random.RandomState(seed=987)

    for i in range(n_trials):
        A = rng.normal(size=(d, d))
        fn = lambda t, x: np.cos(np.dot(A, x))

        x = np.random.normal(size=d)

        m = nengo.Model("")
        ins = Signal(x, name='ins')
        pop = nengo.PythonFunction(fn=fn, n_in=d)

        m.operators = []
        b = Builder()
        b.model = m
        b.build_pyfunc(pop)
        m.operators += [
            DotInc(Signal(np.eye(d)), ins, pop.input_signal),
            ProdUpdate(
                Signal(np.eye(d)), pop.output_signal, Signal(0), ins)
        ]

        sim = self.Simulator(m, dt=dt, builder=testbuilder)

        p0 = np.zeros(d)
        s0 = np.array(x)
        for j in range(n_steps):
            tmp = p0
            p0 = fn(0, s0)
            s0 = tmp
            sim.step()
            assert_allclose(self, logger, s0, sim.signals[ins])
            assert_allclose(
                self, logger, p0, sim.signals[pop.output_signal])
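# Note on the p0/s0 bookkeeping above (a reading of the test, not new behaviour):
# the loop emulates the one-timestep feedback delay in the operator graph. After
# each sim.step(), `ins` holds the *previous* function output (the ProdUpdate
# writes dot(I, output) into `ins` with a zero decay term), while
# `pop.output_signal` holds fn applied to the *previous* value of `ins`. The
# swap `tmp = p0; p0 = fn(0, s0); s0 = tmp` reproduces exactly that pipeline,
# so `s0` tracks sim.signals[ins] and `p0` tracks sim.signals[pop.output_signal].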
def test_custom_type(Simulator, allclose):
    """Test with custom learning rule type.

    A custom learning type may have ``size_in`` not equal to 0, 1, or None.
    """

    class TestRule(nengo.learning_rules.LearningRuleType):
        modifies = "decoders"

        def __init__(self):
            super().__init__(1.0, size_in=3)

    def build_test_rule(model, _, rule):
        error = Signal(np.zeros(rule.connection.size_in))
        model.add_op(Reset(error))
        model.sig[rule]["in"] = error[:rule.size_in]
        model.add_op(Copy(error, model.sig[rule]["delta"]))

    Builder.register(TestRule)(build_test_rule)

    with nengo.Network() as net:
        a = nengo.Ensemble(10, 1)
        b = nengo.Ensemble(10, 1)
        conn = nengo.Connection(a.neurons, b, transform=np.zeros((1, 10)),
                                learning_rule_type=TestRule())
        err = nengo.Node([1, 2, 3])
        nengo.Connection(err, conn.learning_rule, synapse=None)
        p = nengo.Probe(conn, "weights")

    with Simulator(net) as sim:
        sim.run(sim.dt * 5)

    assert allclose(sim.data[p][:, 0, :3],
                    np.outer(np.arange(1, 6), np.arange(1, 4)))
    assert allclose(sim.data[p][:, :, 3:], 0)
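# A sketch of the arithmetic behind the assertions above, assuming the custom
# rule's delta is applied once per timestep: the builder copies the incoming
# error [1, 2, 3] into the rule's delta each step, so the first three entries
# of the (initially zero) 1x10 weight row grow by [1, 2, 3] per step. The
# probe then records k * [1, 2, 3] after step k, i.e.
# np.outer(np.arange(1, 6), np.arange(1, 4)) over the five probed steps, while
# the remaining columns stay at zero.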
def test_encoder_decoder_with_views(self):
    m = nengo.Model("")
    dt = 0.001
    foo = Signal(n=1, name='foo')
    pop = Direct(n_in=2, n_out=2, fn=lambda x: x + 1, name='pop')
    decoders = np.asarray([.2, .1])

    m.signals = [foo]
    m.operators = []
    Builder().build_direct(pop, m, dt)
    m.operators += [
        DotInc(Constant([[1.0], [2.0]]), foo[:], pop.input_signal),
        ProdUpdate(Constant(decoders * 0.5), pop.output_signal,
                   Constant(0.2), foo[:])
    ]

    def check(sig, target):
        self.assertTrue(
            np.allclose(sim.signals[sig], target),
            "%s: value %s is not close to target %s" %
            (sig, sim.signals[sig], target))

    sim = m.simulator(sim_class=self.Simulator, dt=dt, builder=testbuilder)

    # set initial value of foo (foo=1.0)
    sim.signals[foo] = np.asarray([1.0])
    # pop.input_signal = [0, 0]
    # pop.output_signal = [0, 0]

    sim.step()
    # DotInc to pop.input_signal (input=[1.0, 2.0])
    # ProdUpdate updates foo (foo=[0.2])
    # pop updates pop.output_signal (output=[2, 3])
    check(foo, .2)
    check(pop.input_signal, [1, 2])
    check(pop.output_signal, [2, 3])

    sim.step()
    # DotInc to pop.input_signal (input=[0.2, 0.4])
    #   (note that pop resets its own input signal each timestep)
    # ProdUpdate updates foo (foo=[0.39]): 0.2*0.5*2 + 0.1*0.5*3 + 0.2*0.2
    # pop updates pop.output_signal (output=[1.2, 1.4])
    check(foo, .39)
    check(pop.input_signal, [0.2, 0.4])
    check(pop.output_signal, [1.2, 1.4])
def test_direct(self):
    """Test direct mode"""
    dt = 0.001
    d = 3
    n_steps = 3
    n_trials = 3

    rng = np.random.RandomState(seed=987)

    for i in xrange(n_trials):
        A = rng.normal(size=(d, d))
        fn = lambda x: np.cos(np.dot(A, x))

        x = np.random.normal(size=d)

        m = nengo.Model("")
        ins = Signal(n=d, name='ins')
        pop = Direct(n_in=d, n_out=d, fn=fn)

        m.signals = [ins]
        m.operators = []
        Builder().build_direct(pop, m, dt)
        m.operators += [
            DotInc(Constant(np.eye(d)), ins, pop.input_signal),
            ProdUpdate(Constant(np.eye(d)), pop.output_signal,
                       Constant(0), ins)
        ]

        sim = m.simulator(sim_class=self.Simulator, dt=dt,
                          builder=testbuilder)
        sim.signals[ins] = x

        p0 = np.zeros(d)
        s0 = np.array(x)
        for j in xrange(n_steps):
            tmp = p0
            p0 = fn(s0)
            s0 = tmp
            sim.step()
            assert np.allclose(s0, sim.signals[ins])
            assert np.allclose(p0, sim.signals[pop.output_signal])
def _test_lif_base(self, cls=LIF):
    """Test that the dynamic model approximately matches the rates"""
    rng = np.random.RandomState(85243)

    dt = 0.001
    d = 1
    n = 5000

    m = nengo.Model("")
    ins = Signal(n=d, name='ins')
    lif = cls(n)
    lif.set_gain_bias(max_rates=rng.uniform(low=10, high=200, size=n),
                      intercepts=rng.uniform(low=-1, high=1, size=n))

    m.signals = [ins]
    m.operators = []
    b = Builder()
    b._builders[cls](lif, m, dt)
    m.operators += [
        DotInc(Constant(np.ones((n, d))), ins, lif.input_signal)
    ]

    sim = m.simulator(sim_class=self.Simulator, dt=dt, builder=testbuilder)
    sim.signals[ins] = 0.5 * np.ones(d)

    t_final = 1.0
    spikes = np.zeros(n)
    for i in xrange(int(np.round(t_final / dt))):
        sim.step()
        spikes += sim.signals[lif.output_signal]

    math_rates = lif.rates(sim.signals[lif.input_signal] - lif.bias)
    sim_rates = spikes / t_final
    logger.debug("ME = %f", (sim_rates - math_rates).mean())
    logger.debug("RMSE = %f",
                 rms(sim_rates - math_rates) / (rms(math_rates) + 1e-20))
    self.assertTrue(
        np.sum(math_rates > 0) > 0.5 * n,
        "At least 50% of neurons must fire")
    self.assertTrue(np.allclose(sim_rates, math_rates, atol=1, rtol=0.02))
def __init__(self, network, dt=0.001, seed=None, model=None):
    """Initialize the simulator with a network and (optionally) a model.

    Most of the time, you will pass in a network and sometimes a dt::

        sim1 = nengo.Simulator(my_network)  # Uses default 0.001s dt
        sim2 = nengo.Simulator(my_network, dt=0.01)  # Uses 0.01s dt

    For more advanced use cases, you can initialize the model yourself,
    and also pass in a network that will be built into the same model
    that you pass in::

        sim = nengo.Simulator(my_network, model=my_model)

    If you want full control over the build process, then you can build
    your network into the model manually. If you do this, then you must
    explicitly pass in ``None`` for the network::

        sim = nengo.Simulator(None, model=my_model)

    Parameters
    ----------
    network : nengo.Network instance or None
        A network object to be built and then simulated.
        If a fully built ``model`` is passed in, then you can skip
        building the network by passing in network=None.
    dt : float
        The length of a simulator timestep, in seconds.
    seed : int
        A seed for all stochastic operators used in this simulator.
        Note that there are no stochastic operators implemented
        currently, so this parameter does nothing.
    model : nengo.builder.Model instance or None
        A model object that contains build artifacts to be simulated.
        Usually the simulator will build this model for you; however,
        if you want to build the network manually, or to inject some
        build artifacts in the Model before building the network,
        then you can pass in a ``nengo.builder.Model`` instance.
    """
    self.dt = dt
    if model is None:
        self.model = Model(dt=self.dt,
                           label="%s, dt=%f" % (network.label, dt),
                           seed=network.seed)
    else:
        self.model = model

    if network is not None:
        # Build the network into the model
        Builder.build(network, model=self.model)

    # Use model seed as simulator seed if the seed is not provided
    # Note: seed is not used right now, but one day...
    self.seed = self.model.seed if seed is None else seed

    # -- map from Signal.base -> ndarray
    self.signals = SignalDict(__time__=np.asarray(0.0, dtype=np.float64))
    for op in self.model.operators:
        op.init_signals(self.signals, self.dt)

    self.dg = operator_depencency_graph(self.model.operators)
    self._step_order = [node for node in toposort(self.dg)
                        if hasattr(node, "make_step")]
    self._steps = [node.make_step(self.signals, self.dt)
                   for node in self._step_order]

    self.n_steps = 0

    # Add built states to the probe dictionary
    self._probe_outputs = self.model.params

    # Provide a nicer interface to probe outputs
    self.data = ProbeDict(self._probe_outputs)
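# A minimal usage sketch of the "full control" path described in the docstring
# above. Everything here is illustrative: `my_network` is a placeholder, and
# `sim.step()` is assumed to advance the simulation by one timestep (as the
# test suite uses it); only Model(dt=..., label=...) and
# Builder.build(network, model=...) are taken directly from the code above.
#
#     model = Model(dt=0.001, label="manually built model")
#     Builder.build(my_network, model=model)  # same call __init__ would make
#     # ... inspect or inject build artifacts in `model` here ...
#     sim = nengo.Simulator(None, model=model)
#     for _ in range(1000):
#         sim.step()  # advance 1000 timesteps (1 s at dt=0.001)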
def test_encoder_decoder_pathway(self):
    #
    # This test is a very short and small simulation that verifies,
    # essentially by hand, that the simulator does the right things
    # in the right order.
    #
    m = nengo.Model("")
    dt = 0.001
    foo = Signal(n=1, name='foo')
    pop = Direct(n_in=2, n_out=2, fn=lambda x: x + 1, name='pop')
    decoders = np.asarray([.2, .1])
    decs = Constant(decoders * 0.5)

    m.signals = [foo, decs]
    m.operators = []
    Builder().build_direct(pop, m, dt)
    m.operators += [
        DotInc(Constant([[1.0], [2.0]]), foo, pop.input_signal),
        ProdUpdate(decs, pop.output_signal, Constant(0.2), foo)
    ]

    def check(sig, target):
        self.assertTrue(
            np.allclose(sim.signals[sig], target),
            "%s: value %s is not close to target %s" %
            (sig, sim.signals[sig], target))

    sim = m.simulator(sim_class=self.Simulator, dt=dt, builder=testbuilder)

    # -- initialize things
    sim.signals[foo] = np.asarray([1.0])
    check(foo, 1.0)
    check(pop.input_signal, 0)
    check(pop.output_signal, 0)

    sim.step()
    # DotInc to pop.input_signal (input=[1.0, 2.0])
    # ProdUpdate updates foo (foo=[0.2])
    # pop updates pop.output_signal (output=[2, 3])
    check(pop.input_signal, [1, 2])
    check(pop.output_signal, [2, 3])
    check(foo, .2)
    check(decs, [.1, .05])

    sim.step()
    # DotInc to pop.input_signal (input=[0.2, 0.4])
    #   (note that pop resets its own input signal each timestep)
    # ProdUpdate updates foo (foo=[0.39]): 0.2*0.5*2 + 0.1*0.5*3 + 0.2*0.2
    # pop updates pop.output_signal (output=[1.2, 1.4])
    check(decs, [.1, .05])
    check(pop.input_signal, [0.2, 0.4])
    check(pop.output_signal, [1.2, 1.4])
    # -- foo is computed as a ProdUpdate of the *previous* output signal
    #    foo <- .2 * foo + dot(decoders * .5, output_signal)
    #           .2 * .2  + dot([.2, .1] * .5, [2, 3])
    #           .04      + (.2 + .15)
    #        <- .39
    check(foo, .39)