def test_simple(self):
    params = dict(simulator=self.Simulator, seed=123, dt=0.001)

    # Old API
    net = nef.Network('test_simple', **params)
    net.make_input('in', value=np.sin)
    p = net.make_probe('in', dt_sample=0.001, pstc=0.0)
    rawp = net._raw_probe(net.inputs['in'], dt_sample=.001)
    st_probe = net._raw_probe(net.model.simtime, dt_sample=.001)
    net.run(0.01)

    data = p.get_data()
    raw_data = rawp.get_data()
    st_data = st_probe.get_data()
    self.assertTrue(np.allclose(st_data.ravel(),
                                np.arange(0.001, 0.0105, .001)))
    self.assertTrue(np.allclose(raw_data.ravel(),
                                np.sin(np.arange(0, 0.0095, .001))))
    # -- the make_probe call induces a one-step delay
    #    on readout even when the pstc is really small.
    self.assertTrue(np.allclose(data.ravel()[1:],
                                np.sin(np.arange(0, 0.0085, .001))))

    # New API
    m = nengo.Model('test_simple', **params)
    node = m.make_node('in', output=np.sin)
    m.probe('in')
    m.run(0.01)
    self.assertTrue(np.allclose(m.data[m.simtime].ravel(),
                                np.arange(0.001, 0.0105, .001)))
    self.assertTrue(np.allclose(m.data['in'].ravel(),
                                np.sin(np.arange(0, 0.0095, .001))))
def test_scalar(self):
    """A network that represents sin(t)."""
    simulator = self.Simulator
    params = dict(simulator=simulator, seed=123, dt=0.001)
    N = 30
    target = np.sin(np.arange(4999) / 1000.)
    target.shape = (4999, 1)

    # Old API
    net = nef.Network('test_scalar', **params)
    net.make_input('in', value=np.sin)
    net.make('A', N, 1)
    net.connect('in', 'A')
    in_p = net.make_probe('in', dt_sample=0.001, pstc=0.0)
    a_p = net.make_probe('A', dt_sample=0.001, pstc=0.02)
    net.run(5)

    in_data = in_p.get_data()
    a_data = a_p.get_data()

    with Plotter(simulator) as plt:
        t = net.model.data[net.model.simtime]
        plt.plot(t, in_data, label='Input')
        plt.plot(t, a_data, label='Neuron approximation, pstc=0.02')
        plt.legend(loc=0)
        plt.savefig('test_ensemble.test_scalar-old.pdf')
        plt.close()

    logger.debug("[Old API] input RMSE: %f", rmse(target, in_data))
    logger.debug("[Old API] A RMSE: %f", rmse(target, a_data))
    self.assertTrue(rmse(target, in_data) < 0.001)
    self.assertTrue(rmse(target, a_data) < 0.1)

    # New API
    m = nengo.Model('test_scalar', **params)
    m.make_node('in', output=np.sin)
    m.make_ensemble('A', nengo.LIF(N), 1)
    m.connect('in', 'A')
    m.probe('in')
    m.probe('A', filter=0.02)
    m.run(5)

    with Plotter(simulator) as plt:
        t = m.data[m.simtime]
        plt.plot(t, m.data['in'], label='Input')
        plt.plot(t, m.data['A'], label='Neuron approximation, pstc=0.02')
        plt.legend(loc=0)
        plt.savefig('test_ensemble.test_scalar-new.pdf')
        plt.close()

    logger.debug("[New API] input RMSE: %f", rmse(target, m.data['in']))
    logger.debug("[New API] A RMSE: %f", rmse(target, m.data['A']))
    self.assertTrue(rmse(target, m.data['in']) < 0.001)
    self.assertTrue(rmse(target, m.data['A']) < 0.1)

    # Check old/new API similarity
    logger.debug("Old/New API RMSE: %f", rmse(a_data, m.data['A']))
    self.assertTrue(rmse(a_data, m.data['A']) < 0.1)
def test_prod(self):
    def product(x):
        return x[0] * x[1]

    N = 250
    seed = 123
    net = nef.Network('Matrix Multiplication', seed=seed,
                      simulator=self.Simulator)

    net.make_input('sin', value=np.sin)
    net.make_input('neg', value=[-.5])
    net.make_array('p', 2 * N, 1, dimensions=2, radius=1.5)
    net.make_array('D', N, 1, dimensions=1)
    net.connect('sin', 'p', transform=[[1], [0]])
    net.connect('neg', 'p', transform=[[0], [1]])
    net.connect('p', 'D', func=product, pstc=0.01)

    p_raw = net._probe_decoded_signals(
        [net.ensembles['p'].origin['product'].sigs[0]],
        dt_sample=.01, pstc=.01)

    probe_p = net.make_probe('p', dt_sample=.01, pstc=.01)
    probe_d = net.make_probe('D', dt_sample=.01, pstc=.01)

    net.run(6)

    data_p = probe_p.get_data()
    data_d = probe_d.get_data()
    data_r = p_raw.get_data()

    with Plotter(self.Simulator) as plt:
        plt.subplot(211)
        plt.plot(data_p)
        plt.plot(np.sin(np.arange(0, 6, .01)))
        plt.subplot(212)
        plt.plot(data_d)
        plt.plot(data_r)
        plt.plot(-.5 * np.sin(np.arange(0, 6, .01)))
        plt.savefig('test_old_api.test_prod.pdf')
        plt.close()

    self.assertTrue(np.allclose(data_p[:, 0], np.sin(np.arange(0, 6, .01)),
                                atol=.1, rtol=.01))
    self.assertTrue(np.allclose(data_p[20:, 1], -0.5, atol=.1, rtol=.01))

    def match(a, b):
        self.assertTrue(np.allclose(a, b, .1, .1))

    match(data_d[:, 0], -0.5 * np.sin(np.arange(0, 6, .01)))
    match(data_r[:, 0], -0.5 * np.sin(np.arange(0, 6, .01)))
def test_constant_vector(self):
    """A network that represents a constant 3D vector."""
    simulator = self.Simulator
    params = dict(simulator=simulator, seed=123, dt=0.001)
    N = 30
    vals = [0.6, 0.1, -0.5]

    # Old API
    net = nef.Network('test_constant_vector', **params)
    net.make_input('in', value=vals)
    net.make('A', N * len(vals), len(vals))
    net.connect('in', 'A', transform=np.eye(len(vals)))
    in_p = net.make_probe('in', dt_sample=0.001, pstc=0.0)
    a_p = net.make_probe('A', dt_sample=0.001, pstc=0.1)
    net.run(1)

    in_data = in_p.get_data()
    a_data = a_p.get_data()

    with Plotter(simulator) as plt:
        t = net.model.data[net.model.simtime]
        plt.plot(t, in_data, label='Input')
        plt.plot(t, a_data, label='Neuron approximation, pstc=0.1')
        plt.legend(loc=0, prop={'size': 10})
        plt.savefig('test_ensemble.test_constant_vector-old.pdf')
        plt.close()

    self.assertTrue(np.allclose(in_data[-10:], vals, atol=.05, rtol=.05))
    self.assertTrue(np.allclose(a_data[-10:], vals, atol=.05, rtol=.05))

    # New API
    m = nengo.Model('test_constant_vector', **params)
    m.make_node('in', output=vals)
    m.make_ensemble('A', nengo.LIF(N * len(vals)), len(vals))
    m.connect('in', 'A')
    m.probe('in')
    m.probe('A', filter=0.1)
    m.run(1)

    with Plotter(simulator) as plt:
        t = m.data[m.simtime]
        plt.plot(t, m.data['in'], label='Input')
        plt.plot(t, m.data['A'], label='Neuron approximation, pstc=0.1')
        plt.legend(loc=0, prop={'size': 10})
        plt.savefig('test_ensemble.test_constant_vector-new.pdf')
        plt.close()

    self.assertTrue(
        np.allclose(m.data['in'][-10:], vals, atol=.05, rtol=.05))
    self.assertTrue(
        np.allclose(m.data['A'][-10:], vals, atol=.05, rtol=.05))
def test_constant_scalar(self):
    """A Network that represents a constant value."""
    simulator = self.Simulator
    params = dict(simulator=simulator, seed=123, dt=0.001)
    N = 30
    val = 0.5

    # Old API
    net = nef.Network('test_constant_scalar', **params)
    net.make_input('in', value=[val])
    net.make('A', N, 1)
    net.connect('in', 'A')
    in_p = net.make_probe('in', dt_sample=0.001, pstc=0.0)
    a_p = net.make_probe('A', dt_sample=0.001, pstc=0.1)
    net.run(1)

    in_data = in_p.get_data()
    a_data = a_p.get_data()

    with Plotter(simulator) as plt:
        t = net.model.data[net.model.simtime]
        plt.plot(t, in_data, label='Input')
        plt.plot(t, a_data, label='Neuron approximation, pstc=0.1')
        plt.legend(loc=0)
        plt.savefig('test_ensemble.test_constant_scalar-old.pdf')
        plt.close()

    self.assertTrue(np.allclose(in_data.ravel(), val, atol=.05, rtol=.05))
    self.assertTrue(np.allclose(a_data[-10:], val, atol=.05, rtol=.05))

    # New API
    m = nengo.Model('test_constant_scalar', **params)
    m.make_node('in', output=val)
    m.make_ensemble('A', nengo.LIF(N), 1)
    m.connect('in', 'A')
    m.probe('in')
    m.probe('A', filter=0.1)
    m.run(1)

    with Plotter(simulator) as plt:
        t = m.data[m.simtime]
        plt.plot(t, m.data['in'], label='Input')
        plt.plot(t, m.data['A'], label='Neuron approximation, pstc=0.1')
        plt.legend(loc=0)
        plt.savefig('test_ensemble.test_constant_scalar-new.pdf')
        plt.close()

    self.assertTrue(
        np.allclose(m.data['in'].ravel(), val, atol=.05, rtol=.05))
    self.assertTrue(np.allclose(m.data['A'][-10:], val, atol=.05, rtol=.05))
def test_counters(self):
    params = dict(simulator=self.Simulator, seed=123, dt=0.001)

    # Old API
    net = nef.Network('test_counters', **params)
    simtime_probe = net._raw_probe(net.model.simtime, dt_sample=.001)
    steps_probe = net._raw_probe(net.model.steps, dt_sample=.001)
    net.run(0.003)
    simtime_data = simtime_probe.get_data()
    steps_data = steps_probe.get_data()
    self.assertTrue(np.allclose(simtime_data.flatten(),
                                [.001, .002, .003]))
    self.assertTrue(np.allclose(steps_data.flatten(), [1, 2, 3]))

    # New API
    m = nengo.Model('test_counters', **params)
    m.probe(m.simtime)
    m.probe(m.steps)
    m.run(0.003)
    self.assertTrue(
        np.allclose(m.data[m.simtime].flatten(), [.001, .002, .003]))
    self.assertTrue(np.allclose(m.data[m.steps].flatten(), [1, 2, 3]))
def test_vector(self):
    """A network that represents sin(t), cos(t), arctan(t)."""
    simulator = self.Simulator
    params = dict(simulator=simulator, seed=123, dt=0.001)
    N = 40
    target = np.vstack((np.sin(np.arange(4999) / 1000.),
                        np.cos(np.arange(4999) / 1000.),
                        np.arctan(np.arange(4999) / 1000.))).T

    # Old API
    net = nef.Network('test_vector', **params)
    net.make_input('sin', value=np.sin)
    net.make_input('cos', value=np.cos)
    net.make_input('arctan', value=np.arctan)
    net.make('A', N * 3, 3, radius=2)
    net.connect('sin', 'A', transform=[[1], [0], [0]])
    net.connect('cos', 'A', transform=[[0], [1], [0]])
    net.connect('arctan', 'A', transform=[[0], [0], [1]])
    sin_p = net.make_probe('sin', dt_sample=0.001, pstc=0.0)
    cos_p = net.make_probe('cos', dt_sample=0.001, pstc=0.0)
    arctan_p = net.make_probe('arctan', dt_sample=0.001, pstc=0.0)
    a_p = net.make_probe('A', dt_sample=0.001, pstc=0.02)
    net.run(5)

    sin_data = sin_p.get_data()
    cos_data = cos_p.get_data()
    arctan_data = arctan_p.get_data()
    a_data = a_p.get_data()

    with Plotter(simulator) as plt:
        t = net.model.data[net.model.simtime]
        plt.plot(t, sin_data, label='sin')
        plt.plot(t, cos_data, label='cos')
        plt.plot(t, arctan_data, label='arctan')
        plt.plot(t, a_data, label='Neuron approximation, pstc=0.02')
        plt.legend(loc=0, prop={'size': 10})
        plt.savefig('test_ensemble.test_vector-old.pdf')
        plt.close()

    logger.debug("[Old API] sin RMSE: %f", rmse(target[:, 0], sin_data))
    logger.debug("[Old API] cos RMSE: %f", rmse(target[:, 1], cos_data))
    logger.debug("[Old API] atan RMSE: %f", rmse(target[:, 2], arctan_data))
    logger.debug("[Old API] A RMSE: %f", rmse(target, a_data))
    self.assertTrue(rmse(target, a_data) < 0.1)

    # New API
    m = nengo.Model('test_vector', **params)
    m.make_node('sin', output=np.sin)
    m.make_node('cos', output=np.cos)
    m.make_node('arctan', output=np.arctan)
    m.make_ensemble('A', nengo.LIF(N * 3), 3, radius=2)
    m.connect('sin', 'A', transform=[[1], [0], [0]])
    m.connect('cos', 'A', transform=[[0], [1], [0]])
    m.connect('arctan', 'A', transform=[[0], [0], [1]])
    m.probe('sin')
    m.probe('cos')
    m.probe('arctan')
    m.probe('A', filter=0.02)
    m.run(5)

    with Plotter(simulator) as plt:
        t = m.data[m.simtime]
        plt.plot(t, m.data['sin'], label='sin')
        plt.plot(t, m.data['cos'], label='cos')
        plt.plot(t, m.data['arctan'], label='arctan')
        plt.plot(t, m.data['A'], label='Neuron approximation, pstc=0.02')
        plt.legend(loc=0, prop={'size': 10})
        plt.savefig('test_ensemble.test_vector-new.pdf')
        plt.close()

    # Not sure why, but this isn't working...
    logger.debug("[New API] sin RMSE: %f", rmse(target[:, 0], m.data['sin']))
    logger.debug("[New API] cos RMSE: %f", rmse(target[:, 1], m.data['cos']))
    logger.debug("[New API] atan RMSE: %f",
                 rmse(target[:, 2], m.data['arctan']))
    logger.debug("[New API] A RMSE: %f", rmse(target, m.data['A']))
    self.assertTrue(rmse(target, m.data['A']) < 0.1)

    # Check old/new API similarity
    logger.debug("Old/New API RMSE: %f", rmse(a_data, m.data['A']))
    self.assertTrue(rmse(a_data, m.data['A']) < 0.1)
[Input] ----> (A) --'
               ^
[Control] -----'

Network behaviour:
  A = tau * Input + Input * Control
"""

import nengo.old_api as api

### Define model parameters
tau = 0.1

### Create the nengo model
model = api.Network('Controlled Integrator')

### Create the model inputs
# Sort the (time, value) breakpoints by descending time so that input_f
# can return the value of the most recent breakpoint below t.
input_d = sorted(
    {0.2: 5, 0.3: 0, 0.44: -10, 0.54: 0, 0.8: 5, 0.9: 0}.items(),
    reverse=True)

def input_f(t):
    for key, value in input_d:
        if t > key:
            return value
    return 0.0

model.make_input('Input', input_f)
model.make_input('Control', [1])
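
# --- Added sanity check (not part of the original example): a minimal,
#     standalone illustration of the piecewise-constant input defined above.
#     Because input_d is sorted by descending time, input_f(t) returns the
#     value attached to the most recent breakpoint smaller than t.  The
#     expected values below are read straight from the input_d dictionary.
assert input_f(0.0) == 0.0   # before the first breakpoint at t = 0.2
assert input_f(0.25) == 5    # 0.2 < t <= 0.3   -> 5
assert input_f(0.5) == -10   # 0.44 < t <= 0.54 -> -10
assert input_f(1.0) == 0     # after the last breakpoint at t = 0.9 -> 0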
              v    |
[Input] ---> (A) --'

Network behaviour:
  A = A_matrix * A
"""

import nengo.old_api as api
import numpy as np

### Define model parameters
speed = 10  # Base frequency of oscillation
tau = 0.1   # TODO: this is supposed to be the feedback time constant

### Create the nengo model
model = api.Network('Integrator')

### Create the model inputs
def start_input(t):
    if t < 0.01:
        return [1, 0]
    else:
        return [0, 0]

model.make_input('Input', start_input)

def speed_func(t):
    if t < 0.3:
        return 1
    elif t < 0.6:
        return 0.5
    else:
        return 1

model.make_input('Speed', speed_func)
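
# --- Added sketch (illustrative only, not part of the original example):
#     the ideal dynamics the network is meant to approximate.  Assuming
#     A_matrix is the rotation generator [[0, -speed], [speed, 0]], the
#     state under dx/dt = A_matrix * x traces a circle; simple Euler
#     integration gives a reference trajectory to compare the ensemble
#     against.  The name ideal_trajectory is hypothetical and unused below.
def ideal_trajectory(dt=0.001, t_end=1.0, freq=speed):
    A_matrix = np.array([[0., -freq],
                         [freq, 0.]])
    x = np.array([1., 0.])             # same kick that start_input provides
    xs = np.zeros((int(t_end / dt), 2))
    for step in range(xs.shape[0]):
        x = x + dt * A_matrix.dot(x)   # Euler step of dx/dt = A_matrix * x
        xs[step] = x
    return xs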
def test_multidim_probe(self):
    # Adjust these values to change the matrix dimensions
    #  Matrix A is D1xD2
    #  Matrix B is D2xD3
    #  result is D1xD3
    D1 = 1
    D2 = 2
    D3 = 3
    seed = 123
    N = 200

    Amat = np.asarray([[.4, .8]])
    Bmat = np.asarray([[-1.0, -0.6, -.15], [0.25, .5, .7]])

    net = nef.Network('V', seed=seed, simulator=self.Simulator)

    # values should stay within the range (-radius, radius)
    radius = 2.0

    # make 2 matrices to store the input
    logging.debug("make_array: input matrices A and B")
    net.make_array('A', neurons=N, array_size=D1 * D2,
                   radius=radius, neuron_type='lif')
    net.make_array('B', neurons=N, array_size=D2 * D3,
                   radius=radius, neuron_type='lif')

    # connect inputs to them so we can set their value
    inputA = net.make_input('input A', value=Amat.flatten())
    inputB = net.make_input('input B', value=Bmat.flatten())
    logging.debug("connect: input matrices A and B")
    net.connect('input A', 'A')
    net.connect('input B', 'B')

    # the C matrix holds the intermediate product calculations
    # need to compute D1*D2*D3 products to multiply 2 matrices together
    logging.debug("make_array: intermediate C")
    net.make_array('C', 4 * N, D1 * D2 * D3, dimensions=2,
                   radius=1.5 * radius,
                   encoders=[[1, 1], [1, -1], [-1, 1], [-1, -1]],
                   neuron_type='lif')

    transformA = [[0] * (D1 * D2) for i in range(D1 * D2 * D3 * 2)]
    transformB = [[0] * (D2 * D3) for i in range(D1 * D2 * D3 * 2)]
    for i in range(D1):
        for k in range(D3):
            for j in range(D2):
                tmp = (j + k * D2 + i * D2 * D3)
                transformA[tmp * 2][j + i * D2] = 1
                transformB[tmp * 2 + 1][k + j * D3] = 1

    logging.debug("transA: %s", str(transformA))
    logging.debug("transB: %s", str(transformB))

    logging.debug("connect A->C")
    net.connect('A', 'C', transform=transformA)
    logging.debug("connect B->C")
    net.connect('B', 'C', transform=transformB)

    Cprobe = net.make_probe('C', dt_sample=0.01, pstc=0.01)

    net.run(1)

    logging.debug("Cprobe.shape=%s", str(Cprobe.get_data().shape))
    logging.debug("Amat=%s", str(Amat))
    logging.debug("Bmat=%s", str(Bmat))
    data = Cprobe.get_data()

    with Plotter(self.Simulator) as plt:
        for i in range(D1):
            for k in range(D3):
                for j in range(D2):
                    tmp = (j + k * D2 + i * D2 * D3)
                    plt.subplot(D1 * D2 * D3, 2, 1 + 2 * tmp)
                    plt.title('A[%i, %i]' % (i, j))
                    plt.axhline(Amat[i, j])
                    plt.ylim(-radius, radius)
                    plt.plot(data[:, 2 * tmp])

                    plt.subplot(D1 * D2 * D3, 2, 2 + 2 * tmp)
                    plt.title('B[%i, %i]' % (j, k))
                    plt.axhline(Bmat[j, k])
                    plt.ylim(-radius, radius)
                    plt.plot(data[:, 2 * tmp + 1])
        plt.savefig('test_old_api.test_multidimprobe.pdf')
        plt.close()

    for i in range(D1):
        for k in range(D3):
            for j in range(D2):
                tmp = (j + k * D2 + i * D2 * D3)
                self.assertTrue(
                    np.allclose(data[-10:, 2 * tmp], Amat[i, j],
                                atol=0.1, rtol=0.1),
                    (data[-10:, 2 * tmp], Amat[i, j]))
                self.assertTrue(
                    np.allclose(data[-10:, 1 + 2 * tmp], Bmat[j, k],
                                atol=0.1, rtol=0.1))
def test_matrix_mul(self):
    # Adjust these values to change the matrix dimensions
    #  Matrix A is D1xD2
    #  Matrix B is D2xD3
    #  result is D1xD3
    D1 = 1
    D2 = 2
    D3 = 2
    seed = 123
    N = 200

    Amat = np.asarray([[.5, -.5]])
    Bmat = np.asarray([[0, -1.],
                       [.7, 0]])

    net = nef.Network('Matrix Multiplication', seed=seed,
                      simulator=self.Simulator)

    # values should stay within the range (-radius, radius)
    radius = 1

    # make 2 matrices to store the input
    logging.debug("make_array: input matrices A and B")
    net.make_array('A', neurons=N, array_size=D1 * D2,
                   radius=radius, neuron_type='lif')
    net.make_array('B', neurons=N, array_size=D2 * D3,
                   radius=radius, neuron_type='lif')

    # connect inputs to them so we can set their value
    inputA = net.make_input('input A', value=Amat.ravel())
    inputB = net.make_input('input B', value=Bmat.ravel())
    logging.debug("connect: input matrices A and B")
    net.connect('input A', 'A')
    net.connect('input B', 'B')

    # the C matrix holds the intermediate product calculations
    # need to compute D1*D2*D3 products to multiply 2 matrices together
    logging.debug("make_array: intermediate C")
    net.make_array('C', 4 * N, D1 * D2 * D3, dimensions=2,
                   radius=1.5 * radius,
                   encoders=[[1, 1], [1, -1], [-1, 1], [-1, -1]],
                   neuron_type='lif')

    # determine the transformation matrices to get the correct pairwise
    # products computed.  This looks a bit like black magic but if
    # you manually try multiplying two matrices together, you can see
    # the underlying pattern.  Basically, we need to build up D1*D2*D3
    # pairs of numbers in C to compute the product of.  If i,j,k are the
    # indexes into the D1*D2*D3 products, we want to compute the product
    # of element (i,j) in A with the element (j,k) in B.  The index in
    # A of (i,j) is j+i*D2 and the index in B of (j,k) is k+j*D3.
    # The index in C is j+k*D2+i*D2*D3, multiplied by 2 since there are
    # two values per ensemble.  We add 1 to the B index so it goes into
    # the second value in the ensemble.
    transformA = [[0] * (D1 * D2) for i in range(D1 * D2 * D3 * 2)]
    transformB = [[0] * (D2 * D3) for i in range(D1 * D2 * D3 * 2)]
    for i in range(D1):
        for j in range(D2):
            for k in range(D3):
                tmp = (j + k * D2 + i * D2 * D3)
                transformA[tmp * 2][j + i * D2] = 1
                transformB[tmp * 2 + 1][k + j * D3] = 1

    logging.debug("connect A->C")
    net.connect('A', 'C', transform=transformA)
    logging.debug("connect B->C")
    net.connect('B', 'C', transform=transformB)

    # now compute the products and do the appropriate summing
    logging.debug("make_array: output D")
    net.make_array('D', N, D1 * D3, radius=radius, neuron_type='lif')

    def product(x):
        return x[0] * x[1]

    # the mapping for this transformation is much easier, since we want to
    # combine D2 pairs of elements (we sum D2 products together)
    net.connect('C', 'D', index_post=[i // D2 for i in range(D1 * D2 * D3)],
                func=product)

    Aprobe = net.make_probe('A', dt_sample=0.01, pstc=0.01)
    Bprobe = net.make_probe('B', dt_sample=0.01, pstc=0.01)
    Cprobe = net.make_probe('C', dt_sample=0.01, pstc=0.01)
    Dprobe = net.make_probe('D', dt_sample=0.01, pstc=0.01)

    prod_probe = net._probe_decoded_signals(
        net.ensembles['C'].origin['product'].sigs,
        dt_sample=0.01, pstc=.01)

    net.run(1)

    Dmat = np.dot(Amat, Bmat)
    data = Dprobe.get_data()

    with Plotter(self.Simulator) as plt:
        for i in range(D1):
            for k in range(D3):
                plt.subplot(D1, D3, i * D3 + k + 1)
                plt.title('D[%i, %i]' % (i, k))
                plt.plot(data[:, i * D3 + k])
                plt.axhline(Dmat[i, k])
                plt.ylim(-radius, radius)
        plt.savefig('test_old_api.test_matrix_mul.pdf')
        plt.close()

    self.assertTrue(
        np.allclose(Aprobe.get_data()[50:, 0], 0.5, atol=.1, rtol=.01))
    self.assertTrue(
        np.allclose(Aprobe.get_data()[50:, 1], -0.5, atol=.1, rtol=.01))
    self.assertTrue(
        np.allclose(Bprobe.get_data()[50:, 0], 0, atol=.1, rtol=.01))
    self.assertTrue(
        np.allclose(Bprobe.get_data()[50:, 1], -1, atol=.1, rtol=.01))
    self.assertTrue(
        np.allclose(Bprobe.get_data()[50:, 2], .7, atol=.1, rtol=.01))
    self.assertTrue(
        np.allclose(Bprobe.get_data()[50:, 3], 0, atol=.1, rtol=.01))

    for i in range(D1):
        for k in range(D3):
            self.assertTrue(
                np.allclose(data[-10:, i * D3 + k], Dmat[i, k],
                            atol=0.1, rtol=0.1),
                (data[-10:, i * D3 + k], Dmat[i, k]))
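
# --- Added helper (a pure-numpy sketch, not part of the original suite):
#     verifies the index bookkeeping described in the comment above without
#     any neurons.  It builds the same transformA/transformB, pairs the
#     routed elements, multiplies each pair, sums with the same index_post
#     rule, and compares the result to np.dot.  The name
#     _check_matmul_transforms and the default dimensions are illustrative.
def _check_matmul_transforms(D1=2, D2=3, D3=4, seed=0):
    rng = np.random.RandomState(seed)
    A = rng.uniform(-1, 1, size=(D1, D2))
    B = rng.uniform(-1, 1, size=(D2, D3))
    tA = np.zeros((D1 * D2 * D3 * 2, D1 * D2))
    tB = np.zeros((D1 * D2 * D3 * 2, D2 * D3))
    for i in range(D1):
        for j in range(D2):
            for k in range(D3):
                tmp = j + k * D2 + i * D2 * D3
                tA[tmp * 2][j + i * D2] = 1        # routes A[i, j]
                tB[tmp * 2 + 1][k + j * D3] = 1    # routes B[j, k]
    c_first = tA.dot(A.ravel())[0::2]    # first element of each pair: A[i, j]
    c_second = tB.dot(B.ravel())[1::2]   # second element of each pair: B[j, k]
    products = c_first * c_second        # what the 'product' function decodes
    D = np.zeros(D1 * D3)
    for idx in range(D1 * D2 * D3):
        D[idx // D2] += products[idx]    # same rule as index_post above
    assert np.allclose(D.reshape(D1, D3), np.dot(A, B))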