def test_lif_step(upsample, n_elements):
    """Test the lif nonlinearity, comparing one step with the Numpy version."""
    dt = 1e-3
    # n_neurons = [3, 3, 3]
    n_neurons = [12345, 23456, 34567]
    N = len(n_neurons)
    J = RA([np.random.normal(scale=1.2, size=n) for n in n_neurons])
    V = RA([np.random.uniform(low=0, high=1, size=n) for n in n_neurons])
    W = RA([np.random.uniform(low=-5*dt, high=5*dt, size=n) for n in n_neurons])
    OS = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    # tau = 20e-3
    # refs = list(np.random.uniform(low=1.7e-3, high=4.2e-3, size=len(n_neurons)))
    taus = list(np.random.uniform(low=15e-3, high=80e-3, size=len(n_neurons)))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clV = CLRA(queue, V)
    clW = CLRA(queue, W)
    clOS = CLRA(queue, OS)
    # clRef = CLRA(queue, RA(refs))
    clTau = CLRA(queue, RA(taus))

    ### simulate host
    nls = [LIF(n, tau_ref=ref, tau_rc=taus[i]) for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        if upsample <= 1:
            nl.step_math(dt, J[i], V[i], W[i], OS[i])
        else:
            s = np.zeros_like(OS[i])
            for j in range(upsample):
                nl.step_math(dt / upsample, J[i], V[i], W[i], s)
                OS[i] = (OS[i] > 0.5) | (s > 0.5)

    ### simulate device
    plan = plan_lif(queue, clJ, clV, clW, clV, clW, clOS, ref, clTau, dt,
                    n_elements=n_elements, upsample=upsample)
    plan()

    if 1:
        a, b = V, clV
        for i in range(len(a)):
            nc, _ = not_close(a[i], b[i]).nonzero()
            if len(nc) > 0:
                j = nc[0]
                print("i", i, "j", j)
                print("J", J[i][j], clJ[i][j])
                print("V", V[i][j], clV[i][j])
                print("W", W[i][j], clW[i][j])
                print("...", len(nc) - 1, "more")

    n_spikes = np.sum([np.sum(os) for os in OS])
    if n_spikes < 1.0:
        logger.warn("LIF spiking mechanism was not tested!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(V, clV.to_host())
    assert ra.allclose(W, clW.to_host())
    assert ra.allclose(OS, clOS.to_host())

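# The LIF step tests call a `not_close` helper when printing host/device
# mismatches. Its definition is not part of this excerpt; the sketch below is
# only an inference from how it is called (it must return a boolean array that
# is True where the two results disagree), and the default tolerances are
# assumptions.
import numpy as np

def not_close(a, b, rtol=1e-5, atol=1e-8):
    """Elementwise complement of np.isclose: True where `a` and `b` differ."""
    return ~np.isclose(a, b, rtol=rtol, atol=atol)
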
def test_lif_rate(n_elements):
    """Test the `lif_rate` nonlinearity"""
    # n_neurons = [3, 3, 3]
    n_neurons = [123459, 23456, 34567]
    N = len(n_neurons)
    J = RA([np.random.normal(loc=1, scale=10, size=n) for n in n_neurons])
    R = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    taus = list(np.random.uniform(low=15e-3, high=80e-3, size=len(n_neurons)))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clR = CLRA(queue, R)
    clTau = CLRA(queue, RA(taus))

    ### simulate host
    nls = [LIF(n, tau_ref=ref, tau_rc=taus[i]) for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        nl.gain = 1
        nl.bias = 0
        R[i] = nl.rates(J[i].flatten()).reshape((-1, 1))

    ### simulate device
    plan = plan_lif_rate(queue, clJ, clR, ref, clTau, dt=1,
                         n_elements=n_elements)
    plan()

    rate_sum = np.sum([np.sum(r) for r in R])
    if rate_sum < 1.0:
        logger.warn("LIF rate was not tested above the firing threshold!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(R, clR.to_host())

def test_lif_rate(ctx, blockify):
    """Test the `lif_rate` nonlinearity"""
    rng = np.random
    dt = 1e-3
    n_neurons = [123459, 23456, 34567]
    J = RA([rng.normal(loc=1, scale=10, size=n) for n in n_neurons])
    R = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    taus = list(rng.uniform(low=15e-3, high=80e-3, size=len(n_neurons)))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clR = CLRA(queue, R)
    clTaus = CLRA(queue, RA([t * np.ones(n) for t, n in zip(taus, n_neurons)]))

    # simulate host
    nls = [nengo.LIFRate(tau_ref=ref, tau_rc=taus[i])
           for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        nl.step_math(dt, J[i], R[i])

    # simulate device
    plan = plan_lif_rate(queue, dt, clJ, clR, ref, clTaus, blockify=blockify)
    plan()

    rate_sum = np.sum([np.sum(r) for r in R])
    if rate_sum < 1.0:
        logger.warn("LIF rate was not tested above the firing threshold!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(R, clR.to_host())

def test_lif_rate(n_elements):
    """Test the `lif_rate` nonlinearity"""
    rng = np.random
    dt = 1e-3
    n_neurons = [123459, 23456, 34567]
    J = RA([rng.normal(loc=1, scale=10, size=n) for n in n_neurons])
    R = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    taus = list(rng.uniform(low=15e-3, high=80e-3, size=len(n_neurons)))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clR = CLRA(queue, R)
    clTau = CLRA(queue, RA(taus))

    # simulate host
    nls = [LIFRate(tau_ref=ref, tau_rc=taus[i])
           for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        nl.step_math(dt, J[i], R[i])

    # simulate device
    plan = plan_lif_rate(queue, clJ, clR, ref, clTau, dt=dt,
                         n_elements=n_elements)
    plan()

    rate_sum = np.sum([np.sum(r) for r in R])
    if rate_sum < 1.0:
        logger.warn("LIF rate was not tested above the firing threshold!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(R, clR.to_host())

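# Both sides of the rate tests above (the host LIFRate/`rates` computation and
# the OpenCL `plan_lif_rate` kernel) should agree on the steady-state LIF rate
# curve. For reference, a plain NumPy version of that curve is sketched below;
# it is an independent reimplementation, not the library's code, and the exact
# treatment of currents at or just above the J = 1 threshold is an assumption.
import numpy as np

def lif_rate_reference(J, tau_ref, tau_rc):
    """r(J) = 1 / (tau_ref + tau_rc * log(1 + 1 / (J - 1))) for J > 1, else 0."""
    J = np.asarray(J, dtype=float)
    rates = np.zeros_like(J)
    above = J > 1
    rates[above] = 1.0 / (tau_ref + tau_rc * np.log1p(1.0 / (J[above] - 1.0)))
    return rates
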
def test_lif_step(ctx, upsample):
    """Test the lif nonlinearity, comparing one step with the Numpy version."""
    rng = np.random
    dt = 1e-3
    n_neurons = [12345, 23456, 34567]
    J = RA([rng.normal(scale=1.2, size=n) for n in n_neurons])
    V = RA([rng.uniform(low=0, high=1, size=n) for n in n_neurons])
    W = RA([rng.uniform(low=-5 * dt, high=5 * dt, size=n) for n in n_neurons])
    OS = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    taus = rng.uniform(low=15e-3, high=80e-3, size=len(n_neurons))
    amp = 1.

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clV = CLRA(queue, V)
    clW = CLRA(queue, W)
    clOS = CLRA(queue, OS)
    clTaus = CLRA(queue, RA([t * np.ones(n) for t, n in zip(taus, n_neurons)]))

    # simulate host
    nls = [nengo.LIF(tau_ref=ref, tau_rc=taus[i])
           for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        if upsample <= 1:
            nl.step_math(dt, J[i], OS[i], V[i], W[i])
        else:
            s = np.zeros_like(OS[i])
            for j in range(upsample):
                nl.step_math(dt / upsample, J[i], s, V[i], W[i])
                OS[i] = (1./dt) * ((OS[i] > 0) | (s > 0))

    # simulate device
    plan = plan_lif(
        queue, dt, clJ, clV, clW, clOS, ref, clTaus, amp, upsample=upsample)
    plan()

    if 1:
        a, b = V, clV
        for i in range(len(a)):
            nc, _ = not_close(a[i], b[i]).nonzero()
            if len(nc) > 0:
                j = nc[0]
                print("i", i, "j", j)
                print("J", J[i][j], clJ[i][j])
                print("V", V[i][j], clV[i][j])
                print("W", W[i][j], clW[i][j])
                print("...", len(nc) - 1, "more")

    n_spikes = np.sum([np.sum(os) for os in OS])
    if n_spikes < 1.0:
        logger.warn("LIF spiking mechanism was not tested!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(V, clV.to_host())
    assert ra.allclose(W, clW.to_host())
    assert ra.allclose(OS, clOS.to_host())

def test_lif_step(upsample):
    """Test the lif nonlinearity, comparing one step with the Numpy version."""
    rng = np.random
    dt = 1e-3
    n_neurons = [12345, 23456, 34567]
    J = RA([rng.normal(scale=1.2, size=n) for n in n_neurons])
    V = RA([rng.uniform(low=0, high=1, size=n) for n in n_neurons])
    W = RA([rng.uniform(low=-5 * dt, high=5 * dt, size=n) for n in n_neurons])
    OS = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    taus = rng.uniform(low=15e-3, high=80e-3, size=len(n_neurons))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clV = CLRA(queue, V)
    clW = CLRA(queue, W)
    clOS = CLRA(queue, OS)
    clTaus = CLRA(queue, RA([t * np.ones(n) for t, n in zip(taus, n_neurons)]))

    # simulate host
    nls = [nengo.LIF(tau_ref=ref, tau_rc=taus[i])
           for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        if upsample <= 1:
            nl.step_math(dt, J[i], OS[i], V[i], W[i])
        else:
            s = np.zeros_like(OS[i])
            for j in range(upsample):
                nl.step_math(dt / upsample, J[i], s, V[i], W[i])
                OS[i] = (1./dt) * ((OS[i] > 0) | (s > 0))

    # simulate device
    plan = plan_lif(
        queue, dt, clJ, clV, clW, clOS, ref, clTaus, upsample=upsample)
    plan()

    if 1:
        a, b = V, clV
        for i in range(len(a)):
            nc, _ = not_close(a[i], b[i]).nonzero()
            if len(nc) > 0:
                j = nc[0]
                print("i", i, "j", j)
                print("J", J[i][j], clJ[i][j])
                print("V", V[i][j], clV[i][j])
                print("W", W[i][j], clW[i][j])
                print("...", len(nc) - 1, "more")

    n_spikes = np.sum([np.sum(os) for os in OS])
    if n_spikes < 1.0:
        logger.warn("LIF spiking mechanism was not tested!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(V, clV.to_host())
    assert ra.allclose(W, clW.to_host())
    assert ra.allclose(OS, clOS.to_host())

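# For orientation, the state updated by `step_math` / `plan_lif` above is:
# J (input current), V (membrane voltage), W (refractory time remaining), and
# the spike output (scaled by 1/dt in the newer tests). The function below is a
# simplified, self-contained sketch of one such step; it is NOT nengo's
# implementation (in particular it omits nengo's sub-step spike-time
# correction), and the argument order only mirrors the newer tests by analogy.
import numpy as np

def lif_step_sketch(dt, J, spiked, V, W, tau_rc=0.02, tau_ref=0.002, amp=1.0):
    W -= dt                                       # refractory clocks run down
    delta_t = np.clip(dt - W, 0.0, dt)            # integrate only the non-refractory part of dt
    V += (J - V) * -np.expm1(-delta_t / tau_rc)   # exact solution of dV/dt = (J - V) / tau_rc
    spiking = V > 1
    spiked[...] = np.where(spiking, amp / dt, 0.0)  # spike of area amp, height amp/dt
    V[spiking] = 0.0                              # reset voltage
    W[spiking] = tau_ref                          # enter refractory period
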
def test_discontiguous_setitem(ctx, rng):
    A = make_random_ra(3, 2, rng=rng)
    A0 = np.array(A[0])
    a = A0[::3, ::2]
    v = rng.uniform(-1, 1, size=a.shape)
    assert a.size > 0

    A.add_views(
        [A.starts[0]],
        [a.shape[0]],
        [a.shape[1]],
        [a.strides[0] / a.itemsize],
        [a.strides[1] / a.itemsize],
    )
    queue = cl.CommandQueue(ctx)
    clA = CLRA(queue, A)

    a[...] = v
    A[-1] = v
    assert np.allclose(A[0], A0)
    assert np.allclose(A[-1], v)

    print(clA[0].shape)
    print(clA[-1].shape)
    clA[-1] = v
    assert ra.allclose(A, clA.to_host())
    assert np.allclose(clA[-1], v)

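# The discontiguous-setitem test above registers an extra strided view into A's
# buffer (every 3rd row and every 2nd column of A[0], passed to `add_views` as
# element strides) and checks that assigning through that view, on host and
# device, touches only those elements. A NumPy-only analogue of the same stride
# arithmetic, using a hypothetical 6x6 buffer, is shown below
# (`as_strided` expects byte strides, hence the multiplication by base.strides):
import numpy as np

base = np.arange(36, dtype=np.float64).reshape(6, 6)
view = np.lib.stride_tricks.as_strided(
    base, shape=(2, 3), strides=(3 * base.strides[0], 2 * base.strides[1]))
view[...] = -1.0  # writes land only at base[0, 0], [0, 2], [0, 4], [3, 0], [3, 2], [3, 4]
assert (base == -1.0).sum() == 6
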
def test_small(rng):
    sizes = [3] * 3
    vals = [rng.normal(size=size) for size in sizes]
    A = RA(vals)

    queue = cl.CommandQueue(ctx)
    clA = CLRA(queue, A)
    assert ra.allclose(A, clA.to_host())

def test_small(ctx, rng):
    sizes = [3] * 3
    vals = [rng.normal(size=size).astype(np.float32) for size in sizes]
    A = RA(vals)

    queue = cl.CommandQueue(ctx)
    clA = CLRA(queue, A)
    assert ra.allclose(A, clA.to_host())

def test_small():
    n = 3
    sizes = [3] * 3
    vals = [np.random.normal(size=size) for size in sizes]
    A = RA(vals)

    queue = cl.CommandQueue(ctx)
    clA = CLRA(queue, A)
    assert ra.allclose(A, clA.to_host())

def test_setitem(ctx, rng):
    A, clA = make_random_pair(ctx, 3, 2, rng=rng)
    v = rng.uniform(0, 1, size=A[0].shape)
    A[0] = v.astype(A.dtype)
    clA[0] = v.astype(clA.dtype)
    A[1] = 3
    clA[1] = 3
    assert ra.allclose(A, clA.to_host())

def test_setitem(rng):
    A, clA = make_random_pair(3, 2, rng=rng)
    v = rng.uniform(0, 1, size=A[0].shape)
    A[0] = v.astype(A.dtype)
    clA[0] = v.astype(clA.dtype)
    A[1] = 3
    clA[1] = 3
    assert ra.allclose(A, clA.to_host())

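# `make_random_ra` and `make_random_pair` are test fixtures whose definitions
# are not part of this excerpt. The sketch below is inferred purely from the
# call sites in these tests (the `ctx`-taking form of `make_random_pair`
# matches the newer tests); the default size range, the size distribution, and
# the dtype are all assumptions.
import numpy as np
import pyopencl as cl

def make_random_ra(n, d, low=20, high=40, rng=np.random):
    """A RaggedArray of `n` random arrays, each `d`-dimensional with sizes in [low, high]."""
    shapes = [rng.randint(low=low, high=high + 1, size=d) for _ in range(n)]
    return RA([rng.normal(size=shape) for shape in shapes])

def make_random_pair(ctx, n, d, **kwargs):
    """The same random RaggedArray on the host and as a device CLRA copy."""
    A = make_random_ra(n, d, **kwargs)
    queue = cl.CommandQueue(ctx)
    return A, CLRA(queue, A)
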
def test_lif_rate(self, n_elements=0):
    """Test the `lif_rate` nonlinearity"""
    # n_neurons = [3, 3, 3]
    n_neurons = [123459, 23456, 34567]
    N = len(n_neurons)
    J = RA([np.random.normal(loc=1, scale=10, size=n) for n in n_neurons])
    R = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    taus = list(np.random.uniform(low=15e-3, high=80e-3, size=len(n_neurons)))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clR = CLRA(queue, R)
    clTau = CLRA(queue, RA(taus))

    ### simulate host
    nls = [LIF(n, tau_ref=ref, tau_rc=taus[i]) for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        R[i] = nl.rates(J[i].flatten()).reshape((-1, 1))

    ### simulate device
    plan = plan_lif_rate(queue, clJ, clR, ref, clTau, n_elements=n_elements)
    plan()

    rate_sum = np.sum([np.sum(r) for r in R])
    if rate_sum < 1.0:
        logger.warn("LIF rate was not tested above the firing threshold!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(R, clR.to_host())

def test_random_vectors(ctx, rng):
    n = np.int32(rng.randint(low=5, high=10))
    A, clA = make_random_pair(ctx, n, 1, low=3000, high=4000, rng=rng)
    assert ra.allclose(A, clA.to_host())

def test_random_vectors():
    n = np.random.randint(low=5, high=10)
    A, clA = make_random_pair(n, 1, low=3000, high=4000)
    assert ra.allclose(A, clA.to_host())

def test_getitems(rng):
    """Try getting multiple items using a list of indices"""
    A, clA = make_random_pair(10, 2, rng=rng)
    s = [1, 3, 7, 8]
    assert ra.allclose(A[s], clA[s].to_host())

def test_random_matrices(rng):
    n = rng.randint(low=5, high=10)
    A, clA = make_random_pair(n, 2, low=20, high=40, rng=rng)
    assert ra.allclose(A, clA.to_host())

def test_random_vectors(rng):
    n = rng.randint(low=5, high=10)
    A, clA = make_random_pair(n, 1, low=3000, high=4000, rng=rng)
    assert ra.allclose(A, clA.to_host())

def test_random_matrices():
    n = np.random.randint(low=5, high=10)
    A, clA = make_random_pair(n, 2, low=20, high=40)
    assert ra.allclose(A, clA.to_host())

def test_random_vectors(self):
    n = np.random.randint(low=5, high=10)
    A, clA = make_random_pair(n, 1, low=3000, high=4000)
    assert ra.allclose(A, clA.to_host())

def test_getitems(ctx, rng):
    """Try getting multiple items using a list of indices"""
    A, clA = make_random_pair(ctx, 10, 2, rng=rng)
    s = [1, 3, 7, 8]
    assert ra.allclose(A[s], clA[s].to_host())

def test_random_matrices(ctx, rng):
    n = rng.randint(low=5, high=10)
    A, clA = make_random_pair(ctx, n, 2, low=20, high=40, rng=rng)
    assert ra.allclose(A, clA.to_host())

def test_random_matrices(self):
    n = np.random.randint(low=5, high=10)
    A, clA = make_random_pair(n, 2, low=20, high=40)
    assert ra.allclose(A, clA.to_host())

def test_lif_step(self, upsample=1, n_elements=0):
    """Test the lif nonlinearity, comparing one step with the Numpy version."""
    dt = 1e-3
    # n_neurons = [3, 3, 3]
    n_neurons = [12345, 23456, 34567]
    N = len(n_neurons)
    J = RA([np.random.normal(scale=1.2, size=n) for n in n_neurons])
    V = RA([np.random.uniform(low=0, high=1, size=n) for n in n_neurons])
    W = RA([np.random.uniform(low=-5 * dt, high=5 * dt, size=n)
            for n in n_neurons])
    OS = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    # tau = 20e-3
    # refs = list(np.random.uniform(low=1.7e-3, high=4.2e-3, size=len(n_neurons)))
    taus = list(np.random.uniform(low=15e-3, high=80e-3, size=len(n_neurons)))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clV = CLRA(queue, V)
    clW = CLRA(queue, W)
    clOS = CLRA(queue, OS)
    # clRef = CLRA(queue, RA(refs))
    clTau = CLRA(queue, RA(taus))

    ### simulate host
    nls = [LIF(n, tau_ref=ref, tau_rc=taus[i])
           for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        if upsample <= 1:
            nl.step_math0(dt, J[i], V[i], W[i], OS[i])
        else:
            s = np.zeros_like(OS[i])
            for j in range(upsample):
                nl.step_math0(dt / upsample, J[i], V[i], W[i], s)
                OS[i] = (OS[i] > 0.5) | (s > 0.5)

    ### simulate device
    plan = plan_lif(queue, clJ, clV, clW, clV, clW, clOS, ref, clTau, dt,
                    n_elements=n_elements, upsample=upsample)
    plan()

    if 1:
        a, b = V, clV
        for i in range(len(a)):
            nc, _ = not_close(a[i], b[i]).nonzero()
            if len(nc) > 0:
                j = nc[0]
                print("i", i, "j", j)
                print("J", J[i][j], clJ[i][j])
                print("V", V[i][j], clV[i][j])
                print("W", W[i][j], clW[i][j])
                print("...", len(nc) - 1, "more")

    n_spikes = np.sum([np.sum(os) for os in OS])
    if n_spikes < 1.0:
        logger.warn("LIF spiking mechanism was not tested!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(V, clV.to_host())
    assert ra.allclose(W, clW.to_host())
    assert ra.allclose(OS, clOS.to_host())
