def test_nest_dense_chain():
    """Spikes from a Poisson source should propagate through two chained Dense layers."""
    source = pynn.Population(12, pynn.SpikeSourcePoisson(rate = 100))
    hidden = pynn.Population(10, pynn.IF_cond_exp())
    readout = pynn.Population(2, pynn.IF_cond_exp())
    readout.record('spikes')
    first = v.Dense(source, hidden, v.ReLU())
    second = v.Dense(hidden, readout, v.ReLU())
    pynn.run(1000)
    # The final population must have produced at least one spike train
    assert len(readout.get_data().segments[-1].spiketrains) > 0
def test_nest_dense_normalisation():
    """A saturated input through unit weights should decode to (roughly) all ones."""
    pop_in = pynn.Population(12, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    pop_out = pynn.Population(10, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    layer = v.Dense(pop_in, pop_out, v.ReLU(), weights=1)
    model = v.Model(layer)
    prediction = model.predict(np.ones(12) * 12, 50)
    assert np.allclose(np.ones(10), prediction, atol=0.1)
def test_nest_dense_create():
    """Default Dense weights are randomly drawn rather than all ones."""
    pre = pynn.Population(12, pynn.IF_cond_exp())
    post = pynn.Population(10, pynn.IF_cond_exp())
    dense = v.Dense(pre, post, v.ReLU())
    uniform = np.ones((12, 10))
    drawn = dense.projection.get('weight', format='array')
    # Should be normal distributed
    assert not np.allclose(drawn, uniform)
    assert abs(drawn.sum()) <= 24
def test_nest_dense_shape():
    """Stored input/output spike arrays match the pre/post population sizes."""
    pre = pynn.Population(12, pynn.SpikeSourcePoisson(rate = 10))
    post = pynn.Population(10, pynn.IF_cond_exp())
    dense = v.Dense(pre, post, v.ReLU(), weights = 1)
    pynn.run(1000)
    dense.store_spikes()
    assert dense.input.shape == (12,)
    assert dense.output.shape[0] == 10
def test_nest_dense_increased_weight_fire():
    """Doubling the connection weight should at least double the output spike count.

    Bug fix: the baseline run previously also used ``weights = 2`` (identical to
    the second run), so the assertion ``count2 >= count1 * 2`` compared two
    equal configurations and could not pass in general. The baseline now uses
    ``weights = 1`` so the comparison actually exercises the weight increase.
    """
    # Baseline run with unit weight
    p1 = pynn.Population(1, pynn.SpikeSourcePoisson(rate = 1))
    p2 = pynn.Population(1, pynn.IF_cond_exp())
    p2.record('spikes')
    d = v.Dense(p1, p2, v.ReLU(), weights = 1)
    pynn.run(1000)
    spiketrains = p2.get_data().segments[-1].spiketrains
    count1 = spiketrains[0].size
    pynn.reset()
    # Second run with doubled weight: expect proportionally more spikes
    p1 = pynn.Population(1, pynn.SpikeSourcePoisson(rate = 1))
    p2 = pynn.Population(1, pynn.IF_cond_exp())
    p2.record('spikes')
    d = v.Dense(p1, p2, v.ReLU(), weights = 2)
    pynn.run(1000)
    spiketrains = p2.get_data().segments[-1].spiketrains
    count2 = spiketrains[0].size
    assert count2 >= count1 * 2
def test_nest_model_predict_inactive():
    """With an all-zero input the decoded output should stay (almost) silent."""
    pre = pynn.Population(2, pynn.IF_cond_exp())
    post = pynn.Population(2, pynn.IF_cond_exp())
    layer = v.Dense(pre, post, v.ReLU(), decoder=v.spike_count)
    model = v.Model(layer)
    prediction = model.predict(np.array([0, 0]), 10)
    assert len(prediction) == 2
    assert prediction.sum() < 10
def test_nest_model_linear_scaling():
    """A constant input of 12 through unit weights should decode to 38 spikes per neuron."""
    pre = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    post = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    layer = v.Dense(pre, post, v.ReLU(), decoder=v.spike_count, weights=1)
    model = v.Model(layer)
    prediction = model.predict(np.array([12, 12]), 50)
    assert np.allclose([38, 38], prediction)
def test_nest_model_predict_active():
    """Identical inputs through identical weights should spike near-identically."""
    pre = pynn.Population(2, pynn.IF_cond_exp())
    post = pynn.Population(2, pynn.IF_cond_exp())
    layer = v.Dense(pre, post, v.ReLU(), decoder=v.spike_count, weights=1)
    model = v.Model(layer)
    prediction = model.predict(np.array([1, 1]), 1000)
    assert len(prediction) == 2
    # Must be approx same spikes
    assert abs(prediction[0] - prediction[1]) <= 10
def test_replicate_can_replicate():
    """A Replicate layer forwards the same activity to both target populations."""
    source = pynn.Population(6, pynn.IF_cond_exp(i_offset=10))
    target_a = pynn.Population(6, pynn.IF_cond_exp())
    target_b = pynn.Population(6, pynn.IF_cond_exp())
    layer = v.Replicate(source, (target_a, target_b), v.ReLU(), weights=(1, 1))
    pynn.run(1000)
    layer.store_spikes()
    # Both replicas should be fully active
    assert np.allclose(np.ones((2, 6)), layer.get_output())
def test_replicate_create():
    """Replicate exposes two sub-layers and a (2, 6)-shaped combined output."""
    source = pynn.Population(6, pynn.IF_cond_exp())
    target_a = pynn.Population(6, pynn.IF_cond_exp())
    target_b = pynn.Population(6, pynn.IF_cond_exp())
    layer = v.Replicate(source, (target_a, target_b), v.ReLU(), weights=(1, 1))
    pynn.run(1000)
    layer.store_spikes()
    assert layer.layer1.input.shape == (6, 0)
    assert layer.layer2.input.shape == (6, 0)
    assert layer.get_output().shape == (2, 6)
def test_nest_dense_reduced_weight_fire():
    """A zero weight must block spike propagation to the second target neuron.

    Bug fix: neither population called ``record('spikes')`` before the run
    (every other spike-reading test in this file does), so ``get_data()``
    had no spike trains to return and the assertions could not observe any
    activity. Both populations now record spikes before the simulation.
    """
    p1 = pynn.Population(1, pynn.IF_cond_exp(i_offset=10))
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    p1.record('spikes')
    p2.record('spikes')
    d = v.Dense(p1, p2, v.ReLU(), weights = np.array([[1, 0]]))
    pynn.run(1000)
    spiketrains1 = p1.get_data().segments[-1].spiketrains
    spiketrains2 = p2.get_data().segments[-1].spiketrains
    assert spiketrains1[0].size > 0   # Driven source fires
    assert spiketrains2[0].size > 0   # Weight 1: connected neuron fires
    assert spiketrains2[1].size == 0  # Weight 0: disconnected neuron is silent
def test_nest_create_input_populations():
    """Model builds one input population per input neuron and maps values to i_offset."""
    pre = pynn.Population(2, pynn.IF_cond_exp())
    post = pynn.Population(2, pynn.IF_cond_exp())
    layer = v.Dense(pre, post, v.ReLU())
    model = v.Model(layer)
    assert len(model.input_populations) == 2
    model.set_input(np.array([1, 0.2]))
    assert model.input_populations[0].get('i_offset') == 1
    assert model.input_populations[1].get('i_offset') == 0.2
def test_nest_dense_projection():
    """Uniform weights should yield roughly equal activation across all targets."""
    source = pynn.Population(12, pynn.SpikeSourcePoisson(rate = 10))
    target = pynn.Population(10, pynn.IF_cond_exp())
    target.record('spikes')
    dense = v.Dense(source, target, v.ReLU(), weights = 1)
    pynn.run(1000)
    spiketrains = target.get_data().segments[-1].spiketrains
    assert len(spiketrains) == 10
    mean_count = np.array([len(train) for train in spiketrains]).mean()
    # Should have equal activation
    for train in spiketrains:
        assert abs(len(train) - mean_count) <= 1
def test_nest_input_projection():
    """Input projections carry translated unit weights and drive the layer evenly."""
    pre = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    post = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    layer = v.Dense(pre, post, v.ReLU(), weights=1)
    model = v.Model(layer)
    translation = v.LinearTranslation()
    assert np.allclose(layer.get_weights(), np.ones((2, 2)))
    assert model.input_projection[0].weight == translation.weights(1, 1)
    assert model.input_projection[1].weight == translation.weights(1, 1)
    model.predict([1, 1], 50)
    trains = layer.output
    # Both neurons receive the same drive, so spike counts should be close
    assert abs(len(trains[0]) - len(trains[1])) <= 20
def test_nest_dense_restore():
    """restore_weights reinstates the cached weights after an external overwrite."""
    pre = pynn.Population(12, pynn.IF_cond_exp())
    post = pynn.Population(10, pynn.IF_cond_exp())
    dense = v.Dense(pre, post, v.ReLU(), weights = 2)
    dense.set_weights(-1)
    translation = v.LinearTranslation()
    expected = translation.weights(np.ones((12, 10)) * -1, 12)
    assert np.array_equal(dense.projection.get('weight', format='array'), expected)
    dense.projection.set(weight = 1)  # Simulate reset()
    assert np.array_equal(dense.projection.get('weight', format='array'),
                          np.ones((12, 10)))
    dense.restore_weights()
    assert np.array_equal(dense.projection.get('weight', format='array'), expected)
def test_nest_model_backwards():
    """A backward pass with error [0, 1, 1] should zero the last two weight columns.

    Cleanup: the return value of ``predict`` was bound to an unused local
    (``spikes``); the call is kept for its side effect of running the
    forward pass that ``backward`` needs.
    """
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(3, pynn.IF_cond_exp())
    l1 = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count_linear, weights=1)
    m = v.Model(l1)
    xs = np.array([12, 12])
    m.predict(xs, 50)
    # Plain gradient step: subtract the raw gradients (no learning rate)
    def optimise(w, g, b, bg):
        return w - g, b - bg
    m.backward([0, 1, 1], optimise)
    expected_weights = np.array([[1, 0, 0], [1, 0, 0]])
    assert np.allclose(l1.get_weights(), expected_weights, atol=0.2)
def test_nest_model_backwards_reset():
    """Two successive backward passes accumulate the expected weight updates.

    Cleanup: removed the unused locals ``ys1``/``ys2`` (targets that were
    never consumed) and the unused bindings of the ``predict`` return values
    (``target1``/``target2``); the predictions are still executed for their
    forward-pass side effects, and the duplicate input arrays are merged
    into a single ``xs``.
    """
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    l1 = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count_normalised, weights=1)
    m = v.Model(l1)
    xs = np.array([10, 10])
    # First pass
    m.predict(xs, 50)
    m.backward([0, 1], lambda w, g, b, bg: (w - g, b - bg))
    expected_weights = np.array([[1, 0], [1, 0]])
    assert np.allclose(l1.get_weights(), expected_weights)
    # Second pass
    m.predict(xs, 50)
    m.backward([1, 0], lambda w, g, b, bg: (w - g, b - bg))
    expected_weights = np.array([[-1, 0], [-1, 0]])
    assert np.allclose(l1.get_weights(), expected_weights)