# Assumed imports for this excerpt (elided in the original): numpy as np,
# pytest, a PyNN backend as `pynn` (e.g. pyNN.nest) or `sim`, and the layer
# library under test as `v`. The exact module names are assumptions.

def test_nest_dense_normalisation():
    p1 = pynn.Population(12, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    p2 = pynn.Population(10, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    l = v.Dense(p1, p2, v.ReLU(), weights=1)
    m = v.Model(l)
    out = m.predict(np.ones(12) * 12, 50)
    assert np.allclose(np.ones(10), out, atol=0.1)

def test_nest_model_predict_active():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    l = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count, weights=1)
    m = v.Model(l)
    out = m.predict(np.array([1, 1]), 1000)
    assert len(out) == 2
    assert abs(out[0] - out[1]) <= 10  # Must produce approximately the same number of spikes

def test_nest_model_predict_inactive():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    l = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count)
    m = v.Model(l)
    out = m.predict(np.array([0, 0]), 10)
    assert len(out) == 2
    assert out.sum() < 10

def test_nest_dense_create():
    p1 = pynn.Population(12, pynn.IF_cond_exp())
    p2 = pynn.Population(10, pynn.IF_cond_exp())
    d = v.Dense(p1, p2, v.ReLU())
    expected_weights = np.ones((12, 10))
    actual_weights = d.projection.get('weight', format='array')
    assert not np.allclose(actual_weights, expected_weights)  # Should be normally distributed
    assert abs(actual_weights.sum()) <= 24

def test_nest_projection_gaussian():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    c = pynn.Projection(p1, p2,
                        pynn.AllToAllConnector(allow_self_connections=False))
    c.set(weight=pynn.random.RandomDistribution('normal', mu=0.5, sigma=0.1))
    weights = c.get('weight', format='array')
    assert len(weights[weights == 0]) < 1

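# Hedged aside (not part of the original suite): the same RandomDistribution
# can be sampled directly, which makes it easy to eyeball the spread that the
# assertion above relies on. NumpyRNG and next() are standard PyNN APIs; the
# seed and helper name are illustrative only.
def _sample_gaussian_weights():
    rng = pynn.random.NumpyRNG(seed=42)
    dist = pynn.random.RandomDistribution('normal', mu=0.5, sigma=0.1, rng=rng)
    samples = dist.next(100)  # 100 draws from N(0.5, 0.1^2); exact zeros are vanishingly unlikely
    return samples
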
def test_nest_model_linear_scaling():
    p1 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    p2 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    l1 = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count, weights=1)
    m = v.Model(l1)
    xs = np.array([12, 12])
    out = m.predict(xs, 50)
    assert np.allclose([38, 38], out)

def test_replicate_assert_size():
    p1 = pynn.Population(6, pynn.IF_cond_exp())
    p2 = pynn.Population(6, pynn.IF_cond_exp())
    p3 = pynn.Population(5, pynn.IF_cond_exp())
    with pytest.raises(ValueError):
        v.Replicate(p1, (p2, p3))
    with pytest.raises(ValueError):
        v.Replicate(p1, (p3, p2))

def test_nest_dense_chain():
    p1 = pynn.Population(12, pynn.SpikeSourcePoisson(rate=100))
    p2 = pynn.Population(10, pynn.IF_cond_exp())
    p3 = pynn.Population(2, pynn.IF_cond_exp())
    p3.record('spikes')
    d1 = v.Dense(p1, p2, v.ReLU())
    d2 = v.Dense(p2, p3, v.ReLU())
    pynn.run(1000)
    assert len(p3.get_data().segments[-1].spiketrains) > 0

def setUp(self):
    sim.setup()
    self.p1 = sim.Population(7, sim.IF_cond_exp())
    self.p2 = sim.Population(4, sim.IF_cond_exp())
    self.p3 = sim.Population(5, sim.IF_curr_alpha())
    self.syn_rnd = sim.StaticSynapse(weight=0.123, delay=0.5)
    self.syn_a2a = sim.StaticSynapse(weight=0.456, delay=0.4)
    self.random_connect = sim.FixedNumberPostConnector(n=2)
    self.all2all = sim.AllToAllConnector()

def test_replicate_can_replicate():
    p1 = pynn.Population(6, pynn.IF_cond_exp(i_offset=10))
    p2 = pynn.Population(6, pynn.IF_cond_exp())
    p3 = pynn.Population(6, pynn.IF_cond_exp())
    l = v.Replicate(p1, (p2, p3), v.ReLU(), weights=(1, 1))
    pynn.run(1000)
    l.store_spikes()
    expected = np.ones((2, 6))
    assert np.allclose(expected, l.get_output())

def test_nest_create_input_populations():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    l = v.Dense(p1, p2, v.ReLU())
    m = v.Model(l)
    assert len(m.input_populations) == 2
    inp = np.array([1, 0.2])
    m.set_input(inp)
    assert m.input_populations[0].get('i_offset') == 1
    assert m.input_populations[1].get('i_offset') == 0.2

def test_replicate_create():
    p1 = pynn.Population(6, pynn.IF_cond_exp())
    p2 = pynn.Population(6, pynn.IF_cond_exp())
    p3 = pynn.Population(6, pynn.IF_cond_exp())
    l = v.Replicate(p1, (p2, p3), v.ReLU(), weights=(1, 1))
    pynn.run(1000)
    l.store_spikes()
    assert l.layer1.input.shape == (6, 0)
    assert l.layer2.input.shape == (6, 0)
    assert l.get_output().shape == (2, 6)

def test_nest_dense_reduced_weight_fire():
    p1 = pynn.Population(1, pynn.IF_cond_exp(i_offset=10))
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    d = v.Dense(p1, p2, v.ReLU(), weights=np.array([[1, 0]]))
    pynn.run(1000)
    spiketrains1 = p1.get_data().segments[-1].spiketrains
    spiketrains2 = p2.get_data().segments[-1].spiketrains
    assert spiketrains1[0].size > 0
    assert spiketrains2[0].size > 0
    assert spiketrains2[1].size == 0

def test_nest_model_spike_normalisation():
    p1 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    p2 = pynn.Population(4, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    p3 = pynn.Population(4, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    l1 = v.Dense(p1, p2, decoder=v.spike_rate(50), weights=1)
    l2 = v.Dense(p2, p3, decoder=v.spike_rate(50), weights=1)
    m = v.Model(l1, l2)
    m.predict([12, 12], 50)
    for rate in np.concatenate((l1.get_output(), l2.get_output())):
        assert 0 < rate < 1

def test_nest_dense_backprop():
    p1 = pynn.Population(4, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    l = v.Dense(p1, p2, v.UnitActivation(), weights=1, decoder=lambda x: x)
    old_weights = l.get_weights()
    l.input_cache = np.ones((1, 4))  # Mock spikes
    errors = l.backward(np.array([[0, 1]]),
                        lambda w, g, b, bg: (w - g, b - bg))
    expected_errors = np.zeros((2, 4)) + 4
    assert np.allclose(errors, expected_errors)
    expected_weights = np.tile([1, -3], (4, 1))
    assert np.allclose(l.get_weights(), expected_weights)

def test_nest_input_projection():
    p1 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    p2 = pynn.Population(2, pynn.IF_cond_exp(**v.DEFAULT_NEURON_PARAMETERS))
    l = v.Dense(p1, p2, v.ReLU(), weights=1)
    m = v.Model(l)
    t = v.LinearTranslation()
    assert np.allclose(l.get_weights(), np.ones((2, 2)))
    assert m.input_projection[0].weight == t.weights(1, 1)
    assert m.input_projection[1].weight == t.weights(1, 1)
    m.predict([1, 1], 50)
    spiketrains = l.output
    assert abs(len(spiketrains[0]) - len(spiketrains[1])) <= 20

def test_nest_dense_restore():
    p1 = pynn.Population(12, pynn.IF_cond_exp())
    p2 = pynn.Population(10, pynn.IF_cond_exp())
    d = v.Dense(p1, p2, v.ReLU(), weights=2)
    d.set_weights(-1)
    t = v.LinearTranslation()
    assert np.array_equal(d.projection.get('weight', format='array'),
                          t.weights(np.ones((12, 10)) * -1, 12))
    d.projection.set(weight=1)  # Simulate reset()
    assert np.array_equal(d.projection.get('weight', format='array'),
                          np.ones((12, 10)))
    d.restore_weights()
    assert np.array_equal(d.projection.get('weight', format='array'),
                          t.weights(np.ones((12, 10)) * -1, 12))

def test_nest_model_backwards():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(3, pynn.IF_cond_exp())
    l1 = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count_linear, weights=1)
    m = v.Model(l1)
    xs = np.array([12, 12])
    spikes = m.predict(xs, 50)

    def l(w, g, b, bg):
        return w - g, b - bg

    m.backward([0, 1, 1], l)  # No learning rate
    expected_weights = np.array([[1, 0, 0], [1, 0, 0]])
    assert np.allclose(l1.get_weights(), expected_weights, atol=0.2)

def test_nest_dense_increased_weight_fire():
    p1 = pynn.Population(1, pynn.SpikeSourcePoisson(rate=1))
    p2 = pynn.Population(1, pynn.IF_cond_exp())
    p2.record('spikes')
    d = v.Dense(p1, p2, v.ReLU(), weights=1)  # Baseline weight
    pynn.run(1000)
    spiketrains = p2.get_data().segments[-1].spiketrains
    count1 = spiketrains[0].size
    pynn.reset()
    p1 = pynn.Population(1, pynn.SpikeSourcePoisson(rate=1))
    p2 = pynn.Population(1, pynn.IF_cond_exp())
    p2.record('spikes')
    d = v.Dense(p1, p2, v.ReLU(), weights=2)  # Doubled weight should at least double the spike count
    pynn.run(1000)
    spiketrains = p2.get_data().segments[-1].spiketrains
    count2 = spiketrains[0].size
    assert count2 >= count1 * 2

def test_nest_dense_shape():
    p1 = pynn.Population(12, pynn.SpikeSourcePoisson(rate=10))
    p2 = pynn.Population(10, pynn.IF_cond_exp())
    d = v.Dense(p1, p2, v.ReLU(), weights=1)
    pynn.run(1000)
    d.store_spikes()
    assert d.input.shape == (12,)
    assert d.output.shape[0] == 10

def test_nest_model_backwards_reset():
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(2, pynn.IF_cond_exp())
    l1 = v.Dense(p1, p2, v.ReLU(), decoder=v.spike_count_normalised, weights=1)
    m = v.Model(l1)
    xs1 = np.array([10, 10])
    ys1 = np.array([0, 10])
    xs2 = np.array([10, 10])
    ys2 = np.array([0, 10])
    # First pass
    target1 = m.predict(xs1, 50)
    m.backward([0, 1], lambda w, g, b, bg: (w - g, b - bg))
    expected_weights = np.array([[1, 0], [1, 0]])
    assert np.allclose(l1.get_weights(), expected_weights)
    # Second pass
    target2 = m.predict(xs2, 50)
    m.backward([1, 0], lambda w, g, b, bg: (w - g, b - bg))
    expected_weights = np.array([[-1, 0], [-1, 0]])
    assert np.allclose(l1.get_weights(), expected_weights)

def setUp(self):
    sim.setup()
    self.p = sim.Population(
        4,
        sim.IF_cond_exp(**{
            'tau_m': 12.3,
            'cm': lambda i: 0.987 + 0.01 * i,
            'i_offset': np.array([-0.21, -0.20, -0.19, -0.18])
        }))

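# Hedged note (not in the original): PyNN expands callables and arrays into
# per-neuron values, so the heterogeneous parameters above can be read back
# per cell with standard Population.get, e.g.
#     self.p.get('cm')  # -> approximately [0.987, 0.997, 1.007, 1.017]
# (exact return shape may vary between PyNN versions).
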
def test_nest_dense_projection():
    p1 = pynn.Population(12, pynn.SpikeSourcePoisson(rate=10))
    p2 = pynn.Population(10, pynn.IF_cond_exp())
    p2.record('spikes')
    d = v.Dense(p1, p2, v.ReLU(), weights=1)
    pynn.run(1000)
    spiketrains = p2.get_data().segments[-1].spiketrains
    assert len(spiketrains) == 10
    avg_len = np.array(list(map(len, spiketrains))).mean()
    # Should have equal activation
    for train in spiketrains:
        assert abs(len(train) - avg_len) <= 1

def test_reset(self, logcapture):
    """
    Test the reset functionality. The reset must not unload the brain,
    therefore the neurons that have been set up before must still exist
    after the reset.
    """
    self.control.reset()
    population = sim.Population(10, sim.IF_cond_exp())
    # As PyNN >= 0.8 creates a multimeter and a spike_recorder per population,
    # the ID of the 10th neuron of the 10th population (9 are created during
    # setUp) must be 120
    self.assertEqual(population.all_cells[9], 120)
    logcapture.check(('hbp_nrp_cle.brainsim.pynn.PyNNControlAdapter', 'INFO',
                      'neuronal simulator reset'))

def test_projection(self):
    path = tempfile.mkstemp()[1]
    size_a = random.randint(1, 100)
    size_b = random.randint(1, 100)
    dist = pyhmf.RandomDistribution(rng=pyhmf.NativeRNG(1337))
    conn_pyhmf = pyhmf.AllToAllConnector(weights=dist, delays=42)
    proj_pyhmf = pyhmf.Projection(
        pyhmf.Population(size_a, pyhmf.IF_cond_exp),
        pyhmf.Population(size_b, pyhmf.IF_cond_exp),
        conn_pyhmf)
    proj_pyhmf.saveConnections(getattr(pyhmf, self.file_type)(path, 'wb'))
    conn_pynn = pynn.FromFileConnector(
        getattr(pynn.recording.files, self.file_type)(path))
    proj_pynn = pynn.Projection(
        pynn.Population(size_a, pynn.IF_cond_exp()),
        pynn.Population(size_b, pynn.IF_cond_exp()),
        conn_pynn)
    numpy.testing.assert_equal(proj_pyhmf.getWeights(format='array'),
                               proj_pynn.getWeights(format='array'))

def test_native_stdp_model():
    nest = pyNN.nest
    from pyNN.utility import init_logging
    init_logging(logfile=None, debug=True)
    nest.setup()
    p1 = nest.Population(10, nest.IF_cond_exp())
    p2 = nest.Population(10, nest.SpikeSourcePoisson())
    stdp_params = {'Wmax': 50.0, 'lambda': 0.015, 'weight': 0.001}
    stdp = nest.native_synapse_type("stdp_synapse")(**stdp_params)
    connector = nest.AllToAllConnector()
    prj = nest.Projection(p2, p1, connector,
                          receptor_type='excitatory', synapse_type=stdp)

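# Hedged note (not in the original): native_synapse_type exposes NEST's own
# synapse models through PyNN, so parameter names such as 'Wmax' and 'lambda'
# follow NEST's stdp_synapse documentation rather than PyNN's naming
# conventions for STDPMechanism.
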
def test():
    if not (HAVE_H5PY and HAVE_NEST):  # Requires both h5py and NEST
        raise SkipTest
    sim.setup()
    p1 = sim.Population(10,
                        sim.IF_cond_exp(v_rest=-65,
                                        tau_m=lambda i: 10 + 0.1 * i,
                                        cm=RD('normal', (0.5, 0.05))),
                        label="population_one")
    p2 = sim.Population(20,
                        sim.IF_curr_alpha(v_rest=-64,
                                          tau_m=lambda i: 11 + 0.1 * i),
                        label="population_two")
    prj = sim.Projection(p1, p2,
                         sim.FixedProbabilityConnector(p_connect=0.5),
                         synapse_type=sim.StaticSynapse(
                             weight=RD('uniform', [0.0, 0.1]), delay=0.5),
                         receptor_type='excitatory')
    net = Network(p1, p2, prj)
    export_to_sonata(net, "tmp_serialization_test", overwrite=True)
    net2 = import_from_sonata("tmp_serialization_test/circuit_config.json", sim)
    for orig_population in net.populations:
        imp_population = net2.get_component(orig_population.label)
        assert orig_population.size == imp_population.size
        for name in orig_population.celltype.default_parameters:
            assert_array_almost_equal(orig_population.get(name),
                                      imp_population.get(name), 12)
    w1 = prj.get('weight', format='array')
    prj2 = net2.get_component(asciify(prj.label).decode('utf-8') + "-0")
    w2 = prj2.get('weight', format='array')
    assert_array_almost_equal(w1, w2, 12)

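# Hedged note (not in the original): Network, export_to_sonata and
# import_from_sonata are presumably imported from pyNN.network and
# pyNN.serialization in recent PyNN releases; RD is presumably
# pyNN.random.RandomDistribution.
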
def setUp(self):
    """
    Instantiates the PyNN communication and control adapter
    """
    brainconfig.rng_seed = 123456
    with LogCapture(('hbp_nrp_cle.brainsim.pynn.PyNNControlAdapter',
                     'hbp_nrp_cle.brainsim.pynn.PyNNCommunicationAdapter',
                     'hbp_nrp_cle.brainsim.common.__AbstractCommunicationAdapter')) as log_capt:
        self.control = PyNNControlAdapter(sim)
        self.assertEqual(self.control.is_alive(), False)
        self.control.initialize(timestep=0.1, min_delay=0.1, max_delay=4.0,
                                num_threads=1)
        self.control.initialize(timestep=0.1, min_delay=0.1, max_delay=4.0,
                                num_threads=1)
        self.communicator = PyNNNestCommunicationAdapter()
        self.neurons_cond = sim.Population(10, sim.IF_cond_exp())
        self.neurons_curr = sim.Population(10, sim.IF_curr_exp())
        self.two_neurons_pop_cond = [sim.Population(10, sim.IF_cond_exp()),
                                     sim.Population(10, sim.IF_cond_exp())]
        self.two_neurons_pop_curr = [sim.Population(10, sim.IF_curr_exp()),
                                     sim.Population(10, sim.IF_curr_exp())]
        self.three_neurons_pop_cond = [sim.Population(10, sim.IF_cond_exp()),
                                       sim.Population(10, sim.IF_cond_exp()),
                                       sim.Population(10, sim.IF_cond_exp())]
        self.assertEqual(self.communicator.is_initialized, False)
        self.assertEqual(self.communicator.detector_devices, [])
        self.assertEqual(self.communicator.generator_devices, [])
        log_capt.check(('hbp_nrp_cle.brainsim.pynn.PyNNControlAdapter', 'INFO',
                        'neuronal simulator initialized'),
                       ('hbp_nrp_cle.brainsim.pynn.PyNNControlAdapter', 'WARNING',
                        'trying to initialize an already initialized controller'))

def test_nest_dense_numerical_gradient():
    # Test idea from https://github.com/stephencwelch/Neural-Networks-Demystified/blob/master/partSix.py
    # Use simple power function
    f = lambda x: x ** 2
    fd = lambda x: 2 * x
    e = 1e-4
    weights1 = np.ones((2, 3)).ravel()
    weights2 = np.ones((3, 1)).ravel()
    p1 = pynn.Population(2, pynn.IF_cond_exp())
    p2 = pynn.Population(3, pynn.IF_cond_exp())
    p3 = pynn.Population(1, pynn.IF_cond_exp())
    l1 = v.Dense(p1, p2, v.Sigmoid(), decoder=lambda x: x)
    l2 = v.Dense(p2, p3, v.Sigmoid(), decoder=lambda x: x)
    m = v.Model(l1, l2)
    error = v.SumSquared()

    def forward_pass(xs):
        "Simple sigmoid forward pass function"
        l1.input_cache = xs
        l1.output = l2.input_cache = v.Sigmoid()(np.matmul(xs, l1.weights))
        l2.output = v.Sigmoid()(np.matmul(l2.input_cache, l2.weights))
        return l2.output

    def compute_numerical_gradient(xs, ys):
        "Computes the numerical gradient of a layer"
        weights1 = l1.get_weights().ravel()  # 1D
        weights2 = l2.get_weights().ravel()
        weights = np.concatenate((weights1, weights2))
        gradients = np.zeros(weights.shape)

        def initialise_with_distortion(index, delta):
            distortion = np.copy(weights)
            distortion[index] = distortion[index] + delta
            l1.set_weights(distortion[:len(weights1)].reshape(l1.weights.shape))
            l2.set_weights(distortion[len(weights1):].reshape(l2.weights.shape))
            forward_pass(xs)

        # Calculate gradients
        for index in range(len(weights)):
            initialise_with_distortion(index, e)
            error1 = -error(l2.output, ys)
            initialise_with_distortion(index, -e)
            error2 = -error(l2.output, ys)
            gradients[index] = (error2 - error1) / (2 * e)

        # Reset weights
        l1.set_weights(weights1.reshape(2, 3))
        l2.set_weights(weights2.reshape(3, 1))
        return gradients

    def compute_gradients(xs, ys):
        class GradientOptimiser:
            counter = 2
            gradients1 = None
            gradients2 = None

            def __call__(self, w, wg, b, bg):
                if self.counter > 1:
                    self.gradients2 = wg
                else:
                    self.gradients1 = wg
                self.counter -= 1
                return (w, b)

        output = forward_pass(xs)
        optimiser = GradientOptimiser()
        m.backward(error.prime(l2.output, ys), optimiser)
        return np.concatenate((optimiser.gradients1.ravel(),
                               optimiser.gradients2.ravel()))

    # Normalise inputs
    xs = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)
    xs = xs - np.amax(xs, axis=0)
    ys = np.array(([75], [82], [93]), dtype=float)
    ys = ys / 100

    # Calculate numerical gradients
    numerical_gradients = compute_numerical_gradient(xs, ys)
    # Calculate 'normal' gradients
    gradients = compute_gradients(xs, ys)
    # Calculate the ratio between the difference and the sum of vector norms
    ratio = (np.linalg.norm(gradients - numerical_gradients) /
             np.linalg.norm(gradients + numerical_gradients))
    assert ratio < 1e-07

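# Hedged illustration (not part of the original suite): the same
# central-difference check on a plain NumPy function whose analytic gradient
# is known exactly. The helper name is illustrative only.
def _numerical_gradient_demo():
    loss = lambda x: np.sum(x ** 2)  # Scalar loss with known gradient 2x
    grad = lambda x: 2 * x
    e = 1e-4
    x = np.array([3.0, -1.0, 0.5])
    numerical = np.zeros_like(x)
    for i in range(x.size):
        up, down = x.copy(), x.copy()
        up[i] += e
        down[i] -= e
        numerical[i] = (loss(up) - loss(down)) / (2 * e)
    # Same norm-ratio criterion as the test above
    ratio = (np.linalg.norm(grad(x) - numerical) /
             np.linalg.norm(grad(x) + numerical))
    assert ratio < 1e-7
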
cellparams = {
    # (preceding parameters elided in the original excerpt)
    'v_reset': 0.0,
    'v_rest': 0.0,
    'e_rev_E': 10.0,
    'e_rev_I': -10.0,
    'i_offset': 0.0,
    'cm': 0.1,
    'tau_m': 0.8325,
    'tau_syn_E': tau_syn,
    'tau_syn_I': tau_syn,
    'tau_refrac': 0.0
}
cellvalues = {'gsyn_exc': 0.0, 'v': 0.0, 'gsyn_inh': 0.0}
input_celltype = sim.SpikeSourceArray(spike_times=spike_times)
fc_celltype = sim.IF_cond_exp(**cellparams)
input_pop = sim.Population(34 * 34 * 2, input_celltype)
fc0 = sim.Population(w1.shape[0], fc_celltype)
fc0.initialize(**cellvalues)
fc0.set(v_thresh=vth0)
fc1 = sim.Population(w1.shape[0], fc_celltype)
fc1.initialize(**cellvalues)
fc1.set(v_thresh=vth1)
#fc1.set(i_offset=b1)
fc2 = sim.Population(10, fc_celltype)
fc2.initialize(**cellvalues)
fc2.set(v_thresh=vth2)
#fc2.set(i_offset=b2)