def test_popview_on_neuron(self):
    pynn.setup(marocco=self.marocco)
    pop = pynn.Population(4, pynn.IF_cond_exp, {})

    neuron_block = C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(3))
    neuron_block_1 = C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(2))
    logical_neuron = (LogicalNeuron.on(neuron_block)
                      .add(C.NeuronOnNeuronBlock(X(3), Y(0)), 2)
                      .add(C.NeuronOnNeuronBlock(X(3), Y(1)), 2)
                      .done())
    logical_neuron_1 = (LogicalNeuron.on(neuron_block_1)
                        .add(C.NeuronOnNeuronBlock(X(4), Y(0)), 2)
                        .add(C.NeuronOnNeuronBlock(X(4), Y(1)), 2)
                        .done())

    popview = pynn.PopulationView(pop, [0])
    popview_1 = pynn.PopulationView(pop, [2])
    popview_auto_placement = pynn.PopulationView(pop, [1, 3])

    self.marocco.manual_placement.on_neuron(popview, logical_neuron)
    self.marocco.manual_placement.on_neuron(popview_1, logical_neuron_1)

    pynn.run(0)
    pynn.end()

    results = self.load_results()

    placement_item, = results.placement.find(popview[0])
    self.assertEqual(logical_neuron, placement_item.logical_neuron())

    placement_item, = results.placement.find(popview_1[0])
    self.assertEqual(logical_neuron_1, placement_item.logical_neuron())

    for nrn in popview_auto_placement:
        placement_item, = results.placement.find(nrn)
        self.assertIsNotNone(placement_item.logical_neuron())
def test_on_neurons(self, pop_size):
    pynn.setup(marocco=self.marocco)
    pop = pynn.Population(pop_size, pynn.IF_cond_exp, {})

    neuron_block = C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(3))
    logical_neurons = [
        (LogicalNeuron.on(neuron_block)
         .add(C.NeuronOnNeuronBlock(X(3), Y(0)), 2)
         .add(C.NeuronOnNeuronBlock(X(3), Y(1)), 2)
         .done()),
        (LogicalNeuron.on(neuron_block)
         .add(C.NeuronOnNeuronBlock(X(11), Y(0)), 2)
         .add(C.NeuronOnNeuronBlock(X(11), Y(1)), 2)
         .done()),
    ]
    self.marocco.manual_placement.on_neuron(pop, logical_neurons)

    if pop_size != len(logical_neurons):
        with self.assertRaises(RuntimeError):
            pynn.run(0)
            pynn.end()
        return

    pynn.run(0)
    pynn.end()

    results = self.load_results()

    for nrn, logical_neuron in zip(pop, logical_neurons):
        placement_item, = results.placement.find(nrn)
        self.assertEqual(logical_neuron, placement_item.logical_neuron())
def test_on_neuron_block(self, size):
    pynn.setup(marocco=self.marocco)
    neuron_size = 4
    self.marocco.neuron_placement.default_neuron_size(neuron_size)

    neuron_block = C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(3))
    pop = pynn.Population(size, pynn.IF_cond_exp, {})
    self.marocco.manual_placement.on_neuron_block(pop, neuron_block)

    if neuron_size * size > C.NeuronOnNeuronBlock.enum_type.size:
        with self.assertRaises(RuntimeError):
            pynn.run(0)
            pynn.end()
        return

    pynn.run(0)
    pynn.end()

    results = self.load_results()

    for nrn in pop:
        placement_item, = results.placement.find(nrn)
        logical_neuron = placement_item.logical_neuron()
        self.assertEqual(neuron_size, logical_neuron.size())
        for denmem in logical_neuron:
            self.assertEqual(neuron_block, denmem.toNeuronBlockOnWafer())
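# Illustrative sketch of the capacity check used above; this helper is
# hypothetical and not part of the test suite. It assumes a neuron block
# provides 64 denmems (what C.NeuronOnNeuronBlock.enum_type.size evaluates to
# on HICANN), so with a default neuron size of 4 at most 16 logical neurons
# fit on one neuron block before on_neuron_block() must fail.
def _fits_on_neuron_block(pop_size, neuron_size, denmems_per_block=64):
    """Return True if pop_size neurons of the given hardware neuron size
    can be realised on a single neuron block (assumed capacity)."""
    return neuron_size * pop_size <= denmems_per_block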
def test_hw_merging_spl1_should_merge_some(self):
    """
    Some DNC mergers shall be merged, but not all, because of the syndriver
    requirements on each neuron block.

    Two neurons are placed on each neuron block of a single HICANN and a
    fully connected network is built.  This results in 8 * 2 = 16 synapses
    being routed to each neuron.  With neuron size 4 and chain length 3,
    only 12 synapses can be realised per neuron, so at most 12 synapses
    shall end up on the same L1Route.  The merger tries to merge all of
    them, fails, then splits the inputs and merges 8 onto each of the
    mergers [3, 5].  The result is better L1 utilisation than a one-to-one
    mapping (2 routes instead of 8) while staying within the hardware
    constraints, in contrast to merging everything (16 synapses would
    require 4 drivers, and 1 driver would be lost).
    """
    pynn.setup(marocco=self.marocco)
    neuron_size = 4
    self.marocco.neuron_placement.default_neuron_size(neuron_size)
    self.marocco.merger_routing.strategy(
        self.marocco.merger_routing.minimize_as_possible)
    # Restrict to 3 drivers so that this test is hardware-agnostic.
    self.marocco.synapse_routing.driver_chain_length(3)

    hicann = C.HICANNOnWafer(Enum(123))
    pops = []
    # Occupy every neuron block of the HICANN.
    for nb in range(C.NeuronBlockOnHICANN.end):
        pop = pynn.Population(2, pynn.IF_cond_exp, {})
        self.marocco.manual_placement.on_neuron_block(
            pop, C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(nb), hicann))
        pops.append(pop)

    for p in pops:
        for other_p in pops:
            pynn.Projection(p, other_p, pynn.AllToAllConnector(weights=1.))

    pynn.run(0)
    pynn.end()

    merged_dncs = [3, 3, 3, 3, 5, 5, 5, 5]
    results = self.load_results()
    for pop in pops:
        nrn = pop[0]
        placement_item, = results.placement.find(nrn)
        logical_neuron = placement_item.logical_neuron()
        self.assertEqual(neuron_size, logical_neuron.size())
        for denmem in logical_neuron:
            self.assertEqual(hicann, denmem.toHICANNOnWafer())

        address = placement_item.address()
        # Some DNC mergers shall be merged.
        dnc = C.DNCMergerOnHICANN(merged_dncs[pop.euter_id()])
        self.assertEqual(hicann, address.toHICANNOnWafer())
        self.assertEqual(dnc, address.toDNCMergerOnHICANN())
        self.assertEqual(C.DNCMergerOnWafer(dnc, hicann),
                         address.toDNCMergerOnWafer())
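# Illustrative sanity check of the arithmetic in the docstring above; this
# helper is hypothetical and not used by the tests. With 8 neuron blocks of
# 2 neurons each and an all-to-all projection, every neuron receives
# 8 * 2 = 16 afferent synapses, which exceeds the 12 synapses stated to be
# realisable per neuron at neuron size 4 and driver chain length 3, hence
# the split onto two DNC mergers.
def _afferent_synapses_per_neuron(num_blocks=8, neurons_per_block=2):
    """Afferent synapses per neuron for the fully connected network built
    in test_hw_merging_spl1_should_merge_some (returns 16)."""
    return num_blocks * neurons_per_block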
def test_min_spl1_should_allow_external_input_on_same_chip(self):
    """
    Even when the rightmost neuron block / DNC merger is not reserved for
    external input, it should be possible to place external input on the
    same chip.
    """
    pynn.setup(marocco=self.marocco)
    neuron_size = 4
    self.marocco.neuron_placement.default_neuron_size(neuron_size)
    self.marocco.merger_routing.strategy(
        self.marocco.merger_routing.minimize_number_of_sending_repeaters)
    # Do not reserve the rightmost neuron block / DNC merger for external input.
    self.marocco.neuron_placement.restrict_rightmost_neuron_blocks(False)

    hicann = C.HICANNOnWafer(Enum(123))
    pops = []
    # All but the first neuron block are occupied.
    for nb in range(1, C.NeuronBlockOnHICANN.end):
        pop = pynn.Population(1, pynn.IF_cond_exp, {})
        self.marocco.manual_placement.on_neuron_block(
            pop, C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(nb), hicann))
        pops.append(pop)

    in_pop = pynn.Population(1, pynn.SpikeSourceArray, {})
    self.marocco.manual_placement.on_hicann(in_pop, hicann)

    pynn.run(0)
    pynn.end()

    results = self.load_results()
    for pop in pops:
        nrn = pop[0]
        placement_item, = results.placement.find(nrn)
        logical_neuron = placement_item.logical_neuron()
        self.assertEqual(neuron_size, logical_neuron.size())
        for denmem in logical_neuron:
            self.assertEqual(hicann, denmem.toHICANNOnWafer())

        address = placement_item.address()
        # All used neuron blocks should be connected to a single DNC merger.
        dnc = C.DNCMergerOnHICANN(3)
        self.assertEqual(hicann, address.toHICANNOnWafer())
        self.assertEqual(dnc, address.toDNCMergerOnHICANN())
        self.assertEqual(C.DNCMergerOnWafer(dnc, hicann),
                         address.toDNCMergerOnWafer())

    nrn = in_pop[0]
    placement_item, = results.placement.find(nrn)
    logical_neuron = placement_item.logical_neuron()
    self.assertTrue(logical_neuron.is_external())

    address = placement_item.address()
    # External input must not be on DNC merger 3, since all other mergers
    # do not have direct access to a background generator.
    dnc = C.DNCMergerOnHICANN(3)
    self.assertEqual(hicann, address.toHICANNOnWafer())
    self.assertNotEqual(dnc, address.toDNCMergerOnHICANN())
    self.assertNotEqual(C.DNCMergerOnWafer(dnc, hicann),
                        address.toDNCMergerOnWafer())
def test_min_spl1_is_nongreedy_when_pops_are_placed_to_nbs(self, nbs):
    """
    See above.  Instead of a single population being placed on the HICANN,
    populations are placed onto specific neuron blocks.
    """
    pynn.setup(marocco=self.marocco)
    neuron_size = 4
    self.marocco.neuron_placement.default_neuron_size(neuron_size)
    self.marocco.merger_routing.strategy(
        self.marocco.merger_routing.minimize_number_of_sending_repeaters)
    self.marocco.neuron_placement.restrict_rightmost_neuron_blocks(True)

    hicann = C.HICANNOnWafer(Enum(123))
    pops = []
    for nb in nbs:
        pop = pynn.Population(1, pynn.IF_cond_exp, {})
        self.marocco.manual_placement.on_neuron_block(
            pop, C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(nb), hicann))
        pops.append(pop)

    in_pop = pynn.Population(1, pynn.SpikeSourceArray, {})
    self.marocco.manual_placement.on_hicann(in_pop, hicann)

    pynn.run(0)
    pynn.end()

    results = self.load_results()
    for pop in pops:
        nrn = pop[0]
        placement_item, = results.placement.find(nrn)
        logical_neuron = placement_item.logical_neuron()
        self.assertEqual(neuron_size, logical_neuron.size())
        for denmem in logical_neuron:
            self.assertEqual(hicann, denmem.toHICANNOnWafer())

        address = placement_item.address()
        # All used neuron blocks should still be connected to a single DNC merger.
        dnc = C.DNCMergerOnHICANN(3)
        self.assertEqual(hicann, address.toHICANNOnWafer())
        self.assertEqual(dnc, address.toDNCMergerOnHICANN())
        self.assertEqual(C.DNCMergerOnWafer(dnc, hicann),
                         address.toDNCMergerOnWafer())

    nrn = in_pop[0]
    placement_item, = results.placement.find(nrn)
    logical_neuron = placement_item.logical_neuron()
    self.assertTrue(logical_neuron.is_external())

    address = placement_item.address()
    # External input should be on the rightmost DNC merger, since that is tried first.
    dnc = C.DNCMergerOnHICANN(7)
    self.assertEqual(hicann, address.toHICANNOnWafer())
    self.assertEqual(dnc, address.toDNCMergerOnHICANN())
    self.assertEqual(C.DNCMergerOnWafer(dnc, hicann),
                     address.toDNCMergerOnWafer())
def test_on_neuron_wrong_shape(self):
    pynn.setup(marocco=self.marocco)
    pop = pynn.Population(1, pynn.IF_cond_exp, {})

    neuron_block = C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(3))
    logical_neuron = (LogicalNeuron.on(neuron_block)
                      .add(C.NeuronOnNeuronBlock(X(2), Y(0)), 4)
                      .add(C.NeuronOnNeuronBlock(X(3), Y(1)), 2)
                      .done())
    self.marocco.manual_placement.on_neuron(pop, logical_neuron)

    with self.assertRaises(RuntimeError):
        pynn.run(0)
        pynn.end()
def test_on_neuron(self):
    pynn.setup(marocco=self.marocco)
    pop = pynn.Population(1, pynn.IF_cond_exp, {})

    neuron_block = C.NeuronBlockOnWafer(C.NeuronBlockOnHICANN(3))
    logical_neuron = (LogicalNeuron.on(neuron_block)
                      .add(C.NeuronOnNeuronBlock(X(3), Y(0)), 2)
                      .add(C.NeuronOnNeuronBlock(X(3), Y(1)), 2)
                      .done())
    self.marocco.manual_placement.on_neuron(pop, logical_neuron)

    pynn.run(0)
    pynn.end()

    results = self.load_results()
    placement_item, = results.placement.find(pop[0])
    self.assertEqual(logical_neuron, placement_item.logical_neuron())