def __call__(self, machine, n_machine_time_steps, n_samples_per_recording,
             sampling_frequency, time_scale_factor, machine_time_step,
             pre_allocated_resources=None):
    """ Reserve one core and its SDRAM on every chip for chip power\
        monitoring.

    :param machine: the SpiNNaker machine as discovered
    :param n_machine_time_steps: the number of machine\
        time steps used by the simulation during this phase
    :param n_samples_per_recording: how many samples between record entries
    :param sampling_frequency: the frequency of sampling
    :param time_scale_factor: the time scale factor
    :param machine_time_step: the machine time step
    :param pre_allocated_resources: other preallocated resources
    :return: preallocated resources
    """
    # pylint: disable=too-many-arguments
    progress = ProgressBar(
        machine.n_chips, "Preallocating resources for chip power monitor")

    # what a single power monitor core costs in SDRAM
    monitor_resources = ChipPowerMonitor.get_resources(
        n_machine_time_steps=n_machine_time_steps,
        n_samples_per_recording=n_samples_per_recording,
        sampling_frequency=sampling_frequency,
        time_scale_factor=time_scale_factor,
        time_step=machine_time_step)

    # claim that SDRAM and one core on every chip of the machine
    sdram_reservations = []
    core_reservations = []
    for chip in progress.over(machine.chips):
        sdram_reservations.append(SpecificChipSDRAMResource(
            chip, monitor_resources.sdram.get_value()))
        core_reservations.append(CoreResource(chip, 1))

    # bundle the reservations into a single container
    container = PreAllocatedResourceContainer(
        specific_sdram_usage=sdram_reservations,
        core_resources=core_reservations)

    # fold in reservations made by earlier preallocation algorithms
    if pre_allocated_resources is not None:
        container.extend(pre_allocated_resources)
    return container
def __call__(
        self, machine, n_machine_time_steps, n_samples_per_recording,
        sampling_frequency, time_scale_factor, machine_time_step,
        pre_allocated_resources=None):
    """ Reserve a core plus SDRAM per chip for the chip power monitor.

    :param machine: the SpiNNaker machine as discovered
    :param n_machine_time_steps: the number of machine\
        time steps used by the simulation during this phase
    :param n_samples_per_recording: how many samples between record entries
    :param sampling_frequency: the frequency of sampling
    :param time_scale_factor: the time scale factor
    :param machine_time_step: the machine time step
    :param pre_allocated_resources: other preallocated resources
    :return: preallocated resources
    """
    # pylint: disable=too-many-arguments
    progress = ProgressBar(
        machine.n_chips, "Preallocating resources for chip power monitor")

    # per-core cost of one power monitor vertex
    per_monitor = ChipPowerMonitorMachineVertex.get_resources(
        n_machine_time_steps=n_machine_time_steps,
        n_samples_per_recording=n_samples_per_recording,
        sampling_frequency=sampling_frequency,
        time_scale_factor=time_scale_factor,
        time_step=machine_time_step)

    # claim that SDRAM and one core on each chip in the machine
    sdram_claims = []
    core_claims = []
    for chip in progress.over(machine.chips):
        sdram_claims.append(SpecificChipSDRAMResource(
            chip, per_monitor.sdram.get_value()))
        core_claims.append(CoreResource(chip, 1))

    result = PreAllocatedResourceContainer(
        specific_sdram_usage=sdram_claims, core_resources=core_claims)

    # merge in reservations made before this algorithm ran
    if pre_allocated_resources is not None:
        result.extend(pre_allocated_resources)
    return result
def test_1_chip_pre_allocated_same_core(self):
    """ Partitioning must fail with PacmanValueError when a core\
        constraint targets a core that is specifically preallocated.
    """
    machine = VirtualMachine(width=8, height=8)
    graph = ApplicationGraph("Test")
    partitioner = PartitionAndPlacePartitioner()

    # add graph vertices which reside on 0,0, one per specific core
    for p in range(0, 13):
        graph.add_vertex(SimpleTestVertex(
            constraints=[ChipAndCoreConstraint(x=0, y=0, p=p)],
            n_atoms=1))

    # preallocate core 4 on chip 0,0, clashing with the p=4 constraint
    core_pre = SpecificCoreResource(
        chip=machine.get_chip_at(0, 0), cores=[4])
    pre_allocated_res = PreAllocatedResourceContainer(
        specific_core_resources=[core_pre])

    # assertRaises replaces the old try/raise pattern whose broad
    # "except Exception" swallowed the success-path sentinel and hid the
    # real traceback of unexpected failures
    with self.assertRaises(PacmanValueError):
        partitioner(graph, machine, pre_allocated_res)
def test_1_chip_over_pre_allocated(self):
    """ Partitioning must fail when preallocated cores on chip 0,0 leave\
        too few cores for the 13 constrained vertices.
    """
    machine = virtual_machine(width=8, height=8)
    graph = ApplicationGraph("Test")
    partitioner = SplitterPartitioner()

    # add graph vertices which reside on 0,0
    for _ in range(0, 13):
        vertex = SimpleTestVertex(
            constraints=[ChipAndCoreConstraint(x=0, y=0)], n_atoms=1)
        vertex.splitter = SplitterSliceLegacy()
        graph.add_vertex(vertex)

    # preallocate 5 cores on 0,0 so not enough cores remain
    core_pre = CoreResource(chip=machine.get_chip_at(0, 0), n_cores=5)
    pre_allocated_res = PreAllocatedResourceContainer(
        core_resources=[core_pre])

    # assertRaises reports unexpected exceptions with their original
    # traceback and turns the "did not raise" case into a test failure,
    # unlike the manual try/raise pattern
    with self.assertRaises(PacmanInvalidParameterException):
        partitioner(graph, machine, plan_n_time_steps=None,
                    pre_allocated_resources=pre_allocated_res)
def test_1_chip_no_pre_allocated_too_much_sdram(self):
    """ With an empty preallocation container, the 13 vertices of 8MB\
        each should still be partitionable — the run must succeed.
    """
    machine = virtual_machine(width=8, height=8)
    graph = ApplicationGraph("Test")
    partitioner = SplitterPartitioner()
    eight_meg = 8 * 1024 * 1024

    # add graph vertices which reside on 0,0
    for _ in range(0, 13):
        vertex = SimpleTestVertex(
            constraints=[ChipAndCoreConstraint(x=0, y=0)],
            n_atoms=1, fixed_sdram_value=eight_meg)
        vertex.splitter = SplitterSliceLegacy()
        graph.add_vertex(vertex)

    # an empty container reserves nothing
    pre_allocated_res = PreAllocatedResourceContainer()

    # this test expects SUCCESS.  The original code caught any exception
    # and raised Exception("should have blown up here"), which both
    # inverted the message's meaning and discarded the real traceback;
    # simply letting a failure propagate reports it correctly.
    partitioner(graph, machine, plan_n_time_steps=None,
                pre_allocated_resources=pre_allocated_res)
def test_1_chip_pre_allocated_too_much_sdram(self):
    """ Partitioning must fail when preallocated SDRAM on chip 0,0 leaves\
        too little room for the 13 vertices of 8MB each.
    """
    machine = virtual_machine(width=8, height=8)
    graph = ApplicationGraph("Test")
    partitioner = SplitterPartitioner()
    eight_meg = 8 * 1024 * 1024

    # add graph vertices which reside on 0,0
    for _ in range(0, 13):
        vertex = SimpleTestVertex(
            constraints=[ChipAndCoreConstraint(x=0, y=0)],
            n_atoms=1, fixed_sdram_value=eight_meg)
        vertex.splitter = SplitterSliceLegacy()
        graph.add_vertex(vertex)

    # preallocate 20MB of SDRAM on 0,0
    twenty_meg = ConstantSDRAM(20 * 1024 * 1024)
    core_pre = SpecificChipSDRAMResource(
        chip=machine.get_chip_at(0, 0), sdram_usage=twenty_meg)
    pre_allocated_res = PreAllocatedResourceContainer(
        specific_sdram_usage=[core_pre])

    # assertRaises replaces the old try / sentinel-raise /
    # six.reraise(*sys.exc_info()) dance: the reraise was a no-op (it
    # re-raised the very exception just caught) and the broad except
    # also swallowed the success-path sentinel
    with self.assertRaises(PacmanPartitionException):
        partitioner(graph, machine, plan_n_time_steps=None,
                    pre_allocated_resources=pre_allocated_res)
def test_n_cores_available(self):
    """ Check ResourceTracker._n_cores_available against a specific-core
        preallocation and a counted-core preallocation on one chip,
        before and after allocating a core.
    """
    machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
    chip = machine.get_chip_at(0, 0)
    # reserve core 1 specifically, plus 2 unspecified cores, on chip 0,0
    preallocated_resources = PreAllocatedResourceContainer(
        specific_core_resources=[
            SpecificCoreResource(chip=chip, cores=[1])
        ],
        core_resources=[CoreResource(chip=chip, n_cores=2)])
    tracker = ResourceTracker(
        machine, plan_n_timesteps=None,
        preallocated_resources=preallocated_resources)

    # Should be 14 cores = 18 - 1 monitor - 1 specific core - 2 other cores
    self.assertEqual(tracker._n_cores_available(chip, (0, 0), None), 14)

    # Should be 0 since the core is already pre allocated
    self.assertEqual(tracker._n_cores_available(chip, (0, 0), 1), 0)

    # Should be 1 since the core is not pre allocated
    self.assertEqual(tracker._n_cores_available(chip, (0, 0), 2), 1)

    # Should be 0 since the core is monitor
    self.assertEqual(tracker._n_cores_available(chip, (0, 0), 0), 0)

    # Allocate a core
    tracker._allocate_core(chip, (0, 0), 2)

    # Should be 13 cores as one now allocated
    self.assertEqual(tracker._n_cores_available(chip, (0, 0), None), 13)
def test_1_chip_pre_allocated_too_much_sdram(self):
    """ Partitioning must fail when preallocated SDRAM on chip 0,0 leaves\
        too little room for the 13 vertices of 8MB each.
    """
    machine = VirtualMachine(width=8, height=8)
    graph = ApplicationGraph("Test")
    partitioner = PartitionAndPlacePartitioner()
    eight_meg = 8 * 1024 * 1024

    # add graph vertices which reside on 0,0
    for _ in range(0, 13):
        graph.add_vertex(SimpleTestVertex(
            constraints=[ChipAndCoreConstraint(x=0, y=0)],
            n_atoms=1, fixed_sdram_value=eight_meg))

    # preallocate 20MB of SDRAM on 0,0
    twenty_meg = 20 * 1024 * 1024
    core_pre = SpecificChipSDRAMResource(
        chip=machine.get_chip_at(0, 0), sdram_usage=twenty_meg)
    pre_allocated_res = PreAllocatedResourceContainer(
        specific_sdram_usage=[core_pre])

    # assertRaises replaces the old try/raise pattern whose broad
    # "except Exception" swallowed the success-path sentinel and hid the
    # traceback of any unexpected failure
    with self.assertRaises(PacmanPartitionException):
        partitioner(graph, machine, pre_allocated_res)
def test_partition_with_more_sdram_than_default(self):
    """ test that the partitioner works when its machine is slightly\
        malformed in that it has more SDRAM available
    """
    n_processors = 18
    (e, ne, n, w, _, _) = range(6)

    # larger-than-default SDRAM for every chip
    _sdram = SDRAM(128 * (2 ** 21))

    # build the router links (the original code first built a single-link
    # list and then immediately discarded it by reassigning `links`; that
    # dead code is removed here)
    links = list()
    links.append(Link(0, 0, e, 1, 1))
    links.append(Link(0, 1, ne, 1, 0))
    links.append(Link(1, 1, n, 0, 0))
    links.append(Link(1, 0, w, 0, 1))
    r = Router(links, False, 1024)

    ip = "192.162.240.253"
    chips = list()
    for x in range(5):
        for y in range(5):
            # only chip 0,0 is Ethernet-connected
            if x == y == 0:
                chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip))
            else:
                chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0))
    self.machine = machine_from_chips(chips)
    splitter_partitioner(self.graph, self.machine, 3000,
                         PreAllocatedResourceContainer())
def __call__(
        self, machine, n_samples_per_recording, sampling_frequency,
        time_scale_factor, machine_time_step,
        pre_allocated_resources=None):
    """ Reserve a core and SDRAM on every chip for chip power monitoring.

    :param ~.Machine machine:
    :param int n_samples_per_recording:
    :param int sampling_frequency:
    :param int time_scale_factor:
    :param int machine_time_step:
    :param ~.PreAllocatedResourceContainer pre_allocated_resources:
    :rtype: ~.PreAllocatedResourceContainer
    """
    # pylint: disable=too-many-arguments
    progress = ProgressBar(
        machine.n_chips, "Preallocating resources for chip power monitor")

    # per-core cost of one power monitor
    monitor_cost = ChipPowerMonitorMachineVertex.get_resources(
        n_samples_per_recording=n_samples_per_recording,
        sampling_frequency=sampling_frequency,
        time_scale_factor=time_scale_factor,
        time_step=machine_time_step)

    # one core and that much SDRAM on every chip of the machine
    sdrams = []
    cores = []
    for chip in progress.over(machine.chips):
        sdrams.append(SpecificChipSDRAMResource(chip, monitor_cost.sdram))
        cores.append(CoreResource(chip, 1))

    container = PreAllocatedResourceContainer(
        specific_sdram_usage=sdrams, core_resources=cores)

    # keep whatever earlier algorithms already reserved
    if pre_allocated_resources is not None:
        container.extend(pre_allocated_resources)
    return container
def test_partitioning_with_2_massive_pops(self):
    """ Partition a graph holding two very large (16000-atom) vertices.
    """
    # add two identical oversized vertices to the graph
    for _ in range(2):
        vertex = SimpleTestVertex(16000, "Constrained")
        vertex.splitter = SplitterSliceLegacy()
        self.graph.add_vertex(vertex)
    splitter_partitioner(
        self.graph, self.machine, 3000, PreAllocatedResourceContainer())
def test_partition_with_empty_graph(self):
    """test that the partitioner can work with an empty graph
    """
    self.graph = ApplicationGraph("foo")
    machine_graph, _ = splitter_partitioner(
        self.graph, self.machine, plan_n_time_steps=100,
        pre_allocated_resources=PreAllocatedResourceContainer())
    # an empty application graph must yield an empty machine graph
    self.assertEqual(0, len(list(machine_graph.vertices)))
def test_partition_with_no_additional_constraints_extra_edge(self):
    """test that the basic form with an extra edge works
    """
    self.graph.add_edge(ApplicationEdge(self.vert3, self.vert1), "TEST")
    machine_graph, _ = splitter_partitioner(
        self.graph, self.machine, plan_n_time_steps=100,
        pre_allocated_resources=PreAllocatedResourceContainer())
    # still three vertices, but the extra edge brings the count to four
    self.assertEqual(3, len(list(machine_graph.vertices)))
    self.assertEqual(4, len(list(machine_graph.edges)))
def __call__(self, live_packet_gatherer_parameters, machine,
             pre_allocated_resources=None):
    """ Reserve cores, SDRAM and IP tags for live packet gatherers on\
        each Ethernet-connected chip.

    :param live_packet_gatherer_parameters:
    :type live_packet_gatherer_parameters:
        dict(LivePacketGatherParameters,
        list(tuple(~.AbstractVertex, list(str))))
    :param ~.Machine machine:
    :param ~.PreAllocatedResourceContainer pre_allocated_resources:
    :rtype: ~.PreAllocatedResourceContainer
    """
    progress = ProgressBar(
        len(machine.ethernet_connected_chips),
        "Preallocating resources for Live Recording")

    # SDRAM cost of a single LPG core
    per_lpg_sdram = LPG.get_sdram_usage()

    # collect requirements chip by chip; only Ethernet chips host LPGs
    sdrams = list()
    cores = list()
    iptags = list()
    for ethernet_chip in progress.over(machine.ethernet_connected_chips):
        self._add_chip_lpg_reqs(
            live_packet_gatherer_parameters, ethernet_chip, per_lpg_sdram,
            sdrams, cores, iptags)

    container = PreAllocatedResourceContainer(
        specific_sdram_usage=sdrams, core_resources=cores,
        specific_iptag_resources=iptags)

    # merge with anything reserved before this algorithm ran
    if pre_allocated_resources is not None:
        container.extend(pre_allocated_resources)
    return container
def __call__(self, live_packet_gatherer_parameters, machine,
             pre_allocated_resources=None):
    """ Preallocate cores, SDRAM and IP tags for live packet gatherers
        on each Ethernet-connected chip.

    :param live_packet_gatherer_parameters:\
        the LPG parameters requested by the script
    :param machine: the SpiNNaker machine as discovered
    :param pre_allocated_resources: other preallocated resources
    :return: preallocated resources
    """
    progress = ProgressBar(
        len(machine.ethernet_connected_chips),
        "Preallocating resources for Live Recording")

    # store how much SDRAM the LPG uses per core
    lpg_sdram_requirement = LPGVertex.get_sdram_usage()

    # for every Ethernet connected chip, get the resources needed by the
    # live packet gatherers
    sdrams = list()
    cores = list()
    iptags = list()
    for chip in progress.over(machine.ethernet_connected_chips):
        self._add_chip_lpg_reqs(
            live_packet_gatherer_parameters, chip, lpg_sdram_requirement,
            sdrams, cores, iptags)

    # create pre allocated resource container
    lpg_prealloc_resource_container = PreAllocatedResourceContainer(
        specific_sdram_usage=sdrams, core_resources=cores,
        specific_iptag_resources=iptags)

    # add other pre allocated resources
    if pre_allocated_resources is not None:
        lpg_prealloc_resource_container.extend(pre_allocated_resources)

    # return pre allocated resources
    return lpg_prealloc_resource_container
def test_n_cores_available(self):
    """ Check core accounting when an all-chips and an Ethernet-chip core
        preallocation are both in force, before and after an allocation.
    """
    machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
    preallocated_resources = PreAllocatedResourceContainer()
    # reserve 2 cores on every chip and 3 more on Ethernet chips
    preallocated_resources.add_cores_all(2)
    preallocated_resources.add_cores_ethernet(3)
    tracker = ResourceTracker(
        machine, plan_n_timesteps=None,
        preallocated_resources=preallocated_resources)

    # Should be 12 cores = 18 - 1 monitor - 3 ethernet - 2 all-chips
    self.assertEqual(tracker._get_core_tracker(0, 0).n_cores_available, 12)

    # Should be 15 cores = 18 - 1 monitor - 2 all-chips (0,1 not ethernet)
    self.assertEqual(tracker._get_core_tracker(0, 1).n_cores_available, 15)

    # Should be True since the core is not pre allocated
    self.assertTrue(tracker._get_core_tracker(0, 0).is_core_available(2))

    # Should be False since the core is monitor
    self.assertFalse(tracker._get_core_tracker(0, 0).is_core_available(0))

    # Allocate a core
    tracker._get_core_tracker(0, 0).allocate(2)

    # Should be 11 cores as one now allocated
    self.assertEqual(tracker._get_core_tracker(0, 0).n_cores_available, 11)

    # coordinates outside the machine must be rejected
    with self.assertRaises(PacmanInvalidParameterException):
        tracker._get_core_tracker(2, 2)
def __call__(self, machine, pre_allocated_resources=None,
             n_cores_to_allocate=1):
    """ Reserve the resources needed by the extra monitor support\
        vertices.

    :param ~.Machine machine:
    :param ~.PreAllocatedResourceContainer pre_allocated_resources:
    :param int n_cores_to_allocate:
    :rtype: ~.PreAllocatedResourceContainer
    """
    progress = ProgressBar(
        len(list(machine.ethernet_connected_chips)) + machine.n_chips,
        "Preallocating resources for Extra Monitor support vertices")

    sdrams = list()
    cores = list()
    tags = list()

    # gatherers on each Ethernet-connected chip (data extraction support)
    self._handle_packet_gathering_support(
        sdrams, cores, tags, machine, progress, n_cores_to_allocate)
    # re-injector and reader on every chip (data extraction support)
    self._handle_second_monitor_support(cores, sdrams, machine, progress)

    reservations = PreAllocatedResourceContainer(
        specific_sdram_usage=sdrams, core_resources=cores,
        specific_iptag_resources=tags)

    # carry forward earlier reservations as well
    if pre_allocated_resources is not None:
        reservations.extend(pre_allocated_resources)
    return reservations
def test_operation_with_same_size_as_vertex_constraint_exception(self):
    """ test that a partition same as constraint with different size atoms
        causes errors
    """
    # SameAtomsAsVertexConstraint support appears to have been withdrawn:
    # the whole body is wrapped so that a NotImplementedError raised
    # partway through passes the test
    # NOTE(review): confirm which call (add_constraint or the partitioner)
    # actually raises the NotImplementedError
    with self.assertRaises(NotImplementedError):
        constrained_vertex = SimpleTestVertex(100, "Constrained")
        constrained_vertex.add_constraint(
            SameAtomsAsVertexConstraint(self.vert2))
        constrained_vertex.splitter_object = SplitterSliceLegacy()
        self.graph.add_vertex(constrained_vertex)
        # if reached: mismatched sizes should make partitioning fail
        self.assertRaises(PacmanPartitionException, splitter_partitioner,
                          self.graph, self.machine, 1000,
                          PreAllocatedResourceContainer())
def __call__(
        self, machine, pre_allocated_resources=None,
        n_cores_to_allocate=1):
    """ Reserve cores, SDRAM and tags for the extra monitor support\
        vertices.

    :param machine: SpiNNaker machine object
    :param pre_allocated_resources: resources already pre allocated
    :param n_cores_to_allocate: config params for how many gatherers\
        to use
    """
    progress = ProgressBar(
        len(list(machine.ethernet_connected_chips)) + machine.n_chips,
        "Pre allocating resources for Extra Monitor support vertices")

    sdram_list = list()
    core_list = list()
    tag_list = list()

    # gatherer requirements, one set per Ethernet-connected chip,
    # used for data extraction
    self._handle_packet_gathering_support(
        sdram_list, core_list, tag_list, machine, progress,
        n_cores_to_allocate)

    # re-injector and reader requirements on every chip
    self._handle_second_monitor_support(
        core_list, sdram_list, machine, progress)

    allocations = PreAllocatedResourceContainer(
        specific_sdram_usage=sdram_list, core_resources=core_list,
        specific_iptag_resources=tag_list)

    # carry forward any resources reserved before this algorithm
    if pre_allocated_resources is not None:
        allocations.extend(pre_allocated_resources)
    return allocations
def test_partition_on_large_vertex_than_has_to_be_split(self):
    """ test that partitioning 1 large vertex can make it into 2 small\
        ones
    """
    big_vertex = SimpleTestVertex(300, "Large vertex")
    big_vertex.splitter = SplitterSliceLegacy()
    self.graph = ApplicationGraph("Graph with large vertex")
    self.graph.add_vertex(big_vertex)
    machine_graph, _ = splitter_partitioner(
        self.graph, self.machine, plan_n_time_steps=100,
        pre_allocated_resources=PreAllocatedResourceContainer())
    # model default is 256 atoms per core, so 300 atoms need >1 vertex
    self.assertEqual(big_vertex._model_based_max_atoms_per_core, 256)
    self.assertGreater(len(list(machine_graph.vertices)), 1)
def test_partition_with_no_additional_constraints(self):
    """test a partitioning with a graph with no extra constraints
    """
    machine_graph, _ = splitter_partitioner(
        self.graph, self.machine, plan_n_time_steps=100,
        pre_allocated_resources=PreAllocatedResourceContainer())
    self.assertEqual(len(list(machine_graph.vertices)), 3)
    # each machine vertex must cover the atom count of some app vertex
    expected_sizes = [vert.n_atoms for vert in self.verts]
    self.assertEqual(len(list(machine_graph.edges)), 3)
    for vertex in machine_graph.vertices:
        self.assertIn(vertex.vertex_slice.n_atoms, expected_sizes)
def __call__(self, machine, sdram_to_pre_alloc_for_bit_fields,
             pre_allocated_resources=None):
    """ Reserve, on every chip, the SDRAM the bit field compressor needs.

    :param ~.Machine machine:
    :param int sdram_to_pre_alloc_for_bit_fields:
    :param ~.PreAllocatedResourceContainer pre_allocated_resources:
    :rtype: ~.PreAllocatedResourceContainer
    """
    progress = ProgressBar(
        machine.n_chips, "Preallocating resources for bit field compressor")

    # one SDRAM address per user core, plus the configured extra amount,
    # reserved on every chip
    sdrams = [
        SpecificChipSDRAMResource(
            chip,
            (SIZE_OF_SDRAM_ADDRESS_IN_BYTES * chip.n_user_processors) +
            sdram_to_pre_alloc_for_bit_fields)
        for chip in progress.over(machine.chips)]

    container = PreAllocatedResourceContainer(specific_sdram_usage=sdrams)

    # keep reservations made by earlier algorithms
    if pre_allocated_resources is not None:
        container.extend(pre_allocated_resources)
    return container
def test_partition_on_target_size_vertex_than_has_to_be_split(self):
    """ test that fixed partitioning causes correct number of vertices
    """
    vertex = SimpleTestVertex(1000, "Large vertex")
    vertex.add_constraint(MaxVertexAtomsConstraint(10))
    vertex.splitter = SplitterSliceLegacy()
    self.graph = ApplicationGraph("Graph with large vertex")
    self.graph.add_vertex(vertex)
    machine_graph, _ = splitter_partitioner(
        self.graph, self.machine, plan_n_time_steps=100,
        pre_allocated_resources=PreAllocatedResourceContainer())
    # 1000 atoms at no more than 10 per core -> 100 machine vertices
    self.assertEqual(len(list(machine_graph.vertices)), 100)
def test_partition_with_barely_sufficient_space(self):
    """ test that partitioning will work when close to filling the machine
    """
    self.setup()
    n_processors = 18
    (e, ne, n, w, _, _) = range(6)

    # tiny SDRAM so the machine is barely big enough
    _sdram = SDRAM(2 ** 12)

    # build the router links (the original code first built a single-link
    # list and then immediately discarded it by reassigning `links`; that
    # dead code is removed here)
    links = list()
    links.append(Link(0, 0, e, 1, 1))
    links.append(Link(0, 1, ne, 1, 0))
    links.append(Link(1, 1, n, 0, 0))
    links.append(Link(1, 0, w, 0, 1))
    r = Router(links, False, 1024)

    ip = "192.162.240.253"
    chips = list()
    for x in range(5):
        for y in range(5):
            # only chip 0,0 is Ethernet-connected
            if x == y == 0:
                chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip))
            else:
                chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0))
    self.machine = machine_from_chips(chips)

    # one atom per core on every user core of the 5x5 machine
    n_neurons = 17 * 5 * 5
    singular_vertex = SimpleTestVertex(n_neurons, "Large vertex",
                                       max_atoms_per_core=1)
    singular_vertex.splitter = SplitterSliceLegacy()
    self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1)
    self.graph = ApplicationGraph("Graph with large vertex")
    self.graph.add_vertex(singular_vertex)
    graph, _ = self.bp(
        self.graph, self.machine, plan_n_time_steps=100,
        pre_allocated_resources=PreAllocatedResourceContainer())
    self.assertEqual(singular_vertex._model_based_max_atoms_per_core, 1)
    # expect one machine vertex per atom
    self.assertEqual(len(list(graph.vertices)), n_neurons)
def test_operation_with_same_size_as_vertex_constraint(self):
    """ test that the partition and place partitioner can handle same size
        as constraints on a vertex that is split into one core
    """
    # SameAtomsAsVertexConstraint support appears to have been withdrawn:
    # the body is expected to raise NotImplementedError before the final
    # assertion is reached
    # NOTE(review): confirm which call (add_constraint or the partitioner)
    # actually raises
    with self.assertRaises(NotImplementedError):
        constrained_vertex = SimpleTestVertex(5, "Constrained")
        constrained_vertex.add_constraint(
            SameAtomsAsVertexConstraint(self.vert2))
        constrained_vertex.splitter_object = SplitterSliceLegacy()
        self.graph.add_vertex(constrained_vertex)
        graph, _ = splitter_partitioner(
            self.graph, self.machine, plan_n_time_steps=100,
            pre_allocated_resources=PreAllocatedResourceContainer())
        # if reached: the 3 setup vertices plus the new one
        self.assertEqual(len(list(graph.vertices)), 4)
def test_partition_with_insufficient_space(self):
    """ test that if there's not enough space, the test the partitioner
        will raise an error
    """
    self.setup()
    n_processors = 18
    (e, ne, n, w, _, _) = range(6)

    # tiny SDRAM so nothing of this size can fit
    _sdram = SDRAM(2 ** 11)

    # build the router links (the original code first built a single-link
    # list and then immediately discarded it by reassigning `links`; that
    # dead code is removed here)
    links = list()
    links.append(Link(0, 0, e, 1, 1))
    links.append(Link(0, 1, ne, 1, 0))
    links.append(Link(1, 1, n, 0, 0))
    links.append(Link(1, 0, w, 0, 1))
    r = Router(links, False, 1024)

    ip = "192.162.240.253"
    chips = list()
    for x in range(5):
        for y in range(5):
            # only chip 0,0 is Ethernet-connected
            if x == y == 0:
                chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0, ip))
            else:
                chips.append(Chip(x, y, n_processors, r, _sdram, 0, 0))
    self.machine = machine_from_chips(chips)

    # far more atoms than the machine can host at one atom per core
    large_vertex = SimpleTestVertex(3000, "Large vertex",
                                    max_atoms_per_core=1)
    large_vertex.splitter = SplitterSliceLegacy()
    self.assertEqual(large_vertex._model_based_max_atoms_per_core, 1)
    self.graph = ApplicationGraph("Graph with large vertex")
    self.graph.add_vertex(large_vertex)
    with self.assertRaises(PacmanValueError):
        self.bp(self.graph, self.machine, 3000,
                PreAllocatedResourceContainer())
def test_operation_with_same_size_as_vertex_constraint_large_vertices(
        self):
    """ test that the partition and place partitioner can handle same size
        as constraints on a vertex which has to be split over many cores
    """
    # SameAtomsAsVertexConstraint support appears to have been withdrawn:
    # the body is expected to raise NotImplementedError before the final
    # assertion is reached
    # NOTE(review): confirm which call (add_constraint or the partitioner)
    # actually raises
    with self.assertRaises(NotImplementedError):
        constrained_vertex = SimpleTestVertex(300, "Constrained")
        new_large_vertex = SimpleTestVertex(300, "Non constrained")
        constrained_vertex.add_constraint(
            SameAtomsAsVertexConstraint(new_large_vertex))
        new_large_vertex.splitter_object = SplitterSliceLegacy()
        constrained_vertex.splitter_object = SplitterSliceLegacy()
        self.graph.add_vertices([new_large_vertex, constrained_vertex])
        graph, _ = splitter_partitioner(
            self.graph, self.machine, plan_n_time_steps=100,
            pre_allocated_resources=PreAllocatedResourceContainer())
        # if reached: expected total count of machine vertices
        self.assertEqual(len(list(graph.vertices)), 7)
def test_operation_same_size_as_vertex_constraint_different_order(self):
    """ test that the partition and place partitioner can handle same size
        as constraints on a vertex which has to be split over many cores
        where the order of the vertices being added is different.
    """
    # SameAtomsAsVertexConstraint support appears to have been withdrawn:
    # the body is expected to raise NotImplementedError before the final
    # assertion is reached
    # NOTE(review): confirm which call (add_constraint or the partitioner)
    # actually raises
    with self.assertRaises(NotImplementedError):
        self.setup()
        constrained_vertex = SimpleTestVertex(300, "Constrained")
        new_large_vertex = SimpleTestVertex(300, "Non constrained")
        constrained_vertex.add_constraint(
            SameAtomsAsVertexConstraint(new_large_vertex))
        constrained_vertex.splitter_object = SplitterSliceLegacy()
        new_large_vertex.splitter_object = SplitterSliceLegacy()
        self.graph.add_vertices([constrained_vertex, new_large_vertex])
        graph, _ = self.bp(
            self.graph, self.machine, plan_n_time_steps=100,
            pre_allocated_resources=PreAllocatedResourceContainer())
        # each 300-atom vertex splits into 256-atom chunks (2 each);
        # NOTE(review): the asserted 7 presumably includes the setup
        # vertices as well — confirm against setup()
        self.assertEqual(len(list(graph.vertices)), 7)
def test_added_pre_res(self):
    """ Check that the LPG preallocator keeps previously added
        preallocated resources alongside its own per-Ethernet-chip
        reservations.
    """
    machine = virtual_machine(width=12, height=12, with_wrap_arounds=True)

    default_params = {
        'use_prefix': False,
        'key_prefix': None,
        'prefix_type': None,
        'message_type': EIEIOType.KEY_32_BIT,
        'right_shift': 0,
        'payload_as_time_stamps': True,
        'use_payload_prefix': True,
        'payload_prefix': None,
        'payload_right_shift': 0,
        'number_of_packets_sent_per_time_step': 0,
        'hostname': None,
        'port': None,
        'strip_sdp': None,
        'board_address': None,
        'tag': None}

    # data stores needed by algorithm
    live_packet_gatherers = dict()
    extended = dict(default_params)
    extended.update({'partition_id': "EVENTS"})
    default_params_holder = LivePacketGatherParameters(**extended)
    live_packet_gatherers[default_params_holder] = list()

    # create pre res: SDRAM on two non-Ethernet chips, cores on another
    sdram_requirements = {machine.get_chip_at(2, 2): 30000,
                          machine.get_chip_at(7, 7): 50000}
    core_requirements = {machine.get_chip_at(3, 3): 2}
    sdrams = list()
    cores = list()
    for chip in sdram_requirements:
        sdrams.append(SpecificChipSDRAMResource(
            chip, ConstantSDRAM(sdram_requirements[chip])))
    for chip in core_requirements:
        cores.append(CoreResource(chip, core_requirements[chip]))
    pre_pre_res = PreAllocatedResourceContainer(
        core_resources=cores, specific_sdram_usage=sdrams)

    # run pre allocator
    pre_alloc = PreAllocateResourcesForLivePacketGatherers()
    pre_res = pre_alloc(
        live_packet_gatherer_parameters=live_packet_gatherers,
        machine=machine, pre_allocated_resources=pre_pre_res)

    # expected SDRAM locations: every reservation location must appear
    # exactly once, so the list is emptied by the loop below
    # NOTE(review): assumes the Ethernet chips of this 12x12 wrapped
    # machine are (0,0), (4,8) and (8,4) — confirm against virtual_machine
    locs = list()
    locs.append((0, 0))
    locs.append((4, 8))
    locs.append((8, 4))
    locs.append((2, 2))
    locs.append((7, 7))

    # verify sdram
    sdrams = pre_res.specific_sdram_usage
    for sdram in sdrams:
        locs.remove((sdram.chip.x, sdram.chip.y))
        # anything that is not an LPG reservation must be one of the two
        # carried-over reservations, with its original size intact
        if sdram.sdram_usage.get_total_sdram(0) != \
                LivePacketGatherMachineVertex.get_sdram_usage():
            self.assertIn(sdram.chip.x, (2, 7))
            self.assertIn(sdram.chip.y, (2, 7))
            self.assertEqual(sdram.chip.x, sdram.chip.y)
            if sdram.chip.x == 2 and sdram.chip.y == 2:
                self.assertEqual(
                    sdram.sdram_usage.get_total_sdram(0), 30000)
            elif sdram.chip.x == 7 and sdram.chip.y == 7:
                self.assertEqual(
                    sdram.sdram_usage.get_total_sdram(0), 50000)
    self.assertEqual(len(locs), 0)

    # expected core locations: the Ethernet chips plus the carried-over
    # core reservation on (3,3)
    locs = list()
    locs.append((0, 0))
    locs.append((4, 8))
    locs.append((8, 4))
    locs.append((3, 3))

    # verify cores
    cores = pre_res.core_resources
    for core in cores:
        locs.remove((core.chip.x, core.chip.y))
        # the only multi-core reservation is the carried-over one
        if core.n_cores != 1:
            self.assertEqual(core.chip.x, 3)
            self.assertEqual(core.chip.y, 3)
            self.assertEqual(core.n_cores, 2)
    self.assertEqual(len(locs), 0)

    # verify specific cores: none were requested anywhere
    self.assertEqual(len(pre_res.specific_core_resources), 0)