def setUp(self):
    # Build an 8x8 virtual SpiNNaker machine and a machine graph holding
    # four vertices with increasing atom ranges; no edges are added here.
    self.machine = virtual_machine(8, 8)
    self.mach_graph = MachineGraph("machine")
    self.vertices = list()
    self.vertex1 = T_MachineVertex(
        0, 1, get_resources_used_by_atoms(0, 1, []), "First vertex")
    self.vertex2 = T_MachineVertex(
        1, 5, get_resources_used_by_atoms(1, 5, []), "Second vertex")
    self.vertex3 = T_MachineVertex(
        5, 10, get_resources_used_by_atoms(5, 10, []), "Third vertex")
    self.vertex4 = T_MachineVertex(
        10, 100, get_resources_used_by_atoms(10, 100, []), "Fourth vertex")
    # Register each vertex with both the list and the graph, in order
    for vert in (self.vertex1, self.vertex2, self.vertex3, self.vertex4):
        self.vertices.append(vert)
        self.mach_graph.add_vertex(vert)
    self.edges = list()
    self.plan_n_timesteps = 100
def test_partition_with_fixed_atom_constraints(self):
    """ test a partitioning with a graph with fixed atom constraint
    """
    # Create a 2x2 machine with 10 cores per chip (so 40 cores),
    # but 1MB less than the 2MB-per-core total (so 19MB per chip,
    # not 20MB).  The original comment said "1MB off 2MB per chip",
    # which contradicted the arithmetic below.
    n_cores_per_chip = 10
    sdram_per_chip = (n_cores_per_chip * 2) - 1
    machine = virtual_machine(
        width=2, height=2, n_cpus_per_chip=n_cores_per_chip,
        sdram_per_chip=sdram_per_chip)

    # Create a vertex where each atom requires 1MB (default) of SDRAM
    # but which can't be subdivided lower than 2 atoms per core.
    # The vertex has 1 atom per MB of SDRAM, and so would fit but will
    # be disallowed by the fixed atoms per core constraint
    vertex = SimpleTestVertex(
        sdram_per_chip * machine.n_chips,
        max_atoms_per_core=2,
        constraints=[FixedVertexAtomsConstraint(2)])
    vertex.splitter = SplitterSliceLegacy()
    app_graph = ApplicationGraph("Test")
    app_graph.add_vertex(vertex)

    # Do the partitioning - this should result in an error
    with self.assertRaises(PacmanValueError):
        partitioner = SplitterPartitioner()
        partitioner(app_graph, machine, 3000)
def test_virtual_placement(placer):
    # A SpiNNaker-link vertex should be placed on a virtual chip created
    # by the chip-ID allocator.
    machine = virtual_machine(width=8, height=8)
    graph = MachineGraph("Test")
    virtual_vertex = MachineSpiNNakerLinkVertex(spinnaker_link_id=0)
    graph.add_vertex(virtual_vertex)
    extended_machine = MallocBasedChipIdAllocator()(machine, graph)
    n_keys_map = DictBasedMachinePartitionNKeysMap()

    inputs = {
        "MemoryExtendedMachine": machine,
        "MemoryMachine": machine,
        "MemoryMachineGraph": graph,
        "PlanNTimeSteps": 1000,
        "MemoryMachinePartitionNKeysMap": n_keys_map,
    }
    executor = PACMANAlgorithmExecutor(
        [placer], [], inputs, [], [], [], [])
    executor.execute_mapping()

    placements = executor.get_item("MemoryPlacements")
    placement = placements.get_placement_of_vertex(virtual_vertex)
    chip = extended_machine.get_chip_at(placement.x, placement.y)
    assert chip.virtual
def test_ip_tags(self):
    # One vertex per Ethernet chip, each wanting a single IP tag; every
    # allocated tag must point back at the chip its vertex sits on.
    machine = virtual_machine(12, 12)
    eth_chips = machine.ethernet_connected_chips
    vertices = []
    for i in range(len(eth_chips)):
        resources = ResourceContainer(iptags=[
            IPtagResource("127.0.0.1", port=None, strip_sdp=True)])
        vertices.append(
            SimpleMachineVertex(resources, label="Vertex {}".format(i)))
    print("Created {} vertices".format(len(vertices)))

    placements = Placements(
        Placement(vertex, chip.x, chip.y, 1)
        for vertex, chip in zip(vertices, eth_chips))
    allocator = BasicTagAllocator()
    _, _, tags = allocator(
        machine, plan_n_timesteps=None, placements=placements)

    for vertex, chip in zip(vertices, eth_chips):
        iptags = tags.get_ip_tags_for_vertex(vertex)
        self.assertEqual(
            len(iptags), 1, "Incorrect number of tags assigned")
        self.assertEqual(
            iptags[0].destination_x, chip.x,
            "Destination of tag incorrect")
        self.assertEqual(
            iptags[0].destination_y, chip.y,
            "Destination of tag incorrect")
        placement = placements.get_placement_of_vertex(vertex)
        print(placement, "has tag", iptags[0])
def test_ner_route_default():
    # Route a straight two-hop edge (0,0) -> (0,2); only the middle hop
    # should come out marked defaultable.
    unittest_setup()
    graph = MachineGraph("Test")
    machine = virtual_machine(8, 8)
    placements = Placements()

    source_vertex = SimpleMachineVertex(None)
    graph.add_vertex(source_vertex)
    placements.add_placement(Placement(source_vertex, 0, 0, 1))
    target_vertex = SimpleMachineVertex(None)
    graph.add_vertex(target_vertex)
    placements.add_placement(Placement(target_vertex, 0, 2, 1))
    edge = MachineEdge(source_vertex, target_vertex)
    graph.add_edge(edge, "Test")
    partition = graph.get_outgoing_partition_for_edge(edge)

    routes = ner_route(graph, machine, placements)

    source_route = routes.get_entries_for_router(0, 0)[partition]
    assert not source_route.defaultable
    mid_route = routes.get_entries_for_router(0, 1)[partition]
    print(mid_route.incoming_link, mid_route.link_ids)
    assert mid_route.defaultable
    end_route = routes.get_entries_for_router(0, 2)[partition]
    assert not end_route.defaultable
def setUp(self):
    # Build an 8x8 machine and a graph with four vertices joined by
    # three edges.
    unittest_setup()
    self.machine = virtual_machine(8, 8)
    self.mach_graph = MachineGraph("machine")
    self.vertices = list()
    self.vertex1 = get_resourced_machine_vertex(0, 1, "First vertex")
    self.vertex2 = get_resourced_machine_vertex(1, 5, "Second vertex")
    self.vertex3 = get_resourced_machine_vertex(5, 10, "Third vertex")
    self.vertex4 = get_resourced_machine_vertex(10, 100, "Fourth vertex")
    for vert in (self.vertex1, self.vertex2, self.vertex3, self.vertex4):
        self.vertices.append(vert)
        self.mach_graph.add_vertex(vert)
    self.edges = list()
    for pre, post in ((self.vertex2, self.vertex3),
                      (self.vertex2, self.vertex4),
                      (self.vertex3, self.vertex4)):
        edge = MachineEdge(pre, post)
        self.edges.append(edge)
        self.mach_graph.add_edge(edge, "packet")
    # NOTE(review): this last edge is kept in self.edges but never added
    # to the graph — presumably deliberate (tests may need a detached
    # edge); confirm against the tests using self.edges.
    edge4 = MachineEdge(self.vertex3, self.vertex1)
    self.edges.append(edge4)
    self.plan_n_timesteps = 100
def test_one_lpg_params(self):
    # A single default LPG parameter set should reserve one core and one
    # LPG's worth of SDRAM on each of the three Ethernet chips.
    machine = virtual_machine(width=12, height=12, with_wrap_arounds=True)
    default_params = {
        'use_prefix': False,
        'key_prefix': None,
        'prefix_type': None,
        'message_type': EIEIOType.KEY_32_BIT,
        'right_shift': 0,
        'payload_as_time_stamps': True,
        'use_payload_prefix': True,
        'payload_prefix': None,
        'payload_right_shift': 0,
        'number_of_packets_sent_per_time_step': 0,
        'hostname': None,
        'port': None,
        'strip_sdp': None,
        'board_address': None,
        'tag': None}

    # data stores needed by algorithm
    live_packet_gatherers = dict()
    extended = dict(default_params)
    extended.update({'partition_id': "EVENTS"})
    default_params_holder = LivePacketGatherParameters(**extended)
    live_packet_gatherers[default_params_holder] = list()

    # run pre allocator
    pre_alloc = PreAllocateResourcesForLivePacketGatherers()
    pre_res = pre_alloc(
        live_packet_gatherer_parameters=live_packet_gatherers,
        machine=machine)

    # verify sdram: one reservation per Ethernet chip
    locs = [(0, 0), (4, 8), (8, 4)]
    for sdram in pre_res.specific_sdram_usage:
        locs.remove((sdram.chip.x, sdram.chip.y))
        self.assertEqual(
            sdram.sdram_usage.get_total_sdram(0),
            LivePacketGatherMachineVertex.get_sdram_usage())
    self.assertEqual(len(locs), 0)

    # verify cores: one core per Ethernet chip
    locs = [(0, 0), (4, 8), (8, 4)]
    for core in pre_res.core_resources:
        locs.remove((core.chip.x, core.chip.y))
        self.assertEqual(core.n_cores, 1)
    self.assertEqual(len(locs), 0)

    # verify specific cores
    self.assertEqual(len(pre_res.specific_core_resources), 0)
def test_n_cores_available(self):
    machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
    preallocated_resources = PreAllocatedResourceContainer()
    preallocated_resources.add_cores_all(2)
    preallocated_resources.add_cores_ethernet(3)
    tracker = ResourceTracker(
        machine, plan_n_timesteps=None,
        preallocated_resources=preallocated_resources)

    # Should be 12 cores = 18 - 1 monitor - 3 ethernet - 2 all cores
    # (the original comment claimed 15, contradicting the assertion)
    self.assertEqual(tracker._get_core_tracker(0, 0).n_cores_available, 12)

    # Should be 15 cores = 18 - 1 monitor - 2 all cores
    self.assertEqual(tracker._get_core_tracker(0, 1).n_cores_available, 15)

    # Should be True since the core is not pre allocated
    self.assertTrue(tracker._get_core_tracker(0, 0).is_core_available(2))

    # Should be False since the core is monitor
    self.assertFalse(tracker._get_core_tracker(0, 0).is_core_available(0))

    # Allocate a core
    tracker._get_core_tracker(0, 0).allocate(2)

    # Should be 11 cores as one is now allocated
    self.assertEqual(tracker._get_core_tracker(0, 0).n_cores_available, 11)

    # Asking for a chip that does not exist should blow up
    with self.assertRaises(PacmanInvalidParameterException):
        tracker._get_core_tracker(2, 2)
def test_deallocation_of_resources(self):
    # Allocate SDRAM + a core on chip (0, 0), then free them again and
    # check the tracker's internal state returns to its initial values.
    machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
    chip_sdram = machine.get_chip_at(1, 1).sdram.size
    res_sdram = 12345
    tracker = ResourceTracker(
        machine, plan_n_timesteps=None, preallocated_resources=None)

    sdram_res = ConstantSDRAM(res_sdram)
    resources = ResourceContainer(sdram=sdram_res)
    chip_0 = machine.get_chip_at(0, 0)

    # core trackers are created lazily, so (0, 0) must not exist yet
    if (0, 0) in tracker._core_tracker:
        raise Exception("shouldnt exist")
    tracker._get_core_tracker(1, 1)
    # ... but (1, 1) exists now that we asked for it
    if (1, 1) not in tracker._core_tracker:
        raise Exception("should exist")

    # verify sdram tracker
    # 0, 0 in _sdram_tracker due to the get_core_tracker(0, 0) call
    if tracker._sdram_tracker[1, 1] != chip_sdram:
        raise Exception("incorrect sdram of {}".format(
            tracker._sdram_tracker[1, 1]))

    # allocate some res
    chip_x, chip_y, processor_id, ip_tags, reverse_ip_tags = \
        tracker.allocate_resources(resources, [(0, 0)])

    # verify chips used is updated
    cores = list(tracker._core_tracker[(0, 0)]._cores)
    self.assertEqual(len(cores), chip_0.n_user_processors - 1)

    # verify sdram used is updated
    sdram = tracker._sdram_tracker[(0, 0)]
    self.assertEqual(sdram, chip_sdram - res_sdram)

    if (0, 0) not in tracker._chips_used:
        raise Exception("should exist")

    # deallocate res
    tracker.unallocate_resources(
        chip_x, chip_y, processor_id, resources, ip_tags, reverse_ip_tags)

    # verify chips used is updated
    if tracker._core_tracker[(0, 0)].n_cores_available != \
            chip_0.n_user_processors:
        raise Exception("shouldn't exist or should be right size")

    # verify sdram tracker returned to the full chip amount
    if tracker._sdram_tracker[0, 0] != chip_sdram:
        raise Exception("incorrect sdram of {}".format(
            tracker._sdram_tracker[0, 0]))
def test_1_chip_pre_allocated_same_core(self):
    # Pinning 13 vertices to specific cores on (0, 0) while core 4 is
    # pre-allocated must make the partitioner raise PacmanValueError.
    machine = virtual_machine(width=8, height=8)
    graph = ApplicationGraph("Test")
    partitioner = SplitterPartitioner()

    # add graph vertices which reside on 0,0
    for p in range(0, 13):
        vertex = SimpleTestVertex(
            constraints=[ChipAndCoreConstraint(x=0, y=0, p=p)],
            n_atoms=1)
        vertex.splitter = SplitterSliceLegacy()
        graph.add_vertex(vertex)

    # add pre-allocated resources for cores on 0,0
    core_pre = SpecificCoreResource(
        chip=machine.get_chip_at(0, 0), cores=[4])
    pre_allocated_res = PreAllocatedResourceContainer(
        specific_core_resources=[core_pre])

    # run partitioner that should go boom
    try:
        partitioner(graph, machine, plan_n_time_steps=None,
                    pre_allocated_resources=pre_allocated_res)
        raise Exception("should have blown up here")
    except PacmanValueError:
        pass
    except Exception:
        raise Exception("should have blown up here")
def test_1_chip_no_pre_allocated_too_much_sdram(self):
    # Same vertex load as the pre-allocated variant, but with an empty
    # pre-allocation container: partitioning is expected to succeed.
    machine = virtual_machine(width=8, height=8)
    graph = ApplicationGraph("Test")
    partitioner = SplitterPartitioner()
    eight_meg = 8 * 1024 * 1024

    # add graph vertices which reside on 0,0
    for _ in range(0, 13):
        vertex = SimpleTestVertex(
            constraints=[ChipAndCoreConstraint(x=0, y=0)],
            n_atoms=1, fixed_sdram_value=eight_meg)
        vertex.splitter = SplitterSliceLegacy()
        graph.add_vertex(vertex)

    # add (empty) pre-allocated resources for cores on 0,0
    pre_allocated_res = PreAllocatedResourceContainer()

    # run partitioner; any failure is reported
    # NOTE(review): the message below looks copy-pasted from the sibling
    # tests that *expect* a failure — confirm the intended wording.
    try:
        partitioner(graph, machine, plan_n_time_steps=None,
                    pre_allocated_resources=pre_allocated_res)
    except Exception:
        raise Exception("should have blown up here")
def test_1_chip_pre_allocated_too_much_sdram(self):
    # With 20MB of SDRAM pre-allocated on (0, 0), thirteen 8MB vertices
    # pinned there must make the partitioner raise
    # PacmanPartitionException.
    machine = virtual_machine(width=8, height=8)
    graph = ApplicationGraph("Test")
    partitioner = SplitterPartitioner()
    eight_meg = 8 * 1024 * 1024

    # add graph vertices which reside on 0,0
    for _ in range(0, 13):
        vertex = SimpleTestVertex(
            constraints=[ChipAndCoreConstraint(x=0, y=0)],
            n_atoms=1, fixed_sdram_value=eight_meg)
        vertex.splitter = SplitterSliceLegacy()
        graph.add_vertex(vertex)

    # add pre-allocated resources for cores on 0,0
    twenty_meg = ConstantSDRAM(20 * 1024 * 1024)
    core_pre = SpecificChipSDRAMResource(
        chip=machine.get_chip_at(0, 0), sdram_usage=twenty_meg)
    pre_allocated_res = PreAllocatedResourceContainer(
        specific_sdram_usage=[core_pre])

    # run partitioner that should go boom
    try:
        partitioner(graph, machine, plan_n_time_steps=None,
                    pre_allocated_resources=pre_allocated_res)
        raise Exception("should have blown up here")
    except PacmanPartitionException:
        pass
    except Exception:
        # re-raise the unexpected exception with its original traceback
        exc_info = sys.exc_info()
        six.reraise(*exc_info)
def test_n_cores_available(self):
    # Pre-allocate one specific core and two unspecified cores on chip
    # (0, 0), then check the availability accounting.
    machine = virtual_machine(width=2, height=2, n_cpus_per_chip=18)
    chip = machine.get_chip_at(0, 0)
    preallocated_resources = PreAllocatedResourceContainer(
        specific_core_resources=[
            SpecificCoreResource(chip=chip, cores=[1])],
        core_resources=[CoreResource(chip=chip, n_cores=2)])
    tracker = ResourceTracker(
        machine, plan_n_timesteps=None,
        preallocated_resources=preallocated_resources)

    # Should be 14 cores = 18 - 1 monitor - 1 specific core - 2 other cores
    self.assertEqual(tracker._n_cores_available(chip, (0, 0), None), 14)

    # Should be 0 since the core is already pre allocated
    self.assertEqual(tracker._n_cores_available(chip, (0, 0), 1), 0)

    # Should be 1 since the core is not pre allocated
    self.assertEqual(tracker._n_cores_available(chip, (0, 0), 2), 1)

    # Should be 0 since the core is monitor
    self.assertEqual(tracker._n_cores_available(chip, (0, 0), 0), 0)

    # Allocate a core
    tracker._allocate_core(chip, (0, 0), 2)

    # Should be 13 cores as one now allocated
    self.assertEqual(tracker._n_cores_available(chip, (0, 0), None), 13)
def test_partition_with_fixed_atom_constraints_at_limit(self):
    """ test a partitioning with a graph with fixed atom constraint which\
        should fit but is close to the limit
    """
    # Create a 2x2 machine with 1 core per chip (so 4 cores),
    # and 8MB SDRAM per chip
    n_cores_per_chip = 2  # Remember 1 core is the monitor
    sdram_per_chip = 8
    machine = virtual_machine(
        width=2, height=2, n_cpus_per_chip=n_cores_per_chip,
        sdram_per_chip=sdram_per_chip)

    # Create a vertex which will need to be split perfectly into 4 cores
    # to work and which max atoms per core must be ignored
    vertex = SimpleTestVertex(
        sdram_per_chip * 2, max_atoms_per_core=sdram_per_chip,
        constraints=[FixedVertexAtomsConstraint(sdram_per_chip // 2)])
    vertex.splitter = SplitterSliceLegacy()
    app_graph = ApplicationGraph("Test")
    app_graph.add_vertex(vertex)

    # Do the partitioning - this should just work
    partitioner = SplitterPartitioner()
    machine_graph, _ = partitioner(app_graph, machine, 3000)
    self.assertEqual(4, len(machine_graph.vertices))
def test_one_lpg_params_and_3_specific(self):
    # A default LPG plus one board-specific LPG per Ethernet chip should
    # reserve two cores and double the SDRAM on each Ethernet chip.
    machine = virtual_machine(width=12, height=12)
    default_params = {
        'use_prefix': False,
        'key_prefix': None,
        'prefix_type': None,
        'message_type': EIEIOType.KEY_32_BIT,
        'right_shift': 0,
        'payload_as_time_stamps': True,
        'use_payload_prefix': True,
        'payload_prefix': None,
        'payload_right_shift': 0,
        'number_of_packets_sent_per_time_step': 0,
        'hostname': None,
        'port': None,
        'strip_sdp': None,
        'tag': None,
        'label': "Test"}

    # data stores needed by algorithm
    live_packet_gatherers = dict()
    default_params_holder = LivePacketGatherParameters(**default_params)
    live_packet_gatherers[default_params_holder] = list()

    # and special LPG on Ethernet connected chips
    for chip in machine.ethernet_connected_chips:
        extended = dict(default_params)
        extended['board_address'] = chip.ip_address
        default_params_holder2 = LivePacketGatherParameters(**extended)
        live_packet_gatherers[default_params_holder2] = list()

    pre_alloc = PreAllocateResourcesForLivePacketGatherers()
    pre_res = pre_alloc(
        live_packet_gatherer_parameters=live_packet_gatherers,
        machine=machine)

    # verify sdram: two LPGs' worth per Ethernet chip
    locs = [(0, 0), (4, 8), (8, 4)]
    for sdram in pre_res.specific_sdram_usage:
        locs.remove((sdram.chip.x, sdram.chip.y))
        self.assertEqual(
            sdram.sdram_usage.get_total_sdram(0),
            LivePacketGatherMachineVertex.get_sdram_usage() * 2)
    self.assertEqual(len(locs), 0)

    # verify cores: two per Ethernet chip
    locs = {(0, 0): 0, (4, 8): 0, (8, 4): 0}
    for core in pre_res.core_resources:
        locs[core.chip.x, core.chip.y] += core.n_cores
    for (x, y) in [(0, 0), (4, 8), (8, 4)]:
        self.assertEqual(locs[x, y], 2)

    # verify specific cores
    self.assertEqual(len(pre_res.specific_core_resources), 0)
def test_fail(self):
    """ Check that the pre-allocator raises for this input.

    The original called ``self.assertRaises(Exception, pre_alloc(...))``,
    which invokes the allocator *eagerly* and hands its return value (not
    a callable) to assertRaises — so the intended check never ran.  Use
    the context-manager form so the exception is actually asserted.
    """
    machine = virtual_machine(width=12, height=12, with_wrap_arounds=True)
    live_packet_gatherers = dict()
    pre_alloc = PreAllocateResourcesForLivePacketGatherers()
    with self.assertRaises(Exception):
        pre_alloc(
            live_packet_gatherer_parameters=live_packet_gatherers,
            machine=machine)
def test_that_3_lpgs_are_generated_on_3_board_app_graph(self):
    # One LPG per Ethernet chip should be inserted into both the machine
    # graph and the application graph.
    machine = virtual_machine(width=12, height=12)
    app_graph = ApplicationGraph("Test")
    graph = MachineGraph("Test", app_graph)
    default_params = {
        'use_prefix': False,
        'key_prefix': None,
        'prefix_type': None,
        'message_type': EIEIOType.KEY_32_BIT,
        'right_shift': 0,
        'payload_as_time_stamps': True,
        'use_payload_prefix': True,
        'payload_prefix': None,
        'payload_right_shift': 0,
        'number_of_packets_sent_per_time_step': 0,
        'hostname': None,
        'port': None,
        'strip_sdp': None,
        'tag': None,
        'label': "Test"}

    # data stores needed by algorithm
    live_packet_gatherers = dict()
    default_params_holder = LivePacketGatherParameters(**default_params)
    live_packet_gatherers[default_params_holder] = list()

    # run edge inserter that should go boom
    edge_inserter = InsertLivePacketGatherersToGraphs()
    lpg_verts_mapping = edge_inserter(
        live_packet_gatherer_parameters=live_packet_gatherers,
        machine=machine, machine_graph=graph,
        application_graph=app_graph)

    self.assertEqual(len(lpg_verts_mapping[default_params_holder]), 3)

    # one LPG per Ethernet chip location
    locs = [(0, 0), (4, 8), (8, 4)]
    for vertex in itervalues(lpg_verts_mapping[default_params_holder]):
        constraint = list(vertex.constraints)[0]
        locs.remove((constraint.x, constraint.y))
    self.assertEqual(len(locs), 0)

    verts = lpg_verts_mapping[default_params_holder].values()
    for vertex in graph.vertices:
        self.assertIn(vertex, verts)

    # each machine-level LPG has its own distinct application vertex
    app_verts = set()
    for vertex in itervalues(lpg_verts_mapping[default_params_holder]):
        app_vertex = vertex.app_vertex
        self.assertNotEqual(app_vertex, None)
        self.assertIsInstance(app_vertex, ApplicationVertex)
        app_verts.add(app_vertex)
    self.assertEqual(len(app_verts), 3)
def test_none(self):
    # With no LPG parameters at all, the pre-allocator should reserve
    # nothing.
    machine = virtual_machine(width=12, height=12, with_wrap_arounds=True)
    live_packet_gatherers = dict()

    # run pre allocator
    pre_alloc = PreAllocateResourcesForLivePacketGatherers()
    pre_res = pre_alloc(
        live_packet_gatherer_parameters=live_packet_gatherers,
        machine=machine)

    self.assertEqual(len(pre_res.specific_core_resources), 0)
    self.assertEqual(len(pre_res.core_resources), 0)
    self.assertEqual(len(pre_res.specific_sdram_usage), 0)
def _do_test(self, placer):
    # Place 100 plain vertices plus 10 vertices carrying random
    # SameChipAsConstraints, then check each constrained vertex landed
    # on the same chip as its targets.
    machine = virtual_machine(width=8, height=8)
    graph = MachineGraph("Test")
    vertices = [
        SimpleMachineVertex(ResourceContainer(), label="v{}".format(i))
        for i in range(100)]
    for vertex in vertices:
        graph.add_vertex(vertex)
    same_vertices = [
        SimpleMachineVertex(ResourceContainer(), label="same{}".format(i))
        for i in range(10)]
    random.seed(12345)
    for vertex in same_vertices:
        graph.add_vertex(vertex)
        for _i in range(0, random.randint(1, 5)):
            vertex.add_constraint(SameChipAsConstraint(
                vertices[random.randint(0, 99)]))
    n_keys_map = DictBasedMachinePartitionNKeysMap()

    inputs = {
        "MemoryExtendedMachine": machine,
        "MemoryMachine": machine,
        "MemoryMachineGraph": graph,
        "PlanNTimeSteps": None,
        "MemoryMachinePartitionNKeysMap": n_keys_map,
    }
    executor = PACMANAlgorithmExecutor(
        [placer], [], inputs, [], [], [], [])
    executor.execute_mapping()
    placements = executor.get_item("MemoryPlacements")

    for same in same_vertices:
        print("{0.vertex.label}, {0.x}, {0.y}, {0.p}: {1}".format(
            placements.get_placement_of_vertex(same),
            ["{0.vertex.label}, {0.x}, {0.y}, {0.p}".format(
                placements.get_placement_of_vertex(constraint.vertex))
             for constraint in same.constraints]))
        placement = placements.get_placement_of_vertex(same)
        for constraint in same.constraints:
            if isinstance(constraint, SameChipAsConstraint):
                other = placements.get_placement_of_vertex(
                    constraint.vertex)
                self.assertTrue(
                    other.x == placement.x and other.y == placement.y,
                    "Vertex was not placed on the same chip as requested")
def _do_test(self, placer):
    # Vertices linked by SDRAM edges must end up on the same chip, since
    # they communicate through shared SDRAM.
    machine = virtual_machine(width=8, height=8)
    graph = MachineGraph("Test")
    plan_n_timesteps = 100
    vertices = [
        SimpleMachineVertex(
            ResourceContainer(), label="v{}".format(i), sdram_cost=20)
        for i in range(100)]
    for vertex in vertices:
        graph.add_vertex(vertex)
    same_vertices = [
        SimpleMachineVertex(
            ResourceContainer(), label="same{}".format(i), sdram_cost=20)
        for i in range(10)]
    random.seed(12345)
    sdram_edges = list()
    for vertex in same_vertices:
        graph.add_vertex(vertex)
        graph.add_outgoing_edge_partition(
            ConstantSDRAMMachinePartition(
                identifier="Test", pre_vertex=vertex, label="bacon"))
        for _i in range(0, random.randint(1, 5)):
            sdram_edge = SDRAMMachineEdge(
                vertex, vertices[random.randint(0, 99)], label="bacon",
                app_edge=None)
            sdram_edges.append(sdram_edge)
            graph.add_edge(sdram_edge, "Test")
    n_keys_map = DictBasedMachinePartitionNKeysMap()

    inputs = {
        "MemoryExtendedMachine": machine,
        "MemoryMachine": machine,
        "MemoryMachineGraph": graph,
        "PlanNTimeSteps": plan_n_timesteps,
        "MemoryMachinePartitionNKeysMap": n_keys_map,
    }
    executor = PACMANAlgorithmExecutor(
        [placer], [], inputs, [], [], [], [])
    executor.execute_mapping()
    placements = executor.get_item("MemoryPlacements")

    for edge in sdram_edges:
        pre_place = placements.get_placement_of_vertex(edge.pre_vertex)
        post_place = placements.get_placement_of_vertex(edge.post_vertex)
        assert pre_place.x == post_place.x
        assert pre_place.y == post_place.y
def test_too_many_ip_tags_for_1_board(self):
    # Fill the first Ethernet chip's tag quota, then place extra
    # tag-hungry vertices on a neighbouring chip; each vertex must still
    # get exactly one tag and no (board, tag) pair may be reused.
    n_extra_vertices = 3
    machine = virtual_machine(12, 12)
    eth_chips = machine.ethernet_connected_chips
    eth_chip = eth_chips[0]
    eth_chip_2 = machine.get_chip_at(eth_chip.x + 1, eth_chip.y + 1)

    # usable (non-monitor) processor ids on both chips
    eth_procs = [
        proc.processor_id for proc in eth_chip.processors
        if not proc.is_monitor]
    procs = [proc for proc in eth_chip_2.processors if not proc.is_monitor]
    eth2_procs = [proc.processor_id for proc in procs]
    proc = procs[-1]

    # vertices consuming every tag of the first Ethernet chip ...
    eth_vertices = [
        SimpleMachineVertex(
            ResourceContainer(iptags=[
                IPtagResource("127.0.0.1", port=tag, strip_sdp=True)]),
            label="Ethernet Vertex {}".format(proc))
        for tag in eth_chip.tag_ids]
    # ... plus a few more on the neighbouring chip
    eth2_vertices = [
        SimpleMachineVertex(
            ResourceContainer(iptags=[
                IPtagResource(
                    "127.0.0.1", port=10000 + tag, strip_sdp=True)]),
            label="Ethernet 2 Vertex {}".format(proc))
        for tag in range(n_extra_vertices)]

    placements = Placements(
        Placement(vertex, eth_chip.x, eth_chip.y, proc)
        for proc, vertex in zip(eth_procs, eth_vertices))
    placements.add_placements(
        Placement(vertex, eth_chip_2.x, eth_chip_2.y, proc)
        for proc, vertex in zip(eth2_procs, eth2_vertices))

    allocator = BasicTagAllocator()
    _, _, tags = allocator(
        machine, plan_n_timesteps=None, placements=placements)

    tags_by_board = defaultdict(set)
    for vertices in (eth_vertices, eth2_vertices):
        for vertex in vertices:
            iptags = tags.get_ip_tags_for_vertex(vertex)
            self.assertEqual(
                len(iptags), 1, "Incorrect number of tags assigned")
            placement = placements.get_placement_of_vertex(vertex)
            print(placement, "has tag", iptags[0])
            self.assertFalse(
                iptags[0].tag in tags_by_board[iptags[0].board_address],
                "Tag used more than once")
            tags_by_board[iptags[0].board_address].add(iptags[0].tag)

    self.assertEqual(
        len(tags_by_board[eth_chip.ip_address]), len(eth_chip.tag_ids),
        "Wrong number of tags assigned to first Ethernet")
def test_that_6_lpgs_are_generated_2_on_each_eth_chip(self):
    # One default LPG per Ethernet chip plus one board-specific LPG per
    # Ethernet chip, pinned to the right coordinates.
    machine = virtual_machine(width=12, height=12, with_wrap_arounds=True)
    graph = MachineGraph("Test")
    default_params = {
        'use_prefix': False,
        'key_prefix': None,
        'prefix_type': None,
        'message_type': EIEIOType.KEY_32_BIT,
        'right_shift': 0,
        'payload_as_time_stamps': True,
        'use_payload_prefix': True,
        'payload_prefix': None,
        'payload_right_shift': 0,
        'number_of_packets_sent_per_time_step': 0,
        'hostname': None,
        'port': None,
        'strip_sdp': None,
        'board_address': None,
        'tag': None}

    # data stores needed by algorithm
    live_packet_gatherers = dict()
    extended = dict(default_params)
    extended.update({'partition_id': "EVENTS"})
    default_params_holder = LivePacketGatherParameters(**extended)
    live_packet_gatherers[default_params_holder] = list()

    # and special LPG on Ethernet connected chips
    # (note: the same `extended` dict is deliberately reused/mutated)
    chip_special = dict()
    for chip in machine.ethernet_connected_chips:
        extended['board_address'] = chip.ip_address
        default_params_holder2 = LivePacketGatherParameters(**extended)
        live_packet_gatherers[default_params_holder2] = list()
        chip_special[(chip.x, chip.y)] = default_params_holder2

    # run edge inserter that should go boom
    edge_inserter = InsertLivePacketGatherersToGraphs()
    lpg_verts_mapping = edge_inserter(
        live_packet_gatherer_parameters=live_packet_gatherers,
        machine=machine, machine_graph=graph, application_graph=None,
        graph_mapper=None)

    self.assertEqual(len(lpg_verts_mapping[default_params_holder]), 3)
    for eth_chip in chip_special:
        params = chip_special[eth_chip]
        self.assertEqual(len(lpg_verts_mapping[params]), 1)
        vertex = lpg_verts_mapping[params][eth_chip]
        self.assertEqual(eth_chip[0], list(vertex.constraints)[0].x)
        self.assertEqual(eth_chip[1], list(vertex.constraints)[0].y)
def test_that_3_lpgs_are_generated_on_3_board(self):
    # A single default parameter set should yield one LPG vertex per
    # Ethernet chip, each constrained to its chip's coordinates.
    machine = virtual_machine(width=12, height=12, with_wrap_arounds=True)
    graph = MachineGraph("Test")
    default_params = {
        'use_prefix': False,
        'key_prefix': None,
        'prefix_type': None,
        'message_type': EIEIOType.KEY_32_BIT,
        'right_shift': 0,
        'payload_as_time_stamps': True,
        'use_payload_prefix': True,
        'payload_prefix': None,
        'payload_right_shift': 0,
        'number_of_packets_sent_per_time_step': 0,
        'hostname': None,
        'port': None,
        'strip_sdp': None,
        'board_address': None,
        'tag': None}

    # data stores needed by algorithm
    live_packet_gatherers = dict()
    extended = dict(default_params)
    extended.update({'partition_id': "EVENTS"})
    default_params_holder = LivePacketGatherParameters(**extended)
    live_packet_gatherers[default_params_holder] = list()

    # run edge inserter that should go boom
    edge_inserter = InsertLivePacketGatherersToGraphs()
    lpg_verts_mapping = edge_inserter(
        live_packet_gatherer_parameters=live_packet_gatherers,
        machine=machine, machine_graph=graph, application_graph=None,
        graph_mapper=None)

    self.assertEqual(len(lpg_verts_mapping[default_params_holder]), 3)

    # one LPG per Ethernet chip location
    locs = [(0, 0), (4, 8), (8, 4)]
    for vertex in itervalues(lpg_verts_mapping[default_params_holder]):
        constraint = list(vertex.constraints)[0]
        locs.remove((constraint.x, constraint.y))
    self.assertEqual(len(locs), 0)

    verts = lpg_verts_mapping[default_params_holder].values()
    for vertex in graph.vertices:
        self.assertIn(vertex, verts)
def _do_test(self, placer):
    # Direct (non-executor) variant: run the named placer function and
    # verify SameChipAsConstraints were honoured.
    machine = virtual_machine(width=8, height=8)
    graph = MachineGraph("Test")
    vertices = [
        SimpleMachineVertex(ResourceContainer(), label="v{}".format(i))
        for i in range(100)]
    for vertex in vertices:
        graph.add_vertex(vertex)
    same_vertices = [
        SimpleMachineVertex(ResourceContainer(), label="same{}".format(i))
        for i in range(10)]
    random.seed(12345)
    for vertex in same_vertices:
        graph.add_vertex(vertex)
        for _i in range(0, random.randint(1, 5)):
            vertex.add_constraint(
                SameChipAsConstraint(vertices[random.randint(0, 99)]))
    n_keys_map = DictBasedMachinePartitionNKeysMap()

    # dispatch to the requested placer implementation
    if placer == "ConnectiveBasedPlacer":
        placements = connective_based_placer(graph, machine, None)
    elif placer == "OneToOnePlacer":
        placements = one_to_one_placer(graph, machine, None)
    elif placer == "RadialPlacer":
        placements = radial_placer(graph, machine, None)
    elif placer == "SpreaderPlacer":
        placements = spreader_placer(graph, machine, n_keys_map, None)
    else:
        raise NotImplementedError(placer)

    for same in same_vertices:
        print("{0.vertex.label}, {0.x}, {0.y}, {0.p}: {1}".format(
            placements.get_placement_of_vertex(same),
            ["{0.vertex.label}, {0.x}, {0.y}, {0.p}".format(
                placements.get_placement_of_vertex(constraint.vertex))
             for constraint in same.constraints]))
        placement = placements.get_placement_of_vertex(same)
        for constraint in same.constraints:
            if isinstance(constraint, SameChipAsConstraint):
                other = placements.get_placement_of_vertex(
                    constraint.vertex)
                self.assertTrue(
                    other.x == placement.x and other.y == placement.y,
                    "Vertex was not placed on the same chip as requested")
def test_all_working(width, height, with_down_links, with_down_chips):
    # Derive optional down-link / down-chip sets relative to each
    # Ethernet chip, then delegate the real checking to _check_setup.
    temp_machine = virtual_machine(width=width, height=height)
    down_links = None
    if with_down_links:
        down_links = set()
        for eth in temp_machine.ethernet_connected_chips:
            down_links.add((eth.x + 1, eth.y, 5))
            down_links.add((eth.x, eth.y + 1, 3))
    down_chips = None
    if with_down_chips:
        down_chips = {
            (eth.x + 1, eth.y + 1)
            for eth in temp_machine.ethernet_connected_chips}
    _check_setup(width, height, down_chips, down_links)
def _do_test(self, placer):
    # Direct (non-executor) variant: vertices joined by SDRAM edges must
    # be placed on the same chip by the chosen placer.
    machine = virtual_machine(width=8, height=8)
    graph = MachineGraph("Test")
    vertices = [
        MockMachineVertex(
            ResourceContainer(), label="v{}".format(i),
            sdram_requirement=20)
        for i in range(100)]
    for vertex in vertices:
        graph.add_vertex(vertex)
    same_vertices = [
        MockMachineVertex(
            ResourceContainer(), label="same{}".format(i),
            sdram_requirement=20)
        for i in range(10)]
    random.seed(12345)
    sdram_edges = list()
    for vertex in same_vertices:
        graph.add_vertex(vertex)
        graph.add_outgoing_edge_partition(
            ConstantSDRAMMachinePartition(
                identifier="Test", pre_vertex=vertex, label="bacon"))
        for _i in range(0, random.randint(1, 5)):
            sdram_edge = SDRAMMachineEdge(
                vertex, vertices[random.randint(0, 99)], label="bacon",
                app_edge=None)
            sdram_edges.append(sdram_edge)
            graph.add_edge(sdram_edge, "Test")
    n_keys_map = DictBasedMachinePartitionNKeysMap()

    # dispatch to the requested placer implementation
    if placer == "ConnectiveBasedPlacer":
        placements = connective_based_placer(graph, machine, None)
    elif placer == "OneToOnePlacer":
        placements = one_to_one_placer(graph, machine, None)
    elif placer == "RadialPlacer":
        placements = radial_placer(graph, machine, None)
    elif placer == "SpreaderPlacer":
        placements = spreader_placer(graph, machine, n_keys_map, None)
    else:
        raise NotImplementedError(placer)

    for edge in sdram_edges:
        pre_place = placements.get_placement_of_vertex(edge.pre_vertex)
        post_place = placements.get_placement_of_vertex(edge.post_vertex)
        assert pre_place.x == post_place.x
        assert pre_place.y == post_place.y
def _check_setup(width, height):
    # Every chip's fixed route must lead to exactly one destination:
    # core 1 on its nearest Ethernet chip.
    machine = virtual_machine(width=width, height=height)
    ethernet_chips = machine.ethernet_connected_chips
    placements = Placements(
        Placement(DestinationVertex(), eth.x, eth.y, 1)
        for eth in ethernet_chips)

    fixed_route_tables = fixed_route_router(
        machine, placements, DestinationVertex)

    for x, y in machine.chip_coordinates:
        assert (x, y) in fixed_route_tables
        chip = machine.get_chip_at(x, y)
        destinations = _get_destinations(machine, fixed_route_tables, x, y)
        assert len(destinations) == 1
        assert (chip.nearest_ethernet_x, chip.nearest_ethernet_y,
                1) in destinations
def test_all_working(width, height, with_down_links, with_down_chips):
    # Config-driven variant: record the down links/chips in the Machine
    # configuration section, then delegate to _check_setup.
    unittest_setup()
    temp_machine = virtual_machine(width=width, height=height)
    down_links = None
    if with_down_links:
        down_links = set()
        for eth in temp_machine.ethernet_connected_chips:
            down_links.add((eth.x + 1, eth.y, 5))
            down_links.add((eth.x, eth.y + 1, 3))
        down_str = ":".join(
            [f"{x},{y},{link}" for x, y, link in down_links])
        set_config("Machine", "down_links", down_str)
    down_chips = None
    if with_down_chips:
        down_chips = set(
            (eth.x + 1, eth.y + 1)
            for eth in temp_machine.ethernet_connected_chips)
        down_str = ":".join([f"{x},{y}" for x, y in down_chips])
        set_config("Machine", "down_chips", down_str)
    _check_setup(width, height)
def test_operation_with_same_size_as_vertex_constraint_chain(self):
    """ Test that a chain of same size constraints works even when the\
        order of vertices is not correct for the chain
    """
    # NOTE(review): the whole flow is currently expected to raise
    # NotImplementedError (possibly because of the `splitter_object`
    # attribute used here, where sibling tests use `splitter`) — confirm.
    with self.assertRaises(NotImplementedError):
        graph = ApplicationGraph("Test")
        vertex_1 = SimpleTestVertex(10, "Vertex_1", 5)
        vertex_1.splitter_object = SplitterSliceLegacy()
        vertex_2 = SimpleTestVertex(10, "Vertex_2", 4)
        vertex_3 = SimpleTestVertex(10, "Vertex_3", 2)
        vertex_3.add_constraint(SameAtomsAsVertexConstraint(vertex_2))
        vertex_2.add_constraint(SameAtomsAsVertexConstraint(vertex_1))
        vertex_2.splitter_object = SplitterSliceLegacy()
        vertex_3.splitter_object = SplitterSliceLegacy()
        graph.add_vertices([vertex_1, vertex_2, vertex_3])
        machine = virtual_machine(width=2, height=2)
        splitter_partitioner(graph, machine, plan_n_time_steps=None)
        subvertices_1 = list(vertex_1.machine_vertices)
        subvertices_2 = list(vertex_2.machine_vertices)
        subvertices_3 = list(vertex_3.machine_vertices)
        self.assertEqual(len(subvertices_1), len(subvertices_2))
        self.assertEqual(len(subvertices_2), len(subvertices_3))
def test_virtual_placement(placer):
    # Direct (non-executor) variant: a SpiNNaker-link vertex should be
    # placed on a virtual chip regardless of which placer is used.
    unittest_setup()
    machine = virtual_machine(width=8, height=8)
    graph = MachineGraph("Test")
    virtual_vertex = MachineSpiNNakerLinkVertex(spinnaker_link_id=0)
    graph.add_vertex(virtual_vertex)
    extended_machine = malloc_based_chip_id_allocator(machine, graph)
    n_keys_map = DictBasedMachinePartitionNKeysMap()

    # dispatch to the requested placer implementation
    if placer == "ConnectiveBasedPlacer":
        placements = connective_based_placer(graph, machine, None)
    elif placer == "OneToOnePlacer":
        placements = one_to_one_placer(graph, machine, None)
    elif placer == "RadialPlacer":
        placements = radial_placer(graph, machine, None)
    elif placer == "SpreaderPlacer":
        placements = spreader_placer(graph, machine, n_keys_map, None)
    else:
        raise NotImplementedError(placer)

    placement = placements.get_placement_of_vertex(virtual_vertex)
    chip = extended_machine.get_chip_at(placement.x, placement.y)
    assert chip.virtual