def _init_machine_and_vertexes(self, proportion_occuped_processors=1.0):
    """Utility function: build a test machine and enough vertices to fill
    the requested proportion of its processors.

    No partitioning, placement or routing is performed, so the
    connectivity of the subvertices can still be set up afterwards.

    Sets:
    * self.machine to a machine
    * self.vertices to a list of vertices sized to fill the specified
      proportion of processors
    """
    self.machine = lib_machine.Machine(hostname="m", x=2, y=2,
                                       type="unwrapped")

    # Every test vertex uses the same neuron model.
    neuron_type = pynn.IF_curr_exp
    per_atom_requirements = neuron_type.get_requirements_per_atom()
    per_processor_resources = self.machine.get_resources_per_processor()
    atoms_per_processor = per_processor_resources / per_atom_requirements

    processor_count = len(self.machine.get_processors())
    occupied_count = int(processor_count * proportion_occuped_processors)

    # One vertex per processor we want occupied, each sized to exactly
    # fill a single processor.
    self.vertices = [graph.Vertex(atoms_per_processor, neuron_type)
                     for _ in xrange(occupied_count)]
def test_maxiumum_test_case_success_edge_case_spinn2():
    """Fill one chip's router with 999 entries (just below the limit) and
    check the table-size validation passes without raising."""
    machine = lib_machine.Machine('spinn-2', x=2, y=2, type="spinn2")
    chip_router = machine.get_chip(0, 0).router
    for key in range(999):
        chip_router.ralloc(key, 0xffffffff)
    # Should complete silently at this occupancy.
    router.Router.check_for_table_supassing_maxiumum_level(machine)
    return 0
def test_partition_production_with_two_subvertices(self):
    """One atom more than fits on a single core must partition into
    exactly two graph.Subvertex instances with no subedges."""
    # Specify a SpiNNaker machine
    machine = lib_machine.Machine('test', 1, 1, 'unwrapped')
    # Work out how many atoms fit on one core (mirrors
    # core.mapper.partition_raw).
    per_atom = pynn.IF_curr_exp.get_requirements_per_atom()
    per_core = machine.get_resources_per_processor()
    atoms_per_core = per_core / per_atom
    oversized = [graph.Vertex(atoms_per_core + 1, pynn.IF_curr_exp)]
    subvertices, subedges = basic_partitioner.BasicPartitioner.partition_raw(
        machine, oversized, dict())
    # Every produced subvertex must be a graph.Subvertex.
    for produced in subvertices:
        self.assertIsInstance(produced, graph.Subvertex)
    # The extra atom forces a second subvertex.
    self.assertEqual(len(subvertices), 2)
    # No edges were defined, so no subedges either.
    self.assertEqual(subedges, [])
def test_redundant_paths_spinn7_via_router_require_turn():
    """Route from chip (0,0) to two subvertices of the same vertex on
    chip (2,3), a route that requires a turn.  The router must report
    the duplicated path as redundant (and nothing as inconsistent),
    and removal must leave a clean table.
    """
    the_machine = machine.Machine('spinn-7', type="spinn4")

    src_vertex_constraints = lib_map.VertexConstraints(x=0, y=0)
    src_vrt = graph.Vertex(1, models.IF_curr_exp,
                           constraints=src_vertex_constraints)
    src_sub_vert = graph.Subvertex(src_vrt, 0, 1)

    dest_vertex_constraints = lib_map.VertexConstraints(x=2, y=3)
    dest_vrt = graph.Vertex(1, models.IF_curr_exp,
                            constraints=dest_vertex_constraints)
    dest_sub_vert = graph.Subvertex(dest_vrt, 0, 1)
    dest_sub_vert2 = graph.Subvertex(dest_vrt, 0, 1)

    # One edge, two subedges: both destination subvertices receive the
    # same traffic, which creates the redundant path.
    edge = graph.Edge(None, src_vrt, dest_vrt)
    sbedge = graph.Subedge(edge, src_sub_vert, dest_sub_vert)
    sbedge2 = graph.Subedge(edge, src_sub_vert, dest_sub_vert2)
    # NOTE: removed the unused alias `dao_object = dao`.

    # Place vertexes in correct cores.
    placements = Placer.place_raw(
        the_machine, [src_sub_vert, dest_sub_vert, dest_sub_vert2])
    dao.placements = placements
    routings = dijkstra_routing.DijkstraRouting.route_raw(
        the_machine, [src_sub_vert, dest_sub_vert, dest_sub_vert2])

    inconsistant_routings, redundant_paths = \
        Router.check_for_inconsistant_routings(the_machine)
    assert(len(redundant_paths) > 0)
    assert(len(inconsistant_routings) == 0)

    Router.redundant_path_removal(redundant_paths, the_machine)
    inconsistant_routings, redundant_paths = \
        Router.check_for_inconsistant_routings(the_machine)
    assert(len(redundant_paths) == 0)
    assert(len(inconsistant_routings) == 0)
def set_hostname(self, hostname):
    """Record the target machine's hostname and build self.machine.

    Virtual vertices are first collected (merging where required) so
    they can be handed to the Machine constructor.  When the machine's
    structure is not in the known-machines table it is interrogated
    dynamically over the network.
    """
    # Determine how many vertexes are virtual.
    virtual_vertexs = list()
    for vertex in self.vertices:
        if vertex.virtual:
            self.check_if_vertex_needs_merging(vertex, virtual_vertexs)

    # Generate and store a machine object.
    # (Idiom fix: `hostname not in` instead of `not hostname in`.)
    if hostname not in machines.machines:
        warningStr = "Structure of target SpiNNaker machine not known.\n"
        warningStr += "Interrogating machine for details of structure."
        logger.warning(warningStr)
        self.machine = machine.Machine(hostname, type="dynamic",
                                       virtual_verts=virtual_vertexs)
    else:
        description = machines.machines[hostname]
        description['virtual_verts'] = virtual_vertexs
        self.machine = machine.Machine(**description)
def test_place_valid_core_assignment(self):
    """ Place a range of designs on machines of different sizes.
    Are only valid cores used, and is every core used at most once?
    """
    # Set dimensions of machine:
    machine_sizes = [[1, 1], [2, 2], [4, 4], [8, 8], [1, 8], [8, 1]]
    # Try each machine size. Place, then check core assignment.
    for m in machine_sizes:
        machine_x, machine_y = m
        load = machine_x * machine_y * 8
        # Make a machine with required number of processors
        machine = lib_machine.Machine('test', machine_x, machine_y,
                                      'unwrapped')
        # Make a vertex to split into subvertices
        vertex = graph.Vertex(load, None)
        # Split vertex into subvertices
        subvertices = list()
        for i in xrange(load):
            subvertex = graph.Subvertex(vertex, i, i)
            subvertices.append(subvertex)
        # Call partition raw which should generate no exception:
        placer.Placer.place_raw(machine, subvertices)
        # Check that every placement was to a valid core:
        for i in subvertices:
            my_x, my_y, my_p = i.placement.processor.get_coordinates()
            self.assertGreaterEqual(
                my_x, 0,
                "Placement Exception - outside valid machine x range")
            self.assertGreaterEqual(
                my_y, 0,
                "Placement Exception - outside valid machine y range")
            self.assertGreaterEqual(
                my_p, 1,
                "Placement Exception - outside valid core range (<1)")
            self.assertLess(
                my_x, machine_x,
                "Placement Exception - outside valid machine x range")
            self.assertLess(
                my_y, machine_y,
                "Placement Exception - outside valid machine y range")
            self.assertLess(
                my_p, 17,
                "Placement Exception - outside valid core range (>16)")
        # Check that every subvertex got assigned somewhere uniquely:
        usage = []
        for i in subvertices:
            my_x, my_y, my_p = i.placement.processor.get_coordinates()
            my_location = [my_x, my_y, my_p]
            # BUG FIX: assertNotIn takes (member, container); the
            # arguments were reversed, so duplicates were never detected.
            self.assertNotIn(
                my_location, usage,
                "Placement Exception - core placement duplicated")
            usage.append(my_location)
def test_maxiumum_test_case_fail_spinn2():
    """1010 entries exceed the router's capacity; the validation must
    raise RouteTableDSGException."""
    machine = lib_machine.Machine('spinn-2', x=2, y=2, type="spinn2")
    chip_router = machine.get_chip(0, 0).router
    for key in range(1010):
        chip_router.ralloc(key, 0xffffffff)
    try:
        router.Router.check_for_table_supassing_maxiumum_level(machine)
    except exceptions.RouteTableDSGException as e:
        # Expected: the over-full table was rejected.
        return 0
    else:
        # The check passed when it should not have.
        raise exceptions.SystemException("router was passed when had over"
                                         " the maxiumum level of entries")
def test_inconsistant_table_entry_success():
    """Ten entries under one shared mask, each with a random route,
    should yield no inconsistent routings."""
    machine = lib_machine.Machine('spinn-2', x=2, y=2, type="spinn2")
    first_chip_router = machine.get_chip(0, 0).router
    used_masks = {0x0000000f: []}
    for key in range(10):
        entry = first_chip_router.ralloc(key, 0x0000000f)
        used_masks[0x0000000f].append(key)
        entry.route = random.randint(0, 6)
    inconsistant_routings, redundant_paths = \
        router.Router.check_for_inconsistant_routings(machine, used_masks)
    assert(len(inconsistant_routings) == 0)
def test_partition_multiple_productions_with_multiple_subvertices_and_multiple_subedges( self): """ Calculates max atoms per vertex based on a vertex model and then it creates 48 pre vertices and 48 pro vertices. This test checks if the subvertices are an instance of graph.Subvertex, subedges are an instance of graph.SubEdge if total number of subvertices (is i+j+2)*48 and subedges is (i+1)*(j+1)*48*48. """ # Specify a SpiNNaker machine machine = lib_machine.Machine('test', 1, 1, 'wrapped') # Calculate the max atoms per core # The following code has been taken from core.mapper.partition_raw requirements = pynn.IF_curr_exp.get_requirements_per_atom() resources = machine.get_resources_per_processor() atoms = resources / requirements print "Long Test: (test_partition_multiple_productions_with_multiple_subvertices_and_multiple_subedges)" for i in range(16): # i is the number of subvertices of a prev vertex print " progress: set", i + 1, "of 16" for j in range( 16): # j is the number of subvertices of a pro vertex preVertices = [] proVertices = [] for k in range(48): # k is the number of vertices preVertices.append( graph.Vertex(atoms * (i + 1), pynn.IF_curr_exp)) proVertices.append( graph.Vertex(atoms * (j + 1), pynn.IF_curr_exp)) for k1 in range(48): for k2 in range(48): edges = graph.Edge(None, preVertices[k1], proVertices[k2]) max_atoms_per_core = dict() subvertices, subedges = \ basic_partitioner.BasicPartitioner.partition_raw(machine, preVertices + proVertices, max_atoms_per_core) # subvertices should be an instance of the Subvertex class for subvertex in subvertices: self.assertIsInstance(subvertex, graph.Subvertex) # subedges should be an instance of the Subvedge class for subedge in subedges: self.assertIsInstance(subedge, graph.Subedge) # subvertices should be (i+1+j+1)*48 self.assertEqual(len(subvertices), (i + j + 2) * 48) # subedges should be number of preSubvertices times number of proSubvertices self.assertEqual(len(subedges), (i + 1) * (j + 1) * 48 * 48)
def test_place_zero_subvertices(self):
    """Placing an empty subvertex list must not raise."""
    # Scaffolding kept from the original test (never assigned again).
    exception = None
    # 1x1 machine (16 processors); load value unused by this case.
    x, y = 1, 1
    load = 1
    machine = lib_machine.Machine('test', x, y, 'unwrapped')
    # An empty subvertex list should place without error.
    subvertices = list()
    placer.Placer.place_raw(machine, subvertices)
def test_redundant_paths_spinn7_via_router_same_chip():
    """Route from core (0,0,2) to two destination cores on the same chip,
    (0,0,5) and (0,0,6); the duplicated path should be reported as
    redundant, removed, and the surviving table entries merged."""
    the_machine = machine.Machine('spinn-7', type="spinn4")
    src_vertex_constraints = lib_map.VertexConstraints(x=0, y=0, p=2)
    src_vrt = graph.Vertex(1, models.IF_curr_exp,
                           constraints=src_vertex_constraints)
    src_sub_vert = graph.Subvertex(src_vrt, 0, 1)
    dest_vertex_constraints = lib_map.VertexConstraints(x=0, y=0, p=5)
    dest_vrt = graph.Vertex(1, models.IF_curr_exp,
                            constraints=dest_vertex_constraints)
    dest_sub_vert = graph.Subvertex(dest_vrt, 0, 1)
    # NOTE(review): this binding is overwritten a few lines below; the
    # constructor call is kept because it may register a subvertex with
    # dest_vrt -- confirm whether this extra Subvertex is intentional.
    dest_sub_vert2 = graph.Subvertex(dest_vrt, 0, 1)
    dest_vertex_constraints2 = lib_map.VertexConstraints(x=0, y=0, p=6)
    dest_vrt2 = graph.Vertex(1, models.IF_curr_exp,
                             constraints=dest_vertex_constraints2)
    dest_sub_vert2 = graph.Subvertex(dest_vrt2, 0, 1)
    edge = graph.Edge(None, src_vrt, dest_vrt)
    sbedge = graph.Subedge(edge, src_sub_vert, dest_sub_vert)
    sbedge2 = graph.Subedge(edge, src_sub_vert, dest_sub_vert2)
    # NOTE(review): dao_object is never used afterwards.
    dao_object = dao
    #place vertexes in correct cores
    placements = Placer.place_raw(the_machine,
                                  [src_sub_vert, dest_sub_vert,
                                   dest_sub_vert2])
    dao.placements = placements
    routings = dijkstra_routing.\
        DijkstraRouting.route_raw(the_machine,
                                  [src_sub_vert, dest_sub_vert,
                                   dest_sub_vert2])
    # Both destinations share the chip, so a redundant path is expected
    # and no inconsistencies should be present.
    inconsistant_routings, redundant_paths = \
        Router.check_for_inconsistant_routings(the_machine)
    assert(len(redundant_paths) > 0)
    assert(len(inconsistant_routings) == 0)
    #print "entry {} and entry {}".format(redundant_paths[0][2].route, redundant_paths[0][3].route)
    Router.redundant_path_removal(redundant_paths, the_machine)
    inconsistant_routings, redundant_paths = \
        Router.check_for_inconsistant_routings(the_machine)
    assert(len(redundant_paths) == 0)
    assert(len(inconsistant_routings) == 0)
    # After removal, each key on chip (0,0) maps to exactly one merged
    # entry.  6144 presumably encodes the two destination cores' route
    # bits combined -- TODO confirm against the router bit layout.
    for key in the_machine.chips[0][0].router.cam.keys():
        entry_list = the_machine.chips[0][0].router.cam.get(key)
        assert(len(entry_list) == 1)
        #print "entry is {}".format(entry_list[0].route)
        assert(entry_list[0].route == 6144)
def test_parition_production_with_zero_atoms(self):
    """A vertex with zero atoms partitions into no subvertices and no
    subedges (pacman103.core.mapper.partition_raw)."""
    test_machine = lib_machine.Machine('test', 1, 1, 'unwrapped')
    empty_vertices = [graph.Vertex(0, pynn.IF_curr_exp)]
    subvertices, subedges = basic_partitioner.BasicPartitioner.partition_raw(
        test_machine, empty_vertices, dict())
    # Nothing should have been produced.
    self.assertEqual((subvertices, subedges), ([], []))
    # Vacuously true on the empty result, kept for parity with the
    # sibling partition tests.
    for subvertex in subvertices:
        self.assertIsInstance(subvertex, graph.Subvertex)
def test_partition_production_with_multiple_subvertices_and_multiple_subedges(
        self):
    """Two vertices sized at (i+1) and (j+1) core-loads joined by one
    edge must partition into i+j+2 subvertices and (i+1)*(j+1)
    subedges, for all i, j in 0..15."""
    # Specify a SpiNNaker machine
    machine = lib_machine.Machine('test', 1, 1, 'unwrapped')
    # Per-core atom capacity (mirrors core.mapper.partition_raw).
    per_atom = pynn.IF_curr_exp.get_requirements_per_atom()
    per_core = machine.get_resources_per_processor()
    atoms = per_core / per_atom
    for i in range(16):
        for j in range(16):
            vertex1 = graph.Vertex(atoms * (i + 1), pynn.IF_curr_exp)
            vertex2 = graph.Vertex(atoms * (j + 1), pynn.IF_curr_exp)
            # Single edge from vertex1 to vertex2.
            edge = graph.Edge(None, vertex1, vertex2)
            subvertices1, subedges1 = \
                basic_partitioner.BasicPartitioner.partition_raw(
                    machine, [vertex1, vertex2], dict())
            for subvertex in subvertices1:
                self.assertIsInstance(subvertex, graph.Subvertex)
            for subedge in subedges1:
                self.assertIsInstance(subedge, graph.Subedge)
            # i+1 subvertices from vertex1 plus j+1 from vertex2.
            self.assertEqual(len(subvertices1), i + j + 2)
            # Every pre-subvertex connects to every post-subvertex.
            self.assertEqual(len(subedges1), (i + 1) * (j + 1))
def test_inconsistant_table_entry_fail_multiple_masks_combined():
    """Keys 17 and 18 (route 5) are listed in used_masks alongside ten
    unlisted keys carrying route 6; the checker should still report no
    inconsistent routings."""
    machine = lib_machine.Machine('spinn-2', x=2, y=2, type="spinn2")
    first_chip_router = machine.get_chip(0, 0).router
    entry = first_chip_router.ralloc(18, 0x0000000f)
    entry.route = 5
    for key in range(10):
        entry = first_chip_router.ralloc(key, 0x0000000f)
        entry.route = 6
    entry = first_chip_router.ralloc(17, 0x0000000f)
    entry.route = 5
    # Only keys 18 and 17 are declared under the shared mask.
    used_masks = {0x0000000f: [18, 17]}
    inconsistant_routings, redundant_paths = \
        router.Router.check_for_inconsistant_routings(machine, used_masks)
    assert(len(inconsistant_routings) == 0)
def test_place_single_subvertex(self):
    """Placing a single subvertex must not raise."""
    # Scaffolding kept from the original test (never assigned again).
    exception = None
    # 1x1 machine (16 processors), one-atom vertex.
    x, y = 1, 1
    load = 1
    machine = lib_machine.Machine('test', x, y, 'unwrapped')
    parent_vertex = graph.Vertex(load, None)
    # A single subvertex covering atom 0.
    subvertices = [graph.Subvertex(parent_vertex, 0, 0)]
    placer.Placer.place_raw(machine, subvertices)
def test_partition_production_with_multiple_subvertices_and_self_projected_subedges(
        self):
    """A vertex of (i+1) core-loads with an edge back onto itself must
    yield i+1 subvertices and (i+1)**2 subedges, for i in 0..15."""
    # Specify a SpiNNaker machine
    machine = lib_machine.Machine('test', 1, 1, 'unwrapped')
    # Per-core atom capacity (mirrors core.mapper.partition_raw).
    per_atom = pynn.IF_curr_exp.get_requirements_per_atom()
    per_core = machine.get_resources_per_processor()
    atoms = per_core / per_atom
    for i in range(16):
        vertex1 = graph.Vertex(atoms * (i + 1), pynn.IF_curr_exp)
        # Generate a self projected edge
        edge = graph.Edge(None, vertex1, vertex1)
        subvertices, subedges = \
            basic_partitioner.BasicPartitioner.partition_raw(
                machine, [vertex1], dict())
        for subvertex in subvertices:
            self.assertIsInstance(subvertex, graph.Subvertex)
        for subedge in subedges:
            self.assertIsInstance(subedge, graph.Subedge)
        # One subvertex per core-load.
        self.assertEqual(len(subvertices), (i + 1))
        # Self-projection goes all-to-all across the subvertices.
        self.assertEqual(len(subedges), (i + 1)**2)
def test_partition_production_with_self_projected_edge(self):
    """ Calculates max atoms per vertex based on the vertex model and then
    it creates a vertex and adds an edge which projects back to itself.
    This test checks if the subvertices are an instance of
    graph.Subvertex, subedges are an instance of graph.Subedge and the
    total number of subvertices and subedges is 1.
    """
    # Specify a SpiNNaker machine
    machine = lib_machine.Machine('test', 1, 1, 'unwrapped')
    # Calculate the max atoms per core
    # The following code has been taken from core.mapper.partition_raw
    requirements = pynn.IF_curr_exp.get_requirements_per_atom()
    resources = machine.get_resources_per_processor()
    atoms = resources / requirements
    vertex1 = graph.Vertex(atoms, pynn.IF_curr_exp)
    # BUG FIX: removed leftover debugger statement `pdb.set_trace` --
    # it was missing its call parentheses, so it was a no-op anyway.
    edge = graph.Edge(None, vertex1, vertex1)
    max_atoms_per_core = dict()
    subvertices, subedges = basic_partitioner.BasicPartitioner.partition_raw(
        machine, [vertex1], max_atoms_per_core)
    # subvertices should be an instance of the Subvertex class
    for subvertex in subvertices:
        self.assertIsInstance(subvertex, graph.Subvertex)
    # subedges should be an instance of the Subedge class
    for subedge in subedges:
        self.assertIsInstance(subedge, graph.Subedge)
    # Everything fits on one core, so exactly one subvertex...
    self.assertEqual(len(subvertices), 1)
    # ...and the self-edge maps to exactly one subedge.
    self.assertEqual(len(subedges), 1)
def test_palloc_exception(self):
    """More subvertices than the machine has processors must raise a
    PallocException."""
    # Scaffolding kept from the original test (never assigned again).
    exception = None
    with self.assertRaises(exceptions.PallocException):
        # One more subvertex than the 16 cores of a 1x1 machine.
        x, y = 1, 1
        overload = x * y * 16 + 1
        machine = lib_machine.Machine('test', x, y, 'unwrapped')
        vertex = graph.Vertex(overload, None)
        subvertices = [graph.Subvertex(vertex, i, i)
                       for i in xrange(overload)]
        # Placement must overflow the machine and raise.
        placer.Placer.place_raw(machine, subvertices)
def test_routing(self):
    """Build a random src->dst projection pattern across a spinn4
    machine, place and route it, then trace every routing key through
    the routing tables and check the destinations reached match the
    destinations recorded in the graph.
    """
    the_machine = machine.Machine('host', type="spinn4")
    machine_size = len(the_machine.processors)

    # One single-atom vertex per processor.
    vertexes = list()
    for i in xrange(machine_size):
        v = graph.Vertex(1, None)
        v.model = models.IF_curr_exp
        vertexes.append(v)

    # Pick random source and destination index sets sized so that
    # |src| * |dst| is approximately the requested projection count.
    number_of_projections = (machine_size * machine_size *
                             test_connectivity_percentage)
    src_vertexes = random.sample(
        xrange(machine_size),
        int(math.ceil(math.sqrt(number_of_projections))))
    dst_vertexes = random.sample(
        xrange(machine_size),
        int(math.floor(math.sqrt(number_of_projections))))
    src_vertexes.sort()
    dst_vertexes.sort()

    # Fully connect the source set to the destination set.
    edges = list()
    for i in xrange(len(src_vertexes)):
        for j in xrange(len(dst_vertexes)):
            src = vertexes[src_vertexes[i]]
            # BUG FIX: destinations were taken from src_vertexes[j],
            # wiring sources back to sources and leaving dst_vertexes
            # entirely unused; index through dst_vertexes instead.
            dst = vertexes[dst_vertexes[j]]
            edges.append(graph.Edge(src, dst))

    subvertexes = list()
    for i in xrange(machine_size):
        subvertexes.append(graph.Subvertex(vertexes[i], 0, 0, None))
    sb_edges = list()
    for i in xrange(len(edges)):
        pre = edges[i].prevertex.subvertices[0]
        post = edges[i].postvertex.subvertices[0]
        sb_edges.append(graph.Subedge(edges[i], pre, post))

    radial_placer.RadialPlacer.place_raw(the_machine, subvertexes)
    dijkstra_routing.DijkstraRouting.route_raw(the_machine, subvertexes)

    # The machine object now contains all the routes.  Check that, from
    # each source, they reach exactly the intended destination(s).
    for k in xrange(len(src_vertexes)):
        # Take the list of outgoing subedges from this source subvertex.
        src_sbvrt = subvertexes[src_vertexes[k]]
        out_sbedges_lst = src_sbvrt.out_subedges
        # Map each routing key to its list of expected destinations.
        rt_key_list = dict()
        for i in range(len(out_sbedges_lst)):
            # Key and mask for this subedge...
            key, mask = src_sbvrt.vertex.model.generate_routing_info(
                out_sbedges_lst[i])
            # ...and the coordinates of the subedge's destination core.
            dst = out_sbedges_lst[
                i].postsubvertex.placement.processor.get_coordinates()
            if key in rt_key_list:
                # Known key: append the destination if not already there.
                if dst not in rt_key_list[key]:
                    rt_key_list[key].append(dst)
            else:
                # New key: start its destination list.
                rt_key_list.update({key: [dst]})
        # rt_key_list: {'key1': [dst1, dst2, ...], 'key2': [...], ...}
        for i in range(len(rt_key_list.keys())):
            key = rt_key_list.keys()[i]
            desired_dsts = rt_key_list[key]
            desired_dsts.sort()
            # Trace the routing tables from the source chip for this key.
            test = TestRoutes(the_machine, src_sbvrt, key)
            test.TraceRoute()
            dsts = test.dsts
            dsts.sort()
            # If actual != desired, the routing made a mess.
            self.assertEqual(dsts, desired_dsts)
def test_redundent_paths_spinn7():
    """Manually build two parallel routes for key 0 on a spinn4 board --
    one straight up column 0, one detouring through column 1 -- then
    check the router detects the redundancy, removes the detour, and
    merges the surviving entries at the destination chip.
    """
    the_machine = machine.Machine('spinn-7', type="spinn4")

    # First route: (0,0) -north-> (0,1) -north-> (0,2) -north-> (0,3),
    # terminating internally.
    chip_router = the_machine.chips[0][0].router
    routing_entry = chip_router.ralloc(0, 0xffffffff)
    routing_entry.route = 1 << 2  # north
    old_entry = routing_entry
    chip_router = the_machine.chips[0][1].router
    routing_entry = chip_router.ralloc(0, 0xffffffff)
    routing_entry.route = 1 << 2  # north
    routing_entry.previous_router_entry = old_entry
    routing_entry.previous_router_entry_direction = 1 << 2
    old_entry = routing_entry
    chip_router = the_machine.chips[0][2].router
    routing_entry = chip_router.ralloc(0, 0xffffffff)
    routing_entry.route = 1 << 2  # north
    routing_entry.previous_router_entry = old_entry
    routing_entry.previous_router_entry_direction = 1 << 2
    old_entry = routing_entry
    chip_router = the_machine.chips[0][3].router
    routing_entry = chip_router.ralloc(0, 0xffffffff)
    routing_entry.route = 1 << 8  # internal
    routing_entry.previous_router_entry = old_entry
    routing_entry.previous_router_entry_direction = 1 << 2

    # Second route: (0,0) -north-east-> (1,1) -north-> (1,2) -north->
    # (1,3) -west-> (0,3), terminating internally.
    chip_router = the_machine.chips[0][0].router
    routing_entry = chip_router.ralloc(0, 0xffffffff)
    routing_entry.route = 1 << 1  # north east
    old_entry = routing_entry
    chip_router = the_machine.chips[1][1].router
    routing_entry = chip_router.ralloc(0, 0xffffffff)
    routing_entry.route = 1 << 2  # north
    routing_entry.previous_router_entry = old_entry
    routing_entry.previous_router_entry_direction = 1 << 1
    old_entry = routing_entry
    chip_router = the_machine.chips[1][2].router
    routing_entry = chip_router.ralloc(0, 0xffffffff)
    routing_entry.route = 1 << 2  # north
    routing_entry.previous_router_entry = old_entry
    routing_entry.previous_router_entry_direction = 1 << 2
    old_entry = routing_entry
    chip_router = the_machine.chips[1][3].router
    routing_entry = chip_router.ralloc(0, 0xffffffff)
    routing_entry.route = 1 << 7  # west
    routing_entry.previous_router_entry = old_entry
    routing_entry.previous_router_entry_direction = 1 << 2
    old_entry = routing_entry
    chip_router = the_machine.chips[0][3].router
    routing_entry = chip_router.ralloc(0, 0xffffffff)
    routing_entry.route = 1 << 10  # internal
    routing_entry.previous_router_entry = old_entry
    # BUG FIX: this was `1 < 7` (the boolean True), not the direction
    # bit `1 << 7` used for the westward hop everywhere else.
    routing_entry.previous_router_entry_direction = 1 << 7
    old_entry = routing_entry

    inconsistant_routings, redundant_paths = \
        Router.check_for_inconsistant_routings(the_machine)
    assert(len(inconsistant_routings) == 0)
    assert(len(redundant_paths) > 0)

    Router.redundant_path_removal(redundant_paths, the_machine)
    inconsistant_routings, redundant_paths = \
        Router.check_for_inconsistant_routings(the_machine)
    assert(len(redundant_paths) == 0)
    assert(len(inconsistant_routings) == 0)

    # Only the column-0 route survives; the detour chips are emptied.
    assert(len(the_machine.chips[0][0].router.cam) == 1)
    assert(len(the_machine.chips[0][1].router.cam) == 1)
    assert(len(the_machine.chips[0][2].router.cam) == 1)
    assert(len(the_machine.chips[0][3].router.cam) == 1)
    assert(len(the_machine.chips[1][1].router.cam) == 0)
    assert(len(the_machine.chips[1][2].router.cam) == 0)
    assert(len(the_machine.chips[1][3].router.cam) == 0)
    # The two internal targets merge into a single entry at (0,3)...
    assert(the_machine.chips[0][3].router.cam.get(0 & 0xffffffff)[0].route ==
           (1 << 10) + (1 << 8))
    # ...and (0,0) keeps only the northward link.
    assert(the_machine.chips[0][0].router.cam.get(0 & 0xffffffff)[0].route ==
           (1 << 2))
def try_creating_route(source, dests, machine_id):
    """Create vertexes, subverts and placements for one source and a set
    of destination cores, run Dijkstra routing, and report the outcome.

    Returns a 5-tuple (src_sub_vert, dest_subverts, the_machine,
    subedges, fails) where fails is None on success, or a list of
    [src_sub_vert, dests, message] entries when routing failed.
    """
    # Initialise the machine from its stored description.
    description = machines.machines[machine_id]
    the_machine = machine.Machine(**description)

    subedges = dict()
    sub_verts = list()

    src_vertex_constraints = lib_map.VertexConstraints(x=source[0],
                                                       y=source[1],
                                                       p=source[2])
    src_vrt = models.IF_curr_exp(1, constraints=src_vertex_constraints)
    src_sub_vert = graph.Subvertex(src_vrt, 0, 1, 0)
    sub_verts.append(src_sub_vert)

    # Place the source subvertex on its constrained core.
    placement_chip = the_machine.get_chip(
        src_sub_vert.vertex.constraints.x, src_sub_vert.vertex.constraints.y)
    placement_processor = placement_chip.get_processor(
        src_sub_vert.vertex.constraints.p)
    placement = lib_map.Placement(src_sub_vert, placement_processor)
    src_sub_vert.placement = placement

    # Add a subvertex and an edge for each destination vertex.
    dest_subverts = list()
    for dest in dests:
        dest_vertex_constraints = lib_map.VertexConstraints(x=dest[0],
                                                            y=dest[1],
                                                            p=dest[2])
        dest_vrt = models.IF_curr_exp(1, constraints=dest_vertex_constraints)
        dest_sub_vert = graph.Subvertex(dest_vrt, 0, 1, 0)
        edge = graph.Edge(src_vrt, dest_vrt)
        sbedge = graph.Subedge(edge, src_sub_vert, dest_sub_vert)
        # Give the subedge its routing key and mask.
        key, mask = src_sub_vert.vertex.generate_routing_info(sbedge)
        sbedge.key = key
        sbedge.mask = mask
        sbedge.key_mask_combo = key & mask
        subedges[dest_sub_vert] = sbedge
        sub_verts.append(dest_sub_vert)
        dest_subverts.append(dest_sub_vert)
        # Place the destination subvertex on its constrained core.
        placement_chip = the_machine.get_chip(
            dest_sub_vert.vertex.constraints.x,
            dest_sub_vert.vertex.constraints.y)
        placement_processor = placement_chip.get_processor(
            dest_sub_vert.vertex.constraints.p)
        placement = lib_map.Placement(dest_sub_vert, placement_processor)
        dest_sub_vert.placement = placement

    fails = list()
    # Try to route between the verts.
    try:
        dijkstra_routing.DijkstraRouting.route_raw(the_machine, sub_verts)
        # BUG FIX: previously returned the `machine` module rather than
        # the machine instance built above.
        return src_sub_vert, dest_subverts, the_machine, subedges, None
    except Exception:
        # BUG FIX: traceback.print_exc takes a limit, not an exception,
        # and its None return value was being printed.
        traceback.print_exc()
        # BUG FIX: previously returned list.append(...)'s None instead
        # of the populated failure list, making success and failure
        # indistinguishable to the caller.
        fails.append([src_sub_vert, dests, "failed to generate a route"])
        return src_sub_vert, dest_subverts, the_machine, subedges, fails
def new_machine():
    """Build and return a fresh 2x2 unwrapped test machine named "m"."""
    built_machine = lib_machine.Machine(hostname="m", x=2, y=2,
                                        type="unwrapped")
    return built_machine
def test_routing_simple(self):
    """Route a single edge between two placed subvertices, then trace
    the routing key through the tables and check the destination
    matches the graph.
    """
    the_machine = machine.Machine('host', type="spinn4")
    src_vrt = graph.Vertex(1, None)
    src_vrt.model = models.IF_curr_exp
    dst_vrt = graph.Vertex(1, None)
    dst_vrt.model = models.IF_curr_exp
    edge = graph.Edge(src_vrt, dst_vrt)
    src_sbvrt = graph.Subvertex(src_vrt, 0, 0, None)
    dst_sbvrt = graph.Subvertex(dst_vrt, 0, 0, None)
    sbedge = graph.Subedge(edge, src_sbvrt, dst_sbvrt)
    # BUG FIX: removed `radial_placer = radial_placer.RadialPlacer(None)`,
    # which rebound the module name to an instance and broke the
    # `radial_placer.RadialPlacer.place_raw` lookup on the next line.
    radial_placer.RadialPlacer.place_raw(the_machine,
                                         [src_sbvrt, dst_sbvrt])
    dijkstra_routing.DijkstraRouting.route_raw(the_machine,
                                               [src_sbvrt, dst_sbvrt])
    # The machine object now contains all the routes; verify that each
    # routing key from the source reaches exactly the destination(s)
    # recorded in the graph.
    out_sbedges_lst = src_sbvrt.out_subedges
    # Map each routing key to its list of expected destinations.
    rt_key_list = dict()
    for i in range(len(out_sbedges_lst)):
        # Key and mask for this subedge...
        key, mask = src_sbvrt.vertex.model.generate_routing_info(
            out_sbedges_lst[i])
        # ...and the coordinates of the subedge's destination core.
        dst = out_sbedges_lst[
            i].postsubvertex.placement.processor.get_coordinates()
        if key in rt_key_list:
            # Known key: append the destination if not already present.
            if dst not in rt_key_list[key]:
                rt_key_list[key].append(dst)
        else:
            # New key: start its destination list.
            rt_key_list.update({key: [dst]})
    # rt_key_list: {'key1': [dst1, dst2, ...], 'key2': [...], ...}
    for i in range(len(rt_key_list.keys())):
        key = rt_key_list.keys()[i]
        desired_dsts = rt_key_list[key]
        desired_dsts.sort()
        # Trace the routing tables from the source chip for this key.
        test = TestRoutes(the_machine, src_sbvrt, key)
        test.TraceRoute()
        dsts = test.dsts
        dsts.sort()
        # If actual != desired, the routing made a mess.
        self.assertEqual(dsts, desired_dsts)