def __call__(self, machine, extra_monitor_cores, placements):
    """ Build the system multicast routing tables, one board at a time.

    :param ~spinn_machine.Machine machine:
    :param dict(tuple(int,int),ExtraMonitorSupportMachineVertex) \
        extra_monitor_cores:
    :param ~pacman.model.placements.Placements placements:
    :rtype: tuple(MulticastRoutingTables, dict(tuple(int,int),int),
        dict(tuple(int,int),int))
    """
    # pylint: disable=attribute-defined-outside-init
    self._machine = machine
    self._placements = placements
    self._monitors = extra_monitor_cores
    self._routing_tables = MulticastRoutingTables()
    self._key_to_destination_map = {}
    self._time_out_keys_by_board = {}

    # One unit of progress per Ethernet-connected chip (i.e. per board)
    progress = ProgressBar(
        machine.ethernet_connected_chips,
        "Generating routing tables for data in system processes")
    for ethernet_chip in progress.over(machine.ethernet_connected_chips):
        self._add_routing_entries(
            ethernet_chip, self._generate_routing_tree(ethernet_chip))

    return (self._routing_tables, self._key_to_destination_map,
            self._time_out_keys_by_board)
def test_new_multicast_routing_tables(self):
    """Round-trip a pair of tables through the JSON serialisation."""
    key_combo = 0xff35
    mask = 0xffff
    proc_ids = list(range(18))
    link_ids = list(range(6))
    entry_a = MulticastRoutingEntry(
        key_combo, mask, proc_ids, link_ids, True)
    entry_b = MulticastRoutingEntry(
        key_combo - 1, mask - 1, proc_ids, link_ids, True)
    t1 = UnCompressedMulticastRoutingTable(0, 0, [entry_a])
    t2 = UnCompressedMulticastRoutingTable(1, 0, [entry_b])
    mrt = [t1, t2]
    tables = MulticastRoutingTables(mrt)

    retrieved_tables = tables.routing_tables
    self.assertEqual(len(retrieved_tables), len(mrt))
    for tab in retrieved_tables:
        self.assertIn(tab, mrt)
    self.assertEqual(tables.get_routing_table_for_chip(0, 0), t1)
    self.assertEqual(tables.get_routing_table_for_chip(1, 0), t2)
    self.assertIsNone(tables.get_routing_table_for_chip(2, 0))

    # Serialise, validate against the schema, then rebuild and re-check
    json_obj = to_json(tables)
    file_format_schemas.validate(json_obj, "routing_tables.json")
    new_tables = from_json(json_obj)
    self.assertEqual(new_tables.get_routing_table_for_chip(0, 0), t1)
    self.assertEqual(new_tables.get_routing_table_for_chip(1, 0), t2)
    self.assertIsNone(new_tables.get_routing_table_for_chip(2, 0))
def __call__(self, router_tables):
    """ Merge the entries of each routing table, rejecting masks with holes
    and tables that cannot be made small enough.

    :param router_tables: the tables to compress
    :return: the compressed tables
    :raises PacmanRoutingException:
        if a mask has holes, or if a compressed table still has more than
        1023 non-defaultable entries
    """
    tables = MulticastRoutingTables()
    previous_masks = dict()

    progress = ProgressBar(
        len(router_tables.routing_tables) * 2,
        "Compressing Routing Tables")

    # Create all masks without holes (a run of 1s followed by 0s)
    allowed_masks = [_32_BITS - ((2**i) - 1) for i in range(33)]

    # Check that none of the masks have "holes" e.g. 0xFFFF0FFF has a hole
    for router_table in router_tables.routing_tables:
        for entry in router_table.multicast_routing_entries:
            if entry.mask not in allowed_masks:
                raise PacmanRoutingException(
                    "Only masks without holes are allowed in tables for"
                    " MallocBasedRouteMerger (disallowed mask={})".format(
                        hex(entry.mask)))

    for router_table in progress.over(router_tables.routing_tables):
        new_table = self._merge_routes(router_table, previous_masks)
        tables.add_routing_table(new_table)
        # Defaultable entries need no table space, so don't count them
        n_entries = len([
            entry for entry in new_table.multicast_routing_entries
            if not entry.defaultable])
        # (debug print of the size reduction removed; the sibling
        # BasicRouteMerger keeps the same statement commented out)
        if n_entries > 1023:
            raise PacmanRoutingException(
                "Cannot make table small enough: {} entries".format(
                    n_entries))

    return tables
def __call__(self, router_tables):
    """ Merge the entries of each routing table, rejecting masks with holes
    and tables that cannot be made small enough.

    :param router_tables: the tables to compress
    :return: the compressed tables
    :raises PacmanRoutingException:
        if a mask has holes, or if a compressed table still has more than
        1023 non-defaultable entries
    """
    merged_tables = MulticastRoutingTables()
    previous_masks = dict()

    progress = ProgressBar(
        len(router_tables.routing_tables) * 2,
        "Compressing Routing Tables")

    # Masks with no "holes" are a run of 1s followed by 0s;
    # e.g. 0xFFFF0FFF has a hole and is disallowed
    allowed_masks = [_32_BITS - ((2 ** i) - 1) for i in range(33)]
    for router_table in router_tables.routing_tables:
        for entry in router_table.multicast_routing_entries:
            if entry.mask not in allowed_masks:
                raise PacmanRoutingException(
                    "Only masks without holes are allowed in tables for"
                    " BasicRouteMerger (disallowed mask={})".format(
                        hex(entry.mask)))

    for router_table in progress.over(router_tables.routing_tables):
        new_table = self._merge_routes(router_table, previous_masks)
        merged_tables.add_routing_table(new_table)
        # Defaultable entries need no table space, so don't count them
        n_entries = sum(
            1 for entry in new_table.multicast_routing_entries
            if not entry.defaultable)
        if n_entries > 1023:
            raise PacmanRoutingException(
                "Cannot make table small enough: {} entries".format(
                    n_entries))

    return merged_tables
def __call__(self, router_tables, target_length=None):
    """ Compress each routing table using the rig minimiser.

    :param router_tables: the tables to compress
    :param target_length: the number of entries to aim for, or None
    :return: the compressed tables
    """
    compressed = MulticastRoutingTables()

    progress = ProgressBar(
        router_tables.routing_tables, "Compressing routing Tables")

    for table in progress.over(router_tables.routing_tables):
        # Convert to the rig representation, minimise, convert back
        rig_entries = self._convert_to_mundy_format(table)
        minimised = rigs_compressor.minimise(rig_entries, target_length)
        compressed.add_routing_table(
            self._convert_to_pacman_router_table(
                minimised, table.x, table.y))

    return compressed
def __call__(self, router_tables, target_length=None):
    """ Compress each routing table using the rig minimiser.

    :param router_tables: the tables to compress
    :param target_length: the number of entries to aim for, or None
    :return: the compressed tables
    """
    out_tables = MulticastRoutingTables()
    bar = ProgressBar(router_tables.routing_tables,
                      "Compressing routing Tables")

    for src_table in bar.over(router_tables.routing_tables):
        # Route entries are minimised in the rig format and then
        # translated back into a pacman table for the same chip
        minimised_entries = rigs_compressor.minimise(
            self._convert_to_mundy_format(src_table), target_length)
        pacman_table = self._convert_to_pacman_router_table(
            minimised_entries, src_table.x, src_table.y)
        out_tables.add_routing_table(pacman_table)

    return out_tables
def test_new_multicast_routing_tables(self):
    """Check construction and per-chip lookup of a table collection."""
    key_combo = 0xff35
    mask = 0xffff
    proc_ids = list(range(18))
    link_ids = list(range(6))
    entry_a = MulticastRoutingEntry(
        key_combo, mask, proc_ids, link_ids, True)
    entry_b = MulticastRoutingEntry(
        key_combo - 1, mask - 1, proc_ids, link_ids, True)
    t1 = MulticastRoutingTable(0, 0, [entry_a])
    t2 = MulticastRoutingTable(1, 0, [entry_b])
    mrt = [t1, t2]
    tables = MulticastRoutingTables(mrt)

    retrieved_tables = tables.routing_tables
    self.assertEqual(len(retrieved_tables), len(mrt))
    for tab in retrieved_tables:
        self.assertIn(tab, mrt)
    self.assertEqual(tables.get_routing_table_for_chip(0, 0), t1)
    self.assertEqual(tables.get_routing_table_for_chip(1, 0), t2)
    self.assertIsNone(tables.get_routing_table_for_chip(2, 0))
def test_new_multicast_routing_tables(self):
    """Check construction and per-chip lookup of a table collection."""
    key_combo, mask = 0xff35, 0xffff
    proc_ids = [i for i in range(18)]
    link_ids = [i for i in range(6)]
    first_entry = MulticastRoutingEntry(
        key_combo, mask, proc_ids, link_ids, True)
    second_entry = MulticastRoutingEntry(
        key_combo - 1, mask - 1, proc_ids, link_ids, True)
    table_00 = MulticastRoutingTable(0, 0, [first_entry])
    table_10 = MulticastRoutingTable(1, 0, [second_entry])
    all_tables = [table_00, table_10]
    tables = MulticastRoutingTables(all_tables)

    # Every table put in must come back out, and nothing extra
    fetched = tables.routing_tables
    self.assertEqual(len(fetched), len(all_tables))
    for tab in fetched:
        self.assertIn(tab, all_tables)

    # Per-chip lookup finds the right table; absent chips give None
    self.assertEqual(tables.get_routing_table_for_chip(0, 0), table_00)
    self.assertEqual(tables.get_routing_table_for_chip(1, 0), table_10)
    self.assertEqual(tables.get_routing_table_for_chip(2, 0), None)
def range_compressor(router_tables, accept_overflow=True):
    """ Compress every table with the range compressor.

    :param MulticastRoutingTables router_tables:
    :param bool accept_overflow:
        A flag which should only be used in testing to stop raising an
        exception if result is too big
    :rtype: MulticastRoutingTables
    :raises MinimisationFailedError:
        if ``accept_overflow`` is False and a compressed table is still
        bigger than the router can hold
    """
    if accept_overflow:
        message = "Precompressing tables using Range Compressor"
    else:
        message = "Compressing tables using Range Compressor"
    progress = ProgressBar(len(router_tables.routing_tables), message)
    compressor = RangeCompressor()
    compressed_tables = MulticastRoutingTables()
    for table in progress.over(router_tables.routing_tables):
        new_table = compressor.compress_table(table)
        if (new_table.number_of_entries > Machine.ROUTER_ENTRIES
                and not accept_overflow):
            raise MinimisationFailedError(
                f"The routing table {table.x} {table.y} with "
                f"{table.number_of_entries} entries after compression "
                f"still has {new_table.number_of_entries} so will not fit")
        compressed_tables.add_routing_table(new_table)
    # Lazy %-style args so the message is only formatted if emitted
    logger.info(
        "Ranged compressor resulted with the largest table of size %s",
        compressed_tables.max_number_of_entries)
    return compressed_tables
class TestCompressor(unittest.TestCase):
    """ Tests that the various pair/ordered-covering compressors shrink a
    known table and preserve its routing behaviour.
    """

    def setUp(self):
        self.original_tables = MulticastRoutingTables()
        original_table = UnCompressedMulticastRoutingTable(x=0, y=0)
        # (key, mask, processor_ids) triples; links are always empty here
        for key, mask, processors in (
                (0b0000, 0b1111, [1, 2]),
                (0b0001, 0b1111, [0]),
                (0b0101, 0b1111, [4]),
                (0b1000, 0b1111, [1, 2]),
                (0b1001, 0b1111, [0]),
                (0b1110, 0b1111, [4]),
                (0b1100, 0b1111, [1, 2]),
                (0b0010, 0b1011, [4, 5])):
            original_table.add_multicast_routing_entry(
                MulticastRoutingEntry(key, mask, processors, [], False))
        self.original_tables.add_routing_table(original_table)
        unittest_setup()
        set_config(
            "Mapping", "router_table_compress_as_far_as_possible", True)

    def check_compression(self, compressed_tables):
        """ Assert each table got smaller and still routes identically. """
        for original in self.original_tables:
            compressed = compressed_tables.get_routing_table_for_chip(
                original.x, original.y)
            assert compressed.number_of_entries < original.number_of_entries
            # Fix: compare the original against the COMPRESSED table;
            # comparing a table with itself was a vacuous check
            compare_tables(original, compressed)

    def test_pair_compressor(self):
        compressed_tables = pair_compressor(self.original_tables)
        self.check_compression(compressed_tables)

    def test_range_compressor_skipped(self):
        # Range compression of this table is a no-op, so tables are equal
        compressed_tables = range_compressor(self.original_tables)
        for original in self.original_tables:
            compressed = compressed_tables.get_routing_table_for_chip(
                original.x, original.y)
            self.assertEqual(original, compressed)

    def test_checked_unordered_pair_compressor(self):
        compressed_tables = pair_compressor(
            self.original_tables, ordered=False, accept_overflow=False)
        self.check_compression(compressed_tables)

    def test_unordered_pair_compressor(self):
        compressed_tables = pair_compressor(
            self.original_tables, ordered=False, accept_overflow=True)
        self.check_compression(compressed_tables)

    def test_ordered_covering_compressor(self):
        compressed_tables = ordered_covering_compressor(self.original_tables)
        self.check_compression(compressed_tables)
def test_tables(self):
    """Range-compress a table loaded from CSV and verify equivalence."""
    tables = MulticastRoutingTables()
    # The fixture CSV lives next to this test module
    module_dir = os.path.dirname(sys.modules[self.__module__].__file__)
    table = from_csv(os.path.join(module_dir, "table2.csv.gz"))
    tables.add_routing_table(table)
    compressed = range_compressor(tables)
    compare_tables(table, compressed.get_routing_table_for_chip(0, 0))
def __call__(self, transceiver, routing_tables, app_id):
    """ Read back the routing table loaded on each listed chip.

    :param transceiver: how to talk to the machine
    :param routing_tables: the tables that were loaded
    :param app_id: the application the tables belong to
    :return: the tables as read from the machine
    """
    progress = ProgressBar(
        routing_tables, "Reading Routing Tables from Machine")
    result = MulticastRoutingTables()
    for table in progress.over(routing_tables):
        # Fetch the multicast entries actually present on the chip
        result.add_routing_table(
            self._read_routing_table(transceiver, table, app_id))
    return result
class MyTestCase(unittest.TestCase):
    """ Tests the compressor classes shrink a known table while preserving
    its routing behaviour.
    """

    def setUp(self):
        self.original_tables = MulticastRoutingTables()
        original_table = UnCompressedMulticastRoutingTable(x=0, y=0)
        # (key, mask, processor_ids) triples; links are always empty here
        for key, mask, processors in (
                (0b0000, 0b1111, [1, 2]),
                (0b0001, 0b1111, [0]),
                (0b0101, 0b1111, [4]),
                (0b1000, 0b1111, [1, 2]),
                (0b1001, 0b1111, [0]),
                (0b1110, 0b1111, [4]),
                (0b1100, 0b1111, [1, 2]),
                (0b0010, 0b1011, [4, 5])):
            original_table.add_multicast_routing_entry(
                MulticastRoutingEntry(key, mask, processors, [], False))
        self.original_tables.add_routing_table(original_table)

    def check_compression(self, compressed_tables):
        """ Assert each table got smaller and still routes identically. """
        for original in self.original_tables:
            compressed = compressed_tables.get_routing_table_for_chip(
                original.x, original.y)
            assert compressed.number_of_entries < original.number_of_entries
            # Fix: compare the original against the COMPRESSED table;
            # comparing a table with itself was a vacuous check
            compare_tables(original, compressed)

    def test_pair_compressor(self):
        compressor = PairCompressor()
        compressed_tables = compressor(self.original_tables)
        self.check_compression(compressed_tables)

    def test_checked_unordered_pair_compressor(self):
        compressor = CheckedUnorderedPairCompressor()
        compressed_tables = compressor(self.original_tables)
        self.check_compression(compressed_tables)

    def test_unordered_pair_compressor(self):
        compressor = UnorderedPairCompressor()
        compressed_tables = compressor(self.original_tables)
        self.check_compression(compressed_tables)

    def test_ordered_covering_compressor(self):
        compressor = OrderedCoveringCompressor()
        compressed_tables = compressor(self.original_tables)
        self.check_compression(compressed_tables)
def __call__(self, machine, extra_monitor_cores, placements):
    """ Build data-in routing tables by routing over a fake network for
    each board.

    :param machine: the real machine
    :param extra_monitor_cores: monitor vertices keyed by (x, y)
    :param placements: the real placements
    :return: the routing tables and the destination-to-key map
    """
    # pylint: disable=attribute-defined-outside-init
    self._real_machine = machine
    self._real_placements = placements
    self._monitors = extra_monitor_cores

    # One unit of progress per Ethernet-connected chip (i.e. per board)
    progress = ProgressBar(
        machine.ethernet_connected_chips,
        "Generating routing tables for data in system processes")

    routing_tables = MulticastRoutingTables()
    key_to_destination_map = dict()

    for ethernet_chip in progress.over(machine.ethernet_connected_chips):
        (fake_graph, fake_placements, fake_machine,
         key_to_dest_map) = self._create_fake_network(ethernet_chip)
        # Record how keys map onto destinations for this board
        key_to_destination_map.update(key_to_dest_map)
        # Route over the fake network, then translate into real tables
        partition_tables = self._do_routing(
            fake_graph=fake_graph, fake_placements=fake_placements,
            fake_machine=fake_machine)
        self._generate_routing_tables(
            routing_tables, partition_tables, ethernet_chip)

    return routing_tables, key_to_destination_map
def __call__(self, machine_graph, placements, n_keys_map): """ :param MachineGraph machine_graph: The graph to allocate the routing info for :param Placements placements: The placements of the vertices :param AbstractMachinePartitionNKeysMap n_keys_map: A map between the edges and the number of keys required by the edges :return: The routing information :rtype: tuple(RoutingInfo, AbstractMulticastRoutingTable) :raise PacmanRouteInfoAllocationException: If something goes wrong with the allocation """ # check that this algorithm supports the constraints put onto the # partitions check_algorithm_can_support_constraints( constrained_vertices=machine_graph.partitions, supported_constraints=[], abstract_constraint_type=AbstractKeyAllocatorConstraint) # take each edge and create keys from its placement progress = ProgressBar(machine_graph.n_outgoing_edge_partitions, "Allocating routing keys") routing_infos = RoutingInfo() routing_tables = MulticastRoutingTables() for partition in progress.over(machine_graph.outgoing_edge_partitions): for edge in partition.edges: routing_infos.add_partition_info( self._allocate_partition_route(edge, placements, machine_graph, n_keys_map)) return routing_infos, routing_tables
def __call__(
        self, routing_infos, routing_table_by_partitions, machine):
    """ Build a multicast routing table for every chip that has entries.

    :param routing_infos:
    :param routing_table_by_partitions:
    :param machine:
    """
    progress = ProgressBar(machine.n_chips, "Generating routing tables")
    tables = MulticastRoutingTables()
    for chip in progress.over(machine.chips):
        entries = routing_table_by_partitions.get_entries_for_router(
            chip.x, chip.y)
        # Chips with no routed partitions get no table at all
        if not entries:
            continue
        tables.add_routing_table(
            self._create_routing_table(chip, entries, routing_infos))
    return tables
def __call__(self, routing_infos, routing_table_by_partitions, machine):
    """ Build a multicast routing table for every chip that has entries.

    :param RoutingInfo routing_infos:
    :param MulticastRoutingTableByPartition routing_table_by_partitions:
    :param ~spinn_machine.Machine machine:
    :rtype: MulticastRoutingTables
    """
    progress = ProgressBar(machine.n_chips, "Generating routing tables")
    generated = MulticastRoutingTables()
    for chip in progress.over(machine.chips):
        chip_partitions = \
            routing_table_by_partitions.get_entries_for_router(
                chip.x, chip.y)
        # Only emit a table when the chip actually routes something
        if chip_partitions:
            table = self._create_routing_table(
                chip, chip_partitions, routing_infos)
            generated.add_routing_table(table)
    return generated
def compress_tables(self, router_tables, progress):
    """ Compress all the unordered routing tables

    Tables that start off smaller than the target length are not
    compressed.

    :param MulticastRoutingTables router_tables: Routing tables
    :param ~spinn_utilities.progress_bar.ProgressBar progress:
        Progress bar to show while working
    :return: The compressed but still unordered routing tables
    :rtype: MulticastRoutingTables
    :raises MinimisationFailedError: on failure
    """
    compressed_tables = MulticastRoutingTables()
    # Accumulates a description of every oversized table, "" = no problems
    self._problems = ""
    if get_config_bool(
            "Mapping", "router_table_compress_as_far_as_possible"):
        # Compress as much as possible (target 0 means never skip a table)
        target_length = 0
    else:
        target_length = Machine.ROUTER_ENTRIES
    for table in progress.over(router_tables.routing_tables):
        if table.number_of_entries < target_length:
            # Already fits: pass the table through untouched
            new_table = table
        else:
            compressed_table = self.compress_table(table)
            # Rebuild as a compressed-table object for the same chip
            new_table = CompressedMulticastRoutingTable(table.x, table.y)
            for entry in compressed_table:
                new_table.add_multicast_routing_entry(
                    entry.to_MulticastRoutingEntry())
            if new_table.number_of_entries > Machine.ROUTER_ENTRIES:
                # Record the overflow; whether it is fatal is decided below
                self._problems += "(x:{},y:{})={} ".format(
                    new_table.x, new_table.y, new_table.number_of_entries)
        compressed_tables.add_routing_table(new_table)
    if len(self._problems) > 0:
        # Ordered compression without overflow tolerance must fit exactly;
        # otherwise oversized tables are only warned about
        if self._ordered and not self._accept_overflow:
            raise MinimisationFailedError(
                "The routing table after compression will still not fit"
                " within the machines router: {}".format(self._problems))
        else:
            logger.warning(self._problems)
    return compressed_tables
def test_router_compressor_on_error():
    """ Run on-chip compression against a transceiver that errors.

    NOTE(review): despite the name, this test asserts nothing about an
    exception being raised (compare the variant that wraps the call in
    ``pytest.raises``) — presumably the compression call is expected to
    handle the error internally; confirm intended behaviour.
    """
    routing_tables = MulticastRoutingTables(
        [UnCompressedMulticastRoutingTable(0, 0)])
    transceiver = MockTransceiverError()
    machine = virtual_machine(width=8, height=8)
    mundy_on_chip_router_compression(
        routing_tables, transceiver, machine, app_id=17,
        system_provenance_folder="")
def test_router_compressor_on_error():
    """A failing transceiver must surface as a SpinnFrontEndException."""
    compressor = MundyOnChipRouterCompression()
    tables = MulticastRoutingTables([MulticastRoutingTable(0, 0)])
    transceiver = MockTransceiverError()
    machine = virtual_machine(version=5)
    with pytest.raises(SpinnFrontEndException):
        compressor(
            tables, transceiver, machine, app_id=17,
            provenance_file_path="")
def setUp(self):
    """Build one uncompressed table at (0, 0) with a fixed entry set."""
    self.original_tables = MulticastRoutingTables()
    table = UnCompressedMulticastRoutingTable(x=0, y=0)
    # (key, mask, processor_ids) triples; links are always empty here
    for key, mask, processors in (
            (0b0000, 0b1111, [1, 2]),
            (0b0001, 0b1111, [0]),
            (0b0101, 0b1111, [4]),
            (0b1000, 0b1111, [1, 2]),
            (0b1001, 0b1111, [0]),
            (0b1110, 0b1111, [4]),
            (0b1100, 0b1111, [1, 2]),
            (0b0010, 0b1011, [4, 5])):
        table.add_multicast_routing_entry(
            MulticastRoutingEntry(key, mask, processors, [], False))
    self.original_tables.add_routing_table(table)
def __call__(self, report_default_directory, routing_tables, transceiver,
             app_id):
    """ Read back each loaded routing table and write a report for it.

    :param report_default_directory: where the report folder is created
    :param routing_tables: the tables that were loaded
    :param transceiver: how to talk to the machine
    :param app_id: the application the tables belong to
    :return: the tables as read from the machine
    """
    # pylint: disable=protected-access
    tables = list(routing_tables.routing_tables)
    progress = ProgressBar(tables, "Reading Routing Tables from Machine")
    folder_name = os.path.join(report_default_directory, _FOLDER_NAME)
    os.mkdir(folder_name)

    machine_routing_tables = MulticastRoutingTables()
    # generate a file for every multicast entry
    for routing_table in progress.over(tables):
        read_table = self._read_routing_table(
            transceiver, routing_table, app_id)
        machine_routing_tables.add_routing_table(read_table)
        reports._generate_routing_table(read_table, folder_name)

    return machine_routing_tables
def basic_routing_table_generator(routing_infos, routing_table_by_partitions,
                                  machine):
    """ A basic algorithm that can produce routing tables

    :param RoutingInfo routing_infos:
    :param MulticastRoutingTableByPartition routing_table_by_partitions:
    :param ~spinn_machine.Machine machine:
    :rtype: MulticastRoutingTables
    """
    progress = ProgressBar(machine.n_chips, "Generating routing tables")
    tables = MulticastRoutingTables()
    for chip in progress.over(machine.chips):
        partitions = routing_table_by_partitions.get_entries_for_router(
            chip.x, chip.y)
        # Only chips that actually route something get a table
        if partitions:
            tables.add_routing_table(
                __create_routing_table(chip, partitions, routing_infos))
    return tables
def __call__(
        self, report_default_directory, routing_tables, transceiver,
        app_id):
    """ Read back each loaded routing table and write a report for it.

    :param report_default_directory: where the report folder is created
    :param routing_tables: the tables that were loaded
    :param transceiver: how to talk to the machine
    :param app_id: the application the tables belong to
    :return: the tables as read from the machine
    """
    # pylint: disable=protected-access
    loaded = list(routing_tables.routing_tables)
    bar = ProgressBar(loaded, "Reading Routing Tables from Machine")
    report_folder = os.path.join(report_default_directory, _FOLDER_NAME)
    os.mkdir(report_folder)

    result_tables = MulticastRoutingTables()
    # generate a file for every multicast entry
    for loaded_table in bar.over(loaded):
        # get multicast entries from machine
        on_machine = self._read_routing_table(
            transceiver, loaded_table, app_id)
        result_tables.add_routing_table(on_machine)
        reports._generate_routing_table(on_machine, report_folder)

    return result_tables
def setUp(self):
    """Build one uncompressed table at (0, 0) and configure compression."""
    self.original_tables = MulticastRoutingTables()
    table = UnCompressedMulticastRoutingTable(x=0, y=0)
    # (key, mask, processor_ids) triples; links are always empty here
    for key, mask, processors in (
            (0b0000, 0b1111, [1, 2]),
            (0b0001, 0b1111, [0]),
            (0b0101, 0b1111, [4]),
            (0b1000, 0b1111, [1, 2]),
            (0b1001, 0b1111, [0]),
            (0b1110, 0b1111, [4]),
            (0b1100, 0b1111, [1, 2]),
            (0b0010, 0b1011, [4, 5])):
        table.add_multicast_routing_entry(
            MulticastRoutingEntry(key, mask, processors, [], False))
    self.original_tables.add_routing_table(table)
    unittest_setup()
    set_config(
        "Mapping", "router_table_compress_as_far_as_possible", True)
def compress_tables(self, router_tables, progress):
    """ Compress all the unordered routing tables

    Tables that start off smaller than the target length are not
    compressed.

    :param MulticastRoutingTables router_tables: Routing tables
    :param ~spinn_utilities.progress_bar.ProgressBar progress:
        Progress bar to show while working
    :return: The compressed but still unordered routing tables
    :rtype: MulticastRoutingTables
    :raises MinimisationFailedError: on failure
    """
    compressed_tables = MulticastRoutingTables()
    # Accumulates a description of every oversized table, "" = no problems
    self._problems = ""
    for table in progress.over(router_tables.routing_tables):
        if table.number_of_entries < self._target_length:
            # Already fits: pass the table through untouched
            new_table = table
        else:
            compressed_table = self.compress_table(table)
            # Rebuild as a compressed-table object for the same chip
            new_table = CompressedMulticastRoutingTable(table.x, table.y)
            for entry in compressed_table:
                new_table.add_multicast_routing_entry(
                    entry.to_MulticastRoutingEntry())
            if new_table.number_of_entries > self.MAX_SUPPORTED_LENGTH:
                # Record the overflow; whether it is fatal is decided below
                self._problems += "(x:{},y:{})={} ".format(
                    new_table.x, new_table.y, new_table.number_of_entries)
        compressed_tables.add_routing_table(new_table)
    if len(self._problems) > 0:
        # Ordered compression must fit; unordered only warns
        if self._ordered:
            raise MinimisationFailedError(
                "The routing table after compression will still not fit"
                " within the machines router: {}".format(self._problems))
        else:
            logger.warning(self._problems)
    return compressed_tables
def test_add_routing_table_for_duplicate_chip(self):
    """Two tables for the same chip must be rejected on construction."""
    key_combo = 0xff35
    mask = 0xffff
    proc_ids = list(range(18))
    link_ids = list(range(6))
    entry_a = MulticastRoutingEntry(
        key_combo, mask, proc_ids, link_ids, True)
    entry_b = MulticastRoutingEntry(
        key_combo - 1, mask, proc_ids, link_ids, True)
    # Both tables claim chip (3, 0)
    duplicate_tables = [
        MulticastRoutingTable(3, 0, [entry_a]),
        MulticastRoutingTable(3, 0, [entry_b]),
    ]
    with self.assertRaises(PacmanAlreadyExistsException):
        MulticastRoutingTables(duplicate_tables)
def __call__(self, machine_graph, placements, n_keys_map): """ :param machine_graph: The graph to allocate the routing info for :type machine_graph:\ :py:class:`pacman.model.graphs.machine.MachineGraph` :param placements: The placements of the vertices :type placements:\ :py:class:`pacman.model.placements.Placements` :param n_keys_map: A map between the edges and the number of keys\ required by the edges :type n_keys_map:\ :py:class:`pacman.model.routing_info.AbstractMachinePartitionNKeysMap` :return: The routing information :rtype: \ :py:class:`pacman.model.routing_info.RoutingInfo`, \ :py:class:`pacman.model.routing_tables.MulticastRoutingTable :raise pacman.exceptions.PacmanRouteInfoAllocationException: \ If something goes wrong with the allocation """ # check that this algorithm supports the constraints put onto the # partitions supported_constraints = [] utility_calls.check_algorithm_can_support_constraints( constrained_vertices=machine_graph.partitions, supported_constraints=supported_constraints, abstract_constraint_type=AbstractKeyAllocatorConstraint) # take each edge and create keys from its placement progress = ProgressBar(machine_graph.n_outgoing_edge_partitions, "Allocating routing keys") routing_infos = RoutingInfo() routing_tables = MulticastRoutingTables() for partition in progress.over(machine_graph.outgoing_edge_partitions): for edge in partition.edges: routing_infos.add_partition_info( self._allocate_partition_route(edge, placements, machine_graph, n_keys_map)) return routing_infos, routing_tables
class SystemMulticastRoutingGenerator(object):
    """ Generates routing table entries used by the data in processes with\
        the extra monitor cores.

    :param ~spinn_machine.Machine machine:
    :param extra_monitor_cores:
    :type extra_monitor_cores:
        dict(tuple(int,int),ExtraMonitorSupportMachineVertex)
    :param ~pacman.model.placements.Placements placements:
    :return: routing tables, destination-to-key map,
        board-locn-to-timeout-key map
    :rtype: tuple(MulticastRoutingTables, dict(tuple(int,int),int),
        dict(tuple(int,int),int))
    """

    __slots__ = ["_monitors", "_machine", "_key_to_destination_map",
                 "_placements", "_routing_tables", "_time_out_keys_by_board"]

    def __call__(self, machine, extra_monitor_cores, placements):
        """
        :param ~spinn_machine.Machine machine:
        :param dict(tuple(int,int),ExtraMonitorSupportMachineVertex) \
            extra_monitor_cores:
        :param ~pacman.model.placements.Placements placements:
        :rtype: tuple(MulticastRoutingTables, dict(tuple(int,int),int),
            dict(tuple(int,int),int))
        """
        # pylint: disable=attribute-defined-outside-init
        self._machine = machine
        self._placements = placements
        self._monitors = extra_monitor_cores
        self._routing_tables = MulticastRoutingTables()
        self._key_to_destination_map = dict()
        self._time_out_keys_by_board = dict()

        # create progress bar; one step per Ethernet chip (i.e. per board)
        progress = ProgressBar(
            machine.ethernet_connected_chips,
            "Generating routing tables for data in system processes")
        for ethernet_chip in progress.over(machine.ethernet_connected_chips):
            tree = self._generate_routing_tree(ethernet_chip)
            self._add_routing_entries(ethernet_chip, tree)

        return (self._routing_tables, self._key_to_destination_map,
                self._time_out_keys_by_board)

    def _generate_routing_tree(self, ethernet_chip):
        """ Generates a map for each chip to over which link it gets its\
            data.

        :param ~spinn_machine.Chip ethernet_chip:
        :return: Map of (chip.x, chip.y) to (source.x, source.y, source.link)
        :rtype: dict(tuple(int, int), tuple(int, int, int))
        """
        eth_x = ethernet_chip.x
        eth_y = ethernet_chip.y
        tree = dict()
        # All chips on this board that still need a route
        to_reach = set(
            self._machine.get_existing_xys_by_ethernet(eth_x, eth_y))
        # NOTE(review): `reached` is written but never read below;
        # `found` is what drives the breadth-first expansion
        reached = set()
        reached.add((eth_x, eth_y))
        to_reach.remove((eth_x, eth_y))
        found = set()
        found.add((eth_x, eth_y))
        # Breadth-first expansion outward from the Ethernet chip
        while len(to_reach) > 0:
            just_reached = found
            found = set()
            for x, y in just_reached:
                # Check links starting with the most direct from 0,0
                for link_id in [1, 0, 2, 5, 3, 4]:
                    # Get potential destination
                    destination = self._machine.xy_over_link(x, y, link_id)
                    # If it is useful
                    if destination in to_reach:
                        # check the link actually exists
                        if self._machine.is_link_at(x, y, link_id):
                            # Add to tree and record chip reachable
                            tree[destination] = (x, y, link_id)
                            to_reach.remove(destination)
                            found.add(destination)
            # No progress this pass means the rest is unreachable
            if len(found) == 0:
                raise PacmanRoutingException(
                    "Unable to do data in routing on {}.".format(
                        ethernet_chip.ip_address))
        return tree

    def _add_routing_entry(self, x, y, key, processor_id=None,
                           link_ids=None):
        """ Adds a routing entry on this chip, creating the table if needed.

        :param int x: chip.x
        :param int y: chip.y
        :param int key: The key to use
        :param int processor_id:
            placement.p of the monitor vertex if applicable
        :param list(int) link_ids: IDs of the links out if applicable
        """
        table = self._routing_tables.get_routing_table_for_chip(x, y)
        if table is None:
            # First entry for this chip: create its (uncompressed) table
            table = UnCompressedMulticastRoutingTable(x, y)
            self._routing_tables.add_routing_table(table)
        if processor_id is None:
            processor_ids = []
        else:
            processor_ids = [processor_id]
        if link_ids is None:
            link_ids = []
        entry = MulticastRoutingEntry(
            routing_entry_key=key, mask=ROUTING_MASK,
            processor_ids=processor_ids, link_ids=link_ids,
            defaultable=False)
        table.add_multicast_routing_entry(entry)

    def _add_routing_entries(self, ethernet_chip, tree):
        """ Adds the routing entries based on the tree.

        For every chip with this ethernet:
            - A key is generated (and saved) for this chip.
            - A local route to the monitor core is added.
            - The tree is walked adding a route on each source to get here

        :param ~spinn_machine.Chip ethernet_chip:
            the ethernet chip to make entries for
        :param dict(tuple(int,int),tuple(int,int,int)) tree:
            map of chips and links
        """
        eth_x = ethernet_chip.x
        eth_y = ethernet_chip.y
        key = KEY_START_VALUE
        for (x, y) in self._machine.get_existing_xys_by_ethernet(
                eth_x, eth_y):
            self._key_to_destination_map[x, y] = key
            placement = self._placements.get_placement_of_vertex(
                self._monitors[x, y])
            # Local delivery to the monitor core on the target chip
            self._add_routing_entry(x, y, key, processor_id=placement.p)
            # Walk back up the tree adding a link-route at each hop
            while (x, y) in tree:
                x, y, link = tree[(x, y)]
                self._add_routing_entry(x, y, key, link_ids=[link])
            key += N_KEYS_PER_PARTITION_ID
        # accumulate links to make a broadcast
        links_per_chip = defaultdict(list)
        for chip_key in tree:
            x, y, link = tree[chip_key]
            links_per_chip[x, y].append(link)
        # add broadcast router timeout keys: every chip routes the timeout
        # key to its monitor core AND onward over all its tree links
        time_out_key = key
        for (x, y) in self._machine.get_existing_xys_by_ethernet(
                eth_x, eth_y):
            placement = self._placements.get_placement_of_vertex(
                self._monitors[x, y])
            self._add_routing_entry(
                x, y, time_out_key, processor_id=placement.p,
                link_ids=links_per_chip[x, y])
        # update tracker
        self._time_out_keys_by_board[(eth_x, eth_y)] = key
        key += N_KEYS_PER_REINJECTION_PARTITION
    def __call__(
            self, routing_tables, transceiver, machine, app_id,
            provenance_file_path, machine_graph, placements,
            executable_finder, read_algorithm_iobuf, produce_report,
            default_report_folder, target_length, routing_infos,
            time_to_try_for_each_iteration, use_timer_cut_off,
            machine_time_step, time_scale_factor, threshold_percentage,
            executable_targets, compress_as_much_as_possible=False,
            provenance_data_objects=None):
        """ entrance for routing table compression with bit field

        :param ~.MulticastRoutingTables routing_tables:
        :param ~.Transceiver transceiver:
        :param ~.Machine machine:
        :param int app_id:
        :param str provenance_file_path:
        :param ~.MachineGraph machine_graph:
        :param ~.Placements placements:
        :param ~.ExecutableFinder executable_finder:
        :param bool read_algorithm_iobuf:
        :param bool produce_report:
        :param str default_report_folder:
        :param int target_length:
        :param ~.RoutingInfo routing_infos:
        :param int time_to_try_for_each_iteration:
        :param bool use_timer_cut_off:
        :param int machine_time_step:
        :param int time_scale_factor:
        :param int threshold_percentage:
        :param ExecutableTargets executable_targets:
        :param bool compress_as_much_as_possible:
        :param list(ProvenanceDataItem) provenance_data_objects:
        :rtype: tuple(ExecutableTargets,list(ProvenanceDataItem))
        """
        # build provenance data objects
        if provenance_data_objects is not None:
            prov_items = provenance_data_objects
        else:
            prov_items = list()

        # Nothing to compress: return empty targets, untouched provenance
        if len(routing_tables.routing_tables) == 0:
            return ExecutableTargets(), prov_items

        # new app id for this simulation
        routing_table_compressor_app_id = \
            transceiver.app_id_tracker.get_new_id()

        progress_bar = ProgressBar(
            total_number_of_things_to_do=(
                len(machine_graph.vertices) +
                (len(routing_tables.routing_tables) *
                 self.TIMES_CYCLED_ROUTING_TABLES)),
            string_describing_what_being_progressed=self._PROGRESS_BAR_TEXT)

        # locate data and on_chip_cores to load binary on
        (addresses, matrix_addresses_and_size) = self._generate_addresses(
            machine_graph, placements, transceiver, progress_bar)

        # create executable targets
        (compressor_executable_targets, bit_field_sorter_executable_path,
         bit_field_compressor_executable_path) = self._generate_core_subsets(
            routing_tables, executable_finder, machine, progress_bar,
            executable_targets)

        # load data into sdram
        on_host_chips = self._load_data(
            addresses, transceiver, routing_table_compressor_app_id,
            routing_tables, app_id, machine, compress_as_much_as_possible,
            progress_bar, compressor_executable_targets,
            matrix_addresses_and_size, time_to_try_for_each_iteration,
            bit_field_compressor_executable_path,
            bit_field_sorter_executable_path, threshold_percentage)

        # load and run binaries; success-checking is deferred to the
        # partial, which also records which chips fell back to the host
        system_control_logic.run_system_application(
            compressor_executable_targets,
            routing_table_compressor_app_id, transceiver,
            provenance_file_path, executable_finder,
            read_algorithm_iobuf,
            functools.partial(
                self._check_bit_field_router_compressor_for_success,
                host_chips=on_host_chips,
                sorter_binary_path=bit_field_sorter_executable_path,
                prov_data_items=prov_items),
            [CPUState.FINISHED], True,
            "bit_field_compressor_on_{}_{}_{}.txt",
            [bit_field_sorter_executable_path], progress_bar)

        # start the host side compressions if needed
        if len(on_host_chips) != 0:
            logger.warning(self._ON_HOST_WARNING_MESSAGE, len(on_host_chips))
            progress_bar = ProgressBar(
                total_number_of_things_to_do=len(on_host_chips),
                string_describing_what_being_progressed=self._HOST_BAR_TEXT)
            host_compressor = HostBasedBitFieldRouterCompressor()
            compressed_pacman_router_tables = MulticastRoutingTables()

            key_atom_map = host_compressor.generate_key_to_atom_map(
                machine_graph, routing_infos)

            for (chip_x, chip_y) in progress_bar.over(on_host_chips, False):
                # fresh address map per chip; only this chip's bitfields
                bit_field_sdram_base_addresses = defaultdict(dict)
                host_compressor.collect_bit_field_sdram_base_addresses(
                    chip_x, chip_y, machine, placements, transceiver,
                    bit_field_sdram_base_addresses)
                host_compressor.start_compression_selection_process(
                    router_table=routing_tables.get_routing_table_for_chip(
                        chip_x, chip_y),
                    produce_report=produce_report,
                    report_folder_path=host_compressor.generate_report_path(
                        default_report_folder),
                    bit_field_sdram_base_addresses=(
                        bit_field_sdram_base_addresses),
                    transceiver=transceiver, machine_graph=machine_graph,
                    placements=placements, machine=machine,
                    target_length=target_length,
                    time_to_try_for_each_iteration=(
                        time_to_try_for_each_iteration),
                    use_timer_cut_off=use_timer_cut_off,
                    compressed_pacman_router_tables=(
                        compressed_pacman_router_tables),
                    key_atom_map=key_atom_map)

            # load host compressed routing tables
            for table in compressed_pacman_router_tables.routing_tables:
                if (not machine.get_chip_at(table.x, table.y).virtual
                        and table.multicast_routing_entries):
                    transceiver.clear_multicast_routes(table.x, table.y)
                    transceiver.load_multicast_routes(
                        table.x, table.y, table.multicast_routing_entries,
                        app_id=app_id)

            # the over(..., False) above suppressed auto-finish
            progress_bar.end()

        return compressor_executable_targets, prov_items
from pacman.operations.router_compressors.routing_compression_checker import ( compare_tables) from pacman.operations.router_compressors.ordered_covering_router_compressor \ import ordered_covering_compressor from pacman.operations.router_compressors import pair_compressor from pacman.config_setup import unittest_setup unittest_setup() # original_tables = from_json("routing_table_15_25.json") original_tables = from_json("malloc_hard_routing_tables.json.gz") # original_tables = from_json("routing_tables_speader_big.json.gz") SPLIT = False if SPLIT: bad = MulticastRoutingTables() good = MulticastRoutingTables() for original in original_tables: if original.x == 19 and original.y == 22: good.add_routing_table(original) else: bad.add_routing_table(original) json_obj = to_json(bad) # dump to json file with open("routing_tables_zoned_bad1.json", "w") as f: json.dump(json_obj, f) json_obj = to_json(good) # dump to json file with open("routing_tables_zoned_2000.json", "w") as f: json.dump(json_obj, f)
def __call__(self, router_tables, machine, placements, transceiver, default_report_folder, produce_report, use_timer_cut_off, machine_graph, routing_infos, machine_time_step, time_scale_factor, target_length=None, time_to_try_for_each_iteration=None): """ :param ~.MulticastRoutingTables router_tables: :param ~.Machine machine: :param ~.Placements placements: :param ~.Transceiver transceiver: :param str default_report_folder: :param bool produce_report: :param bool use_timer_cut_off: :param ~.MachineGraph machine_graph: :param ~.RoutingInfo routing_infos: :param int machine_time_step: :param int time_scale_factor: :param int target_length: :param int time_to_try_for_each_iteration: :rtype: ~.MulticastRoutingTables """ if target_length is None: target_length = self._MAX_SUPPORTED_LENGTH if time_to_try_for_each_iteration is None: time_to_try_for_each_iteration = self._DEFAULT_TIME_PER_ITERATION # create progress bar progress = ProgressBar( len(router_tables.routing_tables) * 2, "Compressing routing Tables with bitfields in host") # create report report_folder_path = None if produce_report: report_folder_path = self.generate_report_path( default_report_folder) # compressed router table compressed_pacman_router_tables = MulticastRoutingTables() key_atom_map = self.generate_key_to_atom_map(machine_graph, routing_infos) # holder for the bitfields in bit_field_sdram_base_addresses = defaultdict(dict) for router_table in progress.over(router_tables.routing_tables, False): self.collect_bit_field_sdram_base_addresses( router_table.x, router_table.y, machine, placements, transceiver, bit_field_sdram_base_addresses) # start the routing table choice conversion for router_table in progress.over(router_tables.routing_tables): self.start_compression_selection_process( router_table, produce_report, report_folder_path, bit_field_sdram_base_addresses, transceiver, machine_graph, placements, machine, target_length, time_to_try_for_each_iteration, use_timer_cut_off, 
compressed_pacman_router_tables, key_atom_map) # return compressed tables return compressed_pacman_router_tables
def test(self): """Test minimising a table of the form: 0000 -> N NE 0001 -> E 0101 -> SW 1000 -> N NE 1001 -> E 1110 -> SW 1100 -> N NE 0X00 -> S SW The result (worked out by hand) should be: 0000 -> N NE 0X00 -> S SW 1X00 -> N NE X001 -> E X1XX -> SW """ original_tables = MulticastRoutingTables() original_table = MulticastRoutingTable(x=0, y=0) original_table.add_multicast_routing_entry( MulticastRoutingEntry(0b0000, 0b1111, [1, 2], [], False)) original_table.add_multicast_routing_entry( MulticastRoutingEntry(0b0001, 0b1111, [0], [], False)) original_table.add_multicast_routing_entry( MulticastRoutingEntry(0b0101, 0b1111, [4], [], False)) original_table.add_multicast_routing_entry( MulticastRoutingEntry(0b1000, 0b1111, [1, 2], [], False)) original_table.add_multicast_routing_entry( MulticastRoutingEntry(0b1001, 0b1111, [0], [], False)) original_table.add_multicast_routing_entry( MulticastRoutingEntry(0b1110, 0b1111, [4], [], False)) original_table.add_multicast_routing_entry( MulticastRoutingEntry(0b1100, 0b1111, [1, 2], [], False)) original_table.add_multicast_routing_entry( MulticastRoutingEntry(0b0000, 0b1011, [4, 5], [], False)) original_tables.add_routing_table(original_table) mundy_compressor = MundyRouterCompressor() compressed_tables = mundy_compressor(original_tables) compressed_table = compressed_tables.get_routing_table_for_chip(0, 0) # TODO: FIX THIS SO THAT WE TEST THAT THE RESULT IS VALID # result_table_expected = MulticastRoutingTable(x=0, y=0) # result_table_expected.add_multicast_routing_entry( # MulticastRoutingEntry(0b0000, 0b1111, [1, 2], [], False)) # result_table_expected.add_multicast_routing_entry( # MulticastRoutingEntry(0b0000, 0b1011, [4, 5], [], False)) # result_table_expected.add_multicast_routing_entry( # MulticastRoutingEntry(0b1000, 0b1011, [1, 2], [], False)) # result_table_expected.add_multicast_routing_entry( # MulticastRoutingEntry(0b0001, 0b0111, [0], [], False)) # result_table_expected.add_multicast_routing_entry( # 
MulticastRoutingEntry(0b0100, 0b0100, [4], [], False)) # Minimise as far as possible assert compressed_table.number_of_entries == 5 # assert compressed_table == result_table_expected compare_table(original_table, compressed_table)
def test_new_multicast_routing_tables_empty(self): MulticastRoutingTables()