def range_compressor(router_tables, accept_overflow=True):
    """
    :param MulticastRoutingTables router_tables:
    :param bool accept_overflow:
        A flag which should only be used in testing to stop raising an
        exception if the result is too big
    :rtype: MulticastRoutingTables
    """
    if accept_overflow:
        message = "Precompressing tables using Range Compressor"
    else:
        message = "Compressing tables using Range Compressor"
    progress = ProgressBar(len(router_tables.routing_tables), message)
    compressor = RangeCompressor()
    compressed_tables = MulticastRoutingTables()
    for table in progress.over(router_tables.routing_tables):
        new_table = compressor.compress_table(table)
        if (new_table.number_of_entries > Machine.ROUTER_ENTRIES
                and not accept_overflow):
            raise MinimisationFailedError(
                f"The routing table {table.x} {table.y} with "
                f"{table.number_of_entries} entries still has "
                f"{new_table.number_of_entries} entries after compression, "
                f"so it will not fit")
        compressed_tables.add_routing_table(new_table)
    logger.info(f"Ranged compressor resulted in a largest table of size "
                f"{compressed_tables.max_number_of_entries}")
    return compressed_tables
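A minimal usage sketch of range_compressor (not part of the source above): the table-building pattern mirrors the tests later in this section, and the import paths are assumptions that may need adjusting to the local PACMAN version. With accept_overflow=False an over-full result raises MinimisationFailedError instead of being returned.

# Hedged sketch: the import locations below are assumed, not confirmed here.
from spinn_machine import MulticastRoutingEntry
from pacman.exceptions import MinimisationFailedError
from pacman.model.routing_tables import (
    MulticastRoutingTables, UnCompressedMulticastRoutingTable)
from pacman.operations.router_compressors import range_compressor

tables = MulticastRoutingTables()
table = UnCompressedMulticastRoutingTable(x=0, y=0)
table.add_multicast_routing_entry(
    MulticastRoutingEntry(0b0000, 0b1111, [1, 2], [], False))
tables.add_routing_table(table)

try:
    # accept_overflow=False turns an over-full compressed table into an error
    compressed = range_compressor(tables, accept_overflow=False)
except MinimisationFailedError as error:
    print(f"Compression did not fit: {error}")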
def __call__(self, router_tables):
    tables = MulticastRoutingTables()
    previous_masks = dict()
    progress = ProgressBar(
        len(router_tables.routing_tables) * 2,
        "Compressing Routing Tables")

    # Create all masks without holes
    allowed_masks = [_32_BITS - ((2 ** i) - 1) for i in range(33)]

    # Check that none of the masks have "holes", e.g. 0xFFFF0FFF has a hole
    for router_table in router_tables.routing_tables:
        for entry in router_table.multicast_routing_entries:
            if entry.mask not in allowed_masks:
                raise PacmanRoutingException(
                    "Only masks without holes are allowed in tables for"
                    " MallocBasedRouteMerger (disallowed mask={})".format(
                        hex(entry.mask)))

    for router_table in progress.over(router_tables.routing_tables):
        new_table = self._merge_routes(router_table, previous_masks)
        tables.add_routing_table(new_table)
        n_entries = len([
            entry for entry in new_table.multicast_routing_entries
            if not entry.defaultable])
        print("Reduced from {} to {}".format(
            len(router_table.multicast_routing_entries), n_entries))
        if n_entries > 1023:
            raise PacmanRoutingException(
                "Cannot make table small enough: {} entries".format(
                    n_entries))

    return tables
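For readers unfamiliar with the "holes" terminology above, here is a small self-contained sketch (plain Python, no PACMAN types assumed) of what the allowed_masks list accepts: a mask is allowed only when its set bits form one contiguous run starting at the most significant bit.

# Standalone illustration of the "masks without holes" check
_32_BITS = 0xFFFFFFFF
allowed_masks = [_32_BITS - ((2 ** i) - 1) for i in range(33)]

assert 0xFFFFF000 in allowed_masks      # 20 ones then 12 zeros: contiguous
assert 0xFFFF0FFF not in allowed_masks  # zero bits inside the run: a "hole"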
def __call__(self, router_tables, target_length=None):
    # build storage
    compressed_pacman_router_tables = MulticastRoutingTables()

    # create progress bar
    progress = ProgressBar(
        router_tables.routing_tables, "Compressing routing Tables")

    # compress each router
    for router_table in progress.over(router_tables.routing_tables):

        # convert to rig format
        entries = self._convert_to_mundy_format(router_table)

        # compress the router entries
        compressed_router_table_entries = \
            rigs_compressor.minimise(entries, target_length)

        # convert back to pacman model
        compressed_pacman_table = self._convert_to_pacman_router_table(
            compressed_router_table_entries, router_table.x, router_table.y)

        # add to new compressed routing tables
        compressed_pacman_router_tables.add_routing_table(
            compressed_pacman_table)

    # return
    return compressed_pacman_router_tables
def __call__(self, router_tables):
    tables = MulticastRoutingTables()
    previous_masks = dict()
    progress = ProgressBar(
        len(router_tables.routing_tables) * 2,
        "Compressing Routing Tables")

    # Create all masks without holes
    allowed_masks = [_32_BITS - ((2 ** i) - 1) for i in range(33)]

    # Check that none of the masks have "holes", e.g. 0xFFFF0FFF has a hole
    for router_table in router_tables.routing_tables:
        for entry in router_table.multicast_routing_entries:
            if entry.mask not in allowed_masks:
                raise PacmanRoutingException(
                    "Only masks without holes are allowed in tables for"
                    " BasicRouteMerger (disallowed mask={})".format(
                        hex(entry.mask)))

    for router_table in progress.over(router_tables.routing_tables):
        new_table = self._merge_routes(router_table, previous_masks)
        tables.add_routing_table(new_table)
        n_entries = len([
            entry for entry in new_table.multicast_routing_entries
            if not entry.defaultable])
        # print("Reduced from {} to {}".format(
        #     len(router_table.multicast_routing_entries), n_entries))
        if n_entries > 1023:
            raise PacmanRoutingException(
                "Cannot make table small enough: {} entries".format(
                    n_entries))

    return tables
def __call__(self, router_tables, target_length=None):
    # build storage
    compressed_pacman_router_tables = MulticastRoutingTables()

    # create progress bar
    progress = ProgressBar(
        router_tables.routing_tables, "Compressing routing Tables")

    # compress each router
    for router_table in progress.over(router_tables.routing_tables):

        # convert to rig format
        entries = self._convert_to_mundy_format(router_table)

        # compress the router entries
        compressed_router_table_entries = \
            rigs_compressor.minimise(entries, target_length)

        # convert back to pacman model
        compressed_pacman_table = self._convert_to_pacman_router_table(
            compressed_router_table_entries, router_table.x, router_table.y)

        # add to new compressed routing tables
        compressed_pacman_router_tables.add_routing_table(
            compressed_pacman_table)

    # return
    return compressed_pacman_router_tables
def test_tables(self):
    tables = MulticastRoutingTables()
    path = os.path.dirname(sys.modules[self.__module__].__file__)
    table_path = os.path.join(path, "table2.csv.gz")
    table = from_csv(table_path)
    tables.add_routing_table(table)
    compressed = range_compressor(tables)
    c_table = compressed.get_routing_table_for_chip(0, 0)
    compare_tables(table, c_table)
class TestCompressor(unittest.TestCase):

    def setUp(self):
        self.original_tables = MulticastRoutingTables()
        original_table = UnCompressedMulticastRoutingTable(x=0, y=0)
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b0000, 0b1111, [1, 2], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b0001, 0b1111, [0], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b0101, 0b1111, [4], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b1000, 0b1111, [1, 2], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b1001, 0b1111, [0], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b1110, 0b1111, [4], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b1100, 0b1111, [1, 2], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b0010, 0b1011, [4, 5], [], False))
        self.original_tables.add_routing_table(original_table)
        unittest_setup()
        set_config(
            "Mapping", "router_table_compress_as_far_as_possible", True)

    def check_compression(self, compressed_tables):
        for original in self.original_tables:
            compressed = compressed_tables.get_routing_table_for_chip(
                original.x, original.y)
            assert compressed.number_of_entries < original.number_of_entries
            # check the compressed table still covers the original routes
            compare_tables(original, compressed)

    def test_pair_compressor(self):
        compressed_tables = pair_compressor(self.original_tables)
        self.check_compression(compressed_tables)

    def test_range_compressor_skipped(self):
        compressed_tables = range_compressor(self.original_tables)
        for original in self.original_tables:
            compressed = compressed_tables.get_routing_table_for_chip(
                original.x, original.y)
            self.assertEqual(original, compressed)

    def test_checked_unordered_pair_compressor(self):
        compressed_tables = pair_compressor(
            self.original_tables, ordered=False, accept_overflow=False)
        self.check_compression(compressed_tables)

    def test_unordered_pair_compressor(self):
        compressed_tables = pair_compressor(
            self.original_tables, ordered=False, accept_overflow=True)
        self.check_compression(compressed_tables)

    def test_ordered_covering_compressor(self):
        compressed_tables = ordered_covering_compressor(self.original_tables)
        self.check_compression(compressed_tables)
def __call__(self, transceiver, routing_tables, app_id):
    progress = ProgressBar(
        routing_tables, "Reading Routing Tables from Machine")
    machine_routing_tables = MulticastRoutingTables()

    for routing_table in progress.over(routing_tables):
        # get multicast entries from machine
        machine_routing_table = self._read_routing_table(
            transceiver, routing_table, app_id)
        machine_routing_tables.add_routing_table(machine_routing_table)

    return machine_routing_tables
class MyTestCase(unittest.TestCase):

    def setUp(self):
        self.original_tables = MulticastRoutingTables()
        original_table = UnCompressedMulticastRoutingTable(x=0, y=0)
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b0000, 0b1111, [1, 2], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b0001, 0b1111, [0], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b0101, 0b1111, [4], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b1000, 0b1111, [1, 2], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b1001, 0b1111, [0], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b1110, 0b1111, [4], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b1100, 0b1111, [1, 2], [], False))
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(0b0010, 0b1011, [4, 5], [], False))
        self.original_tables.add_routing_table(original_table)

    def check_compression(self, compressed_tables):
        for original in self.original_tables:
            compressed = compressed_tables.get_routing_table_for_chip(
                original.x, original.y)
            assert compressed.number_of_entries < original.number_of_entries
            # check the compressed table still covers the original routes
            compare_tables(original, compressed)

    def test_pair_compressor(self):
        compressor = PairCompressor()
        compressed_tables = compressor(self.original_tables)
        self.check_compression(compressed_tables)

    def test_checked_unordered_pair_compressor(self):
        compressor = CheckedUnorderedPairCompressor()
        compressed_tables = compressor(self.original_tables)
        self.check_compression(compressed_tables)

    def test_unordered_pair_compressor(self):
        compressor = UnorderedPairCompressor()
        compressed_tables = compressor(self.original_tables)
        self.check_compression(compressed_tables)

    def test_ordered_covering_compressor(self):
        compressor = OrderedCoveringCompressor()
        compressed_tables = compressor(self.original_tables)
        self.check_compression(compressed_tables)
def __call__(
        self, routing_infos, routing_table_by_partitions, machine):
    """
    :param routing_infos:
    :param routing_table_by_partitions:
    :param machine:
    """
    progress = ProgressBar(machine.n_chips, "Generating routing tables")
    routing_tables = MulticastRoutingTables()
    for chip in progress.over(machine.chips):
        partitions_in_table = routing_table_by_partitions.\
            get_entries_for_router(chip.x, chip.y)
        if partitions_in_table:
            routing_tables.add_routing_table(self._create_routing_table(
                chip, partitions_in_table, routing_infos))

    return routing_tables
def __call__(self, routing_infos, routing_table_by_partitions, machine):
    """
    :param RoutingInfo routing_infos:
    :param MulticastRoutingTableByPartition routing_table_by_partitions:
    :param ~spinn_machine.Machine machine:
    :rtype: MulticastRoutingTables
    """
    progress = ProgressBar(machine.n_chips, "Generating routing tables")
    routing_tables = MulticastRoutingTables()
    for chip in progress.over(machine.chips):
        partitions_in_table = routing_table_by_partitions.\
            get_entries_for_router(chip.x, chip.y)
        if partitions_in_table:
            routing_tables.add_routing_table(self._create_routing_table(
                chip, partitions_in_table, routing_infos))

    return routing_tables
def compress_tables(self, router_tables, progress):
    """ Compress all the unordered routing tables.

    Tables which start off smaller than target_length are not compressed.

    :param MulticastRoutingTables router_tables: Routing tables
    :param ~spinn_utilities.progress_bar.ProgressBar progress:
        Progress bar to show while working
    :return: The compressed but still unordered routing tables
    :rtype: MulticastRoutingTables
    :raises MinimisationFailedError: on failure
    """
    compressed_tables = MulticastRoutingTables()
    self._problems = ""
    if get_config_bool(
            "Mapping", "router_table_compress_as_far_as_possible"):
        # Compress as much as possible
        target_length = 0
    else:
        target_length = Machine.ROUTER_ENTRIES
    for table in progress.over(router_tables.routing_tables):
        if table.number_of_entries < target_length:
            new_table = table
        else:
            compressed_table = self.compress_table(table)
            new_table = CompressedMulticastRoutingTable(table.x, table.y)
            for entry in compressed_table:
                new_table.add_multicast_routing_entry(
                    entry.to_MulticastRoutingEntry())
            if new_table.number_of_entries > Machine.ROUTER_ENTRIES:
                self._problems += "(x:{},y:{})={} ".format(
                    new_table.x, new_table.y, new_table.number_of_entries)
        compressed_tables.add_routing_table(new_table)
    if len(self._problems) > 0:
        if self._ordered and not self._accept_overflow:
            raise MinimisationFailedError(
                "The routing table after compression will still not fit"
                " within the machine's router: {}".format(self._problems))
        else:
            logger.warning(self._problems)
    return compressed_tables
def __call__(self, report_default_directory, routing_tables, transceiver,
             app_id):
    # pylint: disable=protected-access
    tables = list(routing_tables.routing_tables)
    progress = ProgressBar(tables, "Reading Routing Tables from Machine")

    folder_name = os.path.join(report_default_directory, _FOLDER_NAME)
    os.mkdir(folder_name)

    machine_routing_tables = MulticastRoutingTables()

    # generate a file for every multicast entry
    for routing_table in progress.over(tables):
        # get multicast entries from machine
        machine_routing_table = self._read_routing_table(
            transceiver, routing_table, app_id)
        machine_routing_tables.add_routing_table(machine_routing_table)
        reports._generate_routing_table(machine_routing_table, folder_name)

    return machine_routing_tables
def basic_routing_table_generator(
        routing_infos, routing_table_by_partitions, machine):
    """ A basic algorithm that can produce routing tables.

    :param RoutingInfo routing_infos:
    :param MulticastRoutingTableByPartition routing_table_by_partitions:
    :param ~spinn_machine.Machine machine:
    :rtype: MulticastRoutingTables
    """
    progress = ProgressBar(machine.n_chips, "Generating routing tables")
    routing_tables = MulticastRoutingTables()
    for chip in progress.over(machine.chips):
        partitions_in_table = routing_table_by_partitions.\
            get_entries_for_router(chip.x, chip.y)
        if partitions_in_table:
            routing_tables.add_routing_table(__create_routing_table(
                chip, partitions_in_table, routing_infos))

    return routing_tables
def compress_tables(self, router_tables, progress):
    """ Compress all the unordered routing tables.

    Tables which start off smaller than target_length are not compressed.

    :param MulticastRoutingTables router_tables: Routing tables
    :param ~spinn_utilities.progress_bar.ProgressBar progress:
        Progress bar to show while working
    :return: The compressed but still unordered routing tables
    :rtype: MulticastRoutingTables
    :raises MinimisationFailedError: on failure
    """
    compressed_tables = MulticastRoutingTables()
    self._problems = ""
    for table in progress.over(router_tables.routing_tables):
        if table.number_of_entries < self._target_length:
            new_table = table
        else:
            compressed_table = self.compress_table(table)
            new_table = CompressedMulticastRoutingTable(table.x, table.y)
            for entry in compressed_table:
                new_table.add_multicast_routing_entry(
                    entry.to_MulticastRoutingEntry())
            if new_table.number_of_entries > self.MAX_SUPPORTED_LENGTH:
                self._problems += "(x:{},y:{})={} ".format(
                    new_table.x, new_table.y, new_table.number_of_entries)
        compressed_tables.add_routing_table(new_table)
    if len(self._problems) > 0:
        if self._ordered:
            raise MinimisationFailedError(
                "The routing table after compression will still not fit"
                " within the machine's router: {}".format(self._problems))
        else:
            logger.warning(self._problems)
    return compressed_tables
import json

# Assumed import locations for the json helpers and the table container;
# adjust to match the local PACMAN version.
from pacman.model.routing_tables import MulticastRoutingTables
from pacman.model.routing_tables.multicast_routing_tables import (
    from_json, to_json)
from pacman.operations.router_compressors import pair_compressor
from pacman.config_setup import unittest_setup

unittest_setup()

# original_tables = from_json("routing_table_15_25.json")
original_tables = from_json("malloc_hard_routing_tables.json.gz")
# original_tables = from_json("routing_tables_speader_big.json.gz")

SPLIT = False
if SPLIT:
    bad = MulticastRoutingTables()
    good = MulticastRoutingTables()
    for original in original_tables:
        if original.x == 19 and original.y == 22:
            good.add_routing_table(original)
        else:
            bad.add_routing_table(original)

    json_obj = to_json(bad)
    # dump to json file
    with open("routing_tables_zoned_bad1.json", "w") as f:
        json.dump(json_obj, f)
    json_obj = to_json(good)
    # dump to json file
    with open("routing_tables_zoned_2000.json", "w") as f:
        json.dump(json_obj, f)
    original_tables = bad

MUNDY = True
PRE = True
def test(self):
    """ Test minimising a table of the form:

        0000 -> N NE
        0001 -> E
        0101 -> SW
        1000 -> N NE
        1001 -> E
        1110 -> SW
        1100 -> N NE
        0X00 -> S SW

    The result (worked out by hand) should be:

        0000 -> N NE
        0X00 -> S SW
        1X00 -> N NE
        X001 -> E
        X1XX -> SW
    """
    original_tables = MulticastRoutingTables()
    original_table = MulticastRoutingTable(x=0, y=0)
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b0000, 0b1111, [1, 2], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b0001, 0b1111, [0], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b0101, 0b1111, [4], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b1000, 0b1111, [1, 2], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b1001, 0b1111, [0], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b1110, 0b1111, [4], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b1100, 0b1111, [1, 2], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b0000, 0b1011, [4, 5], [], False))
    original_tables.add_routing_table(original_table)

    mundy_compressor = MundyRouterCompressor()
    compressed_tables = mundy_compressor(original_tables)
    compressed_table = compressed_tables.get_routing_table_for_chip(0, 0)

    # TODO: FIX THIS SO THAT WE TEST THAT THE RESULT IS VALID
    # result_table_expected = MulticastRoutingTable(x=0, y=0)
    # result_table_expected.add_multicast_routing_entry(
    #     MulticastRoutingEntry(0b0000, 0b1111, [1, 2], [], False))
    # result_table_expected.add_multicast_routing_entry(
    #     MulticastRoutingEntry(0b0000, 0b1011, [4, 5], [], False))
    # result_table_expected.add_multicast_routing_entry(
    #     MulticastRoutingEntry(0b1000, 0b1011, [1, 2], [], False))
    # result_table_expected.add_multicast_routing_entry(
    #     MulticastRoutingEntry(0b0001, 0b0111, [0], [], False))
    # result_table_expected.add_multicast_routing_entry(
    #     MulticastRoutingEntry(0b0100, 0b0100, [4], [], False))

    # Minimise as far as possible
    assert compressed_table.number_of_entries == 5
    # assert compressed_table == result_table_expected
    compare_table(original_table, compressed_table)
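The X notation in the docstring above stands for "don't care" bits: a packet hits an entry when the masked packet key equals the entry's routing key. A tiny standalone check (plain Python, no PACMAN types) of how the 0X00 entry (key=0b0000, mask=0b1011) covers both 0000 and 0100:

def hits(packet_key, entry_key, entry_mask):
    # A packet hits an entry when the masked packet key equals the entry key
    return (packet_key & entry_mask) == entry_key

# Entry "0X00": key=0b0000, mask=0b1011 (bit 2 is a don't-care)
assert hits(0b0000, 0b0000, 0b1011)
assert hits(0b0100, 0b0000, 0b1011)
assert not hits(0b0001, 0b0000, 0b1011)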
class SystemMulticastRoutingGenerator(object):
    """ Generates the routing table entries used by the data-in processes
        with the extra monitor cores.

    :param ~spinn_machine.Machine machine:
    :param extra_monitor_cores:
    :type extra_monitor_cores:
        dict(tuple(int,int), ExtraMonitorSupportMachineVertex)
    :param ~pacman.model.placements.Placements placements:
    :return: routing tables, destination-to-key map,
        board-location-to-timeout-key map
    :rtype: tuple(MulticastRoutingTables, dict(tuple(int,int),int),
        dict(tuple(int,int),int))
    """

    __slots__ = ["_monitors", "_machine", "_key_to_destination_map",
                 "_placements", "_routing_tables", "_time_out_keys_by_board"]

    def __call__(self, machine, extra_monitor_cores, placements):
        """
        :param ~spinn_machine.Machine machine:
        :param dict(tuple(int,int),ExtraMonitorSupportMachineVertex) \
            extra_monitor_cores:
        :param ~pacman.model.placements.Placements placements:
        :rtype: tuple(MulticastRoutingTables, dict(tuple(int,int),int),
            dict(tuple(int,int),int))
        """
        # pylint: disable=attribute-defined-outside-init
        self._machine = machine
        self._placements = placements
        self._monitors = extra_monitor_cores
        self._routing_tables = MulticastRoutingTables()
        self._key_to_destination_map = dict()
        self._time_out_keys_by_board = dict()

        # create progress bar
        progress = ProgressBar(
            machine.ethernet_connected_chips,
            "Generating routing tables for data in system processes")

        for ethernet_chip in progress.over(machine.ethernet_connected_chips):
            tree = self._generate_routing_tree(ethernet_chip)
            self._add_routing_entries(ethernet_chip, tree)

        return (self._routing_tables, self._key_to_destination_map,
                self._time_out_keys_by_board)

    def _generate_routing_tree(self, ethernet_chip):
        """ Generates a map saying, for each chip, over which link it gets
            its data.

        :param ~spinn_machine.Chip ethernet_chip:
        :return: Map of (chip.x, chip.y) to (source.x, source.y, source.link)
        :rtype: dict(tuple(int, int), tuple(int, int, int))
        """
        eth_x = ethernet_chip.x
        eth_y = ethernet_chip.y
        tree = dict()

        to_reach = set(
            self._machine.get_existing_xys_by_ethernet(eth_x, eth_y))
        reached = set()
        reached.add((eth_x, eth_y))
        to_reach.remove((eth_x, eth_y))
        found = set()
        found.add((eth_x, eth_y))
        while len(to_reach) > 0:
            just_reached = found
            found = set()
            for x, y in just_reached:
                # Check links starting with the most direct from 0,0
                for link_id in [1, 0, 2, 5, 3, 4]:
                    # Get potential destination
                    destination = self._machine.xy_over_link(x, y, link_id)
                    # If it is useful
                    if destination in to_reach:
                        # check the link actually exists
                        if self._machine.is_link_at(x, y, link_id):
                            # Add to tree and record chip reachable
                            tree[destination] = (x, y, link_id)
                            to_reach.remove(destination)
                            found.add(destination)
            if len(found) == 0:
                raise PacmanRoutingException(
                    "Unable to do data in routing on {}.".format(
                        ethernet_chip.ip_address))
        return tree

    def _add_routing_entry(self, x, y, key, processor_id=None,
                           link_ids=None):
        """ Adds a routing entry on this chip, creating the table if needed.

        :param int x: chip.x
        :param int y: chip.y
        :param int key: The key to use
        :param int processor_id:
            placement.p of the monitor vertex, if applicable
        :param list(int) link_ids: IDs of the links out, if applicable
        """
        table = self._routing_tables.get_routing_table_for_chip(x, y)
        if table is None:
            table = UnCompressedMulticastRoutingTable(x, y)
            self._routing_tables.add_routing_table(table)
        if processor_id is None:
            processor_ids = []
        else:
            processor_ids = [processor_id]
        if link_ids is None:
            link_ids = []
        entry = MulticastRoutingEntry(
            routing_entry_key=key, mask=ROUTING_MASK,
            processor_ids=processor_ids, link_ids=link_ids,
            defaultable=False)
        table.add_multicast_routing_entry(entry)

    def _add_routing_entries(self, ethernet_chip, tree):
        """ Adds the routing entries based on the tree.

        For every chip with this ethernet:
            - A key is generated (and saved) for this chip.
            - A local route to the monitor core is added.
            - The tree is walked, adding a route on each source to get here.

        :param ~spinn_machine.Chip ethernet_chip:
            the ethernet chip to make entries for
        :param dict(tuple(int,int),tuple(int,int,int)) tree:
            map of chips and links
        """
        eth_x = ethernet_chip.x
        eth_y = ethernet_chip.y
        key = KEY_START_VALUE
        for (x, y) in self._machine.get_existing_xys_by_ethernet(
                eth_x, eth_y):
            self._key_to_destination_map[x, y] = key
            placement = self._placements.get_placement_of_vertex(
                self._monitors[x, y])
            self._add_routing_entry(x, y, key, processor_id=placement.p)
            while (x, y) in tree:
                x, y, link = tree[(x, y)]
                self._add_routing_entry(x, y, key, link_ids=[link])
            key += N_KEYS_PER_PARTITION_ID

        # accumulate links to make a broadcast
        links_per_chip = defaultdict(list)
        for chip_key in tree:
            x, y, link = tree[chip_key]
            links_per_chip[x, y].append(link)

        # add broadcast router timeout keys
        time_out_key = key
        for (x, y) in self._machine.get_existing_xys_by_ethernet(
                eth_x, eth_y):
            placement = self._placements.get_placement_of_vertex(
                self._monitors[x, y])
            self._add_routing_entry(
                x, y, time_out_key, processor_id=placement.p,
                link_ids=links_per_chip[x, y])
        # update tracker
        self._time_out_keys_by_board[(eth_x, eth_y)] = key
        key += N_KEYS_PER_REINJECTION_PARTITION
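A small standalone sketch (plain Python; the chip coordinates and link numbers are made up for illustration) of how the tree returned by _generate_routing_tree is walked in _add_routing_entries: following the map from a destination chip back to the ethernet chip gives every chip that needs an entry for that destination's key.

# Hypothetical tree for a board rooted at (0, 0): destination -> (x, y, link)
tree = {(1, 0): (0, 0, 0), (0, 1): (0, 0, 2), (1, 1): (1, 0, 2)}

x, y = 1, 1          # destination chip
hops = []
while (x, y) in tree:
    x, y, link = tree[(x, y)]
    hops.append((x, y, link))

# Entries are added on (1, 0) via link 2, then on (0, 0) via link 0
print(hops)  # [(1, 0, 2), (0, 0, 0)]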