def test_merger(self):
    """ Check that merging two entries unions their processor and link
        sets while keeping the shared key and mask.
    """
    link_ids_a = list(range(3))
    link_ids_b = list(range(3, 6))
    proc_ids_a = list(range(9))
    proc_ids_b = list(range(9, 18))
    key = 1
    mask = 1
    entry_a = MulticastRoutingEntry(
        key, mask, proc_ids_a, link_ids_a, True)
    entry_b = MulticastRoutingEntry(
        key, mask, proc_ids_b, link_ids_b, True)
    merged = entry_a.merge(entry_b)

    expected_links = list(range(6))
    self.assertEqual(link_ids_a + link_ids_b, expected_links)
    expected_procs = list(range(18))
    self.assertEqual(proc_ids_a + proc_ids_b, expected_procs)

    self.assertEqual(merged.routing_entry_key, key)
    self.assertEqual(merged.link_ids, set(expected_links))
    self.assertEqual(merged.mask, mask)
    self.assertEqual(merged.processor_ids, set(expected_procs))
def test_new_multicast_routing_tables(self):
    """ Check that a collection of routing tables can be built and that
        tables are retrievable by chip coordinates.
    """
    key_combo = 0xff35
    mask = 0xffff
    proc_ids = list(range(18))
    link_ids = list(range(6))
    entry_1 = MulticastRoutingEntry(
        key_combo, mask, proc_ids, link_ids, True)
    entry_2 = MulticastRoutingEntry(
        key_combo - 1, mask - 1, proc_ids, link_ids, True)
    t1 = MulticastRoutingTable(0, 0, [entry_1])
    t2 = MulticastRoutingTable(1, 0, [entry_2])
    mrt = [t1, t2]
    tables = MulticastRoutingTables(mrt)

    retrieved_tables = tables.routing_tables
    self.assertEqual(len(retrieved_tables), len(mrt))
    for tab in retrieved_tables:
        self.assertIn(tab, mrt)
    # lookup by coordinates, including a chip with no table
    self.assertEqual(tables.get_routing_table_for_chip(0, 0), t1)
    self.assertEqual(tables.get_routing_table_for_chip(1, 0), t2)
    self.assertEqual(tables.get_routing_table_for_chip(2, 0), None)
def test_new_multicast_routing_tables(self):
    """ Check table construction, per-chip lookup, and that the tables
        survive a JSON round trip (validated against the schema).
    """
    key_combo = 0xff35
    mask = 0xffff
    proc_ids = list(range(18))
    link_ids = list(range(6))
    entry_1 = MulticastRoutingEntry(
        key_combo, mask, proc_ids, link_ids, True)
    entry_2 = MulticastRoutingEntry(
        key_combo - 1, mask - 1, proc_ids, link_ids, True)
    t1 = UnCompressedMulticastRoutingTable(0, 0, [entry_1])
    t2 = UnCompressedMulticastRoutingTable(1, 0, [entry_2])
    mrt = [t1, t2]
    tables = MulticastRoutingTables(mrt)

    retrieved_tables = tables.routing_tables
    self.assertEqual(len(retrieved_tables), len(mrt))
    for tab in retrieved_tables:
        self.assertIn(tab, mrt)
    self.assertEqual(tables.get_routing_table_for_chip(0, 0), t1)
    self.assertEqual(tables.get_routing_table_for_chip(1, 0), t2)
    self.assertEqual(tables.get_routing_table_for_chip(2, 0), None)

    # serialise, validate against the schema, and read back
    json_obj = to_json(tables)
    file_format_schemas.validate(json_obj, "routing_tables.json")
    new_tables = from_json(json_obj)
    self.assertEqual(new_tables.get_routing_table_for_chip(0, 0), t1)
    self.assertEqual(new_tables.get_routing_table_for_chip(1, 0), t2)
    self.assertEqual(new_tables.get_routing_table_for_chip(2, 0), None)
def _generate_routing_tables(self, routing_tables,
                             routing_tables_by_partition, ethernet_chip):
    """ Build a multicast routing table for every router in the\
        partitioned routing output, translating the fake (board-local)\
        chip coordinates into real machine coordinates.

    :param routing_tables: the routing tables to store routing tables in
    :param routing_tables_by_partition: the routing output
    :param ethernet_chip: the Ethernet chip being used
    """
    for fake_chip_x, fake_chip_y in \
            routing_tables_by_partition.get_routers():
        # map fake coordinates to global ones relative to the Ethernet chip
        multicast_routing_table = MulticastRoutingTable(
            *self._real_machine.get_global_xy(fake_chip_x, fake_chip_y,
                                              ethernet_chip.x,
                                              ethernet_chip.y))

        # build routing table entries
        for partition, entry in iteritems(
                routing_tables_by_partition.get_entries_for_router(
                    fake_chip_x, fake_chip_y)):
            multicast_routing_table.add_multicast_routing_entry(
                MulticastRoutingEntry(
                    routing_entry_key=partition.identifier,
                    mask=ROUTING_MASK,
                    processor_ids=entry.processor_ids,
                    link_ids=entry.link_ids,
                    defaultable=entry.defaultable))

        # add routing table to pile
        routing_tables.add_routing_table(multicast_routing_table)
def _convert_to_pacman_router_table(
        self, mundy_compressed_router_table_entries, router_x_coord,
        router_y_coord):
    """ Convert a rig (Mundy) compressed table into a PACMAN\
        MulticastRoutingTable, rejecting tables still too big to fit.

    :param mundy_compressed_router_table_entries: rig version of the table
    :param router_x_coord: the x coord of this routing table
    :param router_y_coord: the y coord of this routing table
    :return: pacman version of the table
    :raises PacmanElementAllocationException: if the compressed table\
        still exceeds the router's capacity
    """
    table = MulticastRoutingTable(router_x_coord, router_y_coord)

    if (len(mundy_compressed_router_table_entries) >
            self.max_supported_length):
        raise PacmanElementAllocationException(
            "The routing table {}:{} after compression will still not fit"
            " within the machines router ({} entries)".format(
                router_x_coord, router_y_coord,
                len(mundy_compressed_router_table_entries)))

    for entry in mundy_compressed_router_table_entries:
        table.add_multicast_routing_entry(
            MulticastRoutingEntry(
                entry.key, entry.mask,  # Key and mask
                # rig numbers cores from 6 upwards; shift back to 0-based
                ((int(c) - 6) for c in entry.route if c.is_core),  # Cores
                (int(l) for l in entry.route if l.is_link),  # Links
                False))  # NOT defaultable
    return table
def to_MulticastRoutingEntry(self):
    """ Build the machine-level routing entry equivalent to this one.

    :rtype: ~spinn_machine.MulticastRoutingEntry
    """
    converted = MulticastRoutingEntry(
        self.key, self.mask,
        defaultable=self.defaultable,
        spinnaker_route=self.spinnaker_route)
    return converted
def __create_entry(key_and_mask, entry):
    """ Build a multicast routing entry from a key/mask pair and the\
        by-partition entry that supplies the routes.

    :param BaseKeyAndMask key_and_mask:
    :param MulticastRoutingTableByPartitionEntry entry:
    :rtype: MulticastRoutingEntry
    """
    return MulticastRoutingEntry(
        routing_entry_key=key_and_mask.key_combo,
        mask=key_and_mask.mask,
        processor_ids=entry.processor_ids,
        link_ids=entry.link_ids,
        defaultable=entry.defaultable)
def test_add_routing_table_for_duplicate_chip(self):
    """ Check that registering two tables for the same chip raises. """
    key_combo = 0xff35
    mask = 0xffff
    proc_ids = list(range(18))
    link_ids = list(range(6))
    entry_a = MulticastRoutingEntry(
        key_combo, mask, proc_ids, link_ids, True)
    entry_b = MulticastRoutingEntry(
        key_combo - 1, mask, proc_ids, link_ids, True)
    # both tables claim chip (3, 0)
    mrt = [
        MulticastRoutingTable(3, 0, [entry_a]),
        MulticastRoutingTable(3, 0, [entry_b])]
    with self.assertRaises(PacmanAlreadyExistsException):
        MulticastRoutingTables(mrt)
def test_new_multicast_routing_table_entry(self):
    """ test that creating a multicast routing entry works
    """
    # TODO: Move this test to SpiNNMachine's test suite
    MulticastRoutingEntry(0xff00, 0xff00, range(18), range(6), True)
def _from_csv_file(csvfile):
    """ Parse a CSV file of routing entries into an uncompressed table.

    Rows carry either three columns (key, mask, route) or six columns
    where the same triple sits one column to the right; all values are
    hexadecimal. Unparsable values are logged and skipped.
    """
    table = UnCompressedMulticastRoutingTable(0, 0)
    for row in csv.reader(csvfile):
        if len(row) == 3:
            offset = 0
        elif len(row) == 6:
            offset = 1
        else:
            continue
        try:
            key = int(row[offset], base=16)
            mask = int(row[offset + 1], base=16)
            route = int(row[offset + 2], base=16)
            table.add_multicast_routing_entry(
                MulticastRoutingEntry(key, mask, spinnaker_route=route))
        except ValueError as ex:
            logger.warning(f"csv read error {ex}")
    return table
def __add_key_and_mask(key_and_mask, entry, table):
    """ Add one routing entry, built from a key/mask pair and a\
        by-partition entry, to the given table.

    :param BaseKeyAndMask key_and_mask:
    :param MulticastRoutingTableByPartitionEntry entry:
    :param MulticastRoutingTable table:
    """
    new_entry = MulticastRoutingEntry(
        routing_entry_key=key_and_mask.key_combo,
        mask=key_and_mask.mask,
        processor_ids=entry.processor_ids,
        link_ids=entry.link_ids,
        defaultable=entry.defaultable)
    table.add_multicast_routing_entry(new_entry)
def test_merger_with_invalid_parameter_mask(self):
    """ Check that merging entries whose keys/masks differ raises. """
    link_ids_a = list(range(3))
    link_ids_b = list(range(3, 6))
    proc_ids_a = list(range(9))
    proc_ids_b = list(range(9, 18))
    entry_a = MulticastRoutingEntry(
        1, 1, proc_ids_a, link_ids_a, True)
    # key 2 / mask 2 do not match entry_a's key 1 / mask 1
    entry_b = MulticastRoutingEntry(
        2, 2, proc_ids_b, link_ids_b, True)
    with self.assertRaises(SpinnMachineInvalidParameterException):
        entry_a.merge(entry_b)
def _add_routing_entry(self, route_no, offset, app_id, route, key, mask):
    """ Record one routing entry read back from the machine, skipping\
        unallocated routes and entries owned by other applications.
    """
    # pylint: disable=too-many-arguments
    # routes >= 0xFF000000 are not valid allocated routes
    if route >= 0xFF000000 or (
            self._app_id is not None and self._app_id != app_id):
        return
    # Convert bit-set into list of (set) IDs
    procs, links = Router.convert_spinnaker_route_to_routing_ids(route)
    self._entries[route_no + offset] = MulticastRoutingEntry(
        key, mask, procs, links, False)
def _merge_range(self, first, last):
    """ Compress the entries with indexes in [first, last] into one\
        power-of-two-aligned entry where possible, recursing on any\
        tail of the range the chosen alignment cannot cover.

    :param int first: index of the first entry in the range
    :param int last: index of the last entry in the range
    """
    # With a range of 1 just use the existing
    if first == last:
        self._compressed.add_multicast_routing_entry(self._entries[first])
        return
    # Find the points the range must cover
    first_point = self._get_key(first)
    last_point = self._get_endpoint(last)
    # Find the points the range may NOT go into
    pre_point = self._get_endpoint(first - 1)
    post_point = self._get_key(last + 1)

    # find the power big enough to include the first and last entry
    dif = last_point - first_point
    power = self.next_power(dif)

    # Find the start range cutoffs (aligned down to the power)
    low_cut = first_point // power * power
    high_cut = low_cut + power

    # If that does not cover all try one power higher
    if high_cut < last_point:
        power <<= 1
        low_cut = first_point // power * power
        high_cut = low_cut + power

    # The power is too big if it touches the entry before or after
    while power > 1 and (low_cut < pre_point or high_cut > post_point):
        power >>= 1
        low_cut = first_point // power * power
        high_cut = low_cut + power

    # The range may now not cover all the index so reduce the indexes
    full_last = last
    while high_cut <= last_point:
        last -= 1
        last_point = self._get_endpoint(last)

    # make the new router entry; mask covers everything except the
    # `power` low-order keys, route is copied from the first entry
    new_mask = 2**32 - power
    route = self._entries[first].spinnaker_route
    new_entry = MulticastRoutingEntry(low_cut, new_mask,
                                      spinnaker_route=route)
    self._compressed.add_multicast_routing_entry(new_entry)

    # Do any indexes skipped from before (the uncovered tail)
    if full_last != last:
        self._merge_range(last + 1, full_last)
def _create_routing_table(self, chip, partitions_in_table, routing_infos):
    """ Build the multicast routing table for one chip from its\
        partition-to-entry mapping and the allocated routing info.
    """
    table = MulticastRoutingTable(chip.x, chip.y)
    for partition in partitions_in_table:
        entry = partitions_in_table[partition]
        r_info = routing_infos.get_routing_info_from_partition(partition)
        # one routing entry per key/mask allocated to this partition
        for key_and_mask in r_info.keys_and_masks:
            table.add_multicast_routing_entry(MulticastRoutingEntry(
                routing_entry_key=key_and_mask.key_combo,
                mask=key_and_mask.mask,
                processor_ids=entry.out_going_processors,
                link_ids=entry.out_going_links,
                defaultable=entry.defaultable))
    return table
def _generate_entries_from_bitfield(
        self, bit_fields, routing_table_entry, key_to_n_atoms_map):
    """ generate neuron level entries

    :param list(_BitFieldData) bit_fields: the bitfields for a given key
    :param ~.MulticastRoutingEntry routing_table_entry: the original
        entry from it
    :param dict(int,int) key_to_n_atoms_map:
    :return: the set of bitfield entries
    """
    # processors that have a bitfield; they are filtered per neuron
    filtered = [
        bit_field_data.processor_id for bit_field_data in bit_fields]

    # get some basic values
    entry_links = routing_table_entry.link_ids
    base_key = routing_table_entry.routing_entry_key
    n_neurons = key_to_n_atoms_map[base_key]

    entries = list()
    # check each neuron to see if any bitfields care, and if so,
    # add processor
    for neuron in range(0, n_neurons):
        # processors without a bitfield always receive
        processors = [
            processor_id
            for processor_id in routing_table_entry.processor_ids
            if processor_id not in filtered]
        # bitfield processors receive only if their bit is set
        processors.extend(
            bit_field_data.processor_id
            for bit_field_data in bit_fields
            if self._bit_for_neuron_id(
                bit_field_data.bit_field, neuron))
        entries.append(MulticastRoutingEntry(
            routing_entry_key=base_key + neuron,
            mask=self._NEURON_LEVEL_MASK,
            link_ids=entry_links,
            defaultable=False,
            processor_ids=processors))
    return entries
def test_merger_with_invalid_parameter_key(self):
    """ Check the exception detail raised when merging entries with
        mismatched keys.
    """
    link_ids_a = list(range(3))
    link_ids_b = list(range(3, 6))
    proc_ids_a = list(range(9))
    proc_ids_b = list(range(9, 18))
    entry_a = MulticastRoutingEntry(
        1, 1, proc_ids_a, link_ids_a, True)
    entry_b = MulticastRoutingEntry(
        2, 2, proc_ids_b, link_ids_b, True)
    with self.assertRaises(SpinnMachineInvalidParameterException) as e:
        entry_a.merge(entry_b)
    self.assertEqual(e.exception.parameter, "other_entry.key")
    self.assertEqual(e.exception.value, "0x2")
    self.assertEqual(e.exception.problem, "The key does not match 0x1")
def test_new_multicast_routing_table_duplicate_key_combo(self):
    """ Check that a table rejects several entries with the same
        key/mask combination.
    """
    key_combo = 0xff35
    mask = 0xffff
    proc_ids = list(range(18))
    link_ids = list(range(6))
    # five entries, all with an identical key/mask
    duplicates = [
        MulticastRoutingEntry(key_combo, mask, proc_ids, link_ids, True)
        for _ in range(5)]
    with self.assertRaises(PacmanAlreadyExistsException):
        UnCompressedMulticastRoutingTable(0, 0, duplicates)
def _merge_routes(self, router_table, previous_masks):
    """ Compress a routing table by widening masks: for each entry whose\
        mask has all upper 16 bits set, try candidate wider masks and\
        fold every mergeable entry under the widened key/mask.

    :param MulticastRoutingTable router_table: table to compress
    :param dict(int,list(int)) previous_masks: cache of candidate merge\
        masks, consulted via self._get_merge_masks
    :rtype: MulticastRoutingTable
    """
    merged_routes = CompressedMulticastRoutingTable(
        router_table.x, router_table.y)
    keys_merged = set()  # keys already folded into an emitted entry
    entries = router_table.multicast_routing_entries
    for router_entry in entries:
        if router_entry.routing_entry_key in keys_merged:
            continue
        mask = router_entry.mask
        if mask & _UPPER_16_BITS == _UPPER_16_BITS:
            for extra_bits in self._get_merge_masks(
                    mask, previous_masks):
                new_mask = _UPPER_16_BITS | extra_bits
                new_key = router_entry.routing_entry_key & new_mask
                new_n_keys = ~new_mask & FULL_MASK
                # Get candidates for this particular possible merge
                potential_merges = self._mergeable_entries(
                    router_entry, entries, new_key, new_mask,
                    new_key + new_n_keys, keys_merged)
                # Only do a merge if there's real merging to do
                if len(potential_merges) > 1:
                    merged_routes.add_multicast_routing_entry(
                        MulticastRoutingEntry(
                            new_key, new_mask,
                            router_entry.processor_ids,
                            router_entry.link_ids, defaultable=False))
                    keys_merged.update(
                        route.routing_entry_key
                        for route in potential_merges)
                    break
            else:
                # no candidate mask produced a real merge; copy as-is
                # print("Was not able to merge", hex(key))
                merged_routes.add_multicast_routing_entry(router_entry)
                keys_merged.add(router_entry.routing_entry_key)
        else:
            # mask shape not eligible for widening; copy as-is
            merged_routes.add_multicast_routing_entry(router_entry)
            keys_merged.add(router_entry.routing_entry_key)
    return merged_routes
def setUp(self):
    """ Build the small uncompressed routing table used by the tests. """
    self.original_tables = MulticastRoutingTables()
    original_table = UnCompressedMulticastRoutingTable(x=0, y=0)
    # (key, mask, processor_ids) for each entry
    entry_specs = [
        (0b0000, 0b1111, [1, 2]),
        (0b0001, 0b1111, [0]),
        (0b0101, 0b1111, [4]),
        (0b1000, 0b1111, [1, 2]),
        (0b1001, 0b1111, [0]),
        (0b1110, 0b1111, [4]),
        (0b1100, 0b1111, [1, 2]),
        (0b0010, 0b1011, [4, 5]),
    ]
    for key, mask, processors in entry_specs:
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(key, mask, processors, [], False))
    self.original_tables.add_routing_table(original_table)
def setUp(self):
    """ Build the test routing table and configure full compression. """
    self.original_tables = MulticastRoutingTables()
    original_table = UnCompressedMulticastRoutingTable(x=0, y=0)
    # (key, mask, processor_ids) for each entry
    entry_specs = [
        (0b0000, 0b1111, [1, 2]),
        (0b0001, 0b1111, [0]),
        (0b0101, 0b1111, [4]),
        (0b1000, 0b1111, [1, 2]),
        (0b1001, 0b1111, [0]),
        (0b1110, 0b1111, [4]),
        (0b1100, 0b1111, [1, 2]),
        (0b0010, 0b1011, [4, 5]),
    ]
    for key, mask, processors in entry_specs:
        original_table.add_multicast_routing_entry(
            MulticastRoutingEntry(key, mask, processors, [], False))
    self.original_tables.add_routing_table(original_table)
    unittest_setup()
    set_config(
        "Mapping", "router_table_compress_as_far_as_possible", True)
def test_new_multicast_routing_table_duplicate_entry(self):
    """ test that adding multiple identical entries into a multicast
        table causes an error
    """
    key_combo = 0xff35
    mask = 0xff35
    proc_ids = list(range(18))
    link_ids = list(range(6))
    entries = [
        MulticastRoutingEntry(
            key_combo + i, mask + i, proc_ids, link_ids, True)
        for i in range(5)]
    mrt = UnCompressedMulticastRoutingTable(0, 0, entries)
    # re-adding an entry already in the table must raise
    with self.assertRaises(PacmanAlreadyExistsException):
        mrt.add_multicast_routing_entry(entries[0])
def from_json(j_router):
    """ Load routing tables from a JSON object, a JSON file path, or a\
        gzip-compressed JSON file path.
    """
    if isinstance(j_router, str):
        # a string is a file name; may be gzip-compressed
        if j_router.endswith(".gz"):
            with gzip.open(j_router) as j_file:
                j_router = json.load(j_file)
        else:
            with open(j_router, encoding="utf-8") as j_file:
                j_router = json.load(j_file)

    tables = MulticastRoutingTables()
    for j_table in j_router:
        table = UnCompressedMulticastRoutingTable(
            j_table["x"], j_table["y"])
        tables.add_routing_table(table)
        for j_entry in j_table["entries"]:
            table.add_multicast_routing_entry(MulticastRoutingEntry(
                j_entry["key"], j_entry["mask"],
                defaultable=j_entry["defaultable"],
                spinnaker_route=j_entry["spinnaker_route"]))
    return tables
def test_new_multicast_routing_table(self):
    """ test that creating a multicast routing entry and adding it to
        the table works
    """
    key_combo = 0xff000
    mask = 0xff000
    proc_ids = list(range(18))
    link_ids = list(range(6))
    entries = [
        MulticastRoutingEntry(
            key_combo + i, mask + i, proc_ids, link_ids, True)
        for i in range(5)]
    mrt = UnCompressedMulticastRoutingTable(0, 0, entries)
    self.assertEqual(mrt.x, 0)
    self.assertEqual(mrt.y, 0)

    retrieved = mrt.multicast_routing_entries
    for entry in retrieved:
        self.assertIn(entry, entries)
    self.assertEqual(len(retrieved), len(entries))

    # known key/mask combos resolve to their entry ...
    for i in range(5):
        self.assertEqual(
            mrt.get_multicast_routing_entry_by_routing_entry_key(
                key_combo + i, mask + i),
            entries[i])
    # ... unknown ones (above and below the range) resolve to None
    self.assertEqual(
        mrt.get_multicast_routing_entry_by_routing_entry_key(
            key_combo + 5, mask + 5), None)
    self.assertEqual(
        mrt.get_multicast_routing_entry_by_routing_entry_key(
            key_combo - 1, mask - 1), None)
def _add_routing_entry(self, x, y, key, processor_id=None, link_ids=None):
    """ Adds a routing entry on this chip, creating the table if needed.

    :param int x: chip.x
    :param int y: chip.y
    :param int key: The key to use
    :param int processor_id:
        placement.p of the monitor vertex, if applicable
    :param list(int) link_ids: IDs of the links out, if applicable
    """
    table = self._routing_tables.get_routing_table_for_chip(x, y)
    if table is None:
        # first entry for this chip, so make its table
        table = UnCompressedMulticastRoutingTable(x, y)
        self._routing_tables.add_routing_table(table)
    processor_ids = [] if processor_id is None else [processor_id]
    if link_ids is None:
        link_ids = []
    table.add_multicast_routing_entry(MulticastRoutingEntry(
        routing_entry_key=key, mask=ROUTING_MASK,
        processor_ids=processor_ids, link_ids=link_ids,
        defaultable=False))
def test(self):
    """Test minimising a table of the form:

        0000 -> N NE
        0001 -> E
        0101 -> SW
        1000 -> N NE
        1001 -> E
        1110 -> SW
        1100 -> N NE
        0X00 -> S SW

    The result (worked out by hand) should be:

        0000 -> N NE
        0X00 -> S SW
        1X00 -> N NE
        X001 -> E
        X1XX -> SW
    """
    # build the eight-entry input table on chip (0, 0)
    original_tables = MulticastRoutingTables()
    original_table = MulticastRoutingTable(x=0, y=0)
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b0000, 0b1111, [1, 2], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b0001, 0b1111, [0], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b0101, 0b1111, [4], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b1000, 0b1111, [1, 2], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b1001, 0b1111, [0], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b1110, 0b1111, [4], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b1100, 0b1111, [1, 2], [], False))
    original_table.add_multicast_routing_entry(
        MulticastRoutingEntry(0b0000, 0b1011, [4, 5], [], False))
    original_tables.add_routing_table(original_table)

    # run the compressor and fetch the result for the same chip
    mundy_compressor = MundyRouterCompressor()
    compressed_tables = mundy_compressor(original_tables)
    compressed_table = compressed_tables.get_routing_table_for_chip(0, 0)

    # TODO: FIX THIS SO THAT WE TEST THAT THE RESULT IS VALID
    # result_table_expected = MulticastRoutingTable(x=0, y=0)
    # result_table_expected.add_multicast_routing_entry(
    #     MulticastRoutingEntry(0b0000, 0b1111, [1, 2], [], False))
    # result_table_expected.add_multicast_routing_entry(
    #     MulticastRoutingEntry(0b0000, 0b1011, [4, 5], [], False))
    # result_table_expected.add_multicast_routing_entry(
    #     MulticastRoutingEntry(0b1000, 0b1011, [1, 2], [], False))
    # result_table_expected.add_multicast_routing_entry(
    #     MulticastRoutingEntry(0b0001, 0b0111, [0], [], False))
    # result_table_expected.add_multicast_routing_entry(
    #     MulticastRoutingEntry(0b0100, 0b0100, [4], [], False))

    # Minimise as far as possible
    assert compressed_table.number_of_entries == 5
    # assert compressed_table == result_table_expected
    compare_table(original_table, compressed_table)
def _merge_routes(self, router_table, previous_masks): merged_routes = MulticastRoutingTable(router_table.x, router_table.y) # Order the routes by key entries = sorted(router_table.multicast_routing_entries, key=lambda entry: entry.routing_entry_key) # Find adjacent entries that can be merged pos = 0 last_key_added = 0 while pos < len(entries): links = entries[pos].link_ids processors = entries[pos].processor_ids next_pos = pos + 1 # Keep going until routes are not the same or too many keys are # generated base_key = int(entries[pos].routing_entry_key) while (next_pos < len(entries) and entries[next_pos].link_ids == links and entries[next_pos].processor_ids == processors and (base_key & entries[next_pos].routing_entry_key) > last_key_added): base_key = (base_key & entries[next_pos].routing_entry_key) next_pos += 1 next_pos -= 1 # print("Pre decision", hex(base_key)) # If there is something to merge, merge it if possible merge_done = False if next_pos != pos: # print("At merge, base_key =", hex(base_key)) # Find the next nearest power of 2 to the number of keys # that will be covered last_key = (entries[next_pos].routing_entry_key + (~entries[next_pos].mask & _32_BITS)) n_keys = (last_key - base_key) + 1 next_log_n_keys = int(math.ceil(math.log(n_keys, 2))) n_keys = (1 << next_log_n_keys) - 1 n_keys_mask = ~n_keys & _32_BITS base_key = base_key & n_keys_mask if ((base_key + n_keys) >= last_key and base_key > last_key_added and (next_pos + 1 >= len(entries) or entries[pos].routing_entry_key + n_keys < entries[next_pos + 1].routing_entry_key)): last_key_added = base_key + n_keys merged_routes.add_multicast_routing_entry( MulticastRoutingEntry(int(base_key), n_keys_mask, processors, links, defaultable=False)) pos = next_pos merge_done = True if not merge_done: merged_routes.add_multicast_routing_entry(entries[pos]) last_key_added = (entries[pos].routing_entry_key + (~entries[pos].mask & _32_BITS)) pos += 1 return merged_routes