def decode_distributor_key(self, key, mask, row_idx):
        """Decode a raw distributor TCAM (key, mask) pair into an lpm_prefix.

        The TCAM MSBs carry an address-type marker: when they equal
        DISTRIBUTOR_IPV4_ENC_VALUE the entry is IPv4, otherwise IPv6.
        row_idx is accepted for interface compatibility but unused here.
        """
        if mask == 0:
            # Fully masked entry: empty prefix.
            return lpm_prefix(0, 0)

        key_width = bin(mask).count('1')
        masked_key = key & mask

        marker = get_bits(masked_key, DISTRIBUTOR_TCAM_WIDTH - 1,
                          DISTRIBUTOR_TCAM_WIDTH - DISTRIBUTOR_IPV4_ENC_WIDTH)
        is_ipv6 = (marker != DISTRIBUTOR_IPV4_ENC_VALUE)

        result = lpm_prefix(int(is_ipv6), ADDRESS_TYPE_LENGTH)
        assert 0 < key_width <= DISTRIBUTOR_TCAM_WIDTH
        if is_ipv6:
            # IPv6: the whole masked key (minus nothing) is the prefix.
            result <<= key_width
            result.value |= get_bits(masked_key, DISTRIBUTOR_TCAM_WIDTH - 1,
                                     DISTRIBUTOR_TCAM_WIDTH - key_width)
        else:
            # IPv4: drop the encoding marker, then take the prefix bits from
            # the IPv4 field of the distributor key.
            ipv4_width = key_width - DISTRIBUTOR_IPV4_ENC_WIDTH
            msb = IPV4_LENGTH_IN_DISTRIBUTOR - 1
            lsb = IPV4_LENGTH_IN_DISTRIBUTOR - ipv4_width
            width = msb - lsb + 1
            result <<= width
            if width > 0:
                result.value |= get_bits(masked_key, msb, lsb)

        return result
Ejemplo n.º 2
0
def key_to_str(key, width):
    """Render an encoded LPM key as an "ip/prefix_len" string.

    The key layout is [address-type bit | VRF (11 bits) | IP prefix bits].
    """
    vrf_length = 11
    if width == vrf_length + 1:
        # Only the address-type bit and VRF are present: the default route.
        return "0.0.0.0/0"
    decoded_prefix, decoded_width = decode_prefix(key, width)
    is_ipv6 = get_bits(key, width - 1, width - 1) == 1
    if is_ipv6:
        full_ip_width, ip_constructor = 128, ipaddress.IPv6Address
    else:
        full_ip_width, ip_constructor = 32, ipaddress.IPv4Address
    ip_width = decoded_width - vrf_length - 1
    # Left-align the prefix bits inside a full-width address.
    ip_bits = get_bits(decoded_prefix, ip_width - 1, 0)
    ip_value = ip_bits << (full_ip_width - ip_width)
    return "%s/%d" % (ip_constructor(ip_value), ip_width)
def make_ip_from_key(key, width):
    """Split an encoded LPM key into (vrf, ip_str, ip_width).

    Returns (None, None, None) when the key is too short to even hold the
    address-type bit plus the VRF field.
    """
    if width < 1 + VRF_LEN:
        return (None, None, None)

    is_ipv6 = get_bits(key, width - 1, width - 1) == 1
    full_len = 128 if is_ipv6 else 32
    # Left-align the key into a full [type | vrf | ip] layout.
    pad = full_len + VRF_LEN + 1 - width
    padded = key << pad

    ip_bits = get_bits(padded, full_len - 1, 0)
    vrf = get_bits(padded, full_len + VRF_LEN - 1, full_len)
    return (vrf, ip_to_str(ip_bits, is_ipv6), width - 1 - VRF_LEN)
Ejemplo n.º 4
0
    def read_hbm_line(self, core_idx, mem_line, replica=0):
        """Read one HBM line through the indirect-access register interface.

        Builds the read-command register value, writes it to the channel's
        command register, polls until the hardware clears the send-command
        bit, then extracts the data payload from the same register.

        NOTE(review): this poll loop has no timeout/iteration bound — it will
        spin forever if the hardware never clears bit 0.
        """
        channel_idx, register_data = self.make_hbm_read_data_request(
            core_idx, mem_line, replica)
        reg = self.hbm_regs[channel_idx]
        self.ll_device.write_register(reg, register_data)
        ready_to_read = 1
        # Ready to read when send_command bit is 0.
        while ready_to_read != 0:
            out_reg = self.ll_device.read_register(reg)
            ready_to_read = get_bits(out_reg, 0, 0)

        # Data payload: 1024 bits starting at bit 25 of the readback register.
        hbm_data_width = 1024
        hbm_data_start_offset = 25
        out_value = get_bits(out_reg, hbm_data_start_offset + hbm_data_width - 1, hbm_data_start_offset)
        return out_value
    def encode_prefix(prefix, width):
        """Insert the hardware "broken bit" into an IPv4 prefix.

        IPv6 prefixes (MSB set) and IPv4 prefixes too short to reach the
        broken-bit position pass through unchanged; otherwise the bits above
        BROKEN_BIT_INDEX_IN_PREFIX are shifted up by one, growing the
        returned width by one bit.
        """
        if (prefix >> (width - 1)) == 1:
            # IPv6 prefixes are stored verbatim.
            return lpm_prefix(prefix, width)

        bits_above_broken_bit = IPV4_ENCODED_KEY_LENGTH - BROKEN_BIT_INDEX_IN_PREFIX - 1
        if width <= bits_above_broken_bit:
            # Too short to contain the broken bit: no encoding needed.
            return lpm_prefix(prefix, width)

        pad = IPV4_KEY_LENGTH - width
        padded = prefix << pad
        upper = get_bits(padded, width - 1, BROKEN_BIT_INDEX_IN_PREFIX)
        lower = get_bits(padded, BROKEN_BIT_INDEX_IN_PREFIX - 1, 0)
        # Re-join with a one-bit gap at the broken-bit position.
        spread = (upper << (BROKEN_BIT_INDEX_IN_PREFIX + 1)) | lower
        return lpm_prefix(spread >> pad, width + 1)
Ejemplo n.º 6
0
def decode_prefix(prefix, width):
    """Undo the broken-bit encoding applied to IPv4 prefixes.

    Returns (decoded_prefix, decoded_width); IPv6 prefixes (MSB set) and
    short IPv4 prefixes are returned unchanged, otherwise the broken bit is
    squeezed out and the width shrinks by one.
    """
    if (prefix >> (width - 1)) == 1:
        # IPv6: stored unencoded.
        return (prefix, width)

    broken_bit = 20
    encoded_key_len = 45
    if width <= encoded_key_len - (broken_bit + 1):
        # Too short to contain the broken bit.
        return (prefix, width)

    assert encoded_key_len >= width
    pad = encoded_key_len - width
    padded = prefix << pad
    upper = get_bits(padded, encoded_key_len - 1, broken_bit + 1)
    lower = get_bits(padded, broken_bit - 1, 0)
    # Close the one-bit gap at the broken-bit position.
    decoded = ((upper << broken_bit) | lower) >> pad
    return (decoded, width - 1)
    def read_l2_hbm_bucket(self, core_idx, bucket_idx):
        """Read and decode one L2 bucket stored in HBM.

        The line layout is [default | section 0 entries | section 1 … ];
        only entries that parse as valid are appended to the bucket.
        """
        line_value = self.hw_src.read_hbm_line(core_idx, bucket_idx)

        bucket = lpm_bucket()
        bucket.default = lpm_default(
            get_bits(line_value, L2_DEFAULT_WIDTH - 1, 0))

        for section_idx in range(HBM_NUM_SECTIONS):
            section_base = section_idx * HBM_SECTION_WIDTH + L2_DEFAULT_WIDTH
            for entry_id in range(HBM_NUM_ENTRIES_IN_SECTION):
                lsb = section_base + entry_id * L2_ENTRY_LENGTH
                raw_entry = get_bits(line_value, lsb + L2_ENTRY_LENGTH - 1,
                                     lsb)
                entry = self.parse_hbm_entry(
                    raw_entry,
                    entry_index=(section_idx * HBM_NUM_ENTRIES_IN_SECTION +
                                 entry_id))
                if entry.valid:
                    bucket.entries.append(entry)

        return bucket
Ejemplo n.º 8
0
 def l1_entry_to_lpm_entry(entry, is_shared, entry_index):
     """Decode one raw L1 entry (GB layout) into an lpm_entry.

     Layout from LSB: payload, prefix, double-line flag. An all-zero prefix
     field marks an unused slot and yields an invalid entry.
     """
     payload = get_bits(entry, L1_PAYLOAD_LENGTH - 1, 0)
     prefix_bits = get_bits(entry, L1_PAYLOAD_LENGTH + L1_PREFIX_LENGTH - 1,
                            L1_PAYLOAD_LENGTH)
     if prefix_bits == 0:
         # Unused slot.
         return lpm_entry(lpm_prefix(0, 0),
                          0,
                          valid=False,
                          is_shared=is_shared,
                          index=entry_index)
     flag_pos = L1_PAYLOAD_LENGTH + L1_PREFIX_LENGTH
     is_double_line_in_hbm = bool(get_bits(
         entry, flag_pos, flag_pos))  # Currently should be always false.
     decoded_prefix = lpm_hw_to_logic_converter_gb.decode_bucket_prefix(
         prefix_bits)
     return lpm_entry(decoded_prefix,
                      payload=payload,
                      valid=True,
                      is_shared=is_shared,
                      index=entry_index)
 def parse_hbm_entry(entry: int, entry_index):
     """Decode one raw HBM L2 entry (Pacific layout) into an lpm_entry.

     Layout from LSB: prefix, payload, entry-type bit (0 == leaf). An
     all-zero prefix field marks an unused slot.
     """
     prefix_bits = get_bits(entry, L2_PREFIX_LENGTH - 1, 0)
     payload = get_bits(entry, L2_PREFIX_LENGTH + L2_PAYLOAD_WIDTH - 1,
                        L2_PREFIX_LENGTH)
     if prefix_bits == 0:
         # Unused slot.
         return lpm_entry(lpm_prefix(0, 0),
                          payload=0,
                          valid=False,
                          is_shared=False,
                          index=entry_index)
     decoded_prefix = lpm_hw_to_logic_converter_pacific.decode_bucket_prefix(
         prefix_bits)
     type_pos = L2_PREFIX_LENGTH + L2_PAYLOAD_WIDTH
     is_leaf = get_bits(entry, type_pos, type_pos) == 0
     assert decoded_prefix.width <= L2_PREFIX_LENGTH - 1
     return lpm_entry(decoded_prefix,
                      payload=payload,
                      valid=True,
                      is_shared=False,
                      is_leaf=is_leaf,
                      index=entry_index)
    def l1_entry_to_lpm_entry(entry, is_shared, entry_index):
        """Decode one raw L1 entry (Pacific layout) into an lpm_entry.

        Layout from LSB: fullness field, payload, prefix. An all-zero prefix
        field marks an unused slot and yields an invalid entry.
        """
        l1_fullness = get_bits(entry, L1_ENTRY_FULLNESS_WIDTH - 1, 0)
        base = L1_ENTRY_FULLNESS_WIDTH
        payload = get_bits(entry, base + L1_PAYLOAD_LENGTH - 1, base)
        base += L1_PAYLOAD_LENGTH
        prefix_bits = get_bits(entry, base + L1_PREFIX_LENGTH - 1, base)
        if prefix_bits == 0:
            # Unused slot.
            return lpm_entry(lpm_prefix(0, 0),
                             0,
                             valid=False,
                             is_shared=is_shared,
                             index=entry_index)

        decoded_prefix = lpm_hw_to_logic_converter_pacific.decode_bucket_prefix(
            prefix_bits)
        return lpm_entry(decoded_prefix,
                         payload=payload,
                         valid=True,
                         is_shared=is_shared,
                         index=entry_index)
Ejemplo n.º 11
0
    def read_hbm_line(self,
                      core_idx,
                      mem_line,
                      replica=0,
                      read_fat_hbm_line=False):
        """Read one (thin or fat) HBM line by direct memory access.

        The (mem_line, core_idx) pair is folded into a destination index
        whose bit slices select channel, bank, row and column. A "fat" line
        spans two consecutive columns; the first column's data ends up in
        the high HBM_THIN_BUCKET_WIDTH bits of the result.

        NOTE(review): the slice positions below encode the device address
        mapping — derived from the surrounding code only; confirm against
        the hardware spec before changing.
        """

        rows_offset = 256 if read_fat_hbm_line else 128

        dest_index = (mem_line << 4) + core_idx

        # Low 8 bits select bank+channel; each replica is 4 channels apart.
        bank_channel = get_bits(dest_index, 7, 0) + 4 * replica

        if read_fat_hbm_line:
            # Fat lines use an extra row bit and even column numbers only.
            bank_row = get_bits(dest_index, 15, 8) + rows_offset * replica
            column = get_bits(dest_index, 18, 16) << 1
        else:
            bank_row = get_bits(dest_index, 14, 8) + rows_offset * replica
            column = get_bits(dest_index, 18, 15)

        channel = get_bits(bank_channel, 3, 0)
        bank_msb = get_bits(bank_channel, 5, 4)
        bank_lsb = get_bits(bank_channel, 7, 6)
        bank = (bank_msb << 2) + bank_lsb

        cif_num = channel // 2
        addr = (bank_row << 4) | column

        ret_val = self.ll_device.read_memory(
            self.hbm_mems[cif_num][channel % 2][bank], addr)

        if read_fat_hbm_line:
            # First column -> high half; next column -> low half.
            ret_val = get_bits(ret_val, HBM_THIN_BUCKET_WIDTH - 1, 0)
            ret_val <<= HBM_THIN_BUCKET_WIDTH
            addr = (bank_row << 4) | (column + 1)
            next_col_val = self.ll_device.read_memory(
                self.hbm_mems[cif_num][channel % 2][bank], addr)
            ret_val |= get_bits(next_col_val, HBM_THIN_BUCKET_WIDTH - 1, 0)

        return ret_val
Ejemplo n.º 12
0
    def decode_distributor_key(self, key, mask):
        """Decode a distributor TCAM (key, mask) pair into an lpm_prefix.

        The top bit of the key selects the address family; IPv6 keys carry a
        two-bit type marker, IPv4 keys a single bit.
        """
        is_ipv6 = (key >> (DISTRIBUTOR_TCAM_WIDTH - 1)) != 0
        msb_offset = 2 if is_ipv6 else 1

        key_width = bin(mask).count('1') - msb_offset
        ret_key = lpm_prefix(int(is_ipv6), ADDRESS_TYPE_LENGTH)
        if key_width <= 0:
            # Only the type marker is unmasked: empty prefix body.
            return ret_key

        masked = key & mask
        ret_key <<= key_width
        top = DISTRIBUTOR_TCAM_WIDTH - 1 - msb_offset
        ret_key.value |= get_bits(masked, top, top - key_width + 1)
        return ret_key
Ejemplo n.º 13
0
    def print_prefixes(self, fp):
        """Dump every (VRF, prefix, payload) reachable in the LPM to a file.

        Walks TCAM -> L1 bucket -> L2 bucket for each core, reassembling the
        full key at each level, then writes a PrettyTable sorted by VRF.

        :param fp: output file path (opened here with open(fp, "w")).
        """
        # Gather keys
        keys_in_lpm = set()
        for core in self.cores:
            for tcam_entry in core.tcam:
                if not tcam_entry.valid:
                    continue
                tcam_width = tcam_entry.payload_hit_width
                # Keep only the hit-width MSBs of the TCAM key.
                tcam_key = tcam_entry.key.value >> (tcam_entry.key.width -
                                                    tcam_width)
                l1_bucket = core.l1_buckets[tcam_entry.payload_l1_bucket]
                for l1_entry in l1_bucket.entries:
                    l1_key = (
                        tcam_key << l1_entry.key.width) | l1_entry.key.value
                    l1_width = tcam_width + l1_entry.key.width
                    l2_bucket = core.l2_buckets[l1_entry.payload]
                    for l2_entry in l2_bucket.entries:
                        l2_key = (
                            l1_key << l2_entry.key.width) | l2_entry.key.value
                        l2_width = l1_width + l2_entry.key.width
                        keys_in_lpm.add((l2_key, l2_width, l2_entry.payload))

        # Convert keys to string and print
        vrf_length = 11
        table = PrettyTable()
        table.field_names = ["VRF", "Prefix", "Payload"]
        for key, width, payload in keys_in_lpm:
            if width < vrf_length + 1:
                # Too short to contain a VRF field: an LPM-internal entry.
                # NOTE(review): these rows will break the int(row[0], 16)
                # sort key below if any exist — confirm intended.
                table.add_row(["LPM internal", hex(key), hex(payload)])
            else:
                vrf_str = hex(get_bits(key, width - 2, width - vrf_length - 1))
                prefix_str = key_to_str(key, width)
                payload_str = hex(payload)
                table.add_row([vrf_str, prefix_str, payload_str])
        # Use a distinct name for the handle: the original shadowed the fp
        # path parameter with the open file object.
        with open(fp, "w") as out_file:
            print(table.get_string(sortby='VRF',
                                   sort_key=lambda row: int(row[0], 16)),
                  file=out_file)
Ejemplo n.º 14
0
    def make_hbm_read_data_request(core_idx, mem_line, replica):
        """Build the indirect HBM read command for a (core, line, replica).

        Returns (channel_idx, register_data): the index of the command
        register to write and the command word itself.
        """
        dest_index = (mem_line << 4) + core_idx

        # Low 8 bits select bank+channel; each replica is 4 channels apart.
        bank_channel = get_bits(dest_index, 7, 0) + 4 * replica
        bank_row = get_bits(dest_index, 14, 8) + 128 * replica
        column = get_bits(dest_index, 18, 15)

        channel = get_bits(bank_channel, 0, 0)
        channel_idx = get_bits(bank_channel, 3, 1)
        bank_msb = get_bits(bank_channel, 5, 4)
        bank_lsb = get_bits(bank_channel, 7, 6)
        bank = (bank_msb << 2) + bank_lsb

        register_data = 1  # LSB is 1 for send command.
        register_data = set_bits(register_data, 1, 1, channel)
        register_data = set_bits(register_data, 5, 2, bank)
        register_data = set_bits(register_data, 6, 6, 1)  # read/write bit, 1 for read.
        register_data = set_bits(register_data, 20, 7, bank_row)  # destination bank row.
        register_data = set_bits(register_data, 24, 21, column)  # destination column.

        return channel_idx, register_data
Ejemplo n.º 15
0
 def remove_msbs(self, number_of_bits):
     """Drop the number_of_bits most-significant bits of this prefix.

     Shrinks the prefix in place: the width is reduced and the value is
     masked down to the remaining low bits.

     Raises:
         ValueError: if number_of_bits exceeds the current width.
     """
     new_width = self.width - number_of_bits
     if new_width < 0:
         # ValueError (an Exception subclass) keeps existing broad handlers
         # working while telling callers exactly what went wrong.
         raise ValueError("cannot remove %d MSBs from a %d-bit prefix" %
                          (number_of_bits, self.width))
     self.value = get_bits(self.value, new_width - 1, 0)
     self.width = new_width
Ejemplo n.º 16
0
 def parse_tcam_payload(self, payload):
     """Split a raw TCAM payload into (l1_bucket_idx, hit_width).

     The L1 bucket index occupies the low field of the payload and the hit
     width the field above it.
     """
     return (get_bits(payload, TCAM_HIT_WIDTH_START - 1, TCAM_PAYLOAD_START),
             get_bits(payload, TCAM_PAYLOAD_WIDTH - 1, TCAM_HIT_WIDTH_START))
    def __init__(self, logs_file):
        """Rebuild the LPM state (distributor, TCAMs, L1/L2 buckets) by
        replaying a hardware-write log file.

        Each log line is matched against one of several patterns; bucket
        writes set loop-carried state (current_core / current_tree /
        current_row / root_width) that subsequent 'Node' and 'Default'
        lines rely on, so the log must be well-ordered.

        :param logs_file: path to the LPM log file to parse.
        """
        self.distributor = {}
        self.cores = [lpm_core() for _ in range(NUMBER_OF_CORES)]

        # Set TCAM defaults since it's not printed to logs.
        for core in self.cores:
            core.tcam[511] = lpm_tcam_entry(lpm_prefix(1, 1),
                                            valid=True,
                                            payload=0,
                                            hit_width=1)
            core.tcam[2047] = lpm_tcam_entry(lpm_prefix(0, 1),
                                             valid=True,
                                             payload=0,
                                             hit_width=1)

        distributor_to_group = {}
        group_to_core = {}
        with open(logs_file, "r") as fd:

            for lno, line in enumerate(fd):

                # m stays None until a pattern matches; each branch below is
                # guarded on it so at most one pattern is applied per line.
                m = None

                # Distributor TCAM row write: remember key/width/payload.
                if m is None and 'set_distributor_line' in line:
                    m = re.match(
                        '.*set_distributor_line\(line after offset = (?P<line>([0-9]*)), key = (?P<key>([0-9a-f]*)), key width = (?P<width>([0-9]*)), payload = (0x)?(?P<payload>([0-9a-f]*)).*',
                        line)
                    if m is not None:
                        row = int(m['line'], 10)
                        key = int(m['key'], 16)
                        width = int(m['width'], 10)
                        payload = int(m['payload'], 16)
                        distributor_to_group[row] = {
                            'key': key,
                            'width': width,
                            'payload': payload,
                            'valid': True
                        }

                # Group-to-core assignment.
                if m is None and 'assigning group' in line:
                    m = re.match(
                        '.*assigning group (?P<group>([0-9]*)) to core (?P<core>([0-9]*))',
                        line)
                    if m is not None:
                        group = int(m['group'], 10)
                        core = int(m['core'], 10)
                        group_to_core[group] = core

                # Distributor row invalidation.
                if m is None and 'remove_distributor_line' in line:
                    m = re.match(
                        '.*remove_distributor_line\(line after offset = (?P<line>([0-9]*)).*',
                        line)
                    if m is not None:
                        row = int(m['line'], 10)
                        if row in distributor_to_group.keys():
                            distributor_to_group[row]['valid'] = False
                        else:
                            print(
                                'ERROR: Distributer removing an invalid row %d  [log file line %d]'
                                % (row, lno))
                            return

                # Core TCAM write.
                if m is None and 'TCAM Wr' in line:
                    m = re.match(
                        '.*LPM: TCAM Wr *core = (?P<core>([0-9]*)) *row = (?P<row>([0-9]*)) *key = 0x(?P<key>([0-9a-f]*)) *key width = (?P<width>([0-9]*)) *payload = (?P<payload>([0-9]*))',
                        line)
                    if m is not None:
                        core = int(m['core'], 10)
                        row = int(m['row'], 10)
                        key_value = int(m['key'], 16)
                        width = int(m['width'], 10)
                        payload = int(m['payload'], 10)
                        key = lpm_prefix(key_value, width)
                        self.cores[core].tcam[row] = lpm_tcam_entry(
                            key=key,
                            valid=True,
                            payload=payload,
                            hit_width=width)

                # Core TCAM invalidation.
                if m is None and 'TCAM Iv' in line:
                    m = re.match(
                        '.*LPM: TCAM Iv *core = (?P<core>([0-9]*)) *row = (?P<row>([0-9]*))',
                        line)
                    if m is not None:
                        core = int(m['core'], 10)
                        row = int(m['row'], 10)
                        if row in self.cores[core].tcam and self.cores[
                                core].tcam[row].valid is True:
                            self.cores[core].tcam[row].valid = False
                        else:
                            print(
                                'WARNING: TCAM Invalidation of already invalid row %d  [log file line %d]'
                                % (row, lno))

                # L1/L2 bucket header: starts a new bucket and sets the
                # loop-carried state used by the 'Node'/'Default' lines below.
                if m is None and 'Write L' in line:
                    m = re.match(
                        '.*LPM Core (?P<core>([0-9]*)) Write L(?P<tree>([1-2])) Bucket Line (?P<line>([0-9]*))  #Nodes (?P<nnodes>([0-9]*))  root width (?P<rwidth>([0-9]*)):',
                        line)
                    if m is not None:
                        current_core = int(m['core'], 10)
                        current_tree = int(m['tree'])
                        current_row = int(m['line'], 10)
                        root_width = int(m['rwidth'], 10)
                        nodes_seen = 0

                        buckets = self.cores[
                            current_core].l1 if current_tree == 1 else self.cores[
                                current_core].l2

                        buckets[current_row] = lpm_bucket()

                # Bucket entry line: key is stored relative to the bucket's
                # root width announced by the preceding header.
                if m is None and 'Node: key' in line:
                    m = re.match(
                        '.*Node: key (?P<key>([0-9a-f]*)) width (?P<width>([0-9]*)) payload (?P<payload>([0-9a-f]*))',
                        line)
                    if m is not None:
                        key = int(m['key'], 16)
                        width = int(m['width'], 10)
                        payload = int(m['payload'], 16)
                        clean_width = width - root_width
                        clean_key = get_bits(key, clean_width - 1, 0)
                        nodes_seen += 1

                        try:
                            buckets = self.cores[
                                current_core].l1 if current_tree == 1 else self.cores[
                                    current_core].l2
                        except Exception:
                            print(
                                "Corrupted log file, trying to add entries to bucket before bucket declaration."
                            )

                        entry_key = lpm_prefix(clean_key, clean_width)
                        buckets[current_row].entries.append(
                            lpm_entry(key=entry_key,
                                      payload=payload,
                                      valid=True))

                # Bucket default payload line.
                if m is None and 'Default bucket payload' in line:
                    m = re.match(
                        '.*Default bucket payload (?P<payload>([0-9a-f]*))',
                        line)
                    if m is not None:
                        buckets = self.cores[
                            current_core].l1 if current_tree == 1 else self.cores[
                                current_core].l2
                        payload = int(m['payload'], 16)
                        # Right now supports only pacific.
                        buckets[current_row].default = lpm_default(
                            payload, is_pointer=False)

        # Materialize distributor entries now that group->core mapping is
        # complete.  (sic: 'key_valuue' is an existing local-name typo.)
        for row in distributor_to_group:
            key_valuue = distributor_to_group[row]['key']
            width = distributor_to_group[row]['width']
            group = distributor_to_group[row]['payload']
            valid = distributor_to_group[row]['valid']
            key = lpm_prefix(key_valuue, width)
            core = group_to_core.get(group, 0)
            self.distributor[row] = lpm_distributor_entry(
                key, valid, group, core)
Ejemplo n.º 18
0
    def l2_raw_group_to_lpm_entries(raw_group, is_even_bucket, is_shared,
                                    group_index):
        """Decode one raw L2 group into a list of lpm_entry objects.

        A group holds a double-flag bit followed by two
        (prefix, type, payload) entries.  The shared and non-shared layouts
        are identical except that even non-shared buckets carry one spare
        bit between the two entries — the original duplicated the whole
        decode for both cases; this version reads both entries with one
        helper and applies the spare-bit skip where needed.

        :param raw_group: raw bits of the group.
        :param is_even_bucket: even buckets have a 1-bit gap (non-shared only).
        :param is_shared: whether the group lives in the shared region.
        :param group_index: index of this group within the row.
        :return: list of decoded lpm_entry objects (0, 1 or 2 entries).
        """
        is_double = raw_group & 0x1

        def _read_entry(offset):
            # Decode one (prefix, is_leaf, payload) entry starting at offset;
            # returns the fields plus the offset just past the entry.
            raw_prefix = get_bits(raw_group, offset + L2_PREFIX_LENGTH - 1,
                                  offset)
            prefix = lpm_hw_to_logic_converter_gb.decode_bucket_prefix(
                raw_prefix) if raw_prefix > 0 else None
            offset += L2_PREFIX_LENGTH
            is_leaf = bool(get_bits(raw_group, offset, offset))
            offset += L2_ENTRY_TYPE_WIDTH
            payload = get_bits(raw_group, offset + L2_PAYLOAD_WIDTH - 1,
                               offset)
            offset += L2_PAYLOAD_WIDTH
            return prefix, is_leaf, payload, offset

        prefix0, is_prefix0_leaf, payload0, offset = _read_entry(1)
        if not is_shared and is_even_bucket:
            # Even non-shared buckets have one spare bit between the entries.
            offset += 1
        prefix1, is_prefix1_leaf, payload1, _ = _read_entry(offset)

        ret_entries = []
        if is_double:
            # Double entry: prefix1 extends prefix0; payload0 is the payload.
            assert prefix1 is not None and prefix0 is not None
            ret_key = lpm_prefix(
                (prefix1.value << prefix0.width) | prefix0.value,
                prefix1.width + prefix0.width)
            assert ret_key.width <= (L2_PREFIX_LENGTH - 1) * 2
            ret_entries.append(
                lpm_entry(ret_key,
                          payload=payload0,
                          valid=True,
                          is_shared=is_shared))
        else:
            if prefix0 is not None:
                assert prefix0.width <= L2_PREFIX_LENGTH - 1
                ret_entries.append(
                    lpm_entry(prefix0,
                              payload=payload0,
                              valid=True,
                              is_shared=is_shared,
                              index=group_index * 2,
                              is_leaf=is_prefix0_leaf))
            if prefix1 is not None:
                assert prefix1.width <= L2_PREFIX_LENGTH - 1
                ret_entries.append(
                    lpm_entry(prefix1,
                              payload=payload1,
                              valid=True,
                              is_shared=is_shared,
                              index=group_index * 2 + 1,
                              is_leaf=is_prefix1_leaf))
        return ret_entries
Ejemplo n.º 19
0
    def get_buckets_from_l1_row(row_data):
        """Decode one L1 SRAM row into its two lpm_buckets.

        Row layout: encoding field, two (default pointer, hit width) pairs,
        then four runs of entries — bucket-1 shared, bucket-0 shared,
        bucket-0 fixed, bucket-1 fixed.  The original repeated the same
        entry-decoding loop four times; this version drives one loop from a
        layout table, preserving the exact physical order.

        :param row_data: raw bits of the L1 row.
        :return: list of two lpm_bucket objects.
        """
        ENCODING_WIDTH = 3
        NUMBER_OF_SHARED_ENTRIES = 4
        NUMBER_OF_FIXED_ENTRIES = 2
        NON_ENTRY_DATA_WIDTH = ENCODING_WIDTH + 2 * L1_DEFAULT_POINTER_LENGTH + 2 * (
            L1_DEFAULT_LENGTH_WIDTH + 1)

        bits_offset = 0
        number_of_bucket1_shared_entries = get_bits(
            row_data, bits_offset + ENCODING_WIDTH - 1, bits_offset)
        bits_offset += ENCODING_WIDTH
        assert number_of_bucket1_shared_entries <= NUMBER_OF_SHARED_ENTRIES

        l1_row_buckets = [lpm_bucket() for _ in range(2)]
        # Two (pointer, hit-width) defaults, bucket 0 first.  Each pair is
        # followed by one pad bit (the +1 below).
        for bucket in l1_row_buckets:
            default_ptr = get_bits(row_data,
                                   bits_offset + L1_DEFAULT_POINTER_LENGTH - 1,
                                   bits_offset)
            bits_offset += L1_DEFAULT_POINTER_LENGTH
            default_hit_width = get_bits(
                row_data, bits_offset + L1_DEFAULT_LENGTH_WIDTH - 1,
                bits_offset)
            bits_offset += L1_DEFAULT_LENGTH_WIDTH + 1
            bucket.default = lpm_default(default_ptr,
                                         is_pointer=True,
                                         default_hit_width=default_hit_width)
        # offset is 26 here.

        # (entry count, destination bucket, shared?) in physical row order.
        layout = (
            (number_of_bucket1_shared_entries, 1, True),
            (NUMBER_OF_SHARED_ENTRIES - number_of_bucket1_shared_entries, 0,
             True),
            (NUMBER_OF_FIXED_ENTRIES, 0, False),
            (NUMBER_OF_FIXED_ENTRIES, 1, False),
        )
        for count, bucket_idx, is_shared in layout:
            for _ in range(count):
                entry = get_bits(row_data, bits_offset + L1_ENTRY_LENGTH - 1,
                                 bits_offset)
                index = (bits_offset - NON_ENTRY_DATA_WIDTH) // L1_ENTRY_LENGTH
                bits_offset += L1_ENTRY_LENGTH
                entry_instance = lpm_hw_to_logic_converter_gb.l1_entry_to_lpm_entry(
                    entry, is_shared=is_shared, entry_index=index)
                if entry_instance.valid:
                    l1_row_buckets[bucket_idx].entries.append(entry_instance)

        return l1_row_buckets
Ejemplo n.º 20
0
    def get_buckets_from_l2_row(row, row_width):
        """Decode one L2 SRAM row into its two lpm_buckets.

        Row layout: ECC + narrow flag, encoding (shared-group split), two
        defaults, the shared groups (bucket-1's first), then interleaved
        fixed groups for both buckets.

        BUG FIX: the fixed-group loop previously decoded the stale
        ``raw_group`` left over from the shared loops instead of the freshly
        assembled ``group0``/``group1`` (and raised NameError when there
        were no shared groups at all); it now decodes group0/group1.

        :param row: raw bits of the L2 row.
        :param row_width: total number of meaningful bits in the row.
        :return: list of two lpm_bucket objects.
        """
        NON_ENTRY_DATA_WIDTH = L2_ECC + L2_IS_NARROW_WIDTH + L2_ENCODING_WIDTH + 2 * L2_ENTRY_TYPE_WIDTH + 2 * L2_DEFAULT_WIDTH
        ret_buckets = [lpm_bucket() for _ in range(2)]
        offset = L2_ECC + L2_IS_NARROW_WIDTH
        # Number of shared groups that belong to bucket 1.
        shared_to_1 = get_bits(row, offset + L2_ENCODING_WIDTH - 1, offset)
        offset += L2_ENCODING_WIDTH
        offset += L2_ENTRY_TYPE_WIDTH
        # Defaults:
        default0 = get_bits(row, offset + L2_DEFAULT_WIDTH - 1, offset)
        ret_buckets[0].default = lpm_default(default0)
        offset += L2_DEFAULT_WIDTH + L2_ENTRY_TYPE_WIDTH
        default1 = get_bits(row, offset + L2_DEFAULT_WIDTH - 1, offset)
        ret_buckets[1].default = lpm_default(default1)
        offset += L2_DEFAULT_WIDTH

        # Shared groups, bucket-1's first: (count, bucket index, even?).
        shared_layout = ((shared_to_1, 1, False),
                         (L2_NUMBER_OF_SHARED_GROUPS - shared_to_1, 0, True))
        for count, bucket_idx, is_even in shared_layout:
            for _ in range(count):
                raw_group = get_bits(row, offset + L2_GROUP_WIDTH - 1, offset)
                group_index = (offset - NON_ENTRY_DATA_WIDTH) // L2_GROUP_WIDTH
                entries = lpm_hw_to_logic_converter_gb.l2_raw_group_to_lpm_entries(
                    raw_group,
                    is_even_bucket=is_even,
                    is_shared=True,
                    group_index=group_index)
                offset += L2_GROUP_WIDTH
                ret_buckets[bucket_idx].entries += entries

        # Fixed groups: entry 0 of each bucket (with its narrow bit), then
        # entry 1 of each bucket, interleaved bucket0/bucket1.
        while offset < row_width:
            group0_index = (offset - NON_ENTRY_DATA_WIDTH) // L2_GROUP_WIDTH
            group0_entry0 = get_bits(
                row, offset + L2_ENTRY_WIDTH + L2_IS_NARROW_WIDTH - 1, offset)
            offset += L2_ENTRY_WIDTH + L2_IS_NARROW_WIDTH
            group1_entry0 = get_bits(
                row, offset + L2_ENTRY_WIDTH + L2_IS_NARROW_WIDTH - 1, offset)
            offset += L2_ENTRY_WIDTH + L2_IS_NARROW_WIDTH
            group0_entry1 = get_bits(row, offset + L2_ENTRY_WIDTH - 1, offset)
            offset += L2_ENTRY_WIDTH
            group1_entry1 = get_bits(row, offset + L2_ENTRY_WIDTH - 1, offset)
            offset += L2_ENTRY_WIDTH
            group0 = (group0_entry0 << L2_ENTRY_WIDTH) | group0_entry1
            group1 = (group1_entry0 << L2_ENTRY_WIDTH) | group1_entry1

            # Decode the freshly assembled groups (was: stale raw_group).
            b0_entries = lpm_hw_to_logic_converter_gb.l2_raw_group_to_lpm_entries(
                group0,
                is_even_bucket=True,
                is_shared=False,
                group_index=group0_index)
            b1_entries = lpm_hw_to_logic_converter_gb.l2_raw_group_to_lpm_entries(
                group1,
                is_even_bucket=True,
                is_shared=False,
                group_index=group0_index + 1)
            ret_buckets[0].entries += b0_entries
            ret_buckets[1].entries += b1_entries

        return ret_buckets
# Ejemplo n.º 21 (example separator from the source aggregation)
    def read_l2_hbm_bucket(self, core_idx, bucket_idx):
        """Read one L2 bucket from HBM and decode it into an ``lpm_bucket``.

        Reads the HBM line backing (core_idx, bucket_idx) and walks its
        fixed-width entry groups; each group can hold two independent
        entries, or one "double" entry built from both encoded prefixes.

        :param core_idx: LPM core index to read from.
        :param bucket_idx: HBM bucket (line) index within the core.
        :return: ``lpm_bucket`` with the decoded entries; default is set to 0.
        """
        # Fat lines would span two thin lines of groups; currently disabled.
        use_fat_hbm_lines = False
        number_of_hbm_thin_lines = 2 if use_fat_hbm_lines else 1
        hbm_data = self.hw_src.read_hbm_line(
            core_idx,
            bucket_idx,
            replica=0,
            read_fat_hbm_line=use_fat_hbm_lines)

        bucket = lpm_bucket()
        for thin_line_idx in range(number_of_hbm_thin_lines):
            # Each thin line contributes HBM_NUM_GROUPS_PER_THIN_BUCKET groups.
            group_offset = HBM_THIN_BUCKET_WIDTH * thin_line_idx
            for group_idx in range(HBM_NUM_GROUPS_PER_THIN_BUCKET):
                group_data = get_bits(hbm_data,
                                      group_offset + L2_GROUP_WIDTH - 1,
                                      group_offset)
                # Group bit layout (LSB first), per the get_bits calls below:
                #   bit 0      : double-entry flag
                #   bits 17:1  : encoded prefix 0
                #   bits 34:18 : encoded prefix 1
                #   bit 35     : prefix-0 leaf flag
                #   bits 63:36 : payload 0
                #   bit 64     : prefix-1 leaf flag
                #   bits 92:65 : payload 1
                is_double = get_bits(group_data, 0, 0)
                raw_prefix0 = get_bits(group_data, 17, 1)
                # A zero encoded prefix is treated as an empty slot.
                if raw_prefix0 > 0:
                    prefix0 = self.decode_bucket_prefix(raw_prefix0)
                    is_prefix0_leaf = get_bits(group_data, 35, 35)
                    payload0 = get_bits(group_data, 63, 36)
                    if not is_double:
                        bucket.entries.append(
                            lpm_entry(prefix0,
                                      payload0,
                                      valid=True,
                                      is_shared=False,
                                      is_leaf=is_prefix0_leaf,
                                      index=thin_line_idx *
                                      HBM_NUM_GROUPS_PER_THIN_BUCKET +
                                      group_idx))

                raw_prefix1 = get_bits(group_data, 34, 18)
                if raw_prefix1 > 0:
                    prefix1 = self.decode_bucket_prefix(raw_prefix1)
                    is_prefix1_leaf = get_bits(group_data, 64, 64)
                    payload1 = get_bits(group_data, 92, 65)
                    if not is_double:
                        # NOTE(review): entry 1 is reported at group_idx + 1
                        # while the group loop also advances group_idx by 1 —
                        # confirm the intended index numbering.
                        bucket.entries.append(
                            lpm_entry(prefix1,
                                      payload1,
                                      valid=True,
                                      is_shared=False,
                                      is_leaf=is_prefix1_leaf,
                                      index=thin_line_idx *
                                      HBM_NUM_GROUPS_PER_THIN_BUCKET +
                                      group_idx + 1))

                if is_double:
                    # A double entry concatenates both encoded prefixes into
                    # one longer prefix; payload and leaf flag come from slot 0.
                    # This is not asserted because unreachable HBM buckets contain invalid data.
                    if raw_prefix1 > 0 and raw_prefix0 > 0:
                        ret_key = lpm_prefix(
                            (prefix1.value << prefix0.width) | prefix0.value,
                            prefix1.width + prefix0.width)
                        assert ret_key.width <= (L2_PREFIX_LENGTH - 1) * 2
                        bucket.entries.append(
                            lpm_entry(ret_key,
                                      payload=payload0,
                                      is_leaf=is_prefix0_leaf,
                                      valid=True,
                                      is_shared=False,
                                      index=thin_line_idx *
                                      HBM_NUM_GROUPS_PER_THIN_BUCKET +
                                      group_idx))

                group_offset += L2_GROUP_WIDTH

        # HBM buckets carry no stored default payload; it is hardwired to 0.
        bucket.default = lpm_default(0)
        return bucket
# Ejemplo n.º 22 (example separator from the source aggregation)
def analyze_test_file(fname, list_of_descriptors, csvfile=None):
    """Print prefix-length histograms for the ``lpm_insert`` lines of a test file.

    Parses every ``lpm_insert <prefix-hex> <full-length> <payload>`` line of
    *fname* (plain text or gzip-compressed), tallies IP prefix lengths per
    address family, counts unique IPv6 prefixes per descriptor in
    *list_of_descriptors*, and optionally appends one CSV row per family to
    *csvfile*.

    :param fname: path of the test file; a ``.gz`` extension selects gzip.
    :param list_of_descriptors: descriptors used to count unique IPv6 prefixes.
    :param csvfile: optional open, writable file object for CSV output rows.
    """
    # Index = IP prefix length; 129 slots (0..128) keep the CSV columns
    # uniform between IPv4 and IPv6.
    widths_v4 = [0 for _ in range(129)]
    widths_v6 = [0 for _ in range(129)]

    _, ext = os.path.splitext(fname)
    opener = gzip.open if ext == '.gz' else open

    unique_prefixes = {d: set() for d in list_of_descriptors}

    # Compiled once; this runs against every line of a potentially large file.
    insert_re = re.compile(
        r'lpm_insert (?P<prefix>[0-9a-f]+) (?P<full_length>[0-9]+) [0-9a-f]')

    print('Filename: %s' % fname)
    # BUGFIX: the file was previously opened and never closed; the context
    # manager guarantees closure even if parsing raises.
    with opener(fname, 'r') as f:
        for l in f:
            if isinstance(l, bytes):  # gzip.open('r') yields bytes lines
                l = str(l, 'utf-8')
                l = l.rstrip()
            m = insert_re.match(l)
            if m is None:
                continue
            prefix = int(m['prefix'], 16)
            prefix_len = int(m['full_length'], 10)
            # The key's MSB is the address-family bit (1 = IPv6); the VRF
            # field sits between it and the IP prefix bits.
            is_ipv6 = (prefix >> (prefix_len - 1))
            ip_prefix_len = prefix_len - 1 - VRF_LEN
            ip_prefix = get_bits(prefix, ip_prefix_len - 1, 0)
            if is_ipv6:
                widths_v6[ip_prefix_len] += 1
                related_descriptors = get_related_descriptors(
                    ip_prefix_len, list_of_descriptors)
                for d in related_descriptors:
                    bits_to_strip = ip_prefix_len - d.uniq_bits
                    unique_prefixes[d].add(ip_prefix >> bits_to_strip)
            else:
                widths_v4[ip_prefix_len] += 1

    total_v4 = sum(widths_v4)
    total_v6 = sum(widths_v6)

    if total_v4 > 0:
        print('IPv4 Distribution (total=%d)' % total_v4)
        csv_v4 = [fname, 'IPV4', str(total_v4)]

        for w, c in enumerate(widths_v4):
            csv_v4.append(str(c))
            if c != 0:
                # NOTE(review): '' * n always yields '' — the histogram bar
                # glyph was probably lost in transit; confirm the intended
                # character before changing output.
                print('%-4d  %-8d    %6.2f%%    %s' % (w, c, c / total_v4 * 100, '' * math.ceil((c * 100) / total_v4)))

        # No unique-prefix tracking for IPv4; pad so columns align with v6 rows.
        csv_v4 += [' ' for _ in list_of_descriptors]

        if csvfile is not None:
            csvfile.write('%s\n' % ','.join(csv_v4))

    if total_v6 > 0:
        print('IPv6 Distribution (total=%d)' % total_v6)
        csv_v6 = [fname, 'IPV6', str(total_v6)]

        for w, c in enumerate(widths_v6):
            csv_v6.append(str(c))
            if c != 0:
                print('%-4d  %-8d    %6.2f%%    %s' % (w, c, c / total_v6 * 100, '' * math.ceil((c * 100) / total_v6)))

        for d in list_of_descriptors:
            csv_v6.append(str(len(unique_prefixes[d])))

        if csvfile is not None:
            csvfile.write('%s\n' % ','.join(csv_v6))

    print('-----------')
    def get_buckets_from_l1_row(row_data):
        """Decode a raw L1 row into its two logical buckets (Pacific layout).

        Row layout (LSB first): a 3-bit encoding giving the number of shared
        entries owned by bucket 1, two 20-bit default payloads (bucket 0 then
        bucket 1), 4 shared entries (bucket 1's first, bucket 0 takes the
        remainder), then 2 fixed entries for bucket 0 and 2 for bucket 1.

        :param row_data: raw row bits as an integer.
        :return: list of two ``lpm_bucket`` objects ``[bucket0, bucket1]``.
        """
        ENCODING_WIDTH = 3
        NUMBER_OF_SHARED_ENTRIES = 4
        NUMBER_OF_FIXED_ENTRIES = 2
        DEFAULT_PAYLOAD_WIDTH = 20
        # Bits preceding the entry array; entry indices are relative to it.
        NON_ENTRY_DATA_WIDTH = ENCODING_WIDTH + 2 * DEFAULT_PAYLOAD_WIDTH

        def decode_entries(offset, count, bucket, is_shared):
            # Decode `count` consecutive entries starting at bit `offset`,
            # appending only the valid ones to `bucket`; returns new offset.
            for _ in range(count):
                entry = get_bits(row_data, offset + L1_ENTRY_LENGTH - 1,
                                 offset)
                index = (offset - NON_ENTRY_DATA_WIDTH) // L1_ENTRY_LENGTH
                entry_instance = lpm_hw_to_logic_converter_pacific.l1_entry_to_lpm_entry(
                    entry, is_shared=is_shared, entry_index=index)
                offset += L1_ENTRY_LENGTH
                if entry_instance.valid:
                    bucket.entries.append(entry_instance)
            return offset

        offset = 0
        number_of_bucket1_shared_entries = get_bits(
            row_data, offset + ENCODING_WIDTH - 1, offset)
        offset += ENCODING_WIDTH
        l1_row_buckets = [lpm_bucket() for _ in range(2)]
        assert number_of_bucket1_shared_entries <= NUMBER_OF_SHARED_ENTRIES

        default0 = get_bits(row_data, offset + DEFAULT_PAYLOAD_WIDTH - 1,
                            offset)
        offset += DEFAULT_PAYLOAD_WIDTH
        default1 = get_bits(row_data, offset + DEFAULT_PAYLOAD_WIDTH - 1,
                            offset)
        offset += DEFAULT_PAYLOAD_WIDTH
        l1_row_buckets[0].default = lpm_default(default0)
        l1_row_buckets[1].default = lpm_default(default1)

        # Shared entries: bucket 1's come first, bucket 0 gets the remainder.
        offset = decode_entries(offset, number_of_bucket1_shared_entries,
                                l1_row_buckets[1], is_shared=True)
        offset = decode_entries(
            offset,
            NUMBER_OF_SHARED_ENTRIES - number_of_bucket1_shared_entries,
            l1_row_buckets[0], is_shared=True)

        # Fixed entries: bucket 0's pair first, then bucket 1's pair.
        offset = decode_entries(offset, NUMBER_OF_FIXED_ENTRIES,
                                l1_row_buckets[0], is_shared=False)
        decode_entries(offset, NUMBER_OF_FIXED_ENTRIES,
                       l1_row_buckets[1], is_shared=False)

        return l1_row_buckets
    def get_buckets_from_l2_row(row, row_width):
        """Decode a raw L2 row into its two logical buckets (Pacific layout).

        Row layout (LSB first): ECC bits, an encoding field giving the number
        of shared entries owned by bucket 1, two default payloads (bucket 0
        then bucket 1), the shared entries (bucket 1's first), and finally
        the fixed entries, which are interleaved between the buckets
        (b0e0, b1e0, b0e1, b1e1, ...).

        :param row: raw row bits as an integer.
        :param row_width: total number of valid bits in the row.
        :return: list of two ``lpm_bucket`` objects ``[bucket0, bucket1]``.
        """
        # Bits preceding the entry array; entry indices are relative to it.
        NON_ENTRY_DATA_WIDTH = L2_ECC + L2_ENCODING_WIDTH + 2 * L2_DEFAULT_WIDTH
        ret_buckets = [lpm_bucket() for _ in range(2)]

        def decode_entry(offset, is_shared):
            # Decode the single entry starting at bit `offset`.
            entry = get_bits(row, offset + L2_ENTRY_LENGTH - 1, offset)
            index = (offset - NON_ENTRY_DATA_WIDTH) // L2_ENTRY_LENGTH
            return lpm_hw_to_logic_converter_pacific.l2_entry_to_lpm_entry(
                entry, is_shared=is_shared, entry_index=index)

        offset = 0
        # ECC bits are not needed for the logical view; just skip them.
        offset += L2_ECC
        shared_to_1 = get_bits(row, offset + L2_ENCODING_WIDTH - 1, offset)
        offset += L2_ENCODING_WIDTH

        # Defaults: bucket 0's payload first, then bucket 1's.
        for bucket_idx in range(2):
            default = get_bits(row, offset + L2_DEFAULT_WIDTH - 1, offset)
            ret_buckets[bucket_idx].default = lpm_default(default)
            offset += L2_DEFAULT_WIDTH

        # Shared entries: bucket 1's come first, bucket 0 gets the remainder.
        shared_plan = ((shared_to_1, ret_buckets[1]),
                       (L2_NUMBER_OF_SHARED_ENTRIES - shared_to_1,
                        ret_buckets[0]))
        for count, bucket in shared_plan:
            for _ in range(count):
                entry_instance = decode_entry(offset, is_shared=True)
                offset += L2_ENTRY_LENGTH
                if entry_instance.valid:
                    bucket.entries.append(entry_instance)

        # Fixed entries are interleaved (b0e0, b1e0, b0e1, b1e1, ...); each
        # bucket consumes every other entry slot starting at its own phase.
        end_of_shared_offset = offset
        for bucket_idx in range(2):
            offset = end_of_shared_offset + bucket_idx * L2_ENTRY_LENGTH
            while offset < row_width:
                entry_instance = decode_entry(offset, is_shared=False)
                offset += L2_ENTRY_LENGTH * 2
                if entry_instance.valid:
                    ret_buckets[bucket_idx].entries.append(entry_instance)

        return ret_buckets