Exemplo n.º 1
0
    def _start_rx_queue(self, queue):
        """
        Attach a packet buffer to every descriptor of *queue*, then enable
        the queue in hardware and hand all descriptors to the NIC.

        2048 as pktbuf size is strictly speaking incorrect:
        we need a few headers (1 cacheline), so there's only 1984 bytes left for the device
        but the 82599 can only handle sizes in increments of 1 kb; but this is fine since our max packet size
        is the default MTU of 1518
        this has to be fixed if jumbo frames are to be supported
        mempool should be >= the number of rx and tx descriptors for a forwarding application

        :param queue: RxQueue to start; its length must be a power of two.
        :raises ValueError: if the ring size is not a power of two, or a
            packet buffer cannot be allocated from the mempool.
        """
        log.info('Starting RX queue %d', queue.identifier)
        # Ring size must be a power of two so index wrap-around stays cheap
        if len(queue) & (len(queue) - 1) != 0:
            raise ValueError(
                'Number of queue entries must be a power of 2, actual {}'.
                format(len(queue)))
        for i, descriptor in enumerate(queue.descriptors):
            pkt_buf = queue.mempool.get_buffer()
            if not pkt_buf:
                raise ValueError('Failed to allocate rx descriptor')
            descriptor.read.pkt_addr = pkt_buf.data_addr
            descriptor.read.hdr_addr = 0
            # Keep a reference so the buffer stays alive while the NIC owns it
            queue.buffers[i] = pkt_buf
        # Enable queue and wait if necessary
        self.reg.set_flags(types.IXGBE_RXDCTL(queue.identifier),
                           types.IXGBE_RXDCTL_ENABLE)
        self.reg.wait_set(types.IXGBE_RXDCTL(queue.identifier),
                          types.IXGBE_RXDCTL_ENABLE)

        # Rx queue starts out full
        self.reg.set(types.IXGBE_RDH(queue.identifier), 0)

        # was set to 0 before in the init function
        # (the original wrote RDT twice in a row; one idempotent write suffices)
        self.reg.set(types.IXGBE_RDT(queue.identifier), len(queue) - 1)
        self.reg.wait_set(types.IXGBE_RDT(queue.identifier), len(queue) - 1)
Exemplo n.º 2
0
    def rx_batch(self, queue_id, buffer_count):
        """
        Receive up to *buffer_count* packets from an RX queue, non-blocking.

        Sec 1.8.2 and 7.1
        see datasheet section 7.1.9 for an explanation of the rx ring structure
        tl;dr; we control the tail of the queue, the hardware the head

        :param queue_id: index into self.rx_queues (not range-checked, see below)
        :param buffer_count: maximum number of packets to fetch in this call
        :return: list of filled packet buffers (possibly empty); the caller
            takes ownership and must eventually return them to the mempool
        :raises RuntimeError: if a packet spans multiple descriptors (EOP unset)
        :raises MemoryError: if no replacement buffer can be allocated
        """
        # if not 0 <= queue_id < len(self.rx_queues):
        # raise IndexError('Queue id<{}> not in [0, {}]'.format(queue_id, len(self.rx_queues)))
        buffers = []
        queue = self.rx_queues[queue_id]
        queue_length = len(queue)
        rx_index = queue.index  # our software position in the ring (tail side)
        last_rx_index = rx_index  # last descriptor actually consumed
        for _ in range(buffer_count):
            descriptor = queue.descriptors[rx_index]
            status = descriptor.writeback.upper.status_error
            # status done: DD set means the NIC has written this descriptor back
            if (status & types.IXGBE_RXDADV_STAT_DD) != 0:
                # status end of packet
                if (status & types.IXGBE_RXDADV_STAT_EOP) == 0:
                    raise RuntimeError(
                        'Multisegment packets are not supported - increase buffer size or decrease MTU'
                    )

                # We got a packet - read and copy the whole descriptor
                packet_buffer = queue.buffers[rx_index]
                packet_buffer.size = descriptor.writeback.upper.length

                # This would be the place to implement RX offloading by translating the device-specific
                # flags to an independent representation in that buffer (similar to how DPDK works)
                new_buf = queue.mempool.get_buffer()
                if not new_buf:
                    raise MemoryError('Failed to allocate new buffer for rx')
                # descriptor.read.pkt_addr = new_buf.data_addr
                # This resets the flags
                # descriptor.read.hdr_addr = 0
                descriptor.read.pack(new_buf.data_addr, 0)
                queue.buffers[rx_index] = new_buf
                buffers.append(packet_buffer)

                # want to read the next one in the next iteration but we still need the current one to update RDT later
                last_rx_index = rx_index
                rx_index = wrap_ring(rx_index, queue_length)
            else:
                break
        # Only touch hardware state if we actually consumed at least one packet
        if rx_index != last_rx_index:
            """
            Tell the hardware that we are done. This is intentionally off by one, otherwise
            we'd set RDT=RDH if we are receiving faster than packets are coming in, which would mean queue is full
            """
            self.reg.set(types.IXGBE_RDT(queue_id), last_rx_index)
            queue.index = rx_index
        return buffers
Exemplo n.º 3
0
    def _init_rx_queue(self, index):
        """
        Set up a single RX queue: descriptor format, descriptor ring memory,
        ring registers, and the backing mempool (datasheet sec 7.1.9).

        :param index: hardware queue number
        :return: the initialized RxQueue (not yet enabled in hardware)
        """
        log.info('Initializing rx queue %d', index)
        # Enable advanced one-buffer rx descriptors for this queue
        srrctl_reg = types.IXGBE_SRRCTL(index)
        desctype_cleared = self.reg.get(
            srrctl_reg) & ~types.IXGBE_SRRCTL_DESCTYPE_MASK
        self.reg.set(
            srrctl_reg,
            desctype_cleared | types.IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF)
        # DROP_EN makes the NIC drop packets when no descriptors are available
        # instead of buffering them; without it a single overflowing queue can
        # fill up the whole buffer and impact the other queues
        self.reg.set_flags(srrctl_reg, types.IXGBE_SRRCTL_DROP_EN)

        # Sec 7.1.9 - descriptor ring, poisoned with 0xFF so that stale reads
        # are easy to spot
        ring_bytes = self.NUM_RX_QUEUE_ENTRIES * self.RX_DESCRIPTOR_SIZE
        dma = DmaMemory(ring_bytes)
        ring_view = memoryview(dma)
        ring_view[:] = b'\xff' * ring_bytes
        self.reg.set(types.IXGBE_RDBAL(index), dma.physical_address)
        self.reg.set(types.IXGBE_RDBAH(index), dma.physical_address >> 32)
        self.reg.set(types.IXGBE_RDLEN(index), ring_bytes)
        log.info('RX ring %d using %s', index, dma)

        # RDH == RDT means the ring starts out empty
        self.reg.set(types.IXGBE_RDH(index), 0)
        self.reg.set(types.IXGBE_RDT(index), 0)

        # Mempool should be >= number of rx and tx descriptors
        descriptor_total = self.NUM_RX_QUEUE_ENTRIES + self.NUM_TX_QUEUE_ENTRIES
        mempool = Mempool.allocate(max(4096, descriptor_total))
        return RxQueue(ring_view, self.NUM_RX_QUEUE_ENTRIES, index, mempool)