def E_find_mk_test(dut):
    """
    Finds MK successfully
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end =   '1000000300'    #Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    print_process_vars(dut)
    yield load_file(dut, filename)
    print_process_vars(dut)
    yield load_mk(dut, start)
    print_process_vars(dut)
    yield load_mk(dut, end)
    print_process_vars(dut)
    
    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    
    yield wait_process(dut)
    
    print_process_vars(dut)
    
    if int(str(dut.pmk_valid), 2) == 0:
        raise TestFailure("MK search failed")
    else:
        log.info("Master key found!")
Exemple #2
0
def F_exhaust_mk_test(dut):
    """
    Hits end of MK list before matching
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end = '1000000020'  #Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)

    yield load_file(dut, filename)
    yield load_mk(dut, start)
    yield load_mk(dut, end)

    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)

    yield wait_process(dut)

    if int(str(dut.pmk_valid), 2) == 0:
        raise TestFailure("Master key found, not good!")
    else:
        log.info("List done")
def F_exhaust_mk_test(dut):
    """
    Hits end of MK list before matching
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end =   '1000000020'    #Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    yield load_file(dut, filename)
    yield load_mk(dut, start)
    yield load_mk(dut, end)
    
    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    
    yield wait_process(dut)
    
    if int(str(dut.pmk_valid), 2) == 0:
        raise TestFailure("Master key found, not good!")
    else:
        log.info("List done")
Exemple #4
0
class TB(object):
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog(f"cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.fork(Clock(dut.clk, 2, units="ns").start())

        self.source = AxiStreamSource(dut, "axis", dut.clk, dut.rst)
        self.sink = AxiStreamSink(dut, "axis", dut.clk, dut.rst)

    def set_idle_generator(self, generator=None):
        if generator:
            self.source.set_pause_generator(generator())

    def set_backpressure_generator(self, generator=None):
        if generator:
            self.sink.set_pause_generator(generator())

    async def reset(self):
        self.dut.rst.setimmediatevalue(0)
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst <= 1
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst <= 0
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
class TB:
    def __init__(self, dut, speed=1000e6):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.start_soon(Clock(dut.clk_125mhz, 8, units="ns").start())

        # Ethernet
        self.gmii_phy = GmiiPhy(dut.phy_txd, dut.phy_tx_er, dut.phy_tx_en, dut.phy_tx_clk, dut.phy_gtx_clk,
            dut.phy_rxd, dut.phy_rx_er, dut.phy_rx_dv, dut.phy_rx_clk, speed=speed)

        dut.btnu.setimmediatevalue(0)
        dut.btnl.setimmediatevalue(0)
        dut.btnd.setimmediatevalue(0)
        dut.btnr.setimmediatevalue(0)
        dut.btnc.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)
        dut.uart_txd.setimmediatevalue(1)
        dut.uart_rts.setimmediatevalue(1)

    async def init(self):

        self.dut.rst_125mhz.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk_125mhz)

        self.dut.rst_125mhz <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk_125mhz)

        self.dut.rst_125mhz <= 0
Exemple #6
0
def E_find_mk_test(dut):
    """
    Finds MK successfully
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end = '1000000300'  #Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)

    print_process_vars(dut)
    yield load_file(dut, filename)
    print_process_vars(dut)
    yield load_mk(dut, start)
    print_process_vars(dut)
    yield load_mk(dut, end)
    print_process_vars(dut)

    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)

    yield wait_process(dut)

    print_process_vars(dut)

    if int(str(dut.pmk_valid), 2) == 0:
        raise TestFailure("MK search failed")
    else:
        log.info("Master key found!")
Exemple #7
0
class TB:
    def __init__(self, dut, speed=1000e6):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        self.rgmii_phy0 = RgmiiPhy(dut.phy0_txd,
                                   dut.phy0_tx_ctl,
                                   dut.phy0_tx_clk,
                                   dut.phy0_rxd,
                                   dut.phy0_rx_ctl,
                                   dut.phy0_rx_clk,
                                   speed=speed)

        self.rgmii_phy1 = RgmiiPhy(dut.phy1_txd,
                                   dut.phy1_tx_ctl,
                                   dut.phy1_tx_clk,
                                   dut.phy1_rxd,
                                   dut.phy1_rx_ctl,
                                   dut.phy1_rx_clk,
                                   speed=speed)

        dut.phy0_int_n.setimmediatevalue(1)
        dut.phy1_int_n.setimmediatevalue(1)

        dut.btn.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)

        dut.clk.setimmediatevalue(0)
        dut.clk90.setimmediatevalue(0)

        cocotb.fork(self._run_clk())

    async def init(self):

        self.dut.rst.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 0

    async def _run_clk(self):
        t = Timer(2, 'ns')
        while True:
            self.dut.clk <= 1
            await t
            self.dut.clk90 <= 1
            await t
            self.dut.clk <= 0
            await t
            self.dut.clk90 <= 0
            await t
class TB:
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.fork(Clock(dut.clk, 6.4, units="ns").start())

        # Ethernet
        cocotb.fork(Clock(dut.sfp0_rx_clk, 6.4, units="ns").start())
        self.sfp0_source = XgmiiSource(dut.sfp0_rxd, dut.sfp0_rxc,
                                       dut.sfp0_rx_clk, dut.sfp0_rx_rst)
        cocotb.fork(Clock(dut.sfp0_tx_clk, 6.4, units="ns").start())
        self.sfp0_sink = XgmiiSink(dut.sfp0_txd, dut.sfp0_txc, dut.sfp0_tx_clk,
                                   dut.sfp0_tx_rst)

        cocotb.fork(Clock(dut.sfp1_rx_clk, 6.4, units="ns").start())
        self.sfp1_source = XgmiiSource(dut.sfp1_rxd, dut.sfp1_rxc,
                                       dut.sfp1_rx_clk, dut.sfp1_rx_rst)
        cocotb.fork(Clock(dut.sfp1_tx_clk, 6.4, units="ns").start())
        self.sfp1_sink = XgmiiSink(dut.sfp1_txd, dut.sfp1_txc, dut.sfp1_tx_clk,
                                   dut.sfp1_tx_rst)

        dut.btnu.setimmediatevalue(0)
        dut.btnl.setimmediatevalue(0)
        dut.btnd.setimmediatevalue(0)
        dut.btnr.setimmediatevalue(0)
        dut.btnc.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)
        dut.uart_rxd.setimmediatevalue(0)
        dut.uart_rts.setimmediatevalue(0)

    async def init(self):

        self.dut.rst.setimmediatevalue(0)
        self.dut.sfp0_rx_rst.setimmediatevalue(0)
        self.dut.sfp0_tx_rst.setimmediatevalue(0)
        self.dut.sfp1_rx_rst.setimmediatevalue(0)
        self.dut.sfp1_tx_rst.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 1
        self.dut.sfp0_rx_rst <= 1
        self.dut.sfp0_tx_rst <= 1
        self.dut.sfp1_rx_rst <= 1
        self.dut.sfp1_tx_rst <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 0
        self.dut.sfp0_rx_rst <= 0
        self.dut.sfp0_tx_rst <= 0
        self.dut.sfp1_rx_rst <= 0
        self.dut.sfp1_tx_rst <= 0
Exemple #9
0
class TB:
    def __init__(self, dut, speed=1000e6):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        self.rgmii_phy = RgmiiPhy(dut.phy_txd,
                                  dut.phy_tx_ctl,
                                  dut.phy_tx_clk,
                                  dut.phy_rxd,
                                  dut.phy_rx_ctl,
                                  dut.phy_rx_clk,
                                  speed=speed)

        dut.btnu.setimmediatevalue(0)
        dut.btnl.setimmediatevalue(0)
        dut.btnd.setimmediatevalue(0)
        dut.btnr.setimmediatevalue(0)
        dut.btnc.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)
        dut.uart_txd.setimmediatevalue(1)
        dut.uart_rts.setimmediatevalue(1)

        dut.clk_125mhz.setimmediatevalue(0)
        dut.clk90_125mhz.setimmediatevalue(0)

        cocotb.fork(self._run_clk_125mhz())

    async def init(self):

        self.dut.rst_125mhz.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk_125mhz)

        self.dut.rst_125mhz <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk_125mhz)

        self.dut.rst_125mhz <= 0

    async def _run_clk_125mhz(self):
        t = Timer(2, 'ns')
        while True:
            self.dut.clk_125mhz <= 1
            await t
            self.dut.clk90_125mhz <= 1
            await t
            self.dut.clk_125mhz <= 0
            await t
            self.dut.clk90_125mhz <= 0
            await t
Exemple #10
0
class AFIFODriver():
    def __init__(self, signals, debug=False, slots=0, width=0):
        level = logging.DEBUG if debug else logging.WARNING
        self.log = SimLog("afifo.log")
        file_handler = RotatingFileHandler("sim.log",
                                           maxBytes=(5 * 1024 * 1024),
                                           backupCount=2,
                                           mode='w')
        file_handler.setFormatter(SimColourLogFormatter())
        self.log.addHandler(file_handler)
        self.log.addFilter(SimTimeContextFilter())
        self.log.setLevel(level)
        self.log.info("SEED ======> %s", str(cocotb.RANDOM_SEED))

        self.clk_wr = signals.clk_wr
        self.valid_wr = signals.wr_en_i
        self.data_wr = signals.wr_data_i
        self.ready_wr = signals.wr_full_o
        self.clk_rd = signals.clk_rd
        self.valid_rd = signals.rd_empty_o
        self.data_rd = signals.rd_data_o
        self.ready_rd = signals.rd_en_i
        self.valid_wr <= 0
        self.ready_rd <= 0
        self.log.setLevel(level)

    async def write(self, data, sync=True, **kwargs):
        self.log.info("[AFIFO driver] write => %x" % data)
        while True:
            await FallingEdge(self.clk_wr)
            self.valid_wr <= 1
            self.data_wr <= data
            await RisingEdge(self.clk_wr)
            if self.ready_wr == 0:
                break
            elif kwargs["exit_full"] == True:
                return "FULL"
        self.valid_wr <= 0
        return 0

    async def read(self, sync=True, **kwargs):
        while True:
            await FallingEdge(self.clk_rd)
            if self.valid_rd == 0:
                data = self.data_rd.value  # We capture before we incr. rd ptr
                self.ready_rd <= 1
                await RisingEdge(self.clk_rd)
                break
            elif kwargs["exit_empty"] == True:
                return "EMPTY"
        self.log.info("[AFIFO-driver] read => %x" % data)
        self.ready_rd <= 0
        return data
class TB:
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.fork(Clock(dut.clk, 6.4, units="ns").start())

        # Ethernet
        self.sfp_a_source = XgmiiSource(dut.sfp_a_rxd, dut.sfp_a_rxc, dut.clk,
                                        dut.rst)
        self.sfp_a_sink = XgmiiSink(dut.sfp_a_txd, dut.sfp_a_txc, dut.clk,
                                    dut.rst)

        self.sfp_b_source = XgmiiSource(dut.sfp_b_rxd, dut.sfp_b_rxc, dut.clk,
                                        dut.rst)
        self.sfp_b_sink = XgmiiSink(dut.sfp_b_txd, dut.sfp_b_txc, dut.clk,
                                    dut.rst)

        self.sfp_c_source = XgmiiSource(dut.sfp_c_rxd, dut.sfp_c_rxc, dut.clk,
                                        dut.rst)
        self.sfp_c_sink = XgmiiSink(dut.sfp_c_txd, dut.sfp_c_txc, dut.clk,
                                    dut.rst)

        self.sfp_d_source = XgmiiSource(dut.sfp_d_rxd, dut.sfp_d_rxc, dut.clk,
                                        dut.rst)
        self.sfp_d_sink = XgmiiSink(dut.sfp_d_txd, dut.sfp_d_txc, dut.clk,
                                    dut.rst)

        dut.btn.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)

    async def init(self):

        self.dut.rst.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 0
Exemple #12
0
class TB:
    def __init__(self, dut, speed=1000):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.fork(Clock(dut.clk_125mhz, 6.4, units="ns").start())
        cocotb.fork(Clock(dut.phy_gmii_clk, 8, units="ns").start())

        self.gmii_source = GmiiSource(dut.phy_gmii_rxd, dut.phy_gmii_rx_er,
                                      dut.phy_gmii_rx_dv, dut.phy_gmii_clk,
                                      dut.phy_gmii_rst, dut.phy_gmii_clk_en)
        self.gmii_sink = GmiiSink(dut.phy_gmii_txd, dut.phy_gmii_tx_er,
                                  dut.phy_gmii_tx_en, dut.phy_gmii_clk,
                                  dut.phy_gmii_rst, dut.phy_gmii_clk_en)

        dut.phy_gmii_clk_en.setimmediatevalue(1)

        dut.btnu.setimmediatevalue(0)
        dut.btnl.setimmediatevalue(0)
        dut.btnd.setimmediatevalue(0)
        dut.btnr.setimmediatevalue(0)
        dut.btnc.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)
        dut.uart_txd.setimmediatevalue(0)
        dut.uart_rts.setimmediatevalue(0)

    async def init(self):

        self.dut.rst_125mhz.setimmediatevalue(0)
        self.dut.phy_gmii_rst.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk_125mhz)

        self.dut.rst_125mhz <= 1
        self.dut.phy_gmii_rst <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk_125mhz)

        self.dut.rst_125mhz <= 0
        self.dut.phy_gmii_rst <= 0
Exemple #13
0
class TB(object):
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog(f"cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.fork(Clock(dut.clk, 2, units="ns").start())

        self.axi_master = AxiMaster(dut, "axi", dut.clk, dut.rst)
        self.axi_ram = AxiRam(dut, "axi", dut.clk, dut.rst, size=2**16)

        self.axi_ram.write_if.log.setLevel(logging.DEBUG)
        self.axi_ram.read_if.log.setLevel(logging.DEBUG)

    def set_idle_generator(self, generator=None):
        if generator:
            self.axi_master.write_if.aw_channel.set_pause_generator(
                generator())
            self.axi_master.write_if.w_channel.set_pause_generator(generator())
            self.axi_master.read_if.ar_channel.set_pause_generator(generator())
            self.axi_ram.write_if.b_channel.set_pause_generator(generator())
            self.axi_ram.read_if.r_channel.set_pause_generator(generator())

    def set_backpressure_generator(self, generator=None):
        if generator:
            self.axi_master.write_if.b_channel.set_pause_generator(generator())
            self.axi_master.read_if.r_channel.set_pause_generator(generator())
            self.axi_ram.write_if.aw_channel.set_pause_generator(generator())
            self.axi_ram.write_if.w_channel.set_pause_generator(generator())
            self.axi_ram.read_if.ar_channel.set_pause_generator(generator())

    async def cycle_reset(self):
        self.dut.rst.setimmediatevalue(0)
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst <= 1
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst <= 0
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
Exemple #14
0
def A_load_packet_test(dut):
    """
    Test proper load of filedata into DUT
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    filename = '../test_data/wpa2-psk-linksys.hccap'

    obj = wpa2slow.handshake.Handshake()
    objSha = wpa2slow.sha1.Sha1Model()
    objHmac = wpa2slow.hmac.HmacModel(objSha)
    objPbkdf2 = wpa2slow.pbkdf2.Pbkdf2Model()
    objPrf = wpa2slow.compare.PrfModel(objHmac)

    (ssid, mac1, mac2, nonce1, nonce2, eapol, eapol_size,
     keymic) = obj.load(filename)

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)

    yield load_file(dut, filename)

    packet_test1 = dut.test_ssid_1
    packet_test2 = dut.test_ssid_2
    packet_test3 = dut.test_ssid_3

    if ord(ssid[0][0]) != int(str(ssid_test1), 2):
        raise TestFailure("ssid_test1 differs from mock")
    elif ord(ssid[0][3]) != int(str(ssid_test2), 2):
        raise TestFailure("ssid_test2 differs from mock")
    elif ord(ssid[0][6]) != int(str(ssid_test3), 2):
        raise TestFailure("ssid_test3 differs from mock")
    elif ord(ssid[0][6]) == int(
            str(ssid_test1),
            2):  #Todo: remove false positive if 1st and 7th chars equal
        raise TestFailure("SSID comparisons failing.")
    else:
        log.info("SSID Ok!")
class TB:
    def __init__(self, dut, speed=100e6):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.fork(Clock(dut.clk, 8, units="ns").start())

        self.mii_phy = MiiPhy(dut.phy_txd,
                              None,
                              dut.phy_tx_en,
                              dut.phy_tx_clk,
                              dut.phy_rxd,
                              dut.phy_rx_er,
                              dut.phy_rx_dv,
                              dut.phy_rx_clk,
                              speed=speed)

        dut.phy_crs.setimmediatevalue(0)
        dut.phy_col.setimmediatevalue(0)

        dut.btn.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)
        dut.uart_rxd.setimmediatevalue(0)

    async def init(self):

        self.dut.rst.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 0
def D_set_session_params_test(dut):
    """
    Loads handshake, start, end MK values
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end =   '1000000200'

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    yield load_file(dut, filename)
    
    yield load_mk(dut, start)
    
    yield load_mk(dut, end)
    
    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    
    #yield wait_process(dut)
    
    mk_test1 = dut.test_mk1
    mk_test2 = dut.test_mk2
    mk_test3 = dut.test_mk3
    
    if ord(start[0]) != int(str(mk_test1), 2):
        raise TestFailure("Start MK inequal")
    elif ord(end[7]) != int(str(mk_test2), 2):
        raise TestFailure("End MK inequal1")
    elif ord(end[9]) != int(str(mk_test3), 2):
        raise TestFailure("End MK inequal2")
    else:
        log.info("Start/End Params Ok!")
Exemple #17
0
def D_set_session_params_test(dut):
    """
    Loads handshake, start, end MK values
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end = '1000000200'

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)

    yield load_file(dut, filename)

    yield load_mk(dut, start)

    yield load_mk(dut, end)

    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)

    #yield wait_process(dut)

    mk_test1 = dut.test_mk1
    mk_test2 = dut.test_mk2
    mk_test3 = dut.test_mk3

    if ord(start[0]) != int(str(mk_test1), 2):
        raise TestFailure("Start MK inequal")
    elif ord(end[7]) != int(str(mk_test2), 2):
        raise TestFailure("End MK inequal1")
    elif ord(end[9]) != int(str(mk_test3), 2):
        raise TestFailure("End MK inequal2")
    else:
        log.info("Start/End Params Ok!")
def A_load_packet_test(dut):
    """
    Test proper load of filedata into DUT
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    
    obj = wpa2slow.handshake.Handshake()
    objSha = wpa2slow.sha1.Sha1Model()
    objHmac = wpa2slow.hmac.HmacModel(objSha)
    objPbkdf2 = wpa2slow.pbkdf2.Pbkdf2Model()
    objPrf = wpa2slow.compare.PrfModel(objHmac)
    
    (ssid, mac1, mac2, nonce1, nonce2, eapol, eapol_size, keymic) = obj.load(filename)
    
    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    yield load_file(dut, filename)
    
    packet_test1 = dut.test_ssid_1
    packet_test2 = dut.test_ssid_2
    packet_test3 = dut.test_ssid_3
    
    if ord(ssid[0][0]) != int(str(ssid_test1), 2):
        raise TestFailure("ssid_test1 differs from mock")
    elif ord(ssid[0][3]) != int(str(ssid_test2), 2):
        raise TestFailure("ssid_test2 differs from mock")
    elif ord(ssid[0][6]) != int(str(ssid_test3), 2):
        raise TestFailure("ssid_test3 differs from mock")
    elif ord(ssid[0][6]) == int(str(ssid_test1), 2):    #Todo: remove false positive if 1st and 7th chars equal
        raise TestFailure("SSID comparisons failing.")
    else:
        log.info("SSID Ok!")
Exemple #19
0
class TB:
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        self.rc = RootComplex()

        self.ep = []

        ep = TestEndpoint()
        self.dev = Device(ep)
        self.ep.append(ep)

        self.rc.make_port().connect(self.dev)

        self.sw = Switch()

        self.rc.make_port().connect(self.sw)

        ep = TestEndpoint()
        self.dev2 = Device(ep)
        self.ep.append(ep)

        self.sw.make_port().connect(self.dev2)

        ep = TestEndpoint()
        self.dev3 = Device(ep)
        self.ep.append(ep)

        self.sw.make_port().connect(self.dev3)

        ep = TestEndpoint()
        self.dev4 = Device(ep)
        self.ep.append(ep)

        self.rc.make_port().connect(self.dev4)
Exemple #20
0
class Scheduler(object):
    """The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the :any:`react` method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn. NB implementors should not depend on the scheduling
    order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to cbReadOnlySynch (VPI) or vhpiCbLastKnownDeltaCycle
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to cbReadWriteSynch (VPI) or vhpiCbEndOfProcesses (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from normal->write by registering a ReadWrite
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL   = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE    = 3  # noqa
    _MODE_TERM     = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _readonly = ReadOnly()
    # TODO[gh-759]: For some reason, the scheduler requires that these triggers
    # are _not_ the same instances used by the tests themselves. This is risky,
    # because it can lead to them overwriting each other's callbacks. We should
    # try to remove this `copy.copy` in future.
    _next_timestep = copy.copy(NextTimeStep())
    _readwrite = copy.copy(ReadWrite())
    _timer1 = Timer(1)
    _timer0 = Timer(0)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = collections.defaultdict(list)

        # A dictionary of pending triggers for each coroutine, indexed by coro
        self._coro2triggers = collections.defaultdict(list)

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = {}

        self._pending_coros = []
        self._pending_callbacks = []
        self._pending_triggers = []
        self._pending_threads = []
        self._pending_events = []   # Events we need to call set on once we've unwound

        self._terminate = False
        self._test_result = None
        self._entrypoint = None
        self._main_thread = threading.current_thread()

        # Select the appropriate scheduling algorithm for this simulator
        self.advance = self.default_scheduling_algorithm
        self._is_reacting = False

    def default_scheduling_algorithm(self):
        """
        Decide whether we need to schedule our own triggers (if at all) in
        order to progress to the next mode.

        This algorithm has been tested against the following simulators:
            Icarus Verilog
        """
        if not self._terminate and self._writes:

            if self._mode == Scheduler._MODE_NORMAL:
                if not self._readwrite.primed:
                    self._readwrite.prime(self.react)
            elif not self._next_timestep.primed:
                self._next_timestep.prime(self.react)

        elif self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            for t in self._trigger2coros:
                t.unprime()

            for t in [self._readwrite, self._readonly, self._next_timestep,
                      self._timer1, self._timer0]:
                if t.primed:
                    t.unprime()

            self._timer1.prime(self.begin_test)
            self._trigger2coros = collections.defaultdict(list)
            self._coro2triggers = collections.defaultdict(list)
            self._terminate = False
            self._mode = Scheduler._MODE_TERM

    def begin_test(self, trigger=None):
        """Called to initiate a test.

        Could be called on start-up or from a callback.
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            ctx = profiling_context()
        else:
            ctx = nullcontext()

        with ctx:
            self._mode = Scheduler._MODE_NORMAL
            if trigger is not None:
                trigger.unprime()

            # Issue previous test result, if there is one
            if self._test_result is not None:
                if _debug:
                    self.log.debug("Issue test result to regression object")
                cocotb.regression_manager.handle_result(self._test_result)
                self._test_result = None
            if self._entrypoint is not None:
                test = self._entrypoint
                self._entrypoint = None
                self.schedule(test)
                self.advance()

    def react(self, trigger):
        """
        Called when a trigger fires.

        We ensure that we only start the event loop once, rather than
        letting it recurse.
        """
        if self._is_reacting:
            # queue up the trigger, the event loop will get to it
            self._pending_triggers.append(trigger)
            return

        # start the event loop
        self._is_reacting = True
        try:
            self._event_loop(trigger)
        finally:
            self._is_reacting = False


    def _event_loop(self, trigger):
        """
        Run an event loop triggered by the given trigger.

        The loop will keep running until no further triggers fire.

        This should be triggered by only:
        * The beginning of a test, when there is no trigger to react to
        * A GPI trigger
        """
        if _profiling:
            ctx = profiling_context()
        else:
            ctx = nullcontext()

        with ctx:
            # When a trigger fires it is unprimed internally
            if _debug:
                self.log.debug("Trigger fired: %s" % str(trigger))
            # trigger.unprime()

            if self._mode == Scheduler._MODE_TERM:
                if _debug:
                    self.log.debug("Ignoring trigger %s since we're terminating" %
                                   str(trigger))
                return

            if trigger is self._readonly:
                self._mode = Scheduler._MODE_READONLY
            # Only GPI triggers affect the simulator scheduling mode
            elif isinstance(trigger, GPITrigger):
                self._mode = Scheduler._MODE_NORMAL

            # We're the only source of ReadWrite triggers which are only used for
            # playing back any cached signal updates
            if trigger is self._readwrite:

                if _debug:
                    self.log.debug("Writing cached signal updates")

                while self._writes:
                    handle, value = self._writes.popitem()
                    handle.setimmediatevalue(value)

                self._readwrite.unprime()

                return

            # Similarly if we've scheduled our next_timestep on way to readwrite
            if trigger is self._next_timestep:

                if not self._writes:
                    self.log.error(
                        "Moved to next timestep without any pending writes!")
                else:
                    self.log.debug(
                        "Priming ReadWrite trigger so we can playback writes")
                    self._readwrite.prime(self.react)

                return

            # work through triggers one by one
            is_first = True
            self._pending_triggers.append(trigger)
            while self._pending_triggers:
                trigger = self._pending_triggers.pop(0)

                if not is_first and isinstance(trigger, GPITrigger):
                    self.log.warning(
                        "A GPI trigger occurred after entering react - this "
                        "should not happen."
                    )
                    assert False

                # this only exists to enable the warning above
                is_first = False

                if trigger not in self._trigger2coros:

                    # GPI triggers should only be ever pending if there is an
                    # associated coroutine waiting on that trigger, otherwise it would
                    # have been unprimed already
                    if isinstance(trigger, GPITrigger):
                        self.log.critical(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                        trigger.log.info("I'm the culprit")
                    # For Python triggers this isn't actually an error - we might do
                    # event.set() without knowing whether any coroutines are actually
                    # waiting on this event, for example
                    elif _debug:
                        self.log.debug(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                    continue

                # Scheduled coroutines may append to our waiting list so the first
                # thing to do is pop all entries waiting on this trigger.
                scheduling = self._trigger2coros.pop(trigger)

                if _debug:
                    debugstr = "\n\t".join([coro.__name__ for coro in scheduling])
                    if len(scheduling):
                        debugstr = "\n\t" + debugstr
                    self.log.debug("%d pending coroutines for event %s%s" %
                                   (len(scheduling), str(trigger), debugstr))

                # This trigger isn't needed any more
                trigger.unprime()

                # If the coroutine was waiting on multiple triggers we may be able
                # to unprime the other triggers that didn't fire
                scheduling_set = set(scheduling)
                other_triggers = {
                    t
                    for coro in scheduling
                    for t in self._coro2triggers[coro]
                } - {trigger}

                for pending in other_triggers:
                    # every coroutine waiting on this trigger is already being woken
                    if scheduling_set.issuperset(self._trigger2coros[pending]):
                        if pending.primed:
                            pending.unprime()
                        del self._trigger2coros[pending]

                for coro in scheduling:
                    if _debug:
                        self.log.debug("Scheduling coroutine %s" % (coro.__name__))
                    self.schedule(coro, trigger=trigger)
                    if _debug:
                        self.log.debug("Scheduled coroutine %s" % (coro.__name__))

                # Schedule may have queued up some events so we'll burn through those
                while self._pending_events:
                    if _debug:
                        self.log.debug("Scheduling pending event %s" %
                                       (str(self._pending_events[0])))
                    self._pending_events.pop(0).set()

            # no more pending triggers
            self.advance()
            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")


    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        for trigger in self._coro2triggers[coro]:
            if coro in self._trigger2coros[trigger]:
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
                del self._trigger2coros[trigger]
        del self._coro2triggers[coro]

        if Join(coro) in self._trigger2coros:
            self._pending_triggers.append(Join(coro))
        else:
            try:
                # throws an error if the background coroutine errored
                # and no one was monitoring it
                coro.retval
            except Exception as e:
                self._test_result = TestError(
                    "Forked coroutine {} raised exception {}"
                    .format(coro, e)
                )
                self._terminate = True

    def save_write(self, handle, value):
        if self._mode == Scheduler._MODE_READONLY:
            raise Exception("Write to object {0} was scheduled during a read-only sync phase.".format(handle._name))
        self._writes[handle] = value

    def _coroutine_yielded(self, coro, triggers):
        """Prime the triggers and update our internal mappings."""
        self._coro2triggers[coro] = triggers

        for trigger in triggers:

            self._trigger2coros[trigger].append(coro)
            if not trigger.primed:
                try:
                    trigger.prime(self.react)
                except Exception as e:
                    # Convert any exceptions into a test result
                    self.finish_test(
                        create_error(self, "Unable to prime trigger %s: %s" %
                                     (str(trigger), str(e))))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def queue_function(self, coroutine):
        """Queue a coroutine for execution and move the containing thread
        so that it does not block execution of the main thread any longer.
        """

        # We should be able to find ourselves inside the _pending_threads list

        for t in self._pending_threads:
            if t.thread == threading.current_thread():
                t.thread_suspend()
                self._pending_coros.append(coroutine)
                return t


    def run_in_executor(self, func, *args, **kwargs):
        """Run the coroutine in a separate execution thread
        and return a yieldable object for the caller.
        """
        # Create a thread
        # Create a trigger that is called as a result of the thread finishing
        # Create an Event object that the caller can yield on
        # Event object set when the thread finishes execution, this blocks the
        #   calling coroutine (but not the thread) until the external completes

        def execute_external(func, _waiter):
            _waiter._outcome = outcomes.capture(func, *args, **kwargs)
            if _debug:
                self.log.debug("Execution of external routine done %s" % threading.current_thread())
            _waiter.thread_done()

        waiter = external_waiter()
        thread = threading.Thread(group=None, target=execute_external,
                                  name=func.__name__ + "_thread",
                                  args=([func, waiter]), kwargs={})

        waiter.thread = thread;
        self._pending_threads.append(waiter)

        return waiter

    def add(self, coroutine):
        """Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error messages in the event of common gotchas.
        """
        if isinstance(coroutine, cocotb.decorators.coroutine):
            self.log.critical(
                "Attempt to schedule a coroutine that hasn't started")
            coroutine.log.error("This is the failing coroutine")
            self.log.warning(
                "Did you forget to add parentheses to the @test decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):
            self.log.critical(
                "Attempt to add something to the scheduler which isn't a "
                "coroutine")
            self.log.warning(
                "Got: %s (%s)" % (str(type(coroutine)), repr(coroutine)))
            self.log.warning("Did you use the @coroutine decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        if _debug:
            self.log.debug("Adding new coroutine %s" % coroutine.__name__)

        self.schedule(coroutine)
        self.advance()
        return coroutine

    def new_test(self, coroutine):
        self._entrypoint = coroutine

    def schedule(self, coroutine, trigger=None):
        """Schedule a coroutine by calling the send method.

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule.
            trigger (cocotb.triggers.Trigger): The trigger that caused this
                coroutine to be scheduled.
        """
        if trigger is None:
            send_outcome = outcomes.Value(None)
        else:
            send_outcome = trigger._outcome
        if _debug:
            self.log.debug("Scheduling with {}".format(send_outcome))

        try:
            result = coroutine._advance(send_outcome)
            if _debug:
                self.log.debug("Coroutine %s yielded %s (mode %d)" %
                               (coroutine.__name__, str(result), self._mode))

        # TestComplete indication is game over, tidy up
        except TestComplete as test_result:
            # Tag that close down is needed, save the test_result
            # for later use in cleanup handler
            self.log.debug("TestComplete received: %s" % test_result.__class__.__name__)
            self.finish_test(test_result)
            return

        # Normal coroutine completion
        except cocotb.decorators.CoroutineComplete as exc:
            if _debug:
                self.log.debug("Coroutine completed: %s" % str(coroutine))
            self.unschedule(coroutine)
            return

        # Don't handle the result if we're shutting down
        if self._terminate:
            return

        # Queue current routine to schedule when the nested routine exits
        yield_successful = False
        if isinstance(result, cocotb.decorators.RunningCoroutine):

            if not result.has_started():
                self.queue(result)
                if _debug:
                    self.log.debug("Scheduling nested coroutine: %s" %
                                   result.__name__)
            else:
                if _debug:
                    self.log.debug("Joining to already running coroutine: %s" %
                                   result.__name__)

            new_trigger = result.join()
            self._coroutine_yielded(coroutine, [new_trigger])
            yield_successful = True

        elif isinstance(result, Trigger):
            if _debug:
                self.log.debug("%s: is instance of Trigger" % result)
            self._coroutine_yielded(coroutine, [result])
            yield_successful = True

        # If we get a list, make sure it's a list of triggers or coroutines.
        # For every coroutine, replace it with coroutine.join().
        # This could probably be done more elegantly via list comprehension.
        elif isinstance(result, list):
            new_triggers = []
            for listobj in result:
                if isinstance(listobj, Trigger):
                    new_triggers.append(listobj)
                elif isinstance(listobj, cocotb.decorators.RunningCoroutine):
                    if _debug:
                        self.log.debug("Scheduling coroutine in list: %s" %
                                       listobj.__name__)
                    if not listobj.has_started():
                        self.queue(listobj)
                    new_trigger = listobj.join()
                    new_triggers.append(new_trigger)
                else:
                    # If we encounter something not a coroutine or trigger,
                    # set the success flag to False and break out of the loop.
                    yield_successful = False
                    break

            # Make sure the lists are the same size. If they are not, it means
            # it contained something not a trigger/coroutine, so do nothing.
            if len(new_triggers) == len(result):
                self._coroutine_yielded(coroutine, new_triggers)
                yield_successful = True

        # If we didn't successfully yield anything, thrown an error.
        # Do it this way to make the logic in the list case simpler.
        if not yield_successful:
            msg = ("Coroutine %s yielded something the scheduler can't handle"
                   % str(coroutine))
            msg += ("\nGot type: %s repr: %s str: %s" %
                    (type(result), repr(result), str(result)))
            msg += "\nDid you forget to decorate with @cocotb.coroutine?"
            try:
                raise_error(self, msg)
            except Exception as e:
                self.finish_test(e)

        # We do not return from here until pending threads have completed, but only
        # from the main thread, this seems like it could be problematic in cases
        # where a sim might change what this thread is.
        def unblock_event(ext):
            @cocotb.coroutine
            def wrapper():
                ext.event.set()
                yield PythonTrigger()

        if self._main_thread is threading.current_thread():

            for ext in self._pending_threads:
                ext.thread_start()
                if _debug:
                    self.log.debug("Blocking from %s on %s" % (threading.current_thread(), ext.thread))
                state = ext.thread_wait()
                if _debug:
                    self.log.debug("Back from wait on self %s with newstate %d" % (threading.current_thread(), state))
                if state == external_state.EXITED:
                    self._pending_threads.remove(ext)
                    self._pending_events.append(ext.event)

        # Handle any newly queued coroutines that need to be scheduled
        while self._pending_coros:
            self.add(self._pending_coros.pop(0))

        while self._pending_callbacks:
            self._pending_callbacks.pop(0)()


    def finish_test(self, test_result):
        """Cache the test result and set the terminate flag."""
        self.log.debug("finish_test called with %s" % (repr(test_result)))
        if not self._terminate:
            self._terminate = True
            self._test_result = test_result
            self.cleanup()

    def finish_scheduler(self, test_result):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed.
        """
        self.log.debug("Issue sim closedown result to regression object")
        cocotb.regression_manager.handle_result(test_result)

    def cleanup(self):
        """Clear up all our state.

        Unprime all pending triggers and kill off any coroutines stop all externals.
        """
        for trigger, waiting in dict(self._trigger2coros).items():
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()

        if self._main_thread is not threading.current_thread():
            raise Exception("Cleanup() called outside of the main thread")

        for ext in self._pending_threads:
            self.log.warn("Waiting for %s to exit", ext.thread)
def C_load_second_test(dut):
    """
    Resets data and tries again
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    
    obj = wpa2slow.Handshake()
    
    obj.load(filename)
    ssid = obj.ssid
    mac1 = obj.mac1
    mac2 = obj.mac2
    nonce1 = obj.nonce1
    nonce2 = obj.nonce2
    eapol = obj.eapol
    eapol_size = obj.eapol_size
    keymic = obj.keymic
    
    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    yield load_file(dut, filename)
    
    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    
    #yield wait_process(dut)
    
    ssid_test1 = dut.test_ssid_1
    ssid_test2 = dut.test_ssid_2
    ssid_test3 = dut.test_ssid_3
    
    if ord(ssid[0]) != int(str(ssid_test1), 2):
        raise TestFailure("ssid_test1 differs from mock")
    elif ord(ssid[3]) != int(str(ssid_test2), 2):
        raise TestFailure("ssid_test2 differs from mock")
    elif ord(ssid[6]) != int(str(ssid_test3), 2):
        raise TestFailure("ssid_test3 differs from mock")
    elif ord(ssid[6]) == int(str(ssid_test1), 2):    #Todo: remove false positive if 1st and 7th chars equal
        raise TestFailure("SSID comparisons failing.")
    else:
        log.info("SSID Ok!")
    mic_test1 = dut.test_keymic_1
    mic_test2 = dut.test_keymic_2
    mic_test3 = dut.test_keymic_3
        
    if ord(keymic[0]) != int(str(mic_test1), 2):
        raise TestFailure("mic_test1 differs from mock")
    elif ord(keymic[14]) != int(str(mic_test2), 2):
        raise TestFailure("mic_test2 differs from mock")
    elif ord(keymic[15]) != int(str(mic_test3), 2):
        raise TestFailure("mic_test3 differs from mock")
    elif ord(keymic[5]) == int(str(mic_test1), 2):    #Todo: remove false positive
        raise TestFailure("MIC comparisons failing.")
    else:
        log.info("MIC Ok!")
Exemple #22
0
class TB(object):
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.rc.max_payload_size = 0x1  # 256 bytes
        self.rc.max_read_request_size = 0x2  # 512 bytes

        self.dev = UltraScalePcieDevice(
            # configuration options
            pcie_generation=3,
            pcie_link_width=8,
            user_clk_frequency=250e6,
            alignment="dword",
            straddle=False,
            enable_pf1=False,
            enable_client_tag=True,
            enable_extended_tag=True,
            enable_parity=False,
            enable_rx_msg_interface=False,
            enable_sriov=False,
            enable_extended_configuration=False,
            enable_pf0_msi=True,
            enable_pf1_msi=False,

            # signals
            # Clock and Reset Interface
            user_clk=dut.clk_250mhz,
            user_reset=dut.rst_250mhz,
            # user_lnk_up
            # sys_clk
            # sys_clk_gt
            # sys_reset
            # phy_rdy_out

            # Requester reQuest Interface
            rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
            pcie_rq_seq_num=dut.s_axis_rq_seq_num,
            pcie_rq_seq_num_vld=dut.s_axis_rq_seq_num_valid,
            # pcie_rq_tag
            # pcie_rq_tag_av
            # pcie_rq_tag_vld

            # Requester Completion Interface
            rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),

            # Completer reQuest Interface
            cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
            # pcie_cq_np_req
            # pcie_cq_np_req_count

            # Completer Completion Interface
            cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),

            # Transmit Flow Control Interface
            # pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
            # pcie_tfc_npd_av=dut.pcie_tfc_npd_av,

            # Configuration Management Interface
            cfg_mgmt_addr=dut.cfg_mgmt_addr,
            cfg_mgmt_write=dut.cfg_mgmt_write,
            cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
            cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
            cfg_mgmt_read=dut.cfg_mgmt_read,
            cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
            cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
            # cfg_mgmt_debug_access

            # Configuration Status Interface
            # cfg_phy_link_down
            # cfg_phy_link_status
            # cfg_negotiated_width
            # cfg_current_speed
            cfg_max_payload=dut.cfg_max_payload,
            cfg_max_read_req=dut.cfg_max_read_req,
            # cfg_function_status
            # cfg_vf_status
            # cfg_function_power_state
            # cfg_vf_power_state
            # cfg_link_power_state
            # cfg_err_cor_out
            # cfg_err_nonfatal_out
            # cfg_err_fatal_out
            # cfg_local_error_out
            # cfg_local_error_valid
            # cfg_rx_pm_state
            # cfg_tx_pm_state
            # cfg_ltssm_state
            # cfg_rcb_status
            # cfg_obff_enable
            # cfg_pl_status_change
            # cfg_tph_requester_enable
            # cfg_tph_st_mode
            # cfg_vf_tph_requester_enable
            # cfg_vf_tph_st_mode

            # Configuration Received Message Interface
            # cfg_msg_received
            # cfg_msg_received_data
            # cfg_msg_received_type

            # Configuration Transmit Message Interface
            # cfg_msg_transmit
            # cfg_msg_transmit_type
            # cfg_msg_transmit_data
            # cfg_msg_transmit_done

            # Configuration Flow Control Interface
            cfg_fc_ph=dut.cfg_fc_ph,
            cfg_fc_pd=dut.cfg_fc_pd,
            cfg_fc_nph=dut.cfg_fc_nph,
            cfg_fc_npd=dut.cfg_fc_npd,
            cfg_fc_cplh=dut.cfg_fc_cplh,
            cfg_fc_cpld=dut.cfg_fc_cpld,
            cfg_fc_sel=dut.cfg_fc_sel,

            # Configuration Control Interface
            # cfg_hot_reset_in
            # cfg_hot_reset_out
            # cfg_config_space_enable
            # cfg_dsn
            # cfg_bus_number
            # cfg_ds_port_number
            # cfg_ds_bus_number
            # cfg_ds_device_number
            # cfg_ds_function_number
            # cfg_power_state_change_ack
            # cfg_power_state_change_interrupt
            cfg_err_cor_in=dut.status_error_cor,
            cfg_err_uncor_in=dut.status_error_uncor,
            # cfg_flr_in_process
            # cfg_flr_done
            # cfg_vf_flr_in_process
            # cfg_vf_flr_func_num
            # cfg_vf_flr_done
            # cfg_pm_aspm_l1_entry_reject
            # cfg_pm_aspm_tx_l0s_entry_disable
            # cfg_req_pm_transition_l23_ready
            # cfg_link_training_enable

            # Configuration Interrupt Controller Interface
            # cfg_interrupt_int
            # cfg_interrupt_sent
            # cfg_interrupt_pending
            cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
            cfg_interrupt_msi_vf_enable=dut.cfg_interrupt_msi_vf_enable,
            cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
            cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
            cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
            cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
            cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
            cfg_interrupt_msi_pending_status=dut.
            cfg_interrupt_msi_pending_status,
            cfg_interrupt_msi_pending_status_data_enable=dut.
            cfg_interrupt_msi_pending_status_data_enable,
            cfg_interrupt_msi_pending_status_function_num=dut.
            cfg_interrupt_msi_pending_status_function_num,
            cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
            cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
            # cfg_interrupt_msix_enable
            # cfg_interrupt_msix_mask
            # cfg_interrupt_msix_vf_enable
            # cfg_interrupt_msix_vf_mask
            # cfg_interrupt_msix_address
            # cfg_interrupt_msix_data
            # cfg_interrupt_msix_int
            # cfg_interrupt_msix_vec_pending
            # cfg_interrupt_msix_vec_pending_status
            cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
            cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
            cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
            # cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
            # cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,

            # Configuration Extend Interface
            # cfg_ext_read_received
            # cfg_ext_write_received
            # cfg_ext_register_number
            # cfg_ext_function_number
            # cfg_ext_write_data
            # cfg_ext_write_byte_enable
            # cfg_ext_read_data
            # cfg_ext_read_data_valid
        )

        # self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.driver = mqnic.Driver()

        self.dev.functions[0].msi_cap.msi_multiple_message_capable = 5

        self.dev.functions[0].configure_bar(
            0,
            2**len(dut.core_inst.core_pcie_inst.axil_ctrl_araddr),
            ext=True,
            prefetch=True)
        if hasattr(dut.core_inst.core_pcie_inst, 'pcie_app_ctrl'):
            self.dev.functions[0].configure_bar(
                2,
                2**len(dut.core_inst.core_pcie_inst.axil_app_ctrl_araddr),
                ext=True,
                prefetch=True)

        # Ethernet
        cocotb.start_soon(Clock(dut.sfp_1_rx_clk, 6.4, units="ns").start())
        self.sfp_1_source = XgmiiSource(dut.sfp_1_rxd, dut.sfp_1_rxc,
                                        dut.sfp_1_rx_clk, dut.sfp_1_rx_rst)
        cocotb.start_soon(Clock(dut.sfp_1_tx_clk, 6.4, units="ns").start())
        self.sfp_1_sink = XgmiiSink(dut.sfp_1_txd, dut.sfp_1_txc,
                                    dut.sfp_1_tx_clk, dut.sfp_1_tx_rst)

        cocotb.start_soon(Clock(dut.sfp_2_rx_clk, 6.4, units="ns").start())
        self.sfp_2_source = XgmiiSource(dut.sfp_2_rxd, dut.sfp_2_rxc,
                                        dut.sfp_2_rx_clk, dut.sfp_2_rx_rst)
        cocotb.start_soon(Clock(dut.sfp_2_tx_clk, 6.4, units="ns").start())
        self.sfp_2_sink = XgmiiSink(dut.sfp_2_txd, dut.sfp_2_txc,
                                    dut.sfp_2_tx_clk, dut.sfp_2_tx_rst)

        cocotb.start_soon(Clock(dut.sfp_3_rx_clk, 6.4, units="ns").start())
        self.sfp_3_source = XgmiiSource(dut.sfp_3_rxd, dut.sfp_3_rxc,
                                        dut.sfp_3_rx_clk, dut.sfp_3_rx_rst)
        cocotb.start_soon(Clock(dut.sfp_3_tx_clk, 6.4, units="ns").start())
        self.sfp_3_sink = XgmiiSink(dut.sfp_3_txd, dut.sfp_3_txc,
                                    dut.sfp_3_tx_clk, dut.sfp_3_tx_rst)

        cocotb.start_soon(Clock(dut.sfp_4_rx_clk, 6.4, units="ns").start())
        self.sfp_4_source = XgmiiSource(dut.sfp_4_rxd, dut.sfp_4_rxc,
                                        dut.sfp_4_rx_clk, dut.sfp_4_rx_rst)
        cocotb.start_soon(Clock(dut.sfp_4_tx_clk, 6.4, units="ns").start())
        self.sfp_4_sink = XgmiiSink(dut.sfp_4_txd, dut.sfp_4_txc,
                                    dut.sfp_4_tx_clk, dut.sfp_4_tx_rst)

        dut.btn.setimmediatevalue(0)

        dut.i2c_scl_i.setimmediatevalue(1)
        dut.i2c_sda_i.setimmediatevalue(1)

        self.loopback_enable = False
        cocotb.start_soon(self._run_loopback())

    async def init(self):

        self.dut.sfp_1_rx_rst.setimmediatevalue(0)
        self.dut.sfp_1_tx_rst.setimmediatevalue(0)
        self.dut.sfp_2_rx_rst.setimmediatevalue(0)
        self.dut.sfp_2_tx_rst.setimmediatevalue(0)
        self.dut.sfp_3_rx_rst.setimmediatevalue(0)
        self.dut.sfp_3_tx_rst.setimmediatevalue(0)
        self.dut.sfp_4_rx_rst.setimmediatevalue(0)
        self.dut.sfp_4_tx_rst.setimmediatevalue(0)

        await RisingEdge(self.dut.clk_250mhz)
        await RisingEdge(self.dut.clk_250mhz)

        self.dut.sfp_1_rx_rst.setimmediatevalue(1)
        self.dut.sfp_1_tx_rst.setimmediatevalue(1)
        self.dut.sfp_2_rx_rst.setimmediatevalue(1)
        self.dut.sfp_2_tx_rst.setimmediatevalue(1)
        self.dut.sfp_3_rx_rst.setimmediatevalue(1)
        self.dut.sfp_3_tx_rst.setimmediatevalue(1)
        self.dut.sfp_4_rx_rst.setimmediatevalue(1)
        self.dut.sfp_4_tx_rst.setimmediatevalue(1)

        await FallingEdge(self.dut.rst_250mhz)
        await Timer(100, 'ns')

        await RisingEdge(self.dut.clk_250mhz)
        await RisingEdge(self.dut.clk_250mhz)

        self.dut.sfp_1_rx_rst.setimmediatevalue(0)
        self.dut.sfp_1_tx_rst.setimmediatevalue(0)
        self.dut.sfp_2_rx_rst.setimmediatevalue(0)
        self.dut.sfp_2_tx_rst.setimmediatevalue(0)
        self.dut.sfp_3_rx_rst.setimmediatevalue(0)
        self.dut.sfp_3_tx_rst.setimmediatevalue(0)
        self.dut.sfp_4_rx_rst.setimmediatevalue(0)
        self.dut.sfp_4_tx_rst.setimmediatevalue(0)

        await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)

    async def _run_loopback(self):
        while True:
            await RisingEdge(self.dut.clk_250mhz)

            if self.loopback_enable:
                if not self.sfp_1_sink.empty():
                    await self.sfp_1_source.send(await self.sfp_1_sink.recv())
                if not self.sfp_2_sink.empty():
                    await self.sfp_2_source.send(await self.sfp_2_sink.recv())
                if not self.sfp_3_sink.empty():
                    await self.sfp_3_source.send(await self.sfp_3_sink.recv())
                if not self.sfp_4_sink.empty():
                    await self.sfp_4_source.send(await self.sfp_4_sink.recv())
Exemple #23
0
class Scheduler(object):
    """
    The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the `react`_ method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn. NB implementors should not depend on the scheduling
    order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to cbReadOnlySynch (VPI) or vhpiCbLastKnownDeltaCycle
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to cbReadWriteSynch (VPI) or vhpiCbEndOfProcesses (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from normal->write by registering a ReadWrite
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL   = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE    = 3  # noqa
    _MODE_TERM     = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _readonly = ReadOnly()
    _next_timestep = _NextTimeStep()
    _readwrite = _ReadWrite()
    _timer1 = Timer(1)
    _timer0 = Timer(0)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = collections.defaultdict(list)

        # A dictionary of pending triggers for each coroutine, indexed by coro
        self._coro2triggers = collections.defaultdict(list)

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = {}

        self._pending_coros = []
        self._pending_callbacks = []
        self._pending_triggers = []

        self._terminate = False
        self._test_result = None
        self._entrypoint = None

        # Select the appropriate scheduling algorithm for this simulator
        self.advance = self.default_scheduling_algorithm

    def default_scheduling_algorithm(self):
        """
        Decide whether we need to schedule our own triggers (if at all) in
        order to progress to the next mode.

        This algorithm has been tested against the following simulators:
            Icarus Verilog
        """
        if not self._terminate and self._writes:

            if self._mode == Scheduler._MODE_NORMAL:
                if not self._readwrite.primed:
                    self._readwrite.prime(self.react)
            elif not self._next_timestep.primed:
                self._next_timestep.prime(self.react)

        elif self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            for t in self._trigger2coros:
                t.unprime()

            for t in [self._readwrite, self._readonly, self._next_timestep,
                      self._timer1, self._timer0]:
                if t.primed:
                    t.unprime()

            self._timer1.prime(self.begin_test)
            self._trigger2coros = collections.defaultdict(list)
            self._coro2triggers = collections.defaultdict(list)
            self._terminate = False
            self._mode = Scheduler._MODE_TERM

    def begin_test(self, trigger=None):
        """
        Called to initiate a test.

        Could be called on start-up or from a callback
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            _profile.enable()

        self._mode = Scheduler._MODE_NORMAL
        if trigger is not None:
            trigger.unprime()

        # Issue previous test result, if there is one
        if self._test_result is not None:
            if _debug:
                self.log.debug("Issue test result to regresssion object")
            cocotb.regression.handle_result(self._test_result)
            self._test_result = None
        if self._entrypoint is not None:
            test = self._entrypoint
            self._entrypoint = None
            self.schedule(test)
            self.advance()

        if _profiling:
            _profile.disable()

    def react(self, trigger, depth=0):
        """
        React called when a trigger fires.

        We find any coroutines that are waiting on the particular trigger and
        schedule them.
        """
        if _profiling and not depth:
            _profile.enable()

        # When a trigger fires it is unprimed internally
        if _debug:
            self.log.debug("Trigger fired: %s" % str(trigger))
        # trigger.unprime()

        if self._mode == Scheduler._MODE_TERM:
            if _debug:
                self.log.debug("Ignoring trigger %s since we're terminating" %
                               str(trigger))
            return

        if trigger is self._readonly:
            self._mode = Scheduler._MODE_READONLY
        # Only GPI triggers affect the simulator scheduling mode
        elif isinstance(trigger, GPITrigger):
            self._mode = Scheduler._MODE_NORMAL

        # We're the only source of ReadWrite triggers which are only used for
        # playing back any cached signal updates
        if trigger is self._readwrite:

            if _debug:
                self.log.debug("Writing cached signal updates")

            while self._writes:
                handle, value = self._writes.popitem()
                handle.setimmediatevalue(value)

            self._readwrite.unprime()

            if _profiling:
                _profile.disable()
            return

        # Similarly if we've scheduled our next_timestep on way to readwrite
        if trigger is self._next_timestep:

            if not self._writes:
                self.log.error(
                    "Moved to next timestep without any pending writes!")
            else:
                self.log.debug(
                    "Priming ReadWrite trigger so we can playback writes")
                self._readwrite.prime(self.react)

            if _profiling:
                _profile.disable()
            return

        if trigger not in self._trigger2coros:

            # GPI triggers should only be ever pending if there is an
            # associated coroutine waiting on that trigger, otherwise it would
            # have been unprimed already
            if isinstance(trigger, GPITrigger):
                self.log.critical(
                    "No coroutines waiting on trigger that fired: %s" %
                    str(trigger))

                trigger.log.info("I'm the culprit")
            # For Python triggers this isn't actually an error - we might do
            # event.set() without knowing whether any coroutines are actually
            # waiting on this event, for example
            elif _debug:
                self.log.debug(
                    "No coroutines waiting on trigger that fired: %s" %
                    str(trigger))

            if _profiling:
                _profile.disable()
            return

        # Scheduled coroutines may append to our waiting list so the first
        # thing to do is pop all entries waiting on this trigger.
        scheduling = self._trigger2coros.pop(trigger)

        if _debug:
            debugstr = "\n\t".join([coro.__name__ for coro in scheduling])
            if len(scheduling):
                debugstr = "\n\t" + debugstr
            self.log.debug("%d pending coroutines for event %s%s" %
                           (len(scheduling), str(trigger), debugstr))

        # If the coroutine was waiting on multiple triggers we may be able
        # to unprime the other triggers that didn't fire
        for coro in scheduling:
            for pending in self._coro2triggers[coro]:
                for others in self._trigger2coros[pending]:
                    if others not in scheduling:
                        break
                else:
                    # if pending is not trigger and pending.primed:
                    #     pending.unprime()
                    if pending.primed:
                        pending.unprime()
                    del self._trigger2coros[pending]

        for coro in scheduling:
            self.schedule(coro, trigger=trigger)
            if _debug:
                self.log.debug("Scheduled coroutine %s" % (coro.__name__))

        while self._pending_triggers:
            if _debug:
                self.log.debug("Scheduling pending trigger %s" %
                               (str(self._pending_triggers[0])))
            self.react(self._pending_triggers.pop(0), depth=depth + 1)

        # We only advance for GPI triggers
        if not depth and isinstance(trigger, GPITrigger):
            self.advance()

            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")

            if _profiling:
                _profile.disable()
        return

    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        for trigger in self._coro2triggers[coro]:
            if coro in self._trigger2coros[trigger]:
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
        del self._coro2triggers[coro]

        if coro._join in self._trigger2coros:
            self._pending_triggers.append(coro._join)

        # Remove references to allow GC to clean up
        del coro._join

    def save_write(self, handle, value):
        self._writes[handle] = value

    def _coroutine_yielded(self, coro, triggers):
        """
        Prime the triggers and update our internal mappings
        """
        self._coro2triggers[coro] = triggers

        for trigger in triggers:

            self._trigger2coros[trigger].append(coro)
            if not trigger.primed:
                try:
                    trigger.prime(self.react)
                except Exception as e:
                    # Convert any exceptions into a test result
                    self.finish_test(
                        create_error(self, "Unable to prime trigger %s: %s" %
                                     (str(trigger), str(e))))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def add(self, coroutine):
        """
        Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error mesages in the event of common gotchas
        """
        if isinstance(coroutine, cocotb.decorators.coroutine):
            self.log.critical(
                "Attempt to schedule a coroutine that hasn't started")
            coroutine.log.error("This is the failing coroutine")
            self.log.warning(
                "Did you forget to add parentheses to the @test decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):
            self.log.critical(
                "Attempt to add something to the scheduler which isn't a "
                "coroutine")
            self.log.warning(
                "Got: %s (%s)" % (str(type(coroutine)), repr(coroutine)))
            self.log.warning("Did you use the @coroutine decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        if _debug:
            self.log.debug("Adding new coroutine %s" % coroutine.__name__)

        self.schedule(coroutine)
        self.advance()
        return coroutine

    def new_test(self, coroutine):
        self._entrypoint = coroutine

    def schedule(self, coroutine, trigger=None):
        """
        Schedule a coroutine by calling the send method

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule

            trigger (cocotb.triggers.Trigger): The trigger that caused this
                                                coroutine to be scheduled
        """
        if hasattr(trigger, "pass_retval"):
            sendval = trigger.retval
            if _debug:
                coroutine.log.debug("Scheduling with ReturnValue(%s)" %
                                    (repr(sendval)))
        else:
            sendval = trigger
            if _debug:
                coroutine.log.debug("Scheduling with %s" % str(trigger))

        try:
            result = coroutine.send(sendval)
            if _debug:
                self.log.debug("Coroutine %s yielded %s (mode %d)" %
                               (coroutine.__name__, str(result), self._mode))

        # TestComplete indication is game over, tidy up
        except TestComplete as test_result:
            # Tag that close down is needed, save the test_result
            # for later use in cleanup handler
            self.log.debug("TestComplete received: %s" % test_result.__class__.__name__)
            self.finish_test(test_result)
            return

        # Normal co-routine completion
        except cocotb.decorators.CoroutineComplete as exc:
            if _debug:
                self.log.debug("Coroutine completed: %s" % str(coroutine))
            self.unschedule(coroutine)
            return

        # Don't handle the result if we're shutting down
        if self._terminate:
            return

        # Queue current routine to schedule when the nested routine exits
        if isinstance(result, cocotb.decorators.RunningCoroutine):
            if _debug:
                self.log.debug("Scheduling nested co-routine: %s" %
                               result.__name__)

            self.queue(result)
            new_trigger = result.join()
            self._coroutine_yielded(coroutine, [new_trigger])

        elif isinstance(result, Trigger):
            self._coroutine_yielded(coroutine, [result])

        elif (isinstance(result, list) and
                not [t for t in result if not isinstance(t, Trigger)]):
            self._coroutine_yielded(coroutine, result)

        else:
            msg = ("Coroutine %s yielded something the scheduler can't handle"
                   % str(coroutine))
            msg += ("\nGot type: %s repr: %s str: %s" %
                    (type(result), repr(result), str(result)))
            msg += "\nDid you forget to decorate with @cocotb.coroutine?"
            try:
                raise_error(self, msg)
            except Exception as e:
                self.finish_test(e)

        # Handle any newly queued coroutines that need to be scheduled
        while self._pending_coros:
            self.add(self._pending_coros.pop(0))

        while self._pending_callbacks:
            self._pending_callbacks.pop(0)()

    def finish_test(self, test_result):
        """Cache the test result and set the terminate flag"""
        self.log.debug("finish_test called with %s" % (repr(test_result)))
        if not self._terminate:
            self._terminate = True
            self._test_result = test_result
            self.cleanup()

    def finish_scheduler(self, test_result):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed"""
        self.log.debug("Issue sim closedown result to regresssion object")
        cocotb.regression.handle_result(test_result)

    def cleanup(self):
        """
        Clear up all our state

        Unprime all pending triggers and kill off any coroutines
        """
        for trigger, waiting in self._trigger2coros.items():
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()
Exemple #24
0
class TB(object):
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.dev = S10PcieDevice(
            # configuration options
            pcie_generation=3,
            # pcie_link_width=2,
            # pld_clk_frequency=250e6,
            l_tile=False,
            pf_count=1,
            max_payload_size=1024,
            enable_extended_tag=True,
            pf0_msi_enable=True,
            pf0_msi_count=32,
            pf1_msi_enable=False,
            pf1_msi_count=1,
            pf2_msi_enable=False,
            pf2_msi_count=1,
            pf3_msi_enable=False,
            pf3_msi_count=1,
            pf0_msix_enable=False,
            pf0_msix_table_size=0,
            pf0_msix_table_bir=0,
            pf0_msix_table_offset=0x00000000,
            pf0_msix_pba_bir=0,
            pf0_msix_pba_offset=0x00000000,
            pf1_msix_enable=False,
            pf1_msix_table_size=0,
            pf1_msix_table_bir=0,
            pf1_msix_table_offset=0x00000000,
            pf1_msix_pba_bir=0,
            pf1_msix_pba_offset=0x00000000,
            pf2_msix_enable=False,
            pf2_msix_table_size=0,
            pf2_msix_table_bir=0,
            pf2_msix_table_offset=0x00000000,
            pf2_msix_pba_bir=0,
            pf2_msix_pba_offset=0x00000000,
            pf3_msix_enable=False,
            pf3_msix_table_size=0,
            pf3_msix_table_bir=0,
            pf3_msix_table_offset=0x00000000,
            pf3_msix_pba_bir=0,
            pf3_msix_pba_offset=0x00000000,

            # signals
            # Clock and reset
            # npor=dut.npor,
            # pin_perst=dut.pin_perst,
            # ninit_done=dut.ninit_done,
            # pld_clk_inuse=dut.pld_clk_inuse,
            # pld_core_ready=dut.pld_core_ready,
            reset_status=dut.rst,
            # clr_st=dut.clr_st,
            # refclk=dut.refclk,
            coreclkout_hip=dut.clk,

            # RX interface
            rx_bus=S10RxBus.from_prefix(dut, "rx_st"),

            # TX interface
            tx_bus=S10TxBus.from_prefix(dut, "tx_st"),

            # TX flow control
            tx_ph_cdts=dut.tx_ph_cdts,
            tx_pd_cdts=dut.tx_pd_cdts,
            tx_nph_cdts=dut.tx_nph_cdts,
            tx_npd_cdts=dut.tx_npd_cdts,
            tx_cplh_cdts=dut.tx_cplh_cdts,
            tx_cpld_cdts=dut.tx_cpld_cdts,
            tx_hdr_cdts_consumed=dut.tx_hdr_cdts_consumed,
            tx_data_cdts_consumed=dut.tx_data_cdts_consumed,
            tx_cdts_type=dut.tx_cdts_type,
            tx_cdts_data_value=dut.tx_cdts_data_value,

            # Hard IP status
            # int_status=dut.int_status,
            # int_status_common=dut.int_status_common,
            # derr_cor_ext_rpl=dut.derr_cor_ext_rpl,
            # derr_rpl=dut.derr_rpl,
            # derr_cor_ext_rcv=dut.derr_cor_ext_rcv,
            # derr_uncor_ext_rcv=dut.derr_uncor_ext_rcv,
            # rx_par_err=dut.rx_par_err,
            # tx_par_err=dut.tx_par_err,
            # ltssmstate=dut.ltssmstate,
            # link_up=dut.link_up,
            # lane_act=dut.lane_act,
            # currentspeed=dut.currentspeed,

            # Power management
            # pm_linkst_in_l1=dut.pm_linkst_in_l1,
            # pm_linkst_in_l0s=dut.pm_linkst_in_l0s,
            # pm_state=dut.pm_state,
            # pm_dstate=dut.pm_dstate,
            # apps_pm_xmt_pme=dut.apps_pm_xmt_pme,
            # apps_ready_entr_l23=dut.apps_ready_entr_l23,
            # apps_pm_xmt_turnoff=dut.apps_pm_xmt_turnoff,
            # app_init_rst=dut.app_init_rst,
            # app_xfer_pending=dut.app_xfer_pending,

            # Interrupt interface
            app_msi_req=dut.app_msi_req,
            app_msi_ack=dut.app_msi_ack,
            app_msi_tc=dut.app_msi_tc,
            app_msi_num=dut.app_msi_num,
            app_msi_func_num=dut.app_msi_func_num,
            # app_int_sts=dut.app_int_sts,

            # Error interface
            # serr_out=dut.serr_out,
            # hip_enter_err_mode=dut.hip_enter_err_mode,
            # app_err_valid=dut.app_err_valid,
            # app_err_hdr=dut.app_err_hdr,
            # app_err_info=dut.app_err_info,
            # app_err_func_num=dut.app_err_func_num,

            # Configuration output
            tl_cfg_func=dut.tl_cfg_func,
            tl_cfg_add=dut.tl_cfg_add,
            tl_cfg_ctl=dut.tl_cfg_ctl,

            # Configuration extension bus
            # ceb_req=dut.ceb_req,
            # ceb_ack=dut.ceb_ack,
            # ceb_addr=dut.ceb_addr,
            # ceb_din=dut.ceb_din,
            # ceb_dout=dut.ceb_dout,
            # ceb_wr=dut.ceb_wr,
            # ceb_cdm_convert_data=dut.ceb_cdm_convert_data,
            # ceb_func_num=dut.ceb_func_num,
            # ceb_vf_num=dut.ceb_vf_num,
            # ceb_vf_active=dut.ceb_vf_active,

            # Hard IP reconfiguration interface
            # hip_reconfig_clk=dut.hip_reconfig_clk,
            # hip_reconfig_address=dut.hip_reconfig_address,
            # hip_reconfig_read=dut.hip_reconfig_read,
            # hip_reconfig_readdata=dut.hip_reconfig_readdata,
            # hip_reconfig_readdatavalid=dut.hip_reconfig_readdatavalid,
            # hip_reconfig_write=dut.hip_reconfig_write,
            # hip_reconfig_writedata=dut.hip_reconfig_writedata,
            # hip_reconfig_waitrequest=dut.hip_reconfig_waitrequest,
        )

        self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.dev.functions[0].msi_multiple_message_capable = 5

        self.dev.functions[0].configure_bar(
            0, 2**len(dut.core_pcie_inst.axil_ctrl_awaddr))
        self.dev.functions[0].configure_bar(
            2, 2**len(dut.core_pcie_inst.axi_ram_awaddr))

    async def init(self):

        await FallingEdge(self.dut.rst)
        await Timer(100, 'ns')

        await self.rc.enumerate()
Exemple #25
0
class TB:
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.fork(Clock(dut.clk, 2.56, units="ns").start())

        # Ethernet
        cocotb.fork(Clock(dut.qsfp_0_rx_clk_0, 2.56, units="ns").start())
        self.qsfp_0_0_source = XgmiiSource(dut.qsfp_0_rxd_0, dut.qsfp_0_rxc_0, dut.qsfp_0_rx_clk_0, dut.qsfp_0_rx_rst_0)
        cocotb.fork(Clock(dut.qsfp_0_tx_clk_0, 2.56, units="ns").start())
        self.qsfp_0_0_sink = XgmiiSink(dut.qsfp_0_txd_0, dut.qsfp_0_txc_0, dut.qsfp_0_tx_clk_0, dut.qsfp_0_tx_rst_0)

        cocotb.fork(Clock(dut.qsfp_0_rx_clk_1, 2.56, units="ns").start())
        self.qsfp_0_1_source = XgmiiSource(dut.qsfp_0_rxd_1, dut.qsfp_0_rxc_1, dut.qsfp_0_rx_clk_1, dut.qsfp_0_rx_rst_1)
        cocotb.fork(Clock(dut.qsfp_0_tx_clk_1, 2.56, units="ns").start())
        self.qsfp_0_1_sink = XgmiiSink(dut.qsfp_0_txd_1, dut.qsfp_0_txc_1, dut.qsfp_0_tx_clk_1, dut.qsfp_0_tx_rst_1)

        cocotb.fork(Clock(dut.qsfp_0_rx_clk_2, 2.56, units="ns").start())
        self.qsfp_0_2_source = XgmiiSource(dut.qsfp_0_rxd_2, dut.qsfp_0_rxc_2, dut.qsfp_0_rx_clk_2, dut.qsfp_0_rx_rst_2)
        cocotb.fork(Clock(dut.qsfp_0_tx_clk_2, 2.56, units="ns").start())
        self.qsfp_0_2_sink = XgmiiSink(dut.qsfp_0_txd_2, dut.qsfp_0_txc_2, dut.qsfp_0_tx_clk_2, dut.qsfp_0_tx_rst_2)

        cocotb.fork(Clock(dut.qsfp_0_rx_clk_3, 2.56, units="ns").start())
        self.qsfp_0_3_source = XgmiiSource(dut.qsfp_0_rxd_3, dut.qsfp_0_rxc_3, dut.qsfp_0_rx_clk_3, dut.qsfp_0_rx_rst_3)
        cocotb.fork(Clock(dut.qsfp_0_tx_clk_3, 2.56, units="ns").start())
        self.qsfp_0_3_sink = XgmiiSink(dut.qsfp_0_txd_3, dut.qsfp_0_txc_3, dut.qsfp_0_tx_clk_3, dut.qsfp_0_tx_rst_3)

        cocotb.fork(Clock(dut.qsfp_1_rx_clk_0, 2.56, units="ns").start())
        self.qsfp_1_0_source = XgmiiSource(dut.qsfp_1_rxd_0, dut.qsfp_1_rxc_0, dut.qsfp_1_rx_clk_0, dut.qsfp_1_rx_rst_0)
        cocotb.fork(Clock(dut.qsfp_1_tx_clk_0, 2.56, units="ns").start())
        self.qsfp_1_0_sink = XgmiiSink(dut.qsfp_1_txd_0, dut.qsfp_1_txc_0, dut.qsfp_1_tx_clk_0, dut.qsfp_1_tx_rst_0)

        cocotb.fork(Clock(dut.qsfp_1_rx_clk_1, 2.56, units="ns").start())
        self.qsfp_1_1_source = XgmiiSource(dut.qsfp_1_rxd_1, dut.qsfp_1_rxc_1, dut.qsfp_1_rx_clk_1, dut.qsfp_1_rx_rst_1)
        cocotb.fork(Clock(dut.qsfp_1_tx_clk_1, 2.56, units="ns").start())
        self.qsfp_1_1_sink = XgmiiSink(dut.qsfp_1_txd_1, dut.qsfp_1_txc_1, dut.qsfp_1_tx_clk_1, dut.qsfp_1_tx_rst_1)

        cocotb.fork(Clock(dut.qsfp_1_rx_clk_2, 2.56, units="ns").start())
        self.qsfp_1_2_source = XgmiiSource(dut.qsfp_1_rxd_2, dut.qsfp_1_rxc_2, dut.qsfp_1_rx_clk_2, dut.qsfp_1_rx_rst_2)
        cocotb.fork(Clock(dut.qsfp_1_tx_clk_2, 2.56, units="ns").start())
        self.qsfp_1_2_sink = XgmiiSink(dut.qsfp_1_txd_2, dut.qsfp_1_txc_2, dut.qsfp_1_tx_clk_2, dut.qsfp_1_tx_rst_2)

        cocotb.fork(Clock(dut.qsfp_1_rx_clk_3, 2.56, units="ns").start())
        self.qsfp_1_3_source = XgmiiSource(dut.qsfp_1_rxd_3, dut.qsfp_1_rxc_3, dut.qsfp_1_rx_clk_3, dut.qsfp_1_rx_rst_3)
        cocotb.fork(Clock(dut.qsfp_1_tx_clk_3, 2.56, units="ns").start())
        self.qsfp_1_3_sink = XgmiiSink(dut.qsfp_1_txd_3, dut.qsfp_1_txc_3, dut.qsfp_1_tx_clk_3, dut.qsfp_1_tx_rst_3)

        dut.user_sw.setimmediatevalue(0)

    async def init(self):

        self.dut.rst.setimmediatevalue(0)
        self.dut.qsfp_0_rx_rst_0.setimmediatevalue(0)
        self.dut.qsfp_0_tx_rst_0.setimmediatevalue(0)
        self.dut.qsfp_0_rx_rst_1.setimmediatevalue(0)
        self.dut.qsfp_0_tx_rst_1.setimmediatevalue(0)
        self.dut.qsfp_0_rx_rst_2.setimmediatevalue(0)
        self.dut.qsfp_0_tx_rst_2.setimmediatevalue(0)
        self.dut.qsfp_0_rx_rst_3.setimmediatevalue(0)
        self.dut.qsfp_0_tx_rst_3.setimmediatevalue(0)
        self.dut.qsfp_1_rx_rst_0.setimmediatevalue(0)
        self.dut.qsfp_1_tx_rst_0.setimmediatevalue(0)
        self.dut.qsfp_1_rx_rst_1.setimmediatevalue(0)
        self.dut.qsfp_1_tx_rst_1.setimmediatevalue(0)
        self.dut.qsfp_1_rx_rst_2.setimmediatevalue(0)
        self.dut.qsfp_1_tx_rst_2.setimmediatevalue(0)
        self.dut.qsfp_1_rx_rst_3.setimmediatevalue(0)
        self.dut.qsfp_1_tx_rst_3.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 1
        self.dut.qsfp_0_rx_rst_0 <= 1
        self.dut.qsfp_0_tx_rst_0 <= 1
        self.dut.qsfp_0_rx_rst_1 <= 1
        self.dut.qsfp_0_tx_rst_1 <= 1
        self.dut.qsfp_0_rx_rst_2 <= 1
        self.dut.qsfp_0_tx_rst_2 <= 1
        self.dut.qsfp_0_rx_rst_3 <= 1
        self.dut.qsfp_0_tx_rst_3 <= 1
        self.dut.qsfp_1_rx_rst_0 <= 1
        self.dut.qsfp_1_tx_rst_0 <= 1
        self.dut.qsfp_1_rx_rst_1 <= 1
        self.dut.qsfp_1_tx_rst_1 <= 1
        self.dut.qsfp_1_rx_rst_2 <= 1
        self.dut.qsfp_1_tx_rst_2 <= 1
        self.dut.qsfp_1_rx_rst_3 <= 1
        self.dut.qsfp_1_tx_rst_3 <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 0
        self.dut.qsfp_0_rx_rst_0 <= 0
        self.dut.qsfp_0_tx_rst_0 <= 0
        self.dut.qsfp_0_rx_rst_1 <= 0
        self.dut.qsfp_0_tx_rst_1 <= 0
        self.dut.qsfp_0_rx_rst_2 <= 0
        self.dut.qsfp_0_tx_rst_2 <= 0
        self.dut.qsfp_0_rx_rst_3 <= 0
        self.dut.qsfp_0_tx_rst_3 <= 0
        self.dut.qsfp_1_rx_rst_0 <= 0
        self.dut.qsfp_1_tx_rst_0 <= 0
        self.dut.qsfp_1_rx_rst_1 <= 0
        self.dut.qsfp_1_tx_rst_1 <= 0
        self.dut.qsfp_1_rx_rst_2 <= 0
        self.dut.qsfp_1_tx_rst_2 <= 0
        self.dut.qsfp_1_rx_rst_3 <= 0
        self.dut.qsfp_1_tx_rst_3 <= 0
Exemple #26
0
class TB:
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.start_soon(Clock(dut.clk, 6.4, units="ns").start())

        # Ethernet
        cocotb.start_soon(Clock(dut.phy_gmii_clk, 8, units="ns").start())

        self.gmii_source = GmiiSource(dut.phy_gmii_rxd, dut.phy_gmii_rx_er,
                                      dut.phy_gmii_rx_dv, dut.phy_gmii_clk,
                                      dut.phy_gmii_rst, dut.phy_gmii_clk_en)
        self.gmii_sink = GmiiSink(dut.phy_gmii_txd, dut.phy_gmii_tx_er,
                                  dut.phy_gmii_tx_en, dut.phy_gmii_clk,
                                  dut.phy_gmii_rst, dut.phy_gmii_clk_en)

        dut.phy_gmii_clk_en.setimmediatevalue(1)

        cocotb.start_soon(Clock(dut.qsfp_rx_clk_1, 6.4, units="ns").start())
        self.qsfp_1_source = XgmiiSource(dut.qsfp_rxd_1, dut.qsfp_rxc_1,
                                         dut.qsfp_rx_clk_1, dut.qsfp_rx_rst_1)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_1, 6.4, units="ns").start())
        self.qsfp_1_sink = XgmiiSink(dut.qsfp_txd_1, dut.qsfp_txc_1,
                                     dut.qsfp_tx_clk_1, dut.qsfp_tx_rst_1)

        cocotb.start_soon(Clock(dut.qsfp_rx_clk_2, 6.4, units="ns").start())
        self.qsfp_2_source = XgmiiSource(dut.qsfp_rxd_2, dut.qsfp_rxc_2,
                                         dut.qsfp_rx_clk_2, dut.qsfp_rx_rst_2)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_2, 6.4, units="ns").start())
        self.qsfp_2_sink = XgmiiSink(dut.qsfp_txd_2, dut.qsfp_txc_2,
                                     dut.qsfp_tx_clk_2, dut.qsfp_tx_rst_2)

        cocotb.start_soon(Clock(dut.qsfp_rx_clk_3, 6.4, units="ns").start())
        self.qsfp_3_source = XgmiiSource(dut.qsfp_rxd_3, dut.qsfp_rxc_3,
                                         dut.qsfp_rx_clk_3, dut.qsfp_rx_rst_3)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_3, 6.4, units="ns").start())
        self.qsfp_3_sink = XgmiiSink(dut.qsfp_txd_3, dut.qsfp_txc_3,
                                     dut.qsfp_tx_clk_3, dut.qsfp_tx_rst_3)

        cocotb.start_soon(Clock(dut.qsfp_rx_clk_4, 6.4, units="ns").start())
        self.qsfp_4_source = XgmiiSource(dut.qsfp_rxd_4, dut.qsfp_rxc_4,
                                         dut.qsfp_rx_clk_4, dut.qsfp_rx_rst_4)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_4, 6.4, units="ns").start())
        self.qsfp_4_sink = XgmiiSink(dut.qsfp_txd_4, dut.qsfp_txc_4,
                                     dut.qsfp_tx_clk_4, dut.qsfp_tx_rst_4)

        dut.btnu.setimmediatevalue(0)
        dut.btnl.setimmediatevalue(0)
        dut.btnd.setimmediatevalue(0)
        dut.btnr.setimmediatevalue(0)
        dut.btnc.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)
        dut.uart_rxd.setimmediatevalue(0)
        dut.uart_cts.setimmediatevalue(0)

    async def init(self):

        self.dut.rst.setimmediatevalue(0)
        self.dut.phy_gmii_rst.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_1.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_1.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_2.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_2.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_3.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_3.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_4.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_4.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 1
        self.dut.phy_gmii_rst <= 1
        self.dut.qsfp_rx_rst_1 <= 1
        self.dut.qsfp_tx_rst_1 <= 1
        self.dut.qsfp_rx_rst_2 <= 1
        self.dut.qsfp_tx_rst_2 <= 1
        self.dut.qsfp_rx_rst_3 <= 1
        self.dut.qsfp_tx_rst_3 <= 1
        self.dut.qsfp_rx_rst_4 <= 1
        self.dut.qsfp_tx_rst_4 <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 0
        self.dut.phy_gmii_rst <= 0
        self.dut.qsfp_rx_rst_1 <= 0
        self.dut.qsfp_tx_rst_1 <= 0
        self.dut.qsfp_rx_rst_2 <= 0
        self.dut.qsfp_tx_rst_2 <= 0
        self.dut.qsfp_rx_rst_3 <= 0
        self.dut.qsfp_tx_rst_3 <= 0
        self.dut.qsfp_rx_rst_4 <= 0
        self.dut.qsfp_tx_rst_4 <= 0
Exemple #27
0
class TB(object):
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.rc.max_payload_size = 0x1  # 256 bytes
        self.rc.max_read_request_size = 0x2  # 512 bytes

        self.dev = UltraScalePlusPcieDevice(
            # configuration options
            pcie_generation=3,
            # pcie_link_width=16,
            user_clk_frequency=250e6,
            alignment="dword",
            cq_cc_straddle=False,
            rq_rc_straddle=False,
            rc_4tlp_straddle=False,
            enable_pf1=False,
            enable_client_tag=True,
            enable_extended_tag=True,
            enable_parity=False,
            enable_rx_msg_interface=False,
            enable_sriov=False,
            enable_extended_configuration=False,
            enable_pf0_msi=True,
            enable_pf1_msi=False,

            # signals
            # Clock and Reset Interface
            user_clk=dut.clk,
            user_reset=dut.rst,
            # user_lnk_up
            # sys_clk
            # sys_clk_gt
            # sys_reset
            # phy_rdy_out

            # Requester reQuest Interface
            rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
            pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
            pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
            pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
            pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
            # pcie_rq_tag0
            # pcie_rq_tag1
            # pcie_rq_tag_av
            # pcie_rq_tag_vld0
            # pcie_rq_tag_vld1

            # Requester Completion Interface
            rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),

            # Completer reQuest Interface
            cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
            # pcie_cq_np_req
            # pcie_cq_np_req_count

            # Completer Completion Interface
            cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),

            # Transmit Flow Control Interface
            # pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
            # pcie_tfc_npd_av=dut.pcie_tfc_npd_av,

            # Configuration Management Interface
            cfg_mgmt_addr=dut.cfg_mgmt_addr,
            cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
            cfg_mgmt_write=dut.cfg_mgmt_write,
            cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
            cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
            cfg_mgmt_read=dut.cfg_mgmt_read,
            cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
            cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
            # cfg_mgmt_debug_access

            # Configuration Status Interface
            # cfg_phy_link_down
            # cfg_phy_link_status
            # cfg_negotiated_width
            # cfg_current_speed
            cfg_max_payload=dut.cfg_max_payload,
            cfg_max_read_req=dut.cfg_max_read_req,
            # cfg_function_status
            # cfg_vf_status
            # cfg_function_power_state
            # cfg_vf_power_state
            # cfg_link_power_state
            # cfg_err_cor_out
            # cfg_err_nonfatal_out
            # cfg_err_fatal_out
            # cfg_local_error_out
            # cfg_local_error_valid
            # cfg_rx_pm_state
            # cfg_tx_pm_state
            # cfg_ltssm_state
            # cfg_rcb_status
            # cfg_obff_enable
            # cfg_pl_status_change
            # cfg_tph_requester_enable
            # cfg_tph_st_mode
            # cfg_vf_tph_requester_enable
            # cfg_vf_tph_st_mode

            # Configuration Received Message Interface
            # cfg_msg_received
            # cfg_msg_received_data
            # cfg_msg_received_type

            # Configuration Transmit Message Interface
            # cfg_msg_transmit
            # cfg_msg_transmit_type
            # cfg_msg_transmit_data
            # cfg_msg_transmit_done

            # Configuration Flow Control Interface
            cfg_fc_ph=dut.cfg_fc_ph,
            cfg_fc_pd=dut.cfg_fc_pd,
            cfg_fc_nph=dut.cfg_fc_nph,
            cfg_fc_npd=dut.cfg_fc_npd,
            cfg_fc_cplh=dut.cfg_fc_cplh,
            cfg_fc_cpld=dut.cfg_fc_cpld,
            cfg_fc_sel=dut.cfg_fc_sel,

            # Configuration Control Interface
            # cfg_hot_reset_in
            # cfg_hot_reset_out
            # cfg_config_space_enable
            # cfg_dsn
            # cfg_bus_number
            # cfg_ds_port_number
            # cfg_ds_bus_number
            # cfg_ds_device_number
            # cfg_ds_function_number
            # cfg_power_state_change_ack
            # cfg_power_state_change_interrupt
            cfg_err_cor_in=dut.status_error_cor,
            cfg_err_uncor_in=dut.status_error_uncor,
            # cfg_flr_in_process
            # cfg_flr_done
            # cfg_vf_flr_in_process
            # cfg_vf_flr_func_num
            # cfg_vf_flr_done
            # cfg_pm_aspm_l1_entry_reject
            # cfg_pm_aspm_tx_l0s_entry_disable
            # cfg_req_pm_transition_l23_ready
            # cfg_link_training_enable

            # Configuration Interrupt Controller Interface
            # cfg_interrupt_int
            # cfg_interrupt_sent
            # cfg_interrupt_pending
            cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
            cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
            cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
            cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
            # cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
            cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
            cfg_interrupt_msi_pending_status=dut.
            cfg_interrupt_msi_pending_status,
            cfg_interrupt_msi_pending_status_data_enable=dut.
            cfg_interrupt_msi_pending_status_data_enable,
            # cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
            cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
            cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
            # cfg_interrupt_msix_enable
            # cfg_interrupt_msix_mask
            # cfg_interrupt_msix_vf_enable
            # cfg_interrupt_msix_vf_mask
            # cfg_interrupt_msix_address
            # cfg_interrupt_msix_data
            # cfg_interrupt_msix_int
            # cfg_interrupt_msix_vec_pending
            # cfg_interrupt_msix_vec_pending_status
            cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
            cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
            cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
            # cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
            # cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,

            # Configuration Extend Interface
            # cfg_ext_read_received
            # cfg_ext_write_received
            # cfg_ext_register_number
            # cfg_ext_function_number
            # cfg_ext_write_data
            # cfg_ext_write_byte_enable
            # cfg_ext_read_data
            # cfg_ext_read_data_valid
        )

        # self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.driver = mqnic.Driver()

        self.dev.functions[0].msi_cap.msi_multiple_message_capable = 5

        self.dev.functions[0].configure_bar(
            0,
            2**len(dut.core_pcie_inst.axil_ctrl_araddr),
            ext=True,
            prefetch=True)
        if hasattr(dut.core_pcie_inst, 'pcie_app_ctrl'):
            self.dev.functions[0].configure_bar(
                2,
                2**len(dut.core_pcie_inst.axil_app_ctrl_araddr),
                ext=True,
                prefetch=True)

        # Ethernet
        self.port_mac = []

        eth_int_if_width = len(dut.core_pcie_inst.core_inst.iface[0].port[0].
                               rx_async_fifo_inst.m_axis_tdata)
        eth_clock_period = 6.4
        eth_speed = 10e9

        if eth_int_if_width == 64:
            # 10G
            eth_clock_period = 6.4
            eth_speed = 10e9
        elif eth_int_if_width == 128:
            # 25G
            eth_clock_period = 2.56
            eth_speed = 25e9
        elif eth_int_if_width == 512:
            # 100G
            eth_clock_period = 3.102
            eth_speed = 100e9

        for iface in dut.core_pcie_inst.core_inst.iface:
            for port in iface.port:
                cocotb.start_soon(
                    Clock(port.port_rx_clk, eth_clock_period,
                          units="ns").start())
                cocotb.start_soon(
                    Clock(port.port_tx_clk, eth_clock_period,
                          units="ns").start())

                port.port_rx_rst.setimmediatevalue(0)
                port.port_tx_rst.setimmediatevalue(0)

                mac = EthMac(tx_clk=port.port_tx_clk,
                             tx_rst=port.port_tx_rst,
                             tx_bus=AxiStreamBus.from_prefix(port, "axis_tx"),
                             tx_ptp_time=port.ptp.tx_ptp_cdc_inst.output_ts,
                             tx_ptp_ts=port.ptp.axis_tx_ptp_ts,
                             tx_ptp_ts_tag=port.ptp.axis_tx_ptp_ts_tag,
                             tx_ptp_ts_valid=port.ptp.axis_tx_ptp_ts_valid,
                             rx_clk=port.port_rx_clk,
                             rx_rst=port.port_rx_rst,
                             rx_bus=AxiStreamBus.from_prefix(port, "axis_rx"),
                             rx_ptp_time=port.ptp.rx_ptp_cdc_inst.output_ts,
                             ifg=12,
                             speed=eth_speed)

                self.port_mac.append(mac)

        dut.ctrl_reg_wr_wait.setimmediatevalue(0)
        dut.ctrl_reg_wr_ack.setimmediatevalue(0)
        dut.ctrl_reg_rd_data.setimmediatevalue(0)
        dut.ctrl_reg_rd_wait.setimmediatevalue(0)
        dut.ctrl_reg_rd_ack.setimmediatevalue(0)

        dut.ptp_sample_clk.setimmediatevalue(0)

        dut.s_axis_stat_tdata.setimmediatevalue(0)
        dut.s_axis_stat_tid.setimmediatevalue(0)
        dut.s_axis_stat_tvalid.setimmediatevalue(0)

        self.loopback_enable = False
        cocotb.start_soon(self._run_loopback())

    async def init(self):

        for mac in self.port_mac:
            mac.rx.reset.setimmediatevalue(0)
            mac.tx.reset.setimmediatevalue(0)

        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)

        for mac in self.port_mac:
            mac.rx.reset.setimmediatevalue(1)
            mac.tx.reset.setimmediatevalue(1)

        await FallingEdge(self.dut.rst)
        await Timer(100, 'ns')

        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)

        for mac in self.port_mac:
            mac.rx.reset.setimmediatevalue(0)
            mac.tx.reset.setimmediatevalue(0)

        await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)

    async def _run_loopback(self):
        while True:
            await RisingEdge(self.dut.clk)

            if self.loopback_enable:
                for mac in self.port_mac:
                    if not mac.tx.empty():
                        await mac.rx.send(await mac.tx.recv())
Exemple #28
0
class TB(object):
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.rc.max_payload_size = 0x1  # 256 bytes
        self.rc.max_read_request_size = 0x2  # 512 bytes

        self.dev = S10PcieDevice(
            # configuration options
            pcie_generation=3,
            # pcie_link_width=8,
            # pld_clk_frequency=250e6,
            l_tile=False,

            # signals
            # Clock and reset
            # npor=dut.npor,
            # pin_perst=dut.pin_perst,
            # ninit_done=dut.ninit_done,
            # pld_clk_inuse=dut.pld_clk_inuse,
            # pld_core_ready=dut.pld_core_ready,
            reset_status=dut.rst_250mhz,
            # clr_st=dut.clr_st,
            # refclk=dut.refclk,
            coreclkout_hip=dut.clk_250mhz,

            # RX interface
            rx_bus=S10RxBus.from_prefix(dut, "rx_st"),

            # TX interface
            tx_bus=S10TxBus.from_prefix(dut, "tx_st"),

            # TX flow control
            tx_ph_cdts=dut.tx_ph_cdts,
            tx_pd_cdts=dut.tx_pd_cdts,
            tx_nph_cdts=dut.tx_nph_cdts,
            tx_npd_cdts=dut.tx_npd_cdts,
            tx_cplh_cdts=dut.tx_cplh_cdts,
            tx_cpld_cdts=dut.tx_cpld_cdts,
            tx_hdr_cdts_consumed=dut.tx_hdr_cdts_consumed,
            tx_data_cdts_consumed=dut.tx_data_cdts_consumed,
            tx_cdts_type=dut.tx_cdts_type,
            tx_cdts_data_value=dut.tx_cdts_data_value,

            # Hard IP status
            # int_status=dut.int_status,
            # int_status_common=dut.int_status_common,
            # derr_cor_ext_rpl=dut.derr_cor_ext_rpl,
            # derr_rpl=dut.derr_rpl,
            # derr_cor_ext_rcv=dut.derr_cor_ext_rcv,
            # derr_uncor_ext_rcv=dut.derr_uncor_ext_rcv,
            # rx_par_err=dut.rx_par_err,
            # tx_par_err=dut.tx_par_err,
            # ltssmstate=dut.ltssmstate,
            # link_up=dut.link_up,
            # lane_act=dut.lane_act,
            # currentspeed=dut.currentspeed,

            # Power management
            # pm_linkst_in_l1=dut.pm_linkst_in_l1,
            # pm_linkst_in_l0s=dut.pm_linkst_in_l0s,
            # pm_state=dut.pm_state,
            # pm_dstate=dut.pm_dstate,
            # apps_pm_xmt_pme=dut.apps_pm_xmt_pme,
            # apps_ready_entr_l23=dut.apps_ready_entr_l23,
            # apps_pm_xmt_turnoff=dut.apps_pm_xmt_turnoff,
            # app_init_rst=dut.app_init_rst,
            # app_xfer_pending=dut.app_xfer_pending,

            # Interrupt interface
            app_msi_req=dut.app_msi_req,
            app_msi_ack=dut.app_msi_ack,
            app_msi_tc=dut.app_msi_tc,
            app_msi_num=dut.app_msi_num,
            app_msi_func_num=dut.app_msi_func_num,
            # app_int_sts=dut.app_int_sts,

            # Error interface
            # app_err_valid=dut.app_err_valid,
            # app_err_hdr=dut.app_err_hdr,
            # app_err_info=dut.app_err_info,
            # app_err_func_num=dut.app_err_func_num,

            # Configuration output
            tl_cfg_func=dut.tl_cfg_func,
            tl_cfg_add=dut.tl_cfg_add,
            tl_cfg_ctl=dut.tl_cfg_ctl,

            # Configuration extension bus
            # ceb_req=dut.ceb_req,
            # ceb_ack=dut.ceb_ack,
            # ceb_addr=dut.ceb_addr,
            # ceb_din=dut.ceb_din,
            # ceb_dout=dut.ceb_dout,
            # ceb_wr=dut.ceb_wr,
            # ceb_cdm_convert_data=dut.ceb_cdm_convert_data,
            # ceb_func_num=dut.ceb_func_num,
            # ceb_vf_num=dut.ceb_vf_num,
            # ceb_vf_active=dut.ceb_vf_active,

            # Hard IP reconfiguration interface
            # hip_reconfig_clk=dut.hip_reconfig_clk,
            # hip_reconfig_address=dut.hip_reconfig_address,
            # hip_reconfig_read=dut.hip_reconfig_read,
            # hip_reconfig_readdata=dut.hip_reconfig_readdata,
            # hip_reconfig_readdatavalid=dut.hip_reconfig_readdatavalid,
            # hip_reconfig_write=dut.hip_reconfig_write,
            # hip_reconfig_writedata=dut.hip_reconfig_writedata,
            # hip_reconfig_waitrequest=dut.hip_reconfig_waitrequest,
        )

        # self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.driver = mqnic.Driver()

        self.dev.functions[0].msi_cap.msi_multiple_message_capable = 5

        self.dev.functions[0].configure_bar(0, 2**len(dut.core_inst.core_pcie_inst.axil_ctrl_araddr), ext=True, prefetch=True)
        if hasattr(dut.core_inst.core_pcie_inst, 'pcie_app_ctrl'):
            self.dev.functions[0].configure_bar(2, 2**len(dut.core_inst.core_pcie_inst.axil_app_ctrl_araddr), ext=True, prefetch=True)

        # Ethernet
        cocotb.start_soon(Clock(dut.qsfp0_rx_clk_1, 6.4, units="ns").start())
        self.qsfp0_1_source = XgmiiSource(dut.qsfp0_rxd_1, dut.qsfp0_rxc_1, dut.qsfp0_rx_clk_1, dut.qsfp0_rx_rst_1)
        cocotb.start_soon(Clock(dut.qsfp0_tx_clk_1, 6.4, units="ns").start())
        self.qsfp0_1_sink = XgmiiSink(dut.qsfp0_txd_1, dut.qsfp0_txc_1, dut.qsfp0_tx_clk_1, dut.qsfp0_tx_rst_1)

        cocotb.start_soon(Clock(dut.qsfp0_rx_clk_2, 6.4, units="ns").start())
        self.qsfp0_2_source = XgmiiSource(dut.qsfp0_rxd_2, dut.qsfp0_rxc_2, dut.qsfp0_rx_clk_2, dut.qsfp0_rx_rst_2)
        cocotb.start_soon(Clock(dut.qsfp0_tx_clk_2, 6.4, units="ns").start())
        self.qsfp0_2_sink = XgmiiSink(dut.qsfp0_txd_2, dut.qsfp0_txc_2, dut.qsfp0_tx_clk_2, dut.qsfp0_tx_rst_2)

        cocotb.start_soon(Clock(dut.qsfp0_rx_clk_3, 6.4, units="ns").start())
        self.qsfp0_3_source = XgmiiSource(dut.qsfp0_rxd_3, dut.qsfp0_rxc_3, dut.qsfp0_rx_clk_3, dut.qsfp0_rx_rst_3)
        cocotb.start_soon(Clock(dut.qsfp0_tx_clk_3, 6.4, units="ns").start())
        self.qsfp0_3_sink = XgmiiSink(dut.qsfp0_txd_3, dut.qsfp0_txc_3, dut.qsfp0_tx_clk_3, dut.qsfp0_tx_rst_3)

        cocotb.start_soon(Clock(dut.qsfp0_rx_clk_4, 6.4, units="ns").start())
        self.qsfp0_4_source = XgmiiSource(dut.qsfp0_rxd_4, dut.qsfp0_rxc_4, dut.qsfp0_rx_clk_4, dut.qsfp0_rx_rst_4)
        cocotb.start_soon(Clock(dut.qsfp0_tx_clk_4, 6.4, units="ns").start())
        self.qsfp0_4_sink = XgmiiSink(dut.qsfp0_txd_4, dut.qsfp0_txc_4, dut.qsfp0_tx_clk_4, dut.qsfp0_tx_rst_4)

        cocotb.start_soon(Clock(dut.qsfp1_rx_clk_1, 6.4, units="ns").start())
        self.qsfp1_1_source = XgmiiSource(dut.qsfp1_rxd_1, dut.qsfp1_rxc_1, dut.qsfp1_rx_clk_1, dut.qsfp1_rx_rst_1)
        cocotb.start_soon(Clock(dut.qsfp1_tx_clk_1, 6.4, units="ns").start())
        self.qsfp1_1_sink = XgmiiSink(dut.qsfp1_txd_1, dut.qsfp1_txc_1, dut.qsfp1_tx_clk_1, dut.qsfp1_tx_rst_1)

        cocotb.start_soon(Clock(dut.qsfp1_rx_clk_2, 6.4, units="ns").start())
        self.qsfp1_2_source = XgmiiSource(dut.qsfp1_rxd_2, dut.qsfp1_rxc_2, dut.qsfp1_rx_clk_2, dut.qsfp1_rx_rst_2)
        cocotb.start_soon(Clock(dut.qsfp1_tx_clk_2, 6.4, units="ns").start())
        self.qsfp1_2_sink = XgmiiSink(dut.qsfp1_txd_2, dut.qsfp1_txc_2, dut.qsfp1_tx_clk_2, dut.qsfp1_tx_rst_2)

        cocotb.start_soon(Clock(dut.qsfp1_rx_clk_3, 6.4, units="ns").start())
        self.qsfp1_3_source = XgmiiSource(dut.qsfp1_rxd_3, dut.qsfp1_rxc_3, dut.qsfp1_rx_clk_3, dut.qsfp1_rx_rst_3)
        cocotb.start_soon(Clock(dut.qsfp1_tx_clk_3, 6.4, units="ns").start())
        self.qsfp1_3_sink = XgmiiSink(dut.qsfp1_txd_3, dut.qsfp1_txc_3, dut.qsfp1_tx_clk_3, dut.qsfp1_tx_rst_3)

        cocotb.start_soon(Clock(dut.qsfp1_rx_clk_4, 6.4, units="ns").start())
        self.qsfp1_4_source = XgmiiSource(dut.qsfp1_rxd_4, dut.qsfp1_rxc_4, dut.qsfp1_rx_clk_4, dut.qsfp1_rx_rst_4)
        cocotb.start_soon(Clock(dut.qsfp1_tx_clk_4, 6.4, units="ns").start())
        self.qsfp1_4_sink = XgmiiSink(dut.qsfp1_txd_4, dut.qsfp1_txc_4, dut.qsfp1_tx_clk_4, dut.qsfp1_tx_rst_4)

        # dut.qsfp0_i2c_scl_i.setimmediatevalue(1)
        # dut.qsfp0_i2c_sda_i.setimmediatevalue(1)
        # dut.qsfp0_intr_n.setimmediatevalue(1)
        # dut.qsfp0_mod_prsnt_n.setimmediatevalue(0)

        # dut.qsfp0_rx_error_count_0.setimmediatevalue(0)
        # dut.qsfp0_rx_error_count_1.setimmediatevalue(0)
        # dut.qsfp0_rx_error_count_2.setimmediatevalue(0)
        # dut.qsfp0_rx_error_count_3.setimmediatevalue(0)

        # dut.qsfp1_i2c_scl_i.setimmediatevalue(1)
        # dut.qsfp1_i2c_sda_i.setimmediatevalue(1)
        # dut.qsfp1_intr_n.setimmediatevalue(1)
        # dut.qsfp1_mod_prsnt_n.setimmediatevalue(0)

        # dut.qsfp1_rx_error_count_0.setimmediatevalue(0)
        # dut.qsfp1_rx_error_count_1.setimmediatevalue(0)
        # dut.qsfp1_rx_error_count_2.setimmediatevalue(0)
        # dut.qsfp1_rx_error_count_3.setimmediatevalue(0)

        # dut.qspi_dq_i.setimmediatevalue(0)

        self.loopback_enable = False
        cocotb.start_soon(self._run_loopback())

    async def init(self):

        self.dut.qsfp0_rx_rst_1.setimmediatevalue(0)
        self.dut.qsfp0_tx_rst_1.setimmediatevalue(0)
        self.dut.qsfp0_rx_rst_2.setimmediatevalue(0)
        self.dut.qsfp0_tx_rst_2.setimmediatevalue(0)
        self.dut.qsfp0_rx_rst_3.setimmediatevalue(0)
        self.dut.qsfp0_tx_rst_3.setimmediatevalue(0)
        self.dut.qsfp0_rx_rst_4.setimmediatevalue(0)
        self.dut.qsfp0_tx_rst_4.setimmediatevalue(0)
        self.dut.qsfp1_rx_rst_1.setimmediatevalue(0)
        self.dut.qsfp1_tx_rst_1.setimmediatevalue(0)
        self.dut.qsfp1_rx_rst_2.setimmediatevalue(0)
        self.dut.qsfp1_tx_rst_2.setimmediatevalue(0)
        self.dut.qsfp1_rx_rst_3.setimmediatevalue(0)
        self.dut.qsfp1_tx_rst_3.setimmediatevalue(0)
        self.dut.qsfp1_rx_rst_4.setimmediatevalue(0)
        self.dut.qsfp1_tx_rst_4.setimmediatevalue(0)

        await RisingEdge(self.dut.clk_250mhz)
        await RisingEdge(self.dut.clk_250mhz)

        self.dut.qsfp0_rx_rst_1.setimmediatevalue(1)
        self.dut.qsfp0_tx_rst_1.setimmediatevalue(1)
        self.dut.qsfp0_rx_rst_2.setimmediatevalue(1)
        self.dut.qsfp0_tx_rst_2.setimmediatevalue(1)
        self.dut.qsfp0_rx_rst_3.setimmediatevalue(1)
        self.dut.qsfp0_tx_rst_3.setimmediatevalue(1)
        self.dut.qsfp0_rx_rst_4.setimmediatevalue(1)
        self.dut.qsfp0_tx_rst_4.setimmediatevalue(1)
        self.dut.qsfp1_rx_rst_1.setimmediatevalue(1)
        self.dut.qsfp1_tx_rst_1.setimmediatevalue(1)
        self.dut.qsfp1_rx_rst_2.setimmediatevalue(1)
        self.dut.qsfp1_tx_rst_2.setimmediatevalue(1)
        self.dut.qsfp1_rx_rst_3.setimmediatevalue(1)
        self.dut.qsfp1_tx_rst_3.setimmediatevalue(1)
        self.dut.qsfp1_rx_rst_4.setimmediatevalue(1)
        self.dut.qsfp1_tx_rst_4.setimmediatevalue(1)

        await FallingEdge(self.dut.rst_250mhz)
        await Timer(100, 'ns')

        await RisingEdge(self.dut.clk_250mhz)
        await RisingEdge(self.dut.clk_250mhz)

        self.dut.qsfp0_rx_rst_1.setimmediatevalue(0)
        self.dut.qsfp0_tx_rst_1.setimmediatevalue(0)
        self.dut.qsfp0_rx_rst_2.setimmediatevalue(0)
        self.dut.qsfp0_tx_rst_2.setimmediatevalue(0)
        self.dut.qsfp0_rx_rst_3.setimmediatevalue(0)
        self.dut.qsfp0_tx_rst_3.setimmediatevalue(0)
        self.dut.qsfp0_rx_rst_4.setimmediatevalue(0)
        self.dut.qsfp0_tx_rst_4.setimmediatevalue(0)
        self.dut.qsfp1_rx_rst_1.setimmediatevalue(0)
        self.dut.qsfp1_tx_rst_1.setimmediatevalue(0)
        self.dut.qsfp1_rx_rst_2.setimmediatevalue(0)
        self.dut.qsfp1_tx_rst_2.setimmediatevalue(0)
        self.dut.qsfp1_rx_rst_3.setimmediatevalue(0)
        self.dut.qsfp1_tx_rst_3.setimmediatevalue(0)
        self.dut.qsfp1_rx_rst_4.setimmediatevalue(0)
        self.dut.qsfp1_tx_rst_4.setimmediatevalue(0)

        await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)

    async def _run_loopback(self):
        while True:
            await RisingEdge(self.dut.clk_250mhz)

            if self.loopback_enable:
                if not self.qsfp0_1_sink.empty():
                    await self.qsfp0_1_source.send(await self.qsfp0_1_sink.recv())
                if not self.qsfp0_2_sink.empty():
                    await self.qsfp0_2_source.send(await self.qsfp0_2_sink.recv())
                if not self.qsfp0_3_sink.empty():
                    await self.qsfp0_3_source.send(await self.qsfp0_3_sink.recv())
                if not self.qsfp0_4_sink.empty():
                    await self.qsfp0_4_source.send(await self.qsfp0_4_sink.recv())
                if not self.qsfp1_1_sink.empty():
                    await self.qsfp1_1_source.send(await self.qsfp1_1_sink.recv())
                if not self.qsfp1_2_sink.empty():
                    await self.qsfp1_2_source.send(await self.qsfp1_2_sink.recv())
                if not self.qsfp1_3_sink.empty():
                    await self.qsfp1_3_source.send(await self.qsfp1_3_sink.recv())
                if not self.qsfp1_4_sink.empty():
                    await self.qsfp1_4_source.send(await self.qsfp1_4_sink.recv())
Exemple #29
0
class Scheduler:
    """The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the :any:`react` method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn.

    .. attention::

       Implementors should not depend on the scheduling order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to :any:`cbReadOnlySynch` (VPI) or :any:`vhpiCbLastKnownDeltaCycle`
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to :any:`cbReadWriteSynch` (VPI) or :c:macro:`vhpiCbEndOfProcesses` (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from Normal to Write by registering a :class:`~cocotb.triggers.ReadWrite`
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE = 3  # noqa
    _MODE_TERM = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _next_time_step = NextTimeStep()
    _read_write = ReadWrite()
    _read_only = ReadOnly()
    _timer1 = Timer(1)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # Use OrderedDict here for deterministic behavior (gh-934)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = _py_compat.insertion_ordered_dict()

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending (write_func, args), keyed by handle. Only the last scheduled write
        # in a timestep is performed, all the rest are discarded in python.
        self._write_calls = _py_compat.insertion_ordered_dict()

        self._pending_coros = []
        self._pending_triggers = []
        self._pending_threads = []
        self._pending_events = [
        ]  # Events we need to call set on once we've unwound

        self._terminate = False
        self._test = None
        self._main_thread = threading.current_thread()

        self._current_task = None

        self._is_reacting = False

        self._write_coro_inst = None
        self._writes_pending = Event()

    async def _do_writes(self):
        """ An internal coroutine that performs pending writes """
        while True:
            await self._writes_pending.wait()
            if self._mode != Scheduler._MODE_NORMAL:
                await self._next_time_step

            await self._read_write

            while self._write_calls:
                handle, (func, args) = self._write_calls.popitem()
                func(*args)
            self._writes_pending.clear()

    def _check_termination(self):
        """
        Handle a termination that causes us to move onto the next test.
        """
        if self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            if self._write_coro_inst is not None:
                self._write_coro_inst.kill()
                self._write_coro_inst = None

            for t in self._trigger2coros:
                t.unprime()

            if self._timer1.primed:
                self._timer1.unprime()

            self._timer1.prime(self._test_completed)
            self._trigger2coros = _py_compat.insertion_ordered_dict()
            self._terminate = False
            self._write_calls = _py_compat.insertion_ordered_dict()
            self._writes_pending.clear()
            self._mode = Scheduler._MODE_TERM

    def _test_completed(self, trigger=None):
        """Called after a test and its cleanup have completed
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            ctx = profiling_context()
        else:
            ctx = _py_compat.nullcontext()

        with ctx:
            self._mode = Scheduler._MODE_NORMAL
            if trigger is not None:
                trigger.unprime()

            # extract the current test, and clear it
            test = self._test
            self._test = None
            if test is None:
                raise InternalError(
                    "_test_completed called with no active test")
            if test._outcome is None:
                raise InternalError(
                    "_test_completed called with an incomplete test")

            # Issue previous test result
            if _debug:
                self.log.debug("Issue test result to regression object")

            # this may scheduler another test
            cocotb.regression_manager.handle_result(test)

            # if it did, make sure we handle the test completing
            self._check_termination()

    def react(self, trigger):
        """
        Called when a trigger fires.

        We ensure that we only start the event loop once, rather than
        letting it recurse.
        """
        if self._is_reacting:
            # queue up the trigger, the event loop will get to it
            self._pending_triggers.append(trigger)
            return

        if self._pending_triggers:
            raise InternalError(
                "Expected all triggers to be handled but found {}".format(
                    self._pending_triggers))

        # start the event loop
        self._is_reacting = True
        try:
            self._event_loop(trigger)
        finally:
            self._is_reacting = False

    def _event_loop(self, trigger):
        """
        Run an event loop triggered by the given trigger.

        The loop will keep running until no further triggers fire.

        This should be triggered by only:
        * The beginning of a test, when there is no trigger to react to
        * A GPI trigger
        """
        if _profiling:
            ctx = profiling_context()
        else:
            ctx = _py_compat.nullcontext()

        with ctx:
            # When a trigger fires it is unprimed internally
            if _debug:
                self.log.debug("Trigger fired: %s" % str(trigger))
            # trigger.unprime()

            if self._mode == Scheduler._MODE_TERM:
                if _debug:
                    self.log.debug(
                        "Ignoring trigger %s since we're terminating" %
                        str(trigger))
                return

            if trigger is self._read_only:
                self._mode = Scheduler._MODE_READONLY
            # Only GPI triggers affect the simulator scheduling mode
            elif isinstance(trigger, GPITrigger):
                self._mode = Scheduler._MODE_NORMAL

            # work through triggers one by one
            is_first = True
            self._pending_triggers.append(trigger)
            while self._pending_triggers:
                trigger = self._pending_triggers.pop(0)

                if not is_first and isinstance(trigger, GPITrigger):
                    self.log.warning(
                        "A GPI trigger occurred after entering react - this "
                        "should not happen.")
                    assert False

                # this only exists to enable the warning above
                is_first = False

                # Scheduled coroutines may append to our waiting list so the first
                # thing to do is pop all entries waiting on this trigger.
                try:
                    scheduling = self._trigger2coros.pop(trigger)
                except KeyError:
                    # GPI triggers should only be ever pending if there is an
                    # associated coroutine waiting on that trigger, otherwise it would
                    # have been unprimed already
                    if isinstance(trigger, GPITrigger):
                        self.log.critical(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                        trigger.log.info("I'm the culprit")
                    # For Python triggers this isn't actually an error - we might do
                    # event.set() without knowing whether any coroutines are actually
                    # waiting on this event, for example
                    elif _debug:
                        self.log.debug(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                    del trigger
                    continue

                if _debug:
                    debugstr = "\n\t".join(
                        [coro._coro.__qualname__ for coro in scheduling])
                    if len(scheduling):
                        debugstr = "\n\t" + debugstr
                    self.log.debug("%d pending coroutines for event %s%s" %
                                   (len(scheduling), str(trigger), debugstr))

                # This trigger isn't needed any more
                trigger.unprime()

                for coro in scheduling:
                    if coro._outcome is not None:
                        # coroutine was killed by another coroutine waiting on the same trigger
                        continue
                    if _debug:
                        self.log.debug("Scheduling coroutine %s" %
                                       (coro._coro.__qualname__))
                    self.schedule(coro, trigger=trigger)
                    if _debug:
                        self.log.debug("Scheduled coroutine %s" %
                                       (coro._coro.__qualname__))

                # Schedule may have queued up some events so we'll burn through those
                while self._pending_events:
                    if _debug:
                        self.log.debug("Scheduling pending event %s" %
                                       (str(self._pending_events[0])))
                    self._pending_events.pop(0).set()

                # remove our reference to the objects at the end of each loop,
                # to try and avoid them being destroyed at a weird time (as
                # happened in gh-957)
                del trigger
                del coro
                del scheduling

            # no more pending triggers
            self._check_termination()
            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")

    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        # Unprime the trigger this coroutine is waiting on
        trigger = coro._trigger
        if trigger is not None:
            coro._trigger = None
            if coro in self._trigger2coros.setdefault(trigger, []):
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
                del self._trigger2coros[trigger]

        assert self._test is not None

        if coro is self._test:
            if _debug:
                self.log.debug("Unscheduling test {}".format(coro))

            if not self._terminate:
                self._terminate = True
                self.cleanup()

        elif Join(coro) in self._trigger2coros:
            self.react(Join(coro))
        else:
            try:
                # throws an error if the background coroutine errored
                # and no one was monitoring it
                coro._outcome.get()
            except (TestComplete, AssertionError) as e:
                coro.log.info("Test stopped by this forked coroutine")
                e = remove_traceback_frames(e, ['unschedule', 'get'])
                self._test.abort(e)
            except Exception as e:
                coro.log.error("Exception raised by this forked coroutine")
                e = remove_traceback_frames(e, ['unschedule', 'get'])
                self._test.abort(e)

    def _schedule_write(self, handle, write_func, *args):
        """ Queue `write_func` to be called on the next ReadWrite trigger. """
        if self._mode == Scheduler._MODE_READONLY:
            raise Exception(
                "Write to object {0} was scheduled during a read-only sync phase."
                .format(handle._name))

        # TODO: we should be able to better keep track of when this needs to
        # be scheduled
        if self._write_coro_inst is None:
            self._write_coro_inst = self.add(self._do_writes())

        self._write_calls[handle] = (write_func, args)
        self._writes_pending.set()

    def _resume_coro_upon(self, coro, trigger):
        """Schedule `coro` to be resumed when `trigger` fires."""
        coro._trigger = trigger

        trigger_coros = self._trigger2coros.setdefault(trigger, [])
        if coro is self._write_coro_inst:
            # Our internal write coroutine always runs before any user coroutines.
            # This preserves the behavior prior to the refactoring of writes to
            # this coroutine.
            trigger_coros.insert(0, coro)
        else:
            # Everything else joins the back of the queue
            trigger_coros.append(coro)

        if not trigger.primed:

            if trigger_coros != [coro]:
                # should never happen
                raise InternalError(
                    "More than one coroutine waiting on an unprimed trigger")

            try:
                trigger.prime(self.react)
            except Exception as e:
                # discard the trigger we associated, it will never fire
                self._trigger2coros.pop(trigger)

                # replace it with a new trigger that throws back the exception
                self._resume_coro_upon(
                    coro,
                    NullTrigger(name="Trigger.prime() Error",
                                outcome=outcomes.Error(e)))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def queue_function(self, coro):
        """Queue a coroutine for execution and move the containing thread
        so that it does not block execution of the main thread any longer.
        """
        # We should be able to find ourselves inside the _pending_threads list
        matching_threads = [
            t for t in self._pending_threads
            if t.thread == threading.current_thread()
        ]
        if len(matching_threads) == 0:
            raise RuntimeError(
                "queue_function called from unrecognized thread")

        # Raises if there is more than one match. This can never happen, since
        # each entry always has a unique thread.
        t, = matching_threads

        async def wrapper():
            # This function runs in the scheduler thread
            try:
                _outcome = outcomes.Value(await coro)
            except BaseException as e:
                _outcome = outcomes.Error(e)
            event.outcome = _outcome
            # Notify the current (scheduler) thread that we are about to wake
            # up the background (`@external`) thread, making sure to do so
            # before the background thread gets a chance to go back to sleep by
            # calling thread_suspend.
            # We need to do this here in the scheduler thread so that no more
            # coroutines run until the background thread goes back to sleep.
            t.thread_resume()
            event.set()

        event = threading.Event()
        self._pending_coros.append(cocotb.decorators.RunningTask(wrapper()))
        # The scheduler thread blocks in `thread_wait`, and is woken when we
        # call `thread_suspend` - so we need to make sure the coroutine is
        # queued before that.
        t.thread_suspend()
        # This blocks the calling `@external` thread until the coroutine finishes
        event.wait()
        return event.outcome.get()

    def run_in_executor(self, func, *args, **kwargs):
        """Run the coroutine in a separate execution thread
        and return an awaitable object for the caller.
        """

        # Create a thread
        # Create a trigger that is called as a result of the thread finishing
        # Create an Event object that the caller can await on
        # Event object set when the thread finishes execution, this blocks the
        #   calling coroutine (but not the thread) until the external completes

        def execute_external(func, _waiter):
            _waiter._outcome = outcomes.capture(func, *args, **kwargs)
            if _debug:
                self.log.debug("Execution of external routine done %s" %
                               threading.current_thread())
            _waiter.thread_done()

        async def wrapper():
            waiter = external_waiter()
            thread = threading.Thread(group=None,
                                      target=execute_external,
                                      name=func.__qualname__ + "_thread",
                                      args=([func, waiter]),
                                      kwargs={})

            waiter.thread = thread
            self._pending_threads.append(waiter)

            await waiter.event.wait()

            return waiter.result  # raises if there was an exception

        return wrapper()

    @staticmethod
    def create_task(coroutine: Any) -> RunningTask:
        """ Checks to see if the given object is a schedulable coroutine object and if so, returns it """

        if isinstance(coroutine, RunningTask):
            return coroutine
        if inspect.iscoroutine(coroutine):
            return RunningTask(coroutine)
        if inspect.iscoroutinefunction(coroutine):
            raise TypeError(
                "Coroutine function {} should be called prior to being "
                "scheduled.".format(coroutine))
        if isinstance(coroutine, cocotb.decorators.coroutine):
            raise TypeError(
                "Attempt to schedule a coroutine that hasn't started: {}.\n"
                "Did you forget to add parentheses to the @cocotb.test() "
                "decorator?".format(coroutine))
        if sys.version_info >= (3, 6) and inspect.isasyncgen(coroutine):
            raise TypeError(
                "{} is an async generator, not a coroutine. "
                "You likely used the yield keyword instead of await.".format(
                    coroutine.__qualname__))
        raise TypeError(
            "Attempt to add an object of type {} to the scheduler, which "
            "isn't a coroutine: {!r}\n"
            "Did you forget to use the @cocotb.coroutine decorator?".format(
                type(coroutine), coroutine))

    def add(self, coroutine: Union[RunningTask, Coroutine]) -> RunningTask:
        """Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error messages in the event of common gotchas.
        """

        task = self.create_task(coroutine)

        if _debug:
            self.log.debug("Adding new coroutine %s" % task._coro.__qualname__)

        self.schedule(task)
        self._check_termination()
        return task

    def start_soon(self, coro: Union[Coroutine, RunningTask]) -> RunningTask:
        """
        Schedule a coroutine to be run concurrently, starting after the current coroutine yields control.

        In contrast to :func:`~cocotb.fork` which starts the given coroutine immediately, this function
        starts the given coroutine only after the current coroutine yields control.
        This is useful when the coroutine to be forked has logic before the first
        :keyword:`await` that may not be safe to execute immediately.
        """

        task = self.create_task(coro)

        if _debug:
            self.log.debug("Queueing a new coroutine %s" %
                           task._coro.__qualname__)

        self.queue(task)
        return task

    def add_test(self, test_coro):
        """Called by the regression manager to queue the next test"""
        if self._test is not None:
            raise InternalError("Test was added while another was in progress")

        self._test = test_coro
        self._resume_coro_upon(
            test_coro,
            NullTrigger(name="Start {!s}".format(test_coro),
                        outcome=outcomes.Value(None)))

    # This collection of functions parses a trigger out of the object
    # that was yielded by a coroutine, converting `list` -> `Waitable`,
    # `Waitable` -> `RunningTask`, `RunningTask` -> `Trigger`.
    # Doing them as separate functions allows us to avoid repeating unencessary
    # `isinstance` checks.

    def _trigger_from_started_coro(
            self, result: cocotb.decorators.RunningTask) -> Trigger:
        if _debug:
            self.log.debug("Joining to already running coroutine: %s" %
                           result._coro.__qualname__)
        return result.join()

    def _trigger_from_unstarted_coro(
            self, result: cocotb.decorators.RunningTask) -> Trigger:
        self.queue(result)
        if _debug:
            self.log.debug("Scheduling nested coroutine: %s" %
                           result._coro.__qualname__)
        return result.join()

    def _trigger_from_waitable(self,
                               result: cocotb.triggers.Waitable) -> Trigger:
        return self._trigger_from_unstarted_coro(
            cocotb.decorators.RunningTask(result._wait()))

    def _trigger_from_list(self, result: list) -> Trigger:
        return self._trigger_from_waitable(cocotb.triggers.First(*result))

    def _trigger_from_any(self, result) -> Trigger:
        """Convert a yielded object into a Trigger instance"""
        # note: the order of these can significantly impact performance

        if isinstance(result, Trigger):
            return result

        if isinstance(result, cocotb.decorators.RunningTask):
            if not result.has_started():
                return self._trigger_from_unstarted_coro(result)
            else:
                return self._trigger_from_started_coro(result)

        if inspect.iscoroutine(result):
            return self._trigger_from_unstarted_coro(
                cocotb.decorators.RunningTask(result))

        if isinstance(result, list):
            return self._trigger_from_list(result)

        if isinstance(result, cocotb.triggers.Waitable):
            return self._trigger_from_waitable(result)

        if sys.version_info >= (3, 6) and inspect.isasyncgen(result):
            raise TypeError(
                "{} is an async generator, not a coroutine. "
                "You likely used the yield keyword instead of await.".format(
                    result.__qualname__))

        raise TypeError(
            "Coroutine yielded an object of type {}, which the scheduler can't "
            "handle: {!r}\n"
            "Did you forget to decorate with @cocotb.coroutine?".format(
                type(result), result))

    @contextmanager
    def _task_context(self, task):
        """Context manager for the currently running task."""
        old_task = self._current_task
        self._current_task = task
        try:
            yield
        finally:
            self._current_task = old_task

    def schedule(self, coroutine, trigger=None):
        """Schedule a coroutine by calling the send method.

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule.
            trigger (cocotb.triggers.Trigger): The trigger that caused this
                coroutine to be scheduled.
        """
        with self._task_context(coroutine):
            if trigger is None:
                send_outcome = outcomes.Value(None)
            else:
                send_outcome = trigger._outcome
            if _debug:
                self.log.debug("Scheduling with {}".format(send_outcome))

            coro_completed = False
            try:
                coroutine._trigger = None
                result = coroutine._advance(send_outcome)
                if _debug:
                    self.log.debug("Coroutine %s yielded %s (mode %d)" %
                                   (coroutine._coro.__qualname__, str(result),
                                    self._mode))

            except cocotb.decorators.CoroutineComplete:
                if _debug:
                    self.log.debug("Coroutine {} completed with {}".format(
                        coroutine, coroutine._outcome))
                coro_completed = True

            # this can't go in the else above, as that causes unwanted exception
            # chaining
            if coro_completed:
                self.unschedule(coroutine)

            # Don't handle the result if we're shutting down
            if self._terminate:
                return

            if not coro_completed:
                try:
                    result = self._trigger_from_any(result)
                except TypeError as exc:
                    # restart this coroutine with an exception object telling it that
                    # it wasn't allowed to yield that
                    result = NullTrigger(outcome=outcomes.Error(exc))

                self._resume_coro_upon(coroutine, result)

            # We do not return from here until pending threads have completed, but only
            # from the main thread, this seems like it could be problematic in cases
            # where a sim might change what this thread is.

            if self._main_thread is threading.current_thread():

                for ext in self._pending_threads:
                    ext.thread_start()
                    if _debug:
                        self.log.debug(
                            "Blocking from %s on %s" %
                            (threading.current_thread(), ext.thread))
                    state = ext.thread_wait()
                    if _debug:
                        self.log.debug(
                            "Back from wait on self %s with newstate %d" %
                            (threading.current_thread(), state))
                    if state == external_state.EXITED:
                        self._pending_threads.remove(ext)
                        self._pending_events.append(ext.event)

            # Handle any newly queued coroutines that need to be scheduled
            while self._pending_coros:
                self.add(self._pending_coros.pop(0))

    def finish_test(self, exc):
        self._test.abort(exc)
        self._check_termination()

    def finish_scheduler(self, exc):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed.
        """
        # If there is an error during cocotb initialization, self._test may not
        # have been set yet. Don't cause another Python exception here.

        if self._test:
            self.log.debug("Issue sim closedown result to regression object")
            self._test.abort(exc)
            cocotb.regression_manager.handle_result(self._test)

    def cleanup(self):
        """Clear up all our state.

        Unprime all pending triggers and kill off any coroutines stop all externals.
        """
        # copy since we modify this in kill
        items = list(self._trigger2coros.items())

        # reversing seems to fix gh-928, although the order is still somewhat
        # arbitrary.
        for trigger, waiting in items[::-1]:
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()

        if self._main_thread is not threading.current_thread():
            raise Exception("Cleanup() called outside of the main thread")

        for ext in self._pending_threads:
            self.log.warning("Waiting for %s to exit", ext.thread)
Exemple #30
0
class Scheduler(object):
    """
    The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the `react`_ method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn. NB implementors should not depend on the scheduling
    order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to cbReadOnlySynch (VPI) or vhpiCbLastKnownDeltaCycle
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to cbReadWriteSynch (VPI) or vhpiCbEndOfProcesses (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from normal->write by registering a ReadWrite
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL   = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE    = 3  # noqa
    _MODE_TERM     = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _readonly = ReadOnly()
    _next_timestep = _NextTimeStep()
    _readwrite = _ReadWrite()
    _timer1 = Timer(1)
    _timer0 = Timer(0)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = collections.defaultdict(list)

        # A dictionary of pending triggers for each coroutine, indexed by coro
        self._coro2triggers = collections.defaultdict(list)

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = {}

        self._pending_coros = []
        self._pending_callbacks = []
        self._pending_triggers = []

        self._terminate = False
        self._test_result = None
        self._entrypoint = None

        # Select the appropriate scheduling algorithm for this simulator
        self.advance = self.default_scheduling_algorithm

    def default_scheduling_algorithm(self):
        """
        Decide whether we need to schedule our own triggers (if at all) in
        order to progress to the next mode.

        This algorithm has been tested against the following simulators:
            Icarus Verilog
        """
        if not self._terminate and self._writes:

            if self._mode == Scheduler._MODE_NORMAL:
                if not self._readwrite.primed:
                    self._readwrite.prime(self.react)
            elif not self._next_timestep.primed:
                self._next_timestep.prime(self.react)

        elif self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            for t in self._trigger2coros:
                t.unprime()

            for t in [self._readwrite, self._readonly, self._next_timestep,
                      self._timer1, self._timer0]:
                if t.primed:
                    t.unprime()

            self._timer1.prime(self.begin_test)
            self._trigger2coros = collections.defaultdict(list)
            self._coro2triggers = collections.defaultdict(list)
            self._terminate = False
            self._mode = Scheduler._MODE_TERM

    def begin_test(self, trigger=None):
        """
        Called to initiate a test.

        Could be called on start-up or from a callback
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            _profile.enable()

        self._mode = Scheduler._MODE_NORMAL
        if trigger is not None:
            trigger.unprime()

        # Issue previous test result, if there is one
        if self._test_result is not None:
            if _debug:
                self.log.debug("Issue test result to regresssion object")
            cocotb.regression.handle_result(self._test_result)
            self._test_result = None
        if self._entrypoint is not None:
            test = self._entrypoint
            self._entrypoint = None
            self.schedule(test)
            self.advance()

        if _profiling:
            _profile.disable()

    def react(self, trigger, depth=0):
        """
        React called when a trigger fires.

        We find any coroutines that are waiting on the particular trigger and
        schedule them.
        """
        if _profiling and not depth:
            _profile.enable()

        # When a trigger fires it is unprimed internally
        if _debug:
            self.log.debug("Trigger fired: %s" % str(trigger))
        # trigger.unprime()

        if self._mode == Scheduler._MODE_TERM:
            if _debug:
                self.log.debug("Ignoring trigger %s since we're terminating" %
                               str(trigger))
            return

        if trigger is self._readonly:
            self._mode = Scheduler._MODE_READONLY
        # Only GPI triggers affect the simulator scheduling mode
        elif isinstance(trigger, GPITrigger):
            self._mode = Scheduler._MODE_NORMAL

        # We're the only source of ReadWrite triggers which are only used for
        # playing back any cached signal updates
        if trigger is self._readwrite:

            if _debug:
                self.log.debug("Writing cached signal updates")

            while self._writes:
                handle, value = self._writes.popitem()
                handle.setimmediatevalue(value)

            self._readwrite.unprime()

            if _profiling:
                _profile.disable()
            return

        # Similarly if we've scheduled our next_timestep on way to readwrite
        if trigger is self._next_timestep:

            if not self._writes:
                self.log.error(
                    "Moved to next timestep without any pending writes!")
            else:
                self.log.debug(
                    "Priming ReadWrite trigger so we can playback writes")
                self._readwrite.prime(self.react)

            if _profiling:
                _profile.disable()
            return

        if trigger not in self._trigger2coros:

            # GPI triggers should only be ever pending if there is an
            # associated coroutine waiting on that trigger, otherwise it would
            # have been unprimed already
            if isinstance(trigger, GPITrigger):
                self.log.critical(
                    "No coroutines waiting on trigger that fired: %s" %
                    str(trigger))

                trigger.log.info("I'm the culprit")
            # For Python triggers this isn't actually an error - we might do
            # event.set() without knowing whether any coroutines are actually
            # waiting on this event, for example
            elif _debug:
                self.log.debug(
                    "No coroutines waiting on trigger that fired: %s" %
                    str(trigger))

            if _profiling:
                _profile.disable()
            return

        # Scheduled coroutines may append to our waiting list so the first
        # thing to do is pop all entries waiting on this trigger.
        scheduling = self._trigger2coros.pop(trigger)

        if _debug:
            debugstr = "\n\t".join([coro.__name__ for coro in scheduling])
            if len(scheduling):
                debugstr = "\n\t" + debugstr
            self.log.debug("%d pending coroutines for event %s%s" %
                           (len(scheduling), str(trigger), debugstr))

        # If the coroutine was waiting on multiple triggers we may be able
        # to unprime the other triggers that didn't fire
        for coro in scheduling:
            for pending in self._coro2triggers[coro]:
                for others in self._trigger2coros[pending]:
                    if others not in scheduling:
                        break
                else:
                    # if pending is not trigger and pending.primed:
                    #     pending.unprime()
                    if pending.primed:
                        pending.unprime()
                    del self._trigger2coros[pending]

        for coro in scheduling:
            self.schedule(coro, trigger=trigger)
            if _debug:
                self.log.debug("Scheduled coroutine %s" % (coro.__name__))

        while self._pending_triggers:
            if _debug:
                self.log.debug("Scheduling pending trigger %s" %
                               (str(self._pending_triggers[0])))
            self.react(self._pending_triggers.pop(0), depth=depth + 1)

        # We only advance for GPI triggers
        if not depth and isinstance(trigger, GPITrigger):
            self.advance()

            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")

            if _profiling:
                _profile.disable()
        return

    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        for trigger in self._coro2triggers[coro]:
            if coro in self._trigger2coros[trigger]:
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
        del self._coro2triggers[coro]

        if coro._join in self._trigger2coros:
            self._pending_triggers.append(coro._join)

        # Remove references to allow GC to clean up
        del coro._join

    def save_write(self, handle, value):
        self._writes[handle] = value

    def _coroutine_yielded(self, coro, triggers):
        """
        Prime the triggers and update our internal mappings
        """
        self._coro2triggers[coro] = triggers

        for trigger in triggers:

            self._trigger2coros[trigger].append(coro)
            if not trigger.primed:
                try:
                    trigger.prime(self.react)
                except Exception as e:
                    # Convert any exceptions into a test result
                    self.finish_test(
                        create_error(self, "Unable to prime trigger %s: %s" %
                                     (str(trigger), str(e))))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def add(self, coroutine):
        """
        Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error mesages in the event of common gotchas
        """
        if isinstance(coroutine, cocotb.decorators.coroutine):
            self.log.critical(
                "Attempt to schedule a coroutine that hasn't started")
            coroutine.log.error("This is the failing coroutine")
            self.log.warning(
                "Did you forget to add parentheses to the @test decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):
            self.log.critical(
                "Attempt to add something to the scheduler which isn't a "
                "coroutine")
            self.log.warning(
                "Got: %s (%s)" % (str(type(coroutine)), repr(coroutine)))
            self.log.warning("Did you use the @coroutine decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        if _debug:
            self.log.debug("Adding new coroutine %s" % coroutine.__name__)

        self.schedule(coroutine)
        self.advance()
        return coroutine

    def new_test(self, coroutine):
        self._entrypoint = coroutine

    def schedule(self, coroutine, trigger=None):
        """
        Schedule a coroutine by calling the send method

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule

            trigger (cocotb.triggers.Trigger): The trigger that caused this
                                                coroutine to be scheduled
        """
        if hasattr(trigger, "pass_retval"):
            sendval = trigger.retval
            if _debug:
                coroutine.log.debug("Scheduling with ReturnValue(%s)" %
                                    (repr(sendval)))
        else:
            sendval = trigger
            if _debug:
                coroutine.log.debug("Scheduling with %s" % str(trigger))

        try:
            result = coroutine.send(sendval)
            if _debug:
                self.log.debug("Coroutine %s yielded %s (mode %d)" %
                               (coroutine.__name__, str(result), self._mode))

        # TestComplete indication is game over, tidy up
        except TestComplete as test_result:
            # Tag that close down is needed, save the test_result
            # for later use in cleanup handler
            self.finish_test(test_result)
            return

        # Normal co-routine completion
        except cocotb.decorators.CoroutineComplete as exc:
            if _debug:
                self.log.debug("Coroutine completed: %s" % str(coroutine))
            self.unschedule(coroutine)
            return

        # Don't handle the result if we're shutting down
        if self._terminate:
            return

        # Queue current routine to schedule when the nested routine exits
        if isinstance(result, cocotb.decorators.RunningCoroutine):
            if _debug:
                self.log.debug("Scheduling nested co-routine: %s" %
                               result.__name__)

            self.queue(result)
            new_trigger = result.join()
            self._coroutine_yielded(coroutine, [new_trigger])

        elif isinstance(result, Trigger):
            self._coroutine_yielded(coroutine, [result])

        elif (isinstance(result, list) and
                not [t for t in result if not isinstance(t, Trigger)]):
            self._coroutine_yielded(coroutine, result)

        else:
            msg = ("Coroutine %s yielded something the scheduler can't handle"
                   % str(coroutine))
            msg += ("\nGot type: %s repr: %s str: %s" %
                    (type(result), repr(result), str(result)))
            msg += "\nDid you forget to decorate with @cocotb.cocorutine?"
            try:
                raise_error(self, msg)
            except Exception as e:
                self.finish_test(e)

        # Handle any newly queued coroutines that need to be scheduled
        while self._pending_coros:
            self.add(self._pending_coros.pop(0))

        while self._pending_callbacks:
            self._pending_callbacks.pop(0)()

    def finish_test(self, test_result):
        """Cache the test result and set the terminate flag"""
        self.log.debug("finish_test called with %s" % (repr(test_result)))
        if not self._terminate:
            self._terminate = True
            self._test_result = test_result
            self.cleanup()

    def finish_scheduler(self, test_result):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed"""
        self.log.debug("Issue sim closedown result to regresssion object")
        cocotb.regression.handle_result(test_result)

    def cleanup(self):
        """
        Clear up all our state

        Unprime all pending triggers and kill off any coroutines
        """
        for trigger, waiting in self._trigger2coros.items():
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()
# so that cocotb.scheduler gives you the singleton instance and not the
# scheduler package

# GPI logging instance
if "COCOTB_SIM" in os.environ:
    import simulator
    logging.basicConfig()
    logging.setLoggerClass(SimBaseLog)
    log = SimLog('cocotb')
    level = os.getenv("COCOTB_LOG_LEVEL", "INFO")
    try:
        _default_log = getattr(logging, level)
    except AttributeError as e:
        log.error("Unable to set loging level to %s" % level)
        _default_log = logging.INFO
    log.setLevel(_default_log)
    loggpi = SimLog('cocotb.gpi')
    # Notify GPI of log level
    simulator.log_level(_default_log)

    # If stdout/stderr are not TTYs, Python may not have opened them with line
    # buffering. In that case, try to reopen them with line buffering
    # explicitly enabled. This ensures that prints such as stack traces always
    # appear. Continue silently if this fails.
    try:
        if not sys.stdout.isatty():
            sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1)
            log.debug("Reopened stdout with line buffering")
        if not sys.stderr.isatty():
            sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 1)
            log.debug("Reopened stderr with line buffering")
Exemple #32
0
class Tb:
    """
    Base class for RaveNoC testbench

    Args:
        dut: The Dut object coming from cocotb
        log_name: Name of the log file inside the run folder, it's append the timestamp only
        cfg: NoC cfg dict
    """
    def __init__(self, dut, log_name, cfg):
        self.dut = dut
        self.cfg = cfg
        timenow_wstamp = self._gen_log(log_name)
        self.log.info("------------[LOG - %s]------------",timenow_wstamp)
        self.log.info("SEED: %s",str(cocotb.RANDOM_SEED))
        self.log.info("Log file: %s",log_name)
        self._print_noc_cfg()
        # Create the AXI Master I/Fs and connect it to the two main AXI Slave I/Fs in the top wrappers
        self.noc_axi_in = AxiMaster(AxiBus.from_prefix(self.dut, "noc_in"), self.dut.clk_axi, self.dut.arst_axi)
        self.noc_axi_out = AxiMaster(AxiBus.from_prefix(self.dut, "noc_out"), self.dut.clk_axi, self.dut.arst_axi)
        # Tied to zero the inputs
        self.dut.act_in.setimmediatevalue(0)
        self.dut.act_out.setimmediatevalue(0)
        self.dut.axi_sel_in.setimmediatevalue(0)
        self.dut.axi_sel_out.setimmediatevalue(0)

    def __del__(self):
        # Need to write the last strings in the buffer in the file
        self.log.info("Closing log file.")
        self.log.removeHandler(self.file_handler)

    def set_idle_generator(self, generator=None):
        if generator:
            self.noc_axi_in.write_if.aw_channel.set_pause_generator(generator())
            self.noc_axi_in.write_if.w_channel.set_pause_generator(generator())
            self.noc_axi_in.read_if.ar_channel.set_pause_generator(generator())
            self.noc_axi_out.write_if.aw_channel.set_pause_generator(generator())
            self.noc_axi_out.write_if.w_channel.set_pause_generator(generator())
            self.noc_axi_out.read_if.ar_channel.set_pause_generator(generator())

    def set_backpressure_generator(self, generator=None):
        if generator:
            self.noc_axi_in.write_if.b_channel.set_pause_generator(generator())
            self.noc_axi_in.read_if.r_channel.set_pause_generator(generator())
            self.noc_axi_out.write_if.b_channel.set_pause_generator(generator())
            self.noc_axi_out.read_if.r_channel.set_pause_generator(generator())

    """
    Write method to transfer pkts over the NoC

    Args:
        pkt: Input pkt to be transfered to the NoC
        kwargs: All aditional args that can be passed to the amba AXI driver
    """
    async def write_pkt(self, pkt=RaveNoC_pkt, timeout=noc_const.TIMEOUT_AXI,  use_side_if=0, **kwargs):
        if use_side_if == 0:
            self.dut.axi_sel_in.setimmediatevalue(pkt.src[0])
            self.dut.act_in.setimmediatevalue(1)
        else:
            self.dut.axi_sel_out.setimmediatevalue(pkt.src[0])
            self.dut.act_out.setimmediatevalue(1)

        self._print_pkt_header("write",pkt)
        #self.log.info("[AXI Master - Write NoC Packet] Data:")
        #self._print_pkt(pkt.message, pkt.num_bytes_per_beat)
        if use_side_if == 0:
            write = self.noc_axi_in.init_write(address=pkt.axi_address_w, awid=0x0, data=pkt.msg, **kwargs)
        else:
            write = self.noc_axi_out.init_write(address=pkt.axi_address_w, awid=0x0, data=pkt.msg, **kwargs)

        await with_timeout(write.wait(), *timeout)
        ret = write.data

        if use_side_if == 0:
            self.dut.act_in.setimmediatevalue(0)
            self.dut.axi_sel_in.setimmediatevalue(0)
        else:
            self.dut.act_out.setimmediatevalue(0)
            self.dut.axi_sel_out.setimmediatevalue(0)
        return ret

    """
    Read method to fetch pkts from the NoC

    Args:
        pkt: Valid pkt to be used as inputs args (vc_channel, node on the axi_mux input,..) to the read op from the NoC
        kwargs: All aditional args that can be passed to the amba AXI driver
    Returns:
        Return the packet message with the head flit
    """
    async def read_pkt(self, pkt=RaveNoC_pkt, timeout=noc_const.TIMEOUT_AXI, **kwargs):
        self.dut.axi_sel_out.setimmediatevalue(pkt.dest[0])
        self.dut.act_out.setimmediatevalue(1)
        self._print_pkt_header("read",pkt)
        read = self.noc_axi_out.init_read(address=pkt.axi_address_r, arid=0x0, length=pkt.length, **kwargs)
        await with_timeout(read.wait(), *timeout)
        ret = read.data # read.data => AxiReadResp
        # self.log.info("[AXI Master - Read NoC Packet] Data:")
        #self._print_pkt(ret.data, pkt.num_bytes_per_beat)
        self.dut.act_out.setimmediatevalue(0)
        self.dut.axi_sel_out.setimmediatevalue(0)
        return ret

    """
    Write AXI method

    Args:
        sel: axi_mux switch to select the correct node to write through
        kwargs: All aditional args that can be passed to the amba AXI driver
    """
    async def write(self, sel=0, address=0x0, data=0x0, **kwargs):
        self.dut.act_in.setimmediatevalue(1)
        self.dut.axi_sel_in.setimmediatevalue(sel)
        self.log.info("[AXI Master - Write] Slave = ["+str(sel)+"] / "
                      "Address = ["+str(hex(address))+"] ")
                      #"Data = ["+data+"]")
        write = self.noc_axi_in.init_write(address=address, awid=0x0, data=data, **kwargs)
        await with_timeout(write.wait(), *noc_const.TIMEOUT_AXI)
        ret = write.data
        self.dut.axi_sel_in.setimmediatevalue(0)
        self.dut.act_in.setimmediatevalue(0)
        return ret

    """
    Read AXI method

    Args:
        sel: axi_mux switch to select the correct node to read from
        kwargs: All aditional args that can be passed to the amba AXI driver
    Returns:
        Return the data read from the specified node
    """
    async def read(self, sel=0, address=0x0, length=4, **kwargs):
        self.dut.act_out.setimmediatevalue(1)
        self.dut.axi_sel_out.setimmediatevalue(sel)
        self.log.info("[AXI Master - Read] Slave = ["+str(sel)+"] / Address = ["+str(hex(address))+"] / Length = ["+str(length)+" bytes]")
        read = self.noc_axi_out.init_read(address=address, arid=0x0, length=length, **kwargs)
        await with_timeout(read.wait(), *noc_const.TIMEOUT_AXI)
        resp = read.data # read.data => AxiReadResp
        self.dut.axi_sel_out.setimmediatevalue(0)
        self.dut.act_out.setimmediatevalue(0)
        return resp

    """
    Auxiliary method to check received data
    """
    def check_pkt(self, data, received):
        assert len(data) == len(received), "Lengths are different from received to sent pkt"
        for i in range(len(data)):
            assert data[i] == received[i], "Mismatch on received vs sent NoC packet!"

    """
    Auxiliary method to log flit header
    """
    def _print_pkt_header(self, op, pkt):
        axi_addr = str(hex(pkt.axi_address_r)) if op=="read" else str(hex(pkt.axi_address_w))
        mux = str(pkt.dest[0]) if op=="read" else str(pkt.src[0])
        self.log.info(f"[AXI Master - "+str(op)+" NoC Packet] Router=["+mux+"] "
                        "Address=[AXI_Addr="+axi_addr+"] Mux_"+op+"=["+mux+"] "
                        "SRC(x,y)=["+str(pkt.src[1])+","+str(pkt.src[2])+"] "
                        "DEST(x,y)=["+str(pkt.dest[1])+","+str(pkt.dest[2])+"] "
                        "Length=["+str(pkt.length)+" bytes / "+str(pkt.length_beats)+" beats]")

    """
    Auxiliary method to print/log AXI payload
    """
    def _print_pkt(self, data, bytes_per_beat):
        print("LEN="+str(len(data))+" BYTES PER BEAT="+str(bytes_per_beat))
        if len(data) == bytes_per_beat:
            beat_burst_hex = [data[x] for x in range(0,bytes_per_beat)][::-1]
            beat_burst_hs = ""
            for j in beat_burst_hex:
                beat_burst_hs += hex(j)
                beat_burst_hs += "\t("+chr(j)+")"
                beat_burst_hs += "\t"
            tmp = "Beat[0]---> "+beat_burst_hs
            self.log.info(tmp)
        else:
            for i in range(0,len(data),bytes_per_beat):
                beat_burst_hex = [data[x] for x in range(i,i+bytes_per_beat)][::-1]
                # beat_burst_s = [chr(data[x]) for x in range(i,i+bytes_per_beat)][::-1]
                beat_burst_hs = ""
                for j in beat_burst_hex:
                    beat_burst_hs += hex(j)
                    beat_burst_hs += "\t("+chr(j)+")"
                    beat_burst_hs += "\t"
                tmp = "Beat["+str(int(i/bytes_per_beat))+"]---> "+beat_burst_hs
                self.log.info(tmp)
                #print("--)

    """
    Setup and launch the clocks on the simulation

    Args:
        clk_mode: Selects between AXI clk higher than NoC clk and vice-versa
    """
    async def setup_clks(self, clk_mode="NoC_slwT_AXI"):
        self.log.info(f"[Setup] Configuring the clocks: {clk_mode}")
        if clk_mode == "NoC_slwT_AXI":
            cocotb.fork(Clock(self.dut.clk_noc, *noc_const.CLK_100MHz).start())
            cocotb.fork(Clock(self.dut.clk_axi, *noc_const.CLK_200MHz).start())
        elif clk_mode == "AXI_slwT_NoC":
            cocotb.fork(Clock(self.dut.clk_axi, *noc_const.CLK_100MHz).start())
            cocotb.fork(Clock(self.dut.clk_noc, *noc_const.CLK_200MHz).start())
        else:
            cocotb.fork(Clock(self.dut.clk_axi, *noc_const.CLK_200MHz).start())
            cocotb.fork(Clock(self.dut.clk_noc, *noc_const.CLK_200MHz).start())


    """
    Setup and apply the reset on the NoC

    Args:
        clk_mode: Depending on the input clock mode, we need to wait different
        clk cycles for the reset, we always hold as long as the slowest clock
    """
    async def arst(self, clk_mode="NoC_slwT_AXI"):
        self.log.info("[Setup] Reset DUT")
        self.dut.arst_axi.setimmediatevalue(0)
        self.dut.arst_noc.setimmediatevalue(0)
        self.dut.axi_sel_in.setimmediatevalue(0)
        self.dut.axi_sel_out.setimmediatevalue(0)
        self.dut.act_in.setimmediatevalue(0)
        self.dut.act_out.setimmediatevalue(0)
        bypass_cdc = 1 if clk_mode == "NoC_equal_AXI" else 0
        self.dut.bypass_cdc.setimmediatevalue(bypass_cdc)
        self.dut.arst_axi <= 1
        self.dut.arst_noc <= 1
        # await NextTimeStep()
        #await ReadOnly() #https://github.com/cocotb/cocotb/issues/2478
        if clk_mode == "NoC_slwT_AXI":
            await ClockCycles(self.dut.clk_noc, noc_const.RST_CYCLES)
        else:
            await ClockCycles(self.dut.clk_axi, noc_const.RST_CYCLES)
        self.dut.arst_axi <= 0
        self.dut.arst_noc <= 0
        # https://github.com/alexforencich/cocotbext-axi/issues/19
        #await ClockCycles(self.dut.clk_axi, 1)
        #await ClockCycles(self.dut.clk_noc, 1)

    """
    Method to wait for IRQs from the NoC
    """
    async def wait_irq(self):
        # We need to wait some clock cyles because the in/out axi I/F is muxed
        # once verilator 4.106 doesn't support array of structs in the top.
        # This trigger is required because we read much faster than we write
        # and if we don't wait for the flit to arrive, it'll throw an error of
        # empty rd buffer
        # if tb.dut.irqs_out.value.integer == 0:
        #await with_timeout(First(*(Edge(bit) for bit in tb.dut.irqs_out)), *noc_const.TIMEOUT_IRQ)
        # This only exists bc of this:
        # https://github.com/cocotb/cocotb/issues/2478
        timeout_cnt = 0
        while int(self.dut.irqs_out) == 0:
            await RisingEdge(self.dut.clk_noc)
            if timeout_cnt == noc_const.TIMEOUT_IRQ_V:
                self.log.error("Timeout on waiting for an IRQ")
                raise TestFailure("Timeout on waiting for an IRQ")
            else:
                timeout_cnt += 1

    """
    Method to wait for IRQs from the NoC with a specific value
    """
    async def wait_irq_x(self, val):
        # We need to wait some clock cyles because the in/out axi I/F is muxed
        # once verilator 4.106 doesn't support array of structs in the top.
        # This trigger is required because we read much faster than we write
        # and if we don't wait for the flit to arrive, it'll throw an error of
        # empty rd buffer
        # if tb.dut.irqs_out.value.integer == 0:
        #await with_timeout(First(*(Edge(bit) for bit in tb.dut.irqs_out)), *noc_const.TIMEOUT_IRQ)
        # This only exists bc of this:
        # https://github.com/cocotb/cocotb/issues/2478
        timeout_cnt = 0
        while int(self.dut.irqs_out) != val:
            await RisingEdge(self.dut.clk_noc)
            if timeout_cnt == noc_const.TIMEOUT_IRQ_V:
                self.log.error("Timeout on waiting for an IRQ")
                raise TestFailure("Timeout on waiting for an IRQ")
            else:
                timeout_cnt += 1

    """
    Creates the tb log obj and start filling with headers
    """
    def _gen_log(self, log_name):
        timenow = datetime.now().strftime("%d_%b_%Y_%Hh_%Mm_%Ss")
        timenow_wstamp = timenow + str("_") + str(datetime.timestamp(datetime.now()))
        self.log = SimLog(log_name)
        self.log.setLevel(logging.DEBUG)
        self.file_handler = RotatingFileHandler(f"{log_name}_{timenow}.log", maxBytes=(5 * 1024 * 1024), backupCount=2, mode='w')
        self._symlink_force(f"{log_name}_{timenow}.log",f"latest_{log_name}.log")
        self.file_handler.setFormatter(SimLogFormatter())
        self.log.addHandler(self.file_handler)
        self.log.addFilter(SimTimeContextFilter())
        return timenow_wstamp

    """
    Used to create the symlink with the latest log in the run dir folder
    """
    def _symlink_force(self, target, link_name):
        try:
            os.symlink(target, link_name)
        except OSError as e:
            if e.errno == errno.EEXIST:
                os.remove(link_name)
                os.symlink(target, link_name)
            else:
                raise e

    """
    Returns a random string with the length equal to input argument
    """
    def _get_random_string(self, length=1):
        # choose from all lowercase letter
        letters = string.ascii_lowercase
        result_str = ''.join(random.choice(letters) for i in range(length))
        return result_str

    def _print_noc_cfg(self):
        cfg = self.cfg

        self.log.info("------------------------------")
        self.log.info("RaveNoC configuration:")
        self.log.info(f"--> Flit data width: "+str(cfg['flit_data_width']))
        self.log.info(f"--> AXI data width: "+str(cfg['flit_data_width']))
        self.log.info(f"--> Routing algorithm: "+cfg['routing_alg'])
        self.log.info(f"--> NoC Size: "+str(cfg['noc_cfg_sz_rows'])+"x"+str(cfg['noc_cfg_sz_cols']))
        self.log.info(f"--> Number of flit buffers per input module: "+str(cfg['flit_buff']))
        self.log.info(f"--> Max size per pkt (beats): "+str(cfg['max_sz_pkt']))
        self.log.info(f"--> Number of virtual channels: "+str(cfg['n_virt_chn']))
        self.log.info(f"--> VC ID priority: "+("VC[0] has highest priority" if cfg['h_priority'] == 1 else "VC[0] has lowest priority"))
        self.log.info("------------------------------")
class TB(object):
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.dev = UltraScalePcieDevice(
            # configuration options
            pcie_generation=3,
            pcie_link_width=8,
            user_clk_frequency=250e6,
            alignment="dword",
            straddle=False,
            enable_pf1=False,
            enable_client_tag=True,
            enable_extended_tag=True,
            enable_parity=False,
            enable_rx_msg_interface=False,
            enable_sriov=False,
            enable_extended_configuration=False,
            enable_pf0_msi=True,
            enable_pf1_msi=False,

            # signals
            # Clock and Reset Interface
            user_clk=dut.clk,
            user_reset=dut.rst,
            # user_lnk_up
            # sys_clk
            # sys_clk_gt
            # sys_reset
            # phy_rdy_out

            # Requester reQuest Interface
            rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
            # pcie_rq_seq_num=dut.s_axis_rq_seq_num,
            # pcie_rq_seq_num_vld=dut.s_axis_rq_seq_num_valid,
            # pcie_rq_tag
            # pcie_rq_tag_av
            # pcie_rq_tag_vld

            # Requester Completion Interface
            rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),

            # Completer reQuest Interface
            cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
            # pcie_cq_np_req
            # pcie_cq_np_req_count

            # Completer Completion Interface
            cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),

            # Transmit Flow Control Interface
            # pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
            # pcie_tfc_npd_av=dut.pcie_tfc_npd_av,

            # Configuration Management Interface
            cfg_mgmt_addr=dut.cfg_mgmt_addr,
            cfg_mgmt_write=dut.cfg_mgmt_write,
            cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
            cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
            cfg_mgmt_read=dut.cfg_mgmt_read,
            cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
            cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
            # cfg_mgmt_debug_access

            # Configuration Status Interface
            # cfg_phy_link_down
            # cfg_phy_link_status
            # cfg_negotiated_width
            # cfg_current_speed
            cfg_max_payload=dut.cfg_max_payload,
            cfg_max_read_req=dut.cfg_max_read_req,
            # cfg_function_status
            # cfg_vf_status
            # cfg_function_power_state
            # cfg_vf_power_state
            # cfg_link_power_state
            # cfg_err_cor_out
            # cfg_err_nonfatal_out
            # cfg_err_fatal_out
            # cfg_local_error_out
            # cfg_local_error_valid
            # cfg_rx_pm_state
            # cfg_tx_pm_state
            # cfg_ltssm_state
            # cfg_rcb_status
            # cfg_obff_enable
            # cfg_pl_status_change
            # cfg_tph_requester_enable
            # cfg_tph_st_mode
            # cfg_vf_tph_requester_enable
            # cfg_vf_tph_st_mode

            # Configuration Received Message Interface
            # cfg_msg_received
            # cfg_msg_received_data
            # cfg_msg_received_type

            # Configuration Transmit Message Interface
            # cfg_msg_transmit
            # cfg_msg_transmit_type
            # cfg_msg_transmit_data
            # cfg_msg_transmit_done

            # Configuration Flow Control Interface
            # cfg_fc_ph=dut.cfg_fc_ph,
            # cfg_fc_pd=dut.cfg_fc_pd,
            # cfg_fc_nph=dut.cfg_fc_nph,
            # cfg_fc_npd=dut.cfg_fc_npd,
            # cfg_fc_cplh=dut.cfg_fc_cplh,
            # cfg_fc_cpld=dut.cfg_fc_cpld,
            # cfg_fc_sel=dut.cfg_fc_sel,

            # Configuration Control Interface
            # cfg_hot_reset_in
            # cfg_hot_reset_out
            # cfg_config_space_enable
            # cfg_dsn
            # cfg_bus_number
            # cfg_ds_port_number
            # cfg_ds_bus_number
            # cfg_ds_device_number
            # cfg_ds_function_number
            # cfg_power_state_change_ack
            # cfg_power_state_change_interrupt
            cfg_err_cor_in=dut.status_error_cor,
            cfg_err_uncor_in=dut.status_error_uncor,
            # cfg_flr_in_process
            # cfg_flr_done
            # cfg_vf_flr_in_process
            # cfg_vf_flr_func_num
            # cfg_vf_flr_done
            # cfg_pm_aspm_l1_entry_reject
            # cfg_pm_aspm_tx_l0s_entry_disable
            # cfg_req_pm_transition_l23_ready
            # cfg_link_training_enable

            # Configuration Interrupt Controller Interface
            # cfg_interrupt_int
            # cfg_interrupt_sent
            # cfg_interrupt_pending
            cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
            cfg_interrupt_msi_vf_enable=dut.cfg_interrupt_msi_vf_enable,
            cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
            cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
            cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
            cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
            cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
            cfg_interrupt_msi_pending_status=dut.
            cfg_interrupt_msi_pending_status,
            cfg_interrupt_msi_pending_status_data_enable=dut.
            cfg_interrupt_msi_pending_status_data_enable,
            cfg_interrupt_msi_pending_status_function_num=dut.
            cfg_interrupt_msi_pending_status_function_num,
            cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
            cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
            # cfg_interrupt_msix_enable
            # cfg_interrupt_msix_mask
            # cfg_interrupt_msix_vf_enable
            # cfg_interrupt_msix_vf_mask
            # cfg_interrupt_msix_address
            # cfg_interrupt_msix_data
            # cfg_interrupt_msix_int
            # cfg_interrupt_msix_vec_pending
            # cfg_interrupt_msix_vec_pending_status
            cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
            cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
            cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
            # cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
            # cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,

            # Configuration Extend Interface
            # cfg_ext_read_received
            # cfg_ext_write_received
            # cfg_ext_register_number
            # cfg_ext_function_number
            # cfg_ext_write_data
            # cfg_ext_write_byte_enable
            # cfg_ext_read_data
            # cfg_ext_read_data_valid
        )

        # self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.dev.functions[0].msi_multiple_message_capable = 5

        self.dev.functions[0].configure_bar(0, 2**22)
        self.dev.functions[0].configure_bar(1, 2**22)

    async def init(self):

        await FallingEdge(self.dut.rst)
        await Timer(100, 'ns')

        await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)
class TB(object):
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.dev = UltraScalePlusPcieDevice(
            # configuration options
            pcie_generation=3,
            pcie_link_width=16,
            user_clk_frequency=250e6,
            alignment="dword",
            cq_straddle=False,
            cc_straddle=False,
            rq_straddle=False,
            rc_straddle=False,
            rc_4tlp_straddle=False,
            pf_count=1,
            max_payload_size=1024,
            enable_client_tag=True,
            enable_extended_tag=True,
            enable_parity=False,
            enable_rx_msg_interface=False,
            enable_sriov=False,
            enable_extended_configuration=False,
            pf0_msi_enable=True,
            pf0_msi_count=32,
            pf1_msi_enable=False,
            pf1_msi_count=1,
            pf2_msi_enable=False,
            pf2_msi_count=1,
            pf3_msi_enable=False,
            pf3_msi_count=1,
            pf0_msix_enable=False,
            pf0_msix_table_size=0,
            pf0_msix_table_bir=0,
            pf0_msix_table_offset=0x00000000,
            pf0_msix_pba_bir=0,
            pf0_msix_pba_offset=0x00000000,
            pf1_msix_enable=False,
            pf1_msix_table_size=0,
            pf1_msix_table_bir=0,
            pf1_msix_table_offset=0x00000000,
            pf1_msix_pba_bir=0,
            pf1_msix_pba_offset=0x00000000,
            pf2_msix_enable=False,
            pf2_msix_table_size=0,
            pf2_msix_table_bir=0,
            pf2_msix_table_offset=0x00000000,
            pf2_msix_pba_bir=0,
            pf2_msix_pba_offset=0x00000000,
            pf3_msix_enable=False,
            pf3_msix_table_size=0,
            pf3_msix_table_bir=0,
            pf3_msix_table_offset=0x00000000,
            pf3_msix_pba_bir=0,
            pf3_msix_pba_offset=0x00000000,

            # signals
            # Clock and Reset Interface
            user_clk=dut.clk,
            user_reset=dut.rst,
            # user_lnk_up
            # sys_clk
            # sys_clk_gt
            # sys_reset
            # phy_rdy_out

            # Requester reQuest Interface
            rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
            pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
            pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
            pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
            pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
            # pcie_rq_tag0
            # pcie_rq_tag1
            # pcie_rq_tag_av
            # pcie_rq_tag_vld0
            # pcie_rq_tag_vld1

            # Requester Completion Interface
            rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),

            # Completer reQuest Interface
            cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
            # pcie_cq_np_req
            # pcie_cq_np_req_count

            # Completer Completion Interface
            cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),

            # Transmit Flow Control Interface
            # pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
            # pcie_tfc_npd_av=dut.pcie_tfc_npd_av,

            # Configuration Management Interface
            cfg_mgmt_addr=dut.cfg_mgmt_addr,
            cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
            cfg_mgmt_write=dut.cfg_mgmt_write,
            cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
            cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
            cfg_mgmt_read=dut.cfg_mgmt_read,
            cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
            cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
            # cfg_mgmt_debug_access

            # Configuration Status Interface
            # cfg_phy_link_down
            # cfg_phy_link_status
            # cfg_negotiated_width
            # cfg_current_speed
            cfg_max_payload=dut.cfg_max_payload,
            cfg_max_read_req=dut.cfg_max_read_req,
            # cfg_function_status
            # cfg_vf_status
            # cfg_function_power_state
            # cfg_vf_power_state
            # cfg_link_power_state
            # cfg_err_cor_out
            # cfg_err_nonfatal_out
            # cfg_err_fatal_out
            # cfg_local_error_out
            # cfg_local_error_valid
            # cfg_rx_pm_state
            # cfg_tx_pm_state
            # cfg_ltssm_state
            # cfg_rcb_status
            # cfg_obff_enable
            # cfg_pl_status_change
            # cfg_tph_requester_enable
            # cfg_tph_st_mode
            # cfg_vf_tph_requester_enable
            # cfg_vf_tph_st_mode

            # Configuration Received Message Interface
            # cfg_msg_received
            # cfg_msg_received_data
            # cfg_msg_received_type

            # Configuration Transmit Message Interface
            # cfg_msg_transmit
            # cfg_msg_transmit_type
            # cfg_msg_transmit_data
            # cfg_msg_transmit_done

            # Configuration Flow Control Interface
            cfg_fc_ph=dut.cfg_fc_ph,
            cfg_fc_pd=dut.cfg_fc_pd,
            cfg_fc_nph=dut.cfg_fc_nph,
            cfg_fc_npd=dut.cfg_fc_npd,
            cfg_fc_cplh=dut.cfg_fc_cplh,
            cfg_fc_cpld=dut.cfg_fc_cpld,
            cfg_fc_sel=dut.cfg_fc_sel,

            # Configuration Control Interface
            # cfg_hot_reset_in
            # cfg_hot_reset_out
            # cfg_config_space_enable
            # cfg_dsn
            # cfg_bus_number
            # cfg_ds_port_number
            # cfg_ds_bus_number
            # cfg_ds_device_number
            # cfg_ds_function_number
            # cfg_power_state_change_ack
            # cfg_power_state_change_interrupt
            cfg_err_cor_in=dut.status_error_cor,
            cfg_err_uncor_in=dut.status_error_uncor,
            # cfg_flr_in_process
            # cfg_flr_done
            # cfg_vf_flr_in_process
            # cfg_vf_flr_func_num
            # cfg_vf_flr_done
            # cfg_pm_aspm_l1_entry_reject
            # cfg_pm_aspm_tx_l0s_entry_disable
            # cfg_req_pm_transition_l23_ready
            # cfg_link_training_enable

            # Configuration Interrupt Controller Interface
            # cfg_interrupt_int
            # cfg_interrupt_sent
            # cfg_interrupt_pending
            cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
            cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
            cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
            cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
            # cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
            cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
            cfg_interrupt_msi_pending_status=dut.
            cfg_interrupt_msi_pending_status,
            cfg_interrupt_msi_pending_status_data_enable=dut.
            cfg_interrupt_msi_pending_status_data_enable,
            # cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
            cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
            cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
            # cfg_interrupt_msix_enable
            # cfg_interrupt_msix_mask
            # cfg_interrupt_msix_vf_enable
            # cfg_interrupt_msix_vf_mask
            # cfg_interrupt_msix_address
            # cfg_interrupt_msix_data
            # cfg_interrupt_msix_int
            # cfg_interrupt_msix_vec_pending
            # cfg_interrupt_msix_vec_pending_status
            # cfg_interrupt_msix_sent
            # cfg_interrupt_msix_fail
            cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
            cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
            cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
            # cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
            # cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,

            # Configuration Extend Interface
            # cfg_ext_read_received
            # cfg_ext_write_received
            # cfg_ext_register_number
            # cfg_ext_function_number
            # cfg_ext_write_data
            # cfg_ext_write_byte_enable
            # cfg_ext_read_data
            # cfg_ext_read_data_valid
        )

        # self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.dev.functions[0].configure_bar(
            0, 2**len(
                dut.example_core_pcie_us_inst.core_pcie_inst.axil_ctrl_awaddr))
        self.dev.functions[0].configure_bar(
            2, 2**len(
                dut.example_core_pcie_us_inst.core_pcie_inst.axi_ram_awaddr))

        dut.btnu.setimmediatevalue(0)
        dut.btnl.setimmediatevalue(0)
        dut.btnd.setimmediatevalue(0)
        dut.btnr.setimmediatevalue(0)
        dut.btnc.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)

    async def init(self):

        await FallingEdge(self.dut.rst)
        await Timer(100, 'ns')

        await self.rc.enumerate()

        dev = self.rc.find_device(self.dev.functions[0].pcie_id)
        await dev.enable_device()
        await dev.set_master()
        await dev.alloc_irq_vectors(32, 32)
class TB(object):
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.rc.max_payload_size = 0x1  # 256 bytes
        self.rc.max_read_request_size = 0x2  # 512 bytes

        self.dev = S10PcieDevice(
            # configuration options
            pcie_generation=3,
            # pcie_link_width=2,
            # pld_clk_frequency=250e6,
            l_tile=False,

            # signals
            # Clock and reset
            # npor=dut.npor,
            # pin_perst=dut.pin_perst,
            # ninit_done=dut.ninit_done,
            # pld_clk_inuse=dut.pld_clk_inuse,
            # pld_core_ready=dut.pld_core_ready,
            reset_status=dut.rst,
            # clr_st=dut.clr_st,
            # refclk=dut.refclk,
            coreclkout_hip=dut.clk,

            # RX interface
            rx_bus=S10RxBus.from_prefix(dut, "rx_st"),

            # TX interface
            tx_bus=S10TxBus.from_prefix(dut, "tx_st"),

            # TX flow control
            tx_ph_cdts=dut.tx_ph_cdts,
            tx_pd_cdts=dut.tx_pd_cdts,
            tx_nph_cdts=dut.tx_nph_cdts,
            tx_npd_cdts=dut.tx_npd_cdts,
            tx_cplh_cdts=dut.tx_cplh_cdts,
            tx_cpld_cdts=dut.tx_cpld_cdts,
            tx_hdr_cdts_consumed=dut.tx_hdr_cdts_consumed,
            tx_data_cdts_consumed=dut.tx_data_cdts_consumed,
            tx_cdts_type=dut.tx_cdts_type,
            tx_cdts_data_value=dut.tx_cdts_data_value,

            # Hard IP status
            # int_status=dut.int_status,
            # int_status_common=dut.int_status_common,
            # derr_cor_ext_rpl=dut.derr_cor_ext_rpl,
            # derr_rpl=dut.derr_rpl,
            # derr_cor_ext_rcv=dut.derr_cor_ext_rcv,
            # derr_uncor_ext_rcv=dut.derr_uncor_ext_rcv,
            # rx_par_err=dut.rx_par_err,
            # tx_par_err=dut.tx_par_err,
            # ltssmstate=dut.ltssmstate,
            # link_up=dut.link_up,
            # lane_act=dut.lane_act,
            # currentspeed=dut.currentspeed,

            # Power management
            # pm_linkst_in_l1=dut.pm_linkst_in_l1,
            # pm_linkst_in_l0s=dut.pm_linkst_in_l0s,
            # pm_state=dut.pm_state,
            # pm_dstate=dut.pm_dstate,
            # apps_pm_xmt_pme=dut.apps_pm_xmt_pme,
            # apps_ready_entr_l23=dut.apps_ready_entr_l23,
            # apps_pm_xmt_turnoff=dut.apps_pm_xmt_turnoff,
            # app_init_rst=dut.app_init_rst,
            # app_xfer_pending=dut.app_xfer_pending,

            # Interrupt interface
            app_msi_req=dut.app_msi_req,
            app_msi_ack=dut.app_msi_ack,
            app_msi_tc=dut.app_msi_tc,
            app_msi_num=dut.app_msi_num,
            app_msi_func_num=dut.app_msi_func_num,
            # app_int_sts=dut.app_int_sts,

            # Error interface
            # serr_out=dut.serr_out,
            # hip_enter_err_mode=dut.hip_enter_err_mode,
            # app_err_valid=dut.app_err_valid,
            # app_err_hdr=dut.app_err_hdr,
            # app_err_info=dut.app_err_info,
            # app_err_func_num=dut.app_err_func_num,

            # Configuration output
            tl_cfg_func=dut.tl_cfg_func,
            tl_cfg_add=dut.tl_cfg_add,
            tl_cfg_ctl=dut.tl_cfg_ctl,

            # Configuration extension bus
            # ceb_req=dut.ceb_req,
            # ceb_ack=dut.ceb_ack,
            # ceb_addr=dut.ceb_addr,
            # ceb_din=dut.ceb_din,
            # ceb_dout=dut.ceb_dout,
            # ceb_wr=dut.ceb_wr,
            # ceb_cdm_convert_data=dut.ceb_cdm_convert_data,
            # ceb_func_num=dut.ceb_func_num,
            # ceb_vf_num=dut.ceb_vf_num,
            # ceb_vf_active=dut.ceb_vf_active,

            # Hard IP reconfiguration interface
            # hip_reconfig_clk=dut.hip_reconfig_clk,
            # hip_reconfig_address=dut.hip_reconfig_address,
            # hip_reconfig_read=dut.hip_reconfig_read,
            # hip_reconfig_readdata=dut.hip_reconfig_readdata,
            # hip_reconfig_readdatavalid=dut.hip_reconfig_readdatavalid,
            # hip_reconfig_write=dut.hip_reconfig_write,
            # hip_reconfig_writedata=dut.hip_reconfig_writedata,
            # hip_reconfig_waitrequest=dut.hip_reconfig_waitrequest,
        )

        # self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.driver = mqnic.Driver()

        self.dev.functions[0].msi_cap.msi_multiple_message_capable = 5

        self.dev.functions[0].configure_bar(
            0,
            2**len(dut.core_pcie_inst.axil_ctrl_araddr),
            ext=True,
            prefetch=True)
        if hasattr(dut.core_pcie_inst, 'pcie_app_ctrl'):
            self.dev.functions[0].configure_bar(
                2,
                2**len(dut.core_pcie_inst.axil_app_ctrl_araddr),
                ext=True,
                prefetch=True)

        # Ethernet
        self.port_mac = []

        eth_int_if_width = len(dut.core_pcie_inst.core_inst.iface[0].port[0].
                               rx_async_fifo_inst.m_axis_tdata)
        eth_clock_period = 6.4
        eth_speed = 10e9

        if eth_int_if_width == 64:
            # 10G
            eth_clock_period = 6.4
            eth_speed = 10e9
        elif eth_int_if_width == 128:
            # 25G
            eth_clock_period = 2.56
            eth_speed = 25e9
        elif eth_int_if_width == 512:
            # 100G
            eth_clock_period = 3.102
            eth_speed = 100e9

        for iface in dut.core_pcie_inst.core_inst.iface:
            for port in iface.port:
                cocotb.start_soon(
                    Clock(port.port_rx_clk, eth_clock_period,
                          units="ns").start())
                cocotb.start_soon(
                    Clock(port.port_tx_clk, eth_clock_period,
                          units="ns").start())

                port.port_rx_rst.setimmediatevalue(0)
                port.port_tx_rst.setimmediatevalue(0)

                mac = EthMac(tx_clk=port.port_tx_clk,
                             tx_rst=port.port_tx_rst,
                             tx_bus=AxiStreamBus.from_prefix(port, "axis_tx"),
                             tx_ptp_time=port.ptp.tx_ptp_cdc_inst.output_ts,
                             tx_ptp_ts=port.ptp.axis_tx_ptp_ts,
                             tx_ptp_ts_tag=port.ptp.axis_tx_ptp_ts_tag,
                             tx_ptp_ts_valid=port.ptp.axis_tx_ptp_ts_valid,
                             rx_clk=port.port_rx_clk,
                             rx_rst=port.port_rx_rst,
                             rx_bus=AxiStreamBus.from_prefix(port, "axis_rx"),
                             rx_ptp_time=port.ptp.rx_ptp_cdc_inst.output_ts,
                             ifg=12,
                             speed=eth_speed)

                self.port_mac.append(mac)

        dut.ctrl_reg_wr_wait.setimmediatevalue(0)
        dut.ctrl_reg_wr_ack.setimmediatevalue(0)
        dut.ctrl_reg_rd_data.setimmediatevalue(0)
        dut.ctrl_reg_rd_wait.setimmediatevalue(0)
        dut.ctrl_reg_rd_ack.setimmediatevalue(0)

        dut.ptp_sample_clk.setimmediatevalue(0)

        dut.s_axis_stat_tdata.setimmediatevalue(0)
        dut.s_axis_stat_tid.setimmediatevalue(0)
        dut.s_axis_stat_tvalid.setimmediatevalue(0)

        self.loopback_enable = False
        cocotb.start_soon(self._run_loopback())

    async def init(self):

        for mac in self.port_mac:
            mac.rx.reset.setimmediatevalue(0)
            mac.tx.reset.setimmediatevalue(0)

        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)

        for mac in self.port_mac:
            mac.rx.reset.setimmediatevalue(1)
            mac.tx.reset.setimmediatevalue(1)

        await FallingEdge(self.dut.rst)
        await Timer(100, 'ns')

        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)

        for mac in self.port_mac:
            mac.rx.reset.setimmediatevalue(0)
            mac.tx.reset.setimmediatevalue(0)

        await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)

    async def _run_loopback(self):
        while True:
            await RisingEdge(self.dut.clk)

            if self.loopback_enable:
                for mac in self.port_mac:
                    if not mac.tx.empty():
                        await mac.rx.send(await mac.tx.recv())
Exemple #36
0
class TB(object):
    def __init__(self, dut):
        self.dut = dut

        self.BAR0_APERTURE = int(os.getenv("PARAM_BAR0_APERTURE"))

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.rc.max_payload_size = 0x1  # 256 bytes
        self.rc.max_read_request_size = 0x2  # 512 bytes

        self.dev = UltraScalePlusPcieDevice(
            # configuration options
            pcie_generation=3,
            pcie_link_width=16,
            user_clk_frequency=250e6,
            alignment="dword",
            cq_cc_straddle=False,
            rq_rc_straddle=False,
            rc_4tlp_straddle=False,
            enable_pf1=False,
            enable_client_tag=True,
            enable_extended_tag=True,
            enable_parity=False,
            enable_rx_msg_interface=False,
            enable_sriov=False,
            enable_extended_configuration=False,
            enable_pf0_msi=True,
            enable_pf1_msi=False,

            # signals
            # Clock and Reset Interface
            user_clk=dut.clk_250mhz,
            user_reset=dut.rst_250mhz,
            # user_lnk_up
            # sys_clk
            # sys_clk_gt
            # sys_reset
            # phy_rdy_out

            # Requester reQuest Interface
            rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
            pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
            pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
            pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
            pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
            # pcie_rq_tag0
            # pcie_rq_tag1
            # pcie_rq_tag_av
            # pcie_rq_tag_vld0
            # pcie_rq_tag_vld1

            # Requester Completion Interface
            rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),

            # Completer reQuest Interface
            cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
            # pcie_cq_np_req
            # pcie_cq_np_req_count

            # Completer Completion Interface
            cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),

            # Transmit Flow Control Interface
            # pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
            # pcie_tfc_npd_av=dut.pcie_tfc_npd_av,

            # Configuration Management Interface
            cfg_mgmt_addr=dut.cfg_mgmt_addr,
            cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
            cfg_mgmt_write=dut.cfg_mgmt_write,
            cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
            cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
            cfg_mgmt_read=dut.cfg_mgmt_read,
            cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
            cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
            # cfg_mgmt_debug_access

            # Configuration Status Interface
            # cfg_phy_link_down
            # cfg_phy_link_status
            # cfg_negotiated_width
            # cfg_current_speed
            cfg_max_payload=dut.cfg_max_payload,
            cfg_max_read_req=dut.cfg_max_read_req,
            # cfg_function_status
            # cfg_vf_status
            # cfg_function_power_state
            # cfg_vf_power_state
            # cfg_link_power_state
            # cfg_err_cor_out
            # cfg_err_nonfatal_out
            # cfg_err_fatal_out
            # cfg_local_error_out
            # cfg_local_error_valid
            # cfg_rx_pm_state
            # cfg_tx_pm_state
            # cfg_ltssm_state
            # cfg_rcb_status
            # cfg_obff_enable
            # cfg_pl_status_change
            # cfg_tph_requester_enable
            # cfg_tph_st_mode
            # cfg_vf_tph_requester_enable
            # cfg_vf_tph_st_mode

            # Configuration Received Message Interface
            # cfg_msg_received
            # cfg_msg_received_data
            # cfg_msg_received_type

            # Configuration Transmit Message Interface
            # cfg_msg_transmit
            # cfg_msg_transmit_type
            # cfg_msg_transmit_data
            # cfg_msg_transmit_done

            # Configuration Flow Control Interface
            cfg_fc_ph=dut.cfg_fc_ph,
            cfg_fc_pd=dut.cfg_fc_pd,
            cfg_fc_nph=dut.cfg_fc_nph,
            cfg_fc_npd=dut.cfg_fc_npd,
            cfg_fc_cplh=dut.cfg_fc_cplh,
            cfg_fc_cpld=dut.cfg_fc_cpld,
            cfg_fc_sel=dut.cfg_fc_sel,

            # Configuration Control Interface
            # cfg_hot_reset_in
            # cfg_hot_reset_out
            # cfg_config_space_enable
            # cfg_dsn
            # cfg_bus_number
            # cfg_ds_port_number
            # cfg_ds_bus_number
            # cfg_ds_device_number
            # cfg_ds_function_number
            # cfg_power_state_change_ack
            # cfg_power_state_change_interrupt
            cfg_err_cor_in=dut.status_error_cor,
            cfg_err_uncor_in=dut.status_error_uncor,
            # cfg_flr_in_process
            # cfg_flr_done
            # cfg_vf_flr_in_process
            # cfg_vf_flr_func_num
            # cfg_vf_flr_done
            # cfg_pm_aspm_l1_entry_reject
            # cfg_pm_aspm_tx_l0s_entry_disable
            # cfg_req_pm_transition_l23_ready
            # cfg_link_training_enable

            # Configuration Interrupt Controller Interface
            # cfg_interrupt_int
            # cfg_interrupt_sent
            # cfg_interrupt_pending
            cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
            cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
            cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
            cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
            # cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
            cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
            cfg_interrupt_msi_pending_status=dut.
            cfg_interrupt_msi_pending_status,
            cfg_interrupt_msi_pending_status_data_enable=dut.
            cfg_interrupt_msi_pending_status_data_enable,
            # cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
            cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
            cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
            # cfg_interrupt_msix_enable
            # cfg_interrupt_msix_mask
            # cfg_interrupt_msix_vf_enable
            # cfg_interrupt_msix_vf_mask
            # cfg_interrupt_msix_address
            # cfg_interrupt_msix_data
            # cfg_interrupt_msix_int
            # cfg_interrupt_msix_vec_pending
            # cfg_interrupt_msix_vec_pending_status
            cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
            cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
            cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
            # cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
            # cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,

            # Configuration Extend Interface
            # cfg_ext_read_received
            # cfg_ext_write_received
            # cfg_ext_register_number
            # cfg_ext_function_number
            # cfg_ext_write_data
            # cfg_ext_write_byte_enable
            # cfg_ext_read_data
            # cfg_ext_read_data_valid
        )

        # self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.driver = mqnic.Driver(self.rc)

        self.dev.functions[0].msi_multiple_message_capable = 5

        self.dev.functions[0].configure_bar(0,
                                            2**self.BAR0_APERTURE,
                                            ext=True,
                                            prefetch=True)

        # Ethernet
        cocotb.fork(Clock(dut.qsfp_rx_clk, 3.102, units="ns").start())
        cocotb.fork(Clock(dut.qsfp_tx_clk, 3.102, units="ns").start())

        self.qsfp_mac = EthMac(
            tx_clk=dut.qsfp_tx_clk,
            tx_rst=dut.qsfp_tx_rst,
            tx_bus=AxiStreamBus.from_prefix(dut, "qsfp_tx_axis"),
            tx_ptp_time=dut.qsfp_tx_ptp_time,
            tx_ptp_ts=dut.qsfp_tx_ptp_ts,
            tx_ptp_ts_valid=dut.qsfp_tx_ptp_ts_valid,
            rx_clk=dut.qsfp_rx_clk,
            rx_rst=dut.qsfp_rx_rst,
            rx_bus=AxiStreamBus.from_prefix(dut, "qsfp_rx_axis"),
            rx_ptp_time=dut.qsfp_rx_ptp_time,
            ifg=12,
            speed=100e9)

        dut.qspi_dq_i.setimmediatevalue(0)

        self.cms_ram = AxiLiteRam(AxiLiteBus.from_prefix(dut, "m_axil_cms"),
                                  dut.m_axil_cms_clk,
                                  dut.m_axil_cms_rst,
                                  size=256 * 1024)

        self.loopback_enable = False
        cocotb.fork(self._run_loopback())

    async def init(self):

        self.dut.qsfp_rx_rst.setimmediatevalue(0)
        self.dut.qsfp_tx_rst.setimmediatevalue(0)

        await RisingEdge(self.dut.clk_250mhz)
        await RisingEdge(self.dut.clk_250mhz)

        self.dut.qsfp_rx_rst.setimmediatevalue(1)
        self.dut.qsfp_tx_rst.setimmediatevalue(1)

        await FallingEdge(self.dut.rst_250mhz)
        await Timer(100, 'ns')

        await RisingEdge(self.dut.clk_250mhz)
        await RisingEdge(self.dut.clk_250mhz)

        self.dut.qsfp_rx_rst.setimmediatevalue(0)
        self.dut.qsfp_tx_rst.setimmediatevalue(0)

        await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)

    async def _run_loopback(self):
        while True:
            await RisingEdge(self.dut.clk_250mhz)

            if self.loopback_enable:
                if not self.qsfp_mac.tx.empty():
                    await self.qsfp_mac.rx.send(await self.qsfp_mac.tx.recv())
Exemple #37
0
class TB:
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.start_soon(Clock(dut.clk, 6.206, units="ns").start())

        # Ethernet
        cocotb.start_soon(
            Clock(dut.qsfp1_mac_1_rx_clk, 6.206, units="ns").start())
        cocotb.start_soon(
            Clock(dut.qsfp1_mac_1_tx_clk, 6.206, units="ns").start())

        self.qsfp1_mac_1 = EthMac(
            tx_clk=dut.qsfp1_mac_1_tx_clk,
            tx_rst=dut.qsfp1_mac_1_tx_rst,
            tx_bus=AxiStreamBus.from_prefix(dut, "qsfp1_mac_1_tx_axis"),
            rx_clk=dut.qsfp1_mac_1_rx_clk,
            rx_rst=dut.qsfp1_mac_1_rx_rst,
            rx_bus=AxiStreamBus.from_prefix(dut, "qsfp1_mac_1_rx_axis"),
            ifg=12,
            speed=10e9)

        cocotb.start_soon(
            Clock(dut.qsfp1_mac_2_rx_clk, 6.206, units="ns").start())
        cocotb.start_soon(
            Clock(dut.qsfp1_mac_2_tx_clk, 6.206, units="ns").start())

        self.qsfp1_mac_2 = EthMac(
            tx_clk=dut.qsfp1_mac_2_tx_clk,
            tx_rst=dut.qsfp1_mac_2_tx_rst,
            tx_bus=AxiStreamBus.from_prefix(dut, "qsfp1_mac_2_tx_axis"),
            rx_clk=dut.qsfp1_mac_2_rx_clk,
            rx_rst=dut.qsfp1_mac_2_rx_rst,
            rx_bus=AxiStreamBus.from_prefix(dut, "qsfp1_mac_2_rx_axis"),
            ifg=12,
            speed=10e9)

        cocotb.start_soon(
            Clock(dut.qsfp1_mac_3_rx_clk, 6.206, units="ns").start())
        cocotb.start_soon(
            Clock(dut.qsfp1_mac_3_tx_clk, 6.206, units="ns").start())

        self.qsfp1_mac_3 = EthMac(
            tx_clk=dut.qsfp1_mac_3_tx_clk,
            tx_rst=dut.qsfp1_mac_3_tx_rst,
            tx_bus=AxiStreamBus.from_prefix(dut, "qsfp1_mac_3_tx_axis"),
            rx_clk=dut.qsfp1_mac_3_rx_clk,
            rx_rst=dut.qsfp1_mac_3_rx_rst,
            rx_bus=AxiStreamBus.from_prefix(dut, "qsfp1_mac_3_rx_axis"),
            ifg=12,
            speed=10e9)

        cocotb.start_soon(
            Clock(dut.qsfp1_mac_4_rx_clk, 6.206, units="ns").start())
        cocotb.start_soon(
            Clock(dut.qsfp1_mac_4_tx_clk, 6.206, units="ns").start())

        self.qsfp1_mac_4 = EthMac(
            tx_clk=dut.qsfp1_mac_4_tx_clk,
            tx_rst=dut.qsfp1_mac_4_tx_rst,
            tx_bus=AxiStreamBus.from_prefix(dut, "qsfp1_mac_4_tx_axis"),
            rx_clk=dut.qsfp1_mac_4_rx_clk,
            rx_rst=dut.qsfp1_mac_4_rx_rst,
            rx_bus=AxiStreamBus.from_prefix(dut, "qsfp1_mac_4_rx_axis"),
            ifg=12,
            speed=10e9)

        cocotb.start_soon(
            Clock(dut.qsfp2_mac_1_rx_clk, 6.206, units="ns").start())
        cocotb.start_soon(
            Clock(dut.qsfp2_mac_1_tx_clk, 6.206, units="ns").start())

        self.qsfp2_mac_1 = EthMac(
            tx_clk=dut.qsfp2_mac_1_tx_clk,
            tx_rst=dut.qsfp2_mac_1_tx_rst,
            tx_bus=AxiStreamBus.from_prefix(dut, "qsfp2_mac_1_tx_axis"),
            rx_clk=dut.qsfp2_mac_1_rx_clk,
            rx_rst=dut.qsfp2_mac_1_rx_rst,
            rx_bus=AxiStreamBus.from_prefix(dut, "qsfp2_mac_1_rx_axis"),
            ifg=12,
            speed=10e9)

        cocotb.start_soon(
            Clock(dut.qsfp2_mac_2_rx_clk, 6.206, units="ns").start())
        cocotb.start_soon(
            Clock(dut.qsfp2_mac_2_tx_clk, 6.206, units="ns").start())

        self.qsfp2_mac_2 = EthMac(
            tx_clk=dut.qsfp2_mac_2_tx_clk,
            tx_rst=dut.qsfp2_mac_2_tx_rst,
            tx_bus=AxiStreamBus.from_prefix(dut, "qsfp2_mac_2_tx_axis"),
            rx_clk=dut.qsfp2_mac_2_rx_clk,
            rx_rst=dut.qsfp2_mac_2_rx_rst,
            rx_bus=AxiStreamBus.from_prefix(dut, "qsfp2_mac_2_rx_axis"),
            ifg=12,
            speed=10e9)

        cocotb.start_soon(
            Clock(dut.qsfp2_mac_3_rx_clk, 6.206, units="ns").start())
        cocotb.start_soon(
            Clock(dut.qsfp2_mac_3_tx_clk, 6.206, units="ns").start())

        self.qsfp2_mac_3 = EthMac(
            tx_clk=dut.qsfp2_mac_3_tx_clk,
            tx_rst=dut.qsfp2_mac_3_tx_rst,
            tx_bus=AxiStreamBus.from_prefix(dut, "qsfp2_mac_3_tx_axis"),
            rx_clk=dut.qsfp2_mac_3_rx_clk,
            rx_rst=dut.qsfp2_mac_3_rx_rst,
            rx_bus=AxiStreamBus.from_prefix(dut, "qsfp2_mac_3_rx_axis"),
            ifg=12,
            speed=10e9)

        cocotb.start_soon(
            Clock(dut.qsfp2_mac_4_rx_clk, 6.206, units="ns").start())
        cocotb.start_soon(
            Clock(dut.qsfp2_mac_4_tx_clk, 6.206, units="ns").start())

        self.qsfp2_mac_4 = EthMac(
            tx_clk=dut.qsfp2_mac_4_tx_clk,
            tx_rst=dut.qsfp2_mac_4_tx_rst,
            tx_bus=AxiStreamBus.from_prefix(dut, "qsfp2_mac_4_tx_axis"),
            rx_clk=dut.qsfp2_mac_4_rx_clk,
            rx_rst=dut.qsfp2_mac_4_rx_rst,
            rx_bus=AxiStreamBus.from_prefix(dut, "qsfp2_mac_4_rx_axis"),
            ifg=12,
            speed=10e9)

        dut.btn.setimmediatevalue(0)

    async def init(self):

        self.dut.rst.setimmediatevalue(0)
        self.dut.qsfp1_mac_1_rx_rst.setimmediatevalue(0)
        self.dut.qsfp1_mac_1_tx_rst.setimmediatevalue(0)
        self.dut.qsfp1_mac_2_rx_rst.setimmediatevalue(0)
        self.dut.qsfp1_mac_2_tx_rst.setimmediatevalue(0)
        self.dut.qsfp1_mac_3_rx_rst.setimmediatevalue(0)
        self.dut.qsfp1_mac_3_tx_rst.setimmediatevalue(0)
        self.dut.qsfp1_mac_4_rx_rst.setimmediatevalue(0)
        self.dut.qsfp1_mac_4_tx_rst.setimmediatevalue(0)
        self.dut.qsfp2_mac_1_rx_rst.setimmediatevalue(0)
        self.dut.qsfp2_mac_1_tx_rst.setimmediatevalue(0)
        self.dut.qsfp2_mac_2_rx_rst.setimmediatevalue(0)
        self.dut.qsfp2_mac_2_tx_rst.setimmediatevalue(0)
        self.dut.qsfp2_mac_3_rx_rst.setimmediatevalue(0)
        self.dut.qsfp2_mac_3_tx_rst.setimmediatevalue(0)
        self.dut.qsfp2_mac_4_rx_rst.setimmediatevalue(0)
        self.dut.qsfp2_mac_4_tx_rst.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 1
        self.dut.qsfp1_mac_1_rx_rst <= 1
        self.dut.qsfp1_mac_1_tx_rst <= 1
        self.dut.qsfp1_mac_2_rx_rst <= 1
        self.dut.qsfp1_mac_2_tx_rst <= 1
        self.dut.qsfp1_mac_3_rx_rst <= 1
        self.dut.qsfp1_mac_3_tx_rst <= 1
        self.dut.qsfp1_mac_4_rx_rst <= 1
        self.dut.qsfp1_mac_4_tx_rst <= 1
        self.dut.qsfp2_mac_1_rx_rst <= 1
        self.dut.qsfp2_mac_1_tx_rst <= 1
        self.dut.qsfp2_mac_2_rx_rst <= 1
        self.dut.qsfp2_mac_2_tx_rst <= 1
        self.dut.qsfp2_mac_3_rx_rst <= 1
        self.dut.qsfp2_mac_3_tx_rst <= 1
        self.dut.qsfp2_mac_4_rx_rst <= 1
        self.dut.qsfp2_mac_4_tx_rst <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 0
        self.dut.qsfp1_mac_1_rx_rst <= 0
        self.dut.qsfp1_mac_1_tx_rst <= 0
        self.dut.qsfp1_mac_2_rx_rst <= 0
        self.dut.qsfp1_mac_2_tx_rst <= 0
        self.dut.qsfp1_mac_3_rx_rst <= 0
        self.dut.qsfp1_mac_3_tx_rst <= 0
        self.dut.qsfp1_mac_4_rx_rst <= 0
        self.dut.qsfp1_mac_4_tx_rst <= 0
        self.dut.qsfp2_mac_1_rx_rst <= 0
        self.dut.qsfp2_mac_1_tx_rst <= 0
        self.dut.qsfp2_mac_2_rx_rst <= 0
        self.dut.qsfp2_mac_2_tx_rst <= 0
        self.dut.qsfp2_mac_3_rx_rst <= 0
        self.dut.qsfp2_mac_3_tx_rst <= 0
        self.dut.qsfp2_mac_4_rx_rst <= 0
        self.dut.qsfp2_mac_4_tx_rst <= 0

        for k in range(10):
            await RisingEdge(self.dut.clk)