Example #1
def F_exhaust_mk_test(dut):
    """
    Hits end of MK list before matching
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end =   '1000000020'    #Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    yield load_file(dut, filename)
    yield load_mk(dut, start)
    yield load_mk(dut, end)
    
    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    
    yield wait_process(dut)
    
    if int(str(dut.pmk_valid), 2) != 0:
        raise TestFailure("Master key found, not good!")
    else:
        log.info("List done")
Example #2
def E_process_second_input_round_test(dut):
    """Test input processing with 32 word input"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    mockObject = Sha1Model()

    yield reset(dut)
    #yield load_data(dut, log, mockObject, 16)

    #mockObject.processInput()
    #mockObject.displayAll()
    
    yield load_data(dut, log, mockObject, 16)
    mockObject.processInput()
    yield load_data(dut, log, mockObject, 66)
    
    mockOut = "{:08x}".format(mockObject.W[16])
    compare1 = convert_hex(dut.pinput1.test_word_1.value).rjust(8, '0')
    compare2 = convert_hex(dut.pinput1.test_word_5.value).rjust(8, '0')
    
    if compare1 != mockOut:
        raise TestFailure(
            "First load incorrect: {0} != {1}".format(compare1, mockOut))
    elif compare2 != "{:08x}".format(mockObject.W[79]):
        raise TestFailure(
            "First load incorrect: {0} != {1}".format(compare2, "{:08x}".format(mockObject.W[79])))
    else:
        log.info("First load ok!") 
Example #3
    def __init__(self, inst, parent):
        if hasattr(inst, "__name__"):
            self.__name__ = "%s" % inst.__name__
            self.log = SimLog("cocotb.coroutine.%s" % self.__name__, id(self))
        else:
            self.log = SimLog("cocotb.coroutine.fail")

        if sys.version_info[:2] >= (3, 5) and inspect.iscoroutine(inst):
            self._natively_awaitable = True
            self._coro = inst.__await__()
        else:
            self._natively_awaitable = False
            self._coro = inst
        self._started = False
        self._callbacks = []
        self._parent = parent
        self.__doc__ = parent._func.__doc__
        self.module = parent._func.__module__
        self.funcname = parent._func.__name__
        self._outcome = None

        if not hasattr(self._coro, "send"):
            self.log.error("%s isn't a valid coroutine! Did you use the yield "
                           "keyword?" % self.funcname)
            raise CoroutineComplete()
Example #4
def F_process_first_buffer_test(dut):
    """Test data after processing the first message buffer"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    mockObject = Sha1Model()

    yield reset(dut)
    #yield load_data(dut, log, mockObject, 16)

    #mockObject.processInput()
    #mockObject.displayAll()
    
    yield load_data(dut, log, mockObject, 16)
    mockObject.processInput()
    mockObject.processBuffer()
    yield load_data(dut, log, mockObject, 65)
    yield load_data(dut, log, mockObject, 85)
    
    mockOut = "{:08x}".format(mockObject.H0)
    compare1 = convert_hex(dut.pbuffer1.test_word_4.value).rjust(8, '0')
    
    if compare1 != mockOut:
        raise TestFailure(
            "First buffer incorrect: {0} != {1}".format(compare1, mockOut))
    else:
        log.info("First buffer ok!") 
Example #5
def A_gen_data_test(dut):
    """
    Tests that gen_tenhex generates sane values
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    outStr = ''
    
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
        
    for x in xrange(0xff):
        complete = int(dut.main1.gen1.complete_o.value)
        if complete != 0:
            raise TestFailure("Premature completion")
        
        outStr = '{:x}'.format(int(dut.main1.gen1.mk_test9.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test8.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test7.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test6.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test5.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test4.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test3.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test2.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test1.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test0.value))
       
        
        yield RisingEdge(dut.clk_i)
        
    if outStr != "00000000fe":
        raise TestFailure("Wrong loaded values!")
    else:
        log.info("Ok!")
Example #6
def E_find_mk_test(dut):
    """
    Finds MK successfully
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end =   '1000000300'    #Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    print_process_vars(dut)
    yield load_file(dut, filename)
    print_process_vars(dut)
    yield load_mk(dut, start)
    print_process_vars(dut)
    yield load_mk(dut, end)
    print_process_vars(dut)
    
    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    
    yield wait_process(dut)
    
    print_process_vars(dut)
    
    if int(str(dut.pmk_valid), 2) == 0:
        raise TestFailure("MK search failed")
    else:
        log.info("Master key found!")
Example #7
def A_cache_data_test(dut):
    """
    Tests that initial data cache
    gets built and latched properly
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    mockSha1 = wpa2slow.sha1.Sha1Model()
    mockObject = wpa2slow.hmac.HmacModel(mockSha1)
    
    yield reset(dut)
    size = random.randint(8, 64)
    print "Length: {:d}".format(size)
    yield load_random_data(dut, log, mockObject, size)

    #mockObject.displayAll()
    mockOut = "{}".format(mockObject.shaBo)

    print convert_hex(dut.test_word_1) + " " + convert_hex(dut.test_word_2) + " " + convert_hex(dut.test_word_3) + " " + convert_hex(dut.test_word_4) + " " + convert_hex(dut.test_word_5)

    if convert_hex(dut.test_word_1).zfill(8) != mockOut:
        raise TestFailure(
            "Load data is incorrect: {0} != {1}".format(convert_hex(dut.test_word_1), mockOut))
    else:
        log.info("Ok!")
Example #8
def A_load_config_test(dut):
    """
    Test correct start/end parameters get loaded into DUT
    """
    log = SimLog("cocotb.%s" % dut._name)
    #log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    mk_start = '1222222222'
    mk_end = '1222222f22'
    
    #Todo: fix this garbage when GHDL implements arrays in their VPI
    dut.test_start_val0 <= ord(mk_start[0])
    dut.test_start_val1 <= ord(mk_start[1])
    dut.test_start_val2 <= ord(mk_start[2])
    dut.test_start_val3 <= ord(mk_start[3])
    dut.test_start_val4 <= ord(mk_start[4])
    dut.test_start_val5 <= ord(mk_start[5])
    dut.test_start_val6 <= ord(mk_start[6])
    dut.test_start_val7 <= ord(mk_start[7])
    dut.test_start_val8 <= ord(mk_start[8])
    dut.test_start_val9 <= ord(mk_start[9])
    
    dut.test_end_val0 <= ord(mk_end[0])
    dut.test_end_val1 <= ord(mk_end[1])
    dut.test_end_val2 <= ord(mk_end[2])
    dut.test_end_val3 <= ord(mk_end[3])
    dut.test_end_val4 <= ord(mk_end[4])
    dut.test_end_val5 <= ord(mk_end[5])
    dut.test_end_val6 <= ord(mk_end[6])
    dut.test_end_val7 <= ord(mk_end[7])
    dut.test_end_val8 <= ord(mk_end[8])
    dut.test_end_val9 <= ord(mk_end[9])
    
    dut.init_load_i <= 1
    yield RisingEdge(dut.clk_i)
    dut.rst_i <= 1
    
    yield RisingEdge(dut.clk_i)
    dut.rst_i <= 0
    yield RisingEdge(dut.clk_i)
    dut.init_load_i <= 0
    
    yield wait_process(dut)
    #print_mk(dut) 
    
    if mk_end[1] != chr(int(str(dut.test_mk_val1), 2)):
        raise TestFailure("MK Final Value 1 Mismatch")
    if mk_end[3] != chr(int(str(dut.test_mk_val3), 2)):
        raise TestFailure("MK Final Value 3 Mismatch")
    if mk_end[7] != chr(int(str(dut.test_mk_val7), 2)):
        raise TestFailure("MK Final Value 7 Mismatch")
    if mk_end[9] != chr(int(str(dut.test_mk_val9), 2)):
        raise TestFailure("MK Final Value 9 Mismatch")
    else:
        log.info("MK Generation Ok!")
Example #9
    def __init__(self):
        self._outcome = None
        self.thread = None
        self.event = Event()
        self.state = external_state.INIT
        self.cond = threading.Condition()
        self._log = SimLog("cocotb.external.thead.%s" % self.thread, id(self))
Example #10
    def __init__(self, callback=None, event=None):
        """
        Constructor for a monitor instance

        callback will be called with each recovered transaction as the argument

        If the callback isn't used, received transactions will be placed on a
        queue and the event used to notify any consumers.
        """
        self._event = event
        self._wait_event = None
        self._recvQ = []
        self._callbacks = []
        self.stats = MonitorStatistics()
        self._wait_event = Event()

        # Subclasses may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.monitor.%s" % (self.__class__.__name__))

        if callback is not None:
            self.add_callback(callback)

        # Create an independent coroutine which can receive stuff
        self._thread = cocotb.scheduler.add(self._monitor_recv())
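
A rough usage sketch (not part of the original source) of how a concrete monitor plugs into this constructor; WordMonitor, the clk_i/dat_o signal names, and the call to the base-class _recv() helper are assumptions based on the cocotb Monitor API.

from cocotb.decorators import coroutine
from cocotb.monitors import Monitor
from cocotb.triggers import RisingEdge

class WordMonitor(Monitor):
    """Hypothetical monitor that recovers one word per clock edge."""
    def __init__(self, dut, callback=None, event=None):
        self.dut = dut
        Monitor.__init__(self, callback=callback, event=event)  # forks _monitor_recv()

    @coroutine
    def _monitor_recv(self):
        # Independent coroutine added to the scheduler by Monitor.__init__ above
        while True:
            yield RisingEdge(self.dut.clk_i)
            # Hand the recovered transaction to the callbacks / receive queue
            self._recv(int(self.dut.dat_o.value))

A test can then pass a callback at construction time (WordMonitor(dut, callback=my_fn)) or rely on the queue-plus-event behaviour the docstring describes.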
Example #11
    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # Use OrderedDict here for deterministic behavior (gh-934)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = collections.OrderedDict()

        # A dictionary mapping coroutines to the trigger they are waiting for
        self._coro2trigger = collections.OrderedDict()

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = collections.OrderedDict()

        self._pending_coros = []
        self._pending_triggers = []
        self._pending_threads = []
        self._pending_events = []   # Events we need to call set on once we've unwound

        self._terminate = False
        self._test_result = None
        self._entrypoint = None
        self._main_thread = threading.current_thread()

        self._is_reacting = False

        self._write_coro_inst = None
        self._writes_pending = Event()
Example #12
    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = collections.defaultdict(list)

        # A dictionary of pending triggers for each coroutine, indexed by coro
        self._coro2triggers = collections.defaultdict(list)

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = {}

        self._pending_coros = []
        self._pending_callbacks = []
        self._pending_triggers = []
        self._pending_threads = []
        self._pending_events = []   # Events we need to call set on once we've unwound

        self._terminate = False
        self._test_result = None
        self._entrypoint = None
        self._main_thread = threading.current_thread()

        # Select the appropriate scheduling algorithm for this simulator
        self.advance = self.default_scheduling_algorithm
        self._is_reacting = False
Example #13
class RunningTest(RunningCoroutine):
    """Add some useful Test functionality to a RunningCoroutine."""

    class ErrorLogHandler(logging.Handler):
        def __init__(self, fn):
            self.fn = fn
            logging.Handler.__init__(self, level=logging.DEBUG)

        def handle(self, record):
            self.fn(self.format(record))

    def __init__(self, inst, parent):
        self.error_messages = []
        RunningCoroutine.__init__(self, inst, parent)
        self.log = SimLog("cocotb.test.%s" % self.__name__, id(self))
        self.started = False
        self.start_time = 0
        self.start_sim_time = 0
        self.expect_fail = parent.expect_fail
        self.expect_error = parent.expect_error
        self.skip = parent.skip
        self.stage = parent.stage

        self.handler = RunningTest.ErrorLogHandler(self._handle_error_message)
        cocotb.log.addHandler(self.handler)

    def _advance(self, outcome):
        if not self.started:
            self.error_messages = []
            self.log.info("Starting test: \"%s\"\nDescription: %s" %
                          (self.funcname, self.__doc__))
            self.start_time = time.time()
            self.start_sim_time = get_sim_time('ns')
            self.started = True
        try:
            self.log.debug("Sending {}".format(outcome))
            return outcome.send(self._coro)
        except TestComplete as e:
            if isinstance(e, TestFailure):
                self.log.warning(str(e))
            else:
                self.log.info(str(e))

            buff = StringIO()
            for message in self.error_messages:
                print(message, file=buff)
            e.stderr.write(buff.getvalue())
            raise
        except StopIteration:
            raise TestSuccess()
        except Exception as e:
            raise raise_error(self, "Send raised exception:")

    def _handle_error_message(self, msg):
        self.error_messages.append(msg)
Example #14
    def __init__(self, name, filepath=None):
        self.name = name
        self._packets = {}
        self._filepath = filepath
        self.fullname = '\'' + self.name + '\''
        self.log = SimLog('cocotb.' + self.name)
        if self._filepath:
            self._source = open(self._filepath)
            self.log.debug("loaded file %s" % self._filepath)
        self.log.debug("Created feed!")
Example #15
def H_continuous_buffer_test(dut):
    """Loop message buffer several times"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    

    yield reset(dut)
    
    iterations = 30
    mockW = [0] * iterations
    compareW = [0] * iterations
    for i in xrange(iterations):
        mockObject = Sha1Model()
        #
    
        yield load_data(dut, log, mockObject, 16)
        mockObject.processInput()
        mockObject.processBuffer()
    
        #yield load_data(dut, log, mockObject, 73)
    
        #yield load_data(dut, log, None, 85)
    
        mockOut = "{:08x}".format(mockObject.H0)
        compare0 = convert_hex(dut.test_sha1_process_buffer0_o.value).rjust(8, '0')
        compare1 = convert_hex(dut.test_sha1_process_buffer_o.value).rjust(8, '0')
        #print mockOut + " - " + compare0 + " - " + compare1 + " - " + str(dut.w_processed_valid.value)
        
        mockW[i] = mockOut
        if i >= 11:
            compareW[i - 11] = compare1
        
    #print str(mockW[0:-11]).strip('[]')
    #print str(compareW[0:-11]).strip('[]')
       
    

    if mockW[0:-11] != compareW[0:-11]:
        raise TestFailure(
            "Continuous buffer incorrect: {0} != {1}".format(str(mockW[0:-11]).strip('[]'), str(compareW[0:-11]).strip('[]')))
    else:
        log.info("Continuous buffer ok!") 
Example #16
    def __init__(self, inst, parent):
        if hasattr(inst, "__name__"):
            self.__name__ = "%s" % inst.__name__
            self.log = SimLog("cocotb.coroutine.%s" % self.__name__, id(self))
        else:
            self.log = SimLog("cocotb.coroutine.fail")
        self._coro = inst
        self._finished = False
        self._callbacks = []
        self._join = _Join(self)
        self._parent = parent
        self.__doc__ = parent._func.__doc__
        self.module = parent._func.__module__
        self.funcname = parent._func.__name__
        self.retval = None

        if not hasattr(self._coro, "send"):
            self.log.error("%s isn't a value coroutine! Did you use the yield keyword?"
                % self.funcname)
            raise CoroutineComplete(callback=self._finished_cb)
Example #17
    def __init__(self):
        """Constructor for a driver instance."""
        self._pending = Event(name="Driver._pending")
        self._sendQ = deque()

        # Subclasses may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.driver.%s" % (self.__class__.__name__))

        # Create an independent coroutine which can send stuff
        self._thread = cocotb.scheduler.add(self._send_thread())
Example #18
class Trigger(object):
    """Base class to derive from"""
    def __init__(self):
        self.log = SimLog("cocotb.%s" % (self.__class__.__name__), id(self))
        self.signal = None
        self.primed = False

    def prime(self, *args):
        self.primed = True

    def unprime(self):
        """Remove any pending callbacks if necessary"""
        self.primed = False
        self.log.debug("Unprimed")

    def __del__(self):
        """Ensure if a trigger drops out of scope we remove any pending callbacks"""
        self.unprime()

    def __str__(self):
        return self.__class__.__name__
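
A toy sketch (hypothetical, not from the original code) of the derive-and-override pattern this base class expects; it mirrors the approach cocotb's own NullTrigger takes, firing the scheduler callback as soon as it is primed.

class _OneShot(Trigger):
    """Hypothetical trigger that fires immediately when primed."""
    def prime(self, callback):
        Trigger.prime(self)    # keep the base-class 'primed' bookkeeping
        callback(self)         # notify the scheduler that this trigger fired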
Example #19
class Feed(object):
    def __init__(self, name, filepath=None):
        self.name = name
        self._packets = {}
        self._filepath = filepath
        self.fullname = '\'' + self.name + '\''
        self.log = SimLog('cocotb.' + self.name)
        if self._filepath:
            self._source = open(self._filepath)
            self.log.debug("loaded file %s" % self._filepath)
        self.log.debug("Created feed!")

    def addmsg(self, tag, data):
        """ Add a defined message to the internal feed store """
        self._packets[tag] = data

    def getmsg(self):
        """ Get a string representation of the current list head
            This packet will be ready to send
        """
        if self._packets:
            tag, packet = self._packets.popitem()
            return str(packet)
        else:
            self.log.warn("No packets in feed %s" % self.fullname)
            return None
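
A short, hedged usage sketch of the Feed class above; the tag names and payload bytes are made up.

feed = Feed("stimulus")                      # no filepath given, so no backing file is opened
feed.addmsg("pkt0", b"\x01\x02\x03\x04")     # store a couple of tagged packets
feed.addmsg("pkt1", b"\xaa\xbb\xcc\xdd")

while True:
    msg = feed.getmsg()                      # pops one stored packet as a string
    if msg is None:                          # getmsg() logs a warning once the feed is empty
        break
    # ... drive 'msg' onto the bus here ...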
Example #20
    def __init__(self, dut, modules, tests=None):
        """
        Args:
            modules (list): A list of python module names to run

        Kwargs:
            tests (list): A list of specific tests to run (optional)
        """
        self._queue = []
        self._dut = dut
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self.log = SimLog("cocotb.regression")
Example #21
    def __init__(self, handle):
        """
            Args:
                _handle [integer] : vpi/vhpi handle to the simulator object
        """
        self._handle = handle           # handle used for future simulator transactions
        self._sub_handles = {}          # Dictionary of SimHandle objects created by getattr
        self._len = None

        self.name = simulator.get_name_string(self._handle)
        self.fullname = self.name + '(%s)' % simulator.get_type_string(self._handle)
        self.log = SimLog('cocotb.' + self.name)
        self.log.debug("Created!")
Example #22
def D_set_session_params_test(dut):
    """
    Loads handshake, start, end MK values
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end =   '1000000200'

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    yield load_file(dut, filename)
    
    yield load_mk(dut, start)
    
    yield load_mk(dut, end)
    
    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    
    #yield wait_process(dut)
    
    mk_test1 = dut.test_mk1
    mk_test2 = dut.test_mk2
    mk_test3 = dut.test_mk3
    
    if ord(start[0]) != int(str(mk_test1), 2):
        raise TestFailure("Start MK inequal")
    elif ord(end[7]) != int(str(mk_test2), 2):
        raise TestFailure("End MK inequal1")
    elif ord(end[9]) != int(str(mk_test3), 2):
        raise TestFailure("End MK inequal2")
    else:
        log.info("Start/End Params Ok!")
Example #23
def A_load_packet_test(dut):
    """
    Test proper load of filedata into DUT
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    
    obj = wpa2slow.handshake.Handshake()
    objSha = wpa2slow.sha1.Sha1Model()
    objHmac = wpa2slow.hmac.HmacModel(objSha)
    objPbkdf2 = wpa2slow.pbkdf2.Pbkdf2Model()
    objPrf = wpa2slow.compare.PrfModel(objHmac)
    
    (ssid, mac1, mac2, nonce1, nonce2, eapol, eapol_size, keymic) = obj.load(filename)
    
    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    yield load_file(dut, filename)
    
    ssid_test1 = dut.test_ssid_1
    ssid_test2 = dut.test_ssid_2
    ssid_test3 = dut.test_ssid_3
    
    if ord(ssid[0][0]) != int(str(ssid_test1), 2):
        raise TestFailure("ssid_test1 differs from mock")
    elif ord(ssid[0][3]) != int(str(ssid_test2), 2):
        raise TestFailure("ssid_test2 differs from mock")
    elif ord(ssid[0][6]) != int(str(ssid_test3), 2):
        raise TestFailure("ssid_test3 differs from mock")
    elif ord(ssid[0][6]) == int(str(ssid_test1), 2):    #Todo: remove false positive if 1st and 7th chars equal
        raise TestFailure("SSID comparisons failing.")
    else:
        log.info("SSID Ok!")
Example #24
def A_load_data_test(dut):
    """
    Test for data properly shifted in
    w(0) gets loaded in LAST
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    mockObject = Sha1Model()
    
    yield reset(dut)
    yield load_data(dut, log, mockObject, 16)

    #mockObject.displayAll()
    mockOut = "{:08x}".format(mockObject.W[15])

    #print convert_hex(dut.dat_1_o) + " " + convert_hex(dut.dat_2_o) + " " + convert_hex(dut.dat_3_o) + " " + convert_hex(dut.dat_4_o) + " " + convert_hex(dut.dat_5_o)

    if convert_hex(dut.test_sha1_load_o).zfill(8) != mockOut:
        raise TestFailure(
            "Load data is incorrect: {0} != {1}".format(convert_hex(dut.test_sha1_load_o), mockOut))
    else:
        log.info("Ok!")
Example #25
    def __init__(self, inst, parent):
        self.error_messages = []
        RunningCoroutine.__init__(self, inst, parent)
        self.log = SimLog("cocotb.test.%s" % self.__name__, id(self))
        self.started = False
        self.start_time = 0
        self.start_sim_time = 0
        self.expect_fail = parent.expect_fail
        self.expect_error = parent.expect_error
        self.skip = parent.skip
        self.stage = parent.stage

        self.handler = RunningTest.ErrorLogHandler(self._handle_error_message)
        cocotb.log.addHandler(self.handler)
Example #26
def B_compare_data_test(dut):
    """
    Tests that generated data gets compared against test values
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    outStr = ''
    
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
        
    for x in xrange(0x90):
        complete = int(dut.main1.comp_complete)
        
        outStr = str(x) + ' - ' + str(int(dut.main1.i.value)) + ' - ' + str(complete) + ' - ' + \
            '{:x}'.format(int(dut.main1.comp1.mk_test_comp.value)) + ": " + \
            '{:x}'.format(int(dut.main1.comp1.mk_test9.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test8.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test7.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test6.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test5.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test4.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test3.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test2.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test1.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test0.value))
            
        if complete == 1:
            break
        
        yield RisingEdge(dut.clk_i)
        
    if complete == 0:
        raise TestFailure("Comparison never reached")
    else:
        log.info("Ok!")
Example #27
class BusMonitor(Monitor):
    """Wrapper providing common functionality for monitoring busses."""
    _signals = []
    _optional_signals = []

    def __init__(self, entity, name, clock, reset=None, reset_n=None,
                 callback=None, event=None, bus_separator="_",  config={}, array_idx=None):
        self.log = SimLog("cocotb.%s.%s" % (entity._name, name))
        self.entity = entity
        self.name = name
        self.clock = clock
        self.config = self._default_config.copy()

        for configoption, value in config.items():
            self.config[configoption] = value
            self.log.debug("Setting config option %s to %s" %
                           (configoption, str(value)))

        self.bus = Bus(self.entity, self.name, self._signals,
                       optional_signals=self._optional_signals,
                       bus_separator=bus_separator,array_idx=array_idx)
        self._reset = reset
        self._reset_n = reset_n
        Monitor.__init__(self, callback=callback, event=event)

    @property
    def in_reset(self):
        """Boolean flag showing whether the bus is in reset state or not."""
        if self._reset_n is not None:
            return not bool(self._reset_n.value.integer)
        if self._reset is not None:
            return bool(self._reset.value.integer)
        return False

    def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, self.name)
Example #28
def G_process_second_buffer_test(dut):
    """Test data after processing the second message buffer"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    mockObject1 = Sha1Model()
    mockObject2 = Sha1Model()

    yield reset(dut)
    
    yield load_data(dut, log, mockObject1, 16)
    mockObject1.processInput()
    mockObject1.processBuffer()
    yield load_data(dut, log, mockObject2, 16)
    mockObject2.processInput()
    mockObject2.processBuffer()
    yield load_data(None, log, mockObject1, 85)
    yield load_data(None, log, mockObject2, 85)
    
    yield load_data(dut, log, None, 85)
    
    mock1 = "{:08x}".format(mockObject1.H0)
    compare1 = convert_hex(dut.pbuffer1.test_word_4.value).rjust(8, '0')
    
    mock2 = "{:08x}".format(mockObject2.H0)
    compare2 = convert_hex(dut.pbuffer2.test_word_4.value).rjust(8, '0')
    

    if compare1 != mock1:
        raise TestFailure(
            "Second buffer1 incorrect: {0} != {1}".format(compare1, mock1))
    elif compare2 != mock2:
        raise TestFailure(
            "Second buffer2 incorrect: {0} != {1}".format(compare2, mock2))
    else:
        log.info("Second buffer ok!") 
Example #29
    def __init__(self, handle):
        """
        Args:
            handle (integer)    : the GPI handle to the simulator object
        """
        self._handle = handle
        self._len = None
        self._sub_handles = {}  # Dictionary of children
        self._invalid_sub_handles = {} # Dictionary of invalid queries
        self._discovered = False

        self._name = simulator.get_name_string(self._handle)
        self._type = simulator.get_type_string(self._handle)
        self._fullname = self._name + "(%s)" % self._type
        self._log = SimLog("cocotb.%s" % self._name)
        self._log.debug("Created")
Example #30
    def __init__(self, callback=None, event=None):
        self._event = event
        self._wait_event = None
        self._recvQ = deque()
        self._callbacks = []
        self.stats = MonitorStatistics()
        self._wait_event = Event()

        # Subclasses may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.monitor.%s" % (self.__class__.__name__))

        if callback is not None:
            self.add_callback(callback)

        # Create an independent coroutine which can receive stuff
        self._thread = cocotb.scheduler.add(self._monitor_recv())
Example #31
    def log(self):
        return SimLog("cocotb.%s" % (type(self).__qualname__), id(self))
Example #32
class Driver:
    """Class defining the standard interface for a driver within a testbench.

    The driver is responsible for serializing transactions onto the physical
    pins of the interface.  This may consume simulation time.
    """
    def __init__(self):
        """Constructor for a driver instance."""
        self._pending = Event(name="Driver._pending")
        self._sendQ = deque()

        # Sub-classes may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.driver.%s" % (type(self).__qualname__))

        # Create an independent coroutine which can send stuff
        self._thread = cocotb.scheduler.add(self._send_thread())

    def kill(self):
        """Kill the coroutine sending stuff."""
        if self._thread:
            self._thread.kill()
            self._thread = None

    def append(self, transaction, callback=None, event=None, **kwargs):
        """Queue up a transaction to be sent over the bus.

        Mechanisms are provided to permit the caller to know when the
        transaction is processed.

        Args:
            transaction (any): The transaction to be sent.
            callback (callable, optional): Optional function to be called
                when the transaction has been sent.
            event (optional): :class:`~cocotb.triggers.Event` to be set
                when the transaction has been sent.
            **kwargs: Any additional arguments used in child class'
                :any:`_driver_send` method.
        """
        self._sendQ.append((transaction, callback, event, kwargs))
        self._pending.set()

    def clear(self):
        """Clear any queued transactions without sending them onto the bus."""
        self._sendQ = deque()

    @coroutine
    def send(self, transaction, sync=True, **kwargs):
        """Blocking send call (hence must be "yielded" rather than called).

        Sends the transaction over the bus.

        Args:
            transaction (any): The transaction to be sent.
            sync (bool, optional): Synchronize the transfer by waiting for a rising edge.
            **kwargs (dict): Additional arguments used in child class'
                :any:`_driver_send` method.
        """
        yield self._send(transaction, None, None, sync=sync, **kwargs)

    def _driver_send(self, transaction, sync=True, **kwargs):
        """Actual implementation of the send.

        Sub-classes should override this method to implement the actual
        :meth:`~cocotb.drivers.Driver.send` routine.

        Args:
            transaction (any): The transaction to be sent.
            sync (bool, optional): Synchronize the transfer by waiting for a rising edge.
            **kwargs: Additional arguments if required for protocol implemented in a sub-class.
        """
        raise NotImplementedError("Sub-classes of Driver should define a "
                                  "_driver_send coroutine")

    @coroutine
    def _send(self, transaction, callback, event, sync=True, **kwargs):
        """Send coroutine.

        Args:
            transaction (any): The transaction to be sent.
            callback (callable, optional): Optional function to be called
                when the transaction has been sent.
            event (optional): event to be set when the transaction has been sent.
            sync (bool, optional): Synchronize the transfer by waiting for a rising edge.
            **kwargs: Any additional arguments used in child class'
                :any:`_driver_send` method.
        """
        yield self._driver_send(transaction, sync=sync, **kwargs)

        # Notify the world that this transaction is complete
        if event:
            event.set()
        if callback:
            callback(transaction)

    @coroutine
    def _send_thread(self):
        while True:

            # Sleep until we have something to send
            while not self._sendQ:
                self._pending.clear()
                yield self._pending.wait()

            synchronised = False

            # Send in all the queued packets,
            # only synchronize on the first send
            while self._sendQ:
                transaction, callback, event, kwargs = self._sendQ.popleft()
                self.log.debug("Sending queued packet...")
                yield self._send(transaction,
                                 callback,
                                 event,
                                 sync=not synchronised,
                                 **kwargs)
                synchronised = True
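
A minimal sketch (not from the original source) of the subclassing pattern the docstrings above describe: only _driver_send is overridden, while Driver.send() and append() drive it. SimpleWordDriver and the dut.clk_i/dut.dat_i signal names are assumptions.

from cocotb.decorators import coroutine
from cocotb.triggers import RisingEdge

class SimpleWordDriver(Driver):
    """Hypothetical driver that serializes one word per rising clock edge."""
    def __init__(self, dut):
        self.dut = dut
        Driver.__init__(self)    # sets up self.log and forks _send_thread()

    @coroutine
    def _driver_send(self, transaction, sync=True, **kwargs):
        if sync:
            yield RisingEdge(self.dut.clk_i)
        self.dut.dat_i <= transaction

A test would then either "yield driver.send(0x1234)" to block until the word has been driven, or call driver.append(0x1234) to queue it for the background _send_thread shown above.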
Example #33
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.dev = UltraScalePlusPcieDevice(
            # configuration options
            pcie_generation=3,
            pcie_link_width=16,
            user_clk_frequency=250e6,
            alignment="dword",
            cq_cc_straddle=False,
            rq_rc_straddle=False,
            rc_4tlp_straddle=False,
            enable_pf1=False,
            enable_client_tag=True,
            enable_extended_tag=True,
            enable_parity=False,
            enable_rx_msg_interface=False,
            enable_sriov=False,
            enable_extended_configuration=False,
            enable_pf0_msi=True,
            enable_pf1_msi=False,

            # signals
            # Clock and Reset Interface
            user_clk=dut.clk,
            user_reset=dut.rst,
            # user_lnk_up
            # sys_clk
            # sys_clk_gt
            # sys_reset
            # phy_rdy_out

            # Requester reQuest Interface
            rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
            # pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
            # pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
            # pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
            # pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
            # pcie_rq_tag0
            # pcie_rq_tag1
            # pcie_rq_tag_av
            # pcie_rq_tag_vld0
            # pcie_rq_tag_vld1

            # Requester Completion Interface
            rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),

            # Completer reQuest Interface
            cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
            # pcie_cq_np_req
            # pcie_cq_np_req_count

            # Completer Completion Interface
            cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),

            # Transmit Flow Control Interface
            # pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
            # pcie_tfc_npd_av=dut.pcie_tfc_npd_av,

            # Configuration Management Interface
            cfg_mgmt_addr=dut.cfg_mgmt_addr,
            cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
            cfg_mgmt_write=dut.cfg_mgmt_write,
            cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
            cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
            cfg_mgmt_read=dut.cfg_mgmt_read,
            cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
            cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
            # cfg_mgmt_debug_access

            # Configuration Status Interface
            # cfg_phy_link_down
            # cfg_phy_link_status
            # cfg_negotiated_width
            # cfg_current_speed
            cfg_max_payload=dut.cfg_max_payload,
            cfg_max_read_req=dut.cfg_max_read_req,
            # cfg_function_status
            # cfg_vf_status
            # cfg_function_power_state
            # cfg_vf_power_state
            # cfg_link_power_state
            # cfg_err_cor_out
            # cfg_err_nonfatal_out
            # cfg_err_fatal_out
            # cfg_local_error_out
            # cfg_local_error_valid
            # cfg_rx_pm_state
            # cfg_tx_pm_state
            # cfg_ltssm_state
            # cfg_rcb_status
            # cfg_obff_enable
            # cfg_pl_status_change
            # cfg_tph_requester_enable
            # cfg_tph_st_mode
            # cfg_vf_tph_requester_enable
            # cfg_vf_tph_st_mode

            # Configuration Received Message Interface
            # cfg_msg_received
            # cfg_msg_received_data
            # cfg_msg_received_type

            # Configuration Transmit Message Interface
            # cfg_msg_transmit
            # cfg_msg_transmit_type
            # cfg_msg_transmit_data
            # cfg_msg_transmit_done

            # Configuration Flow Control Interface
            # cfg_fc_ph=dut.cfg_fc_ph,
            # cfg_fc_pd=dut.cfg_fc_pd,
            # cfg_fc_nph=dut.cfg_fc_nph,
            # cfg_fc_npd=dut.cfg_fc_npd,
            # cfg_fc_cplh=dut.cfg_fc_cplh,
            # cfg_fc_cpld=dut.cfg_fc_cpld,
            # cfg_fc_sel=dut.cfg_fc_sel,

            # Configuration Control Interface
            # cfg_hot_reset_in
            # cfg_hot_reset_out
            # cfg_config_space_enable
            # cfg_dsn
            # cfg_bus_number
            # cfg_ds_port_number
            # cfg_ds_bus_number
            # cfg_ds_device_number
            # cfg_ds_function_number
            # cfg_power_state_change_ack
            # cfg_power_state_change_interrupt
            cfg_err_cor_in=dut.status_error_cor,
            cfg_err_uncor_in=dut.status_error_uncor,
            # cfg_flr_in_process
            # cfg_flr_done
            # cfg_vf_flr_in_process
            # cfg_vf_flr_func_num
            # cfg_vf_flr_done
            # cfg_pm_aspm_l1_entry_reject
            # cfg_pm_aspm_tx_l0s_entry_disable
            # cfg_req_pm_transition_l23_ready
            # cfg_link_training_enable

            # Configuration Interrupt Controller Interface
            # cfg_interrupt_int
            # cfg_interrupt_sent
            # cfg_interrupt_pending
            cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
            cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
            cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
            cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
            # cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
            cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
            cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
            cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
            # cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
            cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
            cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
            # cfg_interrupt_msix_enable
            # cfg_interrupt_msix_mask
            # cfg_interrupt_msix_vf_enable
            # cfg_interrupt_msix_vf_mask
            # cfg_interrupt_msix_address
            # cfg_interrupt_msix_data
            # cfg_interrupt_msix_int
            # cfg_interrupt_msix_vec_pending
            # cfg_interrupt_msix_vec_pending_status
            cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
            cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
            cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
            # cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
            # cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,

            # Configuration Extend Interface
            # cfg_ext_read_received
            # cfg_ext_write_received
            # cfg_ext_register_number
            # cfg_ext_function_number
            # cfg_ext_write_data
            # cfg_ext_write_byte_enable
            # cfg_ext_read_data
            # cfg_ext_read_data_valid
        )

        # self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.dev.functions[0].msi_multiple_message_capable = 5

        self.dev.functions[0].configure_bar(0, 2**22)
        self.dev.functions[0].configure_bar(2, 2**22)
Example #34
class SimHandleBase(object):
    """
    Base class for all simulation objects.

    We maintain a handle which we can use for GPI calls
    """

    # For backwards compatibility we support a mapping of old member names
    # which may alias with the simulator hierarchy.  In these cases the
    # simulator result takes priority, only falling back to the python member
    # if there is no colliding object in the elaborated design.
    _compat_mapping = {
        "log"               :       "_log",
        "fullname"          :       "_fullname",
        "name"              :       "_name",
        }

    def __init__(self, handle, path):
        """
        Args:
            handle (integer)    : the GPI handle to the simulator object
            path (string)       : path to this handle, None if root
        """
        self._handle = handle
        self._len = None
        self._sub_handles = {}  # Dictionary of children
        self._invalid_sub_handles = {} # Dictionary of invalid queries

        self._name = simulator.get_name_string(self._handle)
        self._type = simulator.get_type_string(self._handle)
        self._fullname = self._name + "(%s)" % self._type
        self._path = self._name if path is None else path
        self._log = SimLog("cocotb.%s" % self._name)
        self._log.debug("Created")

    def __hash__(self):
        return self._handle

    def __len__(self):
        """Returns the 'length' of the underlying object.

        For vectors this is the number of bits.
        """
        if self._len is None:
            self._len = simulator.get_num_elems(self._handle)
        return self._len

    def __eq__(self, other):

        # Permits comparison of handles i.e. if clk == dut.clk
        if isinstance(other, SimHandleBase):
            if self._handle == other._handle: return 0
            return 1

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return type(self).__name__ + "(" + self._path + ")"

    def __str__(self):
        return self._path

    def __setattr__(self, name, value):
        if name in self._compat_mapping:
            if name not in _deprecation_warned:
                warnings.warn("Use of %s attribute is deprecated" % name)
                _deprecation_warned[name] = True
            return setattr(self, self._compat_mapping[name], value)
        else:
            return object.__setattr__(self, name, value)

    def __getattr__(self, name):
        if name in self._compat_mapping:
            if name not in _deprecation_warned:
                warnings.warn("Use of %s attribute is deprecated" % name)
                _deprecation_warned[name] = True
            return getattr(self, self._compat_mapping[name])
        else:
            return object.__getattr__(self, name)
Example #35
# Things we want in the cocotb namespace
from cocotb.decorators import test, coroutine, hook, function, external

# Singleton scheduler instance
# NB this cheekily ensures a singleton since we're replacing the reference
# so that cocotb.scheduler gives you the singleton instance and not the
# scheduler package

# GPI logging instance
# For autodocumentation don't need the extension modules
if "SPHINX_BUILD" not in os.environ:
    import simulator
    logging.basicConfig()
    logging.setLoggerClass(SimBaseLog)
    log = SimLog('cocotb')
    level = os.getenv("COCOTB_LOG_LEVEL", "INFO")
    try:
        _default_log = getattr(logging, level)
    except AttributeError as e:
        log.error("Unable to set loging level to %s" % level)
        _default_log = logging.INFO
    log.setLevel(_default_log)
    loggpi = SimLog('cocotb.gpi')
    # Notify GPI of log level
    simulator.log_level(_default_log)

    # If stdout/stderr are not TTYs, Python may not have opened them with line
    # buffering. In that case, try to reopen them with line buffering
    # explicitly enabled. This ensures that prints such as stack traces always
    # appear. Continue silently if this fails.
Example #36
    def log(self):
        return SimLog("cocotb.function.%s" % self._func.__name__, id(self))
Example #37
class RunningCoroutine(object):
    """Per instance wrapper around an function to turn it into a coroutine


        Provides the following:

            coro.join() creates a Trigger that will fire when this coroutine
            completes

            coro.kill() will destroy a coroutine instance (and cause any Join
            triggers to fire)
    """
    def __init__(self, inst, parent):
        if hasattr(inst, "__name__"):
            self.__name__ = "%s" % inst.__name__
            self.log = SimLog("cocotb.coroutine.%s" % self.__name__, id(self))
        else:
            self.log = SimLog("cocotb.coroutine.fail")
        self._coro = inst
        self._started = False
        self._finished = False
        self._callbacks = []
        self._parent = parent
        self.__doc__ = parent._func.__doc__
        self.module = parent._func.__module__
        self.funcname = parent._func.__name__
        self.retval = None

        if not hasattr(self._coro, "send"):
            self.log.error("%s isn't a valid coroutine! Did you use the yield "
                           "keyword?" % self.funcname)
            raise CoroutineComplete(callback=self._finished_cb)

    def __iter__(self):
        return self

    def __str__(self):
        return str(self.__name__)

    def send(self, value):
        try:
            if isinstance(value, ExternalException):
                self.log.debug("Injecting ExternalException(%s)" %
                               (repr(value)))
                return self._coro.throw(value.exception)
            self._started = True
            return self._coro.send(value)
        except TestComplete as e:
            if isinstance(e, TestFailure):
                self.log.warning(str(e))
            raise
        except ExternalException as e:
            self.retval = e
            self._finished = True
            raise CoroutineComplete(callback=self._finished_cb)
        except ReturnValue as e:
            self.retval = e.retval
            self._finished = True
            raise CoroutineComplete(callback=self._finished_cb)
        except StopIteration as e:
            self._finished = True
            self.retval = getattr(e, 'value', None)  # for python >=3.3
            raise CoroutineComplete(callback=self._finished_cb)
        except Exception as e:
            self._finished = True
            raise raise_error(self, "Send raised exception:")

    def throw(self, exc):
        return self._coro.throw(exc)

    def close(self):
        return self._coro.close()

    def kill(self):
        """Kill a coroutine"""
        self.log.debug("kill() called on coroutine")
        cocotb.scheduler.unschedule(self)

    def _finished_cb(self):
        """Called when the coroutine completes.
            Allows us to mark the coroutine as finished so that boolean testing
            works.
            Also call any callbacks, usually the result of coroutine.join()"""
        self._finished = True

    def join(self):
        """Return a trigger that will fire when the wrapped coroutine exits"""
        return Join(self)

    def has_started(self):
        return self._started

    def __nonzero__(self):
        """Provide boolean testing
            if the coroutine has finished return false
            otherwise return true"""
        return not self._finished

    __bool__ = __nonzero__

    def sort_name(self):
        if self.stage is None:
            return "%s.%s" % (self.module, self.funcname)
        else:
            return "%s.%d.%s" % (self.module, self.stage, self.funcname)
Example #38
class external_waiter:
    def __init__(self):
        self._outcome = None
        self.thread = None
        self.event = Event()
        self.state = external_state.INIT
        self.cond = threading.Condition()
        self._log = SimLog("cocotb.external.thead.%s" % self.thread, id(self))

    @property
    def result(self):
        return self._outcome.get()

    def _propagate_state(self, new_state):
        with self.cond:
            if _debug:
                self._log.debug(
                    "Changing state from %d -> %d from %s" %
                    (self.state, new_state, threading.current_thread()))
            self.state = new_state
            self.cond.notify()

    def thread_done(self):
        if _debug:
            self._log.debug("Thread finished from %s" %
                            (threading.current_thread()))
        self._propagate_state(external_state.EXITED)

    def thread_suspend(self):
        self._propagate_state(external_state.PAUSED)

    def thread_start(self):
        if self.state > external_state.INIT:
            return

        if not self.thread.is_alive():
            self._propagate_state(external_state.RUNNING)
            self.thread.start()

    def thread_resume(self):
        self._propagate_state(external_state.RUNNING)

    def thread_wait(self):
        if _debug:
            self._log.debug("Waiting for the condition lock %s" %
                            threading.current_thread())

        with self.cond:
            while self.state == external_state.RUNNING:
                self.cond.wait()

            if _debug:
                if self.state == external_state.EXITED:
                    self._log.debug("Thread {} has exited from {}".format(
                        self.thread, threading.current_thread()))
                elif self.state == external_state.PAUSED:
                    self._log.debug("Thread %s has called yield from %s" %
                                    (self.thread, threading.current_thread()))
                elif self.state == external_state.RUNNING:
                    self._log.debug("Thread %s is in RUNNING from %d" %
                                    (self.thread, threading.current_thread()))

            if self.state == external_state.INIT:
                raise Exception("Thread %s state was not allowed from %s" %
                                (self.thread, threading.current_thread()))

        return self.state
Example #39
class Scheduler:
    """The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the :any:`react` method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn.

    .. attention::

       Implementors should not depend on the scheduling order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to ``cbReadOnlySynch`` (VPI) or ``vhpiCbRepEndOfTimeStep``
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to ``cbReadWriteSynch`` (VPI) or ``vhpiCbRepLastKnownDeltaCycle`` (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from Normal to Write by registering a :class:`~cocotb.triggers.ReadWrite`
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE = 3  # noqa
    _MODE_TERM = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _next_time_step = NextTimeStep()
    _read_write = ReadWrite()
    _read_only = ReadOnly()
    _timer1 = Timer(1)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # Use OrderedDict here for deterministic behavior (gh-934)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = _py_compat.insertion_ordered_dict()

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending (write_func, args), keyed by handle. Only the last scheduled write
        # in a timestep is performed, all the rest are discarded in python.
        self._write_calls = _py_compat.insertion_ordered_dict()

        self._pending_coros = []
        self._pending_triggers = []
        self._pending_threads = []
        self._pending_events = [
        ]  # Events we need to call set on once we've unwound

        self._terminate = False
        self._test = None
        self._main_thread = threading.current_thread()

        self._current_task = None

        self._is_reacting = False

        self._write_coro_inst = None
        self._writes_pending = Event()

    async def _do_writes(self):
        """ An internal coroutine that performs pending writes """
        while True:
            await self._writes_pending.wait()
            if self._mode != Scheduler._MODE_NORMAL:
                await self._next_time_step

            await self._read_write

            while self._write_calls:
                handle, (func, args) = self._write_calls.popitem()
                func(*args)
            self._writes_pending.clear()

    def _check_termination(self):
        """
        Handle a termination that causes us to move onto the next test.
        """
        if self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            if self._write_coro_inst is not None:
                self._write_coro_inst.kill()
                self._write_coro_inst = None

            for t in self._trigger2coros:
                t.unprime()

            if self._timer1.primed:
                self._timer1.unprime()

            self._timer1.prime(self._test_completed)
            self._trigger2coros = _py_compat.insertion_ordered_dict()
            self._terminate = False
            self._write_calls = _py_compat.insertion_ordered_dict()
            self._writes_pending.clear()
            self._mode = Scheduler._MODE_TERM

    def _test_completed(self, trigger=None):
        """Called after a test and its cleanup have completed"""
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            ctx = profiling_context()
        else:
            ctx = _py_compat.nullcontext()

        with ctx:
            self._mode = Scheduler._MODE_NORMAL
            if trigger is not None:
                trigger.unprime()

            # extract the current test, and clear it
            test = self._test
            self._test = None
            if test is None:
                raise InternalError(
                    "_test_completed called with no active test")
            if test._outcome is None:
                raise InternalError(
                    "_test_completed called with an incomplete test")

            # Issue previous test result
            if _debug:
                self.log.debug("Issue test result to regression object")

            # this may schedule another test
            cocotb.regression_manager.handle_result(test)

            # if it did, make sure we handle the test completing
            self._check_termination()

    def react(self, trigger):
        """
        .. deprecated:: 1.5
            This function is now private.
        """
        warnings.warn("This function is now private.",
                      DeprecationWarning,
                      stacklevel=2)
        return self._react(trigger)

    def _react(self, trigger):
        """
        Called when a trigger fires.

        We ensure that we only start the event loop once, rather than
        letting it recurse.
        """
        if self._is_reacting:
            # queue up the trigger, the event loop will get to it
            self._pending_triggers.append(trigger)
            return

        if self._pending_triggers:
            raise InternalError(
                "Expected all triggers to be handled but found {}".format(
                    self._pending_triggers))

        # start the event loop
        self._is_reacting = True
        try:
            self._event_loop(trigger)
        finally:
            self._is_reacting = False

    def _event_loop(self, trigger):
        """
        Run an event loop triggered by the given trigger.

        The loop will keep running until no further triggers fire.

        This should only be triggered by:
        * The beginning of a test, when there is no trigger to react to
        * A GPI trigger
        """
        if _profiling:
            ctx = profiling_context()
        else:
            ctx = _py_compat.nullcontext()

        with ctx:
            # When a trigger fires it is unprimed internally
            if _debug:
                self.log.debug("Trigger fired: %s" % str(trigger))
            # trigger.unprime()

            if self._mode == Scheduler._MODE_TERM:
                if _debug:
                    self.log.debug(
                        "Ignoring trigger %s since we're terminating" %
                        str(trigger))
                return

            if trigger is self._read_only:
                self._mode = Scheduler._MODE_READONLY
            # Only GPI triggers affect the simulator scheduling mode
            elif isinstance(trigger, GPITrigger):
                self._mode = Scheduler._MODE_NORMAL

            # work through triggers one by one
            is_first = True
            self._pending_triggers.append(trigger)
            while self._pending_triggers:
                trigger = self._pending_triggers.pop(0)

                if not is_first and isinstance(trigger, GPITrigger):
                    self.log.warning(
                        "A GPI trigger occurred after entering react - this "
                        "should not happen.")
                    assert False

                # this only exists to enable the warning above
                is_first = False

                # Scheduled coroutines may append to our waiting list so the first
                # thing to do is pop all entries waiting on this trigger.
                try:
                    scheduling = self._trigger2coros.pop(trigger)
                except KeyError:
                    # GPI triggers should only ever be pending if there is an
                    # associated coroutine waiting on that trigger; otherwise it
                    # would have been unprimed already
                    if isinstance(trigger, GPITrigger):
                        self.log.critical(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                        trigger.log.info("I'm the culprit")
                    # For Python triggers this isn't actually an error - we might do
                    # event.set() without knowing whether any coroutines are actually
                    # waiting on this event, for example
                    elif _debug:
                        self.log.debug(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                    del trigger
                    continue

                if _debug:
                    debugstr = "\n\t".join(
                        [coro._coro.__qualname__ for coro in scheduling])
                    if len(scheduling) > 0:
                        debugstr = "\n\t" + debugstr
                    self.log.debug("%d pending coroutines for event %s%s" %
                                   (len(scheduling), str(trigger), debugstr))

                # This trigger isn't needed any more
                trigger.unprime()

                for coro in scheduling:
                    if coro._outcome is not None:
                        # coroutine was killed by another coroutine waiting on the same trigger
                        continue
                    if _debug:
                        self.log.debug("Scheduling coroutine %s" %
                                       (coro._coro.__qualname__))
                    self._schedule(coro, trigger=trigger)
                    if _debug:
                        self.log.debug("Scheduled coroutine %s" %
                                       (coro._coro.__qualname__))

                    # remove our reference to the objects at the end of each loop,
                    # to try and avoid them being destroyed at a weird time (as
                    # happened in gh-957)
                    del coro

                # Schedule may have queued up some events so we'll burn through those
                while self._pending_events:
                    if _debug:
                        self.log.debug("Scheduling pending event %s" %
                                       (str(self._pending_events[0])))
                    self._pending_events.pop(0).set()

                # remove our reference to the objects at the end of each loop,
                # to try and avoid them being destroyed at a weird time (as
                # happened in gh-957)
                del trigger
                del scheduling

            # no more pending triggers
            self._check_termination()
            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")

    def unschedule(self, coro):
        """
        .. deprecated:: 1.5
            This function is now private.
        """
        warnings.warn("This function is now private.",
                      DeprecationWarning,
                      stacklevel=2)
        return self._unschedule(coro)

    def _unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        # Unprime the trigger this coroutine is waiting on
        trigger = coro._trigger
        if trigger is not None:
            coro._trigger = None
            if coro in self._trigger2coros.setdefault(trigger, []):
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
                del self._trigger2coros[trigger]

        assert self._test is not None

        if coro is self._test:
            if _debug:
                self.log.debug(f"Unscheduling test {coro}")

            if not self._terminate:
                self._terminate = True
                self._cleanup()

        elif Join(coro) in self._trigger2coros:
            self._react(Join(coro))
        else:
            try:
                # throws an error if the background coroutine errored
                # and no one was monitoring it
                coro._outcome.get()
            except (TestComplete, AssertionError) as e:
                coro.log.info("Test stopped by this forked coroutine")
                e = remove_traceback_frames(e, ['_unschedule', 'get'])
                self._test.abort(e)
            except Exception as e:
                coro.log.error("Exception raised by this forked coroutine")
                e = remove_traceback_frames(e, ['_unschedule', 'get'])
                self._test.abort(e)

    def _schedule_write(self, handle, write_func, *args):
        """ Queue `write_func` to be called on the next ReadWrite trigger. """
        if self._mode == Scheduler._MODE_READONLY:
            raise Exception(
                f"Write to object {handle._name} was scheduled during a read-only sync phase."
            )

        # TODO: we should be able to better keep track of when this needs to
        # be scheduled
        if self._write_coro_inst is None:
            self._write_coro_inst = self.add(self._do_writes())

        self._write_calls[handle] = (write_func, args)
        self._writes_pending.set()
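    # Note on usage (an assumption about the call path, not original code):
    # user code is not expected to call _schedule_write() directly; a handle
    # assignment such as
    #
    #     dut.data_in <= 0xAA               # hypothetical signal name
    #
    # should funnel through here, so that only the last write to each handle
    # in a timestep is applied at the next ReadWrite phase.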

    def _resume_coro_upon(self, coro, trigger):
        """Schedule `coro` to be resumed when `trigger` fires."""
        coro._trigger = trigger

        trigger_coros = self._trigger2coros.setdefault(trigger, [])
        if coro is self._write_coro_inst:
            # Our internal write coroutine always runs before any user coroutines.
            # This preserves the behavior prior to the refactoring of writes to
            # this coroutine.
            trigger_coros.insert(0, coro)
        else:
            # Everything else joins the back of the queue
            trigger_coros.append(coro)

        if not trigger.primed:

            if trigger_coros != [coro]:
                # should never happen
                raise InternalError(
                    "More than one coroutine waiting on an unprimed trigger")

            try:
                trigger.prime(self._react)
            except Exception as e:
                # discard the trigger we associated, it will never fire
                self._trigger2coros.pop(trigger)

                # replace it with a new trigger that throws back the exception
                self._resume_coro_upon(
                    coro,
                    NullTrigger(name="Trigger.prime() Error",
                                outcome=outcomes.Error(e)))

    def queue(self, coroutine):
        """
        .. deprecated:: 1.5
            This function is now private.
        """
        warnings.warn("This function is now private.",
                      DeprecationWarning,
                      stacklevel=2)
        return self._queue(coroutine)

    def _queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def queue_function(self, coro):
        """
        .. deprecated:: 1.5
            This function is now private.
        """
        warnings.warn("This function is now private.",
                      DeprecationWarning,
                      stacklevel=2)
        return self._queue_function(coro)

    def _queue_function(self, coro):
        """Queue a coroutine for execution and move the containing thread
        so that it does not block execution of the main thread any longer.
        """
        # We should be able to find ourselves inside the _pending_threads list
        matching_threads = [
            t for t in self._pending_threads
            if t.thread == threading.current_thread()
        ]
        if len(matching_threads) == 0:
            raise RuntimeError(
                "queue_function called from unrecognized thread")

        # Raises if there is more than one match. This can never happen, since
        # each entry always has a unique thread.
        t, = matching_threads

        async def wrapper():
            # This function runs in the scheduler thread
            try:
                _outcome = outcomes.Value(await coro)
            except BaseException as e:
                _outcome = outcomes.Error(e)
            event.outcome = _outcome
            # Notify the current (scheduler) thread that we are about to wake
            # up the background (`@external`) thread, making sure to do so
            # before the background thread gets a chance to go back to sleep by
            # calling thread_suspend.
            # We need to do this here in the scheduler thread so that no more
            # coroutines run until the background thread goes back to sleep.
            t.thread_resume()
            event.set()

        event = threading.Event()
        self._pending_coros.append(cocotb.decorators.RunningTask(wrapper()))
        # The scheduler thread blocks in `thread_wait`, and is woken when we
        # call `thread_suspend` - so we need to make sure the coroutine is
        # queued before that.
        t.thread_suspend()
        # This blocks the calling `@external` thread until the coroutine finishes
        event.wait()
        return event.outcome.get()
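    # Illustrative sketch (assumed usage, not original code): _queue_function
    # is what services a cocotb.function-wrapped coroutine when it is called
    # from inside a cocotb.external thread, e.g.
    #
    #     @cocotb.function
    #     async def wait_cycles(dut, n):    # hypothetical helper
    #         for _ in range(n):
    #             await RisingEdge(dut.clk)
    #
    #     @cocotb.external
    #     def blocking_work(dut):
    #         wait_cycles(dut, 10)          # blocks only this external thread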

    def run_in_executor(self, func, *args, **kwargs):
        """
        .. deprecated:: 1.5
            This function is now private.
        """
        warnings.warn("This function is now private.",
                      DeprecationWarning,
                      stacklevel=2)
        return self._run_in_executor(func, *args, **kwargs)

    def _run_in_executor(self, func, *args, **kwargs):
        """Run the coroutine in a separate execution thread
        and return an awaitable object for the caller.
        """

        # Create a thread
        # Create a trigger that is called as a result of the thread finishing
        # Create an Event object that the caller can await on
        # Event object set when the thread finishes execution, this blocks the
        #   calling coroutine (but not the thread) until the external completes

        def execute_external(func, _waiter):
            _waiter._outcome = outcomes.capture(func, *args, **kwargs)
            if _debug:
                self.log.debug("Execution of external routine done %s" %
                               threading.current_thread())
            _waiter.thread_done()

        async def wrapper():
            waiter = external_waiter()
            thread = threading.Thread(group=None,
                                      target=execute_external,
                                      name=func.__qualname__ + "_thread",
                                      args=([func, waiter]),
                                      kwargs={})

            waiter.thread = thread
            self._pending_threads.append(waiter)

            await waiter.event.wait()

            return waiter.result  # raises if there was an exception

        return wrapper()
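    # Illustrative sketch (assumed usage, not original code): this is the
    # machinery behind cocotb.external, which lets a coroutine await a
    # blocking Python function without stalling the scheduler, e.g.
    #
    #     def read_file(path):              # plain blocking function
    #         with open(path) as f:
    #             return f.read()
    #
    #     data = await cocotb.external(read_file)("settings.txt")  # hypothetical path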

    @staticmethod
    def create_task(coroutine: Any) -> RunningTask:
        """ Checks to see if the given object is a schedulable coroutine object and if so, returns it """

        if isinstance(coroutine, RunningTask):
            return coroutine
        if inspect.iscoroutine(coroutine):
            return RunningTask(coroutine)
        if inspect.iscoroutinefunction(coroutine):
            raise TypeError(
                "Coroutine function {} should be called prior to being "
                "scheduled.".format(coroutine))
        if isinstance(coroutine, cocotb.decorators.coroutine):
            raise TypeError(
                "Attempt to schedule a coroutine that hasn't started: {}.\n"
                "Did you forget to add parentheses to the @cocotb.test() "
                "decorator?".format(coroutine))
        if sys.version_info >= (3, 6) and inspect.isasyncgen(coroutine):
            raise TypeError(
                "{} is an async generator, not a coroutine. "
                "You likely used the yield keyword instead of await.".format(
                    coroutine.__qualname__))
        raise TypeError(
            "Attempt to add an object of type {} to the scheduler, which "
            "isn't a coroutine: {!r}\n"
            "Did you forget to use the @cocotb.coroutine decorator?".format(
                type(coroutine), coroutine))

    def add(self, coroutine: Union[RunningTask, Coroutine]) -> RunningTask:
        """Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error messages in the event of common gotchas.
        """

        task = self.create_task(coroutine)

        if _debug:
            self.log.debug("Adding new coroutine %s" % task._coro.__qualname__)

        self._schedule(task)
        self._check_termination()
        return task

    def start_soon(self, coro: Union[Coroutine, RunningTask]) -> RunningTask:
        """
        Schedule a coroutine to be run concurrently, starting after the current coroutine yields control.

        In contrast to :func:`~cocotb.fork` which starts the given coroutine immediately, this function
        starts the given coroutine only after the current coroutine yields control.
        This is useful when the coroutine to be forked has logic before the first
        :keyword:`await` that may not be safe to execute immediately.
        """

        task = self.create_task(coro)

        if _debug:
            self.log.debug("Queueing a new coroutine %s" %
                           task._coro.__qualname__)

        self._queue(task)
        return task

    def add_test(self, test_coro):
        """
        .. deprecated:: 1.5
            This function is now private.
        """
        warnings.warn("This function is now private.",
                      DeprecationWarning,
                      stacklevel=2)
        return self._add_test(test_coro)

    def _add_test(self, test_coro):
        """Called by the regression manager to queue the next test"""
        if self._test is not None:
            raise InternalError("Test was added while another was in progress")

        self._test = test_coro
        self._resume_coro_upon(
            test_coro,
            NullTrigger(name=f"Start {test_coro!s}",
                        outcome=outcomes.Value(None)))

    # This collection of functions parses a trigger out of the object
    # that was yielded by a coroutine, converting `list` -> `Waitable`,
    # `Waitable` -> `RunningTask`, `RunningTask` -> `Trigger`.
    # Doing them as separate functions allows us to avoid repeating unnecessary
    # `isinstance` checks.

    def _trigger_from_started_coro(
            self, result: cocotb.decorators.RunningTask) -> Trigger:
        if _debug:
            self.log.debug("Joining to already running coroutine: %s" %
                           result._coro.__qualname__)
        return result.join()

    def _trigger_from_unstarted_coro(
            self, result: cocotb.decorators.RunningTask) -> Trigger:
        self._queue(result)
        if _debug:
            self.log.debug("Scheduling nested coroutine: %s" %
                           result._coro.__qualname__)
        return result.join()

    def _trigger_from_waitable(self,
                               result: cocotb.triggers.Waitable) -> Trigger:
        return self._trigger_from_unstarted_coro(
            cocotb.decorators.RunningTask(result._wait()))

    def _trigger_from_list(self, result: list) -> Trigger:
        return self._trigger_from_waitable(cocotb.triggers.First(*result))
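    # Illustrative note (not original code; `dut.clk` is a hypothetical
    # signal): this is the path behind the "wait for whichever fires first"
    # idiom, e.g.
    #
    #     result = await First(RisingEdge(dut.clk), Timer(100, units='ns'))
    #
    # and, in older yield-based coroutines, `yield [trigger_a, trigger_b]`
    # is converted through this same function.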

    def _trigger_from_any(self, result) -> Trigger:
        """Convert a yielded object into a Trigger instance"""
        # note: the order of these can significantly impact performance

        if isinstance(result, Trigger):
            return result

        if isinstance(result, cocotb.decorators.RunningTask):
            if not result.has_started():
                return self._trigger_from_unstarted_coro(result)
            else:
                return self._trigger_from_started_coro(result)

        if inspect.iscoroutine(result):
            return self._trigger_from_unstarted_coro(
                cocotb.decorators.RunningTask(result))

        if isinstance(result, list):
            return self._trigger_from_list(result)

        if isinstance(result, cocotb.triggers.Waitable):
            return self._trigger_from_waitable(result)

        if sys.version_info >= (3, 6) and inspect.isasyncgen(result):
            raise TypeError(
                "{} is an async generator, not a coroutine. "
                "You likely used the yield keyword instead of await.".format(
                    result.__qualname__))

        raise TypeError(
            "Coroutine yielded an object of type {}, which the scheduler can't "
            "handle: {!r}\n"
            "Did you forget to decorate with @cocotb.coroutine?".format(
                type(result), result))

    @contextmanager
    def _task_context(self, task):
        """Context manager for the currently running task."""
        old_task = self._current_task
        self._current_task = task
        try:
            yield
        finally:
            self._current_task = old_task

    def schedule(self, coroutine, trigger=None):
        """
        .. deprecated:: 1.5
            This function is now private.
        """
        warnings.warn("This function is now private.",
                      DeprecationWarning,
                      stacklevel=2)
        return self._schedule(coroutine, trigger)

    def _schedule(self, coroutine, trigger=None):
        """Schedule a coroutine by calling the send method.

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule.
            trigger (cocotb.triggers.Trigger): The trigger that caused this
                coroutine to be scheduled.
        """
        with self._task_context(coroutine):
            if trigger is None:
                send_outcome = outcomes.Value(None)
            else:
                send_outcome = trigger._outcome
            if _debug:
                self.log.debug(f"Scheduling with {send_outcome}")

            coro_completed = False
            try:
                coroutine._trigger = None
                result = coroutine._advance(send_outcome)
                if _debug:
                    self.log.debug("Coroutine %s yielded %s (mode %d)" %
                                   (coroutine._coro.__qualname__, str(result),
                                    self._mode))

            except cocotb.decorators.CoroutineComplete:
                if _debug:
                    self.log.debug("Coroutine {} completed with {}".format(
                        coroutine, coroutine._outcome))
                coro_completed = True

            # this can't go in the else above, as that causes unwanted exception
            # chaining
            if coro_completed:
                self._unschedule(coroutine)

            # Don't handle the result if we're shutting down
            if self._terminate:
                return

            if not coro_completed:
                try:
                    result = self._trigger_from_any(result)
                except TypeError as exc:
                    # restart this coroutine with an exception object telling it that
                    # it wasn't allowed to yield that
                    result = NullTrigger(outcome=outcomes.Error(exc))

                self._resume_coro_upon(coroutine, result)

            # We do not return from here until pending threads have completed,
            # but only from the main thread; this seems like it could be
            # problematic in cases where a sim might change what this thread is.

            if self._main_thread is threading.current_thread():

                for ext in self._pending_threads:
                    ext.thread_start()
                    if _debug:
                        self.log.debug("Blocking from {} on {}".format(
                            threading.current_thread(), ext.thread))
                    state = ext.thread_wait()
                    if _debug:
                        self.log.debug(
                            "Back from wait on self %s with newstate %d" %
                            (threading.current_thread(), state))
                    if state == external_state.EXITED:
                        self._pending_threads.remove(ext)
                        self._pending_events.append(ext.event)

            # Handle any newly queued coroutines that need to be scheduled
            while self._pending_coros:
                self.add(self._pending_coros.pop(0))

    def finish_test(self, exc):
        """
        .. deprecated:: 1.5
            This function is now private.
        """
        warnings.warn("This function is now private.",
                      DeprecationWarning,
                      stacklevel=2)
        return self._finish_test(exc)

    def _finish_test(self, exc):
        self._test.abort(exc)
        self._check_termination()

    def finish_scheduler(self, exc):
        """
        .. deprecated:: 1.5
            This function is now private.
        """
        warnings.warn("This function is now private.",
                      DeprecationWarning,
                      stacklevel=2)
        return self._finish_scheduler(exc)

    def _finish_scheduler(self, exc):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed.
        """
        # If there is an error during cocotb initialization, self._test may not
        # have been set yet. Don't cause another Python exception here.

        if self._test:
            self.log.debug("Issue sim closedown result to regression object")
            self._test.abort(exc)
            cocotb.regression_manager.handle_result(self._test)

    def cleanup(self):
        """
        .. deprecated:: 1.5
            This function is now private.
        """
        warnings.warn("This function is now private.",
                      DeprecationWarning,
                      stacklevel=2)
        return self._cleanup()

    def _cleanup(self):
        """Clear up all our state.

        Unprime all pending triggers and kill off any coroutines, stop all externals.
        """
        # copy since we modify this in kill
        items = list(self._trigger2coros.items())

        # reversing seems to fix gh-928, although the order is still somewhat
        # arbitrary.
        for trigger, waiting in items[::-1]:
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()

        if self._main_thread is not threading.current_thread():
            raise Exception("Cleanup() called outside of the main thread")

        for ext in self._pending_threads:
            self.log.warning("Waiting for %s to exit", ext.thread)
Ejemplo n.º 40
0
class TB(object):
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.rc.max_payload_size = 0x1  # 256 bytes
        self.rc.max_read_request_size = 0x2  # 512 bytes
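        # Note (added for clarity): these fields appear to use the standard
        # PCIe encoding where the size is 128 << value bytes, so 0x1 -> 256
        # bytes and 0x2 -> 512 bytes, matching the comments above.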

        self.dev = UltraScalePlusPcieDevice(
            # configuration options
            pcie_generation=3,
            pcie_link_width=16,
            user_clk_frequency=250e6,
            alignment="dword",
            cq_cc_straddle=False,
            rq_rc_straddle=False,
            rc_4tlp_straddle=False,
            enable_pf1=False,
            enable_client_tag=True,
            enable_extended_tag=True,
            enable_parity=False,
            enable_rx_msg_interface=False,
            enable_sriov=False,
            enable_extended_configuration=False,
            enable_pf0_msi=True,
            enable_pf1_msi=False,

            # signals
            # Clock and Reset Interface
            user_clk=dut.clk_250mhz,
            user_reset=dut.rst_250mhz,
            # user_lnk_up
            # sys_clk
            # sys_clk_gt
            # sys_reset
            # phy_rdy_out

            # Requester reQuest Interface
            rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
            pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
            pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
            pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
            pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
            # pcie_rq_tag0
            # pcie_rq_tag1
            # pcie_rq_tag_av
            # pcie_rq_tag_vld0
            # pcie_rq_tag_vld1

            # Requester Completion Interface
            rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),

            # Completer reQuest Interface
            cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
            # pcie_cq_np_req
            # pcie_cq_np_req_count

            # Completer Completion Interface
            cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),

            # Transmit Flow Control Interface
            # pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
            # pcie_tfc_npd_av=dut.pcie_tfc_npd_av,

            # Configuration Management Interface
            cfg_mgmt_addr=dut.cfg_mgmt_addr,
            cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
            cfg_mgmt_write=dut.cfg_mgmt_write,
            cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
            cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
            cfg_mgmt_read=dut.cfg_mgmt_read,
            cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
            cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
            # cfg_mgmt_debug_access

            # Configuration Status Interface
            # cfg_phy_link_down
            # cfg_phy_link_status
            # cfg_negotiated_width
            # cfg_current_speed
            cfg_max_payload=dut.cfg_max_payload,
            cfg_max_read_req=dut.cfg_max_read_req,
            # cfg_function_status
            # cfg_vf_status
            # cfg_function_power_state
            # cfg_vf_power_state
            # cfg_link_power_state
            # cfg_err_cor_out
            # cfg_err_nonfatal_out
            # cfg_err_fatal_out
            # cfg_local_error_out
            # cfg_local_error_valid
            # cfg_rx_pm_state
            # cfg_tx_pm_state
            # cfg_ltssm_state
            # cfg_rcb_status
            # cfg_obff_enable
            # cfg_pl_status_change
            # cfg_tph_requester_enable
            # cfg_tph_st_mode
            # cfg_vf_tph_requester_enable
            # cfg_vf_tph_st_mode

            # Configuration Received Message Interface
            # cfg_msg_received
            # cfg_msg_received_data
            # cfg_msg_received_type

            # Configuration Transmit Message Interface
            # cfg_msg_transmit
            # cfg_msg_transmit_type
            # cfg_msg_transmit_data
            # cfg_msg_transmit_done

            # Configuration Flow Control Interface
            cfg_fc_ph=dut.cfg_fc_ph,
            cfg_fc_pd=dut.cfg_fc_pd,
            cfg_fc_nph=dut.cfg_fc_nph,
            cfg_fc_npd=dut.cfg_fc_npd,
            cfg_fc_cplh=dut.cfg_fc_cplh,
            cfg_fc_cpld=dut.cfg_fc_cpld,
            cfg_fc_sel=dut.cfg_fc_sel,

            # Configuration Control Interface
            # cfg_hot_reset_in
            # cfg_hot_reset_out
            # cfg_config_space_enable
            # cfg_dsn
            # cfg_bus_number
            # cfg_ds_port_number
            # cfg_ds_bus_number
            # cfg_ds_device_number
            # cfg_ds_function_number
            # cfg_power_state_change_ack
            # cfg_power_state_change_interrupt
            cfg_err_cor_in=dut.status_error_cor,
            cfg_err_uncor_in=dut.status_error_uncor,
            # cfg_flr_in_process
            # cfg_flr_done
            # cfg_vf_flr_in_process
            # cfg_vf_flr_func_num
            # cfg_vf_flr_done
            # cfg_pm_aspm_l1_entry_reject
            # cfg_pm_aspm_tx_l0s_entry_disable
            # cfg_req_pm_transition_l23_ready
            # cfg_link_training_enable

            # Configuration Interrupt Controller Interface
            # cfg_interrupt_int
            # cfg_interrupt_sent
            # cfg_interrupt_pending
            cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
            cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
            cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
            cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
            # cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
            cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
            cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
            cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
            # cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
            cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
            cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
            # cfg_interrupt_msix_enable
            # cfg_interrupt_msix_mask
            # cfg_interrupt_msix_vf_enable
            # cfg_interrupt_msix_vf_mask
            # cfg_interrupt_msix_address
            # cfg_interrupt_msix_data
            # cfg_interrupt_msix_int
            # cfg_interrupt_msix_vec_pending
            # cfg_interrupt_msix_vec_pending_status
            cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
            cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
            cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
            # cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
            # cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,

            # Configuration Extend Interface
            # cfg_ext_read_received
            # cfg_ext_write_received
            # cfg_ext_register_number
            # cfg_ext_function_number
            # cfg_ext_write_data
            # cfg_ext_write_byte_enable
            # cfg_ext_read_data
            # cfg_ext_read_data_valid
        )

        # self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.driver = mqnic.Driver()

        self.dev.functions[0].msi_cap.msi_multiple_message_capable = 5

        self.dev.functions[0].configure_bar(
            0,
            2**len(dut.core_inst.core_pcie_inst.axil_ctrl_araddr),
            ext=True,
            prefetch=True)
        if hasattr(dut.core_inst.core_pcie_inst, 'pcie_app_ctrl'):
            self.dev.functions[0].configure_bar(
                2,
                2**len(dut.core_inst.core_pcie_inst.axil_app_ctrl_araddr),
                ext=True,
                prefetch=True)

        # Ethernet
        cocotb.start_soon(Clock(dut.qsfp0_rx_clk, 3.102, units="ns").start())
        cocotb.start_soon(Clock(dut.qsfp0_tx_clk, 3.102, units="ns").start())

        self.qsfp0_mac = EthMac(
            tx_clk=dut.qsfp0_tx_clk,
            tx_rst=dut.qsfp0_tx_rst,
            tx_bus=AxiStreamBus.from_prefix(dut, "qsfp0_tx_axis"),
            tx_ptp_time=dut.qsfp0_tx_ptp_time,
            tx_ptp_ts=dut.qsfp0_tx_ptp_ts,
            tx_ptp_ts_tag=dut.qsfp0_tx_ptp_ts_tag,
            tx_ptp_ts_valid=dut.qsfp0_tx_ptp_ts_valid,
            rx_clk=dut.qsfp0_rx_clk,
            rx_rst=dut.qsfp0_rx_rst,
            rx_bus=AxiStreamBus.from_prefix(dut, "qsfp0_rx_axis"),
            rx_ptp_time=dut.qsfp0_rx_ptp_time,
            ifg=12,
            speed=100e9)

        cocotb.start_soon(Clock(dut.qsfp1_rx_clk, 3.102, units="ns").start())
        cocotb.start_soon(Clock(dut.qsfp1_tx_clk, 3.102, units="ns").start())

        self.qsfp1_mac = EthMac(
            tx_clk=dut.qsfp1_tx_clk,
            tx_rst=dut.qsfp1_tx_rst,
            tx_bus=AxiStreamBus.from_prefix(dut, "qsfp1_tx_axis"),
            tx_ptp_time=dut.qsfp1_tx_ptp_time,
            tx_ptp_ts=dut.qsfp1_tx_ptp_ts,
            tx_ptp_ts_tag=dut.qsfp1_tx_ptp_ts_tag,
            tx_ptp_ts_valid=dut.qsfp1_tx_ptp_ts_valid,
            rx_clk=dut.qsfp1_rx_clk,
            rx_rst=dut.qsfp1_rx_rst,
            rx_bus=AxiStreamBus.from_prefix(dut, "qsfp1_rx_axis"),
            rx_ptp_time=dut.qsfp1_rx_ptp_time,
            ifg=12,
            speed=100e9)

        dut.sw.setimmediatevalue(0)

        dut.i2c_scl_i.setimmediatevalue(1)
        dut.i2c_sda_i.setimmediatevalue(1)

        dut.qsfp0_modprsl.setimmediatevalue(0)
        dut.qsfp0_intl.setimmediatevalue(1)

        dut.qsfp1_modprsl.setimmediatevalue(0)
        dut.qsfp1_intl.setimmediatevalue(1)

        dut.qspi_dq_i.setimmediatevalue(0)

        self.loopback_enable = False
        cocotb.start_soon(self._run_loopback())

    async def init(self):

        self.dut.qsfp0_rx_rst.setimmediatevalue(0)
        self.dut.qsfp0_tx_rst.setimmediatevalue(0)
        self.dut.qsfp1_rx_rst.setimmediatevalue(0)
        self.dut.qsfp1_tx_rst.setimmediatevalue(0)

        await RisingEdge(self.dut.clk_250mhz)
        await RisingEdge(self.dut.clk_250mhz)

        self.dut.qsfp0_rx_rst.setimmediatevalue(1)
        self.dut.qsfp0_tx_rst.setimmediatevalue(1)
        self.dut.qsfp1_rx_rst.setimmediatevalue(1)
        self.dut.qsfp1_tx_rst.setimmediatevalue(1)

        await FallingEdge(self.dut.rst_250mhz)
        await Timer(100, 'ns')

        await RisingEdge(self.dut.clk_250mhz)
        await RisingEdge(self.dut.clk_250mhz)

        self.dut.qsfp0_rx_rst.setimmediatevalue(0)
        self.dut.qsfp0_tx_rst.setimmediatevalue(0)
        self.dut.qsfp1_rx_rst.setimmediatevalue(0)
        self.dut.qsfp1_tx_rst.setimmediatevalue(0)

        await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)

    async def _run_loopback(self):
        while True:
            await RisingEdge(self.dut.clk_250mhz)

            if self.loopback_enable:
                if not self.qsfp0_mac.tx.empty():
                    await self.qsfp0_mac.rx.send(await
                                                 self.qsfp0_mac.tx.recv())
                if not self.qsfp1_mac.tx.empty():
                    await self.qsfp1_mac.rx.send(await
                                                 self.qsfp1_mac.tx.recv())
Ejemplo n.º 41
0
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.start_soon(Clock(dut.clk, 6.4, units="ns").start())

        # Ethernet
        cocotb.start_soon(Clock(dut.qsfp0_rx_clk_1, 6.4, units="ns").start())
        self.qsfp0_1_source = XgmiiSource(dut.qsfp0_rxd_1, dut.qsfp0_rxc_1,
                                          dut.qsfp0_rx_clk_1,
                                          dut.qsfp0_rx_rst_1)
        cocotb.start_soon(Clock(dut.qsfp0_tx_clk_1, 6.4, units="ns").start())
        self.qsfp0_1_sink = XgmiiSink(dut.qsfp0_txd_1, dut.qsfp0_txc_1,
                                      dut.qsfp0_tx_clk_1, dut.qsfp0_tx_rst_1)

        cocotb.start_soon(Clock(dut.qsfp0_rx_clk_2, 6.4, units="ns").start())
        self.qsfp0_2_source = XgmiiSource(dut.qsfp0_rxd_2, dut.qsfp0_rxc_2,
                                          dut.qsfp0_rx_clk_2,
                                          dut.qsfp0_rx_rst_2)
        cocotb.start_soon(Clock(dut.qsfp0_tx_clk_2, 6.4, units="ns").start())
        self.qsfp0_2_sink = XgmiiSink(dut.qsfp0_txd_2, dut.qsfp0_txc_2,
                                      dut.qsfp0_tx_clk_2, dut.qsfp0_tx_rst_2)

        cocotb.start_soon(Clock(dut.qsfp0_rx_clk_3, 6.4, units="ns").start())
        self.qsfp0_3_source = XgmiiSource(dut.qsfp0_rxd_3, dut.qsfp0_rxc_3,
                                          dut.qsfp0_rx_clk_3,
                                          dut.qsfp0_rx_rst_3)
        cocotb.start_soon(Clock(dut.qsfp0_tx_clk_3, 6.4, units="ns").start())
        self.qsfp0_3_sink = XgmiiSink(dut.qsfp0_txd_3, dut.qsfp0_txc_3,
                                      dut.qsfp0_tx_clk_3, dut.qsfp0_tx_rst_3)

        cocotb.start_soon(Clock(dut.qsfp0_rx_clk_4, 6.4, units="ns").start())
        self.qsfp0_4_source = XgmiiSource(dut.qsfp0_rxd_4, dut.qsfp0_rxc_4,
                                          dut.qsfp0_rx_clk_4,
                                          dut.qsfp0_rx_rst_4)
        cocotb.start_soon(Clock(dut.qsfp0_tx_clk_4, 6.4, units="ns").start())
        self.qsfp0_4_sink = XgmiiSink(dut.qsfp0_txd_4, dut.qsfp0_txc_4,
                                      dut.qsfp0_tx_clk_4, dut.qsfp0_tx_rst_4)

        cocotb.start_soon(Clock(dut.qsfp1_rx_clk_1, 6.4, units="ns").start())
        self.qsfp1_1_source = XgmiiSource(dut.qsfp1_rxd_1, dut.qsfp1_rxc_1,
                                          dut.qsfp1_rx_clk_1,
                                          dut.qsfp1_rx_rst_1)
        cocotb.start_soon(Clock(dut.qsfp1_tx_clk_1, 6.4, units="ns").start())
        self.qsfp1_1_sink = XgmiiSink(dut.qsfp1_txd_1, dut.qsfp1_txc_1,
                                      dut.qsfp1_tx_clk_1, dut.qsfp1_tx_rst_1)

        cocotb.start_soon(Clock(dut.qsfp1_rx_clk_2, 6.4, units="ns").start())
        self.qsfp1_2_source = XgmiiSource(dut.qsfp1_rxd_2, dut.qsfp1_rxc_2,
                                          dut.qsfp1_rx_clk_2,
                                          dut.qsfp1_rx_rst_2)
        cocotb.start_soon(Clock(dut.qsfp1_tx_clk_2, 6.4, units="ns").start())
        self.qsfp1_2_sink = XgmiiSink(dut.qsfp1_txd_2, dut.qsfp1_txc_2,
                                      dut.qsfp1_tx_clk_2, dut.qsfp1_tx_rst_2)

        cocotb.start_soon(Clock(dut.qsfp1_rx_clk_3, 6.4, units="ns").start())
        self.qsfp1_3_source = XgmiiSource(dut.qsfp1_rxd_3, dut.qsfp1_rxc_3,
                                          dut.qsfp1_rx_clk_3,
                                          dut.qsfp1_rx_rst_3)
        cocotb.start_soon(Clock(dut.qsfp1_tx_clk_3, 6.4, units="ns").start())
        self.qsfp1_3_sink = XgmiiSink(dut.qsfp1_txd_3, dut.qsfp1_txc_3,
                                      dut.qsfp1_tx_clk_3, dut.qsfp1_tx_rst_3)

        cocotb.start_soon(Clock(dut.qsfp1_rx_clk_4, 6.4, units="ns").start())
        self.qsfp1_4_source = XgmiiSource(dut.qsfp1_rxd_4, dut.qsfp1_rxc_4,
                                          dut.qsfp1_rx_clk_4,
                                          dut.qsfp1_rx_rst_4)
        cocotb.start_soon(Clock(dut.qsfp1_tx_clk_4, 6.4, units="ns").start())
        self.qsfp1_4_sink = XgmiiSink(dut.qsfp1_txd_4, dut.qsfp1_txc_4,
                                      dut.qsfp1_tx_clk_4, dut.qsfp1_tx_rst_4)
Ejemplo n.º 42
0
        msg = (
            "Coverage collection requested but coverage module not available"
            "\n"
            "Import error was: %s\n" % repr(e))
        sys.stderr.write(msg)


def _my_import(name: str) -> Any:
    mod = __import__(name)
    components = name.split('.')
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod


_logger = SimLog(__name__)


class RegressionManager:
    """Encapsulates all regression capability into a single place"""
    def __init__(self, dut: SimHandle, tests: Iterable[Test]):
        """
        Args:
            dut (SimHandle): The root handle to pass into test functions.
            tests (Iterable[Test]): tests to run
        """
        self._dut = dut
        self._test = None
        self._test_task = None
        self._test_start_time = None
        self._test_start_sim_time = None
Ejemplo n.º 43
0
    def log(self):
        return SimLog("cocotb.%s" % (self.__class__.__name__), id(self))
Ejemplo n.º 44
0
class TB:
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.start_soon(Clock(dut.clk, 6.4, units="ns").start())

        # Ethernet
        cocotb.start_soon(Clock(dut.qsfp_rx_clk_1, 6.4, units="ns").start())
        self.qsfp_1_source = XgmiiSource(dut.qsfp_rxd_1, dut.qsfp_rxc_1, dut.qsfp_rx_clk_1, dut.qsfp_rx_rst_1)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_1, 6.4, units="ns").start())
        self.qsfp_1_sink = XgmiiSink(dut.qsfp_txd_1, dut.qsfp_txc_1, dut.qsfp_tx_clk_1, dut.qsfp_tx_rst_1)

        cocotb.start_soon(Clock(dut.qsfp_rx_clk_2, 6.4, units="ns").start())
        self.qsfp_2_source = XgmiiSource(dut.qsfp_rxd_2, dut.qsfp_rxc_2, dut.qsfp_rx_clk_2, dut.qsfp_rx_rst_2)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_2, 6.4, units="ns").start())
        self.qsfp_2_sink = XgmiiSink(dut.qsfp_txd_2, dut.qsfp_txc_2, dut.qsfp_tx_clk_2, dut.qsfp_tx_rst_2)

        cocotb.start_soon(Clock(dut.qsfp_rx_clk_3, 6.4, units="ns").start())
        self.qsfp_3_source = XgmiiSource(dut.qsfp_rxd_3, dut.qsfp_rxc_3, dut.qsfp_rx_clk_3, dut.qsfp_rx_rst_3)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_3, 6.4, units="ns").start())
        self.qsfp_3_sink = XgmiiSink(dut.qsfp_txd_3, dut.qsfp_txc_3, dut.qsfp_tx_clk_3, dut.qsfp_tx_rst_3)

        cocotb.start_soon(Clock(dut.qsfp_rx_clk_4, 6.4, units="ns").start())
        self.qsfp_4_source = XgmiiSource(dut.qsfp_rxd_4, dut.qsfp_rxc_4, dut.qsfp_rx_clk_4, dut.qsfp_rx_rst_4)
        cocotb.start_soon(Clock(dut.qsfp_tx_clk_4, 6.4, units="ns").start())
        self.qsfp_4_sink = XgmiiSink(dut.qsfp_txd_4, dut.qsfp_txc_4, dut.qsfp_tx_clk_4, dut.qsfp_tx_rst_4)

    async def init(self):

        self.dut.rst.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_1.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_1.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_2.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_2.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_3.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_3.setimmediatevalue(0)
        self.dut.qsfp_rx_rst_4.setimmediatevalue(0)
        self.dut.qsfp_tx_rst_4.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst.setimmediatevalue(1)
        self.dut.qsfp_rx_rst_1 <= 1
        self.dut.qsfp_tx_rst_1 <= 1
        self.dut.qsfp_rx_rst_2 <= 1
        self.dut.qsfp_tx_rst_2 <= 1
        self.dut.qsfp_rx_rst_3 <= 1
        self.dut.qsfp_tx_rst_3 <= 1
        self.dut.qsfp_rx_rst_4 <= 1
        self.dut.qsfp_tx_rst_4 <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 0
        self.dut.qsfp_rx_rst_1 <= 0
        self.dut.qsfp_tx_rst_1 <= 0
        self.dut.qsfp_rx_rst_2 <= 0
        self.dut.qsfp_tx_rst_2 <= 0
        self.dut.qsfp_rx_rst_3 <= 0
        self.dut.qsfp_tx_rst_3 <= 0
        self.dut.qsfp_rx_rst_4 <= 0
        self.dut.qsfp_tx_rst_4 <= 0
Ejemplo n.º 45
0
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.fork(Clock(dut.clk, 2.56, units="ns").start())

        # Ethernet
        cocotb.fork(Clock(dut.phy_gmii_clk, 8, units="ns").start())

        self.gmii_source = GmiiSource(dut.phy_gmii_rxd, dut.phy_gmii_rx_er,
                                      dut.phy_gmii_rx_dv, dut.phy_gmii_clk,
                                      dut.phy_gmii_rst, dut.phy_gmii_clk_en)
        self.gmii_sink = GmiiSink(dut.phy_gmii_txd, dut.phy_gmii_tx_er,
                                  dut.phy_gmii_tx_en, dut.phy_gmii_clk,
                                  dut.phy_gmii_rst, dut.phy_gmii_clk_en)

        dut.phy_gmii_clk_en.setimmediatevalue(1)

        cocotb.fork(Clock(dut.qsfp1_rx_clk_1, 2.56, units="ns").start())
        self.qsfp1_1_source = XgmiiSource(dut.qsfp1_rxd_1, dut.qsfp1_rxc_1,
                                          dut.qsfp1_rx_clk_1,
                                          dut.qsfp1_rx_rst_1)
        cocotb.fork(Clock(dut.qsfp1_tx_clk_1, 2.56, units="ns").start())
        self.qsfp1_1_sink = XgmiiSink(dut.qsfp1_txd_1, dut.qsfp1_txc_1,
                                      dut.qsfp1_tx_clk_1, dut.qsfp1_tx_rst_1)

        cocotb.fork(Clock(dut.qsfp1_rx_clk_2, 2.56, units="ns").start())
        self.qsfp1_2_source = XgmiiSource(dut.qsfp1_rxd_2, dut.qsfp1_rxc_2,
                                          dut.qsfp1_rx_clk_2,
                                          dut.qsfp1_rx_rst_2)
        cocotb.fork(Clock(dut.qsfp1_tx_clk_2, 2.56, units="ns").start())
        self.qsfp1_2_sink = XgmiiSink(dut.qsfp1_txd_2, dut.qsfp1_txc_2,
                                      dut.qsfp1_tx_clk_2, dut.qsfp1_tx_rst_2)

        cocotb.fork(Clock(dut.qsfp1_rx_clk_3, 2.56, units="ns").start())
        self.qsfp1_3_source = XgmiiSource(dut.qsfp1_rxd_3, dut.qsfp1_rxc_3,
                                          dut.qsfp1_rx_clk_3,
                                          dut.qsfp1_rx_rst_3)
        cocotb.fork(Clock(dut.qsfp1_tx_clk_3, 2.56, units="ns").start())
        self.qsfp1_3_sink = XgmiiSink(dut.qsfp1_txd_3, dut.qsfp1_txc_3,
                                      dut.qsfp1_tx_clk_3, dut.qsfp1_tx_rst_3)

        cocotb.fork(Clock(dut.qsfp1_rx_clk_4, 2.56, units="ns").start())
        self.qsfp1_4_source = XgmiiSource(dut.qsfp1_rxd_4, dut.qsfp1_rxc_4,
                                          dut.qsfp1_rx_clk_4,
                                          dut.qsfp1_rx_rst_4)
        cocotb.fork(Clock(dut.qsfp1_tx_clk_4, 2.56, units="ns").start())
        self.qsfp1_4_sink = XgmiiSink(dut.qsfp1_txd_4, dut.qsfp1_txc_4,
                                      dut.qsfp1_tx_clk_4, dut.qsfp1_tx_rst_4)

        cocotb.fork(Clock(dut.qsfp2_rx_clk_1, 2.56, units="ns").start())
        self.qsfp2_1_source = XgmiiSource(dut.qsfp2_rxd_1, dut.qsfp2_rxc_1,
                                          dut.qsfp2_rx_clk_1,
                                          dut.qsfp2_rx_rst_1)
        cocotb.fork(Clock(dut.qsfp2_tx_clk_1, 2.56, units="ns").start())
        self.qsfp2_1_sink = XgmiiSink(dut.qsfp2_txd_1, dut.qsfp2_txc_1,
                                      dut.qsfp2_tx_clk_1, dut.qsfp2_tx_rst_1)

        cocotb.fork(Clock(dut.qsfp2_rx_clk_2, 2.56, units="ns").start())
        self.qsfp2_2_source = XgmiiSource(dut.qsfp2_rxd_2, dut.qsfp2_rxc_2,
                                          dut.qsfp2_rx_clk_2,
                                          dut.qsfp2_rx_rst_2)
        cocotb.fork(Clock(dut.qsfp2_tx_clk_2, 2.56, units="ns").start())
        self.qsfp2_2_sink = XgmiiSink(dut.qsfp2_txd_2, dut.qsfp2_txc_2,
                                      dut.qsfp2_tx_clk_2, dut.qsfp2_tx_rst_2)

        cocotb.fork(Clock(dut.qsfp2_rx_clk_3, 2.56, units="ns").start())
        self.qsfp2_3_source = XgmiiSource(dut.qsfp2_rxd_3, dut.qsfp2_rxc_3,
                                          dut.qsfp2_rx_clk_3,
                                          dut.qsfp2_rx_rst_3)
        cocotb.fork(Clock(dut.qsfp2_tx_clk_3, 2.56, units="ns").start())
        self.qsfp2_3_sink = XgmiiSink(dut.qsfp2_txd_3, dut.qsfp2_txc_3,
                                      dut.qsfp2_tx_clk_3, dut.qsfp2_tx_rst_3)

        cocotb.fork(Clock(dut.qsfp2_rx_clk_4, 2.56, units="ns").start())
        self.qsfp2_4_source = XgmiiSource(dut.qsfp2_rxd_4, dut.qsfp2_rxc_4,
                                          dut.qsfp2_rx_clk_4,
                                          dut.qsfp2_rx_rst_4)
        cocotb.fork(Clock(dut.qsfp2_tx_clk_4, 2.56, units="ns").start())
        self.qsfp2_4_sink = XgmiiSink(dut.qsfp2_txd_4, dut.qsfp2_txc_4,
                                      dut.qsfp2_tx_clk_4, dut.qsfp2_tx_rst_4)

        dut.btnu.setimmediatevalue(0)
        dut.btnl.setimmediatevalue(0)
        dut.btnd.setimmediatevalue(0)
        dut.btnr.setimmediatevalue(0)
        dut.btnc.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)
        dut.uart_rxd.setimmediatevalue(0)
        dut.uart_cts.setimmediatevalue(0)
Ejemplo n.º 46
0
    def __init__(self, func):
        self._func = func
        self._log = SimLog("cocotb.external.%s" % self._func.__qualname__, id(self))
Ejemplo n.º 47
0
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""

    def __init__(self, root_name, modules, tests=None):
        """
        Args:
            root_name (str): The name of the root simulation handle.
            modules (list): A list of Python module names to search for tests.

        Kwargs:
            tests (str): A comma-separated list of specific tests to run,
                or ``None`` to auto-discover all tests in the given modules.
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")

    def initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()
        self.xunit.add_testsuite(name="all", tests=repr(self.ntests),
                                 package="all")

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                module = _my_import(module_name)
            except ImportError:
                self.log.critical("Failed to import module %s", module_name)
                self.log.info("MODULE variable was \"%s\"",
                                                    ",".join(self._modules))
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0",
                                                sim_time_ns="0.0",
                                                ratio_time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                        self._store_test_result(module_name, thing.name, None, 0.0, 0.0, 0.0)
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: "%s.%s" %
                         (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module,
                           valid_tests.funcname))

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args:
            result (TestComplete): The exception that terminated the test.
        """
        real_time   = time.time() - self._running_test.start_time
        sim_time_ns = get_sim_time('ns') - self._running_test.start_sim_time
        ratio_time  = sim_time_ns / real_time
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(real_time),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        running_test_funcname = self._running_test.funcname

        # Helper for logging result
        def _result_was():
            result_was = ("%s (result was %s)" %
                          (running_test_funcname, result.__class__.__name__))
            return result_was

        result_pass = True

        if (isinstance(result, TestSuccess) and
                not self._running_test.expect_fail and
                not self._running_test.expect_error):
            self.log.info("Test Passed: %s" % running_test_funcname)

        elif (isinstance(result, TestFailure) and
                self._running_test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess) and
              self._running_test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                   self._running_test.error_messages))
            self.failures += 1
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                   self._running_test.error_messages))
            self.failures += 1
            result_pass = False

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: " + _result_was())

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test error has lead to simulator shuttting us "
                               "down")
                self.failures += 1
                self._store_test_result(self._running_test.module, self._running_test.funcname, False, sim_time_ns, real_time, ratio_time)
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: " + _result_was())
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                   self._running_test.error_messages))
            self.failures += 1
            result_pass = False

        self._store_test_result(self._running_test.module, self._running_test.funcname, result_pass, sim_time_ns, real_time, ratio_time)

        self.execute()
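
        # Quick reference for the branches above (added note, derived from the
        # code; not part of the original source):
        #   result        expect_fail  expect_error  outcome
        #   TestSuccess   False        False         pass
        #   TestFailure   True         any           pass  ("failed as expected")
        #   TestSuccess   any          True          FAIL  (an error was expected)
        #   TestSuccess   True         False         FAIL  (a failure was expected)
        #   TestError     any          True          pass  ("errored as expected")
        #   SimFailure    any          True          pass, otherwise fatal shutdown
        #   anything else                            FAIL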

    def execute(self):
        self._running_test = cocotb.regression.next_test()
        if self._running_test:
            start = ''
            end   = ''
            if self.log.colour:
                start = ANSI.BLUE_BG + ANSI.BLACK_FG
                end   = ANSI.DEFAULT
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (start,
                           self.count, self.ntests,
                           end,
                           self._running_test.funcname))
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()

    def _log_test_summary(self):
        TEST_FIELD   = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD    = 'SIM TIME(NS)'
        REAL_FIELD   = 'REAL TIME(S)'
        RATIO_FIELD  = 'RATIO(NS/S)'

        TEST_FIELD_LEN   = max(len(TEST_FIELD),
                               len(max([x['test'] for x in self.test_results], key=len)))
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN    = len(SIM_FIELD)
        REAL_FIELD_LEN   = len(REAL_FIELD)
        RATIO_FIELD_LEN  = len(RATIO_FIELD)

        LINE_LEN = 3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 + SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 + RATIO_FIELD_LEN + 3

        LINE_SEP = "*"*LINE_LEN+"\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(a=TEST_FIELD,   a_len=TEST_FIELD_LEN,
                                                                                                         b=RESULT_FIELD, b_len=RESULT_FIELD_LEN,
                                                                                                         c=SIM_FIELD,    c_len=SIM_FIELD_LEN,
                                                                                                         d=REAL_FIELD,   d_len=REAL_FIELD_LEN,
                                                                                                         e=RATIO_FIELD,  e_len=RATIO_FIELD_LEN)
        summary += LINE_SEP
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if self.log.colour:
                    hilite = ANSI.WHITE_FG + ANSI.RED_BG

            summary += "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n".format(a=result['test'],   a_len=TEST_FIELD_LEN,
                                                                                                                                b=pass_fail_str,    b_len=RESULT_FIELD_LEN,
                                                                                                                                c=result['sim'],    c_len=SIM_FIELD_LEN-1,
                                                                                                                                d=result['real'],   d_len=REAL_FIELD_LEN-1,
                                                                                                                                e=result['ratio'],  e_len=RATIO_FIELD_LEN-1,
                                                                                                                                start=hilite)
        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self):
        real_time   = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time  = sim_time_ns / real_time

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {:<39}**\n".format(self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {:<39}**\n".format('{:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {:<39}**\n".format('{:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {:<39}**\n".format('{:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    def _store_test_result(self, module_name, test_name, result_pass, sim_time, real_time, ratio):
        result = {
            'test'  : '.'.join([module_name, test_name]),
            'pass'  : result_pass,
            'sim'   : sim_time,
            'real'  : real_time,
            'ratio' : ratio}
        self.test_results.append(result)
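
For reference, each entry appended by _store_test_result is a plain dict; after a
single passing test, self.test_results might hold something like the following
(values are illustrative, not taken from a real run):

    [{
        'test'  : 'test_sha1.F_process_first_buffer_test',
        'pass'  : True,       # None for skipped tests, False for failures
        'sim'   : 1200.0,     # simulated time in ns
        'real'  : 0.35,       # wall-clock seconds
        'ratio' : 3428.6,     # ns of simulated time per real second
    }]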
Ejemplo n.º 48
0
 def log(self):
     return SimLog("cocotb.function.%s" % self._coro.__qualname__, id(self))
Ejemplo n.º 49
0
class TB:
    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        cocotb.fork(Clock(dut.clk, 6.4, units="ns").start())

        # Ethernet
        cocotb.fork(Clock(dut.sfp_1_rx_clk, 6.4, units="ns").start())
        self.sfp_1_source = XgmiiSource(dut.sfp_1_rxd, dut.sfp_1_rxc,
                                        dut.sfp_1_rx_clk, dut.sfp_1_rx_rst)
        cocotb.fork(Clock(dut.sfp_1_tx_clk, 6.4, units="ns").start())
        self.sfp_1_sink = XgmiiSink(dut.sfp_1_txd, dut.sfp_1_txc,
                                    dut.sfp_1_tx_clk, dut.sfp_1_tx_rst)

        cocotb.fork(Clock(dut.sfp_2_rx_clk, 6.4, units="ns").start())
        self.sfp_2_source = XgmiiSource(dut.sfp_2_rxd, dut.sfp_2_rxc,
                                        dut.sfp_2_rx_clk, dut.sfp_2_rx_rst)
        cocotb.fork(Clock(dut.sfp_2_tx_clk, 6.4, units="ns").start())
        self.sfp_2_sink = XgmiiSink(dut.sfp_2_txd, dut.sfp_2_txc,
                                    dut.sfp_2_tx_clk, dut.sfp_2_tx_rst)

        cocotb.fork(Clock(dut.sfp_3_rx_clk, 6.4, units="ns").start())
        self.sfp_3_source = XgmiiSource(dut.sfp_3_rxd, dut.sfp_3_rxc,
                                        dut.sfp_3_rx_clk, dut.sfp_3_rx_rst)
        cocotb.fork(Clock(dut.sfp_3_tx_clk, 6.4, units="ns").start())
        self.sfp_3_sink = XgmiiSink(dut.sfp_3_txd, dut.sfp_3_txc,
                                    dut.sfp_3_tx_clk, dut.sfp_3_tx_rst)

        cocotb.fork(Clock(dut.sfp_4_rx_clk, 6.4, units="ns").start())
        self.sfp_4_source = XgmiiSource(dut.sfp_4_rxd, dut.sfp_4_rxc,
                                        dut.sfp_4_rx_clk, dut.sfp_4_rx_rst)
        cocotb.fork(Clock(dut.sfp_4_tx_clk, 6.4, units="ns").start())
        self.sfp_4_sink = XgmiiSink(dut.sfp_4_txd, dut.sfp_4_txc,
                                    dut.sfp_4_tx_clk, dut.sfp_4_tx_rst)

        dut.btn.setimmediatevalue(0)

    async def init(self):

        self.dut.rst.setimmediatevalue(0)
        self.dut.sfp_1_rx_rst.setimmediatevalue(0)
        self.dut.sfp_1_tx_rst.setimmediatevalue(0)
        self.dut.sfp_2_rx_rst.setimmediatevalue(0)
        self.dut.sfp_2_tx_rst.setimmediatevalue(0)
        self.dut.sfp_3_rx_rst.setimmediatevalue(0)
        self.dut.sfp_3_tx_rst.setimmediatevalue(0)
        self.dut.sfp_4_rx_rst.setimmediatevalue(0)
        self.dut.sfp_4_tx_rst.setimmediatevalue(0)

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 1
        self.dut.sfp_1_rx_rst <= 1
        self.dut.sfp_1_tx_rst <= 1
        self.dut.sfp_2_rx_rst <= 1
        self.dut.sfp_2_tx_rst <= 1
        self.dut.sfp_3_rx_rst <= 1
        self.dut.sfp_3_tx_rst <= 1
        self.dut.sfp_4_rx_rst <= 1
        self.dut.sfp_4_tx_rst <= 1

        for k in range(10):
            await RisingEdge(self.dut.clk)

        self.dut.rst <= 0
        self.dut.sfp_1_rx_rst <= 0
        self.dut.sfp_1_tx_rst <= 0
        self.dut.sfp_2_rx_rst <= 0
        self.dut.sfp_2_tx_rst <= 0
        self.dut.sfp_3_rx_rst <= 0
        self.dut.sfp_3_tx_rst <= 0
        self.dut.sfp_4_rx_rst <= 0
        self.dut.sfp_4_tx_rst <= 0
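
A typical test body built on this bench looks roughly like the sketch below. It
assumes a cocotbext-eth style XgmiiFrame with async send()/recv() on the
source/sink objects (matching the constructors used above but not shown here),
and that the design echoes received frames back out of the same port:

@cocotb.test()
async def run_test_sfp_1_loopback(dut):
    from cocotbext.eth import XgmiiFrame   # assumption: cocotbext-eth is available

    tb = TB(dut)
    await tb.init()

    # Drive a frame into the DUT's SFP1 receive path...
    test_frame = XgmiiFrame.from_payload(b'\x55' * 60)
    await tb.sfp_1_source.send(test_frame)

    # ...and expect it to come back out of the SFP1 transmit path.
    rx_frame = await tb.sfp_1_sink.recv()
    assert rx_frame.get_payload() == test_frame.get_payload()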
Ejemplo n.º 50
0
 def log(self):
     return SimLog("cocotb.coroutine.%s" % self._func.__qualname__, id(self))
Ejemplo n.º 51
0
class RunningTest(RunningCoroutine):
    """Add some useful Test functionality to a RunningCoroutine."""
    class ErrorLogHandler(logging.Handler):
        def __init__(self, fn):
            self.fn = fn
            logging.Handler.__init__(self, level=logging.DEBUG)

        def handle(self, record):
            self.fn(self.format(record))

    def __init__(self, inst, parent):
        self.error_messages = []
        RunningCoroutine.__init__(self, inst, parent)
        self.log = SimLog("cocotb.test.%s" % self.__name__, id(self))
        self.started = False
        self.start_time = 0
        self.start_sim_time = 0
        self.expect_fail = parent.expect_fail
        self.expect_error = parent.expect_error
        self.skip = parent.skip
        self.stage = parent.stage

        self.handler = RunningTest.ErrorLogHandler(self._handle_error_message)
        cocotb.log.addHandler(self.handler)

    def _advance(self, outcome):
        if not self.started:
            self.error_messages = []
            self.log.info("Starting test: \"%s\"\nDescription: %s" %
                          (self.funcname, self.__doc__))
            self.start_time = time.time()
            self.start_sim_time = get_sim_time('ns')
            self.started = True
        return super(RunningTest, self)._advance(outcome)

    def _handle_error_message(self, msg):
        self.error_messages.append(msg)

    def _force_outcome(self, outcome):
        """
        This method exists as a workaround for preserving tracebacks on
        python 2, and is called in unschedule. Once Python 2 is dropped, this
        should be inlined into `abort` below, and the call in `unschedule`
        replaced with `abort(outcome.error)`.
        """
        assert self._outcome is None
        if _debug:
            self.log.debug("outcome forced to {}".format(outcome))
        self._outcome = outcome
        cocotb.scheduler.unschedule(self)

    # like RunningCoroutine.kill(), but with a way to inject a failure
    def abort(self, exc):
        """
        Force this test to end early, without executing any cleanup.

        This happens when a background task fails, and is consistent with
        how the behavior has always been. In future, we may want to behave
        more gracefully to allow the test body to clean up.

        `exc` is the exception that the test should report as its reason for
        aborting.
        """
        return self._force_outcome(outcomes.Error(exc))
Ejemplo n.º 52
0
class SimHandleBase:
    """Base class for all simulation objects.

    We maintain a handle which we can use for GPI calls.
    """

    # For backwards compatibility we support a mapping of old member names
    # which may alias with the simulator hierarchy.  In these cases the
    # simulator result takes priority, only falling back to the python member
    # if there is no colliding object in the elaborated design.
    _compat_mapping = {
        "log"               :       "_log",
        "fullname"          :       "_fullname",
        "name"              :       "_name",
    }

    def __init__(self, handle, path):
        """
        .. Constructor. This RST comment works around sphinx-doc/sphinx#6885

        Args:
            handle (int): The GPI handle to the simulator object.
            path (str): Path to this handle, ``None`` if root.
        """
        self._handle = handle
        self._len: Optional[int] = None
        """The "length" (the number of elements) of the underlying object. For vectors this is the number of bits."""
        self._sub_handles: dict = {}
        """Dictionary of this handle's children."""
        self._invalid_sub_handles: set = set()
        """Python :class:`set` of invalid queries, for caching purposes."""
        self._name: str = self._handle.get_name_string()
        """The name of an object.

        :meta public:
        """
        self._type: str = self._handle.get_type_string()
        """The type of an object as a string.

        :meta public:
        """
        self._fullname: str = self._name + "(%s)" % self._type
        """The name of an object with its type appended in parentheses."""
        self._path: str = self._name if path is None else path
        """The path to this handle, or its name if this is the root handle.

        :meta public:
        """
        self._log = SimLog("cocotb.%s" % self._name)
        """The logging object."""
        self._log.debug("Created")
        self._def_name: str = self._handle.get_definition_name()
        """The name of a GPI object's definition.

        This is the value of ``vpiDefName`` for VPI, ``vhpiNameP`` for VHPI,
        and ``mti_GetPrimaryName`` for FLI.
        Support for this depends on the specific object type and simulator used.

        :meta public:
        """
        self._def_file: str = self._handle.get_definition_file()
        """The name of the file that sources the object's definition.

        This is the value of ``vpiDefFile`` for VPI, ``vhpiFileNameP`` for VHPI,
        and ``mti_GetRegionSourceName`` for FLI.
        Support for this depends on the specific object type and simulator used.

        :meta public:
        """

    def get_definition_name(self):
        return self._def_name

    def get_definition_file(self):
        return self._def_file

    def __hash__(self):
        return hash(self._handle)

    def __len__(self):
        """Return the "length" (the number of elements) of the underlying object.

        For vectors this is the number of bits.
        """
        if self._len is None:
            self._len = self._handle.get_num_elems()
        return self._len

    def __eq__(self, other):
        """Equality comparator for handles

        Example usage::

            if clk == dut.clk:
                do_something()
        """
        if not isinstance(other, SimHandleBase):
            return NotImplemented
        return self._handle == other._handle

    def __ne__(self, other):
        if not isinstance(other, SimHandleBase):
            return NotImplemented
        return self._handle != other._handle

    def __repr__(self):
        desc = self._path
        defname = self._def_name
        if defname:
            desc += " with definition "+defname
            deffile = self._def_file
            if deffile:
                desc += " (at "+deffile+")"
        return type(self).__qualname__ + "(" + desc + ")"

    def __str__(self):
        return self._path

    def __setattr__(self, name, value):
        if name in self._compat_mapping:
            if name not in _deprecation_warned:
                warnings.warn("Use of attribute {!r} is deprecated, use {!r} instead".format(name, self._compat_mapping[name]),
                              DeprecationWarning, stacklevel=3)
                _deprecation_warned.add(name)
            return setattr(self, self._compat_mapping[name], value)
        else:
            return object.__setattr__(self, name, value)

    def __getattr__(self, name):
        if name in self._compat_mapping:
            if name not in _deprecation_warned:
                warnings.warn("Use of attribute {!r} is deprecated, use {!r} instead".format(name, self._compat_mapping[name]),
                              DeprecationWarning, stacklevel=3)
                _deprecation_warned.add(name)
            return getattr(self, self._compat_mapping[name])
        else:
            return object.__getattribute__(self, name)
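
The _compat_mapping fallback above means the old public spellings still resolve,
just with a one-time DeprecationWarning per name (and only when the elaborated
design has no colliding object); a small sketch, with 'dut' standing in for any
SimHandleBase-derived handle:

    dut._log.info("hello")          # preferred: the real attribute
    dut.log.info("hello")           # deprecated alias, falls back to dut._log and warns once
    assert dut.name == dut._name    # 'name' and 'fullname' use the same mechanism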
Ejemplo n.º 53
0
class Monitor(object):
    """Base class for Monitor objects.

    Monitors are passive 'listening' objects that monitor pins going in or out of a DUT.
    This class should not be used
    directly, but should be sub-classed and the internal :any:`_monitor_recv` method
    should be overridden and decorated as a :any:`coroutine`.  This :any:`_monitor_recv`
    method should capture some behavior of the pins, form a transaction, and
    pass this transaction to the internal :any:`_recv` method.  The :any:`_monitor_recv`
    method is added to the cocotb scheduler during the ``__init__`` phase, so it
    should not be yielded anywhere.

    The primary use of a Monitor is as an interface for a
    :class:`~cocotb.scoreboard.Scoreboard`.

    Args:
        callback (callable): Callback to be called with each recovered transaction
            as the argument. If the callback isn't used, received transactions will
            be placed on a queue and the event used to notify any consumers.
        event (cocotb.triggers.Event): Event that will be called when a transaction
            is received through the internal :any:`_recv` method.
            `Event.data` is set to the received transaction.
    """
    def __init__(self, callback=None, event=None):
        self._event = event
        self._wait_event = Event()
        self._recvQ = deque()
        self._callbacks = []
        self.stats = MonitorStatistics()

        # Sub-classes may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.monitor.%s" % (self.__class__.__name__))

        if callback is not None:
            self.add_callback(callback)

        # Create an independent coroutine which can receive stuff
        self._thread = cocotb.scheduler.add(self._monitor_recv())

    def kill(self):
        """Kill the monitor coroutine."""
        if self._thread:
            self._thread.kill()
            self._thread = None

    def __len__(self):
        return len(self._recvQ)

    def __getitem__(self, idx):
        return self._recvQ[idx]

    def add_callback(self, callback):
        """Add function as a callback.

        Args:
            callback (callable): The function to call back.
        """
        self.log.debug("Adding callback of function %s to monitor",
                       callback.__name__)
        self._callbacks.append(callback)

    @coroutine
    def wait_for_recv(self, timeout=None):
        """With *timeout*, :meth:`.wait` for transaction to arrive on monitor
        and return its data.

        Args:
            timeout: The timeout value for :class:`~.triggers.Timer`.
                Defaults to ``None``.

        Returns:
            Data of the received transaction, or ``None`` if the timeout fired first.
        """
        if timeout:
            t = Timer(timeout)
            fired = yield [self._wait_event.wait(), t]
            if fired is t:
                return None
        else:
            yield self._wait_event.wait()

        return self._wait_event.data

    @coroutine
    def _monitor_recv(self):
        """Actual implementation of the receiver.

        Sub-classes should override this method to implement the actual receive
        routine and call :any:`_recv` with the recovered transaction.
        """
        raise NotImplementedError("Attempt to use base monitor class without "
                                  "providing a ``_monitor_recv`` method")

    def _recv(self, transaction):
        """Common handling of a received transaction."""

        self.stats.received_transactions += 1

        # either callback based consumer
        for callback in self._callbacks:
            callback(transaction)

        # Or queued with a notification
        if not self._callbacks:
            self._recvQ.append(transaction)

        if self._event is not None:
            self._event.set(data=transaction)

        # If anyone was waiting then let them know
        if self._wait_event is not None:
            self._wait_event.set(data=transaction)
            self._wait_event.clear()
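
A minimal subclass sketch showing the pattern the docstring describes: override
_monitor_recv, sample the pins, and hand every recovered transaction to _recv.
It assumes the same imports as the code above (coroutine, RisingEdge); the
signal and clock are placeholders rather than pins of a real design:

class EdgeSampledMonitor(Monitor):
    """Samples `signal` on every rising edge of `clock` (illustrative only)."""

    def __init__(self, signal, clock, callback=None, event=None):
        self.signal = signal
        self.clock = clock
        Monitor.__init__(self, callback, event)

    @coroutine
    def _monitor_recv(self):
        while True:
            yield RisingEdge(self.clock)
            # The sampled value is the whole "transaction" in this sketch
            self._recv(self.signal.value)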
Ejemplo n.º 54
0
 def log(self):
     # Creating a logger is expensive, only do it if we actually plan to
     # log anything
     return SimLog("cocotb.coroutine.%s" % self.__qualname__, id(self))
Ejemplo n.º 55
0
 def __init__(self, func):
     self._func = func
     self._log = SimLog("cocotb.external.%s" % self._func.__name__,
                        id(self))
     functools.update_wrapper(self, self._func)
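
Judging by the log name, this is the wrapper behind cocotb.external, which lets a
blocking Python function be awaited from a coroutine without stalling the
simulator; a small usage sketch (the helper and file path are made up for
illustration):

def read_expected_words(path):
    # Ordinary blocking Python code, e.g. file or socket I/O
    with open(path) as f:
        return [int(line, 16) for line in f]

@cocotb.test()
def external_call_test(dut):
    # The blocking call runs in a separate thread; the coroutine resumes
    # once it returns.
    words = yield cocotb.external(read_expected_words)("../test_data/expected.txt")
    dut._log.info("Loaded %d expected words", len(words))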
Ejemplo n.º 56
0
class Scoreboard(object):
    """Generic scoreboarding class.

    We can add interfaces by providing a monitor and an expected output queue.

    The expected output can either be a function which provides a transaction
    or a simple list containing the expected output.

    TODO:
        Statistics for end-of-test summary etc.
        
    Args:
        dut (SimHandle): Handle to the DUT.
        reorder_depth (int, optional): Consider up to `reorder_depth` elements 
            of the expected result list as passing matches.
            Default is 0, meaning only the first element in the expected result list
            is considered for a passing match.
        fail_immediately (bool, optional): Raise :any:`TestFailure`
            immediately when something is wrong instead of just
            recording an error. Default is ``True``.
    """
    
    def __init__(self, dut, reorder_depth=0, fail_immediately=True):  # FIXME: reorder_depth needed here?
        self.dut = dut
        self.log = SimLog("cocotb.scoreboard.%s" % self.dut._name)
        self.errors = 0
        self.expected = {}
        self._imm = fail_immediately

    @property
    def result(self):
        """Determine the test result, do we have any pending data remaining?
        
        Returns:
            :any:`TestFailure`: If not all expected output was received or 
            error were recorded during the test.
        """
        fail = False
        for monitor, expected_output in self.expected.items():
            if callable(expected_output):
                self.log.debug("Can't check all data returned for %s since "
                               "expected output is callable function rather "
                               "than a list" % str(monitor))
                continue
            if len(expected_output):
                self.log.warn("Still expecting %d transactions on %s" %
                              (len(expected_output), str(monitor)))
                for index, transaction in enumerate(expected_output):
                    self.log.info("Expecting %d:\n%s" %
                                  (index, hexdump(str(transaction))))
                    if index > 5:
                        self.log.info("... and %d more to come" %
                                      (len(expected_output) - index - 1))
                        break
                fail = True
        if fail:
            return TestFailure("Not all expected output was received")
        if self.errors:
            return TestFailure("Errors were recorded during the test")
        return TestSuccess()

    def compare(self, got, exp, log, strict_type=True):
        """Common function for comparing two transactions.

        Can be re-implemented by a subclass.
        
        Args:
            got: The received transaction.
            exp: The expected transaction.
            log: The logger for reporting messages.
            strict_type (bool, optional): Require transaction type to match
                exactly if ``True``, otherwise compare its string representation.

        Raises:
            :any:`TestFailure`: If received transaction differed from
                expected transaction when :attr:`fail_immediately` is ``True``.
                When *strict_type* is ``True``,
                the transaction type must also match.
        """

        # Compare the types
        if strict_type and type(got) != type(exp):
            self.errors += 1
            log.error("Received transaction type is different than expected")
            log.info("Received: %s but expected %s" %
                     (str(type(got)), str(type(exp))))
            if self._imm:
                raise TestFailure("Received transaction of wrong type. "
                                  "Set strict_type=False to avoid this.")
            return
        # Or convert to a string before comparison
        elif not strict_type:
            got, exp = str(got), str(exp)

        # Compare directly
        if got != exp:
            self.errors += 1

            # Try our best to print out something useful
            strgot, strexp = str(got), str(exp)

            log.error("Received transaction differed from expected output")
            if not strict_type:
                log.info("Expected:\n" + hexdump(strexp))
            else:
                log.info("Expected:\n" + repr(exp))
            if not isinstance(exp, str):
                try:
                    for word in exp:
                        log.info(str(word))
                except Exception:
                    pass
            if not strict_type:
                log.info("Received:\n" + hexdump(strgot))
            else:
                log.info("Received:\n" + repr(got))
            if not isinstance(got, str):
                try:
                    for word in got:
                        log.info(str(word))
                except Exception:
                    pass
            log.warning("Difference:\n%s" % hexdiffs(strexp, strgot))
            if self._imm:
                raise TestFailure("Received transaction differed from expected"
                                  "transaction")
        else:
            # Don't want to fail the test
            # if we're passed something without __len__
            try:
                log.debug("Received expected transaction %d bytes" %
                          (len(got)))
                log.debug(repr(got))
            except Exception:
                pass

    def add_interface(self, monitor, expected_output, compare_fn=None,
                      reorder_depth=0, strict_type=True):
        """Add an interface to be scoreboarded.

        Provides a function which the monitor will callback with received
        transactions.

        Simply check against the expected output.
        
        Args:
            monitor: The monitor object.
            expected_output: Queue of expected outputs.
            compare_fn (callable, optional): Function doing the actual comparison.
            reorder_depth (int, optional): Consider up to *reorder_depth* elements 
                of the expected result list as passing matches.
                Default is 0, meaning only the first element in the expected result list
                is considered for a passing match.
            strict_type (bool, optional): Require transaction type to match
                exactly if ``True``, otherwise compare its string representation.

        Raises:
            :any:`TypeError`: If no monitor is on the interface or
                *compare_fn* is not a callable function.
        """
        # save a handle to the expected output so we can check if all expected
        # data has been received at the end of a test.
        self.expected[monitor] = expected_output

        # Enforce some type checking as we only work with a real monitor
        if not isinstance(monitor, Monitor):
            raise TypeError("Expected monitor on the interface but got %s" %
                            (monitor.__class__.__name__))

        if compare_fn is not None:
            if callable(compare_fn):
                monitor.add_callback(compare_fn)
                return
            raise TypeError("Expected a callable compare function but got %s" %
                            str(type(compare_fn)))

        self.log.info("Created with reorder_depth %d" % reorder_depth)

        def check_received_transaction(transaction):
            """Called back by the monitor when a new transaction has been
            received."""

            if monitor.name:
                log_name = self.log.name + '.' + monitor.name
            else:
                log_name = self.log.name + '.' + monitor.__class__.__name__

            log = logging.getLogger(log_name)

            if callable(expected_output):
                exp = expected_output(transaction)

            elif len(expected_output):  # we expect something
                for i in range(min((reorder_depth + 1), len(expected_output))):
                    if expected_output[i] == transaction:
                        break  # break out of enclosing for loop
                else:  # run when for loop is exhausted (but no break occurs)
                    i = 0
                exp = expected_output.pop(i)
            else:
                self.errors += 1
                log.error("Received a transaction but wasn't expecting "
                          "anything")
                log.info("Got: %s" % (hexdump(str(transaction))))
                if self._imm:
                    raise TestFailure("Received a transaction but wasn't "
                                      "expecting anything")
                return

            self.compare(transaction, exp, log, strict_type=strict_type)

        monitor.add_callback(check_received_transaction)
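
Putting the pieces together, wiring a monitor into the scoreboard typically looks
roughly like the sketch below (the monitor class, signal names, and model() are
placeholders, not part of the code above):

    scoreboard = Scoreboard(dut)

    expected_output = []                                      # matches are popped from the front
    output_mon = EdgeSampledMonitor(dut.data_out, dut.clk)    # any Monitor subclass
    scoreboard.add_interface(output_mon, expected_output)

    # Each time a stimulus transaction tx is driven, append the model's
    # prediction so the scoreboard has something to compare against:
    #     expected_output.append(model(tx))

    # At the end of the test, raise whatever verdict the scoreboard reached:
    #     raise scoreboard.result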
Ejemplo n.º 57
0
 def __init__(self, dut, reorder_depth=0, fail_immediately=True):
     self.dut = dut
     self.log = SimLog("cocotb.scoreboard.%s" % self.dut.name)
     self.errors = 0
     self.expected = {}
     self._imm = fail_immediately
Ejemplo n.º 58
0
class Scoreboard(object):
    """Generic scoreboarding class

    We can add interfaces by providing a monitor and an expected output queue

    The expected output can either be a function which provides a transaction
    or a simple list containing the expected output.

    TODO:
        Statistics for end-of-test summary etc.
    """
    def __init__(self, dut, reorder_depth=0, fail_immediately=True):
        self.dut = dut
        self.log = SimLog("cocotb.scoreboard.%s" % self.dut.name)
        self.errors = 0
        self.expected = {}
        self._imm = fail_immediately

    @property
    def result(self):
        """Determine the test result, do we have any pending data remaining?"""
        fail = False
        for monitor, expected_output in self.expected.items():
            if callable(expected_output):
                self.log.debug("Can't check all data returned for %s since "
                               "expected output is callable function rather "
                               "than a list" % str(monitor))
                continue
            if len(expected_output):
                self.log.warn("Still expecting %d transactions on %s" %
                              (len(expected_output), str(monitor)))
                for index, transaction in enumerate(expected_output):
                    self.log.info("Expecting %d:\n%s" %
                                  (index, hexdump(str(transaction))))
                    if index > 5:
                        self.log.info("... and %d more to come" %
                                      (len(expected_output) - index - 1))
                        break
                fail = True
        if fail:
            return TestFailure("Not all expected output was received")
        if self.errors:
            return TestFailure("Errors were recorded during the test")
        return TestSuccess()

    def compare(self, got, exp, log, strict_type=True):
        """
        Common function for comparing two transactions.

        Can be re-implemented by a subclass.
        """

        # Compare the types
        if strict_type and type(got) != type(exp):
            self.errors += 1
            log.error("Received transaction is a different type to expected "
                      "transaction")
            log.info("Got: %s but expected %s" %
                     (str(type(got)), str(type(exp))))
            if self._imm:
                raise TestFailure("Received transaction of wrong type")
            return
        # Or convert to a string before comparison
        elif not strict_type:
            got, exp = str(got), str(exp)

        # Compare directly
        if got != exp:
            self.errors += 1

            # Try our best to print out something useful
            strgot, strexp = str(got), str(exp)

            log.error("Received transaction differed from expected output")
            log.info("Expected:\n" + hexdump(strexp))
            if not isinstance(exp, str):
                try:
                    for word in exp:
                        log.info(str(word))
                except Exception:
                    pass
            log.info("Received:\n" + hexdump(strgot))
            if not isinstance(got, str):
                try:
                    for word in got:
                        log.info(str(word))
                except Exception:
                    pass
            log.warning("Difference:\n%s" % hexdiffs(strexp, strgot))
            if self._imm:
                raise TestFailure("Received transaction differed from expected"
                                  "transaction")
        else:
            # Don't want to fail the test
            # if we're passed something without __len__
            try:
                log.debug("Received expected transaction %d bytes" %
                          (len(got)))
                log.debug(repr(got))
            except Exception:
                pass

    def add_interface(self,
                      monitor,
                      expected_output,
                      compare_fn=None,
                      reorder_depth=0,
                      strict_type=True):
        """Add an interface to be scoreboarded.

        Provides a function which the monitor will callback with received
        transactions.

        Simply check against the expected output.

        """
        # save a handle to the expected output so we can check if all expected
        # data has been received at the end of a test.
        self.expected[monitor] = expected_output

        # Enforce some type checking as we only work with a real monitor
        if not isinstance(monitor, Monitor):
            raise TypeError("Expected monitor on the interface but got %s" %
                            (monitor.__class__.__name__))

        if compare_fn is not None:
            if callable(compare_fn):
                monitor.add_callback(compare_fn)
                return
            raise TypeError("Expected a callable compare function but got %s" %
                            str(type(compare_fn)))

        self.log.info("Created with reorder_depth %d" % reorder_depth)

        def check_received_transaction(transaction):
            """Called back by the monitor when a new transaction has been
            received"""

            log = logging.getLogger(self.log.name + '.' + monitor.name)

            if callable(expected_output):
                exp = expected_output(transaction)

            elif len(expected_output):
                for i in range(min((reorder_depth + 1), len(expected_output))):
                    if expected_output[i] == transaction:
                        break
                else:
                    i = 0
                exp = expected_output.pop(i)
            else:
                self.errors += 1
                log.error("Received a transaction but wasn't expecting "
                          "anything")
                log.info("Got: %s" % (hexdump(str(transaction))))
                if self._imm:
                    raise TestFailure("Received a transaction but wasn't "
                                      "expecting anything")
                return

            self.compare(transaction, exp, log, strict_type=strict_type)

        monitor.add_callback(check_received_transaction)
Ejemplo n.º 59
0
    def __init__(self, dut):
        self.dut = dut

        self.BAR0_APERTURE = int(os.getenv("PARAM_BAR0_APERTURE"))

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # PCIe
        self.rc = RootComplex()

        self.rc.max_payload_size = 0x1  # 256 bytes
        self.rc.max_read_request_size = 0x2  # 512 bytes

        self.dev = UltraScalePlusPcieDevice(
            # configuration options
            pcie_generation=3,
            pcie_link_width=16,
            user_clk_frequency=250e6,
            alignment="dword",
            cq_cc_straddle=False,
            rq_rc_straddle=False,
            rc_4tlp_straddle=False,
            enable_pf1=False,
            enable_client_tag=True,
            enable_extended_tag=True,
            enable_parity=False,
            enable_rx_msg_interface=False,
            enable_sriov=False,
            enable_extended_configuration=False,
            enable_pf0_msi=True,
            enable_pf1_msi=False,

            # signals
            # Clock and Reset Interface
            user_clk=dut.clk_250mhz,
            user_reset=dut.rst_250mhz,
            # user_lnk_up
            # sys_clk
            # sys_clk_gt
            # sys_reset
            # phy_rdy_out

            # Requester reQuest Interface
            rq_entity=dut,
            rq_name="m_axis_rq",
            pcie_rq_seq_num0=dut.s_axis_rq_seq_num_0,
            pcie_rq_seq_num_vld0=dut.s_axis_rq_seq_num_valid_0,
            pcie_rq_seq_num1=dut.s_axis_rq_seq_num_1,
            pcie_rq_seq_num_vld1=dut.s_axis_rq_seq_num_valid_1,
            # pcie_rq_tag0
            # pcie_rq_tag1
            # pcie_rq_tag_av
            # pcie_rq_tag_vld0
            # pcie_rq_tag_vld1

            # Requester Completion Interface
            rc_entity=dut,
            rc_name="s_axis_rc",

            # Completer reQuest Interface
            cq_entity=dut,
            cq_name="s_axis_cq",
            # pcie_cq_np_req
            # pcie_cq_np_req_count

            # Completer Completion Interface
            cc_entity=dut,
            cc_name="m_axis_cc",

            # Transmit Flow Control Interface
            # pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
            # pcie_tfc_npd_av=dut.pcie_tfc_npd_av,

            # Configuration Management Interface
            cfg_mgmt_addr=dut.cfg_mgmt_addr,
            cfg_mgmt_function_number=dut.cfg_mgmt_function_number,
            cfg_mgmt_write=dut.cfg_mgmt_write,
            cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
            cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
            cfg_mgmt_read=dut.cfg_mgmt_read,
            cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
            cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
            # cfg_mgmt_debug_access

            # Configuration Status Interface
            # cfg_phy_link_down
            # cfg_phy_link_status
            # cfg_negotiated_width
            # cfg_current_speed
            cfg_max_payload=dut.cfg_max_payload,
            cfg_max_read_req=dut.cfg_max_read_req,
            # cfg_function_status
            # cfg_vf_status
            # cfg_function_power_state
            # cfg_vf_power_state
            # cfg_link_power_state
            # cfg_err_cor_out
            # cfg_err_nonfatal_out
            # cfg_err_fatal_out
            # cfg_local_error_out
            # cfg_local_error_valid
            # cfg_rx_pm_state
            # cfg_tx_pm_state
            # cfg_ltssm_state
            # cfg_rcb_status
            # cfg_obff_enable
            # cfg_pl_status_change
            # cfg_tph_requester_enable
            # cfg_tph_st_mode
            # cfg_vf_tph_requester_enable
            # cfg_vf_tph_st_mode

            # Configuration Received Message Interface
            # cfg_msg_received
            # cfg_msg_received_data
            # cfg_msg_received_type

            # Configuration Transmit Message Interface
            # cfg_msg_transmit
            # cfg_msg_transmit_type
            # cfg_msg_transmit_data
            # cfg_msg_transmit_done

            # Configuration Flow Control Interface
            cfg_fc_ph=dut.cfg_fc_ph,
            cfg_fc_pd=dut.cfg_fc_pd,
            cfg_fc_nph=dut.cfg_fc_nph,
            cfg_fc_npd=dut.cfg_fc_npd,
            cfg_fc_cplh=dut.cfg_fc_cplh,
            cfg_fc_cpld=dut.cfg_fc_cpld,
            cfg_fc_sel=dut.cfg_fc_sel,

            # Configuration Control Interface
            # cfg_hot_reset_in
            # cfg_hot_reset_out
            # cfg_config_space_enable
            # cfg_dsn
            # cfg_bus_number
            # cfg_ds_port_number
            # cfg_ds_bus_number
            # cfg_ds_device_number
            # cfg_ds_function_number
            # cfg_power_state_change_ack
            # cfg_power_state_change_interrupt
            cfg_err_cor_in=dut.status_error_cor,
            cfg_err_uncor_in=dut.status_error_uncor,
            # cfg_flr_in_process
            # cfg_flr_done
            # cfg_vf_flr_in_process
            # cfg_vf_flr_func_num
            # cfg_vf_flr_done
            # cfg_pm_aspm_l1_entry_reject
            # cfg_pm_aspm_tx_l0s_entry_disable
            # cfg_req_pm_transition_l23_ready
            # cfg_link_training_enable

            # Configuration Interrupt Controller Interface
            # cfg_interrupt_int
            # cfg_interrupt_sent
            # cfg_interrupt_pending
            cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
            cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
            cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
            cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
            # cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
            cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
            cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
            cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
            # cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
            cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
            cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
            # cfg_interrupt_msix_enable
            # cfg_interrupt_msix_mask
            # cfg_interrupt_msix_vf_enable
            # cfg_interrupt_msix_vf_mask
            # cfg_interrupt_msix_address
            # cfg_interrupt_msix_data
            # cfg_interrupt_msix_int
            # cfg_interrupt_msix_vec_pending
            # cfg_interrupt_msix_vec_pending_status
            cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
            cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
            cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
            # cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
            # cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,

            # Configuration Extend Interface
            # cfg_ext_read_received
            # cfg_ext_write_received
            # cfg_ext_register_number
            # cfg_ext_function_number
            # cfg_ext_write_data
            # cfg_ext_write_byte_enable
            # cfg_ext_read_data
            # cfg_ext_read_data_valid
        )

        # self.dev.log.setLevel(logging.DEBUG)

        self.rc.make_port().connect(self.dev)

        self.driver = mqnic.Driver(self.rc)

        self.dev.functions[0].msi_multiple_message_capable = 5

        self.dev.functions[0].configure_bar(0,
                                            2**self.BAR0_APERTURE,
                                            ext=True,
                                            prefetch=True)

        # Ethernet
        cocotb.fork(Clock(dut.qsfp_0_rx_clk, 3.102, units="ns").start())
        self.qsfp_0_source = AxiStreamSource(dut, "qsfp_0_rx_axis",
                                             dut.qsfp_0_rx_clk,
                                             dut.qsfp_0_rx_rst)
        cocotb.fork(Clock(dut.qsfp_0_tx_clk, 3.102, units="ns").start())
        self.qsfp_0_sink = AxiStreamSink(dut, "qsfp_0_tx_axis",
                                         dut.qsfp_0_tx_clk, dut.qsfp_0_tx_rst)

        cocotb.fork(Clock(dut.qsfp_1_rx_clk, 3.102, units="ns").start())
        self.qsfp_1_source = AxiStreamSource(dut, "qsfp_1_rx_axis",
                                             dut.qsfp_1_rx_clk,
                                             dut.qsfp_1_rx_rst)
        cocotb.fork(Clock(dut.qsfp_1_tx_clk, 3.102, units="ns").start())
        self.qsfp_1_sink = AxiStreamSink(dut, "qsfp_1_tx_axis",
                                         dut.qsfp_1_tx_clk, dut.qsfp_1_tx_rst)

        dut.qsfp_0_i2c_scl_i.setimmediatevalue(1)
        dut.qsfp_0_i2c_sda_i.setimmediatevalue(1)
        dut.qsfp_0_intr_n.setimmediatevalue(1)
        dut.qsfp_0_mod_prsnt_n.setimmediatevalue(0)

        dut.qsfp_1_i2c_scl_i.setimmediatevalue(1)
        dut.qsfp_1_i2c_sda_i.setimmediatevalue(1)
        dut.qsfp_1_intr_n.setimmediatevalue(1)
        dut.qsfp_1_mod_prsnt_n.setimmediatevalue(0)

        dut.qspi_dq_i.setimmediatevalue(0)

        dut.pps_in.setimmediatevalue(0)

        self.loopback_enable = False
        cocotb.fork(self._run_loopback())
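
A test built on this bench usually just constructs the class, enumerates the PCIe
bus, and exercises the Ethernet ports; a minimal sketch, in which the enumerate()
call and the AxiStreamFrame type are assumptions about the cocotbext-pcie /
cocotbext-axi style APIs this bench builds on, not code from the original test:

@cocotb.test()
async def run_test(dut):
    tb = TB(dut)
    await tb.rc.enumerate()        # assumed RootComplex API
    tb.loopback_enable = True

    # Assumed AXI-stream frame object for the qsfp ports
    await tb.qsfp_0_source.send(AxiStreamFrame(bytes(range(60))))
    rx_frame = await tb.qsfp_0_sink.recv()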
Ejemplo n.º 60
0
 def __init__(self):
     self.log = SimLog("cocotb.%s" % (self.__class__.__name__), id(self))
     self.signal = None
     self.primed = False