Example #1
def F_exhaust_mk_test(dut):
    """
    Hits end of MK list before matching
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end =   '1000000020'    #Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    yield load_file(dut, filename)
    yield load_mk(dut, start)
    yield load_mk(dut, end)
    
    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    
    yield wait_process(dut)
    
    if int(str(dut.pmk_valid), 2) == 0:
        raise TestFailure("Master key found, not good!")
    else:
        log.info("List done")
Example #2
def F_exhaust_mk_test(dut):
    """
    Hits end of MK list before matching
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end = '1000000020'  #Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)

    yield load_file(dut, filename)
    yield load_mk(dut, start)
    yield load_mk(dut, end)

    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)

    yield wait_process(dut)

    if int(str(dut.pmk_valid), 2) == 0:
        raise TestFailure("Master key found, not good!")
    else:
        log.info("List done")
Example #3
def E_find_mk_test(dut):
    """
    Finds MK successfully
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end =   '1000000300'    #Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    print_process_vars(dut)
    yield load_file(dut, filename)
    print_process_vars(dut)
    yield load_mk(dut, start)
    print_process_vars(dut)
    yield load_mk(dut, end)
    print_process_vars(dut)
    
    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    
    yield wait_process(dut)
    
    print_process_vars(dut)
    
    if int(str(dut.pmk_valid), 2) == 0:
        raise TestFailure("MK search failed")
    else:
        log.info("Master key found!")
Example #4
def A_cache_data_test(dut):
    """
    Tests that initial data cache
    gets built and latched properly
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    mockSha1 = wpa2slow.sha1.Sha1Model()
    mockObject = wpa2slow.hmac.HmacModel(mockSha1)
    
    yield reset(dut)
    size = random.randint(8, 64)
    print "Length: {:d}".format(size)
    yield load_random_data(dut, log, mockObject, size)

    #mockObject.displayAll()
    mockOut = "{}".format(mockObject.shaBo)

    print convert_hex(dut.test_word_1) + " " + convert_hex(dut.test_word_2) + " " + convert_hex(dut.test_word_3) + " " + convert_hex(dut.test_word_4) + " " + convert_hex(dut.test_word_5)

    if convert_hex(dut.test_word_1).zfill(8) != mockOut:
        raise TestFailure(
            "Load data is incorrect: {0} != {1}".format(convert_hex(dut.test_word_1), mockOut))
    else:
        log.info("Ok!")
Example #5
def E_process_second_input_round_test(dut):
    """Test input processing with 32 word input"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    mockObject = Sha1Model()

    yield reset(dut)
    #yield load_data(dut, log, mockObject, 16)

    #mockObject.processInput()
    #mockObject.displayAll()
    
    yield load_data(dut, log, mockObject, 16)
    mockObject.processInput()
    yield load_data(dut, log, mockObject, 66)
    
    mockOut = "{:08x}".format(mockObject.W[16])
    compare1 = convert_hex(dut.pinput1.test_word_1.value).rjust(8, '0')
    compare2 = convert_hex(dut.pinput1.test_word_5.value).rjust(8, '0')
    
    if compare1 != mockOut:
        raise TestFailure(
            "First load incorrect: {0} != {1}".format(compare1, mockOut))
    elif compare2 != "{:08x}".format(mockObject.W[79]):
        raise TestFailure(
            "First load incorrect: {0} != {1}".format(compare2, "{:08x}".format(mockObject.W[79])))
    else:
        log.info("First load ok!") 
Example #6
def F_process_first_buffer_test(dut):
    """Test data after processing the first message buffer"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    mockObject = Sha1Model()

    yield reset(dut)
    #yield load_data(dut, log, mockObject, 16)

    #mockObject.processInput()
    #mockObject.displayAll()
    
    yield load_data(dut, log, mockObject, 16)
    mockObject.processInput()
    mockObject.processBuffer()
    yield load_data(dut, log, mockObject, 65)
    yield load_data(dut, log, mockObject, 85)
    
    mockOut = "{:08x}".format(mockObject.H0)
    compare1 = convert_hex(dut.pbuffer1.test_word_4.value).rjust(8, '0')
    
    if compare1 != mockOut:
        raise TestFailure(
            "First buffer incorrect: {0} != {1}".format(compare1, mockOut))
    else:
        log.info("First buffer ok!") 
Example #7
def A_gen_data_test(dut):
    """
    Tests that gen_tenhex generates sane values
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    outStr = ''
    
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
        
    for x in xrange(0xff):
        complete = int(dut.main1.gen1.complete_o.value)
        if complete != 0:
            raise TestFailure("Premature completion")
        
        outStr = '{:x}'.format(int(dut.main1.gen1.mk_test9.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test8.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test7.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test6.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test5.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test4.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test3.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test2.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test1.value)) + \
            '{:x}'.format(int(dut.main1.gen1.mk_test0.value))
       
        
        yield RisingEdge(dut.clk_i)
        
    if outStr != "00000000fe":
        raise TestFailure("Wrong loaded values!")
    else:
        log.info("Ok!")
Example #8
def E_find_mk_test(dut):
    """
    Finds MK successfully
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end = '1000000300'  #Comparison currently hardcoded as 1000000200

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)

    print_process_vars(dut)
    yield load_file(dut, filename)
    print_process_vars(dut)
    yield load_mk(dut, start)
    print_process_vars(dut)
    yield load_mk(dut, end)
    print_process_vars(dut)

    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)

    yield wait_process(dut)

    print_process_vars(dut)

    if int(str(dut.pmk_valid), 2) == 0:
        raise TestFailure("MK search failed")
    else:
        log.info("Master key found!")
Example #9
File: __init__.py  Project: powlib/sim
class ScoreBlock(SwissBlock):
    '''
    Used for scoring a set of values.
    '''
    def __init__(self, inputs=2, name="", log_score=True):
        '''
        Constructor.
        '''
        SwissBlock.__init__(self, trans_func=self._score_func, inputs=inputs)
        self.__log = SimLog("cocotb.score.{}".format(name))
        self.__log_score = log_score

    def _score_func(self, *values):
        '''
        Checks to see if all the inputs presented in values are the same. If they're 
        the same, a True is returned, otherwise a False is returned.
        '''
        check = values[0]
        state = True
        message = ""
        for val in values:
            message += "{}==".format(val)
            state &= check == val
        message += "EQUAL" if state == True else "DIFFERENT"
        if self.__log_score == True: self.__log.info(message)

        return state

    def compare(self, *args):
        '''
        Directly write values for comparison to the score block.
        '''
        for idx, arg in enumerate(args):
            self.inports(idx).write(data=arg)
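A hedged usage sketch (the instance name and literal values are illustrative, and the scheduling of _score_func is assumed to be handled by the SwissBlock base class once every inport has data):

score = ScoreBlock(inputs=2, name="alu")
score.compare(0x1234, 0x1234)   # writes 0x1234 to inport 0 and inport 1; _score_func should log "4660==4660==EQUAL"
score.compare(0x1234, 0x4321)   # mismatched values should eventually log "...DIFFERENT" and score False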
Example #10
class RunningTest(RunningCoroutine):
    """Add some useful Test functionality to a RunningCoroutine."""
    class ErrorLogHandler(logging.Handler):
        def __init__(self, fn):
            self.fn = fn
            logging.Handler.__init__(self, level=logging.DEBUG)

        def handle(self, record):
            self.fn(self.format(record))

    def __init__(self, inst, parent):
        self.error_messages = []
        RunningCoroutine.__init__(self, inst, parent)
        self.log = SimLog("cocotb.test.%s" % self.__name__, id(self))
        self.started = False
        self.start_time = 0
        self.start_sim_time = 0
        self.expect_fail = parent.expect_fail
        self.expect_error = parent.expect_error
        self.skip = parent.skip
        self.stage = parent.stage

        # make sure not to create a circular reference here
        self.handler = RunningTest.ErrorLogHandler(self.error_messages.append)

    def _advance(self, outcome):
        if not self.started:
            self.log.info("Starting test: \"%s\"\nDescription: %s" %
                          (self.funcname, self.__doc__))
            self.start_time = time.time()
            self.start_sim_time = get_sim_time('ns')
            self.started = True
        return super(RunningTest, self)._advance(outcome)

    def _force_outcome(self, outcome):
        """
        This method exists as a workaround for preserving tracebacks on
        python 2, and is called in unschedule. Once Python 2 is dropped, this
        should be inlined into `abort` below, and the call in `unschedule`
        replaced with `abort(outcome.error)`.
        """
        assert self._outcome is None
        if _debug:
            self.log.debug("outcome forced to {}".format(outcome))
        self._outcome = outcome
        cocotb.scheduler.unschedule(self)

    # like RunningCoroutine.kill(), but with a way to inject a failure
    def abort(self, exc):
        """Force this test to end early, without executing any cleanup.

        This happens when a background task fails, and is consistent with
        how the behavior has always been. In future, we may want to behave
        more gracefully to allow the test body to clean up.

        `exc` is the exception that the test should report as its reason for
        aborting.
        """
        return self._force_outcome(outcomes.Error(exc))
Example #11
def A_load_config_test(dut):
    """
    Test correct start/end parameters get loaded into DUT
    """
    log = SimLog("cocotb.%s" % dut._name)
    #log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    mk_start = '1222222222'
    mk_end = '1222222f22'
    
    #Todo: fix this garbage when GHDL implements arrays in their VPI
    dut.test_start_val0 <= ord(mk_start[0])
    dut.test_start_val1 <= ord(mk_start[1])
    dut.test_start_val2 <= ord(mk_start[2])
    dut.test_start_val3 <= ord(mk_start[3])
    dut.test_start_val4 <= ord(mk_start[4])
    dut.test_start_val5 <= ord(mk_start[5])
    dut.test_start_val6 <= ord(mk_start[6])
    dut.test_start_val7 <= ord(mk_start[7])
    dut.test_start_val8 <= ord(mk_start[8])
    dut.test_start_val9 <= ord(mk_start[9])
    
    dut.test_end_val0 <= ord(mk_end[0])
    dut.test_end_val1 <= ord(mk_end[1])
    dut.test_end_val2 <= ord(mk_end[2])
    dut.test_end_val3 <= ord(mk_end[3])
    dut.test_end_val4 <= ord(mk_end[4])
    dut.test_end_val5 <= ord(mk_end[5])
    dut.test_end_val6 <= ord(mk_end[6])
    dut.test_end_val7 <= ord(mk_end[7])
    dut.test_end_val8 <= ord(mk_end[8])
    dut.test_end_val9 <= ord(mk_end[9])
    
    dut.init_load_i <= 1
    yield RisingEdge(dut.clk_i)
    dut.rst_i <= 1
    
    yield RisingEdge(dut.clk_i)
    dut.rst_i <= 0
    yield RisingEdge(dut.clk_i)
    dut.init_load_i <= 0
    
    yield wait_process(dut)
    #print_mk(dut) 
    
    if mk_end[1] != chr(int(str(dut.test_mk_val1), 2)):
        raise TestFailure("MK Final Value 1 Mismatch")
    if mk_end[3] != chr(int(str(dut.test_mk_val3), 2)):
        raise TestFailure("MK Final Value 3 Mismatch")
    if mk_end[7] != chr(int(str(dut.test_mk_val7), 2)):
        raise TestFailure("MK Final Value 7 Mismatch")
    if mk_end[9] != chr(int(str(dut.test_mk_val9), 2)):
        raise TestFailure("MK Final Value 9 Mismatch")
    else:
        log.info("MK Generation Ok!")
Example #12
def A_load_config_test(dut):
    """
    Test correct start/end parameters get loaded into DUT
    """
    log = SimLog("cocotb.%s" % dut._name)
    #log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    mk_start = '1222222222'
    mk_end = '1222222f22'

    #Todo: fix this garbage when GHDL implements arrays in their VPI
    dut.test_start_val0 <= ord(mk_start[0])
    dut.test_start_val1 <= ord(mk_start[1])
    dut.test_start_val2 <= ord(mk_start[2])
    dut.test_start_val3 <= ord(mk_start[3])
    dut.test_start_val4 <= ord(mk_start[4])
    dut.test_start_val5 <= ord(mk_start[5])
    dut.test_start_val6 <= ord(mk_start[6])
    dut.test_start_val7 <= ord(mk_start[7])
    dut.test_start_val8 <= ord(mk_start[8])
    dut.test_start_val9 <= ord(mk_start[9])

    dut.test_end_val0 <= ord(mk_end[0])
    dut.test_end_val1 <= ord(mk_end[1])
    dut.test_end_val2 <= ord(mk_end[2])
    dut.test_end_val3 <= ord(mk_end[3])
    dut.test_end_val4 <= ord(mk_end[4])
    dut.test_end_val5 <= ord(mk_end[5])
    dut.test_end_val6 <= ord(mk_end[6])
    dut.test_end_val7 <= ord(mk_end[7])
    dut.test_end_val8 <= ord(mk_end[8])
    dut.test_end_val9 <= ord(mk_end[9])

    dut.init_load_i <= 1
    yield RisingEdge(dut.clk_i)
    dut.rst_i <= 1

    yield RisingEdge(dut.clk_i)
    dut.rst_i <= 0
    yield RisingEdge(dut.clk_i)
    dut.init_load_i <= 0

    yield wait_process(dut)
    #print_mk(dut)

    if mk_end[1] != chr(int(str(dut.test_mk_val1), 2)):
        raise TestFailure("MK Final Value 1 Mismatch")
    if mk_end[3] != chr(int(str(dut.test_mk_val3), 2)):
        raise TestFailure("MK Final Value 3 Mismatch")
    if mk_end[7] != chr(int(str(dut.test_mk_val7), 2)):
        raise TestFailure("MK Final Value 7 Mismatch")
    if mk_end[9] != chr(int(str(dut.test_mk_val9), 2)):
        raise TestFailure("MK Final Value 9 Mismatch")
    else:
        log.info("MK Generation Ok!")
Example #13
class RunningTest(RunningCoroutine):
    """Add some useful Test functionality to a RunningCoroutine."""
    class ErrorLogHandler(logging.Handler):
        def __init__(self, fn):
            self.fn = fn
            logging.Handler.__init__(self, level=logging.DEBUG)

        def handle(self, record):
            self.fn(self.format(record))

    def __init__(self, inst, parent):
        self.error_messages = []
        RunningCoroutine.__init__(self, inst, parent)
        self.log = SimLog("cocotb.test.%s" % self.__name__, id(self))
        self.started = False
        self.start_time = 0
        self.start_sim_time = 0
        self.expect_fail = parent.expect_fail
        self.expect_error = parent.expect_error
        self.skip = parent.skip
        self.stage = parent.stage

        self.handler = RunningTest.ErrorLogHandler(self._handle_error_message)
        cocotb.log.addHandler(self.handler)

    def _advance(self, outcome):
        if not self.started:
            self.error_messages = []
            self.log.info("Starting test: \"%s\"\nDescription: %s" %
                          (self.funcname, self.__doc__))
            self.start_time = time.time()
            self.start_sim_time = get_sim_time('ns')
            self.started = True
        return super(RunningTest, self)._advance(outcome)

    def _handle_error_message(self, msg):
        self.error_messages.append(msg)

    # like RunningCoroutine.kill(), but with a way to inject a failure
    def abort(self, exc):
        """
        Force this test to end early, without executing any cleanup.

        This happens when a background task fails, and is consistent with
        how the behavior has always been. In future, we may want to behave
        more gracefully to allow the test body to clean up.

        `exc` is the exception that the test should report as its reason for
        aborting.
        """
        assert self._outcome is None
        if _debug:
            self.log.debug("abort() called on test")
        self._outcome = outcomes.Error(exc)
        cocotb.scheduler.unschedule(self)
Example #14
class RunningTest(RunningCoroutine):
    """Add some useful Test functionality to a RunningCoroutine."""

    class ErrorLogHandler(logging.Handler):
        def __init__(self, fn):
            self.fn = fn
            logging.Handler.__init__(self, level=logging.DEBUG)

        def handle(self, record):
            self.fn(self.format(record))

    def __init__(self, inst, parent):
        self.error_messages = []
        RunningCoroutine.__init__(self, inst, parent)
        self.log = SimLog("cocotb.test.%s" % self.__name__, id(self))
        self.started = False
        self.start_time = 0
        self.start_sim_time = 0
        self.expect_fail = parent.expect_fail
        self.expect_error = parent.expect_error
        self.skip = parent.skip
        self.stage = parent.stage

        self.handler = RunningTest.ErrorLogHandler(self._handle_error_message)
        cocotb.log.addHandler(self.handler)

    def _advance(self, outcome):
        if not self.started:
            self.error_messages = []
            self.log.info("Starting test: \"%s\"\nDescription: %s" %
                          (self.funcname, self.__doc__))
            self.start_time = time.time()
            self.start_sim_time = get_sim_time('ns')
            self.started = True
        try:
            self.log.debug("Sending {}".format(outcome))
            return outcome.send(self._coro)
        except TestComplete as e:
            if isinstance(e, TestFailure):
                self.log.warning(str(e))
            else:
                self.log.info(str(e))

            buff = StringIO()
            for message in self.error_messages:
                print(message, file=buff)
            e.stderr.write(buff.getvalue())
            raise
        except StopIteration:
            raise TestSuccess()
        except Exception as e:
            raise raise_error(self, "Send raised exception:")

    def _handle_error_message(self, msg):
        self.error_messages.append(msg)
Example #15
class RunningTest(RunningCoroutine):
    """Add some useful Test functionality to a RunningCoroutine."""

    class ErrorLogHandler(logging.Handler):
        def __init__(self, fn):
            self.fn = fn
            logging.Handler.__init__(self, level=logging.DEBUG)

        def handle(self, record):
            self.fn(self.format(record))

    def __init__(self, inst, parent):
        self.error_messages = []
        RunningCoroutine.__init__(self, inst, parent)
        self.log = SimLog("cocotb.test.%s" % self.__name__, id(self))
        self.started = False
        self.start_time = 0
        self.start_sim_time = 0
        self.expect_fail = parent.expect_fail
        self.expect_error = parent.expect_error
        self.skip = parent.skip
        self.stage = parent.stage

        self.handler = RunningTest.ErrorLogHandler(self._handle_error_message)
        cocotb.log.addHandler(self.handler)

    def _advance(self, outcome):
        if not self.started:
            self.error_messages = []
            self.log.info("Starting test: \"%s\"\nDescription: %s" %
                          (self.funcname, self.__doc__))
            self.start_time = time.time()
            self.start_sim_time = get_sim_time('ns')
            self.started = True
        try:
            self.log.debug("Sending {}".format(outcome))
            return outcome.send(self._coro)
        except TestComplete as e:
            if isinstance(e, TestFailure):
                self.log.warning(str(e))
            else:
                self.log.info(str(e))

            buff = StringIO()
            for message in self.error_messages:
                print(message, file=buff)
            e.stderr.write(buff.getvalue())
            raise
        except StopIteration:
            raise TestSuccess()
        except Exception as e:
            raise raise_error(self, "Send raised exception:")

    def _handle_error_message(self, msg):
        self.error_messages.append(msg)
Example #16
File: __init__.py  Project: powlib/sim
class PrintBlock(SwissBlock):
    '''
    Simply prints out the data it
    receives.
    '''
    def __init__(self, name="data"):
        SwissBlock.__init__(self, trans_func=self._print_func)
        self.__log = SimLog("cocotb.print.{}".format(name))

    def _print_func(self, data):
        self.__log.info("{}".format(data))
Example #17
class ClockDriver(Driver):
    '''
    The clock driver simply drives the system's clocks.
    '''
    def __init__(self, interface, param_namespace, name=""):
        '''
        Constructs the clock driver.
        interface       = The interface associated with the driver should contain the
                          cocotb handles to all the clocks that need to be driven.
        param_namespace = A namespace with parameters associated with each clock 
                          in interface.
        name            = String identifier needed for logging.                       
        '''
        self.__param_namespace = param_namespace
        self.__log = SimLog("cocotb.clks.{}".format(name))
        Driver.__init__(self, interface)

    @property
    def inport(self):
        '''
        No data should be written to the clock driver.
        '''
        raise NotImplementedError("Do not write data into the clock driver.")

    def write(self, data):
        '''
        No data should be written to the clock driver.
        '''
        raise NotImplementedError("Do not write data into the clock driver.")

    @coroutine
    def _drive(self):
        '''
        Implements the behavior of the clock driver.
        '''

        for name, handle in vars(self._interface).items():
            params = getattr(self.__param_namespace, name, None)
            period = getattr(params, "period", None)
            phase = getattr(params, "phase", None)
            param_dict = {}
            if period is not None: param_dict["period"] = period
            if phase is not None: param_dict["phase"] = phase
            self.__log.info(
                "Starting clock {} with period {} and phase {}...".format(
                    name, period, phase))
            fork(start_clock(clock=handle, **param_dict))

        yield NullTrigger()
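From the _drive body above, param_namespace is expected to provide, for each clock attribute on the interface, an object with optional period and phase attributes. A hedged construction sketch (SimpleNamespace is just a stand-in container; the powlib Interface carrying the clock handles, the units expected by start_clock, and the names sys_clk / mem_clk are assumptions):

from types import SimpleNamespace

clk_params = SimpleNamespace(
    sys_clk=SimpleNamespace(period=10, phase=0),   # both parameters forwarded to start_clock
    mem_clk=SimpleNamespace(period=4),             # no phase attribute -> start_clock default is used
)
# clock_if would be a powlib interface whose attributes sys_clk / mem_clk are cocotb handles:
# clk_driver = ClockDriver(interface=clock_if, param_namespace=clk_params, name="top")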
Example #18
def H_continuous_buffer_test(dut):
    """Loop message buffer several times"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    

    yield reset(dut)
    
    iterations = 30
    mockW = [0] * iterations
    compareW = [0] * iterations
    for i in xrange(iterations):
        mockObject = Sha1Model()
        #
    
        yield load_data(dut, log, mockObject, 16)
        mockObject.processInput()
        mockObject.processBuffer()
    
        #yield load_data(dut, log, mockObject, 73)
    
        #yield load_data(dut, log, None, 85)
    
        mockOut = "{:08x}".format(mockObject.H0)
        compare0 = convert_hex(dut.test_sha1_process_buffer0_o.value).rjust(8, '0')
        compare1 = convert_hex(dut.test_sha1_process_buffer_o.value).rjust(8, '0')
        #print mockOut + " - " + compare0 + " - " + compare1 + " - " + str(dut.w_processed_valid.value)
        
        mockW[i] = mockOut
        if i >= 11:
            compareW[i - 11] = compare1
        
    #print str(mockW[0:-11]).strip('[]')
    #print str(compareW[0:-11]).strip('[]')
       
    

    if mockW[0:-11] != compareW[0:-11]:
        raise TestFailure(
            "Continuous buffer incorrect: {0} != {1}".format(str(mockW[0:-11]).strip('[]'), str(compareW[0:-11]).strip('[]')))
    else:
        log.info("Continuous buffer ok!") 
Example #19
def A_load_packet_test(dut):
    """
    Test proper load of filedata into DUT
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    filename = '../test_data/wpa2-psk-linksys.hccap'

    obj = wpa2slow.handshake.Handshake()
    objSha = wpa2slow.sha1.Sha1Model()
    objHmac = wpa2slow.hmac.HmacModel(objSha)
    objPbkdf2 = wpa2slow.pbkdf2.Pbkdf2Model()
    objPrf = wpa2slow.compare.PrfModel(objHmac)

    (ssid, mac1, mac2, nonce1, nonce2, eapol, eapol_size,
     keymic) = obj.load(filename)

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)

    yield load_file(dut, filename)

    ssid_test1 = dut.test_ssid_1
    ssid_test2 = dut.test_ssid_2
    ssid_test3 = dut.test_ssid_3

    if ord(ssid[0][0]) != int(str(ssid_test1), 2):
        raise TestFailure("ssid_test1 differs from mock")
    elif ord(ssid[0][3]) != int(str(ssid_test2), 2):
        raise TestFailure("ssid_test2 differs from mock")
    elif ord(ssid[0][6]) != int(str(ssid_test3), 2):
        raise TestFailure("ssid_test3 differs from mock")
    elif ord(ssid[0][6]) == int(
            str(ssid_test1),
            2):  #Todo: remove false positive if 1st and 7th chars equal
        raise TestFailure("SSID comparisons failing.")
    else:
        log.info("SSID Ok!")
Example #20
def drivePlb(name, busAgt, baseAddr):
    log = SimLog("cocotb.{}".format(name))
    log.info("Starting drivePlb as a coroutine...")
    for eachBurst in range(BURST_TOTAL):
        log.info("Writing out burst {}...".format(eachBurst))
        expDatas = []
        expAddrs = []
        expBes = []
        for eachWord in range(WORDS_PER_BURST):
            addr = baseAddr + (eachWord +
                               eachBurst * WORDS_PER_BURST) * BPD
            data = randint(0, (1 << (BPD * BYTE_WIDTH)) - 1)
            be = randint(0, (1 << BPD) - 1)
            expDatas.append(data)
            expAddrs.append(addr)
            expBes.append(be)
            busAgt.write(addr=addr, data=data, be=be)
        log.info("Reading back and scoring burst...")
        transList = yield busAgt.read(addr=expAddrs)
        for be, exp, actTrans in zip(expBes, expDatas, transList):
            act = int(actTrans.data.value)
            for eachByte in range(BPD):
                if (1 << eachByte) & be:
                    mask = (((1 << BYTE_WIDTH) - 1) <<
                            (eachByte * BYTE_WIDTH))
                    te.ipmaxiFullInstCompare(act=act & mask,
                                             exp=exp & mask)
Example #21
def D_set_session_params_test(dut):
    """
    Loads handshake, start, end MK values
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end =   '1000000200'

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    yield load_file(dut, filename)
    
    yield load_mk(dut, start)
    
    yield load_mk(dut, end)
    
    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    
    #yield wait_process(dut)
    
    mk_test1 = dut.test_mk1
    mk_test2 = dut.test_mk2
    mk_test3 = dut.test_mk3
    
    if ord(start[0]) != int(str(mk_test1), 2):
        raise TestFailure("Start MK inequal")
    elif ord(end[7]) != int(str(mk_test2), 2):
        raise TestFailure("End MK inequal1")
    elif ord(end[9]) != int(str(mk_test3), 2):
        raise TestFailure("End MK inequal2")
    else:
        log.info("Start/End Params Ok!")
Example #22
def D_set_session_params_test(dut):
    """
    Loads handshake, start, end MK values
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    filename = '../test_data/wpa2-psk-linksys.hccap'
    start = '1000000000'
    end = '1000000200'

    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)

    yield load_file(dut, filename)

    yield load_mk(dut, start)

    yield load_mk(dut, end)

    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)

    #yield wait_process(dut)

    mk_test1 = dut.test_mk1
    mk_test2 = dut.test_mk2
    mk_test3 = dut.test_mk3

    if ord(start[0]) != int(str(mk_test1), 2):
        raise TestFailure("Start MK inequal")
    elif ord(end[7]) != int(str(mk_test2), 2):
        raise TestFailure("End MK inequal1")
    elif ord(end[9]) != int(str(mk_test3), 2):
        raise TestFailure("End MK inequal2")
    else:
        log.info("Start/End Params Ok!")
Example #23
def A_load_packet_test(dut):
    """
    Test proper load of filedata into DUT
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    
    obj = wpa2slow.handshake.Handshake()
    objSha = wpa2slow.sha1.Sha1Model()
    objHmac = wpa2slow.hmac.HmacModel(objSha)
    objPbkdf2 = wpa2slow.pbkdf2.Pbkdf2Model()
    objPrf = wpa2slow.compare.PrfModel(objHmac)
    
    (ssid, mac1, mac2, nonce1, nonce2, eapol, eapol_size, keymic) = obj.load(filename)
    
    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    yield load_file(dut, filename)
    
    ssid_test1 = dut.test_ssid_1
    ssid_test2 = dut.test_ssid_2
    ssid_test3 = dut.test_ssid_3
    
    if ord(ssid[0][0]) != int(str(ssid_test1), 2):
        raise TestFailure("ssid_test1 differs from mock")
    elif ord(ssid[0][3]) != int(str(ssid_test2), 2):
        raise TestFailure("ssid_test2 differs from mock")
    elif ord(ssid[0][6]) != int(str(ssid_test3), 2):
        raise TestFailure("ssid_test3 differs from mock")
    elif ord(ssid[0][6]) == int(str(ssid_test1), 2):    #Todo: remove false positive if 1st and 7th chars equal
        raise TestFailure("SSID comparisons failing.")
    else:
        log.info("SSID Ok!")
Example #24
def A_load_data_test(dut):
    """
    Test for data properly shifted in
    w(0) gets loaded in LAST
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    mockObject = Sha1Model()
    
    yield reset(dut)
    yield load_data(dut, log, mockObject, 16)

    #mockObject.displayAll()
    mockOut = "{:08x}".format(mockObject.W[15])

    #print convert_hex(dut.dat_1_o) + " " + convert_hex(dut.dat_2_o) + " " + convert_hex(dut.dat_3_o) + " " + convert_hex(dut.dat_4_o) + " " + convert_hex(dut.dat_5_o)

    if convert_hex(dut.test_sha1_load_o).zfill(8) != mockOut:
        raise TestFailure(
            "Load data is incorrect: {0} != {1}".format(convert_hex(dut.test_sha1_load_o), mockOut))
    else:
        log.info("Ok!")
Example #25
def B_compare_data_test(dut):
    """
    Tests that generated data gets compared against test values
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    outStr = ''
    
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
        
    for x in xrange(0x90):
        complete = int(dut.main1.comp_complete)
        
        outStr = str(x) + ' - ' + str(int(dut.main1.i.value)) + ' - ' + str(complete) + ' - ' + \
            '{:x}'.format(int(dut.main1.comp1.mk_test_comp.value)) + ": " + \
            '{:x}'.format(int(dut.main1.comp1.mk_test9.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test8.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test7.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test6.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test5.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test4.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test3.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test2.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test1.value)) + \
            '{:x}'.format(int(dut.main1.comp1.mk_test0.value))
            
        if complete == 1:
            break
        
        yield RisingEdge(dut.clk_i)
        
    if complete == 0:
        raise TestFailure("Comparison never reached")
    else:
        log.info("Ok!")
Example #26
def Z_wavedrom_test(dut):
    """
    Generate a JSON wavedrom diagram of a trace
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 1000).start())

    mockObject = ZtexModel()
    #shaObject = Sha1Driver(dut, None, dut.clk_i)

    #yield load_data(dut, log, mockObject, 80)

    args = [
        dut.rst_i, dut.cs_i, dut.cont_i, dut.clk_i, dut.din_i, dut.dout_i,
        dut.SLOE, dut.SLRD, dut.SLWR, dut.FIFOADR0, dut.FIFOADR1, dut.PKTEND,
        dut.FLAGA, dut.FLAGB
    ]

    with cocotb.wavedrom.trace(*args, clk=dut.clk_i) as waves:

        yield RisingEdge(dut.clk_i)
        yield reset(dut)

        yield load_data(dut, log, mockObject, 16)

        if _debug == True:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
        yield load_data(dut, log, mockObject, 60)

        if _debug == True:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
            #log.info("{:08x}".format(mockObject.W[78]))
            #log.info("{:08x}".format(mockObject.W[79]))
            #log.info("{:08x}".format(mockObject.W[16 - 14]))
            #log.info("{:08x}".format(mockObject.W[16 - 16]))
            #log.info("{:08x}".format(mockObject.W[16]))

        yield load_data(dut, log, mockObject, 90)

        if _debug == True:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
            log.info(convert_hex(dut.pbuffer1.test_word_4).zfill(8))
            #log.info(dut.pinput1.test_word_1.value.hex())
            #log.info(dut.pinput1.test_word_2.value.hex())
            #log.info(dut.pinput1.test_word_3.value.hex())
            #log.info(dut.pinput1.test_word_4.value.hex())
            #log.info(dut.pinput1.test_word_5.value.hex())
            #log.info(dut.pinput1.test_word_5)
            #log.info(waves.dumpj(header = {'text':'D_wavedrom_test', 'tick':-2}, config = {'hscale':3}))

        waves.write('wavedrom.json',
                    header={
                        'text': 'D_wavedrom_test',
                        'tick': -1
                    },
                    config={'hscale': 5})
Example #27
def G_process_second_buffer_test(dut):
    """Test data after processing the second message buffer"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    mockObject1 = Sha1Model()
    mockObject2 = Sha1Model()

    yield reset(dut)
    
    yield load_data(dut, log, mockObject1, 16)
    mockObject1.processInput()
    mockObject1.processBuffer()
    yield load_data(dut, log, mockObject2, 16)
    mockObject2.processInput()
    mockObject2.processBuffer()
    yield load_data(None, log, mockObject1, 85)
    yield load_data(None, log, mockObject2, 85)
    
    yield load_data(dut, log, None, 85)
    
    mock1 = "{:08x}".format(mockObject1.H0)
    compare1 = convert_hex(dut.pbuffer1.test_word_4.value).rjust(8, '0')
    
    mock2 = "{:08x}".format(mockObject2.H0)
    compare2 = convert_hex(dut.pbuffer2.test_word_4.value).rjust(8, '0')
    

    if compare1 != mock1:
        raise TestFailure(
            "Second buffer1 incorrect: {0} != {1}".format(compare1, mock1))
    elif compare2 != mock2:
        raise TestFailure(
            "Second buffer2 incorrect: {0} != {1}".format(compare2, mock2))
    else:
        log.info("Second buffer ok!") 
Example #28
class AFIFODriver():
    def __init__(self, signals, debug=False, slots=0, width=0):
        level = logging.DEBUG if debug else logging.WARNING
        self.log = SimLog("afifo.log")
        file_handler = RotatingFileHandler("sim.log",
                                           maxBytes=(5 * 1024 * 1024),
                                           backupCount=2,
                                           mode='w')
        file_handler.setFormatter(SimColourLogFormatter())
        self.log.addHandler(file_handler)
        self.log.addFilter(SimTimeContextFilter())
        self.log.setLevel(level)
        self.log.info("SEED ======> %s", str(cocotb.RANDOM_SEED))

        self.clk_wr = signals.clk_wr
        self.valid_wr = signals.wr_en_i
        self.data_wr = signals.wr_data_i
        self.ready_wr = signals.wr_full_o
        self.clk_rd = signals.clk_rd
        self.valid_rd = signals.rd_empty_o
        self.data_rd = signals.rd_data_o
        self.ready_rd = signals.rd_en_i
        self.valid_wr <= 0
        self.ready_rd <= 0
        self.log.setLevel(level)

    async def write(self, data, sync=True, **kwargs):
        self.log.info("[AFIFO driver] write => %x" % data)
        while True:
            await FallingEdge(self.clk_wr)
            self.valid_wr <= 1
            self.data_wr <= data
            await RisingEdge(self.clk_wr)
            if self.ready_wr == 0:
                break
            elif kwargs["exit_full"] == True:
                return "FULL"
        self.valid_wr <= 0
        return 0

    async def read(self, sync=True, **kwargs):
        while True:
            await FallingEdge(self.clk_rd)
            if self.valid_rd == 0:
                data = self.data_rd.value  # We capture before we incr. rd ptr
                self.ready_rd <= 1
                await RisingEdge(self.clk_rd)
                break
            elif kwargs["exit_empty"] == True:
                return "EMPTY"
        self.log.info("[AFIFO-driver] read => %x" % data)
        self.ready_rd <= 0
        return data
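A hedged usage sketch with the modern async/await cocotb API (the DUT port names are assumed to match the attributes read in __init__). Note that, as written, write() and read() index kwargs unconditionally, so exit_full / exit_empty must always be supplied:

import cocotb


@cocotb.test()
async def afifo_smoke_test(dut):
    driver = AFIFODriver(signals=dut, debug=True)        # dut must expose clk_wr, wr_en_i, wr_data_i, ...
    status = await driver.write(0xAB, exit_full=False)   # returns 0, or "FULL" when exit_full=True and the FIFO is full
    data = await driver.read(exit_empty=False)           # returns the captured value, or "EMPTY"
    assert status == 0 and data == 0xAB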
Example #29
def Z_wavedrom_test(dut):
    """
    Generate a JSON wavedrom diagram of a trace
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 100).start())
    
    mockSha1 = wpa2slow.sha1.Sha1Model()
    mockObject = wpa2slow.hmac.HmacModel(mockSha1)
    shaObject = HmacDriver(dut, None, dut.clk_i)
    
    #yield load_random_data(dut, log, mockObject, 80)
    
    
    args = [
            dut.rst_i,
            dut.dat_i,
            dut.i,
            dut.i_mux
            ]

    with cocotb.wavedrom.trace(*args, clk=dut.clk_i) as waves:
    
        yield RisingEdge(dut.clk_i)
        yield reset(dut)
        yield load_random_data(dut, log, mockObject, 16)
        mockObject.processInput()
        mockObject.processBuffer()
        
        if _debug == True:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
        yield load_random_data(dut, log, mockObject, 60)
        
        
            
        if _debug == True:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
            
        yield load_random_data(dut, log, mockObject, 90)
        
        if _debug == True:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
            log.info(convert_hex(dut.pbuffer1.test_word_4).zfill(8))
            
        waves.write('wavedrom.json', header = {'text':'D_wavedrom_test', 'tick':-1}, config = {'hscale':5})
Example #30
def B_reset_test(dut):
    """Testing synchronous reset"""
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 10000).start())
    
    mockObject = Sha1Model()
    
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    if dut.i.value != 0:
        raise TestFailure(
            "Reset 1 failed!")
    yield load_data(dut, log, mockObject, 18)
    if convert_hex(dut.dat_1_o) == '0':
        raise TestFailure(
            "Data not populated!")
    else:
        log.info("Testing reset")
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    if dut.i.value != 0:
        raise TestFailure(
            "Reset 2 failed!")
    else:
        log.info("Reset Ok!")
    yield load_data(dut, log, mockObject, 19)

    mockOut = "{:08x}".format(mockObject.W[15])

    if convert_hex(dut.test_sha1_load_o).zfill(8) != mockOut:
        raise TestFailure(
            "Reload is incorrect: {0} != {1}".format(convert_hex(dut.test_sha1_load_o), mockOut))
    else:
        log.info("Ok!")
Example #31
def Z_wavedrom_test(dut):
    """
    Generate a JSON wavedrom diagram of a trace
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    mockObject = ZtexModel()
    #shaObject = Sha1Driver(dut, None, dut.clk_i)
    
    #yield load_data(dut, log, mockObject, 80)
    
    
    args = [
            dut.rst_i,
            dut.cs_i,
            dut.cont_i,
            dut.clk_i,
            dut.din_i,
            dut.dout_i,
            dut.SLOE,
            dut.SLRD,
            dut.SLWR,
            dut.FIFOADR0,
            dut.FIFOADR1,
            dut.PKTEND,
            dut.FLAGA,
            dut.FLAGB
            ]

    with cocotb.wavedrom.trace(*args, clk=dut.clk_i) as waves:
    
        yield RisingEdge(dut.clk_i)
        yield reset(dut)
        
        yield load_data(dut, log, mockObject, 16)
        
        if _debug == True:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
        yield load_data(dut, log, mockObject, 60)
        
        
            
        if _debug == True:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
            #log.info("{:08x}".format(mockObject.W[78]))
            #log.info("{:08x}".format(mockObject.W[79]))
            #log.info("{:08x}".format(mockObject.W[16 - 14]))
            #log.info("{:08x}".format(mockObject.W[16 - 16]))
            #log.info("{:08x}".format(mockObject.W[16]))
            
        yield load_data(dut, log, mockObject, 90)
        
        if _debug == True:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
            log.info(convert_hex(dut.pbuffer1.test_word_4).zfill(8))
            #log.info(dut.pinput1.test_word_1.value.hex())
            #log.info(dut.pinput1.test_word_2.value.hex())
            #log.info(dut.pinput1.test_word_3.value.hex())
            #log.info(dut.pinput1.test_word_4.value.hex())
            #log.info(dut.pinput1.test_word_5.value.hex())
            #log.info(dut.pinput1.test_word_5)
            #log.info(waves.dumpj(header = {'text':'D_wavedrom_test', 'tick':-2}, config = {'hscale':3}))
            
        waves.write('wavedrom.json', header = {'text':'D_wavedrom_test', 'tick':-1}, config = {'hscale':5})
Example #32
def C_load_second_test(dut):
    """
    Resets data and tries again
    """
    log = SimLog("cocotb.%s" % dut._name)
    log.setLevel(logging.DEBUG)
    cocotb.fork(Clock(dut.clk_i, 1000).start())
    
    filename = '../test_data/wpa2-psk-linksys.hccap'
    
    obj = wpa2slow.Handshake()
    
    obj.load(filename)
    ssid = obj.ssid
    mac1 = obj.mac1
    mac2 = obj.mac2
    nonce1 = obj.nonce1
    nonce2 = obj.nonce2
    eapol = obj.eapol
    eapol_size = obj.eapol_size
    keymic = obj.keymic
    
    dut.cs_i <= 1
    yield reset(dut)
    yield RisingEdge(dut.clk_i)
    
    yield load_file(dut, filename)
    
    #This clock isn't necessary while pipelining
    yield RisingEdge(dut.clk_i)
    
    #yield wait_process(dut)
    
    ssid_test1 = dut.test_ssid_1
    ssid_test2 = dut.test_ssid_2
    ssid_test3 = dut.test_ssid_3
    
    if ord(ssid[0]) != int(str(ssid_test1), 2):
        raise TestFailure("ssid_test1 differs from mock")
    elif ord(ssid[3]) != int(str(ssid_test2), 2):
        raise TestFailure("ssid_test2 differs from mock")
    elif ord(ssid[6]) != int(str(ssid_test3), 2):
        raise TestFailure("ssid_test3 differs from mock")
    elif ord(ssid[6]) == int(str(ssid_test1), 2):    #Todo: remove false positive if 1st and 7th chars equal
        raise TestFailure("SSID comparisons failing.")
    else:
        log.info("SSID Ok!")
    mic_test1 = dut.test_keymic_1
    mic_test2 = dut.test_keymic_2
    mic_test3 = dut.test_keymic_3
        
    if ord(keymic[0]) != int(str(mic_test1), 2):
        raise TestFailure("mic_test1 differs from mock")
    elif ord(keymic[14]) != int(str(mic_test2), 2):
        raise TestFailure("mic_test2 differs from mock")
    elif ord(keymic[15]) != int(str(mic_test3), 2):
        raise TestFailure("mic_test3 differs from mock")
    elif ord(keymic[5]) == int(str(mic_test1), 2):    #Todo: remove false positive
        raise TestFailure("MIC comparisons failing.")
    else:
        log.info("MIC Ok!")
Example #33
class Scoreboard(object):
    """Generic scoreboarding class

    We can add interfaces by providing a monitor and an expected output queue

    The expected output can either be a function which provides a transaction
    or a simple list containing the expected output.

    TODO:
        Statistics for end-of-test summary etc.
    """

    def __init__(self, dut, reorder_depth=0, fail_immediately=True):
        self.dut = dut
        self.log = SimLog("cocotb.scoreboard.%s" % self.dut._name)
        self.errors = 0
        self.expected = {}
        self._imm = fail_immediately

    @property
    def result(self):
        """Determine the test result, do we have any pending data remaining?"""
        fail = False
        for monitor, expected_output in self.expected.items():
            if callable(expected_output):
                self.log.debug("Can't check all data returned for %s since "
                               "expected output is callable function rather "
                               "than a list" % str(monitor))
                continue
            if len(expected_output):
                self.log.warn("Still expecting %d transactions on %s" %
                              (len(expected_output), str(monitor)))
                for index, transaction in enumerate(expected_output):
                    self.log.info("Expecting %d:\n%s" %
                                  (index, hexdump(str(transaction))))
                    if index > 5:
                        self.log.info("... and %d more to come" %
                                      (len(expected_output) - index - 1))
                        break
                fail = True
        if fail:
            return TestFailure("Not all expected output was received")
        if self.errors:
            return TestFailure("Errors were recorded during the test")
        return TestSuccess()

    def compare(self, got, exp, log, strict_type=True):
        """
        Common function for comparing two transactions.

        Can be re-implemented by a subclass.
        """

        # Compare the types
        if strict_type and type(got) != type(exp):
            self.errors += 1
            log.error("Received transaction type is different than expected")
            log.info("Received: %s but expected %s" %
                     (str(type(got)), str(type(exp))))
            if self._imm:
                raise TestFailure("Received transaction of wrong type. "
                                  "Set strict_type=False to avoid this.")
            return
        # Or convert to a string before comparison
        elif not strict_type:
            got, exp = str(got), str(exp)

        # Compare directly
        if got != exp:
            self.errors += 1

            # Try our best to print out something useful
            strgot, strexp = str(got), str(exp)

            log.error("Received transaction differed from expected output")
            if not strict_type:
                log.info("Expected:\n" + hexdump(strexp))
            else:
                log.info("Expected:\n" + repr(exp))
            if not isinstance(exp, str):
                try:
                    for word in exp:
                        log.info(str(word))
                except:
                    pass
            if not strict_type:
                log.info("Received:\n" + hexdump(strgot))
            else:
                log.info("Received:\n" + repr(got))
            if not isinstance(got, str):
                try:
                    for word in got:
                        log.info(str(word))
                except:
                    pass
            log.warning("Difference:\n%s" % hexdiffs(strexp, strgot))
            if self._imm:
                raise TestFailure("Received transaction differed from expected"
                                  "transaction")
        else:
            # Don't want to fail the test
            # if we're passed something without __len__
            try:
                log.debug("Received expected transaction %d bytes" %
                          (len(got)))
                log.debug(repr(got))
            except:
                pass

    def add_interface(self, monitor, expected_output, compare_fn=None,
                      reorder_depth=0, strict_type=True):
        """Add an interface to be scoreboarded.

            Provides a function which the monitor will callback with received
            transactions

            Simply check against the expected output.

        """
        # save a handle to the expected output so we can check if all expected
        # data has been received at the end of a test.
        self.expected[monitor] = expected_output

        # Enforce some type checking as we only work with a real monitor
        if not isinstance(monitor, Monitor):
            raise TypeError("Expected monitor on the interface but got %s" %
                            (monitor.__class__.__name__))

        if compare_fn is not None:
            if callable(compare_fn):
                monitor.add_callback(compare_fn)
                return
            raise TypeError("Expected a callable compare function but got %s" %
                            str(type(compare_fn)))

        self.log.info("Created with reorder_depth %d" % reorder_depth)

        def check_received_transaction(transaction):
            """Called back by the monitor when a new transaction has been
            received"""

            if monitor.name:
                log_name = self.log.name + '.' + monitor.name
            else:
                log_name = self.log.name + '.' + monitor.__class__.__name__

            log = logging.getLogger(log_name)

            if callable(expected_output):
                exp = expected_output(transaction)

            elif len(expected_output):
                for i in range(min((reorder_depth + 1), len(expected_output))):
                    if expected_output[i] == transaction:
                        break
                else:
                    i = 0
                exp = expected_output.pop(i)
            else:
                self.errors += 1
                log.error("Received a transaction but wasn't expecting "
                          "anything")
                log.info("Got: %s" % (hexdump(str(transaction))))
                if self._imm:
                    raise TestFailure("Received a transaction but wasn't "
                                      "expecting anything")
                return

            self.compare(transaction, exp, log, strict_type=strict_type)

        monitor.add_callback(check_received_transaction)
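For reference, the usual consumption pattern for this class (a hedged sketch; dut, out_monitor and the expected list are placeholders): the scoreboard is handed a Monitor plus an expected-output list or callback, and its result property is raised or inspected at the end of the test.

expected = []                                        # the testbench appends expected transactions here
scoreboard = Scoreboard(dut, fail_immediately=False)
scoreboard.add_interface(out_monitor, expected)      # out_monitor: an instance of a Monitor subclass
# ... run stimulus, letting monitors call back into the scoreboard ...
# raise scoreboard.result                            # TestSuccess, or TestFailure if errors / leftovers remain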
Example #34
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""
    def __init__(self, root_name, modules, tests=None, seed=None, hooks=[]):
        """
        Args:
            root_name (str): The name of the root handle.
            modules (list): A list of Python module names to run.
            tests (list, optional): A list of tests to run.
                Defaults to ``None``, meaning all discovered tests will be run.
            seed (int,  optional): The seed for the random number generator to use.
                Defaults to ``None``.
            hooks (list, optional): A list of hook modules to import.
                Defaults to the empty list.
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")
        self._seed = seed
        self._hooks = hooks

    def initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0

        results_filename = os.getenv('COCOTB_RESULTS_FILE', "results.xml")
        suite_name = os.getenv('RESULT_TESTSUITE', "all")
        package_name = os.getenv('RESULT_TESTPACKAGE', "all")

        self.xunit = XUnitReporter(filename=results_filename)

        self.xunit.add_testsuite(name=suite_name,
                                 tests=repr(self.ntests),
                                 package=package_name)

        if (self._seed is not None):
            self.xunit.add_property(name="random_seed",
                                    value=("%d" % self._seed))

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                self.log.debug("Python Path: " + ",".join(sys.path))
                self.log.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                self.log.critical("Failed to import module %s: %s",
                                  module_name, E)
                self.log.info("MODULE variable was \"%s\"",
                              ".".join(self._modules))
                self.log.info("Traceback: ")
                self.log.info(traceback.format_exc())
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    try:
                        _test = getattr(module, test)
                    except AttributeError:
                        self.log.error(
                            "Requested test %s wasn't found in module %s",
                            test, module_name)
                        err = AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))
                        _py_compat.raise_from(err,
                                              None)  # discard nested traceback

                    if not hasattr(_test, "im_test"):
                        self.log.error(
                            "Requested %s from module %s isn't a cocotb.test decorated coroutine",
                            test, module_name)
                        raise ImportError("Failed to find requested test %s" %
                                          test)
                    self._queue.append(_test(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except Exception:
                        skip = True
                        self.log.warning("Failed to initialize test %s" %
                                         thing.name,
                                         exc_info=True)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0",
                                                sim_time_ns="0.0",
                                                ratio_time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                        self._store_test_result(module_name, thing.name, None,
                                                0.0, 0.0, 0.0)
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: test.sort_name())

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module, valid_tests.funcname))

        for module_name in self._hooks:
            self.log.info("Loading hook from module '" + module_name + "'")
            module = _my_import(module_name)

            for thing in vars(module).values():
                if hasattr(thing, "im_hook"):
                    try:
                        test = thing(self._dut)
                    except Exception:
                        self.log.warning("Failed to initialize hook %s" %
                                         thing.name,
                                         exc_info=True)
                    else:
                        cocotb.scheduler.add(test)

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        if len(self.test_results) > 0:
            self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def _add_failure(self, result):
        self.xunit.add_failure(
            stdout=repr(str(result)),
            stderr="\n".join(self._running_test.error_messages),
            message="Test failed with random_seed={}".format(self._seed))
        self.failures += 1

    def handle_result(self, test):
        """Handle a test completing.

        Dump result to XML and schedule the next test (if any).

        Args:
            test: The test that completed
        """
        assert test is self._running_test

        real_time = time.time() - test.start_time
        sim_time_ns = get_sim_time('ns') - test.start_sim_time
        ratio_time = self._safe_divide(sim_time_ns, real_time)

        self.xunit.add_testcase(name=test.funcname,
                                classname=test.module,
                                time=repr(real_time),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        # Helper for logging result
        def _result_was():
            result_was = ("{} (result was {})".format(
                test.funcname, result.__class__.__name__))
            return result_was

        result_pass = True

        # check what exception the test threw
        try:
            test._outcome.get()
        except Exception as e:
            if sys.version_info >= (3, 5):
                result = remove_traceback_frames(e, ['handle_result', 'get'])
                # newer versions of the `logging` module accept plain exception objects
                exc_info = result
            elif sys.version_info >= (3, ):
                result = remove_traceback_frames(e, ['handle_result', 'get'])
                # newer versions of python have Exception.__traceback__
                exc_info = (type(result), result, result.__traceback__)
            else:
                # Python 2
                result = e
                exc_info = remove_traceback_frames(sys.exc_info(),
                                                   ['handle_result', 'get'])
        else:
            result = TestSuccess()

        if (isinstance(result, TestSuccess) and not test.expect_fail
                and not test.expect_error):
            self.log.info("Test Passed: %s" % test.funcname)

        elif (isinstance(result, AssertionError) and test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess) and test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, SimFailure):
            if isinstance(result, test.expect_error):
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error(
                    "Test error has lead to simulator shutting us "
                    "down",
                    exc_info=exc_info)
                self._add_failure(result)
                self._store_test_result(test.module, test.funcname, False,
                                        sim_time_ns, real_time, ratio_time)
                self.tear_down()
                return

        elif test.expect_error:
            if isinstance(result, test.expect_error):
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.info("Test errored with unexpected type: " +
                              _result_was())
                self._add_failure(result)
                result_pass = False

        else:
            self.log.error("Test Failed: " + _result_was(), exc_info=exc_info)
            self._add_failure(result)
            result_pass = False

        self._store_test_result(test.module, test.funcname, result_pass,
                                sim_time_ns, real_time, ratio_time)

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression_manager.next_test()
        if self._running_test:
            start = ''
            end = ''
            if want_color_output():
                start = ANSI.COLOR_TEST
                end = ANSI.COLOR_DEFAULT
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (start, self.count, self.ntests, end,
                           self._running_test.funcname))

            cocotb.scheduler.add_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()

    def _log_test_summary(self):
        TEST_FIELD = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD = 'SIM TIME(NS)'
        REAL_FIELD = 'REAL TIME(S)'
        RATIO_FIELD = 'RATIO(NS/S)'

        TEST_FIELD_LEN = max(
            len(TEST_FIELD),
            len(max([x['test'] for x in self.test_results], key=len)))
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN = len(SIM_FIELD)
        REAL_FIELD_LEN = len(REAL_FIELD)
        RATIO_FIELD_LEN = len(RATIO_FIELD)

        LINE_LEN = 3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 + SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 + RATIO_FIELD_LEN + 3

        LINE_SEP = "*" * LINE_LEN + "\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(
            a=TEST_FIELD,
            a_len=TEST_FIELD_LEN,
            b=RESULT_FIELD,
            b_len=RESULT_FIELD_LEN,
            c=SIM_FIELD,
            c_len=SIM_FIELD_LEN,
            d=REAL_FIELD,
            d_len=REAL_FIELD_LEN,
            e=RATIO_FIELD,
            e_len=RATIO_FIELD_LEN)
        summary += LINE_SEP
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if want_color_output():
                    hilite = ANSI.COLOR_HILITE_SUMMARY

            summary += "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n".format(
                a=result['test'],
                a_len=TEST_FIELD_LEN,
                b=pass_fail_str,
                b_len=RESULT_FIELD_LEN,
                c=result['sim'],
                c_len=SIM_FIELD_LEN - 1,
                d=result['real'],
                d_len=REAL_FIELD_LEN - 1,
                e=result['ratio'],
                e_len=RATIO_FIELD_LEN - 1,
                start=hilite)
        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self):
        real_time = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time = self._safe_divide(sim_time_ns, real_time)

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {0:<39}**\n".format(
            self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {0:<39}**\n".format(
            '{0:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {0:<39}**\n".format(
            '{0:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {0:<39}**\n".format(
            '{0:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    @staticmethod
    def _safe_divide(a, b):
        try:
            return a / b
        except ZeroDivisionError:
            if a == 0:
                return float('nan')
            else:
                return float('inf')

    def _store_test_result(self, module_name, test_name, result_pass, sim_time,
                           real_time, ratio):
        result = {
            'test': '.'.join([module_name, test_name]),
            'pass': result_pass,
            'sim': sim_time,
            'real': real_time,
            'ratio': ratio
        }
        self.test_results.append(result)
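
# --- Illustrative note (not part of the original source) ---
# A minimal, standalone sketch of the _safe_divide() behaviour above: the
# sim-time / real-time ratio degrades to inf (or nan when both operands are
# zero) instead of raising ZeroDivisionError when a test finishes in
# (close to) zero wall-clock time.
import math

def safe_divide(a, b):
    # mirrors RegressionManager._safe_divide
    try:
        return a / b
    except ZeroDivisionError:
        return float('nan') if a == 0 else float('inf')

assert safe_divide(100.0, 2.0) == 50.0
assert math.isinf(safe_divide(100.0, 0.0))
assert math.isnan(safe_divide(0.0, 0.0))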
Example #35
0
def Z_wavedrom_test(dut):
    """
    Generate a JSON wavedrom diagram of a trace
    """
    log = SimLog("cocotb.%s" % dut._name)
    cocotb.fork(Clock(dut.clk_i, 100).start())
    
    mockObject = Sha1Model()
    shaObject = Sha1Driver(dut, None, dut.clk_i)
    
    #yield load_data(dut, log, mockObject, 80)
    
    
    args = [
            dut.rst_i,
            dut.dat_i,
            dut.i,
            dut.i_mux,
            # dut.pinput1.i,
            # dut.pinput1.load_i,
            # dut.pinput1.test_word_1,
            # dut.pinput1.test_word_2,
            # dut.pinput1.test_word_3,
            # dut.pinput1.test_word_4,
            # dut.pinput1.test_word_5,
            # dut.pinput1.valid_o,
            dut.pbuffer1.i,
            dut.pbuffer1.rst_i,
            dut.pbuffer1.load_i,
            dut.pbuffer1.new_i,
            dut.pbuffer1.test_word_1,
            dut.pbuffer1.test_word_2,
            dut.pbuffer1.test_word_3,
            dut.pbuffer1.test_word_4,
            dut.pbuffer1.test_word_5,
            dut.pbuffer1.valid_o,
            dut.pbuffer2.i,
            dut.pbuffer2.rst_i,
            dut.pbuffer2.load_i,
            dut.pbuffer2.new_i,
            dut.pbuffer2.test_word_1,
            dut.pbuffer2.test_word_2,
            dut.pbuffer2.test_word_3,
            dut.pbuffer2.test_word_4,
            dut.pbuffer2.test_word_5,
            dut.pbuffer2.valid_o,
            dut.pbuffer3.i,
            dut.pbuffer3.rst_i,
            dut.pbuffer3.load_i,
            dut.pbuffer3.new_i,
            dut.pbuffer3.test_word_1,
            dut.pbuffer3.test_word_2,
            dut.pbuffer3.test_word_3,
            dut.pbuffer3.test_word_4,
            dut.pbuffer3.test_word_5,
            dut.pbuffer3.valid_o
            ]

    with cocotb.wavedrom.trace(*args, clk=dut.clk_i) as waves:
    
        yield RisingEdge(dut.clk_i)
        yield reset(dut)
        yield load_data(dut, log, mockObject, 16)
        mockObject.processInput()
        mockObject.processBuffer()
        
        if _debug:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
        yield load_data(dut, log, mockObject, 60)

        if _debug:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
            #log.info("{:08x}".format(mockObject.W[78]))
            #log.info("{:08x}".format(mockObject.W[79]))
            #log.info("{:08x}".format(mockObject.W[16 - 14]))
            #log.info("{:08x}".format(mockObject.W[16 - 16]))
            #log.info("{:08x}".format(mockObject.W[16]))
            
        yield load_data(dut, log, mockObject, 90)
        
        if _debug:
            log.info(convert_hex(dut.pbuffer1.test_word_3).zfill(8))
            log.info(convert_hex(dut.pbuffer1.test_word_4).zfill(8))
            #log.info(dut.pinput1.test_word_1.value.hex())
            #log.info(dut.pinput1.test_word_2.value.hex())
            #log.info(dut.pinput1.test_word_3.value.hex())
            #log.info(dut.pinput1.test_word_4.value.hex())
            #log.info(dut.pinput1.test_word_5.value.hex())
            #log.info(dut.pinput1.test_word_5)
            #log.info(waves.dumpj(header = {'text':'D_wavedrom_test', 'tick':-2}, config = {'hscale':3}))
            
        waves.write('wavedrom.json', header = {'text':'D_wavedrom_test', 'tick':-1}, config = {'hscale':5})
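
# --- Illustrative note (not part of the original source) ---
# A condensed sketch of the cocotb.wavedrom tracing pattern used above,
# assuming a DUT exposing clk_i, rst_i and dat_i. It records two signals for
# a few clock cycles and writes a WaveDrom JSON file; the @cocotb.test()
# decorator is assumed to have been stripped from the examples on this page.
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.wavedrom import trace

@cocotb.coroutine
def minimal_wavedrom_trace(dut):
    cocotb.fork(Clock(dut.clk_i, 100).start())
    with trace(dut.rst_i, dut.dat_i, clk=dut.clk_i) as waves:
        for _ in range(8):
            yield RisingEdge(dut.clk_i)
        waves.write('trace.json',
                    header={'text': 'minimal trace'},
                    config={'hscale': 2})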
Example #36
0
class AxiRamWrite(object):
    def __init__(self, entity, name, clock, reset=None, size=1024, mem=None):
        self.log = SimLog("cocotb.%s.%s" % (entity._name, name))

        self.log.info("AXI RAM model")
        self.log.info("cocotbext-axi version %s", __version__)
        self.log.info("Copyright (c) 2020 Alex Forencich")
        self.log.info("https://github.com/alexforencich/cocotbext-axi")

        if type(mem) is mmap.mmap:
            self.mem = mem
        else:
            self.mem = mmap.mmap(-1, size)
        self.size = len(self.mem)

        self.reset = reset

        self.aw_channel = AxiAWSink(entity, name, clock, reset)
        self.w_channel = AxiWSink(entity, name, clock, reset)
        self.b_channel = AxiBSource(entity, name, clock, reset)

        self.in_flight_operations = 0

        self.width = len(self.w_channel.bus.wdata)
        self.byte_size = 8
        self.byte_width = self.width // self.byte_size
        self.strb_mask = 2**self.byte_width-1

        assert self.byte_width == len(self.w_channel.bus.wstrb)
        assert self.byte_width * self.byte_size == self.width

        assert len(self.b_channel.bus.bid) == len(self.aw_channel.bus.awid)

        cocotb.fork(self._process_write())

    def read_mem(self, address, length):
        self.mem.seek(address)
        return self.mem.read(length)

    def write_mem(self, address, data):
        self.mem.seek(address)
        self.mem.write(bytes(data))

    def hexdump(self, address, length, prefix=""):
        hexdump(self.mem, address, length, prefix=prefix)

    def hexdump_str(self, address, length, prefix=""):
        return hexdump_str(self.mem, address, length, prefix=prefix)

    async def _process_write(self):
        while True:
            await self.aw_channel.wait()
            aw = self.aw_channel.recv()

            awid = int(aw.awid)
            addr = int(aw.awaddr)
            length = int(aw.awlen)
            size = int(aw.awsize)
            burst = int(aw.awburst)
            prot = AxiProt(int(aw.awprot))

            self.log.info(f"Write burst awid: {awid:#x} awaddr: {addr:#010x} awlen: {length} awsize: {size} awprot: {prot}")

            num_bytes = 2**size
            assert 0 < num_bytes <= self.byte_width

            aligned_addr = (addr // num_bytes) * num_bytes
            length += 1

            transfer_size = num_bytes*length

            if burst == AxiBurstType.WRAP:
                lower_wrap_boundary = (addr // transfer_size) * transfer_size
                upper_wrap_boundary = lower_wrap_boundary + transfer_size

            if burst == AxiBurstType.INCR:
                # check 4k boundary crossing
                assert 0x1000-(aligned_addr&0xfff) >= transfer_size

            cur_addr = aligned_addr

            for n in range(length):
                cur_word_addr = (cur_addr // self.byte_width) * self.byte_width

                await self.w_channel.wait()
                w = self.w_channel.recv()

                data = int(w.wdata)
                strb = int(w.wstrb)
                last = int(w.wlast)

                # todo latency

                self.mem.seek(cur_word_addr % self.size)

                data = data.to_bytes(self.byte_width, 'little')

                self.log.debug(f"Write word awid: {awid:#x} addr: {cur_addr:#010x} wstrb: {strb:#04x} data: {' '.join((f'{c:02x}' for c in data))}")

                for i in range(self.byte_width):
                    if strb & (1 << i):
                        self.mem.write(data[i:i+1])
                    else:
                        self.mem.seek(1, 1)

                assert last == (n == length-1)

                if burst != AxiBurstType.FIXED:
                    cur_addr += num_bytes

                    if burst == AxiBurstType.WRAP:
                        if cur_addr == upper_wrap_boundary:
                            cur_addr = lower_wrap_boundary

            b = self.b_channel._transaction_obj()
            b.bid = awid
            b.bresp = AxiResp.OKAY

            self.b_channel.send(b)
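
# --- Illustrative note (not part of the original source) ---
# A minimal usage sketch for the write-only RAM model above. The "s_axi"
# prefix and the dut.clk/dut.rst signal names are assumptions about the DUT,
# not something defined by the model itself. The model services AW/W/B
# traffic in the background; read_mem()/hexdump_str() can then be used to
# inspect what the DUT actually wrote into the backing mmap.
import cocotb
from cocotb.triggers import Timer

@cocotb.test()
async def axi_ram_write_smoke(dut):
    ram = AxiRamWrite(dut, "s_axi", dut.clk, dut.rst, size=2**16)

    # known starting pattern in the backing memory
    ram.write_mem(0x0000, b'\x00' * 16)

    # ... drive AXI write bursts from a master / the DUT here ...
    await Timer(1, units='us')

    data = ram.read_mem(0x0000, 16)
    assert len(data) == 16
    dut._log.info("RAM contents:\n%s", ram.hexdump_str(0x0000, 16))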
Example #37
0
class AxiRamRead(object):
    def __init__(self, entity, name, clock, reset=None, size=1024, mem=None):
        self.log = SimLog("cocotb.%s.%s" % (entity._name, name))

        if type(mem) is mmap.mmap:
            self.mem = mem
        else:
            self.mem = mmap.mmap(-1, size)
        self.size = len(self.mem)

        self.reset = reset

        self.ar_channel = AxiARSink(entity, name, clock, reset)
        self.r_channel = AxiRSource(entity, name, clock, reset)

        self.int_read_resp_command_queue = deque()
        self.int_read_resp_command_sync = Event()

        self.in_flight_operations = 0

        self.width = len(self.r_channel.bus.rdata)
        self.byte_size = 8
        self.byte_width = self.width // self.byte_size

        assert self.byte_width * self.byte_size == self.width

        assert len(self.r_channel.bus.rid) == len(self.ar_channel.bus.arid)

        cocotb.fork(self._process_read())

    def read_mem(self, address, length):
        self.mem.seek(address)
        return self.mem.read(length)

    def write_mem(self, address, data):
        self.mem.seek(address)
        self.mem.write(bytes(data))

    def hexdump(self, address, length, prefix=""):
        hexdump(self.mem, address, length, prefix=prefix)

    def hexdump_str(self, address, length, prefix=""):
        return hexdump_str(self.mem, address, length, prefix=prefix)

    async def _process_read(self):
        while True:
            await self.ar_channel.wait()
            ar = self.ar_channel.recv()

            arid = int(ar.arid)
            addr = int(ar.araddr)
            length = int(ar.arlen)
            size = int(ar.arsize)
            burst = int(ar.arburst)
            prot = AxiProt(int(ar.arprot))

            self.log.info(f"Read burst arid: {arid:#x} araddr: {addr:#010x} arlen: {length} arsize: {size} arprot: {prot}")

            num_bytes = 2**size
            assert 0 < num_bytes <= self.byte_width

            aligned_addr = (addr // num_bytes) * num_bytes
            length += 1

            transfer_size = num_bytes*length

            if burst == AxiBurstType.WRAP:
                lower_wrap_boundary = (addr // transfer_size) * transfer_size
                upper_wrap_boundary = lower_wrap_boundary + transfer_size

            if burst == AxiBurstType.INCR:
                # check 4k boundary crossing
                assert 0x1000-(aligned_addr&0xfff) >= transfer_size

            cur_addr = aligned_addr

            for n in range(length):
                cur_word_addr = (cur_addr // self.byte_width) * self.byte_width

                self.mem.seek(cur_word_addr % self.size)

                data = self.mem.read(self.byte_width)

                r = self.r_channel._transaction_obj()
                r.rid = arid
                r.rdata = int.from_bytes(data, 'little')
                r.rlast = n == length-1
                r.rresp = AxiResp.OKAY

                self.r_channel.send(r)

                self.log.debug(f"Read word arid: {arid:#x} addr: {cur_addr:#010x} data: {' '.join((f'{c:02x}' for c in data))}")

                if burst != AxiBurstType.FIXED:
                    cur_addr += num_bytes

                    if burst == AxiBurstType.WRAP:
                        if cur_addr == upper_wrap_boundary:
                            cur_addr = lower_wrap_boundary
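
# --- Illustrative note (not part of the original source) ---
# A standalone worked example of the WRAP-burst address arithmetic used by
# _process_read()/_process_write() above: a 4-beat burst of 4-byte transfers
# starting at 0x1008 wraps back to its 16-byte-aligned lower boundary once
# the upper boundary is reached.
addr, size, burst_len = 0x1008, 2, 3        # arsize=2 -> 4 bytes/beat, arlen=3 -> 4 beats
num_bytes = 2 ** size
length = burst_len + 1
transfer_size = num_bytes * length          # 16 bytes in total

lower_wrap_boundary = (addr // transfer_size) * transfer_size   # 0x1000
upper_wrap_boundary = lower_wrap_boundary + transfer_size       # 0x1010

beats = []
cur_addr = (addr // num_bytes) * num_bytes  # aligned start address
for _ in range(length):
    beats.append(cur_addr)
    cur_addr += num_bytes
    if cur_addr == upper_wrap_boundary:
        cur_addr = lower_wrap_boundary

assert beats == [0x1008, 0x100C, 0x1000, 0x1004]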
Example #38
0
class Scoreboard:
    """Generic scoreboarding class.

    We can add interfaces by providing a monitor and an expected output queue.

    The expected output can either be a function which provides a transaction
    or a simple list containing the expected output.

    TODO:
        Statistics for end-of-test summary etc.

    Args:
        dut (SimHandle): Handle to the DUT.
        reorder_depth (int, optional): Consider up to `reorder_depth` elements
            of the expected result list as passing matches.
            Default is 0, meaning only the first element in the expected result list
            is considered for a passing match.
        fail_immediately (bool, optional): Raise :any:`TestFailure`
            immediately when something is wrong instead of just
            recording an error. Default is ``True``.
    """
    def __init__(self,
                 dut,
                 reorder_depth=0,
                 fail_immediately=True):  # FIXME: reorder_depth needed here?
        self.dut = dut
        self.log = SimLog("cocotb.scoreboard.%s" % self.dut._name)
        self.errors = 0
        self.expected = {}
        self._imm = fail_immediately

    @property
    def result(self):
        """Determine the test result, do we have any pending data remaining?

        Returns:
            :any:`TestFailure`: If not all expected output was received or
            error were recorded during the test.
        """
        fail = False
        for monitor, expected_output in self.expected.items():
            if callable(expected_output):
                self.log.debug("Can't check all data returned for %s since "
                               "expected output is callable function rather "
                               "than a list" % str(monitor))
                continue
            if len(expected_output):
                self.log.warn("Still expecting %d transactions on %s" %
                              (len(expected_output), str(monitor)))
                for index, transaction in enumerate(expected_output):
                    self.log.info("Expecting %d:\n%s" %
                                  (index, hexdump(str(transaction))))
                    if index > 5:
                        self.log.info("... and %d more to come" %
                                      (len(expected_output) - index - 1))
                        break
                fail = True
        if fail:
            return TestFailure("Not all expected output was received")
        if self.errors:
            return TestFailure("Errors were recorded during the test")
        return TestSuccess()

    def compare(self, got, exp, log, strict_type=True):
        """Common function for comparing two transactions.

        Can be re-implemented by a sub-class.

        Args:
            got: The received transaction.
            exp: The expected transaction.
            log: The logger for reporting messages.
            strict_type (bool, optional): Require transaction type to match
                exactly if ``True``, otherwise compare its string representation.

        Raises:
            :any:`TestFailure`: If received transaction differed from
                expected transaction when :attr:`fail_immediately` is ``True``.
                If *strict_type* is ``True``,
                also the transaction type must match.
        """

        # Compare the types
        if strict_type and type(got) != type(exp):
            self.errors += 1
            log.error("Received transaction type is different than expected")
            log.info("Received: %s but expected %s" %
                     (str(type(got)), str(type(exp))))
            if self._imm:
                raise TestFailure("Received transaction of wrong type. "
                                  "Set strict_type=False to avoid this.")
            return
        # Or convert to a string before comparison
        elif not strict_type:
            got, exp = str(got), str(exp)

        # Compare directly
        if got != exp:
            self.errors += 1

            # Try our best to print out something useful
            strgot, strexp = str(got), str(exp)

            log.error("Received transaction differed from expected output")
            if not strict_type:
                log.info("Expected:\n" + hexdump(strexp))
            else:
                log.info("Expected:\n" + repr(exp))
            if not isinstance(exp, str):
                try:
                    for word in exp:
                        log.info(str(word))
                except Exception:
                    pass
            if not strict_type:
                log.info("Received:\n" + hexdump(strgot))
            else:
                log.info("Received:\n" + repr(got))
            if not isinstance(got, str):
                try:
                    for word in got:
                        log.info(str(word))
                except Exception:
                    pass
            log.warning("Difference:\n%s" % hexdiffs(strexp, strgot))
            if self._imm:
                raise TestFailure(
                    "Received transaction differed from expected "
                    "transaction")
        else:
            # Don't want to fail the test
            # if we're passed something without __len__
            try:
                log.debug("Received expected transaction %d bytes" %
                          (len(got)))
                log.debug(repr(got))
            except Exception:
                pass

    def add_interface(self,
                      monitor,
                      expected_output,
                      compare_fn=None,
                      reorder_depth=0,
                      strict_type=True):
        """Add an interface to be scoreboarded.

        Provides a function which the monitor will callback with received
        transactions.

        Simply check against the expected output.

        Args:
            monitor: The monitor object.
            expected_output: Queue of expected outputs.
            compare_fn (callable, optional): Function doing the actual comparison.
            reorder_depth (int, optional): Consider up to *reorder_depth* elements
                of the expected result list as passing matches.
                Default is 0, meaning only the first element in the expected result list
                is considered for a passing match.
            strict_type (bool, optional): Require transaction type to match
                exactly if ``True``, otherwise compare its string representation.

        Raises:
            :any:`TypeError`: If no monitor is on the interface or
                *compare_fn* is not a callable function.
        """
        # save a handle to the expected output so we can check if all expected
        # data has been received at the end of a test.
        self.expected[monitor] = expected_output

        # Enforce some type checking as we only work with a real monitor
        if not isinstance(monitor, Monitor):
            raise TypeError("Expected monitor on the interface but got %s" %
                            (type(monitor).__qualname__))

        if compare_fn is not None:
            if callable(compare_fn):
                monitor.add_callback(compare_fn)
                return
            raise TypeError("Expected a callable compare function but got %s" %
                            str(type(compare_fn)))

        self.log.info("Created with reorder_depth %d" % reorder_depth)

        def check_received_transaction(transaction):
            """Called back by the monitor when a new transaction has been
            received."""

            if monitor.name:
                log_name = self.log.name + '.' + monitor.name
            else:
                log_name = self.log.name + '.' + type(monitor).__qualname__

            log = logging.getLogger(log_name)

            if callable(expected_output):
                exp = expected_output(transaction)

            elif len(expected_output):  # we expect something
                for i in range(min((reorder_depth + 1), len(expected_output))):
                    if expected_output[i] == transaction:
                        break  # break out of enclosing for loop
                else:  # run when for loop is exhausted (but no break occurs)
                    i = 0
                exp = expected_output.pop(i)
            else:
                self.errors += 1
                log.error("Received a transaction but wasn't expecting "
                          "anything")
                log.info("Got: %s" % (hexdump(str(transaction))))
                if self._imm:
                    raise TestFailure("Received a transaction but wasn't "
                                      "expecting anything")
                return

            self.compare(transaction, exp, log, strict_type=strict_type)

        monitor.add_callback(check_received_transaction)
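
# --- Illustrative note (not part of the original source) ---
# A minimal usage sketch of the Scoreboard above. OutputMonitor is a
# hypothetical cocotb Monitor subclass that calls back with received
# transactions; the expected output is a plain list that the model side
# appends to. compare() can be overridden in a Scoreboard subclass if
# byte-for-byte equality is not the right check.
import cocotb
from cocotb.triggers import Timer

@cocotb.coroutine
def scoreboard_example(dut):
    expected = []                                   # expected-output queue
    monitor = OutputMonitor(dut, "dout", dut.clk)   # hypothetical monitor

    scoreboard = Scoreboard(dut, fail_immediately=False)
    scoreboard.add_interface(monitor, expected)

    # the reference model predicts what the DUT should emit
    expected.append(b'\x01\x02\x03\x04')

    # ... drive stimulus into the DUT here ...
    yield Timer(1, units='us')

    # raise the aggregate result (TestSuccess or TestFailure)
    raise scoreboard.result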
Example #39
0
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""

    def __init__(self, dut, modules, tests=None):
        """
        Args:
            modules (list): A list of python module names to run

        Kwargs
        """
        self._queue = []
        self._dut = dut
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self.log = SimLog("cocotb.regression")

    def initialise(self):

        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()
        self.xunit.add_testsuite(name="all", tests=repr(self.ntests), package="all")

        # Auto discovery
        for module_name in self._modules:
            module = _my_import(module_name)

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                            (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" % thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name, classname=module_name, time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1                        
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: "%s.%s" % (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                        (valid_tests.module,
                         valid_tests.funcname))

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                (self.failures, self.count -1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)"  %
                (self.count-1, self.skipped))
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()


    def next_test(self):
        """Get the next test to run"""
        if not self._queue: return None
        return self._queue.pop(0)


    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args: result (TestComplete exception)
        """
        self.xunit.add_testcase(name =self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(time.time() - self._running_test.start_time) )

        if isinstance(result, TestSuccess) and not self._running_test.expect_fail and not self._running_test.expect_error:
            self.log.info("Test Passed: %s" % self._running_test.funcname)

        elif isinstance(result, TestFailure) and self._running_test.expect_fail:
            self.log.info("Test failed as expected: %s (result was %s)" % (
                          self._running_test.funcname, result.__class__.__name__))

        elif isinstance(result, TestSuccess) and self._running_test.expect_error:
            self.log.error("Test passed but we expected an error: %s (result was %s)" % (
                           self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)), stderr="\n".join(self._running_test.error_messages))
            self.failures += 1

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: %s (result was %s)" % (
                           self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)), stderr="\n".join(self._running_test.error_messages))
            self.failures += 1

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: %s (result was %s)" % (
                          self._running_test.funcname, result.__class__.__name__))

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: %s (result was %s)" % (
                              self._running_test.funcname, result.__class__.__name__))
            else:
                self.log.error("Test error has lead to simulator shuttting us down")
                self.failures += 1
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: %s (result was %s)" % (
                        self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)), stderr="\n".join(self._running_test.error_messages))
            self.failures += 1

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression.next_test()
        if self._running_test:
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" % (
               ANSI.BLUE_BG +ANSI.BLACK_FG,
                    self.count, self.ntests,
               ANSI.DEFAULT_FG + ANSI.DEFAULT_BG,
                    self._running_test.funcname))
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count+=1
        else:
            self.tear_down()
Example #40
0
class RunningCoroutine(object):
    """Per instance wrapper around an function to turn it into a coroutine


        Provides the following:

            coro.join() creates a Trigger that will fire when this coroutine completes

            coro.kill() will destroy a coroutine instance (and cause any Join triggers to fire
    """
    def __init__(self, inst, parent):
        self.__name__ = "%s" % inst.__name__
        self.log = SimLog("cocotb.coroutine.%s" % self.__name__, id(self))
        self._coro = inst
        self._finished = False
        self._callbacks = []
        self._parent = parent
        self.__doc__ = parent._func.__doc__
        self.module = parent._func.__module__
        self.funcname = parent._func.__name__
        self.retval = None

        if not hasattr(self._coro, "send"):
            self.log.error("%s isn't a value coroutine! Did you use the yield keyword?"
                % self.__name__)
            raise CoroutineComplete(callback=self._finished_cb)

    def __iter__(self):
        return self

    def __str__(self):
        return str(self.__name__)

    def send(self, value):
        try:
            return self._coro.send(value)
        except TestComplete as e:
            if isinstance(e, TestFailure):
                self.log.warning(str(e))
            else:
                self.log.info(str(e))
            raise
        except ReturnValue as e:
            self.retval = e.retval
            raise CoroutineComplete(callback=self._finished_cb)
        except StopIteration:
            raise CoroutineComplete(callback=self._finished_cb)
        except Exception as e:
            raise_error(self, "Send raised exception: %s" % (str(e)))

    def throw(self, exc):
        return self._coro.throw(exc)

    def close(self):
        return self._coro.close()

    def kill(self):
        """Kill a coroutine"""
        self.log.debug("kill() called on coroutine")
        cocotb.scheduler.schedule_remove(self, self._finished_cb)

    def _finished_cb(self):
        """Called when the coroutine completes.
            Allows us to mark the coroutine as finished so that boolean testing works.
            Also call any callbacks, usually the result of coroutine.join()"""
        self._finished = True
        self.log.debug("Coroutine finished calling pending callbacks (%d pending)" % len(self._callbacks))
        for cb in self._callbacks:
            cb()
        self._callbacks = []

    def join(self):
        """Return a trigger that will fire when the wrapped coroutine exits"""
        return Join(self)

    def __nonzero__(self):
        """Provide boolean testing
            if the coroutine has finished return false
            otherwise return true"""
        return not self._finished
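
# --- Illustrative note (not part of the original source) ---
# A minimal sketch of how the wrapper above is exercised from a test in the
# old yield-based style: cocotb.fork() returns a RunningCoroutine, join()
# yields a trigger that fires when it completes, and kill() tears a forked
# coroutine down early.
import cocotb
from cocotb.triggers import Timer

@cocotb.coroutine
def background_job(dut):
    yield Timer(1000)           # simulated delay

@cocotb.coroutine
def fork_join_kill_example(dut):
    job = cocotb.fork(background_job(dut))
    yield job.join()            # wait for the forked coroutine to finish

    job2 = cocotb.fork(background_job(dut))
    yield Timer(100)
    job2.kill()                 # stop it before it completes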
Example #41
0
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""

    def __init__(self, root_name, modules, tests=None, seed=None, hooks=[]):
        """
        Args:
            modules (list): A list of python module names to run

        Kwargs
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")
        self._seed = seed
        self._hooks = hooks

    def initialise(self):
        try:
            self._initialise()
        except Exception as e:
            import traceback
            self.log.error(traceback.format_exc())
            raise
        
    def _initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()

        suite_name = os.getenv('RESULT_TESTSUITE') if os.getenv('RESULT_TESTSUITE') else "all"
        package_name = os.getenv('RESULT_TESTPACKAGE') if os.getenv('RESULT_TESTPACKAGE') else "all"
                
        self.xunit.add_testsuite(name=suite_name, tests=repr(self.ntests),
                                 package=package_name)
        
        if (self._seed is not None):
            self.xunit.add_property(name="random_seed", value=("%d"%self._seed))

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                self.log.debug("Python Path: " + ",".join(sys.path))
                self.log.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                self.log.critical("Failed to import module %s: %s", module_name, E)
                self.log.info("MODULE variable was \"%s\"", ".".join(self._modules))
                self.log.info("Traceback: ")
                self.log.info(traceback.format_exc())
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0",
                                                sim_time_ns="0.0",
                                                ratio_time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                        self._store_test_result(module_name, thing.name, None, 0.0, 0.0, 0.0)
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: "%s.%s" %
                         (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module,
                           valid_tests.funcname))

        for module_name in self._hooks:
            self.log.info("Loading hook from module '"+module_name+"'")
            module = _my_import(module_name)

            for thing in vars(module).values():
                if hasattr(thing, "im_hook"):
                    try:
                        test = thing(self._dut)
                    except TestError:
                        self.log.warning("Failed to initialize hook %s" % thing.name)
                    else:
                        cocotb.scheduler.add(test)


    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def _add_failure(self, result):
        self.xunit.add_failure(stdout=repr(str(result)),
                               stderr="\n".join(self._running_test.error_messages),
                               message="Test failed with random_seed={}".format(self._seed))
        self.failures += 1

    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args: result (TestComplete exception)
        """
        real_time   = time.time() - self._running_test.start_time
        sim_time_ns = get_sim_time('ns') - self._running_test.start_sim_time
        ratio_time  = sim_time_ns / real_time
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(real_time),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        running_test_funcname = self._running_test.funcname

        # Helper for logging result
        def _result_was():
            result_was = ("%s (result was %s)" %
                          (running_test_funcname, result.__class__.__name__))
            return result_was

        result_pass = True

        if (isinstance(result, TestSuccess) and
                not self._running_test.expect_fail and
                not self._running_test.expect_error):
            self.log.info("Test Passed: %s" % running_test_funcname)

        elif (isinstance(result, TestFailure) and
                self._running_test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess) and
              self._running_test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: " + _result_was())

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test error has lead to simulator shuttting us "
                               "down")
                self._add_failure(result)
                self._store_test_result(self._running_test.module, self._running_test.funcname, False, sim_time_ns, real_time, ratio_time)
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: " + _result_was())
            self._add_failure(result)
            result_pass = False

        self._store_test_result(self._running_test.module, self._running_test.funcname, result_pass, sim_time_ns, real_time, ratio_time)

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression.next_test()
        if self._running_test:
            start = ''
            end   = ''
            if self.log.colour:
                start = ANSI.BLUE_BG + ANSI.BLACK_FG
                end   = ANSI.DEFAULT
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (start,
                           self.count, self.ntests,
                           end,
                           self._running_test.funcname))
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()

    def _log_test_summary(self):
        TEST_FIELD   = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD    = 'SIM TIME(NS)'
        REAL_FIELD   = 'REAL TIME(S)'
        RATIO_FIELD  = 'RATIO(NS/S)'

        TEST_FIELD_LEN   = max(len(TEST_FIELD),len(max([x['test'] for x in self.test_results],key=len)))
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN    = len(SIM_FIELD)
        REAL_FIELD_LEN   = len(REAL_FIELD)
        RATIO_FIELD_LEN  = len(RATIO_FIELD)

        LINE_LEN = 3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 + SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 + RATIO_FIELD_LEN + 3

        LINE_SEP = "*"*LINE_LEN+"\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(a=TEST_FIELD,   a_len=TEST_FIELD_LEN,
                                                                                                         b=RESULT_FIELD, b_len=RESULT_FIELD_LEN,
                                                                                                         c=SIM_FIELD,    c_len=SIM_FIELD_LEN,
                                                                                                         d=REAL_FIELD,   d_len=REAL_FIELD_LEN,
                                                                                                         e=RATIO_FIELD,  e_len=RATIO_FIELD_LEN)
        summary += LINE_SEP
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if self.log.colour:
                    hilite = ANSI.WHITE_FG + ANSI.RED_BG

            summary += "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n".format(a=result['test'],   a_len=TEST_FIELD_LEN,
                                                                                                                                b=pass_fail_str,    b_len=RESULT_FIELD_LEN,
                                                                                                                                c=result['sim'],    c_len=SIM_FIELD_LEN-1,
                                                                                                                                d=result['real'],   d_len=REAL_FIELD_LEN-1,
                                                                                                                                e=result['ratio'],  e_len=RATIO_FIELD_LEN-1,
                                                                                                                                start=hilite)
        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self):
        real_time   = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time  = sim_time_ns / real_time

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {0:<39}**\n".format(self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {0:<39}**\n".format('{0:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {0:<39}**\n".format('{0:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {0:<39}**\n".format('{0:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    def _store_test_result(self, module_name, test_name, result_pass, sim_time, real_time, ratio):
        result = {
            'test'  : '.'.join([module_name, test_name]),
            'pass'  : result_pass,
            'sim'   : sim_time,
            'real'  : real_time,
            'ratio' : ratio}
        self.test_results.append(result)
Example #42
0
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""
    def __init__(self, root_name, modules, tests=None, seed=None, hooks=[]):
        """
        Args:
            modules (list): A list of Python module names to run

        Kwargs
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")
        self._seed = seed
        self._hooks = hooks

    def initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()

        suite_name = os.getenv('RESULT_TESTSUITE') if os.getenv(
            'RESULT_TESTSUITE') else "all"
        package_name = os.getenv('RESULT_TESTPACKAGE') if os.getenv(
            'RESULT_TESTPACKAGE') else "all"

        self.xunit.add_testsuite(name=suite_name,
                                 tests=repr(self.ntests),
                                 package=package_name)

        if (self._seed is not None):
            self.xunit.add_property(name="random_seed",
                                    value=("%d" % self._seed))

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                self.log.debug("Python Path: " + ",".join(sys.path))
                self.log.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                self.log.critical("Failed to import module %s: %s",
                                  module_name, E)
                self.log.info("MODULE variable was \"%s\"",
                              ".".join(self._modules))
                self.log.info("Traceback: ")
                self.log.info(traceback.format_exc())
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialize test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0",
                                                sim_time_ns="0.0",
                                                ratio_time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                        self._store_test_result(module_name, thing.name, None,
                                                0.0, 0.0, 0.0)
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: test.sort_name())

        for valid_test in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_test.module, valid_test.funcname))

        for module_name in self._hooks:
            self.log.info("Loading hook from module '" + module_name + "'")
            module = _my_import(module_name)

            for thing in vars(module).values():
                if hasattr(thing, "im_hook"):
                    try:
                        test = thing(self._dut)
                    except TestError:
                        self.log.warning("Failed to initialize hook %s" %
                                         thing.name)
                    else:
                        cocotb.scheduler.add(test)

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        if len(self.test_results) > 0:
            self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def _add_failure(self, result):
        self.xunit.add_failure(
            stdout=repr(str(result)),
            stderr="\n".join(self._running_test.error_messages),
            message="Test failed with random_seed={}".format(self._seed))
        self.failures += 1

    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args: result (TestComplete exception)
        """
        real_time = time.time() - self._running_test.start_time
        sim_time_ns = get_sim_time('ns') - self._running_test.start_sim_time
        # Simulated nanoseconds per wall-clock second, as reported in the summary
        ratio_time = sim_time_ns / real_time
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(real_time),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        running_test_funcname = self._running_test.funcname

        # Helper for logging result
        def _result_was():
            result_was = ("%s (result was %s)" %
                          (running_test_funcname, result.__class__.__name__))
            return result_was

        result_pass = True

        if (isinstance(result, TestSuccess)
                and not self._running_test.expect_fail
                and not self._running_test.expect_error):
            self.log.info("Test Passed: %s" % running_test_funcname)

        elif (isinstance(result, TestFailure)
              and self._running_test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess)
              and self._running_test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: " + _result_was())

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test error has lead to simulator shutting us "
                               "down")
                self._add_failure(result)
                self._store_test_result(self._running_test.module,
                                        self._running_test.funcname, False,
                                        sim_time_ns, real_time, ratio_time)
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: " + _result_was())
            self._add_failure(result)
            result_pass = False

        self._store_test_result(self._running_test.module,
                                self._running_test.funcname, result_pass,
                                sim_time_ns, real_time, ratio_time)

        self.execute()

    def execute(self):
        self._running_test = self.next_test()
        if self._running_test:
            start = ''
            end = ''
            if self.log.colour:
                start = ANSI.COLOR_TEST
                end = ANSI.COLOR_DEFAULT
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (start, self.count, self.ntests, end,
                           self._running_test.funcname))
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()

    def _log_test_summary(self):
        TEST_FIELD = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD = 'SIM TIME(NS)'
        REAL_FIELD = 'REAL TIME(S)'
        RATIO_FIELD = 'RATIO(NS/S)'

        TEST_FIELD_LEN = max(
            len(TEST_FIELD),
            len(max([x['test'] for x in self.test_results], key=len)))
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN = len(SIM_FIELD)
        REAL_FIELD_LEN = len(REAL_FIELD)
        RATIO_FIELD_LEN = len(RATIO_FIELD)

        LINE_LEN = (3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 +
                    SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 +
                    RATIO_FIELD_LEN + 3)

        LINE_SEP = "*" * LINE_LEN + "\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(
            a=TEST_FIELD,
            a_len=TEST_FIELD_LEN,
            b=RESULT_FIELD,
            b_len=RESULT_FIELD_LEN,
            c=SIM_FIELD,
            c_len=SIM_FIELD_LEN,
            d=REAL_FIELD,
            d_len=REAL_FIELD_LEN,
            e=RATIO_FIELD,
            e_len=RATIO_FIELD_LEN)
        summary += LINE_SEP
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if self.log.colour:
                    hilite = ANSI.COLOR_HILITE_SUMMARY

            summary += "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n".format(
                a=result['test'],
                a_len=TEST_FIELD_LEN,
                b=pass_fail_str,
                b_len=RESULT_FIELD_LEN,
                c=result['sim'],
                c_len=SIM_FIELD_LEN - 1,
                d=result['real'],
                d_len=REAL_FIELD_LEN - 1,
                e=result['ratio'],
                e_len=RATIO_FIELD_LEN - 1,
                start=hilite)
        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self):
        real_time = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time = sim_time_ns / real_time

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {0:<39}**\n".format(
            self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {0:<39}**\n".format(
            '{0:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {0:<39}**\n".format(
            '{0:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {0:<39}**\n".format(
            '{0:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    def _store_test_result(self, module_name, test_name, result_pass, sim_time,
                           real_time, ratio):
        result = {
            'test': '.'.join([module_name, test_name]),
            'pass': result_pass,
            'sim': sim_time,
            'real': real_time,
            'ratio': ratio
        }
        self.test_results.append(result)
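
The summary figures produced by handle_result and _log_sim_summary boil down to one calculation: simulated nanoseconds divided by wall-clock seconds. The sketch below is a standalone illustration of that bookkeeping, not part of cocotb; the 1500 ns figure and the sleep are stand-ins for a real simulation run, where the values would come from get_sim_time('ns') and the test's recorded start time.

import time

start_real = time.time()
start_sim_ns = 0.0            # in a real run: get_sim_time('ns') at test start

time.sleep(0.05)              # stand-in for the simulator doing the work
sim_time_ns = 1500.0          # hypothetical simulated time at test completion

real_time = time.time() - start_real
ratio_time = (sim_time_ns - start_sim_ns) / real_time
print("** SIM TIME : %.2f NS  REAL TIME : %.2f S  RATIO : %.2f NS/S **"
      % (sim_time_ns, real_time, ratio_time))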
Example #43
class Scoreboard(object):
    """Generic scoreboarding class

    We can add interfaces by providing a monitor and an expected output queue

    The expected output can either be a function which provides a transaction
    or a simple list containing the expected output.

    TODO:
        Statistics for end-of-test summary etc.
    """

    def __init__(self, dut, reorder_depth=0, fail_immediately=True):
        self.dut = dut
        self.log = SimLog("cocotb.scoreboard.%s" % self.dut.name)
        self.errors = 0
        self.expected = {}
        self._imm = fail_immediately

    @property
    def result(self):
        """Determine the test result - do we have any pending data remaining?"""
        fail = False
        for monitor, expected_output in self.expected.iteritems():
            if callable(expected_output):
                self.log.debug("Can't check all data returned for %s since expected output is \
                                callable function rather than a list" % str(monitor))
                continue
            if len(expected_output):
                self.log.warn("Still expecting %d transactions on %s" % (len(expected_output), str(monitor)))
                for index, transaction in enumerate(expected_output):
                    self.log.info("Expecting %d:\n%s" % (index, hexdump(str(transaction))))
                    if index > 5:
                        self.log.info("... and %d more to come" % (len(expected_output) - index - 1))
                        break
                fail = True
        if fail:
            return TestFailure("Not all expected output was received")
        if self.errors:
            return TestFailure("Errors were recorded during the test")
        return TestSuccess()

    def add_interface(self, monitor, expected_output, compare_fn=None):
        """Add an interface to be scoreboarded.

        Registers a callback on the monitor: if compare_fn is supplied it is
        called for each received transaction, otherwise every transaction is
        checked against the expected output.
        """
        # save a handle to the expected output so we can check if all expected data has
        # been received at the end of a test.
        self.expected[monitor] = expected_output

        # Enforce some type checking as we only work with a real monitor
        if not isinstance(monitor, Monitor):
            raise TypeError("Expected monitor on the interface but got %s" % (monitor.__class__.__name__))

        if compare_fn is not None:
            if callable(compare_fn):
                monitor.add_callback(compare_fn)
                return
            raise TypeError("Expected a callable compare function but got %s" % str(type(compare_fn)))

        def check_received_transaction(transaction):
            """Called back by the monitor when a new transaction has been received"""

            log = logging.getLogger(self.log.name + '.' + monitor.name)

            if callable(expected_output):
                exp = expected_output(transaction)
            elif len(expected_output):
                exp = expected_output.pop(0)
            else:
                self.errors += 1
                log.error("Received a transaction but wasn't expecting anything")
                log.info("Got: %s" % (hexdump(str(transaction))))
                if self._imm: raise TestFailure("Received a transaction but wasn't expecting anything")
                return

            if type(transaction) != type(exp):
                self.errors += 1
                log.error("Received transaction is a different type to expected transaction")
                log.info("Got: %s but expected %s" % (str(type(transaction)), str(type(exp))))
                if self._imm: raise TestFailure("Received transaction of wrong type")
                return

            if transaction != exp:
                self.errors += 1
                log.error("Received transaction differed from expected output")
                log.info("Expected:\n" + hexdump(exp))
                if not isinstance(exp, str):
                    try:
                        for word in exp: self.log.info(str(word))
                    except: pass
                log.info("Received:\n" + hexdump(transaction))
                if not isinstance(transaction, str):
                    try:
                        for word in transaction: self.log.info(str(word))
                    except: pass
                log.warning("Difference:\n%s" % hexdiffs(exp, transaction))
                if self._imm: raise TestFailure("Received transaction differed from expected transaction")
            else:
                # Don't want to fail the test if we're passed something without __len__
                try:
                    log.debug("Received expected transaction %d bytes" % (len(transaction)))
                    log.debug(repr(transaction))
                except: pass

        monitor.add_callback(check_received_transaction)
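
A usage note: the expected output handed to add_interface is either a list that the scoreboard consumes in order or a callable that produces the expectation on demand. The sketch below is a hypothetical, cocotb-free reduction of that policy from check_received_transaction; the byte strings are made-up transactions.

def check(transaction, expected_output):
    # Callable expectations are evaluated per transaction; list expectations
    # are consumed front-to-back, and anything unexpected is an error.
    if callable(expected_output):
        exp = expected_output(transaction)
    elif expected_output:
        exp = expected_output.pop(0)
    else:
        raise AssertionError("Received a transaction but wasn't expecting anything")
    if transaction != exp:
        raise AssertionError("Received transaction differed from expected output")

expected = ['\x00\x01', '\x02\x03']
check('\x00\x01', expected)   # matches and consumes the first entry
check('\x02\x03', expected)   # matches and consumes the second; list is now empty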