Example #1
    def initialise(self):

        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()
        self.xunit.add_testsuite(name="all", tests=repr(self.ntests), package="all")

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        self._dut = cocotb.handle.SimHandle(simulator.get_root_handle(self._root_name))
        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            module = _my_import(module_name)

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                            (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" % thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name, classname=module_name, time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: "%s.%s" % (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                        (valid_tests.module,
                         valid_tests.funcname))
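
The discovery loop above relies on a single convention: cocotb's test decorator attaches an "im_test" attribute to each decorated coroutine, and hasattr(thing, "im_test") is the entire discovery protocol. A minimal sketch of that marking pattern, using hypothetical stand-in names (only the "im_test" marker is taken from the example):

# Minimal sketch of attribute-based test discovery, as relied on above.
# mark_test/discover are hypothetical stand-ins, not cocotb's decorator.

def mark_test(f):
    """Tag a callable so a discovery loop can find it."""
    f.im_test = True
    return f

@mark_test
def my_test(dut):
    pass

def discover(namespace):
    """Yield every object in a module namespace tagged as a test."""
    for thing in namespace.values():
        if hasattr(thing, "im_test"):
            yield thing

assert list(discover({"my_test": my_test, "other": len})) == [my_test]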
Example #2
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""

    def __init__(self, root_name, modules, tests=None, seed=None, hooks=[]):
        """
        Args:
            modules (list): A list of python module names to run

        Kwargs
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")
        self._seed = seed
        self._hooks = hooks

    def initialise(self):
        try:
            self._initialise()
        except Exception as e:
            import traceback
            self.log.error(traceback.format_exc())
            raise
        
    def _initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()

        suite_name = os.getenv('RESULT_TESTSUITE') or "all"
        package_name = os.getenv('RESULT_TESTPACKAGE') or "all"

        self.xunit.add_testsuite(name=suite_name, tests=repr(self.ntests),
                                 package=package_name)

        if self._seed is not None:
            self.xunit.add_property(name="random_seed", value="%d" % self._seed)

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                self.log.debug("Python Path: " + ",".join(sys.path))
                self.log.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                self.log.critical("Failed to import module %s: %s", module_name, E)
                self.log.info("MODULE variable was \"%s\"", ".".join(self._modules))
                self.log.info("Traceback: ")
                self.log.info(traceback.format_exc())
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0",
                                                sim_time_ns="0.0",
                                                ratio_time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                        self._store_test_result(module_name, thing.name, None, 0.0, 0.0, 0.0)
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: "%s.%s" %
                         (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module,
                           valid_tests.funcname))

        for module_name in self._hooks:
            self.log.info("Loading hook from module '"+module_name+"'")
            module = _my_import(module_name)

            for thing in vars(module).values():
                if hasattr(thing, "im_hook"):
                    try:
                        test = thing(self._dut)
                    except TestError:
                        self.log.warning("Failed to initialize hook %s" % thing.name)
                    else:
                        cocotb.scheduler.add(test)


    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def _add_failure(self, result):
        self.xunit.add_failure(stdout=repr(str(result)),
                               stderr="\n".join(self._running_test.error_messages),
                               message="Test failed with random_seed={}".format(self._seed))
        self.failures += 1

    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args:
            result (TestComplete): The exception that ended the test.
        """
        real_time   = time.time() - self._running_test.start_time
        sim_time_ns = get_sim_time('ns') - self._running_test.start_sim_time
        ratio_time  = sim_time_ns / real_time
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(real_time),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        running_test_funcname = self._running_test.funcname

        # Helper for logging result
        def _result_was():
            result_was = ("%s (result was %s)" %
                          (running_test_funcname, result.__class__.__name__))
            return result_was

        result_pass = True

        if (isinstance(result, TestSuccess) and
                not self._running_test.expect_fail and
                not self._running_test.expect_error):
            self.log.info("Test Passed: %s" % running_test_funcname)

        elif (isinstance(result, TestFailure) and
                self._running_test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess) and
              self._running_test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: " + _result_was())

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test error has lead to simulator shuttting us "
                               "down")
                self._add_failure(result)
                self._store_test_result(self._running_test.module, self._running_test.funcname, False, sim_time_ns, real_time, ratio_time)
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: " + _result_was())
            self._add_failure(result)
            result_pass = False

        self._store_test_result(self._running_test.module, self._running_test.funcname, result_pass, sim_time_ns, real_time, ratio_time)

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression.next_test()
        if self._running_test:
            start = ''
            end   = ''
            if self.log.colour:
                start = ANSI.BLUE_BG + ANSI.BLACK_FG
                end   = ANSI.DEFAULT
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (start,
                           self.count, self.ntests,
                           end,
                           self._running_test.funcname))
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()

    def _log_test_summary(self):
        TEST_FIELD   = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD    = 'SIM TIME(NS)'
        REAL_FIELD   = 'REAL TIME(S)'
        RATIO_FIELD  = 'RATIO(NS/S)'

        TEST_FIELD_LEN   = max(len(TEST_FIELD), len(max([x['test'] for x in self.test_results], key=len)))
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN    = len(SIM_FIELD)
        REAL_FIELD_LEN   = len(REAL_FIELD)
        RATIO_FIELD_LEN  = len(RATIO_FIELD)

        LINE_LEN = 3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 + SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 + RATIO_FIELD_LEN + 3

        LINE_SEP = "*"*LINE_LEN+"\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(a=TEST_FIELD,   a_len=TEST_FIELD_LEN,
                                                                                                         b=RESULT_FIELD, b_len=RESULT_FIELD_LEN,
                                                                                                         c=SIM_FIELD,    c_len=SIM_FIELD_LEN,
                                                                                                         d=REAL_FIELD,   d_len=REAL_FIELD_LEN,
                                                                                                         e=RATIO_FIELD,  e_len=RATIO_FIELD_LEN)
        summary += LINE_SEP
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if self.log.colour:
                    hilite = ANSI.WHITE_FG + ANSI.RED_BG

            summary += "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n".format(a=result['test'],   a_len=TEST_FIELD_LEN,
                                                                                                                                b=pass_fail_str,    b_len=RESULT_FIELD_LEN,
                                                                                                                                c=result['sim'],    c_len=SIM_FIELD_LEN-1,
                                                                                                                                d=result['real'],   d_len=REAL_FIELD_LEN-1,
                                                                                                                                e=result['ratio'],  e_len=RATIO_FIELD_LEN-1,
                                                                                                                                start=hilite)
        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self):
        real_time   = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time  = sim_time_ns / real_time

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {0:<39}**\n".format(self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {0:<39}**\n".format('{0:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {0:<39}**\n".format('{0:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {0:<39}**\n".format('{0:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    def _store_test_result(self, module_name, test_name, result_pass, sim_time, real_time, ratio):
        result = {
            'test'  : '.'.join([module_name, test_name]),
            'pass'  : result_pass,
            'sim'   : sim_time,
            'real'  : real_time,
            'ratio' : ratio}
        self.test_results.append(result)
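
End to end, the class above runs as a simple chain: initialise() fills self._queue, execute() pops a test and hands it to the scheduler, and the scheduler invokes handle_result() when the test completes, which calls execute() again until the queue drains into tear_down(). A stripped-down sketch of that control flow with stand-in classes (illustrative only; the real loop is driven by cocotb's embedded scheduler):

# Illustrative control flow only -- stand-in classes, not cocotb's scheduler.

class FakeTest(object):
    def __init__(self, name):
        self.name = name

class MiniRegression(object):
    def __init__(self, tests):
        self._queue = list(tests)  # initialise() would populate this
        self.count = 1

    def next_test(self):
        return self._queue.pop(0) if self._queue else None

    def execute(self):
        test = self.next_test()
        if test is None:
            print("tear_down()")   # queue drained: write reports, stop sim
            return
        print("Running test %d: %s" % (self.count, test.name))
        self.count += 1
        self.handle_result(test)   # the scheduler calls this on completion

    def handle_result(self, test):
        # record pass/fail for the test here, then chain into the next one
        self.execute()

MiniRegression([FakeTest("a"), FakeTest("b")]).execute()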
Example #3
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""

    def __init__(self, dut, modules, tests=None):
        """
        Args:
            modules (list): A list of python module names to run

        Kwargs
        """
        self._queue = []
        self._dut = dut
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self.log = SimLog("cocotb.regression")

    def initialise(self):

        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()
        self.xunit.add_testsuite(name="all", tests=repr(self.ntests), package="all")

        # Auto discovery
        for module_name in self._modules:
            module = _my_import(module_name)

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                            (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" % thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name, classname=module_name, time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: "%s.%s" % (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                        (valid_tests.module,
                         valid_tests.funcname))

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                (self.failures, self.count -1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)"  %
                (self.count-1, self.skipped))
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()


    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)


    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args:
            result (TestComplete): The exception that ended the test.
        """
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(time.time() - self._running_test.start_time))

        if isinstance(result, TestSuccess) and not self._running_test.expect_fail and not self._running_test.expect_error:
            self.log.info("Test Passed: %s" % self._running_test.funcname)

        elif isinstance(result, TestFailure) and self._running_test.expect_fail:
            self.log.info("Test failed as expected: %s (result was %s)" % (
                          self._running_test.funcname, result.__class__.__name__))

        elif isinstance(result, TestSuccess) and self._running_test.expect_error:
            self.log.error("Test passed but we expected an error: %s (result was %s)" % (
                           self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)), stderr="\n".join(self._running_test.error_messages))
            self.failures += 1

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: %s (result was %s)" % (
                           self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)), stderr="\n".join(self._running_test.error_messages))
            self.failures += 1

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: %s (result was %s)" % (
                          self._running_test.funcname, result.__class__.__name__))

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: %s (result was %s)" % (
                              self._running_test.funcname, result.__class__.__name__))
            else:
                self.log.error("Test error has lead to simulator shuttting us down")
                self.failures += 1
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: %s (result was %s)" % (
                        self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)), stderr="\n".join(self._running_test.error_messages))
            self.failures += 1

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression.next_test()
        if self._running_test:
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" % (
               ANSI.BLUE_BG +ANSI.BLACK_FG,
                    self.count, self.ntests,
               ANSI.DEFAULT_FG + ANSI.DEFAULT_BG,
                    self._running_test.funcname))
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()
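
The elif chain in handle_result above encodes an expectation matrix over the result type and the test's expect_fail/expect_error flags. A compact, runnable restatement of that matrix with stand-in exception classes (cocotb defines the real ones in cocotb.result; this mirrors the branch order above rather than any official API):

# Stand-in exception types; cocotb's real ones live in cocotb.result.
class TestSuccess(Exception): pass
class TestFailure(Exception): pass
class TestError(Exception): pass
class SimFailure(Exception): pass

def verdict(result, expect_fail=False, expect_error=False):
    """Return 'pass' or 'fail' following the branch order used above."""
    if isinstance(result, TestSuccess) and not expect_fail and not expect_error:
        return "pass"                               # plain pass
    if isinstance(result, TestFailure) and expect_fail:
        return "pass"                               # failed as expected
    if isinstance(result, TestSuccess):
        return "fail"                               # passed, expected fail/error
    if isinstance(result, TestError) and expect_error:
        return "pass"                               # errored as expected
    if isinstance(result, SimFailure):
        return "pass" if expect_error else "fail"   # sim shuts down either way
    return "fail"                                   # anything else is a failure

assert verdict(TestSuccess()) == "pass"
assert verdict(TestFailure(), expect_fail=True) == "pass"
assert verdict(TestSuccess(), expect_error=True) == "fail"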
Example #4
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""
    def __init__(self, root_name, modules, tests=None, seed=None, hooks=[]):
        """
        Args:
            root_name (str): The name of the root handle.
            modules (list): A list of Python module names to run.
            tests (list, optional): A list of tests to run.
                Defaults to ``None``, meaning all discovered tests will be run.
            seed (int,  optional): The seed for the random number generator to use.
                Defaults to ``None``.
            hooks (list, optional): A list of hook modules to import.
                Defaults to the empty list.
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")
        self._seed = seed
        self._hooks = hooks

    def initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0

        # Setup XUnit
        ###################

        results_filename = os.getenv('COCOTB_RESULTS_FILE', "results.xml")
        suite_name = os.getenv('RESULT_TESTSUITE', "all")
        package_name = os.getenv('RESULT_TESTPACKAGE', "all")

        self.xunit = XUnitReporter(filename=results_filename)

        self.xunit.add_testsuite(name=suite_name,
                                 tests=repr(self.ntests),
                                 package=package_name)

        if (self._seed is not None):
            self.xunit.add_property(name="random_seed",
                                    value=("%d" % self._seed))

        # Setup Coverage
        ####################

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        # Setup DUT object
        #######################

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Test Discovery
        ####################

        for module_name in self._modules:
            try:
                self.log.debug("Python Path: " + ",".join(sys.path))
                self.log.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                self.log.critical("Failed to import module %s: %s",
                                  module_name, E)
                self.log.info("MODULE variable was \"%s\"",
                              ".".join(self._modules))
                self.log.info("Traceback: ")
                self.log.info(traceback.format_exc())
                raise

            if self._functions:

                # Specific functions specified, don't auto-discover
                for test in self._functions.rsplit(','):
                    try:
                        _test = getattr(module, test)
                    except AttributeError:
                        self.log.error(
                            "Requested test %s wasn't found in module %s",
                            test, module_name)
                        err = AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))
                        _py_compat.raise_from(err,
                                              None)  # discard nested traceback

                    if not hasattr(_test, "im_test"):
                        self.log.error(
                            "Requested %s from module %s isn't a cocotb.test decorated coroutine",
                            test, module_name)
                        raise ImportError("Failed to find requested test %s" %
                                          test)
                    self._init_test(_test)

                # only look in first module for all functions and don't complain if all functions are not found
                break

            # auto-discover
            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    self._init_test(thing)

        self._queue.sort(key=lambda test: (test.stage, test._id))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module, valid_tests.funcname))

        # Process Hooks
        ###################

        for module_name in self._hooks:
            self.log.info("Loading hook from module '" + module_name + "'")
            module = _my_import(module_name)

            for thing in vars(module).values():
                if hasattr(thing, "im_hook"):
                    try:
                        test = thing(self._dut)
                    except Exception:
                        self.log.warning("Failed to initialize hook %s" %
                                         thing.name,
                                         exc_info=True)
                    else:
                        cocotb.scheduler.add(test)

    def tear_down(self):
        # fail remaining tests
        while True:
            test = self.next_test()
            if test is None:
                break
            self.xunit.add_testcase(name=test.funcname,
                                    classname=test.module,
                                    time=repr(0),
                                    sim_time_ns=repr(0),
                                    ratio_time=repr(0))
            result_pass, _ = self._score_test(
                test, cocotb.outcomes.Error(SimFailure()))
            self._store_test_result(test.__module__, test.__name__,
                                    result_pass, 0, 0, 0)
            if not result_pass:
                self.xunit.add_failure()
                self.failures += 1

        # Write out final log messages
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if len(self.test_results) > 0:
            self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")

        # Generate output reports
        self.xunit.write()
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()

        # Setup simulator finalization
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def handle_result(self, test):
        """Handle a test completing.

        Dump result to XML and schedule the next test (if any). Entered by the scheduler.

        Args:
            test: The test that completed
        """
        assert test is self._running_test

        real_time = time.time() - test.start_time
        sim_time_ns = get_sim_time('ns') - test.start_sim_time
        ratio_time = self._safe_divide(sim_time_ns, real_time)

        self.xunit.add_testcase(name=test.funcname,
                                classname=test.module,
                                time=repr(real_time),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        # score test
        result_pass, sim_failed = self._score_test(test, test._outcome)

        # stop capturing log output
        cocotb.log.removeHandler(test.handler)

        # Save results
        self._store_test_result(test.__module__, test.__name__, result_pass,
                                sim_time_ns, real_time, ratio_time)
        if not result_pass:
            self.xunit.add_failure()
            self.failures += 1

        # Fail if required
        if sim_failed:
            self.tear_down()
            return

        self.execute()

    def _init_test(self, test_func):
        """
        Initializes a test.

        Records outcome if the initialization fails.
        Records skip if the test is skipped.
        Saves the initialized test if it successfully initializes.
        """
        test_init_outcome = cocotb.outcomes.capture(test_func, self._dut)

        if isinstance(test_init_outcome, cocotb.outcomes.Error):
            self.log.error("Failed to initialize test %s" % test_func.name,
                           exc_info=True)
            self.xunit.add_testcase(name=test_func.name,
                                    classname=test_func.__module__,
                                    time="0.0",
                                    sim_time_ns="0.0",
                                    ratio_time="0.0")
            result_pass, sim_failed = self._score_test(test_func,
                                                       test_init_outcome)
            # Save results
            self._store_test_result(test_func.__module__, test_func.__name__,
                                    result_pass, 0.0, 0.0, 0.0)
            if not result_pass:
                self.xunit.add_failure()
                self.failures += 1
            # Fail if required
            if sim_failed:
                self.tear_down()
                raise SimFailure(
                    "Test initialization caused a simulator failure. Shutting down."
                )

        else:
            test = test_init_outcome.get()
            if test.skip:
                self.log.info("Skipping test %s" % test_func.name)
                self.xunit.add_testcase(name=test_func.name,
                                        classname=test.module,
                                        time="0.0",
                                        sim_time_ns="0.0",
                                        ratio_time="0.0")
                self.xunit.add_skipped()
                self.skipped += 1
                self._store_test_result(test.module, test_func.name, None, 0.0,
                                        0.0, 0.0)
            else:
                self._queue.append(test)
                self.ntests += 1

    def _score_test(self, test, outcome):
        """
        Given a test and the test's outcome, determine if the test met expectations and log pertinent information
        """

        # Helper for logging result
        def _result_was():
            result_was = ("{} (result was {})".format(
                test.__name__, result.__class__.__name__))
            return result_was

        # scoring outcomes
        result_pass = True
        sim_failed = False

        try:
            outcome.get()
        except Exception as e:
            result = remove_traceback_frames(e, ['_score_test', 'get'])
        else:
            result = TestSuccess()

        if (isinstance(result, TestSuccess) and not test.expect_fail
                and not test.expect_error):
            self.log.info("Test Passed: %s" % test.__name__)

        elif (isinstance(result, AssertionError) and test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess) and test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            result_pass = False

        elif isinstance(result, SimFailure):
            if isinstance(result, test.expect_error):
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error(
                    "Test error has lead to simulator shutting us "
                    "down",
                    exc_info=result)
                result_pass = False
            # whether we expected it or not, the simulation has failed unrecoverably
            sim_failed = True

        elif test.expect_error:
            if isinstance(result, test.expect_error):
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test errored with unexpected type: " +
                               _result_was(),
                               exc_info=result)
                result_pass = False

        else:
            self.log.error("Test Failed: " + _result_was(), exc_info=result)
            result_pass = False

            if _pdb_on_exception:
                pdb.post_mortem(result.__traceback__)

        return result_pass, sim_failed

    def execute(self):
        self._running_test = cocotb.regression_manager.next_test()
        if self._running_test:
            start = ''
            end = ''
            if want_color_output():
                start = ANSI.COLOR_TEST
                end = ANSI.COLOR_DEFAULT
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (start, self.count, self.ntests, end,
                           self._running_test.funcname))

            # start capturing log output
            cocotb.log.addHandler(self._running_test.handler)

            cocotb.scheduler.add_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()

    def _log_test_summary(self):
        TEST_FIELD = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD = 'SIM TIME(NS)'
        REAL_FIELD = 'REAL TIME(S)'
        RATIO_FIELD = 'RATIO(NS/S)'

        TEST_FIELD_LEN = max(
            len(TEST_FIELD),
            len(max([x['test'] for x in self.test_results], key=len)))
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN = len(SIM_FIELD)
        REAL_FIELD_LEN = len(REAL_FIELD)
        RATIO_FIELD_LEN = len(RATIO_FIELD)

        LINE_LEN = 3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 + SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 + RATIO_FIELD_LEN + 3

        LINE_SEP = "*" * LINE_LEN + "\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(
            a=TEST_FIELD,
            a_len=TEST_FIELD_LEN,
            b=RESULT_FIELD,
            b_len=RESULT_FIELD_LEN,
            c=SIM_FIELD,
            c_len=SIM_FIELD_LEN,
            d=REAL_FIELD,
            d_len=REAL_FIELD_LEN,
            e=RATIO_FIELD,
            e_len=RATIO_FIELD_LEN)
        summary += LINE_SEP
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if want_color_output():
                    hilite = ANSI.COLOR_HILITE_SUMMARY

            summary += "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n".format(
                a=result['test'],
                a_len=TEST_FIELD_LEN,
                b=pass_fail_str,
                b_len=RESULT_FIELD_LEN,
                c=result['sim'],
                c_len=SIM_FIELD_LEN - 1,
                d=result['real'],
                d_len=REAL_FIELD_LEN - 1,
                e=result['ratio'],
                e_len=RATIO_FIELD_LEN - 1,
                start=hilite)
        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self):
        real_time = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time = self._safe_divide(sim_time_ns, real_time)

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {0:<39}**\n".format(
            self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {0:<39}**\n".format(
            '{0:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {0:<39}**\n".format(
            '{0:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {0:<39}**\n".format(
            '{0:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    @staticmethod
    def _safe_divide(a, b):
        try:
            return a / b
        except ZeroDivisionError:
            if a == 0:
                return float('nan')
            else:
                return float('inf')

    def _store_test_result(self, module_name, test_name, result_pass, sim_time,
                           real_time, ratio):
        result = {
            'test': '.'.join([module_name, test_name]),
            'pass': result_pass,
            'sim': sim_time,
            'real': real_time,
            'ratio': ratio
        }
        self.test_results.append(result)
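
This revision scores tests through outcome objects: cocotb.outcomes.capture() runs a callable and wraps either its return value or the exception it raised, and .get() replays that outcome later (as _init_test and _score_test do above). A minimal sketch of the pattern, assuming a simple Value/Error pair (cocotb.outcomes is an internal module and may differ):

# Minimal Value/Error outcome pattern, as used by _init_test above.
# Stand-in classes; not cocotb's actual internals.

class Value(object):
    def __init__(self, value):
        self._value = value

    def get(self):
        return self._value   # replay the successful result

class Error(object):
    def __init__(self, exc):
        self._exc = exc

    def get(self):
        raise self._exc      # replay the captured exception

def capture(fn, *args, **kwargs):
    """Run fn and wrap its result or exception in an outcome object."""
    try:
        return Value(fn(*args, **kwargs))
    except BaseException as e:
        return Error(e)

outcome = capture(int, "42")
assert outcome.get() == 42
assert isinstance(capture(int, "not a number"), Error)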
Example #5
    def initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()

        suite_name = os.getenv('RESULT_TESTSUITE') or "all"
        package_name = os.getenv('RESULT_TESTPACKAGE') or "all"

        self.xunit.add_testsuite(name=suite_name,
                                 tests=repr(self.ntests),
                                 package=package_name)

        if (self._seed is not None):
            self.xunit.add_property(name="random_seed",
                                    value=("%d" % self._seed))

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                self.log.debug("Python Path: " + ",".join(sys.path))
                self.log.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                self.log.critical("Failed to import module %s: %s",
                                  module_name, E)
                self.log.info("MODULE variable was \"%s\"",
                              ".".join(self._modules))
                self.log.info("Traceback: ")
                self.log.info(traceback.format_exc())
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    try:
                        _test = getattr(module, test)
                    except AttributeError:
                        self.log.error(
                            "Requested test %s wasn't found in module %s",
                            test, module_name)
                        err = AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))
                        raise_from(err, None)  # discard nested traceback

                    if not hasattr(_test, "im_test"):
                        self.log.error(
                            "Requested %s from module %s isn't a cocotb.test decorated coroutine",
                            test, module_name)
                        raise ImportError("Failed to find requested test %s" %
                                          test)
                    self._queue.append(_test(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialize test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0",
                                                sim_time_ns="0.0",
                                                ratio_time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                        self._store_test_result(module_name, thing.name, None,
                                                0.0, 0.0, 0.0)
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: test.sort_name())

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module, valid_tests.funcname))

        for module_name in self._hooks:
            self.log.info("Loading hook from module '" + module_name + "'")
            module = _my_import(module_name)

            for thing in vars(module).values():
                if hasattr(thing, "im_hook"):
                    try:
                        test = thing(self._dut)
                    except TestError:
                        self.log.warning("Failed to initialize hook %s" %
                                         thing.name)
                    else:
                        cocotb.scheduler.add(test)
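
When a requested test is missing, this revision raises a fresh AttributeError via raise_from(err, None) so the log is not cluttered with a nested traceback. On Python 3 that helper reduces to the raise ... from ... syntax; a sketch of the shim it implies (assumed shape; cocotb ships its own version, which also covers Python 2):

# Assumed Python 3 shape of the raise_from helper used above.
def raise_from(exc, cause):
    """Raise exc with an explicit __cause__; None suppresses chaining."""
    raise exc from cause

try:
    try:
        {}["missing"]
    except KeyError:
        # Drop the KeyError context, as the discovery code above does.
        raise_from(AttributeError("Test missing doesn't exist"), None)
except AttributeError as e:
    assert e.__cause__ is None and e.__suppress_context__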
Example #6
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""
    def __init__(self, root_name, modules, tests=None, seed=None, hooks=[]):
        """
        Args:
            root_name (str): The name of the root handle.
            modules (list): A list of Python module names to run.
            tests (list, optional): A list of tests to run.
                Defaults to ``None``, meaning all discovered tests will be run.
            seed (int,  optional): The seed for the random number generator to use.
                Defaults to ``None``.
            hooks (list, optional): A list of hook modules to import.
                Defaults to the empty list.
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")
        self._seed = seed
        self._hooks = hooks

    def initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()

        suite_name = os.getenv('RESULT_TESTSUITE') or "all"
        package_name = os.getenv('RESULT_TESTPACKAGE') or "all"

        self.xunit.add_testsuite(name=suite_name,
                                 tests=repr(self.ntests),
                                 package=package_name)

        if self._seed is not None:
            self.xunit.add_property(name="random_seed",
                                    value=("%d" % self._seed))

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                self.log.debug("Python Path: " + ",".join(sys.path))
                self.log.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                self.log.critical("Failed to import module %s: %s",
                                  module_name, E)
                self.log.info("MODULE variable was \"%s\"",
                              ".".join(self._modules))
                self.log.info("Traceback: ")
                self.log.info(traceback.format_exc())
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    try:
                        _test = getattr(module, test)
                    except AttributeError:
                        self.log.error(
                            "Requested test %s wasn't found in module %s",
                            test, module_name)
                        err = AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))
                        raise_from(err, None)  # discard nested traceback

                    if not hasattr(_test, "im_test"):
                        self.log.error(
                            "Requested %s from module %s isn't a cocotb.test decorated coroutine",
                            test, module_name)
                        raise ImportError("Failed to find requested test %s" %
                                          test)
                    self._queue.append(_test(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialize test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0",
                                                sim_time_ns="0.0",
                                                ratio_time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                        self._store_test_result(module_name, thing.name, None,
                                                0.0, 0.0, 0.0)
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: test.sort_name())

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module, valid_tests.funcname))

        for module_name in self._hooks:
            self.log.info("Loading hook from module '" + module_name + "'")
            module = _my_import(module_name)

            for thing in vars(module).values():
                if hasattr(thing, "im_hook"):
                    try:
                        test = thing(self._dut)
                    except TestError:
                        self.log.warning("Failed to initialize hook %s" %
                                         thing.name)
                    else:
                        cocotb.scheduler.add(test)

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        if len(self.test_results) > 0:
            self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def _add_failure(self, result):
        self.xunit.add_failure(
            stdout=repr(str(result)),
            stderr="\n".join(self._running_test.error_messages),
            message="Test failed with random_seed={}".format(self._seed))
        self.failures += 1

    def handle_result(self, result):
        """Handle a test result.

        Dump result to XML and schedule the next test (if any).

        Args:
            result: The sub-exception of TestComplete to raise.
        """
        real_time = time.time() - self._running_test.start_time
        sim_time_ns = get_sim_time('ns') - self._running_test.start_sim_time
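        # sim/real ratio: when no wall-clock time elapsed, report NaN if no
        # sim time passed either, and +inf if it did (see the except below)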
        try:
            ratio_time = sim_time_ns / real_time
        except ZeroDivisionError:
            if round(sim_time_ns, 2) == 0:
                ratio_time = float('nan')
            else:
                ratio_time = float('inf')
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(real_time),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        running_test_funcname = self._running_test.funcname

        # Helper for logging result
        def _result_was():
            result_was = ("%s (result was %s)" %
                          (running_test_funcname, result.__class__.__name__))
            return result_was

        result_pass = True
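        # Score the result against the test's expect_fail/expect_error flags;
        # any outcome not explicitly expected below is counted as a failure.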

        if (isinstance(result, TestSuccess)
                and not self._running_test.expect_fail
                and not self._running_test.expect_error):
            self.log.info("Test Passed: %s" % running_test_funcname)

        elif (isinstance(result, TestFailure)
              and self._running_test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess)
              and self._running_test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: " + _result_was())

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test error has lead to simulator shutting us "
                               "down")
                self._add_failure(result)
                self._store_test_result(self._running_test.module,
                                        self._running_test.funcname, False,
                                        sim_time_ns, real_time, ratio_time)
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: " + _result_was())
            self._add_failure(result)
            result_pass = False

        self._store_test_result(self._running_test.module,
                                self._running_test.funcname, result_pass,
                                sim_time_ns, real_time, ratio_time)

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression_manager.next_test()
        if self._running_test:
            start = ''
            end = ''
            if self.log.colour:
                start = ANSI.COLOR_TEST
                end = ANSI.COLOR_DEFAULT
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (start, self.count, self.ntests, end,
                           self._running_test.funcname))
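            # First test is added to the scheduler directly; later tests go
            # through new_test() so the scheduler can clean up in between.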
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()

    def _log_test_summary(self):
        TEST_FIELD = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD = 'SIM TIME(NS)'
        REAL_FIELD = 'REAL TIME(S)'
        RATIO_FIELD = 'RATIO(NS/S)'

        TEST_FIELD_LEN = max(
            len(TEST_FIELD),
            len(max([x['test'] for x in self.test_results], key=len)))
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN = len(SIM_FIELD)
        REAL_FIELD_LEN = len(REAL_FIELD)
        RATIO_FIELD_LEN = len(RATIO_FIELD)

        LINE_LEN = (3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 +
                    SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 + RATIO_FIELD_LEN + 3)

        LINE_SEP = "*" * LINE_LEN + "\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(
            a=TEST_FIELD,
            a_len=TEST_FIELD_LEN,
            b=RESULT_FIELD,
            b_len=RESULT_FIELD_LEN,
            c=SIM_FIELD,
            c_len=SIM_FIELD_LEN,
            d=REAL_FIELD,
            d_len=REAL_FIELD_LEN,
            e=RATIO_FIELD,
            e_len=RATIO_FIELD_LEN)
        summary += LINE_SEP
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if self.log.colour:
                    hilite = ANSI.COLOR_HILITE_SUMMARY

            summary += "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n".format(
                a=result['test'],
                a_len=TEST_FIELD_LEN,
                b=pass_fail_str,
                b_len=RESULT_FIELD_LEN,
                c=result['sim'],
                c_len=SIM_FIELD_LEN - 1,
                d=result['real'],
                d_len=REAL_FIELD_LEN - 1,
                e=result['ratio'],
                e_len=RATIO_FIELD_LEN - 1,
                start=hilite)
        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self):
        real_time = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time = sim_time_ns / real_time

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {0:<39}**\n".format(
            self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {0:<39}**\n".format(
            '{0:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {0:<39}**\n".format(
            '{0:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {0:<39}**\n".format(
            '{0:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    def _store_test_result(self, module_name, test_name, result_pass, sim_time,
                           real_time, ratio):
        result = {
            'test': '.'.join([module_name, test_name]),
            'pass': result_pass,
            'sim': sim_time,
            'real': real_time,
            'ratio': ratio
        }
        self.test_results.append(result)
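
A hypothetical driver for this variant, shown only to make the call order concrete (in practice cocotb's embedding layer builds the manager from the TOPLEVEL/MODULE/TESTCASE environment variables); note that tests is consumed as a comma-separated string:

manager = RegressionManager(root_name="top",
                            modules=["test_counter"],
                            tests="test_reset,test_count",
                            seed=1377424946)
manager.initialise()   # build the queue, xunit suite and coverage
manager.execute()      # run each queued test in turn, then tear down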
Example #10
class RegressionManager:
    """Encapsulates all regression capability into a single place"""

    def __init__(self, dut: SimHandle, tests: Iterable[Test], hooks: Iterable[Hook]):
        """
        Args:
            dut (SimHandle): The root handle to pass into test functions.
            tests (Iterable[Test]): tests to run
            hooks (Iterable[Hook]): hooks to run
        """
        self._dut = dut
        self._test_task = None
        self._cov = None
        self.log = _logger
        self.start_time = time.time()
        self.test_results = []
        self.count = 0
        self.skipped = 0
        self.failures = 0
        self._tearing_down = False

        # Setup XUnit
        ###################

        results_filename = os.getenv('COCOTB_RESULTS_FILE', "results.xml")
        suite_name = os.getenv('RESULT_TESTSUITE', "all")
        package_name = os.getenv('RESULT_TESTPACKAGE', "all")

        self.xunit = XUnitReporter(filename=results_filename)

        self.xunit.add_testsuite(name=suite_name, package=package_name)

        self.xunit.add_property(name="random_seed", value=str(cocotb.RANDOM_SEED))

        # Setup Coverage
        ####################

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            # Exclude cocotb itself from coverage collection.
            cocotb_package_dir = os.path.dirname(__file__)
            self._cov = coverage.coverage(branch=True, omit=["{}/*".format(cocotb_package_dir)])
            self._cov.start()

        # Test Discovery
        ####################
        self._queue = []
        for test in tests:
            self.log.info("Found test {}.{}".format(test.__module__, test.__qualname__))
            self._queue.append(test)
        self.ntests = len(self._queue)

        if not self._queue:
            self.log.warning("No tests were discovered")

        self._queue.sort(key=lambda test: (test.stage, test._id))

        # Process Hooks
        ###################
        for hook in hooks:
            self.log.info("Found hook {}.{}".format(hook.__module__, hook.__qualname__))
            self._init_hook(hook)

    @classmethod
    def from_discovery(cls, dut: SimHandle):
        """
        Obtains the test and hook lists by discovery.

        See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.

        Args:
            dut (SimHandle): The root handle to pass into test functions.
        """
        tests = cls._discover_tests()
        hooks = cls._discover_hooks()
        return cls(dut, tests, hooks)

    @staticmethod
    def _discover_tests() -> Iterable[Test]:
        """
        Discovers tests in files automatically.

        See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.
        """
        module_str = os.getenv('MODULE')
        test_str = os.getenv('TESTCASE')

        if module_str is None:
            raise ValueError("Environment variable MODULE, which defines the module(s) to execute, is not defined.")

        modules = [s.strip() for s in module_str.split(',') if s.strip()]

        for module_name in modules:
            try:
                _logger.debug("Python Path: " + ",".join(sys.path))
                _logger.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                _logger.critical("Failed to import module %s: %s", module_name, E)
                _logger.info("MODULE variable was \"%s\"", ".".join(modules))
                _logger.info("Traceback: ")
                _logger.info(traceback.format_exc())
                raise

            if test_str:

                # Specific functions specified, don't auto-discover
                for test_name in test_str.rsplit(','):
                    try:
                        test = getattr(module, test_name)
                    except AttributeError:
                        _logger.error("Requested test %s wasn't found in module %s", test_name, module_name)
                        err = AttributeError("Test %s doesn't exist in %s" % (test_name, module_name))
                        raise err from None  # discard nested traceback

                    if not isinstance(test, Test):
                        _logger.error("Requested %s from module %s isn't a cocotb.test decorated coroutine",
                                      test_name, module_name)
                        raise ImportError("Failed to find requested test %s" % test_name)

                    # If we request a test manually, it should be run even if skip=True is set.
                    test.skip = False

                    yield test

                # only look in first module for all functions and don't complain if all functions are not found
                break

            # auto-discover
            for thing in vars(module).values():
                if isinstance(thing, Test):
                    yield thing

    @staticmethod
    def _discover_hooks() -> Iterable[Hook]:
        """
        Discovers hooks automatically.

        See :envvar:`COCOTB_HOOKS` for details on how hooks are discovered.
        """
        hooks_str = os.getenv('COCOTB_HOOKS', '')
        hooks = [s.strip() for s in hooks_str.split(',') if s.strip()]

        for module_name in hooks:
            _logger.info("Loading hook from module '" + module_name + "'")
            module = _my_import(module_name)

            for thing in vars(module).values():
                if hasattr(thing, "im_hook"):
                    yield thing

    def _init_hook(self, hook: Hook) -> Optional[RunningTask]:
        try:
            test = hook(self._dut)
        except Exception:
            self.log.warning("Failed to initialize hook %s" % hook.name, exc_info=True)
        else:
            return cocotb.scheduler.add(test)

    def tear_down(self) -> None:
        # prevent re-entering the tear down procedure
        if not self._tearing_down:
            self._tearing_down = True
        else:
            return

        # fail remaining tests
        while True:
            test = self.next_test()
            if test is None:
                break
            self._record_result(
                test=test,
                outcome=Error(SimFailure),
                wall_time_s=0,
                sim_time_ns=0)

        # Write out final log messages
        self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")

        # Generate output reports
        self.xunit.write()
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        if cocotb._library_coverage is not None:
            # TODO: move this once we have normal shutdown behavior to _sim_event
            cocotb._library_coverage.stop()
            cocotb._library_coverage.save()

        # Setup simulator finalization
        simulator.stop_simulator()

    def next_test(self) -> Optional[Test]:
        """Get the next test to run"""
        if not self._queue:
            return None
        self.count += 1
        return self._queue.pop(0)

    def handle_result(self, test: RunningTask) -> None:
        """Handle a test completing.

        Dump result to XML and schedule the next test (if any). Entered by the scheduler.

        Args:
            test: The test that completed
        """
        assert test is self._test_task

        real_time = time.time() - self._test_start_time
        sim_time_ns = get_sim_time('ns') - self._test_start_sim_time

        # stop capturing log output
        cocotb.log.removeHandler(test.handler)

        self._record_result(
            test=self._test,
            outcome=self._test_task._outcome,
            wall_time_s=real_time,
            sim_time_ns=sim_time_ns)

        self.execute()

    def _init_test(self, test: Test) -> Optional[RunningTask]:
        """
        Initializes a test.

        Records outcome if the initialization fails.
        Records skip if the test is skipped.
        Saves the initialized test if it successfully initializes.
        """

        if test.skip:
            hilight_start = ANSI.COLOR_TEST if want_color_output() else ''
            hilight_end = ANSI.COLOR_DEFAULT if want_color_output() else ''
            # Want this to stand out a little bit
            self.log.info("{}Skipping test {}/{}:{} {}".format(
                hilight_start,
                self.count,
                self.ntests,
                hilight_end,
                test.__qualname__))
            self._record_result(test, None, 0, 0)
            return

        test_init_outcome = cocotb.outcomes.capture(test, self._dut)

        if isinstance(test_init_outcome, cocotb.outcomes.Error):
            self.log.error("Failed to initialize test %s" % test.__qualname__,
                           exc_info=test_init_outcome.error)
            self._record_result(test, test_init_outcome, 0, 0)
            return

        test = test_init_outcome.get()
        return test

    def _score_test(self, test: Test, outcome: Outcome) -> Tuple[bool, bool]:
        """
        Given a test and the test's outcome, determine if the test met expectations and log pertinent information
        """

        # Helper for logging result
        def _result_was():
            result_was = ("{} (result was {})".format
                          (test.__qualname__, type(result).__qualname__))
            return result_was

        # scoring outcomes
        result_pass = True
        sim_failed = False

        try:
            outcome.get()
        except Exception as e:
            result = remove_traceback_frames(e, ['_score_test', 'get'])
        else:
            result = TestSuccess()

        if (isinstance(result, TestSuccess) and
                not test.expect_fail and
                not test.expect_error):
            self.log.info("Test Passed: %s" % test.__qualname__)

        elif (isinstance(result, AssertionError) and
                test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess) and
              test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            result_pass = False

        elif isinstance(result, SimFailure):
            if isinstance(result, test.expect_error):
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test error has lead to simulator shutting us "
                               "down", exc_info=result)
                result_pass = False
            # whether we expected it or not, the simulation has failed unrecoverably
            sim_failed = True

        elif test.expect_error:
            if isinstance(result, test.expect_error):
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test errored with unexpected type: " + _result_was(), exc_info=result)
                result_pass = False

        else:
            self.log.error("Test Failed: " + _result_was(), exc_info=result)
            result_pass = False

            if _pdb_on_exception:
                pdb.post_mortem(result.__traceback__)

        return result_pass, sim_failed

    def _record_result(
        self,
        test: Test,
        outcome: Optional[Outcome],
        wall_time_s: float,
        sim_time_ns: float
    ) -> None:

        ratio_time = self._safe_divide(sim_time_ns, wall_time_s)

        self.xunit.add_testcase(name=test.__qualname__,
                                classname=test.__module__,
                                time=repr(wall_time_s),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        if outcome is None:  # skipped
            test_pass, sim_failed = None, False
            self.xunit.add_skipped()
            self.skipped += 1

        else:
            test_pass, sim_failed = self._score_test(test, outcome)
            if not test_pass:
                self.xunit.add_failure()
                self.failures += 1

        self.test_results.append({
            'test': '.'.join([test.__module__, test.__qualname__]),
            'pass': test_pass,
            'sim': sim_time_ns,
            'real': wall_time_s,
            'ratio': ratio_time})

        if sim_failed:
            self.tear_down()
            return

    def execute(self) -> None:
        while True:
            self._test = self.next_test()
            if self._test is None:
                return self.tear_down()

            self._test_task = self._init_test(self._test)
            if self._test_task:
                return self._start_test()

    def _start_test(self) -> None:
        start = ''
        end = ''
        if want_color_output():
            start = ANSI.COLOR_TEST
            end = ANSI.COLOR_DEFAULT
        # Want this to stand out a little bit
        self.log.info("%sRunning test %d/%d:%s %s" %
                      (start,
                       self.count, self.ntests,
                       end,
                       self._test.__qualname__))

        # start capturing log output
        cocotb.log.addHandler(self._test_task.handler)

        self._test_start_time = time.time()
        self._test_start_sim_time = get_sim_time('ns')
        cocotb.scheduler.add_test(self._test_task)

    def _log_test_summary(self) -> None:

        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count, self.skipped))

        if len(self.test_results) == 0:
            return

        TEST_FIELD = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD = 'SIM TIME(NS)'
        REAL_FIELD = 'REAL TIME(S)'
        RATIO_FIELD = 'RATIO(NS/S)'

        TEST_FIELD_LEN = max(len(TEST_FIELD), len(max([x['test'] for x in self.test_results], key=len)))
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN = len(SIM_FIELD)
        REAL_FIELD_LEN = len(REAL_FIELD)
        RATIO_FIELD_LEN = len(RATIO_FIELD)

        header_dict = dict(
            a=TEST_FIELD,
            b=RESULT_FIELD,
            c=SIM_FIELD,
            d=REAL_FIELD,
            e=RATIO_FIELD,
            a_len=TEST_FIELD_LEN,
            b_len=RESULT_FIELD_LEN,
            c_len=SIM_FIELD_LEN,
            d_len=REAL_FIELD_LEN,
            e_len=RATIO_FIELD_LEN)

        LINE_LEN = 3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 + SIM_FIELD_LEN + 2 + \
            REAL_FIELD_LEN + 2 + RATIO_FIELD_LEN + 3

        LINE_SEP = "*" * LINE_LEN + "\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(**header_dict)
        summary += LINE_SEP

        test_line = "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n"
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if want_color_output():
                    hilite = ANSI.COLOR_HILITE_SUMMARY

            test_dict = dict(
                a=result['test'],
                b=pass_fail_str,
                c=result['sim'],
                d=result['real'],
                e=result['ratio'],
                a_len=TEST_FIELD_LEN,
                b_len=RESULT_FIELD_LEN,
                c_len=SIM_FIELD_LEN - 1,
                d_len=REAL_FIELD_LEN - 1,
                e_len=RATIO_FIELD_LEN - 1,
                start=hilite)

            summary += test_line.format(**test_dict)

        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self) -> None:
        real_time = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time = self._safe_divide(sim_time_ns, real_time)

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {0:<39}**\n".format(self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {0:<39}**\n".format('{0:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {0:<39}**\n".format('{0:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {0:<39}**\n".format('{0:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    @staticmethod
    def _safe_divide(a: float, b: float) -> float:
        try:
            return a / b
        except ZeroDivisionError:
            if a == 0:
                return float('nan')
            else:
                return float('inf')
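
For reference, a minimal test module that the discovery in this variant would pick up, assuming a cocotb 1.x setup where MODULE names the file below; the DUT and timing are illustrative:

# test_counter.py -- discovered via MODULE=test_counter
import cocotb
from cocotb.triggers import Timer

@cocotb.test()
def test_reset(dut):
    """A module-level Test instance, so auto-discovery yields it."""
    yield Timer(10, units='ns')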
Example #11
    def __init__(self, dut: SimHandle, tests: Iterable[Test],
                 hooks: Iterable[Hook]):
        """
        Args:
            dut (SimHandle): The root handle to pass into test functions.
            tests (Iterable[Test]): tests to run
            hooks (Iterable[Hook]): hooks to run
        """
        self._dut = dut
        self._test_task = None
        self._cov = None
        self.log = _logger
        self.start_time = time.time()
        self.test_results = []
        self.count = 0
        self.skipped = 0
        self.failures = 0
        self._tearing_down = False

        # Setup XUnit
        ###################

        results_filename = os.getenv('COCOTB_RESULTS_FILE', "results.xml")
        suite_name = os.getenv('RESULT_TESTSUITE', "all")
        package_name = os.getenv('RESULT_TESTPACKAGE', "all")

        self.xunit = XUnitReporter(filename=results_filename)

        self.xunit.add_testsuite(name=suite_name, package=package_name)

        self.xunit.add_property(name="random_seed",
                                value=str(cocotb.RANDOM_SEED))

        # Setup Coverage
        ####################

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            # Exclude cocotb itself from coverage collection.
            cocotb_package_dir = os.path.dirname(__file__)
            self._cov = coverage.coverage(
                branch=True, omit=["{}/*".format(cocotb_package_dir)])
            self._cov.start()

        # Test Discovery
        ####################
        self._queue = []
        for test in tests:
            self.log.info("Found test {}.{}".format(test.__module__,
                                                    test.__qualname__))
            self._queue.append(test)
        self.ntests = len(self._queue)

        if not self._queue:
            self.log.warning("No tests were discovered")

        self._queue.sort(key=lambda test: (test.stage, test._id))

        # Process Hooks
        ###################
        for hook in hooks:
            self.log.info("Found hook {}.{}".format(hook.__module__,
                                                    hook.__qualname__))
            self._init_hook(hook)
Example #12
class RegressionManager:
    """Encapsulates all regression capability into a single place"""
    def __init__(self, dut: SimHandle, tests: Iterable[Test]):
        """
        Args:
            dut (SimHandle): The root handle to pass into test functions.
            tests (Iterable[Test]): tests to run
        """
        self._dut = dut
        self._test = None
        self._test_task = None
        self._test_start_time = None
        self._test_start_sim_time = None
        self._cov = None
        self.log = _logger
        self.start_time = time.time()
        self.test_results = []
        self.count = 0
        self.passed = 0
        self.skipped = 0
        self.failures = 0
        self._tearing_down = False

        # Setup XUnit
        ###################

        results_filename = os.getenv("COCOTB_RESULTS_FILE", "results.xml")
        suite_name = os.getenv("RESULT_TESTSUITE", "all")
        package_name = os.getenv("RESULT_TESTPACKAGE", "all")

        self.xunit = XUnitReporter(filename=results_filename)

        self.xunit.add_testsuite(name=suite_name, package=package_name)

        self.xunit.add_property(name="random_seed",
                                value=str(cocotb.RANDOM_SEED))

        # Setup Coverage
        ####################

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            # Exclude cocotb itself from coverage collection.
            cocotb_package_dir = os.path.dirname(__file__)
            self._cov = coverage.coverage(branch=True,
                                          omit=[f"{cocotb_package_dir}/*"])
            self._cov.start()

        # Test Discovery
        ####################
        self._queue = []
        for test in tests:
            self.log.info(f"Found test {test.__module__}.{test.__qualname__}")
            self._queue.append(test)
        self.ntests = len(self._queue)

        if not self._queue:
            self.log.warning("No tests were discovered")

        self._queue.sort(key=lambda test: (test.stage, test._id))

    @classmethod
    def from_discovery(cls, dut: SimHandle):
        """
        Obtains the test list by discovery.

        See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.

        Args:
            dut (SimHandle): The root handle to pass into test functions.
        """
        tests = cls._discover_tests()
        return cls(dut, tests)

    @staticmethod
    def _discover_tests() -> Iterable[Test]:
        """
        Discovers tests in files automatically.

        See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.
        """
        module_str = os.getenv("MODULE")
        test_str = os.getenv("TESTCASE")

        if module_str is None:
            raise ValueError(
                "Environment variable MODULE, which defines the module(s) to execute, is not defined."
            )

        modules = [s.strip() for s in module_str.split(",") if s.strip()]

        tests = None
        if test_str:
            tests = [s.strip() for s in test_str.split(",") if s.strip()]

        for module_name in modules:
            try:
                _logger.debug("Python Path: " + ",".join(sys.path))
                _logger.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                _logger.critical("Failed to import module %s: %s", module_name,
                                 E)
                _logger.info('MODULE variable was "%s"', ",".join(modules))
                _logger.info("Traceback: ")
                _logger.info(traceback.format_exc())
                raise

            if tests is not None:
                not_found_tests = []
                # Specific functions specified, don't auto-discover
                for test_name in tests:
                    try:
                        test = getattr(module, test_name)
                    except AttributeError:
                        not_found_tests.append(test_name)
                        continue

                    if not isinstance(test, Test):
                        _logger.error(
                            "Requested %s from module %s isn't a cocotb.test decorated coroutine",
                            test_name,
                            module_name,
                        )
                        raise ImportError("Failed to find requested test %s" %
                                          test_name)

                    # If we request a test manually, it should be run even if skip=True is set.
                    test.skip = False

                    yield test

                # Use the non-matching test names in the next module search
                tests = not_found_tests

            else:
                # auto-discover
                for thing in vars(module).values():
                    if isinstance(thing, Test):
                        yield thing

        # If any test were not found in any module, raise an error
        if tests:
            _logger.error("Requested test(s) %s wasn't found in module(s) %s",
                          tests, modules)
            raise AttributeError("Test(s) %s doesn't exist in %s" %
                                 (tests, modules))
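
        # Note: the not_found_tests bookkeeping above lets a TESTCASE entry be
        # satisfied by any module listed in MODULE: a name missing from the
        # first module is carried forward and looked up in the next one, and
        # only names unresolved after the last module raise AttributeError.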

    @deprecated("This method is now private.")
    def tear_down(self) -> None:
        self._tear_down()

    def _tear_down(self) -> None:
        # prevent re-entering the tear down procedure
        if not self._tearing_down:
            self._tearing_down = True
        else:
            return

        # fail remaining tests
        while True:
            test = self._next_test()
            if test is None:
                break
            self._record_result(test=test,
                                outcome=Error(SimFailure),
                                wall_time_s=0,
                                sim_time_ns=0)

        # Write out final log messages
        self._log_test_summary()

        # Generate output reports
        self.xunit.write()
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        if cocotb._library_coverage is not None:
            # TODO: move this once we have normal shutdown behavior to _sim_event
            cocotb._library_coverage.stop()
            cocotb._library_coverage.save()

        # Setup simulator finalization
        simulator.stop_simulator()

    @deprecated("This method is now private.")
    def next_test(self) -> Optional[Test]:
        return self._next_test()

    def _next_test(self) -> Optional[Test]:
        """Get the next test to run"""
        if not self._queue:
            return None
        self.count += 1
        return self._queue.pop(0)

    @deprecated("This method is now private.")
    def handle_result(self, test: Task) -> None:
        self._handle_result(test)

    def _handle_result(self, test: Task) -> None:
        """Handle a test completing.

        Dump result to XML and schedule the next test (if any). Entered by the scheduler.

        Args:
            test: The test that completed
        """
        assert test is self._test_task

        real_time = time.time() - self._test_start_time
        sim_time_ns = get_sim_time("ns") - self._test_start_sim_time

        self._record_result(
            test=self._test,
            outcome=self._test_task._outcome,
            wall_time_s=real_time,
            sim_time_ns=sim_time_ns,
        )

        self._execute()

    def _init_test(self, test: Test) -> Optional[Task]:
        """Initialize a test.

        Record outcome if the initialization fails.
        Record skip if the test is skipped.
        Save the initialized test if it successfully initializes.
        """

        if test.skip:
            hilight_start = ANSI.COLOR_SKIPPED if want_color_output() else ""
            hilight_end = ANSI.COLOR_DEFAULT if want_color_output() else ""
            # Want this to stand out a little bit
            self.log.info("{start}skipping{end} {name} ({i}/{total})".format(
                start=hilight_start,
                i=self.count,
                total=self.ntests,
                end=hilight_end,
                name=test.__qualname__,
            ))
            self._record_result(test, None, 0, 0)
            return None

        test_init_outcome = cocotb.outcomes.capture(test, self._dut)
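        # capture() invokes test(self._dut) and boxes the outcome: Value holds
        # the created task, Error holds the exception raised during init.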

        if isinstance(test_init_outcome, cocotb.outcomes.Error):
            self.log.error(
                "Failed to initialize test %s" % test.__qualname__,
                exc_info=test_init_outcome.error,
            )
            self._record_result(test, test_init_outcome, 0, 0)
            return None

        running_test = test_init_outcome.get()

        # seed random number generator based on test module, name, and RANDOM_SEED
        hasher = hashlib.sha1()
        hasher.update(test.__qualname__.encode())
        hasher.update(test.__module__.encode())
        seed = cocotb.RANDOM_SEED + int(hasher.hexdigest(), 16)
        random.seed(seed)

        return running_test

    def _score_test(self, test: Test, outcome: Outcome) -> Tuple[bool, bool]:
        """
        Given a test and the test's outcome, determine if the test met expectations and log pertinent information
        """

        # scoring outcomes
        result_pass = True
        sim_failed = False

        try:
            outcome.get()
        except Exception as e:
            result = remove_traceback_frames(e, ["_score_test", "get"])
        else:
            result = TestSuccess()

        if (isinstance(result, TestSuccess) and not test.expect_fail
                and not test.expect_error):
            self._log_test_passed(test, None, None)

        elif isinstance(result, AssertionError) and test.expect_fail:
            self._log_test_passed(test, result, "failed as expected")

        elif isinstance(result, TestSuccess) and test.expect_error:
            self._log_test_failed(test, None,
                                  "passed but we expected an error")
            result_pass = False

        elif isinstance(result, TestSuccess):
            self._log_test_failed(test, None,
                                  "passed but we expected a failure")
            result_pass = False

        elif isinstance(result, SimFailure):
            if isinstance(result, test.expect_error):
                self._log_test_passed(test, result, "errored as expected")
            else:
                self.log.error(
                    "Test error has led to simulator shutting us down")
                result_pass = False
            # whether we expected it or not, the simulation has failed unrecoverably
            sim_failed = True

        elif test.expect_error:
            if isinstance(result, test.expect_error):
                self._log_test_passed(test, result, "errored as expected")
            else:
                self._log_test_failed(test, result,
                                      "errored with unexpected type")
                result_pass = False

        else:
            self._log_test_failed(test, result, None)
            result_pass = False

            if _pdb_on_exception:
                pdb.post_mortem(result.__traceback__)

        return result_pass, sim_failed

    def _log_test_passed(self,
                         test: Test,
                         result: Optional[Exception] = None,
                         msg: Optional[str] = None) -> None:
        start_hilight = ANSI.COLOR_PASSED if want_color_output() else ""
        stop_hilight = ANSI.COLOR_DEFAULT if want_color_output() else ""
        if msg is None:
            rest = ""
        else:
            rest = f": {msg}"
        if result is None:
            result_was = ""
        else:
            result_was = f" (result was {type(result).__qualname__})"
        self.log.info(
            f"{test} {start_hilight}passed{stop_hilight}{rest}{result_was}")

    def _log_test_failed(self,
                         test: Test,
                         result: Optional[Exception] = None,
                         msg: Optional[str] = None) -> None:
        start_hilight = ANSI.COLOR_FAILED if want_color_output() else ""
        stop_hilight = ANSI.COLOR_DEFAULT if want_color_output() else ""
        if msg is None:
            rest = ""
        else:
            rest = f": {msg}"
        self.log.info(f"{test} {start_hilight}failed{stop_hilight}{rest}",
                      exc_info=result)

    def _record_result(
        self,
        test: Test,
        outcome: Optional[Outcome],
        wall_time_s: float,
        sim_time_ns: float,
    ) -> None:

        ratio_time = self._safe_divide(sim_time_ns, wall_time_s)
        try:
            lineno = inspect.getsourcelines(test._func)[1]
        except OSError:
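            # source is unavailable (e.g. a test defined interactively)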
            lineno = 1

        self.xunit.add_testcase(
            name=test.__qualname__,
            classname=test.__module__,
            file=inspect.getfile(test._func),
            lineno=repr(lineno),
            time=repr(wall_time_s),
            sim_time_ns=repr(sim_time_ns),
            ratio_time=repr(ratio_time),
        )

        if outcome is None:  # skipped
            test_pass, sim_failed = None, False
            self.xunit.add_skipped()
            self.skipped += 1

        else:
            test_pass, sim_failed = self._score_test(test, outcome)
            if not test_pass:
                self.xunit.add_failure()
                self.failures += 1
            else:
                self.passed += 1

        self.test_results.append({
            "test": ".".join([test.__module__, test.__qualname__]),
            "pass": test_pass,
            "sim": sim_time_ns,
            "real": wall_time_s,
            "ratio": ratio_time,
        })

        if sim_failed:
            self._tear_down()
            return

    @deprecated("This method is now private.")
    def execute(self) -> None:
        self._execute()

    def _execute(self) -> None:
        while True:
            self._test = self._next_test()
            if self._test is None:
                return self._tear_down()

            self._test_task = self._init_test(self._test)
            if self._test_task is not None:
                return self._start_test()

    def _start_test(self) -> None:
        # Want this to stand out a little bit
        start = ""
        end = ""
        if want_color_output():
            start = ANSI.COLOR_TEST
            end = ANSI.COLOR_DEFAULT
        self.log.info(
            "{start}running{end} {name} ({i}/{total}){description}".format(
                start=start,
                i=self.count,
                total=self.ntests,
                end=end,
                name=self._test.__qualname__,
                description=_trim(self._test.__doc__),
            ))

        self._test_start_time = time.time()
        self._test_start_sim_time = get_sim_time("ns")
        cocotb.scheduler._add_test(self._test_task)

    def _log_test_summary(self) -> None:

        real_time = time.time() - self.start_time
        sim_time_ns = get_sim_time("ns")
        ratio_time = self._safe_divide(sim_time_ns, real_time)

        if len(self.test_results) == 0:
            return

        TEST_FIELD = "TEST"
        RESULT_FIELD = "STATUS"
        SIM_FIELD = "SIM TIME (ns)"
        REAL_FIELD = "REAL TIME (s)"
        RATIO_FIELD = "RATIO (ns/s)"
        TOTAL_NAME = f"TESTS={self.ntests} PASS={self.passed} FAIL={self.failures} SKIP={self.skipped}"

        TEST_FIELD_LEN = max(
            len(TEST_FIELD),
            len(TOTAL_NAME),
            len(max([x["test"] for x in self.test_results], key=len)),
        )
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN = len(SIM_FIELD)
        REAL_FIELD_LEN = len(REAL_FIELD)
        RATIO_FIELD_LEN = len(RATIO_FIELD)

        header_dict = dict(
            a=TEST_FIELD,
            b=RESULT_FIELD,
            c=SIM_FIELD,
            d=REAL_FIELD,
            e=RATIO_FIELD,
            a_len=TEST_FIELD_LEN,
            b_len=RESULT_FIELD_LEN,
            c_len=SIM_FIELD_LEN,
            d_len=REAL_FIELD_LEN,
            e_len=RATIO_FIELD_LEN,
        )

        LINE_LEN = (3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 +
                    SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 + RATIO_FIELD_LEN +
                    3)

        LINE_SEP = "*" * LINE_LEN + "\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(
            **header_dict)
        summary += LINE_SEP

        test_line = "** {a:<{a_len}}  {start}{b:^{b_len}}{end}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}}  **\n"
        for result in self.test_results:
            hilite = ""
            lolite = ""

            if result["pass"] is None:
                ratio = "-.--"
                pass_fail_str = "SKIP"
                if want_color_output():
                    hilite = ANSI.COLOR_SKIPPED
                    lolite = ANSI.COLOR_DEFAULT
            elif result["pass"]:
                ratio = format(result["ratio"], "0.2f")
                pass_fail_str = "PASS"
                if want_color_output():
                    hilite = ANSI.COLOR_PASSED
                    lolite = ANSI.COLOR_DEFAULT
            else:
                ratio = format(result["ratio"], "0.2f")
                pass_fail_str = "FAIL"
                if want_color_output():
                    hilite = ANSI.COLOR_FAILED
                    lolite = ANSI.COLOR_DEFAULT

            test_dict = dict(
                a=result["test"],
                b=pass_fail_str,
                c=result["sim"],
                d=result["real"],
                e=ratio,
                a_len=TEST_FIELD_LEN,
                b_len=RESULT_FIELD_LEN,
                c_len=SIM_FIELD_LEN - 1,
                d_len=REAL_FIELD_LEN - 1,
                e_len=RATIO_FIELD_LEN - 1,
                start=hilite,
                end=lolite,
            )

            summary += test_line.format(**test_dict)

        summary += LINE_SEP

        summary += test_line.format(
            a=TOTAL_NAME,
            b="",
            c=sim_time_ns,
            d=real_time,
            e=format(ratio_time, "0.2f"),
            a_len=TEST_FIELD_LEN,
            b_len=RESULT_FIELD_LEN,
            c_len=SIM_FIELD_LEN - 1,
            d_len=REAL_FIELD_LEN - 1,
            e_len=RATIO_FIELD_LEN - 1,
            start="",
            end="",
        )

        summary += LINE_SEP

        self.log.info(summary)

    @staticmethod
    def _safe_divide(a: float, b: float) -> float:
        try:
            return a / b
        except ZeroDivisionError:
            if a == 0:
                return float("nan")
            else:
                return float("inf")
Example #13
    def initialise(self):

        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()
        self.xunit.add_testsuite(name="all",
                                 tests=repr(self.ntests),
                                 package="all")

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        self._dut = cocotb.handle.SimHandle(
            simulator.get_root_handle(self._root_name))
        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" % root_name)

        # Auto discovery
        for module_name in self._modules:
            module = _my_import(module_name)

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(
            key=lambda test: "%s.%s" % (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module, valid_tests.funcname))
Example #14
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""
    def __init__(self, root_name, modules, tests=None):
        """
        Args:
            root_name (str): The name of the root handle.
            modules (list): A list of Python module names to run.
            tests (str, optional): A comma-separated list of tests to run.
                Defaults to ``None``, meaning all discovered tests will be run.
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")

    def initialise(self):

        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()
        self.xunit.add_testsuite(name="all",
                                 tests=repr(self.ntests),
                                 package="all")

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        self._dut = cocotb.handle.SimHandle(
            simulator.get_root_handle(self._root_name))
        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" % root_name)

        # Auto discovery
        for module_name in self._modules:
            module = _my_import(module_name)

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(
            key=lambda test: "%s.%s" % (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module, valid_tests.funcname))

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args: result (TestComplete exception)
        """
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(time.time() -
                                          self._running_test.start_time))

        if (isinstance(result, TestSuccess) and
                not self._running_test.expect_fail and
                not self._running_test.expect_error):
            self.log.info("Test Passed: %s" % self._running_test.funcname)

        elif (isinstance(result, TestFailure) and
              self._running_test.expect_fail):
            self.log.info(
                "Test failed as expected: %s (result was %s)" %
                (self._running_test.funcname, result.__class__.__name__))

        elif (isinstance(result, TestSuccess) and
              self._running_test.expect_error):
            self.log.error(
                "Test passed but we expected an error: %s (result was %s)" %
                (self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                       self._running_test.error_messages))
            self.failures += 1

        elif isinstance(result, TestSuccess):
            self.log.error(
                "Test passed but we expected a failure: %s (result was %s)" %
                (self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                       self._running_test.error_messages))
            self.failures += 1

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info(
                "Test errored as expected: %s (result was %s)" %
                (self._running_test.funcname, result.__class__.__name__))

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info(
                    "Test errored as expected: %s (result was %s)" %
                    (self._running_test.funcname, result.__class__.__name__))
            else:
                self.log.error(
                    "Test error has lead to simulator shuttting us down")
                self.failures += 1
                self.tear_down()
                return

        else:
            self.log.error(
                "Test Failed: %s (result was %s)" %
                (self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                       self._running_test.error_messages))
            self.failures += 1

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression.next_test()
        if self._running_test:
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (ANSI.BLUE_BG + ANSI.BLACK_FG, self.count,
                           self.ntests, ANSI.DEFAULT_FG + ANSI.DEFAULT_BG,
                           self._running_test.funcname))
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()
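
The branch ladder in handle_result above boils down to a small decision table over the result type and the test's expect_fail / expect_error flags. A rough standalone sketch of that table, assuming local stand-in exception classes for cocotb's result types (SimFailure and the tear-down path are deliberately omitted):

class TestSuccess(Exception): pass
class TestFailure(Exception): pass
class TestError(Exception): pass

def outcome(result, expect_fail=False, expect_error=False):
    # True -> counts as a pass, False -> counts as a failure,
    # mirroring the logging/xunit branches above.
    if isinstance(result, TestSuccess):
        # A pass only counts if we did not expect a failure or an error.
        return not (expect_fail or expect_error)
    if isinstance(result, TestFailure):
        return expect_fail
    if isinstance(result, TestError):
        return expect_error
    return False  # any other result is a plain failure

assert outcome(TestSuccess())
assert not outcome(TestSuccess(), expect_error=True)
assert outcome(TestFailure(), expect_fail=True)
assert outcome(TestError(), expect_error=True)
assert not outcome(TestFailure())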
Example #15
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""

    def __init__(self, root_name, modules, tests=None):
        """
        Args:
            root_name (str): Name of the root handle in the simulator.
            modules (list): A list of Python module names to run.

        Kwargs:
            tests (str): Comma-separated list of specific tests to run.
                If omitted, tests are auto discovered from the modules.
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")

    def initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()
        self.xunit.add_testsuite(name="all", tests=repr(self.ntests),
                                 package="all")

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

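        # The simulator returns a falsy handle when root_name does not match
        # the design's toplevel; normalise that to None so the check below
        # raises a clear error.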
        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                module = _my_import(module_name)
            except ImportError:
                self.log.critical("Failed to import module %s", module_name)
                self.log.info("MODULE variable was \"%s\"",
                                                    ",".join(self._modules))
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

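            # Anything carrying the im_test marker (attached by cocotb's
            # test decorator) is a candidate test; constructing it against
            # the DUT can itself raise TestError.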
            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0",
                                                sim_time_ns="0.0",
                                                ratio_time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                        self._store_test_result(module_name, thing.name,
                                                None, 0.0, 0.0, 0.0)
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: "%s.%s" %
                         (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module,
                           valid_tests.funcname))

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args: result (TestComplete exception)
        """
        real_time   = time.time() - self._running_test.start_time
        sim_time_ns = get_sim_time('ns') - self._running_test.start_sim_time
        # Avoid ZeroDivisionError when the wall-clock delta rounds to zero
        ratio_time  = sim_time_ns / real_time if real_time else float('inf')
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(real_time),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        running_test_funcname = self._running_test.funcname

        # Helper for logging result
        def _result_was():
            result_was = ("%s (result was %s)" %
                          (running_test_funcname, result.__class__.__name__))
            return result_was

        result_pass = True

        if (isinstance(result, TestSuccess) and
                not self._running_test.expect_fail and
                not self._running_test.expect_error):
            self.log.info("Test Passed: %s" % running_test_funcname)

        elif (isinstance(result, TestFailure) and
                self._running_test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess) and
              self._running_test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                       self._running_test.error_messages))
            self.failures += 1
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                       self._running_test.error_messages))
            self.failures += 1
            result_pass = False

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: " + _result_was())

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test error has lead to simulator shuttting us "
                               "down")
                self.failures += 1
                self._store_test_result(self._running_test.module,
                                        self._running_test.funcname,
                                        False, sim_time_ns, real_time,
                                        ratio_time)
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: " + _result_was())
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                       self._running_test.error_messages))
            self.failures += 1
            result_pass = False

        self._store_test_result(self._running_test.module,
                                self._running_test.funcname, result_pass,
                                sim_time_ns, real_time, ratio_time)

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression.next_test()
        if self._running_test:
            start = ''
            end   = ''
            if self.log.colour:
                start = ANSI.BLUE_BG + ANSI.BLACK_FG
                end   = ANSI.DEFAULT
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (start,
                           self.count, self.ntests,
                           end,
                           self._running_test.funcname))
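            # The first test seeds the scheduler directly; later tests are
            # handed over with new_test() while the scheduler is running.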
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()

    def _log_test_summary(self):
        TEST_FIELD   = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD    = 'SIM TIME(NS)'
        REAL_FIELD   = 'REAL TIME(S)'
        RATIO_FIELD  = 'RATIO(NS/S)'

        TEST_FIELD_LEN   = max([len(TEST_FIELD)] +
                               [len(x['test']) for x in self.test_results])
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN    = len(SIM_FIELD)
        REAL_FIELD_LEN   = len(REAL_FIELD)
        RATIO_FIELD_LEN  = len(RATIO_FIELD)

        LINE_LEN = (3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 +
                    SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 +
                    RATIO_FIELD_LEN + 3)

        LINE_SEP = "*"*LINE_LEN+"\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(a=TEST_FIELD,   a_len=TEST_FIELD_LEN,
                                                                                                         b=RESULT_FIELD, b_len=RESULT_FIELD_LEN,
                                                                                                         c=SIM_FIELD,    c_len=SIM_FIELD_LEN,
                                                                                                         d=REAL_FIELD,   d_len=REAL_FIELD_LEN,
                                                                                                         e=RATIO_FIELD,  e_len=RATIO_FIELD_LEN)
        summary += LINE_SEP
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if self.log.colour:
                    hilite = ANSI.WHITE_FG + ANSI.RED_BG

            summary += "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n".format(a=result['test'],   a_len=TEST_FIELD_LEN,
                                                                                                                                b=pass_fail_str,    b_len=RESULT_FIELD_LEN,
                                                                                                                                c=result['sim'],    c_len=SIM_FIELD_LEN-1,
                                                                                                                                d=result['real'],   d_len=REAL_FIELD_LEN-1,
                                                                                                                                e=result['ratio'],  e_len=RATIO_FIELD_LEN-1,
                                                                                                                                start=hilite)
        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self):
        real_time   = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time  = sim_time_ns / real_time

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {:<39}**\n".format(self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {:<39}**\n".format('{:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {:<39}**\n".format('{:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {:<39}**\n".format('{:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    def _store_test_result(self, module_name, test_name, result_pass,
                           sim_time, real_time, ratio):
        result = {
            'test'  : '.'.join([module_name, test_name]),
            'pass'  : result_pass,
            'sim'   : sim_time,
            'real'  : real_time,
            'ratio' : ratio}
        self.test_results.append(result)
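
For reference, _log_test_summary sizes each column from its header label and the widest test name, then renders one row per stored result. A runnable approximation of the same layout with fabricated results (test names and timings are invented for illustration):

results = [
    {'test': 'tb.test_reset',    'pass': True,  'sim': 1000.0, 'real': 0.42},
    {'test': 'tb.test_overflow', 'pass': False, 'sim': 2500.0, 'real': 1.10},
]
for r in results:
    r['ratio'] = r['sim'] / r['real']

headers = ('TEST', 'PASS/FAIL', 'SIM TIME(NS)', 'REAL TIME(S)', 'RATIO(NS/S)')
# Column widths: the TEST column grows with the longest test name.
test_len = max(len(headers[0]), max(len(r['test']) for r in results))
widths = (test_len,) + tuple(len(h) for h in headers[1:])

# '** ' + four two-space gaps + ' **' adds 14 characters to each row.
sep = '*' * (sum(widths) + 14)
print(sep)
print('** {:<{}}  {:^{}}  {:>{}}  {:>{}}  {:>{}} **'.format(
    headers[0], widths[0], headers[1], widths[1], headers[2], widths[2],
    headers[3], widths[3], headers[4], widths[4]))
print(sep)
for r in results:
    print('** {:<{}}  {:^{}}  {:>{}.2f}  {:>{}.2f}  {:>{}.2f} **'.format(
        r['test'], widths[0], 'PASS' if r['pass'] else 'FAIL', widths[1],
        r['sim'], widths[2], r['real'], widths[3], r['ratio'], widths[4]))
print(sep)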