Example 1: RunningTest
class RunningTest(RunningCoroutine):
    """Add some useful Test functionality to a RunningCoroutine."""

    class ErrorLogHandler(logging.Handler):
        def __init__(self, fn):
            self.fn = fn
            logging.Handler.__init__(self, level=logging.DEBUG)

        def handle(self, record):
            self.fn(self.format(record))

    def __init__(self, inst, parent):
        self.error_messages = []
        RunningCoroutine.__init__(self, inst, parent)
        self.log = SimLog("cocotb.test.%s" % self.__name__, id(self))
        self.started = False
        self.start_time = 0
        self.start_sim_time = 0
        self.expect_fail = parent.expect_fail
        self.expect_error = parent.expect_error
        self.skip = parent.skip
        self.stage = parent.stage

        self.handler = RunningTest.ErrorLogHandler(self._handle_error_message)
        cocotb.log.addHandler(self.handler)

    def _advance(self, outcome):
        if not self.started:
            self.error_messages = []
            self.log.info("Starting test: \"%s\"\nDescription: %s" %
                          (self.funcname, self.__doc__))
            self.start_time = time.time()
            self.start_sim_time = get_sim_time('ns')
            self.started = True
        try:
            self.log.debug("Sending {}".format(outcome))
            return outcome.send(self._coro)
        except TestComplete as e:
            if isinstance(e, TestFailure):
                self.log.warning(str(e))
            else:
                self.log.info(str(e))

            buff = StringIO()
            for message in self.error_messages:
                print(message, file=buff)
            e.stderr.write(buff.getvalue())
            raise
        except StopIteration:
            raise TestSuccess()
        except Exception as e:
            raise raise_error(self, "Send raised exception:")

    def _handle_error_message(self, msg):
        self.error_messages.append(msg)
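
The nested ErrorLogHandler above hooks into the logging framework so that every formatted record emitted during the test is passed to a callback and collected for the failure report. A minimal standalone sketch of the same idea follows; the handler and logger names are illustrative only, and it overrides emit(), the usual extension point, whereas the class above overrides handle() directly.

import logging

class CapturingHandler(logging.Handler):
    """Collect formatted log records through a callback (illustrative sketch)."""

    def __init__(self, callback):
        super().__init__(level=logging.DEBUG)
        self.callback = callback

    def emit(self, record):
        # Handler.handle() filters and locks, then calls emit() with the record
        self.callback(self.format(record))

captured = []
log = logging.getLogger("demo")
log.setLevel(logging.DEBUG)
handler = CapturingHandler(captured.append)
handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
log.addHandler(handler)

log.error("something went wrong")
assert captured == ["ERROR something went wrong"]
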
Example 2: Scheduler
class Scheduler(object):
    """
    The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the `react`_ method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn. NB implementors should not depend on the scheduling
    order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to cbReadOnlySynch (VPI) or vhpiCbLastKnownDeltaCycle
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to cbReadWriteSynch (VPI) or vhpiCbEndOfProcesses (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from normal->write by registering a ReadWrite
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL   = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE    = 3  # noqa
    _MODE_TERM     = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _readonly = ReadOnly()
    _next_timestep = _NextTimeStep()
    _readwrite = _ReadWrite()
    _timer1 = Timer(1)
    _timer0 = Timer(0)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = collections.defaultdict(list)

        # A dictionary of pending triggers for each coroutine, indexed by coro
        self._coro2triggers = collections.defaultdict(list)

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = {}

        self._pending_coros = []
        self._pending_callbacks = []
        self._pending_triggers = []

        self._terminate = False
        self._test_result = None
        self._entrypoint = None

        # Select the appropriate scheduling algorithm for this simulator
        self.advance = self.default_scheduling_algorithm

    def default_scheduling_algorithm(self):
        """
        Decide whether we need to schedule our own triggers (if at all) in
        order to progress to the next mode.

        This algorithm has been tested against the following simulators:
            Icarus Verilog
        """
        if not self._terminate and self._writes:

            if self._mode == Scheduler._MODE_NORMAL:
                if not self._readwrite.primed:
                    self._readwrite.prime(self.react)
            elif not self._next_timestep.primed:
                self._next_timestep.prime(self.react)

        elif self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            for t in self._trigger2coros:
                t.unprime()

            for t in [self._readwrite, self._readonly, self._next_timestep,
                      self._timer1, self._timer0]:
                if t.primed:
                    t.unprime()

            self._timer1.prime(self.begin_test)
            self._trigger2coros = collections.defaultdict(list)
            self._coro2triggers = collections.defaultdict(list)
            self._terminate = False
            self._mode = Scheduler._MODE_TERM

    def begin_test(self, trigger=None):
        """
        Called to initiate a test.

        Could be called on start-up or from a callback
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            _profile.enable()

        self._mode = Scheduler._MODE_NORMAL
        if trigger is not None:
            trigger.unprime()

        # Issue previous test result, if there is one
        if self._test_result is not None:
            if _debug:
                self.log.debug("Issue test result to regresssion object")
            cocotb.regression.handle_result(self._test_result)
            self._test_result = None
        if self._entrypoint is not None:
            test = self._entrypoint
            self._entrypoint = None
            self.schedule(test)
            self.advance()

        if _profiling:
            _profile.disable()

    def react(self, trigger, depth=0):
        """
        React called when a trigger fires.

        We find any coroutines that are waiting on the particular trigger and
        schedule them.
        """
        if _profiling and not depth:
            _profile.enable()

        # When a trigger fires it is unprimed internally
        if _debug:
            self.log.debug("Trigger fired: %s" % str(trigger))
        # trigger.unprime()

        if self._mode == Scheduler._MODE_TERM:
            if _debug:
                self.log.debug("Ignoring trigger %s since we're terminating" %
                               str(trigger))
            return

        if trigger is self._readonly:
            self._mode = Scheduler._MODE_READONLY
        # Only GPI triggers affect the simulator scheduling mode
        elif isinstance(trigger, GPITrigger):
            self._mode = Scheduler._MODE_NORMAL

        # We're the only source of ReadWrite triggers which are only used for
        # playing back any cached signal updates
        if trigger is self._readwrite:

            if _debug:
                self.log.debug("Writing cached signal updates")

            while self._writes:
                handle, value = self._writes.popitem()
                handle.setimmediatevalue(value)

            self._readwrite.unprime()

            if _profiling:
                _profile.disable()
            return

        # Similarly, if we've scheduled our next_timestep on the way to readwrite
        if trigger is self._next_timestep:

            if not self._writes:
                self.log.error(
                    "Moved to next timestep without any pending writes!")
            else:
                self.log.debug(
                    "Priming ReadWrite trigger so we can playback writes")
                self._readwrite.prime(self.react)

            if _profiling:
                _profile.disable()
            return

        if trigger not in self._trigger2coros:

            # GPI triggers should only ever be pending if there is an
            # associated coroutine waiting on that trigger, otherwise it would
            # have been unprimed already
            if isinstance(trigger, GPITrigger):
                self.log.critical(
                    "No coroutines waiting on trigger that fired: %s" %
                    str(trigger))

                trigger.log.info("I'm the culprit")
            # For Python triggers this isn't actually an error - we might do
            # event.set() without knowing whether any coroutines are actually
            # waiting on this event, for example
            elif _debug:
                self.log.debug(
                    "No coroutines waiting on trigger that fired: %s" %
                    str(trigger))

            if _profiling:
                _profile.disable()
            return

        # Scheduled coroutines may append to our waiting list so the first
        # thing to do is pop all entries waiting on this trigger.
        scheduling = self._trigger2coros.pop(trigger)

        if _debug:
            debugstr = "\n\t".join([coro.__name__ for coro in scheduling])
            if len(scheduling):
                debugstr = "\n\t" + debugstr
            self.log.debug("%d pending coroutines for event %s%s" %
                           (len(scheduling), str(trigger), debugstr))

        # If the coroutine was waiting on multiple triggers we may be able
        # to unprime the other triggers that didn't fire
        for coro in scheduling:
            for pending in self._coro2triggers[coro]:
                for others in self._trigger2coros[pending]:
                    if others not in scheduling:
                        break
                else:
                    # if pending is not trigger and pending.primed:
                    #     pending.unprime()
                    if pending.primed:
                        pending.unprime()
                    del self._trigger2coros[pending]

        for coro in scheduling:
            self.schedule(coro, trigger=trigger)
            if _debug:
                self.log.debug("Scheduled coroutine %s" % (coro.__name__))

        while self._pending_triggers:
            if _debug:
                self.log.debug("Scheduling pending trigger %s" %
                               (str(self._pending_triggers[0])))
            self.react(self._pending_triggers.pop(0), depth=depth + 1)

        # We only advance for GPI triggers
        if not depth and isinstance(trigger, GPITrigger):
            self.advance()

            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")

            if _profiling:
                _profile.disable()
        return

    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        for trigger in self._coro2triggers[coro]:
            if coro in self._trigger2coros[trigger]:
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
        del self._coro2triggers[coro]

        if coro._join in self._trigger2coros:
            self._pending_triggers.append(coro._join)

        # Remove references to allow GC to clean up
        del coro._join

    def save_write(self, handle, value):
        self._writes[handle] = value

    def _coroutine_yielded(self, coro, triggers):
        """
        Prime the triggers and update our internal mappings
        """
        self._coro2triggers[coro] = triggers

        for trigger in triggers:

            self._trigger2coros[trigger].append(coro)
            if not trigger.primed:
                try:
                    trigger.prime(self.react)
                except Exception as e:
                    # Convert any exceptions into a test result
                    self.finish_test(
                        create_error(self, "Unable to prime trigger %s: %s" %
                                     (str(trigger), str(e))))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def add(self, coroutine):
        """
        Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error messages in the event of common gotchas.
        """
        if isinstance(coroutine, cocotb.decorators.coroutine):
            self.log.critical(
                "Attempt to schedule a coroutine that hasn't started")
            coroutine.log.error("This is the failing coroutine")
            self.log.warning(
                "Did you forget to add parentheses to the @test decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):
            self.log.critical(
                "Attempt to add something to the scheduler which isn't a "
                "coroutine")
            self.log.warning(
                "Got: %s (%s)" % (str(type(coroutine)), repr(coroutine)))
            self.log.warning("Did you use the @coroutine decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        if _debug:
            self.log.debug("Adding new coroutine %s" % coroutine.__name__)

        self.schedule(coroutine)
        self.advance()
        return coroutine

    def new_test(self, coroutine):
        self._entrypoint = coroutine

    def schedule(self, coroutine, trigger=None):
        """
        Schedule a coroutine by calling the send method

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule

            trigger (cocotb.triggers.Trigger): The trigger that caused this
                                                coroutine to be scheduled
        """
        if hasattr(trigger, "pass_retval"):
            sendval = trigger.retval
            if _debug:
                coroutine.log.debug("Scheduling with ReturnValue(%s)" %
                                    (repr(sendval)))
        else:
            sendval = trigger
            if _debug:
                coroutine.log.debug("Scheduling with %s" % str(trigger))

        try:
            result = coroutine.send(sendval)
            if _debug:
                self.log.debug("Coroutine %s yielded %s (mode %d)" %
                               (coroutine.__name__, str(result), self._mode))

        # TestComplete indication is game over, tidy up
        except TestComplete as test_result:
            # Tag that close down is needed, save the test_result
            # for later use in cleanup handler
            self.finish_test(test_result)
            return

        # Normal co-routine completion
        except cocotb.decorators.CoroutineComplete as exc:
            if _debug:
                self.log.debug("Coroutine completed: %s" % str(coroutine))
            self.unschedule(coroutine)
            return

        # Don't handle the result if we're shutting down
        if self._terminate:
            return

        # Queue current routine to schedule when the nested routine exits
        if isinstance(result, cocotb.decorators.RunningCoroutine):
            if _debug:
                self.log.debug("Scheduling nested co-routine: %s" %
                               result.__name__)

            self.queue(result)
            new_trigger = result.join()
            self._coroutine_yielded(coroutine, [new_trigger])

        elif isinstance(result, Trigger):
            self._coroutine_yielded(coroutine, [result])

        elif (isinstance(result, list) and
                not [t for t in result if not isinstance(t, Trigger)]):
            self._coroutine_yielded(coroutine, result)

        else:
            msg = ("Coroutine %s yielded something the scheduler can't handle"
                   % str(coroutine))
            msg += ("\nGot type: %s repr: %s str: %s" %
                    (type(result), repr(result), str(result)))
            msg += "\nDid you forget to decorate with @cocotb.cocorutine?"
            try:
                raise_error(self, msg)
            except Exception as e:
                self.finish_test(e)

        # Handle any newly queued coroutines that need to be scheduled
        while self._pending_coros:
            self.add(self._pending_coros.pop(0))

        while self._pending_callbacks:
            self._pending_callbacks.pop(0)()

    def finish_test(self, test_result):
        """Cache the test result and set the terminate flag"""
        self.log.debug("finish_test called with %s" % (repr(test_result)))
        if not self._terminate:
            self._terminate = True
            self._test_result = test_result
            self.cleanup()

    def finish_scheduler(self, test_result):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed"""
        self.log.debug("Issue sim closedown result to regresssion object")
        cocotb.regression.handle_result(test_result)

    def cleanup(self):
        """
        Clear up all our state

        Unprime all pending triggers and kill off any coroutines
        """
        for trigger, waiting in self._trigger2coros.items():
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()
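
The class docstring above describes the central bookkeeping: a coroutine may wait on several triggers at once, and when any one of them fires the scheduler must schedule the waiters and un-prime the sibling triggers so no spurious callbacks arrive later. A heavily simplified, dictionary-only sketch of that idea (no cocotb objects; all names are illustrative):

from collections import defaultdict

trigger2coros = defaultdict(list)   # trigger -> coroutines waiting on it
coro2triggers = {}                  # coroutine -> triggers it is waiting on

def coroutine_yielded(coro, triggers):
    # A real scheduler would also prime each trigger with a react callback
    coro2triggers[coro] = list(triggers)
    for t in triggers:
        trigger2coros[t].append(coro)

def fire(trigger):
    scheduling = trigger2coros.pop(trigger, [])
    for coro in scheduling:
        # Un-prime sibling triggers that now have no remaining waiters
        for pending in coro2triggers.pop(coro):
            if pending is not trigger:
                trigger2coros[pending].remove(coro)
                if not trigger2coros[pending]:
                    del trigger2coros[pending]   # a real trigger would be unprimed here
    return scheduling

coroutine_yielded("coro_a", ["rising_edge", "timeout"])
assert fire("timeout") == ["coro_a"]
assert "rising_edge" not in trigger2coros
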
Example 3: RegressionManager
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""

    def __init__(self, root_name, modules, tests=None, seed=None, hooks=[]):
        """
        Args:
            modules (list): A list of python module names to run

        Kwargs
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")
        self._seed = seed
        self._hooks = hooks

    def initialise(self):
        try:
            self._initialise()
        except Exception as e:
            import traceback
            self.log.error(traceback.format_exc())
            raise
        
    def _initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()

        suite_name = os.getenv('RESULT_TESTSUITE') or "all"
        package_name = os.getenv('RESULT_TESTPACKAGE') or "all"

        self.xunit.add_testsuite(name=suite_name, tests=repr(self.ntests),
                                 package=package_name)
        
        if (self._seed is not None):
            self.xunit.add_property(name="random_seed", value=("%d"%self._seed))

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                self.log.debug("Python Path: " + ",".join(sys.path))
                self.log.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                self.log.critical("Failed to import module %s: %s", module_name, E)
                self.log.info("MODULE variable was \"%s\"", ".".join(self._modules))
                self.log.info("Traceback: ")
                self.log.info(traceback.format_exc())
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0",
                                                sim_time_ns="0.0",
                                                ratio_time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                        self._store_test_result(module_name, thing.name, None, 0.0, 0.0, 0.0)
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: "%s.%s" %
                         (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module,
                           valid_tests.funcname))

        for module_name in self._hooks:
            self.log.info("Loading hook from module '"+module_name+"'")
            module = _my_import(module_name)

            for thing in vars(module).values():
                if hasattr(thing, "im_hook"):
                    try:
                        test = thing(self._dut)
                    except TestError:
                        self.log.warning("Failed to initialize hook %s" % thing.name)
                    else:
                        cocotb.scheduler.add(test)


    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def _add_failure(self, result):
        self.xunit.add_failure(stdout=repr(str(result)),
                               stderr="\n".join(self._running_test.error_messages),
                               message="Test failed with random_seed={}".format(self._seed))
        self.failures += 1

    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args: result (TestComplete exception)
        """
        real_time   = time.time() - self._running_test.start_time
        sim_time_ns = get_sim_time('ns') - self._running_test.start_sim_time
        ratio_time  = sim_time_ns / real_time
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(real_time),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        running_test_funcname = self._running_test.funcname

        # Helper for logging result
        def _result_was():
            result_was = ("%s (result was %s)" %
                          (running_test_funcname, result.__class__.__name__))
            return result_was

        result_pass = True

        if (isinstance(result, TestSuccess) and
                not self._running_test.expect_fail and
                not self._running_test.expect_error):
            self.log.info("Test Passed: %s" % running_test_funcname)

        elif (isinstance(result, TestFailure) and
                self._running_test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess) and
              self._running_test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: " + _result_was())

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test error has lead to simulator shuttting us "
                               "down")
                self._add_failure(result)
                self._store_test_result(self._running_test.module, self._running_test.funcname, False, sim_time_ns, real_time, ratio_time)
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: " + _result_was())
            self._add_failure(result)
            result_pass = False

        self._store_test_result(self._running_test.module, self._running_test.funcname, result_pass, sim_time_ns, real_time, ratio_time)

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression.next_test()
        if self._running_test:
            start = ''
            end   = ''
            if self.log.colour:
                start = ANSI.BLUE_BG + ANSI.BLACK_FG
                end   = ANSI.DEFAULT
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (start,
                           self.count, self.ntests,
                           end,
                           self._running_test.funcname))
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()

    def _log_test_summary(self):
        TEST_FIELD   = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD    = 'SIM TIME(NS)'
        REAL_FIELD   = 'REAL TIME(S)'
        RATIO_FIELD  = 'RATIO(NS/S)'

        TEST_FIELD_LEN   = max(len(TEST_FIELD),len(max([x['test'] for x in self.test_results],key=len)))
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN    = len(SIM_FIELD)
        REAL_FIELD_LEN   = len(REAL_FIELD)
        RATIO_FIELD_LEN  = len(RATIO_FIELD)

        LINE_LEN = 3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 + SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 + RATIO_FIELD_LEN + 3

        LINE_SEP = "*"*LINE_LEN+"\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(a=TEST_FIELD,   a_len=TEST_FIELD_LEN,
                                                                                                         b=RESULT_FIELD, b_len=RESULT_FIELD_LEN,
                                                                                                         c=SIM_FIELD,    c_len=SIM_FIELD_LEN,
                                                                                                         d=REAL_FIELD,   d_len=REAL_FIELD_LEN,
                                                                                                         e=RATIO_FIELD,  e_len=RATIO_FIELD_LEN)
        summary += LINE_SEP
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if self.log.colour:
                    hilite = ANSI.WHITE_FG + ANSI.RED_BG

            summary += "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n".format(a=result['test'],   a_len=TEST_FIELD_LEN,
                                                                                                                                b=pass_fail_str,    b_len=RESULT_FIELD_LEN,
                                                                                                                                c=result['sim'],    c_len=SIM_FIELD_LEN-1,
                                                                                                                                d=result['real'],   d_len=REAL_FIELD_LEN-1,
                                                                                                                                e=result['ratio'],  e_len=RATIO_FIELD_LEN-1,
                                                                                                                                start=hilite)
        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self):
        real_time   = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time  = sim_time_ns / real_time

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {0:<39}**\n".format(self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {0:<39}**\n".format('{0:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {0:<39}**\n".format('{0:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {0:<39}**\n".format('{0:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    def _store_test_result(self, module_name, test_name, result_pass, sim_time, real_time, ratio):
        result = {
            'test'  : '.'.join([module_name, test_name]),
            'pass'  : result_pass,
            'sim'   : sim_time,
            'real'  : real_time,
            'ratio' : ratio}
        self.test_results.append(result)
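
handle_result above folds the type of the result together with the test's expect_fail and expect_error flags to decide whether the run counts as a pass. That decision table can be summarised by a small hypothetical helper (classify is not part of cocotb; note that a SimFailure without expect_error additionally tears the whole regression down):

def classify(result_type, expect_fail=False, expect_error=False):
    """Return True if the regression should count the test as passed."""
    if result_type == "TestSuccess":
        # Success only counts when neither a failure nor an error was expected
        return not expect_fail and not expect_error
    if result_type == "TestFailure":
        return expect_fail
    if result_type in ("TestError", "SimFailure"):
        return expect_error
    return False

assert classify("TestSuccess") is True
assert classify("TestFailure", expect_fail=True) is True
assert classify("TestSuccess", expect_error=True) is False
assert classify("SimFailure") is False
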
Example 4: RegressionManager (simpler variant)
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""

    def __init__(self, dut, modules, tests=None):
        """
        Args:
            modules (list): A list of python module names to run

        Kwargs
        """
        self._queue = []
        self._dut = dut
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self.log = SimLog("cocotb.regression")

    def initialise(self):

        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()
        self.xunit.add_testsuite(name="all", tests=repr(self.ntests), package="all")

        # Auto discovery
        for module_name in self._modules:
            module = _my_import(module_name)

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                            (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" % thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name, classname=module_name, time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1                        
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: "%s.%s" % (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                        (valid_tests.module,
                         valid_tests.funcname))

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                (self.failures, self.count -1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)"  %
                (self.count-1, self.skipped))
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()


    def next_test(self):
        """Get the next test to run"""
        if not self._queue: return None
        return self._queue.pop(0)


    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args: result (TestComplete exception)
        """
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(time.time() - self._running_test.start_time))

        if isinstance(result, TestSuccess) and not self._running_test.expect_fail and not self._running_test.expect_error:
            self.log.info("Test Passed: %s" % self._running_test.funcname)

        elif isinstance(result, TestFailure) and self._running_test.expect_fail:
            self.log.info("Test failed as expected: %s (result was %s)" % (
                          self._running_test.funcname, result.__class__.__name__))

        elif isinstance(result, TestSuccess) and self._running_test.expect_error:
            self.log.error("Test passed but we expected an error: %s (result was %s)" % (
                           self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)), stderr="\n".join(self._running_test.error_messages))
            self.failures += 1

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: %s (result was %s)" % (
                           self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)), stderr="\n".join(self._running_test.error_messages))
            self.failures += 1

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: %s (result was %s)" % (
                          self._running_test.funcname, result.__class__.__name__))

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: %s (result was %s)" % (
                              self._running_test.funcname, result.__class__.__name__))
            else:
                self.log.error("Test error has lead to simulator shuttting us down")
                self.failures += 1
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: %s (result was %s)" % (
                        self._running_test.funcname, result.__class__.__name__))
            self.xunit.add_failure(stdout=repr(str(result)), stderr="\n".join(self._running_test.error_messages))
            self.failures += 1

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression.next_test()
        if self._running_test:
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" % (
               ANSI.BLUE_BG +ANSI.BLACK_FG,
                    self.count, self.ntests,
               ANSI.DEFAULT_FG + ANSI.DEFAULT_BG,
                    self._running_test.funcname))
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()
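
Both RegressionManager variants feed their results to an XUnitReporter, which writes a JUnit-style XML report that CI systems can ingest. A rough standard-library sketch of the kind of document those add_testsuite / add_testcase / add_failure calls build up; the element names follow the JUnit convention, the real reporter's attributes and layout may differ, and results.xml is a hypothetical output path:

import xml.etree.ElementTree as ET

testsuite = ET.Element("testsuite", name="all", package="all", tests="2")
ET.SubElement(testsuite, "testcase",
              name="test_reset", classname="test_dut", time="0.42")
failing = ET.SubElement(testsuite, "testcase",
                        name="test_write", classname="test_dut", time="1.30")
failure = ET.SubElement(failing, "failure", message="Test failed")
failure.text = "captured error messages go here"

# Hypothetical output path; the real reporter picks its own file name
ET.ElementTree(testsuite).write("results.xml", encoding="utf-8",
                                xml_declaration=True)
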
Example 5: Scheduler (async/await variant)
class Scheduler:
    """The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the :any:`react` method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn.

    .. attention::

       Implementors should not depend on the scheduling order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to :any:`cbReadOnlySynch` (VPI) or :any:`vhpiCbLastKnownDeltaCycle`
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to :any:`cbReadWriteSynch` (VPI) or :c:macro:`vhpiCbEndOfProcesses` (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from Normal to Write by registering a :class:`~cocotb.triggers.ReadWrite`
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE = 3  # noqa
    _MODE_TERM = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _next_time_step = NextTimeStep()
    _read_write = ReadWrite()
    _read_only = ReadOnly()
    _timer1 = Timer(1)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # Use OrderedDict here for deterministic behavior (gh-934)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = _py_compat.insertion_ordered_dict()

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending (write_func, args), keyed by handle.
        # Only the last scheduled write in a timestep is performed; all the
        # rest are discarded in Python.
        self._write_calls = _py_compat.insertion_ordered_dict()

        self._pending_coros = []
        self._pending_triggers = []
        self._pending_threads = []
        # Events we need to call set on once we've unwound
        self._pending_events = []

        self._terminate = False
        self._test = None
        self._main_thread = threading.current_thread()

        self._current_task = None

        self._is_reacting = False

        self._write_coro_inst = None
        self._writes_pending = Event()

    async def _do_writes(self):
        """ An internal coroutine that performs pending writes """
        while True:
            await self._writes_pending.wait()
            if self._mode != Scheduler._MODE_NORMAL:
                await self._next_time_step

            await self._read_write

            while self._write_calls:
                handle, (func, args) = self._write_calls.popitem()
                func(*args)
            self._writes_pending.clear()

    def _check_termination(self):
        """
        Handle a termination that causes us to move onto the next test.
        """
        if self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            if self._write_coro_inst is not None:
                self._write_coro_inst.kill()
                self._write_coro_inst = None

            for t in self._trigger2coros:
                t.unprime()

            if self._timer1.primed:
                self._timer1.unprime()

            self._timer1.prime(self._test_completed)
            self._trigger2coros = _py_compat.insertion_ordered_dict()
            self._terminate = False
            self._write_calls = _py_compat.insertion_ordered_dict()
            self._writes_pending.clear()
            self._mode = Scheduler._MODE_TERM

    def _test_completed(self, trigger=None):
        """Called after a test and its cleanup have completed
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            ctx = profiling_context()
        else:
            ctx = _py_compat.nullcontext()

        with ctx:
            self._mode = Scheduler._MODE_NORMAL
            if trigger is not None:
                trigger.unprime()

            # extract the current test, and clear it
            test = self._test
            self._test = None
            if test is None:
                raise InternalError(
                    "_test_completed called with no active test")
            if test._outcome is None:
                raise InternalError(
                    "_test_completed called with an incomplete test")

            # Issue previous test result
            if _debug:
                self.log.debug("Issue test result to regression object")

            # this may schedule another test
            cocotb.regression_manager.handle_result(test)

            # if it did, make sure we handle the test completing
            self._check_termination()

    def react(self, trigger):
        """
        Called when a trigger fires.

        We ensure that we only start the event loop once, rather than
        letting it recurse.
        """
        if self._is_reacting:
            # queue up the trigger, the event loop will get to it
            self._pending_triggers.append(trigger)
            return

        if self._pending_triggers:
            raise InternalError(
                "Expected all triggers to be handled but found {}".format(
                    self._pending_triggers))

        # start the event loop
        self._is_reacting = True
        try:
            self._event_loop(trigger)
        finally:
            self._is_reacting = False

    def _event_loop(self, trigger):
        """
        Run an event loop triggered by the given trigger.

        The loop will keep running until no further triggers fire.

        This should be triggered by only:
        * The beginning of a test, when there is no trigger to react to
        * A GPI trigger
        """
        if _profiling:
            ctx = profiling_context()
        else:
            ctx = _py_compat.nullcontext()

        with ctx:
            # When a trigger fires it is unprimed internally
            if _debug:
                self.log.debug("Trigger fired: %s" % str(trigger))
            # trigger.unprime()

            if self._mode == Scheduler._MODE_TERM:
                if _debug:
                    self.log.debug(
                        "Ignoring trigger %s since we're terminating" %
                        str(trigger))
                return

            if trigger is self._read_only:
                self._mode = Scheduler._MODE_READONLY
            # Only GPI triggers affect the simulator scheduling mode
            elif isinstance(trigger, GPITrigger):
                self._mode = Scheduler._MODE_NORMAL

            # work through triggers one by one
            is_first = True
            self._pending_triggers.append(trigger)
            while self._pending_triggers:
                trigger = self._pending_triggers.pop(0)

                if not is_first and isinstance(trigger, GPITrigger):
                    self.log.warning(
                        "A GPI trigger occurred after entering react - this "
                        "should not happen.")
                    assert False

                # this only exists to enable the warning above
                is_first = False

                # Scheduled coroutines may append to our waiting list so the first
                # thing to do is pop all entries waiting on this trigger.
                try:
                    scheduling = self._trigger2coros.pop(trigger)
                except KeyError:
                    # GPI triggers should only ever be pending if there is an
                    # associated coroutine waiting on that trigger, otherwise it would
                    # have been unprimed already
                    if isinstance(trigger, GPITrigger):
                        self.log.critical(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                        trigger.log.info("I'm the culprit")
                    # For Python triggers this isn't actually an error - we might do
                    # event.set() without knowing whether any coroutines are actually
                    # waiting on this event, for example
                    elif _debug:
                        self.log.debug(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                    del trigger
                    continue

                if _debug:
                    debugstr = "\n\t".join(
                        [coro._coro.__qualname__ for coro in scheduling])
                    if len(scheduling):
                        debugstr = "\n\t" + debugstr
                    self.log.debug("%d pending coroutines for event %s%s" %
                                   (len(scheduling), str(trigger), debugstr))

                # This trigger isn't needed any more
                trigger.unprime()

                for coro in scheduling:
                    if coro._outcome is not None:
                        # coroutine was killed by another coroutine waiting on the same trigger
                        continue
                    if _debug:
                        self.log.debug("Scheduling coroutine %s" %
                                       (coro._coro.__qualname__))
                    self.schedule(coro, trigger=trigger)
                    if _debug:
                        self.log.debug("Scheduled coroutine %s" %
                                       (coro._coro.__qualname__))

                # Schedule may have queued up some events so we'll burn through those
                while self._pending_events:
                    if _debug:
                        self.log.debug("Scheduling pending event %s" %
                                       (str(self._pending_events[0])))
                    self._pending_events.pop(0).set()

                # remove our reference to the objects at the end of each loop,
                # to try and avoid them being destroyed at a weird time (as
                # happened in gh-957)
                del trigger
                del coro
                del scheduling

            # no more pending triggers
            self._check_termination()
            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")

    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        # Unprime the trigger this coroutine is waiting on
        trigger = coro._trigger
        if trigger is not None:
            coro._trigger = None
            if coro in self._trigger2coros.setdefault(trigger, []):
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
                del self._trigger2coros[trigger]

        assert self._test is not None

        if coro is self._test:
            if _debug:
                self.log.debug("Unscheduling test {}".format(coro))

            if not self._terminate:
                self._terminate = True
                self.cleanup()

        elif Join(coro) in self._trigger2coros:
            self.react(Join(coro))
        else:
            try:
                # throws an error if the background coroutine errored
                # and no one was monitoring it
                coro._outcome.get()
            except (TestComplete, AssertionError) as e:
                coro.log.info("Test stopped by this forked coroutine")
                e = remove_traceback_frames(e, ['unschedule', 'get'])
                self._test.abort(e)
            except Exception as e:
                coro.log.error("Exception raised by this forked coroutine")
                e = remove_traceback_frames(e, ['unschedule', 'get'])
                self._test.abort(e)

    def _schedule_write(self, handle, write_func, *args):
        """ Queue `write_func` to be called on the next ReadWrite trigger. """
        if self._mode == Scheduler._MODE_READONLY:
            raise Exception(
                "Write to object {0} was scheduled during a read-only sync phase."
                .format(handle._name))

        # TODO: we should be able to better keep track of when this needs to
        # be scheduled
        if self._write_coro_inst is None:
            self._write_coro_inst = self.add(self._do_writes())

        self._write_calls[handle] = (write_func, args)
        self._writes_pending.set()

    def _resume_coro_upon(self, coro, trigger):
        """Schedule `coro` to be resumed when `trigger` fires."""
        coro._trigger = trigger

        trigger_coros = self._trigger2coros.setdefault(trigger, [])
        if coro is self._write_coro_inst:
            # Our internal write coroutine always runs before any user coroutines.
            # This preserves the behavior prior to the refactoring of writes to
            # this coroutine.
            trigger_coros.insert(0, coro)
        else:
            # Everything else joins the back of the queue
            trigger_coros.append(coro)

        if not trigger.primed:

            if trigger_coros != [coro]:
                # should never happen
                raise InternalError(
                    "More than one coroutine waiting on an unprimed trigger")

            try:
                trigger.prime(self.react)
            except Exception as e:
                # discard the trigger we associated, it will never fire
                self._trigger2coros.pop(trigger)

                # replace it with a new trigger that throws back the exception
                self._resume_coro_upon(
                    coro,
                    NullTrigger(name="Trigger.prime() Error",
                                outcome=outcomes.Error(e)))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def queue_function(self, coro):
        """Queue a coroutine for execution and move the containing thread
        so that it does not block execution of the main thread any longer.
        """
        # We should be able to find ourselves inside the _pending_threads list
        matching_threads = [
            t for t in self._pending_threads
            if t.thread == threading.current_thread()
        ]
        if len(matching_threads) == 0:
            raise RuntimeError(
                "queue_function called from unrecognized thread")

        # Raises if there is more than one match. This can never happen, since
        # each entry always has a unique thread.
        t, = matching_threads

        async def wrapper():
            # This function runs in the scheduler thread
            try:
                _outcome = outcomes.Value(await coro)
            except BaseException as e:
                _outcome = outcomes.Error(e)
            event.outcome = _outcome
            # Notify the current (scheduler) thread that we are about to wake
            # up the background (`@external`) thread, making sure to do so
            # before the background thread gets a chance to go back to sleep by
            # calling thread_suspend.
            # We need to do this here in the scheduler thread so that no more
            # coroutines run until the background thread goes back to sleep.
            t.thread_resume()
            event.set()

        event = threading.Event()
        self._pending_coros.append(cocotb.decorators.RunningTask(wrapper()))
        # The scheduler thread blocks in `thread_wait`, and is woken when we
        # call `thread_suspend` - so we need to make sure the coroutine is
        # queued before that.
        t.thread_suspend()
        # This blocks the calling `@external` thread until the coroutine finishes
        event.wait()
        return event.outcome.get()
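
# A minimal usage sketch of the mechanism above, assuming cocotb's @external and
# @function decorators (in cocotb versions where they accept async coroutines) and a
# DUT handle `dut` (illustrative names, not part of this file). The call from the
# @external thread into the decorated coroutine is routed through queue_function().
import cocotb
from cocotb.triggers import RisingEdge

@cocotb.function
async def wait_cycles(dut, n):
    # runs in the scheduler thread
    for _ in range(n):
        await RisingEdge(dut.clk)

@cocotb.external
def blocking_model(dut):
    # runs in a separate thread; blocks here until the coroutine above finishes
    wait_cycles(dut, 10)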

    def run_in_executor(self, func, *args, **kwargs):
        """Run the coroutine in a separate execution thread
        and return an awaitable object for the caller.
        """

        # Create a thread
        # Create a trigger that is called as a result of the thread finishing
        # Create an Event object that the caller can await on
        # Event object set when the thread finishes execution, this blocks the
        #   calling coroutine (but not the thread) until the external completes

        def execute_external(func, _waiter):
            _waiter._outcome = outcomes.capture(func, *args, **kwargs)
            if _debug:
                self.log.debug("Execution of external routine done %s" %
                               threading.current_thread())
            _waiter.thread_done()

        async def wrapper():
            waiter = external_waiter()
            thread = threading.Thread(group=None,
                                      target=execute_external,
                                      name=func.__qualname__ + "_thread",
                                      args=([func, waiter]),
                                      kwargs={})

            waiter.thread = thread
            self._pending_threads.append(waiter)

            await waiter.event.wait()

            return waiter.result  # raises if there was an exception

        return wrapper()
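
# A minimal usage sketch of run_in_executor(), assuming the module-level scheduler
# instance is reachable as cocotb.scheduler; `compute_crc` is a stand-in for any
# blocking, CPU-bound routine.
import zlib
import cocotb

def compute_crc(payload: bytes) -> int:
    return zlib.crc32(payload)

async def offload_example(payload: bytes) -> int:
    # awaiting the returned wrapper suspends only this coroutine, not the simulator
    return await cocotb.scheduler.run_in_executor(compute_crc, payload)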

    @staticmethod
    def create_task(coroutine: Any) -> RunningTask:
        """ Checks to see if the given object is a schedulable coroutine object and if so, returns it """

        if isinstance(coroutine, RunningTask):
            return coroutine
        if inspect.iscoroutine(coroutine):
            return RunningTask(coroutine)
        if inspect.iscoroutinefunction(coroutine):
            raise TypeError(
                "Coroutine function {} should be called prior to being "
                "scheduled.".format(coroutine))
        if isinstance(coroutine, cocotb.decorators.coroutine):
            raise TypeError(
                "Attempt to schedule a coroutine that hasn't started: {}.\n"
                "Did you forget to add parentheses to the @cocotb.test() "
                "decorator?".format(coroutine))
        if sys.version_info >= (3, 6) and inspect.isasyncgen(coroutine):
            raise TypeError(
                "{} is an async generator, not a coroutine. "
                "You likely used the yield keyword instead of await.".format(
                    coroutine.__qualname__))
        raise TypeError(
            "Attempt to add an object of type {} to the scheduler, which "
            "isn't a coroutine: {!r}\n"
            "Did you forget to use the @cocotb.coroutine decorator?".format(
                type(coroutine), coroutine))
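
# A hedged sketch of the dispatch in create_task() above; `worker` and `dut` are
# illustrative names and the scheduler instance is assumed to be cocotb.scheduler.
import cocotb

async def worker(dut):
    pass

def create_task_examples(dut):
    task = cocotb.scheduler.create_task(worker(dut))   # coroutine object -> RunningTask
    same = cocotb.scheduler.create_task(task)          # a RunningTask passes straight through
    try:
        cocotb.scheduler.create_task(worker)           # un-called coroutine function
    except TypeError:
        pass                                           # rejected with a pointed error
    return task, same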

    def add(self, coroutine: Union[RunningTask, Coroutine]) -> RunningTask:
        """Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error messages in the event of common gotchas.
        """

        task = self.create_task(coroutine)

        if _debug:
            self.log.debug("Adding new coroutine %s" % task._coro.__qualname__)

        self.schedule(task)
        self._check_termination()
        return task

    def start_soon(self, coro: Union[Coroutine, RunningTask]) -> RunningTask:
        """
        Schedule a coroutine to be run concurrently, starting after the current coroutine yields control.

        In contrast to :func:`~cocotb.fork` which starts the given coroutine immediately, this function
        starts the given coroutine only after the current coroutine yields control.
        This is useful when the coroutine to be forked has logic before the first
        :keyword:`await` that may not be safe to execute immediately.
        """

        task = self.create_task(coro)

        if _debug:
            self.log.debug("Queueing a new coroutine %s" %
                           task._coro.__qualname__)

        self.queue(task)
        return task
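
# A hedged sketch of the difference described in the docstring above; `dut` is an
# assumed handle and `worker` an illustrative coroutine.
import cocotb
from cocotb.triggers import RisingEdge

async def worker(dut):
    dut._log.info("setup")      # with fork() this line runs immediately;
    await RisingEdge(dut.clk)   # with start_soon() it runs after the caller yields

async def spawn_both(dut):
    cocotb.fork(worker(dut))                  # starts right away, up to the first await
    cocotb.scheduler.start_soon(worker(dut))  # queued; starts once this coroutine yields
    await RisingEdge(dut.clk)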

    def add_test(self, test_coro):
        """Called by the regression manager to queue the next test"""
        if self._test is not None:
            raise InternalError("Test was added while another was in progress")

        self._test = test_coro
        self._resume_coro_upon(
            test_coro,
            NullTrigger(name="Start {!s}".format(test_coro),
                        outcome=outcomes.Value(None)))

    # This collection of functions parses a trigger out of the object
    # that was yielded by a coroutine, converting `list` -> `Waitable`,
    # `Waitable` -> `RunningTask`, `RunningTask` -> `Trigger`.
    # Doing them as separate functions allows us to avoid repeating unnecessary
    # `isinstance` checks.

    def _trigger_from_started_coro(
            self, result: cocotb.decorators.RunningTask) -> Trigger:
        if _debug:
            self.log.debug("Joining to already running coroutine: %s" %
                           result._coro.__qualname__)
        return result.join()

    def _trigger_from_unstarted_coro(
            self, result: cocotb.decorators.RunningTask) -> Trigger:
        self.queue(result)
        if _debug:
            self.log.debug("Scheduling nested coroutine: %s" %
                           result._coro.__qualname__)
        return result.join()

    def _trigger_from_waitable(self,
                               result: cocotb.triggers.Waitable) -> Trigger:
        return self._trigger_from_unstarted_coro(
            cocotb.decorators.RunningTask(result._wait()))

    def _trigger_from_list(self, result: list) -> Trigger:
        return self._trigger_from_waitable(cocotb.triggers.First(*result))

    def _trigger_from_any(self, result) -> Trigger:
        """Convert a yielded object into a Trigger instance"""
        # note: the order of these can significantly impact performance

        if isinstance(result, Trigger):
            return result

        if isinstance(result, cocotb.decorators.RunningTask):
            if not result.has_started():
                return self._trigger_from_unstarted_coro(result)
            else:
                return self._trigger_from_started_coro(result)

        if inspect.iscoroutine(result):
            return self._trigger_from_unstarted_coro(
                cocotb.decorators.RunningTask(result))

        if isinstance(result, list):
            return self._trigger_from_list(result)

        if isinstance(result, cocotb.triggers.Waitable):
            return self._trigger_from_waitable(result)

        if sys.version_info >= (3, 6) and inspect.isasyncgen(result):
            raise TypeError(
                "{} is an async generator, not a coroutine. "
                "You likely used the yield keyword instead of await.".format(
                    result.__qualname__))

        raise TypeError(
            "Coroutine yielded an object of type {}, which the scheduler can't "
            "handle: {!r}\n"
            "Did you forget to decorate with @cocotb.coroutine?".format(
                type(result), result))
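
# Hedged examples of objects these conversion functions accept, shown in the
# generator-based @cocotb.coroutine style because yielding a bare list is only
# legal there (it gets wrapped in cocotb.triggers.First by _trigger_from_list).
import cocotb
from cocotb.triggers import RisingEdge, Timer

@cocotb.coroutine
def wait_examples(dut):
    yield Timer(10, units='ns')                    # a Trigger is used as-is
    yield [RisingEdge(dut.clk), Timer(100, 'ns')]  # a list resumes on whichever fires first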

    @contextmanager
    def _task_context(self, task):
        """Context manager for the currently running task."""
        old_task = self._current_task
        self._current_task = task
        try:
            yield
        finally:
            self._current_task = old_task

    def schedule(self, coroutine, trigger=None):
        """Schedule a coroutine by calling the send method.

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule.
            trigger (cocotb.triggers.Trigger): The trigger that caused this
                coroutine to be scheduled.
        """
        with self._task_context(coroutine):
            if trigger is None:
                send_outcome = outcomes.Value(None)
            else:
                send_outcome = trigger._outcome
            if _debug:
                self.log.debug("Scheduling with {}".format(send_outcome))

            coro_completed = False
            try:
                coroutine._trigger = None
                result = coroutine._advance(send_outcome)
                if _debug:
                    self.log.debug("Coroutine %s yielded %s (mode %d)" %
                                   (coroutine._coro.__qualname__, str(result),
                                    self._mode))

            except cocotb.decorators.CoroutineComplete:
                if _debug:
                    self.log.debug("Coroutine {} completed with {}".format(
                        coroutine, coroutine._outcome))
                coro_completed = True

            # this can't go in the else above, as that causes unwanted exception
            # chaining
            if coro_completed:
                self.unschedule(coroutine)

            # Don't handle the result if we're shutting down
            if self._terminate:
                return

            if not coro_completed:
                try:
                    result = self._trigger_from_any(result)
                except TypeError as exc:
                    # restart this coroutine with an exception object telling it that
                    # it wasn't allowed to yield that
                    result = NullTrigger(outcome=outcomes.Error(exc))

                self._resume_coro_upon(coroutine, result)

            # We do not return from here until pending threads have completed, but only
            # from the main thread, this seems like it could be problematic in cases
            # where a sim might change what this thread is.

            if self._main_thread is threading.current_thread():

                for ext in self._pending_threads:
                    ext.thread_start()
                    if _debug:
                        self.log.debug(
                            "Blocking from %s on %s" %
                            (threading.current_thread(), ext.thread))
                    state = ext.thread_wait()
                    if _debug:
                        self.log.debug(
                            "Back from wait on self %s with newstate %d" %
                            (threading.current_thread(), state))
                    if state == external_state.EXITED:
                        self._pending_threads.remove(ext)
                        self._pending_events.append(ext.event)

            # Handle any newly queued coroutines that need to be scheduled
            while self._pending_coros:
                self.add(self._pending_coros.pop(0))

    def finish_test(self, exc):
        self._test.abort(exc)
        self._check_termination()

    def finish_scheduler(self, exc):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed.
        """
        # If there is an error during cocotb initialization, self._test may not
        # have been set yet. Don't cause another Python exception here.

        if self._test:
            self.log.debug("Issue sim closedown result to regression object")
            self._test.abort(exc)
            cocotb.regression_manager.handle_result(self._test)

    def cleanup(self):
        """Clear up all our state.

        Unprime all pending triggers, kill off any coroutines, and stop all externals.
        """
        # copy since we modify this in kill
        items = list(self._trigger2coros.items())

        # reversing seems to fix gh-928, although the order is still somewhat
        # arbitrary.
        for trigger, waiting in items[::-1]:
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()

        if self._main_thread is not threading.current_thread():
            raise Exception("Cleanup() called outside of the main thread")

        for ext in self._pending_threads:
            self.log.warning("Waiting for %s to exit", ext.thread)
Esempio n. 7
0
    # Notify GPI of log level
    simulator.log_level(_default_log)

    # If stdout/stderr are not TTYs, Python may not have opened them with line
    # buffering. In that case, try to reopen them with line buffering
    # explicitly enabled. This ensures that prints such as stack traces always
    # appear. Continue silently if this fails.
    try:
        if not sys.stdout.isatty():
            sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1)
            log.debug("Reopened stdout with line buffering")
        if not sys.stderr.isatty():
            sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 1)
            log.debug("Reopened stderr with line buffering")
    except Exception as e:
        log.warning("Failed to ensure that stdout/stderr are line buffered: %s", e)
        log.warning("Some stack traces may not appear because of this.")


scheduler = Scheduler()
regression_manager = None

plusargs = {}

# To save typing provide an alias to scheduler.add
fork = scheduler.add
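# e.g. cocotb.fork(my_coro()) in a test schedules and immediately starts the coroutine
# via Scheduler.add() (my_coro is an illustrative name)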

# FIXME is this really required?
_rlock = threading.RLock()

Esempio n. 8
0
class SDIOHost(object):
    """
    An SDIO host driver (not complete) according to the Simplified (Open Source) SDIO and SD specs.
    """
    def __init__(self, clock, phy, spi_mode=False):
        self.log = SimLog("sdio_host")
        self.log.setLevel(logging.INFO)

        # Whether we're meant to be in SPI mode or not
        self.spi_mode = spi_mode
        
        self.phy = phy
        self.clock = clock

        self.init_state()

    def init_state(self):
        """
        Variables to set up during initialization
        """
        self.rca = None
        # Bits in register 8 of the CCCR region
        self.smb = False # Support multiple block transfer
        self.sdc = False # Support direct command (CMD52)
        self.srw = False # Support read wait
        self.sbs = False # Support bus control (suspend/resume)
        self.s4mi = False # Support block gap interrupt (card generates interrupts between gaps of 4-bit data)
        self.lsc = False # Low-speed card, else it's fullspeed
        self.b4ls = False # 4-bit low speed card support

        self.fn_cis_addrs = [0]*8
        self.fn_max_blocksizes = [0]*8
        self.fn_count = 1
        self.cis_data = []

    @cocotb.coroutine
    def get_cmd_response(self,cmd,timeout=1000,timeout_possible=False):
        """
        Await the response from the host to the command we sent,
        and check a number of standard things in it.
        """
        cmd_num = cmd[45:40].integer
        if self.spi_mode:
            response_type, response_length = sdio_utils.get_spi_response_type(cmd_num)
        else:
            response_type, response_length = sdio_utils.get_response_type(cmd_num)
        response = yield self.phy.get_cmd_response_bits(cmd,timeout,timeout_possible)

        if response is "timeout" and timeout_possible:
            # Just return the timeout indication
            raise ReturnValue(response)

        if self.spi_mode:
            # No CRC on command responses
            if response_type in [4,7]:
                r1_offset = 32
            elif response_type == 5:
                r1_offset = 8
            else:
                r1_offset = 0
            self.log.debug("Getting R%d from CMD%d, data: %s",response_type,cmd_num,response.binstr)
            # Check the R1 status fields
            if response[r1_offset + 7]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("SPI command cmd%d response indicated parameter error (R1:%02x)" %(cmd_num,response.integer))
            if response[r1_offset + 4]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("SPI command cmd%d response indicated function number error (R1:%02x)" %(cmd_num,response.integer))
            if response[r1_offset + 3]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("SPI command cmd%d response indicated CRC error (R1:%02x)" %(cmd_num,response.integer))
            if response[r1_offset + 2]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("SPI command cmd%d response indicated illegal instruction error (R1:%02x)" %(cmd_num,response.integer))
            raise ReturnValue(response)

        else:
            # Do response checks
            # Check the CRC7
            crc7 = sdio_utils.crc7_gen(number=response[47:8].integer)
            if crc7 != response[7:1].integer:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOProtocolError("Response CRC7 error: in response to cmd %d, expected CRC of %x got %x, response was 'b%s" %(cmd_num, crc7,
                                                                                                                                   response[7:1].integer,
                                                                                                                                   response.binstr))
            response_cmd_num = response[45:40].integer
            if response_type in [4]:
                if response_cmd_num != 0x3f:
                    raise SDIOProtocolError("R4 reserved field [45:40] were not all set to 1, instead they were %x" %response_cmd_num)
            elif response_cmd_num != cmd_num: # Other commands need to have their command number reflected here
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOProtocolError("Response cmd num error: in response to cmd %d, cmd field had %d" %(cmd_num,response_cmd_num))
            if response_type in [1,1.5]:
                # Could be a busy response if type 1.5, otherwise a plain R1; either way, look at the card status.
                # We only care about a few bits, as specified in the SDIO spec 4.10.8 (page 23)
                card_status = BinaryValue(value=response[39:8].integer,bits=32,bigEndian=False)
                self.log.debug("R1 card status register: %x ('b%s)" %(card_status.integer, card_status.binstr))
                if card_status[31]:
                    for _ in range(0,4): yield FallingEdge(self.clock)
                    raise SDIOResponseError("Card status register bit 31, OUT_OF_RANGE, set in response to cmd%d" %(cmd_num))
                if card_status[23]:
                    for _ in range(0,4): yield FallingEdge(self.clock)
                    raise SDIOResponseError("Card status register bit 23, COM_CRC_ERROR, set in response to cmd%d" %(cmd_num))
                if card_status[22]:
                    for _ in range(0,4): yield FallingEdge(self.clock)
                    raise SDIOResponseError("Card status register bit 22, ILLEGAL_COMMAND (not legal for current state), set in response to cmd%d" %(cmd_num))
                if card_status[19]:
                    for _ in range(0,4): yield FallingEdge(self.clock)
                    raise SDIOResponseError("Card status register bit 19, ERROR (general or unknown error), set in response to cmd%d" %(cmd_num))
                if card_status[12:9].integer != 0xf:
                    for _ in range(0,4): yield FallingEdge(self.clock)
                    raise SDIOResponseError("Card status register CURRENT_STATE != 0xf, which it should be for an SDIO card")
        raise ReturnValue(response)

    @cocotb.coroutine
    def cmd_go_idle(self):
        """
        Send a CMD0, or GO_IDLE_STATE.
        According to section 4.4 of the SDIO spec:
          "Note that in SD mode, CMD0 is only used to indicated entry into SPI mode and shall be supported.
           An I/O only card or the I/O portion of a combo card is not reset by CMD0"

        """
        cmd = sdio_utils.init_cmd(cmd_num=0)
        yield self.phy.acquire_cmd_lock()
        yield self.phy.send_cmd(cmd)
        if self.spi_mode:
            response = yield self.get_cmd_response(cmd)
            self.phy.release_cmd_lock()
            self.log.debug("Response: 'b%s" %response.binstr)
        else:
            self.phy.release_cmd_lock()

    @cocotb.coroutine
    def cmd_send_relative_addr(self):
        """
        Send a CMD3, or SEND_RELATIVE_ADDR.
        Asks the card to publish a new relative address (RCA). Contents are empty.

        """
        cmd = sdio_utils.init_cmd(cmd_num=3)
        yield self.phy.acquire_cmd_lock()
        yield self.phy.send_cmd(cmd)
        response = yield self.get_cmd_response(cmd)
        self.phy.release_cmd_lock()
        self.log.debug("Response: 'b%s" %response.binstr)
        raise ReturnValue(response)

    @cocotb.coroutine
    def cmd_send_if_cond(self):
        """
        Send a CMD8
        """
        cmd = sdio_utils.init_cmd(cmd_num=8)
        # Bits 19:16 are the VHS (table 4-8 SD Spec 6.00)
        cmd[16] = 1 # 19:16 == 4'b0001 means 2.7-3.6V voltage, which is right for 2.0
        cmd[15:8] = random.getrandbits(8) # This is an 8-bit pattern which is echoed back, set it to something random
        yield self.phy.acquire_cmd_lock()
        yield self.phy.send_cmd(cmd)
        response = yield self.get_cmd_response(cmd)
        self.phy.release_cmd_lock()
        self.log.debug("Response: 'b%s" %response.binstr)
        raise ReturnValue(response)

    @cocotb.coroutine
    def cmd_send_op_cond(self):
        """
        Send a CMD5
        Similar to the operation of ACMD41 for SD memory cards, it is used to inquire about the
        voltage range needed by the I/O card.
        """
        cmd = sdio_utils.init_cmd(cmd_num=5)
        # Hardcode to support only around 3v3 (see CMD5 in SDIO spec)
        cmd[8+18] = 1
        cmd[8+19] = 1
        cmd[8+20] = 1
        cmd[8+21] = 1
        cmd[8+22] = 1
        yield self.phy.acquire_cmd_lock()
        yield self.phy.send_cmd(cmd)
        response = yield self.get_cmd_response(cmd)
        self.phy.release_cmd_lock()
        self.log.debug("Response: 'b%s" %response.binstr)
        raise ReturnValue(response)


    @cocotb.coroutine
    def cmd_select_card(self, rca=0):
        """
        Send a CMD7, or SELECT_CARD.
        Toggle a card between stand-by and transfer states. Card is selected by its own relative address, and is de-selected by
        any other address.

        """
        cmd = sdio_utils.init_cmd(cmd_num=7)
        cmd[39:24] = rca
        yield self.phy.acquire_cmd_lock()
        yield self.phy.send_cmd(cmd)
        self.log.debug("CMD select card: 'b%s" %cmd.binstr)
        response = yield self.get_cmd_response(cmd)
        self.phy.release_cmd_lock()
        self.log.debug("Response: 'b%s" %response.binstr)
        raise ReturnValue(response)

    @cocotb.coroutine
    def cmd_io_rw_direct(self, rw=0, fn=0, raw=0, addr=0, data=None, timeout_possible=False):
        """
        Send a CMD52, IO_RW_DIRECT command
        It's the simplest form of register access, within 128K of space (17-bit address).
        Details in section 5.1 of SDIO spec (page 25)

        args:
        rw - 0: read, 1: write
        fn - the function to access
        raw - read after write
        addr - not wider than 17 bits
        data - not wider than 8 bits

        """

        cmd = sdio_utils.init_cmd(cmd_num=52)
        cmd[39] = rw
        cmd[38:36] = fn
        cmd[35] = raw
        cmd[33:17] = addr
        if rw:
            assert data is not None
            cmd[15:8] = data
        yield self.phy.acquire_cmd_lock()
        yield self.phy.send_cmd(cmd)
        response = yield self.get_cmd_response(cmd,timeout_possible=timeout_possible)
        self.phy.release_cmd_lock()

        if response is "timeout" and timeout_possible:
            raise ReturnValue(response)

        if self.spi_mode:
            response_data = response[7:0].integer
        else:            
            # Inspect the response flags here
            response_flags = BinaryValue(value=response[23:16].integer,bits=8,bigEndian=False)

            if response_flags[7]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("IO_RW_DIRECT response flags: COM_CRC_ERROR")
            if response_flags[6]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("IO_RW_DIRECT response flags: ILLEGAL_COMMAND")
            if response_flags[3]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("IO_RW_DIRECT response flags: ERROR")
            if response_flags[1]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("IO_RW_DIRECT response flags: FUNCTION_NUMBER (invalid function number %d)" % fn)
            if response_flags[0]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("IO_RW_DIRECT response flags: OUT_OF_RANGE")

            # TODO - this always responds 0 why?!
            self.log.debug("IO_RW_DIRECT response flags, IO_CURRENT_STATE: %d" %(response_flags[5:4].integer))

            response_data = response[15:8].integer

        raise ReturnValue(response_data)
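
# A hedged usage sketch of cmd_io_rw_direct() (CMD52); `host` is an SDIOHost
# instance and the register address 0x07 (bus interface control) is illustrative.
@cocotb.coroutine
def cmd52_example(host):
    val = yield host.cmd_io_rw_direct(rw=0, fn=0, addr=0x07)             # single-byte read
    yield host.cmd_io_rw_direct(rw=1, fn=0, addr=0x07, data=val | 0x02)  # write the byte back, modified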

    @cocotb.coroutine
    def cmd_io_rw_extended(self, rw=0, fn=0, block=0, op=0, addr=0, count=0, data=None, blocksize=None, read_wait=None, could_abort=True):
        """
        Send a CMD53, IO_RW_EXTENDED command
        This is a data command and allows reading/writing to multiple address spaces with a single command.
        Details in section 5.3 of SDIO spec (page 28)

        args:
        rw - 0: read, 1: write
        fn - the function to access
        block - whether we're doing a transfer in blocks or bytes
                (block transfers may or may not be supported; the SMB bit in the CCCR indicates support)
        op - 0: multi-byte R/W to a fixed address, 1: to an incrementing address
        addr - not wider than 17 bits
        count - meaning depends on the mode:
                block == 1 ? 0: infinite (keep going until we send an abort), 1: 1 block, 2: 2 blocks, etc.
                block == 0 ? 0: 512 bytes, 1: 1 byte, 2: 2 bytes, etc.
        data - a list of data values (each not wider than 8 bits)

        """
        cmd        = sdio_utils.init_cmd(cmd_num=53)
        cmd[39]    = rw
        cmd[38:36] = fn
        cmd[35]    = block
        cmd[34]    = op
        cmd[33:17] = addr
        cmd[16:8]  = count

        yield self.phy.acquire_cmd_lock()
        yield self.phy.send_cmd(cmd)
        response = yield self.get_cmd_response(cmd)
        self.phy.release_cmd_lock()

        if self.spi_mode:
            pass
        else:
            # Inspect the response flags here
            response_flags = BinaryValue(value=response[23:16].integer,bits=8,bigEndian=False)

            if response_flags[7]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("IO_RW_DIRECT response flags: COM_CRC_ERROR")
            if response_flags[6]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("IO_RW_DIRECT response flags: ILLEGAL_COMMAND")
            if response_flags[3]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("IO_RW_DIRECT response flags: ERROR")
            if response_flags[1]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("IO_RW_DIRECT response flags: FUNCTION_NUMBER (invalid function number %d)" % fn)
            if response_flags[0]:
                for _ in range(0,4): yield FallingEdge(self.clock)
                raise SDIOResponseError("IO_RW_DIRECT response flags: OUT_OF_RANGE")

            # TODO - this always responds 0 why?!
            self.log.debug("IO_RW_EXTENDED response flags, IO_CURRENT_STATE: %d" %(response_flags[5:4].integer))

        if block:
            blocks = count
        else:
            blocks = 1

        if rw:
            assert data is not None
            for blockcount in range(0,blocks):
                # Pad with a random number of byte periods (8 clocks each) before the block
                random_pad_bytes = random.randint(1,4)
                for _ in range(0,random_pad_bytes*8):
                    yield RisingEdge(self.clock)

                if block:
                    self.log.debug("Writing block %d" %blockcount)
                    # Check if the block write has been aborted
                    if self.phy.data_write_aborted:
                        self.phy.data_write_aborted = False
                        self.log.info("Detected block write aborted after %d blocks" %(blockcount))
                        raise ReturnValue(0)
                bytes_to_write = data[blockcount] if block else data
                # Send data bytes on the data lines
                crc_resp = yield self.phy.data_bus_write(bytes_to_write,could_abort=could_abort,final_block=blockcount+1==blocks)
                # TODO check the CRC response and do something sensible with it
                
        else:
            blockdata = []
            for blockcount in range(0,blocks):
                if block:
                    assert(blocksize != None), "Called with block=1 but blocksize was not passed, please call with blocksize set"
                bytes_to_read = blocksize if block else count
                if block:
                    self.log.debug("Reading block %d" %blockcount)
                data,status = yield self.phy.data_bus_read(bytes_to_read,could_abort=could_abort,final_block=blockcount+1==blocks)
                if status == "aborted" or self.phy.data_read_aborted:
                    self.phy.data_read_aborted = False
                    # We were an aborted read, return what we have
                    raise ReturnValue(data)
                if block:
                    blockdata.append(data)
                    # Wait a little bit before polling for the next data block
                    if self.spi_mode:
                        for _ in range(0,8):
                            yield RisingEdge(self.clock)
                    else:
                        # Wait a few cycles before polling the bus for the next block of data
                        yield FallingEdge(self.clock)
                        yield FallingEdge(self.clock)
                # If we've been passed a read-wait period, do that
                if read_wait:
                    self.phy.read_wait(True)
                    for x in range(read_wait):
                        yield FallingEdge(self.clock)
                    self.phy.read_wait(False)

            if block:
                raise ReturnValue(blockdata)
            else:
                raise ReturnValue(data)

        raise ReturnValue(0)
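
# A hedged usage sketch of cmd_io_rw_extended() (CMD53); `host` is an SDIOHost
# instance and the function number, address, block size and payload are illustrative.
@cocotb.coroutine
def cmd53_example(host):
    # byte mode: read 16 bytes from an incrementing address
    data = yield host.cmd_io_rw_extended(rw=0, fn=1, block=0, op=1, addr=0x100, count=16)
    # block mode: write two 64-byte blocks (count is the number of blocks)
    blocks = [[i & 0xff for i in range(64)] for _ in range(2)]
    yield host.cmd_io_rw_extended(rw=1, fn=1, block=1, op=1, addr=0x100, count=2, data=blocks)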

    @cocotb.coroutine
    def sdio_init(self, reset=False, dump_regs=False, rca_changes=0):
        """
        Run the full SDIO initialization sequence.

        """
        self.log.info("Beginning SDIO device initialization")
        if self.spi_mode:
            pass # Add pre-init test commands here
        else:
            # Send a CMD52 to write to the RES bit of the CCCR, bit 3 of address 6
            reg = yield self.read_reg(fn=0,addr=sdio_utils.get_addr_by_name('I/O abort'),timeout_possible=True)
            if reg != "timeout":
                reg = BinaryValue(value=reg,bits=8,bigEndian=False)
                # Set the reset bit (RES)
                reg[3] = 1
                yield self.write_reg(fn=0,addr=sdio_utils.get_addr_by_name('I/O abort'),data=reg.integer)

                for _ in range(random.randint(8,16)): yield RisingEdge(self.clock)

        
        # Now send a cmd0
        yield self.cmd_go_idle()
        for _ in range(random.randint(8,16)): yield RisingEdge(self.clock)

        yield self.cmd_go_idle()
        for _ in range(random.randint(8,16)): yield RisingEdge(self.clock)

        # CMD8
        yield self.cmd_send_if_cond()
        for _ in range(random.randint(8,16)): yield RisingEdge(self.clock)

        # CMD5
        yield self.cmd_send_op_cond()
        for _ in range(random.randint(8,16)): yield RisingEdge(self.clock)

        if self.spi_mode:
            pass
        else:
            # Now CMD3
            response = yield self.cmd_send_relative_addr()
            self.log.info("SDIO device RCA response: %x ('b%s)" %(response[39:24].integer, response[39:24].binstr))
            self.rca = response[39:24].integer
            # Device should now be in standby state

            # Actually do that again a number of times to check that the RCA changes and we can still init with it
            for loop in range(rca_changes):
                # Reissue CMD3
                response = yield self.cmd_send_relative_addr()
                self.log.info("SDIO device RCA response: %x ('b%s)" %(response[39:24].integer, response[39:24].binstr))
                self.rca = response[39:24].integer

            # Now issue a CMD7 selecting the card
            response = yield self.cmd_select_card(self.rca)
            self.log.debug("SDIO response to select: %x ('b%s)" %(response[39:8].integer, response[39:8].binstr))

        # Read register 8
        reg = yield self.cmd_io_rw_direct(rw=0, fn=0, raw=0, addr=sdio_utils.get_addr_by_name('Card capability'))
        reg = BinaryValue(value=reg,bits=8,bigEndian=False)

        self.sdc = reg[0].integer # Support direct command (CMD52)
        self.smb = reg[1].integer # Support multiple block transfer
        self.srw = reg[2].integer # Support read wait
        self.sbs = reg[3].integer # Support bus control (suspend/resume)
        self.s4mi = reg[4].integer # Support block gap interrupt (card generates interrupts between gaps of 4-bit data)
        self.lsc =  reg[6].integer # Low-speed card, else it's fullspeed
        self.b4ls = reg[7].integer # 4-bit low speed card support

        self.log.info("Card capability register:")
        self.log.info("  SDC - support direct command: %s" %(1 if self.sdc else 0))
        self.log.info("  SMB - support multiple block transfer : %s" %(1 if self.smb else 0))
        self.log.info("  SRW - support read wait: %s" %(1 if self.srw else 0))
        self.log.info("  SBS - support bus control (suspend/resume): %s" %(1 if self.sbs else 0))
        self.log.info("  S4MI - support 4-bit block gap interrupt: %s" %(1 if self.s4mi else 0))
        self.log.info("  LSC - is a low-speed card, if not it's high-speed: %s" %(1 if self.lsc else 0))
        self.log.info("  4BLS - support 4-bit low-speed: %s" %(1 if self.b4ls else 0))

        # Store the function card information structure (CIS) address in variables
        self.fn_cis_addrs[0] = yield self.read_reg(fn=0, addr=sdio_utils.get_addr_by_name('Common CIS pointer byte 0'))
        self.fn_cis_addrs[0] |= (yield self.read_reg(fn=0, addr=sdio_utils.get_addr_by_name('Common CIS pointer byte 1'))) << 8
        self.fn_cis_addrs[0] |= (yield self.read_reg(fn=0, addr=sdio_utils.get_addr_by_name('Common CIS pointer byte 2'))) << 16
        self.log.debug("Function 0 CIS address: 0x%x" %self.fn_cis_addrs[0])

        # Do a data read to get the CIS (much faster); assume 256 bytes is enough
        self.cis_data = yield self.cmd_io_rw_extended(rw=0, fn=0, block=0, op=0, addr=self.fn_cis_addrs[0], count=256)
        self.log.debug("CIS data: %s" %self.cis_data)

        # Check how many functions we have
        for fn in range(1,8):
            # Read the FBR0 and if it's 0 there's no SDIO function there
            reg = yield self.read_reg(fn=0, addr=sdio_utils._cia_base_addresses['FBR%d'%fn])
            if reg & 0xf:
                self.log.debug("Function %d detected as present" %fn)
                self.fn_count += 1
                self.fn_cis_addrs[fn] = yield self.read_reg(fn=0, addr=sdio_utils._cia_base_addresses['FBR%d'%fn] + sdio_utils.get_addr_by_name('Function CIS pointer byte 0'))
                self.fn_cis_addrs[fn] |= (yield self.read_reg(fn=0, addr=sdio_utils._cia_base_addresses['FBR%d'%fn] + sdio_utils.get_addr_by_name('Function CIS pointer byte 1'))) << 8
                self.fn_cis_addrs[fn] |= (yield self.read_reg(fn=0, addr=sdio_utils._cia_base_addresses['FBR%d'%fn] + sdio_utils.get_addr_by_name('Function CIS pointer byte 2'))) << 16
                self.log.debug("Function %d CIS address: 0x%x" %(fn,self.fn_cis_addrs[fn]))

        if dump_regs:
            yield self.dump_cccrs()
            for fn in range(0,self.fn_count):
                self.dump_cis(self.fn_cis_addrs[fn])
                if fn > 0:
                    yield self.dump_fbrs(fn)

        # Get the function maximum block sizes
        # FN0 - get the upper block size for function 0 from its CIS table
        fn0_cis_tuples = self.parse_cis_tuple_table(self.fn_cis_addrs[0])
        found_size = False
        for tuple in fn0_cis_tuples:
            # See page 64 (section 16.7.3) in the SDIO spec for what I'm doing here
            if tuple[0] == 0x22:
                self.fn_max_blocksizes[0] = (tuple[4] << 8) | tuple[3]
                found_size = True
                break
        if not found_size:
            raise SDIODataError("Unable to determine Function 0's block size from its CIS table")
        else:
            self.log.info("SDIO function 0 max block size is %d bytes" %self.fn_max_blocksizes[0])

        # FN1-7 are slightly different, as their CIS pointers are in the respective FBRs
        for fn in range(1,self.fn_count):
            fn_cis_tuples = self.parse_cis_tuple_table(self.fn_cis_addrs[fn])
            found_size = False
            for tuple in fn_cis_tuples:
                # See page 65 (section 16.7.4) in the SDIO spec for what I'm doing here
                if tuple[0] == 0x22:
                    self.fn_max_blocksizes[fn] = (tuple[15] << 8) | tuple[14]
                    found_size = True
                    break
            if not found_size:
                raise SDIODataError("Unable to determine Function %d's block size from its CIS table" %fn)
            else:
                self.log.info("SDIO function %d max block size is %d bytes" %(fn,self.fn_max_blocksizes[fn]))

        self.log.info("SDIO Initialized")


    @cocotb.coroutine
    def dump_cccrs(self):
        """
        Print out all of the CCCRs (card common control registers)

        See section 6.9 (page 33) of the SDIO spec.
        """
        self.log.info("CCCRs:")
        for reg in sdio_utils._cccrs:
            val = yield self.read_reg(fn=0, addr=reg['addr'])
            if 'bin' in reg and reg['bin']:
                # Print out in binary format
                self.log.info("0x%02x %-30s: %s" %(reg['addr'], reg['name'], BinaryValue(value=val,bits=8,bigEndian=False).binstr))
            else:
                self.log.info("0x%02x %-30s: %02x" %(reg['addr'], reg['name'], val))

    @cocotb.coroutine
    def dump_fbrs(self, func=0):
        """
        Print out the FBR (function basic registers) for a particular function

        See section 6.9 (page 33) of the SDIO spec.
        """
        self.log.info("FBR for function %d:" % func)
        for reg in sdio_utils._fbrs:
            addr = (func << 8) + reg['addr']
            val = yield self.read_reg(fn=0, addr=addr)
            if 'bin' in reg and reg['bin']:
                # Print out in binary format
                self.log.info("0x%02x %-35s: %s" %(reg['addr'], reg['name'], BinaryValue(value=val,bits=8,bigEndian=False).binstr))
            else:
                self.log.info("0x%02x %-35s: %02x" %(reg['addr'], reg['name'], val))


    def dump_cis(self,addr=0):
        """
        Get the CIS tuples from the address and print them out
        """
        tuples = self.parse_cis_tuple_table(addr)
        if tuples:
            self.log.info("CIS tuples from address %x" %addr)
            for tuple in tuples:
                self.log.info("Tuple type: %02x, tuples: %s" %(tuple[0], ["%02x" %t for t in tuple[2:]])) # Skip the tuple size byte

    def parse_cis_tuple_table(self,addr=0):
        """
        Parse and return all of the tuples as a list of lists
        """
        self.log.debug("Parsing CIS section starting at 0x%x" % addr)
        link = -1
        byte_of_tuple = 0
        return_tuples = []
        current_tuple = []
        cis_offset = addr & 0xff
        for byte in self.cis_data[cis_offset:]:
            if link == 0 and byte_of_tuple > 0:
                # We're back to the beginning of a tuple
                byte_of_tuple = 0
                link -= 1  # Set this back to -1
                return_tuples.append(current_tuple)
                current_tuple = []

            #byte = yield self.read_reg(fn=0, addr=addr)
            current_tuple.append(byte)
            if byte_of_tuple == 0 and byte == 0xff:
                # Finish up
                break

            #self.log.info("Byte @ %04x: %02x (byte_of_tuple=)%d" %(addr,byte,byte_of_tuple))
            if byte_of_tuple == 1:
                if byte == 0:
                    raise SDIODataError("Reading CIS and got 0 for a tuple link value")
                # This is the link value
                link = byte + 1 # + 1 because we subtract 1 immediately at the end

            byte_of_tuple += 1
            addr += 1
            if link:
                link -= 1

            if byte_of_tuple > 100:
                raise SDIODataError("Tuple in CIS too long")
        #raise ReturnValue(return_tuples)
        return return_tuples
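
    # A hedged worked example of the layout parsed above: each tuple is
    # [TPL_CODE, TPL_LINK, <TPL_LINK body bytes>] and a code of 0xFF ends the chain, e.g.
    #   cis_data = [0x21, 0x02, 0x0C, 0x00,   0x22, 0x02, 0x00, 0x02,   0xFF]
    # parses to [[0x21, 0x02, 0x0C, 0x00], [0x22, 0x02, 0x00, 0x02]].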

    @cocotb.coroutine
    def read_reg(self,fn,addr,timeout_possible=False):
        """
        Read an 8-bit register using CMD52 (IO_RW_DIRECT)

        See table 6-1 of the SDIO spec (page 33) for details on some of the CIA registers
        """
        response = yield self.cmd_io_rw_direct(rw=0, fn=fn, addr=addr, timeout_possible=timeout_possible)
        raise ReturnValue(response)

    @cocotb.coroutine
    def write_reg(self,fn,addr,data,timeout_possible=False):
        """
        Write an 8-bit register using CMD52 (IO_RW_DIRECT)

        See table 6-1 of the SDIO spec (page 33) for details on some of the CIA registers
        """
        response = yield self.cmd_io_rw_direct(rw=1, fn=fn, addr=addr, data=data, timeout_possible=timeout_possible)
        raise ReturnValue(response)

    @cocotb.coroutine
    def set_bus_width(self,width=4):
        """
        Set the bus width of the SDIO device (1 or 4 bits)
        """
        if width==4:
            value = BinaryValue(value=0x2,bits=2,bigEndian=False)
        elif width==1:
            value = BinaryValue(value=0x0,bits=2,bigEndian=False)
        else:
            self.log.warning("Bus width %d not supported, ignoring" %width)
            return

        reg_addr = sdio_utils.get_addr_by_name('Bus interface control')
        reg = yield self.read_reg(fn=0,addr=reg_addr)
        reg = BinaryValue(value=reg,bits=8,bigEndian=False)
        # Write the bus width fields
        reg[0] = value[0].integer
        reg[1] = value[1].integer
        yield self.write_reg(fn=0,addr=reg_addr,data=reg.integer)
        # Confirm it
        reg = yield self.read_reg(fn=0,addr=reg_addr)
        assert ((reg & 0x3) == value.integer)

        reg = yield self.read_reg(fn=0,addr=reg_addr)
        self.phy.bus_width = width # Set it in the phy model so it knows what to drive to the device
        self.log.debug("Bus interface control reg: %s" % BinaryValue(value=reg,bits=8,bigEndian=False))

    @cocotb.coroutine
    def enable_fn(self,fn=1):
        """
        Enable a function
        """
        reg_addr = sdio_utils.get_addr_by_name('I/O enables')
        reg = yield self.read_reg(fn=0,addr=reg_addr)
        reg |= 1 << fn
        yield self.write_reg(fn=0,addr=reg_addr,data=reg)

    @cocotb.coroutine
    def send_abort(self,fn=1):
        """
        Send an abort to a function by writing to the ASx bits (2:0) in CCCR reg 6
        """
        reg_addr = sdio_utils.get_addr_by_name('I/O abort')
        reg = yield self.read_reg(fn=0,addr=reg_addr)
        reg &= ~0x7
        reg |= (fn & 0x7)
        yield self.write_reg(fn=0,addr=reg_addr,data=reg)
        self.log.info("Abort CMD52 sent to fn%d at %sns" %(fn,cocotb.utils.get_sim_time(units='ns')))

    @cocotb.coroutine
    def set_block_size(self,fn,blocksize):
        """
        Set the transfer blocksize for a particular function in the CIA registers (CCCRs for FN0, FBRs for FN1-8)

        Only supports functions 0 and 1 for now

        The block size is set by writing the block size to the I/O block size register
        in the FBR (Table 6-3 and 6-4 in the SDIO spec). The block size for function 0 is set
        by writing FN0 Block Size Register in the CCCRs. The block size used and maximum byte count
        per command when block transfers are not being used (block=0) is specified in the CIS (card
        information structure) in the tuple TPLFE_MAX_BLK_SIZE (section 16.7.4) on a per-function
        basis.

        FBR = function basic registers

        I/O block size is at offset 0xN10-0xN11 (two bytes) within each function's FBRs, which are
        at offset 0xN00 where N = function number, so for function 1, regs 0x110 and 0x111

        """
        if fn==0:
            assert blocksize <= self.fn_max_blocksizes[0], "Trying to set a block size (%d) for function 0 greater than it supports (%d)" %(blocksize,self.fn_max_blocksizes[0])
            fn0_blocksize_addr0 = sdio_utils._cia_base_addresses['CCCR'] + sdio_utils.get_addr_by_name('FN0 block size byte 0')
            yield self.write_reg(fn=0,addr=fn0_blocksize_addr0,data=blocksize & 0xff)
            yield self.write_reg(fn=0,addr=fn0_blocksize_addr0 + 1,data=((blocksize) >> 8) & 0xff)
        elif fn < self.fn_count:
            assert blocksize <= self.fn_max_blocksizes[fn], "Trying to set a block size (%d) for function %d greater than it supports (%d)" %(blocksize,fn,self.fn_max_blocksizes[fn])
            fn_blocksize_addr = sdio_utils._cia_base_addresses['FBR%d'%fn] + sdio_utils.get_addr_by_name('Function I/O block size byte 0')
            yield self.write_reg(fn=0,addr=fn_blocksize_addr,data=blocksize & 0xff)
            yield self.write_reg(fn=0,addr=fn_blocksize_addr + 1,data=((blocksize) >> 8) & 0xff)
        else:
            raise Exception("Cannot set block size on function %d as it doesn't exist!" %fn)
            raise ReturnValue(0)
        self.log.info("Block size of function %d set to %d" %(fn, blocksize))

    @cocotb.coroutine
    def soft_reset(self):
        """
        Soft reset the device by writing to the RES bit (bit 3) of the I/O abort register (CCCR reg 6)
        """
        reg_addr = sdio_utils.get_addr_by_name('I/O abort')
        reg = yield self.write_reg(fn=0,addr=reg_addr,data=1<<3)
        self.log.info("Soft reset CMD52 sent to device at %sns" %(cocotb.utils.get_sim_time(units='ns')))
        # We need to re-init now and so we need to reset some state
        self.init_state()
Esempio n. 9
0
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""
    def __init__(self, root_name, modules, tests=None, seed=None, hooks=[]):
        """
        Args:
            modules (list): A list of Python module names to run

        Kwargs:
            tests (str): Comma-separated list of test names to run (optional).
            seed (int): Random seed, recorded in the test results (optional).
            hooks (list): Names of modules providing hooks to load (optional).
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")
        self._seed = seed
        self._hooks = hooks

    def initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()

        suite_name = os.getenv('RESULT_TESTSUITE') if os.getenv(
            'RESULT_TESTSUITE') else "all"
        package_name = os.getenv('RESULT_TESTPACKAGE') if os.getenv(
            'RESULT_TESTPACKAGE') else "all"

        self.xunit.add_testsuite(name=suite_name,
                                 tests=repr(self.ntests),
                                 package=package_name)

        if (self._seed is not None):
            self.xunit.add_property(name="random_seed",
                                    value=("%d" % self._seed))

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                self.log.debug("Python Path: " + ",".join(sys.path))
                self.log.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                self.log.critical("Failed to import module %s: %s",
                                  module_name, E)
                self.log.info("MODULE variable was \"%s\"",
                              ".".join(self._modules))
                self.log.info("Traceback: ")
                self.log.info(traceback.format_exc())
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialize test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0",
                                                sim_time_ns="0.0",
                                                ratio_time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                        self._store_test_result(module_name, thing.name, None,
                                                0.0, 0.0, 0.0)
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: test.sort_name())

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module, valid_tests.funcname))

        for module_name in self._hooks:
            self.log.info("Loading hook from module '" + module_name + "'")
            module = _my_import(module_name)

            for thing in vars(module).values():
                if hasattr(thing, "im_hook"):
                    try:
                        test = thing(self._dut)
                    except TestError:
                        self.log.warning("Failed to initialize hook %s" %
                                         thing.name)
                    else:
                        cocotb.scheduler.add(test)
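
# A hedged sketch of how the regression manager is driven; in practice cocotb does
# this itself at simulator start-up, and the root/module/test names are illustrative.
def run_regression():
    manager = RegressionManager("tb_top", modules=["test_smoke"], tests="test_reset,test_rw")
    manager.initialise()   # discover tests and build the queue
    manager.execute()      # run them back-to-back; handle_result() chains the next test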

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        if len(self.test_results) > 0:
            self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def _add_failure(self, result):
        self.xunit.add_failure(
            stdout=repr(str(result)),
            stderr="\n".join(self._running_test.error_messages),
            message="Test failed with random_seed={}".format(self._seed))
        self.failures += 1

    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args: result (TestComplete exception)
        """
        real_time = time.time() - self._running_test.start_time
        sim_time_ns = get_sim_time('ns') - self._running_test.start_sim_time
        ratio_time = sim_time_ns / real_time
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(real_time),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        running_test_funcname = self._running_test.funcname

        # Helper for logging result
        def _result_was():
            result_was = ("%s (result was %s)" %
                          (running_test_funcname, result.__class__.__name__))
            return result_was

        result_pass = True

        if (isinstance(result, TestSuccess)
                and not self._running_test.expect_fail
                and not self._running_test.expect_error):
            self.log.info("Test Passed: %s" % running_test_funcname)

        elif (isinstance(result, TestFailure)
              and self._running_test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess)
              and self._running_test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: " + _result_was())

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test error has lead to simulator shutting us "
                               "down")
                self._add_failure(result)
                self._store_test_result(self._running_test.module,
                                        self._running_test.funcname, False,
                                        sim_time_ns, real_time, ratio_time)
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: " + _result_was())
            self._add_failure(result)
            result_pass = False

        self._store_test_result(self._running_test.module,
                                self._running_test.funcname, result_pass,
                                sim_time_ns, real_time, ratio_time)

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression.next_test()
        if self._running_test:
            start = ''
            end = ''
            if self.log.colour:
                start = ANSI.COLOR_TEST
                end = ANSI.COLOR_DEFAULT
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (start, self.count, self.ntests, end,
                           self._running_test.funcname))
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()

    def _log_test_summary(self):
        TEST_FIELD = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD = 'SIM TIME(NS)'
        REAL_FIELD = 'REAL TIME(S)'
        RATIO_FIELD = 'RATIO(NS/S)'

        TEST_FIELD_LEN = max(
            len(TEST_FIELD),
            len(max([x['test'] for x in self.test_results], key=len)))
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN = len(SIM_FIELD)
        REAL_FIELD_LEN = len(REAL_FIELD)
        RATIO_FIELD_LEN = len(RATIO_FIELD)

        LINE_LEN = 3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 + SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 + RATIO_FIELD_LEN + 3

        LINE_SEP = "*" * LINE_LEN + "\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(
            a=TEST_FIELD,
            a_len=TEST_FIELD_LEN,
            b=RESULT_FIELD,
            b_len=RESULT_FIELD_LEN,
            c=SIM_FIELD,
            c_len=SIM_FIELD_LEN,
            d=REAL_FIELD,
            d_len=REAL_FIELD_LEN,
            e=RATIO_FIELD,
            e_len=RATIO_FIELD_LEN)
        summary += LINE_SEP
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if self.log.colour:
                    hilite = ANSI.COLOR_HILITE_SUMMARY

            summary += "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n".format(
                a=result['test'],
                a_len=TEST_FIELD_LEN,
                b=pass_fail_str,
                b_len=RESULT_FIELD_LEN,
                c=result['sim'],
                c_len=SIM_FIELD_LEN - 1,
                d=result['real'],
                d_len=REAL_FIELD_LEN - 1,
                e=result['ratio'],
                e_len=RATIO_FIELD_LEN - 1,
                start=hilite)
        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self):
        real_time = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time = sim_time_ns / real_time

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {0:<39}**\n".format(
            self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {0:<39}**\n".format(
            '{0:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {0:<39}**\n".format(
            '{0:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {0:<39}**\n".format(
            '{0:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    def _store_test_result(self, module_name, test_name, result_pass, sim_time,
                           real_time, ratio):
        result = {
            'test': '.'.join([module_name, test_name]),
            'pass': result_pass,
            'sim': sim_time,
            'real': real_time,
            'ratio': ratio
        }
        self.test_results.append(result)
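
For reference, a test module that a regression manager like the one above could auto-discover might look like the sketch below. It assumes a cocotb 1.x environment where the @cocotb.test() and @cocotb.hook() decorators tag callables with the im_test/im_hook attributes checked during discovery; the DUT signal name reset_n is purely illustrative.

import cocotb
from cocotb.triggers import Timer
from cocotb.result import TestFailure


@cocotb.hook()
def my_hook(dut):
    """Started by the regression manager alongside the tests."""
    dut._log.info("Hook running")
    yield Timer(1)


@cocotb.test()
def smoke_test(dut):
    """Wait a little simulated time and check an illustrative signal."""
    yield Timer(100)
    if dut.reset_n.value.integer == 0:  # reset_n is a hypothetical DUT signal
        raise TestFailure("DUT still in reset")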
Example n. 10
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""

    def __init__(self, root_name, modules, tests=None):
        """
        Args:
            modules (list): A list of python module names to run

        Kwargs
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")

    def initialise(self):

        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0
        self.xunit = XUnitReporter()
        self.xunit.add_testsuite(name="all", tests=repr(self.ntests),
                                 package="all")

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                module = _my_import(module_name)
            except ImportError:
                self.log.critical("Failed to import module %s", module_name)
                self.log.info("MODULE variable was \"%s\"",
                                                    ",".join(self._modules))
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    if not hasattr(module, test):
                        raise AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))

                    self._queue.append(getattr(module, test)(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except TestError:
                        skip = True
                        self.log.warning("Failed to initialise test %s" %
                                         thing.name)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: "%s.%s" %
                         (test.module, test.funcname))

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module,
                           valid_tests.funcname))

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def handle_result(self, result):
        """Handle a test result

        Dumps result to XML and schedules the next test (if any)

        Args:
            result (TestComplete exception)
        """
        self.xunit.add_testcase(name=self._running_test.funcname,
                                classname=self._running_test.module,
                                time=repr(time.time() -
                                          self._running_test.start_time))

        running_test_funcname = self._running_test.funcname

        # Helper for logging result
        def _result_was():
            result_was = ("%s (result was %s)" %
                          (running_test_funcname, result.__class__.__name__))
            return result_was

        if (isinstance(result, TestSuccess) and
                not self._running_test.expect_fail and
                not self._running_test.expect_error):
            self.log.info("Test Passed: %s" % running_test_funcname)

        elif (isinstance(result, TestFailure) and
                self._running_test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess) and
              self._running_test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                   self._running_test.error_messages))
            self.failures += 1

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                   self._running_test.error_messages))
            self.failures += 1

        elif isinstance(result, TestError) and self._running_test.expect_error:
            self.log.info("Test errored as expected: " + _result_was())

        elif isinstance(result, SimFailure):
            if self._running_test.expect_error:
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error("Test error has lead to simulator shuttting us "
                               "down")
                self.failures += 1
                self.tear_down()
                return

        else:
            self.log.error("Test Failed: " + _result_was())
            self.xunit.add_failure(stdout=repr(str(result)),
                                   stderr="\n".join(
                                   self._running_test.error_messages))
            self.failures += 1

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression.next_test()
        if self._running_test:
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (ANSI.BLUE_BG + ANSI.BLACK_FG,
                           self.count, self.ntests,
                           ANSI.DEFAULT_FG + ANSI.DEFAULT_BG,
                           self._running_test.funcname))
            if self.count == 1:
                test = cocotb.scheduler.add(self._running_test)
            else:
                test = cocotb.scheduler.new_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()
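
Bringing up a manager like this is normally done by the framework itself rather than by user code. As a rough sketch, the flow looks like the following; the environment-variable handling mirrors cocotb's MODULE/TESTCASE convention referenced in the log message above, and the root handle name "top" is illustrative.

import os

modules = os.environ.get("MODULE", "test_module").split(",")
testcase = os.environ.get("TESTCASE", None)      # e.g. "test_a,test_b" or None

regression = RegressionManager("top", modules, tests=testcase)
regression.initialise()   # import modules, build the test queue
regression.execute()      # run the first test; handle_result() chains the rest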
Example n. 11
class Scheduler(object):
    """The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the :any:`react` method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn. NB implementors should not depend on the scheduling
    order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to cbReadOnlySynch (VPI) or vhpiCbLastKnownDeltaCycle
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to cbReadWriteSynch (VPI) or vhpiCbEndOfProcesses (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from normal->write by registering a ReadWrite
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL   = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE    = 3  # noqa
    _MODE_TERM     = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _readonly = ReadOnly()
    # TODO[gh-759]: For some reason, the scheduler requires that these triggers
    # are _not_ the same instances used by the tests themselves. This is risky,
    # because it can lead to them overwriting each other's callbacks. We should
    # try to remove this `copy.copy` in future.
    _next_timestep = copy.copy(NextTimeStep())
    _readwrite = copy.copy(ReadWrite())
    _timer1 = Timer(1)
    _timer0 = Timer(0)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = collections.defaultdict(list)

        # A dictionary mapping coroutines to the trigger they are waiting for
        self._coro2trigger = {}

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = {}

        self._pending_coros = []
        self._pending_callbacks = []
        self._pending_triggers = []
        self._pending_threads = []
        self._pending_events = []   # Events we need to call set on once we've unwound

        self._terminate = False
        self._test_result = None
        self._entrypoint = None
        self._main_thread = threading.current_thread()

        # Select the appropriate scheduling algorithm for this simulator
        self.advance = self.default_scheduling_algorithm
        self._is_reacting = False

    def default_scheduling_algorithm(self):
        """
        Decide whether we need to schedule our own triggers (if at all) in
        order to progress to the next mode.

        This algorithm has been tested against the following simulators:
            Icarus Verilog
        """
        if not self._terminate and self._writes:

            if self._mode == Scheduler._MODE_NORMAL:
                if not self._readwrite.primed:
                    self._readwrite.prime(self.react)
            elif not self._next_timestep.primed:
                self._next_timestep.prime(self.react)

        elif self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            for t in self._trigger2coros:
                t.unprime()

            for t in [self._readwrite, self._readonly, self._next_timestep,
                      self._timer1, self._timer0]:
                if t.primed:
                    t.unprime()

            self._timer1.prime(self.begin_test)
            self._trigger2coros = collections.defaultdict(list)
            self._coro2trigger = {}
            self._terminate = False
            self._mode = Scheduler._MODE_TERM

    def begin_test(self, trigger=None):
        """Called to initiate a test.

        Could be called on start-up or from a callback.
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            ctx = profiling_context()
        else:
            ctx = nullcontext()

        with ctx:
            self._mode = Scheduler._MODE_NORMAL
            if trigger is not None:
                trigger.unprime()

            # Issue previous test result, if there is one
            if self._test_result is not None:
                if _debug:
                    self.log.debug("Issue test result to regression object")
                cocotb.regression_manager.handle_result(self._test_result)
                self._test_result = None
            if self._entrypoint is not None:
                test = self._entrypoint
                self._entrypoint = None
                self.schedule(test)
                self.advance()

    def react(self, trigger):
        """
        Called when a trigger fires.

        We ensure that we only start the event loop once, rather than
        letting it recurse.
        """
        if self._is_reacting:
            # queue up the trigger, the event loop will get to it
            self._pending_triggers.append(trigger)
            return

        # start the event loop
        self._is_reacting = True
        try:
            self._event_loop(trigger)
        finally:
            self._is_reacting = False


    def _event_loop(self, trigger):
        """
        Run an event loop triggered by the given trigger.

        The loop will keep running until no further triggers fire.

        This should be triggered by only:
        * The beginning of a test, when there is no trigger to react to
        * A GPI trigger
        """
        if _profiling:
            ctx = profiling_context()
        else:
            ctx = nullcontext()

        with ctx:
            # When a trigger fires it is unprimed internally
            if _debug:
                self.log.debug("Trigger fired: %s" % str(trigger))
            # trigger.unprime()

            if self._mode == Scheduler._MODE_TERM:
                if _debug:
                    self.log.debug("Ignoring trigger %s since we're terminating" %
                                   str(trigger))
                return

            if trigger is self._readonly:
                self._mode = Scheduler._MODE_READONLY
            # Only GPI triggers affect the simulator scheduling mode
            elif isinstance(trigger, GPITrigger):
                self._mode = Scheduler._MODE_NORMAL

            # We're the only source of ReadWrite triggers which are only used for
            # playing back any cached signal updates
            if trigger is self._readwrite:

                if _debug:
                    self.log.debug("Writing cached signal updates")

                while self._writes:
                    handle, value = self._writes.popitem()
                    handle.setimmediatevalue(value)

                self._readwrite.unprime()

                return

            # Similarly if we've scheduled our next_timestep on the way to readwrite
            if trigger is self._next_timestep:

                if not self._writes:
                    self.log.error(
                        "Moved to next timestep without any pending writes!")
                else:
                    self.log.debug(
                        "Priming ReadWrite trigger so we can playback writes")
                    self._readwrite.prime(self.react)

                return

            # work through triggers one by one
            is_first = True
            self._pending_triggers.append(trigger)
            while self._pending_triggers:
                trigger = self._pending_triggers.pop(0)

                if not is_first and isinstance(trigger, GPITrigger):
                    self.log.warning(
                        "A GPI trigger occurred after entering react - this "
                        "should not happen."
                    )
                    assert False

                # this only exists to enable the warning above
                is_first = False

                if trigger not in self._trigger2coros:

                    # GPI triggers should only be ever pending if there is an
                    # associated coroutine waiting on that trigger, otherwise it would
                    # have been unprimed already
                    if isinstance(trigger, GPITrigger):
                        self.log.critical(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                        trigger.log.info("I'm the culprit")
                    # For Python triggers this isn't actually an error - we might do
                    # event.set() without knowing whether any coroutines are actually
                    # waiting on this event, for example
                    elif _debug:
                        self.log.debug(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                    continue

                # Scheduled coroutines may append to our waiting list so the first
                # thing to do is pop all entries waiting on this trigger.
                scheduling = self._trigger2coros.pop(trigger)

                if _debug:
                    debugstr = "\n\t".join([coro.__name__ for coro in scheduling])
                    if len(scheduling):
                        debugstr = "\n\t" + debugstr
                    self.log.debug("%d pending coroutines for event %s%s" %
                                   (len(scheduling), str(trigger), debugstr))

                # This trigger isn't needed any more
                trigger.unprime()

                for coro in scheduling:
                    if _debug:
                        self.log.debug("Scheduling coroutine %s" % (coro.__name__))
                    self.schedule(coro, trigger=trigger)
                    if _debug:
                        self.log.debug("Scheduled coroutine %s" % (coro.__name__))

                # Schedule may have queued up some events so we'll burn through those
                while self._pending_events:
                    if _debug:
                        self.log.debug("Scheduling pending event %s" %
                                       (str(self._pending_events[0])))
                    self._pending_events.pop(0).set()

            # no more pending triggers
            self.advance()
            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")


    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        # Unprime the trigger this coroutine is waiting on
        try:
            trigger = self._coro2trigger.pop(coro)
        except KeyError:
            # coroutine probably finished
            pass
        else:
            if coro in self._trigger2coros[trigger]:
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
                del self._trigger2coros[trigger]

        if Join(coro) in self._trigger2coros:
            self._pending_triggers.append(Join(coro))
        else:
            try:
                # throws an error if the background coroutine errored
                # and no one was monitoring it
                coro.retval
            except Exception as e:
                self._test_result = TestError(
                    "Forked coroutine {} raised exception {}"
                    .format(coro, e)
                )
                self._terminate = True

    def save_write(self, handle, value):
        if self._mode == Scheduler._MODE_READONLY:
            raise Exception("Write to object {0} was scheduled during a read-only sync phase.".format(handle._name))
        self._writes[handle] = value

    def _coroutine_yielded(self, coro, trigger):
        """Prime the trigger and update our internal mappings."""
        self._coro2trigger[coro] = trigger

        self._trigger2coros[trigger].append(coro)
        if not trigger.primed:
            try:
                trigger.prime(self.react)
            except Exception as e:
                # Convert any exceptions into a test result
                self.finish_test(
                    create_error(self, "Unable to prime trigger %s: %s" %
                                 (str(trigger), str(e))))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def queue_function(self, coroutine):
        """Queue a coroutine for execution and move the containing thread
        so that it does not block execution of the main thread any longer.
        """

        # We should be able to find ourselves inside the _pending_threads list

        for t in self._pending_threads:
            if t.thread == threading.current_thread():
                t.thread_suspend()
                self._pending_coros.append(coroutine)
                return t


    def run_in_executor(self, func, *args, **kwargs):
        """Run the coroutine in a separate execution thread
        and return a yieldable object for the caller.
        """
        # Create a thread
        # Create a trigger that is called as a result of the thread finishing
        # Create an Event object that the caller can yield on
        # Event object set when the thread finishes execution, this blocks the
        #   calling coroutine (but not the thread) until the external completes

        def execute_external(func, _waiter):
            _waiter._outcome = outcomes.capture(func, *args, **kwargs)
            if _debug:
                self.log.debug("Execution of external routine done %s" % threading.current_thread())
            _waiter.thread_done()

        waiter = external_waiter()
        thread = threading.Thread(group=None, target=execute_external,
                                  name=func.__name__ + "_thread",
                                  args=([func, waiter]), kwargs={})

        waiter.thread = thread
        self._pending_threads.append(waiter)

        return waiter

    def add(self, coroutine):
        """Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error messages in the event of common gotchas.
        """
        if isinstance(coroutine, cocotb.decorators.coroutine):
            self.log.critical(
                "Attempt to schedule a coroutine that hasn't started")
            coroutine.log.error("This is the failing coroutine")
            self.log.warning(
                "Did you forget to add parentheses to the @test decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):
            self.log.critical(
                "Attempt to add something to the scheduler which isn't a "
                "coroutine")
            self.log.warning(
                "Got: %s (%s)" % (str(type(coroutine)), repr(coroutine)))
            self.log.warning("Did you use the @coroutine decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        if _debug:
            self.log.debug("Adding new coroutine %s" % coroutine.__name__)

        self.schedule(coroutine)
        self.advance()
        return coroutine

    def new_test(self, coroutine):
        self._entrypoint = coroutine

    def schedule(self, coroutine, trigger=None):
        """Schedule a coroutine by calling the send method.

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule.
            trigger (cocotb.triggers.Trigger): The trigger that caused this
                coroutine to be scheduled.
        """
        if trigger is None:
            send_outcome = outcomes.Value(None)
        else:
            send_outcome = trigger._outcome
        if _debug:
            self.log.debug("Scheduling with {}".format(send_outcome))

        try:
            result = coroutine._advance(send_outcome)
            if _debug:
                self.log.debug("Coroutine %s yielded %s (mode %d)" %
                               (coroutine.__name__, str(result), self._mode))

        # TestComplete indication is game over, tidy up
        except TestComplete as test_result:
            # Tag that close down is needed, save the test_result
            # for later use in cleanup handler
            self.log.debug("TestComplete received: %s" % test_result.__class__.__name__)
            self.finish_test(test_result)
            return

        # Normal coroutine completion
        except cocotb.decorators.CoroutineComplete as exc:
            if _debug:
                self.log.debug("Coroutine completed: %s" % str(coroutine))
            self.unschedule(coroutine)
            return

        # Don't handle the result if we're shutting down
        if self._terminate:
            return

        # convert lists into `First` Waitables.
        if isinstance(result, list):
            result = cocotb.triggers.First(*result)

        # convert waitables into coroutines
        if isinstance(result, cocotb.triggers.Waitable):
            result = result._wait()

        # convert coroutines into triggers
        if isinstance(result, cocotb.decorators.RunningCoroutine):
            if not result.has_started():
                self.queue(result)
                if _debug:
                    self.log.debug("Scheduling nested coroutine: %s" %
                                   result.__name__)
            else:
                if _debug:
                    self.log.debug("Joining to already running coroutine: %s" %
                                   result.__name__)

            result = result.join()

        if isinstance(result, Trigger):
            if _debug:
                self.log.debug("%s: is instance of Trigger" % result)
            self._coroutine_yielded(coroutine, result)

        else:
            msg = ("Coroutine %s yielded something the scheduler can't handle"
                   % str(coroutine))
            msg += ("\nGot type: %s repr: %s str: %s" %
                    (type(result), repr(result), str(result)))
            msg += "\nDid you forget to decorate with @cocotb.coroutine?"
            try:
                raise_error(self, msg)
            except Exception as e:
                self.finish_test(e)

        # We do not return from here until pending threads have completed, but
        # only from the main thread; this seems like it could be problematic in
        # cases where a sim might change what this thread is.
        def unblock_event(ext):
            @cocotb.coroutine
            def wrapper():
                ext.event.set()
                yield PythonTrigger()

        if self._main_thread is threading.current_thread():

            for ext in self._pending_threads:
                ext.thread_start()
                if _debug:
                    self.log.debug("Blocking from %s on %s" % (threading.current_thread(), ext.thread))
                state = ext.thread_wait()
                if _debug:
                    self.log.debug("Back from wait on self %s with newstate %d" % (threading.current_thread(), state))
                if state == external_state.EXITED:
                    self._pending_threads.remove(ext)
                    self._pending_events.append(ext.event)

        # Handle any newly queued coroutines that need to be scheduled
        while self._pending_coros:
            self.add(self._pending_coros.pop(0))

        while self._pending_callbacks:
            self._pending_callbacks.pop(0)()


    def finish_test(self, test_result):
        """Cache the test result and set the terminate flag."""
        self.log.debug("finish_test called with %s" % (repr(test_result)))
        if not self._terminate:
            self._terminate = True
            self._test_result = test_result
            self.cleanup()

    def finish_scheduler(self, test_result):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed.
        """
        self.log.debug("Issue sim closedown result to regression object")
        cocotb.regression_manager.handle_result(test_result)

    def cleanup(self):
        """Clear up all our state.

        Unprime all pending triggers, kill off any coroutines, and stop all externals.
        """
        for trigger, waiting in dict(self._trigger2coros).items():
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()

        if self._main_thread is not threading.current_thread():
            raise Exception("Cleanup() called outside of the main thread")

        for ext in self._pending_threads:
            self.log.warn("Waiting for %s to exit", ext.thread)
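
From a coroutine's point of view, the modes described in the class docstring look roughly like the sketch below. The signal names are illustrative; the point is that the assignment via "<=" ends up in save_write() and is only applied when the cached updates are played back in the ReadWrite phase, while a write attempted once the scheduler is in read-only mode would raise inside save_write().

import cocotb
from cocotb.triggers import RisingEdge, ReadWrite, ReadOnly


@cocotb.coroutine
def sample_and_drive(dut):
    yield RisingEdge(dut.clk)   # resumed in normal mode: writes are permitted
    dut.data <= 0x55            # cached via save_write(), not applied yet

    yield ReadWrite()           # write mode: cached updates are played back
    yield ReadOnly()            # read-only mode: sample settled values only
    settled = dut.data.value    # writing here would raise in save_write()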
Example n. 12
class Scheduler(object):

    def __init__(self):
        self.waiting = collections.defaultdict(list)
        self.delay_waiting = collections.defaultdict(list)
        self.log = SimLog("cocotb.scheduler")
        self.writes = {}
        self.writes_lock = threading.RLock()
        self._remove = []
        self._pending_adds = []
        self._startpoint = None
        self._terminate = False
        self._test_result = None
        self._do_cleanup = None
        self._entry_lock = RLock()
        self._external_trigger = threading.Semaphore(1)
        self._external_trigger._value = False
        self._external = False
        self._readonly = False
        self._react_timer = None
        # Keep this last
        self._readwrite = self.add(self.move_to_rw())

    def react(self, trigger):
        """
        React called when a trigger fires.  We find any coroutines that are waiting on the particular
            trigger and schedule them.
        """
        trigger.log.debug("Fired!")

        if isinstance(trigger, ReadOnly):
            self.enable_react_delay()
            self._readonly = True

        if trigger not in self.waiting:
            # This isn't actually an error - often might do event.set() without knowing
            # whether any coroutines are actually waiting on this event
            # NB should catch a GPI trigger because that would be catastrophic
            self.log.debug("Not waiting on trigger that fired! (%s)" % (str(trigger)))
            return

        # Scheduled coroutines may append to our waiting list
        # so the first thing to do is pop all entries waiting
        # on this trigger.
        self._scheduling = self.waiting.pop(trigger)
        to_run = len(self._scheduling)

        self.log.debug("%d pending coroutines for event %s" % (to_run, trigger))

        while self._scheduling:
            coroutine = self._scheduling.pop(0)
            del_list = trigger.clearpeers()
            while del_list:
                self.remove(del_list.pop(0))
            self.schedule(coroutine, trigger=trigger)
            self.log.debug("Scheduled coroutine %s" % (coroutine.__name__))

        # Various interactions with the simulator cannot occur during
        # ReadOnly event periods
        if self._readonly is False:

            # We may also have routines that need to be added since
            # the exit from ReadOnly.  Iterate over copies since we mutate
            # both the dict and its lists while re-adding the triggers.
            for ptrigger, pwaiting in list(self.delay_waiting.items()):
                for pcoro in list(pwaiting):
                    self.delay_waiting[ptrigger].remove(pcoro)
                    self._add_trigger(ptrigger, pcoro)
                del self.delay_waiting[ptrigger]

            # If the python has caused any subsequent events to fire we might
            # need to schedule more coroutines before we drop back into the
            # simulator
            self._entry_lock.acquire()
            while self._pending_adds:
                coroutine = self._pending_adds.pop(0)
                self._entry_lock.release()
                self.add(coroutine)
                self._entry_lock.acquire()
            self._entry_lock.release()

            # If we've performed any writes that are cached then schedule
            # another callback for the read-write part of the sim cycle, but
            # if we are terminating then do not allow another callback to be
            # scheduled, only do this if this trigger was not ReadOnly as
            # Scheduling ReadWrite is a violation, it will be picked up
            # on next react
            if self._terminate is False and len(self.writes) and self._readwrite is None:
                self._readwrite = self.add(self.move_to_rw())

        return

    def set_external(self):
        """ Take the semaphore to indicate to the react later that there an external
        is being added to the list
        """
#        self._external_trigger.acquire()
        self._external_trigger._value = True

    def playout_writes(self):
        if self.writes:
            while self.writes:
                handle, args = self.writes.popitem()
                handle.setimmediatevalue(args)


    def save_write(self, handle, args):
        self.writes[handle] = args

    def _add_trigger(self, trigger, coroutine):
        """Adds a new trigger which will cause the coroutine to continue when fired"""
        try:
            # If we are in readonly for the currently firing trigger then new coroutines
            # are not added to the waiting list and primed, they are instead
            # added to a secondary list of events that will then be handled on the next
            # entry to react when we exit ReadOnly into NextTimeStep
            if self._readonly is True:
                self._entry_lock.acquire()
                self.delay_waiting[trigger].append(coroutine)
                self._entry_lock.release()
            else:
                self._entry_lock.acquire()
                self.waiting[trigger].append(coroutine)
                self._entry_lock.release()
                # We drop the lock before calling out to the simulator (most likely consequence of prime)
                trigger.prime(self.react)

        except TestError as e:
            self.waiting[trigger].remove(coroutine)
            # Do not re-call raise_error since the error will already be logged at point of interest
            raise e

        except Exception as e:
            self.waiting[trigger].remove(coroutine)
            raise_error(self, "Unable to prime a trigger: %s" % str(e))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._entry_lock.acquire()
        self._pending_adds.append(coroutine)
        self._entry_lock.release()

    def add(self, coroutine):
        """Add a new coroutine. Required because we cant send to a just started generator (FIXME)"""

        if isinstance(coroutine, cocotb.decorators.coroutine):
            self.log.critical("Attempt to schedule a coroutine that hasn't started")
            coroutine.log.error("This is the failing coroutine")
            self.log.warning("Did you forget to add paranthesis to the @test decorator?")
            self._result = TestError("Attempt to schedule a coroutine that hasn't started")
            self.cleanup()
            return

        elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):
            self.log.critical("Attempt to add something to the scheduler which isn't a coroutine")
            self.log.warning("Got: %s (%s)" % (str(type(coroutine)), repr(coroutine)))
            self.log.warning("Did you use the @coroutine decorator?")
            self._result = TestError("Attempt to schedule a coroutine that hasn't started")
            self.cleanup()
            return


        self.log.debug("Queuing new coroutine %s" % coroutine.__name__)
        self.log.debug("Adding  %s" % coroutine.__name__)
        self.schedule(coroutine)
        return coroutine

    def new_test(self, coroutine):
        self._startpoint = coroutine

    def remove(self, trigger):
        """Remove a trigger from the list of pending coroutines"""
        self._entry_lock.acquire()
        self.waiting.pop(trigger)
        self._entry_lock.release()
        trigger.unprime()

    def schedule_remove(self, coroutine, callback):
        """Adds the specified coroutine to the list of routines
           That will be removed at the end of the current loop
        """
        self._entry_lock.acquire()
        self._remove.append((coroutine, callback))
        self._entry_lock.release()

    def prune_routines(self):
        """
        Process the remove list that can have accumulated during the
        execution of a parent routine
        """
        while self._remove:
            self._entry_lock.acquire()
            delroutine, cb = self._remove.pop(0)
            # Iterate over copies since the waiting structures are mutated below
            for trigger, waiting in list(self.waiting.items()):
                for coro in list(waiting):
                    if coro is delroutine:
                        self.log.debug("Closing %s" % str(coro))
                        self._entry_lock.release()
                        cb()
                        self._entry_lock.acquire()
                        self.waiting[trigger].remove(coro)
                        self._entry_lock.release()
                        coro.close()
                        self._entry_lock.acquire()
            # Clean up any triggers that no longer have pending coroutines
            for trigger, waiting in list(self.waiting.items()):
                if not waiting:
                    self._entry_lock.release()
                    trigger.unprime()
                    self._entry_lock.acquire()
                    del self.waiting[trigger]
            self._entry_lock.release()

    def schedule(self, coroutine, trigger=None):
        """
        Schedule a coroutine by calling the send method

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule

            trigger (cocotb.triggers.Trigger): The trigger that caused this
                                                coroutine to be scheduled
        """
        if hasattr(trigger, "pass_retval"):
            self.log.debug("Coroutine returned a retval")
            sendval = trigger.retval
        else:
            coroutine.log.debug("Scheduling (%s)" % str(trigger))
            sendval = trigger
        try:

            try:
                result = coroutine.send(sendval)

            # Normal co-routine completion
            except cocotb.decorators.CoroutineComplete as exc:
                self.log.debug("Coroutine completed execution with CoroutineComplete: %s" % str(coroutine))

                # Call any pending callbacks that were waiting for this coroutine to exit
                exc()
                return

            # Entries may have been added to the remove list while the
            # coroutine was running, clear these down and deschedule
            # before resuming
            if self._terminate is False:
                self.prune_routines()

            if isinstance(result, Trigger):
                self._add_trigger(result, coroutine)
            elif isinstance(result, cocotb.decorators.RunningCoroutine):
                if self._terminate is False:
                    self.log.debug("Scheduling nested co-routine: %s" % result.__name__)

                    # Queue current routine to schedule when the nested routine exits
                    self.queue(result)
                    new_trigger = result.join()
                    new_trigger.pass_retval = True
                    self._add_trigger(new_trigger, coroutine)

            elif isinstance(result, list):
                for trigger in result:
                    trigger.addpeers(result)
                    self._add_trigger(trigger, coroutine)
            else:
                msg = "Coroutine %s yielded something that was not a trigger or a coroutine!" % str(coroutine)
                msg += "\nGot type: %s repr: %s str: %s" % (type(result), repr(result), str(result))
                msg += "\nDid you forget to decorate with @cocotb.cocorutine?"
                raise_error(self, msg)

        # TestComplete indication is game over, tidy up
        except TestComplete as test_result:
            # Tag that close down is needed, save the test_result
            # for later use in cleanup handler
            # If we're already tearing down we ignore any further test results
            # that may be raised. Required because currently Python triggers don't unprime
            if not self._terminate:
                self.finish_test(test_result)
                return

        coroutine.log.debug("Finished sheduling coroutine (%s)" % str(trigger))

    def finish_scheduler(self, test_result):
        # if the sim itself has issued a closedown then the
        # normal shutdown will not work
        self.cleanup()
        self.issue_result(test_result)

    def finish_test(self, test_result):
        if not self._terminate:
            self._terminate = True
            self._test_result = test_result
            self.cleanup()
            self._do_cleanup = self.add(self.move_to_cleanup())

    def cleanup(self):
        """ Clear up all our state

            Unprime all pending triggers and kill off any coroutines"""
        for trigger, waiting in self.waiting.items():
            for coro in waiting:
                 self.log.debug("Killing %s" % str(coro))
                 coro.kill()

    def issue_result(self, test_result):
        # Tell the handler what the result was
        self.log.debug("Issue test result to regresssion object")
        cocotb.regression.handle_result(test_result)

    @cocotb.decorators.coroutine
    def move_to_cleanup(self):
        yield Timer(1)
        self.prune_routines()
        self._do_cleanup = None

        self.issue_result(self._test_result)
        self._test_result = None

        # If another test was added to queue kick it off
        self._terminate = False
        if self._startpoint is not None:
            newstart = self._startpoint
            self._startpoint = None
            self.add(newstart)

        self.log.debug("Cleanup done")


    @cocotb.decorators.coroutine
    def move_to_rw(self):
        yield ReadWrite()
        self._readwrite = None
        self.playout_writes()

    @cocotb.decorators.coroutine
    def internal_clock(self, clock):
        while True:
            yield RisingEdge(clock)

    def enable_react_delay(self):
        if self._react_timer is None:
            self._react_timer = self.add(self.react_delay())

    @cocotb.decorators.coroutine
    def react_delay(self):
        yield NextTimeStep()
        self._react_timer = None
        self._readonly = False
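
One feature specific to this older scheduler is the peer handling for lists of triggers: yielding a list waits on whichever trigger fires first, and addpeers()/clearpeers() are used in schedule() and react() above to unprime the rest. A minimal sketch of a coroutine relying on that behaviour (the clock signal and timeout value are illustrative; the scheduler resumes the coroutine with the trigger that actually fired):

import cocotb
from cocotb.triggers import RisingEdge, Timer


@cocotb.coroutine
def edge_or_timeout(dut):
    fired = yield [RisingEdge(dut.clk), Timer(10000)]
    if isinstance(fired, Timer):
        cocotb.log.warning("Timed out waiting for a clock edge")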
Example n. 13
class Scoreboard:
    """Generic scoreboarding class.

    We can add interfaces by providing a monitor and an expected output queue.

    The expected output can either be a function which provides a transaction
    or a simple list containing the expected output.

    TODO:
        Statistics for end-of-test summary etc.

    Args:
        dut (SimHandle): Handle to the DUT.
        reorder_depth (int, optional): Consider up to `reorder_depth` elements
            of the expected result list as passing matches.
            Default is 0, meaning only the first element in the expected result list
            is considered for a passing match.
        fail_immediately (bool, optional): Raise :any:`TestFailure`
            immediately when something is wrong instead of just
            recording an error. Default is ``True``.

    .. deprecated:: 1.4.1
    """

    def __init__(self, dut, reorder_depth=0, fail_immediately=True):  # FIXME: reorder_depth needed here?
        self.dut = dut
        self.log = SimLog("cocotb.scoreboard.%s" % self.dut._name)
        self.errors = 0
        self.expected = {}
        self._imm = fail_immediately

        warnings.warn(
            "This Scoreboard implementation has been deprecated and will be removed soon.\n"
            "If this implementation works for you, copy the implementation into your project, "
            "while following cocotb's license agreement.",
            DeprecationWarning)

    @property
    def result(self):
        """Determine the test result, do we have any pending data remaining?

        Returns:
            :any:`TestFailure`: If not all expected output was received or
            errors were recorded during the test.
        """
        fail = False
        for monitor, expected_output in self.expected.items():
            if callable(expected_output):
                self.log.debug("Can't check all data returned for %s since "
                               "expected output is callable function rather "
                               "than a list" % str(monitor))
                continue
            if len(expected_output):
                self.log.warning("Still expecting %d transactions on %s" %
                                 (len(expected_output), str(monitor)))
                for index, transaction in enumerate(expected_output):
                    self.log.info("Expecting %d:\n%s" %
                                  (index, hexdump(str(transaction))))
                    if index > 5:
                        self.log.info("... and %d more to come" %
                                      (len(expected_output) - index - 1))
                        break
                fail = True
        if fail:
            return TestFailure("Not all expected output was received")
        if self.errors:
            return TestFailure("Errors were recorded during the test")
        return TestSuccess()

    def compare(self, got, exp, log, strict_type=True):
        """Common function for comparing two transactions.

        Can be re-implemented by a sub-class.

        Args:
            got: The received transaction.
            exp: The expected transaction.
            log: The logger for reporting messages.
            strict_type (bool, optional): Require transaction type to match
                exactly if ``True``, otherwise compare its string representation.

        Raises:
            :any:`TestFailure`: If received transaction differed from
                expected transaction when :attr:`fail_immediately` is ``True``.
                If *strict_type* is ``True``,
                also the transaction type must match.
        """

        # Compare the types
        if strict_type and type(got) != type(exp):
            self.errors += 1
            log.error("Received transaction type is different than expected")
            log.info("Received: %s but expected %s" %
                     (str(type(got)), str(type(exp))))
            if self._imm:
                raise TestFailure("Received transaction of wrong type. "
                                  "Set strict_type=False to avoid this.")
            return
        # Or convert to a string before comparison
        elif not strict_type:
            got, exp = str(got), str(exp)

        # Compare directly
        if got != exp:
            self.errors += 1

            # Try our best to print out something useful
            strgot, strexp = str(got), str(exp)

            log.error("Received transaction differed from expected output")
            if not strict_type:
                log.info("Expected:\n" + hexdump(strexp))
            else:
                log.info("Expected:\n" + repr(exp))
            if not isinstance(exp, str):
                try:
                    for word in exp:
                        log.info(str(word))
                except Exception:
                    pass
            if not strict_type:
                log.info("Received:\n" + hexdump(strgot))
            else:
                log.info("Received:\n" + repr(got))
            if not isinstance(got, str):
                try:
                    for word in got:
                        log.info(str(word))
                except Exception:
                    pass
            log.warning("Difference:\n%s" % hexdiffs(strexp, strgot))
            if self._imm:
                raise TestFailure("Received transaction differed from expected "
                                  "transaction")
        else:
            # Don't want to fail the test
            # if we're passed something without __len__
            try:
                log.debug("Received expected transaction %d bytes" %
                          (len(got)))
                log.debug(repr(got))
            except Exception:
                pass

    def add_interface(self, monitor, expected_output, compare_fn=None,
                      reorder_depth=0, strict_type=True):
        """Add an interface to be scoreboarded.

        Provides a function which the monitor will callback with received
        transactions.

        Simply check against the expected output.

        Args:
            monitor: The monitor object.
            expected_output: Queue of expected outputs.
            compare_fn (callable, optional): Function doing the actual comparison.
            reorder_depth (int, optional): Consider up to *reorder_depth* elements
                of the expected result list as passing matches.
                Default is 0, meaning only the first element in the expected result list
                is considered for a passing match.
            strict_type (bool, optional): Require transaction type to match
                exactly if ``True``, otherwise compare its string representation.

        Raises:
            :any:`TypeError`: If no monitor is on the interface or
                *compare_fn* is not a callable function.
        """
        # save a handle to the expected output so we can check if all expected
        # data has been received at the end of a test.
        self.expected[monitor] = expected_output

        # Enforce some type checking as we only work with a real monitor
        if not isinstance(monitor, Monitor):
            raise TypeError("Expected monitor on the interface but got %s" %
                            (type(monitor).__qualname__))

        if compare_fn is not None:
            if callable(compare_fn):
                monitor.add_callback(compare_fn)
                return
            raise TypeError("Expected a callable compare function but got %s" %
                            str(type(compare_fn)))

        self.log.info("Created with reorder_depth %d" % reorder_depth)

        def check_received_transaction(transaction):
            """Called back by the monitor when a new transaction has been
            received."""

            if monitor.name:
                log_name = self.log.name + '.' + monitor.name
            else:
                log_name = self.log.name + '.' + type(monitor).__qualname__

            log = logging.getLogger(log_name)

            if callable(expected_output):
                exp = expected_output(transaction)

            elif len(expected_output):  # we expect something
                for i in range(min((reorder_depth + 1), len(expected_output))):
                    if expected_output[i] == transaction:
                        break  # break out of enclosing for loop
                else:  # run when for loop is exhausted (but no break occurs)
                    i = 0
                exp = expected_output.pop(i)
            else:
                self.errors += 1
                log.error("Received a transaction but wasn't expecting "
                          "anything")
                log.info("Got: %s" % (hexdump(str(transaction))))
                if self._imm:
                    raise TestFailure("Received a transaction but wasn't "
                                      "expecting anything")
                return

            self.compare(transaction, exp, log, strict_type=strict_type)

        monitor.add_callback(check_received_transaction)
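
A minimal usage sketch for the compare/add_interface machinery above. The monitor subclass, the DUT signal names (clk, data) and the Timer length are illustrative assumptions, not taken from the code above:

# Hedged usage sketch for the Scoreboard code shown above (names are assumptions).
import cocotb
from cocotb.monitors import Monitor
from cocotb.scoreboard import Scoreboard
from cocotb.triggers import ReadOnly, RisingEdge, Timer


class StreamMonitor(Monitor):
    """Illustrative monitor: samples dut.data on every rising clock edge."""
    def __init__(self, dut):
        self.dut = dut
        Monitor.__init__(self)

    @cocotb.coroutine
    def _monitor_recv(self):
        while True:
            yield RisingEdge(self.dut.clk)
            yield ReadOnly()
            # Hand the observed transaction to the scoreboard callback
            # (check_received_transaction above).
            self._recv(int(self.dut.data.value))


@cocotb.test()
def scoreboard_smoke_test(dut):
    expected = [1, 2, 3]                      # queue of expected outputs
    sb = Scoreboard(dut)                      # fail_immediately=True by default
    # reorder_depth=1 allows a match from the first two queued items;
    # strict_type=False falls back to the string/hexdump comparison above.
    sb.add_interface(StreamMonitor(dut), expected,
                     reorder_depth=1, strict_type=False)

    yield Timer(100, units="ns")              # let the monitor observe traffic
    raise sb.result                           # TestFailure if any compare failed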
Esempio n. 14
class Scheduler(object):
    """
    The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the :any:`react` method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn. NB implementors should not depend on the scheduling
    order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to cbReadOnlySynch (VPI) or vhpiCbLastKnownDeltaCycle
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to cbReadWriteSynch (VPI) or vhpiCbEndOfProcesses (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from normal->write by registering a ReadWrite
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL   = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE    = 3  # noqa
    _MODE_TERM     = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _readonly = ReadOnly()
    _next_timestep = _NextTimeStep()
    _readwrite = _ReadWrite()
    _timer1 = Timer(1)
    _timer0 = Timer(0)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = collections.defaultdict(list)

        # A dictionary of pending triggers for each coroutine, indexed by coro
        self._coro2triggers = collections.defaultdict(list)

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = {}

        self._pending_coros = []
        self._pending_callbacks = []
        self._pending_triggers = []

        self._terminate = False
        self._test_result = None
        self._entrypoint = None

        # Select the appropriate scheduling algorithm for this simulator
        self.advance = self.default_scheduling_algorithm

    def default_scheduling_algorithm(self):
        """
        Decide whether we need to schedule our own triggers (if at all) in
        order to progress to the next mode.

        This algorithm has been tested against the following simulators:
            Icarus Verilog
        """
        if not self._terminate and self._writes:

            if self._mode == Scheduler._MODE_NORMAL:
                if not self._readwrite.primed:
                    self._readwrite.prime(self.react)
            elif not self._next_timestep.primed:
                self._next_timestep.prime(self.react)

        elif self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            for t in self._trigger2coros:
                t.unprime()

            for t in [self._readwrite, self._readonly, self._next_timestep,
                      self._timer1, self._timer0]:
                if t.primed:
                    t.unprime()

            self._timer1.prime(self.begin_test)
            self._trigger2coros = collections.defaultdict(list)
            self._coro2triggers = collections.defaultdict(list)
            self._terminate = False
            self._mode = Scheduler._MODE_TERM

    def begin_test(self, trigger=None):
        """
        Called to initiate a test.

        Could be called on start-up or from a callback.
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            _profile.enable()

        self._mode = Scheduler._MODE_NORMAL
        if trigger is not None:
            trigger.unprime()

        # Issue previous test result, if there is one
        if self._test_result is not None:
            if _debug:
                self.log.debug("Issue test result to regresssion object")
            cocotb.regression.handle_result(self._test_result)
            self._test_result = None
        if self._entrypoint is not None:
            test = self._entrypoint
            self._entrypoint = None
            self.schedule(test)
            self.advance()

        if _profiling:
            _profile.disable()

    def react(self, trigger, depth=0):
        """
        Called when a trigger fires.

        We find any coroutines that are waiting on the particular trigger and
        schedule them.
        """
        if _profiling and not depth:
            _profile.enable()

        # When a trigger fires it is unprimed internally
        if _debug:
            self.log.debug("Trigger fired: %s" % str(trigger))
        # trigger.unprime()

        if self._mode == Scheduler._MODE_TERM:
            if _debug:
                self.log.debug("Ignoring trigger %s since we're terminating" %
                               str(trigger))
            return

        if trigger is self._readonly:
            self._mode = Scheduler._MODE_READONLY
        # Only GPI triggers affect the simulator scheduling mode
        elif isinstance(trigger, GPITrigger):
            self._mode = Scheduler._MODE_NORMAL

        # We're the only source of ReadWrite triggers which are only used for
        # playing back any cached signal updates
        if trigger is self._readwrite:

            if _debug:
                self.log.debug("Writing cached signal updates")

            while self._writes:
                handle, value = self._writes.popitem()
                handle.setimmediatevalue(value)

            self._readwrite.unprime()

            if _profiling:
                _profile.disable()
            return

        # Similarly if we've scheduled our next_timestep on way to readwrite
        if trigger is self._next_timestep:

            if not self._writes:
                self.log.error(
                    "Moved to next timestep without any pending writes!")
            else:
                self.log.debug(
                    "Priming ReadWrite trigger so we can playback writes")
                self._readwrite.prime(self.react)

            if _profiling:
                _profile.disable()
            return

        if trigger not in self._trigger2coros:

            # GPI triggers should only be ever pending if there is an
            # associated coroutine waiting on that trigger, otherwise it would
            # have been unprimed already
            if isinstance(trigger, GPITrigger):
                self.log.critical(
                    "No coroutines waiting on trigger that fired: %s" %
                    str(trigger))

                trigger.log.info("I'm the culprit")
            # For Python triggers this isn't actually an error - we might do
            # event.set() without knowing whether any coroutines are actually
            # waiting on this event, for example
            elif _debug:
                self.log.debug(
                    "No coroutines waiting on trigger that fired: %s" %
                    str(trigger))

            if _profiling:
                _profile.disable()
            return

        # Scheduled coroutines may append to our waiting list so the first
        # thing to do is pop all entries waiting on this trigger.
        scheduling = self._trigger2coros.pop(trigger)

        if _debug:
            debugstr = "\n\t".join([coro.__name__ for coro in scheduling])
            if len(scheduling):
                debugstr = "\n\t" + debugstr
            self.log.debug("%d pending coroutines for event %s%s" %
                           (len(scheduling), str(trigger), debugstr))

        # If the coroutine was waiting on multiple triggers we may be able
        # to unprime the other triggers that didn't fire
        for coro in scheduling:
            for pending in self._coro2triggers[coro]:
                for others in self._trigger2coros[pending]:
                    if others not in scheduling:
                        break
                else:
                    # if pending is not trigger and pending.primed:
                    #     pending.unprime()
                    if pending.primed:
                        pending.unprime()
                    del self._trigger2coros[pending]

        for coro in scheduling:
            self.schedule(coro, trigger=trigger)
            if _debug:
                self.log.debug("Scheduled coroutine %s" % (coro.__name__))

        while self._pending_triggers:
            if _debug:
                self.log.debug("Scheduling pending trigger %s" %
                               (str(self._pending_triggers[0])))
            self.react(self._pending_triggers.pop(0), depth=depth + 1)

        # We only advance for GPI triggers
        if not depth and isinstance(trigger, GPITrigger):
            self.advance()

            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")

            if _profiling:
                _profile.disable()
        return

    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        for trigger in self._coro2triggers[coro]:
            if coro in self._trigger2coros[trigger]:
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
        del self._coro2triggers[coro]

        if coro._join in self._trigger2coros:
            self._pending_triggers.append(coro._join)

        # Remove references to allow GC to clean up
        del coro._join

    def save_write(self, handle, value):
        self._writes[handle] = value

    def _coroutine_yielded(self, coro, triggers):
        """
        Prime the triggers and update our internal mappings
        """
        self._coro2triggers[coro] = triggers

        for trigger in triggers:

            self._trigger2coros[trigger].append(coro)
            if not trigger.primed:
                try:
                    trigger.prime(self.react)
                except Exception as e:
                    # Convert any exceptions into a test result
                    self.finish_test(
                        create_error(self, "Unable to prime trigger %s: %s" %
                                     (str(trigger), str(e))))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def add(self, coroutine):
        """
        Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error messages in the event of common gotchas.
        """
        if isinstance(coroutine, cocotb.decorators.coroutine):
            self.log.critical(
                "Attempt to schedule a coroutine that hasn't started")
            coroutine.log.error("This is the failing coroutine")
            self.log.warning(
                "Did you forget to add parentheses to the @test decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):
            self.log.critical(
                "Attempt to add something to the scheduler which isn't a "
                "coroutine")
            self.log.warning(
                "Got: %s (%s)" % (str(type(coroutine)), repr(coroutine)))
            self.log.warning("Did you use the @coroutine decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        if _debug:
            self.log.debug("Adding new coroutine %s" % coroutine.__name__)

        self.schedule(coroutine)
        self.advance()
        return coroutine

    def new_test(self, coroutine):
        self._entrypoint = coroutine

    def schedule(self, coroutine, trigger=None):
        """
        Schedule a coroutine by calling the send method

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule

            trigger (cocotb.triggers.Trigger): The trigger that caused this
                                                coroutine to be scheduled
        """
        if hasattr(trigger, "pass_retval"):
            sendval = trigger.retval
            if _debug:
                coroutine.log.debug("Scheduling with ReturnValue(%s)" %
                                    (repr(sendval)))
        else:
            sendval = trigger
            if _debug:
                coroutine.log.debug("Scheduling with %s" % str(trigger))

        try:
            result = coroutine.send(sendval)
            if _debug:
                self.log.debug("Coroutine %s yielded %s (mode %d)" %
                               (coroutine.__name__, str(result), self._mode))

        # TestComplete indication is game over, tidy up
        except TestComplete as test_result:
            # Tag that close down is needed, save the test_result
            # for later use in cleanup handler
            self.log.debug("TestComplete received: %s" % test_result.__class__.__name__)
            self.finish_test(test_result)
            return

        # Normal co-routine completion
        except cocotb.decorators.CoroutineComplete as exc:
            if _debug:
                self.log.debug("Coroutine completed: %s" % str(coroutine))
            self.unschedule(coroutine)
            return

        # Don't handle the result if we're shutting down
        if self._terminate:
            return

        # Queue current routine to schedule when the nested routine exits
        if isinstance(result, cocotb.decorators.RunningCoroutine):
            if _debug:
                self.log.debug("Scheduling nested co-routine: %s" %
                               result.__name__)

            self.queue(result)
            new_trigger = result.join()
            self._coroutine_yielded(coroutine, [new_trigger])

        elif isinstance(result, Trigger):
            self._coroutine_yielded(coroutine, [result])

        elif (isinstance(result, list) and
                not [t for t in result if not isinstance(t, Trigger)]):
            self._coroutine_yielded(coroutine, result)

        else:
            msg = ("Coroutine %s yielded something the scheduler can't handle"
                   % str(coroutine))
            msg += ("\nGot type: %s repr: %s str: %s" %
                    (type(result), repr(result), str(result)))
            msg += "\nDid you forget to decorate with @cocotb.coroutine?"
            try:
                raise_error(self, msg)
            except Exception as e:
                self.finish_test(e)

        # Handle any newly queued coroutines that need to be scheduled
        while self._pending_coros:
            self.add(self._pending_coros.pop(0))

        while self._pending_callbacks:
            self._pending_callbacks.pop(0)()

    def finish_test(self, test_result):
        """Cache the test result and set the terminate flag"""
        self.log.debug("finish_test called with %s" % (repr(test_result)))
        if not self._terminate:
            self._terminate = True
            self._test_result = test_result
            self.cleanup()

    def finish_scheduler(self, test_result):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed"""
        self.log.debug("Issue sim closedown result to regresssion object")
        cocotb.regression.handle_result(test_result)

    def cleanup(self):
        """
        Clear up all our state

        Unprime all pending triggers and kill off any coroutines
        """
        for trigger, waiting in self._trigger2coros.items():
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()
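
From a test's point of view, the normal/read-only/write modes described in the Scheduler docstring above surface as triggers to yield on. A short sketch, assuming a DUT with clk/enable/count signals (these names are not taken from the code above):

# Sketch of a coroutine interacting with the scheduler modes documented above.
import cocotb
from cocotb.triggers import ReadOnly, RisingEdge


@cocotb.coroutine
def sample_after_settling(dut):
    yield RisingEdge(dut.clk)      # woken in normal mode: writes are allowed
    dut.enable <= 1                # cached via save_write(), applied when the
                                   # scheduler primes its ReadWrite trigger
    yield ReadOnly()               # woken in read-only mode: delta delays have
    value = int(dut.count.value)   # settled, safe to sample, no writes allowed
    yield RisingEdge(dut.clk)      # wait for normal mode before writing again
    dut.enable <= 0
    dut._log.info("sampled count = %d" % value)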
Esempio n. 15
class RunningCoroutine(object):
    """Per instance wrapper around an function to turn it into a coroutine


        Provides the following:

            coro.join() creates a Trigger that will fire when this coroutine
            completes

            coro.kill() will destroy a coroutine instance (and cause any Join
            triggers to fire
    """
    def __init__(self, inst, parent):
        if hasattr(inst, "__name__"):
            self.__name__ = "%s" % inst.__name__
            self.log = SimLog("cocotb.coroutine.%s" % self.__name__, id(self))
        else:
            self.log = SimLog("cocotb.coroutine.fail")
        self._coro = inst
        self._started = False
        self._finished = False
        self._callbacks = []
        self._join = _Join(self)
        self._parent = parent
        self.__doc__ = parent._func.__doc__
        self.module = parent._func.__module__
        self.funcname = parent._func.__name__
        self.retval = None

        if not hasattr(self._coro, "send"):
            self.log.error("%s isn't a valid coroutine! Did you use the yield "
                           "keyword?" % self.funcname)
            raise CoroutineComplete(callback=self._finished_cb)

    def __iter__(self):
        return self

    def __str__(self):
        return str(self.__name__)

    def send(self, value):
        try:
            if isinstance(value, ExternalException):
                self.log.debug("Injecting ExternalException(%s)" % (repr(value)))
                return self._coro.throw(value.exception)
            self._started = True
            return self._coro.send(value)
        except TestComplete as e:
            if isinstance(e, TestFailure):
                self.log.warning(str(e))
            raise
        except ExternalException as e:
            self.retval = e
            self._finished = True
            raise CoroutineComplete(callback=self._finished_cb)
        except ReturnValue as e:
            self.retval = e.retval
            self._finished = True
            raise CoroutineComplete(callback=self._finished_cb)
        except StopIteration as e:
            self._finished = True
            self.retval = getattr(e, 'value', None)  # for python >=3.3
            raise CoroutineComplete(callback=self._finished_cb)
        except Exception as e:
            self._finished = True
            raise raise_error(self, "Send raised exception: %s" % (str(e)))

    def throw(self, exc):
        return self._coro.throw(exc)

    def close(self):
        return self._coro.close()

    def kill(self):
        """Kill a coroutine"""
        self.log.debug("kill() called on coroutine")
        cocotb.scheduler.unschedule(self)

    def _finished_cb(self):
        """Called when the coroutine completes.
            Allows us to mark the coroutine as finished so that boolean testing
            works.
            Also call any callbacks, usually the result of coroutine.join()"""
        self._finished = True

    def join(self):
        """Return a trigger that will fire when the wrapped coroutine exits"""
        if self._finished:
            return NullTrigger()
        else:
            return self._join

    def has_started(self):
        return self._started

    def __nonzero__(self):
        """Provide boolean testing
            if the coroutine has finished return false
            otherwise return true"""
        return not self._finished
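
A short sketch of the join()/kill() behaviour promised by the docstring above; the worker coroutine and its delays are illustrative only:

# Illustrative use of the RunningCoroutine join()/kill() API described above.
import cocotb
from cocotb.triggers import Timer


@cocotb.coroutine
def worker(delay_ns):
    yield Timer(delay_ns, units="ns")


@cocotb.test()
def join_and_kill_example(dut):
    fast = cocotb.fork(worker(10))     # cocotb.fork returns a RunningCoroutine
    slow = cocotb.fork(worker(1000))

    yield fast.join()                  # fires when `fast` completes; join()
                                       # returns a NullTrigger if already done
    slow.kill()                        # unschedules `slow`; any coroutines
                                       # yielding on its Join trigger resume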
Esempio n. 16
class RegressionManager(object):
    """Encapsulates all regression capability into a single place"""
    def __init__(self, root_name, modules, tests=None, seed=None, hooks=[]):
        """
        Args:
            root_name (str): The name of the root handle.
            modules (list): A list of Python module names to run.
            tests (list, optional): A list of tests to run.
                Defaults to ``None``, meaning all discovered tests will be run.
            seed (int, optional): The seed for the random number generator to use.
                Defaults to ``None``.
            hooks (list, optional): A list of hook modules to import.
                Defaults to the empty list.
        """
        self._queue = []
        self._root_name = root_name
        self._dut = None
        self._modules = modules
        self._functions = tests
        self._running_test = None
        self._cov = None
        self.log = SimLog("cocotb.regression")
        self._seed = seed
        self._hooks = hooks

    def initialise(self):

        self.start_time = time.time()
        self.test_results = []
        self.ntests = 0
        self.count = 1
        self.skipped = 0
        self.failures = 0

        results_filename = os.getenv('COCOTB_RESULTS_FILE', "results.xml")
        suite_name = os.getenv('RESULT_TESTSUITE', "all")
        package_name = os.getenv('RESULT_TESTPACKAGE', "all")

        self.xunit = XUnitReporter(filename=results_filename)

        self.xunit.add_testsuite(name=suite_name,
                                 tests=repr(self.ntests),
                                 package=package_name)

        if (self._seed is not None):
            self.xunit.add_property(name="random_seed",
                                    value=("%d" % self._seed))

        if coverage is not None:
            self.log.info("Enabling coverage collection of Python code")
            self._cov = coverage.coverage(branch=True, omit=["*cocotb*"])
            self._cov.start()

        handle = simulator.get_root_handle(self._root_name)

        self._dut = cocotb.handle.SimHandle(handle) if handle else None

        if self._dut is None:
            raise AttributeError("Can not find Root Handle (%s)" %
                                 self._root_name)

        # Auto discovery
        for module_name in self._modules:
            try:
                self.log.debug("Python Path: " + ",".join(sys.path))
                self.log.debug("PWD: " + os.getcwd())
                module = _my_import(module_name)
            except Exception as E:
                self.log.critical("Failed to import module %s: %s",
                                  module_name, E)
                self.log.info("MODULE variable was \"%s\"",
                              ".".join(self._modules))
                self.log.info("Traceback: ")
                self.log.info(traceback.format_exc())
                raise

            if self._functions:

                # Specific functions specified, don't auto discover
                for test in self._functions.rsplit(','):
                    try:
                        _test = getattr(module, test)
                    except AttributeError:
                        self.log.error(
                            "Requested test %s wasn't found in module %s",
                            test, module_name)
                        err = AttributeError("Test %s doesn't exist in %s" %
                                             (test, module_name))
                        _py_compat.raise_from(err,
                                              None)  # discard nested traceback

                    if not hasattr(_test, "im_test"):
                        self.log.error(
                            "Requested %s from module %s isn't a cocotb.test decorated coroutine",
                            test, module_name)
                        raise ImportError("Failed to find requested test %s" %
                                          test)
                    self._queue.append(_test(self._dut))
                    self.ntests += 1
                break

            for thing in vars(module).values():
                if hasattr(thing, "im_test"):
                    try:
                        test = thing(self._dut)
                        skip = test.skip
                    except Exception:
                        skip = True
                        self.log.warning("Failed to initialize test %s" %
                                         thing.name,
                                         exc_info=True)

                    if skip:
                        self.log.info("Skipping test %s" % thing.name)
                        self.xunit.add_testcase(name=thing.name,
                                                classname=module_name,
                                                time="0.0",
                                                sim_time_ns="0.0",
                                                ratio_time="0.0")
                        self.xunit.add_skipped()
                        self.skipped += 1
                        self._store_test_result(module_name, thing.name, None,
                                                0.0, 0.0, 0.0)
                    else:
                        self._queue.append(test)
                        self.ntests += 1

        self._queue.sort(key=lambda test: test.sort_name())

        for valid_tests in self._queue:
            self.log.info("Found test %s.%s" %
                          (valid_tests.module, valid_tests.funcname))

        for module_name in self._hooks:
            self.log.info("Loading hook from module '" + module_name + "'")
            module = _my_import(module_name)

            for thing in vars(module).values():
                if hasattr(thing, "im_hook"):
                    try:
                        test = thing(self._dut)
                    except Exception:
                        self.log.warning("Failed to initialize hook %s" %
                                         thing.name,
                                         exc_info=True)
                    else:
                        cocotb.scheduler.add(test)

    def tear_down(self):
        """It's the end of the world as we know it"""
        if self.failures:
            self.log.error("Failed %d out of %d tests (%d skipped)" %
                           (self.failures, self.count - 1, self.skipped))
        else:
            self.log.info("Passed %d tests (%d skipped)" %
                          (self.count - 1, self.skipped))
        if self._cov:
            self._cov.stop()
            self.log.info("Writing coverage data")
            self._cov.save()
            self._cov.html_report()
        if len(self.test_results) > 0:
            self._log_test_summary()
        self._log_sim_summary()
        self.log.info("Shutting down...")
        self.xunit.write()
        simulator.stop_simulator()

    def next_test(self):
        """Get the next test to run"""
        if not self._queue:
            return None
        return self._queue.pop(0)

    def _add_failure(self, result):
        self.xunit.add_failure(
            stdout=repr(str(result)),
            stderr="\n".join(self._running_test.error_messages),
            message="Test failed with random_seed={}".format(self._seed))
        self.failures += 1

    def handle_result(self, test):
        """Handle a test completing.

        Dump result to XML and schedule the next test (if any).

        Args:
            test: The test that completed
        """
        assert test is self._running_test

        real_time = time.time() - test.start_time
        sim_time_ns = get_sim_time('ns') - test.start_sim_time
        ratio_time = self._safe_divide(sim_time_ns, real_time)

        self.xunit.add_testcase(name=test.funcname,
                                classname=test.module,
                                time=repr(real_time),
                                sim_time_ns=repr(sim_time_ns),
                                ratio_time=repr(ratio_time))

        # Helper for logging result
        def _result_was():
            result_was = ("{} (result was {})".format(
                test.funcname, result.__class__.__name__))
            return result_was

        result_pass = True

        # check what exception the test threw
        try:
            test._outcome.get()
        except Exception as e:
            if sys.version_info >= (3, 5):
                result = remove_traceback_frames(e, ['handle_result', 'get'])
                # newer versions of the `logging` module accept plain exception objects
                exc_info = result
            elif sys.version_info >= (3, ):
                result = remove_traceback_frames(e, ['handle_result', 'get'])
                # newer versions of python have Exception.__traceback__
                exc_info = (type(result), result, result.__traceback__)
            else:
                # Python 2
                result = e
                exc_info = remove_traceback_frames(sys.exc_info(),
                                                   ['handle_result', 'get'])
        else:
            result = TestSuccess()

        if (isinstance(result, TestSuccess) and not test.expect_fail
                and not test.expect_error):
            self.log.info("Test Passed: %s" % test.funcname)

        elif (isinstance(result, AssertionError) and test.expect_fail):
            self.log.info("Test failed as expected: " + _result_was())

        elif (isinstance(result, TestSuccess) and test.expect_error):
            self.log.error("Test passed but we expected an error: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, TestSuccess):
            self.log.error("Test passed but we expected a failure: " +
                           _result_was())
            self._add_failure(result)
            result_pass = False

        elif isinstance(result, SimFailure):
            if isinstance(result, test.expect_error):
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.error(
                    "Test error has lead to simulator shutting us "
                    "down",
                    exc_info=exc_info)
                self._add_failure(result)
                self._store_test_result(test.module, test.funcname, False,
                                        sim_time_ns, real_time, ratio_time)
                self.tear_down()
                return

        elif test.expect_error:
            if isinstance(result, test.expect_error):
                self.log.info("Test errored as expected: " + _result_was())
            else:
                self.log.info("Test errored with unexpected type: " +
                              _result_was())
                self._add_failure(result)
                result_pass = False

        else:
            self.log.error("Test Failed: " + _result_was(), exc_info=exc_info)
            self._add_failure(result)
            result_pass = False

        self._store_test_result(test.module, test.funcname, result_pass,
                                sim_time_ns, real_time, ratio_time)

        self.execute()

    def execute(self):
        self._running_test = cocotb.regression_manager.next_test()
        if self._running_test:
            start = ''
            end = ''
            if want_color_output():
                start = ANSI.COLOR_TEST
                end = ANSI.COLOR_DEFAULT
            # Want this to stand out a little bit
            self.log.info("%sRunning test %d/%d:%s %s" %
                          (start, self.count, self.ntests, end,
                           self._running_test.funcname))

            cocotb.scheduler.add_test(self._running_test)
            self.count += 1
        else:
            self.tear_down()

    def _log_test_summary(self):
        TEST_FIELD = 'TEST'
        RESULT_FIELD = 'PASS/FAIL'
        SIM_FIELD = 'SIM TIME(NS)'
        REAL_FIELD = 'REAL TIME(S)'
        RATIO_FIELD = 'RATIO(NS/S)'

        TEST_FIELD_LEN = max(
            len(TEST_FIELD),
            len(max([x['test'] for x in self.test_results], key=len)))
        RESULT_FIELD_LEN = len(RESULT_FIELD)
        SIM_FIELD_LEN = len(SIM_FIELD)
        REAL_FIELD_LEN = len(REAL_FIELD)
        RATIO_FIELD_LEN = len(RATIO_FIELD)

        LINE_LEN = 3 + TEST_FIELD_LEN + 2 + RESULT_FIELD_LEN + 2 + SIM_FIELD_LEN + 2 + REAL_FIELD_LEN + 2 + RATIO_FIELD_LEN + 3

        LINE_SEP = "*" * LINE_LEN + "\n"

        summary = ""
        summary += LINE_SEP
        summary += "** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}}  {d:>{d_len}}  {e:>{e_len}} **\n".format(
            a=TEST_FIELD,
            a_len=TEST_FIELD_LEN,
            b=RESULT_FIELD,
            b_len=RESULT_FIELD_LEN,
            c=SIM_FIELD,
            c_len=SIM_FIELD_LEN,
            d=REAL_FIELD,
            d_len=REAL_FIELD_LEN,
            e=RATIO_FIELD,
            e_len=RATIO_FIELD_LEN)
        summary += LINE_SEP
        for result in self.test_results:
            hilite = ''

            if result['pass'] is None:
                pass_fail_str = "N/A"
            elif result['pass']:
                pass_fail_str = "PASS"
            else:
                pass_fail_str = "FAIL"
                if want_color_output():
                    hilite = ANSI.COLOR_HILITE_SUMMARY

            summary += "{start}** {a:<{a_len}}  {b:^{b_len}}  {c:>{c_len}.2f}   {d:>{d_len}.2f}   {e:>{e_len}.2f}  **\n".format(
                a=result['test'],
                a_len=TEST_FIELD_LEN,
                b=pass_fail_str,
                b_len=RESULT_FIELD_LEN,
                c=result['sim'],
                c_len=SIM_FIELD_LEN - 1,
                d=result['real'],
                d_len=REAL_FIELD_LEN - 1,
                e=result['ratio'],
                e_len=RATIO_FIELD_LEN - 1,
                start=hilite)
        summary += LINE_SEP

        self.log.info(summary)

    def _log_sim_summary(self):
        real_time = time.time() - self.start_time
        sim_time_ns = get_sim_time('ns')
        ratio_time = self._safe_divide(sim_time_ns, real_time)

        summary = ""

        summary += "*************************************************************************************\n"
        summary += "**                                 ERRORS : {0:<39}**\n".format(
            self.failures)
        summary += "*************************************************************************************\n"
        summary += "**                               SIM TIME : {0:<39}**\n".format(
            '{0:.2f} NS'.format(sim_time_ns))
        summary += "**                              REAL TIME : {0:<39}**\n".format(
            '{0:.2f} S'.format(real_time))
        summary += "**                        SIM / REAL TIME : {0:<39}**\n".format(
            '{0:.2f} NS/S'.format(ratio_time))
        summary += "*************************************************************************************\n"

        self.log.info(summary)

    @staticmethod
    def _safe_divide(a, b):
        try:
            return a / b
        except ZeroDivisionError:
            if a == 0:
                return float('nan')
            else:
                return float('inf')

    def _store_test_result(self, module_name, test_name, result_pass, sim_time,
                           real_time, ratio):
        result = {
            'test': '.'.join([module_name, test_name]),
            'pass': result_pass,
            'sim': sim_time,
            'real': real_time,
            'ratio': ratio
        }
        self.test_results.append(result)
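
initialise() above discovers tests by importing every module named in the MODULE environment variable and collecting the objects carrying the im_test attribute set by the cocotb.test decorator. A sketch of such a module (the file name, DUT and delays are illustrative):

# my_tests.py -- a module the RegressionManager above would auto-discover
# when the MODULE environment variable is set to "my_tests".
import cocotb
from cocotb.result import TestFailure
from cocotb.triggers import Timer


@cocotb.test()
def passing_test(dut):
    """Picked up by initialise() because @cocotb.test() tags it with im_test."""
    yield Timer(10, units="ns")


@cocotb.test(skip=True)
def skipped_test(dut):
    """Recorded as skipped and reported as such in the xunit results."""
    yield Timer(10, units="ns")


@cocotb.test(expect_fail=True)
def expected_failure(dut):
    """With expect_fail=True a failure here is reported as expected
    rather than counted against the regression."""
    yield Timer(10, units="ns")
    raise TestFailure("deliberate failure")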
Esempio n. 17
class RunningCoroutine(object):
    """Per instance wrapper around an function to turn it into a coroutine


        Provides the following:

            coro.join() creates a Trigger that will fire when this coroutine
            completes

            coro.kill() will destroy a coroutine instance (and cause any Join
            triggers to fire
    """
    def __init__(self, inst, parent):
        if hasattr(inst, "__name__"):
            self.__name__ = "%s" % inst.__name__
            self.log = SimLog("cocotb.coroutine.%s" % self.__name__, id(self))
        else:
            self.log = SimLog("cocotb.coroutine.fail")
        self._coro = inst
        self._started = False
        self._finished = False
        self._callbacks = []
        self._join = _Join(self)
        self._parent = parent
        self.__doc__ = parent._func.__doc__
        self.module = parent._func.__module__
        self.funcname = parent._func.__name__
        self.retval = None

        if not hasattr(self._coro, "send"):
            self.log.error("%s isn't a valid coroutine! Did you use the yield "
                           "keyword?" % self.funcname)
            raise CoroutineComplete(callback=self._finished_cb)

    def __iter__(self):
        return self

    def __str__(self):
        return str(self.__name__)

    def send(self, value):
        try:
            if isinstance(value, ExternalException):
                self.log.debug("Injecting ExternalException(%s)" %
                               (repr(value)))
                return self._coro.throw(value.exception)
            self._started = True
            return self._coro.send(value)
        except TestComplete as e:
            if isinstance(e, TestFailure):
                self.log.warning(str(e))
            raise
        except ExternalException as e:
            self.retval = e
            self._finished = True
            raise CoroutineComplete(callback=self._finished_cb)
        except ReturnValue as e:
            self.retval = e.retval
            self._finished = True
            raise CoroutineComplete(callback=self._finished_cb)
        except StopIteration:
            self._finished = True
            raise CoroutineComplete(callback=self._finished_cb)
        except Exception as e:
            self._finished = True
            raise raise_error(self, "Send raised exception: %s" % (str(e)))

    def throw(self, exc):
        return self._coro.throw(exc)

    def close(self):
        return self._coro.close()

    def kill(self):
        """Kill a coroutine"""
        self.log.debug("kill() called on coroutine")
        cocotb.scheduler.unschedule(self)

    def _finished_cb(self):
        """Called when the coroutine completes.
            Allows us to mark the coroutine as finished so that boolean testing
            works.
            Also call any callbacks, usually the result of coroutine.join()"""
        self._finished = True

    def join(self):
        """Return a trigger that will fire when the wrapped coroutine exits"""
        if self._finished:
            return NullTrigger()
        else:
            return self._join

    def has_started(self):
        return self._started

    def __nonzero__(self):
        """Provide boolean testing
            if the coroutine has finished return false
            otherwise return true"""
        return not self._finished
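
send() above turns a raised ReturnValue into retval before signalling CoroutineComplete; combined with the scheduler's pass_retval handling, that is what lets a parent coroutine receive a value from a yield. A sketch (dut.count is an assumed signal):

# Sketch of the ReturnValue path handled in send() above.
import cocotb
from cocotb.result import ReturnValue
from cocotb.triggers import Timer


@cocotb.coroutine
def read_counter(dut):
    yield Timer(10, units="ns")
    # Caught in send(), stored as self.retval, then handed back by the
    # scheduler when the parent's Join trigger fires.
    raise ReturnValue(int(dut.count.value))


@cocotb.test()
def use_return_value(dut):
    value = yield read_counter(dut)       # receives read_counter's retval
    dut._log.info("counter read back as %d" % value)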
Esempio n. 18
class Scheduler(object):
    """The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the :any:`react` method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn. NB implementors should not depend on the scheduling
    order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to cbReadOnlySynch (VPI) or vhpiCbLastKnownDeltaCycle
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to cbReadWriteSynch (VPI) or vhpiCbEndOfProcesses (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from normal->write by registering a ReadWrite
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL   = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE    = 3  # noqa
    _MODE_TERM     = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _readonly = ReadOnly()
    # TODO[gh-759]: For some reason, the scheduler requires that these triggers
    # are _not_ the same instances used by the tests themselves. This is risky,
    # because it can lead to them overwriting each other's callbacks. We should
    # try to remove this `copy.copy` in future.
    _next_timestep = copy.copy(NextTimeStep())
    _readwrite = copy.copy(ReadWrite())
    _timer1 = Timer(1)
    _timer0 = Timer(0)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = collections.defaultdict(list)

        # A dictionary of pending triggers for each coroutine, indexed by coro
        self._coro2triggers = collections.defaultdict(list)

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = {}

        self._pending_coros = []
        self._pending_callbacks = []
        self._pending_triggers = []
        self._pending_threads = []
        self._pending_events = []   # Events we need to call set on once we've unwound

        self._terminate = False
        self._test_result = None
        self._entrypoint = None
        self._main_thread = threading.current_thread()

        # Select the appropriate scheduling algorithm for this simulator
        self.advance = self.default_scheduling_algorithm
        self._is_reacting = False

    def default_scheduling_algorithm(self):
        """
        Decide whether we need to schedule our own triggers (if at all) in
        order to progress to the next mode.

        This algorithm has been tested against the following simulators:
            Icarus Verilog
        """
        if not self._terminate and self._writes:

            if self._mode == Scheduler._MODE_NORMAL:
                if not self._readwrite.primed:
                    self._readwrite.prime(self.react)
            elif not self._next_timestep.primed:
                self._next_timestep.prime(self.react)

        elif self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            for t in self._trigger2coros:
                t.unprime()

            for t in [self._readwrite, self._readonly, self._next_timestep,
                      self._timer1, self._timer0]:
                if t.primed:
                    t.unprime()

            self._timer1.prime(self.begin_test)
            self._trigger2coros = collections.defaultdict(list)
            self._coro2triggers = collections.defaultdict(list)
            self._terminate = False
            self._mode = Scheduler._MODE_TERM

    def begin_test(self, trigger=None):
        """Called to initiate a test.

        Could be called on start-up or from a callback.
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            ctx = profiling_context()
        else:
            ctx = nullcontext()

        with ctx:
            self._mode = Scheduler._MODE_NORMAL
            if trigger is not None:
                trigger.unprime()

            # Issue previous test result, if there is one
            if self._test_result is not None:
                if _debug:
                    self.log.debug("Issue test result to regression object")
                cocotb.regression_manager.handle_result(self._test_result)
                self._test_result = None
            if self._entrypoint is not None:
                test = self._entrypoint
                self._entrypoint = None
                self.schedule(test)
                self.advance()

    def react(self, trigger):
        """
        Called when a trigger fires.

        We ensure that we only start the event loop once, rather than
        letting it recurse.
        """
        if self._is_reacting:
            # queue up the trigger, the event loop will get to it
            self._pending_triggers.append(trigger)
            return

        # start the event loop
        self._is_reacting = True
        try:
            self._event_loop(trigger)
        finally:
            self._is_reacting = False


    def _event_loop(self, trigger):
        """
        Run an event loop triggered by the given trigger.

        The loop will keep running until no further triggers fire.

        This should only be triggered by:
        * The beginning of a test, when there is no trigger to react to
        * A GPI trigger
        """
        if _profiling:
            ctx = profiling_context()
        else:
            ctx = nullcontext()

        with ctx:
            # When a trigger fires it is unprimed internally
            if _debug:
                self.log.debug("Trigger fired: %s" % str(trigger))
            # trigger.unprime()

            if self._mode == Scheduler._MODE_TERM:
                if _debug:
                    self.log.debug("Ignoring trigger %s since we're terminating" %
                                   str(trigger))
                return

            if trigger is self._readonly:
                self._mode = Scheduler._MODE_READONLY
            # Only GPI triggers affect the simulator scheduling mode
            elif isinstance(trigger, GPITrigger):
                self._mode = Scheduler._MODE_NORMAL

            # We're the only source of ReadWrite triggers which are only used for
            # playing back any cached signal updates
            if trigger is self._readwrite:

                if _debug:
                    self.log.debug("Writing cached signal updates")

                while self._writes:
                    handle, value = self._writes.popitem()
                    handle.setimmediatevalue(value)

                self._readwrite.unprime()

                return

            # Similarly if we've scheduled our next_timestep on way to readwrite
            if trigger is self._next_timestep:

                if not self._writes:
                    self.log.error(
                        "Moved to next timestep without any pending writes!")
                else:
                    self.log.debug(
                        "Priming ReadWrite trigger so we can playback writes")
                    self._readwrite.prime(self.react)

                return

            # work through triggers one by one
            is_first = True
            self._pending_triggers.append(trigger)
            while self._pending_triggers:
                trigger = self._pending_triggers.pop(0)

                if not is_first and isinstance(trigger, GPITrigger):
                    self.log.warning(
                        "A GPI trigger occurred after entering react - this "
                        "should not happen."
                    )
                    assert False

                # this only exists to enable the warning above
                is_first = False

                if trigger not in self._trigger2coros:

                    # GPI triggers should only be ever pending if there is an
                    # associated coroutine waiting on that trigger, otherwise it would
                    # have been unprimed already
                    if isinstance(trigger, GPITrigger):
                        self.log.critical(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                        trigger.log.info("I'm the culprit")
                    # For Python triggers this isn't actually an error - we might do
                    # event.set() without knowing whether any coroutines are actually
                    # waiting on this event, for example
                    elif _debug:
                        self.log.debug(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                    continue

                # Scheduled coroutines may append to our waiting list so the first
                # thing to do is pop all entries waiting on this trigger.
                scheduling = self._trigger2coros.pop(trigger)

                if _debug:
                    debugstr = "\n\t".join([coro.__name__ for coro in scheduling])
                    if len(scheduling):
                        debugstr = "\n\t" + debugstr
                    self.log.debug("%d pending coroutines for event %s%s" %
                                   (len(scheduling), str(trigger), debugstr))

                # This trigger isn't needed any more
                trigger.unprime()

                # If the coroutine was waiting on multiple triggers we may be able
                # to unprime the other triggers that didn't fire
                scheduling_set = set(scheduling)
                other_triggers = {
                    t
                    for coro in scheduling
                    for t in self._coro2triggers[coro]
                } - {trigger}

                for pending in other_triggers:
                    # every coroutine waiting on this trigger is already being woken
                    if scheduling_set.issuperset(self._trigger2coros[pending]):
                        if pending.primed:
                            pending.unprime()
                        del self._trigger2coros[pending]

                for coro in scheduling:
                    if _debug:
                        self.log.debug("Scheduling coroutine %s" % (coro.__name__))
                    self.schedule(coro, trigger=trigger)
                    if _debug:
                        self.log.debug("Scheduled coroutine %s" % (coro.__name__))

                # Schedule may have queued up some events so we'll burn through those
                while self._pending_events:
                    if _debug:
                        self.log.debug("Scheduling pending event %s" %
                                       (str(self._pending_events[0])))
                    self._pending_events.pop(0).set()

            # no more pending triggers
            self.advance()
            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")


    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        for trigger in self._coro2triggers[coro]:
            if coro in self._trigger2coros[trigger]:
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
                del self._trigger2coros[trigger]
        del self._coro2triggers[coro]

        if Join(coro) in self._trigger2coros:
            self._pending_triggers.append(Join(coro))
        else:
            try:
                # throws an error if the background coroutine errored
                # and no one was monitoring it
                coro.retval
            except Exception as e:
                self._test_result = TestError(
                    "Forked coroutine {} raised exception {}"
                    .format(coro, e)
                )
                self._terminate = True

    def save_write(self, handle, value):
        if self._mode == Scheduler._MODE_READONLY:
            raise Exception("Write to object {0} was scheduled during a read-only sync phase.".format(handle._name))
        self._writes[handle] = value

    def _coroutine_yielded(self, coro, triggers):
        """Prime the triggers and update our internal mappings."""
        self._coro2triggers[coro] = triggers

        for trigger in triggers:

            self._trigger2coros[trigger].append(coro)
            if not trigger.primed:
                try:
                    trigger.prime(self.react)
                except Exception as e:
                    # Convert any exceptions into a test result
                    self.finish_test(
                        create_error(self, "Unable to prime trigger %s: %s" %
                                     (str(trigger), str(e))))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def queue_function(self, coroutine):
        """Queue a coroutine for execution and move the containing thread
        so that it does not block execution of the main thread any longer.
        """

        # We should be able to find ourselves inside the _pending_threads list

        for t in self._pending_threads:
            if t.thread == threading.current_thread():
                t.thread_suspend()
                self._pending_coros.append(coroutine)
                return t
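
    # Hedged usage sketch, assuming the standard cocotb.function decorator is
    # the front end that ends up in queue_function (hypothetical names, not
    # from the original source): it lets code running in an external thread
    # call a coroutine and block until that coroutine has completed.
    #
    #     @cocotb.function
    #     def wait_cycles(dut, n):
    #         for _ in range(n):
    #             yield RisingEdge(dut.clk)
    #
    #     def blocking_helper(dut):       # runs in its own thread via cocotb.external
    #         wait_cycles(dut, 10)        # suspends this thread until the coroutine is done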


    def run_in_executor(self, func, *args, **kwargs):
        """Run the coroutine in a separate execution thread
        and return a yieldable object for the caller.
        """
        # Create a thread
        # Create a trigger that is called as a result of the thread finishing
        # Create an Event object that the caller can yield on
        # The Event object is set when the thread finishes execution; this
        #   blocks the calling coroutine (but not the thread) until the
        #   external completes

        def execute_external(func, _waiter):
            _waiter._outcome = outcomes.capture(func, *args, **kwargs)
            if _debug:
                self.log.debug("Execution of external routine done %s" % threading.current_thread())
            _waiter.thread_done()

        waiter = external_waiter()
        thread = threading.Thread(group=None, target=execute_external,
                                  name=func.__name__ + "_thread",
                                  args=(func, waiter), kwargs={})

        waiter.thread = thread
        self._pending_threads.append(waiter)

        return waiter
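
    # Hedged usage sketch: cocotb.external is the usual front end for
    # run_in_executor, letting a blocking Python function run in its own
    # thread while the calling coroutine yields on the returned waiter
    # (hypothetical function and test, not from the original source):
    #
    #     def blocking_read():
    #         time.sleep(1)          # ordinary blocking call, off the sim thread
    #         return 42
    #
    #     @cocotb.test()
    #     def uses_external(dut):
    #         result = yield cocotb.external(blocking_read)()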

    def add(self, coroutine):
        """Add a new coroutine.

        Just a wrapper around self.schedule which adds some debug output and
        useful error messages for common gotchas.
        """
        if isinstance(coroutine, cocotb.decorators.coroutine):
            self.log.critical(
                "Attempt to schedule a coroutine that hasn't started")
            coroutine.log.error("This is the failing coroutine")
            self.log.warning(
                "Did you forget to add parentheses to the @test decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):
            self.log.critical(
                "Attempt to add something to the scheduler which isn't a "
                "coroutine")
            self.log.warning(
                "Got: %s (%s)" % (str(type(coroutine)), repr(coroutine)))
            self.log.warning("Did you use the @coroutine decorator?")
            self._test_result = TestError(
                "Attempt to add something to the scheduler which isn't a coroutine")
            self._terminate = True
            return

        if _debug:
            self.log.debug("Adding new coroutine %s" % coroutine.__name__)

        self.schedule(coroutine)
        self.advance()
        return coroutine
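
    # Illustrative sketch of the gotchas the error messages above refer to
    # (hypothetical coroutine names, not from the original source):
    #
    #     cocotb.fork(worker)          # forgot the call: a bare coroutine
    #                                  #   decorator instance -> first branch
    #     cocotb.fork(plain_gen(dut))  # generator without @cocotb.coroutine
    #                                  #   -> second branch
    #     cocotb.fork(worker(dut))     # correct: a started RunningCoroutine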

    def new_test(self, coroutine):
        self._entrypoint = coroutine

    def schedule(self, coroutine, trigger=None):
        """Schedule a coroutine by calling the send method.

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule.
            trigger (cocotb.triggers.Trigger): The trigger that caused this
                coroutine to be scheduled.
        """
        if trigger is None:
            send_outcome = outcomes.Value(None)
        else:
            send_outcome = trigger._outcome
        if _debug:
            self.log.debug("Scheduling with {}".format(send_outcome))

        try:
            result = coroutine._advance(send_outcome)
            if _debug:
                self.log.debug("Coroutine %s yielded %s (mode %d)" %
                               (coroutine.__name__, str(result), self._mode))

        # TestComplete indication is game over, tidy up
        except TestComplete as test_result:
            # Tag that close down is needed, save the test_result
            # for later use in cleanup handler
            self.log.debug("TestComplete received: %s" % test_result.__class__.__name__)
            self.finish_test(test_result)
            return

        # Normal coroutine completion
        except cocotb.decorators.CoroutineComplete as exc:
            if _debug:
                self.log.debug("Coroutine completed: %s" % str(coroutine))
            self.unschedule(coroutine)
            return

        # Don't handle the result if we're shutting down
        if self._terminate:
            return

        # Queue current routine to schedule when the nested routine exits
        yield_successful = False
        if isinstance(result, cocotb.decorators.RunningCoroutine):

            if not result.has_started():
                self.queue(result)
                if _debug:
                    self.log.debug("Scheduling nested coroutine: %s" %
                                   result.__name__)
            else:
                if _debug:
                    self.log.debug("Joining to already running coroutine: %s" %
                                   result.__name__)

            new_trigger = result.join()
            self._coroutine_yielded(coroutine, [new_trigger])
            yield_successful = True

        elif isinstance(result, Trigger):
            if _debug:
                self.log.debug("%s: is instance of Trigger" % result)
            self._coroutine_yielded(coroutine, [result])
            yield_successful = True

        # If we get a list, make sure it's a list of triggers or coroutines.
        # For every coroutine, replace it with coroutine.join().
        # This could probably be done more elegantly via list comprehension.
        elif isinstance(result, list):
            new_triggers = []
            for listobj in result:
                if isinstance(listobj, Trigger):
                    new_triggers.append(listobj)
                elif isinstance(listobj, cocotb.decorators.RunningCoroutine):
                    if _debug:
                        self.log.debug("Scheduling coroutine in list: %s" %
                                       listobj.__name__)
                    if not listobj.has_started():
                        self.queue(listobj)
                    new_trigger = listobj.join()
                    new_triggers.append(new_trigger)
                else:
                    # If we encounter something not a coroutine or trigger,
                    # set the success flag to False and break out of the loop.
                    yield_successful = False
                    break

            # Make sure the lists are the same size. If they are not, the
            # original list contained something that wasn't a trigger or
            # coroutine, so do nothing.
            if len(new_triggers) == len(result):
                self._coroutine_yielded(coroutine, new_triggers)
                yield_successful = True

        # If we didn't successfully yield anything, throw an error.
        # Do it this way to make the logic in the list case simpler.
        if not yield_successful:
            msg = ("Coroutine %s yielded something the scheduler can't handle"
                   % str(coroutine))
            msg += ("\nGot type: %s repr: %s str: %s" %
                    (type(result), repr(result), str(result)))
            msg += "\nDid you forget to decorate with @cocotb.coroutine?"
            try:
                raise_error(self, msg)
            except Exception as e:
                self.finish_test(e)

        # We do not return from here until pending threads have completed, but
        # only when called from the main thread. This could be problematic if a
        # simulator ever changes which thread that is.
        def unblock_event(ext):
            @cocotb.coroutine
            def wrapper():
                ext.event.set()
                yield PythonTrigger()

        if self._main_thread is threading.current_thread():

            for ext in self._pending_threads:
                ext.thread_start()
                if _debug:
                    self.log.debug("Blocking from %s on %s" % (threading.current_thread(), ext.thread))
                state = ext.thread_wait()
                if _debug:
                    self.log.debug("Back from wait on self %s with newstate %d" % (threading.current_thread(), state))
                if state == external_state.EXITED:
                    self._pending_threads.remove(ext)
                    self._pending_events.append(ext.event)

        # Handle any newly queued coroutines that need to be scheduled
        while self._pending_coros:
            self.add(self._pending_coros.pop(0))

        while self._pending_callbacks:
            self._pending_callbacks.pop(0)()
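
    # Hedged summary of the yield forms handled above (hypothetical coroutine,
    # not from the original source): a single Trigger, a nested coroutine, or
    # a list of triggers/coroutines where the first to fire wakes the caller.
    #
    #     @cocotb.coroutine
    #     def example(dut):
    #         yield Timer(100)                           # single Trigger
    #         yield sub_task(dut)                        # nested RunningCoroutine
    #         yield [RisingEdge(dut.clk), Timer(10000)]  # list: first to fire wins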


    def finish_test(self, test_result):
        """Cache the test result and set the terminate flag."""
        self.log.debug("finish_test called with %s" % (repr(test_result)))
        if not self._terminate:
            self._terminate = True
            self._test_result = test_result
            self.cleanup()

    def finish_scheduler(self, test_result):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed.
        """
        self.log.debug("Issue sim closedown result to regression object")
        cocotb.regression_manager.handle_result(test_result)

    def cleanup(self):
        """Clear up all our state.

        Unprime all pending triggers and kill off any coroutines stop all externals.
        """
        for trigger, waiting in dict(self._trigger2coros).items():
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()

        if self._main_thread is not threading.current_thread():
            raise Exception("Cleanup() called outside of the main thread")

        for ext in self._pending_threads:
            self.log.warn("Waiting for %s to exit", ext.thread)
Esempio n. 19
0
class RunningCoroutine(object):
    """Per instance wrapper around a function to turn it into a coroutine.

        Provides the following:

            coro.join() creates a Trigger that will fire when this coroutine completes.

            coro.kill() will destroy a coroutine instance (and cause any Join triggers to fire).
    """
    def __init__(self, inst, parent):
        self.__name__ = "%s" % inst.__name__
        self.log = SimLog("cocotb.coroutine.%s" % self.__name__, id(self))
        self._coro = inst
        self._finished = False
        self._callbacks = []
        self._parent = parent
        self.__doc__ = parent._func.__doc__
        self.module = parent._func.__module__
        self.funcname = parent._func.__name__
        self.retval = None

        if not hasattr(self._coro, "send"):
            self.log.error("%s isn't a value coroutine! Did you use the yield keyword?"
                % self.__name__)
            raise CoroutineComplete(callback=self._finished_cb)
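
    # Illustrative sketch of the check above (hypothetical function, not from
    # the original source): a decorated function with no yield statement is
    # not a generator, so the object handed to RunningCoroutine has no send()
    # method and construction raises immediately.
    #
    #     @cocotb.coroutine
    #     def not_really_a_coroutine(dut):
    #         dut.sig <= 1    # no yield anywhere -> "isn't a valid coroutine"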

    def __iter__(self):
        return self

    def __str__(self):
        return str(self.__name__)

    def send(self, value):
        try:
            return self._coro.send(value)
        except TestComplete as e:
            if isinstance(e, TestFailure):
                self.log.warning(str(e))
            else:
                self.log.info(str(e))
            raise
        except ReturnValue as e:
            self.retval = e.retval
            raise CoroutineComplete(callback=self._finished_cb)
        except StopIteration:
            raise CoroutineComplete(callback=self._finished_cb)
        except Exception as e:
            raise_error(self, "Send raised exception: %s" % (str(e)))

    def throw(self, exc):
        return self._coro.throw(exc)

    def close(self):
        return self._coro.close()

    def kill(self):
        """Kill a coroutine"""
        self.log.debug("kill() called on coroutine")
        cocotb.scheduler.schedule_remove(self, self._finished_cb)

    def _finished_cb(self):
        """Called when the coroutine completes.
            Allows us to mark the coroutine as finished so that boolean testing works.
            Also calls any callbacks, usually the result of coroutine.join()."""
        self._finished = True
        self.log.debug("Coroutine finished calling pending callbacks (%d pending)" % len(self._callbacks))
        for cb in self._callbacks:
            cb()
        self._callbacks = []

    def join(self):
        """Return a trigger that will fire when the wrapped coroutine exits"""
        return Join(self)
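
    # Hedged usage sketch (hypothetical names, not from the original source):
    #
    #     child = cocotb.fork(producer(dut))
    #     yield child.join()      # resumes once producer() has exited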

    def __nonzero__(self):
        """Provide boolean testing
            if the coroutine has finished return false
            otherwise return true"""
        return not self._finished
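
    # Note (an assumption about portability, not part of the original example):
    # __nonzero__ is the Python 2 truth-testing hook; Python 3 uses __bool__
    # instead, so an alias such as
    #
    #     __bool__ = __nonzero__
    #
    # would be needed for `if not coro:` checks to reflect _finished there.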