Code example #1
File: test_drv.py  Project: kirknodeng/cocotb
class test_drv(BusDriver):
    _signals = ["A", "B", "X"]
    _optional_signals = []

    def __init__(self, entity, name, clock):
        BusDriver.__init__(self, entity, name, clock)
        self.bus.A.setimmediatevalue(5)
        self.bus.B.setimmediatevalue(5)
        self.log.debug("Test DrvM created")
        self.busy_event = Event("%s_busy" % name)
        self.busy = False

    @cocotb.coroutine
    def _acquire_lock(self):
        if self.busy:
            yield self.busy_event.wait()
        self.busy_event.clear()
        self.busy = True

    def _release_lock(self):
        self.busy = False
        self.busy_event.set()


    @cocotb.coroutine
    def write(self, value_a, value_b, sync=True):
        """
        """
        yield self._acquire_lock()

        if sync:
            yield RisingEdge(self.clock)
        self.bus.A <= value_a
        self.bus.B <= value_b

        self._release_lock()
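Below is a hedged usage sketch for the driver above: a small cocotb test in the same yield-based style. The DUT handle names (dut.clk and the A/B/X bus signals) are assumptions for illustration, not taken from the kirknodeng/cocotb project.

import cocotb
from cocotb.triggers import RisingEdge

@cocotb.test()
def test_drv_smoke(dut):
    # With an empty bus name the signals resolve directly as dut.A, dut.B, dut.X.
    drv = test_drv(dut, "", dut.clk)
    yield drv.write(1, 2)              # synchronised to the rising clock edge
    yield drv.write(3, 4, sync=False)  # drive immediately without waiting
    yield RisingEdge(dut.clk)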
Code example #2
class BusAgent(Agent):
    '''
    This component implements the powlib bus protocol, an interface through
    which both simulation and hardware components can access a powlib
    interconnect. Please note that "bus" in this context refers to the
    interface protocol.
    '''
    def __init__(self, wrInterface, rdInterface, baseAddr=None, passive=False):
        '''
        Constructor. wrInterface and rdInterface should each be a HandshakeInterface
        with the signals addr, data, be, and op. wrInterface should be associated
        with the writing SimHandles, whereas rdInterface should be associated with
        the reading SimHandles. baseAddr should be the base address of the interface;
        it is optional if there is no intent to read or the BusAgent is passive.
        '''

        drivers = Namespace(
            wr=HandshakeWriteDriver(interface=wrInterface),
            rd=HandshakeReadDriver(
                interface=rdInterface)) if not passive else None
        monitors = Namespace(wr=HandshakeMonitor(interface=wrInterface),
                             rd=HandshakeMonitor(interface=rdInterface))

        Agent.__init__(self, drivers=drivers, monitors=monitors)

        self.__rdEvent = Event()
        self.__baseAddr = baseAddr

    inport = property(lambda self: self._drivers.wr.inport)
    outport = property(lambda self: self._monitors.rd.outport)

    def _behavior(self):
        '''
        The behavior is only needed by the read coroutine, to indicate when the
        data has been read.
        '''
        self.__rdEvent.set()

    def write(self, addr, data, be=None, op=OP_WRITE):
        '''
        Writes data over the bus interface. addr refers to the destination
        address. data is the data to write. be is the byte-enable mask. op
        is the operation, which defaults to write.
        '''
        if be is None:
            beWidth = len(self._drivers.wr._interface.be)
            be = int(((1 << beWidth) - 1))
        self._drivers.wr.write(
            data=Transaction(addr=addr, data=data, be=be, op=op))

    @coroutine
    def read(self, addr):
        '''
        Reads data from the bus interface. addr specifies where to read. If
        addr is an int, then only a single read will occur. If addr is a list,
        a burst of reads will occur.
        '''

        # Create the receiving port. Clear the event.
        rdInport = InPort(block=self)
        self._monitors.rd.outport.connect(rdInport)

        # Prepare the list of read transactions.
        isinstance_long = False
        try:
            isinstance_long = isinstance(addr, long)
        except NameError:
            pass
        if isinstance(addr, int) or isinstance_long:
            addrList = [addr]
        elif isinstance(addr, list):
            addrList = addr
        else:
            raise TypeError("addr must be either int/long or list.")

        # Write out the burst read.
        for each_addr in addrList:
            self.write(addr=each_addr, data=self.__baseAddr, op=OP_READ)

        # Read each word.
        transList = []
        for each_read in range(len(addrList)):
            self.__rdEvent.clear()
            yield self.__rdEvent.wait()
            assert (rdInport.ready())
            trans = rdInport.read()
            transList.append(trans)

        # Disconnect the port.
        self._monitors.rd.outport.disconnect(rdInport)

        # If only a single read occurred, return just the single transaction.
        # If a burst, then return the list.
        if len(transList) == 1:
            raise ReturnValue(transList[0])
        else:
            raise ReturnValue(transList)
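A hedged sketch of how the agent above might be exercised from a cocotb coroutine. Only the write()/read() signatures come from the class; the addresses, data, and the surrounding coroutine are illustrative assumptions.

@coroutine
def bus_agent_smoke(agent):
    # Single write; the full byte-enable mask is derived inside write().
    agent.write(addr=0x10, data=0xdeadbeef)
    # A single int address yields one transaction, a list yields a burst.
    single = yield agent.read(0x10)
    burst = yield agent.read([0x10, 0x14, 0x18])
    raise ReturnValue((single, burst))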
Code example #3
class Driver(object):
    """

    Class defining the standard interface for a driver within a testbench

    The driver is responsible for serialising transactions onto the physical
    pins of the interface.  This may consume simulation time.
    """
    def __init__(self):
        """
        Constructor for a driver instance
        """
        # self._busy = Lock()
        self._pending = Event(name="Driver._pending")
        self._sendQ = deque()

        # Subclasses may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.driver.%s" % (self.__class__.__name__))

        # Create an independent coroutine which can send stuff
        self._thread = cocotb.scheduler.add(self._send_thread())

    def kill(self):
        if self._thread:
            self._thread.kill()
            self._thread = None

    def append(self, transaction, callback=None, event=None):
        """
        Queue up a transaction to be sent over the bus.

        Mechanisms are provided to permit the caller to know when the
        transaction is processed

        callback: optional function to be called when the transaction has been
        sent

        event: event to be set when the transaction has been sent
        """
        self._sendQ.append((transaction, callback, event))
        self._pending.set()

    def clear(self):
        """
        Clear any queued transactions without sending them onto the bus
        """
        self._sendQ = deque()

    @coroutine
    def send(self, transaction, sync=True):
        """
        Blocking send call (hence must be "yielded" rather than called)

        Sends the transaction over the bus

        Args:
            transaction (any): the transaction to send

        Kwargs:
            sync (boolean): synchronise the transfer by waiting for a rising clock edge
        """
        yield self._send(transaction, None, None, sync=sync)

    def _driver_send(self, transaction, sync=True):
        """
        Actual implementation of the send.

        Subclasses should override this method to implement the actual send
        routine.
        """
        raise NotImplementedError("Subclasses of Driver should define a "
                                  "_driver_send coroutine")

    @coroutine
    def _send(self, transaction, callback, event, sync=True):
        """
        Assumes the caller has already acquired the busy lock.

        Releases the busy lock once sending is complete.
        """
        yield self._driver_send(transaction, sync=sync)

        # Notify the world that this transaction is complete
        if event:
            event.set()
        if callback:
            callback(transaction)

        # No longer hogging the bus
        # self.busy.release()

    @coroutine
    def _send_thread(self):
        while True:

            # Sleep until we have something to send
            while not self._sendQ:
                self._pending.clear()
                yield self._pending.wait()

            synchronised = False

            # Send in all the queued packets,
            # only synchronise on the first send
            while self._sendQ:
                transaction, callback, event = self._sendQ.popleft()
                self.log.debug("Sending queued packet...")
                yield self._send(transaction, callback, event,
                                 sync=not synchronised)
                synchronised = True
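The base class above only requires subclasses to implement _driver_send. A minimal illustrative subclass is sketched below, reusing the coroutine and RisingEdge names already imported in this listing; the signal and clock handles are assumptions.

class SimpleWordDriver(Driver):
    """Illustrative Driver subclass that drives each transaction onto a single signal."""
    def __init__(self, signal, clock):
        self.signal = signal
        self.clock = clock
        Driver.__init__(self)

    @coroutine
    def _driver_send(self, transaction, sync=True):
        if sync:
            yield RisingEdge(self.clock)
        self.signal <= transaction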
Code example #4
class Monitor:
    """Base class for Monitor objects.

    Monitors are passive 'listening' objects that monitor pins going in or out of a DUT.
    This class should not be used directly,
    but should be sub-classed and the internal :meth:`_monitor_recv` method should be overridden.
    This :meth:`_monitor_recv` method should capture some behavior of the pins, form a transaction,
    and pass this transaction to the internal :meth:`_recv` method.
    The :meth:`_monitor_recv` method is added to the cocotb scheduler during the ``__init__`` phase,
    so it should not be awaited anywhere.

    The primary use of a Monitor is as an interface for a :class:`~cocotb.scoreboard.Scoreboard`.

    Args:
        callback (callable): Callback to be called with each recovered transaction
            as the argument. If the callback isn't used, received transactions will
            be placed on a queue and the event used to notify any consumers.
        event (cocotb.triggers.Event): Event that will be called when a transaction
            is received through the internal :meth:`_recv` method.
            `Event.data` is set to the received transaction.
    """
    def __init__(self, callback=None, event=None):
        self._event = event
        self._wait_event = Event()
        self._recvQ = deque()
        self._callbacks = []
        self.stats = MonitorStatistics()

        # Sub-classes may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.monitor.%s" % (type(self).__qualname__))

        if callback is not None:
            self.add_callback(callback)

        # Create an independent coroutine which can receive stuff
        self._thread = cocotb.scheduler.add(self._monitor_recv())

    def kill(self):
        """Kill the monitor coroutine."""
        if self._thread:
            self._thread.kill()
            self._thread = None

    def __len__(self):
        return len(self._recvQ)

    def __getitem__(self, idx):
        return self._recvQ[idx]

    def add_callback(self, callback):
        """Add function as a callback.

        Args:
            callback (callable): The function to call back.
        """
        self.log.debug("Adding callback of function %s to monitor",
                       callback.__qualname__)
        self._callbacks.append(callback)

    @coroutine
    async def wait_for_recv(self, timeout=None):
        """With *timeout*, :meth:`.wait` for transaction to arrive on monitor
        and return its data.

        Args:
            timeout: The timeout value for :class:`~.triggers.Timer`.
                Defaults to ``None``.

        Returns:
            Data of received transaction.
        """
        if timeout:
            t = Timer(timeout)
            fired = await First(self._wait_event.wait(), t)
            if fired is t:
                return None
        else:
            await self._wait_event.wait()

        return self._wait_event.data

    # this is not `async` so that we fail in `__init__` on subclasses without it
    def _monitor_recv(self):
        """Actual implementation of the receiver.

        Sub-classes should override this method to implement the actual receive
        routine and call :meth:`_recv` with the recovered transaction.
        """
        raise NotImplementedError("Attempt to use base monitor class without "
                                  "providing a ``_monitor_recv`` method")

    def _recv(self, transaction):
        """Common handling of a received transaction."""

        self.stats.received_transactions += 1

        # either callback based consumer
        for callback in self._callbacks:
            callback(transaction)

        # Or queued with a notification
        if not self._callbacks:
            self._recvQ.append(transaction)

        if self._event is not None:
            self._event.set(data=transaction)

        # If anyone was waiting then let them know
        if self._wait_event is not None:
            self._wait_event.set(data=transaction)
            self._wait_event.clear()
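A minimal subclass sketch for the Monitor above, sampling a single signal on every rising clock edge and handing the value to _recv. It assumes RisingEdge is imported from cocotb.triggers; the signal and clock handles are illustrative.

class SignalMonitor(Monitor):
    """Illustrative Monitor subclass that records a signal's value each clock edge."""
    def __init__(self, signal, clock, callback=None, event=None):
        self.signal = signal
        self.clock = clock
        Monitor.__init__(self, callback=callback, event=event)

    async def _monitor_recv(self):
        while True:
            await RisingEdge(self.clock)
            self._recv(int(self.signal.value))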
Code example #5
class Monitor(object):

    def __init__(self, callback=None, event=None):
        """
        Constructor for a monitor instance

        callback will be called with each recovered transaction as the argument

        If the callback isn't used, received transactions will be placed on a
        queue and the event used to notify any consumers.
        """
        self._event = event
        self._wait_event = Event()
        self._recvQ = deque()
        self._callbacks = []
        self.stats = MonitorStatistics()

        # Subclasses may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.monitor.%s" % (self.__class__.__name__))

        if callback is not None:
            self.add_callback(callback)

        # Create an independent coroutine which can receive stuff
        self._thread = cocotb.scheduler.add(self._monitor_recv())

    def kill(self):
        if self._thread:
            self._thread.kill()
            self._thread = None

    def __len__(self):
        return len(self._recvQ)

    def __getitem__(self, idx):
        return self._recvQ[idx]

    def add_callback(self, callback):
        self.log.debug("Adding callback of function %s to monitor" %
                       (callback.__name__))
        self._callbacks.append(callback)

    @coroutine
    def wait_for_recv(self, timeout=None):
        if timeout:
            t = Timer(timeout)
            fired = yield [self._wait_event.wait(), t]
            if fired is t:
                raise ReturnValue(None)
        else:
            yield self._wait_event.wait()

        pkt = self._wait_event.data
        raise ReturnValue(pkt)

    @coroutine
    def _monitor_recv(self):
        """
        Actual implementation of the receiver.

        Subclasses should override this method to implement the actual receive
        routine and call self._recv() with the recovered transaction.
        """
        raise NotImplementedError("Attempt to use base monitor class without "
                                  "providing a _monitor_recv method")

    def _recv(self, transaction):
        """Common handling of a received transaction."""

        self.stats.received_transactions += 1

        # either callback based consumer
        for callback in self._callbacks:
            callback(transaction)

        # Or queued with a notification
        if not self._callbacks:
            self._recvQ.append(transaction)

        if self._event is not None:
            self._event.set()

        # If anyone was waiting then let them know
        if self._wait_event is not None:
            self._wait_event.set(data=transaction)
            self._wait_event.clear()
Code example #6
File: wishbone.py  Project: mkreider/cocotb2
class WishboneMaster(Wishbone):
    """Wishbone master
    """
    _acked_ops          = 0  # acknowledgement counter; compared with the op count, wait for equality before releasing the lock
    _res_buf            = [] # stores read data / ack / err replies
    _aux_buf            = [] # stores read/write order and meta info
    _op_cnt             = 0  # number of operations we have been issued
    _clk_cycle_count    = 0
    _timeout            = None

    
    def __init__(self, entity, name, clock, timeout=5000):
        Wishbone.__init__(self, entity, name, clock)
        sTo = ", no cycle timeout"        
        if not (timeout is None):
            sTo = ", cycle timeout is %u clockcycles" % timeout
        self.log.info("Wishbone Master created%s" % sTo)
        self.busy_event = Event("%s_busy" % name)
        self.busy = False
        self._timeout = timeout
        
    @coroutine 
    def _clk_cycle_counter(self):
        """
            Cycle counter to time bus operations
        """
        clkedge = RisingEdge(self.clock)
        self._clk_cycle_count = 0
        while self.busy:
            yield clkedge
            self._clk_cycle_count += 1    
  
    @coroutine
    def _open_cycle(self):
        #Open new wishbone cycle        
        if self.busy:
            self.log.error("Opening Cycle, but WB Driver is already busy. Someting's wrong")
            yield self.busy_event.wait()
        self.busy_event.clear()
        self.busy       = True
        cocotb.fork(self._read())
        cocotb.fork(self._clk_cycle_counter()) 
        self.bus.cyc    <= 1
        self._acked_ops = 0  
        self._res_buf   = [] 
        self._aux_buf   = []
        self.log.debug("Opening cycle, %u Ops" % self._op_cnt)
        
    @coroutine    
    def _close_cycle(self):
        #Close current wishbone cycle  
        clkedge = RisingEdge(self.clock)
        count           = 0
        last_acked_ops  = 0
        #Wait for all operations to be acknowledged by the slave before lowering the cycle line.
        #This is not mandated by the bus standard, but a crossbar might send acks to the wrong master
        #if we don't wait. We don't want to risk that; it could hang the bus.
        while self._acked_ops < self._op_cnt:
            if last_acked_ops != self._acked_ops:
                self.log.debug("Waiting for missing acks: %u/%u" % (self._acked_ops, self._op_cnt) )
            last_acked_ops = self._acked_ops    
            #check for timeout when finishing the cycle            
            count += 1
            if self._timeout is not None:
                if count > self._timeout:
                    raise TestFailure("Timeout of %u clock cycles reached when waiting for reply from slave" % self._timeout)
            yield clkedge
            
        self.busy = False
        self.busy_event.set()
        self.bus.cyc <= 0 
        self.log.debug("Closing cycle")
        yield clkedge        

    
    @coroutine
    def _wait_stall(self):
        """Wait for stall to be low before continuing
        """
        clkedge = RisingEdge(self.clock)
        count = 0
        if hasattr(self.bus, "stall"):
            count = 0            
            while self.bus.stall.getvalue():
                yield clkedge
                count += 1
                if self._timeout is not None:
                    if count > self._timeout:
                        raise TestFailure("Timeout of %u clock cycles reached while stalled by the slave" % self._timeout)
            self.log.debug("Stalled for %u cycles" % count)
        raise ReturnValue(count)
    
    
    @coroutine
    def _wait_ack(self):
        """Wait for ACK on the bus before continuing
        """
        #wait for acknowledgement before continuing - Classic Wishbone without pipelining
        clkedge = RisingEdge(self.clock)
        count = 0
        if not hasattr(self.bus, "stall"):
            while not self._get_reply():
                yield clkedge
                count += 1
            self.log.debug("Waited %u cycles for ackknowledge" % count)
        raise ReturnValue(count)    
    
    
    def _get_reply(self):
        #helper function for slave acks
        tmpAck = int(self.bus.ack.getvalue())
        tmpErr = 0
        tmpRty = 0
        if hasattr(self.bus, "err"):        
            tmpErr = int(self.bus.err.getvalue())
        if hasattr(self.bus, "rty"):        
            tmpRty = int(self.bus.rty.getvalue())
        #check if more than one line was raised    
        if ((tmpAck + tmpErr + tmpRty)  > 1):
            raise TestFailure("Slave raised more than one reply line at once! ACK: %u ERR: %u RTY: %u" % (tmpAck, tmpErr, tmpRty))
        #return 0 if no reply, 1 for ACK, 2 for ERR, 3 for RTY. use 'replyTypes' Dict for lookup
        return (tmpAck + 2 * tmpErr + 3 * tmpRty)
        
    
    @coroutine 
    def _read(self):
        """
            Reader for slave replies
        """
        count = 0
        clkedge = RisingEdge(self.clock)
        while self.busy:
            reply = self._get_reply()    
            # valid reply?            
            if(bool(reply)):
                datrd = int(self.bus.datrd.getvalue())
                #append reply and meta info to result buffer
                tmpRes =  wbr(ack=reply, sel=None, adr=None, datrd=datrd, datwr=None, waitIdle=None, waitStall=None, waitAck=self._clk_cycle_count)               
                self._res_buf.append(tmpRes)
                self._acked_ops += 1
            yield clkedge
            count += 1    

    
    @coroutine
    def _drive(self, we, adr, datwr, sel, idle):
        """
            Drive the Wishbone Master Out Lines
        """
    
        clkedge = RisingEdge(self.clock)
        if self.busy:
            # insert requested idle cycles            
            if idle is not None:
                idlecnt = idle
                while idlecnt > 0:
                    idlecnt -= 1
                    yield clkedge
            # drive outputs    
            self.bus.stb    <= 1
            self.bus.adr    <= adr
            self.bus.sel    <= sel
            self.bus.datwr  <= datwr
            self.bus.we     <= we
            yield clkedge
            #deal with flow control (pipelined wishbone)
            stalled = yield self._wait_stall()
            #append operation and meta info to auxiliary buffer
            self._aux_buf.append(wba(sel, adr, datwr, stalled, idle, self._clk_cycle_count))
            #reset strobe and write enable without advancing time
            self.bus.stb    <= 0
            self.bus.we     <= 0
            # non pipelined wishbone
            yield self._wait_ack()
        else:
           self.log.error("Cannot drive the Wishbone bus outside a cycle!")


  
    @coroutine
    def send_cycle(self, arg):
        """
            The main sending routine        
        
        Args:
            list(WishboneOperations)
            
        """
        cnt = 0
        result = []  # collected replies; stays empty if there are no operations
        clkedge = RisingEdge(self.clock)
        yield clkedge
        if is_sequence(arg):
            if len(arg) < 1:
                self.log.error("List contains no operations to carry out")
            else:
         
                self._op_cnt = len(arg)
                firstword = True
                for op in arg:
                    if firstword:
                        firstword = False
                        yield self._open_cycle()
                        
                    if op.dat is not None:
                        we  = 1
                        dat = op.dat
                    else:
                        we  = 0
                        dat = 0
                    yield self._drive(we, op.adr, dat, op.sel, op.idle)
                    self.log.debug("#%3u WE: %s ADR: 0x%08x DAT: 0x%08x SEL: 0x%1x IDLE: %3u" % (cnt, we, op.adr, dat, op.sel, op.idle))
                    cnt += 1
                yield self._close_cycle()
                
                #Combine the result and auxiliary buffers so each entry carries both operation and meta info
                for res, aux in zip(self._res_buf, self._aux_buf):
                    res.datwr       = aux.datwr
                    res.sel         = aux.sel
                    res.adr         = aux.adr
                    res.waitIdle    = aux.waitIdle
                    res.waitStall   = aux.waitStall
                    res.waitack    -= aux.ts
                    result.append(res)
                
            raise ReturnValue(result)
        else:
            self.log.error("Expecting a list")
            raise ReturnValue(None)    
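A hedged usage sketch for send_cycle. The driver only relies on operations exposing adr, dat, sel and idle, so a hypothetical namedtuple is used here as a stand-in for the project's own Wishbone operation class; the addresses and data are illustrative.

from collections import namedtuple

# Hypothetical stand-in for the project's Wishbone operation objects.
WbOpStub = namedtuple("WbOpStub", ["adr", "dat", "sel", "idle"])

@coroutine
def wishbone_smoke(wbm):
    ops = [
        WbOpStub(adr=0x0, dat=0x12345678, sel=0xf, idle=0),  # write
        WbOpStub(adr=0x4, dat=None,       sel=0xf, idle=2),  # read, 2 idle cycles first
    ]
    replies = yield wbm.send_cycle(ops)
    raise ReturnValue(replies)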
Code example #7
class I2CIoLayerAnalyser:
    def __init__(self, helperSlave, listOperation):

        self.sda = helperSlave.io.sda_rd
        self.scl = helperSlave.io.scl_rd
        self.clk = helperSlave.io.clk

        self.event_RisingEdge = Event()
        self.event_FallingEdge = Event()

        self.event_Start = Event()
        self.event_Stop = Event()

        self.listOp = list()
        self.refListOp = listOperation
        self.dataBinRead = list()

        self.startSeq = 0

    #==========================================================================
    # Start to analyse the bus
    #==========================================================================
    @cocotb.coroutine
    def start(self):

        self.fork_falling = cocotb.fork(self._FallingEdgeDetection())
        self.fork_rising = cocotb.fork(self._RisingEdgeDetection())
        self.fork_start = cocotb.fork(self._startDetection())
        self.fork_stop = cocotb.fork(self._stopDetection())
        yield self._analyser()

    #==========================================================================
    # Stop all processes
    #==========================================================================
    def stop(self):
        self.fork_falling.kill()
        self.fork_rising.kill()
        self.fork_start.kill()
        self.fork_stop.kill()

    #==========================================================================
    # Store all events happening on the bus
    #==========================================================================
    @cocotb.coroutine
    def _analyser(self):

        self.listOp = list()

        # Start ---------------------------------------------------------------
        yield self.event_Start.wait()
        yield RisingEdge(self.clk)
        self.startSeq = 0

        while True:

            dataBinRead = list()
            index = 0
            # Read data -----------------------------------------------------------
            while index < I2CConfig.dataWdith:
                yield self.event_RisingEdge.wait()
                dataBinRead.append(int(self.sda))
                #print("data ", index, " value " , int(self.sda), "index ", index, "start " , self.startSeq )

                if self.startSeq == 1:
                    index = 0
                    self.startSeq = 0
                    dataBinRead = list()
                    dataBinRead.append(int(self.sda))

                index += 1

            dataInRead = int("".join([str(x) for x in dataBinRead]), 2)
            self.listOp.append(DATA(dataInRead))

            # Read ACK ------------------------------------------------------------
            yield self.event_RisingEdge.wait()
            if int(self.sda) == 0:
                self.listOp.append(ACK())
            else:
                self.listOp.append(NACK())

            #print()

    #==========================================================================
    # Detect the start condition
    #==========================================================================
    @cocotb.coroutine
    def _startDetection(self):
        yield RisingEdge(self.clk)
        while True:
            prev = int(self.sda)
            yield RisingEdge(self.clk)
            if prev == 1 and int(self.sda) == 0:
                if int(self.scl) == 1:
                    self.event_Start.set()
                    self.listOp.append(START())
                    self.startSeq = 1

    #==========================================================================
    # Detect the stop condition
    #==========================================================================
    @cocotb.coroutine
    def _stopDetection(self):
        yield RisingEdge(self.clk)
        while True:
            prev = int(self.sda)
            yield RisingEdge(self.clk)
            if prev == 0 and int(self.sda) == 1:
                if int(self.scl) == 1:
                    self.event_Stop.set()
                    self.listOp.append(STOP())

                    # check sequence...
                    for (op, ref) in zip(self.listOp, self.refListOp):

                        if isinstance(ref, START) and isinstance(op, START):
                            pass
                        elif isinstance(ref, STOP) and isinstance(op, STOP):
                            pass
                        elif isinstance(ref, ACK) and isinstance(op, ACK):
                            pass
                        elif isinstance(ref, NACK) and isinstance(op, NACK):
                            pass
                        elif (isinstance(ref, WRITE) or isinstance(
                                ref, READ)) and isinstance(op, DATA):
                            if ref.data != op.data:
                                print(("ref ", hex(ref.data), " op ",
                                       hex(op.data)))
                                assertEquals(ref.data, op.data,
                                             "Analyser data ERROR")

                        else:
                            assertEquals(True, False,
                                         "%s is not equal to %s" % (ref, op))

    #==========================================================================
    # Detect a Rising edge of scl
    #==========================================================================
    @cocotb.coroutine
    def _RisingEdgeDetection(self):
        while True:
            yield RisingEdge(self.scl)
            self.event_RisingEdge.set()

    #==========================================================================
    # Detect a Falling edge of scl
    #==========================================================================
    @cocotb.coroutine
    def _FallingEdgeDetection(self):
        while True:
            yield FallingEdge(self.scl)
            self.event_FallingEdge.set()
Code example #8
File: __init__.py  Project: tgingold/cocotb
class Driver(object):
    """

    Class defining the standard interface for a driver within a testbench

    The driver is responsible for serialising transactions onto the physical
    pins of the interface.  This may consume simulation time.
    """
    def __init__(self):
        """
        Constructor for a driver instance
        """
        # self._busy = Lock()
        self._pending = Event(name="Driver._pending")
        self._sendQ = deque()

        # Subclasses may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.driver.%s" % (self.__class__.__name__))

        # Create an independent coroutine which can send stuff
        self._thread = cocotb.scheduler.add(self._send_thread())

    def kill(self):
        if self._thread:
            self._thread.kill()
            self._thread = None

    def append(self, transaction, callback=None, event=None):
        """
        Queue up a transaction to be sent over the bus.

        Mechanisms are provided to permit the caller to know when the
        transaction is processed

        callback: optional function to be called when the transaction has been
        sent

        event: event to be set when the transaction has been sent
        """
        self._sendQ.append((transaction, callback, event))
        self._pending.set()

    def clear(self):
        """
        Clear any queued transactions without sending them onto the bus
        """
        self._sendQ = deque()

    @coroutine
    def send(self, transaction, sync=True):
        """
        Blocking send call (hence must be "yielded" rather than called)

        Sends the transaction over the bus

        Args:
            transaction (any): the transaction to send

        Kwargs:
            sync (boolean): synchronise the transfer by waiting for a rising clock edge
        """
        yield self._send(transaction, None, None, sync=sync)

    def _driver_send(self, transaction, sync=True):
        """
        Actual implementation of the send.

        Subclasses should override this method to implement the actual send
        routine.
        """
        raise NotImplementedError("Subclasses of Driver should define a "
                                  "_driver_send coroutine")

    @coroutine
    def _send(self, transaction, callback, event, sync=True):
        """
        Assumes the caller has already acquired the busy lock.

        Releases the busy lock once sending is complete.
        """
        yield self._driver_send(transaction, sync=sync)

        # Notify the world that this transaction is complete
        if event:
            event.set()
        if callback:
            callback(transaction)

        # No longer hogging the bus
        # self.busy.release()

    @coroutine
    def _send_thread(self):
        while True:

            # Sleep until we have something to send
            while not self._sendQ:
                self._pending.clear()
                yield self._pending.wait()

            synchronised = False

            # Send in all the queued packets,
            # only synchronise on the first send
            while self._sendQ:
                transaction, callback, event = self._sendQ.popleft()
                self.log.debug("Sending queued packet...")
                yield self._send(transaction, callback, event,
                                 sync=not synchronised)
                synchronised = True
Code example #9
class ResetDriver(Driver):
    '''
    This driver simply drives the system's resets.
    '''
    def __init__(self, interface, param_namespace, name=""):
        '''
        Constructor.
        interface       = The interface associated with the driver; it should contain
                          the cocotb handles to all the resets that need to be driven.
        param_namespace = A namespace with parameters associated with each reset
                          in interface.
        name            = String identifier needed for logging.
        '''
        self.__param_namespace = param_namespace
        self.__event = Event()
        self.__log = SimLog("cocotb.rsts.{}".format(name))
        Driver.__init__(self, interface)

    @coroutine
    def wait(self):
        '''
        Yield on this method as a way to wait until
        all resets finish their operation.
        '''
        if not self.__event.fired:
            yield self.__event.wait()
        yield NullTrigger()

    @coroutine
    def _drive(self):
        '''
        Implements the behavior of the reset driver.
        '''

        # Define a simple helper for extracting a parameter corresponding
        # to a reset.
        def add_param(params, param_name, param_dict):
            param = getattr(params, param_name, None)
            if param is not None:
                param_dict[param_name] = param

        # The running coroutines need to be collected.
        cos = []

        # Collect the parameters and run each reset's respective
        # start reset coroutine.
        for name, handle in vars(self._interface).items():
            params = getattr(self.__param_namespace, name, None)
            param_dict = {}
            add_param(params=params,
                      param_name="active_mode",
                      param_dict=param_dict)
            add_param(params=params,
                      param_name="associated_clock",
                      param_dict=param_dict)
            add_param(params=params,
                      param_name="wait_cycles",
                      param_dict=param_dict)
            add_param(params=params,
                      param_name="wait_time",
                      param_dict=param_dict)
            self.__log.info("Initiating reset {}...".format(name))
            cos.append(fork(start_reset(reset=handle, **param_dict)))

        # Wait until all start reset coroutines finish their operation.
        for co in cos:
            yield co.join()
        self.__event.set()
        self.__log.info("Resets have been de-asserted...")
Code example #10
class CocotbSDIOHostDriver(BusDriver):
    """
    An SDIO host driver for this testbench, driven directly from Cocotb
    """
    _signals = [
        "cmd_coco_dir", "cmd_coco_out", "cmd_coco_in", "data_coco_dir",
        "data_coco_out", "data_coco_in"
    ]

    def __init__(self, entity, name, clock):
        BusDriver.__init__(self, entity, name, clock)
        self.bus.cmd_coco_dir.setimmediatevalue(0)
        self.bus.data_coco_dir.setimmediatevalue(0x00)
        self.bus.data_coco_out.setimmediatevalue(0xff)
        self.bus.cmd_coco_out.setimmediatevalue(1)
        self.bus_width = 1

        # Locks implemented with cocotb Events; a plain threading.Lock does not work here
        self.cmd_bus_busy_event = Event("cmd_bus_busy")
        self.cmd_bus_busy = False
        self.data_write_aborted = False
        self.data_read_aborted = False

    @cocotb.coroutine
    def acquire_cmd_lock(self):
        if self.cmd_bus_busy:
            yield self.cmd_bus_busy_event.wait()
        self.cmd_bus_busy_event.clear()
        self.cmd_bus_busy = True

    def release_cmd_lock(self):
        self.cmd_bus_busy = False
        self.cmd_bus_busy_event.set()

    @cocotb.coroutine
    def send_cmd(self, cmd):
        """
        Transmit a command; the CRC is calculated and appended here
        """
        crc7 = sdio_utils.crc7_gen(number=cmd[47:8].integer)
        cmd[7:1] = BinaryValue(value=crc7, bits=7, bigEndian=False).integer
        self.log.debug("SDIO host sending CMD%d: 'b%s" %
                       (cmd[45:40].integer, cmd.binstr))
        self.bus.cmd_coco_dir <= 1
        for x in range(47, -1, -1):
            # Now shift this out bit by bit, host sends on falling edge
            yield FallingEdge(self.clock)
            self.bus.cmd_coco_out <= cmd[x].integer
            if x == 0:
                # Deassert the output enable onto the bus on the final bit
                self.bus.cmd_coco_dir <= 0

    @cocotb.coroutine
    def get_cmd_response_bits(self, cmd, timeout=1000, timeout_possible=False):
        """
        Read the response from the bus
        """
        cmd_num = cmd[45:40].integer
        response_type, response_length = sdio_utils.get_response_type(cmd_num)
        response_bit_counter = response_length
        response = BinaryValue(value=0, bits=response_length, bigEndian=False)
        if response_type is None:
            return
        timeout_count = 0
        while timeout_count < timeout:
            yield RisingEdge(self.clock)
            if response_bit_counter == response_length:
                # Response not started yet
                if self.bus.cmd_coco_in.value == 0:
                    # Start bit of response
                    response[response_bit_counter - 1] = int(
                        self.bus.cmd_coco_in.value)
                    response_bit_counter -= 1
            elif response_bit_counter > 0:
                # Shift in the response bits
                response[response_bit_counter - 1] = int(
                    self.bus.cmd_coco_in.value)
                response_bit_counter -= 1
            else:
                break
            timeout_count += 1
        if timeout_count == timeout:
            timeout_message = "Timeout waiting for response to cmd%d: %x ('b%s)" % (
                cmd_num, cmd.integer, cmd.binstr)
            if timeout_possible:
                self.log.info(timeout_message)
                raise ReturnValue("timeout")
            else:
                raise TestFailure(timeout_message)
        raise ReturnValue(response)

    @cocotb.coroutine
    def data_bus_read(self,
                      count=1,
                      timeout=None,
                      could_abort=False,
                      final_block=False):
        """
        Read the data bytes off the SD bus
        """
        data = []
        # Wait for the start bit
        timeout_count = 0
        timed_out = False
        timens_before = get_sim_time(
            units='ns'
        )  # Value we started at - SDIO timeout is 1ms, wait that long
        while BinaryValue(value=int(self.bus.data_coco_in),
                          bits=8,
                          bigEndian=False)[0] == 1:
            yield RisingEdge(self.clock)
            self.log.debug("Waited a clock for data bus bit 0 to be 0")
            if timeout:
                # Timeout in cycles has been specified
                timeout_count += 1  # I think it's allowed up to a second but in simulation let's just wait an infeasibly long time
                if timeout_count == timeout:
                    timed_out = True
            elif int(get_sim_time(units='ns') - timens_before) >= 1000000:
                # 1ms timeout
                timed_out = True

            if timed_out:
                if could_abort:
                    self.log.info(
                        "Timed out waiting for start bit of a block of data. Assuming aborted transfer."
                    )
                    raise ReturnValue((data, "aborted"))
                else:
                    raise sdio_host.SDIODataError(
                        "Timeout waiting for start bit of data transfer")

        self.log.debug("SDIO host model data receive start bit detected")
        for byte in range(0, count):
            current_byte = BinaryValue(value=0, bits=8, bigEndian=False)
            if self.bus_width == 4:
                yield RisingEdge(self.clock)
                # The bus input has to be cast explicitly to a BinaryValue before bit indexing.
                dbus = BinaryValue(value=int(self.bus.data_coco_in),
                                   bits=8,
                                   bigEndian=False)
                current_byte[7] = dbus[3].integer
                current_byte[6] = dbus[2].integer
                current_byte[5] = dbus[1].integer
                current_byte[4] = dbus[0].integer
                yield RisingEdge(self.clock)
                dbus = BinaryValue(value=int(self.bus.data_coco_in),
                                   bits=8,
                                   bigEndian=False)
                current_byte[3] = dbus[3].integer
                current_byte[2] = dbus[2].integer
                current_byte[1] = dbus[1].integer
                current_byte[0] = dbus[0].integer
            else:
                # 1-bit mode
                for bit in range(7, -1, -1):
                    yield RisingEdge(self.clock)
                    dbus = BinaryValue(value=int(self.bus.data_coco_in),
                                       bits=8,
                                       bigEndian=False)
                    current_byte[bit] = dbus[0].integer
            self.log.debug("SDIO host model received byte num %03d: %02x" %
                           (byte, current_byte.integer))
            data.append(current_byte.integer)
            if could_abort and self.data_read_aborted:
                self.log.info("Detected data read aborted after %d bytes" %
                              (byte))
                raise ReturnValue((data, "aborted"))

        # CRC time
        crc0 = BinaryValue(value=0, bits=16, bigEndian=False)
        crc1 = BinaryValue(value=0, bits=16, bigEndian=False)
        crc2 = BinaryValue(value=0, bits=16, bigEndian=False)
        crc3 = BinaryValue(value=0, bits=16, bigEndian=False)
        for bit in range(15, -1, -1):
            yield RisingEdge(self.clock)
            dbus = BinaryValue(value=int(self.bus.data_coco_in),
                               bits=8,
                               bigEndian=False)
            crc0[bit] = dbus[0].integer
            if self.bus_width == 4:
                crc1[bit] = dbus[1].integer
                crc2[bit] = dbus[2].integer
                crc3[bit] = dbus[3].integer
            if could_abort and self.data_read_aborted:
                self.log.info(
                    "Detected data read aborted during CRC, already received %d bytes"
                    % (len(data)))
                raise ReturnValue((data, "aborted"))

        if self.bus_width == 4:
            num_bits_to_crc = count * 2
            # Calculate our CRC on the data
            d0, d1, d2, d3 = sdio_utils.crc16_array_prep(self.bus_width, data)
            crc16_d0 = BinaryValue(value=sdio_utils.crc16_gen(
                d0, num_bits_to_crc),
                                   bits=16,
                                   bigEndian=False)
            crc16_d1 = BinaryValue(value=sdio_utils.crc16_gen(
                d1, num_bits_to_crc),
                                   bits=16,
                                   bigEndian=False)
            crc16_d2 = BinaryValue(value=sdio_utils.crc16_gen(
                d2, num_bits_to_crc),
                                   bits=16,
                                   bigEndian=False)
            crc16_d3 = BinaryValue(value=sdio_utils.crc16_gen(
                d3, num_bits_to_crc),
                                   bits=16,
                                   bigEndian=False)
            self.log.debug(
                "Received CRC for lane 0: %04x, calculated CRC: %04x" %
                (crc0.integer, crc16_d0.integer))
            self.log.debug(
                "Received CRC for lane 1: %04x, calculated CRC: %04x" %
                (crc1.integer, crc16_d1.integer))
            self.log.debug(
                "Received CRC for lane 2: %04x, calculated CRC: %04x" %
                (crc2.integer, crc16_d2.integer))
            self.log.debug(
                "Received CRC for lane 3: %04x, calculated CRC: %04x" %
                (crc3.integer, crc16_d3.integer))
            crc_pass = crc0.integer == crc16_d0.integer
            crc_pass &= crc1.integer == crc16_d1.integer
            crc_pass &= crc2.integer == crc16_d2.integer
            crc_pass &= crc3.integer == crc16_d3.integer
        else:
            num_bits_to_crc = count * 8
            crc16_d0 = BinaryValue(value=sdio_utils.crc16_gen(
                data, num_bits_to_crc),
                                   bits=16,
                                   bigEndian=False)
            self.log.debug(
                "Received CRC for lane 0: %04x, calculated CRC: %04x" %
                (crc0.integer, crc16_d0.integer))
            crc_pass = crc0.integer == crc16_d0.integer

        if crc_pass:
            pass
        elif not crc_pass and not self.data_read_aborted:
            raise sdio_host.SDIODataError(
                "Extended RW IO read data CRC error.")
        elif self.data_read_aborted:
            self.log.info(
                "Detected data read aborted during block read, CRC didn't match but that's to be expected"
            )
            raise ReturnValue((data, "aborted"))

        raise ReturnValue((data, "okay"))

    def read_wait(self, asserting=True):
        """
        DAT[2] is used to indicate to the device during a multi-block read (CMD53) that we don't
        want the next block of data yet
        """
        if asserting:
            # Deposit the values and set the direction
            self.bus.data_coco_dir[2] <= 1
            self.bus.data_coco_out[2] <= 0
        else:
            self.bus.data_coco_dir[2] <= 0
            self.bus.data_coco_out[2] <= 1

    @cocotb.coroutine
    def data_bus_write(self,
                       data=None,
                       timeout=4000,
                       could_abort=False,
                       final_block=False):
        """
        Write out the bytes in data (which should be a list of integers each no bigger than a byte) on the data
        bus and append the CRC.

        It's up to the caller to pass the exact right number of bytes for the command.

        We'll return the block crc response and wait until it's all done being written by the host
        """
        num_bytes = len(data)
        self.log.debug("Writing %d bytes onto data bus" % num_bytes)

        self.bus.data_coco_dir[0] <= 1
        if self.bus_width == 4:
            self.bus.data_coco_dir[1] <= 1
            self.bus.data_coco_dir[2] <= 1
            self.bus.data_coco_dir[3] <= 1

        # First the start bit(s)
        yield FallingEdge(self.clock)
        if self.bus_width == 4:
            self.bus.data_coco_out[0] <= 0
            self.bus.data_coco_out[1] <= 0
            self.bus.data_coco_out[2] <= 0
            self.bus.data_coco_out[3] <= 0
        elif self.bus_width == 1:
            self.bus.data_coco_out[0] <= 0

        num_bits_to_crc = 0
        # Next the data
        for byte_num in range(0, len(data)):
            byte = BinaryValue(value=data[byte_num], bits=8, bigEndian=False)
            if self.bus_width == 4:
                # See section 3.6.1/figure 3-9 (page 9) of the SD physical spec for how the data is presented
                yield FallingEdge(self.clock)
                self.log.debug("Data write bit %d, 4-bit MSNibble: %1x" %
                               (num_bits_to_crc, byte[7:4].integer))
                self.bus.data_coco_out <= byte[7:4].integer | (0xf << 4)
                yield FallingEdge(self.clock)
                self.log.debug("Data write bit %d, 4-bit LSNibble: %1x" %
                               (num_bits_to_crc + 1, byte[3:0].integer))
                self.log.debug("    SDIO host data write byte %02d: %02x" %
                               (byte_num, byte))
                self.bus.data_coco_out <= byte[3:0].integer | (0xf << 4)
                num_bits_to_crc += 2
            else:
                self.log.debug("    SDIO host data write byte %02d: %02x" %
                               (byte_num, byte))
                for bit in range(7, -1, -1):
                    yield FallingEdge(self.clock)
                    self.bus.data_coco_out <= byte[bit].integer | (0x7f << 1)
                    num_bits_to_crc += 1

        # Now the CRC
        if self.bus_width == 4:
            d0, d1, d2, d3 = sdio_utils.crc16_array_prep(self.bus_width, data)
            crc16_d0 = BinaryValue(value=sdio_utils.crc16_gen(
                d0, num_bits_to_crc),
                                   bits=16,
                                   bigEndian=False)
            crc16_d1 = BinaryValue(value=sdio_utils.crc16_gen(
                d1, num_bits_to_crc),
                                   bits=16,
                                   bigEndian=False)
            crc16_d2 = BinaryValue(value=sdio_utils.crc16_gen(
                d2, num_bits_to_crc),
                                   bits=16,
                                   bigEndian=False)
            crc16_d3 = BinaryValue(value=sdio_utils.crc16_gen(
                d3, num_bits_to_crc),
                                   bits=16,
                                   bigEndian=False)
            self.log.debug("input to CRC16, data bytes lane 3 : %s" % [
                BinaryValue(value=x, bits=8, bigEndian=False).binstr
                for x in d3
            ])
            self.log.debug("input to CRC16, data bytes lane 2 : %s" % [
                BinaryValue(value=x, bits=8, bigEndian=False).binstr
                for x in d2
            ])
            self.log.debug("input to CRC16, data bytes lane 1 : %s" % [
                BinaryValue(value=x, bits=8, bigEndian=False).binstr
                for x in d1
            ])
            self.log.debug("input to CRC16, data bytes lane 0 : %s" % [
                BinaryValue(value=x, bits=8, bigEndian=False).binstr
                for x in d0
            ])
            self.log.debug("CRC16, data lane 3 : %s (%04x)" %
                           (crc16_d3.binstr, crc16_d3.integer))
            self.log.debug("CRC16, data lane 2 : %s (%04x)" %
                           (crc16_d2.binstr, crc16_d2.integer))
            self.log.debug("CRC16, data lane 1 : %s (%04x)" %
                           (crc16_d1.binstr, crc16_d1.integer))
            self.log.debug("CRC16, data lane 0 : %s (%04x)" %
                           (crc16_d0.binstr, crc16_d0.integer))
            for bit in range(15, -1, -1):
                yield FallingEdge(self.clock)
                self.bus.data_coco_out[0] <= crc16_d0[bit].integer
                self.bus.data_coco_out[1] <= crc16_d1[bit].integer
                self.bus.data_coco_out[2] <= crc16_d2[bit].integer
                self.bus.data_coco_out[3] <= crc16_d3[bit].integer
        else:
            crc16 = BinaryValue(value=sdio_utils.crc16_gen(
                data, num_bits_to_crc),
                                bits=16,
                                bigEndian=False)
            for bit in range(15, -1, -1):
                yield FallingEdge(self.clock)
                self.bus.data_coco_out <= crc16[bit].integer | (0x7f << 1)

        yield FallingEdge(self.clock)
        # All CRC bits clocked out now, stop bit
        self.bus.data_coco_out <= 0xf
        self.bus.data_coco_dir <= 1
        yield FallingEdge(self.clock)
        self.bus.data_coco_dir <= 0  # Let bus go high-impedance

        # 2 cycles until CRC response and busy indication
        yield RisingEdge(self.clock)
        yield RisingEdge(self.clock)
        # Go to next rising edge
        yield RisingEdge(self.clock)

        # Now we should expect the start bit
        crc_resp = 0
        dbus = BinaryValue(value=int(self.bus.data_coco_in),
                           bits=8,
                           bigEndian=False)
        if dbus[0].integer != 0:
            self.log.error(
                "Write block CRC response did not start in the correct place.")
        for i in range(5):
            yield RisingEdge(self.clock)
            dbus = BinaryValue(value=int(self.bus.data_coco_in),
                               bits=8,
                               bigEndian=False)
            if i < 3:
                crc_resp |= dbus[0].integer << i
        if crc_resp != 0x2:
            self.log.info(
                "Write block CRC response incorrect, response was %x" %
                crc_resp)
            raise ReturnValue(crc_resp)

        timeout_count = 0
        dbus = BinaryValue(value=int(self.bus.data_coco_in),
                           bits=8,
                           bigEndian=False)
        while dbus[0].integer == 0:
            yield RisingEdge(self.clock)
            timeout_count += 1
            if timeout_count == timeout:
                self.log.error("Timeout waiting for device to write data")
                break
            dbus = BinaryValue(value=int(self.bus.data_coco_in),
                               bits=8,
                               bigEndian=False)
        raise ReturnValue(crc_resp)
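A hedged sketch of issuing one command with the host driver above, reusing the cocotb, BinaryValue and ReturnValue names already imported in this listing. The 48-bit framing (start bit, transmission bit, index, argument, end bit) follows the SD physical spec; the CRC7 field is filled in by send_cmd itself. The command index and argument are illustrative.

@cocotb.coroutine
def issue_cmd52(host, arg=0):
    # Build a 48-bit command word; the bit numbering matches send_cmd's slicing.
    cmd = BinaryValue(value=0, bits=48, bigEndian=False)
    cmd[47] = 0        # start bit
    cmd[46] = 1        # transmission bit (host -> card)
    cmd[45:40] = 52    # command index (CMD52, IO_RW_DIRECT)
    cmd[39:8] = arg    # 32-bit argument
    cmd[0] = 1         # end bit; CRC7 in cmd[7:1] is added by send_cmd
    yield host.send_cmd(cmd)
    response = yield host.get_cmd_response_bits(cmd)
    raise ReturnValue(response)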
Code example #11
class CocotbSPIHostDriver(BusDriver):
    """
    An SPI SDIO host driver for this testbench, driven directly from Cocotb

    SPI->SDIO pin mappings:
     SPI CS -> SDIO D[3]
     SPI MOSI -> SDIO Cmd
     SPI MISO -> SDIO D[0]
     SPI IRQ -> SDIO D[3]

    """
    _signals = [
        "cmd_coco_dir", "cmd_coco_out", "cmd_coco_in", "data_coco_dir",
        "data_coco_out", "data_coco_in"
    ]

    def __init__(self, entity, name, clock):
        BusDriver.__init__(self, entity, name, clock)
        self.bus.cmd_coco_dir.setimmediatevalue(
            1)  # SPI mode - direction of CMD pin is always host->slave (1)
        self.bus.cmd_coco_out.setimmediatevalue(1)
        self.bus.data_coco_dir.setimmediatevalue(
            0x08)  # D[3] (chip select) is always asserted host->slave
        self.bus.data_coco_out.setimmediatevalue(0x08)
        self.bus_width = 1

        # Locks implemented with cocotb Events; a plain threading.Lock does not work here
        self.cmd_bus_busy_event = Event("cmd_bus_busy")
        self.cmd_bus_busy = False
        self.data_write_aborted = False
        self.data_read_aborted = False

    def set_cs_n(self, val=1):
        """
        SPI CS is active low, assume caller knows this (don't invert in here)
        """
        self.bus.data_coco_out[3] <= val

    def set_mosi(self, val=0):
        """
        SPI MOSI assignment
        """
        self.bus.cmd_coco_out <= val

    def get_miso(self):
        """
        SPI MISO read
        """
        return int(
            BinaryValue(value=int(self.bus.data_coco_in),
                        bits=8,
                        bigEndian=False)[0])

    @cocotb.coroutine
    def acquire_cmd_lock(self):
        if self.cmd_bus_busy:
            yield self.cmd_bus_busy_event.wait()
        self.cmd_bus_busy_event.clear()
        self.cmd_bus_busy = True

    def release_cmd_lock(self):
        self.cmd_bus_busy = False
        self.cmd_bus_busy_event.set()

    @cocotb.coroutine
    def send_cmd(self, cmd):
        """
        Transmit a command; the CRC is calculated and appended here
        """
        crc7 = sdio_utils.crc7_gen(number=cmd[47:8].integer)
        cmd[7:1] = BinaryValue(value=crc7, bits=7, bigEndian=False).integer
        self.log.debug("SDIO host sending CMD%d: 'b%s" %
                       (cmd[45:40].integer, cmd.binstr))
        self.set_cs_n(0)
        for x in range(47, -1, -1):
            # Now shift this out bit by bit
            #yield FallingEdge(self.clock)
            yield RisingEdge(self.clock)
            self.set_mosi(cmd[x].integer)
            # Think we should probably keep CS asserted after a command, at least until the response
            #if x == 0:
            #    self.set_cs_n(1) # Deassert CS
        yield RisingEdge(self.clock)
        self.set_mosi(1)

    @cocotb.coroutine
    def get_cmd_response_bits(self, cmd, timeout=1000, timeout_possible=False):
        """
        Read the response from the bus; the first 8 bits are always an R1 indicating whether there were any errors
        """
        cmd_num = cmd[45:40].integer
        response_type, response_length = sdio_utils.get_spi_response_type(
            cmd_num)
        response_bit_counter = response_length
        response = BinaryValue(value=0, bits=response_length, bigEndian=False)
        if response_type is None:
            return
        timeout_count = 0
        while timeout_count < timeout:
            yield RisingEdge(self.clock)
            if response_bit_counter == response_length:
                # Response not started yet
                if self.get_miso() == 0:
                    # Start bit of response
                    response[response_bit_counter - 1] = self.get_miso()
                    response_bit_counter -= 1
            elif response_bit_counter > 0:
                # Shift in the response bits
                response[response_bit_counter - 1] = self.get_miso()
                response_bit_counter -= 1
            else:
                break
            timeout_count += 1
        if timeout_count == timeout:
            timeout_message = "Timeout waiting for response to SPI cmd%d: %x ('b%s)" % (
                cmd_num, cmd.integer, cmd.binstr)
            if timeout_possible:
                self.log.info(timeout_message)
                raise ReturnValue("timeout")
            else:
                raise TestFailure(timeout_message)
        if cmd_num not in [53]:
            # De-assert CS if not a bulk data transfer
            self.set_cs_n(1)
            for _ in range(8):
                yield RisingEdge(self.clock)

        raise ReturnValue(response)

    @cocotb.coroutine
    def data_bus_read(self,
                      count=1,
                      timeout=None,
                      could_abort=False,
                      final_block=False):
        """
        Read the data bytes off the SD bus
        """
        data = []
        # Wait for the start bit
        timeout_count = 0
        timed_out = False
        # Value we started at - the SDIO timeout is 1 ms, wait that long
        timens_before = get_sim_time(units='ns')
        while BinaryValue(value=int(self.bus.data_coco_in),
                          bits=8,
                          bigEndian=False)[0] == 1:
            yield RisingEdge(self.clock)
            self.log.debug("Waited a clock for data bus bit 0 to be 0")
            if timeout:
                # Timeout in cycles has been specified
                timeout_count += 1  # I think it's allowed up to a second but in simulation let's just wait an infeasibly long time
                if timeout_count == timeout:
                    timed_out = True
            elif int(get_sim_time(units='ns') - timens_before) >= 1000000:
                # 1ms timeout
                timed_out = True

            if timed_out:
                if could_abort:
                    self.log.info(
                        "Timed out waiting for start bit of a block of data. Assuming aborted transfer."
                    )
                    raise ReturnValue((data, "aborted"))
                else:
                    raise sdio_host.SDIODataError(
                        "Timeout waiting for start bit of data transfer")

        self.log.debug("SDIO host model data receive start bit detected")
        for byte in range(0, count):
            current_byte = BinaryValue(value=0, bits=8, bigEndian=False)
            # 1-bit mode
            for bit in range(7, -1, -1):
                yield RisingEdge(self.clock)
                current_byte[bit] = self.get_miso()
            self.log.debug("SDIO host model received byte num %03d: %02x" %
                           (byte, current_byte.integer))
            data.append(current_byte.integer)
            if could_abort and self.data_read_aborted:
                self.log.info("Detected data read aborted after %d bytes" %
                              (byte))
                raise ReturnValue((data, "aborted"))

        # CRC time
        crc0 = BinaryValue(value=0, bits=16, bigEndian=False)
        for bit in range(15, -1, -1):
            yield RisingEdge(self.clock)
            crc0[bit] = self.get_miso()
            if could_abort and self.data_read_aborted:
                self.log.info(
                    "Detected data read aborted during CRC, already received %d bytes"
                    % (len(data)))
                raise ReturnValue((data, "aborted"))

        num_bits_to_crc = count * 8
        crc16_d0 = BinaryValue(value=sdio_utils.crc16_gen(
            data, num_bits_to_crc),
                               bits=16,
                               bigEndian=False)
        self.log.debug("Received CRC for lane 0: %04x, calculated CRC: %04x" %
                       (crc0.integer, crc16_d0.integer))
        crc_pass = crc0.integer == crc16_d0.integer

        if not crc_pass:
            if self.data_read_aborted:
                self.log.info(
                    "Detected data read aborted during block read, CRC didn't match but that's to be expected"
                )
                raise ReturnValue((data, "aborted"))
            raise sdio_host.SDIODataError(
                "Extended RW IO SPI read data CRC error.")

        if final_block:
            yield RisingEdge(self.clock)
            self.set_cs_n(1)
            for _ in range(8):
                yield RisingEdge(self.clock)

        raise ReturnValue((data, "okay"))

    @cocotb.coroutine
    def data_bus_write(self,
                       data=None,
                       timeout=4000,
                       could_abort=False,
                       final_block=False):
        """
        Block-write data (which should be a list of integers, each no larger than a byte) onto
        the SPI data bus and append the CRC.

        Prepend this with the appropriate block-write token.

        It's up to the caller to pass the correct number of bytes for the command.

        The block CRC response is returned once the device has finished with the written data.
        """
        num_bytes = len(data)
        self.log.debug("Writing %d bytes onto SPI data bus" % num_bytes)

        # Start token (as per SD physical spec sect. 7.3.3.2)
        self.set_mosi(1)
        for _ in range(0, 7):
            yield RisingEdge(self.clock)
        self.set_mosi(0)
        yield RisingEdge(self.clock)

        num_bits_to_crc = 0
        # Next the data
        for byte_num in range(0, len(data)):
            byte = BinaryValue(value=data[byte_num], bits=8, bigEndian=False)
            self.log.debug("    SPI host data write byte %02d: %02x" %
                           (byte_num, byte.integer))
            for bit in range(7, -1, -1):
                self.set_mosi(byte[bit].integer)
                yield RisingEdge(self.clock)
                num_bits_to_crc += 1

        crc16 = BinaryValue(value=sdio_utils.crc16_gen(data, num_bits_to_crc),
                            bits=16,
                            bigEndian=False)
        self.log.debug("     Block write CRC: %04x" % crc16.integer)
        for bit in range(15, -1, -1):
            self.set_mosi(crc16[bit].integer)
            yield RisingEdge(self.clock)

        # Set MOSI back to idle state
        self.set_mosi(1)

        # Write status, read bytes until we see it again
        status = BinaryValue(value=0, bits=8, bigEndian=False)
        write_done = False
        timeout_count = 0
        while not write_done:
            for bit in range(7, -1, -1):
                yield RisingEdge(self.clock)
                status[bit] = self.get_miso()
            if status[4] == 0:
                assert (
                    status[0] == 1
                ), "SPI write data block response token malformed, bit 0 wasn't 1 (%02x)" % status.integer
                write_done = True
                # We have a status:
                if status[3:1].integer == 0x2:
                    # Data accepted
                    self.log.debug("SPI block write accepted")
                elif status[3:1].integer == 0x5:
                    self.log.error(
                        "SPI block data write rejectd due to incorrect CRC")
            timeout_count += 8
            if timeout_count >= timeout:
                self.log.error("Timeout waiting for device to write data")
                break

        # Now wait for write busy to be deasserted
        timeout_count = 0
        write_busy = BinaryValue(value=0, bits=8, bigEndian=False)
        while True:
            # Read bytes from the bus until we get 1 again
            for bit in range(7, -1, -1):
                yield RisingEdge(self.clock)
                write_busy[bit] = self.get_miso()
            if write_busy[0] == 1:
                break
            timeout_count += 8
            if timeout_count >= timeout:
                self.log.error("Timeout waiting for device to write data")
                break

        if final_block:
            yield RisingEdge(self.clock)
            self.set_cs_n(1)
            for _ in range(8):
                yield RisingEdge(self.clock)

        raise ReturnValue(status[3:1].integer)
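A minimal sketch of how this driver might be exercised from a cocotb test. The DUT signal name (sd_clk), the bus prefix ("sdio") and the clock period are assumptions, and the command framing simply follows the bit positions used by send_cmd above:

import cocotb
from cocotb.binary import BinaryValue
from cocotb.clock import Clock


@cocotb.test()
def spi_host_smoke_test(dut):
    """Send CMD0 over the SPI-mode SDIO pins and log the R1 response (hypothetical wiring)."""
    cocotb.fork(Clock(dut.sd_clk, 20, units="ns").start())
    host = CocotbSPIHostDriver(dut, "sdio", dut.sd_clk)

    # 48-bit command frame: start bit (0), direction bit (1), CMD index, argument, CRC.
    cmd = BinaryValue(value=0, bits=48, bigEndian=False)
    cmd[46] = 1  # direction: host -> card; CMD index and argument stay 0 for CMD0
    # send_cmd() fills in the CRC7 field (cmd[7:1]) before shifting the frame out

    yield host.acquire_cmd_lock()
    yield host.send_cmd(cmd)
    response = yield host.get_cmd_response_bits(cmd)
    host.release_cmd_lock()
    dut._log.info("R1 response: %s", response)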
Code example #12
class AXIS_Monitor(BusMonitor):
    _signals = ["tvalid"]
    _optional_signals = [
        "tdata", "tkeep", "tready", "tlast", "tstrb", "tid", "tdest", "tuser"
    ]

    def __init__(self,
                 entity,
                 name,
                 clock,
                 lsb_first=True,
                 tuser_bytewise=False,
                 **kwargs):
        # Workaround for the scheduler immediately running newly added coroutines
        self._init_done = Event("Init Done")
        BusMonitor.__init__(self, entity, name, clock, **kwargs)

        if hasattr(self.bus, 'tdata'):
            self._n_bytes, rem = divmod(len(self.bus.tdata), 8)
            if rem:
                raise AttributeError("tdata width has to be multiple of 8")
        else:
            self._n_bytes = 1

        if hasattr(self.bus, 'tuser'):
            self._tuser_bytewise = tuser_bytewise
            if tuser_bytewise and self._n_bytes:
                self._user_bits, rem = divmod(len(self.bus.tuser),
                                              self._n_bytes)
                if rem:
                    raise AttributeError(
                        "in byte-wise mode tuser width has to be multiple of tdata width"
                    )
            else:
                self._user_bits = len(self.bus.tuser)

        self._lsb_first = lsb_first
        self._init_done.set()

    @coroutine
    def _monitor_recv(self):
        """Watch the pins and reconstruct transactions."""

        yield self._init_done.wait()

        # Avoid spurious object creation by recycling
        clkedge = RisingEdge(self.clock)
        rdonly = ReadOnly()

        class _dummy():
            def __init__(self, value):
                self.value = value

        tlast = getattr(self.bus, 'tlast', None)
        tready = getattr(self.bus, 'tready', _dummy(BinaryValue('1')))
        tkeep = getattr(self.bus, 'tkeep',
                        _dummy(BinaryValue("1" * self._n_bytes)))
        tstrb = getattr(self.bus, 'tstrb', tkeep)
        tdata = getattr(self.bus, 'tdata', None)
        tid = getattr(self.bus, 'tid', None)
        tdest = getattr(self.bus, 'tdest', None)
        tuser = getattr(self.bus, 'tuser', None)
        tvalid = self.bus.tvalid

        packet_buf = {}

        while True:

            yield clkedge
            yield rdonly

            if self.in_reset:
                if packet_buf:
                    self.log.warning(
                        "Discarding unfinished packet(s) as the bus is in reset"
                    )
                    packet_buf = {}
                continue

            if int(tvalid) and int(tready):
                if self._lsb_first:
                    byte_range = range(self._n_bytes - 1, -1, -1)
                else:
                    byte_range = range(self._n_bytes)

                filtered_data = []
                filtered_user = []

                for b in byte_range:
                    byte_type = resolve(tkeep.value.binstr[b] +
                                        tstrb.value.binstr[b])
                    if byte_type == "11":
                        # data byte
                        if tdata:
                            filtered_data.append(
                                int(
                                    resolve(tdata.value.binstr[b * 8:(b + 1) *
                                                               8]), 2))
                        if tuser and self._tuser_bytewise:
                            filtered_user.append(
                                int(
                                    resolve(
                                        tuser.value.binstr[b * self._user_bits:
                                                           (b + 1) *
                                                           self._user_bits]),
                                    2))
                    elif byte_type == "10":
                        # position byte
                        if tdata:
                            filtered_data.append(0)
                        if tuser and self._tuser_bytewise:
                            filtered_user.append(0)
                    elif byte_type == "01":
                        raise AXIS_ProtocolError(
                            "Invald combination of TKEEP and TSTRB byte qualifiers"
                        )
                    else:
                        # null byte
                        pass

                stream_id = (int(tid) if tid else None,
                             int(tdest) if tdest else None)
                if not tlast or int(tlast):
                    recv_pkt = {}
                    try:
                        if tdata:
                            recv_pkt["data"] = b"".join(
                                packet_buf[stream_id]["data"]) + bytes(
                                    filtered_data)
                        if tuser:
                            if self._tuser_bytewise:
                                recv_pkt["user"] = packet_buf[stream_id][
                                    "user"] + filtered_user
                            else:
                                recv_pkt["user"] = resolve(tuser.value.binstr)
                        if tid:
                            recv_pkt["tid"] = int(tid)
                        if tdest:
                            recv_pkt["tdest"] = int(tdest)

                        self._recv(recv_pkt)
                        del packet_buf[stream_id]
                    except KeyError:
                        # No buffered data
                        if tdata:
                            recv_pkt["data"] = bytes(filtered_data)
                        if tuser:
                            if self._tuser_bytewise:
                                recv_pkt["user"] = filtered_user
                            else:
                                recv_pkt["user"] = resolve(tuser.value.binstr)
                        if tid:
                            recv_pkt["tid"] = int(tid)
                        if tdest:
                            recv_pkt["tdest"] = int(tdest)
                        self._recv(recv_pkt)
                else:
                    try:
                        if tdata:
                            packet_buf[stream_id]["data"].append(
                                bytes(filtered_data))
                        if tuser and self._tuser_bytewise:
                            packet_buf[stream_id]["user"].extend(filtered_user)
                    except KeyError:
                        packet_buf[stream_id] = {}
                        if tdata:
                            packet_buf[stream_id]["data"] = [
                                bytes(filtered_data)
                            ]
                        if tuser and self._tuser_bytewise:
                            packet_buf[stream_id]["user"] = filtered_user
Code example #13
File: scheduler.py Project: Rezam1998/cocotb
class Scheduler(object):
    """The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the :any:`react` method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn.

    .. attention::

       Implementors should not depend on the scheduling order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to :any:`cbReadOnlySynch` (VPI) or :any:`vhpiCbLastKnownDeltaCycle`
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to :any:`cbReadWriteSynch` (VPI) or :c:macro:`vhpiCbEndOfProcesses` (VHPI).
          In this mode we play back all the cached write updates.

    We can legally transition from Normal to Write by registering a :class:`~cocotb.triggers.ReadWrite`
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE = 3  # noqa
    _MODE_TERM = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _next_time_step = NextTimeStep()
    _read_write = ReadWrite()
    _read_only = ReadOnly()
    _timer1 = Timer(1)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # Use OrderedDict here for deterministic behavior (gh-934)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = _py_compat.insertion_ordered_dict()

        # A dictionary mapping coroutines to the trigger they are waiting for
        self._coro2trigger = _py_compat.insertion_ordered_dict()

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = _py_compat.insertion_ordered_dict()

        self._pending_coros = []
        self._pending_triggers = []
        self._pending_threads = []
        self._pending_events = []  # Events we need to call set on once we've unwound

        self._terminate = False
        self._test = None
        self._main_thread = threading.current_thread()

        self._is_reacting = False

        self._write_coro_inst = None
        self._writes_pending = Event()

    @cocotb.decorators.coroutine
    def _do_writes(self):
        """ An internal coroutine that performs pending writes """
        while True:
            yield self._writes_pending.wait()
            if self._mode != Scheduler._MODE_NORMAL:
                yield self._next_time_step

            yield self._read_write

            while self._writes:
                handle, value = self._writes.popitem()
                handle.setimmediatevalue(value)
            self._writes_pending.clear()

    def _check_termination(self):
        """
        Handle a termination that causes us to move onto the next test.
        """
        if self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            if self._write_coro_inst is not None:
                self._write_coro_inst.kill()
                self._write_coro_inst = None

            for t in self._trigger2coros:
                t.unprime()

            if self._timer1.primed:
                self._timer1.unprime()

            self._timer1.prime(self._test_completed)
            self._trigger2coros = _py_compat.insertion_ordered_dict()
            self._coro2trigger = _py_compat.insertion_ordered_dict()
            self._terminate = False
            self._writes = _py_compat.insertion_ordered_dict()
            self._writes_pending.clear()
            self._mode = Scheduler._MODE_TERM

    def _test_completed(self, trigger=None):
        """Called after a test and its cleanup have completed
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            ctx = profiling_context()
        else:
            ctx = _py_compat.nullcontext()

        with ctx:
            self._mode = Scheduler._MODE_NORMAL
            if trigger is not None:
                trigger.unprime()

            # extract the current test, and clear it
            test = self._test
            self._test = None
            if test is None:
                raise InternalError(
                    "_test_completed called with no active test")
            if test._outcome is None:
                raise InternalError(
                    "_test_completed called with an incomplete test")

            # Issue previous test result
            if _debug:
                self.log.debug("Issue test result to regression object")

            # this may schedule another test
            cocotb.regression_manager.handle_result(test)

            # if it did, make sure we handle the test completing
            self._check_termination()

    def react(self, trigger):
        """
        Called when a trigger fires.

        We ensure that we only start the event loop once, rather than
        letting it recurse.
        """
        if self._is_reacting:
            # queue up the trigger, the event loop will get to it
            self._pending_triggers.append(trigger)
            return

        if self._pending_triggers:
            raise InternalError(
                "Expected all triggers to be handled but found {}".format(
                    self._pending_triggers))

        # start the event loop
        self._is_reacting = True
        try:
            self._event_loop(trigger)
        finally:
            self._is_reacting = False

    def _event_loop(self, trigger):
        """
        Run an event loop triggered by the given trigger.

        The loop will keep running until no further triggers fire.

        This should be triggered by only:
        * The beginning of a test, when there is no trigger to react to
        * A GPI trigger
        """
        if _profiling:
            ctx = profiling_context()
        else:
            ctx = _py_compat.nullcontext()

        with ctx:
            # When a trigger fires it is unprimed internally
            if _debug:
                self.log.debug("Trigger fired: %s" % str(trigger))
            # trigger.unprime()

            if self._mode == Scheduler._MODE_TERM:
                if _debug:
                    self.log.debug(
                        "Ignoring trigger %s since we're terminating" %
                        str(trigger))
                return

            if trigger is self._read_only:
                self._mode = Scheduler._MODE_READONLY
            # Only GPI triggers affect the simulator scheduling mode
            elif isinstance(trigger, GPITrigger):
                self._mode = Scheduler._MODE_NORMAL

            # work through triggers one by one
            is_first = True
            self._pending_triggers.append(trigger)
            while self._pending_triggers:
                trigger = self._pending_triggers.pop(0)

                if not is_first and isinstance(trigger, GPITrigger):
                    self.log.warning(
                        "A GPI trigger occurred after entering react - this "
                        "should not happen.")
                    assert False

                # this only exists to enable the warning above
                is_first = False

                # Scheduled coroutines may append to our waiting list so the first
                # thing to do is pop all entries waiting on this trigger.
                try:
                    scheduling = self._trigger2coros.pop(trigger)
                except KeyError:
                    # GPI triggers should only ever be pending if there is an
                    # associated coroutine waiting on that trigger, otherwise it would
                    # have been unprimed already
                    if isinstance(trigger, GPITrigger):
                        self.log.critical(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                        trigger.log.info("I'm the culprit")
                    # For Python triggers this isn't actually an error - we might do
                    # event.set() without knowing whether any coroutines are actually
                    # waiting on this event, for example
                    elif _debug:
                        self.log.debug(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                    del trigger
                    continue

                if _debug:
                    debugstr = "\n\t".join(
                        [coro.__name__ for coro in scheduling])
                    if len(scheduling):
                        debugstr = "\n\t" + debugstr
                    self.log.debug("%d pending coroutines for event %s%s" %
                                   (len(scheduling), str(trigger), debugstr))

                # This trigger isn't needed any more
                trigger.unprime()

                for coro in scheduling:
                    if _debug:
                        self.log.debug("Scheduling coroutine %s" %
                                       (coro.__name__))
                    self.schedule(coro, trigger=trigger)
                    if _debug:
                        self.log.debug("Scheduled coroutine %s" %
                                       (coro.__name__))

                # Schedule may have queued up some events so we'll burn through those
                while self._pending_events:
                    if _debug:
                        self.log.debug("Scheduling pending event %s" %
                                       (str(self._pending_events[0])))
                    self._pending_events.pop(0).set()

                # remove our reference to the objects at the end of each loop,
                # to try and avoid them being destroyed at a weird time (as
                # happened in gh-957)
                del trigger
                del coro
                del scheduling

            # no more pending triggers
            self._check_termination()
            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")

    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        # Unprime the trigger this coroutine is waiting on
        try:
            trigger = self._coro2trigger.pop(coro)
        except KeyError:
            # coroutine probably finished
            pass
        else:
            if coro in self._trigger2coros.setdefault(trigger, []):
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
                del self._trigger2coros[trigger]

        assert self._test is not None

        if coro is self._test:
            if _debug:
                self.log.debug("Unscheduling test {}".format(coro))

            if not self._terminate:
                self._terminate = True
                self.cleanup()

        elif Join(coro) in self._trigger2coros:
            self.react(Join(coro))
        else:
            try:
                # throws an error if the background coroutine errored
                # and no one was monitoring it
                coro._outcome.get()
            except (TestComplete, AssertionError) as e:
                coro.log.info("Test stopped by this forked coroutine")
                e = remove_traceback_frames(e, ['unschedule', 'get'])
                self._test.abort(e)
            except Exception as e:
                coro.log.error("Exception raised by this forked coroutine")
                e = remove_traceback_frames(e, ['unschedule', 'get'])
                self._test.abort(e)

    def save_write(self, handle, value):
        if self._mode == Scheduler._MODE_READONLY:
            raise Exception(
                "Write to object {0} was scheduled during a read-only sync phase."
                .format(handle._name))

        # TODO: we should be able to better keep track of when this needs to
        # be scheduled
        if self._write_coro_inst is None:
            self._write_coro_inst = self._do_writes()
            self.schedule(self._write_coro_inst)

        self._writes[handle] = value
        self._writes_pending.set()

    def _coroutine_yielded(self, coro, trigger):
        """Prime the trigger and update our internal mappings."""
        self._coro2trigger[coro] = trigger

        trigger_coros = self._trigger2coros.setdefault(trigger, [])
        if coro is self._write_coro_inst:
            # Our internal write coroutine always runs before any user coroutines.
            # This preserves the behavior prior to the refactoring of writes to
            # this coroutine.
            trigger_coros.insert(0, coro)
        else:
            # Everything else joins the back of the queue
            trigger_coros.append(coro)

        if not trigger.primed:

            if trigger_coros != [coro]:
                # should never happen
                raise InternalError(
                    "More than one coroutine waiting on an unprimed trigger")

            try:
                trigger.prime(self.react)
            except Exception as e:
                # discard the trigger we associated, it will never fire
                self._trigger2coros.pop(trigger)

                # replace it with a new trigger that throws back the exception
                error_trigger = NullTrigger(outcome=outcomes.Error(e))
                self._coro2trigger[coro] = error_trigger
                self._trigger2coros[error_trigger] = [coro]

                # wake up the coroutines
                error_trigger.prime(self.react)

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def queue_function(self, coro):
        """Queue a coroutine for execution and move the containing thread
        so that it does not block execution of the main thread any longer.
        """
        # We should be able to find ourselves inside the _pending_threads list
        matching_threads = [
            t for t in self._pending_threads
            if t.thread == threading.current_thread()
        ]
        if len(matching_threads) == 0:
            raise RuntimeError(
                "queue_function called from unrecognized thread")

        # Raises if there is more than one match. This can never happen, since
        # each entry always has a unique thread.
        t, = matching_threads

        @cocotb.coroutine
        def wrapper():
            # This function runs in the scheduler thread
            try:
                _outcome = outcomes.Value((yield coro))
            except BaseException as e:
                _outcome = outcomes.Error(e)
            event.outcome = _outcome
            # Notify the current (scheduler) thread that we are about to wake
            # up the background (`@external`) thread, making sure to do so
            # before the background thread gets a chance to go back to sleep by
            # calling thread_suspend.
            # We need to do this here in the scheduler thread so that no more
            # coroutines run until the background thread goes back to sleep.
            t.thread_resume()
            event.set()

        event = threading.Event()
        self._pending_coros.append(wrapper())
        # The scheduler thread blocks in `thread_wait`, and is woken when we
        # call `thread_suspend` - so we need to make sure the coroutine is
        # queued before that.
        t.thread_suspend()
        # This blocks the calling `@external` thread until the coroutine finishes
        event.wait()
        return event.outcome.get()

    def run_in_executor(self, func, *args, **kwargs):
        """Run the coroutine in a separate execution thread
        and return a yieldable object for the caller.
        """

        # Create a thread
        # Create a trigger that is called as a result of the thread finishing
        # Create an Event object that the caller can yield on
        # Event object set when the thread finishes execution, this blocks the
        #   calling coroutine (but not the thread) until the external completes

        def execute_external(func, _waiter):
            _waiter._outcome = outcomes.capture(func, *args, **kwargs)
            if _debug:
                self.log.debug("Execution of external routine done %s" %
                               threading.current_thread())
            _waiter.thread_done()

        @cocotb.coroutine
        def wrapper():
            waiter = external_waiter()
            thread = threading.Thread(group=None,
                                      target=execute_external,
                                      name=func.__name__ + "_thread",
                                      args=([func, waiter]),
                                      kwargs={})

            waiter.thread = thread
            self._pending_threads.append(waiter)

            yield waiter.event.wait()

            return waiter.result  # raises if there was an exception

        return wrapper()

    def add(self, coroutine):
        """Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error messages in the event of common gotchas.
        """
        if isinstance(coroutine, cocotb.decorators.coroutine):
            raise TypeError(
                "Attempt to schedule a coroutine that hasn't started: {}.\n"
                "Did you forget to add parentheses to the @cocotb.test() "
                "decorator?".format(coroutine))

        if inspect.iscoroutine(coroutine):
            return self.add(cocotb.decorators.RunningTask(coroutine))

        elif sys.version_info >= (3, 6) and inspect.isasyncgen(coroutine):
            raise TypeError(
                "{} is an async generator, not a coroutine. "
                "You likely used the yield keyword instead of await.".format(
                    coroutine.__qualname__))

        elif not isinstance(coroutine, cocotb.decorators.RunningTask):
            raise TypeError(
                "Attempt to add a object of type {} to the scheduler, which "
                "isn't a coroutine: {!r}\n"
                "Did you forget to use the @cocotb.coroutine decorator?".
                format(type(coroutine), coroutine))

        if _debug:
            self.log.debug("Adding new coroutine %s" % coroutine.__name__)

        self.schedule(coroutine)
        self._check_termination()
        return coroutine

    def add_test(self, test_coro):
        """Called by the regression manager to queue the next test"""
        if self._test is not None:
            raise InternalError("Test was added while another was in progress")
        self._test = test_coro
        return self.add(test_coro)

    # This collection of functions parses a trigger out of the object
    # that was yielded by a coroutine, converting `list` -> `Waitable`,
    # `Waitable` -> `RunningTask`, `RunningTask` -> `Trigger`.
    # Doing them as separate functions allows us to avoid repeating unnecessary
    # `isinstance` checks.

    def _trigger_from_started_coro(
            self, result: cocotb.decorators.RunningTask) -> Trigger:
        if _debug:
            self.log.debug("Joining to already running coroutine: %s" %
                           result.__name__)
        return result.join()

    def _trigger_from_unstarted_coro(
            self, result: cocotb.decorators.RunningTask) -> Trigger:
        self.queue(result)
        if _debug:
            self.log.debug("Scheduling nested coroutine: %s" % result.__name__)
        return result.join()

    def _trigger_from_waitable(self,
                               result: cocotb.triggers.Waitable) -> Trigger:
        return self._trigger_from_unstarted_coro(result._wait())

    def _trigger_from_list(self, result: list) -> Trigger:
        return self._trigger_from_waitable(cocotb.triggers.First(*result))

    def _trigger_from_any(self, result) -> Trigger:
        """Convert a yielded object into a Trigger instance"""
        # note: the order of these can significantly impact performance

        if isinstance(result, Trigger):
            return result

        if isinstance(result, cocotb.decorators.RunningTask):
            if not result.has_started():
                return self._trigger_from_unstarted_coro(result)
            else:
                return self._trigger_from_started_coro(result)

        if inspect.iscoroutine(result):
            return self._trigger_from_unstarted_coro(
                cocotb.decorators.RunningTask(result))

        if isinstance(result, list):
            return self._trigger_from_list(result)

        if isinstance(result, cocotb.triggers.Waitable):
            return self._trigger_from_waitable(result)

        if sys.version_info >= (3, 6) and inspect.isasyncgen(result):
            raise TypeError(
                "{} is an async generator, not a coroutine. "
                "You likely used the yield keyword instead of await.".format(
                    result.__qualname__))

        raise TypeError(
            "Coroutine yielded an object of type {}, which the scheduler can't "
            "handle: {!r}\n"
            "Did you forget to decorate with @cocotb.coroutine?".format(
                type(result), result))

    def schedule(self, coroutine, trigger=None):
        """Schedule a coroutine by calling the send method.

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule.
            trigger (cocotb.triggers.Trigger): The trigger that caused this
                coroutine to be scheduled.
        """
        if trigger is None:
            send_outcome = outcomes.Value(None)
        else:
            send_outcome = trigger._outcome
        if _debug:
            self.log.debug("Scheduling with {}".format(send_outcome))

        coro_completed = False
        try:
            result = coroutine._advance(send_outcome)
            if _debug:
                self.log.debug("Coroutine %s yielded %s (mode %d)" %
                               (coroutine.__name__, str(result), self._mode))

        except cocotb.decorators.CoroutineComplete as exc:
            if _debug:
                self.log.debug("Coroutine {} completed with {}".format(
                    coroutine, coroutine._outcome))
            coro_completed = True

        # this can't go in the else above, as that causes unwanted exception
        # chaining
        if coro_completed:
            self.unschedule(coroutine)

        # Don't handle the result if we're shutting down
        if self._terminate:
            return

        if not coro_completed:
            try:
                result = self._trigger_from_any(result)
            except TypeError as exc:
                # restart this coroutine with an exception object telling it that
                # it wasn't allowed to yield that
                result = NullTrigger(outcome=outcomes.Error(exc))

            self._coroutine_yielded(coroutine, result)

        # We do not return from here until pending threads have completed, but only
        # from the main thread, this seems like it could be problematic in cases
        # where a sim might change what this thread is.

        if self._main_thread is threading.current_thread():

            for ext in self._pending_threads:
                ext.thread_start()
                if _debug:
                    self.log.debug("Blocking from %s on %s" %
                                   (threading.current_thread(), ext.thread))
                state = ext.thread_wait()
                if _debug:
                    self.log.debug(
                        "Back from wait on self %s with newstate %d" %
                        (threading.current_thread(), state))
                if state == external_state.EXITED:
                    self._pending_threads.remove(ext)
                    self._pending_events.append(ext.event)

        # Handle any newly queued coroutines that need to be scheduled
        while self._pending_coros:
            self.add(self._pending_coros.pop(0))

    def finish_test(self, exc):
        self._test.abort(exc)

    def finish_scheduler(self, exc):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed.
        """
        # If there is an error during cocotb initialization, self._test may not
        # have been set yet. Don't cause another Python exception here.

        if self._test:
            self.log.debug("Issue sim closedown result to regression object")
            self._test.abort(exc)
            cocotb.regression_manager.handle_result(self._test)

    def cleanup(self):
        """Clear up all our state.

        Unprime all pending triggers, kill off any coroutines and stop all externals.
        """
        # copy since we modify this in kill
        items = list(self._trigger2coros.items())

        # reversing seems to fix gh-928, although the order is still somewhat
        # arbitrary.
        for trigger, waiting in items[::-1]:
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()

        if self._main_thread is not threading.current_thread():
            raise Exception("Cleanup() called outside of the main thread")

        for ext in self._pending_threads:
            self.log.warn("Waiting for %s to exit", ext.thread)
Code example #14
File: component.py Project: powlib/sim
class Driver(Component):
    '''
    Block intended for driving transactions onto
    an interface.
    '''
    def __init__(self, interface):
        '''
        Constructs the driver with a given
        interface.
        '''
        Component.__init__(self, interface)
        self.__inport = InPort(block=self)
        self.__queue = deque()
        self.__event = Event()
        self.__flushevt = Event()
        fork(self._drive())

    @property
    def inport(self):
        '''
        Safely return the inport associated with the driver.
        '''
        return self.__inport

    def write(self, data):
        '''
        Writes data into the driver's queue.
        '''
        self.__queue.append(data)
        self._event.set()

    @coroutine
    def flush(self):
        '''
        Blocks until the driver's queue is
        empty.
        '''
        if self._ready():
            self.__flushevt.clear()
            yield self.__flushevt.wait()
        yield NullTrigger()

    def _behavior(self):
        '''
        Implements the behavior of the block.
        '''
        if self.inport.ready():
            data = self.inport.read()
            self.write(data)

    @property
    def _event(self):
        '''
        Return the event object used to synchronize the _drive coroutine
        with the parent coroutine for new data.
        '''
        return self.__event

    def _ready(self):
        '''
        Checks to see if data is available in the driver's queue.
        '''
        return len(self.__queue) != 0

    def _read(self):
        '''
        Reads data from the driver's queue. Returns the data if
        data is available. This method is also responsible for
        waking up the coroutine on which a flush might be yielding.
        '''
        try:
            data = self.__queue.popleft()
            if not self._ready():
                self.__flushevt.set()
            return data
        except IndexError:
            return None

    @coroutine
    def _drive(self):
        '''
        This coroutine should be implemented such that it writes out
        data to the specified interface with the data acquired from the
        driver's queue. The _ready and _read methods should be used for
        acquiring the data, the _event property for synchronization, and
        the _interface property for acquiring the handles.
        '''
        raise NotImplementedError("The drive coroutine should be implemented.")
Code example #15
class FwriscTracerSignalBfm():
    '''
    Implements a signal-level BFM for the FWRISC tracer
    '''
    
    def __init__(self, scope):
        self.listener_l = []
        self.lock = Lock()
        self.ev = Event()
        self.scope = scope
        cocotb.fork(self.run())
        pass

    @cocotb.coroutine
    def run(self):
        while True:
            yield RisingEdge(self.scope.clock)
            
            if self.scope.rd_write and self.scope.rd_waddr != 0:
#                print("reg_write")
                self.reg_write(int(self.scope.rd_waddr), int(self.scope.rd_wdata))
            
            if self.scope.ivalid:
#                print("instr_exec")
                self.instr_exec(int(self.scope.pc), int(self.scope.instr))
                
            if self.scope.mvalid and self.scope.mwrite:
                self.mem_write(
                    int(self.scope.maddr), 
                    int(self.scope.mstrb),
                    int(self.scope.mdata))
        pass
    
    def add_addr_region(self, base, limit):
        # NOP
        pass
    
    def set_trace_all_memwrite(self, t):
        pass
    
    def set_trace_instr(self, all_instr, jump_instr, call_instr):
        pass
    
    def set_trace_reg_writes(self, t):
        pass    
    
    def add_listener(self, l):
        self.listener_l.append(l)
    
    def instr_exec(self, pc, instr):
        for l in self.listener_l:
            l.instr_exec(pc, instr)
    
    def reg_write(self, waddr, wdata):
        for l in self.listener_l:
            l.reg_write(waddr, wdata)
    
    def mem_write(self, maddr, mstrb, mdata):
        for l in self.listener_l:
            l.mem_write(maddr, mstrb, mdata)
            
    @cocotb.coroutine
    def get_reg_info(self, raddr):
        yield self.lock.acquire()
        self.get_reg_info_req(raddr)
        yield self.ev.wait()
        ret = self.ev.data
        self.ev.clear()
        self.lock.release()
        return ret
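A minimal sketch of a listener object for the BFM above. The listener methods mirror the callbacks made from run(); the class name and log formatting are illustrative only:

from cocotb.log import SimLog


class PrintTraceListener(object):
    '''Listener that simply logs the tracer callbacks.'''

    def __init__(self):
        self.log = SimLog("cocotb.trace_listener")

    def instr_exec(self, pc, instr):
        self.log.info("exec  pc=0x%08x instr=0x%08x", pc, instr)

    def reg_write(self, waddr, wdata):
        self.log.info("reg   x%d <= 0x%08x", waddr, wdata)

    def mem_write(self, maddr, mstrb, mdata):
        self.log.info("mem   [0x%08x] strb=0x%x data=0x%08x", maddr, mstrb, mdata)


# Inside a test, assuming `dut` exposes the tracer signals used by the BFM:
#     bfm = FwriscTracerSignalBfm(dut)
#     bfm.add_listener(PrintTraceListener())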
Code example #16
File: eth_mac.py Project: corundum/cocotbext-eth
class EthMacTx(Reset):
    def __init__(self,
                 bus,
                 clock,
                 reset=None,
                 ptp_time=None,
                 ptp_ts=None,
                 ptp_ts_tag=None,
                 ptp_ts_valid=None,
                 reset_active_level=True,
                 ifg=12,
                 speed=1000e6,
                 *args,
                 **kwargs):

        self.bus = bus
        self.clock = clock
        self.reset = reset
        self.ptp_time = ptp_time
        self.ptp_ts = ptp_ts
        self.ptp_ts_tag = ptp_ts_tag
        self.ptp_ts_valid = ptp_ts_valid
        self.ifg = ifg
        self.speed = speed
        self.log = logging.getLogger(f"cocotb.{bus._entity._name}.{bus._name}")

        self.log.info("Ethernet MAC TX model")
        self.log.info("cocotbext-eth version %s", __version__)
        self.log.info("Copyright (c) 2020 Alex Forencich")
        self.log.info("https://github.com/alexforencich/cocotbext-eth")

        super().__init__(*args, **kwargs)

        self.stream = AxiStreamSink(bus,
                                    clock,
                                    reset,
                                    reset_active_level=reset_active_level)
        self.stream.queue_occupancy_limit = 4

        self.active = False
        self.queue = Queue()
        self.active_event = Event()

        self.ts_queue = Queue()

        self.queue_occupancy_bytes = 0
        self.queue_occupancy_frames = 0

        self.time_scale = cocotb.utils.get_sim_steps(1, 'sec')

        self.width = len(self.bus.tdata)
        self.byte_lanes = 1

        if hasattr(self.bus, "tkeep"):
            self.byte_lanes = len(self.bus.tkeep)

        self.byte_size = self.width // self.byte_lanes
        self.byte_mask = 2**self.byte_size - 1

        self.log.info("Ethernet MAC TX model configuration")
        self.log.info("  Byte size: %d bits", self.byte_size)
        self.log.info("  Data width: %d bits (%d bytes)", self.width,
                      self.byte_lanes)
        if hasattr(self.bus, "tkeep"):
            self.log.info("  tkeep width: %d bits", len(self.bus.tkeep))
        else:
            self.log.info("  tkeep: not present")
        if hasattr(self.bus, "tuser"):
            self.log.info("  tuser width: %d bits", len(self.bus.tuser))
        else:
            self.log.info("  tuser: not present")
        if self.ptp_time:
            self.log.info("  ptp_time width: %d bits", len(self.ptp_time))
        else:
            self.log.info("  ptp_time: not present")

        if self.bus.tready is None:
            raise ValueError("tready is required")

        if self.byte_size != 8:
            raise ValueError("Byte size must be 8")

        if self.byte_lanes * self.byte_size != self.width:
            raise ValueError(
                f"Bus does not evenly divide into byte lanes "
                f"({self.byte_lanes} * {self.byte_size} != {self.width})")

        if self.ptp_ts:
            self.ptp_ts.setimmediatevalue(0)
        if self.ptp_ts_tag:
            self.ptp_ts_tag.setimmediatevalue(0)
        if self.ptp_ts_valid:
            self.ptp_ts_valid.setimmediatevalue(0)

        self._run_cr = None
        self._run_ts_cr = None

        self._init_reset(reset, reset_active_level)

    def _recv(self, frame):
        if self.queue.empty():
            self.active_event.clear()
        self.queue_occupancy_bytes -= len(frame)
        self.queue_occupancy_frames -= 1
        return frame

    async def recv(self):
        frame = await self.queue.get()
        return self._recv(frame)

    def recv_nowait(self):
        frame = self.queue.get_nowait()
        return self._recv(frame)

    def count(self):
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def idle(self):
        return not self.active

    def clear(self):
        while not self.queue.empty():
            self.queue.get_nowait()
        self.active_event.clear()
        self.queue_occupancy_bytes = 0
        self.queue_occupancy_frames = 0

    async def wait(self, timeout=0, timeout_unit=None):
        if not self.empty():
            return
        if timeout:
            await First(self.active_event.wait(), Timer(timeout, timeout_unit))
        else:
            await self.active_event.wait()

    def _handle_reset(self, state):
        if state:
            self.log.info("Reset asserted")
            if self._run_cr is not None:
                self._run_cr.kill()
                self._run_cr = None
            if self._run_ts_cr is not None:
                self._run_ts_cr.kill()
                self._run_ts_cr = None

            if self.ptp_ts_valid:
                self.ptp_ts_valid.value = 0

            self.active = False

            while not self.ts_queue.empty():
                self.ts_queue.get_nowait()
        else:
            self.log.info("Reset de-asserted")
            if self._run_cr is None:
                self._run_cr = cocotb.start_soon(self._run())
            if self._run_ts_cr is None and self.ptp_ts:
                self._run_ts_cr = cocotb.start_soon(self._run_ts())

    async def _run(self):
        frame = None
        self.active = False

        while True:
            # wait for data
            cycle = await self.stream.recv()

            frame = EthMacFrame(bytearray())
            frame.sim_time_start = get_sim_time()

            # wait for preamble time
            await Timer(self.time_scale * 8 * 8 // self.speed, 'step')

            frame.sim_time_sfd = get_sim_time()

            if self.ptp_time:
                frame.ptp_timestamp = self.ptp_time.value.integer
                frame.ptp_tag = cycle.tuser.integer >> 1
                self.ts_queue.put_nowait((frame.ptp_timestamp, frame.ptp_tag))

            # process frame data
            while True:
                byte_count = 0

                for offset in range(self.byte_lanes):
                    if not hasattr(self.bus, "tkeep") or (
                            cycle.tkeep.integer >> offset) & 1:
                        frame.data.append((cycle.tdata.integer >>
                                           (offset * self.byte_size))
                                          & self.byte_mask)
                        byte_count += 1

                # wait for serialization time
                await Timer(self.time_scale * byte_count * 8 // self.speed,
                            'step')

                if cycle.tlast.integer:
                    frame.sim_time_end = get_sim_time()
                    self.log.info("RX frame: %s", frame)

                    self.queue_occupancy_bytes += len(frame)
                    self.queue_occupancy_frames += 1

                    await self.queue.put(frame)
                    self.active_event.set()

                    frame = None

                    break

                # get next cycle
                # TODO improve underflow handling
                assert not self.stream.empty(), "underflow"
                cycle = await self.stream.recv()

            # wait for IFG
            await Timer(self.time_scale * self.ifg * 8 // self.speed, 'step')

    async def _run_ts(self):
        clock_edge_event = RisingEdge(self.clock)

        while True:
            await clock_edge_event
            self.ptp_ts_valid.value = 0

            if not self.ts_queue.empty():
                ts, tag = self.ts_queue.get_nowait()
                self.ptp_ts.value = ts
                if self.ptp_ts_tag is not None:
                    self.ptp_ts_tag.value = tag
                self.ptp_ts_valid.value = 1
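A minimal sketch of capturing a transmitted frame with this model. The AxiStreamBus helper comes from cocotbext-axi, and the dut signal names (tx_axis_*, clk, rst) are assumptions:

import cocotb
from cocotbext.axi import AxiStreamBus


@cocotb.test()
async def mac_tx_capture_test(dut):
    """Capture one frame emitted on the DUT's transmit AXI-Stream interface (hypothetical wiring)."""
    mac_tx = EthMacTx(AxiStreamBus.from_prefix(dut, "tx_axis"),
                      dut.clk,
                      reset=dut.rst,
                      ifg=12,
                      speed=1000e6)

    # ... stimulus that causes the DUT to transmit goes here ...

    frame = await mac_tx.recv()
    dut._log.info("Captured a %d byte frame, SFD at %s", len(frame),
                  frame.sim_time_sfd)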
Code example #17
File: __init__.py Project: TC01/cocotb
class Driver(object):
    """Class defining the standard interface for a driver within a testbench.

    The driver is responsible for serialising transactions onto the physical
    pins of the interface.  This may consume simulation time.
    """
    def __init__(self):
        """Constructor for a driver instance."""
        self._pending = Event(name="Driver._pending")
        self._sendQ = deque()

        # Subclasses may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.driver.%s" % (self.__class__.__name__))

        # Create an independent coroutine which can send stuff
        self._thread = cocotb.scheduler.add(self._send_thread())

    def kill(self):
        """Kill the coroutine sending stuff."""
        if self._thread:
            self._thread.kill()
            self._thread = None

    def append(self, transaction, callback=None, event=None, **kwargs):
        """Queue up a transaction to be sent over the bus.

        Mechanisms are provided to permit the caller to know when the
        transaction is processed.

        Args:
            transaction (any): The transaction to be sent.
            callback (callable, optional): Optional function to be called 
                when the transaction has been sent.
            event (optional): :class:`~cocotb.triggers.Event` to be set
                when the transaction has been sent.
            **kwargs: Any additional arguments used in child class' 
                :any:`_driver_send` method.
        """
        self._sendQ.append((transaction, callback, event, kwargs))
        self._pending.set()

    def clear(self):
        """Clear any queued transactions without sending them onto the bus."""
        self._sendQ = deque()

    @coroutine
    def send(self, transaction, sync=True, **kwargs):
        """Blocking send call (hence must be "yielded" rather than called).

        Sends the transaction over the bus.

        Args:
            transaction (any): The transaction to be sent.
            sync (bool, optional): Synchronise the transfer by waiting for a rising edge.
            **kwargs (dict): Additional arguments used in child class'
                :any:`_driver_send` method.
        """
        yield self._send(transaction, None, None, sync=sync, **kwargs)

    def _driver_send(self, transaction, sync=True, **kwargs):
        """Actual implementation of the send.

        Subclasses should override this method to implement the actual 
        :meth:`~cocotb.drivers.Driver.send` routine.

        Args:
            transaction (any): The transaction to be sent.
            sync (boolean, optional): Synchronise the transfer by waiting for a rising edge.
            **kwargs: Additional arguments if required for protocol implemented in subclass.
        """
        raise NotImplementedError("Subclasses of Driver should define a "
                                  "_driver_send coroutine")

    @coroutine
    def _send(self, transaction, callback, event, sync=True, **kwargs):
        """Send coroutine.

        Args:
            transaction (any): The transaction to be sent.
            callback (callable, optional): Optional function to be called 
                when the transaction has been sent.
            event (optional): event to be set when the transaction has been sent.
            sync (boolean, optional): Synchronise the transfer by waiting for a rising edge.
            **kwargs: Any additional arguments used in child class' 
                :any:`_driver_send` method.
        """
        yield self._driver_send(transaction, sync=sync, **kwargs)

        # Notify the world that this transaction is complete
        if event:
            event.set()
        if callback:
            callback(transaction)

    @coroutine
    def _send_thread(self):
        while True:

            # Sleep until we have something to send
            while not self._sendQ:
                self._pending.clear()
                yield self._pending.wait()

            synchronised = False

            # Send in all the queued packets,
            # only synchronise on the first send
            while self._sendQ:
                transaction, callback, event, kwargs = self._sendQ.popleft()
                self.log.debug("Sending queued packet...")
                yield self._send(transaction, callback, event,
                                 sync=not synchronised, **kwargs)
                synchronised = True
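To show how this base class is meant to be extended, here is a minimal sketch of a subclass that implements _driver_send for a simple valid/data interface. The clock and the two signal handles are assumptions for illustration; only the override pattern itself comes from the class above.

import cocotb
from cocotb.triggers import RisingEdge

class SimpleValidDriver(Driver):
    """Illustrative driver for a trivial valid/data handshake."""

    def __init__(self, clock, valid, data):
        self.clock = clock
        self.valid = valid
        self.data = data
        Driver.__init__(self)

    @cocotb.coroutine
    def _driver_send(self, transaction, sync=True, **kwargs):
        # Optionally align to the clock, then hold the data for one cycle.
        if sync:
            yield RisingEdge(self.clock)
        self.valid <= 1
        self.data <= transaction
        yield RisingEdge(self.clock)
        self.valid <= 0

A test can then either queue transactions with append() (non-blocking, serviced by _send_thread) or yield send() to block until a single transaction has been driven.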
Code example #18
class WishboneMaster(Wishbone):
    """
    Wishbone master
    """
    def __init__(self, entity, name, clock, timeout=None, width=32, **kwargs):
        sTo = ", no cycle timeout"
        if timeout is not None:
            sTo = ", cycle timeout is %u clockcycles" % timeout
        self.busy_event         = Event("%s_busy" % name)
        self._timeout           = timeout
        self.busy               = False
        self._acked_ops         = 0
        self._res_buf           = []
        self._aux_buf           = []
        self._op_cnt            = 0
        self._clk_cycle_count   = 0
        Wishbone.__init__(self, entity, name, clock, width, **kwargs)
        self.log.info("Wishbone Master created%s" % sTo)


    @coroutine 
    def _clk_cycle_counter(self):
        """
        Cycle counter to time bus operations
        """
        clkedge = RisingEdge(self.clock)
        self._clk_cycle_count = 0
        while self.busy:
            yield clkedge
            self._clk_cycle_count += 1


    @coroutine
    def _open_cycle(self):
        #Open new wishbone cycle
        if self.busy:
            self.log.error("Opening Cycle, but WB Driver is already busy. Someting's wrong")
            yield self.busy_event.wait()
        self.busy_event.clear()
        self.busy       = True
        cocotb.fork(self._read())
        cocotb.fork(self._clk_cycle_counter()) 
        self.bus.cyc    <= 1
        self._acked_ops = 0  
        self._res_buf   = [] 
        self._aux_buf   = []
        self.log.debug("Opening cycle, %u Ops" % self._op_cnt)


    @coroutine
    def _close_cycle(self):
        #Close current wishbone cycle  
        clkedge = RisingEdge(self.clock)
        count           = 0
        last_acked_ops  = 0
        #Wait for all Operations being acknowledged by the slave before lowering the cycle line
        #This is not mandatory by the bus standard, but a crossbar might send acks to the wrong master
        #if we don't wait. We don't want to risk that, it could hang the bus
        while self._acked_ops < self._op_cnt:
            if last_acked_ops != self._acked_ops:
                self.log.debug("Waiting for missing acks: %u/%u" % (self._acked_ops, self._op_cnt) )
            last_acked_ops = self._acked_ops    
            #check for timeout when finishing the cycle            
            count += 1
            if (not (self._timeout is None)):
                if (count > self._timeout): 
                    raise TestFailure("Timeout of %u clock cycles reached when waiting for reply from slave" % self._timeout)                
            yield clkedge

        self.busy = False
        self.busy_event.set()
        self.bus.cyc <= 0 
        self.log.debug("Closing cycle")
        yield clkedge


    @coroutine
    def _wait_stall(self):
        """Wait for stall to be low before continuing (Pipelined Wishbone)
        """
        clkedge = RisingEdge(self.clock)
        count = 0
        if hasattr(self.bus, "stall"):
            count = 0            
            while self.bus.stall.value == 1:
                yield clkedge
                count += 1
                if (not (self._timeout is None)):
                    if (count > self._timeout): 
                        raise TestFailure("Timeout of %u clock cycles reached when on stall from slave" % self._timeout)                
            self.log.debug("Stalled for %u cycles" % count)
        raise ReturnValue(count)


    @coroutine
    def _wait_ack(self):
        """Wait for ACK on the bus before continuing (Non pipelined Wishbone)
        """
        #wait for acknowledgement before continuing - Classic Wishbone without pipelining
        clkedge = RisingEdge(self.clock)
        count = 0
        if hasattr(self.bus, "stall"):
            self.bus.stb    <= 0
        if self._acktimeout == 0:
            while not self._get_reply()[0] :
                yield clkedge
                count += 1
        else:
            while (not self._get_reply()[0]) and (count < self._acktimeout) :
                yield clkedge
                count += 1
        if (self._acktimeout != 0) and (count >= self._acktimeout):
            raise TestFailure("Timeout of %u clock cycles reached when waiting for acknowledge" % count)

        if not hasattr(self.bus, "stall"):
            self.bus.stb    <= 0
        self._acked_ops += 1
        self.log.debug("Waited %u cycles for ackknowledge" % count)

        raise ReturnValue(count)    


    def _get_reply(self):
        code = 0 # 0 if no reply, 1 for ACK, 2 for ERR, 3 for RTY
        ack = self.bus.ack.value == 1
        if ack:
            code = 1
        if hasattr(self.bus, "err") and self.bus.err.value == 1:
            if ack:
                raise TestFailure("Slave raised ACK and ERR line")
            ack = True
            code = 2
        if hasattr(self.bus, "rty") and self.bus.rty.value == 1:
            if ack:
                raise TestFailure("Slave raised {} and RTY line".format("ACK" if code == 1 else "ERR"))
            ack = True
            code = 3
        return ack, code


    @coroutine 
    def _read(self):
        """
        Reader for slave replies
        """
        count = 0
        clkedge = RisingEdge(self.clock)
        while self.busy:
            ack, reply = self._get_reply()
            # valid reply?
            if ack:
                datrd = self.bus.datrd.value
                #append reply and meta info to result buffer
                tmpRes =  WBRes(ack=reply, sel=None, adr=None, datrd=datrd, datwr=None, waitIdle=None, waitStall=None, waitAck=self._clk_cycle_count)               
                self._res_buf.append(tmpRes)
                self._acked_ops += 1
            yield clkedge
            count += 1


    @coroutine
    def _drive(self, we, adr, datwr, sel, idle):
        """
        Drive the Wishbone Master Out Lines
        """

        clkedge = RisingEdge(self.clock)
        if self.busy:
            # insert requested idle cycles
            if idle is not None:
                idlecnt = idle
                while idlecnt > 0:
                    idlecnt -= 1
                    yield clkedge
            # drive outputs    
            self.bus.stb    <= 1
            self.bus.adr    <= adr
            if hasattr(self.bus, "sel"):
                self.bus.sel <= sel if sel is not None else BinaryValue("1" * len(self.bus.sel))
            self.bus.datwr  <= datwr
            self.bus.we     <= we
            yield clkedge
            #deal with flow control (pipelined wishbone)
            stalled = yield self._wait_stall()
            #append operation and meta info to auxiliary buffer
            self._aux_buf.append(WBAux(sel, adr, datwr, stalled, idle, self._clk_cycle_count))
            yield self._wait_ack()
            self.bus.we     <= 0
        else:
            self.log.error("Cannot drive the Wishbone bus outside a cycle!")



    @coroutine
    def send_cycle(self, arg):
        """
        The main sending routine

        Args:
            arg: list of WBOp (Wishbone operation) objects to carry out.
        """
        cnt = 0
        clkedge = RisingEdge(self.clock)
        yield clkedge
        if is_sequence(arg):
            self._op_cnt = len(arg)
            if self._op_cnt < 1:
                self.log.error("List contains no operations to carry out")
            else:
                result = []
                yield self._open_cycle()

                for op in arg:
                    if not isinstance(op, WBOp):
                        raise TestFailure("Sorry, argument must be a list of WBOp (Wishbone Operation) objects!")    

                    self._acktimeout = op.acktimeout

                    if op.dat is not None:
                        we  = 1
                        dat = op.dat
                    else:
                        we  = 0
                        dat = 0
                    yield self._drive(we, op.adr, dat, op.sel, op.idle)
                    if op.sel is not None:
                        self.log.debug("#%3u WE: %s ADR: 0x%08x DAT: 0x%08x SEL: 0x%1x IDLE: %3u" % (cnt, we, op.adr, dat, op.sel, op.idle))
                    else:
                        self.log.debug("#%3u WE: %s ADR: 0x%08x DAT: 0x%08x SEL: None  IDLE: %3u" % (cnt, we, op.adr, dat, op.idle))
                    cnt += 1

                yield self._close_cycle()

                #do pick and mix from result- and auxiliary buffer so we get all operation and meta info
                for res, aux in zip(self._res_buf, self._aux_buf):
                    res.datwr       = aux.datwr
                    res.sel         = aux.sel
                    res.adr         = aux.adr
                    res.waitIdle    = aux.waitIdle
                    res.waitStall   = aux.waitStall
                    res.waitAck    -= aux.ts
                    result.append(res)

            raise ReturnValue(result)
        else:
            raise TestFailure("Sorry, argument must be a list of WBOp (Wishbone Operation) objects!")
            raise ReturnValue(None)
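As a usage illustration, a test might drive a short cycle of one write followed by a read back. The "wbm" signal prefix, the clock handle, and the WBOp keyword arguments are assumptions inferred from how send_cycle consumes op.adr, op.dat, op.sel and op.idle above; this is a sketch, not the package's documented API.

import cocotb

@cocotb.test()
def wishbone_write_read_test(dut):
    wbm = WishboneMaster(dut, "wbm", dut.clk, width=32, timeout=100)

    # One write to address 0, then a read from the same address.
    ops = [WBOp(adr=0x0, dat=0xDEADBEEF),
           WBOp(adr=0x0)]

    results = yield wbm.send_cycle(ops)
    for res in results:
        dut._log.info("ack code %d, read data %s", res.ack, res.datrd)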
Code example #19
File: opb.py, Project: enchanter/cocotb
class OPBMaster(BusDriver):
    """
    On-chip peripheral bus master
    """
    _signals = ["xferAck", "errAck", "toutSup", "retry", "DBus_out", "select", "RNW", "BE", "ABus", "DBus_in"]
    _optional_signals = ["seqAddr"]
    _max_cycles = 16

    def __init__(self, entity, name, clock):
        BusDriver.__init__(self, entity, name, clock)
        self.bus.select.setimmediatevalue(0)
        self.log.debug("OPBMaster created")
        self.busy_event = Event("%s_busy" % name)
        self.busy = False

    @cocotb.coroutine
    def _acquire_lock(self):
        if self.busy:
            yield self.busy_event.wait()
        self.busy_event.clear()
        self.busy = True

    def _release_lock(self):
        self.busy = False
        self.busy_event.set()

    @cocotb.coroutine
    def read(self, address, sync=True):
        """
        Issue a request to the bus and block until this
        comes back. Simulation time still progresses
        but syntactically it blocks.
        """
        yield self._acquire_lock()

        # Apply values for next clock edge
        if sync:
            yield RisingEdge(self.clock)
        self.bus.ABus <= address
        self.bus.select <= 1
        self.bus.RNW <= 1
        self.bus.BE <= 0xF

        count = 0
        while not int(self.bus.xferAck.value):
            yield RisingEdge(self.clock)
            yield ReadOnly()
            if int(self.bus.toutSup.value):
                count = 0
            else:
                count += 1
            if count >= self._max_cycles:
                raise OPBException("Read took longer than 16 cycles")
        data = int(self.bus.DBus_out.value)

        # Deassert read
        self.bus.select <= 0
        self._release_lock()
        self.log.info("Read of address 0x%x returned 0x%08x" % (address, data))
        raise ReturnValue(data)

    @cocotb.coroutine
    def write(self, address, value, sync=True):
        """
        """
        yield self._acquire_lock()

        if sync:
            yield RisingEdge(self.clock)
        self.bus.ABus <= address
        self.bus.select <= 1
        self.bus.RNW <= 0
        self.bus.BE <= 0xF
        self.bus.DBus_out <= value

        count = 0
        while not int(self.bus.xferAck.value):
            yield RisingEdge(self.clock)
            yield ReadOnly()
            if int(self.bus.toutSup.value):
                count = 0
            else:
                count += 1
            if count >= self._max_cycles:
                raise OPBException("Write took longer than 16 cycles")

        self.bus.select <= 0
        self._release_lock()
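A short usage sketch for this master follows; the "opb" signal prefix, dut.clk, and the chosen address and data values are assumptions for illustration.

import cocotb
from cocotb.result import TestFailure

@cocotb.test()
def opb_write_read_test(dut):
    master = OPBMaster(dut, "opb", dut.clk)

    yield master.write(0x1000, 0xCAFEF00D)
    data = yield master.read(0x1000)

    if int(data) != 0xCAFEF00D:
        raise TestFailure("read-back mismatch: got 0x%08x" % int(data))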
Code example #20
File: fwrisc_tracer_bfm.py, Project: zhuyh128/fwrisc
class FwriscTracerBfm():
    def __init__(self):
        self.listener_l = []
        self.lock = Lock()
        self.ev = Event()
        self.addr_region_idx = 0
        pass

    def add_listener(self, l):
        self.listener_l.append(l)

    def add_addr_region(self, base, limit):
        self.set_addr_region(self.addr_region_idx, base, limit, 1)
        self.addr_region_idx += 1

    @cocotb.bfm_export(cocotb.bfm_uint32_t, cocotb.bfm_uint32_t)
    def instr_exec(self, pc, instr):
        #        print("instr_exec: " + hex(pc))
        for l in self.listener_l:
            l.instr_exec(pc, instr)

    @cocotb.bfm_export(cocotb.bfm_uint32_t, cocotb.bfm_uint32_t)
    def reg_write(self, waddr, wdata):
        #        print("reg_write: " + hex(waddr))
        for l in self.listener_l:
            l.reg_write(waddr, wdata)

    @cocotb.bfm_export(cocotb.bfm_uint32_t, cocotb.bfm_uint32_t,
                       cocotb.bfm_uint32_t)
    def mem_write(self, maddr, mstrb, mdata):
        #        print("mem_write: " + hex(maddr))
        for l in self.listener_l:
            l.mem_write(maddr, mstrb, mdata)

    @cocotb.coroutine
    def get_reg_info(self, raddr):
        yield self.lock.acquire()
        self.get_reg_info_req(raddr)
        yield self.ev.wait()
        ret = self.ev.data
        self.ev.clear()
        self.lock.release()
        return ret

    @cocotb.bfm_import(cocotb.bfm_uint32_t)
    def get_reg_info_req(self, raddr):
        pass

    @cocotb.bfm_import(cocotb.bfm_uint32_t, cocotb.bfm_uint32_t,
                       cocotb.bfm_uint32_t, cocotb.bfm_uint32_t)
    def set_addr_region(self, i, base, limit, valid):
        pass

    @cocotb.bfm_export(cocotb.bfm_uint32_t, cocotb.bfm_uint32_t)
    def get_reg_info_ack(self, rdata, accessed):
        self.ev.set((rdata, accessed))

    @cocotb.bfm_import(cocotb.bfm_uint32_t)
    def set_trace_all_memwrite(self, t):
        pass

    @cocotb.bfm_import(cocotb.bfm_uint32_t, cocotb.bfm_uint32_t,
                       cocotb.bfm_uint32_t)
    def set_trace_instr(self, all_instr, jump_instr, call_instr):
        pass

    @cocotb.bfm_import(cocotb.bfm_uint32_t)
    def set_trace_reg_writes(self, t):
        pass
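A listener registered with add_listener only needs to provide the callback methods the exported BFM functions call into. A minimal sketch of such a listener follows; the class name and what it records are illustrative.

class PcTraceListener(object):
    """Collects the program counter of every executed instruction."""

    def __init__(self):
        self.pc_trace = []

    def instr_exec(self, pc, instr):
        self.pc_trace.append(pc)

    def reg_write(self, waddr, wdata):
        pass

    def mem_write(self, maddr, mstrb, mdata):
        pass

# Registration, assuming `bfm` is an FwriscTracerBfm instance obtained
# from the testbench setup:
#     bfm.add_listener(PcTraceListener())
#     bfm.add_addr_region(0x00000000, 0x0000FFFF)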
Code example #21
File: gmii.py, Project: corundum/cocotbext-eth
class GmiiSink(Reset):

    def __init__(self, data, er, dv, clock, reset=None, enable=None, mii_select=None, reset_active_level=True, *args, **kwargs):
        self.log = logging.getLogger(f"cocotb.{data._path}")
        self.data = data
        self.er = er
        self.dv = dv
        self.clock = clock
        self.reset = reset
        self.enable = enable
        self.mii_select = mii_select

        self.log.info("GMII sink")
        self.log.info("cocotbext-eth version %s", __version__)
        self.log.info("Copyright (c) 2020 Alex Forencich")
        self.log.info("https://github.com/alexforencich/cocotbext-eth")

        super().__init__(*args, **kwargs)

        self.active = False
        self.queue = Queue()
        self.active_event = Event()

        self.mii_mode = False

        self.queue_occupancy_bytes = 0
        self.queue_occupancy_frames = 0

        self.width = 8
        self.byte_width = 1

        assert len(self.data) == 8
        if self.er is not None:
            assert len(self.er) == 1
        if self.dv is not None:
            assert len(self.dv) == 1

        self._run_cr = None

        self._init_reset(reset, reset_active_level)

    def _recv(self, frame, compact=True):
        if self.queue.empty():
            self.active_event.clear()
        self.queue_occupancy_bytes -= len(frame)
        self.queue_occupancy_frames -= 1
        if compact:
            frame.compact()
        return frame

    async def recv(self, compact=True):
        frame = await self.queue.get()
        return self._recv(frame, compact)

    def recv_nowait(self, compact=True):
        frame = self.queue.get_nowait()
        return self._recv(frame, compact)

    def count(self):
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def idle(self):
        return not self.active

    def clear(self):
        while not self.queue.empty():
            self.queue.get_nowait()
        self.active_event.clear()
        self.queue_occupancy_bytes = 0
        self.queue_occupancy_frames = 0

    async def wait(self, timeout=0, timeout_unit=None):
        if not self.empty():
            return
        if timeout:
            await First(self.active_event.wait(), Timer(timeout, timeout_unit))
        else:
            await self.active_event.wait()

    def _handle_reset(self, state):
        if state:
            self.log.info("Reset asserted")
            if self._run_cr is not None:
                self._run_cr.kill()
                self._run_cr = None

            self.active = False
        else:
            self.log.info("Reset de-asserted")
            if self._run_cr is None:
                self._run_cr = cocotb.start_soon(self._run())

    async def _run(self):
        frame = None
        self.active = False

        clock_edge_event = RisingEdge(self.clock)

        while True:
            await clock_edge_event

            if self.enable is None or self.enable.value:
                d_val = self.data.value.integer
                dv_val = self.dv.value.integer
                er_val = 0 if self.er is None else self.er.value.integer

                if frame is None:
                    if dv_val:
                        # start of frame
                        frame = GmiiFrame(bytearray(), [])
                        frame.sim_time_start = get_sim_time()
                else:
                    if not dv_val:
                        # end of frame

                        if self.mii_select is not None:
                            self.mii_mode = bool(self.mii_select.value.integer)

                        if self.mii_mode:
                            odd = True
                            sync = False
                            b = 0
                            be = 0
                            data = bytearray()
                            error = []
                            for n, e in zip(frame.data, frame.error):
                                odd = not odd
                                b = (n & 0x0F) << 4 | b >> 4
                                be |= e
                                if not sync and b == EthPre.SFD:
                                    odd = True
                                    sync = True
                                if odd:
                                    data.append(b)
                                    error.append(be)
                                    be = 0
                            frame.data = data
                            frame.error = error

                        frame.compact()
                        frame.sim_time_end = get_sim_time()
                        self.log.info("RX frame: %s", frame)

                        self.queue_occupancy_bytes += len(frame)
                        self.queue_occupancy_frames += 1

                        self.queue.put_nowait(frame)
                        self.active_event.set()

                        frame = None

                if frame is not None:
                    if frame.sim_time_sfd is None and d_val in (EthPre.SFD, 0xD):
                        frame.sim_time_sfd = get_sim_time()

                    frame.data.append(d_val)
                    frame.error.append(er_val)
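A usage sketch for this sink follows; the DUT signal names (gmii_rxd, gmii_rx_er, gmii_rx_dv, clk, rst) and the 8 ns GMII clock period are assumptions for illustration.

import cocotb
from cocotb.clock import Clock

@cocotb.test()
async def gmii_sink_test(dut):
    cocotb.start_soon(Clock(dut.clk, 8, units="ns").start())

    sink = GmiiSink(dut.gmii_rxd, dut.gmii_rx_er, dut.gmii_rx_dv,
                    dut.clk, reset=dut.rst)

    # Block until the DUT has produced one complete frame.
    frame = await sink.recv()
    dut._log.info("received %d bytes, SFD at %s", len(frame), frame.sim_time_sfd)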
Code example #22
class Port:
    """Base port"""
    def __init__(self, fc_init=[[0] * 6] * 8, *args, **kwargs):
        self.log = logging.getLogger(
            f"cocotb.pcie.{type(self).__name__}.{id(self)}")
        self.log.name = f"cocotb.pcie.{type(self).__name__}"

        self.parent = None
        self.rx_handler = None

        self.max_link_speed = None
        self.max_link_width = None

        self.tx_queue = Queue(1)
        self.tx_queue_sync = Event()

        self.rx_queue = Queue()

        self.cur_link_speed = None
        self.cur_link_width = None

        self.time_scale = get_sim_steps(1, 'sec')

        # ACK/NAK protocol
        # TX
        self.next_transmit_seq = 0x000
        self.ackd_seq = 0xfff
        self.retry_buffer = Queue()

        # RX
        self.next_recv_seq = 0x000
        self.nak_scheduled = False
        self.ack_nak_latency_timer_steps = 0

        self.max_payload_size = 128
        self.max_latency_timer_steps = 0

        self.send_ack = Event()

        self._ack_latency_timer_cr = None

        # Flow control
        self.send_fc = Event()

        self.fc_state = [
            FcChannelState(fc_init[k], self.start_fc_update_timer)
            for k in range(8)
        ]

        self.fc_initialized = False
        self.fc_init_vc = 0
        self.fc_init_type = FcType.P

        self.fc_idle_timer_steps = get_sim_steps(10, 'us')
        self.fc_update_steps = get_sim_steps(30, 'us')

        self._fc_update_timer_cr = None

        super().__init__(*args, **kwargs)

        # VC0 is always active
        self.fc_state[0].active = True

        cocotb.start_soon(self._run_transmit())
        cocotb.start_soon(self._run_receive())
        cocotb.start_soon(self._run_fc_update_idle_timer())

    def classify_tlp_vc(self, tlp):
        return 0

    async def send(self, pkt):
        pkt.release_fc()
        await self.fc_state[self.classify_tlp_vc(pkt)].tx_tlp_fc_gate(pkt)
        await self.tx_queue.put(pkt)
        self.tx_queue_sync.set()

    async def _run_transmit(self):
        await NullTrigger()
        while True:
            while self.tx_queue.empty() and not self.send_ack.is_set(
            ) and not self.send_fc.is_set() and self.fc_initialized:
                self.tx_queue_sync.clear()
                await First(self.tx_queue_sync.wait(), self.send_ack.wait(),
                            self.send_fc.wait())

            pkt = None

            if self.send_ack.is_set():
                # Send ACK or NAK DLLP
                # Runs when
                #  - ACK timer expires
                #  - ACK/NAK transmit requested
                self.send_ack.clear()
                if self.nak_scheduled:
                    pkt = Dllp.create_nak((self.next_recv_seq - 1) & 0xfff)
                else:
                    pkt = Dllp.create_ack((self.next_recv_seq - 1) & 0xfff)
            elif self.send_fc.is_set() or (not self.fc_initialized
                                           and self.tx_queue.empty()):
                # Send FC DLLP
                # Runs when
                #  - FC timer expires
                #  - FC update DLLP transmit requested
                #  - FC init is not done AND no TLPs are queued for transmit
                if self.send_fc.is_set():
                    # Send FC update DLLP
                    for fc_ch in self.fc_state:
                        if not fc_ch.active or not fc_ch.fi2:
                            continue

                        sim_time = get_sim_time()
                        if fc_ch.next_fc_p_tx <= sim_time:
                            pkt = Dllp()
                            pkt.vc = self.fc_init_vc
                            pkt.type = DllpType.UPDATE_FC_P
                            pkt.hdr_fc = fc_ch.ph.rx_credits_allocated
                            pkt.data_fc = fc_ch.pd.rx_credits_allocated
                            fc_ch.next_fc_p_tx = sim_time + self.fc_update_steps
                            break
                        if fc_ch.next_fc_np_tx <= sim_time:
                            pkt = Dllp()
                            pkt.vc = self.fc_init_vc
                            pkt.type = DllpType.UPDATE_FC_NP
                            pkt.hdr_fc = fc_ch.nph.rx_credits_allocated
                            pkt.data_fc = fc_ch.npd.rx_credits_allocated
                            fc_ch.next_fc_np_tx = sim_time + self.fc_update_steps
                            break
                        if fc_ch.next_fc_cpl_tx <= sim_time:
                            pkt = Dllp()
                            pkt.vc = self.fc_init_vc
                            pkt.type = DllpType.UPDATE_FC_CPL
                            pkt.hdr_fc = fc_ch.cplh.rx_credits_allocated
                            pkt.data_fc = fc_ch.cpld.rx_credits_allocated
                            fc_ch.next_fc_cpl_tx = sim_time + self.fc_update_steps
                            break

                if not self.fc_initialized and not pkt:
                    # Send FC init DLLP
                    fc_ch = self.fc_state[self.fc_init_vc]
                    pkt = Dllp()
                    pkt.vc = self.fc_init_vc
                    if self.fc_init_type == FcType.P:
                        pkt.type = DllpType.INIT_FC1_P if not fc_ch.fi1 else DllpType.INIT_FC2_P
                        pkt.hdr_fc = fc_ch.ph.rx_credits_allocated
                        pkt.data_fc = fc_ch.pd.rx_credits_allocated
                        self.fc_init_type = FcType.NP
                    elif self.fc_init_type == FcType.NP:
                        pkt.type = DllpType.INIT_FC1_NP if not fc_ch.fi1 else DllpType.INIT_FC2_NP
                        pkt.hdr_fc = fc_ch.nph.rx_credits_allocated
                        pkt.data_fc = fc_ch.npd.rx_credits_allocated
                        self.fc_init_type = FcType.CPL
                    elif self.fc_init_type == FcType.CPL:
                        pkt.type = DllpType.INIT_FC1_CPL if not fc_ch.fi1 else DllpType.INIT_FC2_CPL
                        pkt.hdr_fc = fc_ch.cplh.rx_credits_allocated
                        pkt.data_fc = fc_ch.cpld.rx_credits_allocated
                        self.fc_init_type = FcType.P
                        # find next active VC that hasn't finished FC init
                        for k in range(8):
                            vc = (self.fc_init_vc + 1 + k) % 8
                            if self.fc_state[
                                    vc].active and not self.fc_state[vc].fi2:
                                self.fc_init_vc = vc
                                break

                    # check all active VC and report FC not initialized if any are not complete
                    self.fc_initialized = True
                    for vc in range(8):
                        if self.fc_state[
                                vc].active and not self.fc_state[vc].fi2:
                            self.fc_initialized = False

                if not pkt:
                    # no more DLLPs to send, clear event
                    self.send_fc.clear()

            if pkt is not None:
                self.log.debug("Send DLLP %s", pkt)
            elif not self.tx_queue.empty():
                pkt = self.tx_queue.get_nowait()
                pkt.seq = self.next_transmit_seq
                self.log.debug("Send TLP %s", pkt)
                self.next_transmit_seq = (self.next_transmit_seq + 1) & 0xfff
                self.retry_buffer.put_nowait(pkt)

            if pkt:
                await self.handle_tx(pkt)

    async def handle_tx(self, pkt):
        raise NotImplementedError()

    async def ext_recv(self, pkt):
        if isinstance(pkt, Dllp):
            # DLLP
            self.log.debug("Receive DLLP %s", pkt)
            self.handle_dllp(pkt)
        else:
            # TLP
            self.log.debug("Receive TLP %s", pkt)
            if pkt.seq == self.next_recv_seq:
                # expected seq
                self.next_recv_seq = (self.next_recv_seq + 1) & 0xfff
                self.nak_scheduled = False
                self.start_ack_latency_timer()
                pkt = Tlp(pkt)
                self.fc_state[self.classify_tlp_vc(pkt)].rx_process_tlp_fc(pkt)
                self.rx_queue.put_nowait(pkt)
            elif (self.next_recv_seq - pkt.seq) & 0xfff < 2048:
                self.log.warning(
                    "Received duplicate TLP, discarding (seq %d, expecting %d)",
                    pkt.seq, self.next_recv_seq)
                self.stop_ack_latency_timer()
                self.send_ack.set()
            else:
                self.log.warning(
                    "Received out-of-sequence TLP, sending NAK (seq %d, expecting %d)",
                    pkt.seq, self.next_recv_seq)
                if not self.nak_scheduled:
                    self.nak_scheduled = True
                    self.stop_ack_latency_timer()
                    self.send_ack.set()

    async def _run_receive(self):
        while True:
            tlp = await self.rx_queue.get()
            if self.rx_handler is None:
                raise Exception("Receive handler not set")
            await self.rx_handler(tlp)

    def handle_dllp(self, dllp):
        if dllp.type == DllpType.NOP:
            # discard NOP
            pass
        elif dllp.type in {DllpType.ACK, DllpType.NAK}:
            # ACK or NAK
            if (((self.next_transmit_seq - 1) & 0xfff) -
                    dllp.seq) & 0xfff > 2048:
                self.log.warning(
                    "Received ACK/NAK DLLP for future TLP, discarding (seq %d, next TX %d, ACK %d)",
                    dllp.seq, self.next_transmit_seq, self.ackd_seq)
            elif (dllp.seq - self.ackd_seq) & 0xfff > 2048:
                self.log.warning(
                    "Received ACK/NAK DLLP for previously-ACKed TLP, discarding (seq %d, next TX %d, ACK %d)",
                    dllp.seq, self.next_transmit_seq, self.ackd_seq)
            else:
                while dllp.seq != self.ackd_seq:
                    # purge TLPs from retry buffer
                    self.retry_buffer.get_nowait()
                    self.ackd_seq = (self.ackd_seq + 1) & 0xfff
                    self.log.debug("ACK TLP seq %d", self.ackd_seq)
                if dllp.type == DllpType.NAK:
                    # retransmit
                    self.log.warning("Got NAK DLLP, start TLP replay")
                    raise Exception("TODO")
        elif dllp.type in {
                DllpType.INIT_FC1_P, DllpType.INIT_FC1_NP,
                DllpType.INIT_FC1_CPL, DllpType.INIT_FC2_P,
                DllpType.INIT_FC2_NP, DllpType.INIT_FC2_CPL,
                DllpType.UPDATE_FC_P, DllpType.UPDATE_FC_NP,
                DllpType.UPDATE_FC_CPL
        }:
            # Flow control
            self.fc_state[dllp.vc].handle_fc_dllp(dllp)
        else:
            raise Exception("TODO")

    def start_ack_latency_timer(self):
        if self._ack_latency_timer_cr is not None:
            if not self._ack_latency_timer_cr._finished:
                # already running
                return
        self._ack_latency_timer_cr = cocotb.start_soon(
            self._run_ack_latency_timer())

    def stop_ack_latency_timer(self):
        if self._ack_latency_timer_cr is not None:
            self._ack_latency_timer_cr.kill()
            self._ack_latency_timer_cr = None

    async def _run_ack_latency_timer(self):
        await Timer(max(self.max_latency_timer_steps, 1), 'step')
        if not self.nak_scheduled:
            self.send_ack.set()

    def start_fc_update_timer(self):
        if self._fc_update_timer_cr is not None:
            if not self._fc_update_timer_cr._finished:
                # already running
                return
        self._fc_update_timer_cr = cocotb.start_soon(
            self._run_fc_update_timer())

    def stop_fc_update_timer(self):
        if self._fc_update_timer_cr is not None:
            self._fc_update_timer_cr.kill()
            self._fc_update_timer_cr = None

    async def _run_fc_update_timer(self):
        await Timer(max(self.max_latency_timer_steps, 1), 'step')
        self.send_fc.set()

    async def _run_fc_update_idle_timer(self):
        while True:
            await Timer(max(self.fc_idle_timer_steps, 1), 'step')
            self.send_fc.set()
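The base class above leaves the physical transmit path to subclasses via handle_tx and expects the peer to feed received packets into ext_recv. As a minimal sketch, a directly-connected port pair could look like the following; the SimLinkPort name and the connect helper are illustrative, not part of the class above.

class SimLinkPort(Port):
    """Port whose TX side hands packets straight to a connected peer."""

    def __init__(self, *args, **kwargs):
        self.peer = None
        super().__init__(*args, **kwargs)

    def connect(self, other):
        # Symmetric back-to-back connection of two ports.
        self.peer = other
        other.peer = self

    async def handle_tx(self, pkt):
        assert self.peer is not None, "port is not connected"
        await self.peer.ext_recv(pkt)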
Code example #23
class OPBMaster(BusDriver):
    """On-chip peripheral bus master."""
    _signals = [
        "xferAck", "errAck", "toutSup", "retry", "DBus_out", "select", "RNW",
        "BE", "ABus", "DBus_in"
    ]
    _optional_signals = ["seqAddr"]
    _max_cycles = 16

    def __init__(self, entity, name, clock, **kwargs):
        BusDriver.__init__(self, entity, name, clock, **kwargs)
        self.bus.select.setimmediatevalue(0)
        self.log.debug("OPBMaster created")
        self.busy_event = Event("%s_busy" % name)
        self.busy = False

    @cocotb.coroutine
    def _acquire_lock(self):
        if self.busy:
            yield self.busy_event.wait()
        self.busy_event.clear()
        self.busy = True

    def _release_lock(self):
        self.busy = False
        self.busy_event.set()

    @cocotb.coroutine
    def read(self, address, sync=True):
        """Issue a request to the bus and block until this comes back.

        Simulation time still progresses but syntactically it blocks.

        Args:
            address (int): The address to read from.
            sync (bool, optional): Wait for rising edge on clock initially.
                Defaults to True.

        Returns:
            BinaryValue: The read data value.

        Raises:
            OPBException: If read took longer than 16 cycles.
        """
        yield self._acquire_lock()

        # Apply values for next clock edge
        if sync:
            yield RisingEdge(self.clock)
        self.bus.ABus <= address
        self.bus.select <= 1
        self.bus.RNW <= 1
        self.bus.BE <= 0xF

        count = 0
        while not int(self.bus.xferAck.value):
            yield RisingEdge(self.clock)
            yield ReadOnly()
            if int(self.bus.toutSup.value):
                count = 0
            else:
                count += 1
            if count >= self._max_cycles:
                raise OPBException("Read took longer than 16 cycles")
        data = int(self.bus.DBus_out.value)

        # Deassert read
        self.bus.select <= 0
        self._release_lock()
        self.log.info("Read of address 0x%x returned 0x%08x" % (address, data))
        return data

    @cocotb.coroutine
    def write(self, address, value, sync=True):
        """Issue a write to the given address with the specified value.

        Args:
            address (int): The address to write to.
            value (int): The data value to write.
            sync (bool, optional): Wait for rising edge on clock initially.
                Defaults to True.

        Raises:
            OPBException: If write took longer than 16 cycles.
        """
        yield self._acquire_lock()

        if sync:
            yield RisingEdge(self.clock)
        self.bus.ABus <= address
        self.bus.select <= 1
        self.bus.RNW <= 0
        self.bus.BE <= 0xF
        self.bus.DBus_out <= value

        count = 0
        while not int(self.bus.xferAck.value):
            yield RisingEdge(self.clock)
            yield ReadOnly()
            if int(self.bus.toutSup.value):
                count = 0
            else:
                count += 1
            if count >= self._max_cycles:
                raise OPBException("Write took longer than 16 cycles")

        self.bus.select <= 0
        self._release_lock()
Code example #24
class AxiStreamSink(object):

    _signals = ["tdata"]
    _optional_signals = [
        "tvalid", "tready", "tlast", "tkeep", "tid", "tdest", "tuser"
    ]

    def __init__(self, entity, name, clock, reset=None, *args, **kwargs):
        self.log = SimLog("cocotb.%s.%s" % (entity._name, name))
        self.entity = entity
        self.clock = clock
        self.reset = reset
        self.bus = Bus(self.entity,
                       name,
                       self._signals,
                       optional_signals=self._optional_signals,
                       **kwargs)

        self.log.info("AXI stream sink")
        self.log.info("cocotbext-axi version %s", __version__)
        self.log.info("Copyright (c) 2020 Alex Forencich")
        self.log.info("https://github.com/alexforencich/cocotbext-axi")

        super().__init__(*args, **kwargs)

        self.active = False
        self.queue = deque()
        self.sync = Event()
        self.read_queue = []

        self.pause = False
        self._pause_generator = None
        self._pause_cr = None

        self.queue_occupancy_bytes = 0
        self.queue_occupancy_frames = 0
        self.queue_occupancy_limit_bytes = None
        self.queue_occupancy_limit_frames = None

        self.width = len(self.bus.tdata)
        self.byte_width = 1

        self.reset = reset

        if hasattr(self.bus, "tready"):
            self.bus.tready.setimmediatevalue(0)
        if hasattr(self.bus, "tkeep"):
            self.byte_width = len(self.bus.tkeep)

        self.byte_size = self.width // self.byte_width
        self.byte_mask = 2**self.byte_size - 1

        cocotb.fork(self._run())

    def recv(self, compact=True):
        if self.queue:
            frame = self.queue.popleft()
            self.queue_occupancy_bytes -= len(frame)
            self.queue_occupancy_frames -= 1
            if compact:
                frame.compact()
            return frame
        return None

    def read(self, count=-1):
        while True:
            frame = self.recv(compact=True)
            if frame is None:
                break
            self.read_queue.extend(frame.tdata)
        if count < 0:
            count = len(self.read_queue)
        data = self.read_queue[:count]
        del self.read_queue[:count]
        return data

    def count(self):
        return len(self.queue)

    def empty(self):
        return not self.queue

    def full(self):
        if self.queue_occupancy_limit_bytes and self.queue_occupancy_bytes > self.queue_occupancy_limit_bytes:
            return True
        elif self.queue_occupancy_limit_frames and self.queue_occupancy_frames > self.queue_occupancy_limit_frames:
            return True
        else:
            return False

    def idle(self):
        return not self.active

    async def wait(self, timeout=0, timeout_unit='ns'):
        if not self.empty():
            return
        self.sync.clear()
        if timeout:
            await First(self.sync.wait(), Timer(timeout, timeout_unit))
        else:
            await self.sync.wait()

    def set_pause_generator(self, generator=None):
        if self._pause_cr is not None:
            self._pause_cr.kill()
            self._pause_cr = None

        self._pause_generator = generator

        if self._pause_generator is not None:
            self._pause_cr = cocotb.fork(self._run_pause())

    def clear_pause_generator(self):
        self.set_pause_generator(None)

    async def _run(self):
        frame = AxiStreamFrame([], [], [], [], [])
        self.active = False

        while True:
            await ReadOnly()

            # read handshake signals
            tready_sample = (not hasattr(self.bus,
                                         "tready")) or self.bus.tready.value
            tvalid_sample = (not hasattr(self.bus,
                                         "tvalid")) or self.bus.tvalid.value

            if self.reset is not None and self.reset.value:
                await RisingEdge(self.clock)
                frame = AxiStreamFrame([], [], [], [], [])
                self.active = False
                if hasattr(self.bus, "tready"):
                    self.bus.tready <= 0
                continue

            if tready_sample and tvalid_sample:
                for offset in range(self.byte_width):

                    frame.tdata.append((self.bus.tdata.value.integer >>
                                        (offset * self.byte_size))
                                       & self.byte_mask)
                    if hasattr(self.bus, "tkeep"):
                        frame.tkeep.append(
                            (self.bus.tkeep.value.integer >> offset) & 1)
                    if hasattr(self.bus, "tid"):
                        frame.tid.append(self.bus.tid.value.integer)
                    if hasattr(self.bus, "tdest"):
                        frame.tdest.append(self.bus.tdest.value.integer)
                    if hasattr(self.bus, "tuser"):
                        frame.tuser.append(self.bus.tuser.value.integer)

                if not hasattr(self.bus, "tlast") or self.bus.tlast.value:
                    if self.byte_size == 8:
                        frame.tdata = bytearray(frame.tdata)

                    self.log.info(f"RX frame: {frame}")

                    self.queue_occupancy_bytes += len(frame)
                    self.queue_occupancy_frames += 1

                    self.queue.append(frame)
                    self.sync.set()

                    frame = AxiStreamFrame([], [], [], [], [])

            await RisingEdge(self.clock)

            if hasattr(self.bus, "tready"):
                self.bus.tready <= (not self.full() and not self.pause)

    async def _run_pause(self):
        for val in self._pause_generator:
            self.pause = val
            await RisingEdge(self.clock)
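A usage sketch for the sink above, including backpressure via the pause generator, follows. The "axis" signal prefix, dut.clk/dut.rst, and the duty cycle of the pause pattern are assumptions for illustration.

import itertools

import cocotb
from cocotb.clock import Clock
from cocotb.triggers import Timer

@cocotb.test()
async def axis_sink_test(dut):
    cocotb.fork(Clock(dut.clk, 4, units="ns").start())

    sink = AxiStreamSink(dut, "axis", dut.clk, dut.rst)

    # Deassert tready one cycle in four to exercise backpressure.
    sink.set_pause_generator(itertools.cycle([0, 0, 0, 1]))

    await Timer(1, 'us')

    # Drain whatever has been captured so far as a flat byte list.
    data = sink.read()
    dut._log.info("captured %d bytes", len(data))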
Code example #25
File: scheduler.py, Project: cmarqu/cocotb
class Scheduler(object):
    """The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the :any:`react` method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn. NB implementors should not depend on the scheduling
    order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to cbReadOnlySynch (VPI) or vhpiCbLastKnownDeltaCycle
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to cbReadWriteSynch (VPI) or vhpiCbEndOfProcesses (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from normal->write by registering a ReadWrite
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL   = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE    = 3  # noqa
    _MODE_TERM     = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _readonly = ReadOnly()
    _timer1 = Timer(1)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # Use OrderedDict here for deterministic behavior (gh-934)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = collections.OrderedDict()

        # A dictionary mapping coroutines to the trigger they are waiting for
        self._coro2trigger = collections.OrderedDict()

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = collections.OrderedDict()

        self._pending_coros = []
        self._pending_triggers = []
        self._pending_threads = []
        self._pending_events = []   # Events we need to call set on once we've unwound

        self._terminate = False
        self._test_result = None
        self._entrypoint = None
        self._main_thread = threading.current_thread()

        self._is_reacting = False

        self._write_coro_inst = None
        self._writes_pending = Event()

    @cocotb.decorators.coroutine
    def _do_writes(self):
        """ An internal coroutine that performs pending writes """
        while True:
            yield self._writes_pending.wait()
            if self._mode != Scheduler._MODE_NORMAL:
                yield NextTimeStep()

            yield ReadWrite()

            while self._writes:
                handle, value = self._writes.popitem()
                handle.setimmediatevalue(value)
            self._writes_pending.clear()

    def _check_termination(self):
        """
        Handle a termination that causes us to move onto the next test.
        """
        if self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            if self._write_coro_inst is not None:
                self._write_coro_inst.kill()
                self._write_coro_inst = None

            for t in self._trigger2coros:
                t.unprime()

            if self._timer1.primed:
                self._timer1.unprime()

            self._timer1.prime(self.begin_test)
            self._trigger2coros = collections.OrderedDict()
            self._coro2trigger = collections.OrderedDict()
            self._terminate = False
            self._writes = collections.OrderedDict()
            self._writes_pending.clear()
            self._mode = Scheduler._MODE_TERM

    def begin_test(self, trigger=None):
        """Called to initiate a test.

        Could be called on start-up or from a callback.
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            ctx = profiling_context()
        else:
            ctx = nullcontext()

        with ctx:
            self._mode = Scheduler._MODE_NORMAL
            if trigger is not None:
                trigger.unprime()

            # Issue previous test result, if there is one
            if self._test_result is not None:
                if _debug:
                    self.log.debug("Issue test result to regression object")
                cocotb.regression_manager.handle_result(self._test_result)
                self._test_result = None
            if self._entrypoint is not None:
                test = self._entrypoint
                self._entrypoint = None
                self.schedule(test)
                self._check_termination()

    def react(self, trigger):
        """
        Called when a trigger fires.

        We ensure that we only start the event loop once, rather than
        letting it recurse.
        """
        if self._is_reacting:
            # queue up the trigger, the event loop will get to it
            self._pending_triggers.append(trigger)
            return

        assert not self._pending_triggers

        # start the event loop
        self._is_reacting = True
        try:
            self._event_loop(trigger)
        finally:
            self._is_reacting = False


    def _event_loop(self, trigger):
        """
        Run an event loop triggered by the given trigger.

        The loop will keep running until no further triggers fire.

        This should be triggered by only:
        * The beginning of a test, when there is no trigger to react to
        * A GPI trigger
        """
        if _profiling:
            ctx = profiling_context()
        else:
            ctx = nullcontext()

        with ctx:
            # When a trigger fires it is unprimed internally
            if _debug:
                self.log.debug("Trigger fired: %s" % str(trigger))
            # trigger.unprime()

            if self._mode == Scheduler._MODE_TERM:
                if _debug:
                    self.log.debug("Ignoring trigger %s since we're terminating" %
                                   str(trigger))
                return

            if trigger is self._readonly:
                self._mode = Scheduler._MODE_READONLY
            # Only GPI triggers affect the simulator scheduling mode
            elif isinstance(trigger, GPITrigger):
                self._mode = Scheduler._MODE_NORMAL

            # work through triggers one by one
            is_first = True
            self._pending_triggers.append(trigger)
            while self._pending_triggers:
                trigger = self._pending_triggers.pop(0)

                if not is_first and isinstance(trigger, GPITrigger):
                    self.log.warning(
                        "A GPI trigger occurred after entering react - this "
                        "should not happen."
                    )
                    assert False

                # this only exists to enable the warning above
                is_first = False

                if trigger not in self._trigger2coros:

                    # GPI triggers should only be ever pending if there is an
                    # associated coroutine waiting on that trigger, otherwise it would
                    # have been unprimed already
                    if isinstance(trigger, GPITrigger):
                        self.log.critical(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                        trigger.log.info("I'm the culprit")
                    # For Python triggers this isn't actually an error - we might do
                    # event.set() without knowing whether any coroutines are actually
                    # waiting on this event, for example
                    elif _debug:
                        self.log.debug(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                    continue

                # Scheduled coroutines may append to our waiting list so the first
                # thing to do is pop all entries waiting on this trigger.
                scheduling = self._trigger2coros.pop(trigger)

                if _debug:
                    debugstr = "\n\t".join([coro.__name__ for coro in scheduling])
                    if len(scheduling):
                        debugstr = "\n\t" + debugstr
                    self.log.debug("%d pending coroutines for event %s%s" %
                                   (len(scheduling), str(trigger), debugstr))

                # This trigger isn't needed any more
                trigger.unprime()

                for coro in scheduling:
                    if _debug:
                        self.log.debug("Scheduling coroutine %s" % (coro.__name__))
                    self.schedule(coro, trigger=trigger)
                    if _debug:
                        self.log.debug("Scheduled coroutine %s" % (coro.__name__))

                # Schedule may have queued up some events so we'll burn through those
                while self._pending_events:
                    if _debug:
                        self.log.debug("Scheduling pending event %s" %
                                       (str(self._pending_events[0])))
                    self._pending_events.pop(0).set()

            # no more pending triggers
            self._check_termination()
            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")


    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        # Unprime the trigger this coroutine is waiting on
        try:
            trigger = self._coro2trigger.pop(coro)
        except KeyError:
            # coroutine probably finished
            pass
        else:
            if coro in self._trigger2coros.setdefault(trigger, []):
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
                del self._trigger2coros[trigger]

        if Join(coro) in self._trigger2coros:
            self.react(Join(coro))
        else:
            try:
                # throws an error if the background coroutine errored
                # and no one was monitoring it
                coro.retval
            except TestComplete as test_result:
                self.log.debug("TestComplete received: {}".format(test_result.__class__.__name__))
                self.finish_test(test_result)
            except Exception as e:
                self.finish_test(create_error(self, "Forked coroutine {} raised exception: {}".format(coro, e)))

    def save_write(self, handle, value):
        if self._mode == Scheduler._MODE_READONLY:
            raise Exception("Write to object {0} was scheduled during a read-only sync phase.".format(handle._name))

        # TODO: we should be able to better keep track of when this needs to
        # be scheduled
        if self._write_coro_inst is None:
            self._write_coro_inst = self._do_writes()
            self.schedule(self._write_coro_inst)

        self._writes[handle] = value
        self._writes_pending.set()

    def _coroutine_yielded(self, coro, trigger):
        """Prime the trigger and update our internal mappings."""
        self._coro2trigger[coro] = trigger

        trigger_coros = self._trigger2coros.setdefault(trigger, [])
        if coro is self._write_coro_inst:
            # Our internal write coroutine always runs before any user coroutines.
            # This preserves the behavior prior to the refactoring of writes to
            # this coroutine.
            trigger_coros.insert(0, coro)
        else:
            # Everything else joins the back of the queue
            trigger_coros.append(coro)

        if not trigger.primed:
            try:
                trigger.prime(self.react)
            except Exception as e:
                # Convert any exceptions into a test result
                self.finish_test(
                    create_error(self, "Unable to prime trigger %s: %s" %
                                 (str(trigger), str(e))))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def queue_function(self, coroutine):
        """Queue a coroutine for execution and move the containing thread
        so that it does not block execution of the main thread any longer.
        """
        # We should be able to find ourselves inside the _pending_threads list
        matching_threads = [
            t
            for t in self._pending_threads
            if t.thread == threading.current_thread()
        ]
        if len(matching_threads) == 0:
            raise RuntimeError("queue_function called from unrecognized thread")

        # Raises if there is more than one match. This can never happen, since
        # each entry always has a unique thread.
        t, = matching_threads

        t.thread_suspend()
        self._pending_coros.append(coroutine)
        return t

    def run_in_executor(self, func, *args, **kwargs):
        """Run the coroutine in a separate execution thread
        and return a yieldable object for the caller.
        """
        # Create a thread
        # Create a trigger that is called as a result of the thread finishing
        # Create an Event object that the caller can yield on
        # Event object set when the thread finishes execution, this blocks the
        #   calling coroutine (but not the thread) until the external completes

        def execute_external(func, _waiter):
            _waiter._outcome = outcomes.capture(func, *args, **kwargs)
            if _debug:
                self.log.debug("Execution of external routine done %s" % threading.current_thread())
            _waiter.thread_done()

        waiter = external_waiter()
        thread = threading.Thread(group=None, target=execute_external,
                                  name=func.__name__ + "_thread",
                                  args=([func, waiter]), kwargs={})

        waiter.thread = thread
        self._pending_threads.append(waiter)

        return waiter

    def add(self, coroutine):
        """Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error messages in the event of common gotchas.
        """
        if isinstance(coroutine, cocotb.decorators.coroutine):
            self.log.critical(
                "Attempt to schedule a coroutine that hasn't started")
            coroutine.log.error("This is the failing coroutine")
            self.log.warning(
                "Did you forget to add parentheses to the @test decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):
            self.log.critical(
                "Attempt to add something to the scheduler which isn't a "
                "coroutine")
            self.log.warning(
                "Got: %s (%s)" % (str(type(coroutine)), repr(coroutine)))
            self.log.warning("Did you use the @coroutine decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        if _debug:
            self.log.debug("Adding new coroutine %s" % coroutine.__name__)

        self.schedule(coroutine)
        self._check_termination()
        return coroutine

    def new_test(self, coroutine):
        self._entrypoint = coroutine

    def schedule(self, coroutine, trigger=None):
        """Schedule a coroutine by calling the send method.

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule.
            trigger (cocotb.triggers.Trigger): The trigger that caused this
                coroutine to be scheduled.
        """
        if trigger is None:
            send_outcome = outcomes.Value(None)
        else:
            send_outcome = trigger._outcome
        if _debug:
            self.log.debug("Scheduling with {}".format(send_outcome))

        try:
            result = coroutine._advance(send_outcome)
            if _debug:
                self.log.debug("Coroutine %s yielded %s (mode %d)" %
                               (coroutine.__name__, str(result), self._mode))

        # TestComplete indication is game over, tidy up
        except TestComplete as test_result:
            # Tag that close down is needed, save the test_result
            # for later use in cleanup handler
            self.log.debug("TestComplete received: %s" % test_result.__class__.__name__)
            self.finish_test(test_result)
            return

        # Normal coroutine completion
        except cocotb.decorators.CoroutineComplete as exc:
            if _debug:
                self.log.debug("Coroutine completed: %s" % str(coroutine))
            self.unschedule(coroutine)
            return

        # Don't handle the result if we're shutting down
        if self._terminate:
            return

        # convert lists into `First` Waitables.
        if isinstance(result, list):
            result = cocotb.triggers.First(*result)

        # convert waitables into coroutines
        if isinstance(result, cocotb.triggers.Waitable):
            result = result._wait()

        # convert coroutines into triggers
        if isinstance(result, cocotb.decorators.RunningCoroutine):
            if not result.has_started():
                self.queue(result)
                if _debug:
                    self.log.debug("Scheduling nested coroutine: %s" %
                                   result.__name__)
            else:
                if _debug:
                    self.log.debug("Joining to already running coroutine: %s" %
                                   result.__name__)

            result = result.join()

        if isinstance(result, Trigger):
            if _debug:
                self.log.debug("%s: is instance of Trigger" % result)
            self._coroutine_yielded(coroutine, result)

        else:
            msg = ("Coroutine %s yielded something the scheduler can't handle"
                   % str(coroutine))
            msg += ("\nGot type: %s repr: %s str: %s" %
                    (type(result), repr(result), str(result)))
            msg += "\nDid you forget to decorate with @cocotb.coroutine?"
            try:
                raise_error(self, msg)
            except Exception as e:
                self.finish_test(e)

        # We do not return from here until pending threads have completed, but
        # only from the main thread; this seems like it could be problematic in
        # cases where a sim might change what this thread is.
        def unblock_event(ext):
            @cocotb.coroutine
            def wrapper():
                ext.event.set()
                yield PythonTrigger()

        if self._main_thread is threading.current_thread():

            for ext in self._pending_threads:
                ext.thread_start()
                if _debug:
                    self.log.debug("Blocking from %s on %s" % (threading.current_thread(), ext.thread))
                state = ext.thread_wait()
                if _debug:
                    self.log.debug("Back from wait on self %s with newstate %d" % (threading.current_thread(), state))
                if state == external_state.EXITED:
                    self._pending_threads.remove(ext)
                    self._pending_events.append(ext.event)

        # Handle any newly queued coroutines that need to be scheduled
        while self._pending_coros:
            self.add(self._pending_coros.pop(0))

    def finish_test(self, test_result):
        """Cache the test result and set the terminate flag."""
        self.log.debug("finish_test called with %s" % (repr(test_result)))
        if not self._terminate:
            self._terminate = True
            self._test_result = test_result
            self.cleanup()

    def finish_scheduler(self, test_result):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed.
        """
        self.log.debug("Issue sim closedown result to regression object")
        cocotb.regression_manager.handle_result(test_result)

    def cleanup(self):
        """Clear up all our state.

        Unprime all pending triggers, kill off any coroutines, and stop all externals.
        """
        # copy since we modify this in kill
        items = list(self._trigger2coros.items())

        # reversing seems to fix gh-928, although the order is still somewhat
        # arbitrary.
        for trigger, waiting in items[::-1]:
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()

        if self._main_thread is not threading.current_thread():
            raise Exception("Cleanup() called outside of the main thread")

        for ext in self._pending_threads:
            self.log.warn("Waiting for %s to exit", ext.thread)
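
A minimal usage sketch (not part of the scheduler source above; the test, function name and delay are illustrative): user code normally reaches run_in_executor() and queue_function() through cocotb.external, which lets a coroutine yield on a blocking Python function that runs in a separate thread.

import time

import cocotb
from cocotb.triggers import Timer


def blocking_io(delay_s):
    """Plain Python function; the scheduler runs it via run_in_executor()."""
    time.sleep(delay_s)
    return 42


@cocotb.test()
def external_demo(dut):
    yield Timer(10)
    result = yield cocotb.external(blocking_io)(0.01)
    dut._log.info("external function returned %d" % result)
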
Code Example #26
class PCIe_host():
    def __init__(self):
        self.read_outstanding = 0
        self.rxdata = []
        self.requested = 0
        self.txqueue = Queue.Queue()
        self.completion_data = np.zeros(4*1048576, dtype = 'uint64')
        self.write_data = np.zeros(4*1048576, dtype = 'uint64')
        self.write_data_expected = np.zeros(4*1048576, dtype = 'uint64')
        self.errors = 0
        self.read_tag = 0
        self.read_data = 0
        self.command_start = 0 # address for commands
    def write(self, data, address):
        """32 bit address / 64 bit data write"""
        tlp = np.zeros(6, dtype='uint32')
        tlp[0] = 0x40000002
        tlp[1] = 0xbeef00ff
        tlp[2] = address
        tlp[3] = endianswap(data)
        tlp[4] = endianswap(data >> 32)
        self.txqueue.put(tlp)

    @cocotb.coroutine
    def read(self, address, tag=0):
        """ 32 bit read """
        tlp = np.zeros(4, dtype='uint32')
        tlp[0] = 0x00000001
        tlp[1] = 0xbaaa00ff | tag << 8
        tlp[2] = address
        self.read_wait = Event("rw")
        self.txqueue.put(tlp)
        yield self.read_wait.wait()
        raise ReturnValue(self.read_data)

    def complete(self, address, reqid_tag):
        address &= 0xFFFF8
        address = address >> 3
        complete_size = random.choice([16,32]) # DW
        cdata_64 = self.completion_data[address:address + 512/8]
        cdata_32 = np.fromstring(cdata_64.tostring(), dtype = 'uint32')
        cdata_32.byteswap(True)
        for i in range(128/complete_size):
            tlp = np.zeros(4 + complete_size, dtype='uint32')
            tlp[0] = 0x4A000000 | complete_size
            tlp[1] = 0xbeef0000 | (512-complete_size*4*i)
            tlp[2] = (reqid_tag << 8) | ((i & 1) << 6)
            tlp[3:3+complete_size] = cdata_32[i*complete_size: (i+1)*complete_size]
            address += complete_size/2
            self.txqueue.put(tlp)

    @cocotb.coroutine
    def do_tx(self, p):
        while True:
            try:
                tlp = self.txqueue.get(False)
                i_last = len(tlp)/2 - 1
                for i in range(len(tlp)/2):
                    while random.choice([0,0,0,1]):
                        p.m_axis_rx_tvalid = 0
                        yield RisingEdge(p.clock)
                    p.m_axis_rx_tvalid = 1
                    p.m_axis_rx_tdata = tlp[2*i] | (int(tlp[2*i+1]) << 32)
                    if i == i_last:
                        p.m_axis_rx_tlast = 1
                    else:
                        p.m_axis_rx_tlast = 0
                    yield RisingEdge(p.clock)
            except Queue.Empty:
                p.m_axis_rx_tvalid = 0
                yield RisingEdge(p.clock)

    @cocotb.coroutine
    def docycle(self, dut):
        p = dut
        p.m_axis_tx_tvalid = 0
        p.s_axis_tx_tready = 0
        p.pci_reset = 1
        for i in range(10):
            yield RisingEdge(p.clock)
        p.pci_reset = 0
        for i in range(10):
            yield RisingEdge(p.clock)
        cocotb.fork(self.do_tx(p))
        while True:
            # RX
            if (int(p.s_axis_tx_tvalid) == 1) and (int(p.s_axis_tx_tready) == 1):
                tdata = int(p.s_axis_tx_tdata)
                tlast = int(p.s_axis_tx_tlast)
                t_1dw = int(p.s_axis_tx_1dw)
                self.rxdata.append(tdata & 0xFFFFFFFF)
                self.rxdata.append(tdata >> 32)
                if (t_1dw == 1) and (tlast == 0):
                    raise TestFailure("RX: Error: 1dw and not tlast")
                if tlast == 1:
                    dw0 = self.rxdata[0]
                    dw1 = self.rxdata[1]
                    dw2 = self.rxdata[2]
                    dw3 = self.rxdata[3]
                    tlptype = (dw0 >> 24) & 0x5F
                    length = dw0 & 0x3FF
                    if (dw0 & 0x20000000) != 0:
                        bits = 64
                        address = dw3 | (int(dw2) << 32)
                    else:
                        bits = 32
                        address = dw2
                    if tlptype == 0b0000000: # read
                        #print "{} bit read request at 0x{:016X}, tag = 0x{:02X}, length = {} dw".format(bits, address, int(dw1 >> 8 & 0xFF), length)
                        reqid_tag = 0xFFFFFF & (dw1 >> 8)
                        self.complete(address, reqid_tag)
                    elif tlptype == 0b1000000: # write
                        self.handle_write_tlp(bits, address, length, self.rxdata)
                    elif tlptype == 0b1001010: # read completion
                        self.handle_read_completion_tlp(bits, length, self.rxdata)
                    else:
                        raise TestFailure("RX: unknown TLP {} {} {}".format(hex(dw0), hex(dw1), hex(dw2)))
                    self.rxdata = []
            p.s_axis_tx_tready = random.choice([0,1,1,1])
            yield RisingEdge(p.clock)
    def handle_read_completion_tlp(self, bits, length, rxdata):
        if length != 1:
            raise TestFailure("ERROR: read completion TLP has incorrect length in header, {}".format(length))
        if len(rxdata) != 4:
            raise TestFailure("ERROR: read completion TLP length does not match header, {}".format(len(self.rxdata)))
        data = endianswap(rxdata[3])
        tag = 0xFF & (rxdata[2]>>8)
        print "read completion, data = {}, tag = {}".format(data, tag)
        self.read_data = data
        self.read_wait.set(data)

    def handle_write_tlp(self, bits, address, length, rxdata):
        #print "{} bit write request at 0x{:016X}, {} header dw, {} payload dw".format(bits, address, length, len(rxdata))
        #if random.choice([0,0,0,0,1]):
            #self.read(0x1, self.read_tag)
            #self.read_tag += 1
        if length & 0x01 != 0:
            raise TestFailure("write request is an odd length")
        if length + 4 != len(rxdata):
            raise TestFailure("write request length {} does not match header length {}".format(len(rxdata), length))
        ndata = np.asarray(rxdata, dtype = 'uint32')
        if bits == 32:
            data = ndata[3:3+length]
        else:
            data = ndata[4:4+length]
        data.byteswap(True)
        addr_start = (address/8) & 0xFFFFFF
        d = np.fromstring(data.tostring(), dtype = 'uint64')
        self.write_data[addr_start:addr_start + length/2] = d
        #print "d[{}] = 0x{:016X}".format(0, d[0])
    def write_fifo(self, fifo, data):
        self.write(data, (8+fifo)*8)
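
A hedged usage sketch of the model above (the test body and register offset are hypothetical; the DUT is assumed to expose the clock, pci_reset, m_axis_rx_* and s_axis_tx_* signals referenced by docycle):

import cocotb
from cocotb.triggers import Timer


@cocotb.test()
def pcie_host_smoke(dut):
    host = PCIe_host()
    cocotb.fork(host.docycle(dut))   # reset sequence, TX queue and TLP decoding
    host.write(0xDEADBEEF, 0x10)     # queue a 64-bit memory-write TLP
    data = yield host.read(0x10)     # memory-read TLP; blocks until the completion arrives
    dut._log.info("read returned 0x%x" % data)
    yield Timer(1000)
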
Code Example #27
class Driver:
    """Class defining the standard interface for a driver within a testbench.

    The driver is responsible for serializing transactions onto the physical
    pins of the interface.  This may consume simulation time.
    """

    def __init__(self):
        """Constructor for a driver instance."""
        self._pending = Event(name="Driver._pending")
        self._sendQ = deque()

        # Sub-classes may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.driver.%s" % (type(self).__qualname__))

        # Create an independent coroutine which can send stuff
        self._thread = cocotb.scheduler.add(self._send_thread())

    def kill(self):
        """Kill the coroutine sending stuff."""
        if self._thread:
            self._thread.kill()
            self._thread = None

    def append(self, transaction, callback=None, event=None, **kwargs):
        """Queue up a transaction to be sent over the bus.

        Mechanisms are provided to permit the caller to know when the
        transaction is processed.

        Args:
            transaction (any): The transaction to be sent.
            callback (callable, optional): Optional function to be called
                when the transaction has been sent.
            event (optional): :class:`~cocotb.triggers.Event` to be set
                when the transaction has been sent.
            **kwargs: Any additional arguments used in child class'
                :any:`_driver_send` method.
        """
        self._sendQ.append((transaction, callback, event, kwargs))
        self._pending.set()

    def clear(self):
        """Clear any queued transactions without sending them onto the bus."""
        self._sendQ = deque()

    @coroutine
    def send(self, transaction, sync=True, **kwargs):
        """Blocking send call (hence must be "yielded" rather than called).

        Sends the transaction over the bus.

        Args:
            transaction (any): The transaction to be sent.
            sync (bool, optional): Synchronize the transfer by waiting for a rising edge.
            **kwargs (dict): Additional arguments used in child class'
                :any:`_driver_send` method.
        """
        yield self._send(transaction, None, None, sync=sync, **kwargs)

    def _driver_send(self, transaction, sync=True, **kwargs):
        """Actual implementation of the send.

        Sub-classes should override this method to implement the actual
        :meth:`~cocotb.drivers.Driver.send` routine.

        Args:
            transaction (any): The transaction to be sent.
            sync (bool, optional): Synchronize the transfer by waiting for a rising edge.
            **kwargs: Additional arguments if required for protocol implemented in a sub-class.
        """
        raise NotImplementedError("Sub-classes of Driver should define a "
                                  "_driver_send coroutine")

    @coroutine
    def _send(self, transaction, callback, event, sync=True, **kwargs):
        """Send coroutine.

        Args:
            transaction (any): The transaction to be sent.
            callback (callable, optional): Optional function to be called
                when the transaction has been sent.
            event (optional): event to be set when the transaction has been sent.
            sync (bool, optional): Synchronize the transfer by waiting for a rising edge.
            **kwargs: Any additional arguments used in child class'
                :any:`_driver_send` method.
        """
        yield self._driver_send(transaction, sync=sync, **kwargs)

        # Notify the world that this transaction is complete
        if event:
            event.set()
        if callback:
            callback(transaction)

    @coroutine
    def _send_thread(self):
        while True:

            # Sleep until we have something to send
            while not self._sendQ:
                self._pending.clear()
                yield self._pending.wait()

            synchronised = False

            # Send in all the queued packets,
            # only synchronize on the first send
            while self._sendQ:
                transaction, callback, event, kwargs = self._sendQ.popleft()
                self.log.debug("Sending queued packet...")
                yield self._send(transaction, callback, event,
                                 sync=not synchronised, **kwargs)
                synchronised = True
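
A minimal sub-class sketch of the Driver above (the data/valid signal names and the clock argument are assumptions, not part of the class) showing the _driver_send coroutine that send() and the background _send_thread call into:

from cocotb.decorators import coroutine
from cocotb.triggers import RisingEdge


class SimpleValidDriver(Driver):
    """Drives a data/valid pair for one clock cycle per transaction."""

    def __init__(self, dut, clock):
        self.dut = dut
        self.clock = clock
        Driver.__init__(self)          # starts the background _send_thread

    @coroutine
    def _driver_send(self, transaction, sync=True, **kwargs):
        if sync:
            yield RisingEdge(self.clock)
        self.dut.data <= transaction   # serialize the transaction onto the pins
        self.dut.valid <= 1
        yield RisingEdge(self.clock)
        self.dut.valid <= 0
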
Code Example #28
File: avalon.py Project: darylz/cocotb
class AvalonMaster(AvalonMM):
    """Avalon-MM master
    """
    def __init__(self, entity, name, clock):
        AvalonMM.__init__(self, entity, name, clock)
        self.log.debug("AvalonMaster created")
        self.busy_event = Event("%s_busy" % name)
        self.busy = False

    @coroutine
    def _acquire_lock(self):
        if self.busy:
            yield self.busy_event.wait()
        self.busy_event.clear()
        self.busy = True

    def _release_lock(self):
        self.busy = False
        self.busy_event.set()

    @coroutine
    def read(self, address):
        """
        Issue a request to the bus and block until this
        comes back. Simulation time still progresses
        but syntactically it blocks.
        See http://www.altera.com/literature/manual/mnl_avalon_spec_1_3.pdf
        """
        yield self._acquire_lock()

        # Apply values for next clock edge
        yield RisingEdge(self.clock)
        self.bus.address <= address
        self.bus.read <= 1

        # Wait for waitrequest to be low
        yield self._wait_for_nsignal(self.bus.waitrequest)

        # Assume readLatency = 1
        # FIXME need to configure this, should take a dictionary of Avalon properties.
        yield RisingEdge(self.clock)

        # Deassert read
        self.bus.read <= 0

        # Get the data
        yield ReadOnly()
        data = self.bus.readdata.value
        yield NextTimeStep()
        self._release_lock()
        raise ReturnValue(data)

    @coroutine
    def write(self, address, value):
        """
        Issue a write to the given address with the specified
        value.
        See http://www.altera.com/literature/manual/mnl_avalon_spec_1_3.pdf
        """
        yield self._acquire_lock()

        # Apply values to bus
        yield RisingEdge(self.clock)
        self.bus.address <= address
        self.bus.writedata <= value
        self.bus.write <= 1

        # Wait for waitrequest to be low
        count = yield self._wait_for_nsignal(self.bus.waitrequest)

        # Deassert write
        yield RisingEdge(self.clock)
        self.bus.write <= 0
        self._release_lock()
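
A hedged usage sketch for the master above (the "avs" bus prefix, clock name and address are assumptions; a free-running clock and an Avalon-MM slave with address/read/readdata/write/writedata/waitrequest signals are presumed):

import cocotb
from cocotb.result import TestFailure


@cocotb.test()
def avalon_rw_smoke(dut):
    master = AvalonMaster(dut, "avs", dut.clk)   # drives e.g. avs_address, avs_read, ...
    yield master.write(0x04, 0xDEADBEEF)
    data = yield master.read(0x04)
    if int(data) != 0xDEADBEEF:
        raise TestFailure("read-back mismatch: 0x%x" % int(data))
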
Code Example #29
class AvalonMaster(AvalonMM):
    """Avalon-MM master
    """
    def __init__(self, entity, name, clock):
        AvalonMM.__init__(self, entity, name, clock)
        self.log.debug("AvalonMaster created")
        self.busy_event = Event("%s_busy" % name)
        self.busy = False

    def __len__(self):
        return 2**len(self.bus.address)

    @coroutine
    def _acquire_lock(self):
        if self.busy:
            yield self.busy_event.wait()
        self.busy_event.clear()
        self.busy = True

    def _release_lock(self):
        self.busy = False
        self.busy_event.set()

    @coroutine
    def read(self, address, sync=True):
        """
        Issue a request to the bus and block until this
        comes back. Simulation time still progresses
        but syntactically it blocks.
        See http://www.altera.com/literature/manual/mnl_avalon_spec_1_3.pdf
        """
        if not self._can_read:
            self.log.error("Cannot read - have no read signal")
            raise TestError("Attempt to read on a write-only AvalonMaster")

        yield self._acquire_lock()

        # Apply values for next clock edge
        if sync:
            yield RisingEdge(self.clock)
        self.bus.address <= address
        self.bus.read <= 1
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= int("1" * len(self.bus.byteenable), 2)
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 1

        # Wait for waitrequest to be low
        if hasattr(self.bus, "waitrequest"):
            yield self._wait_for_nsignal(self.bus.waitrequest)
        yield RisingEdge(self.clock)

        # Deassert read
        self.bus.read <= 0
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= 0
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 0
        v = self.bus.address.value
        v.binstr = "x" * len(self.bus.address)
        self.bus.address <= v

        if hasattr(self.bus, "readdatavalid"):
            while True:
                yield ReadOnly()
                if int(self.bus.readdatavalid):
                    break
                yield RisingEdge(self.clock)
        else:
            # Assume readLatency = 1 if no readdatavalid
            # FIXME need to configure this,
            # should take a dictionary of Avalon properties.
            yield ReadOnly()

        # Get the data
        data = self.bus.readdata.value

        self._release_lock()
        raise ReturnValue(data)

    @coroutine
    def write(self, address, value):
        """
        Issue a write to the given address with the specified
        value.
        See http://www.altera.com/literature/manual/mnl_avalon_spec_1_3.pdf
        """
        if not self._can_write:
            self.log.error("Cannot write - have no write signal")
            raise TestError("Attempt to write on a read-only AvalonMaster")

        yield self._acquire_lock()

        # Apply values to bus
        yield RisingEdge(self.clock)
        self.bus.address <= address
        self.bus.writedata <= value
        self.bus.write <= 1
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= int("1" * len(self.bus.byteenable), 2)
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 1

        # Wait for waitrequest to be low
        if hasattr(self.bus, "waitrequest"):
            count = yield self._wait_for_nsignal(self.bus.waitrequest)

        # Deassert write
        yield RisingEdge(self.clock)
        self.bus.write <= 0
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= 0
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 0
        v = self.bus.address.value
        v.binstr = "x" * len(self.bus.address)
        self.bus.address <= v

        v = self.bus.writedata.value
        v.binstr = "x" * len(self.bus.writedata)
        self.bus.writedata <= v
        self._release_lock()
Code Example #30
File: scheduler.py Project: biggsbenjamin/cocotb
class Scheduler(object):
    """The main scheduler.

    Here we accept callbacks from the simulator and schedule the appropriate
    coroutines.

    A callback fires, causing the :any:`react` method to be called, with the
    trigger that caused the callback as the first argument.

    We look up a list of coroutines to schedule (indexed by the trigger) and
    schedule them in turn. NB implementors should not depend on the scheduling
    order!

    Some additional management is required since coroutines can return a list
    of triggers, to be scheduled when any one of the triggers fires.  To
    ensure we don't receive spurious callbacks, we have to un-prime all the
    other triggers when any one fires.

    Due to the simulator nuances and fun with delta delays we have the
    following modes:

    Normal mode
        - Callbacks cause coroutines to be scheduled
        - Any pending writes are cached and do not happen immediately

    ReadOnly mode
        - Corresponds to cbReadOnlySynch (VPI) or vhpiCbLastKnownDeltaCycle
          (VHPI).  In this state we are not allowed to perform writes.

    Write mode
        - Corresponds to cbReadWriteSynch (VPI) or vhpiCbEndOfProcesses (VHPI)
          In this mode we play back all the cached write updates.

    We can legally transition from normal->write by registering a ReadWrite
    callback, however usually once a simulator has entered the ReadOnly phase
    of a given timestep then we must move to a new timestep before performing
    any writes.  The mechanism for moving to a new timestep may not be
    consistent across simulators and therefore we provide an abstraction to
    assist with compatibility.


    Unless a coroutine has explicitly requested to be scheduled in ReadOnly
    mode (for example wanting to sample the finally settled value after all
    delta delays) then it can reasonably be expected to be scheduled during
    "normal mode" i.e. where writes are permitted.
    """

    _MODE_NORMAL = 1  # noqa
    _MODE_READONLY = 2  # noqa
    _MODE_WRITE = 3  # noqa
    _MODE_TERM = 4  # noqa

    # Singleton events, recycled to avoid spurious object creation
    _readonly = ReadOnly()
    _timer1 = Timer(1)

    def __init__(self):

        self.log = SimLog("cocotb.scheduler")
        if _debug:
            self.log.setLevel(logging.DEBUG)

        # Use OrderedDict here for deterministic behavior (gh-934)

        # A dictionary of pending coroutines for each trigger,
        # indexed by trigger
        self._trigger2coros = collections.OrderedDict()

        # A dictionary mapping coroutines to the trigger they are waiting for
        self._coro2trigger = collections.OrderedDict()

        # Our main state
        self._mode = Scheduler._MODE_NORMAL

        # A dictionary of pending writes
        self._writes = collections.OrderedDict()

        self._pending_coros = []
        self._pending_triggers = []
        self._pending_threads = []
        self._pending_events = [
        ]  # Events we need to call set on once we've unwound

        self._terminate = False
        self._test_result = None
        self._entrypoint = None
        self._main_thread = threading.current_thread()

        self._is_reacting = False

        self._write_coro_inst = None
        self._writes_pending = Event()

    @cocotb.decorators.coroutine
    def _do_writes(self):
        """ An internal coroutine that performs pending writes """
        while True:
            yield self._writes_pending.wait()
            if self._mode != Scheduler._MODE_NORMAL:
                yield NextTimeStep()

            yield ReadWrite()

            while self._writes:
                handle, value = self._writes.popitem()
                handle.setimmediatevalue(value)
            self._writes_pending.clear()

    def _check_termination(self):
        """
        Handle a termination that causes us to move onto the next test.
        """
        if self._terminate:
            if _debug:
                self.log.debug("Test terminating, scheduling Timer")

            if self._write_coro_inst is not None:
                self._write_coro_inst.kill()
                self._write_coro_inst = None

            for t in self._trigger2coros:
                t.unprime()

            if self._timer1.primed:
                self._timer1.unprime()

            self._timer1.prime(self.begin_test)
            self._trigger2coros = collections.OrderedDict()
            self._coro2trigger = collections.OrderedDict()
            self._terminate = False
            self._writes = collections.OrderedDict()
            self._writes_pending.clear()
            self._mode = Scheduler._MODE_TERM

    def begin_test(self, trigger=None):
        """Called to initiate a test.

        Could be called on start-up or from a callback.
        """
        if _debug:
            self.log.debug("begin_test called with trigger: %s" %
                           (str(trigger)))
        if _profiling:
            ps = pstats.Stats(_profile).sort_stats('cumulative')
            ps.dump_stats("test_profile.pstat")
            ctx = profiling_context()
        else:
            ctx = nullcontext()

        with ctx:
            self._mode = Scheduler._MODE_NORMAL
            if trigger is not None:
                trigger.unprime()

            # Issue previous test result, if there is one
            if self._test_result is not None:
                if _debug:
                    self.log.debug("Issue test result to regression object")
                cocotb.regression_manager.handle_result(self._test_result)
                self._test_result = None
            if self._entrypoint is not None:
                test = self._entrypoint
                self._entrypoint = None
                self.schedule(test)
                self._check_termination()

    def react(self, trigger):
        """
        Called when a trigger fires.

        We ensure that we only start the event loop once, rather than
        letting it recurse.
        """
        if self._is_reacting:
            # queue up the trigger, the event loop will get to it
            self._pending_triggers.append(trigger)
            return

        if self._pending_triggers:
            raise InternalError(
                "Expected all triggers to be handled but found {}".format(
                    self._pending_triggers))

        # start the event loop
        self._is_reacting = True
        try:
            self._event_loop(trigger)
        finally:
            self._is_reacting = False

    def _event_loop(self, trigger):
        """
        Run an event loop triggered by the given trigger.

        The loop will keep running until no further triggers fire.

        This should be triggered by only:
        * The beginning of a test, when there is no trigger to react to
        * A GPI trigger
        """
        if _profiling:
            ctx = profiling_context()
        else:
            ctx = nullcontext()

        with ctx:
            # When a trigger fires it is unprimed internally
            if _debug:
                self.log.debug("Trigger fired: %s" % str(trigger))
            # trigger.unprime()

            if self._mode == Scheduler._MODE_TERM:
                if _debug:
                    self.log.debug(
                        "Ignoring trigger %s since we're terminating" %
                        str(trigger))
                return

            if trigger is self._readonly:
                self._mode = Scheduler._MODE_READONLY
            # Only GPI triggers affect the simulator scheduling mode
            elif isinstance(trigger, GPITrigger):
                self._mode = Scheduler._MODE_NORMAL

            # work through triggers one by one
            is_first = True
            self._pending_triggers.append(trigger)
            while self._pending_triggers:
                trigger = self._pending_triggers.pop(0)

                if not is_first and isinstance(trigger, GPITrigger):
                    self.log.warning(
                        "A GPI trigger occurred after entering react - this "
                        "should not happen.")
                    assert False

                # this only exists to enable the warning above
                is_first = False

                if trigger not in self._trigger2coros:

                    # GPI triggers should only be ever pending if there is an
                    # associated coroutine waiting on that trigger, otherwise it would
                    # have been unprimed already
                    if isinstance(trigger, GPITrigger):
                        self.log.critical(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                        trigger.log.info("I'm the culprit")
                    # For Python triggers this isn't actually an error - we might do
                    # event.set() without knowing whether any coroutines are actually
                    # waiting on this event, for example
                    elif _debug:
                        self.log.debug(
                            "No coroutines waiting on trigger that fired: %s" %
                            str(trigger))

                    del trigger
                    continue

                # Scheduled coroutines may append to our waiting list so the first
                # thing to do is pop all entries waiting on this trigger.
                scheduling = self._trigger2coros.pop(trigger)

                if _debug:
                    debugstr = "\n\t".join(
                        [coro.__name__ for coro in scheduling])
                    if len(scheduling):
                        debugstr = "\n\t" + debugstr
                    self.log.debug("%d pending coroutines for event %s%s" %
                                   (len(scheduling), str(trigger), debugstr))

                # This trigger isn't needed any more
                trigger.unprime()

                for coro in scheduling:
                    if _debug:
                        self.log.debug("Scheduling coroutine %s" %
                                       (coro.__name__))
                    self.schedule(coro, trigger=trigger)
                    if _debug:
                        self.log.debug("Scheduled coroutine %s" %
                                       (coro.__name__))

                # Schedule may have queued up some events so we'll burn through those
                while self._pending_events:
                    if _debug:
                        self.log.debug("Scheduling pending event %s" %
                                       (str(self._pending_events[0])))
                    self._pending_events.pop(0).set()

                # remove our reference to the objects at the end of each loop,
                # to try and avoid them being destroyed at a weird time (as
                # happened in gh-957)
                del trigger
                del coro
                del scheduling

            # no more pending triggers
            self._check_termination()
            if _debug:
                self.log.debug("All coroutines scheduled, handing control back"
                               " to simulator")

    def unschedule(self, coro):
        """Unschedule a coroutine.  Unprime any pending triggers"""

        # Unprime the trigger this coroutine is waiting on
        try:
            trigger = self._coro2trigger.pop(coro)
        except KeyError:
            # coroutine probably finished
            pass
        else:
            if coro in self._trigger2coros.setdefault(trigger, []):
                self._trigger2coros[trigger].remove(coro)
            if not self._trigger2coros[trigger]:
                trigger.unprime()
                del self._trigger2coros[trigger]

        if Join(coro) in self._trigger2coros:
            self.react(Join(coro))
        else:
            try:
                # throws an error if the background coroutine errored
                # and no one was monitoring it
                coro.retval
            except TestComplete as test_result:
                self.log.debug("TestComplete received: {}".format(
                    test_result.__class__.__name__))
                self.finish_test(test_result)
            except Exception as e:
                self.finish_test(
                    create_error(
                        self,
                        "Forked coroutine {} raised exception: {}".format(
                            coro, e)))

    def save_write(self, handle, value):
        if self._mode == Scheduler._MODE_READONLY:
            raise Exception(
                "Write to object {0} was scheduled during a read-only sync phase."
                .format(handle._name))

        # TODO: we should be able to better keep track of when this needs to
        # be scheduled
        if self._write_coro_inst is None:
            self._write_coro_inst = self._do_writes()
            self.schedule(self._write_coro_inst)

        self._writes[handle] = value
        self._writes_pending.set()

    def _coroutine_yielded(self, coro, trigger):
        """Prime the trigger and update our internal mappings."""
        self._coro2trigger[coro] = trigger

        trigger_coros = self._trigger2coros.setdefault(trigger, [])
        if coro is self._write_coro_inst:
            # Our internal write coroutine always runs before any user coroutines.
            # This preserves the behavior prior to the refactoring of writes to
            # this coroutine.
            trigger_coros.insert(0, coro)
        else:
            # Everything else joins the back of the queue
            trigger_coros.append(coro)

        if not trigger.primed:
            try:
                trigger.prime(self.react)
            except Exception as e:
                # Convert any exceptions into a test result
                self.finish_test(
                    create_error(
                        self, "Unable to prime trigger %s: %s" %
                        (str(trigger), str(e))))

    def queue(self, coroutine):
        """Queue a coroutine for execution"""
        self._pending_coros.append(coroutine)

    def queue_function(self, coroutine):
        """Queue a coroutine for execution and move the containing thread
        so that it does not block execution of the main thread any longer.
        """
        # We should be able to find ourselves inside the _pending_threads list
        matching_threads = [
            t for t in self._pending_threads
            if t.thread == threading.current_thread()
        ]
        if len(matching_threads) == 0:
            raise RuntimeError(
                "queue_function called from unrecognized thread")

        # Raises if there is more than one match. This can never happen, since
        # each entry always has a unique thread.
        t, = matching_threads

        t.thread_suspend()
        self._pending_coros.append(coroutine)
        return t

    def run_in_executor(self, func, *args, **kwargs):
        """Run the coroutine in a separate execution thread
        and return a yieldable object for the caller.
        """

        # Create a thread
        # Create a trigger that is called as a result of the thread finishing
        # Create an Event object that the caller can yield on
        # Event object set when the thread finishes execution, this blocks the
        #   calling coroutine (but not the thread) until the external completes

        def execute_external(func, _waiter):
            _waiter._outcome = outcomes.capture(func, *args, **kwargs)
            if _debug:
                self.log.debug("Execution of external routine done %s" %
                               threading.current_thread())
            _waiter.thread_done()

        waiter = external_waiter()
        thread = threading.Thread(group=None,
                                  target=execute_external,
                                  name=func.__name__ + "_thread",
                                  args=([func, waiter]),
                                  kwargs={})

        waiter.thread = thread
        self._pending_threads.append(waiter)

        return waiter

    def add(self, coroutine):
        """Add a new coroutine.

        Just a wrapper around self.schedule which provides some debug and
        useful error messages in the event of common gotchas.
        """
        if isinstance(coroutine, cocotb.decorators.coroutine):
            self.log.critical(
                "Attempt to schedule a coroutine that hasn't started")
            coroutine.log.error("This is the failing coroutine")
            self.log.warning(
                "Did you forget to add parentheses to the @test decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        elif not isinstance(coroutine, cocotb.decorators.RunningCoroutine):
            self.log.critical(
                "Attempt to add something to the scheduler which isn't a "
                "coroutine")
            self.log.warning("Got: %s (%s)" %
                             (str(type(coroutine)), repr(coroutine)))
            self.log.warning("Did you use the @coroutine decorator?")
            self._test_result = TestError(
                "Attempt to schedule a coroutine that hasn't started")
            self._terminate = True
            return

        if _debug:
            self.log.debug("Adding new coroutine %s" % coroutine.__name__)

        self.schedule(coroutine)
        self._check_termination()
        return coroutine

    def new_test(self, coroutine):
        self._entrypoint = coroutine

    def schedule(self, coroutine, trigger=None):
        """Schedule a coroutine by calling the send method.

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule.
            trigger (cocotb.triggers.Trigger): The trigger that caused this
                coroutine to be scheduled.
        """
        if trigger is None:
            send_outcome = outcomes.Value(None)
        else:
            send_outcome = trigger._outcome
        if _debug:
            self.log.debug("Scheduling with {}".format(send_outcome))

        try:
            result = coroutine._advance(send_outcome)
            if _debug:
                self.log.debug("Coroutine %s yielded %s (mode %d)" %
                               (coroutine.__name__, str(result), self._mode))

        # TestComplete indication is game over, tidy up
        except TestComplete as test_result:
            # Tag that close down is needed, save the test_result
            # for later use in cleanup handler
            self.log.debug("TestComplete received: %s" %
                           test_result.__class__.__name__)
            self.finish_test(test_result)
            return

        # Normal coroutine completion
        except cocotb.decorators.CoroutineComplete as exc:
            if _debug:
                self.log.debug("Coroutine completed: %s" % str(coroutine))
            self.unschedule(coroutine)
            return

        # Don't handle the result if we're shutting down
        if self._terminate:
            return

        # convert lists into `First` Waitables.
        if isinstance(result, list):
            result = cocotb.triggers.First(*result)

        # convert waitables into coroutines
        if isinstance(result, cocotb.triggers.Waitable):
            result = result._wait()

        # convert coroutines into triggers
        if isinstance(result, cocotb.decorators.RunningCoroutine):
            if not result.has_started():
                self.queue(result)
                if _debug:
                    self.log.debug("Scheduling nested coroutine: %s" %
                                   result.__name__)
            else:
                if _debug:
                    self.log.debug("Joining to already running coroutine: %s" %
                                   result.__name__)

            result = result.join()

        if isinstance(result, Trigger):
            if _debug:
                self.log.debug("%s: is instance of Trigger" % result)
            self._coroutine_yielded(coroutine, result)

        else:
            msg = (
                "Coroutine %s yielded something the scheduler can't handle" %
                str(coroutine))
            msg += ("\nGot type: %s repr: %s str: %s" %
                    (type(result), repr(result), str(result)))
            msg += "\nDid you forget to decorate with @cocotb.coroutine?"
            try:
                raise_error(self, msg)
            except Exception as e:
                self.finish_test(e)

        # We do not return from here until pending threads have completed, but
        # only from the main thread; this seems like it could be problematic in
        # cases where a sim might change what this thread is.
        def unblock_event(ext):
            @cocotb.coroutine
            def wrapper():
                ext.event.set()
                yield PythonTrigger()

        if self._main_thread is threading.current_thread():

            for ext in self._pending_threads:
                ext.thread_start()
                if _debug:
                    self.log.debug("Blocking from %s on %s" %
                                   (threading.current_thread(), ext.thread))
                state = ext.thread_wait()
                if _debug:
                    self.log.debug(
                        "Back from wait on self %s with newstate %d" %
                        (threading.current_thread(), state))
                if state == external_state.EXITED:
                    self._pending_threads.remove(ext)
                    self._pending_events.append(ext.event)

        # Handle any newly queued coroutines that need to be scheduled
        while self._pending_coros:
            self.add(self._pending_coros.pop(0))

    def finish_test(self, test_result):
        """Cache the test result and set the terminate flag."""
        self.log.debug("finish_test called with %s" % (repr(test_result)))
        if not self._terminate:
            self._terminate = True
            self._test_result = test_result
            self.cleanup()

    def finish_scheduler(self, test_result):
        """Directly call into the regression manager and end test
           once we return the sim will close us so no cleanup is needed.
        """
        self.log.debug("Issue sim closedown result to regression object")
        cocotb.regression_manager.handle_result(test_result)

    def cleanup(self):
        """Clear up all our state.

        Unprime all pending triggers, kill off any coroutines, and stop all externals.
        """
        # copy since we modify this in kill
        items = list(self._trigger2coros.items())

        # reversing seems to fix gh-928, although the order is still somewhat
        # arbitrary.
        for trigger, waiting in items[::-1]:
            for coro in waiting:
                if _debug:
                    self.log.debug("Killing %s" % str(coro))
                coro.kill()

        if self._main_thread is not threading.current_thread():
            raise Exception("Cleanup() called outside of the main thread")

        for ext in self._pending_threads:
            self.log.warn("Waiting for %s to exit", ext.thread)
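
A small user-level sketch (hypothetical clk/enable/status signals; not part of the scheduler source) of the modes described in the docstring above: assignments made while in normal mode are cached through save_write() and replayed by the internal write coroutine, while yielding ReadOnly() wakes the coroutine only after all delta cycles have settled, at which point further writes are rejected.

import cocotb
from cocotb.triggers import ReadOnly, RisingEdge


@cocotb.coroutine
def sample_after_settle(dut):
    yield RisingEdge(dut.clk)   # scheduled in normal mode: the write below is cached
    dut.enable <= 1             # deposited later via save_write()/_do_writes()
    yield ReadOnly()            # read-only mode: values have settled, no writes allowed
    dut._log.info("settled status: %s" % str(dut.status.value))
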
Code Example #31
class AvalonMaster(AvalonMM):
    """Avalon Memory Mapped Interface (Avalon-MM) Master."""
    def __init__(self, entity, name, clock, **kwargs):
        AvalonMM.__init__(self, entity, name, clock, **kwargs)
        self.log.debug("AvalonMaster created")
        self.busy_event = Event("%s_busy" % name)
        self.busy = False

    def __len__(self):
        return 2**len(self.bus.address)

    @coroutine
    def _acquire_lock(self):
        if self.busy:
            yield self.busy_event.wait()
        self.busy_event.clear()
        self.busy = True

    def _release_lock(self):
        self.busy = False
        self.busy_event.set()

    @coroutine
    def read(self, address, sync=True):
        """Issue a request to the bus and block until this comes back.

        Simulation time still progresses
        but syntactically it blocks.

        Args:
            address (int): The address to read from.
            sync (bool, optional): Wait for rising edge on clock initially.
                Defaults to True.

        Returns:
            BinaryValue: The read data value.

        Raises:
            :any:`TestError`: If master is write-only.
        """
        if not self._can_read:
            self.log.error("Cannot read - have no read signal")
            raise TestError("Attempt to read on a write-only AvalonMaster")

        yield self._acquire_lock()

        # Apply values for next clock edge
        if sync:
            yield RisingEdge(self.clock)
        self.bus.address <= address
        self.bus.read <= 1
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= int("1"*len(self.bus.byteenable), 2)
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 1

        # Wait for waitrequest to be low
        if hasattr(self.bus, "waitrequest"):
            yield self._wait_for_nsignal(self.bus.waitrequest)
        yield RisingEdge(self.clock)

        # Deassert read
        self.bus.read <= 0
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= 0
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 0
        v = self.bus.address.value
        v.binstr = "x" * len(self.bus.address)
        self.bus.address <= v

        if hasattr(self.bus, "readdatavalid"):
            while True:
                yield ReadOnly()
                if int(self.bus.readdatavalid):
                    break
                yield RisingEdge(self.clock)
        else:
            # Assume readLatency = 1 if no readdatavalid
            # FIXME need to configure this,
            # should take a dictionary of Avalon properties.
            yield ReadOnly()

        # Get the data
        data = self.bus.readdata.value

        self._release_lock()
        raise ReturnValue(data)

    @coroutine
    def write(self, address, value):
        """Issue a write to the given address with the specified
        value.

        Args:
            address (int): The address to write to.
            value (int): The data value to write.

        Raises:
            :any:`TestError`: If master is read-only.
        """
        if not self._can_write:
            self.log.error("Cannot write - have no write signal")
            raise TestError("Attempt to write on a read-only AvalonMaster")

        yield self._acquire_lock()

        # Apply values to bus
        yield RisingEdge(self.clock)
        self.bus.address <= address
        self.bus.writedata <= value
        self.bus.write <= 1
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= int("1"*len(self.bus.byteenable), 2)
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 1

        # Wait for waitrequest to be low
        if hasattr(self.bus, "waitrequest"):
            count = yield self._wait_for_nsignal(self.bus.waitrequest)

        # Deassert write
        yield RisingEdge(self.clock)
        self.bus.write <= 0
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= 0
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 0
        v = self.bus.address.value
        v.binstr = "x" * len(self.bus.address)
        self.bus.address <= v

        v = self.bus.writedata.value
        v.binstr = "x" * len(self.bus.writedata)
        self.bus.writedata <= v
        self._release_lock()
Code Example #32
class UVMRoot(UVMComponent):
    # Function: get()
    # Static accessor for <uvm_root>.
    #
    # The static accessor is provided as a convenience wrapper
    # around retrieving the root via the <uvm_coreservice_t::get_root>
    # method.
    #
    # | // Using the uvm_coreservice_t:
    # | uvm_coreservice_t cs
    # | uvm_root r
    # | cs = uvm_coreservice_t::get()
    # | r = cs.get_root()
    # |
    # | // Not using the uvm_coreservice_t:
    # | uvm_root r
    # | r = uvm_root::get()
    #

    m_relnotes_done = False

    @classmethod
    def get(cls):
        from .uvm_coreservice import UVMCoreService
        cs = UVMCoreService.get()
        return cs.get_root()

    m_inst = None

    def __init__(self):
        UVMComponent.__init__(self, "__top__", None)
        #self.m_children = {}
        self.clp = UVMCmdlineProcessor.get_inst()
        self.finish_on_completion = True
        self.m_phase_all_done = False
        self.m_phase_all_done_event = Event('phase_all_done_event')
        # If set, then the entire testbench topology is printed just after completion
        # of the end_of_elaboration phase.
        self.enable_print_topology = False

        self.m_rh.set_name("reporter")
        self.report_header()

        #  // This sets up the global verbosity. Other command line args may
        #  // change individual component verbosity.
        self.m_check_verbosity()

        #  // Variable: top_levels
        #  //
        #  // This variable is a list of all of the top level components in UVM. It
        #  // includes the uvm_test_top component that is created by <run_test> as
        #  // well as any other top level components that have been instantiated
        #  // anywhere in the hierarchy.
        self.top_levels = []

    #endfunction

    m_called_get_common_domain = False
    #   // internal function not to be used
    #   // get the initialized singleton instance of uvm_root
    @classmethod
    def m_uvm_get_root(cls):
        from .uvm_domain import UVMDomain
        if UVMRoot.m_inst is None:
            UVMRoot.m_inst = UVMRoot()
        # tpoikela: Added flag to avoid infinite recursion
        if cls.m_called_get_common_domain is False:
            cls.m_called_get_common_domain = True
            UVMDomain.get_common_domain()
            UVMRoot.m_inst.m_domain = UVMDomain.get_uvm_domain()
            cls.m_called_get_common_domain = False
        return UVMRoot.m_inst

    #@cocotb.coroutine
    #def run_phase(self):
    # TODO at checks that sim_time == 0

    def get_type_name(self):
        return "uvm_root"

    def final_phase(self, phase):
        from .uvm_coreservice import UVMCoreService
        cs = UVMCoreService.get()
        tr_db = cs.get_default_tr_database()
        if tr_db.is_open():
            tr_db.close_db()

    #----------------------------------------------------------------------------
    # Group: Simulation Control
    #----------------------------------------------------------------------------

    # Task: run_test
    #
    # Phases all components through all registered phases. If the optional
    # test_name argument is provided, or if a command-line plusarg,
    # +UVM_TESTNAME=TEST_NAME, is found, then the specified component is created
    # just prior to phasing. The test may contain new verification components or
    # the entire testbench, in which case the test and testbench can be chosen from
    # the command line without forcing recompilation. If the global (package)
    # variable, finish_on_completion, is set, then $finish is called after
    # phasing completes.

    @cocotb.coroutine
    def run_test(self, test_name=""):
        uvm_debug(self, 'run_test', 'Called with testname |' + test_name + '|')
        from .uvm_coreservice import UVMCoreService
        cs = UVMCoreService.get()
        factory = cs.get_factory()
        testname_plusarg = False
        test_names = []
        msg = ""
        uvm_test_top = None  # uvm_component
        ##process phase_runner_proc; // store thread forked below for final cleanup

        # Set up the process that decouples the thread that drops objections from
        # the process that processes drop/all_dropped objections. Thus, if the
        # original calling thread (the "dropper") gets killed, it does not affect
        # drain-time and propagation of the drop up the hierarchy.
        # Needs to be done in run_test since it needs to be in an
        # initial block to fork a process.
        uvm_debug(self, 'run_test', 'Calling m_init_objections')
        yield UVMObjection().m_init_objections()

        # Retrieve the test names provided on the command line.  Command line
        # overrides the argument.
        test_name_count = self.clp.get_arg_values("+UVM_TESTNAME=", test_names)

        uvm_debug(self, 'run_test',
                  'Found testnames from cmdline: ' + str(test_names))
        # If at least one, use first in queue.
        if test_name_count > 0:
            test_name = test_names[0]
            uvm_debug(self, 'run_test', 'Found test name %s' % (test_name))
            testname_plusarg = True

        # If multiple were provided, issue a warning giving the number, which
        # one will be used, and the complete list.
        if test_name_count > 1:
            test_list = ""
            sep = ""
            for i in range(0, len(test_names)):
                if i != 0:
                    sep = ", "
                test_list = test_list + sep + test_names[i]
            self.uvm_report_warning(
                "MULTTST",
                MULTI_TESTS.format(test_name_count, test_name, test_list),
                UVM_NONE)

        # if test now defined, create it using common factory
        uvm_debug(self, 'run_test', 'Running now test ' + test_name)
        if test_name != "":
            if "uvm_test_top" in self.m_children:
                uvm_fatal(
                    "TTINST",
                    "An uvm_test_top already exists via a previous call to run_test",
                    UVM_NONE)
            #0; // forces shutdown because $finish is forked
            yield Timer(0, "NS")
            uvm_debug(self, 'run_test',
                      "factory.create in UVMRoot testname " + test_name)

            uvm_test_top = factory.create_component_by_name(
                test_name, "", "uvm_test_top", None)

            if uvm_test_top is None:
                if testname_plusarg:
                    msg = "command line +UVM_TESTNAME=" + test_name
                else:
                    msg = "call to run_test(" + test_name + ")"
                uvm_report_fatal("INVTST",
                                 "Requested test from " + msg + " not found.",
                                 UVM_NONE)
        yield Timer(0, "NS")

        if len(self.m_children) == 0:
            self.uvm_report_fatal(
                "NOCOMP",
                ("No components instantiated. You must either instantiate" +
                 " at least one component before calling run_test or use" +
                 " run_test to do so. To run a test using run_test," +
                 " use +UVM_TESTNAME or supply the test name in" +
                 " the argument to run_test(). Exiting simulation."), UVM_NONE)
            return

        self.running_test_msg(test_name, uvm_test_top)

        # phase runner, isolated from calling process
        #fork begin
        # spawn the phase runner task
        #phase_runner_proc = process::self()
        uvm_debug(self, 'run_test', 'Forking now phases')
        cocotb.fork(UVMPhase.m_run_phases())
        uvm_debug(self, 'run_test', 'After phase-fork executing')
        #end
        #join_none
        yield Timer(0)
        ##0; // let the phase runner start

        uvm_debug(self, 'run_test', 'Waiting all phases to complete JKJK')
        yield self.wait_all_phases_done()
        uvm_debug(self, 'run_test', 'All phases are done now JKJK')

        #// clean up after ourselves
        #phase_runner_proc.kill()
        l_rs = get_report_server()
        l_rs.report_summarize()
        if self.finish_on_completion:
            self.uvm_report_info('FINISH', '$finish was reached in run_test()',
                                 UVM_NONE)

    @cocotb.coroutine
    def wait_all_phases_done(self):
        yield self.m_phase_all_done_event.wait()
        #wait (m_phase_all_done == 1)

    # Function: die
    #
    # This method is called by the report server if a report reaches the maximum
    # quit count or has a UVM_EXIT action associated with it, e.g., as with
    # fatal errors.
    #
    # Calls the <uvm_component::pre_abort()> method
    # on the entire <uvm_component> hierarchy in a bottom-up fashion.
    # It then calls <uvm_report_server::report_summarize> and terminates the simulation
    # with ~$finish~.
    def die(self):
        l_rs = get_report_server()
        # do the pre_abort callbacks
        self.m_do_pre_abort()
        l_rs.report_summarize()
        #$finish
        raise Exception('die(): $finish from UVMRoot')

    def running_test_msg(self, test_name, uvm_test_top):
        if test_name == "":
            self.uvm_report_info("RNTST", "Running test ...", UVM_LOW)
        elif test_name == uvm_test_top.get_type_name():
            self.uvm_report_info("RNTST", "Running test " + test_name + "...",
                                 UVM_LOW)
        else:
            self.uvm_report_info(
                "RNTST",
                ("Running test " + uvm_test_top.get_type_name() +
                 " (via factory override for test \"" + test_name + "\")..."),
                UVM_LOW)

    #  // Function: set_timeout
    #  //
    #  // Specifies the timeout for the simulation. Default is <`UVM_DEFAULT_TIMEOUT>
    #  //
    #  // The timeout is simply the maximum absolute simulation time allowed before a
    #  // ~FATAL~ occurs.  If the timeout is set to 20ns, then the simulation must end
    #  // before 20ns, or a ~FATAL~ timeout will occur.
    #  //
    #  // This is provided so that the user can prevent the simulation from potentially
    #  // consuming too many resources (Disk, Memory, CPU, etc) when the testbench is
    #  // essentially hung.
    #  //
    #  //
    #
    #  extern function void set_timeout(time timeout, bit overridable=1)

    #  // Variable: finish_on_completion
    #  //
    #  // If set, then run_test will call $finish after all phases are executed.
    #
    #  bit  finish_on_completion = 1
    #
    #
    #  //----------------------------------------------------------------------------
    #  // Group: Topology
    #  //----------------------------------------------------------------------------
    #
    #

    #  // Function: find
    #
    #  extern function uvm_component find (string comp_match)
    def find(self, comp_match):
        comp_list = []

        self.find_all(comp_match, comp_list)

        if len(comp_list) > 1:
            uvm_report_warning(
                "MMATCH",
                sv.sformatf(
                    "Found %0d components matching '%s'. Returning first match, %0s.",
                    len(comp_list), comp_match, comp_list[0].get_full_name()),
                UVM_NONE)

        if len(comp_list) == 0:
            uvm_report_warning(
                "CMPNFD", "Component matching '", comp_match,
                "' was not found in the list of uvm_components", UVM_NONE)
            return None

        return comp_list[0]

    #
    #  // Function: find_all
    #  //
    #  // Returns the component handle (find) or list of components handles
    #  // (find_all) matching a given string. The string may contain the wildcards,
    #  // * and ?. Strings beginning with '.' are absolute path names. If the optional
    #  // argument comp is provided, then search begins from that component down
    #  // (default=all components).
    #
    #  extern function void find_all (string comp_match,
    #                                 ref uvm_component comps[$],
    #                                 input uvm_component comp=None)
    #
    def find_all(self, comp_match, comps, comp=None):
        if (comp is None):
            comp = self
        self.m_find_all_recurse(comp_match, comps, comp)
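
    #  A hypothetical usage sketch (not part of the original source): the root
    #  singleton can be queried for components by wildcard path, e.g.
    #
    #    root = UVMRoot.get()
    #    comps = []
    #    root.find_all("*driver*", comps)              # every matching component
    #    drv = root.find("uvm_test_top.env.*.driver")  # first match, or None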

    #  // Function: print_topology
    #  //
    #  // Print the verification environment's component topology. The
    #  // ~printer~ is a <uvm_printer> object that controls the format
    #  // of the topology printout; a ~None~ printer prints with the
    #  // default output.
    #
    #function void uvm_root::print_topology(uvm_printer printer=None)
    def print_topology(self, printer=None):
        s = ""
        if (len(self.m_children) == 0):
            self.uvm_report_warning(
                "EMTCOMP", "print_topology - No UVM components to print.",
                UVM_NONE)
            return

        if (printer is None):
            from .uvm_global_vars import uvm_default_printer
            printer = uvm_default_printer

        for c in self.m_children:
            if self.m_children[c].print_enabled:
                printer.print_object("", self.m_children[c])
        self.uvm_report_info("UVMTOP",
                             "UVM testbench topology:\n" + printer.emit(),
                             UVM_NONE)

    #  // Variable- phase_timeout
    #  //
    #  // Specifies the timeout for the run phase. Default is `UVM_DEFAULT_TIMEOUT
    #
    #
    #  time phase_timeout = `UVM_DEFAULT_TIMEOUT
    #
    #

    #  // PRIVATE members
    #  extern function void m_find_all_recurse(string comp_match,
    #                                          ref uvm_component comps[$],
    #                                          input uvm_component comp=None);
    def m_find_all_recurse(self, comp_match, comps, comp=None):
        child_comp = None

        if comp.has_first_child():
            child_comp = comp.get_first_child()
            while True:
                self.m_find_all_recurse(comp_match, comps, child_comp)
                if not comp.has_next_child():
                    break
                else:
                    child_comp = comp.get_next_child()
        if uvm_is_match(comp_match,
                        comp.get_full_name()) and comp.get_name() != "":
            comps.append(comp)

    #  extern protected function new ()

    #  extern protected virtual function bit m_add_child (uvm_component child)
    #// Add to the top levels array
    def m_add_child(self, child):
        if (super().m_add_child(child)):
            if child.get_name() == "uvm_test_top":
                self.top_levels.insert(0, child)
            else:
                self.top_levels.append(child)
            return True
        else:
            return False
        #endfunction
        #

    #  extern function void build_phase(uvm_phase phase)
    def build_phase(self, phase):
        UVMComponent.build_phase(self, phase)
        #self.m_set_cl_msg_args()
        self.m_do_verbosity_settings()
        self.m_do_timeout_settings()
        self.m_do_factory_settings()
        self.m_do_config_settings()
        # TODO self.m_do_max_quit_settings()
        self.m_do_dump_args()
        #endfunction

    #  extern local function void m_do_verbosity_settings()
    def m_do_verbosity_settings(self):
        set_verbosity_settings = []
        split_vals = []
        tmp_verb = 0

        # Retrieve them all into set_verbosity_settings
        self.clp.get_arg_values("+uvm_set_verbosity=", set_verbosity_settings)

        for i in range(len(set_verbosity_settings)):
            uvm_split_string(set_verbosity_settings[i], ",", split_vals)
            if len(split_vals) < 4 or len(split_vals) > 5:
                self.uvm_report_warning(
                    "INVLCMDARGS",
                    sv.sformatf(MSG_INVLCMDARGS, set_verbosity_settings[i]),
                    UVM_NONE, "", "")

            # Invalid verbosity
            if not (self.clp.m_convert_verb(split_vals[2], tmp_verb)):
                self.uvm_report_warning(
                    "INVLCMDVERB",
                    sv.sformatf(
                        "Invalid verbosity found on the command line for setting '%s'.",
                        set_verbosity_settings[i]), UVM_NONE, "", "")
        #endfunction
        #
        #
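
    #  Hypothetical examples of the plusarg format parsed above (standard UVM
    #  syntax: component path, id, verbosity, phase, and a time when the phase
    #  field is "time"):
    #
    #    +uvm_set_verbosity=uvm_test_top.env*,_ALL_,UVM_FULL,time,800
    #    +uvm_set_verbosity=uvm_test_top.env.agent,driver,UVM_DEBUG,run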

    #  extern local function void m_do_timeout_settings()
    def m_do_timeout_settings(self):
        timeout_settings = []
        timeout = ""
        timeout_int = 0
        override_spec = ""
        timeout_count = self.clp.get_arg_values("+UVM_TIMEOUT=",
                                                timeout_settings)

        if timeout_count == 0:
            return
        else:
            timeout = timeout_settings[0]
            if timeout_count > 1:
                timeout_list = ""
                sep = ""
                for i in range(len(timeout_settings)):
                    if (i != 0):
                        sep = "; "
                    timeout_list = timeout_list + sep + timeout_settings[i]

                self.uvm_report_warning(
                    "MULTTIMOUT",
                    sv.sformatf(MSG_MULTTIMOUT, timeout_count, timeout,
                                timeout_list), UVM_NONE)

            self.uvm_report_info(
                "TIMOUTSET",
                sv.sformatf(
                    "'+UVM_TIMEOUT=%s' provided on the command line is being applied.",
                    timeout), UVM_NONE)
            sv.sscanf(timeout, "%d,%s", timeout_int, override_spec)
            #case(override_spec)
            if override_spec == "YES":
                self.set_timeout(timeout_int, 1)
            elif override_spec == "NO":
                self.set_timeout(timeout_int, 0)
            else:
                self.set_timeout(timeout_int, 1)
        #endfunction
        #
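
    #  Hypothetical example of the timeout plusarg parsed above (the "%d,%s"
    #  pattern expects a time value and a YES/NO override flag):
    #
    #    +UVM_TIMEOUT=200000,YES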

    #  extern local function void m_do_factory_settings()
    def m_do_factory_settings(self):
        args = []

        self.clp.get_arg_matches(
            "/^\\+(UVM_SET_INST_OVERRIDE|uvm_set_inst_override)=/", args)
        for i in range(len(args)):
            value = args[i][23:len(args[i])]
            self.m_process_inst_override(value)

        self.clp.get_arg_matches(
            "/^\\+(UVM_SET_TYPE_OVERRIDE|uvm_set_type_override)=/", args)
        for i in range(len(args)):
            value = args[i][23:len(args[i])]
            self.m_process_type_override(value)
        #endfunction
        #

    #  extern local function void m_process_inst_override(string ovr)
    def m_process_inst_override(self, ovr):
        from .uvm_coreservice import UVMCoreService
        split_val = []
        cs = UVMCoreService.get()
        factory = cs.get_factory()

        uvm_split_string(ovr, ",", split_val)

        if len(split_val) != 3:
            self.uvm_report_error(
                "UVM_CMDLINE_PROC",
                "Invalid setting for +uvm_set_inst_override=" + ovr +
                ", setting must specify <requested_type>,<override_type>,<instance_path>",
                UVM_NONE)
            return

        uvm_info(
            "INSTOVR",
            "Applying instance override from the command line: +uvm_set_inst_override=" + ovr,
            UVM_NONE)
        factory.set_inst_override_by_name(split_val[0], split_val[1],
                                          split_val[2])
        #endfunction
        #

    #  extern local function void m_process_type_override(string ovr)
    def m_process_type_override(self, ovr):
        pass  # TODO

    #  extern local function void m_do_config_settings()
    def m_do_config_settings(self):
        args = []

        self.clp.get_arg_matches(
            "/^\\+(UVM_SET_CONFIG_INT|uvm_set_config_int)=/", args)
        for i in range(len(args)):
            self.m_process_config(args[i][20:len(args[i])], 1)

        self.clp.get_arg_matches(
            "/^\\+(UVM_SET_CONFIG_STRING|uvm_set_config_string)=/", args)
        for i in range(len(args)):
            self.m_process_config(args[i][23:len(args[i])], 0)

        # TODO might not be needed, ever
        #self.clp.get_arg_matches("/^\\+(UVM_SET_DEFAULT_SEQUENCE|uvm_set_default_sequence)=/", args)
        #for i in range(len(args)):
        #    self.m_process_default_sequence(args[i][26:len(args[i])])

        #endfunction
        #
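
    #  Hypothetical examples of the config plusargs processed above (standard
    #  UVM syntax: component path, field name, value):
    #
    #    +uvm_set_config_int=uvm_test_top.env,num_masters,2
    #    +uvm_set_config_string=uvm_test_top.env.agent,if_name,dut_if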

    #  extern local function void m_do_max_quit_settings()

    #  extern local function void m_do_dump_args()
    def m_do_dump_args(self):
        dump_args = []
        all_args = []
        out_string = ""
        if self.clp.get_arg_matches("+UVM_DUMP_CMDLINE_ARGS", dump_args):
            all_args = self.clp.get_args()
            for i in range(len(all_args)):
                if (all_args[i] == "__-f__"):
                    continue
                out_string = out_string + all_args[i] + " "
            uvm_info("DUMPARGS", out_string, UVM_NONE)
        #endfunction

    #  extern local function void m_process_config(string cfg, bit is_int)
    def m_process_config(self, cfg, is_int):
        v = 0
        split_val = []
        cs = UVMCoreService.get()
        m_uvm_top = cs.get_root()

        uvm_split_string(cfg, ",", split_val)
        if len(split_val) == 1:
            uvm_report_error("UVM_CMDLINE_PROC",
                             ("Invalid +uvm_set_config command\"" + cfg +
                              "\" missing field and value: component is \"" +
                              split_val[0] + "\""), UVM_NONE)
            return

        if len(split_val) == 2:
            uvm_report_error(
                "UVM_CMDLINE_PROC", "Invalid +uvm_set_config command\"" + cfg +
                "\" missing value: component is \"" + split_val[0] +
                "\"  field is \"" + split_val[1] + "\"", UVM_NONE)
            return

        if len(split_val) > 3:
            uvm_report_error(
                "UVM_CMDLINE_PROC",
                sv.sformatf(
                    "Invalid +uvm_set_config command\"%s\" : expected only 3 fields (component, field and value).",
                    cfg), UVM_NONE)
            return

        # TODO finish this
        #  if(is_int):
        #    if(split_val[2].len() > 2):
        #      string base, extval
        #      base = split_val[2].substr(0,1)
        #      extval = split_val[2].substr(2,split_val[2].len()-1);
        #      case(base)
        #        "'b" : v = extval.atobin()
        #        "0b" : v = extval.atobin()
        #        "'o" : v = extval.atooct()
        #        "'d" : v = extval.atoi()
        #        "'h" : v = extval.atohex()
        #        "'x" : v = extval.atohex()
        #        "0x" : v = extval.atohex()
        #        default : v = split_val[2].atoi()
        #      endcase
        #    end
        #    else begin
        #      v = split_val[2].atoi()
        #    end
        #    self.uvm_report_info("UVM_CMDLINE_PROC", {"Applying config setting from the command line: +uvm_set_config_int=", cfg}, UVM_NONE)
        #    uvm_config_int::set(m_uvm_top, split_val[0], split_val[1], v)
        #  end
        #  else begin
        #    self.uvm_report_info("UVM_CMDLINE_PROC", {"Applying config setting from the command line: +uvm_set_config_string=", cfg}, UVM_NONE)
        #    uvm_config_string::set(m_uvm_top, split_val[0], split_val[1], split_val[2])
        #  end
        #
        #endfunction

    #  extern local function void m_process_default_sequence(string cfg)

    #  extern function void m_check_verbosity()
    def m_check_verbosity(self):
        verb_string = ""
        verb_settings = []
        verb_count = 0
        plusarg = 0
        verbosity = UVM_MEDIUM

        # Retrieve the verbosities provided on the command line.
        verb_count = self.clp.get_arg_values("+UVM_VERBOSITY=", verb_settings)
        if verb_count:
            verb_settings.append(verb_string)

        # If none provided, provide message about the default being used.
        if verb_count == 0:
            self.uvm_report_info(
                "DEFVERB",
                "No verbosity specified on the command line.  Using the default: UVM_MEDIUM",
                UVM_NONE)
        #
        # If at least one, use the first.
        if verb_count > 0:
            verb_string = verb_settings[0]
            plusarg = 1

        # If more than one, provide the warning stating how many, which one will
        # be used and the complete list.
        if (verb_count > 1):
            verb_list = ""
            sep = ""
            for i in range(len(verb_settings)):
                if (i != 0):
                    sep = ", "
                verb_list = verb_list + sep + verb_settings[i]
            self.uvm_report_warning(
                "MULTVERB",
                sv.sformatf(WARN_MSG1, verb_count, verb_string, verb_list),
                UVM_NONE)

        if (plusarg == 1):
            # case(verb_string)
            if verb_string == "UVM_NONE":
                verbosity = UVM_NONE
            elif verb_string == "NONE":
                verbosity = UVM_NONE
            elif verb_string == "UVM_LOW":
                verbosity = UVM_LOW
            elif verb_string == "LOW":
                verbosity = UVM_LOW
            elif verb_string == "UVM_MEDIUM":
                verbosity = UVM_MEDIUM
            elif verb_string == "MEDIUM":
                verbosity = UVM_MEDIUM
            elif verb_string == "UVM_HIGH":
                verbosity = UVM_HIGH
            elif verb_string == "HIGH":
                verbosity = UVM_HIGH
            elif verb_string == "UVM_FULL":
                verbosity = UVM_FULL
            elif verb_string == "FULL":
                verbosity = UVM_FULL
            elif verb_string == "UVM_DEBUG":
                verbosity = UVM_DEBUG
            elif verb_string == "DEBUG":
                verbosity = UVM_DEBUG
            else:
                # Python str has no atoi(); mimic SV atoi(), which yields 0 on failure
                try:
                    verbosity = int(verb_string)
                except ValueError:
                    verbosity = 0
                if verbosity > 0:
                    self.uvm_report_info("NSTVERB",
                                         sv.sformatf(NON_STD_VERB, verbosity),
                                         UVM_NONE)
                if verbosity == 0:
                    verbosity = UVM_MEDIUM
                    self.uvm_report_warning(
                        "ILLVERB",
                        "Illegal verbosity value, using default of UVM_MEDIUM.",
                        UVM_NONE)

        self.set_report_verbosity_level_hier(verbosity)

        #endfunction

    #  extern virtual function void report_header(UVM_FILE file = 0)
    def report_header(self, _file=0):
        q = []
        args = []

        #srvr = UVMReportServer.get_server()
        clp = UVMCmdlineProcessor.get_inst()

        if clp.get_arg_matches("+UVM_NO_RELNOTES", args):
            return

        q.append("\n" + LINE + "\n")
        q.append(uvm_revision_string() + "\n")
        q.append(uvm_mgc_copyright + "\n")
        q.append(uvm_cdn_copyright + "\n")
        q.append(uvm_snps_copyright + "\n")
        q.append(uvm_cy_copyright + "\n")
        q.append(uvm_nv_copyright + "\n")
        q.append(uvm_tpoikela_copyright + "\n")
        q.append(LINE + "\n")

        if UVM_OBJECT_DO_NOT_NEED_CONSTRUCTOR == 0:
            if UVMRoot.m_relnotes_done is False:
                q.append(
                    "\n  ***********       IMPORTANT RELEASE NOTES         ************\n"
                )
            q.append(
                "\n  You are using a Python version of the UVM library which \n"
            )
            q.append("  requires cocotb and an HDL simulator.\n")
            UVMRoot.m_relnotes_done = True
        #`endif

        if UVMRoot.m_relnotes_done is True:
            q.append(
                "\n      (Specify +UVM_NO_RELNOTES to turn off this notice)\n")

        self.uvm_report_info("UVM/RELNOTES",
                             UVM_STRING_QUEUE_STREAMING_PACK(q), UVM_LOW)
        #endfunction

    #  // singleton handle
    #  static local uvm_root m_inst
    #
    #  // For error checking
    #  extern virtual task run_phase (uvm_phase phase)

    #   phase_started
    #   -------------
    #  At the end of the elaboration phase we need to do TLM binding resolution.
    def phase_started(self, phase):
        eof_elab_phase = UVMEndOfElaborationPhase.get()
        uvm_debug(self, 'phase_started', phase.get_name())
        #if phase == end_of_elaboration_ph:
        if phase.get_name() == eof_elab_phase.get_name():
            uvm_debug(self, 'phase_started',
                      "uvm_root resolving bindings now..")
            self.do_resolve_bindings()
            if self.enable_print_topology:
                self.print_topology()
            srvr = UVMReportServer.get_server()
            if srvr.get_severity_count(UVM_ERROR) > 0:
                self.uvm_report_fatal("BUILDERR",
                                      "stopping due to build errors", UVM_NONE)
Code example #33
File: __init__.py  Project: cpehle/cocotb
class Monitor(object):

    def __init__(self, callback=None, event=None):
        """
        Constructor for a monitor instance

        callback will be called with each recovered transaction as the argument

        If the callback isn't used, received transactions will be placed on a
        queue and the event used to notify any consumers.
        """
        self._event = event
        self._wait_event = None
        self._recvQ = []
        self._callbacks = []
        self.stats = MonitorStatistics()
        self._wait_event = Event()

        # Subclasses may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.monitor.%s" % (self.__class__.__name__))

        if callback is not None:
            self.add_callback(callback)

        # Create an independent coroutine which can receive stuff
        self._thread = cocotb.scheduler.add(self._monitor_recv())

    def kill(self):
        if self._thread:
            self._thread.kill()
            self._thread = None

    def __len__(self):
        return len(self._recvQ)

    def __getitem__(self, idx):
        return self._recvQ[idx]

    def add_callback(self, callback):
        self.log.debug("Adding callback of function %s to monitor" %
                       (callback.__name__))
        self._callbacks.append(callback)

    @coroutine
    def wait_for_recv(self, timeout=None):
        if timeout:
            t = Timer(timeout)
            fired = yield [self._wait_event.wait(), t]
            if fired is t:
                raise ReturnValue(None)
        else:
            yield self._wait_event.wait()

        pkt = self._wait_event.data
        raise ReturnValue(pkt)

    @coroutine
    def _monitor_recv(self):
        """
        actual implementation of the receiver

        subclasses should override this method to implement the actual receive
        routine and call self._recv() with the recovered transaction
        """
        raise NotImplementedError("Attempt to use base monitor class without "
                                  "providing a _monitor_recv method")

    def _recv(self, transaction):
        """Common handling of a received transaction."""

        self.stats.received_transactions += 1

        # either callback based consumer
        for callback in self._callbacks:
            callback(transaction)

        # Or queued with a notification
        if not self._callbacks:
            self._recvQ.append(transaction)

        if self._event is not None:
            self._event.set()

        # If anyone was waiting then let them know
        if self._wait_event is not None:
            self._wait_event.set(data=transaction)
            self._wait_event.clear()
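
A minimal sketch of a Monitor subclass illustrating the _monitor_recv/_recv contract described in the docstrings above; the clock/valid/data signals are hypothetical names from an imagined DUT interface.

import cocotb
from cocotb.triggers import RisingEdge, ReadOnly

class ValidDataMonitor(Monitor):
    """Samples `data` on every rising clock edge where `valid` is high."""

    def __init__(self, clock, valid, data, callback=None, event=None):
        # Attributes must be set before Monitor.__init__, which forks _monitor_recv()
        self.clock = clock
        self.valid = valid
        self.data = data
        Monitor.__init__(self, callback=callback, event=event)

    @cocotb.coroutine
    def _monitor_recv(self):
        while True:
            yield RisingEdge(self.clock)
            yield ReadOnly()
            if int(self.valid):
                self._recv(int(self.data))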
Code example #34
File: baser.py  Project: ylm/verilog-ethernet
class BaseRSerdesSink:
    def __init__(self,
                 data,
                 header,
                 clock,
                 enable=None,
                 scramble=True,
                 reverse=False,
                 *args,
                 **kwargs):
        self.log = logging.getLogger(f"cocotb.{data._path}")
        self.data = data
        self.header = header
        self.clock = clock
        self.enable = enable
        self.scramble = scramble
        self.reverse = reverse

        self.log.info("BASE-R serdes sink")
        self.log.info("Copyright (c) 2021 Alex Forencich")
        self.log.info("https://github.com/alexforencich/verilog-ethernet")

        super().__init__(*args, **kwargs)

        self.active = False
        self.queue = Queue()
        self.active_event = Event()

        self.queue_occupancy_bytes = 0
        self.queue_occupancy_frames = 0

        self.width = len(self.data)
        self.byte_size = 8
        self.byte_lanes = 8

        assert self.width == self.byte_lanes * self.byte_size

        self.log.info("BASE-R serdes sink model configuration")
        self.log.info("  Byte size: %d bits", self.byte_size)
        self.log.info("  Data width: %d bits (%d bytes)", self.width,
                      self.byte_lanes)
        self.log.info("  Enable scrambler: %s", self.scramble)
        self.log.info("  Bit reverse: %s", self.reverse)

        self._run_cr = cocotb.fork(self._run())

    def _recv(self, frame, compact=True):
        if self.queue.empty():
            self.active_event.clear()
        self.queue_occupancy_bytes -= len(frame)
        self.queue_occupancy_frames -= 1
        if compact:
            frame.compact()
        return frame

    async def recv(self, compact=True):
        frame = await self.queue.get()
        return self._recv(frame, compact)

    def recv_nowait(self, compact=True):
        frame = self.queue.get_nowait()
        return self._recv(frame, compact)

    def count(self):
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def idle(self):
        return not self.active

    def clear(self):
        while not self.queue.empty():
            self.queue.get_nowait()
        self.active_event.clear()
        self.queue_occupancy_bytes = 0
        self.queue_occupancy_frames = 0

    async def wait(self, timeout=0, timeout_unit=None):
        if not self.empty():
            return
        if timeout:
            await First(self.active_event.wait(), Timer(timeout, timeout_unit))
        else:
            await self.active_event.wait()

    async def _run(self):
        frame = None
        scrambler_state = 0
        self.active = False

        while True:
            await RisingEdge(self.clock)

            if self.enable is None or self.enable.value:
                data = self.data.value.integer
                header = self.header.value.integer

                if self.reverse:
                    # bit reverse
                    data = sum(1 << (63 - i) for i in range(64)
                               if (data >> i) & 1)
                    header = sum(1 << (1 - i) for i in range(2)
                                 if (header >> i) & 1)

                if self.scramble:
                    # 64b/66b descrambler
                    b = 0
                    for i in range(len(self.data)):
                        if bool(scrambler_state & (1 << 38)) ^ bool(
                                scrambler_state
                                & (1 << 57)) ^ bool(data & (1 << i)):
                            b = b | (1 << i)
                        scrambler_state = (scrambler_state & 0x1ffffffffffffff
                                           ) << 1 | bool(data & (1 << i))
                    data = b

                # 10GBASE-R decoding

                # remap control characters
                ctrl = bytearray(
                    baser_ctrl_to_xgmii_mapping.get((data >> i * 7 + 8)
                                                    & 0x7f, XgmiiCtrl.ERROR)
                    for i in range(8))

                data = data.to_bytes(8, 'little')

                dl = bytearray()
                cl = []
                if header == BaseRSync.DATA:
                    # data
                    dl = data
                    cl = [0] * 8
                elif header == BaseRSync.CTRL:
                    if data[0] == BaseRBlockType.CTRL:
                        # C7 C6 C5 C4 C3 C2 C1 C0 BT
                        dl = ctrl
                        cl = [1] * 8
                    elif data[0] == BaseRBlockType.OS_4:
                        # D7 D6 D5 O4 C3 C2 C1 C0 BT
                        dl = ctrl[0:4]
                        cl = [1] * 4
                        if (data[4] >> 4) & 0xf == BaseRO.SEQ_OS:
                            dl.append(XgmiiCtrl.SEQ_OS)
                        elif (data[4] >> 4) & 0xf == BaseRO.SIG_OS:
                            dl.append(XgmiiCtrl.SIG_OS)
                        else:
                            dl.append(XgmiiCtrl.ERROR)
                        cl.append(1)
                        dl += data[5:]
                        cl += [0] * 3
                    elif data[0] == BaseRBlockType.START_4:
                        # D7 D6 D5    C3 C2 C1 C0 BT
                        dl = ctrl[0:4]
                        cl = [1] * 4
                        dl.append(XgmiiCtrl.START)
                        cl.append(1)
                        dl += data[5:]
                        cl += [0] * 3
                    elif data[0] == BaseRBlockType.OS_START:
                        # D7 D6 D5    O0 D3 D2 D1 BT
                        if data[4] & 0xf == BaseRO.SEQ_OS:
                            dl.append(XgmiiCtrl.SEQ_OS)
                        elif data[4] & 0xf == BaseRO.SIG_OS:
                            dl.append(XgmiiCtrl.SIG_OS)
                        else:
                            dl.append(XgmiiCtrl.ERROR)
                        cl.append(1)
                        dl += data[1:4]
                        cl += [0] * 3
                        dl.append(XgmiiCtrl.START)
                        cl.append(1)
                        dl += data[5:]
                        cl += [0] * 3
                    elif data[0] == BaseRBlockType.OS_04:
                        # D7 D6 D5 O4 O0 D3 D2 D1 BT
                        if data[4] & 0xf == BaseRO.SEQ_OS:
                            dl.append(XgmiiCtrl.SEQ_OS)
                        elif data[4] & 0xf == BaseRO.SIG_OS:
                            dl.append(XgmiiCtrl.SIG_OS)
                        else:
                            dl.append(XgmiiCtrl.ERROR)
                        cl.append(1)
                        dl += data[1:4]
                        cl += [0] * 3
                        if (data[4] >> 4) & 0xf == BaseRO.SEQ_OS:
                            dl.append(XgmiiCtrl.SEQ_OS)
                        elif (data[4] >> 4) & 0xf == BaseRO.SIG_OS:
                            dl.append(XgmiiCtrl.SIG_OS)
                        else:
                            dl.append(XgmiiCtrl.ERROR)
                        cl.append(1)
                        dl += data[5:]
                        cl += [0] * 3
                    elif data[0] == BaseRBlockType.START_0:
                        # D7 D6 D5 D4 D3 D2 D1    BT
                        dl.append(XgmiiCtrl.START)
                        cl.append(1)
                        dl += data[1:]
                        cl += [0] * 7
                    elif data[0] == BaseRBlockType.OS_0:
                        # C7 C6 C5 C4 O0 D3 D2 D1 BT
                        if data[4] & 0xf == BaseRO.SEQ_OS:
                            dl.append(XgmiiCtrl.SEQ_OS)
                        elif data[4] & 0xf == BaseRO.SIG_OS:
                            dl.append(XgmiiCtrl.SIG_OS)
                        else:
                            dl.append(XgmiiCtrl.ERROR)
                        cl.append(1)
                        dl += data[1:4]
                        cl += [0] * 3
                        dl += ctrl[4:]
                        cl += [1] * 4
                    elif data[0] in {
                            BaseRBlockType.TERM_0, BaseRBlockType.TERM_1,
                            BaseRBlockType.TERM_2, BaseRBlockType.TERM_3,
                            BaseRBlockType.TERM_4, BaseRBlockType.TERM_5,
                            BaseRBlockType.TERM_6, BaseRBlockType.TERM_7
                    }:
                        # C7 C6 C5 C4 C3 C2 C1    BT
                        # C7 C6 C5 C4 C3 C2    D0 BT
                        # C7 C6 C5 C4 C3    D1 D0 BT
                        # C7 C6 C5 C4    D2 D1 D0 BT
                        # C7 C6 C5    D3 D2 D1 D0 BT
                        # C7 C6    D4 D3 D2 D1 D0 BT
                        # C7    D5 D4 D3 D2 D1 D0 BT
                        #    D6 D5 D4 D3 D2 D1 D0 BT
                        term_lane = block_type_term_lane_mapping[data[0]]
                        dl += data[1:term_lane + 1]
                        cl += [0] * term_lane
                        dl.append(XgmiiCtrl.TERM)
                        cl.append(1)
                        dl += ctrl[term_lane + 1:]
                        cl += [1] * (7 - term_lane)
                    else:
                        # invalid block type
                        self.log.warning("Invalid block type")
                        dl = [XgmiiCtrl.ERROR] * 8
                        cl = [1] * 8
                else:
                    # invalid sync header
                    self.log.warning("Invalid sync header")
                    dl = [XgmiiCtrl.ERROR] * 8
                    cl = [1] * 8

                for offset in range(self.byte_lanes):
                    d_val = dl[offset]
                    c_val = cl[offset]

                    if frame is None:
                        if c_val and d_val == XgmiiCtrl.START:
                            # start
                            frame = XgmiiFrame(bytearray([EthPre.PRE]), [0])
                            frame.sim_time_start = get_sim_time()
                            frame.start_lane = offset
                    else:
                        if c_val:
                            # got a control character; terminate frame reception
                            if d_val != XgmiiCtrl.TERM:
                                # store control character if it's not a termination
                                frame.data.append(d_val)
                                frame.ctrl.append(c_val)

                            frame.compact()
                            frame.sim_time_end = get_sim_time()
                            self.log.info("RX frame: %s", frame)

                            self.queue_occupancy_bytes += len(frame)
                            self.queue_occupancy_frames += 1

                            self.queue.put_nowait(frame)
                            self.active_event.set()

                            frame = None
                        else:
                            if frame.sim_time_sfd is None and d_val == EthPre.SFD:
                                frame.sim_time_sfd = get_sim_time()

                            frame.data.append(d_val)
                            frame.ctrl.append(c_val)
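
A minimal usage sketch for the BaseRSerdesSink above (not part of the original file); the DUT signal names and the timeout are assumptions.

import cocotb

@cocotb.test()
async def baser_sink_smoke_test(dut):
    # Hypothetical names for the 64-bit serdes data and 2-bit sync header signals
    sink = BaseRSerdesSink(dut.serdes_rx_data, dut.serdes_rx_hdr, dut.clk)
    await sink.wait(10, 'us')            # block until a frame arrives or 10 us pass
    if not sink.empty():
        frame = await sink.recv()        # XgmiiFrame reassembled by _run()
        dut._log.info("received %d bytes", len(frame))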
Code example #35
File: avalon.py  Project: Martoni/cocotb
class AvalonMaster(AvalonMM):
    """Avalon-MM master
    """
    def __init__(self, entity, name, clock):
        AvalonMM.__init__(self, entity, name, clock)
        self.log.debug("AvalonMaster created")
        self.busy_event = Event("%s_busy" % name)
        self.busy = False

    def __len__(self):
        return 2**len(self.bus.address)

    @coroutine
    def _acquire_lock(self):
        if self.busy:
            yield self.busy_event.wait()
        self.busy_event.clear()
        self.busy = True

    def _release_lock(self):
        self.busy = False
        self.busy_event.set()

    @coroutine
    def read(self, address, sync=True):
        """
        Issue a request to the bus and block until this
        comes back. Simulation time still progresses
        but syntactically it blocks.
        See http://www.altera.com/literature/manual/mnl_avalon_spec_1_3.pdf
        """
        if not self._can_read:
            self.log.error("Cannot read - have no read signal")
            raise TestError("Attempt to read on a write-only AvalonMaster")

        yield self._acquire_lock()

        # Apply values for next clock edge
        if sync:
            yield RisingEdge(self.clock)
        self.bus.address <= address
        self.bus.read <= 1
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= int("1"*len(self.bus.byteenable), 2)
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 1

        # Wait for waitrequest to be low
        if hasattr(self.bus, "waitrequest"):
            yield self._wait_for_nsignal(self.bus.waitrequest)
        yield RisingEdge(self.clock)
        # Get the data
        data = self.bus.readdata.value
        # Deassert read
        self.bus.read <= 0
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= 0
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 0
        v = self.bus.address.value
        v.binstr = "x" * len(self.bus.address)
        self.bus.address <= v

        if hasattr(self.bus, "readdatavalid"):
            while True:
                yield ReadOnly()
                # Get the data
                if int(self.bus.readdatavalid):
                    data = self.bus.readdata.value
                    break
                yield RisingEdge(self.clock)
        else:
            # Assume readLatency = 1 if no readdatavalid
            # FIXME need to configure this,
            # should take a dictionary of Avalon properties.
            yield ReadOnly()

        self._release_lock()
        raise ReturnValue(data)

    @coroutine
    def write(self, address, value):
        """
        Issue a write to the given address with the specified
        value.
        See http://www.altera.com/literature/manual/mnl_avalon_spec_1_3.pdf
        """
        if not self._can_write:
            self.log.error("Cannot write - have no write signal")
            raise TestError("Attempt to write on a read-only AvalonMaster")

        yield self._acquire_lock()

        # Apply values to bus
        yield RisingEdge(self.clock)
        self.bus.address <= address
        self.bus.writedata <= value
        self.bus.write <= 1
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= int("1"*len(self.bus.byteenable), 2)
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 1

        # Wait for waitrequest to be low
        if hasattr(self.bus, "waitrequest"):
            count = yield self._wait_for_nsignal(self.bus.waitrequest)

        # Deassert write
        yield RisingEdge(self.clock)
        self.bus.write <= 0
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= 0
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 0
        v = self.bus.address.value
        v.binstr = "x" * len(self.bus.address)
        self.bus.address <= v

        v = self.bus.writedata.value
        v.binstr = "x" * len(self.bus.writedata)
        self.bus.writedata <= v
        self._release_lock()
Code example #36
File: I2CHAL.py  Project: svancau/SpinalHDL
class I2CHALAnalyser:

    def __init__(self, helperSlave, listOperation):
        self.sda = helperSlave.io.sda_rd
        self.scl = helperSlave.io.scl_rd
        self.clk = helperSlave.io.clk

        self.event_RisingEdge  = Event()
        self.event_FallingEdge  = Event()

        self.event_Start = Event()
        self.event_Stop  = Event()

        self.listOp = list()
        self.refListOp = listOperation
        self.dataBinRead = list()

        self.startSeq = 0


    #==========================================================================
    # Start to analyse the bus
    #==========================================================================
    @cocotb.coroutine
    def start(self):

        self.fork_falling = cocotb.fork(self._FallingEdgeDetection())
        self.fork_rising  = cocotb.fork(self._RisingEdgeDetection())
        self.fork_start   = cocotb.fork(self._startDetection())
        self.fork_stop    = cocotb.fork(self._stopDetection())
        yield self._analyser()


    #==========================================================================
    # Stop all processes
    #==========================================================================
    def stop(self):
        self.fork_falling.kill()
        self.fork_rising.kill()
        self.fork_start.kill()
        self.fork_stop.kill()


    #==========================================================================
    # Store all event appening on the bus
    #==========================================================================
    @cocotb.coroutine
    def _analyser(self):

        self.listOp = list()

        # Start ---------------------------------------------------------------
        yield self.event_Start.wait()
        yield RisingEdge(self.clk)
        self.startSeq = 0

        while True:

            dataBinRead = list()
            index = 0
            # Read data -----------------------------------------------------------
            while index < I2CConfig.dataWdith:
                yield self.event_RisingEdge.wait()
                dataBinRead.append(int(self.sda))
                #print("data ", index, " value " , int(self.sda), "index ", index, "start " , self.startSeq )

                if self.startSeq == 1:
                    index = 0
                    self.startSeq = 0
                    dataBinRead = list()
                    dataBinRead.append(int(self.sda))

                index += 1

            dataInRead = int("".join([str(x) for x in dataBinRead]), 2)
            self.listOp.append(DATA(dataInRead))

            # Read ACK ------------------------------------------------------------
            yield self.event_RisingEdge.wait()
            if int(self.sda) == 0:
                self.listOp.append(ACK())
            else:
                self.listOp.append(NACK())

            #print()

    #==========================================================================
    # Detect the start condition
    #==========================================================================
    @cocotb.coroutine
    def _startDetection(self):
        yield RisingEdge(self.clk)
        while True:
            prev = int(self.sda)
            yield RisingEdge(self.clk)
            if prev == 1 and int(self.sda) == 0:
                if int(self.scl) == 1:
                    self.event_Start.set()
                    self.listOp.append(START())
                    self.startSeq = 1


    #==========================================================================
    # Detect the stop condition
    #==========================================================================
    @cocotb.coroutine
    def _stopDetection(self):
        yield RisingEdge(self.clk)
        while True:
            prev = int(self.sda)
            yield RisingEdge(self.clk)
            if prev == 0 and int(self.sda) == 1:
                if int(self.scl) == 1:
                    self.event_Stop.set()
                    self.listOp.append(STOP())

                    # check sequence...
                    for (op, ref) in zip(self.listOp, self.refListOp):

                        if isinstance(ref, START) and isinstance(op, START):
                            pass
                        elif isinstance(ref, STOP) and isinstance(op, STOP):
                            pass
                        elif isinstance(ref, ACK) and isinstance(op, ACK):
                            pass
                        elif isinstance(ref, NACK) and isinstance(op, NACK):
                            pass
                        elif (isinstance(ref, WRITE) or isinstance(ref, READ)) and isinstance(op, DATA):
                            if ref.data != op.data:
                                print("ref ", hex(ref.data), " op ", hex(op.data))
                                assertEquals(ref.data , op.data , "Analyser data ERROR")

                        else:
                            assertEquals(True , False , "%s is not equal to %s" % (ref, op))


    #==========================================================================
    # Detect a Rising edge of scl
    #==========================================================================
    @cocotb.coroutine
    def _RisingEdgeDetection(self):
        while True:
            yield RisingEdge(self.scl)
            self.event_RisingEdge.set()


    #==========================================================================
    # Detect a Falling edge of scl
    #==========================================================================
    @cocotb.coroutine
    def _FallingEdgeDetection(self):
        while True:
            yield FallingEdge(self.scl)
            self.event_FallingEdge.set()
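
A minimal sketch (not from the original file) of running the I2CHALAnalyser above inside a test; helperSlave, the expected-operation list and the run time are assumptions about the surrounding SpinalHDL testbench.

import cocotb
from cocotb.triggers import Timer

@cocotb.coroutine
def check_i2c_sequence(helperSlave, expected_ops):
    # expected_ops is a reference list such as [START(), WRITE(0x42), ACK(), STOP()]
    analyser = I2CHALAnalyser(helperSlave, expected_ops)
    fork_analyser = cocotb.fork(analyser.start())  # forks the edge/start/stop detectors
    yield Timer(100, "us")                         # let the I2C traffic of interest run
    analyser.stop()                                # kill the detector coroutines
    fork_analyser.kill()                           # kill the analyser loop itself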
Code example #37
File: avalon.py  Project: TC01/cocotb
class AvalonMaster(AvalonMM):
    """Avalon Memory Mapped Interface (Avalon-MM) Master"""
    def __init__(self, entity, name, clock, **kwargs):
        AvalonMM.__init__(self, entity, name, clock, **kwargs)
        self.log.debug("AvalonMaster created")
        self.busy_event = Event("%s_busy" % name)
        self.busy = False

    def __len__(self):
        return 2**len(self.bus.address)

    @coroutine
    def _acquire_lock(self):
        if self.busy:
            yield self.busy_event.wait()
        self.busy_event.clear()
        self.busy = True

    def _release_lock(self):
        self.busy = False
        self.busy_event.set()

    @coroutine
    def read(self, address, sync=True):
        """Issue a request to the bus and block until this
        comes back. Simulation time still progresses
        but syntactically it blocks.
        
        Args:
            address (int): The address to read from.
            sync (bool, optional): Wait for rising edge on clock initially.
                Defaults to True.
            
        Returns:
            BinaryValue: The read data value.
            
        Raises:
            :any:`TestError`: If master is write-only.
        """
        if not self._can_read:
            self.log.error("Cannot read - have no read signal")
            raise TestError("Attempt to read on a write-only AvalonMaster")

        yield self._acquire_lock()

        # Apply values for next clock edge
        if sync:
            yield RisingEdge(self.clock)
        self.bus.address <= address
        self.bus.read <= 1
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= int("1"*len(self.bus.byteenable), 2)
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 1

        # Wait for waitrequest to be low
        if hasattr(self.bus, "waitrequest"):
            yield self._wait_for_nsignal(self.bus.waitrequest)
        yield RisingEdge(self.clock)

        # Deassert read
        self.bus.read <= 0
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= 0
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 0
        v = self.bus.address.value
        v.binstr = "x" * len(self.bus.address)
        self.bus.address <= v

        if hasattr(self.bus, "readdatavalid"):
            while True:
                yield ReadOnly()
                if int(self.bus.readdatavalid):
                    break
                yield RisingEdge(self.clock)
        else:
            # Assume readLatency = 1 if no readdatavalid
            # FIXME need to configure this,
            # should take a dictionary of Avalon properties.
            yield ReadOnly()

        # Get the data
        data = self.bus.readdata.value

        self._release_lock()
        raise ReturnValue(data)

    @coroutine
    def write(self, address, value):
        """Issue a write to the given address with the specified
        value.

        Args:
            address (int): The address to write to.
            value (int): The data value to write.

        Raises:
            :any:`TestError`: If master is read-only.
        """
        if not self._can_write:
            self.log.error("Cannot write - have no write signal")
            raise TestError("Attempt to write on a read-only AvalonMaster")

        yield self._acquire_lock()

        # Apply values to bus
        yield RisingEdge(self.clock)
        self.bus.address <= address
        self.bus.writedata <= value
        self.bus.write <= 1
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= int("1"*len(self.bus.byteenable), 2)
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 1

        # Wait for waitrequest to be low
        if hasattr(self.bus, "waitrequest"):
            count = yield self._wait_for_nsignal(self.bus.waitrequest)

        # Deassert write
        yield RisingEdge(self.clock)
        self.bus.write <= 0
        if hasattr(self.bus, "byteenable"):
            self.bus.byteenable <= 0
        if hasattr(self.bus, "cs"):
            self.bus.cs <= 0
        v = self.bus.address.value
        v.binstr = "x" * len(self.bus.address)
        self.bus.address <= v

        v = self.bus.writedata.value
        v.binstr = "x" * len(self.bus.writedata)
        self.bus.writedata <= v
        self._release_lock()
Code example #38
class XgmiiSink(Reset):
    def __init__(self,
                 data,
                 ctrl,
                 clock,
                 reset=None,
                 enable=None,
                 reset_active_level=True,
                 *args,
                 **kwargs):
        self.log = logging.getLogger(f"cocotb.{data._path}")
        self.data = data
        self.ctrl = ctrl
        self.clock = clock
        self.reset = reset
        self.enable = enable

        self.log.info("XGMII sink")
        self.log.info("cocotbext-eth version %s", __version__)
        self.log.info("Copyright (c) 2020 Alex Forencich")
        self.log.info("https://github.com/alexforencich/cocotbext-eth")

        super().__init__(*args, **kwargs)

        self.active = False
        self.queue = Queue()
        self.active_event = Event()

        self.queue_occupancy_bytes = 0
        self.queue_occupancy_frames = 0

        self.width = len(self.data)
        self.byte_size = 8
        self.byte_lanes = len(self.ctrl)

        assert self.width == self.byte_lanes * self.byte_size

        self.log.info("XGMII sink model configuration")
        self.log.info("  Byte size: %d bits", self.byte_size)
        self.log.info("  Data width: %d bits (%d bytes)", self.width,
                      self.byte_lanes)

        self._run_cr = None

        self._init_reset(reset, reset_active_level)

    def _recv(self, frame, compact=True):
        if self.queue.empty():
            self.active_event.clear()
        self.queue_occupancy_bytes -= len(frame)
        self.queue_occupancy_frames -= 1
        if compact:
            frame.compact()
        return frame

    async def recv(self, compact=True):
        frame = await self.queue.get()
        return self._recv(frame, compact)

    def recv_nowait(self, compact=True):
        frame = self.queue.get_nowait()
        return self._recv(frame, compact)

    def count(self):
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def idle(self):
        return not self.active

    def clear(self):
        while not self.queue.empty():
            self.queue.get_nowait()
        self.active_event.clear()
        self.queue_occupancy_bytes = 0
        self.queue_occupancy_frames = 0

    async def wait(self, timeout=0, timeout_unit=None):
        if not self.empty():
            return
        if timeout:
            await First(self.active_event.wait(), Timer(timeout, timeout_unit))
        else:
            await self.active_event.wait()

    def _handle_reset(self, state):
        if state:
            self.log.info("Reset asserted")
            if self._run_cr is not None:
                self._run_cr.kill()
                self._run_cr = None

            self.active = False
        else:
            self.log.info("Reset de-asserted")
            if self._run_cr is None:
                self._run_cr = cocotb.start_soon(self._run())

    async def _run(self):
        frame = None
        self.active = False

        clock_edge_event = RisingEdge(self.clock)

        while True:
            await clock_edge_event

            if self.enable is None or self.enable.value:
                for offset in range(self.byte_lanes):
                    d_val = (self.data.value.integer >> (offset * 8)) & 0xff
                    c_val = (self.ctrl.value.integer >> offset) & 1

                    if frame is None:
                        if c_val and d_val == XgmiiCtrl.START:
                            # start
                            frame = XgmiiFrame(bytearray([EthPre.PRE]), [0])
                            frame.sim_time_start = get_sim_time()
                            frame.start_lane = offset
                    else:
                        if c_val:
                            # got a control character; terminate frame reception
                            if d_val != XgmiiCtrl.TERM:
                                # store control character if it's not a termination
                                frame.data.append(d_val)
                                frame.ctrl.append(c_val)

                            frame.compact()
                            frame.sim_time_end = get_sim_time()
                            self.log.info("RX frame: %s", frame)

                            self.queue_occupancy_bytes += len(frame)
                            self.queue_occupancy_frames += 1

                            self.queue.put_nowait(frame)
                            self.active_event.set()

                            frame = None
                        else:
                            if frame.sim_time_sfd is None and d_val == EthPre.SFD:
                                frame.sim_time_sfd = get_sim_time()

                            frame.data.append(d_val)
                            frame.ctrl.append(c_val)
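A usage sketch for this sink in an async cocotb test (the class already relies on cocotb.start_soon, so an async test is consistent). The DUT signal names and the clock period are assumptions.

import cocotb
from cocotb.clock import Clock

@cocotb.test()
async def capture_one_frame(dut):
    cocotb.start_soon(Clock(dut.clk, 3200, units="ps").start())
    # Signal names below are assumed; point them at whatever drives XGMII in the DUT.
    sink = XgmiiSink(dut.xgmii_txd, dut.xgmii_txc, dut.clk, dut.rst)
    frame = await sink.recv()   # blocks until a complete frame has been captured
    dut._log.info("received %d data bytes", len(frame.data))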
Code example #39
File: __init__.py  Project: zpan007/cocotb
class Monitor(object):
    """Base class for Monitor objects. 

    Monitors are passive 'listening' objects that monitor pins going in or out of a DUT. 
    This class should not be used
    directly, but should be subclassed and the internal :any:`_monitor_recv` method
    should be overridden and decorated as a :any:`coroutine`.  This :any:`_monitor_recv`
    method should capture some behavior of the pins, form a transaction, and
    pass this transaction to the internal :any:`_recv` method.  The :any:`_monitor_recv`
    method is added to the cocotb scheduler during the ``__init__`` phase, so it
    should not be yielded anywhere.

    The primary use of a Monitor is as an interface for a
    :class:`~cocotb.scoreboard.Scoreboard`.

    Args:
        callback (callable): Callback to be called with each recovered transaction
            as the argument. If no callback is supplied, received transactions are
            placed on an internal queue and *event*, if provided, is used to notify
            any consumers.
        event (event): Object that supports a ``set`` method that will be called when
            a transaction is received through the internal :any:`_recv` method.
    """

    def __init__(self, callback=None, event=None):
        self._event = event
        self._wait_event = None
        self._recvQ = deque()
        self._callbacks = []
        self.stats = MonitorStatistics()
        self._wait_event = Event()

        # Subclasses may already set up logging
        if not hasattr(self, "log"):
            self.log = SimLog("cocotb.monitor.%s" % (self.__class__.__name__))

        if callback is not None:
            self.add_callback(callback)

        # Create an independent coroutine which can receive stuff
        self._thread = cocotb.scheduler.add(self._monitor_recv())

    def kill(self):
        """Kill the monitor coroutine."""
        if self._thread:
            self._thread.kill()
            self._thread = None

    def __len__(self):
        return len(self._recvQ)

    def __getitem__(self, idx):
        return self._recvQ[idx]

    def add_callback(self, callback):
        """Add function as a callback.

        Args:
            callback (callable): The function to call back.
        """
        self.log.debug("Adding callback of function %s to monitor" %
                       (callback.__name__))
        self._callbacks.append(callback)

    @coroutine
    def wait_for_recv(self, timeout=None):
        """With *timeout*, :meth:`.wait` for transaction to arrive on monitor 
        and return its data.

        Args:
            timeout (optional): The timeout value for :class:`~.triggers.Timer`.
                Defaults to ``None``.

        Returns: Data of received transaction.
        """
        if timeout:
            t = Timer(timeout)
            fired = yield [self._wait_event.wait(), t]
            if fired is t:
                raise ReturnValue(None)
        else:
            yield self._wait_event.wait()

        pkt = self._wait_event.data
        raise ReturnValue(pkt)

    @coroutine
    def _monitor_recv(self):
        """Actual implementation of the receiver.

        Subclasses should override this method to implement the actual receive
        routine and call :any:`_recv` with the recovered transaction.
        """
        raise NotImplementedError("Attempt to use base monitor class without "
                                  "providing a ``_monitor_recv`` method")

    def _recv(self, transaction):
        """Common handling of a received transaction."""

        self.stats.received_transactions += 1

        # either callback based consumer
        for callback in self._callbacks:
            callback(transaction)

        # Or queued with a notification
        if not self._callbacks:
            self._recvQ.append(transaction)

        if self._event is not None:
            self._event.set()

        # If anyone was waiting then let them know
        if self._wait_event is not None:
            self._wait_event.set(data=transaction)
            self._wait_event.clear()
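As the docstring above describes, this class is meant to be subclassed with a `_monitor_recv` coroutine that recovers transactions and hands them to `_recv`. A minimal subclass sketch follows; the valid/data handshake and the signal names are assumptions, not part of the original example.

import cocotb
from cocotb.triggers import RisingEdge, ReadOnly

class SimpleValidMonitor(Monitor):
    """Samples `data` on every rising clock edge where `valid` is high."""

    def __init__(self, clock, valid, data, callback=None, event=None):
        # Store handles before calling the base constructor, which schedules _monitor_recv.
        self.clock = clock
        self.valid = valid
        self.data = data
        Monitor.__init__(self, callback=callback, event=event)

    @cocotb.coroutine
    def _monitor_recv(self):
        while True:
            yield RisingEdge(self.clock)
            yield ReadOnly()
            if int(self.valid):
                # Hand the recovered transaction to the base-class machinery.
                self._recv(int(self.data))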
Code example #40
class I2CMasterModelHAL:


    def __init__(self, helperSlave, clockDivider):

        self.wr_scl   = helperSlave.io.scl_rd
        self.wr_sda   = helperSlave.io.sda_rd
        self.rd_scl   = helperSlave.io.scl_wr
        self.rd_sda   = helperSlave.io.sda_wr
        self.clk      = helperSlave.io.clk

        self.sda       = 1
        self.scl       = 1

        self.clockDivider = clockDivider

        self.scl_en       = 0

        self.trigger     = Event()
        self.sclRising   = Event()
        self.sclFalling  = Event()

        self.dataRead    = Event()

        self.freezeBus   = False


    ##########################################################################
    # Start the master
    @cocotb.coroutine
    def startMaster(self, listOperations):

        yield RisingEdge(self.clk)

        self.fork_scl    = cocotb.fork(self._genSCL())
        self.fork_drain  = cocotb.fork(self._manageOpenDrain())
        cocotb.fork(self._runMaster(listOperations))


    ##########################################################################
    # Stop all processes
    def stop(self):

        self.fork_scl.kill()
        self.fork_drain.kill()


    ##########################################################################
    # Execute all operation
    @cocotb.coroutine
    def _runMaster(self, listOperations):

        for index, operation in enumerate(listOperations):

            # START -----------------------------------------------------------
            if isinstance(operation, START):
                if index != 0 :
                    yield self.sclRising.wait()

                yield self._genStart()

            # WRITE -----------------------------------------------------------
            elif isinstance(operation, WRITE):
                yield self._writeData(operation.data)

            # READ ------------------------------------------------------------
            elif isinstance(operation, READ):
                yield self._readData()

            # ACK -------------------------------------------------------------
            elif isinstance(operation, ACK):
                prevOperation = listOperations[index-1]

                yield self.sclFalling.wait()
                self.sda = 1 if isinstance(prevOperation, WRITE) else 0
                yield self.sclRising.wait()
                yield self.sclFalling.wait()
                self.sda = 1

            # NACK ------------------------------------------------------------
            elif isinstance(operation, NACK):

                yield self.sclFalling.wait()
                self.sda  = 1
                yield self.sclRising.wait()
                yield self.sclFalling.wait()

            # STOP ------------------------------------------------------------
            elif isinstance(operation, STOP):
                yield self._genStop()




    ##########################################################################
    # Simulation of the openDrain
    @cocotb.coroutine
    def _manageOpenDrain(self):
        while True:
            yield RisingEdge(self.clk)

            if int(self.rd_sda) == 0 :
                self.wr_sda <= 0
            else:
                self.wr_sda <= self.sda

            if int(self.rd_scl) == 0 :
                self.wr_scl <= 0
            else:
                self.wr_scl <= self.scl


    ##########################################################################
    # SCL generation
    @cocotb.coroutine
    def _genSCL(self):
        cnt = 0
        self.scl = 1
        while True:

            yield RisingEdge(self.clk)

            if self.scl_en == 1 and not self.freezeBus:
                cnt += 1
                if (cnt >= self.clockDivider):
                    if self.scl == 0:
                        self.scl = 1
                        self.sclRising.set()

                    else:
                        self.scl = 0
                        self.sclFalling.set()

                    cnt = 0

                elif cnt == self.clockDivider // 2:
                    self.trigger.set()
            else:
                self.scl = 1  # hold SCL high while the clock generator is disabled


    ##########################################################################
    # Generate the start condition
    @cocotb.coroutine
    def _genStart(self):

        self.scl_en = 1

        yield self.trigger.wait()

        self.sda = 0

        yield self.sclFalling.wait()

        self.sda = 1


    ##########################################################################
    # Generate the stop condition
    @cocotb.coroutine
    def _genStop(self):

        self.sda = 0

        yield self.sclRising.wait()
        yield self.trigger.wait()

        self.sda    = 1
        self.scl_en = 0


    ##########################################################################
    # Write a data
    @cocotb.coroutine
    def _writeData(self, data):

        data2Send = bin(data)[2:].zfill(I2CConfig.dataWdith)
        write     = True
        index     = 0

        while write:

            yield FallingEdge(self.wr_scl)

            self.sda = int(data2Send[index])

            if index == I2CConfig.dataWdith-1:
                break

            index += 1

    ##########################################################################
    # Read a data
    @cocotb.coroutine
    def _readData(self):
        cnt  = 0
        dataRead = list()
        while True:

            if (cnt == I2CConfig.dataWdith):
                dataInt = int("".join([str(x) for x in dataRead]), 2)
                self.dataRead.set(data= dataInt )

                break
            else:
                yield self.sclRising.wait()
                dataRead.append( int(self.rd_sda) )

            cnt += 1
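A hypothetical driving sequence for this model. The START/WRITE/ACK/STOP constructors and the clock-divider value are assumptions inferred from the attributes used above (e.g. `operation.data`), not confirmed signatures; they come from the same test environment as the class itself.

@cocotb.coroutine
def i2c_write_one_byte(helperSlave):
    master = I2CMasterModelHAL(helperSlave, clockDivider=50)  # divider value chosen arbitrarily
    # The transfer itself runs in forked coroutines; startMaster returns once they are launched.
    yield master.startMaster([
        START(),
        WRITE(0xA0),   # device address + write bit (assumed payload)
        ACK(),
        WRITE(0x55),   # data byte (assumed payload)
        ACK(),
        STOP(),
    ])

Because `_runMaster` is forked rather than yielded, a real test would still need to wait on something observable (for example the `dataRead` event or a timer) before calling `stop()`.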