class Timer(Process):
    def __init__(self, waitUntil=10000.0, name="timer", sim=None):
        super(Timer, self).__init__(name=name, sim=sim)
        self.__timeout = waitUntil
        self.event = SimEvent(name="timer_event", sim=sim)
        self.ended = False
        
    def wait(self):
        yield hold, self, self.__timeout
        self.ended = True
        self.event.signal()
class ContinuousQuerier(Process):
    """Check Section 5.2"""
    
    # the interval between the first two queries MUST be at least one second
    MINIMUM_FIRST_WAIT = 1000
    MINIMUM_INCREMENT_RATE = 2
    
    FIRST_WAIT = MINIMUM_FIRST_WAIT + 4000
    INCREMENT_RATE = MINIMUM_INCREMENT_RATE * 2
    
    def __init__(self, subquery, sim, sender=None):
        super(ContinuousQuerier, self).__init__(sim=sim)
        self.sender = sender
        # if there is a unique response, it should stop querying
        # I will assume that there is no unique response (asking for PTR records)
        self.subquery = subquery
        self._random = Random()
        self.stopped = False
        self.__stop = SimEvent(name="stop_continuous_querier", sim=sim)
    
    def query_continuously(self):
        # SHOULD also delay the first query of the series
        # by a randomly-chosen amount in the range 20-120ms.
        
        twait = 20 + self._random.random()*100
        timer = Timer(waitUntil=twait, sim=self.sim)
        self.sim.activate(timer, timer.wait())
        yield waitevent, self, (timer.event, self.__stop,)
        self.sender.send_query(self.subquery, to_node=self.sender.node_id) # joining a network
        
        # the interval between the first two queries MUST be at least one second
        twait = ContinuousQuerier.FIRST_WAIT
        while not self.stopped:
            timer = Timer(waitUntil=twait, sim=self.sim)
            self.sim.activate(timer, timer.wait())
            yield waitevent, self, (timer.event, self.__stop,)
            self.sender.send_query(self.subquery) # subsequent queries
            
            if twait!=3600000:
                # the intervals between successive queries MUST increase by at least a factor of two
                twait = twait * ContinuousQuerier.INCREMENT_RATE
                # When the interval between queries reaches or exceeds 60 minutes
                # a querier MAY cap the interval to a maximum of 60 minutes
                # and perform subsequent queries at a steady-state rate of one query per hour
                if twait>3600000:
                    twait = 3600000 # 1h
    
    def stop(self):
        self.stopped = True
        self.__stop.signal()
    
    def reset(self):
        self.stopped = False
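A minimal driver for the querier above might look like the following sketch. It assumes the SimPy 2.x object-oriented API that these classes are written against and that the Timer and ContinuousQuerier classes are importable; FakeSender is a hypothetical stand-in for the real sender object, which is not shown here.

from SimPy.Simulation import Simulation

class FakeSender(object):
    """Hypothetical sender stub used only for this sketch."""
    node_id = "node-0"
    def send_query(self, subquery, to_node=None):
        print "query %r (to %r)" % (subquery, to_node)

sim = Simulation()
sim.initialize()
querier = ContinuousQuerier("_services._dns-sd._udp.local", sim, sender=FakeSender())
sim.activate(querier, querier.query_continuously())
sim.simulate(until=4 * 3600000)   # four hours of simulated milliseconds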
Example #3
class Alarm(Process):
    @classmethod
    def setOnetime(cls, delay, name=None, at=0, drift=('fixed', 0)):
        tm = Alarm(delay, name, drift=drift)
        activate(tm, tm.onetime(), at=at)
        return tm.event

    @classmethod
    def setPeriodic(cls, interval, name=None, at=0,
                    until=infinite, drift=('fixed', 0)):
        tm = Alarm(interval, name, until, drift)
        activate(tm, tm.loop(), at=at)
        return tm.event

    def __init__(self, interval, name=None,
                 until=infinite, drift=('fixed', 0)):
        Process.__init__(self)
        self.interval = interval
        if name is not None:
            eventname = name
        else:
            eventname = "a_SimEvent"
        self.event = SimEvent(eventname)
        self.until = until
        try:
            key, mean, cfg = drift
        except ValueError:
            key, mean = drift
            cfg = {}
        lb = cfg.get('lb', 0); ub = cfg.get('ub', interval)
        if lb < 0: raise ValueError('drift lb = %s must be >= 0' % lb)
        if ub > interval:
            raise ValueError('drift ub = %s must be <= %s = interval' % (ub, interval))
        cfg['lb'] = lb; cfg['ub'] = ub
        self.rgen = RandInterval.get(key, mean, cfg)

    def onetime(self):
        drift = self.rgen.next()
        yield hold, self, self.interval + drift
        self.event.signal()

    def loop(self):
        left = 0
        while (self.until < 0) or (now() < self.until):
            yield hold, self, left
            drift = self.rgen.next()
            yield hold, self, drift
            self.event.signal()
            left = self.interval - drift
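As a usage sketch, a consumer process can simply wait on the SimEvent returned by Alarm.setPeriodic. This assumes the module-level SimPy 2 API (initialize/activate/simulate) and that the Alarm class above, together with its RandInterval dependency, is importable; the Ticker class is hypothetical.

from SimPy.Simulation import Process, initialize, activate, simulate, waitevent, now

class Ticker(Process):
    """Hypothetical consumer that logs every alarm firing."""
    def run(self, tick):
        while True:
            yield waitevent, self, tick
            print "tick at", now()

initialize()
tick = Alarm.setPeriodic(10, name="tick", until=100)   # fire roughly every 10 time units
ticker = Ticker()
activate(ticker, ticker.run(tick))
simulate(until=100)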
class NewWPDetector(Process):
    
    def __init__(self, mdns_instance, sim):
        super(NewWPDetector, self).__init__( sim = sim )
        self.instance = mdns_instance
        self.last_wp_name = None
        self.__new_record_update = SimEvent(name="new_record_updated", sim=sim)
    
    def check_new_wps(self):
        while True:
            wp_r = self.instance.get_whitepage_record()
            if wp_r is not None:
                new_name = wp_r.node_name
                if self.last_wp_name != new_name:
                    self.instance.notify_whitepage_changed()
                    self.last_wp_name = new_name
            yield waitevent, self, (self.__new_record_update,)
    
    def notify_txt_record(self, record):
        if "iw" in record.keyvalues:
            self.__new_record_update.signal()
Example #5
File: arf.py  Project: reidlindsay/wins
class ARF(DCF):
    """Auto Rate Fallback (ARF) protocol.

    Implementation based on description from Kamerman and Monteban (1997). By
    default ARF operates using CSMA/CA (i.e. not MACAW). To enable RTS/CTS
    messages, set `usecsma` to False.

    ARF Timeout
    ===========
    The ARF timer is used to indicate that the rate of transmissions should be
    increased prior to having `nsuccess` consecutive ACKs. This mechanism is
    meant to allow ARF to explore higher data rates when traffic is sparse.

    The timer can be implemented as a time-based or packet-based timer. The
    time-based timer is a straightforward timeout timer that waits for a
    duration before firing and indicating that the rate of the transmission
    should be increased. The packet-based timer uses the "virtual timer"
    approach described by Qiao, Choi, and Shin (2002). This "timer" counts the
    number of transmission attempts made rather than using a time-based timer.

    This module implements the ARF timer as a packet-based timer.

    :CVariables:
     * `base`: Property to access base rate enumeration from `ra`.
     * `rates`: Property to access ordered list of valid rate enumerations. This
                is ordered based on the data rates reported by `get_datarate()`.

    :IVariables:
     * `ra`: `RateAdapt` module.
     * `ackcount`: Dictionary containing number of consecutive successful ACKs
                   received (negative values indicate consecutive dropped ACKs).
                   Values are keyed by destination address.
     * `arftimer`: Dictionary containing ARF timers used for updating transmit
                   data rate. Values are keyed by destination address. This
                   module implements ARF timers as packet-based timers.
     * `probation`: Dictionary containing probation flags indicating if
                    probation period has been entered; values are keyed by
                    destination address.
     * `nsuccess`: Threshold for number of consecutive ACKs that must be
                   received prior to increasing the data rate.
     * `nfailure`: Threshold for number of consecutive ACK failures that
                   will trigger a decrease in the data rate.
     * `timeout`: Timeout value for ARF timer; if exceeded, then increase rate.

    """
    name = "Auto Rate Fallback"
    tracename = "ARF"
    def __init__(self, usecsma=True, **kwargs):
        """Constructor."""
        # create ARF parameters
        self.nsuccess = None
        self.nfailure = None
        self.timeout  = None
        self.ackcount = {}
        self.arftimer = {}
        self.probation = {}
        self._rates = None
        # create ARF events
        self.dropack = SimEvent()
        self.recvack = SimEvent()
        DCF.__init__(self, usecsma=usecsma, **kwargs)
        # update event names
        self.dropack.name = "%s%s"%(self.name, ".dropack")
        self.recvack.name = "%s%s"%(self.name, ".recvack")

    base = property(fget=lambda self: self.ra.base)
    rates = property(fget=lambda self: self.get_rates() )

    def configure(self, base=None, nsuccess=None, nfailure=None, timeout=None, **kwargs):
        """Configure rate adaptation parameters.

        :param base: Base rate used to initialize `RateAdapt` component [default=0].
        :param nsuccess: Threshold for number of consecutive ACKs that must be
                         received prior to increasing the data rate.
        :param nfailure: Threshold for number of consecutive ACK failures that
                         will trigger a decrease in the data rate.
        :param timeout: Timeout value for the ARF timer; if exceeded, the data rate is increased.
        """
        DCF.configure(self, **kwargs)
        if base is None: base = 0
        if nsuccess is None: nsuccess = ARF_NSUCCESS
        if nfailure is None: nfailure = ARF_NFAILURE
        if timeout  is None: timeout  = ARF_TIMEOUT
        ra = self.newchild("ra", RateAdapt, base=base, tracename=self.tracename+".RA")
        ra.set_rate(self.broadcast) # use default base rate for broadcast
        # create FSM to manage ARF
        fsm = self.newchild("manager", FSM, tracename=self.tracename+".MGR")
        fsm.goto(self.MGR)
        # set other parameters
        self.nsuccess = nsuccess
        self.nfailure = nfailure
        self.timeout  = timeout
        self.ackcount = {}
        self.arftimer = {}
        self.probation = {}

    def MGR(self, fsm):
        """MGR state; manage ARF protocol."""
        yield waitevent, fsm, (self.dropack, self.recvack)
        dropack = (self.dropack in fsm.eventsFired)
        recvack = (self.recvack in fsm.eventsFired)
        # get destination parameter from event
        if dropack: dst = self.dropack.signalparam
        if recvack: dst = self.recvack.signalparam
        errmsg = "[ARF]: Invalid state transition!"
        assert (dropack ^ recvack), errmsg
        # initialize ackcount if needed
        if (dst not in self.ackcount):
            self.ackcount[dst] = 0
            self.probation[dst] = False
            self.arftimer[dst] = 0
        nack = self.ackcount[dst]
        probation = self.probation[dst]
        timer = 0
        # check probation condition
        errmsg = "[ARF]: ACK count must be zero in probation!"
        assert not ((nack != 0) and probation), errmsg
        # increment arftimer for every transmission attempt (packet-based timer)
        timer += 1
        # process drop ACK or recv ACK
        if (nack>0):
            if dropack:     nack = -1
            elif recvack:   nack += 1
        elif (nack<0):
            if dropack:     nack -= 1
            elif recvack:   nack = +1
        else:
            if dropack:     nack = -1
            elif recvack:   nack = +1
            # probation? dropack -> decrease rate and continue using lower rate
            if probation and dropack:
                nack, timer, rate = 0, 0, self.decrate(dst)
                self.log("DECRATE", rate=rate, dst=dst, ackcount=nack, timer=timer, probation=probation)
            probation =  False      # reset flag
        # log events
        if dropack: self.log("DROPACK", dst=dst, ackcount=nack, arftimer=timer)
        if recvack: self.log("RECVACK", dst=dst, ackcount=nack, arftimer=timer)
        # check if Nsuccess or Nfailure condition is met
        timerexpired = (timer>self.timeout)
        if (self.nsuccess-nack<1) or timerexpired:
            # (Nack >= +Nsuccess) OR (Timer expired)
            #   -> increase rate, enter probation period
            rate = self.incrate(dst)
            nack, timer = 0, 0      # reinitialize ackcount and timer
            probation = True        # set probation flag
            self.log("INCRATE", rate=rate, dst=dst, ackcount=nack, probation=True)
        elif (nack+self.nfailure<1):
            # Nack <= -Nfailure -> decrease rate
            rate = self.decrate(dst)
            nack, timer = 0, 0     # set ackcount to 0, reset timer
            self.log("DECRATE", rate=rate, dst=dst, ackcount=nack, probation=probation)
        # set ackcount/probation value
        self.ackcount[dst]  = nack
        self.arftimer[dst]  = timer
        self.probation[dst] = probation
        # continue in MGR
        yield fsm.goto(self.MGR)

    def retry(self, *args, **kwargs):
        """Overloaded to manage dropped ACKs."""
        rc = self.retrycount
        DCF.retry(self, *args, **kwargs)
        # Actual retry?
        if (self.retrycount>rc) and self.isdot11data(self.datatosend):
            # retry count was increased -> dropped ACK
            data = self.get_dot11data(self.datatosend)
            dst, src  = data.addr1, data.addr2
            #self.log("DROPACK", dst=dst, src=src)
            self.dropack.signal(dst)

    def send_data(self, data):
        """Additional processing for outgoing DATA."""
        DCF.send_data(self, data)
        dst  = data.addr1
        rate = self.ra.get_rate(dst)
        data.setanno('phy-rate', rate)

    def recv_ack(self, ack):
        """Additional processing for incoming ACK."""
        DCF.recv_ack(self, ack)
        # Expecting ACK?
        if self.isdot11data(self.datatosend):
            data = self.get_dot11data(self.datatosend)
            dst, src = data.addr1, data.addr2
            # ACK for me? -> received ACK
            if (src==ack.addr1):
                #self.log("RECVACK", ack, dst=dst, src=src)
                self.recvack.signal(dst)

    def incrate(self, dst):
        """Increase data rate corresponding to destination `dst`."""
        rate = self.ra.get_rate(dst)
        errmsg = "[ARF]: Invalid rate (%s) in incrate()!"%(rate)
        assert (rate in self.rates), errmsg
        idx = self.rates.index(rate)
        if (idx+1<len(self.rates)):
            rate = self.rates[idx+1]
        self.ra.set_rate(dst, rate)
        return rate

    def decrate(self, dst):
        """Decrease data rate corresponding to destination `dst`."""
        rate = self.ra.get_rate(dst)
        errmsg = "[ARF]: Invalid rate (%s) in incrate()!"%(rate)
        assert (rate in self.rates), errmsg
        idx = self.rates.index(rate)
        if (idx>0):
            rate = self.rates[idx-1]
        self.ra.set_rate(dst, rate)
        return rate

    def get_rates(self, force=False):
        """Get list of valid rate enumerations.

        :param force: If true, recalculate rates; otherwise use cached value
                      [default=False].
        :return: List of valid rate enumerations.
        """
        if force: self._rates = None
        # initialize ordered rates
        if self._rates is None:
            orates = []     # ordered rates
            rates = [r for r in self.phy.rate]
            while rates:
                maxrate, maxidx = None, None
                for k in range(len(rates)):
                    r = rates[k]
                    rbps = self.get_datarate(r)
                    if maxrate is None: maxrate, maxidx = r, k
                    mbps = self.get_datarate(maxrate)
                    if (rbps>mbps):  maxrate, maxidx = r, k
                orates.insert(0, rates.pop(maxidx))     # add max rate to orates
            self._rates = orates
        return self._rates

    def log_send(self, p, *args, **kwargs):
        """Updated to print `ARF` related parameters."""
        if p.hasanno('phy-rate'):
            kwargs['phy-rate'] = p.getanno('phy-rate')
        DCF.log_send(self, p, *args, **kwargs)

    def log_recv(self, p, *args, **kwargs):
        """Updated to print `ARF` related parameters."""
        if p.hasanno('phy-rate'):
            kwargs['phy-rate'] = p.getanno('phy-rate')
        DCF.log_recv(self, p, *args, **kwargs)
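The per-destination bookkeeping performed in MGR above can be summarized by the following standalone sketch. It is plain Python written for illustration only; the threshold defaults are made-up values, not the module's ARF_NSUCCESS/ARF_NFAILURE/ARF_TIMEOUT constants.

def arf_step(nack, timer, probation, recvack,
             nsuccess=10, nfailure=2, timeout=15):
    """One ARF update; returns (nack, timer, probation, action)."""
    action = None
    timer += 1                                  # packet-based timer: one tick per attempt
    if recvack:
        nack = nack + 1 if nack > 0 else +1     # count consecutive ACKs
    else:
        if nack == 0 and probation:
            # first failure while probing the higher rate -> fall back immediately
            return 0, 0, False, "decrease"
        nack = nack - 1 if nack < 0 else -1     # count consecutive failures
    probation = False
    if nack >= nsuccess or timer > timeout:
        return 0, 0, True, "increase"           # raise rate, enter probation
    if nack <= -nfailure:
        return 0, 0, False, "decrease"          # drop rate
    return nack, timer, probation, action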
Example #6
File: fsm.py  Project: reidlindsay/wins
class Timer(FSM):
    """Stopwatch timer that fires an event after waiting for a specified time.

    After starting `Timer`, use `pause()`, `resume()`, and `stop()` to control
    the execution of the timer. Use the `done` and `kill` SimEvents to monitor
    the completion of the `Timer`.

    :CVariables:
     * `CMDPAUSE`: Internal constant for pause command.
     * `CMDRESUME`: Internal constant for resume command.
     * `duration`: Property to access time that `Timer` should run.
     * `timepassed`: Property to access time elapsed by `Timer`.
     * `timeleft`: Property to access time left on `Timer`.
     * `ispaused`: Property to check if timer is currently paused.
     * `fired`: Property to check if `done` occurred.
     * `running`: Property to check if `started` and currently running.
     * `stopped`: Property to check if not `fired` and no longer active.
     * `ctrlQ`: Internal `Queue` for control messages.

    :IVariables:
     * `done`: SimEvent signalled when timer finishes successfully.
     * `kill`: SimEvent signalled when timer is prematurely stopped.
    """
    name="timer"
    tracename="TIMER"
    CMDPAUSE  = "pause"
    CMDRESUME = "resume"
    def __init__(self, duration, start=False, initstate=None, **kwargs):
        """Constructor.

        :param duration: Time for which `Timer` should run.
        :param start: Boolean; if true, `start()` immediately.
        :param initstate: Deprecated (do not use).
        :param kwargs: Keywords passed to `FSM` constructor.
        """
        assert (duration>0), "[TIMER]: Cannot simulate non-positive duration!"
        FSM.__init__(self, start=False, initstate=self.RUN, **kwargs)
        self.__tpassed = 0
        self.__duration  = duration
        self.__ctrlQ = Queue()
        self.__tic   = None
        self.done = SimEvent(name=self.name+".done")
        self.kill = SimEvent(name=self.name+".kill")
        if start: self.start()

    duration   = property(fget=lambda self: self.__duration)
    timepassed = property(fget=lambda self: self.__timepassed() )
    timeleft   = property(fget=lambda self: self.duration - self.timepassed)
    ispaused   = property(fget=lambda self: (self.state==self.PAUSE) )
    fired      = property(fget=lambda self: self.done.occurred)
    running    = property(fget=lambda self: \
                          self.started and (self.state==self.RUN) )
    stopped    = property(fget=lambda self: \
                          (not self.fired) and (self.state==self.HALT) )
    ctrlQ      = property(fget=lambda self: self.__ctrlQ)

    def RUN(self, fsm):
        """RUN state; timer is active.

        Call `pause()` to pause an active timer. This method will signal `done`
        upon completion.
        """
        # set parameters
        tstart = now()
        self.__tic = tstart      # temporarily store tic as start of RUN
        queue  = self.ctrlQ
        tleft  = self.duration - self.__tpassed  # time left on timer
        # wait for timer to expire (or be paused)
        yield queue.remove(fsm, 1, renege=tleft)
        self.__tic = None
        telapsed = now() - tstart
        self.__tpassed += telapsed
        if fsm.acquired(queue):
            # PAUSE command
            assert (telapsed<tleft), \
                    "[TIMER]: Elapsed time exceeded time left during RUN!"
            assert (len(fsm.got)==1), "[TIMER]: Control queue failed!"
            cmd = fsm.got[0]
            assert (cmd==self.CMDPAUSE), \
                   "[TIMER]: Invalid control command received in RUN!"
            yield fsm.goto(self.PAUSE)
        else:
            assert (abs(self.__tpassed-self.duration)<const.EPSILON), \
                    "[TIMER]: Timer failed to complete properly in RUN!"
            self.__tpassed = self.duration
            if self.verbose>TIMER_VERBOSE: self.log("DONE")
            self.done.signal()
        yield fsm.goto(self.HALT, force=False)

    def PAUSE(self, fsm):
        """PAUSE state; timer is paused.

        Call `resume()` to restart timer.
        """
        queue = self.ctrlQ
        yield queue.remove(fsm, 1)
        assert fsm.acquired(queue) and (len(fsm.got)==1), \
               "[TIMER]: PAUSE failed to dequeue control message!"
        cmd = fsm.got[0]
        assert (cmd==self.CMDRESUME), \
                "[TIMER]: Invalid control command received in PAUSE!"
        yield fsm.goto(self.RUN)

    def resume(self, proc):
        """Blocking call to restart timer if in `PAUSE` state.

        :param proc: Process to block on resume command.
        """
        if (self.state==self.PAUSE):
            return self.ctrlQ.insert(proc, [self.CMDRESUME])
        else:
            return hold, proc, 0

    def pause(self, proc):
        """Blocking call to pause timer if in `RUN` state.

        :param proc: Process to block on pause command.
        """
        if (self.state==self.RUN):
            return self.ctrlQ.insert(proc, [self.CMDPAUSE])
        else:
            return hold, proc, 0

    def __timepassed(self):
        """Private method to determine time elapsed on timer."""
        if self.__tic is None:
            return self.__tpassed
        else:
            tdelta = now() - self.__tic
            return (self.__tpassed + tdelta)

    def HALT(self, fsm, force=True):
        """Overload `HALT` state to signal `kill` if needed."""
        if (force or (self.timeleft>0)):
            timepassed = self.timepassed
            if self.verbose>TIMER_VERBOSE:
                self.log("CANCEL", timepassed=time2usec(timepassed, fmt="%.4g"), \
                                   timeleft=self.timeleft, force=force)
            self.kill.signal(timepassed)
        yield hold, fsm, 0

    def log(self, evt=None, p=None, *args, **kwargs):
        """Overloaded to check verbose level and set common annotations."""
        force = False
        if ('verbose' in kwargs): force = (kwargs['verbose']>TIMER_VERBOSE)
        if self.verbose>TIMER_VERBOSE or force:
            FSM.log(self, evt, p, *args, **kwargs)
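As a hedged usage sketch, a controlling state in the same FSM framework could start, pause, and resume this Timer roughly as follows. The surrounding element, the BACKOFF/IDLE state names, and the time values are hypothetical; only the Timer calls follow the API above.

def BACKOFF(self, fsm):
    """Hypothetical state: run a 10-unit timer with a pause in the middle."""
    timer = Timer(10.0, start=True, tracename="TMR")
    yield hold, fsm, 2.0                  # let the timer run for a while
    yield timer.pause(fsm)                # blocking pause request
    yield hold, fsm, 5.0                  # timer frozen; timeleft is unchanged
    yield timer.resume(fsm)               # blocking resume request
    yield waitevent, fsm, (timer.done, timer.kill)
    if timer.done in fsm.eventsFired:
        yield fsm.goto(self.IDLE)         # assumes an IDLE state exists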
Example #7
class NAVTimer(Element):
    """Provides API to maintain network allocation vector and associated virtual
    carrier sense timer.

    :CVariables:
     * `timer`: Internal `Timer` object.
     * `done`: SimPy Event signalled when NAV timer successfully expires.
     * `kill`: SimPy Event signalled when NAV timer stops/pauses unexpectedly.
     * `fired`: Check if NAV timer (i.e. `timer`) has fired.
     * `running`: Check if NAV timer is still running.
     * `stopped`: Check if NAV timer has been stopped.
    """
    name = "nav timer"
    tracename = "NAV"
    def __init__(self, **kwargs):
        """Constructor."""
        self.__timer = None
        Element.__init__(self, **kwargs)

    timer = property(fget=lambda self: self.__timer)
    done = property(fget=lambda self: self.timer.done)
    kill = property(fget=lambda self: self.timer.kill)
    fired = property(fget=lambda self: \
                     isinstance(self.timer,Timer) and self.timer.fired)
    running = property(fget=lambda self: \
                       isinstance(self.timer,Timer) and self.timer.running)
    stopped = property(fget=lambda self: \
                       isinstance(self.timer,Timer) and self.timer.stopped)

    def configure(self, **kwargs):
        """Initialize `timer` to None and create monitor."""
        self.__timer = None
        self.__wakeup = SimEvent(name=self.name+".wake")
        mon = self.newchild("mon", FSM, tracename=self.tracename+".MON")
        #mon.goto(self.MON)
        mon.goto(self.MONIDLE)

    def update(self, proc, t=None):
        """Blocking call to cancel active timer, if needed, and starts a new
        timer.

        :param t: New timer value.
        :return: Blocking clause.

        If `t` is None, this method will just cancel any active timer and reset
        `timer` to None.
        """
        # cancel active timer
        if isinstance(self.timer, Timer):
            self.timer.halt()
            #self.delchild("navtimer")
            self.__timer = None
        # start new timer
        if (t>0):
            self.__timer = self.newchild("navtimer", Timer, t, start=True, \
                                         tracename=self.tracename+".TIMER")
            self.__wakeup.signal(self.timer)
        return hold, proc, 0

    def reset(self):
        """Non-blocking call to cancel active timer."""
        return self.update(None, None)

    def MONIDLE(self, fsm):
        """MONIDLE state; monitor `timer` when NAV Timer is in IDLE state."""
        # wait for timer to be set
        self.log_idle()
        yield waitevent, fsm, self.__wakeup
        t = self.__wakeup.signalparam
        assert isinstance(t, Timer)
        yield fsm.goto(self.MONBUSY, t)

    def MONBUSY(self, fsm, t):
        """MONIDLE state; monitor `timer` when NAV Timer is in BUSY state."""
        self.log_busy(duration=time2usec(t.duration) )
        yield waitevent, fsm, (t.done, t.kill)
        # timer fired
        if (t.done in fsm.eventsFired):
            pass
        # timer stopped
        elif (t.kill in fsm.eventsFired) and t.stopped:
            pass
        # otherwise -> raise exception
        else:
            raise RuntimeError, "[NAVTIMER]: Monitor indicates " + \
                                "NAV timer paused unexpectedly!"
        # done monitoring timer
        yield fsm.goto(self.MONIDLE)

    def MON(self, fsm):
        """MON state; monitor `timer` events."""
        while fsm.active():
            # wait for timer to be set
            self.log_idle()
            yield waitevent, fsm, self.__wakeup
            t = self.__wakeup.signalparam
            # monitor timer events
            while isinstance(t, Timer):
                self.log_busy(duration=time2usec(t.duration) )
                yield waitevent, fsm, (t.done, t.kill)
                # timer fired
                if (t.done in fsm.eventsFired):
                    pass
                # timer stopped
                elif (t.kill in fsm.eventsFired) and t.stopped:
                    pass
                # otherwise -> raise exception
                else:
                    raise RuntimeError, "[NAVTIMER]: Monitor indicates " + \
                            "NAV timer paused unexpectedly!"
                # done monitoring timer
                t = None
        return

    def log_busy(self, **kwargs):
        """Log NAV busy."""
        self.log("NAVBUSY", **kwargs)

    def log_idle(self, **kwargs):
        """Log NAV idle."""
        self.log("NAVIDLE", **kwargs)

    def log(self, evt=None, p=None, *args, **kwargs):
        """Overloaded to check verbose level and set common annotations."""
        force = False
        if ('verbose' in kwargs): force = (kwargs['verbose']>NAV_VERBOSE)
        if self.verbose>NAV_VERBOSE or force:
            Element.log(self, evt, p, *args, **kwargs)
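For illustration, a MAC process in the same framework might defer on the NAV like this. The nav attribute and the DEFER/IDLE state names are hypothetical; only the update() call and the done/kill events come from the API above.

def DEFER(self, fsm, duration):
    """Hypothetical MAC state: set the NAV and wait for it to expire."""
    yield self.nav.update(fsm, duration)                  # (re)start NAV timer
    yield waitevent, fsm, (self.nav.done, self.nav.kill)
    # nav.done -> NAV expired normally; nav.kill -> NAV was cancelled/updated
    yield fsm.goto(self.IDLE)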
Example #8
class ClientNode(IDable, Thread, RTI):
    """Base client node.

    A base client node accepts txn requests and dispatches them to storage nodes.
    Client nodes also host the paxos protocol entities.

    """
    def __init__(self, system, ID, configs):
        IDable.__init__(self, 'zone%s/cn' % ID)
        Thread.__init__(self)
        RTI.__init__(self, self.ID)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.system = system
        self.snodes = []
        self.configs = configs
        self.groupLocations = {}
        self.txnsRunning = set([])
        self.shouldClose = False
        self.closeEvent = SimEvent()
        #paxos entities
        self.paxosPRunner = None
        self.paxosAcceptor = None
        self.paxosLearner = None

    def addSNodes(self, snodes):
        self.snodes.extend(snodes)

    #notify new txn arrive, called by the system
    def onTxnArrive(self, txn):
        self.system.onTxnArrive(txn)
        self.txnsRunning.add(txn)
        self.dispatchTxn(txn)

    #notify new txn depart, called by the storage nodes
    def onTxnDepart(self, txn):
        if txn in self.txnsRunning:
            self.txnsRunning.remove(txn)
            self.system.onTxnDepart(txn)

    def dispatchTxn(self, txn):
        #just basic load balance
        hosts = self.getTxnHosts(txn)
        bestHost = iter(hosts).next()
        leastLoad = bestHost.load
        for host in hosts:
            if host.load < leastLoad:
                leastLoad = host.load
                bestHost = host
        bestHost.onTxnArrive(txn)
        self.logger.debug('%s dispatch %s to %s at %s' %
                          (self.ID, txn.ID, bestHost, now()))
        return bestHost

    def getTxnHosts(self, txn):
        hosts = set([])
        for gid in txn.gids:
            hosts.add(self.groupLocations[gid])
        return hosts

    def close(self):
        self.logger.info('Closing %s at %s' % (self, now()))
        self.shouldClose = True
        self.closeEvent.signal()

    def _close(self):
        ##periodically check if we still have txn running
        #while True:
        #    yield hold, self, 100
        #    if len(self.txnsRunning) == 0:
        #        break
        for snode in self.groupLocations.values():
            snode.close()
        for snode in self.groupLocations.values():
            if not snode.isFinished():
                yield waitevent, self, snode.finish
        try:
            self.paxosPRunner.close()
            self.paxosAcceptor.close()
            self.paxosLearner.close()
        except:
            pass

    def run(self):
        while not self.shouldClose:
            yield waitevent, self, self.closeEvent
            if self.shouldClose:
                for step in self._close():
                    yield step
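The least-loaded selection in dispatchTxn() above is equivalent to taking a min() over the candidate hosts, for example:

def least_loaded(hosts):
    """Pick the host with the smallest .load (same policy as dispatchTxn)."""
    return min(hosts, key=lambda h: h.load)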
Example #9
class Provider(Process, DiscoveryEventObserver):
    
    RETRY_ON_FAILURE = 2000 # Time the providers sleep if no WP was found
    UPDATE_TIME = 3600000 # 1h
    
    def __init__(self, dataaccess, discovery, sim):
        super(Provider, self).__init__(sim = sim)
        
        self.discovery = discovery
        self.discovery.add_changes_observer(self)
        if dataaccess is not None: # it should be None just for testing purposes!
            self.clue_manager = ClueManager(dataaccess)
        
        self.__stop = False
        self.wp_node_name = None
        self.connector = None
        
        self.externalCondition = SimEvent(name="external_condition_on_%s"%(self.name), sim = sim)
        self.on_external_condition = False # set both when a response to a request is received and when the WP changes!
        self.clueChanged = SimEvent(name="clue_change_on_%s"%(self.name), sim = sim)
        self.stopProvider = SimEvent(name="stop_provider_%s"%(self.name), sim = sim)
        self.timer = None
        
        self.last_contribution_to_aggregated_clue = Version(-1, -1)
        self.last_wp_notification = WPRequestNotifier("super_fake_node", self.sim)
        # When we have to detect the need of updating our clues in the WP:
        #     have we received a new version confirming that the WP received it correctly?
        # Once we decide to send our clue, a WP needs to acknowledge that it has received it.
        # Even if it is not the same WP.
        # Otherwise a new WP might be initialized from a version higher than the one
        # this node contributed to, without actually having this node's information.
        self.pending_update = False
        
    def _new_wp_in_the_neighborhood(self, new_wp_r, remaining):
        """Returns the time the node should sleep"""
        # only if I have a Version the new WP does not have
        if self.last_contribution_to_aggregated_clue > new_wp_r.version or \
            self.pending_update: # or if the last update could not be sent to the former WP
            # the WP may not have my information
            if self.last_wp_notification.wp_name != new_wp_r.node_name: # in the first loop this will always be true
                self.last_wp_notification = WPRequestNotifier(new_wp_r.node_name, self.sim)
                retry = self.sent_through_connector()
                return Provider.RETRY_ON_FAILURE if retry else Provider.UPDATE_TIME
            else:
                if self.last_wp_notification.got_response():
                    if self.last_wp_notification.successfully_sent():
                        # I know that the initial aggregated clue of the WP is lower than my last contribution
                        # but there is no need to send it because I've already done that.
                        return remaining if remaining > 0 else Provider.UPDATE_TIME
                    else:
                        # I sent it and a response was received,
                        # but the response was negative, so I retry.
                        # no need to update self.last_wp_notification
                        retry = self.sent_through_connector()
                        return Provider.RETRY_ON_FAILURE if retry else Provider.UPDATE_TIME
                else:
                    # still waiting for the previous request's response
                    # CAUTION: if this is set to the RETRY time, the next iteration will not enter this method.
                    # That means it will try to send the clue again after RETRY_TIME :-S
                    return Provider.UPDATE_TIME
        else: # nothing to send
            return remaining if remaining > 0 else Provider.UPDATE_TIME
    
    def update_clues_on_whitepage(self):
        remaining = 0
        sleep_for = 0
        while not self.__stop:
            if self.on_external_condition:
                new_wp_r = self.discovery.get_whitepage_record()
                if new_wp_r is None:
                    sleep_for = Provider.RETRY_ON_FAILURE
                else:
                    sleep_for = self._new_wp_in_the_neighborhood(new_wp_r, remaining)
                self.on_external_condition = False # for the next iteration
            else:
                # last clue has expired or it has changed => send it to the WP
                retry = self.sent_through_connector()
                sleep_for = Provider.RETRY_ON_FAILURE if retry else Provider.UPDATE_TIME
            
            self.timer = Timer(sleep_for, sim=self.sim)
            self.sim.activate(self.timer, self.timer.wait())
            
            before = self.sim.now()
            yield waitevent, self, ( self.timer.event,
                                     self.externalCondition,
                                     self.clueChanged,
                                     self.stopProvider )
            remaining = Provider.UPDATE_TIME - (self.sim.now() - before)
    
    def sent_through_connector(self):
        """Returns if it needs to be retried or not."""
        self.__update_connector_if_needed()
        if self.connector is not None:
            ok = self.connector.send_clue( self.clue_manager.get_clue() )
            if not self.connector.send_confirmed:
                self.pending_update = True
            return not ok # if the message could not be sent (e.g. because WP is not local anymore), retry
        return self.connector is None # if the connector could not be updated, retry
    
    def __update_connector_if_needed(self):
        wp = self.discovery.get_whitepage()
        if wp is None:
            self.connector = None
        else:
            # TODO reuse connectors as done in "consumer" module
            if self.wp_node_name==None or self.wp_node_name!=wp.name:
                self.wp_node_name = wp.name
                if wp==self.discovery.me:
                    self.connector = LocalConnector(self.discovery, self)
                else:
                    self.connector = RemoteConnector(self.discovery.me, wp, self.sim, self)            
                    
    def refresh_clue(self):
        refreshed = self.clue_manager.refresh()
        if refreshed:
            self.clueChanged.signal()
    
    def on_whitepage_selected_after_none(self):
        if self.timer is not None: self.cancel(self.timer)
        self.on_external_condition = True
        self.externalCondition.signal()
    
    # Just in case
    def stop(self):
        self.__stop = True
        self.stopProvider.signal()
        
    def set_last_version(self, version):
        self.last_contribution_to_aggregated_clue = version
        self.last_wp_notification.response_received( successful = True )
        self.on_external_condition = True
        self.pending_update = False
        self.externalCondition.signal()
    
    def set_error_on_last_request(self):
        self.last_wp_notification.response_received( successful = False )
        self.on_external_condition = True
        self.externalCondition.signal()
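A minimal activation sketch for the provider loop, assuming the SimPy 2.x object-oriented API and that the module's own dependencies (WPRequestNotifier, connectors, etc.) are importable. FakeDiscovery is a hypothetical stub, and dataaccess is left as None, which the constructor explicitly allows for testing.

from SimPy.Simulation import Simulation

class FakeDiscovery(object):
    """Hypothetical discovery stub where no whitepage ever appears."""
    def add_changes_observer(self, observer): pass
    def get_whitepage_record(self): return None
    def get_whitepage(self): return None

sim = Simulation()
sim.initialize()
provider = Provider(None, FakeDiscovery(), sim)
sim.activate(provider, provider.update_clues_on_whitepage())
sim.simulate(until=10 * Provider.RETRY_ON_FAILURE)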
Example #11
class ChannelInterface(Element):
    """Interface to connect to a `Channel`.

    This element has four `Port` objects:
        1. "TXU" - sends received packets to upstream element.
        #. "RXU" - receives traffic from an upstream element.
        #. "TXD" - sends traffic to `Channel`.
        #. "RXD" - receives traffic from the `Channel`.

    A `ChannelInterface` must be connected to a `Channel` using the
    `Channel.connect()` method.

    Packet Annotations and ChannelInterface
    =======================================
    The following annotations are marked or used by `ChannelInterface`.

    ============== =================================================
    Name            Description
    ============== =================================================
    cif-duration    Specifies duration of packet so that
                    `ChannelInterface` can simulate transmission and
                    reception of packet. **This must be set by an
                    upstream protocol.**
    -------------- -------------------------------------------------
    cif-collision   List of other Packet objects that have arrived
                    during the reception of a Packet. This collision
                    list should be used to resolve collisions.
    -------------- -------------------------------------------------
    cif-src         Reference to `ChannelInterface` set when sending
                    a packet to the `Channel`.
    -------------- -------------------------------------------------
    cif-dst         Reference to `ChannelInterface` set when 
                    receiving a packet from the `Channel`.
    -------------- -------------------------------------------------
    cif-drp         If marked, indicates that the packet was not
                    completely received because of transmission.
                    This only occurs in halfduplex operation.
    -------------- -------------------------------------------------
    cif-txts        Timestamp for when the packet was transmitted
                    from 'cif-src'.
    -------------- -------------------------------------------------
    cif-rxts        Timestamp for when the packet arrived at the
                    receiver 'cif-dst'.
    ============== =================================================

    :CVariables:
     * `ifstate`: Property to access current state of `ChannelInterface`.
     * `intransmit`: Property to check if in `CHANNELIF_TX` state.
     * `inreceive`:  Property to check if in `CHANNELIF_RX` state.

    :IVariables:
     * `halfduplex`: Boolean flag; if true, then use half-duplex operation,
       otherwise use full-duplex operation.
     * `txdata`: SimEvent signalled when Packet is sent to `Channel`.
     * `txdone`: SimEvent signalled when Packet duration is done simulating.
     * `rxdata`: SimEvent signalled when Packet starts being received.
     * `rxdone`: SimEvent signalled when Packet is done being received.
     * `rxbuffer`: Receive buffer of Packets that are actively being received.
    """
    name = "channel interface"
    tracename = "CIF"
    def __init__(self, **kwargs):
        """Constructor."""
        self.halfduplex = None
        self.__ifstate = CHANNELIF_RX
        # set up events and buffer
        self.txdata = SimEvent(name="txdata")
        self.txdone = SimEvent(name="txdone")
        self.rxdata = SimEvent(name="rxdata")
        self.rxdone = SimEvent(name="rxdone")
        self.rxbuffer = []
        Element.__init__(self, **kwargs)
        # rename events
        self.txdata.name = "%s(%s).txdata"%(self.name, self.uid)
        self.txdone.name = "%s(%s).txdone"%(self.name, self.uid)
        self.rxdata.name = "%s(%s).rxdata"%(self.name, self.uid)
        self.rxdone.name = "%s(%s).rxdone"%(self.name, self.uid)
        # monitor events -> keep up to date
        #monitor_events(self.txdata, self.txdone)
        #monitor_events(self.rxdata, self.rxdone)

    ifstate = property(fget=lambda self: self.__ifstate)
    intransmit = property(fget=lambda self: (self.ifstate==CHANNELIF_TX) )
    inreceive  = property(fget=lambda self: (self.ifstate==CHANNELIF_RX) )

    def configure(self, halfduplex=True, **kwargs):
        """Set up ports and other parameters.

        :param halfduplex: Boolean; if true, operate in half-duplex mode;
                           otherwise use full-duplex operation.
        """
        # ports to upstream target
        self.addport("RXU"), self.addport("TXU")
        # ports to downstream target (i.e. a Channel)
        self.addport("TXD"), self.addport("RXD")
        # set up other parameters
        self.halfduplex = halfduplex
        self.newchild("txfsm", FSM, tracename=self.tracename+".TX")
        self.newchild("rxfsm", FSM, tracename=self.tracename+".RX")
        self.txfsm.goto(self.SEND)
        self.rxfsm.goto(self.RECV)

    def SEND(self, fsm):
        """Manage downstream (or outgoing traffic."""
        yield self.RXU.recv(fsm, 1)
        assert fsm.acquired(self.RXU) and (len(fsm.got)==1), \
                "[CHANNELIF]: SEND() error occurred during recv() from RXU!"
        # get packet and set annotations
        p, duration = fsm.got[0], 0
        errmsg = "[CHANNELIF]: Cannot send packet that does not support ANNO!"
        assert ANNO.supported(p), errmsg
        self.set_sendanno(p)
        if p.hasanno('cif-duration'): duration = p.getanno('cif-duration')
        # min-time is const.EPSILON
        if duration<const.EPSILON: duration = const.EPSILON
        p.setanno('cif-duration', duration)
        self.log_send(p)
        # send and simulate duration
        self.__ifstate = CHANNELIF_TX     # start TX
        self.drop("all")                # drop all packet in rxbuffer
        self.txdata.signal(p)
        yield self.TXD.send(fsm, [p])
        yield hold, fsm, duration       # simulate duration
        self.drop("all")                # drop all packet in rxbuffer
        self.__ifstate = CHANNELIF_RX     # resume RX
        self.txdone.signal(p)
        # continue in SEND
        yield fsm.goto(self.SEND)

    def RECV(self, fsm):
        """Manage upstream (or incoming) traffic.

        This method spawns worker processes to simulate the capture of each
        packet. These worker processes manage 'cif-drp' and 'cif-collision'
        annotations; along with simulating packet 'cif-duration'.
        """
        yield self.RXD.recv(fsm, 1)
        errmsg = "[CHANNELIF]: RECV() error occurred during recv() from RXD!"
        assert fsm.acquired(self.RXD) and (len(fsm.got)==1), errmsg
        # start capture thread
        for p in fsm.got:
            w = FSM()
            w.goto(self.CAPTURE, p)
            w.start(prior=True)
        # continue in RECV
        yield fsm.goto(self.RECV)
        assert False, "State transition failed!"

    def CAPTURE(self, fsm, p):
        """Simulate capture process for a Packet `p`."""
        self.log("CAPSTART", p)
        errmsg = "[CHANNELIF]: Cannot capture packet that doesn't support ANNO!"
        assert ANNO.supported(p), errmsg
        duration = const.EPSILON
        self.rxbuffer.append(p)                 # add to rxbuffer
        if self.intransmit: self.drop("all")    # drop all packet in rxbuffer
        # mark/use other annotations
        self.set_recvanno(p)
        if p.hasanno('cif-duration'): duration = p.getanno('cif-duration')
        assert not(duration<const.EPSILON), \
                "[CHANNELIF]: Invlaid duration in CAPTURE! (t=%s)"%(duration)
        # resume operation
        self.log("rxdata.sig", p)
        self.rxdata.signal(p)                   # signal rxdata
        yield hold, fsm, duration               # simulate duration
        if self.intransmit: self.drop("all")    # drop all packet in rxbuffer
        self.rxbuffer.remove(p)                 # remove from rxbuffer
        # drop or forward to upper layer
        if p.hasanno('cif-drp') and p.getanno('cif-drp'):
            self.log_drop( p, halfduplex="%s"%(self.halfduplex) )
            self.cleananno(p)
        else:
            pargs = {'cif-duration':time2usec(duration) }
            self.log_recv(p, **pargs)
            yield self.TXU.send(fsm, [p])
        # signal rxdone
        self.log("rxdone.sig", p)
        self.rxdone.signal(p)
        yield fsm.stop()
        assert False, "stop failed!"

    def set_sendanno(self, p):
        """Set annotations for outgoing traffic prior to sending downstream.

        :return: Modified packet `p`.

        By default, this method sets the 'cif-src' and 'cif-txts' annotations.
        Overload this method as necessary.
        """
        p.setanno('cif-src', self, ref=True)
        p.setanno('cif-txts', now() )
        # remove unwanted annotations in outgoing packets
        rannolist = ['cif-dst', 'cif-collision', 'cif-rxts', \
                     'cif-drp', 'cif-iheap']
        for a in rannolist:
            if p.hasanno(a): p.delanno(a)
        return p

    def set_recvanno(self, p):
        """Set annotation for incoming traffic at start of capture (i.e. right
        after packet has been inserted into `rxbuffer`).

        :return: Modified packet `p`.

        By default, this method initializes the 'cif-collision' annotation, sets the
        'cif-dst' annotation, and sets the 'cif-rxts' annotation. Overload this
        method as necessary.
        """
        assert (p in self.rxbuffer), "[CHANNELIF]: set_recvanno(p) " + \
                "could not find packet 'p' in rxbuffer!"
        p.setanno('cif-dst', self, ref=True)
        p.setanno('cif-collision', [], priv=True)
        # add p to collision list of all in rxbuffer/p
        idx = self.rxbuffer.index(p)
        range_not_idx = range(len(self.rxbuffer) )
        range_not_idx.remove(idx)
        for k in range_not_idx:
            c = self.rxbuffer[k]
            ### XXX ###
            #c.getanno('cif-collision').append(Reference(p) )
            ### XXX ###
            c.getanno('cif-collision').append(p)
        # add rxbuffer/p to collision list of p
        for k in range_not_idx:
            c = self.rxbuffer[k]
            ### XXX ###
            #p.getanno('cif-collision').append(Reference(c))
            ### XXX ###
            p.getanno('cif-collision').append(c)
        # set timestamp for arrival at cif-dst
        p.setanno('cif-rxts', now() )
        return p

    def cleananno(self, p):
        """Remove unwanted annotations from packet `p`.

        Removes any annotations that could cause cyclic references.
        """
        if ANNO.supports(p, 'cif-collision'):
            coll = strcollision(p)
            p.delanno('cif-collision')
            p.setanno('cif-collision', coll, priv=True)
        return p

    def drop(self, p):
        """Set 'cif-drp' annotation in packet p.

        :param p: Packet to mark; or if "all", then mark all packets in
                  `rxbuffer`.
        """
        if isinstance(p, Reference): p = p._deref
        if (p=="all"):
            for c in self.rxbuffer:
                self.drop(c)
        elif p in self.rxbuffer:
            p.setanno('cif-drp', True)
        else:
            raise RuntimeError, \
                  "[CHANNELIF]: drop() could not find packet in rxbuffer!"

    def interval_heap(self, p):
        """Create interval heap from collision list in packet `p`.

        :param p: Packet containing collision list in 'cif-collision' annotation.
        :return: Interval heap.

        This method uses the 'cif-rxts' and 'cif-duration' annotations of packet
        `p` and each packet in its collision list to create an interval heap. To
        do this, the method will create a list of partitions over the duration
        of packet `p` and sort colliding packets into the appropriate
        partitions. An interval heap looks like:

            [(t0,t1,[...]), (t1,t2,[...]), ...]

        This method sets the 'cif-iheap' annotation.
        """
        # get packet p parameters
        for a in ['cif-collision', 'cif-rxts']:
            errmsg = "[CHANNELIF]: interval_heap() requires '%s' annotation!"%(a)
            assert ANNO.supports(p, a), errmsg
        coll = p.getanno('cif-collision')
        duration = const.EPSILON
        if p.hasanno('cif-duration'): duration = p.getanno('cif-duration')
        ta = p.getanno('cif-rxts')
        tb = ta + duration
        # get times for all packets in collision list
        times = [ta, tb]
        for c in coll:
            errmsg = "[CHANNELIF]: interval_heap() requires 'cif-rxts' " + \
                     "annotation in collision list packet!"
            assert ANNO.supports(c, 'cif-rxts'), errmsg
            duration = const.EPSILON
            if c.hasanno('cif-duration'): duration = c.getanno('cif-duration')
            t0 = c.getanno('cif-rxts')  # start of packet c
            t1 = t0 + duration          # end of packet c
            # check if t0, t1 are in times
            t0intimes = any([(abs(t0-t)<2*const.EPSILON) for t in times])
            if (not t0intimes) and (ta<t0<tb): times.append(t0)
            t1intimes = any([(abs(t1-t)<2*const.EPSILON) for t in times])
            if (not t1intimes) and (ta<t1<tb): times.append(t1)
        # sort times and create interval heap
        times.sort()
        iheap = [(times[k], times[k+1], []) for k in range(len(times)-1) ]
        #print "%s: Interval heap for %s (%.8f, %.8f) @ %.8f"%(self.traceid, \
        #        p.traceid, ta,tb, now())
        for c in coll:
            errmsg = "[CHANNELIF]: interval_heap() requires 'cif-rxts' " + \
                     "annotation in collision list packet!"
            assert ANNO.supports(c, 'cif-rxts'), errmsg
            duration = const.EPSILON
            if c.hasanno('cif-duration'): duration = c.getanno('cif-duration')
            t0 = c.getanno('cif-rxts')  # start of packet c
            t1 = t0 + duration          # end of packet c
            # insert into interval heap
            #print "  + inserting %s, (%.8f, %.8f)"%(c.traceid,t0,t1)
            for k in range(len(iheap)):
                ia, ib = iheap[k][0], iheap[k][1]
                errmsg = "[CHANNELIF]: malformed interval in  " + \
                         "interval_heap()! (ia=%s, ib=%s)"%(ia, ib)
                assert (ia<ib), errmsg
                if (t0<ib) and (t1>ia):
                    iheap[k][2].append(Reference(c))
                    #print "    --> inserted  into (%.8f, %.8f)"%(ia,ib)
                else:
                    #print "    --> not added into (%.8f, %.8f)"%(ia,ib)
                    pass
        # set iheap annotation
        p.setanno('cif-iheap', iheap, priv=True)
        return iheap

    def sinr_heap(self, p, force=True):
        """Calculate signal-to-interference-and noise ratio (SINR) for each
        partition created by `interval_heap()`.

        :param p: Packet to inspect.
        :param force: If true, recalculate interval heap, else use existing
                      annotation if it exists.
        :return: SINR heap.

        The SINR heap looks like this:

            [(t0, t1, sinr0), (t1, t2, sinr1), ... ]

        Note: This method uses the 'rxpower' and 'noisepower' annotations.
        """
        # check packet
        errmsg = "[CHANNELIF]: sinr_heap() cannot process non-Packet!"
        assert ANNO.supported(p), errmsg
        for a in ['rxpower', 'noisepower']:
            errmsg = "[CHANNELIF]: sinr_heap() cannot find '%s' annotation!"%(a)
            assert ANNO.supports(p, a), errmsg
        # get parameters
        rxpower = p.getanno('rxpower')          # in dBm
        noisepower = p.getanno('noisepower')    # in dBm
        npow = db2linear(noisepower)
        # get interval heap
        if p.hasanno('cif-iheap') and not force:
            iheap = p.getanno('cif-iheap')
        else:
            iheap = self.interval_heap(p)
        # start creating sinr heap
        sinrheap = []
        #print "%s: SINR heap for %s @ %.8f"%(self.traceid, p.traceid, now())
        for ta,tb,coll in iheap:
            ipow = 0
            for c in coll:
                errmsg = "[CHANNELIF]: sinr_heap() cannot find 'rxpower' " + \
                         "annotation in collision list!"
                assert ANNO.supports(c, 'rxpower'), errmsg
                ipow += db2linear(c.getanno('rxpower') )
            sinr = rxpower - linear2db(ipow + npow)
            sinrheap.append((ta,tb,sinr) )
            #print "  --> (%.8f, %.8f): %.3f dB, coll = %s"%(ta,tb, sinr, [c.traceid for c in coll])
        return sinrheap

    def log_send(self, p, *args, **kwargs):
        """Convenience method for logging a send event for packet `p`."""
        if self.verbose>CHANNELIF_VERBOSE:
            kwargs.update(self.get_cif_anno(p))
            self.log("snd", p, *args, **kwargs)

    def log_recv(self, p, *args, **kwargs):
        """Convenience method for logging a receive event for packet `p`."""
        if self.verbose>CHANNELIF_VERBOSE:
            if p.hasanno('cif-src') and ('cif-src' not in kwargs):
                kwargs['cif-src'] = p.getanno('cif-src').traceid
            if p.hasanno('cif-dst') and ('cif-dst' not in kwargs):
                kwargs['cif-dst'] = p.getanno('cif-dst').traceid
            kwargs.update(self.get_cif_anno(p))
            self.log("rcv", p, *args, **kwargs)

    def log_drop(self, p, *args, **kwargs):
        """Convenience method for logging a drop event for packet `p`."""
        if self.verbose>CHANNELIF_VERBOSE:
            if 'halfduplex' not in kwargs: kwargs['halfduplex'] = self.halfduplex
            self.log("drp", p, *args, **kwargs)

    def log(self, event=None, p=None, *args, **kwargs):
        """Overloaded to check verbose level and set common annotations."""
        force = False
        if ('verbose' in kwargs): force = (kwargs['verbose']>CHANNELIF_VERBOSE)
        if self.verbose>CHANNELIF_VERBOSE or force:
            kwargs.update(self.get_cif_anno(p))
            Element.log(self, event, p, *args, **kwargs)

    def get_cif_anno(self, p):
        """Convenience method to extract annotations and convert to strings."""
        kwargs = {}
        if not isinstance(p, Packet): return kwargs
        if p.hasanno('cif-collision'):
            kwargs['cif-collision'] = strcollision(p)
        if p.hasanno('cif-duration'):
            kwargs['cif-duration'] = time2usec(p.getanno('cif-duration') )
        if p.hasanno('cif-src'):
            kwargs['cif-src'] = "%s"%(p.getanno('cif-src').traceid)
        if p.hasanno('cif-dst'):
            kwargs['cif-dst'] = "%s"%(p.getanno('cif-dst').traceid)
        return kwargs
Example #12
File: queue.py  Project: reidlindsay/wins
class Queue(Traceable, Store):
    """Convenient API to SimPy Store.

    See the constructor `__init__()` for configuration options.

    Queues and Renege Clauses
    =========================
    This class provides an API to get/put to SimPy Stores with renege clauses
    (i.e. `remove()` and `insert()`). A valid renege argument to these methods
    must be one of the following:

        1. a timeout value,
        #. a SimEvent to wait on,
        #. a list or tuple of SimEvents to wait on,
        #. a tuple that is a valid renege clause, e.g. (hold, proc, tsec)

    The `acquired()` and `stored()` methods can only be used with SimPy
    Processes when a renege clause is provided to the associated `remove()` or
    `insert()`. When using an `FSM`, however, these methods are overloaded so
    that they work regardless of whether a renege clause is present.

    :CVariables:
     * `name`: Name of Queue.
     * `tracename`: Name used in `Trace`.

       By default, `Queue` tracenames will be appended with the queue's `uid`.

     * `all`: Property to access private SimEvent that gets signalled when a
       get, put, or drop occurs.
     * `priority`: Property to access priority flag set during constructor.
     * `length`: Property to access number of elements buffered in Queue.
     * `traceid`: Overload property to return `tracename`.

    :IVariables:
     * `monitorQ`: Boolean flag; if true, signal events `enQ` and `deQ`.
     * `enQ`: SimEvent signalled when a new item is inserted in Queue.
     * `deQ`: SimEvent signalled when an item is removed from the Queue.
     * `__priorityQ`: Private boolean flag; if true, `Queue` operates in
       priority mode.
     * `__dummy`: Private SimEvent used so that `acquired()` and `stored()`
       methods can be used to check if `insert()` and `remove()` are successful.
    """
    name="queue"
    tracename="Q"
    def __init__(self, priority=False, monitorQ=False, \
                 unitName="packet", capacity="unbounded", \
                 initialBuffered=None, **kwargs):
        """Constructor.

        :param priority:  Boolean; if True, use priority queueing.
        :param monitorQ:  Boolean; if True, support `enQ`, `deQ`, and `drp`.
        :param unitName:  Description of units stored in `Queue`.
        :param capacity:  Capacity of underlying Store [default='unbounded'].
        :param initialBuffered: Initial list of buffered objects.
        :param kwargs:    Keywords passed to `Traceable` constructors.
        """
        # check that initialBuffered is properly formatted
        if initialBuffered and priority:
            isPrio = all([isinstance(s,tuple) for s in initialBuffered] )
            if not isPrio: initialBuffered = [(p, 0) for p in initialBuffered]
        # call constructors
        Store.__init__(self, unitName=unitName, capacity=capacity, \
                       initialBuffered=initialBuffered, \
                       putQType=PriorityQ, getQType=PriorityQ)
        Traceable.__init__(self, **kwargs)
        # set other parameters
        self.tracename = self.tracename + "%d"%(self.uid)
        self.__priority = priority
        self.monitorQ = monitorQ
        self.enQ = SimEvent(name=self.name+".enQ")
        self.deQ = SimEvent(name=self.name+".deQ")
        self.drp = SimEvent(name=self.name+".drp")
        self.__dummy = SimEvent(name=self.name+".dummy")
        # set up Queue for priority
        if self.priority:
            self.theBuffer = _PriorityList(self.theBuffer)
        self.addSort(None) # setup addSort for priority queueing

    priority = property(fget=lambda self: self.__priority)
    length = property(fget=lambda self: self.nrBuffered)
    traceid = property(fget=lambda self: self.tracename)

    def insert(self, proc, S, prio=None, renege=None):
        """Insert a list of objects `S` into the `Queue`.

        :param proc:    SimPy Process that will execute insert.
        :param S:       List of objects to insert.
        :param prio:    Priority level of insert (only used when `priority` is
                        set to `True`).
        :param renege:  Renege clause or timeout.
        :return: Yield clause to block on.

        If `priority` is false, `prio` will be ignored. Otherwise, if `priority`
        is True and `prio` is not provided, then objects in `S` will be inserted
        with priority level 0.

        Also, when `priority` is True, `S` can contain 2-tuples of *(obj, prio)*
        that would allow each object in `S` to be inserted with its own priority.

        Normal usage with a `renege` clause is as follows:

                >>> yield queue.insert(proc, S, renege=<renege clause>)
                >>> if proc.stored(queue):
                ...     # do something
                ... else:
                ...     # do something else

        See notes above for more on `Queues and Renege Clauses`_.
        """
        assert isinstance(S, list) or isinstance(S, tuple), \
                "[QUEUE]: can only insert() list or tuple of objects!"
        assert isinstance(proc, Process), \
                "[QUEUE]: insert() requires valid Process!"
        # handle prio arg
        if self.priority:
            if prio is None: prio = 0
            isPrio = all([isinstance(s,tuple) for s in S] )
            if not isPrio: S = [(s, prio) for s in S]
        else:
            prio = 0
        # create put command
        pcmd = put, proc, self, S, prio
        # set renege clause
        if isinstance(renege, int) or isinstance(renege, float):
            pcmd = pcmd, (hold, proc, renege)
        elif isinstance(renege, SimEvent):
            pcmd = pcmd, (waitevent, proc, renege)
        elif isinstance(renege, tuple) or isinstance(renege, list):
            isEventList = all([isinstance(e, SimEvent) for e in renege] )
            if isEventList:
                pcmd = pcmd, (waitevent, proc, renege)
            else:
                pcmd = pcmd, renege  # assume renege is a valid tuple
        else:
            errmsg = "[QUEUE]: Found invalid renege clause!"
            assert (renege is None), errmsg
        return pcmd

    def remove(self, proc, fn=1, prio=None, priofilter=None, \
               renege=None, **kwargs):
        """Remove a list of objects from the `Queue`.

        :param proc:        `SimPy` `Process` that will execute the remove.
        :param fn:          Positive integer or filter function (or "all").
        :param prio:        Priority level of remove.
        :param priofilter:  Priority-based filter function.
        :param renege:      Renege clause.
        :param kwargs:      Additional keywords passed to filter function.

        `proc` is the only mandatory argument.
        
        Normally, `fn` is used to indicate either the number of objects to
        remove from `Queue` or provide a filter function (i.e. the same as
        removing objects from a `SimPy` `Store`).
        
        If `priority` is True and `priofilter` is specified, a list of 2-tuples
        containing *(object, priority level)* is passed to the `priofilter`
        function. Normal filter functions (passed with `fn`) will receive a list
        of buffered objects without priority information.

        The `renege` clause can be one of the following:

            * a timeout value;
            * a (list of) SimEvent(s) to wait on;
            * or tuple representing a valid renege clause (e.g. hold, proc, 0).

        Normal usage with a `renege` clause is as follows:

                >>> yield queue.remove(proc, fn, renege=<renege clause>)
                >>> if proc.acquired(queue):
                ...     # do something
                ... else:
                ...     # do something else

        See notes above for more on `Queues and Renege Clauses`_.

        :note: When `fn` or `priofilter` are used to specify filter functions,
               all objects returned by the filter function will be returned to
               the requesting `Process`.
        """
        isallstr = lambda s: isinstance(s, str) and (s.lower()=="all")
        if isallstr(fn): fn = "all"
        if isallstr(priofilter): priofilter = "all"
        assert isinstance(proc, Process), \
                "[QUEUE]: remove() must be called with a valid Process!"
        assert isinstance(fn, int) or callable(fn) or (fn=="all"), \
                "[QUEUE]: remove() must be called with " + \
                "\'fn\' that is integer, callable, or \"all\"!"
        # set up filter parameters
        if fn=="all": fn = lambda buff: [a for a in buff]
        if priofilter=="all": priofilter = lambda buff: [a for a,p, in buff]
        # set up priority parameters
        if self.priority:
            callback, nget = None, None
            if callable(fn): callback = fn
            elif callable(priofilter): callback = priofilter
            elif isinstance(fn, int): nget = fn
            elif fn is None: nget = 1
            # call private __filter method
            if priofilter is not None:
                filt = lambda buff: self.__filter(buff, callback, nget, prio, True, **kwargs)
            else:
                filt = lambda buff: self.__filter(buff, callback, nget, prio, **kwargs)
            fn = filt
        else:
            prio = 0
            if fn is None: fn = 1
        # create get command
        gcmd = get, proc, self, fn
        if prio is not None: gcmd += (prio, )
        # set renege clause
        if isinstance(renege, int) or isinstance(renege, float):
            # renege is a timeout
            gcmd = gcmd, (hold, proc, renege)
        elif isinstance(renege, SimEvent):
            # renege is a single SimEvent
            gcmd = gcmd, (waitevent, proc, renege)
        elif isinstance(renege, tuple) or isinstance(renege, list):
            # renege is an event list or valid tuple
            isEventList = all([isinstance(e, SimEvent) for e in renege] )
            if isEventList:
                gcmd = gcmd, (waitevent, proc, renege)
            else:
                gcmd = gcmd, renege  # assume renege is a valid tuple
        else:
            errmsg = "[QUEUE]: Found invalid renege clause!"
            assert (renege is None), errmsg
        return gcmd

    def _buffer_object(self, x):
        """Internal method to extract actual object from a buffer entry."""
        if self.priority: return x[0]
        else:             return x

    def __filter(self, buff, callback, nget, prio, flag=False, **kwargs):
        """Private filter method to manage filtering in `remove()` with
        `priority`."""
        pbuff = [a for a,b in buff if b>=prio]
        if flag:
            rbuff = callback(buff, **kwargs)
        elif (callback is None) and (nget is not None):
            rbuff = pbuff
        else:
            rbuff = callback(pbuff, **kwargs)
        if (nget is not None) and (len(rbuff) < nget): return []
        return rbuff[:nget]

    def __copy_theBuffer(self):
        """Private method to get objects in `Store.theBuffer`."""
        return [self._buffer_object(a) for a in self.theBuffer]

    def __copy_putQ(self, contents=False):
        """Private method to get any `Process` waiting to put objects in `Store`."""
        if contents:
            x = []
            for p in self.putQ:
                x += [self._buffer_object(a) for a in p._whatToPut]
        else:
            x = [p for p in self.putQ]
        return x

    def __copy_getQ(self):
        """Private method to get any `Process` waiting to get objects from `Store`."""
        return [p for p in self.getQ]

    def _get(self, arg):
        """Overload `SimPy` `Store._get()` method."""
        if self.verbose>QUEUE_HIGH_VERBOSE or self.monitorQ:
            # run _get
            prethebuff  = self.__copy_theBuffer()
            preputbuff  = self.__copy_putQ(True)
            rval = Store._get(self, arg)
            postthebuff = self.__copy_theBuffer()
            postputbuff = self.__copy_putQ(True)
            # perform checks
            obj = arg[1]
            getbuff = obj.got
            putbuff = [a for a in preputbuff if \
                        ((a in postthebuff) or (a in getbuff)) ]
            drpbuff = [a for a in preputbuff if \
                        ((a not in postputbuff) and (a not in postthebuff) ) ]
            #self.log("logget")
            self.__log_all({'get': getbuff, \
                            'put': putbuff, \
                            'drp': drpbuff} )
        else:
            rval = Store._get(self, arg)
        return rval

    def _put(self, arg):
        """Overload `SimPy` `Store._put()` method."""
        nevents = len(self.sim._timestamps)
        if self.verbose>QUEUE_HIGH_VERBOSE or self.monitorQ:
            # get objects to request
            reqput = []
            reqput += [self._buffer_object(p) for p in arg[0][3] ]
            # run _put
            prethebuff  = self.__copy_theBuffer()
            preget      = self.__copy_getQ()
            preputbuff  = self.__copy_putQ(True)
            reqput += [p for p in preputbuff]
            rval = Store._put(self, arg)
            postthebuff = self.__copy_theBuffer()
            postget     = self.__copy_getQ()
            postputbuff = self.__copy_putQ(True)
            # perform checks
            getbuff = []
            for proc in [p for p in preget if (p not in postget)]:
                getbuff += [a for a in proc.got]
            putbuff = [a for a in reqput if \
                        ((a in postthebuff) or (a in getbuff) ) ]
            drpbuff = [a for a in reqput if \
                        ((a not in putbuff) and (a not in postputbuff) ) ]
            #self.log("logput")
            self.__log_all({'get': getbuff, \
                            'put': putbuff, \
                            'drp': drpbuff} )
        else:
            rval = Store._put(self, arg)
        # fudge event queue to "de-prior" put event
        numnew = len(self.sim._timestamps) - nevents
        if (0<numnew):
            # process put
            p = heapq.nsmallest(1, [x for x in self.sim._timestamps if (x[0]==self.sim._t)])
            putevt = p[0]
            pt = putevt[0]
            psortpr = putevt[1]
            assert (pt==self.sim._t), "[QUEUE]: _put(), put(at, sim._t) = (%s, %s)"%(pt, self.sim._t)
            assert (psortpr==self.sim._sortpr)
            if 1<numnew:
                glargest = heapq.nlargest(numnew-1, [x for x in self.sim._timestamps if (x[0]==self.sim._t)])
                for k in range(numnew-1):
                    g, krev = glargest[k], (numnew-2-k)
                    # process put with get commands
                    getevt, gt, gsortpr = g, g[0], g[1]
                    assert (gt==self.sim._t), "[QUEUE]: _put(), get(at, sim._t) = (%s, %s)"%(gt, self.sim._t)
                    assert (abs(psortpr+krev+1)==abs(gsortpr)), "getpr = %s, putpr = %s"%(p, g)
                    # set new get pointer and make non-prior
                    getevt[0] = gt + const.EPSILON
                    getevt[1] = abs(gsortpr) + 1
            putevt[1] = abs(psortpr) - (numnew - 1)
        else:
            assert False, "[QUEUE]: _put found unexpected number of events!"
        return rval

    def __log_all(self, buf):
        """Private method to log all get, put, and drop events.

        :param buf: Dictionary of buffers corresponding to each `Queue` event.
        
        Use appropriate callback to execute logging of buffers in `buf`.
        """
        callback = {'put':self.log_insert, \
                    'get':self.log_remove, \
                    'drp':self.log_drop}
        for key,val in buf.items():
            if key in callback:
                for x in val: callback[key](x)

    def log_insert(self, p):
        """Log insert or enqueue event."""
        pargs = {'qlen': self.length}
        if self.verbose>QUEUE_HIGH_VERBOSE: self.log("+", p, **pargs)
        if self.monitorQ: self.enQ.signal(p)

    def log_remove(self, p):
        """Log remove or dequeue event."""
        pargs = {'qlen': self.length}
        if self.verbose>QUEUE_HIGH_VERBOSE: self.log("-", p, **pargs)
        if self.monitorQ: self.deQ.signal(p)

    def log_drop(self, p):
        """Log drop event."""
        pargs = {'qlen': self.length}
        if self.verbose>QUEUE_LOW_VERBOSE: self.log("drp", p, **pargs)
        if self.monitorQ: self.drp.signal(p)

    def addSort(self, sortFunc):
        """
        Overload `Store.addSort()` for priority queueing.
        
        :param sortFunc: Sort method to reorder objects.
        
        `sortFunc` should take two arguments, namely `Queue` and a list
        containing the objects currently buffered by the `Queue`. When operating
        in `priority` mode, `sortFunc` will receive a priority buffer, that is, a
        list of 2-tuples of (*object*, *prioritylevel*).
        """
        return Store.addSort(self, \
                lambda me, buff: me.__callsort(sortFunc, buff) )

    def __callsort(self, callback, buff):
        """Private method to call sorting methods set by `Store.addSort()`;
        enforce priority sorting before calling the auxiliary sort method."""
        global _prioritySort, _PriorityList
        if callable(callback) and self.priority:
            # sort buff -> call callback
            cbuff = callback(self, _prioritySort(self, buff) )
            isPrio = all([isinstance(s, tuple) for s in cbuff] )
            # make into _PriorityList again
            if isPrio:
                priobuff = _PriorityList(cbuff)
            else:
                priobuff = _PriorityList()
                noprio_buff = [a for a,b in buff]
                for p in cbuff:
                    if p in noprio_buff:
                        prio = buff[noprio_buff.index(p)][1]
                        priobuff.append((p,prio) )
            # re-sort for priority and return
            return _prioritySort(self, priobuff)
        elif callable(callback) and (not self.priority):
            # non-priority callback
            return callback(self, buff)
        elif self.priority:
            # sort for priority
            return _prioritySort(self, buff)
        else:
            # don't do anything
            return buff

    def __len__(self):
        """Allows Python len() function to be used on `Queue`."""
        return self.length
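
# --- Illustrative usage sketches (not part of the original queue.py) ---
# The snippets below assume `q` is a Queue and `fsm` is an FSM (or SimPy
# Process), as in the docstrings above; the `_example_*` names are hypothetical.

def _example_remove_with_timeout(fsm, q):
    """Sketch: dequeue one item, or renege after a 10-unit timeout."""
    yield q.remove(fsm, 1, renege=10.0)
    if fsm.acquired(q):
        pkt = fsm.got[0]    # removed an item before the timeout
    else:
        pkt = None          # reneged; nothing was removed

def _example_priority_insert(fsm, q, pkt):
    """Sketch: insert `pkt` at priority level 5 into a priority Queue."""
    yield q.insert(fsm, [pkt], prio=5)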
Example #13
File: csphy.py  Project: reidlindsay/wins
class CSPHY(PHY):
    """Base class for implementing physical layer with carrier sense support,
    which is needed by `CSMAC` MAC protocols.

    The functionality of this class, which should be implemented by subclasses,
    includes the following:

        * `set_csbusy()` when the medium is "busy" (e.g. the received power
          level exceeds some threshold, a packet is detected, etc.)
        * `set_csidle()` when the medium becomes "idle" (e.g. there are no more
          packets being received, the received power falls below some threshold
          for some time, etc.)

    :CVariables:
     * `csmode`: Property to access current carrier sense state. Use
       `set_csbusy()`/`set_csidle()` to modify.
     * `isbusy`: Property to determine if current `csmode` is `CSPHY_BUSY`.
     * `isidle`: Property to determine if current `csmode` is `CSPHY_IDLE`.

    :IVariables:
     * `csbusy`: SimEvent signalled if carrier sense state transitions to busy.
     * `csidle`: SimEvent signalled if carrier sense state transitions to idle.
     * `__csmode`: Private variable to maintain carrier sense state; initialized
       to `CSPHY_IDLE`. Use `set_csbusy()`/`set_csidle()` to modify.

    :note: This element has `csmode` initialized to `CSPHY_IDLE`.
    """
    name = "carrier sense PHY"
    tracename = "CSPHY"
    def __init__(self, **kwargs):
        """Constructor."""
        self.__csmode = CSPHY_IDLE
        self.csbusy = SimEvent(name="csbusy")
        self.csidle = SimEvent(name="csidle")
        PHY.__init__(self, **kwargs)
        # rename events
        self.csbusy.name = "%s(%s).csbusy"%(self.name, self.uid)
        self.csidle.name = "%s(%s).csidle"%(self.name, self.uid)
        # monitor events -> keep up to date
        monitor_events(self.csbusy, self.csidle)

    csmode = property(fget=lambda self: self.__csmode)
    isbusy = property(fget=lambda self: (self.csmode == CSPHY_BUSY) )
    isidle = property(fget=lambda self: (self.csmode == CSPHY_IDLE) )

    def set_csbusy(self, *args, **kwargs):
        """Transition to busy state.

        :param args: Additional arguments passed when signalling `csbusy`.
        :param kwargs: Additional keywords used with `log()`.

        The SimEvent `csbusy` will only be signalled if there is a `state`
        transition from idle to busy.
        """
        ostate = self.csmode
        self.__csmode = CSPHY_BUSY
        if (ostate != self.csmode):
            # IDLE -> BUSY!
            if self.verbose>CSPHY_VERBOSE: self.log(self.csmode,*args,**kwargs)
            self.csbusy.signal(*args)

    def set_csidle(self, *args, **kwargs):
        """Transition to idle state.

        :param args: Additional arguments passed when signalling `csidle`.
        :param kwargs: Additional keywords used with `log()`.

        The SimEvent `csidle` will only be signalled if there is a `csmode`
        transition from busy to idle.
        """
        ostate = self.csmode
        self.__csmode = CSPHY_IDLE
        if (ostate != self.csmode):
            # BUSY -> IDLE!
            if self.verbose>CSPHY_VERBOSE: self.log(self.csmode,*args,**kwargs)
            self.csidle.signal(*args)
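
# --- Illustrative sketch (not part of the original csphy.py) ---
# A hypothetical subclass showing how a concrete PHY might drive the carrier
# sense state from a received-power measurement; the threshold value and the
# method name `update_carrier_sense` are assumptions, not from the source.
class _ExampleCSPHY(CSPHY):
    csthreshold = -62.0     # dBm; assumed carrier-sense threshold
    def update_carrier_sense(self, rxpower):
        """Mark the medium busy or idle based on measured `rxpower` (in dBm)."""
        if rxpower > self.csthreshold:
            self.set_csbusy(rxpower)    # signals `csbusy` on an idle->busy edge
        else:
            self.set_csidle(rxpower)    # signals `csidle` on a busy->idle edge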
Example #14
File: dcf.py  Project: reidlindsay/wins
class DCF(CSMAC):
    """Distributed Control Function of IEEE 802.11 MAC protocol.

    DCF and Ports
    =============
    The DCF module has two downstream ports ('TXD' and 'RXD'). These ports are
    used to interact with the associated physical layer.

    CSMA/CA
    =======
    Carrier sense multiple access with collision avoidance (CSMA/CA) may be used
    for the IEEE 802.11 MAC protocol. This mode of operation will not use
    RTS/CTS reservation messages. Instead, after carrier sense backoff, the
    protocol will immediately send the data message. Use the boolean flag
    `usecsma` to enable this mode of operation.

    :CVariables:
     * `pifs`: Property to access point-coordination interframe spacing.
     * `difs`: Property to access distributed interframe spacing.
     * `acktimeout`: Property to access ACK timeout.
     * `ctstimeout`: Property to access CTS timeout.
     * `ackduration`: Property to access duration of ACK.
     * `ctsduration`: Property to access duration of CTS.

    :IVariables:
     * `sifs`: Short interframe spacing.
     * `slottime`: Duration of a contention slot.
     * `cwmin`: Minimum contention window size.
     * `cwmax`: Maximum contention window size.
     * `cslot`: Contention slot value.
     * `datatosend`: Packet currently being handled; `None` indicates not busy.
     * `retrycount`: Retry counter.
     * `retrylimit`: Maximum number of retries allowed.
     * `usecsma`: Boolean flag; if true, use CSMA/CA.
     * `rxdata`: SimEvent signalled when a packet arrives on `Port` 'RXD'.
     * `_ctsduration`: Internal member used to cache duration of CTS.
     * `_ackduration`: Internal member used to cache duration of ACK.

    :note: `DCF` only handles Ethernet packets. (i.e. `htype` must be
           `const.ARP_HTYPE_ETHERNET`).
    """
    name = "distributed coordination function"
    tracename = "DCF"
    cwmin = DOT11_CWMIN
    cwmax = DOT11_CWMAX
    retrylimit = DOT11_RETRYLIMIT
    def __init__(self, cwmin=None, cwmax=None, retrylimit=None, \
                       usecsma=False, **kwargs):
        """Constructor.

        :param cwmin: Minimum contention window size.
        :param cwmax: Maximum contention window size.
        :param retrylimit: Maximum number of retries allowed.
        :param usecsma: Boolean flag; if true, use CSMA/CA without RTS-CTS
                        reservation messages.
        :param kwargs: Additional keywords passed to `configure()`.

        The default parameters are specified by the class.
        """
        if cwmin is None: cwmin = self.__class__.cwmin
        if cwmax is None: cwmax = self.__class__.cwmax
        if retrylimit is None: retrylimit = self.__class__.retrylimit
        # timing parameters
        self.sifs, self.slottime = None, None
        self.cwmin, self.cwmax   = cwmin, cwmax
        self.cslot = None
        # events and other members
        self.datatosend = None
        self.retrycount = None
        self.retrylimit = retrylimit
        self.usecsma = usecsma
        self.rxdata = SimEvent(name=".rxdata")
        self._ctsduration = None
        self._ackduration = None
        # call CSMAC constructor
        CSMAC.__init__(self, **kwargs)
        self.rxdata.name = "%s.rxdata"%(self.name)

    pifs = property(fget=lambda self: self.sifs + self.slottime)
    difs = property(fget=lambda self: self.pifs + self.slottime)
    ctstimeout = property(fget=lambda self: self.get_ctstimeout() )
    acktimeout = property(fget=lambda self: self.get_acktimeout() )
    navbusy = property(fget=lambda self: self.nav.running)
    navidle = property(fget=lambda self: not self.navbusy)
    ctsduration = property(fget=lambda self: self.get_ctsduration() )
    ackduration = property(fget=lambda self: self.get_ackduration() )

    def configure(self, **kwargs):
        """Configure downstream ports and `FSM`."""
        CSMAC.configure(self, **kwargs)
        # add downstream and control ports
        self.addport("TXD", tracename=self.tracename+".TXD")
        self.addport("RXD", tracename=self.tracename+".RXD")
        self.addport("TXC", tracename=self.tracename+".TXC") # control port
        # create FSM to manage send/recv execution of DCF
        txfsm = self.newchild("txfsm", FSM, tracename=self.tracename+".TX")
        rxfsm = self.newchild("rxfsm", FSM, tracename=self.tracename+".RX")
        txfsm.goto(self.IDLE)
        rxfsm.goto(self.RECV)
        # set up timing parameters
        self.set_timing()
        nav = self.newchild("nav", NAVTimer, tracename=self.tracename+".NAV")

    def encapsulate(self, p, src=None, dst=None, **kwargs):
        """Convenience method to encapsulate an packet in an IEEE 802.11 data
        header (i.e. `Dot11Data`).

        :param p: Packet to encapsulate.
        :param src: Source address [default=`address`]
        :param dst: Destination address [default=`broadcast`]
        :param kwargs: Additional keywords passed to `Dot11Data` constructor.
        :return: Newly created `Dot11Data` packet.

        :note: This method adds/updates a CRC using `crcupdate()`.
        """
        if src is None: src = self.address
        if dst is None: dst = self.broadcast
        addr1, addr2 = dst, src
        pargs = {'addr1': addr1, 'addr2': addr2}
        kwargs.update(pargs)
        data = self.dot11data(**kwargs)
        data.add_payload(p)
        pkt = crcupdate(data)
        return pkt

    def retry(self, count=None):
        """Update retry count and set backoff parameters.

        :param count: If specified, `retrycount` will be set to this value;
                      otherwise, increment `retrycount`.

        Slot value `cslot` is set to a random integer in [0, CW), where the
        contention window CW is defined as:

            CW = min(CWmax, CWmin * 2^`retrycount`)
        """
        if count is None: count = self.retrycount + 1
        cwsize = min(self.cwmax, self.cwmin * (2**count) )
        self.cslot = random.randint(cwsize)
        self.retrycount = count
        # update RETRY flag if first retry
        if ((count==1) and self.isdot11data(self.datatosend)):
            pkt = self.get_dot11data(self.datatosend)
            dst = pkt.addr1
            errmsg = "[DCF]: Cannot retry() with broadcast packet!"
            assert (dst != self.broadcast), errmsg
            # update retry field
            pkt.FCfield |= DOT11_FC_RETRY
            self.datatosend = crcupdate(pkt)
        if count>0:
            self.log("retry%d"%(count), self.datatosend, retrycount=count, retrylimit=self.retrylimit)

    def IDLE(self, fsm):
        """IDLE state; reset parameters and check for new data from 'RXU'."""
        assert (self.htype==const.ARP_HTYPE_ETHERNET), \
                "[DCF]: Unsupported hardware type (%s)!"%(self.htype)
        # reset parameters and check RXU
        self.cslot = None
        self.datatosend = None
        self.retrycount = self.retrylimit + 1
        # csbusy -> go to RXBUSY
        if self.isbusy: yield fsm.goto(self.RXBUSY)
        yield self.RXU.recv(fsm, 1, \
                renege=(self.csbusy, self.csidle, self.rxdata) )
        # RXU -> new data to transmit -> go to TXDATA
        if fsm.acquired(self.RXU):
            assert (len(fsm.got)==1), \
                    "[DCF]: Error receiving from RXU in IDLE!"
            p = fsm.got[0]
            yield fsm.goto(self.TXDATA, p)
        # csbusy -> go to RXBUSY
        if (self.csbusy in fsm.eventsFired):
            p = self.csbusy.signalparam
            yield fsm.goto(self.RXBUSY)
        # rxdata -> go to RXPKT
        if (self.rxdata in fsm.eventsFired):
            p = self.rxdata.signalparam
            yield fsm.goto(self.RXPKT, p)
        # otherwise -> ignore
        ignore = self.csidle in fsm.eventsFired
        # continue in IDLE
        yield fsm.goto(self.IDLE)

    def TXDATA(self, fsm, pkt):
        """TXDATA state; initialize `datatosend` and associated parameters
        before transitioning to `BACKOFF`."""
        assert (self.datatosend is None), \
                "[DCF]: 'datatosend' already set in TXDATA!"
        assert (self.htype==const.ARP_HTYPE_ETHERNET), \
                "[DCF]: Unsupported hardware type (%s)!"%(self.htype)
        assert isinstance(pkt, Ether) and pkt.haslayer(Ether), \
                "[DCF]: Got non-Ether packet in TXDATA!"
        # process Ethernet frame
        eth = pkt[Ether]
        addr, src, dst = self.addr, eth.src, eth.dst
        pkt = crcupdate(eth)
        # initialize datatosend and other parameters
        self.datatosend = self.encapsulate(pkt, src=src, dst=dst)
        isbroadcast = (dst==self.broadcast)
        self.retry(count=0)
        if isbroadcast: self.retrycount = self.retrylimit
        self.datatosend.setanno('mac-txts', now())
        # go to BACKOFF
        yield fsm.goto(self.BACKOFF)

    def BACKOFF(self, fsm):
        """BACKOFF state; perform backoff operation."""
        assert self.isdot11data(self.datatosend), \
                "[DCF]: Cannot determine 'datatosend' in BACKOFF!"
        # retry limit exceeded -> DROP PACKET! -> go to IDLE
        if self.retrycount>self.retrylimit:
            self.log_drop(self.datatosend, drop="retry limit exceeded")
            pkt = self.datatosend.payload
            self.datatosend.remove_payload()
            p = crcremove(pkt)
            self.drpdata.signal(p)
            yield fsm.goto(self.IDLE)
        # csbusy -> go to RXBUSY
        if self.isbusy:
            yield fsm.goto(self.RXBUSY)
        # check for nav timer -> start nav backoff
        if self.navbusy: yield fsm.goto(self.NAVBACKOFF)
        # start backoff timer
        backoff = self.difs + self.cslot*self.slottime
        timer = self.newchild("backofftimer", Timer, backoff, start=True, \
                              tracename=self.tracename+".BACKOFF")
        self.log("BACKOFF", self.datatosend, backoff=time2usec(backoff), cslot=self.cslot)
        yield waitevent, fsm, (timer.done, timer.kill, self.csbusy, self.rxdata)
        csbusy = (self.csbusy in fsm.eventsFired)
        rxdata = (self.rxdata in fsm.eventsFired)
        # timer done -> go to TXRTS or TXBCAST
        if (timer.done in fsm.eventsFired):
            isbroadcast = (self.datatosend.addr1==self.broadcast)
            if isbroadcast:    ns = self.TXBCAST
            elif self.usecsma: ns = self.TXUCAST
            else:              ns = self.TXRTS
            yield fsm.goto(ns)
        # timer kill -> raise exception
        elif (timer.kill in fsm.eventsFired):
            raise RuntimeError, "[DCF]: Unexpected kill signal " + \
                                "from timer in BACKOFF!"
        # csbusy/rxdata -> halt timer -> update cslot -> go to RXBUSY/RXPKT
        elif csbusy or rxdata:
            yield timer.pause(fsm)
            if timer.timepassed>self.difs:
                rslot = int(timer.timeleft/self.slottime)
                self.cslot = rslot      # update cslot
            timer.halt()
            if rxdata:
                p = self.rxdata.signalparam
                yield fsm.goto(self.RXPKT, p)
            else:
                yield fsm.goto(self.RXBUSY)
        # otherwise -> raise error!
        else:
            raise RuntimeError, "[DCF]: Unexpected interruption in BACKOFF!"

    def NAVBACKOFF(self, fsm):
        """NAVBACKOFF state; defer access for NAV and virtual carrier sense."""
        nav = self.nav
        assert nav.running, "[DCF]: NAV timer not running in NAVBACKOFF!"
        yield waitevent, fsm, (nav.done, nav.kill, self.csbusy, self.rxdata)
        # nav done -> go to RESUME
        if (nav.done in fsm.eventsFired):
            yield fsm.goto(self.RESUME)
        # nav kill -> raise exception
        elif (nav.kill in fsm.eventsFired):
            raise RuntimeError, "[DCF]: Unexpected kill signal " + \
                                "from NAV timer in NAVBACKOFF!"
        # csbusy -> go to RXBUSY
        elif (self.csbusy in fsm.eventsFired):
            p = self.csbusy.signalparam
            yield fsm.goto(self.RXBUSY)
        # rxdata -> go to RXPKT
        elif (self.rxdata in fsm.eventsFired):
            p = self.rxdata.signalparam
            yield fsm.goto(self.RXPKT, p)
        # otherwise -> raise error!
        else:
            raise RuntimeError, "[DCF]: Interrupted in NAVBACKOFF!"
        # go to RESUME
        yield fsm.goto(self.RESUME)

    def TXBCAST(self, fsm):
        """TXBCAST state; broadcast `datatosend`."""
        assert self.isdot11data(self.datatosend), \
                "[DCF]: Cannot determine 'datatosend' in TXBCAST!"
        assert (self.datatosend.addr1==self.broadcast), \
                "[DCF]: Non-broadcast 'datatosend' in TXBCAST!"
        data = self.datatosend
        self.send_data(data)
        # send and hold for duration
        duration = self.duration(data)
        src, dst = data.addr2, data.addr1
        self.log_send(data, src=src, dst=dst, duration=time2usec(duration) )
        yield self.TXD.send(fsm, [data])
        yield hold, fsm, duration
        # go back to IDLE
        yield fsm.goto(self.IDLE)

    def TXRTS(self, fsm):
        """TXRTS state; send RTS for `datatosend`."""
        assert self.isdot11data(self.datatosend), \
                "[DCF]: Cannot determine 'datatosend' in TXRTS!"
        assert not (self.datatosend.addr1==self.broadcast), \
                "[DCF]: Cannot send broadcast 'datatosend' in TXRTS!"
        # create RTS
        src, dst = self.datatosend.addr2, self.datatosend.addr1
        rts = self.dot11rts(addr1=dst, addr2=src)
        if (self.retrycount>0):
            rts.FCfield |= DOT11_FC_RETRY
        # calculate NAV
        self.send_rts(rts, self.datatosend)
        nav = self.rtsnav(self.datatosend)
        rts.ID = nav
        pkt = crcupdate(rts)
        # send and hold for duration
        duration = self.duration(pkt)
        self.log_send(pkt, src=src, dst=dst, nav=nav, \
                      duration=time2usec(duration), retry=self.retrycount)
        yield self.TXD.send(fsm, [pkt])
        yield hold, fsm, duration
        # go to RXCTS
        yield fsm.goto(self.RXCTS)

    def RXCTS(self, fsm):
        """RXCTS state; wait for CTS response."""
        assert self.isdot11data(self.datatosend), \
                "[DCF]: Cannot determine 'datatosend' in RXCTS!"
        # start timeout timer
        timeout = self.ctstimeout
        timer = self.newchild("ctstimeout", Timer, timeout, start=True, \
                              tracename=self.tracename+".CTSTIMEOUT")
        yield waitevent, fsm, (self.rxdata, timer.done, timer.kill)
        # rxdata -> check for CTS
        if (self.rxdata in fsm.eventsFired):
            timer.halt()
            p, cts = self.rxdata.signalparam, None
            if self.isdot11cts(p):
                cts = self.get_dot11cts(p)
                addr, addr1 = self.address, cts.addr1
                crcerror = self.haserror(cts)
                # CTS for me -> update NAV -> pause for SIFS -> go to TXUCAST
                if (addr1==addr) and (not crcerror):
                    self.recv_cts(cts)     # process CTS
                    self.log_recv(cts, addr=addr, addr1=addr1, nav=cts.ID)
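                    # CTS NAV (cts.ID) is carried in microseconds; convert to seconds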
                    yield self.navupdate(fsm, cts.ID*1e-6)
                    yield hold, fsm, self.sifs
                    yield fsm.goto(self.TXUCAST)
            # otherwise -> retry -> go to RXPKT
            self.retry()
            yield fsm.goto(self.RXPKT, p)
        # timer done -> retry -> go to RESUME
        elif (timer.done in fsm.eventsFired):
            self.retry()
            yield fsm.goto(self.RESUME)
        # timer kill -> raise exception
        elif (timer.kill in fsm.eventsFired):
            raise RuntimeError, "[DCF]: Unexpected kill signal " + \
                                "from timer in RXCTS!"
        # otherwise -> raise error!
        else:
            raise RuntimeError, "[DCF]: Unexpected interruption in RXCTS!"

    def TXUCAST(self, fsm):
        """TXUCAST state; transmit unicast `datatosend` packet."""
        assert self.isdot11data(self.datatosend), \
                "[DCF]: Cannot determine 'datatosend' in TXUCAST!"
        assert not (self.datatosend.addr1==self.broadcast), \
                "[DCF]: Cannot send broadcast 'datatosend' in TXUCAST!"
        # FIXME: Assume all packet formatting has been done. No need to worry
        # about setting parameters or updating CRC.
        data = self.get_dot11data(self.datatosend)
        self.send_data(data)
        # send and hold for duration
        duration = self.duration(data)
        self.log_send(data, addr1=data.addr1, addr2=data.addr2)
        yield self.TXD.send(fsm, [data])
        yield hold, fsm, duration
        # go to RXACK
        yield fsm.goto(self.RXACK)

    def RXACK(self, fsm):
        """RXACK state; wait on ACK for `datatosend`."""
        assert self.isdot11data(self.datatosend), \
                "[DCF]: Cannot determine 'datatosend' in RXACK!"
        data = self.get_dot11data(self.datatosend)
        isbroadcast = (data.addr1==self.broadcast)
        assert not isbroadcast, \
                "[DCF]: Broadcast 'datatosend' cannot get ACK in RXACK!"
        # start timeout timer
        timeout = self.acktimeout
        timer = self.newchild("acktimeout", Timer, timeout, start=True, \
                              tracename=self.tracename+".ACKTIMEOUT")
        yield waitevent, fsm, (self.rxdata, timer.done, timer.kill)
        # rxdata -> check for ACK
        if (self.rxdata in fsm.eventsFired):
            timer.halt()
            p, ack = self.rxdata.signalparam, None
            if self.isdot11ack(p):
                ack = self.get_dot11ack(p)
                src, dst = data.addr2, data.addr1
                addr, addr1 = self.address, ack.addr1
                crcerror = self.haserror(ack)
                # ACK for me -> success -> go to IDLE
                if (addr1==src) and (not crcerror):
                    self.recv_ack(ack)     # process ACK
                    self.log_recv(ack, addr1=addr1, src=src, dst=dst)
                    yield fsm.goto(self.IDLE)
            # otherwise -> failure -> retry -> go to RXPKT
            self.retry()
            yield fsm.goto(self.RXPKT, p)
        # timer done -> timeout -> retry -> go to RESUME
        elif (timer.done in fsm.eventsFired):
            self.retry()
            yield fsm.goto(self.RESUME)
        # timer kill -> raise exception
        elif (timer.kill in fsm.eventsFired):
            raise RuntimeError, "[DCF]: Unexpected kill signal " + \
                                "from timer in RXACK!"
        # otherwise -> raise error!
        else:
            raise RuntimeError, "[DCF]: Unexpected interruption in RXACK!"
        return

    def RXBUSY(self, fsm):
        """RXBUSY state; check for `rxdata` and `csidle`."""
        yield waitevent, fsm, (self.rxdata, self.csidle)
        # rxdata -> go to RXPKT to classify
        if (self.rxdata in fsm.eventsFired):
            p = self.rxdata.signalparam
            yield fsm.goto(self.RXPKT, p)
        # csidle -> go back to RESUME
        elif (self.csidle in fsm.eventsFired):
            p = self.csidle.signalparam
            yield fsm.goto(self.RESUME)
        else:
            raise RuntimeError, \
                    "[DCF]: Unexpected interruption in RXBUSY!"
        return

    def RXPKT(self, fsm, pkt):
        """RXPKT state; classify incoming packets."""
        if (self.htype==const.ARP_HTYPE_ETHERNET):
            yield fsm.goto(self.RXETH, pkt)
        else:
            raise RuntimeError, "[DCF]: Unsupported hardware " + \
                    "type (%s) in RXPKT!"%(self.htype)

    def RXETH(self, fsm, pkt):
        """RXETH state; classify incoming Ethernet `pkt`.

        All packets that have an error will be dropped.
        """
        # drop packets with errors
        addr = self.address
        crcerror = self.haserror(pkt)
        if crcerror:
            self.log_drop(pkt, drop="CRC error")
            yield fsm.goto(self.RESUME)
        # classify Dot11 packet
        isdata = self.isdot11data(pkt)
        isrts  = self.isdot11rts(pkt)
        iscts  = self.isdot11cts(pkt)
        isack  = self.isdot11ack(pkt)
        if isdata:
            # receive DATA? -> go to RXDATA
            data = self.get_dot11data(pkt)
            yield fsm.goto(self.RXDATA, data)
        elif isrts:
            # receive RTS? -> go to RXRTS
            rts = self.get_dot11rts(pkt)
            yield fsm.goto(self.RXRTS, rts)
        elif iscts:
            # drop unsolicited CTS
            cts = self.get_dot11cts(pkt)
            addr1 = cts.addr1
            # update NAV -> log drop
            nav = cts.ID
            drop = "unsolicited CTS"
            self.log_drop(cts, addr=addr, addr1=addr1, drop=drop, nav=nav)
            yield self.navupdate(fsm, nav*1e-6)
            # process unsolicited CTS
            self.recv_cts(cts)
        elif isack:
            # unsolicited ack?
            ack = self.get_dot11ack(pkt)
            addr1, drop = ack.addr1, "unsolicited ack"
            self.log_drop(pkt, addr=addr, addr1=addr1, drop=drop)
            # process unsolicited ACK
            self.recv_ack(ack)
        else:
            raise RuntimeError, "[DCF]: Got unexpected message in RXETH!"
        # go to RESUME
        yield fsm.goto(self.RESUME)

    def RXDATA(self, fsm, pkt):
        """RXDATA state; process received DATA message."""
        crcerror = self.haserror(pkt)
        if crcerror:
            self.log_drop(pkt, drop="CRC error")
            yield fsm.goto(self.RESUME)
        assert self.isdot11data(pkt), "[DCF]: Cannot find DATA in RXDATA!"
        data = self.get_dot11data(pkt)
        isbroadcast = (data.addr1==self.broadcast)
        isforme     = (data.addr1==self.address)
        promiscuous = self.promiscuous
        src, dst    = data.addr2, data.addr1
        iseth =  isinstance(data, Packet) and data.haslayer(Ether)
        # isbroadcast or isforme -> send to RXU
        if (isforme or isbroadcast or promiscuous) and iseth:
            self.recv_data(data)
            eth = data[Ether]
            p = crcremove(eth)
            self.log_recv(data, src=src, dst=dst, promiscuous=promiscuous)
            data.remove_payload()
            yield self.TXU.send(fsm, [p])
            assert fsm.stored(self.TXU), \
                    "[DCF]: Error sending packet to 'TXU'!"
            if isforme: yield fsm.goto(self.TXACK, data)
        # non-Ethernet packets or not for me -> drop
        else:
            drop = "non-Ethernet packet"
            if not isforme: drop = "not for me"
            self.log_drop(pkt, src=src, dst=dst, drop=drop)
        # go to RESUME
        yield fsm.goto(self.RESUME)

    def TXACK(self, fsm, pkt):
        """TXACK state; transmit ACK message in response to `data`."""
        assert self.isdot11data(pkt), "[DCF]: Cannot find Dot11Data in TXACK!"
        data = self.get_dot11data(pkt)
        assert not (data.addr1==self.broadcast), \
                "[DCF]: Cannot send ACK for broadcast data!"
        addr1 = data.addr2
        ack = self.dot11ack(addr1=addr1)
        self.send_ack(ack)
        pkt = crcupdate(ack)
        # pause for SIFS
        yield hold, fsm, self.sifs
        # send and hold duration
        duration = self.duration(pkt)
        self.log_send(pkt, addr1=addr1, duration=duration)
        yield self.TXD.send(fsm, [pkt])
        yield hold, fsm, duration
        yield fsm.goto(self.RESUME)

    def RXRTS(self, fsm, pkt):
        """RXRTS state; update NAV and process RTS message."""
        crcerror = self.haserror(pkt)
        if crcerror:
            self.log_drop(pkt, drop="CRC error")
            yield fsm.goto(self.RESUME)
        assert self.isdot11rts(pkt), "[DCF]: Cannot find RTS in RXRTS!"
        addr, rts = self.address, self.get_dot11rts(pkt)
        addr1, addr2 = rts.addr1, rts.addr2
        isforme = (addr1==addr)
        # check NAV? -> update NAV or drop RTS
        nav = rts.ID
        navbusy = self.navbusy
        navidle = self.navidle
        # process incoming RTS
        self.recv_rts(rts)
        # NAV IDLE -> send CTS
        if (isforme and navidle):
            self.log_recv(rts, addr1=addr1, addr2=addr2, nav=nav)
            yield fsm.goto(self.TXCTS, rts)
        # NAV BUSY -> drop RTS
        elif (isforme and navbusy):
            drop = "NAV busy"
            self.log_drop(rts, addr1=addr1, addr2=addr2, drop=drop, nav=nav)
        # not for me -> drop RTS
        else:
            drop = "not for me"
            self.log_drop(rts,addr=addr,addr1=addr1,addr2=addr2,drop=drop)
            yield self.navupdate(fsm, nav*1e-6)
        # go to RESUME
        yield fsm.goto(self.RESUME)

    def TXCTS(self, fsm, rts):
        """TXCTS state; send CTS response message."""
        assert self.isdot11rts(rts), "[DCF]: Cannot find RTS in TXCTS!"
        addr, addr1 = self.address, rts.addr2
        # create CTS
        pkt = self.dot11cts(addr1=addr1)
        cts = crcupdate(pkt)
        # update nav
        self.send_cts(cts, rts)
        nav = self.ctsnav(rts)
        cts.ID = nav
        pkt = crcupdate(cts)
        # pause for SIFS
        yield hold, fsm, self.sifs
        # send and hold duration
        duration = self.duration(pkt)
        self.log_send(pkt, addr=addr, addr1=addr1, nav=nav)
        yield self.TXD.send(fsm, [pkt])
        yield hold, fsm, duration
        # set NAV and resume
        yield self.navupdate(fsm, nav*1e-6)
        yield fsm.goto(self.RESUME)

    def RESUME(self, fsm):
        """RESUME state; resume operation in `IDLE` or `BACKOFF`."""
        if self.datatosend:
            yield fsm.goto(self.BACKOFF)
        else:
            yield fsm.goto(self.IDLE)

    def RECV(self, fsm):
        """RECV state; check for receive data from downstream element."""
        yield self.RXD.recv(fsm, 1)
        assert fsm.acquired(self.RXD) and (len(fsm.got)==1), \
                "[DCF]: Error receiving from RXD in RECV state!"
        p = fsm.got[0]
        self.rxdata.signal(p)
        # continue in RECV
        yield fsm.goto(self.RECV)

    def send_rts(self, rts, data):
        """Additional processing for outgoing RTS."""
        errmsg = "[DCF]: Cannot process non-RTS in send_rts()!"
        assert self.isdot11rts(rts), errmsg
        errmsg = "[DCF]: Cannot process non-DATA in send_rts()!"
        assert self.isdot11data(data), errmsg
        rts.setanno('mac-root', str(data.traceid))
        if data.hasanno('net-root'):
            rts.setanno('net-root', data.getanno('net-root'))

    def recv_rts(self, rts):
        """Additional processing for incoming RTS."""
        errmsg = "[DCF]: Cannot process non-RTS in recv_rts()!"
        assert self.isdot11rts(rts), errmsg

    def send_cts(self, cts, rts):
        """Additional processing for outgoing CTS."""
        errmsg = "[DCF]: Cannot process non-CTS in send_cts()!"
        assert self.isdot11cts(cts), errmsg
        errmsg = "[DCF]: Cannot process non-RTS in send_cts()!"
        assert self.isdot11rts(rts), errmsg

    def recv_cts(self, cts):
        """Additional processing for incoming CTS."""
        errmsg = "[DCF]: Cannot process non-CTS in recv_cts()!"
        assert self.isdot11cts(cts), errmsg

    def send_data(self, data):
        """Additional processing for outgoing DATA."""
        errmsg = "[DCF]: Cannot process non-DATA in send_data()!"
        assert self.isdot11data(data), errmsg
        data.setanno('mac-root', str(data.traceid))

    def recv_data(self, data):
        """Additional processing for incoming DATA."""
        errmsg = "[DCF]: Cannot process non-DATA in recv_data()!"
        assert self.isdot11data(data), errmsg
        data.setanno('mac-rxts', now())

    def send_ack(self, ack):
        """Additional processing for outgoing ACK."""
        errmsg = "[DCF]: Cannot process non-ACK in send_ack()!"
        assert self.isdot11ack(ack), errmsg

    def recv_ack(self, ack):
        """Additional processing for incoming ACK."""
        errmsg = "[DCF]: Cannot process non-ACK in recv_ack()!"
        assert self.isdot11ack(ack), errmsg
        # Expecting ACK?
        if self.isdot11data(self.datatosend):
            data = self.get_dot11data(self.datatosend)
            dst, src = data.addr1, data.addr2
            # ACK for me? -> received ACK -> signal ackdata
            if (src==ack.addr1):
                pkt = self.datatosend.payload
                self.datatosend.remove_payload()
                p = crcremove(pkt)
                self.ackdata.signal(p)

    def rtsnav(self, p, *args, **kwargs):
        """Calculate NAV value for RTS given DATA `p`.

        :param p: DATA packet.
        :param args: Additional arguments passed to compute `duration` of DATA.
        :param kwargs: Keywords passed to compute `duration` of DATA.
        :return: Integer; representing NAV value.

        This method uses `duration()` to compute the NAV as follows:

            NAV = SIFS + CTS + SIFS + DATA + SIFS + ACK

        :note: It is assumed that `p` is a valid packet.
        """
        d = self.sifs + self.ctsduration
        d += self.sifs + self.duration(p, *args, **kwargs)
        d += self.sifs + self.ackduration
        nav = int(d*1e6)
        return nav

    def ctsnav(self, rts):
        """Calculate NAV value for CTS.

        :param rts: RTS packet.
        :return: Integer; representing NAV value.

        This method computes CTS NAV as follows:

            NAV = RTSNAV - SIFS - CTS

        :note: Assumes NAV for `rts` is greater than a SIFS + CTS duration.
        """
        errmsg = "[DCF]: Invalid non-RTS message in ctsnav()!"
        assert self.isdot11rts(rts), errmsg
        # compute CTS NAV
        nav = rts.ID - int((self.sifs+self.ctsduration)*1e6)
        return nav

    def navupdate(self, proc, t=None):
        """Update NAVTimer.

        :param proc: Process that blocks on NAV update.
        :param t: New timer value for virtual carrier sense busy.

        If `t` is None, this method will reset the NAVTimer `nav`.
        """
        return self.nav.update(proc, t)

    def get_ctstimeout(self):
        """Calculate timeout for CTS messages.

        CTSTIMEOUT = SIFS + CTS duration + SLOTTIME
        """
        timeout = self.sifs + self.ctsduration + self.slottime
        return timeout

    def get_acktimeout(self):
        """Calculate timeout for ACK messages.

        ACKTIMEOUT = SIFS + ACK duration + SLOTTIME
        """
        timeout = self.sifs + self.ackduration + self.slottime
        return timeout

    def get_ctsduration(self, force=False):
        """Calculate duration of CTS message.

        :param force: If true, ignore any cached value.
        """
        if force or (self._ctsduration is None):
            cts = self.dot11cts()
            pkt = crcupdate(cts)
            self._ctsduration = self.duration(pkt)
        return self._ctsduration

    def get_ackduration(self, force=False):
        """Calculate duration of ack message.

        :param force: If true, ignore any cached value.
        """
        if force or (self._ackduration is None):
            ack = self.dot11ack()
            pkt = crcupdate(ack)
            self._ackduration = self.duration(pkt)
        return self._ackduration

    def set_timing(self, phymode=None):
        """Set up timing parameters (e.g. `sifs`, `slottime`, etc.).

        :param phymode: Enumeration for physical layer mode of operation
                        [default=None].

        This method sets up timing parameters based on the `phymode` enumeration
        which enumerates the physical layer mode of operation.

        :note: This method is called from `configure()`.
        """
        m = DOT11A_PHY_MODE 
        if phymode is None:
            p = self.phy
            if isinstance(p, Dot11APHY):    m = DOT11A_PHY_MODE
            elif isinstance(p, Dot11NPHY):  m = DOT11N_PHY_MODE
        else:
            m = phymode
        # check PHY mode
        assert (m in DOT11_TIMING), \
                "[DCF]: Cannot set timing from invalid PHY mode (%s)!"(m)
        # set up timing
        self.sifs = DOT11_TIMING[m]['sifs']
        self.slottime = DOT11_TIMING[m]['slottime']

    def get_datarate(self, r):
        """Get the data rate in bits-per-second (bps).

        :param r: Rate enumeration.

        This method calls `get_datarate()` for `phy`.
        """
        return self.phy.get_datarate(r)


    def calclength(self, duration, rate):
        """Calculate length of packet in bytes.

        :param duration: Duration of packet in seconds.
        :param rate: Rate enumeration.

        This method calls `calclength()` for `phy`.
        """
        return self.phy.calclength(duration, rate)

    def isdot11data(self, p):
        """Check if packet is DATA; *overload as needed*."""
        return isdot11data(p)

    def isdot11rts(self, p):
        """Check if packet is RTS; *overload as needed*."""
        return isdot11rts(p)

    def isdot11cts(self, p):
        """Check if packet is CTS; *overload as needed*."""
        return isdot11cts(p)

    def isdot11ack(self, p):
        """Check if packet is ACK; *overload as needed*."""
        return isdot11ack(p)

    def get_dot11data(self, p):
        """Extract DATA from `p`; *overload as needed*."""
        return get_dot11data(p)

    def get_dot11rts(self, p):
        """Extract RTS from `p`; *overload as needed*."""
        return get_dot11rts(p)

    def get_dot11cts(self, p):
        """Extract CTS from `p`; *overload as needed*."""
        return get_dot11cts(p)

    def get_dot11ack(self, p):
        """Extract ACK from `p`; *overload as needed*."""
        return get_dot11ack(p)

    def dot11data(self, *args, **kwargs):
        """Create new `Dot11Data` packet."""
        return Dot11Data(*args, **kwargs)

    def dot11rts(self, *args, **kwargs):
        """Create new `Dot11RTS` packet."""
        return Dot11RTS(*args, **kwargs)

    def dot11cts(self, *args, **kwargs):
        """Create new `Dot11CTS` packet."""
        return Dot11CTS(*args, **kwargs)

    def dot11ack(self, *args, **kwargs):
        """Create new `Dot11ACK` packet."""
        return Dot11Ack(*args, **kwargs)

    def connect(self, p):
        """Overloaded to connect and call `set_phy()`."""
        self.set_phy(p)
        return CSMAC.connect(self, p)

    def log_send(self, p, *args, **kwargs):
        """Convenience method for logging send event."""
        if self.verbose>DOT11_VERBOSE:
            kwargs['addr'] = self.address
            kwargs['retrycount'] = self.retrycount
            kwargs['retrylimit'] = self.retrylimit
            if p.hasanno('cif-duration'):
                kwargs['cif-duration'] = time2usec(p.getanno('cif-duration') )
            if p.hasanno('phy-rate'):
                kwargs['phy-rate'] = p.getanno('phy-rate')
            self.log("snd", p, *args, **kwargs)

    def log_recv(self, p, *args, **kwargs):
        """Convenience method for logging receive event."""
        if self.verbose>DOT11_VERBOSE:
            kwargs['addr'] = self.address
            if p.hasanno('phy-sinr'):
                kwargs['phy-sinr'] = "%.4f dB"%(p.getanno('phy-sinr') )
            self.log("rcv", p, *args, **kwargs)

    def log_drop(self, p, *args, **kwargs):
        """Convenience method for logging drop event."""
        if self.verbose>DOT11_VERBOSE:
            kwargs['addr'] = self.address
            self.log("drp", p, *args, **kwargs)

    def log(self, event=None, p=None, *args, **kwargs):
        """Overloaded to check verbose level and set common annotations."""
        force = False
        if ('verbose' in kwargs): force = (kwargs['verbose']>DOT11_VERBOSE)
        if self.verbose>DOT11_VERBOSE or force:
            kwargs.update(self.get_dcf_anno(p))
            CSMAC.log(self, event, p, *args, **kwargs)

    def get_dcf_anno(self, p):
        """Convenience method to extract annotations and convert to strings."""
        kwargs = {}
        if not isinstance(p, Packet): return kwargs
        kwargs['addr'] = self.address
        if p.hasanno('cif-duration'):
            kwargs['cif-duration'] = time2usec(p.getanno('cif-duration') )
        if p.hasanno('phy-rate'):
            kwargs['phy-rate'] = p.getanno('phy-rate')
        if p.hasanno('phy-sinr'):
            kwargs['phy-sinr'] = "%.4f dB"%(p.getanno('phy-sinr') )
        if p.hasanno('net-root'):
            kwargs['net-root'] = p.getanno('net-root')
        if p.hasanno('mac-root'):
            kwargs['mac-root'] = p.getanno('mac-root')
        if p.hasanno('mac-txts'):
            kwargs['mac-txts'] = p.getanno('mac-txts')
        if p.hasanno('mac-rxts'):
            kwargs['mac-rxts'] = p.getanno('mac-rxts')
        return kwargs
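
# ---------------------------------------------------------------------------
# A minimal, standalone sketch of the timeout arithmetic documented in
# get_ctstimeout() / get_acktimeout() and the DOT11_TIMING lookup used by
# set_timing() above. The SIFS/slot values and the CTS/ACK durations below
# are assumptions chosen to resemble 802.11a; the real module takes them from
# its DOT11_TIMING table and from duration() of an encoded CTS/ACK frame.

EXAMPLE_TIMING = {'dot11a': {'sifs': 16e-6, 'slottime': 9e-6}}   # assumed values

def example_timeout(sifs, frame_duration, slottime):
    """Timeout = SIFS + frame duration + slot time."""
    return sifs + frame_duration + slottime

if __name__ == '__main__':
    t = EXAMPLE_TIMING['dot11a']
    assumed_ctsduration = 44e-6   # hypothetical CTS duration (seconds)
    assumed_ackduration = 44e-6   # hypothetical ACK duration (seconds)
    print "CTSTIMEOUT = %.1f us" % (1e6*example_timeout(t['sifs'], assumed_ctsduration, t['slottime']))
    print "ACKTIMEOUT = %.1f us" % (1e6*example_timeout(t['sifs'], assumed_ackduration, t['slottime']))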
Example #15
class Dot11APHY(CSPHY):
    """Implementation of IEEE 802.11a physical layer protocol.

    Modulation and Coding Parameters
    ================================

    The table below describes modulation and convolutional coding parameters for
    IEEE 802.11a.

    =========== ============= ============ ====== ======= =========
    Rate Index    Data Rate    Modulation   Nbps     M    Code rate
    ----------- ------------- ------------ ------ ------- ---------
         0          6 Mbps       BPSK         1      2        1/2
         1          9 Mbps       BPSK         1      2        3/4
         2         12 Mbps       QPSK         2      4        1/2
         3         18 Mbps       QPSK         2      4        3/4
         4         24 Mbps      16-QAM        4     16        1/2
         5         36 Mbps      16-QAM        4     16        3/4
         6         48 Mbps      64-QAM        6     64        2/3
         7         54 Mbps      64-QAM        6     64        3/4
    =========== ============= ============ ====== ======= =========

    :note: Rate index is used in Packets when referring to rate. Use
           `DOT11A_DATARATE` to convert this to a bitrate value.

    Dot11APHY and Ports
    ===================
    Every `Dot11APHY` has the following ports:

        1. "TXU" - sends decoded packets to an upstream element.
        #. "RXU" - receives traffic to send from an upstream element.
        #. "TXD" - sends encoded packets to a downstream element.
        #. "RXD" - receives traffic from a downstream element.

    The upstream element of a `Dot11APHY` is usually a `CSMAC` (or subclass),
    and the downstream element is usually a `ChannelInterface` (or subclass).

    Dot11APHY and Annotations
    =========================
    This class uses/sets the following annotations:

    ============== ==========================================================
    Name            Description
    ============== ==========================================================
    cif-txpower     Power of transmitted packets (in dBm).
    -------------- ----------------------------------------------------------
    rate            Rate index used to specify coding and modulation scheme.
    -------------- ----------------------------------------------------------
    cif-duration    Duration of packet is marked on outgoing packets using
                    the `duration()` method during `SEND`.
    -------------- ----------------------------------------------------------
    cif-collision   List of packets that arrived at the same time as the
                    current packet (i.e. collided with the marked packet).
    -------------- ----------------------------------------------------------
    dot11a-detect   Indicates whether or not the physical layer packet
                    detection was successful (see `framedetect()`).
    -------------- ----------------------------------------------------------
    dot11a-header   Indicates whether or not the physical layer header
                    decoding was successful (see `decode_header()`).
    -------------- ----------------------------------------------------------
    dot11a-per      Packet-error rate (PER) modeled by the physical layer
                    (the `decode_header()` will initially set this, but
                    `decode_data()` will overwrite it as necessary).
    ============== ==========================================================

    :IVariables:
     * `detect`: Internal SimEvent signalled when a packet is detected.
     * `mod`: `MQAM`, child member for modulation.
     * `coder`: `RCPC`, child member for coding.
     * `detectdelay`: Duration required for packet detection.

    :CVariables:
     * `detectdelay`: Duration required for packet detection.
    """

    name = "IEEE 802.11a"
    tracename = "80211A"
    detectdelay = DOT11A_TDETECT

    def __init__(self, **kwargs):
        """Constructor."""
        self.detect = SimEvent(name=".detect")
        self.detectdelay = None
        CSPHY.__init__(self, **kwargs)
        self.detect.name = "%s%s" % (self.name, ".detect")
        # monitor_events(self.detect)

    def configure(self, detectdelay=None, **kwargs):
        """Call `CSPHY.configure()`; add ports, `FSM`, and other members."""
        if detectdelay is None:
            self.detectdelay = self.__class__.detectdelay
        else:
            self.detectdelay = detectdelay
        CSPHY.configure(self, **kwargs)
        # add ports and FSM
        self.addport("RXU"), self.addport("TXU")
        self.addport("TXD"), self.addport("RXD")
        # create FSM to manage send/recv execution of phy
        txfsm = self.newchild("txfsm", FSM, tracename=self.tracename + ".TX")
        rxfsm = self.newchild("rxfsm", FSM, tracename=self.tracename + ".RX")
        txfsm.goto(self.SEND)
        rxfsm.goto(self.LISTEN)
        # set up other members
        mod = self.newchild("mod", MQAM, tracename=self.tracename + ".MQAM")
        coder = self.newchild("coder", RCPC, tracename=self.tracename + ".RCPC")

    def connect(self, radio):
        """Convenience method to connect PHY to an RF front end (i.e. `Radio`).

        **Overload this method to change how a connection is made.**

        :note: This method also calls `set_radio()` to set the appropriate
               pointer for the physical layer.
        """
        assert isinstance(radio, Radio), "[DOT11A]: Cannot connect to non-Radio!"
        self.set_radio(radio)
        self.TXD.connect(radio.getport("RXU"))
        radio.getport("TXU").connect(self.RXD)

    def duration(self, p, rate=None):
        """Calculate duration of packet `p` using `calcduration()` method.

        :param p: Packet to compute duration for (or packet length in bytes).
        :param rate: Optional rate index to denote modulation/coding scheme.
        :return: Duration of packet in seconds.

        This method checks for the 'phy-rate' annotation, which is a rate index
        to denote the coding and modulation scheme to be used. If a 'phy-rate'
        annotation is not found (or specified as a parameter), this method uses
        the base rate (i.e. rate 0) to calculate the duration of the waveform.
        """
        plen = p
        if isinstance(p, Packet):
            if p.hasanno("rate") and (rate is None):
                rate = p.getanno("rate")
            plen = len(p)
        if rate is None:
            rate = 0
        return self.calcduration(plen, rate)

    def calcper_header(self, p, **kwargs):
        """Calculate probability of error for header decoding.

        :param p: Packet being decoded.
        :param kwargs: Additional keywords arguments passed to `sinr_heap()`
                       (or `sinr()`).
        :return: PER for header decoding.

        This method sets the 'dot11a-sinr' and 'dot11a-per' annotations. The
        operation of this method depends on `DOT11A_USE_PIECEWISE_PER`.
        """
        for a in ["cif-rxts"]:
            errmsg = "[DOT11APHY]: calcper_header() cannot find " + "'%s' annotation!" % (a)
            assert ANNO.supports(p, a), errmsg
        # calculate PER using appropriate method
        plen = len(p.payload)
        if DOT11A_USE_PIECEWISE_PER:
            sinrheap = self.sinr_heap(p, **kwargs)
            t0 = p.getanno("cif-rxts") + DOT11A_TSHORT + DOT11A_TLONG
            t1 = t0 + DOT11A_TSIGNAL
            xheap = [(max(ta, t0), min(tb, t1), sinr) for (ta, tb, sinr) in sinrheap if (ta < t1) and (tb > t0)]
            errmsg = "[DOT11APHY]: Unable to find valid data from SINR heap!"
            assert len(xheap) > 0, errmsg
            # calculate piecewise PER and average SINR
            psuccess, stot = 1.0, 0.0
            for ta, tb, sinr in xheap:
                alpha = (tb - ta) / (t1 - t0)
                hlen = len(Dot11A()) * alpha
                stot += db2linear(sinr) * alpha
                psuccess *= 1.0 - self.calcper(hlen, 0, sinr)
            per = 1.0 - psuccess
            sinr = linear2db(stot)
        else:
            sinr, hlen = self.sinr(p, **kwargs), len(p) - plen
            # configure modulation and coding to calculate PER
            per = self.calcper(hlen, 0, sinr)
        # set annotations and return PER
        p.setanno("dot11a-sinr", sinr)
        p.setanno("dot11a-per", per)
        return per

    def calcper_data(self, p, **kwargs):
        """Calculate probability of error for data decoding.

        :param p: Packet being decoded.
        :param kwargs: Additional keywords arguments passed to `sinr_heap()`
                       (or `sinr()`).
        :return: PER for decoding packet payload.

        This method sets the 'dot11a-sinr' and 'dot11a-per' annotations. The
        operation of this method depends on `DOT11A_USE_PIECEWISE_PER`.
        """
        for a in ["cif-rxts", "cif-duration"]:
            errmsg = "[DOT11APHY]: calcper_data() cannot find " + "'%s' annotation!" % (a)
            assert ANNO.supports(p, a), errmsg
        # verify header parameters
        plen = len(p.payload)
        rate, length = p.rate, p.length
        assert 0 <= rate < len(DOT11A_DATARATE), "[DOT11A]: Invalid rate option (%s)!" % (rate)
        assert p.length == plen, "[DOT11A]: Header length reported " + "does not equal payload length; %s!=%s" % (
            p.length,
            plen,
        )
        # calculate PER using appropriate method
        if DOT11A_USE_PIECEWISE_PER:
            sinrheap = self.sinr_heap(p, **kwargs)
            t1 = p.getanno("cif-rxts") + p.getanno("cif-duration")
            t0 = t1 - self.calcnofdm(plen, rate) * DOT11A_TSYM
            xheap = [(max(ta, t0), min(tb, t1), sinr) for (ta, tb, sinr) in sinrheap if (ta < t1) and (tb > t0)]
            errmsg = "[DOT11APHY]: Unable to find valid data from SINR heap!"
            assert len(xheap) > 0, errmsg
            # calculate piecewise PER and average SINR
            psuccess, stot = 1.0, 0.0
            for ta, tb, sinr in xheap:
                alpha = (tb - ta) / (t1 - t0)
                dlen = plen * alpha
                stot += db2linear(sinr) * alpha
                psuccess *= 1.0 - self.calcper(dlen, rate, sinr)
            per = 1.0 - psuccess
            sinr = linear2db(stot)
        else:
            # configure modulation and coding to calculate PER
            sinr, plen = self.sinr(p, **kwargs), length
            per = self.calcper(plen, rate, sinr)
        # set annotations and return PER
        p.setanno("dot11a-sinr", sinr)
        p.setanno("dot11a-per", per)
        return per

    def calcper(self, p, rate, sinr):
        """Calculate packet-error rate for given parameters.

        :param p: Packet or packet length in bytes.
        :param rate: Rate enumeration to indicate modulation and coding scheme.
        :param sinr: Signal-to-interference-and-noise ratio.
        """
        # configure modulation and coding to calculate PER
        assert 0 <= rate < len(DOT11A_DATARATE), "[DOT11A]: Invalid rate enumeration!"
        plen = p
        if isinstance(p, Packet):
            plen = len(p)
        mod, coder = self.mod, self.coder
        mod.mtype = DOT11A_MTYPE[rate]
        coder.rate = DOT11A_CODERATE[rate]
        uber = mod.ber(sinr)
        per = coder.per(plen, uber)
        return per

    def encode(self, p, rate=None, txpower=None):
        """Encapsulate MPDU in a `Dot11A` packet; set annotations.

        :param p: MPDU to encode.
        :param rate: Optional rate index to denote modulation/coding scheme.
        :param txpower: Optional transmit power (in dBm).
        :return: `Dot11A` packet with MPDU as payload.

        This method will make sure that the following annotations are set:
            * rate [default=0]
            * txpower [default=`DOT11A_MAXPOWER`]
            * duration [from `duration()`]

        If the `rate` parameter is not specified and no 'phy-rate' annotation is
        found in packet `p`, this method will use the base rate (i.e. zero) to
        encode packet `p`.

        If the `txpower` parameter is not specified and no 'cif-txpower'
        annotation is found in packet `p`, this method will use
        `DOT11A_MAXPOWER` as the default transmit power.
        """
        # check parameters
        if p.hasanno("phy-rate") and (rate is None):
            rate = p.getanno("phy-rate")
        if p.hasanno("cif-txpower") and (txpower is None):
            txpower = p.getanno("cif-txpower")
        if rate is None:
            rate = 0
        if txpower is None:
            txpower = DOT11A_MAXPOWER
        duration = self.duration(p, rate=rate)
        # set annotations
        p.setanno("phy-rate", rate)
        p.setanno("cif-txpower", txpower)
        p.setanno("cif-duration", duration)
        # encap in Dot11A
        length = len(p)
        w = Dot11A(rate=rate, length=length) / p
        return w

    def framedetect(self, p, thresh=None):
        """Apply packet detection model for detecting training sequence; based
        on signal-to-interference-and-noise ratio (SINR).

        :param p: `Dot11A` packet being received.
        :param thresh: Packet detection SINR threshold (in dB)
                       [default=`DOT11A_FDTHRESHOLD`].
        :return: Boolean flag; if true, packet detection was successful.

        This method checks to make sure `p` is a `Dot11A` packet, and that the
        received SINR is greater than the receiver detection threshold `thresh`.
        This method will also mark the 'dot11a-detect' annotation to indicate
        the success (or failure) of the frame detection.

        **Overload this method to change how frame detection works.**

        :note: If `p` is not a `Dot11A` packet, this method will set the
               'dot11a-detect' annotation to false and return false.
        """
        if not isinstance(p, Dot11A):
            if ANNO.supported(p):
                p.setanno("dot11a-detect", False)
            return False
        # check SINR
        if thresh is None:
            thresh = DOT11A_FDTHRESHOLD
        sinr = self.sinr(p)
        detect = True
        if sinr < thresh:
            detect = False
        # mark annotations
        p.setanno("dot11a-detect", detect)
        return detect

    def decode_header(self, p):
        """Apply physical layer model for header decoding.

        :param p: `Dot11A` packet being received.
        :return: Boolean flag; if true, header decoding was successful.

        This method uses `mod` and `coder` to determine the error
        characteristics of the header decoding process. The following conditions
        must be met in order to have successful header decoding:

            * packet `p` must be a `Dot11A` packet,
            * the 'dot11a-detect' annotation must be true,
            * the header decoding must succeed,
            * and all header parameters must be valid.
        
        This method marks the 'dot11a-header' annotation to mark success (or
        reason for failure) of header decoding, and sets the 'dot11a-per'
        annotation to indicate the probability of error in decoding the header.
        """
        header, danno = "", "dot11a-detect"
        isdot11a = isinstance(p, Dot11A)
        detected = isdot11a and p.hasanno(danno) and p.getanno(danno)
        if not (isdot11a and detected):
            if not isdot11a:
                header = "not Dot11A packet"
            if not detected:
                header = "not detected"
            p.setanno("dot11a-header", header)
            return False
        # check header parameters
        plen = len(p.payload)
        rate, length = p.rate, p.length
        okrate = 0 <= rate < len(DOT11A_DATARATE)
        oklen = p.length == plen
        okpar = self.parity(p)
        if not (okrate and oklen and okpar):
            header = "header parameters failed"
            p.setanno("dot11a-header", header)
            return False
        # decode header of Dot11A
        per = self.calcper_header(p, force=False)
        sinr = p.getanno("dot11a-sinr")
        # simulate packet header decoding errors
        header = "success"
        if random.uniform(0, 1) < per:
            header = "decoding failed"
        # mark annotations
        p.setanno("dot11a-header", header + " (SINR = %.2f dB)" % (sinr))
        p.setanno("dot11a-per", per)
        return header == "success"

    def decode(self, p):
        """Apply physical layer model for decoding a `Dot11A` packet.

        :param p: `Dot11A` packet to decode.
        :return: Decoded payload (or `None` if header decoding fails).

        This method uses `mod` and `coder` to simulate packet errors. This
        method decodes the `Dot11A` header and then decodes the payload. If
        `decode_header()` fails, this method will return `None` and skip payload
        decoding. Otherwise, this method will simulate errors and call
        `seterror()` if any packet errors occur.

        This method also overwrites the 'dot11a-per' annotation with the packet
        error probability for the payload.

        :note: This method will log a drop event if `decode_header()` fails.
        """
        header, hanno = "header failed", "dot11a-header"
        # update SINR (and SINR heap)
        sinr = self.sinr(p, force=True)
        if not self.decode_header(p):
            if p.hasanno(hanno):
                header = p.getanno(hanno)
            self.log_drop(p, header=header)
            return None
        # verify header decoding
        detectanno = "dot11a-detect"
        errmsg = "[DOT11APHY]: Cannot decode payload of non-Dot11A packet!"
        assert isinstance(p, Dot11A), errmsg
        errmsg = "[DOT11APHY]: Cannot decode payload that has not been detected!"
        assert p.hasanno(detectanno) and p.getanno(detectanno), errmsg
        # calculate PER
        per = self.calcper_data(p, force=False)
        sinr = p.getanno("dot11a-sinr")
        # simulate packet payload decoding errors
        error = False
        if random.uniform(0, 1) < per:
            error = True
        # mark annotations
        pkt = p.payload
        if error:
            self.seterror(pkt)
        pkt.setanno("dot11a-per", per)
        return pkt

    def sinr(self, p, **kwargs):
        """Calculate signal-to-interference-and-noise ratio (SINR).

        :param p: Packet to compute SINR for.
        :param kwargs: Additional keyword arguments passed to `sinr_heap()`.

        This method uses the 'rxpower', 'noisepower', and 'cif-collision'
        annotations to calculate the SINR of the received packet p.

        :note: This method sets the 'phy-sinr' annotation indicating the SINR (in dB).
        """
        for a in ["rxpower", "noisepower", "cif-collision"]:
            errmsg = "[DOT11APHY]: sinr() cannot find '%s' annotation!" % (a)
            assert ANNO.supports(p, a), errmsg
        # get SINR heap
        sinrheap = self.sinr_heap(p, **kwargs)
        minsinr = +inf
        # find minimum SINR
        for ta, tb, sinr in sinrheap:
            if sinr < minsinr:
                minsinr = sinr
        # set annotations
        p.setanno("phy-sinr", minsinr)
        return minsinr

    def parity(self, p):
        """Check parity of `Dot11A` packet header.

        :param p: `Dot11A` packet.
        :return: Boolean; true if parity check passes.

        :note: If `DOT11A_USEPARITY` is false, this method returns true.
        """
        assert isinstance(p, Dot11A), "[DOT11APHY]: Cannot check parity of non-Dot11A packet!"
        if not DOT11A_USEPARITY:
            return True
        return p.checkpar()

    def SEND(self, fsm):
        """SEND state; simulate encoding and send process.

        This state performs the following tasks:

            1. Get packet from 'RXU' port.
            2. Call `encode()` to generate waveform for outgoing packet.
            3. Mark 'cif-duration' annotation with value returned by `duration()`.
            4. Simulate `txproctime` for outgoing packet.
            5. Send outgoing waveform to 'TXD'.
            6. Simulate `duration` of waveform.
        """
        while fsm.active():
            yield self.RXU.recv(fsm, 1)
            assert fsm.acquired(self.RXU) and (
                len(fsm.got) == 1
            ), "[DOT11APHY]: Error receiving from RXU port in SEND()!"
            p = fsm.got[0]
            # simulate encoding and Tx processing time
            w = self.encode(p)
            duration = self.duration(p)
            if ANNO.supported(w):
                w.setanno("cif-duration", duration)
            yield hold, fsm, self.txproctime(p)
            # send waveform and simulate duration
            self.log_send(w, duration=time2usec(duration))
            yield self.TXD.send(fsm, [w])
            assert fsm.stored(self.TXD), "[DOT11APHY]: Error sending to TXD in SEND!"
            yield hold, fsm, duration
        return

    def LISTEN(self, fsm):
        """LISTEN state; monitor `radio` and manage packet detection."""
        r = self.radio
        assert isinstance(r, Radio), "[DOT11A]: Cannot find radio in LISTEN!"
        while fsm.active():
            # check rxenergy -> set csbusy?
            rxenergy = r.rxenergy()
            rxhigh = r.inreceive and (r.rxenergy() > DOT11A_CSTHRESHOLD)
            if rxhigh:
                self.set_csbusy(rxenergy="high, %.2f dBm" % (rxenergy), rxbuffer=[x.traceid for x in r.rxbuffer])
            else:
                self.set_csidle(rxenergy="%.2f dBm" % (rxenergy))
            # monitor events and RXD port
            yield self.RXD.recv(fsm, 1, renege=(r.rxdata, r.rxdone, r.txdata, r.txdone, self.detect))
            # RXD -> ignore incoming packets in LISTEN
            if fsm.acquired(self.RXD):
                assert len(fsm.got) == 1, (
                    "[DOT11A]: Received unexpected " + "number of packets from 'RXD' port in LISTEN state!"
                )
                p = fsm.got[0]
                self.log_drop(p, drop="not detected in LISTEN")
            # rxdata -> start DETECT thread
            if r.rxdata in fsm.eventsFired:
                p = r.rxdata.signalparam
                fname = "detect(%s)" % (p._id)
                ### XXX ####
                f = FSM()
                # f = self.newchild(fname, FSM, tracename=fname.upper() )
                f.goto(self.DETECT, p)
                f.start()
            # detect -> set csbusy -> goto DECODE
            if self.detect in fsm.eventsFired:
                p = self.detect.signalparam
                rxenergy = "%.2f dBm" % (r.rxenergy())
                sinr = "%.2f dB" % (self.sinr(p))
                self.set_csbusy(p, detect=True, rxenergy=rxenergy)
                danno = "dot11a-detect"
                errmsg = "[DOT11A]: Cannot find 'dot11a-detect' " + "annotation in detected packet!"
                assert ANNO.supports(p, danno) and p.getanno(danno), errmsg
                # yield hold, fsm, 0
                self.log("detect", p, rxenergy=rxenergy, sinr=sinr, rxbuffer=[x.traceid for x in r.rxbuffer])
                yield fsm.goto(self.DECODE, p)
            # ignore otherwise
            ignore = r.txdata in fsm.eventsFired
            ignore = ignore or (r.txdone in fsm.eventsFired)
            ignore = ignore or (r.rxdone in fsm.eventsFired)
            if ignore:
                pass
        return

    def DECODE(self, fsm, pkt):
        """DECODE state; monitor `radio` and manage packet decoding."""
        r = self.radio
        assert isinstance(r, Radio), "[DOT11A]: Cannot find radio in DECODE!"
        assert self.isbusy, "[DOT11A]: Carrier sense *not* busy in DECODE!"
        while fsm.active():
            # monitor events and RXD port
            yield self.RXD.recv(fsm, 1, renege=(r.rxdata, r.rxdone, r.txdata, r.txdone, self.detect))
            # receive pkt -> apply error model and forward to upper layers
            if fsm.acquired(self.RXD):
                assert len(fsm.got) == 1, (
                    "[DOT11A]: Received unexpected " + "number of packets from 'RXD' port in DECODE state!"
                )
                p = fsm.got[0]
                if p is pkt:
                    payload = self.decode(p)
                    if payload:
                        self.log_recv(p)
                        self.cleananno(p)  # replace/remove unwanted annotations
                        p.remove_payload()
                        yield self.TXU.send(fsm, [payload])
                        yield hold, fsm, const.EPSILON  # pause before resuming
                    pkt = None
            # rxdone received before RXD -> interface dropped packet?
            if r.rxdone in fsm.eventsFired:
                p = r.rxdone.signalparam
                if p is pkt:
                    qlen = self.RXD.length
                    drop = ANNO.supports(p, "cif-drp") and p.getanno("cif-drp")
                    errmsg = "[DOT11A]: Unexpected rxdone received in DECODE!"
                    assert (qlen == 0) and drop, errmsg
                    self.log_drop(p, drop="interface dropped packet")
                    pkt = None
            # rxdata -> drop new packets
            if r.rxdata in fsm.eventsFired:
                p = r.rxdata.signalparam
                assert p is not pkt, "[DOT11A]: Unexpected rxdata for pkt in DECODE!"
                self.log_drop(p, drop="ignore rxdata in DECODE")
            # detect -> drop detected packets
            if self.detect in fsm.eventsFired:
                p = self.detect.signalparam
                assert p is not pkt, "[DOT11A]: Unexpected detect for pkt in DECODE!"
                self.log_drop(p, drop="ignore detect in DECODE", decode=pkt.traceid)
            # ignore otherwise
            ignore = r.txdata in fsm.eventsFired
            ignore = ignore or (r.txdone in fsm.eventsFired)
            if ignore:
                pass
            # check if DECODE is done
            if pkt is None:
                yield fsm.goto(self.LISTEN)
        return

    def DETECT(self, fsm, pkt, thresh=None):
        """DETECT state; simulate physical layer frame detection.

        :param pkt: `Dot11A` packet being detected.
        :param thresh: SINR threshold used by `framedetect()`.

        This method signals the `detect` SimEvent when `pkt` is detected. Before
        calling `framedetect()`, this state will pause for the packet detection
        duration `detectdelay`.

        :note: Upon completion this state method sleeps the calling `FSM`.
        """
        r = self.radio
        assert isinstance(r, Radio), "[DOT11A]: Cannot find radio in DETECT!"
        assert pkt in r.rxbuffer, "[DOT11A]: Current pkt is not in radio rxbuffer!"
        yield hold, fsm, self.detectdelay
        if r.inreceive and self.framedetect(pkt, thresh=thresh):
            self.detect.signal(pkt)
        return

    def get_valid_rates(self):
        """Get list of rate enumerations supported by PHY.

        This method uses the PHY configuration to determine the list of valid
        rate enumerations supported by the PHY (i.e. `ntx`).
        """
        ntx = self.radio.ntx
        rates = range(len(DOT11A_DATARATE))
        return rates

    def get_datarate(self, r):
        """Get the data rate in bits-per-second (bps).

        :param r: Rate enumeration.

        This method gets data rate from `DOT11A_DATARATE`.
        """
        errmsg = "[DOT11APHY]: Invalid rate index (%s)!" % (r)
        assert 0 <= r < len(DOT11A_DATARATE), errmsg
        return DOT11A_DATARATE[r]

    def log_send(self, p, *args, **kwargs):
        """Convenience method for logging send event."""
        if self.verbose > DOT11A_VERBOSE:
            if isinstance(p, Dot11A):
                kwargs["phy-rate"] = p.rate
                kwargs["length"] = p.length
            if p.hasanno("cif-txpower"):
                kwargs["cif-txpower"] = "%.2f dBm" % (p.getanno("cif-txpower"))
            if p.hasanno("cif-duration"):
                kwargs["cif-duration"] = time2usec(p.getanno("cif-duration"))
            self.log("snd", p, *args, **kwargs)

    def log_recv(self, p, *args, **kwargs):
        """Convenience method for logging receive event."""
        if self.verbose > DOT11A_VERBOSE:
            if isinstance(p, Dot11A):
                kwargs["phy-rate"] = p.rate
                kwargs["length"] = p.length
            if p.hasanno("phy-sinr"):
                kwargs["phy-sinr"] = "%.2f dB" % (p.getanno("phy-sinr"))
            if p.hasanno("rxpower"):
                kwargs["rxpower"] = "%.2f dBm" % (p.getanno("rxpower"))
            if p.hasanno("noisepower"):
                kwargs["noisepower"] = "%.2f dBm" % (p.getanno("noisepower"))
            if p.hasanno("cif-duration"):
                kwargs["cif-duration"] = time2usec(p.getanno("cif-duration"))
            if p.hasanno("dot11a-per"):
                kwargs["dot11a-per"] = "%.5g" % (p.getanno("dot11a-per"))
            if p.hasanno("crcerror"):
                crcerror = p.getanno("crcerror")
                if crcerror:
                    kwargs["crc"] = "FAIL"
                else:
                    kwargs["crc"] = "OK"
            self.log("rcv", p, *args, **kwargs)

    def log_drop(self, p, *args, **kwargs):
        """Convenience method for logging drop event."""
        if self.verbose > DOT11A_VERBOSE:
            if isinstance(p, Dot11A):
                kwargs["phy-rate"] = p.rate
                kwargs["length"] = p.length
            if p.hasanno("phy-sinr"):
                kwargs["phy-sinr"] = "%.2f dBm" % (p.getanno("phy-sinr"))
            if p.hasanno("crcerror"):
                crcerror = p.getanno("crcerror")
                if crcerror:
                    kwargs["crc"] = "FAIL"
                else:
                    kwargs["crc"] = "OK"
            self.log("drp", p, *args, **kwargs)
        self.cleananno(p)

    def calcduration(self, plen, rate=None):
        """Calculate duration of a packet of length `plen`.

        :param plen: Packet length (in bytes).
        :param rate: Optional rate index to denote modulation/coding scheme.
        :return: Duration of packet in seconds.

        If `rate` is not specified this method will use the base rate (i.e. rate
        index 0) to calculate the duration of the waveform.
        """
        if rate is None:
            rate = 0
        # calculate duration
        nofdm = self.calcnofdm(plen, rate)  # number of data OFDM symbols
        d = DOT11A_TSHORT + DOT11A_TLONG + DOT11A_TSIGNAL + DOT11A_TSYM * nofdm
        return d

    def calcnofdm(self, plen, rate):
        """Calculate the number of OFDM symbols in the data payload of a packet.

        :param plen: Length of packet (in bytes).
        :param rate: Rate index to denote modulation/coding scheme.
        :return: Number of OFDM symbols in payload of 802.11n waveform.
        """
        # check packet length and rate
        assert not (plen < 0), "[DOT11APHY]: Cannot compute " + "duration of negative length packet!"
        assert 0 <= rate < len(DOT11A_NDBPS), "[DOT11APHY]: Invalid rate index (%s)!" % (rate)
        blen = 8 * plen
        nbits = 16 + blen + 6  # service bits + PSDU + tail-bits
        ndbps = DOT11A_NDBPS[rate]
        nofdm = ceil(1.0 * nbits / ndbps)
        return int(nofdm)
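
# ---------------------------------------------------------------------------
# A minimal, standalone sketch of the duration arithmetic in
# Dot11APHY.calcnofdm() and calcduration() above. The OFDM constants are
# assumptions based on standard 802.11a values (8 us short training, 8 us
# long training, 4 us SIGNAL field, 4 us per data symbol, 24 data bits per
# symbol at the 6 Mbps base rate); the real module reads them from the
# DOT11A_* constants.
from math import ceil

ASSUMED_TSHORT, ASSUMED_TLONG = 8e-6, 8e-6
ASSUMED_TSIGNAL, ASSUMED_TSYM = 4e-6, 4e-6
ASSUMED_NDBPS_BASE = 24          # data bits per OFDM symbol at rate index 0

def example_duration(plen):
    """Duration (seconds) of a plen-byte packet at the assumed base rate."""
    nbits = 16 + 8*plen + 6                          # SERVICE + PSDU + tail bits
    nofdm = int(ceil(1.0*nbits/ASSUMED_NDBPS_BASE))  # number of data OFDM symbols
    return ASSUMED_TSHORT + ASSUMED_TLONG + ASSUMED_TSIGNAL + ASSUMED_TSYM*nofdm

if __name__ == '__main__':
    # 100-byte payload -> 16 + 800 + 6 = 822 bits -> ceil(822/24) = 35 symbols
    # -> 8 + 8 + 4 + 35*4 = 160 us
    print "duration(100 bytes) = %.1f us" % (1e6*example_duration(100))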
class Cache(Process):
    
    WHEN_FIELD = 0
    ACTION_FIELD = 1 # what to do
    RECORD_FIELD = 2
    
    EVENT_KNOWN_ANSWER = "add_to_known_answer_suppression"
    EVENT_RENEW = "try_to_renew"
    EVENT_FLUSH = "flush_record"
    
    def __init__(self, sim, record_observer=None):
        super(Cache, self).__init__(sim=sim)
        self.record_observer = record_observer
        self.__new_record_cached = SimEvent(name="new_record_cached", sim=sim)
        self._random = Random()
        self.pending_events = [] # tuples with the form (when, action, record)
        self.records = [] # cached records
    
    def get_known_answers(self):
        known_answers = []
        for record in self.records:
            found = False
            for event in self.pending_events:
                if event[Cache.ACTION_FIELD] == Cache.EVENT_KNOWN_ANSWER and event[Cache.RECORD_FIELD] == record:
                    found = True
                    break
            if found:
                known_answers.append( deepcopy(record) )
        return known_answers
    
    def _delete_events_for_record(self, record):
        to_delete = []
        for event in self.pending_events:
            if event[Cache.RECORD_FIELD] == record:
                to_delete.append(event)
        
        for event in to_delete:
            self.pending_events.remove(event)
    
    def _get_time_after_percentage(self, ttl, percentage):
        """Percentage example: 0.45 (means 45%)"""
        # remember that ttl is measured in seconds and simulation time in ms!
        return self.sim.now() + ttl * 1000 * percentage 
    
    def _create_new_events(self, record):
        ttl = record.ttl
        
        # at 1/2 of the TTL => does not add to known answer suppression
        when = self._get_time_after_percentage(ttl, 0.5)
        self.pending_events.append( (when, Cache.EVENT_KNOWN_ANSWER, record) )
        
        # section 5.2, http://tools.ietf.org/html/draft-cheshire-dnsext-multicastdns-15
        # at 80%, 85%, 90% and 95% of the TTL => try to renew the record
        for percentage in ( 80, 85, 90, 95 ):
            percentage_with_variation = (percentage + self._random.random()*2) / 100.0 # 2% of variation
            when = self._get_time_after_percentage(ttl, percentage_with_variation)
            self.pending_events.append( (when, Cache.EVENT_RENEW, record) )
    
    def cache_record(self, record):
        self._delete_events_for_record(record)
        self._create_new_events(record)
        
        if record in self.records:
            self.records.remove(record) # remove the previously cached copy (records compare equal)
        self.records.append(record)
        
        # sort by the first element of each tuple (the scheduled time)
        self.pending_events.sort(key=lambda tup: tup[0])
        
        # wake up wait_for_next_event method
        self.__new_record_cached.signal()
    
    def flush_all(self):
        del self.records[:]
        del self.pending_events[:]
    
    # Inspired by RequestInstance class
    def wait_for_next_event(self):
        while True:
            
            if not self.pending_events: # if it's empty...
                yield waitevent, self, (self.__new_record_cached,)
            else:
                next_event = self.pending_events[0]
                
                if self.sim.now() < next_event[Cache.WHEN_FIELD]:
                    twait = next_event[Cache.WHEN_FIELD]-self.sim.now()
                    self.timer = Timer(waitUntil=twait, sim=self.sim)
                    self.timer.event.name = "sleep_until_next_event"
                    self.sim.activate(self.timer, self.timer.wait())
                    yield waitevent, self, (self.timer.event, self.__new_record_cached,)
                else:
                    del self.pending_events[0] # action will be taken
                    
                    if next_event[Cache.ACTION_FIELD] == Cache.EVENT_FLUSH:
                        # delete old record
                        self.records.remove( next_event[Cache.RECORD_FIELD] )
                        
                    elif next_event[Cache.ACTION_FIELD] == Cache.EVENT_RENEW:
                        if self.record_observer is not None:
                            self.record_observer.renew_record( next_event[Cache.RECORD_FIELD] )
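
# ---------------------------------------------------------------------------
# A minimal, standalone sketch of the event schedule that
# Cache._create_new_events() builds for a cached record: one known-answer
# suppression event at 50% of the TTL, plus renewal attempts at roughly
# 80/85/90/95% of the TTL with up to 2% of random variation. TTLs are in
# seconds while simulation time is in milliseconds. The 'now' and 'ttl'
# values below are assumptions for the example.
from random import Random

def example_schedule(now, ttl, rng=None):
    rng = rng or Random(0)
    events = [(now + ttl*1000*0.5, 'add_to_known_answer_suppression')]
    for percentage in (80, 85, 90, 95):
        p = (percentage + rng.random()*2) / 100.0   # 2% of variation
        events.append((now + ttl*1000*p, 'try_to_renew'))
    return sorted(events)

if __name__ == '__main__':
    for when, action in example_schedule(now=0.0, ttl=120):   # 120 s TTL
        print "%10.1f ms  %s" % (when, action)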
class Responder(Process):
    
    ANSWER_AT_FIELD = 0
    QUERY_FIELD = 1
    
    def __init__(self, sim, sender=None):
        super(Responder, self).__init__(sim=sim)
        self._random = Random()
        self.__new_query_queued = SimEvent(name="new_query_queued", sim=sim)
        self.local_records = {} # key: record, value: last time advertised
        self.queued_queries = [] # tuple: ( when to answer, query )
        self.sender = sender
    
    def record_changes(self, record):
        for old_record in self.local_records.iterkeys():
            if record == old_record:
                return record.have_data_changed(old_record)
        return False # if that record didn't exist before
    
    def write_record(self, record):
        if record in self.local_records:
            if self.record_changes(record):
                # "Whenever a host has a resource record with new data"                
                # record is equal to a key (__eq__()==True), but has outdated data
                del self.local_records[record] # remove old key with outdated data
                # set new key
                self.local_records[record] = -1 # -1 => next time it should be sent using multicast (because it is an announcement)
                self.announce(record)
        else:
            # "Whenever a host has a resource record with new data"
            self.local_records[record] = -1
            self.announce(record)
    
    def something_happened(self):
        # Whenever there might potentially be new data (e.g. after rebooting, waking from
        # sleep, connecting to a new network link, changing IP address, etc.)
        for record in self.local_records.iterkeys():
            self.announce(record)
    
    # 10.2 Announcements to Flush Outdated Cache Entries
    def announce(self, announced_record):
        # TODO optimize to announce more than one? is that possible according to the standard?
        
        # Generating a fake query which will never be sent
        sq = SubQuery(announced_record.name, announced_record.type)
        # They may know my other records, I'm just announcing one
        known_answers = [record for record in self.local_records if record!=announced_record]
        q = Query( queries = [sq,], known_answers = known_answers )
        
        # a little trick here: we queue a fake query which will result in a response
        # containing the record we want to announce
        self.queue_query(q)
    
    def queue_query(self, query):
        # TODO optimization:
        # if the query is already planned, don't answer twice in such a short period of time
        # if an answer for the same query was answered in the last 1000 ms, wait for the response
        
        if query.response_is_unique():
            # if the response is unique, answer within 10 ms
            when = self.sim.now() + self._random.random() * 10
            self.queued_queries.append( (when, query) )
        else:
            # delay between 20 and 120 ms
            when = self.sim.now() + 20 + self._random.random() * 100
            self.queued_queries.append( (when, query) )
        
        # sort by the first element of each tuple (the time to answer)
        self.queued_queries.sort(key=lambda tup: tup[0])
        
        # wake up wait_for_next_event method
        self.__new_query_queued.signal()
    
    def answer(self):
        while True:
            if not self.queued_queries: # if it's empty...
                yield waitevent, self, (self.__new_query_queued,)
            else:
                next_query = self.queued_queries[0]
                
                if self.sim.now() < next_query[Responder.ANSWER_AT_FIELD]:
                    twait = next_query[Responder.ANSWER_AT_FIELD] - self.sim.now()
                    self.timer = Timer(waitUntil=twait, sim=self.sim)
                    self.timer.event.name = "sleep_until_next_query"
                    self.sim.activate(self.timer, self.timer.wait())
                    yield waitevent, self, (self.timer.event, self.__new_query_queued,)
                else:
                    del self.queued_queries[0] # query will be processed
                    self.process_query( next_query[Responder.QUERY_FIELD] )
    
    def process_query(self, query):
        answers = self._get_possible_answers(query)
        self._suppress_known_answers(query, answers)
        if len(answers)>0: # avoid sending empty UDP messages!
            self._send_using_proper_method(query, answers)
    
    def _get_possible_answers(self, query):
        answers = []
        for subquery in query.queries:
            for record in self.local_records.iterkeys():
                if subquery.record_type == "PTR": # special queries in DNS-SD!
                    
                    if subquery.name == "_services._dns-sd._udp.local":
                        answers.append( deepcopy(record) ) # all of the records
                    elif record.name.endswith(subquery.name):
                        answers.append( deepcopy(record) ) # all of the records
                
                elif subquery.name == record.name and subquery.record_type == record.type:
                    answers.append( deepcopy(record) )
        return answers
    
    def _suppress_known_answers(self, query, answers):
        # Section 7.1.  Known-Answer Suppression
        for known_answer in query.known_answers:
            # iterate over a copy so removals from 'answers' are safe
            for record in list(answers):
                if known_answer.name == record.name and known_answer.type == record.type:
                    answers.remove(record)
    
    def _send_using_proper_method(self, query, answers):
        unicast = query.question_type == "QU" # unicast question type
        
        # See 5.4 Questions Requesting Unicast Responses
        if unicast:
            # even if it was marked as unicast, it can be sent as multicast
            for record in answers:
                threshold_time = record.ttl * 1000 * 0.25 # ttl is measured in seconds and simulation time in ms!
                last_time_sent = self.local_records[record]
                now = self.sim.now()
                
                if last_time_sent==-1: # never sent before
                    unicast = False
                    self.local_records[record] = now
                elif ( now - last_time_sent ) > threshold_time:
                    unicast = False # not recently advertised, send using multicast
                    self.local_records[record] = now
        
        
        if unicast:
            self.sender.send_unicast( query.to_node, DNSPacket(ttype=DNSPacket.TYPE_RESPONSE, data=answers) )
        else:
            self.sender.send_multicast( DNSPacket(ttype=DNSPacket.TYPE_RESPONSE, data=answers) )
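
# ---------------------------------------------------------------------------
# A minimal, standalone sketch of two Responder policies above: known-answer
# suppression (Section 7.1) and the "QU" unicast-vs-multicast decision
# (Section 5.4), where a record marked for a unicast reply is still sent by
# multicast if it was never advertised or was last advertised more than 25%
# of its TTL ago. The (name, type, ttl) tuples below are assumptions that
# stand in for the real record objects.

def example_suppress(answers, known_answers):
    """Drop answers the querier already reported knowing (matched by name/type)."""
    known = set((name, rtype) for (name, rtype, _) in known_answers)
    return [a for a in answers if (a[0], a[1]) not in known]

def example_should_multicast(now_ms, last_sent_ms, ttl_s):
    """True if the record was never sent, or not sent within 25% of its TTL."""
    if last_sent_ms == -1:
        return True
    return (now_ms - last_sent_ms) > ttl_s * 1000 * 0.25

if __name__ == '__main__':
    answers = [("printer._ipp._tcp.local", "PTR", 120), ("host.local", "A", 120)]
    known = [("host.local", "A", 120)]
    print example_suppress(answers, known)           # only the PTR record remains
    print example_should_multicast(40000, -1, 120)   # never advertised -> True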
class RequestInstance(Process): # TODO rename to something more meaningful such as RequestSender
    """ This class performs an HTTP request in SimPy """
    
    ReqIdGenerator = 0
    
    def __init__(self, actionNode, destinationNodes, url, data=None, waitUntil=10000.0, name="request", sim=None):
        super(RequestInstance, self).__init__(name=name, sim=sim)
        self.name += " (from=%s, url=%s)"%(actionNode.name, url)
        self.__actionNode = weakref.proxy(actionNode) #weakref.ref(actionNode)
        self.__destinationNodes = weakref.WeakSet(destinationNodes) # tuple with all the nodes to be requested
        self.url = url # accessible
        self.__data = data
        
        self.requestInit = {} # requestInit[reqId1] = now(), requestInit[reqId2] = now()
        self.responses = [] # accessible
        self.__maxWaitingTime = waitUntil
        self.nodeNamesByReqId = {} # used in the gossiping mechanism with the gossiping requests
        
        self.__newResponseReceived = SimEvent(name="request_response_for_%s"%(self.name), sim=sim)
        self.__observers = weakref.WeakSet()
    
        
    def startup(self):
        t_init = self.sim.now()
        
        for node in self.__destinationNodes:
            # already removed from the list prior to calling to this method, but just in case...
            if node is not self.__actionNode:
                reqId = RequestInstance.ReqIdGenerator
                RequestInstance.ReqIdGenerator += 1
                
                request = HttpRequest(reqId, self.url, data=self.__data)
                self.nodeNamesByReqId[reqId] = node.name
                
                self.requestInit[reqId] = self.sim.now()
                node.queueRequest(self, request)
                #if self.__data!=None:
                #    G.executionData.requests['data-exchanged'] += len(self.__data)
            else:
                raise Exception("A request to the same node is impossible! ")
        
        self.timer = Timer(waitUntil=G.timeout_after, sim=self.sim)
        self.timer.event.name = "request_timeout_for_%s"%(self.name)
        self.sim.activate(self.timer, self.timer.wait())#, self.__maxWaitingTime)
        while not self.allReceived() and not self.timer.ended:
            yield waitevent, self, (self.timer.event, self.__newResponseReceived,)
        
        
        if not self.allReceived(): # timeout reached
            #print "Response not received!"
            response_time = self.sim.now() - t_init
            
            for node_name in self.get_unanswered_nodes():
                G.traceRequest(t_init,
                           self.__actionNode.name,
                           node_name,
                           self.url,
                           408, # TIMEOUT. See http://www.restlet.org/documentation/2.0/jse/api/org/restlet/data/Status.html#CLIENT_ERROR_REQUEST_TIMEOUT
                           response_time )
            
            # self.__actionNode.addClientRequestActivityObservation(now()-init, now())
            
            # this information can be extracted from the traces
            # G.executionData.requests['failure'].append(self)
            
        for o in self.__observers:
            o.notifyRequestFinished(self)
            
    def get_unanswered_nodes(self):
        # not yet deleted requestInit keys, are the ids without a response
        return [self.get_destination_node_name(reqId) for reqId in self.requestInit.keys()]
    
    def getWaitingFor(self):
        return len(self.requestInit)
    
    def allReceived(self):
        return self.getWaitingFor()==0
    
    def addResponse(self, response): # TODO associate with a node
        #if response.getstatus()==404:
        #    dest_node_name = self.get_destination_node_name(response.getid())
        #    print dest_node_name
        
        # timeouts have been already taken into account in the 'timeout' counter
        if not self.timer.ended:
            t_init = self.requestInit[response.getid()]
            response_time = self.sim.now() - t_init
            G.traceRequest(t_init,
                           self.__actionNode.name,
                           self.get_destination_node_name(response.getid()),
                           response.geturl(),
                           response.getstatus(),
                           response_time )
            
            #G.executionData.response_time_monitor.observe( now() - t_init ) # request time
            del self.requestInit[response.getid()]
            
            self.responses.append(response) #dest_node_name
            
            #G.executionData.requests['data-exchanged'] += len(response.get_data())
            
            #fileHandle = open ( 'test.txt', 'a' )
            #fileHandle.write ( response.get_data() )
            #fileHandle.close()
            
            self.__newResponseReceived.signal()
            
    def get_destination_node_name(self, responseId):
        return self.nodeNamesByReqId[responseId]
    
    def toString(self):
        for resp in self.responses:
            print resp.getmsg()
    
    def getActionNode(self):
        return self.__actionNode
    
    def addObserver(self, observer):
        self.__observers.add(observer)
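
# ---------------------------------------------------------------------------
# A minimal, standalone sketch of the bookkeeping pattern RequestInstance
# uses above: the issue time of each outstanding request is kept in a dict
# keyed by request id, entries are removed as responses arrive, and the
# request is complete once the dict is empty (unless the timeout fires
# first). The ids and times below are assumptions for the example.

class ExampleRequestTracker(object):
    def __init__(self):
        self.request_init = {}            # reqId -> time the request was issued

    def issue(self, req_id, now):
        self.request_init[req_id] = now

    def on_response(self, req_id, now):
        return now - self.request_init.pop(req_id)   # response time

    def all_received(self):
        return len(self.request_init) == 0

if __name__ == '__main__':
    tracker = ExampleRequestTracker()
    tracker.issue(1, 0.0)
    tracker.issue(2, 0.0)
    print tracker.on_response(1, 35.0)    # 35.0 (ms) response time
    print tracker.all_received()          # False, request 2 still pending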
Example #19
class StorageNode(IDable, Thread, RTI):
    """Base storage node."""
    def __init__(self, cnode, ID, configs):
        IDable.__init__(self, '%s/sn%s'%(cnode.ID.split('/')[0], ID))
        Thread.__init__(self)
        RTI.__init__(self, self.ID)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.system = cnode.system
        self.configs = configs
        self.cnode = cnode
        self.maxNumTxns = configs.get('max.num.txns.per.storage.node', 1024)
        self.pool = Resource(self.maxNumTxns, name='pool', unitName='thread')
        self.groups = {}    #{gid : group}
        self.newTxns = []
        self.txnsRunning = set([])
        self.shouldClose = False
        self.monitor = Profiler.getMonitor(self.ID)
        self.M_POOL_WAIT_PREFIX = '%s.pool.wait' %self.ID
        self.M_TXN_RUN_PREFIX = '%s.txn.run' %self.ID
        self.M_NUM_TXNS_RUN_KEY = '%s.num.txns'%self.ID
        self.runningThreads = set([])
        self.closeEvent = SimEvent()
        self.newTxnEvent = SimEvent()

    @property
    def load(self):
        return len(self.txnsRunning) + len(self.newTxns)

    def close(self):
        self.logger.info('Closing %s at %s'%(self, now()))
        self.shouldClose = True
        self.closeEvent.signal()

    def onTxnArrive(self, txn):
        self.newTxns.append(txn)
        self.newTxnEvent.signal()

    def onTxnsArrive(self, txns):
        self.newTxns.extend(txns)
        self.newTxnEvent.signal()

    def newTxnRunner(self, txn):
        class DefaultTxnRunner(Thread):
            def __init__(self, snode, txn):
                Thread.__init__(self)
                self.snode = snode
                self.txn = txn
                self.logger = logging.getLogger(self.__class__.__name__)

            def run(self):
                self.logger.debug('Running transaction %s at %s'
                                  %(txn.ID, now()))
                yield hold, self, RandInterval.get('expo', 100).next()
        return DefaultTxnRunner(self, txn)

    class TxnStarter(Thread):
        def __init__(self, snode, txn):
            Thread.__init__(self)
            self.snode = snode
            self.txn = txn

        def run(self):
            #add self and txn to snode
            self.snode.runningThreads.add(self)
            #wait for pool thread resource if necessary
            #self.snode.logger.debug(
            #    '%s start txn=%s, running=%s, outstanding=%s'
            #    %(self.snode, self.txn.ID,
            #      '(%s)'%(','.join([t.ID for t in self.snode.txnsRunning])),
            #      '(%s)'%(','.join([t.ID for t in self.snode.newTxns]))
            #     ))
            self.snode.monitor.start(
                '%s.%s'%(self.snode.M_POOL_WAIT_PREFIX, self.txn.ID))
            yield request, self, self.snode.pool
            self.snode.monitor.stop(
                '%s.%s'%(self.snode.M_POOL_WAIT_PREFIX, self.txn.ID))
            #start runner and wait for it to finish
            thread = self.snode.newTxnRunner(self.txn)
            assert self.txn not in self.snode.txnsRunning, \
                    '%s already started txn %s'%(self.snode, self.txn)
            self.snode.txnsRunning.add(self.txn)
            self.snode.monitor.observe(self.snode.M_NUM_TXNS_RUN_KEY,
                                       len(self.snode.txnsRunning))
            self.snode.monitor.start(
                '%s.%s'%(self.snode.M_TXN_RUN_PREFIX, self.txn.ID))
            thread.start()
            yield waitevent, self, thread.finish
            self.snode.monitor.stop(
                '%s.%s'%(self.snode.M_TXN_RUN_PREFIX, self.txn.ID))
            yield release, self, self.snode.pool
            #clean up
            self.snode.txnsRunning.remove(self.txn)
            self.snode.runningThreads.remove(self)
            self.snode.cnode.onTxnDepart(self.txn)

    def run(self):
        #the big while loop
        while True:
            yield waitevent, self, self.newTxnEvent
            while len(self.newTxns) > 0:
                #pop from new txn to running txn
                txn = self.newTxns.pop(0)
                #start
                thread = StorageNode.TxnStarter(self, txn)
                thread.start()
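
# ---------------------------------------------------------------------------
# A minimal, standalone sketch of the bounded-concurrency pattern that
# StorageNode.TxnStarter implements above: each transaction must first
# acquire a slot from a fixed-size pool Resource, runs for its duration,
# and then releases the slot. It uses SimPy's classic (SimPy 2) API, like the
# rest of this example; the pool size and durations are assumptions.
from SimPy.Simulation import Process, Resource, hold, request, release, \
     activate, initialize, simulate, now

class ExampleTxn(Process):
    def run(self, pool, duration):
        yield request, self, pool        # wait for a free pool slot
        yield hold, self, duration       # simulated transaction work
        yield release, self, pool        # free the slot for the next txn

if __name__ == '__main__':
    initialize()
    pool = Resource(capacity=2, name='pool')
    for i in range(3):
        txn = ExampleTxn(name='txn%d' % i)
        activate(txn, txn.run(pool, 10))
    simulate(until=100)
    print now()   # 20.0 -- the third txn had to wait for a free slot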
Example #20
File: dsr.py  Project: reidlindsay/wins
class DSR(Routing):
    """Dynamic Source Routing protocol (RFC 4728).

    This implementation uses link-level acknowledgements to do route
    maintenance (see `MAC.drpdata` and `MAC.ackdata`).

    :ivar rreqtable: `RouteRequestTable` used to manage route requests.
    :ivar rreqrate: Rate annotation applied to all Route Requests.
    :ivar datarate: Rate annotation applied to all unicast messages.
    :ivar maintbuffer: Dictionary containing buffer of packets being maintained
                       by DSR, indexed by next hop addresses.

    :cvar mac: Property to access pointer to `MAC`.
    :cvar rrt: Alias for `rreqtable`.
    :cvar MaxMaintRexmt: Maximum number of retransmission for route maintenance.
    """
    name = "DSR"
    tracename = "DSR"
    MaxMaintRexmt = 0
    DiscoveryHopLimit = 255
    MaxTTL = DiscoveryHopLimit
    BroadcastJitter = 10e-3
    MaintenanceTimeout = 10.0
    RexmtBufferSize = 50
    MaxGratuitousRexmt = 6
    def __init__(self, *args, **kwargs):
        """Constructor."""
        # set up parameters
        self.__mac = None
        self.rreqrate = None
        self.datarate = None
        self.maintbuffer = None
        # additional parameters for signalling
        self.sndrreq = SimEvent()
        self.drprreq = SimEvent()
        self.finrreq = SimEvent()
        self.sndfail = SimEvent()
        # call base constructor
        Routing.__init__(self, *args, **kwargs)

    mac = property(fget=lambda self: self.get_mac(), \
                   fset=lambda self, m: self.set_mac(m) )
    rrt = property(fget=lambda self: self.rreqtable)
    promiscuous = property(fget=lambda self: self.get_promiscuous() )

    def configure(self, mac=None, rreqrate=None, datarate=None, **kwargs):
        """Configure pointers and parameters.

        :param mac: `MAC` module corresponding to this network protocol.
        :param rreqrate: Rate index used for flooding RREQ messages.
        :param datarate: Rate index used for sending other DSR messages.
        :param kwargs: Additional keywords passed to `Routing.configure()`.

        If `mac` is not provided, this module will attempt to automatically
        determine the appropriate `MAC`.

        If `rreqrate` and/or `datarate` are not specified, `DSR` will not
        attempt to set the rate annotation of the outgoing packet. It is
        important that the `rreqrate` be higher than `datarate` to ensure stable
        links for DSR.
        """
        Routing.configure(self, **kwargs)
        self.mac = mac
        self.rreqrate = rreqrate
        self.datarate = datarate
        self.maintbuffer = {}
        self.table = self.newchild('routingtable', RouteCache, \
                                   name=self.name+".rcache", \
                                   tracename=self.tracename+".RCACHE")
        rreqtable  = self.newchild('rreqtable', RouteRequestTable, \
                                   name=self.name+".rreqtable", \
                                   tracename=self.tracename+".RREQTBL")

    ##############################
    # TX STATES
    ##############################
    def IPFORWARD(self, fsm, p):
        """IPFORWARD state; start route maintenance for IP+DSR if needed.

        :param p: IP packet to send.

        :note: This state assumes a valid IP+DSR packet was sent to it.
        """
        # get IP parameters
        ip, dsr = p[IP], p[DSRPacket]
        src, dst = ip.src, ip.dst
        # start route maintenance or deliver
        self.checkdsropt(ip, exception=True, incoming=False)
        # send broadcast immediately
        if (dst==self.broadcast):
            yield fsm.goto(self.IPDELIVER, ip, dst)
        # otherwise ...
        srcroute = getdsr_srcroute(ip)
        if srcroute:
            path = srcroute.addresses
            segsleft, naddr = srcroute.segsleft, len(path)
            nexthop = path[-segsleft]
        else:
            nexthop = dst
        # cache route to nexthop and start route maintenance for ip
        self.addroute(nexthop)
        self.maintain(ip, nexthop)      # put in maintbuffer

    def IPROUTING(self, fsm, p):
        """IPROUTING state; start route discovery for `p`."""
        # error messages
        dropbuffer = "sendbuffer overflow"
        # get IP/DSR/Option parameters
        ip, dsr = p[IP], p[DSRPacket]
        src, dst = ip.src, ip.dst
        # has route -> resend
        if self.hasroute(dst):
            yield fsm.goto(self.IPSEND, ip, src, dst)
        # otherwise -> start route discovery
        rre = self.rrt.getentry(dst)
        drop = rre.buffer(p)
        kwargs = {'src':src, 'dst':dst}
        kwargs['nbuffered'] = len(rre.sendbuffer)
        kwargs['timeleft']  = time2msec(rre.timeleft())
        self.debug("SENDBUFF", p, **kwargs)
        for d in drop:
            self.log_drop(d, drop=dropbuffer)
        # send RREQ
        yield fsm.goto(self.TXRREQ, dst)

    ##############################
    # TX DSR OPTIONS
    ##############################
    def TXRREQ(self, fsm, target, options=[], rexmt=0):
        """TXRREQ state; create and send route request.

        :param target: Target for route discovery.
        :param options: Additional DSR options [default=[]].

        :note: This state is persistent. It keeps resending the RREQ until a
               route to `target` is found or the retransmission limit is
               exceeded.
        """
        # error messages
        rreqerror = "[DSR]: Error getting RREQ Table entry!"
        droprexmt = "max rexmt exceeded"
        drophasrt = "route already exists"
        # pause for jitter
        jitter = random.uniform(0,1)*self.BroadcastJitter
        yield hold, fsm, jitter
        # check route and rexmt count
        if self.hasroute(target):
            self.log("RREQSTOP", target=target, rexmt=rexmt, drop=drophasrt)
            rre = self.rrt.getentry(target)
            while rre.sendbuffer:
                p = rre.sendbuffer.pop(0)
                f = FSM.launch(self.IPRECOVER, p, self.address, target)
            self.rrt.delentry(target)
            # signal that RREQ has finished
            self.finrreq.signal(target)
            yield fsm.stop()    # HALT and stop sending RREQ
        if (rexmt>self.rrt.MaxRequestRexmt):
            self.log("RREQDROP", target=target, rexmt=rexmt, drop=droprexmt)
            rre = self.rrt.getentry(target)
            while rre.sendbuffer:
                p = rre.sendbuffer.pop(0)
                self.log_drop(p, drop=droprexmt)
            self.rrt.delentry(target)
            # signal that RREQ has been abandoned
            self.drprreq.signal(target)
            yield fsm.stop()    # HALT and stop sending RREQ
        # get RREQ parameters
        sendrreq = self.rrt.sendrreq(target)
        rre = self.rrt.getentry(target)
        tleft = rre.timeleft()
        # get parameters for logging
        kwargs = {'rexmt':rexmt, 'nbuffered': len(rre.sendbuffer)}
        kwargs['options']  = [o.tracename for o in options]
        kwargs['jitter'] = time2msec(jitter)
        kwargs['timeleft'] = time2msec(tleft)
        # cannot send RREQ? -> RREQ is busy, drop attempt
        if not sendrreq:
            self.debug("RREQBUSY", target=target, **kwargs)
            yield fsm.stop()    # HALT and allow other RREQ to finish
        # otherwise -> send RREQ
        ID, ttl = rre.ID, rre.ttl
        # create DSR+RREQ+options
        nextheader = self.getproto(None)
        dsr = DSRPacket(nextheader=nextheader)
        rreq = DSROPT_RREQ(identification=ID, target=target)
        dsr.options = [rreq] + [o for o in options]
        # create IP+DSR
        proto = self.getproto(dsr)
        src, dst = self.address, self.broadcast
        ip = IP(src=src, dst=dst, proto=proto, ttl=ttl)
        ip.add_payload(dsr)
        # send RREQ -> wait for timeout, then rexmt
        self.debug("TXRREQ", ip, target=target, **kwargs)
        f = FSM.launch(self.IPDELIVER, ip, dst)
        # signal that RREQ has been sent to target
        self.sndrreq.signal(target)
        # wait for send RREQ to timeout before trying again
        yield hold, fsm, tleft
        yield fsm.goto(self.TXRREQ, target, options, rexmt+1)

    def TXRREP(self, fsm, src, dst, rreq, addresses):
        """TXRREP state; create and send route reply to `dst`."""
        self.debug("TXRREP", src=src, dst=dst, addresses=addresses)
        # cache reverse route from rreq?
        opt = getdsr_rreq(rreq)
        if opt and DSR_USE_REVERSE_ROUTES:
            rpath = [a for a in opt.addresses]
            rpath.reverse()
            if rpath: self.addroute(dst, rpath)     # more than one hop away
            else:     self.addroute(dst)            # one hop neighbor
        # create RREP option
        rrep = DSROPT_RREP()
        rrep.addresses = [a for a in addresses]
        # no route to dst -> send RREQ+RREP
        if not self.hasroute(dst):
            yield fsm.goto(self.TXRREQ, dst, options=[rrep])
        # otherwise -> ...
        if opt and DSR_USE_REVERSE_ROUTES:
            path = [a for a in rpath]
        else:
            path = self.srcroute(dst)
        ttl = len(path)+1
        # create DSR
        nextheader = self.getproto(None)
        dsr = DSRPacket(nextheader=nextheader)
        dsr.options = [rrep]
        # create IP
        proto = self.getproto(dsr)
        ip = IP(src=src, dst=dst, proto=proto, ttl=ttl)
        ip.add_payload(dsr)
        # add SRCROUTE?
        pkt = self.updateroute(ip, path)
        #assert (pkt is not None)
        assert self.safe((pkt is not None))
        yield fsm.goto(self.IPFORWARD, pkt)

    def TXRERR(self, fsm, src, dst, errortype, type_specific):
        """TXRERR state; create and send a new route error message."""
        self.debug("TXRERR", src=src, dst=dst, unreachable=type_specific)
        # error messages
        errornotme = "[DSR]: Cannot send RERR not from me!"
        assert self.safe(src==self.address), errornotme
        # create RERR option
        rerr = DSROPT_RERR()
        rerr.err_src, rerr.err_dst = src, dst
        rerr.errortype, rerr.type_specific = errortype, type_specific
        # create DSR + RERR Option
        nextheader = self.getproto(None)
        dsr = DSRPacket(nextheader=nextheader)
        dsr.options = [rerr]
        # create IP + DSR
        proto = self.getproto(dsr)
        ip = IP(src=src, dst=dst, proto=proto)
        ip.add_payload(dsr)
        # no route -> start route discovery?
        if not self.hasroute(dst):
            yield fsm.goto(self.IPSEND, ip, src, dst)
        # otherwise -> ...
        path = self.srcroute(dst)
        ttl = len(path)+1
        # add SRCROUTE?
        ip.ttl = ttl
        pkt = self.updateroute(ip, path)
        #assert (pkt is not None)
        assert self.safe((pkt is not None))
        yield fsm.goto(self.IPFORWARD, pkt)

    def MAINT(self, fsm, nexthop, rexmt=0, send=True):
        """MAINT state; perform route maintenace for nexthop.

        :note: Assume maintenance buffer contains valid IP+DSR packets.
        """
        yield hold, fsm, 0  # yield to other threads
        #assert (nexthop in self.maintbuffer)
        assert self.safe((nexthop in self.maintbuffer))
        # error messages
        droproute = "broken route"
        # get maintenance parameters
        buff = self.maintbuffer[nexthop]['buffer']
        if (len(buff)<1):
            del self.maintbuffer[nexthop]
            yield fsm.stop()        # HALT and stop maintenance
        # get head of buffer
        p = buff[0]
        ip, dsr = p[IP], p[DSRPacket]
        addr, src, dst = self.address, ip.src, ip.dst
        # check if path is still valid
        opt, path = getdsr_srcroute(ip), None   # one hop away?
        if opt:
            segsleft = opt.segsleft             # more than one hop?
            path = [a for a in opt.addresses[-segsleft:]]
        pathok = self.hasroute(dst, path)
        # if path is broken -> drop or resend
        if not pathok:
            p = buff.pop(0)
            if (addr==src):
                f = FSM.launch(self.IPRECOVER, p, src, dst)
            else:
                self.log_drop(p, drop=droproute)
            yield fsm.goto(self.MAINT, nexthop)     # continue with next packet
        # otherwise -> send head of buffer?
        if send:
            f = FSM.launch(self.IPDELIVER, ip, nexthop)
        # wait for link-level feedback (ACK/DROP)
        mac, feedback = self.mac, None
        yield waitevent, fsm, (mac.ackdata, mac.drpdata)
        if (mac.ackdata in fsm.eventsFired):
            p = mac.ackdata.signalparam
            if self.issame(ip, p): feedback = "ackdata"
        elif (mac.drpdata in fsm.eventsFired):
            p = mac.drpdata.signalparam
            if self.issame(ip, p): feedback = "drpdata"
        # process feedback
        if (feedback=="ackdata"):
            p = buff.pop(0)
            yield fsm.goto(self.MAINT, nexthop)     # continue with next packet
        elif (feedback=="drpdata"):
            rexmt += 1
            norexmt = (rexmt>self.MaxMaintRexmt)
            # rexmt not exceeded -> try again
            if not norexmt:
                self.debug("REXMT%d"%(rexmt), ip)
                yield fsm.goto(self.MAINT, nexthop, rexmt)
            # otherwise -> broken link!!
            etype = DSR_ERROR_NODE_UNREACHABLE
            esrc, unreachable = self.address, nexthop
            self.debug("DROPDATA", src=esrc, unreachable=unreachable)
            self.removelink(esrc, unreachable)
            # signal broken link
            self.sndfail.signal(esrc)
            # clear out maintenance buffer
            errdst = []
            while buff:
                p = buff.pop(0)
                ip, dsr = p[IP], p[DSRPacket]
                src, dst = ip.src, ip.dst
                # send RERR (for non-RREP messages)
                rrep = getdsr_rrep(ip)
                sendrerr = (not rrep) and (src not in errdst)
                # recover packet or send RERR
                if (addr==src):
                    f = FSM.launch(self.IPRECOVER, p, src, dst)
                elif sendrerr:
                    errdst.append(src)
            # send RERR to sources
            for edst in errdst:
                f = FSM.launch(self.TXRERR, esrc, edst, etype, unreachable)
            # continue to allow graceful shutdown
            yield fsm.goto(self.MAINT, nexthop)
        else:
            # feedback not for me -> continue waiting
            yield fsm.goto(self.MAINT, nexthop, rexmt, send=False)

    def IPRECOVER(self, fsm, p, src, dst):
        """IPRECOVER state; attempt to resend packet `p`.

        :note: This method assumes that `p` is a valid IP+DSR packet.
        """
        self.debug("IPRECOVER", p, src=src, dst=dst)
        # error messages
        errornotme = "[DSR]: Cannot recover message not from me!"
        errorbcast = "[DSR]: Cannot recover broadcast messages!"
        droprcount = "recover attempts exceeded"
        assert self.safe(src==self.address), errornotme
        assert self.safe(dst!=self.broadcast), errorbcast
        # drop RREP or RERR packets
        rrep = getdsr_rrep(p)
        rerr = getdsr_rerr(p)
        drop = ""
        if rrep: drop = "do not recover RREP"
        if rerr: drop = "do not recover RERR"
        if drop:
            self.log_drop(p, src=src, dst=dst, drop=drop)
            yield fsm.stop()            # HALT and discard packet
        # keep secret annotation for keeping track of recovery attempts
        rcount = 0
        if p.hasanno('iprecover'): rcount = p.getanno('iprecover') + 1
        p.setanno('iprecover', rcount, priv=True)
        # max retries exceeded?
        if (rcount>1): # FIXME: use (>self.MaxMaintRexmt) instead?
            self.log_drop(p, src=src, dst=dst, rcount=rcount, drop=droprcount)
            yield fsm.stop()
        # otherwise -> remove stale route, update, and resend
        ip, dsr = p[IP], p[DSRPacket]
        dsr.options = [o for o in dsr.options if (not getdsr_srcroute(o))]
        assert (dsr.nextheader!=const.IP_PROTO_NONE)
        yield fsm.goto(self.IPSEND, ip, src, dst)

    ##############################
    # RX STATES
    ##############################
    def IPCLASSIFY(self, fsm, p, cache=True):
        """IPCLASSIFY state; overloaded to implement DSR classification.

        :param p: Received IP packet.
        :param cache: Cache routing information from `p`.

        :note: This state assumes that packet `p` passed all validity checks in
               `IPRECV` (i.e. `checkiprecv()`).
        """
        errmsg = "[DSR]: IPCLASSIFY does not support promiscuous mode."
        #assert (not self.promiscuous), errmsg
        assert self.safe((not self.promiscuous)), errmsg
        # error messages
        errordsropt  = "[DSR]: Unprocessed DSR options still remain!"
        dropnotforme = "not for me"
        # get IP+DSR packets
        ip = p[IP]
        dsr = ip[DSRPacket]
        # get IP/DSR parameters
        addr, bcast = self.address, self.broadcast
        src, dst = ip.src, ip.dst
        # get DSR options
        rreq, rrep     = getdsr_rreq(ip), getdsr_rrep(ip)
        rerr, srcroute = getdsr_rerr(ip), getdsr_srcroute(ip)
        isforme, isbcast = (dst==addr), (dst==bcast)
        unicast, promiscuous = (not isbcast), self.promiscuous
        # cache routing info from packet
        if cache: self.cacheroute(ip)
        # classify DSR options
        if srcroute and unicast:
            yield fsm.goto(self.RXSRCROUTE, ip, dsr, srcroute)
        elif rreq and isbcast:
            yield fsm.goto(self.RXRREQ,     ip, dsr, rreq)
        elif rrep and isforme:
            yield fsm.goto(self.RXRREP,     ip, dsr, rrep)
        elif rerr and isforme:
            yield fsm.goto(self.RXRERR,     ip, dsr, rerr)
        elif dsr.options:
            #assert (self.promiscuous or (not isforme)), errordsropt
            assert self.safe((self.promiscuous or (not isforme))), errordsropt
        # classify payload of packet
        if (dst==addr) or (dst==bcast):
            # packet for me -> detach payload
            payload = dsr.payload
            dsr.remove_payload()
            self.log_recv(payload, src=src, dst=dst)
            if (dsr.nextheader==const.IP_PROTO_IP):
                yield fsm.goto(self.IPRECV, payload)    # IPENCAP -> reclassify
            elif (dsr.nextheader!=const.IP_PROTO_NONE):
                pkt = self.set_recvanno(payload, src, dst)
                yield self.TXU.send(fsm, [pkt])         # DATA -> send to upper layer
        else:
            # otherwise -> drop
            self.log_drop(ip, drop=dropnotforme)

    ##############################
    # RX DSR OPTIONS
    ##############################
    def RXSRCROUTE(self, fsm, ip, dsr, opt):
        """RXSRCROUTE state; process source route option.

        :note: Assumes `checkiprecv()` passed.
        """
        self.debug("RXSRCROUTE", ip)
        # get IP/DSR/Option parameters
        segsleft = opt.segsleft
        path = opt.addresses[-segsleft]
        # forward packet to next hop in source route
        if (segsleft>1):
            # intermediate hop -> update SRCROUTE and send
            ip.ttl = ip.ttl - 1
            opt.segsleft = segsleft - 1
            yield fsm.goto(self.IPFORWARD, ip)
        elif (segsleft==1):
            # last hop -> strip SRCROUTE and send
            ip.ttl = ip.ttl - 1
            dsr.options = [o for o in dsr.options if (not getdsr_srcroute(o))]
            yield fsm.goto(self.IPFORWARD, ip)

    def RXRREQ(self, fsm, ip, dsr, opt):
        """RXRREQ state; process route request option.

        :note: Assumes `checkiprecv()` passed.
        """
        self.debug("RXRREQ", ip)
        # error messages
        droprreq = "in source route or not new RREQ"
        # get IP/DSR/Option parameters
        addr, src, dst = self.address, ip.src, ip.dst
        ID, target = opt.identification, opt.target
        rreqonly = (dsr.nextheader==const.IP_PROTO_NONE)
        # check if target reached
        if (addr==target):
            # send RREP [and process remaining packet]
            path = [a for a in opt.addresses] + [target]
            f = FSM.launch(self.TXRREP, addr, src, opt, path)
            # strip RREQ and set dst=target
            dsr.options = [o for o in dsr.options if (not getdsr_rreq(o))]
            ip.dst = target
            # check for other options
            rerr, rrep = getdsr_rerr(dsr), getdsr_rrep(dsr)
            if rerr:
                yield fsm.goto(self.RXRERR, ip, dsr, rerr)
            elif rrep:
                yield fsm.goto(self.RXRREP, ip, dsr, rrep)
            # RREQ only -> HALT and discard packet
            if rreqonly: yield fsm.stop()
            # otherwise -> remove DSR options and reclassify
            dsr.options = []
            yield fsm.goto(self.IRECV, ip)
        # otherwise -> attempt to forward RREQ
        assert (addr!=target)
        inroute = (src==addr) or (addr in opt.addresses)
        newrreq = self.rrt.addcache(src, ID, target)
        # check if RREQ should be dropped
        if (inroute or (not newrreq)):
            self.log_drop(ip, drop=droprreq)
            yield fsm.stop()    # HALT and discard packet
        # update options and forward RREQ
        ip.ttl = ip.ttl - 1
        opt.addresses = [a for a in opt.addresses] + [addr]
        yield fsm.goto(self.IPFORWARD, ip)

    def RXRREP(self, fsm, ip, dsr, opt):
        """RXRREP state; handle route reply message.

        :note: Assumes `checkiprecv()` passed.
        """
        self.debug("RXRREP", ip)
        # get IP/DSR/Option parameters
        src, dst = ip.src, ip.dst
        addr, target = self.address, opt.addresses[-1]
        rreponly = (dsr.nextheader==const.IP_PROTO_NONE)
        # only keep RREP options and update route cache
        assert (dst==addr)
        dsr.options = [o for o in dsr.options if (getdsr_rrep(o))]
        self.cacheroute(ip)
        #assert (self.hasroute(target))
        assert self.safe((self.hasroute(target)))   # verify route to target
        # get entry from Route Request Table and forward packets
        rre = self.rrt.getentry(target)
        while rre.sendbuffer:
            p = rre.sendbuffer.pop(0)
            f = FSM.launch(self.IPRECOVER, p, addr, target)
        self.rrt.delentry(target)
        # more than RREP?
        if not rreponly:
            # remove DSR options and reclassify
            dsr.options = []
            yield fsm.goto(self.IRECV, ip)

    def RXRERR(self, fsm, ip, dsr, opt):
        """RXRERR state; handle route error message.

        :note: Assumes `checkiprecv()` passed.
        """
        self.debug("RXRRER", ip)
        # error messages
        droptype = "unsupported route error type"
        # get IP/DSR/Option parameters
        addr, src, dst = self.address, ip.src, ip.dst
        errsrc, errdst = opt.err_src, opt.err_dst
        errortype, unreachable = opt.errortype, opt.type_specific
        rerronly = (dsr.nextheader==const.IP_PROTO_NONE)
        #assert (addr==dst)
        assert self.safe((addr==dst))
        # process RERR packet (or drop unsupported errortype)
        if (errortype==DSR_ERROR_NODE_UNREACHABLE):
            self.removelink(errsrc, unreachable)
        else:
            self.log_drop(ip, drop=droptype)
        # more than RERR?
        if not rerronly:
            # remove DSR options and reclassify
            dsr.options = []
            yield fsm.goto(self.IRECV, ip)

    ##############################
    # HELPER METHODS
    ##############################
    def encap_data(self, p, src, dst, force=False, **kwargs):
        """Encapsulate data packet (if needed) for delivery to next hop.

        :param p: Packet to deliver.
        :param src: Source address.
        :param dst: Destination address.
        :param force: If true, force encapsulation into a new IP packet.
        :param kwargs: Additional keywords passed to IP constructor.
        :return: Encapsulated/modified packet for delivery.

        If `p` is already an IP packet and `force` is false, this method will
        update 'src', 'dst', and 'ttl' fields as needed. Otherwise, this method
        encapsulates `p` with an IP header.

        :note: By default this method assumes that `ptype` indicates IPv4.
               *Overload this method to handle other protocol types*.
        """
        # if no encapsulation needed -> update parameters
        isip = isinstance(p, Packet) and p.haslayer(IP)
        isdsr = isip and p.haslayer(DSRPacket)
        if isdsr and (not force):
            ip, dsr = p[IP], p[DSRPacket]
            dsr.nextheader = self.getproto(dsr.payload)
            if self.hasroute(ip.dst):
                p = self.updateroute(ip)
            # update IP parameters
            return Routing.encap_data(self, p, src, dst, force=False, **kwargs)
        # create DSR
        nextheader = self.getproto(p)
        dsr = DSRPacket(nextheader=nextheader)
        dsr.add_payload(p)
        # create IP+DSR
        proto = self.getproto(dsr)
        ip = IP(src=src, dst=dst, proto=proto, **kwargs)
        ip.add_payload(dsr)
        # update and return
        return self.encap_data(ip, src, dst, force=False, **kwargs)

    def maintain(self, p, nexthop):
        """Start or udpate route maintenace on IP+DSR packet.

        :note: This assumes a IP+DSR packet was passed to it.
        """
        # error messages
        errornexthop = "[DSR]: maintain() found invalid nexthop!"
        dropbuffer = "maintenance buffer overflow"
        dropttl = "TTL expired"
        # get IP/DSR/Option parameters
        ip, dsr = p[IP], p[DSRPacket]
        addr, src, dst, ttl = self.address, ip.src, ip.dst, ip.ttl
        # TTL expired?
        if (ttl<1):
            self.log_drop(p, drop=dropttl)
            return
        # start or continue route maintenance
        if nexthop in self.maintbuffer:
            # add packet to end of maintenance buffer
            buff = self.maintbuffer[nexthop]['buffer']
            if (len(buff)<self.RexmtBufferSize):
                buff.append(ip)
                self.debug("MAINTBUFF", ip)
            else:
                self.log_drop(ip, drop=dropbuffer)
        else:
            # add new buffer and launch thread to maintain it
            self.maintbuffer[nexthop] = {'buffer':[ip]}
            f = FSM.launch(self.MAINT, nexthop)

    def cacheroute(self, p):
        """Cache relevant route information from IP+DSR packet.

        :note: This method assumes that the packet has passed through
               `checkiprecv()`.
        """
        errmsg = "[DSR]: cacheroute() does not support promiscuous mode."
        #assert (not self.promiscuous), errmsg
        assert self.safe((not self.promiscuous)), errmsg
        # get ip/dsr packets and parameters
        ip = p[IP]
        dsr = p[DSRPacket]
        addr, bcast = self.address, self.broadcast
        src, dst, ttl = ip.src, ip.dst, ip.ttl
        # classify based on DSR option
        rreq, rrep     = getdsr_rreq(ip), getdsr_rrep(ip)
        rerr, srcroute = getdsr_rerr(ip), getdsr_srcroute(ip)
        # process source route option
        if srcroute:
            # cache remaining route to dst
            segsleft = srcroute.segsleft
            path = srcroute.addresses[-segsleft:]
            if (segsleft>1):     self.addroute(dst, path[1:])
            elif (segsleft==1): self.addroute(dst)
        # process route request option
        if rreq:
            # no routing info to cache, but use target as dst for other options
            dst = rreq.target
        # process route reply option
        if rrep:
            # check if on route or intended dst of RREP
            inroute = (addr in rrep.addresses[:-1])
            isforme = (dst==addr)
            # cache routing info
            idx = None
            if inroute:   idx = rrep.addresses.index(addr)+1
            elif isforme: idx = 0
            if (idx is not None):
                target = rrep.addresses[-1]
                rt = rrep.addresses[idx:-1]
                if rt: self.addroute(target, rt)    # more than one hop away
                else:  self.addroute(target)        # target is neighbor
        # process route error option
        if rerr:
            errsrc, unreachable = rerr.err_src, rerr.type_specific
            if (rerr.errortype==DSR_ERROR_NODE_UNREACHABLE):
                self.removelink(errsrc, unreachable)

    def checkiprecv(self, p, nextheader=True, **kwargs):
        """Overloaded to check for valid IP+DSR packet.

        :param p: Packet received in `IPRECV`.
        :param nextheader: If true, verify that the DSR `nextheader` field
                           matches the protocol of the DSR payload.
        :param kwargs: Additional keywords passed to base class method.
        :return: String containing any error condition, or nothing for no error.
        """
        # error messages
        dropnondsr = "non-IP+DSR packet in IPRECV"
        dropheader = "invalid IP+DSR nextheader"
        dropdsropt = "invalid DSR options!"
        # call base method
        drop = Routing.checkiprecv(self, p, **kwargs)
        if drop: return drop
        # valid IP+DSR?
        isdsr = p.haslayer(IP) and p.haslayer(DSRPacket)
        if not isdsr: return dropnondsr
        dsr = p[DSRPacket]
        # check nextheader
        if nextheader:
            payproto = self.getproto(dsr.payload)
            if (payproto!=dsr.nextheader): return dropheader
        # check DSR options
        if not self.checkdsropt(p, exception=False):
            return dropdsropt
        return ""   # no error

    def checkdsropt(self, p, exception=True, incoming=True):
        """Check DSR options of incoming packet for any violations.

        :param p: Packet to check.
        :param exception: If true, throws exception for violation.
        :param incoming: If true, treat `p` as a received packet.
        :return: Boolean flag; if True, DSR packet is okay.

        :note: Do not use this method to check options for outgoing packets
               without setting `incoming` to False.
        """
        if isinstance(p, Reference): p = p._deref
        isip = isinstance(p, Packet) and p.haslayer(IP)
        isdsr = isip and p.haslayer(DSRPacket)
        if not isdsr:
            if exception: raise RuntimeError, "[DSR]: non-IP+DSR packet!"
            return False
        # get IP+DSR packets
        ip  = p[IP]
        dsr = ip[DSRPacket]
        # get IP/DSR parameters
        addr, bcast = self.address, self.broadcast
        src, dst = ip.src, ip.dst
        # classify DSR options
        rreq, rrep     = getdsr_rreq(ip), getdsr_rrep(ip)
        rerr, srcroute = getdsr_rerr(ip), getdsr_srcroute(ip)
        isforme, isbcast = (dst==addr), (dst==bcast)
        unicast, promiscuous = (not isbcast), self.promiscuous
        # error check options
        dsrok = True
        if srcroute:
            path = srcroute.addresses
            segsleft, naddr = srcroute.segsleft, len(path)
            dsrok &= not rreq
            dsrok &= not isbcast
            dsrok &= (segsleft>0) and (segsleft<naddr+1)
            dsrok &= not (isforme and (not promiscuous))
            if dsrok and (not promiscuous):
                if incoming:
                    # incoming -> I am current hop?
                    currhop = path[-segsleft]
                    dsrok &= (currhop==addr)
                else:
                    # outgoing -> I am right before nexthop?
                    currhop = path[-segsleft]
                    if (currhop==path[0]):
                        dsrok &= segsleft==naddr
                        dsrok &= (addr==src)
                    else:
                        dsrok &= (segsleft<naddr)
                        if dsrok: dsrok &= (addr==path[-(segsleft+1)])
            #if exception: assert dsrok, "[DSR]: Invalid SRCROUTE option!"
            if exception: assert self.safe(dsrok), "[DSR]: Invalid SRCROUTE option!"
        if rreq:
            dsrok &= not srcroute
            dsrok &= not unicast
            #if exception: assert dsrok, "[DSR]: Invalid RREQ option!"
            if exception: assert self.safe(dsrok), "[DSR]: Invalid RREQ option!"
        if rrep:
            dsrok &= not (isbcast and (not rreq))
            dsrok &= not rerr
            dsrok &= (len(rrep.addresses)>0)
            #if exception: assert dsrok, "[DSR]: Invalid RREP option!"
            if exception: assert self.safe(dsrok), "[DSR]: Invalid RREP option!"
        if rerr:
            dsrok &= not (isbcast and (not rreq))
            dsrok &= not rrep
            dsrok &= (rerr.errortype==DSR_ERROR_NODE_UNREACHABLE)
            #if exception: assert dsrok, "[DSR]: Invalid RERR option!"
            if exception: assert self.safe(dsrok), "[DSR]: Invalid RERR option!"
        return dsrok

    def updateroute(self, p, path=None, chksum=True):
        """Update IP+DSR routing info using a particular route.

        :param p: IP+DSR packet.
        :param path: New route; if `None`, get it from route cache.
        :param chksum: If true, update checksum.
        :return: Packet with updated routing info (or `None` if no valid route).
        """
        ip, dsr = p[IP], p[DSRPacket]
        # get IP/DSR/Option parameters
        addr, src, dst = self.address, ip.src, ip.dst
        if path is None: path = self.srcroute(dst)
        # no path found?
        if path is None: return None
        # strip old SRCROUTE
        dsr.options = [o for o in dsr.options if (not getdsr_srcroute(o))]
        # apply new route
        ip.ttl = len(path) + 1
        if path:
            srcroute = DSROPT_SRCROUTE()
            srcroute.segsleft = len(path)
            srcroute.addresses = [a for a in path]
            dsr.options = [srcroute] + [o for o in dsr.options]
        # update chksum?
        if chksum: chksum = self.updatechksum(ip, overwrite=True)
        return ip

    def set_sendanno(self, *args, **kwargs):
        """Overloaded to set rate annotations in outgoing packets."""
        p = Routing.set_sendanno(self, *args, **kwargs)
        isip = isinstance(p, Packet) and p.haslayer(IP)
        isdsr = isip and p.haslayer(DSRPacket)
        if not isdsr: return p
        # remove stale annotations
        if p.hasanno('phy-fixed-rate'): p.delanno('phy-fixed-rate')
        # set rate annotation
        rreq = getdsr_rreq(p)
        rate = None
        if rreq: rate = self.rreqrate   # assume RREQ rate is higher
        else:    rate = self.datarate
        if (rate is not None):
            p.setanno('phy-rate', rate)
            # use fixed-rate anno for broadcast packets
            if rreq: p.setanno('phy-fixed-rate', rate)
        return p

    def getproto(self, p):
        """Overloaded to check for DSR packets."""
        if isinstance(p, Reference): p = p._deref
        # determine protocol type of packet
        if isinstance(p, DSRPacket): proto = const.IP_PROTO_DSR
        else:                        proto = Routing.getproto(self, p)
        return proto

    def issame(self, pa, pb):
        """See if packets have the same IP+DSR header information."""
        isip  = isinstance(pa,Packet) and pa.haslayer(IP)
        isdsr = isip and pa.haslayer(DSRPacket)
        if not isdsr: return False
        isip  = isinstance(pb,Packet) and pb.haslayer(IP)
        isdsr = isip and pb.haslayer(DSRPacket)
        if not isdsr: return False
        ipa, dsra = pa[IP], pa[DSRPacket]
        ipb, dsrb = pb[IP], pb[DSRPacket]
        # check IP/DSR fields
        same = True
        same &= (ipa.src==ipb.src) and (ipa.dst==ipb.dst)
        same &= (ipa.id==ipb.id) and (ipa.proto==ipb.proto)
        if (ipa.chksum is not None) and (ipb.chksum is not None):
            same &= (ipa.chksum==ipb.chksum)
        same &= (dsra.length==dsrb.length)
        same &= (dsra.nextheader==dsrb.nextheader)
        same &= (len(dsra.options)==len(dsrb.options))
        return same

    ##############################
    # ROUTE TABLE METHODS
    ##############################
    def srcroute(self, *args, **kwargs):
        """Get source route from route cache."""
        return self.table.srcroute(*args, **kwargs)

    def removelink(self, a, b):
        """Remove link from route cache."""
        return self.table.removelink(a, b, self.address)

    ##############################
    # PROPERTY METHODS
    ##############################
    def get_mac(self):
        """Get MAC corresponding to this module."""
        if isinstance(self.__mac, MAC): return self.__mac
        # otherwise try to get MAC from ARP
        mac, arp = None, self.TXD.target.parent
        if isinstance(arp, ARP):
            if self in arp.listen: mac, f = arp.listen[self]
        # final check on MAC
        if not isinstance(mac, MAC): mac = None
        return mac

    def set_mac(self, mac):
        """Set MAC corresponding to this module."""
        m = None
        if isinstance(mac, MAC): m = mac
        self.__mac = m

    def get_promiscuous(self):
        """Get `MAC.promiscuous` flag."""
        p = None
        if self.mac: p = self.mac.promiscuous
        return p

    ##############################
    # LOGGING METHODS
    ##############################
    def get_ip_anno(self, p):
        """Internal method to get relevant annotations for an IP+DSR packet."""
        kwargs = Routing.get_ip_anno(self, p)
        kwargs.update(self.get_dsr_anno(p) )
        kwargs.update(self.get_agt_anno(p) )
        return kwargs

    def get_agt_anno(self, p):
        """Internal method to get relevant annotations for a AGT packet."""
        kwargs = {}
        isagt = isinstance(p, Packet) and p.haslayer(AGT)
        if not isagt: return kwargs
        agt = p[AGT]
        kwargs['agt-root'] = agt.traceid
        return kwargs

    def get_dsr_anno(self, p):
        """Internal method to get relevant annotations for a DSR packet."""
        kwargs = {}
        isdsr = isinstance(p, Packet) and p.haslayer(DSRPacket)
        if not isdsr: return kwargs
        dsr = p[DSRPacket]
        kwargs['dsr.nextheader'] = dsr.nextheader
        # classify based on DSR option
        rreq, rrep     = getdsr_rreq(dsr), getdsr_rrep(dsr)
        rerr, srcroute = getdsr_rerr(dsr), getdsr_srcroute(dsr)
        # get parameters from options
        if rreq:
            kwargs['rreq.ID'] = rreq.identification
            kwargs['rreq.target'] = rreq.target
            kwargs['rreq.addresses'] = rreq.addresses
        if rrep:
            kwargs['rrep.addresses'] = rrep.addresses
        if srcroute:
            kwargs['srcroute.segsleft'] = srcroute.segsleft
            kwargs['srcroute.addresses'] = srcroute.addresses
        if rerr:
            kwargs['rerr.errortype'] = _dsr_error_types[rerr.errortype]
            kwargs['rerr.err_src'] = rerr.err_src
            kwargs['rerr.err_dst'] = rerr.err_dst
            kwargs['rerr.type_specific'] = rerr.type_specific
        # get other parameters
        if p.hasanno('phy-rate'):
            kwargs['phy-rate'] = p.getanno('phy-rate')
        return kwargs

    def debug(self, *args, **kwargs):
        """Log debug statements to trace."""
        if DSRDEBUG:
            self.log(*args, **kwargs)

    def safe(self, x):
        """Print trace to standard out before throwing exception."""
        if not x:
            self.trace.output()
        return x
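
The signalling events created in the constructor above (`sndrreq`, `drprreq`, `finrreq`, `sndfail`) are signalled in `TXRREQ` and `MAINT` but never waited on within this snippet, so they appear intended for external observers. Below is a minimal sketch of such an observer; it assumes `dsr` is an already-configured `DSR` instance inside a running SimPy simulation (the node wiring comes from the surrounding wins framework and is not shown here), and the monitor class itself is illustrative, not part of the project.

from SimPy.Simulation import Process, activate, now, waitevent

class RREQMonitor(Process):
    """Log route-discovery outcomes signalled by a DSR instance."""
    def __init__(self, dsr):
        Process.__init__(self, name="rreqmon")
        self.dsr = dsr
    def run(self):
        events = (self.dsr.sndrreq, self.dsr.finrreq, self.dsr.drprreq)
        while True:
            yield waitevent, self, events
            # TXRREQ signals each of these events with the RREQ target address
            if self.dsr.sndrreq in self.eventsFired:
                print now(), "RREQ sent for", self.dsr.sndrreq.signalparam
            if self.dsr.finrreq in self.eventsFired:
                print now(), "route discovery finished for", self.dsr.finrreq.signalparam
            if self.dsr.drprreq in self.eventsFired:
                print now(), "route discovery abandoned for", self.dsr.drprreq.signalparam

# hypothetical wiring; `dsr` must come from the surrounding simulation setup
# mon = RREQMonitor(dsr)
# activate(mon, mon.run())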
Example #21
class StorageNode(IDable, Thread, RTI):
    """Base storage node."""
    def __init__(self, cnode, ID, configs):
        IDable.__init__(self, '%s/sn%s' % (cnode.ID.split('/')[0], ID))
        Thread.__init__(self)
        RTI.__init__(self, self.ID)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.system = cnode.system
        self.configs = configs
        self.cnode = cnode
        self.maxNumTxns = configs.get('max.num.txns.per.storage.node', 1024)
        self.pool = Resource(self.maxNumTxns, name='pool', unitName='thread')
        self.groups = {}  #{gid : group}
        self.newTxns = []
        self.txnsRunning = set([])
        self.shouldClose = False
        self.monitor = Profiler.getMonitor(self.ID)
        self.M_POOL_WAIT_PREFIX = '%s.pool.wait' % self.ID
        self.M_TXN_RUN_PREFIX = '%s.txn.run' % self.ID
        self.M_NUM_TXNS_RUN_KEY = '%s.num.txns' % self.ID
        self.runningThreads = set([])
        self.closeEvent = SimEvent()
        self.newTxnEvent = SimEvent()

    @property
    def load(self):
        return len(self.txnsRunning) + len(self.newTxns)

    def close(self):
        self.logger.info('Closing %s at %s' % (self, now()))
        self.shouldClose = True
        self.closeEvent.signal()

    def onTxnArrive(self, txn):
        self.newTxns.append(txn)
        self.newTxnEvent.signal()

    def onTxnsArrive(self, txns):
        self.newTxns.extend(txns)
        self.newTxnEvent.signal()

    def newTxnRunner(self, txn):
        class DefaultTxnRunner(Thread):
            def __init__(self, snode, txn):
                Thread.__init__(self)
                self.snode = snode
                self.txn = txn
                self.logger = logging.getLogger(self.__class__.__name__)

            def run(self):
                self.logger.debug('Running transaction %s at %s' %
                                  (txn.ID, now()))
                yield hold, self, RandInterval.get('expo', 100).next()

        return DefaultTxnRunner(self, txn)

    class TxnStarter(Thread):
        def __init__(self, snode, txn):
            Thread.__init__(self)
            self.snode = snode
            self.txn = txn

        def run(self):
            #add self and txn to snode
            self.snode.runningThreads.add(self)
            #wait for pool thread resource if necessary
            #self.snode.logger.debug(
            #    '%s start txn=%s, running=%s, outstanding=%s'
            #    %(self.snode, self.txn.ID,
            #      '(%s)'%(','.join([t.ID for t in self.snode.txnsRunning])),
            #      '(%s)'%(','.join([t.ID for t in self.snode.newTxns]))
            #     ))
            self.snode.monitor.start(
                '%s.%s' % (self.snode.M_POOL_WAIT_PREFIX, self.txn.ID))
            yield request, self, self.snode.pool
            self.snode.monitor.stop(
                '%s.%s' % (self.snode.M_POOL_WAIT_PREFIX, self.txn.ID))
            #start runner and wait for it to finish
            thread = self.snode.newTxnRunner(self.txn)
            assert self.txn not in self.snode.txnsRunning, \
                    '%s already started txn %s'%(self.snode, self.txn)
            self.snode.txnsRunning.add(self.txn)
            self.snode.monitor.observe(self.snode.M_NUM_TXNS_RUN_KEY,
                                       len(self.snode.txnsRunning))
            self.snode.monitor.start(
                '%s.%s' % (self.snode.M_TXN_RUN_PREFIX, self.txn.ID))
            thread.start()
            yield waitevent, self, thread.finish
            self.snode.monitor.stop('%s.%s' %
                                    (self.snode.M_TXN_RUN_PREFIX, self.txn.ID))
            yield release, self, self.snode.pool
            #clean up
            self.snode.txnsRunning.remove(self.txn)
            self.snode.runningThreads.remove(self)
            self.snode.cnode.onTxnDepart(self.txn)

    def run(self):
        #the big while loop
        while True:
            yield waitevent, self, self.newTxnEvent
            while len(self.newTxns) > 0:
                #pop from new txn to running txn
                txn = self.newTxns.pop(0)
                #start
                thread = StorageNode.TxnStarter(self, txn)
                thread.start()
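
`TxnStarter` above combines three classic SimPy 2 primitives: a `Resource` pool that bounds concurrency, a `hold` that models the work, and a `SimEvent` that lets the starter wait for its runner to finish. The standalone sketch below reproduces just that pattern; the class names, durations, and the explicit `finish` event are illustrative stand-ins, not part of the `StorageNode` code.

from SimPy.Simulation import (Process, Resource, SimEvent, activate, initialize,
                              now, simulate, hold, request, release, waitevent)

class Runner(Process):
    """Plays the role of the inner txn runner: do some work, then signal."""
    def __init__(self, name, work):
        Process.__init__(self, name=name)
        self.work = work
        self.finish = SimEvent(name=name + ".finish")
    def run(self):
        yield hold, self, self.work           # simulate the transaction work
        self.finish.signal(self)

class Starter(Process):
    """Plays the role of TxnStarter: pool slot -> run -> wait -> release."""
    def __init__(self, name, pool, work):
        Process.__init__(self, name=name)
        self.pool = pool
        self.work = work
    def run(self):
        yield request, self, self.pool        # block until a pool thread frees up
        runner = Runner(self.name + ".runner", self.work)
        activate(runner, runner.run())
        yield waitevent, self, runner.finish  # wait for the runner to finish
        yield release, self, self.pool        # hand the pool slot back
        print now(), self.name, "done"

initialize()
pool = Resource(capacity=2, name="pool", unitName="thread")
for i in range(4):
    s = Starter("txn%d" % i, pool, work=5.0 + i)
    activate(s, s.run())
simulate(until=100)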
Example #22
File: agent.py Project: reidlindsay/wins
class Agent(Element):
    """Act as a source and sink.

    This module generates (and receives) traffic for (and from) other Agents.
    There are two ports for traffic: the egress port 'TX' and ingress port 'RX'.
    This traffic module can operate in one of three modes: 'cbr', 'poisson', and
    the default 'backlog'.
    
    The traffic source generates fixed length packets, but varies the
    interpacket wait time based on the `mode` of operation and the average
    `delay`. The 'backlog' mode only works if the parent container of the
    `Agent` is a node containing a valid 'mac' object.

    If the parent container of `Agent` is a node containing a valid 'net'
    object, this module will use the source and destination addresses of the
    received packets for indexing into the `recvbuffer`.

    :ivar nsent: Number of packets sent by `Agent`.
    :ivar nrcvd: Number of packets received by `Agent`.
    :ivar delay: Average delay (in sec).
    :ivar plen:  Fixed packet length (in bytes).
    :ivar recvbuffer: Receive buffer maintained to keep track of duplicate
                      packets; buffer size is `RecvBufferSize`.

    :cvar DefaultPacketLength: Default packet length (in bytes).
    :cvar MaxSeqNo: Maximum sequence number.
    :cvar TrafficModes: List of supported traffic generating modes.
    :cvar DefaultTrafficMode: Default traffic generating mode.
    :cvar RecvBufferSize: Buffer size of `recvbuffer`.
    :cvar mode: Property access/modify current mode of operation
                (see valid `TrafficModes`).
    :cvar mac: Property to access `MAC` module of parent node.
    :cvar addr: Property to access address of `NET` module of parent node.
    :cvar seqno: Property to access internal sequence number.
    :cvar broadcast: Property to access broadcast address of `NET` module of
                     parent node.
    """
    name = "agent"
    tracename = "AGT"
    DefaultPacketLength = 1024
    MaxSeqNo = 65536
    TrafficModes = ["backlog", "cbr", "poisson"]
    DefaultTrafficMode = "backlog"
    RecvBufferSize = 16
    def __init__(self, *args, **kwargs):
        """Constructor."""
        self._seqno, self._mode = 0, None
        self._payload = None
        # set parameters
        self.delay = None
        self.nsent, self.nrcvd = 0, 0
        self.dest, self.plen = None, None
        self.recv, self.send = SimEvent(), SimEvent()
        self.recvbuffer = {}
        Element.__init__(self, *args, **kwargs)

    mode = property(fget=lambda self: self.get_mode(), \
                    fset=lambda self, m: self.set_mode(m))
    mac = property(fget=lambda self: self.get_mac())
    addr = property(fget=lambda self: self.get_address())
    seqno = property(fget=lambda self: self._seqno)
    broadcast = property(fget=lambda self: self.get_address(broadcast=True))

    def configure(self, dest=None, plen=None, delay=None, mode=None, **kwargs):
        """Set up parameters and start agent if needed.

        :param dest: If `None`, the agent will not send any data; otherwise it
                     will send data to the desired `dest`.
        :param plen: Packet length for data to send.
        :param delay: Average delay to wait before sending a new packet.
        :param mode: Operation mode ("cbr", "poisson", or "backlog")
                     [default="backlog"].
        """
        # set up parameters
        if plen<1: plen = self.DefaultPacketLength
        self._seqno = 0
        self.dest = dest
        self.delay = delay
        self.mode = mode
        # initialize random data in payload buffer
        self.plen = plen
        x = np.random.randint(0, 128, self.plen*2)
        args = tuple(x.tolist())
        self._payload = struct.pack("b"*len(x), *args)
        # set up ports and start FSM's
        self.addport("TX"), self.addport("RX")
        f = self.newchild("txfsm", FSM, tracename=self.tracename+".TX")
        g = self.newchild("rxfsm", FSM, tracename=self.tracename+".RX")
        f.goto(self.INIT)
        g.goto(self.RECV)

    def connect(self, net):
        """Connect to a network layer protocol."""
        self.TX.connect(net.RXU)
        net.TXU.connect(self.RX)

    def INIT(self, fsm):
        """INIT state; *overload to implement initialization before `SEND`*."""
        yield fsm.goto(self.SEND)

    def SEND(self, fsm):
        """SEND state; keep backlog of data in MAC RXU queue."""
        yield hold, fsm, const.EPSILON         # yield to other threads
        if (not self.dest): yield fsm.stop()    # HALT and sleep
        # check parameters
        if (self.mode=="backlog") and self.mac:
            macQ = self.mac.RXU
            macQ.monitorQ = True
        # create packet to send downstream
        idx = self.seqno%self.plen
        pay = self._payload[idx:idx+self.plen]
        p = AGT(id=self.uid, seqno=self.seqno)/pay
        p.setanno('net-dst', self.dest)
        # set agent annotations
        txts = now()
        p.setanno('agt-txts', txts)
        p.setanno('agt-plen', self.plen)
        # additional send processing
        pkt = self.senddata(p)
        # send packet
        self.nsent += 1
        self.log("snd", pkt, seqno=self.seqno, nsent=self.nsent)
        self.nextseqno(update=True)       # update counter
        self.send.signal(pkt)
        yield self.TX.send(fsm, [pkt])
        # pause agent
        yield fsm.goto(self.PAUSE)

    def PAUSE(self, fsm):
        """PAUSE state; pause agent based on operation mode."""
        yield hold, fsm, 0  # yield to other threads
        if (not self.dest): yield fsm.stop()    # HALT and sleep
        # wait for MAC to empty in 'backlog' mode
        p, moredata = None, False        # need to send more data
        if (self.mode=="backlog") and self.mac:
            macQ = self.mac.RXU
            # check MAC input queue
            while (not moredata):
                yield waitevent, fsm, macQ.deQ
                p = macQ.deQ.signalparam
                if (macQ.length<1):
                    moredata = True
        # wait for prescribed delay
        d = None
        if self.delay:
            # pause based on operation mode
            # -> 'poisson' draws an exponentially distributed delay
            # -> otherwise use fixed delay
            if (self.mode=="poisson"): d = np.random.exponential(self.delay)
            else:                      d = self.delay
        # enforce minimum wait time
        d = max(d, const.EPSILON)
        self.log("WAIT", p, delay=d, avgdelay=self.delay)
        yield hold, fsm, d
        # goto CONTINUE
        yield fsm.goto(self.CONTINUE)

    def CONTINUE(self, fsm):
        """CONTINUE state; add processing after PAUSE before next SEND."""
        yield fsm.goto(self.SEND)

    def RECV(self, fsm):
        """RECV state; Receive traffic."""
        # get packet from lower layer
        yield self.RX.recv(fsm, 1)
        p = fsm.got[0]
        self.recv.signal(p)
        # check packet parameters
        assert isinstance(p, AGT)
        if self.addr and (not AGENT_ID_INDEX):
            # use addressing for indexing into recvbuffer
            assert p.hasanno('net-dst') and p.hasanno('net-src')
            src, dst = p.getanno('net-src'), p.getanno('net-dst')
            isforme = (dst==self.addr) or (dst==self.broadcast)
            errmsg = "[AGT]: Got packet not intended for me!"
            assert (isforme), errmsg
            index = src
        else:
            # use AGT.id for indexing into recvbuffer
            index = int(p.id)
        # set agent annotations
        rxts = now()
        latency = rxts - p.getanno('agt-txts')
        p.setanno('agt-rxts', rxts)
        p.setanno('agt-latency', latency)
        # check for duplicates -> drop DUP
        key = ID, seqno = int(p.id), int(p.seqno)
        if index not in self.recvbuffer: self.recvbuffer[index] = []
        if key in self.recvbuffer[index]:
            self.recvdup(p)
            self.log("dup", p)
        else:
            # log receive
            self.nrcvd += 1
            self.recvbuffer[index].append(key)
            pkt = self.recvdata(p)
            self.log("rcv", pkt)
        # trim receive buffer
        while (len(self.recvbuffer[index])>self.RecvBufferSize):
            x = self.recvbuffer[index].pop(0)
        # continue in RECV
        yield fsm.goto(self.RECV)

    def senddata(self, p):
        """Process received data packets as desired.

        :return: Data packet to send.

        By default, this method does nothing; *overload as needed*.
        """
        return p

    def recvdup(self, p):
        """Process duplicate packets as desired.

        By default, this method does nothing; *overload as needed*.
        """
        return p

    def recvdata(self, p):
        """Process received data packets as desired.

        :return: Processed packet.

        By default, this method does nothing; *overload as needed*.
        """
        return p

    def nextseqno(self, update=False):
        """Increment sequence number."""
        newseqno = (self.seqno+1)%self.MaxSeqNo
        if update: self._seqno = newseqno
        return newseqno

    def get_mode(self):
        """Get current traffic generating mode."""
        mode = self.DefaultTrafficMode
        if self._mode in self.TrafficModes: mode = self._mode
        return mode

    def set_mode(self, mode):
        """Set current traffic generating mode."""
        ismode = isinstance(mode, str) and (mode.lower() in self.TrafficModes)
        if ismode: mode = mode.lower()
        else:      mode = self.DefaultTrafficMode
        self._mode = mode

    def get_mac(self):
        """Return MAC of parent node."""
        mac = None
        if isinstance(self.parent, Base):
            if self.parent.haschild('mac'):
                mac = self.parent.mac
        if not isinstance(mac, MAC):
            mac = None
        return mac

    def get_address(self, broadcast=False):
        """Return network address of parent node."""
        addr, net = None, None
        if isinstance(self.parent, Base):
            if self.parent.haschild('net'):
                net = self.parent.net
        if isinstance(net, NET):
            addr = net.address
            if broadcast: addr = net.broadcast
        return addr

    def log(self, event=None, p=None, *args, **kwargs):
        """Overloaded to check verbose level and set common annotations."""
        force = False
        if ('verbose' in kwargs): force = (kwargs['verbose']>AGENT_VERBOSE)
        if self.verbose>AGENT_VERBOSE or force:
            kwargs.update(self.get_agt_anno(p))
            Element.log(self, event, p, *args, **kwargs)

    def get_agt_anno(self, p):
        """Get common annotations for logging data."""
        kwargs = {}
        kwargs['mode'] = self.mode
        if self.addr: kwargs['addr'] = self.addr
        if not isinstance(p, AGT): return kwargs
        kwargs['id'] = p.id
        kwargs['seqno'] = p.seqno
        kwargs['nsent'] = self.nsent
        kwargs['nrcvd'] = self.nrcvd
        if p.hasanno('cif-collision'):
            kwargs['cif-collision'] = strcollision(p)
            assert (kwargs['cif-collision'] is not None)
        if p.hasanno('dot11n-sinr'):
            kwargs['dot11n-sinr'] = "%.4f dB"%(p.getanno('dot11n-sinr') )
        if p.hasanno('dot11n-channel-fading'):
            kwargs['dot11n-channel-fading'] = "%.4f dB"%(p.getanno('dot11n-channel-fading') )
        if p.hasanno('phy-sinr'):
            kwargs['sinr'] = "%.4f dB"%(p.getanno('phy-sinr') )
        if p.hasanno('phy-rate'):
            kwargs['phy-rate'] = p.getanno('phy-rate')
        if p.hasanno('net-dst'):
            kwargs['net-dst'] = p.getanno('net-dst')
        if p.hasanno('net-src'):
            kwargs['net-src'] = p.getanno('net-src')
        if p.hasanno('net-root'):
            kwargs['net-root'] = p.getanno('net-root')
        if p.hasanno('mac-root'):
            kwargs['mac-root'] = p.getanno('mac-root')
        if p.hasanno('mac-txts'):
            kwargs['mac-txts'] = p.getanno('mac-txts')
        if p.hasanno('mac-rxts'):
            kwargs['mac-rxts'] = p.getanno('mac-rxts')
        if p.hasanno('agt-txts'):
            kwargs['agt-txts'] = p.getanno('agt-txts')
        if p.hasanno('agt-rxts'):
            kwargs['agt-rxts'] = p.getanno('agt-rxts')
        if p.hasanno('agt-plen'):
            kwargs['agt-plen'] = p.getanno('agt-plen')
        if p.hasanno('agt-latency'):
            kwargs['agt-latency'] = p.getanno('agt-latency')
        return kwargs
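
The duplicate check in `RECV` boils down to a bounded per-source list of `(id, seqno)` keys. A pure-Python rendition of just that bookkeeping is sketched below; the helper name and the standalone constant are illustrative and not part of `Agent`.

RECV_BUFFER_SIZE = 16   # mirrors Agent.RecvBufferSize

def seen_before(recvbuffer, index, ID, seqno):
    """Return True if (ID, seqno) was already received from `index`."""
    key = (ID, seqno)
    entries = recvbuffer.setdefault(index, [])
    if key in entries:
        return True                     # duplicate -> caller logs "dup"
    entries.append(key)
    while len(entries) > RECV_BUFFER_SIZE:
        entries.pop(0)                  # trim oldest keys, as RECV does
    return False

recvbuffer = {}
assert not seen_before(recvbuffer, 'node-1', 7, 0)   # first copy accepted
assert seen_before(recvbuffer, 'node-1', 7, 0)       # retransmission flagged
assert not seen_before(recvbuffer, 'node-2', 7, 0)   # sources tracked separately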