Example #1
    def __init__(self, env, parent, params):
        # Call parent constructor
        super().__init__(env)

        # Store the node that contains this manager (it is a DtnNode)
        self.parent = parent

        # Create a queue that can be locked and set the capacity to the specified value
        self.queue = DtnMaxCapacityQueue(env, parent, params.max_buffer_size)

        # Handshake queue
        self.handshake_queue = DtnQueue(env)

        # Lock to ensure that all operations related to putting one bundle in
        # the queue are "atomic"
        self.put_lock = DtnLock(env)

        # Get all possible connections originating at this node
        self.cons = {
            d: c
            for (o, d), c in self.env.connections.items()
            if o == self.parent.nid
        }

        # Create an engine for each connection in the system
        for dest, con in self.cons.items():
            # Process to get elements out of the queue to transmit
            env.process(self.queue_extractor(dest, con))
Example #2
    def __init__(self, env, nid, props):
        super().__init__(env)
        # Initialize node properties
        self.nid = nid
        self.type = env.config['network'].nodes[nid].type
        self.alias = env.config['network'].nodes[nid].alias
        self.props = props

        # Initialize variables
        self.generators = {}  # Map: {generator type: Generator class}
        self.queues = {}  # Map: {neighbor id: DtnNeighborManager}
        self.neighbors = []  # List of neighbors as specified in config file
        self.radios = {}  # Map: {radio id: Radio class}
        self.endpoints = {}  # Map: {eid: Endpoint class}

        # Convergence layers. This is a map of maps of maps of the following form:
        # {neighbor id: {duct id: {induct/outduct: DtnAbstractDuct subclass}}}
        self.ducts = defaultdict(lambda: defaultdict(dict))

        # Queue to store all bundles that are waiting to be forwarded
        self.in_queue = DtnQueue(env)

        # Queue for the limbo
        self.limbo_queue = DtnQueue(env)

        # Create variables to store results
        self.dropped = []
Example #3
    def __init__(self, env, name, parent, neighbor):
        super(DtnAbstractDuct, self).__init__(env)
        self.base_name = name  # See network architecture file (xml)
        self.parent = parent  # DtnNode
        self.neighbor = neighbor  # A string with the name of the neighbor
        self.monitor = self.env.monitor

        # Connection through which data is sent
        #self.conn = env.connections[parent.nid, neighbor]

        # Add the queue for the convergence layer. DTN does not control it and
        # therefore it is assumed to be plain FIFO
        self.in_queue = DtnQueue(env)

        # Queue to store messages that were not successfully sent by the duct
        # and thus must be sent to the node's limbo
        self.to_limbo = DtnQueue(self.env)

        # Queue to store messages that were successfully sent by the duct
        self.success_queue = DtnQueue(self.env)
Example #4
class DtnAbstractDuct(Simulable, metaclass=abc.ABCMeta):
    """ An abstract duct. It operates 2 queues:

        1) in_queue: Queue where all bundles to be sent are placed.
        2) to_limbo: Queue where bundles that fail to be sent by the
                     convergence layer are placed. The ``fail_manager``
                     function in this class takes them and puts them in
                     the node's limbo queue for re-routing.
    """
    duct_type = None

    def __init__(self, env, name, parent, neighbor):
        super(DtnAbstractDuct, self).__init__(env)
        self.base_name = name  # See network architecture file (xml)
        self.parent = parent  # DtnNode
        self.neighbor = neighbor  # A string with the name of the neighbor
        self.monitor = self.env.monitor

        # Connection through which data is sent
        #self.conn = env.connections[parent.nid, neighbor]

        # Add the queue for the convergence layer. DTN does not control it and
        # therefore it is assumed to be plain FIFO
        self.in_queue = DtnQueue(env)

        # Queue to store messages that were not successfully sent by the duct
        # and thus must be sent to the node's limbo
        self.to_limbo = DtnQueue(self.env)

        # Queue to store messages that were successfully sent by the duct
        self.success_queue = DtnQueue(self.env)

    def initialize(self, peer, **kwargs):
        # Peer duct (for an outduct it is an induct and vice versa)
        self.peer = peer

        # Activate the process for this convergence layer
        self.env.process(self.run())

        # Run the fail manager that returns bundles that failed to be sent for
        # re-routing
        self.env.process(self.fail_manager())

        # Run the success manager
        self.env.process(self.success_manager())

    @property
    def is_alive(self):
        return self.parent.is_alive

    @abc.abstractmethod
    def total_datarate(self, dest):
        pass

    @property
    def transmit_mode(self):
        return 'fwd' if self.duct_type == 'outduct' else 'ack'

    @property
    def name(self):
        return '{} {} ({}-{})'.format(self.__class__.__name__, self.base_name,
                                      self.parent.nid, self.neighbor)

    @property
    def stored(self):
        return self.in_queue.stored

    @property
    @abc.abstractmethod
    def radios(self):
        """ Returns a dictionary with the radios for this duct """
        pass

    def send(self, message):
        """ Pass a message to the duct for transmission
            This is a non-blocking call.
        """
        self.env.process(self.do_send(message))

    def do_send(self, message):
        # Add to the queue
        self.disp('{} delivered to {}', message, self.__class__.__name__)
        yield from self.in_queue.put(message)

    @abc.abstractmethod
    def run(self):
        pass

    def ack(self, message):
        """ This is called by a DtnConnection if you transmit something an specify direction
            equals "ack". It is more general than LTP, it is a generic mechanism to communicate
            "backwards" between two ducts (from rx's induct to tx's outduct) and no go through
            the in_queue (that takes message from the upper layer)
        """
        self.env.process(self.do_ack(message))

    def do_ack(self, message):
        """ A duct, by default should not be able to perform ack. See DtnOutductLTP for an example
            of implementation
        """
        # Fake yield to ensure this is a coroutine
        yield self.env.timeout(0)

        # This is not allowed unless this function is re-implemented
        # See DtnOutductLTP and DtnInductLTP for an example of how to use it
        raise RuntimeError('You cannot ACK in this convergence layer')

    def notify_success(self, message):
        self.env.process(self.do_notify_success(message))

    def do_notify_success(self, message):
        yield from self.success_queue.put(message)

    def success_manager(self):
        while self.is_alive:
            # Wait for a block that was successfully transmitted
            # Do nothing in the default implementation. Just here to
            # prevent the queue from filling indefinitely
            yield from self.success_queue.get()

    @abc.abstractmethod
    def radio_error(self, message):
        """ This function signals an LTP session that it needs to terminate because an error
            has occurred in the radio. The ``fail_manager`` will then be responsible to put
            the corresponding bundles to the node's limbo.
        """
        pass

    def fail_manager(self):
        while self.is_alive:
            # Wait for a block that was not successfully transmitted
            # by an LTP session
            bundle = yield from self.to_limbo.get()

            # Get the cid that needs to be excluded. This is not very neat, but
            # essentially reaches into the current DtnNeighborManager for this
            # neighbor and pulls the contact id
            cid = self.parent.queues[self.neighbor].current_cid

            # Send to node's limbo for re-routing
            self.parent.limbo(bundle, cid)
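
For reference, here is a minimal sketch of a concrete outduct built on the abstract class above. It is illustrative only: the radio id 'basic' and the assumption that the radio object exposes a non-blocking ``put`` (a wrapper around ``do_put`` as shown for DtnBasicRadio below) are not taken from these examples.

class DtnSketchOutduct(DtnAbstractDuct):
    """ Illustrative outduct: pulls bundles from in_queue and hands them to one radio """
    duct_type = 'outduct'

    def initialize(self, peer, radio='basic', **kwargs):
        # 'basic' is a hypothetical radio id; it must exist in the parent node
        self.radio = self.parent.radios[radio]
        super().initialize(peer, **kwargs)

    @property
    def radios(self):
        return {'radio': self.radio}

    def total_datarate(self, dest):
        return self.radio.datarate

    def radio_error(self, message):
        # Push the failed bundle to the fail manager, which sends it to the node's limbo
        self.env.process(self.to_limbo.put(message))

    def run(self):
        while self.is_alive:
            # Wait for the next bundle delivered through ``send``
            bundle = yield from self.in_queue.get()

            # Hand it to the radio (assumes a non-blocking ``put`` on the radio)
            self.radio.put(self.neighbor, bundle, self.peer, self.transmit_mode)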
Example #5
    def initialize_fwd_handler(self, hid):
        self.fwd_handlers.add(hid)
        self.fwd_queues[hid] = DtnQueue(self.env)
        self.env.process(self.run_fwd_handler(hid))
Example #6
    def __init__(self, env, parent, shared=True):
        # Call parent constructor
        super(DtnBasicRadio, self).__init__(env, parent, shared)

        # Create input queue
        self.in_queue = DtnQueue(env)
Example #7
class DtnBasicRadio(DtnAbstractRadio):

    def __init__(self, env, parent, shared=True):
        # Call parent constructor
        super(DtnBasicRadio, self).__init__(env, parent, shared)

        # Create input queue
        self.in_queue = DtnQueue(env)

    @property
    def stored(self):
        df = self.in_queue.stored
        df['where'] = 'radio'
        return df

    def initialize(self, rate=0, BER=0, J_bit=0, **kwargs):
        # Store configuration parameters
        self.datarate  = rate
        self.BER       = BER
        self.J_bit     = J_bit

        # Call parent initializer
        super(DtnBasicRadio, self).initialize()

    def do_put(self, neighbor, message, peer, direction):
        # Create item to send
        item = (neighbor, message, peer, direction)

        # Add it to the queue
        yield from self.in_queue.put(item)

    def run(self, **kwargs):
        while self.is_alive:
            # Get the next segment to transmit
            item = yield from self.in_queue.get()

            # Unpack item
            neighbor, message, peer, direction = item

            # Get the connection to send this message through
            conn = self.outcons[neighbor]

            # Compute total transmission time
            Ttx = message.num_bits/self.datarate

            # Apply delay for radio to transmit entire segment
            yield self.env.timeout(Ttx)

            # Count the energy consumed
            self.energy += message.num_bits * self.J_bit

            # Transmit the message through the connection.
            self.send_through_connection(message, conn, peer, direction)

    def send_through_connection(self, message, conn, peer, direction):
        """ Send a message through a connection

            :param Message message: The message to send
            :param conn: The connection to send the message through
            :param peer: The peer duct that will receive the message
            :param str direction: 'fwd' vs. 'ack'. By default 'fwd', use 'ack'
                                  for the acknowledgement messages of protocols
                                  like LTP (from dest to origin).
        """
        # This is a non-blocking call since the bundle is out in transit
        conn.transmit(peer, message, self.BER, direction=direction)
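
A quick numeric check of the timing and energy arithmetic in ``run`` above, with hypothetical radio settings (the values are illustrative and not taken from any configuration file):

# Hypothetical settings: 256 kbps link, 50 nJ per bit, 1 Mbit message
rate     = 256e3    # bits per second
J_bit    = 50e-9    # Joules per transmitted bit
num_bits = 1e6      # message size in bits

Ttx    = num_bits / rate     # transmission time: ~3.91 s
energy = num_bits * J_bit    # energy consumed: 0.05 J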
Example #8
class DtnEpidemicManager(Simulable):
    def __init__(self, env, parent, params):
        # Call parent constructor
        super().__init__(env)

        # Store the node that contains this manager (it is a DtnNode)
        self.parent = parent

        # Create a queue that can be locked and set the capacity to the specified value
        self.queue = DtnMaxCapacityQueue(env, parent, params.max_buffer_size)

        # Handshake queue
        self.handshake_queue = DtnQueue(env)

        # Lock to ensure that all operations related to putting one bundle in
        # the queue are "atomic"
        self.put_lock = DtnLock(env)

        # Get all possible connections originating at this node
        self.cons = {
            d: c
            for (o, d), c in self.env.connections.items()
            if o == self.parent.nid
        }

        # Create an engine for each connection in the system
        for dest, con in self.cons.items():
            # Process to get elements out of the queue to transmit
            env.process(self.queue_extractor(dest, con))

    @property
    def is_alive(self):
        return self.parent.is_alive

    def put(self, rt_record, priority):
        self.env.process(self.do_put(rt_record, priority))

    def do_put(self, rt_record, priority):
        # Only one bundle can be put into the queue at a time. Otherwise, you
        # have race conditions (e.g. you make room for a bundle, but before it
        # is actually put in the queue another bundle takes its room)
        yield self.put_lock.acquire()

        # Put the bundle in the queue. Bundles that were displaced to make room
        # are returned in ``to_drop``
        to_drop = yield from self.queue.put(rt_record, priority, where='left')

        # Drop bundles
        for rt_record in to_drop:
            self.parent.drop(rt_record.bundle,
                             drop_reason='Opportunistic queue full')

        # Release the lock to signal that another bundle can start the process
        # of getting added to the queue
        self.put_lock.release()

    def queue_extractor(self, dest, conn):
        while self.is_alive:
            # Wait until the next connection starts
            if conn.is_red: yield conn.green

            # Handshake with the other node to see which data is missing on it
            # NOTE: This is a blocking call since you cannot do the rest until
            # this is completed.
            to_send = yield from self.do_handshake(dest, conn)

            # If the connection is red again, continue
            if conn.is_red: continue

            # Send all data that was flagged as missing in the other node
            for rt_record in to_send:
                # Log and monitor exit of bundle
                self.disp('{} departs from the manager', rt_record.bundle)

                # Send this bundle towards the convergence layer
                self.send(dest, rt_record)

                # If the connection is red again, continue
                if conn.is_red: continue

            # Wait until connection ends
            if conn.is_green: yield conn.red

    def send(self, dest, rt_record):
        self.parent.forward_to_outduct(dest, rt_record)

    def do_handshake(self, dest, conn):
        # Gather list of all bundles you have
        bids = tuple(self.queue.keys())

        # Create a bundle
        bnd = Bundle(self.env,
                     self.parent.nid,
                     dest,
                     'telemetry',
                     16 + 8 * len(bids),
                     1,
                     False,
                     eid=1)

        # Add the data to this bundle
        bnd.data = bids

        # Get a new routing record. 1 = critical priority
        rt_record = self.parent.router.new_record(bnd, 1)

        # Send this record
        self.send(dest, rt_record)

        # Wait until either the connection ends or you receive the response
        # of the handshaking mechanism
        yield conn.red | self.handshake_queue.is_empty()

        # If the connection is red, then the handshake has failed
        if conn.is_red: return

        # Get the bundle from the peer
        bnd = yield from self.handshake_queue.get()

        print('hola')
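
The line ``yield conn.red | self.handshake_queue.is_empty()`` in ``do_handshake`` relies on SimPy condition events: or-ing two events produces an AnyOf condition that triggers as soon as either event fires. A standalone sketch of that pattern, independent of the DTN classes:

import simpy

def waiter(env, slow, fast):
    # '|' builds an AnyOf condition: the process resumes when either event fires
    yield slow | fast
    print('resumed at t =', env.now)

env = simpy.Environment()
env.process(waiter(env, env.timeout(10), env.timeout(2)))
env.run()   # prints: resumed at t = 2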
Example #9
class DtnNode(Simulable):
    def __init__(self, env, nid, props):
        super().__init__(env)
        # Initialize node properties
        self.nid = nid
        self.type = env.config['network'].nodes[nid].type
        self.alias = env.config['network'].nodes[nid].alias
        self.props = props

        # Initialize variables
        self.generators = {}  # Map: {generator type: Generator class}
        self.queues = {}  # Map: {neighbor id: DtnNeighborManager}
        self.neighbors = []  # List of neighbors as specified in config file
        self.radios = {}  # Map: {radio id: Radio class}
        self.endpoints = {}  # Map: {eid: Endpoint class}

        # Convergence layers. This is a map of maps of maps of the following form:
        # {neighbor id: {duct id: {induct/outduct: DtnAbstractDuct subclass}}}
        self.ducts = defaultdict(lambda: defaultdict(dict))

        # Queue to store all bundles that are waiting to be forwarded
        self.in_queue = DtnQueue(env)

        # Queue for the limbo
        self.limbo_queue = DtnQueue(env)

        # Create variables to store results
        self.dropped = []

    def reset(self):
        # Reset node elements
        self.router.reset()
        self.selector.reset()
        for _, gen in self.generators.items():
            gen.reset()
        for _, radio in self.radios.items():
            radio.reset()

    @property
    def available_radios(self):
        return self.radios

    def initialize(self):
        """ Initialize the node. Note that his can only be done after the connections
            have been created.
        """
        self.mobility_model = self.env.mobility_models[
            self.props.mobility_model]

        # Initialize bundle generators for this node
        self.initialize_bundle_generators()

        # Initialize the band selector
        self.initialize_outduct_selector()

        # Initialize radios. Note: This must be done prior to initializing
        # the ducts since ducts require radios. Ducts are initialized in the
        # environment.
        self.initialize_radios()

        # Now that you have created everything, start the forward and limbo managers
        self.env.process(self.forward_manager())
        self.env.process(self.limbo_manager())

    def initialize_bundle_generators(self):
        # Initialize variables
        config = self.env.config

        # Get the list of generators for this node
        gens = config[self.type].generators

        # Instantiate generators dynamically based on class name
        for gen in gens:
            clazz = getattr(config[gen], 'class')
            module = importlib.import_module(f'simulator.generators.{clazz}')
            clazz = getattr(module, clazz)

            # Create the generator
            self.generators[gen] = clazz(self.env, self, config[gen])

            # Initialize the generator
            self.generators[gen].initialize()

    def initialize_router(self):
        # Initialize variables
        config = self.env.config

        # Get type of router for this node
        router_type = config[self.type].router

        # Instantiate the router dynamically based on class name
        clazz = load_class_dynamically('simulator.routers',
                                       getattr(config[router_type], 'class'))
        self.router = clazz(self.env, self)

        # Initialize this router
        self.router.initialize()

        # If this router is not opportunistic, you are done
        if not self.router.opportunistic: return

        # If this is an opportunistic router, then a specialized queue
        # manager is needed.
        clazz = load_class_dynamically('simulator.nodes',
                                       config[router_type].manager)
        self.queues['opportunistic'] = clazz(self.env, self,
                                             config[router_type])

    def initialize_outduct_selector(self):
        # Initialize variables
        config = self.env.config

        # Get type of selector for this node
        selector_type = config[self.type].selector

        # Instantiate the selector dynamically based on class name
        clazz = getattr(config[selector_type], 'class')
        module = importlib.import_module(f'simulator.selectors.{clazz}')
        clazz = getattr(module, clazz)
        self.selector = clazz(self.env, self)

        # Initialize this selector
        self.selector.initialize()

    def initialize_radios(self):
        # Iterate over the radios
        for radio in self.props.radios:
            # Create the radio for this duct
            class_name = getattr(self.config[radio], 'class')
            clazz = load_class_dynamically('simulator.radios', class_name)

            # Get properties
            radio_props = dict(self.config[radio])

            # Store the new radio
            self.radios[radio] = clazz(self.env, self)

            # Initialize the radio
            self.radios[radio].initialize(**radio_props)

    def initialize_neighbors_and_ducts(self):
        # Iterate over all neighbors
        for orig, neighbor in self.env.connections.keys():
            # If this is not the right origin, continue
            if orig != self.nid: continue

            # Store this neighbor
            self.neighbors.append(neighbor)

            # Create and store the priority queue for this neighbor
            self.queues[neighbor] = DtnCgrNeighborManager(
                self.env, self, neighbor)

            # Get the neighbor node and the connection between them
            other = self.env.nodes[neighbor]
            conn = self.env.connections[self.nid, neighbor]

            # Iterate over defined ducts and create them
            for duct_id, duct_name in self.env.config[conn.type].ducts.items():
                # Get the properties of this duct
                props = self.env.config[duct_name]

                # Initialize variables
                iduct, oduct = None, None

                # Create the induct and outduct
                for class_name in getattr(props, 'class'):
                    # Load class type. Since you can't know if it is an induct or outduct,
                    # try both.
                    try:
                        clazz = load_class_dynamically(
                            'simulator.ducts.inducts', class_name)
                    except ModuleNotFoundError:
                        clazz = load_class_dynamically(
                            'simulator.ducts.outducts', class_name)

                    # Construct depending on whether it is an induct or outduct
                    if clazz.duct_type is None:
                        raise RuntimeError(
                            f'{clazz} has no duct_type defined. Is it an induct or outduct?'
                        )
                    elif clazz.duct_type == 'outduct':
                        oduct = clazz(self.env, duct_id, self, neighbor)
                    elif clazz.duct_type == 'induct':
                        iduct = clazz(self.env, duct_id, other, orig)
                    else:
                        raise RuntimeError(
                            f'{clazz} has duct_type = {clazz.duct_type}. Valid options '
                            f'are "induct" and "outduct"')

                # If either the outduct or the induct is not set, raise an error
                if iduct is None or oduct is None:
                    raise RuntimeError(f'Could not create duct {duct_id}')

                # Store the newly created ducts
                self.ducts[neighbor][duct_id]['outduct'] = oduct
                other.ducts[orig][duct_id]['induct'] = iduct

            # Iterate over defined ducts and initialize them. This is done separately
            # since you need to have created all ducts to initialize them for ParallelLTP
            for duct_id, duct_name in self.env.config[conn.type].ducts.items():
                # Get the properties of this duct
                props = self.env.config[duct_name]

                oduct = self.ducts[neighbor][duct_id]['outduct']
                iduct = other.ducts[orig][duct_id]['induct']

                # Initialize duct parameters. Initialization must happen after creating the ducts
                # since they must point to each other.
                oduct.initialize(iduct, **dict(props))
                iduct.initialize(oduct, **dict(props))

        # For now, assume that there is no need for an opportunistic queue.
        # If so, it will be initialized with the router.
        self.queues['opportunistic'] = None

    def initialize_endpoints(self):
        # Add any additional endpoints
        for eid, ept_class in self.props.endpoints.items():
            # Handle special case for default endpoint
            if eid == 0:
                self.endpoints[eid] = DtnDefaultEndpoint(self.env, self)
                self.endpoints[eid].initialize()
                continue

            # Find the endpoint type
            clazz = load_class_dynamically('simulator.endpoints', ept_class)

            # Store the new endpoint
            self.endpoints[eid] = clazz(self.env, self)

            # Initialize endpoint
            if eid in self.config:
                self.endpoints[eid].initialize(**dict(self.config[eid]))
            else:
                self.endpoints[eid].initialize()

    def initialize_neighbor_managers(self):
        for neighbor, mgr in self.queues.items():
            if neighbor.lower() == 'opportunistic':
                continue
            mgr.initialize()

    def forward_manager(self):
        """ This agent pulls bundles from the node incoming queue for processing.
            It ensures that this happens one at a time following the order in which
            they are added to the queue. Note that both new bundles and bundles awaiting
            re-routing will be directed to the ``in_queue`` (see ``forward`` vs ``limbo``)
        """
        # Iterate forever looking for new bundles to forward
        while self.is_alive:
            # Wait until there is a bundle to process
            item = yield from self.in_queue.get()

            # Unpack item
            bundle, first_time = item[0], item[1]

            # Trigger forwarding mechanism. If you add a delay in the forwarding mechanism,
            # this delay will be preserved here. To have non-blocking behavior, use
            # ``self.env.process(self.process_bundle(item[0], first_time=item[1]))``
            self.process_bundle(bundle, first_time=first_time)

    def process_bundle(self, bundle, first_time=True):
        """ Process this bundle in the node. This entails:
                1) If this node is the destination, you are done
                2) Otherwise, route the bundle
                3) If no routes are available, drop
                4) Otherwise, put the bundle into one or more neighbor queues to await transmission by
                   the corresponding convergence layer

            :param bundle: The bundle to forward
            :param first_time: True if this is the first time this node sees this bundle
        """
        # If this bundle has errors, drop immediately
        if bundle.has_errors:
            self.drop(bundle, 'error')
            return

        # If this bundle exceeds the TTL, drop it
        if self.check_bundle_TTL(bundle):
            return

        # Add this node to the list of visited nodes (NOTE: must be done before ``find_routes``)
        if first_time: bundle.visited.append(self.nid)

        # Reset the list of excluded contacts
        if first_time: bundle.excluded = []

        # If bundle has reached destination, simply store
        if bundle.dest == self.nid:
            self.arrive(bundle)
            return

        # Get contacts for this bundle
        records_to_fwd, cids_to_exclude = self.router.find_routes(
            bundle, first_time)

        # If router asks for limbo, send this bundle there
        if records_to_fwd == 'limbo':
            self.limbo(bundle, cids_to_exclude)
            return

        # If you can neither forward nor re-route, then drop
        if not records_to_fwd and not cids_to_exclude:
            self.drop(bundle, 'unroutable')
            return

        # If the router has indicated to drop, do it
        if records_to_fwd == 'drop':
            self.drop(bundle, 'router_drops')
            return

        for record in records_to_fwd:
            # Log this successful routing event
            self.disp('{} is routed towards {}', record.bundle,
                      record.contact['dest'])

            # Get the record to forward. If critical and first time, deepcopy it
            to_fwd = deepcopy(
                record) if bundle.critical and first_time else record

            # Pass the bundle to the appropriate neighbor manager
            self.store_routed_bundle(to_fwd)

        # If you have forwarded at least once, you are done
        if records_to_fwd: return

        # Trigger re-routing, excluding the appropriate contacts
        self.limbo(bundle, cids_to_exclude)

    def store_routed_bundle(self, rt_record):
        # Get the neighbor node to send to
        neighbor = rt_record.neighbor

        # Put in the queue
        self.queues[neighbor].put(rt_record, rt_record.priority)

    def forward_to_outduct(self, neighbor, rt_record):
        """ Called whenever a bundle successfully exits a DtnNeighborManager """
        # Initialize variables
        bundle = rt_record.bundle

        # Get the outduct for this bundle
        duct = self.selector.select_duct(neighbor, bundle)

        # If bundle TTL is exceeded, do not forward
        if self.check_bundle_TTL(bundle):
            return

        # Put bundle in the convergence layer
        duct['outduct'].send(bundle)

    def forward(self, bundle):
        """ Put a bundle in the queue of bundles to route. Forward should be used
            the first time that you route a bundle. For bundles that are being
            re-routed, use ``limbo`` instead

            .. Tip:: This function never blocks despite the ``yield from`` because
                     the input queue has infinite capacity
        """
        self.env.process(self.do_forward(bundle))

    def do_forward(self, bundle):
        yield from self.in_queue.put((bundle, True))

    def limbo(self, bundle, contact_ids):
        """ Put a bundle in the queue of bundles to route (this bundle is re-routed
            and thus this is equivalent to ION's limbo). Add the provided contacts
            as excluded since you already tried to send this bundle through them

            .. Tip:: This function never blocks despite the ``yield from`` because
                     the input queue has infinite capacity
        """
        # HACK: If contact_ids is None, try re-routing again
        if contact_ids is not None:
            if not isinstance(contact_ids, (list, tuple)):
                contact_ids = (contact_ids, )
            bundle.excluded.extend(contact_ids)
        self.env.process(self.do_limbo(bundle))

    def do_limbo(self, bundle):
        # If the limbo wait is infinite, wait for a second here.
        # Otherwise you will try to re-route the bundle at the same instant
        # in time, thus creating an infinite loop
        if self.props.limbo_wait == float('inf'):
            yield self.env.timeout(1)

        # Add to the limbo queue
        yield from self.limbo_queue.put((bundle, False))

    def limbo_manager(self):
        # Initialize variables
        dt = self.props.limbo_wait

        # if dt = inf, then you want to pause the limbo until there
        # is something in it
        check_empty = dt == float('inf')

        while self.is_alive:
            # Wait for a while. Only do this if you specified a rate at
            # which to pull from the queue
            if not check_empty: yield self.env.timeout(dt)

            # Get everything from the queue
            items = yield from self.limbo_queue.get_all(
                check_empty=check_empty)

            # Put all items in the input queue
            for item in items:
                yield from self.in_queue.put(item)

    def check_bundle_TTL(self, bundle):
        if self.t - bundle.creation_time < bundle.TTL:
            return False

        # Drop the bundle
        self.drop(bundle, f'TTL (t={self.t})')

        return True

    def arrive(self, bundle):
        # If the node is not alive, drop the bundle and stop
        if not self.is_alive:
            self.drop(bundle, 'dead_node')
            return

        # Mark arrival
        self.disp('{} arrives at destination', bundle)
        bundle.arrived = True
        bundle.arrival_time = self.t
        bundle.latency = bundle.arrival_time - bundle.creation_time

        # Dispatch bundle to the appropriate endpoint depending on the bundle EID
        self.endpoints[bundle.eid].put(bundle)

    def drop(self, bundle, drop_reason):
        self.disp('{} is dropped at node {}', bundle, self.nid)
        bundle.dropped = True
        bundle.drop_reason = drop_reason
        self.dropped.append(bundle)

    def radio_error(self, message):
        self.disp('Error in radio')

    def __str__(self):
        return '<DtnNode {}>'.format(self.nid)