Code Example #1
class Switch(object):
    """switches"""
    def __init__(self, dpid, vid):
        self.hosts = {}
        self.dpid = dpid
        self.vid = vid
        self.rt = RoutingTable(self.dpid, self.vid)
        # port_no - neighbor_switch
        self.neighbors = {}
        self.host_forwarding_table = {}

    @staticmethod
    def is_edge_switch(dpid):
        return dpid not in core_switches

    def addHost(self, host):
        self.hosts[host.local_mac] = host
        host.set_switch(self)

    def buildNeighborRT(self):
        if not self.neighbors:
            return

        # fill in neighbors first
        self.rt.fillInNeighbors(self.neighbors)

    def isGateway(self, switch_a):
        """Judge whether switch_a is a level-k gateway of self; return all qualifying levels."""
        levels = []
        distance = self.vid.getLevel(switch_a.vid)
        for neighbor_of_a in switch_a.neighbors.itervalues():
            level = self.vid.getLevel(neighbor_of_a.vid)
            if distance < level:
                levels.append(level)
        return levels

    def add_host_forwarding_entry(self, host):
        if self.host_forwarding_table.get(host):
            return False

        self.host_forwarding_table[host] = 0
        return True

    def __eq__(self, switch):
        return self.dpid == switch.dpid

    def __str__(self):
        return str(self.vid) + str(self.hosts)

    def __repr__(self):
        return str(self.vid) + " " + str(self.hosts)
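A hedged usage sketch for the Switch class above; RoutingTable and core_switches are stand-ins of mine, not the project's real definitions:

class RoutingTable(object):  # stub (assumption) just to instantiate Switch
    def __init__(self, dpid, vid):
        self.dpid, self.vid = dpid, vid
    def fillInNeighbors(self, neighbors):
        pass

core_switches = set()  # assumed module-level registry of core switch dpids

sw = Switch(dpid=1, vid=None)
print(Switch.is_edge_switch(sw.dpid))  # True: dpid 1 is not a core switch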
Code Example #2
def write_table(channel: Channel, prefix: str, table: RoutingTable,
                output_folder):
    serialized = table.serialize()  # avoid shadowing the built-in 'bytes'
    filename = "{}-rank{}-channel{}".format(prefix, channel.fpga.rank,
                                            channel.index)

    write_file(os.path.join(output_folder, filename), serialized, binary=True)
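A hedged usage sketch for write_table; Channel, RoutingTable and write_file are replaced here by stand-ins of mine, not the project's real types:

import os
from types import SimpleNamespace

def write_file(path, data, binary=True):  # assumed helper signature
    with open(path, "wb" if binary else "w") as f:
        f.write(data)

channel = SimpleNamespace(fpga=SimpleNamespace(rank=0), index=2)
table = SimpleNamespace(serialize=lambda: b"\x00\x01")  # fake payload

write_table(channel, "routes", table, "/tmp")  # writes /tmp/routes-rank0-channel2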
Code Example #3
    def __init__(self, port_data=PORT_DATA, port_ctrl=PORT_CTRL):

        # Unique object ID:
        self.id = uid()

        self.logger = twiggy.log.name('manage %s' % self.id)
        self.port_data = port_data
        self.port_ctrl = port_ctrl

        # Set up a router socket to communicate with other topology
        # components; linger period is set to 0 to prevent hanging on
        # unsent messages when shutting down:
        self.zmq_ctx = zmq.Context()
        self.sock_ctrl = self.zmq_ctx.socket(zmq.ROUTER)
        self.sock_ctrl.setsockopt(zmq.LINGER, LINGER_TIME)
        self.sock_ctrl.bind("tcp://*:%i" % self.port_ctrl)

        # Set up a poller for detecting acknowledgements to control messages:
        self.ctrl_poller = zmq.Poller()
        self.ctrl_poller.register(self.sock_ctrl, zmq.POLLIN)

        # Data structures for instances of objects that correspond to processes
        # keyed on object IDs (bidicts are used to enable retrieval of
        # broker/module IDs from object instances):
        self.brokers = bidict.bidict()
        self.modules = bidict.bidict()

        # Set up a dynamic table to contain the routing table:
        self.routing_table = RoutingTable()

        # Number of emulation steps to run:
        self.steps = np.inf
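A hedged companion sketch: a topology component could reach the ROUTER control socket above through a DEALER socket (the endpoint and message framing are illustrative assumptions, not the project's actual protocol):

import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.DEALER)
sock.setsockopt(zmq.LINGER, 0)        # mirror the manager: don't hang on exit
sock.connect("tcp://localhost:5000")  # assumed value of port_ctrl
sock.send_multipart([b"", b"ack"])    # the ROUTER sees [identity, b"", b"ack"]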
Code Example #4
 def __init__(self, my_node, bootstrap_nodes, msg_f):
     self.my_node = my_node
     self.bootstrapper = bootstrap.OverlayBootstrapper(
         my_node.id, bootstrap_nodes, msg_f)
     self.msg_f = msg_f
     self.table = RoutingTable(my_node, NODES_PER_BUCKET)
     # maintenance variables
     self._next_stale_maintenance_index = 0
     self._maintenance_mode = BOOTSTRAP_MODE
     self._replacement_queue = _ReplacementQueue(self.table)
     self._query_received_queue = _QueryReceivedQueue(self.table)
     self._found_nodes_queue = _FoundNodesQueue(self.table)
     self._maintenance_tasks = [
         self._ping_a_staled_rnode,
         self._ping_a_query_received_node,
         self._ping_a_found_node,
         self._ping_a_replacement_node,
     ]
     self._num_pending_filling_lookups = NUM_FILLING_LOOKUPS
Code Example #5
File: layers.py Project: matwitke/carsim
class NetworkLayer(BasedLayer):
    def __init__(self, owner):
        self.is_router = False
        self.rt = RoutingTable()
        self.it = InterfacesTable()
        self.owner = owner


    def update_local_routes(self):
        list_of_interfaces = self.it.get_all_interfaces().values()
        for iface in list_of_interfaces:
            ipv6_address = iface.get_ipv6_address()
            self.rt.add_route(ipv6_address.network, None, iface.name)


    def get_ipv6(self):
        list_of_interfaces = self.it.get_all_interfaces().values()
        ipv6_address = None
        while ipv6_address is None and len(list_of_interfaces) > 0:
            iface = list_of_interfaces.pop()
            ipv6_address = iface.get_ipv6_address()
        return ipv6_address


    def handle_upper_msg(self, msg):
        dst_node = connection_manager.get_element_by_name(msg.dst)
        dst_ipv6_address = dst_node.net_layer.get_ipv6()
        route = self.rt.get_route(dst_ipv6_address)
        out_interface = self.it.get_interface_by_name(route.interface)
        src_ipv6_address = out_interface.ipv6_address
        ipv6_msg = IPv6Datagram(src_ipv6_address, dst_ipv6_address,msg)
        out_interface.handle_upper_msg(ipv6_msg)

    def handle_lower_msg(self, msg):
        # act as a router
        if self.is_router:
            pass
        # otherwise it is client
        else:
            print "rcv message from %s" % msg.dst_address
            msg = msg.payload
            self.upper_layer.handle_lower_msg(msg)
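A hedged standalone sketch of the route-lookup idea behind rt.get_route() above (my stand-in, not this project's RoutingTable): pick the most specific entry whose network contains the destination.

import ipaddress

routes = {
    ipaddress.ip_network('2001:db8::/32'): 'eth0',
    ipaddress.ip_network('2001:db8:1::/48'): 'eth1',
}

def get_route(dst):
    dst = ipaddress.ip_address(dst)
    matches = [net for net in routes if dst in net]
    return routes[max(matches, key=lambda net: net.prefixlen)] if matches else None

print(get_route('2001:db8:1::7'))  # -> 'eth1' (longest prefix wins)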
Code Example #6
    def __init__(self, alpha=3, k=20, identifier=None):

        # Initialize DatagramRPCProtocol
        super(KademliaNode, self).__init__()

        # TODO: Make the node id a function of node's public key
        # Just like Bitcoin wallet IDs use HASH160
        if identifier is None:
            identifier = random_id()

        self.identifier = identifier

        # Constants from the kademlia protocol
        self.k = k
        self.alpha = alpha

        # Each node has their own dictionary
        self.storage = {}

        # The k-bucket based kademlia routing table
        self.routing_table = RoutingTable(self.identifier, k=self.k)
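A hedged sketch of the standard Kademlia bucket rule (my illustration of the general scheme, not necessarily this RoutingTable's exact internals): a peer lands in the bucket indexed by the highest set bit of the XOR distance between identifiers.

def bucket_index(own_id, peer_id):
    distance = own_id ^ peer_id
    return distance.bit_length() - 1   # -1 would mean peer_id == own_id

print(bucket_index(0b1010, 0b1000))    # distance 0b0010 -> bucket index 1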
Code Example #7
File: mpi_proc.py Project: mreitm/neurokernel
    def spawn(self):
        """
        Spawn MPI processes for and execute each of the managed targets.
        """

        if self._is_parent:
            # Find the path to the mpi_backend.py script (which should be in the
            # same directory as this module:
            parent_dir = os.path.dirname(__file__)
            mpi_backend_path = os.path.join(parent_dir, 'mpi_backend.py')

            # Spawn processes:
            self._intercomm = MPI.COMM_SELF.Spawn(sys.executable,
                                                  args=[mpi_backend_path],
                                                  maxprocs=len(self))

            # First, transmit twiggy logging emitters to spawned processes so
            # that they can configure their logging facilities:
            for i in self._targets.keys():
                self._intercomm.send(twiggy.emitters, i)

            # Next, serialize the routing table ONCE and then transmit it to all
            # of the child nodes:
            try:
                routing_table = self.routing_table
            except AttributeError:  # assumption: only a missing attribute is expected here
                routing_table = RoutingTable()
                self.log_warning(
                    'Routing Table is null, using empty routing table.')

            self._intercomm.bcast(routing_table, root=MPI.ROOT)

            # Transmit class to instantiate, globals required by the class, and
            # the constructor arguments; the backend will wait to receive
            # them and then start running the targets on the appropriate nodes.
            req = MPI.Request()
            r_list = []
            for i in self._targets.keys():
                target_globals = all_global_vars(self._targets[i])

                # Serializing atexit with dill appears to fail in virtualenvs
                # sometimes if atexit._exithandlers contains an unserializable function:
                if 'atexit' in target_globals:
                    del target_globals['atexit']
                data = (self._targets[i], target_globals, self._kwargs[i])
                r_list.append(self._intercomm.isend(data, i))

                # Need to clobber data to prevent all_global_vars from
                # including it in its output:
                del data
            req.Waitall(r_list)
Code Example #8
File: node.py Project: mtasic85/routingtable
    def __init__(self,
                 loop,
                 id=None,
                 listen_host='0.0.0.0',
                 listen_port=6633,
                 bootstrap=False):
        self.loop = loop

        if id is None:
            id = str(uuid.uuid4())

        self.id = id

        self.listen_host = listen_host
        self.listen_port = listen_port
        self.bootstrap = bootstrap

        # routing table
        self.rt = RoutingTable()

        # default protocol_commands
        self.protocol_commands = {}

        protocol_command = PingProtocolCommand(self, 1, 0, 0)
        self.add_protocol_command(protocol_command)

        protocol_command = DiscoverProtocolCommand(self, 1, 0, 1)
        self.add_protocol_command(protocol_command)

        # socket
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind((self.listen_host, self.listen_port))

        self.recv_buffer = {
        }  # {(remote_host: remote_port): [socket_data, ...]}
        self.recv_packs = {}  # {msg_id: {pack_index: pack_data}}
        self.loop.add_reader(self.sock, self.rect_sock_data)

        # tasks
        self.loop.call_soon(self.check_recv_buffer)
        self.loop.call_soon(self.remove_dead_contacts)
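A minimal standalone sketch of the loop.add_reader() pattern used above, with a plain UDP socket (all names here are mine, not the project's):

import asyncio
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
sock.setblocking(False)

def on_readable():
    data, addr = sock.recvfrom(4096)
    print('received', data, 'from', addr)

loop = asyncio.new_event_loop()
loop.add_reader(sock, on_readable)  # callback fires when the socket is readable
# loop.run_forever()  # uncomment to serve until interrupted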
Code Example #9
File: core.py Project: yluo42/neurokernel
    def __init__(self,
                 required_args=[
                     'sel', 'sel_in', 'sel_out', 'sel_gpot', 'sel_spike'
                 ],
                 ctrl_tag=CTRL_TAG):
        super(Manager, self).__init__(ctrl_tag)

        # Required constructor args:
        self.required_args = required_args

        # One-to-one mapping between MPI rank and module ID:
        self.rank_to_id = bidict.bidict()

        # Unique object ID:
        self.id = uid()

        # Set up a dynamic table to contain the routing table:
        self.routing_table = RoutingTable()

        # Number of emulation steps to run:
        self.steps = np.inf

        # Variables for timing run loop:
        self.start_time = 0.0
        self.stop_time = 0.0

        # Variables for computing throughput:
        self.counter = 0
        self.total_sync_time = 0.0
        self.total_sync_nbytes = 0.0
        self.received_data = {}

        # Average step synchronization time:
        self._average_step_sync_time = 0.0

        # Computed throughput (only updated after an emulation run):
        self._average_throughput = 0.0
        self._total_throughput = 0.0
        self.log_info('manager instantiated')
Code Example #10
File: router.py Project: yushu-liu/GerogiaTech
 def __init__(self, router_num, N, protocol):
     self._router_num = router_num
     self._N = N
     self._neighbors = {}
     self._routing_table = RoutingTable(N, self._router_num, protocol)
Code Example #11
File: viro_routing.py Project: dumb0002/viro-geni
class ViroRouting(object):
  
  # Time interval to discover neighbors failures
  # i.e. if we didn't hear from a neighbor for ALERT_FAILURE
  # seconds, then we assume this neighbor has failed
  ALERT_FAILURE = 15
  
  def __init__(self, dpid, vid):
        self.dpid = dpid
        self.vid = vid
        self.L = VidAddr.L
        self.routingTable = RoutingTable(vid, dpid)
        self.rdvStore = RdvStore()
        self.liveNbrs = {}
        self.portNbrs = {} # mapping of Nbrs = port #
        self.rdvRequestTracker = cl.defaultdict(dict)
    
      
  def discoveryEchoReplyReceived(self, nbrVid, port):
    """ Process a discoveryEchoReply form a direct neighbor """
    dist = nbrVid.delta(self.vid)
                 
    bucket = Bucket(dist, nbrVid, self.vid, port)
    self.routingTable.addBucket(bucket)
    
    self.liveNbrs[nbrVid] = time.time()
    self.portNbrs[port] = nbrVid
    
    log.debug(str(self.routingTable))
    log.debug(str(self.rdvStore))    

    
  def removeFailedNode(self, vid):
    """ Remove failed neighbor from routing table
    All entries in which "vid" is a nexthop or gateway node must be removed """
    log.debug("Removing failed neighbor {}".format(vid))
    
    buckets = {}
    if vid in self.routingTable.nbrs:
      buckets.update(self.routingTable.nbrs[vid])
    
    if vid in self.routingTable.gtws:
      buckets.update(self.routingTable.gtws[vid])    
    
    for bkt in buckets:
      self.routingTable.removeBucket(bkt)
    
    if vid in self.liveNbrs:    
      del self.liveNbrs[vid]
    

  def getNextHop(self, dst, op=None):
    """ Find the nexthop node for the give dst """        
    nexthop = None
    port = None
        
    while not nexthop: 
      level = self.vid.delta(dst)
      if level == 0:
        return (nexthop, port)
      
      if level in self.routingTable:
        buckets = self.routingTable[level]
        if len(buckets) > 0:
          bkt = buckets.iterkeys().next()
          return (bkt.nexthop, bkt.port)
      
      if (op != viroctrl.RDV_PUBLISH) and (op != viroctrl.RDV_QUERY):
        return (nexthop, port)
      
      # flip the 'level' bit to get closer
      dst = dst.flipBit(level)
        
  def selfRVDQuery(self, svid, k):  
    # search in rdv store for the logically closest gateway to reach kth distance away neighbor
    gw = self.rdvStore.findAGW(k, svid)
  
    # if found then form the reply packet and send to svid
    if not gw: # No gateway found
        log.debug('Node : {} has no gateway for the rdv_query packet to reach bucket: {} for node: {}'.format(str(self.vid), k, str(svid)))
        return
    
    if k in self.routingTable:
      log.debug('Node {} already has an entry to reach neighbors at distance - {}'.format(svid, k))
      return
  
    nexthop, port = self.getNextHop(gw)
    if not nexthop:
      log.debug('No nexthop found for the gateway: {}'.format(str(gw)))
      return
    
    # Destination Subtree-k
    bucket = Bucket(k, nexthop, gw, port)
    self.routingTable.addBucket(bucket)

  
  
  def rdvWithDraw(self, svid, failedGw):            
    log.debug('Node : {} has received rdv_withdraw from {}'.format(str(self.vid), str(svid)))
  
    #levels = self.rdvStore.findLevelsForGateway(failedGw)
  
    self.rdvStore.deleteGateway(failedGw)
    
    if self.vid != svid: # the withdraw came from another node: update the routing table
        self.removeFailedNode(failedGw)  # update the Routing Table
    else:
      log.debug("I am the rdv point. My routing table is already updated.")
Code Example #12
File: controller.py Project: dumb0002/viro-geni
class LocalViro(EventMixin):
    """
  Waits for OpenFlow switches to connect and makes them viro switches.
  This module is supposed to be the management plane for viro
  """

    _core_name = "viro_local"

    CONTROLLER_ID = 2
    RemoteViro_CONTROLLER_ID = 1

    # ROUND_TIME this is the waiting time for each round in number of seconds
    ROUND_TIME = 5

    # NEIGHBOUR DISCOVERY wait time
    DISCOVER_TIME = 5

    # Wait time for populating routing table
    UPDATE_RT_TIME = 10

    # Failure time interval
    FAILURE_TIME = 7

    # temporary variable (used to push routing table)
    counter = 0

    # Nov.2015 Count of control packets
    numOfVIRO = 0
    numOfVIRO_CONTROLLER_ECHO = 0
    numOfVIRO_LOCAL_HOST = 0
    numOfVIRO_DISC_ECHO_REQ = 0
    numOfVIRO_DISC_ECHO_REPLY = 0
    numOfVIRO_DISC_ECHO_REPLY_SENT = 0
    numOfVIRO_RDV_PUBLISH = 0
    numOfVIRO_RDV_QUERY = 0
    numOfVIRO_RDV_REPLY = 0
    numOfVIRO_RDV_REPLY_SENT = 0
    numOfVIRO_RDV_WITHDRAW = 0
    numOfVIRO_RDV_WITHDRAW_SENT = 0
    numOfVIRO_GW_WITHDRAW = 0
    numOfVIRO_GW_WITHDRAW_SENT = 0

    def __init__(self):
        self.listenTo(core.viro_core)
        self.sw = Switch(None, None, None)
        self.routing = ViroRouting(None, None)
        self.L = VidAddr.L
        self.local_topo = Topology()

    def _handle_ViroSwitchUp(self, event):
        """ Local VIRO switch is connecting to the local controller
    We set the controller Id to our local VIRO controller """
        log.debug("Connection %s %s" %
                  (event.connection, dpidToStr(event.dpid)))

        self.sw.dpid = event.dpid
        self.sw.connection = event.connection

        self.routing.dpid = event.dpid
        self.routing.routingTable.dpid = event.dpid

        # Guobao added 02/05/2015
        #self.previousRoutingTable = copy.deepcopy(self.routing.routingTable)
        self.previousRoutingTable = RoutingTable(None, None)

        # Turn on ability to specify table in flow_mods
        msg = nx.nx_flow_mod_table_id()
        self.sw.connection.send(msg)

        msg = msgFactory.ofpControllerId(LocalViro.CONTROLLER_ID)
        self.sw.connection.send(msg)

        msg = msgFactory.ctrlPktsLocalViroPacketIn(
            LocalViro.CONTROLLER_ID)  # set the rule for Viro Packets
        self.sw.connection.send(msg)

        msg_ip = msgFactory.IPv4LocalViroPacketIn(
            LocalViro.CONTROLLER_ID)  # set the rule for IP Packets
        self.sw.connection.send(msg_ip)

        # Guobao -- Fallback rule for Table 0
        msg = msgFactory.FallBack(1)
        self.sw.connection.send(msg)

        # Start periodic function of the local controller
        # 1. Neighbor discovery
        # 2. Routing table rounds
        # 3. Failure discovery
        # We don't use recurring timers to avoid function call overlaps in case of delays
        # So, we call the timer function after the end of the callback function
        Timer(LocalViro.DISCOVER_TIME, self.neibghoorDiscoverCallback)
        #Timer(LocalViro.FAILURE_TIME, self.discoveryFailureCallback) # comment here

        self.round = 1
        Timer(LocalViro.UPDATE_RT_TIME, self.startRoundCallback)

        # Sync RT Table every UPDATE_RT_TIME?
        Timer(LocalViro.UPDATE_RT_TIME, self.pushRTHelper)

    def _handle_ViroSwitchPortStatus(self, event):
        # Handle a modified switch port status.
        # Check whether the modified port is connected to a host or a switch:
        # a) "host": remove the host from its topology table and notify the
        #    remote controller that the host was removed from the switch topology;
        # b) "switch": initiate the VIRO failure recovery mechanism.

        log.debug("Receiving a port status event")

        ofp = event.ofp

        if ofp.reason == of.OFPPR_MODIFY:
            port = ofp.desc.port_no  # modified or deleted port number
            known_host = self.local_topo.findHostByPort(port)

            if known_host is not None:

                log.debug(
                    "Updating its local topology: local host was disconnected")
                ip = known_host.ip
                self.local_topo.deleteHostByIP(ip)  # remove host

                # sends a notification to remote controller
                # sending host withdraw message to remote controller
                packet = pktFactory.hostWithdraw(ip)
                msg = msgFactory.controllerEcho(
                    LocalViro.RemoteViro_CONTROLLER_ID, packet)
                self.sw.connection.send(msg)

                return

            else:
                # the removed port is connected to a switch
                log.debug(
                    "Discovered neibghor failed: starting VIRO failure recovery mechanism"
                )
                self.discoveryFailure(port)  #uncomment here

    def _handle_ViroPacketInIP(self, event):

        IPv4_frame = event.packet
        inport = event.port
        IPv4pkt = IPv4_frame.payload  # Ip packet payload

        log.debug(
            "IPv4 packet in_port=%d, srcvid=%s, dstvid=%s, type=%#04x",
            inport,
            IPv4_frame.src,
            IPv4_frame.dst,
            IPv4_frame.type,
        )

        # Is it for us? (e.g. not a DHCP packet)
        if isinstance(IPv4pkt, pkt.dhcp):
            log.debug("%s: packet is a DHCP: it is not for us",
                      str(event.connection))
            return

        # obtain the frame header info.
        srcMac = IPv4_frame.src
        dstMac = IPv4_frame.dst
        ethtype = IPv4_frame.type
        ipv4 = IPv4pkt.srcip

        log.debug("This is the ipv4 packet: {}".format(ipv4))

        n = dstMac.toRaw()
        dst_sw, dst_host = self.sw.vid.getSWHost(n)
        dst_vid = VidAddr(dst_sw, dst_host)

        # find src and dst host objects
        #dist_host = self.local_topo.findHostByVid(dst_vid)
        dist_host = self.local_topo.findHostByMac(dstMac)
        local_host = self.local_topo.findHostByMac(srcMac)

        # if local host does not exist in our topology
        # send a vid request to the remote controller
        # Drop the IPv4 pkts
        if local_host is None:
            packet = pktFactory.vidRequest(srcMac, ipv4, inport)
            msg = msgFactory.controllerEcho(LocalViro.RemoteViro_CONTROLLER_ID,
                                            packet)
            self.sw.connection.send(msg)

            log.debug("Sending a Vid Request pkt to the remote controller")
            return

        # obtain the src VidAddress
        src_vid = local_host.vid

        if dist_host is not None:
            ###### Handling packets to src and dst in the same switch ######
            log.debug("src and dst attached to the same switch!")

            lport = dist_host.port
            #dst_mac = dist_host.mac
            #etherpkt = event.packet

            # set the destination macAddress and the source vid
            #etherpkt.src = src_vid.to_raw()
            #etherpkt.dst = dst_mac

            log.debug("forwarding the packet to the next host")
            msg = msgFactory.packetOut(IPv4_frame, lport)
            self.sw.connection.send(msg)

            #---------------------------------------------
            # Add a rule to the switch for future packets
            #---------------------------------------------
            log.debug("pushing the rules to the switch")
            #msg = msgFactory.rewriteMac(src_vid, dstMac, dst_mac, lport)

            # Pushing the rule in both directons
            # a) src --> dst
            msg = msgFactory.rewriteMac(srcMac, dstMac, lport)
            self.sw.connection.send(msg)

            # b) dst --> src
            msg = msgFactory.rewriteMac(dstMac, srcMac, inport)
            self.sw.connection.send(msg)

            return

        else:

            ###### Handling packets to src and dst in different switch ######
            log.debug("Converting IPv4pkts to Viropkts")

            # Generates a Viropkt frame and route it
            log.debug('VidAddress parameters - Src: {} , Dst: {}'.format(
                src_vid, dst_vid))

            viropkt = pktFactory.ViroPacket(src_vid, dst_vid, None, ethtype,
                                            IPv4pkt)
            self.routeViroPacket(viropkt, True)

            log.debug("routing Viro packets")

            #---------------------------------------------
            # Add a rule to the switch for future packets
            #---------------------------------------------
            # log.debug("Port is {}".format(outport))
            msg = msgFactory.encapsulate(srcMac, src_vid, dstMac, dst_vid)
            self.sw.connection.send(msg)

            return

    def _handle_ViroPacketInVIROData(self, event):
        """ VIRO data packet received """

        # FIXME we need to add code to handle data packets missed by the routing
        # table inside the switch. This can happen due to inconsistency between
        # the controller routing table and the switch routing table
        viropkt = event.packet
        inport = event.port

        log.debug(
            "PacketInVIROData in_port=%d, srcvid=%s, dstvid=%s, type=%#04x, fd=%s, eth_type=%#04x",
            inport, viropkt.src, viropkt.dst, viropkt.type, viropkt.fd,
            viropkt.effective_ethertype)

        self.processDataPacket(viropkt, inport)

    def _handle_ViroPacketInVIROCtrl(self, event):
        """ VIRO control packet received """

        viropkt = event.packet
        inport = event.port

        log.debug(str(viropkt))

        ctrlpkt = viropkt.payload

        if ctrlpkt.op == viroctrl.CONTROLLER_ECHO:
            self.numOfVIRO_CONTROLLER_ECHO = self.numOfVIRO_CONTROLLER_ECHO + 1
            logging.info("VIRO_CONTROLLER_ECHO " +
                         str(self.numOfVIRO_CONTROLLER_ECHO) + " " +
                         str(time.time()) + " R")

            # remote controller echo message received, which is used to receive
            # the vid of the local switch from the remote controller
            echo = ctrlpkt.payload
            self.sw.vid = echo.vid
            self.routing.vid = echo.vid
            self.routing.routingTable.vid = echo.vid

            return

        if ctrlpkt.op == viroctrl.LOCAL_HOST:
            self.numOfVIRO_LOCAL_HOST = self.numOfVIRO_LOCAL_HOST + 1
            logging.info("VIRO_LOCAL_HOST " + str(self.numOfVIRO_LOCAL_HOST) +
                         " " + str(time.time()) + " R")

            # remote controller host message: it contains the mac, vid, and
            # ip of the host attached to this local controller
            log.debug(
                'Receiving local host information from remote controller')
            payload = ctrlpkt.payload
            host = payload.host
            host.sw = self.sw.vid
            self.local_topo.addHost(host)

            return

        if not self.sw.vid:
            # if we didn't receive the switch vid from the remote controller yet
            # we return immediately since this means that we don't know our vid yet
            return

        self.processCtrlPacket(viropkt, inport)

    def processViroPacket(self, viropkt, inport):
        """ Porcess a locally generated viro packet """
        if viropkt.effective_ethertype == viroctrl.VIRO_CTRL_TYPE:
            self.processCtrlPacket(viropkt, inport)
        else:
            self.processDataPacket(viropkt, inport)

    def processDataPacket(self, viropkt, inport):
        """ Process a VIRO data packet """
        # 1. process the packet
        # 2. add a rule to the switch to match future requests
        # 3. check whether I am the destination of the packet; if so, remove
        #    the forwarding directive, replace the Vid with the client mac
        #    address, and forward the packet to the host (we need to record
        #    which port the client is connected to)

        dst = viropkt.dst
        src = viropkt.src
        dst_pkt = VidAddr(dst.sw, 0x00)

        if dst_pkt == self.sw.vid:

            # I am the destination for the packet:
            # converting a Viropkt frame to ethernet frame

            log.debug("Destination VID: {}   myVid: {}".format(
                dst, self.sw.vid))
            datapkt = viropkt.payload
            local_host = self.local_topo.findHostByVid(dst)

            if local_host is None:
                log.debug(
                    "Unknown destination Vid to hosts attached to this switch!"
                )
                return

            srcMac = src.to_raw()
            dstMac = local_host.mac
            lport = local_host.port
            ethtype = viropkt.effective_ethertype  # avoid shadowing built-in 'type'

            # Generates a internet frame and route it
            etherpkt = pktFactory.ethernetPkt(ethtype, srcMac, dstMac, datapkt)
            msg = msgFactory.packetOut(etherpkt, lport)
            self.sw.connection.send(msg)

            log.debug("Sending data packets to local hosts")

            #---------------------------------------------
            # Add a rule to the switch for future packets
            #---------------------------------------------
            msg = msgFactory.decapsulate(dstMac, dst, lport)
            self.sw.connection.send(msg)

            return

        else:

            # Viropkt is not for us then route it!
            port = self.routeViroPacket(viropkt)

            #---------------------------------------------
            # Add a rule to the switch for future packets
            #---------------------------------------------
            level = self.sw.vid.delta(dst_pkt)
            prefix = self.sw.vid.bucketPrefix(level)
            dst_vid = int(prefix.replace("*", "0"), 2)
            dst_vid_mask = int(prefix.replace("0", "1").replace("*", "0"), 2)

            msg = msgFactory.pushRoutingTable(dst_vid, dst_vid_mask, port)
            self.sw.connection.send(msg)

            msg = msgFactory.pushRoutingTableETH(dst_vid, dst_vid_mask, port)
            self.sw.connection.send(msg)

            return

        #raise Exception("processDataPacket not implemented yet")

    def processCtrlPacket(self, viropkt, inport):
        """ Comsume a viro control packet """
        ctrlpkt = viropkt.payload

        if ctrlpkt.op == viroctrl.DISC_ECHO_REQ:
            self.numOfVIRO_DISC_ECHO_REQ = self.numOfVIRO_DISC_ECHO_REQ + 1
            logging.info("VIRO_DISC_ECHO_REQ " +
                         str(self.numOfVIRO_DISC_ECHO_REQ) + " " +
                         str(time.time()) + " R")

            echo = ctrlpkt.payload
            nvid = viropkt.src

            reply = pktFactory.discoverEchoReply(self.sw.vid, nvid)
            self.numOfVIRO_DISC_ECHO_REPLY_SENT = self.numOfVIRO_DISC_ECHO_REPLY_SENT + 1
            logging.info("VIRO_DISC_ECHO_REPLY " +
                         str(self.numOfVIRO_DISC_ECHO_REPLY_SENT) + " " +
                         str(time.time()) + " S")
            msg = msgFactory.packetOut(reply, inport)

            self.sw.connection.send(msg)
            log.debug("Neighbor discovery reply sent")

            return

        elif ctrlpkt.op == viroctrl.DISC_ECHO_REPLY:
            self.numOfVIRO_DISC_ECHO_REPLY = self.numOfVIRO_DISC_ECHO_REPLY + 1
            logging.info("VIRO_DISC_ECHO_REPLY " +
                         str(self.numOfVIRO_DISC_ECHO_REPLY) + " " +
                         str(time.time()) + " R")

            nvid = viropkt.src
            self.routing.discoveryEchoReplyReceived(nvid, inport)

            return

        if viropkt.dst != self.sw.vid:
            # forward the control packet since its not for me
            self.routeViroPacket(viropkt)
            return

        # handle viro routing packets i.e. publish, query, etc.
        if ctrlpkt.op == viroctrl.RDV_PUBLISH:
            self.numOfVIRO_RDV_PUBLISH = self.numOfVIRO_RDV_PUBLISH + 1
            logging.info("VIRO_RDV_PUBLISH " +
                         str(self.numOfVIRO_RDV_PUBLISH) + " " +
                         str(time.time()) + " R")

            nexthop = ctrlpkt.payload.vid
            svid = viropkt.src
            log.debug("RDV_PUBLISH message received from: ".format(str(svid)))

            dist = self.sw.vid.delta(nexthop)
            self.routing.rdvStore.addRdvPoint(dist, svid, nexthop)

        elif ctrlpkt.op == viroctrl.RDV_QUERY:
            self.numOfVIRO_RDV_QUERY = self.numOfVIRO_RDV_QUERY + 1
            logging.info("VIRO_RDV_QUERY " + str(self.numOfVIRO_RDV_QUERY) +
                         " " + str(time.time()) + " R")

            log.debug("RDV_QUERY message received")
            src = viropkt.src
            if src == self.sw.vid:
                log.debug("I am the rdv point - processing the packet")
                self.routing.selfRVDQuery(src, ctrlpkt.payload.bucket_dist)
            else:
                svid = viropkt.src
                k = ctrlpkt.payload.bucket_dist
                log.debug("RDV_QUERY message received from: {}".format(svid))

                # search in rdv store for the logically closest gateway to reach kth distance away neighbor
                gw = self.routing.rdvStore.findAGW(k, svid)

                # if found then form the reply packet and send to svid
                if not gw:
                    # No gateway found
                    log.debug(
                        'Node : {} has no gateway for the rdv_query packet to reach bucket: {} for node: {}'
                        .format(self.sw.vid, k, svid))
                    return

                # create a RDV_REPLY packet and send it
                rvdReplyPacket = pktFactory.rdvReply(self.sw.vid, svid, k, gw)

                # Keeps track of the Nodes that requests each Gateways at specific level
                nh = self.routing.rdvStore.findNextHop(
                    gw, k)  # nexthop associated with the selected gateway
                self.routing.rdvRequestTracker[gw][svid] = nh

                self.numOfVIRO_RDV_REPLY_SENT = self.numOfVIRO_RDV_REPLY_SENT + 1
                logging.info("VIRO_RDV_REPLY " +
                             str(self.numOfVIRO_RDV_REPLY_SENT) + " " +
                             str(time.time()) + " S")

                msg = msgFactory.packetOut(rvdReplyPacket, inport)
                self.sw.connection.send(msg)
                log.debug("RDV_REPLY message sent")

        elif ctrlpkt.op == viroctrl.RDV_REPLY:
            self.numOfVIRO_RDV_REPLY = self.numOfVIRO_RDV_REPLY + 1
            logging.info("VIRO_RDV_REPLY " + str(self.numOfVIRO_RDV_REPLY) +
                         " " + str(time.time()) + " R")

            log.debug("RDV_REPLY message received")
            # Fill my routing table using this new information
            rtbl = self.routing.routingTable
            gw = ctrlpkt.payload.gw
            k = ctrlpkt.payload.bucket_dist

            if k in self.routing.routingTable:
                log.debug(
                    'Node {} already has an entry to reach neighbors at distance - {}'
                    .format(self.sw.vid, k))
                return

            dist = self.sw.vid.delta(gw)
            if dist not in rtbl:
                log.debug('ERROR: no nexthop found for the gateway: {}'.format(
                    str(gw)))
                return

            bucket = rtbl[dist].iterkeys().next()
            nexthop = bucket.nexthop
            port = bucket.port

            bucket = Bucket(k, nexthop, gw, port)
            rtbl.addBucket(bucket)

        elif ctrlpkt.op == viroctrl.RDV_WITHDRAW:
            self.numOfVIRO_RDV_WITHDRAW = self.numOfVIRO_RDV_WITHDRAW + 1
            logging.info("VIRO_RDV_WITHDRAW " +
                         str(self.numOfVIRO_RDV_WITHDRAW) + " " +
                         str(time.time()) + " R")

            #log.debug('RDV_WITHDRAW message received from: {}'.format())
            svid = viropkt.src
            gw = ctrlpkt.payload.gw
            k = ctrlpkt.payload.bucket_dist

            log.debug('RDV_WITHDRAW message received from: {} for gateway: {}'.
                      format(svid, gw))
            self.routing.rdvWithDraw(svid, gw)

            # Sends Remove Gateway messages to the appropriated nodes
            if gw in self.routing.rdvRequestTracker:
                for dst in self.routing.rdvRequestTracker[gw]:
                    # Sends the GW_WITHDRAW message to nodes
                    pkt = pktFactory.gatewayWithdraw(self.sw.vid, dst, gw)
                    nexthop, port = self.routing.getNextHop(dst)

                    if nexthop:
                        self.numOfVIRO_GW_WITHDRAW_SENT = self.numOfVIRO_GW_WITHDRAW_SENT + 1
                        logging.info("VIRO_GW_WITHDRAW " +
                                     str(self.numOfVIRO_GW_WITHDRAW_SENT) +
                                     " " + str(time.time()) + " S")

                        log.debug(
                            'Sending GW_WITHDRAW message received from: {} to destination: {}'
                            .format(self.sw.vid, dst))
                        self.routeViroPacket(pkt)
                    else:
                        log.debug("No next hop found!")

                # delete the failed gateway from the rdvRequestTracker - House Keeping
                del self.routing.rdvRequestTracker[gw]
                log.debug(
                    'Removed the failed gateway: {} from rdv request-tracker'.
                    format(gw))

            # Remove all the Gateways using the "failed node" as nexthop (Fixme! simplify this code)
            gw_entries = self.routing.rdvStore.findGWByNextHop(
                gw)  # list of gateways using "failed node" as its nexthop

            delete_entries = []

            for gw_other in gw_entries:
                for dst in self.routing.rdvRequestTracker[gw_other]:

                    if self.routing.rdvRequestTracker[gw_other][dst] == gw:
                        # Sends the GW_WITHDRAW message to nodes
                        pkt = pktFactory.gatewayWithdraw(
                            self.sw.vid, dst, gw_other)
                        nexthop, port = self.routing.getNextHop(dst)

                        if nexthop:
                            self.numOfVIRO_GW_WITHDRAW_SENT = self.numOfVIRO_GW_WITHDRAW_SENT + 1
                            logging.info("VIRO_GW_WITHDRAW " +
                                         str(self.numOfVIRO_GW_WITHDRAW_SENT) +
                                         " " + str(time.time()) + " S")

                            log.debug(
                                'Sending GW_WITHDRAW message received from: {} to destination: {}'
                                .format(self.sw.vid, dst))
                            self.routeViroPacket(pkt)
                        else:
                            log.debug("No next hop found!")

                        delete_entries.append((gw_other, dst))

                        # deleting entry from the RDVStore
                        self.routing.rdvStore.deleteGatewayPerNextHop(
                            gw_other, gw)
                        log.debug(
                            'Removed the failed gateway entry: {} from RDVStore per level'
                            .format(gw_other))

            # delete any remaining gateway using the failed nexthop - House Keeping
            self.routing.rdvStore.deleteGatewayForNextHop(gw)

            # delete the failed gateway entry from the rdvRequestTracker - House Keeping
            for gw_other, dst in delete_entries:
                del self.routing.rdvRequestTracker[gw_other][dst]

                log.debug(
                    'Removed the failed gateway entry: {} from rdv request-tracker'
                    .format(gw_other))

        elif ctrlpkt.op == viroctrl.GW_WITHDRAW:
            self.numOfVIRO_GW_WITHDRAW = self.numOfVIRO_GW_WITHDRAW + 1
            logging.info("VIRO_GW_WITHDRAW " +
                         str(self.numOfVIRO_GW_WITHDRAW) + " " +
                         str(time.time()) + " R")

            failed_gw = ctrlpkt.payload.failed_gw
            log.debug(
                'Received Gateway Withdraw for node: {}'.format(failed_gw))
            self.routing.removeFailedNode(failed_gw)

            #---------------------------------------------
            # Push New Routing Table
            #---------------------------------------------
            # self.pushRT()

    def neibghoorDiscoverCallback(self):
        """ Periodically send neighbor discovery to my neighbors """
        if not self.sw.vid:
            log.debug(
                "Local controller didn't receive the switch VID yet from the remote controller!!"
            )
            return

        try:
            self.numOfVIRO_DISC_ECHO_REQ = self.numOfVIRO_DISC_ECHO_REQ + 1
            logging.info("VIRO_DISC_ECHO_REQ " +
                         str(self.numOfVIRO_DISC_ECHO_REQ) + " " +
                         str(time.time()) + " S")

            packet = pktFactory.discoverEchoRequest(self.sw.vid)
            # FIXME do we really want to flood here?
            msg = msgFactory.flood(packet)
            self.sw.connection.send(msg)
            log.debug("Neighbor discovery packets sent to neighbors")
        except Exception as e:
            log.error(
                "Unable to send discovery packets. Exception({})".format(e))

        # register the callback again
        Timer(LocalViro.DISCOVER_TIME, self.neibghoorDiscoverCallback)

    def discoveryFailureCallback(self):
        """ Periodically discover link failures
    First, we find failed neighbors i.e. nodes that we didn't receive any discoverEchoReply from in a while
    Second, we delete those neighbors and update the RDV points """
        log.debug("Starting discovering neighbor failures")

        delete = []  # list of nodes to be deleted
        for nbrVid, timestamp in self.routing.liveNbrs.iteritems():
            now = time.time()

            if (now - timestamp) > ViroRouting.ALERT_FAILURE:
                log.debug("Failed neighbor detected {}".format(nbrVid))

                delete.append(nbrVid)
                for bkt in self.routing.routingTable.nbrs[nbrVid]:
                    if bkt.gateway == self.sw.vid or bkt.gateway == nbrVid:
                        # notify the failure to RDV points
                        k = bkt.k  # level
                        rdvVid = self.sw.vid.getRendezvousID(k)

                        if rdvVid != nbrVid:  # if the rdvVid is not the failed node
                            self.numOfVIRO_RDV_WITHDRAW_SENT = self.numOfVIRO_RDV_WITHDRAW_SENT + 1
                            logging.info(
                                "VIRO_RDV_WITHDRAW " +
                                str(self.numOfVIRO_RDV_WITHDRAW_SENT) + " " +
                                str(time.time()) + " S")

                            #pkt = pktFactory.rdvWithdraw(self.sw.vid, rdvVid, bkt.gateway)
                            pkt = pktFactory.rdvWithdraw(
                                self.sw.vid, rdvVid, k, nbrVid)
                            self.routeViroPacket(pkt)
                        else:
                            log.debug(
                                "RDV destination is not reachable to notify failure of node :{}"
                                .format(str(nbrVid)))

        flag = False

        # remove the failed neighbor from the routing table
        for nbrVid in delete:
            flag = True
            self.routing.removeFailedNode(nbrVid)

        #---------------------------------------------
        # Push New Routing Table
        #---------------------------------------------
        #if flag == True:
        #self.pushRT()

        # register the callback again
        Timer(LocalViro.FAILURE_TIME, self.discoveryFailureCallback)

    def discoveryFailure(self, port):
        """ First, we find failed neighbors i.e. nodes use Openflow portStatus events
      Second, we delete those neighbors and update the RDV points """
        log.debug("Neighbor failure discovered ")

        nbrVid = self.routing.portNbrs[port]

        log.debug("Neighbor {} failed at port {}".format(nbrVid, port))

        for bkt in self.routing.routingTable.nbrs[nbrVid]:
            if bkt.gateway == self.sw.vid or bkt.gateway == nbrVid:
                # notify the failure to RDV points
                k = bkt.k  # level
                rdvVid = self.sw.vid.getRendezvousID(k)

                if rdvVid != nbrVid:  # if the rdvVid is not the failed node
                    self.numOfVIRO_RDV_WITHDRAW_SENT = self.numOfVIRO_RDV_WITHDRAW_SENT + 1
                    logging.info("VIRO_RDV_WITHDRAW " +
                                 str(self.numOfVIRO_RDV_WITHDRAW_SENT) + " " +
                                 str(time.time()) + " S")

                    #pkt = pktFactory.rdvWithdraw(self.sw.vid, rdvVid, bkt.gateway)
                    pkt = pktFactory.rdvWithdraw(self.sw.vid, rdvVid, k,
                                                 nbrVid)
                    self.routeViroPacket(pkt)
                else:
                    log.debug(
                        "RDV destination is not reachable to notify failure of node :{}"
                        .format(str(nbrVid)))

        # remove the failed neighbor from the routing table:
        self.routing.removeFailedNode(nbrVid)

    def routeViroPacket(self, pkt, inport=None):
        """ Route the viro packet closer to destination """

        dst_pkt = VidAddr(pkt.dst.sw, 0x00)

        if (dst_pkt == self.sw.vid):
            # consume the packet since its sent to me
            self.processViroPacket(pkt, inport)
            return

        if pkt.effective_ethertype == viroctrl.VIRO_CTRL_TYPE:
            op = pkt.payload.op
            nexthop, port = self.routing.getNextHop(dst_pkt, op)
        else:
            nexthop, port = self.routing.getNextHop(dst_pkt)

        if nexthop is not None:
            msg = msgFactory.packetOut(pkt, port)
            self.sw.connection.send(msg)
        else:
            log.debug("routeViroPacket nexthop not found")

        # use for handling data packets
        if inport:
            return port

    def startRoundCallback(self):
        log.debug("Starting round {}".format(self.round))
        self.runARound()
        self.round += 1

        #if self.round == self.L: # push the routing table after populating all the levels
        #log.debug("Starting pushing the routing table to the switch......")
        #self.pushRT()

        if self.round > self.L:
            self.round = self.L

        log.debug(str(self.routing.routingTable))  # print Routing Table
        log.debug(str(self.routing.rdvStore))  # print RDV store
        # register the callback again
        Timer(LocalViro.UPDATE_RT_TIME, self.startRoundCallback)

    def pushRTHelper(self):
        self.pushRT()
        Timer(LocalViro.UPDATE_RT_TIME, self.pushRTHelper)

    def pushRT(self):
        # push the local controller routing table to the switch

        for level in range(1, self.L + 1):
            if level in self.routing.routingTable:

                prefix = self.sw.vid.bucketPrefix(level)
                dst_vid = int(prefix.replace("*", "0"), 2)
                dst_vid_mask = int(
                    prefix.replace("0", "1").replace("*", "0"), 2)

                # Fixme: improve this code for multipath
                # We use 1000 as "maximum infinity" here since the maximum level will be no larger than 32.
                closestDistance = 1000
                closestBucket = None
                for bucket in self.routing.routingTable[level]:
                    distance = self.sw.vid.delta(bucket.gateway)
                    if (distance < closestDistance):
                        closestDistance = distance
                        closestBucket = bucket

                port = closestBucket.port
                bkt_key = closestBucket.key
                bkt_dict = self.previousRoutingTable.bkts_hash

                if bkt_key in bkt_dict:
                    log.debug("No routing table pushed. No changes happened.")
                else:
                    log.debug(
                        "Pushing rule for level: {}  dst_vid: {}  dst_mask: {}"
                        .format(level, dst_vid, dst_vid_mask))
                    # Guobao Nov.2015 Fixed duplicate pushes
                    if level in self.previousRoutingTable:
                        self.previousRoutingTable.removeAllBucketsInLevel(
                            level)

                    self.previousRoutingTable.addBucket(closestBucket)
                    msg = msgFactory.pushRoutingTable(dst_vid, dst_vid_mask,
                                                      port)
                    self.sw.connection.send(msg)
                    msg = msgFactory.pushRoutingTableETH(
                        dst_vid, dst_vid_mask, port)
                    self.sw.connection.send(msg)

            else:
                log.debug(
                    "Failure to push rule for level {} : -- EMPTY LEVEL --".
                    format(level))

    def runARound(self):
        rnd = self.round  # avoid shadowing the built-in round()
        rtbl = self.routing.routingTable
        myvid = self.sw.vid

        # start from round 2 since connectivity in round 1 is already learned using the physical neighbors
        for i in range(2, rnd + 1):
            # see if routing entry for this round is already available in the routing table.
            if i in rtbl:
                if len(rtbl[i]) > 0:
                    #publish the information if it is already there
                    for bkt in rtbl[i]:
                        if bkt.gateway == myvid:
                            log.debug("Sending rdv publish messages")

                            self.numOfVIRO_RDV_PUBLISH = self.numOfVIRO_RDV_PUBLISH + 1
                            logging.info("VIRO_RDV_PUBLISH " +
                                         str(self.numOfVIRO_RDV_PUBLISH) +
                                         " " + str(time.time()) + " S")

                            dst = myvid.getRendezvousID(i)
                            pkt = pktFactory.rdvPublish(
                                myvid, dst, bkt.nexthop)
                            self.routeViroPacket(pkt)
                            log.debug(
                                "RDV publish messages sent to:: {} for level {}"
                                .format(dst, i))
                else:
                    log.debug("Sending rdv query messages")

                    self.numOfVIRO_RDV_QUERY = self.numOfVIRO_RDV_QUERY + 1
                    logging.info("VIRO_RDV_QUERY " +
                                 str(self.numOfVIRO_RDV_QUERY) + " " +
                                 str(time.time()) + " S")

                    dst = myvid.getRendezvousID(i)
                    pkt = pktFactory.rdvQuery(myvid, dst, i)
                    self.routeViroPacket(pkt)

                    log.debug(
                        "RDV query message sent to:: {} for level {}".format(
                            dst, i))
            else:
                log.debug("Sending rdv query messages")

                self.numOfVIRO_RDV_QUERY = self.numOfVIRO_RDV_QUERY + 1
                logging.info("VIRO_RDV_QUERY " +
                             str(self.numOfVIRO_RDV_QUERY) + " " +
                             str(time.time()) + " S")

                dst = myvid.getRendezvousID(i)
                pkt = pktFactory.rdvQuery(myvid, dst, i)
                self.routeViroPacket(pkt)

                log.debug("RDV query message sent to:: {} for level {}".format(
                    dst, i))
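A standalone sketch of the prefix-to-match conversion used in pushRT() and processDataPacket() above: a bucket prefix such as "01**" becomes a value/mask pair for a ternary match (the mask has 1s where the prefix is fixed):

prefix = "01**"                                # assumed example prefix
dst_vid = int(prefix.replace("*", "0"), 2)     # "0100" -> 4
dst_vid_mask = int(prefix.replace("0", "1").replace("*", "0"), 2)  # "1100" -> 12
print(dst_vid, dst_vid_mask)                   # 4 12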
Code Example #13
class RoutingManager(object):
    
    def __init__(self, my_node, querier, bootstrap_nodes):
        self.my_node = my_node
        self.querier = querier
        #Copy the bootstrap list
        self.bootstrap_nodes = [n for n in bootstrap_nodes]
        
        self.main = RoutingTable(my_node, NODES_PER_BUCKET)
        self.replacement = RoutingTable(my_node, NODES_PER_BUCKET)
        self.ping_msg = message.OutgoingPingQuery(my_node.id)
        self.find_node_msg = message.OutgoingFindNodeQuery(
            my_node.id,
            my_node.id)
        self.mode = BOOTSTRAP_MODE
        self.num_concurrent_refresh_msgs = 0
        #This must be called by an external party: self.do_bootstrap()
        #After initializing callbacks

        # Add myself to the routing table
        rnode = self.main.add(my_node)
        self._reset_refresh_task(rnode)

    def do_bootstrap(self):
        if self.main.num_rnodes > MIN_RNODES_BOOTSTRAP:
            # Enough nodes. Stop bootstrap.
            return
        for _ in xrange(NUM_NODES_PER_BOOTSTRAP_STEP):
            if not self.bootstrap_nodes:
                self.mode = NORMAL_MODE
                return
            index = random.randint(0,
                                   len(self.bootstrap_nodes) - 1)
            self.querier.send_query(self.find_node_msg,
                                    self.bootstrap_nodes[index],
                                    self._do_nothing,
                                    self._do_nothing,
                                    self._do_nothing)
            del self.bootstrap_nodes[index]
        #TODO2: Don't use querier's rpc_m
        self.querier.rpc_m.call_later(BOOTSTRAP_DELAY,
                                      self.do_bootstrap)
    
    def on_query_received(self, node_):
        try:
            rnode = self.main.get_rnode(node_)
        except RnodeNotFound:
            pass # node is not in the main table
        else:
            # node in routing table: inform rnode
            rnode.on_query_received()
            self._reset_refresh_task(rnode)
            return
        # Node is not in routing table
        # Check reachability (if the bucket is not full)
        if self.main.there_is_room(node_):
            # there is room in the bucket: ping node to check reachability
            self._refresh_now(node_)
            return
        # No room in the main routing table
        # Add to replacement table (if the bucket is not full)
        bucket = self.replacement.get_bucket(node_)
        worst_rnode = self._worst_rnode(bucket.rnodes)
        if worst_rnode \
                and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
            self.replacement.remove(worst_rnode)
            self.replacement.add(node_)

            
    def on_response_received(self, node_): #TODO2:, rtt=0):
        try:
            rnode = self.main.get_rnode(node_)
        except (RnodeNotFound):
            pass
        else:
            # node in routing table: refresh it
            rnode.on_response_received()
            self._reset_refresh_task(rnode)
            return
        # The node is not in main
        try:
            rnode = self.replacement.get_rnode(node_)
        except (RnodeNotFound):
            pass
        else:
            # node in replacement table
            # let's see whether there is room in the main
            rnode.on_response_received()
            if self.main.there_is_room(node_):
                rnode = self.main.add(rnode)
                self._reset_refresh_task(rnode)
                self.replacement.remove(rnode)
            return
        # The node is nowhere
        # Add to replacement table (if the bucket is not full)
        bucket = self.replacement.get_bucket(node_)
        if self.main.there_is_room(node_):
            if not bucket.rnodes:
                # Replacement is empty
                rnode = self.main.add(node_)
                self._reset_refresh_task(rnode)
                return
        # The main bucket is full or the repl bucket is not empty
        worst_rnode = self._worst_rnode(bucket.rnodes)
        # Get the worst node in replacement bucket and see whether
        # it's bad enough to be replaced by node_
        if worst_rnode \
                and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
            # This node is better candidate than worst_rnode
            self.replacement.remove(worst_rnode)
        try:
            self.replacement.add(node_)
        except (BucketFullError):
            pass

        
    def on_error_received(self, node_):
        pass
    
    def on_timeout(self, node_):
        if node_ is self.my_node:
            raise Exception('I got a timeout from myself!')
        if not node_.id:
            return # This is a bootstrap node (just addr, no id)
        try:
            rnode = self.main.get_rnode(node_)
        except RnodeNotFound:
            pass
        else:
            # node in routing table: check whether it should be removed
            rnode.on_timeout()
            replacement_bucket = self.replacement.get_bucket(node_)
            self._refresh_replacement_bucket(replacement_bucket)
            self.main.remove(rnode)
            try:
                self.replacement.add(rnode)
            except (BucketFullError):
                worst_rnode = self._worst_rnode(replacement_bucket.rnodes)
                if worst_rnode:
                    # Replace worst node in replacement table
                    self.replacement.remove(worst_rnode)
                    self._refresh_replacement_bucket(replacement_bucket)
                    # We don't want to ping the node which just did timeout
                    self.replacement.add(rnode)
        # Node is not in main table
        try:
            rnode = self.replacement.get_rnode(node_)
        except RnodeNotFound:
            pass # the node is not in any table. Nothing to do here.
        else:
            # Node in replacement table: just update rnode
            rnode.on_timeout()
            
    def on_nodes_found(self, nodes):
        #FIXME: this will send ping at exponential rate
        #not good!!!!
        logging.debug('nodes found: %r', nodes)
        for node_ in nodes:
            try:
                rnode = self.main.get_rnode(node_)
            except RnodeNotFound:
                # Not in the main: ping it if there is room in main
                if self.main.there_is_room(node_):
                    logging.debug('pinging node found: %r', node_)
                    self._refresh_now(node_, NO_PRIORITY)
                    #TODO2: prefer NS

    def get_closest_rnodes(self, target_id, num_nodes=DEFAULT_NUM_NODES):
        return self.main.get_closest_rnodes(target_id, num_nodes)

    def get_all_rnodes(self):
        return (self.main.get_all_rnodes(),
                self.replacement.get_all_rnodes())

    def _refresh_now(self, node_, priority=PRIORITY):
        if priority == NO_PRIORITY and \
                self.num_concurrent_refresh_msgs > MAX_CONCURRENT_REFRESH_MSGS:
            return
        self.num_concurrent_refresh_msgs += 1
        return self.querier.send_query(self.find_node_msg,
                                       node_,
                                       self._refresh_now_callback,
                                       self._refresh_now_callback,
                                       self._refresh_now_callback)
    
    def _reset_refresh_task(self, rnode):
        if rnode.refresh_task:
            # Cancel the current refresh task
            rnode.refresh_task.cancel()
        if rnode.in_quarantine:
            rnode.refresh_task = self._refresh_later(rnode,
                                                     QUARANTINE_PERIOD)
        else:
            rnode.refresh_task = self._refresh_later(rnode)


    def _refresh_later(self, rnode, delay=REFRESH_PERIOD):
        return self.querier.send_query_later(delay,
                                             self.find_node_msg,
                                             rnode,
                                             self._do_nothing,
                                             self._do_nothing,
                                             self._do_nothing)
    def _do_nothing(self, *args, **kwargs):
        pass

    def _refresh_now_callback(self, *args, **kwargs):
        self.num_concurrent_refresh_msgs -= 1


    def _refresh_replacement_bucket(self, bucket):
        for rnode in bucket.rnodes:
            if rnode.is_ns:
                # We give advantage to NS nodes
                self._refresh_now(rnode)
            else:
                self._refresh_later(rnode, REFRESH_DELAY_FOR_NON_NS)
    
    def _worst_rnode(self, rnodes):
        max_num_timeouts = -1
        worst_rnode_so_far = None
        for rnode in rnodes:
            num_timeouts = rnode.timeouts_in_a_row()
            if num_timeouts >= max_num_timeouts:
                max_num_timeouts = num_timeouts
                worst_rnode_so_far = rnode
        return worst_rnode_so_far
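Both tables above share one eviction rule: a newcomer only displaces the resident with the most consecutive timeouts, and only once that count exceeds MAX_NUM_TIMEOUTS. A standalone sketch of the rule (the threshold value and the stub class are assumptions for illustration):

# Standalone check of the _worst_rnode eviction rule.
# MAX_NUM_TIMEOUTS = 2 and StubNode are illustrative assumptions.
MAX_NUM_TIMEOUTS = 2

class StubNode(object):
    def __init__(self, name, timeouts):
        self.name = name
        self._timeouts = timeouts

    def timeouts_in_a_row(self):
        return self._timeouts

def worst_rnode(rnodes):
    worst, max_timeouts = None, -1
    for rnode in rnodes:
        if rnode.timeouts_in_a_row() >= max_timeouts:
            max_timeouts = rnode.timeouts_in_a_row()
            worst = rnode
    return worst

bucket = [StubNode("a", 0), StubNode("b", 3), StubNode("c", 1)]
worst = worst_rnode(bucket)
if worst and worst.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
    print("evict", worst.name)  # evict b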
Code example #20
class KademliaNode(DatagramRPCProtocol):
    def __init__(self, alpha=3, k=20, identifier=None):

        # Initialize the DatagramRPCProtocol base class
        super(KademliaNode, self).__init__()

        # TODO: Make the node id a function of node's public key
        # Just like Bitcoin wallet IDs use HASH160
        if identifier is None:
            identifier = random_id()

        self.identifier = identifier

        # Constants from the kademlia protocol
        self.k = k
        self.alpha = alpha

        # Each node has their own dictionary
        self.storage = {}

        # The k-bucket based kademlia routing table
        self.routing_table = RoutingTable(self.identifier, k=self.k)

    @rpc
    def ping(self, peer, peer_identifier):
        logger.info('handling ping(%r, %r)', peer, peer_identifier)

        # The 1st identifier is consumed by kademlia
        # While the 2nd is sent as a reply back to the caller
        return (self.identifier, self.identifier)

    @rpc
    def store(self, peer, peer_identifier, key, value):
        logger.info('handling store(%r, %r, %r, %r)', peer, peer_identifier,
                    key, value)

        self.storage[key] = value
        return (self.identifier, True)

    @rpc
    def find_node(self, peer, peer_identifier, key):
        logger.info('handling find_node(from=%r, peer_id=%r, find=%r)', peer,
                    peer_identifier, key)

        response = self.routing_table.find_closest_peers(
            key, excluding=peer_identifier)
        return (self.identifier, response)

    @rpc
    def find_value(self, peer, peer_identifier, key):
        logger.info('handling find_value(%r, %r, %r)', peer, peer_identifier,
                    key)

        if key in self.storage:
            response = ('found', self.storage[key])
            return (self.identifier, response)

        response = ('notfound',
                    self.routing_table.find_closest_peers(
                        key, excluding=peer_identifier))
        return (self.identifier, response)

    @asyncio.coroutine
    def ping_all_neighbors(self):
        for node_id, peer in list(self.routing_table):
            yield from self.ping(peer, self.identifier)

    @asyncio.coroutine
    def join(self, known_node):
        """
        Run by a node when it wants to join the network.

        http://xlattice.sourceforge.net/components/protocol/kademlia/specs.html#join
        """

        # When a new node is created, ping some known_node
        logger.info("Pinging %r", known_node)

        try:
            yield from self.ping(known_node, self.identifier)
        except socket.timeout:
            logger.warning("Could not ping %r", known_node)
            return

        # Try to find all peers close to myself
        # (this'll update my routing table)
        yield from self.lookup_node(self.identifier)

        # Pinging all neighbors will update their routing tables
        logger.info("Pinging all neighbors")
        yield from self.ping_all_neighbors()

        try:
            # Check if my public key is already in the network
            yield from self.get(self.identifier)
        except KeyError:
            # Store my information onto the network
            # (allowing others to find me)
            yield from self.put(self.identifier,
                                (self.socket_addr, self.pub_key))

            logger.info("Sending my genesis transaction %r",
                        self.ledger.genesis_tx)
            yield from self.add_tx_to_ledger(
                known_node, self.identifier,
                self.ledger.genesis_tx)  # add it to the ledger of bootstrapper

            ledger_bootstrap = yield from self.get_ledger(
                known_node, self.identifier)  # get the bootstrapper's ledger
            logger.info("Got Ledger %r", ledger_bootstrap)
            self.ledger.record = ledger_bootstrap.record  # replace my ledger with that of bootstrappers

            yield from self.broadcast(
                random_id(), 'add_tx_to_ledger', self.identifier, self.ledger.
                genesis_tx)  # broadcast my genesis transaction to everyone

    # TODO: Refactor the hashed part
    @asyncio.coroutine
    def put(self, raw_key, value, hashed=True):
        # hashed=True: the key being passed is already hashed;
        # hashed=False: the key still needs to be hashed to a 160-bit id.
        if not hashed:
            hashed_key = sha1_int(raw_key)
        else:
            hashed_key = raw_key  # the DHT key is an already-hashed node id

        peers_close_to_key = yield from self.lookup_node(hashed_key,
                                                         find_value=False)

        store_tasks = [
            self.store(peer, self.identifier, hashed_key, value)
            for _, peer in peers_close_to_key
        ]

        results = yield from asyncio.gather(*store_tasks,
                                            return_exceptions=True)
        successful = [r for r in results if r is True]

        return len(successful)

    @asyncio.coroutine
    def get(self, raw_key, hashed=True):
        # hashed=True: the key being passed is already hashed;
        # hashed=False: the key still needs to be hashed to a 160-bit id.
        if not hashed:
            hashed_key = sha1_int(raw_key)
        else:
            hashed_key = raw_key
        if hashed_key in self.storage:
            return self.storage[hashed_key]
        # lookup_node raises KeyError if no peer holds the key
        response = yield from self.lookup_node(hashed_key, find_value=True)

        return response

    @asyncio.coroutine
    def lookup_node(self, hashed_key, find_value=False):
        def distance(peer):
            return peer[0] ^ hashed_key

        contacted, dead = set(), set()

        peers = {(peer_identifier, peer)
                 for peer_identifier, peer in
                 self.routing_table.find_closest_peers(hashed_key)}

        if not peers:
            raise KeyError(hashed_key, 'No peers available.')

        while True:
            uncontacted = peers - contacted

            if not uncontacted:
                break

            closest = sorted(uncontacted, key=distance)[:self.alpha]

            for peer_identifier, peer in closest:

                contacted.add((peer_identifier, peer))

                try:
                    if find_value:
                        result, contacts = yield from self.find_value(
                            peer, self.identifier, hashed_key)
                        if result == 'found':
                            return contacts
                    else:
                        contacts = yield from self.find_node(
                            peer, self.identifier, hashed_key)

                except socket.timeout:
                    self.routing_table.forget_peer(peer_identifier)
                    dead.add((peer_identifier, peer))
                    continue

                for new_peer_identifier, new_peer in contacts:
                    if new_peer_identifier == self.identifier:
                        continue
                    peers.add((new_peer_identifier, new_peer))

        if find_value:
            raise KeyError(hashed_key, 'Not found among any available peers.')
        else:
            return sorted(peers - dead, key=distance)[:self.k]

    @asyncio.coroutine
    def broadcast(self, message_identifier, procedure_name, *args, **kwargs):
        """
        Broadcast a message containing a procedure_name to all the nodes
        who will then execute it.

        Arguments:
            message_identifier : unique msg id for each broadcast
            procedure_name : name of the remote procedure to be executed
            args : parameters for that procedure
        """
        logger.info("sending a broadcast of procedure %r transaction: %r",
                    procedure_name, args[1:])
        if message_identifier not in self.broadcast_list:
            self.broadcast_list.append(message_identifier)

        # Create a message with its type, procedure_name and args
        obj = ('broadcast', message_identifier, procedure_name, *args)
        message = pickle.dumps(obj, protocol=0)

        # Send the msg to each connected peer
        for _, peer in self.routing_table:
            self.transport.sendto(message, peer)
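Everything in lookup_node above is driven by the Kademlia XOR metric: at each round the node contacts the alpha closest uncontacted peers, where closeness is peer_id ^ target. A minimal sketch of that selection step (ids, addresses, and the alpha value are arbitrary example data):

# Sketch of the alpha-closest selection used by lookup_node().
alpha = 3
target = 0b1010

peers = {(0b0010, ("10.0.0.2", 9000)),
         (0b1011, ("10.0.0.3", 9000)),
         (0b1110, ("10.0.0.4", 9000)),
         (0b0111, ("10.0.0.5", 9000))}
contacted = {(0b0010, ("10.0.0.2", 9000))}

def distance(peer):
    return peer[0] ^ target  # XOR metric

uncontacted = peers - contacted
closest = sorted(uncontacted, key=distance)[:alpha]
print([bin(pid) for pid, _ in closest])
# ['0b1011', '0b1110', '0b111']  (distances 1, 4, 13)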
Code example #21
File: mapit.py Project: yongquanf/MAP-IT
def main():
    parser = ArgumentParser()
    parser.add_argument('-a', '--adjacencies', help='Adjacencies derived from traceroutes')
    parser.add_argument('-b', '--ip2as', help='BGP prefixes')
    parser.add_argument('-c', '--addresses', help='List of addresses')
    parser.add_argument('-f', '--factor', type=float, default=0, help='Factor used in the paper')
    parser.add_argument('-i', '--interfaces', dest='interfaces', help='Interface information')
    parser.add_argument('-o', '--as2org', help='AS2ORG mappings')
    parser.add_argument('-v', dest='verbose', action='count', default=0, help='Increase verbosity for each v')
    parser.add_argument('-w', '--output', type=FileType('w'), default='-', help='Output filename')
    parser.add_argument('--addresses-exit', dest='addresses_exit', type=FileType('w'), help='Extract addresses from traces and exit.')
    parser.add_argument('--potaroo', action='store_true', help='Include AS identifiers and names from http://bgp.potaroo.net/cidr/autnums.html')
    parser.add_argument('--trace-exit', type=FileType('w'), help='Extract adjacencies and addresses from the traceroutes and exit')
    providers_group = parser.add_mutually_exclusive_group()
    providers_group.add_argument('-r', '--rel-graph', help='CAIDA relationship graph')
    providers_group.add_argument('-p', '--asn-providers', help='List of ISP ASes')
    providers_group.add_argument('-q', '--org-providers', help='List of ISP ORGs')
    parser.add_argument('-I', '--iterations', type=int, default=100)
    args = parser.parse_args()

    log.setLevel(max((3 - args.verbose) * 10, 10))

    ip2as = RoutingTable.ip2as(args.ip2as)
    as2org = AS2Org(args.as2org, include_potaroo=False)

    adjacencies = read_adjacencies(args.adjacencies)
    neighbors = defaultdict(list)
    for x, y in adjacencies:
        neighbors[(x, True)].append(y)
        neighbors[(y, False)].append(x)
    status('Extracting addresses from adjacencies')
    unique_interfaces = {u for u, _ in adjacencies} | {v for _, v in adjacencies}
    finish_status('Found {:,d}'.format(len(unique_interfaces)))
    status('Converting addresses to ipnums')
    addresses = {struct.unpack("!L", socket.inet_aton(addr.strip()))[0] for addr in unique_interfaces}
    finish_status()
    log.info('Mapping IP addresses to ASes.')
    asns = {}
    for address in unique_interfaces:
        asn = ip2as[address]
        if asn != -2:
            asns[address] = asn
    if as2org:
        log.info('Mapping ASes to Orgs.')
        orgs = {address: as2org[asn] for address, asn in asns.items()}
    else:
        orgs = asns
    log.info('Determining other sides for each address (assuming point-to-point).')
    othersides = {address: determine_otherside(address, addresses) for address in asns}
    log.info('Creating interface halves.')
    halves_dict = {
        (address, direction): InterfaceHalf(address, asns[address], orgs[address], direction, othersides[address])
        for (address, direction) in neighbors if address in asns
        }
    for (address, direction), half in halves_dict.items():
        half.set_otherhalf(halves_dict.get((address, not direction)))
        half.set_otherside(halves_dict.get((half.otherside_address, not direction)))
        half.set_neighbors([halves_dict[(neighbor, not direction)] for neighbor in neighbors[(address, direction)] if
                            neighbor in asns])
    allhalves = list(halves_dict.values())
    if args.asn_providers:
        with File2(args.asn_providers) as f:
            providers = {int(asn.strip()) for asn in f}
    elif args.org_providers:
        with File2(args.org_providers) as f:
            providers = {asn.strip() for asn in f}
    elif args.rel_graph:
        rels = pd.read_csv(args.rel_graph, sep='|', comment='#', names=['AS1', 'AS2', 'Rel'], usecols=[0, 1, 2])
        providers = set(rels[rels.Rel == -1].AS1.unique())
    else:
        providers = None
    updates = algorithm(allhalves, factor=args.factor, providers=providers, iterations=args.iterations)
    updates.write(args.output)
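One detail worth isolating from main() above: each dotted-quad address is packed into a 32-bit integer ("ipnum") before the point-to-point otherside inference, using only the standard library:

# The dotted-quad -> 32-bit integer conversion used in main().
import socket
import struct

def ip_to_num(addr):
    return struct.unpack("!L", socket.inet_aton(addr.strip()))[0]

print(ip_to_num("10.0.0.1"))  # 167772161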
Code example #22
File: layers.py Project: matwitke/carsim
 def __init__(self, owner):
     self.is_router = False
     self.rt = RoutingTable()
     self.it = InterfacesTable()
     self.owner = owner
Code example #23
File: router.py Project: yushu-liu/GerogiaTech
class Router(object):
    def __init__(self, router_num, N, protocol):
        self._router_num = router_num
        self._N = N
        self._neighbors = {}
        self._routing_table = RoutingTable(N, self._router_num, protocol)

    def get_router_num(self):
        '''
        Returns the router num for this router
        :return: int
        '''
        return self._router_num

    def set_neighbor(self, neighbor, cost):
        '''
        If cost is -1, this will remove a link. Otherwise, this will set or reset the cost of a neighbor
        :param neighbor: Router
        :param cost: int
        :return: None
        '''
        if neighbor is self:
            return
        if cost == -1:
            # Note: the lookup must call get_router_num(); passing the bound
            # method itself would never match a stored key.
            if self._neighbors.get(neighbor.get_router_num()) is not None:
                del self._neighbors[neighbor.get_router_num()]
        else:
            self._neighbors[neighbor.get_router_num()] = neighbor
        self._routing_table.set_neighbor(neighbor.get_router_num(), cost)

    def receive_distance_vector(self, src, vector):
        '''
        Meant to be called by other routers that want to send over their distance vector. This router will store the
        vector and their source
        :param src: int
        :param vector: {}
        :return: None
        '''
        self._routing_table.set_neighbor_vector(src, vector)

    def send_distance_vector_to_neighbors(self):
        '''
        Sends this router's distance vector to all of its neighbors.
        :return: None
        '''
        for n in self._neighbors:
            dv = self._routing_table.get_distance_vector_for_neighbor(n)
            self._neighbors[n].receive_distance_vector(self.get_router_num(),
                                                       dv)

    def update_distance_vector(self):
        '''
        Meant to be called after receiving all distance vectors. Recalculates
        the distance vector and returns a bool indicating whether any changes
        were made.
        :return: boolean
        '''
        old_dv = deepcopy(
            self._routing_table.get_distance_vector_for_neighbor(-1))
        self._routing_table.update_distance_vector()
        dv = self._routing_table.get_distance_vector_for_neighbor(-1)
        return (dv != old_dv)

    def __str__(self):
        return str(self._routing_table)
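update_distance_vector() compares the recomputed vector against a deep copy of the old one; the recomputation itself lives inside RoutingTable and is not shown, but for a distance-vector protocol it is the standard Bellman-Ford relaxation over the stored neighbor vectors. A minimal sketch under that assumption, with plain dicts standing in for the table:

# Bellman-Ford relaxation sketch for a distance-vector router.
# `costs`: link costs to direct neighbors; `neighbor_vectors`: the vectors
# most recently received from them. All names are illustrative assumptions.
def recompute_vector(me, costs, neighbor_vectors, all_nodes):
    vector = {}
    for dest in all_nodes:
        if dest == me:
            vector[dest] = 0
            continue
        vector[dest] = min(
            (costs[n] + neighbor_vectors[n].get(dest, float("inf"))
             for n in costs),
            default=float("inf"))
    return vector

costs = {1: 2, 2: 7}                                  # router 0's links
neighbor_vectors = {1: {1: 0, 2: 1}, 2: {1: 1, 2: 0}}
print(recompute_vector(0, costs, neighbor_vectors, [0, 1, 2]))
# {0: 0, 1: 2, 2: 3} -- reaching 2 via 1 beats the direct link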
Code example #24
class RoutingManager(object):
    def __init__(self, my_node, querier, bootstrap_nodes):
        self.my_node = my_node
        self.querier = querier
        #Copy the bootstrap list
        self.bootstrap_nodes = [n for n in bootstrap_nodes]

        self.main = RoutingTable(my_node, NODES_PER_BUCKET)
        self.replacement = RoutingTable(my_node, NODES_PER_BUCKET)
        self.ping_msg = message.OutgoingPingQuery(my_node.id)
        self.find_node_msg = message.OutgoingFindNodeQuery(
            my_node.id, my_node.id)
        self.mode = BOOTSTRAP_MODE
        self.num_concurrent_refresh_msgs = 0
        #This must be called by an external party: self.do_bootstrap()
        #After initializing callbacks

        # Add myself to the routing table
        rnode = self.main.add(my_node)
        self._reset_refresh_task(rnode)

    def do_bootstrap(self):
        if self.main.num_rnodes > MIN_RNODES_BOOTSTRAP:
            # Enough nodes. Stop bootstrap.
            return
        for _ in xrange(NUM_NODES_PER_BOOTSTRAP_STEP):
            if not self.bootstrap_nodes:
                self.mode = NORMAL_MODE
                return
            index = random.randint(0, len(self.bootstrap_nodes) - 1)
            self.querier.send_query(self.find_node_msg,
                                    self.bootstrap_nodes[index],
                                    self._do_nothing, self._do_nothing,
                                    self._do_nothing)
            del self.bootstrap_nodes[index]
        #TODO2: Don't use querier's rpc_m
        self.querier.rpc_m.call_later(BOOTSTRAP_DELAY, self.do_bootstrap)

    def on_query_received(self, node_):
        try:
            rnode = self.main.get_rnode(node_)
        except RnodeNotFound:
            pass  # node is not in the main table
        else:
            # node in routing table: inform rnode
            rnode.on_query_received()
            self._reset_refresh_task(rnode)
            return
        # Node is not in routing table
        # Check reachability (if the bucket is not full)
        if self.main.there_is_room(node_):
            # there is room in the bucket: ping node to check reachability
            self._refresh_now(node_)
            return
        # No room in the main routing table
        # Add to replacement table (if the bucket is not full)
        bucket = self.replacement.get_bucket(node_)
        worst_rnode = self._worst_rnode(bucket.rnodes)
        if worst_rnode \
                and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
            self.replacement.remove(worst_rnode)
            self.replacement.add(node_)

    def on_response_received(self, node_):  #TODO2:, rtt=0):
        try:
            rnode = self.main.get_rnode(node_)
        except (RnodeNotFound):
            pass
        else:
            # node in routing table: refresh it
            rnode.on_response_received()
            self._reset_refresh_task(rnode)
            return
        # The node is not in main
        try:
            rnode = self.replacement.get_rnode(node_)
        except (RnodeNotFound):
            pass
        else:
            # node in replacement table
            # let's see whether there is room in the main
            rnode.on_response_received()
            if self.main.there_is_room(node_):
                rnode = self.main.add(rnode)
                self._reset_refresh_task(rnode)
                self.replacement.remove(rnode)
            return
        # The node is nowhere
        # Add to replacement table (if the bucket is not full)
        bucket = self.replacement.get_bucket(node_)
        if self.main.there_is_room(node_):
            if not bucket.rnodes:
                # Replacement is empty
                rnode = self.main.add(node_)
                self._reset_refresh_task(rnode)
                return
        # The main bucket is full or the repl bucket is not empty
        worst_rnode = self._worst_rnode(bucket.rnodes)
        # Get the worst node in replacement bucket and see whether
        # it's bad enough to be replaced by node_
        if worst_rnode \
                and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
            # This node is better candidate than worst_rnode
            self.replacement.remove(worst_rnode)
        try:
            self.replacement.add(node_)
        except (BucketFullError):
            pass

    def on_error_received(self, node_):
        pass

    def on_timeout(self, node_):
        if node_ is self.my_node:
            raise Exception('I got a timeout from myself!')
        if not node_.id:
            return  # This is a bootstrap node (just addr, no id)
        try:
            rnode = self.main.get_rnode(node_)
        except RnodeNotFound:
            pass
        else:
            # node in routing table: check whether it should be removed
            rnode.on_timeout()
            replacement_bucket = self.replacement.get_bucket(node_)
            self._refresh_replacement_bucket(replacement_bucket)
            self.main.remove(rnode)
            try:
                self.replacement.add(rnode)
            except (BucketFullError):
                worst_rnode = self._worst_rnode(replacement_bucket.rnodes)
                if worst_rnode:
                    # Replace worst node in replacement table
                    self.replacement.remove(worst_rnode)
                    self._refresh_replacement_bucket(replacement_bucket)
                    # We don't want to ping the node which just did timeout
                    self.replacement.add(rnode)
        # Node is not in main table
        try:
            rnode = self.replacement.get_rnode(node_)
        except RnodeNotFound:
            pass  # the node is not in any table. Nothing to do here.
        else:
            # Node in replacement table: just update rnode
            rnode.on_timeout()

    def on_nodes_found(self, nodes):
        #FIXME: this will send ping at exponential rate
        #not good!!!!
        log.debug('nodes found: %r', nodes)
        for node_ in nodes:
            try:
                rnode = self.main.get_rnode(node_)
            except RnodeNotFound:
                # Not in the main: ping it if there is room in main
                if self.main.there_is_room(node_):
                    log.debug('pinging node found: %r', node_)
                    self._refresh_now(node_, NO_PRIORITY)
                    #TODO2: prefer NS

    def get_closest_rnodes(self, target_id, num_nodes=DEFAULT_NUM_NODES):
        return self.main.get_closest_rnodes(target_id, num_nodes)

    def get_all_rnodes(self):
        return (self.main.get_all_rnodes(), self.replacement.get_all_rnodes())

    def _refresh_now(self, node_, priority=PRIORITY):
        if priority == NO_PRIORITY and \
                self.num_concurrent_refresh_msgs > MAX_CONCURRENT_REFRESH_MSGS:
            return
        self.num_concurrent_refresh_msgs += 1
        return self.querier.send_query(self.find_node_msg, node_,
                                       self._refresh_now_callback,
                                       self._refresh_now_callback,
                                       self._refresh_now_callback)

    def _reset_refresh_task(self, rnode):
        if rnode.refresh_task:
            # Cancel the current refresh task
            rnode.refresh_task.cancel()
        if rnode.in_quarantine:
            rnode.refresh_task = self._refresh_later(rnode, QUARANTINE_PERIOD)
        else:
            rnode.refresh_task = self._refresh_later(rnode)

    def _refresh_later(self, rnode, delay=REFRESH_PERIOD):
        return self.querier.send_query_later(delay, self.find_node_msg, rnode,
                                             self._do_nothing,
                                             self._do_nothing,
                                             self._do_nothing)

    def _do_nothing(self, *args, **kwargs):
        pass

    def _refresh_now_callback(self, *args, **kwargs):
        self.num_concurrent_refresh_msgs -= 1

    def _refresh_replacement_bucket(self, bucket):
        for rnode in bucket.rnodes:
            if rnode.is_ns:
                # We give advantage to NS nodes
                self._refresh_now(rnode)
            else:
                self._refresh_later(rnode, REFRESH_DELAY_FOR_NON_NS)

    def _worst_rnode(self, rnodes):
        max_num_timeouts = -1
        worst_rnode_so_far = None
        for rnode in rnodes:
            num_timeouts = rnode.timeouts_in_a_row()
            if num_timeouts >= max_num_timeouts:
                max_num_timeouts = num_timeouts
                worst_rnode_so_far = rnode
        return worst_rnode_so_far
Code example #25
class RoutingManager(object):
    def __init__(self, my_node, bootstrap_nodes, msg_f):
        self.my_node = my_node
        self.bootstrapper = bootstrap.OverlayBootstrapper(
            my_node.id, bootstrap_nodes, msg_f)
        self.msg_f = msg_f
        self.table = RoutingTable(my_node, NODES_PER_BUCKET)
        # maintenance variables
        self._next_stale_maintenance_index = 0
        self._maintenance_mode = BOOTSTRAP_MODE
        self._replacement_queue = _ReplacementQueue(self.table)
        self._query_received_queue = _QueryReceivedQueue(self.table)
        self._found_nodes_queue = _FoundNodesQueue(self.table)
        self._maintenance_tasks = [
            self._ping_a_staled_rnode,
            self._ping_a_query_received_node,
            self._ping_a_found_node,
            self._ping_a_replacement_node,
        ]
        self._num_pending_filling_lookups = NUM_FILLING_LOOKUPS

    def _get_maintenance_lookup(self, lookup_target=None, nodes=[]):
        if not lookup_target:
            lookup_target = identifier.RandomId()
        if not nodes:
            log_distance = lookup_target.distance(self.my_node.id).log
            nodes = self.get_closest_rnodes(log_distance, 0, True)
        return lookup_target, nodes

    def do_maintenance(self):
        queries_to_send = []
        maintenance_lookup = None
        maintenance_delay = 0
        if self._maintenance_mode == BOOTSTRAP_MODE:
            (queries_to_send, maintenance_lookup,
             bootstrap_delay) = self.bootstrapper.do_bootstrap(
                 self.table.num_rnodes)
            if bootstrap_delay:
                maintenance_delay = bootstrap_delay
            else:
                self._maintenance_mode = FILL_BUCKETS
        elif self._maintenance_mode == FILL_BUCKETS:
            if self._num_pending_filling_lookups:
                self._num_pending_filling_lookups -= 1
                maintenance_lookup = self._get_maintenance_lookup()
            else:
                self._maintenance_mode = NORMAL_MODE
        elif self._maintenance_mode == NORMAL_MODE:
            for _ in range(len(self._maintenance_tasks)):
                # We try maintenance tasks till one of them actually does work
                # or we have tried them all (whatever happens first) We loop
                # in range because I'm going to modify self._maintenance_tasks
                task = self._maintenance_tasks.pop(0)
                self._maintenance_tasks.append(task)
                node_ = task()
                if node_:
                    queries_to_send.append(self._get_maintenance_query(node_))
                    # This task did do some work. We are done here!
                    break
        if self.table.num_rnodes < MIN_RNODES:
            # Ping more found nodes when routing table has few nodes
            node_ = self._ping_a_found_node()
            if node_:
                queries_to_send.append(
                    self._get_maintenance_query(node_, do_fill_up=True))
        if not maintenance_delay:
            maintenance_delay = _MAINTENANCE_DELAY[self._maintenance_mode]
        return (maintenance_delay, queries_to_send, maintenance_lookup)

    def _ping_a_staled_rnode(self):
        starting_index = self._next_stale_maintenance_index
        result = None
        while not result:
            # Find a non-empty bucket
            sbucket = self.table.get_sbucket(
                self._next_stale_maintenance_index)
            m_bucket = sbucket.main
            self._next_stale_maintenance_index = (
                self._next_stale_maintenance_index + 1) % (NUM_BUCKETS - 1)
            if m_bucket:
                rnode = m_bucket.get_stalest_rnode()
                if time.time() > rnode.last_seen + QUARANTINE_PERIOD:
                    result = rnode
            if self._next_stale_maintenance_index == starting_index:
                # No node to be pinged in the whole table.
                break
        return result

    def _ping_a_found_node(self):
        node_ = self._found_nodes_queue.pop(0)
        if node_:
            logger.debug('pinging node found: %r', node_)
        return node_

    def _ping_a_query_received_node(self):
        return self._query_received_queue.pop(0)

    def _ping_a_replacement_node(self):
        return self._replacement_queue.pop(0)

    def _get_maintenance_query(self, node_, do_fill_up=False):
        '''
        if not node_.id: 
            # Bootstrap nodes don't have id
            return message.OutgoingFindNodeQuery(node_,
                                                 self.my_node.id,
                                                 self.my_node.id, None)
        '''
        if do_fill_up or random.choice((False, True)):

            # 50% chance to send a find_node to fill up a non-full bucket
            target_log_distance = self.table.find_next_bucket_with_room_index(
                node_=node_)
            if target_log_distance:
                target = self.my_node.id.generate_close_id(target_log_distance)
                msg = self.msg_f.outgoing_find_node_query(node_, target, None)
            else:
                # Every bucket is full. We send a ping instead.
                msg = self.msg_f.outgoing_ping_query(node_)
        else:
            # 50% chance to send find_node with my id as target
            msg = self.msg_f.outgoing_find_node_query(node_, self.my_node.id,
                                                      None)
        return msg

    def on_query_received(self, node_):
        '''
        Return None when nothing to do
        Return a list of queries when queries need to be sent (the queries
        will be sent out by the caller)
        '''
        if self.bootstrapper.is_bootstrap_node(node_):
            return

        log_distance = self.my_node.distance(node_).log
        try:
            sbucket = self.table.get_sbucket(log_distance)
        except (IndexError):
            return  # Got a query from myself. Just ignore it.

        m_bucket = sbucket.main
        r_bucket = sbucket.replacement
        if node_.ip in m_bucket.ips_in_table:
            rnode = m_bucket.get_rnode(node_)
            if rnode:
                # node in routing table: update rnode
                self._update_rnode_on_query_received(rnode)
            # This IP is in the table. Stop here to avoid multiple entries
            # with the same IP
            return

        # Now, consider adding this node to the routing table
        if m_bucket.there_is_room():
            # There is room in the bucket: queue it
            self._query_received_queue.add(node_, log_distance)
            return
        # No room in the main routing table
        # Add to replacement table (if the bucket is not full)
        worst_rnode = self._worst_rnode(r_bucket.rnodes)
        if worst_rnode \
                and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
            r_bucket.remove(worst_rnode)
            rnode = node_.get_rnode(log_distance)
            r_bucket.add(rnode)
            self._update_rnode_on_query_received(rnode)
        return

    def on_response_received(self, node_, rtt, nodes):
        if self.bootstrapper.is_bootstrap_node(node_):
            return

        if nodes:
            logger.debug('nodes found: %r', nodes)
        self._found_nodes_queue.add(nodes)

        logger.debug('on response received %f', rtt)
        log_distance = self.my_node.distance(node_).log
        try:
            sbucket = self.table.get_sbucket(log_distance)
        except (IndexError):
            return  # Got a response from myself. Just ignore it.
        m_bucket = sbucket.main
        r_bucket = sbucket.replacement
        rnode = m_bucket.get_rnode(node_)
        if node_.ip in m_bucket.ips_in_table:
            rnode = m_bucket.get_rnode(node_)
            if rnode:
                # node in routing table: update rnode
                self._update_rnode_on_response_received(rnode, rtt)
            # This IP is in the table. Stop here to avoid multiple entries
            # with the same IP
            return

        # Now, consider adding this node to the routing table
        rnode = r_bucket.get_rnode(node_)
        if rnode:
            # node in replacement table
            # let's see whether there is room in the main
            self._update_rnode_on_response_received(rnode, rtt)
            #TODO: leave this for the maintenance task
            if m_bucket.there_is_room():
                m_bucket.add(rnode)
                self.table.num_rnodes += 1
                self._update_rnode_on_response_received(rnode, rtt)
                r_bucket.remove(rnode)
            return
        # The node is nowhere
        # Add to main table (if the bucket is not full)
        #TODO: check whether in replacement_mode
        if m_bucket.there_is_room():
            rnode = node_.get_rnode(log_distance)
            m_bucket.add(rnode)
            self.table.num_rnodes += 1
            self._update_rnode_on_response_received(rnode, rtt)
            return
        # The main bucket is full
        # Let's see whether this node's latency is good
        current_time = time.time()
        rnode_to_be_replaced = None
        m_bucket.rnodes.sort(key=attrgetter('rtt'), reverse=True)
        for rnode in m_bucket.rnodes:
            rnode_age = current_time - rnode.bucket_insertion_ts
            if rtt < rnode.rtt * (1 - (rnode_age / 7200)):
                # An rnode can only be replaced when the candidate node's RTT
                # is shorter by a factor that tightens with the rnode's age in
                # the bucket. For instance, when the rnode has been in the
                # bucket for 30 mins (1800 s), a candidate's RTT must be below
                # 75% of the rnode's RTT. After two hours (7200 s) the factor
                # reaches zero and the rnode cannot be replaced by this method.
                #                print 'RTT replacement: newRTT: %f, oldRTT: %f, age: %f' % (
                #                rtt, rnode.rtt, current_time - rnode.bucket_insertion_ts)
                rnode_to_be_replaced = rnode
                break
        if rnode_to_be_replaced:
            m_bucket.remove(rnode_to_be_replaced)
            rnode = node_.get_rnode(log_distance)
            m_bucket.add(rnode)
            # num_rnodes is unchanged: one node removed, one node added
            self.table.num_rnodes += 0
            self._update_rnode_on_response_received(rnode, rtt)
            return

        # Get the worst node in replacement bucket and see whether
        # it's bad enough to be replaced by node_
        worst_rnode = self._worst_rnode(r_bucket.rnodes)
        if worst_rnode \
                and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
            # This node is better candidate than worst_rnode
            r_bucket.remove(worst_rnode)
            rnode = node_.get_rnode(log_distance)
            r_bucket.add(rnode)
            self._update_rnode_on_response_received(rnode, rtt)
        return

    def on_error_received(self, node_addr):
        # if self.bootstrapper.is_bootstrap_node(node_):
        #     return
        return

    def on_timeout(self, node_):
        if self.bootstrapper.is_bootstrap_node(node_):
            return

        log_distance = self.my_node.distance(node_).log
        try:
            sbucket = self.table.get_sbucket(log_distance)
        except (IndexError):
            return []  # Got a timeout from myself, WTF? Just ignore.
        m_bucket = sbucket.main
        r_bucket = sbucket.replacement
        rnode = m_bucket.get_rnode(node_)
        if rnode:
            # node in routing table: kick it out
            self._update_rnode_on_timeout(rnode)
            m_bucket.remove(rnode)
            self.table.num_rnodes -= 1

            for r_rnode in r_bucket.sorted_by_rtt():
                self._replacement_queue.add(r_rnode)
            if r_bucket.there_is_room():
                r_bucket.add(rnode)
            else:
                worst_rnode = self._worst_rnode(r_bucket.rnodes)
                if worst_rnode:
                    # Replace worst node in replacement table
                    r_bucket.remove(worst_rnode)
                    r_bucket.add(rnode)
        # Node is not in main table
        rnode = r_bucket.get_rnode(node_)
        if rnode:
            # Node in replacement table: just update rnode
            self._update_rnode_on_timeout(rnode)
        return []

    def get_closest_rnodes(self, log_distance, num_nodes, exclude_myself):
        if not num_nodes:
            num_nodes = NODES_PER_BUCKET[log_distance]
        return self.table.get_closest_rnodes(log_distance, num_nodes,
                                             exclude_myself)

    def get_main_rnodes(self):
        return self.table.get_main_rnodes()

    def print_stats(self):
        self.table.print_stats()

    def _update_rnode_on_query_received(self, rnode):
        """Register a query from node.

        You should call this method when receiving a query from this node.

        """
        current_time = time.time()
        rnode.last_action_ts = time.time()
        rnode.msgs_since_timeout += 1
        rnode.num_queries += 1
        rnode.add_event(current_time, node.QUERY)
        rnode.last_seen = current_time

    def _update_rnode_on_response_received(self, rnode, rtt):
        """Register a reply from rnode.

        You should call this method when receiving a response from this rnode.

        """
        rnode.rtt = rtt
        current_time = time.time()
        #rnode._reset_refresh_task()
        if rnode.in_quarantine:
            rnode.in_quarantine = \
                rnode.last_action_ts < current_time - QUARANTINE_PERIOD

        rnode.last_action_ts = current_time
        rnode.num_responses += 1
        rnode.add_event(time.time(), node.RESPONSE)
        rnode.last_seen = current_time

    def _update_rnode_on_timeout(self, rnode):
        """Register a timeout for this rnode.

        You should call this method when getting a timeout for this node.

        """
        rnode.last_action_ts = time.time()
        rnode.msgs_since_timeout = 0
        rnode.num_timeouts += 1
        rnode.add_event(time.time(), node.TIMEOUT)

    def _worst_rnode(self, rnodes):
        max_num_timeouts = -1
        worst_rnode_so_far = None
        for rnode in rnodes:
            num_timeouts = rnode.timeouts_in_a_row()
            if num_timeouts >= max_num_timeouts:
                max_num_timeouts = num_timeouts
                worst_rnode_so_far = rnode
        return worst_rnode_so_far
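The RTT-based displacement in on_response_received() is the one subtle rule in this manager: a candidate replaces a resident main-bucket node only while rtt < rnode.rtt * (1 - age/7200). A standalone check with arbitrary sample values:

# Standalone check of the RTT replacement criterion.
def can_replace(candidate_rtt, rnode_rtt, rnode_age):
    return candidate_rtt < rnode_rtt * (1 - rnode_age / 7200.0)

print(can_replace(0.07, 0.10, 1800))  # True:  0.07 < 0.10 * 0.75
print(can_replace(0.08, 0.10, 1800))  # False: 0.08 >= 0.075
print(can_replace(0.01, 0.10, 7200))  # False: factor is 0 after two hours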
Code example #26
class RoutingManager(object):
    
    def __init__(self, my_node, bootstrap_nodes, msg_f):
        self.my_node = my_node
        self.bootstrapper = bootstrap.OverlayBootstrapper(my_node.id,
                                                          bootstrap_nodes, msg_f)
        self.msg_f = msg_f
        self.table = RoutingTable(my_node, NODES_PER_BUCKET)
        # maintenance variables
        self._next_stale_maintenance_index = 0
        self._maintenance_mode = BOOTSTRAP_MODE
        self._replacement_queue = _ReplacementQueue(self.table)
        self._query_received_queue = _QueryReceivedQueue(self.table)
        self._found_nodes_queue = _FoundNodesQueue(self.table)
        self._maintenance_tasks = [self._ping_a_staled_rnode,
                                   self._ping_a_query_received_node,
                                   self._ping_a_found_node,
                                   self._ping_a_replacement_node,
                                   ]
        self._num_pending_filling_lookups = NUM_FILLING_LOOKUPS

    def _get_maintenance_lookup(self, lookup_target=None, nodes=[]):
        if not lookup_target:
            lookup_target = identifier.RandomId()
        if not nodes:
            log_distance = lookup_target.distance(self.my_node.id).log
            nodes = self.get_closest_rnodes(log_distance, 0, True)
        return lookup_target, nodes
        
                
    def do_maintenance(self):
        queries_to_send = []
        maintenance_lookup = None
        maintenance_delay = 0
        if self._maintenance_mode == BOOTSTRAP_MODE:
            (queries_to_send,
             maintenance_lookup,
             bootstrap_delay) = self.bootstrapper.do_bootstrap(
                 self.table.num_rnodes)
            if bootstrap_delay:
                maintenance_delay = bootstrap_delay
            else:
                self._maintenance_mode = FILL_BUCKETS
        elif self._maintenance_mode == FILL_BUCKETS:
            if self._num_pending_filling_lookups:
                self._num_pending_filling_lookups -= 1
                maintenance_lookup = self._get_maintenance_lookup()
            else:
                self._maintenance_mode = NORMAL_MODE
        elif self._maintenance_mode == NORMAL_MODE:
            for _ in range(len(self._maintenance_tasks)):
                # We try maintenance tasks till one of them actually does work
                # or we have tried them all (whatever happens first) We loop
                # in range because I'm going to modify self._maintenance_tasks
                task = self._maintenance_tasks.pop(0)
                self._maintenance_tasks.append(task)
                node_ = task()
                if node_:
                    queries_to_send.append(self._get_maintenance_query(node_))
                    # This task did do some work. We are done here!
                    break
        if self.table.num_rnodes < MIN_RNODES:
            # Ping more found nodes when routing table has few nodes
            node_ = self._ping_a_found_node()
            if node_:
                queries_to_send.append(self._get_maintenance_query(
                        node_, do_fill_up=True))
        if not maintenance_delay:
            maintenance_delay = _MAINTENANCE_DELAY[self._maintenance_mode]
        return (maintenance_delay, queries_to_send, maintenance_lookup)

    def _ping_a_staled_rnode(self):
        starting_index = self._next_stale_maintenance_index
        result = None
        while not result:
            # Find a non-empty bucket
            sbucket = self.table.get_sbucket(
                self._next_stale_maintenance_index)
            m_bucket = sbucket.main
            self._next_stale_maintenance_index = (
                self._next_stale_maintenance_index + 1) % (NUM_BUCKETS - 1)
            if m_bucket:
                rnode = m_bucket.get_stalest_rnode()
                if time.time() > rnode.last_seen + QUARANTINE_PERIOD:
                    result = rnode
            if self._next_stale_maintenance_index == starting_index:
                # No node to be pinged in the whole table.
                break
        return result

    def _ping_a_found_node(self):
        node_ = self._found_nodes_queue.pop(0)
        if node_:
            logger.debug('pinging node found: %r', node_)
        return node_
        
    def _ping_a_query_received_node(self):
        return self._query_received_queue.pop(0)

    def _ping_a_replacement_node(self):
        return self._replacement_queue.pop(0)
                                  
    def _get_maintenance_query(self, node_, do_fill_up=False):
        '''
        if not node_.id: 
            # Bootstrap nodes don't have id
            return message.OutgoingFindNodeQuery(node_,
                                                 self.my_node.id,
                                                 self.my_node.id, None)
        '''
        if do_fill_up or random.choice((False, True)):

            # 50% chance to send a find_node to fill up a non-full bucket
            target_log_distance = self.table.find_next_bucket_with_room_index(
                node_=node_)
            if target_log_distance:
                target = self.my_node.id.generate_close_id(target_log_distance)
                msg = self.msg_f.outgoing_find_node_query(node_,
                                                          target, None)
            else:
                # Every bucket is full. We send a ping instead.
                msg = self.msg_f.outgoing_ping_query(node_)
        else:
            # 50% chance to send find_node with my id as target
            msg = self.msg_f.outgoing_find_node_query(node_,
                                                      self.my_node.id, None)
        return msg
        
    def on_query_received(self, node_):
        '''
        Return None when nothing to do
        Return a list of queries when queries need to be sent (the queries
        will be sent out by the caller)
        '''
        if self.bootstrapper.is_bootstrap_node(node_):
            return
        
        log_distance = self.my_node.distance(node_).log
        try:
            sbucket = self.table.get_sbucket(log_distance)
        except(IndexError):
            return # Got a query from myself. Just ignore it.

        m_bucket = sbucket.main
        r_bucket = sbucket.replacement
        if node_.ip in m_bucket.ips_in_table:
            rnode = m_bucket.get_rnode(node_)
            if rnode:
                # node in routing table: update rnode
                self._update_rnode_on_query_received(rnode)
            # This IP is in the table. Stop here to avoid multiple entries
            # with the same IP
            return
        
        # Now, consider adding this node to the routing table
        if m_bucket.there_is_room():
            # There is room in the bucket: queue it
            self._query_received_queue.add(node_, log_distance)
            return
        # No room in the main routing table
        # Add to replacement table (if the bucket is not full)
        worst_rnode = self._worst_rnode(r_bucket.rnodes)
        if worst_rnode \
                and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
            r_bucket.remove(worst_rnode)
            rnode = node_.get_rnode(log_distance)
            r_bucket.add(rnode)
            self._update_rnode_on_query_received(rnode)
        return
            
    def on_response_received(self, node_, rtt, nodes):
        if self.bootstrapper.is_bootstrap_node(node_):
            return

        if nodes:
            logger.debug('nodes found: %r', nodes)
        self._found_nodes_queue.add(nodes)

        logger.debug('on response received %f', rtt)
        log_distance = self.my_node.distance(node_).log
        try:
            sbucket = self.table.get_sbucket(log_distance)
        except(IndexError):
            return # Got a response from myself. Just ignore it.
        m_bucket = sbucket.main
        r_bucket = sbucket.replacement
        rnode = m_bucket.get_rnode(node_)
        if node_.ip in m_bucket.ips_in_table:
            rnode = m_bucket.get_rnode(node_)
            if rnode:
                # node in routing table: update rnode
                self._update_rnode_on_response_received(rnode, rtt)
            # This IP is in the table. Stop here to avoid multiple entries
            # with the same IP
            return
        
        # Now, consider adding this node to the routing table
        rnode = r_bucket.get_rnode(node_)
        if rnode:
            # node in replacement table
            # let's see whether there is room in the main
            self._update_rnode_on_response_received(rnode, rtt)
            #TODO: leave this for the maintenance task
            if m_bucket.there_is_room():
                m_bucket.add(rnode)
                self.table.num_rnodes += 1
                self._update_rnode_on_response_received(rnode, rtt)
                r_bucket.remove(rnode)
            return
        # The node is nowhere
        # Add to main table (if the bucket is not full)
        #TODO: check whether in replacement_mode
        if m_bucket.there_is_room():
            rnode = node_.get_rnode(log_distance)
            m_bucket.add(rnode)
            self.table.num_rnodes += 1
            self._update_rnode_on_response_received(rnode, rtt)
            return
        # The main bucket is full
        # Let's see whether this node's latency is good
        current_time = time.time()
        rnode_to_be_replaced = None
        m_bucket.rnodes.sort(key=attrgetter('rtt'), reverse=True)
        for rnode in m_bucket.rnodes:
            rnode_age = current_time - rnode.bucket_insertion_ts
            if rtt < rnode.rtt * (1 - (rnode_age / 7200)):
                # A rnode can only be replaced when the candidate node's RTT
                # is shorter by an age-dependent factor. For instance, when
                # the rnode has been in the bucket for 30 mins (1800 s), a
                # candidate's RTT must be below 75% of the rnode's RTT (i.e.
                # at least 25% shorter). After two hours (7200 s) the factor
                # reaches zero, so the rnode can no longer be replaced by
                # this method.
                # print('RTT replacement: newRTT: %f, oldRTT: %f, age: %f' % (
                #     rtt, rnode.rtt, current_time - rnode.bucket_insertion_ts))
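                # Worked example (illustrative numbers, not from the source):
                # if rnode.rtt is 0.200 s and the rnode is 1800 s old, the
                # threshold is 0.200 * (1 - 1800/7200) = 0.150 s, so a
                # candidate with rtt = 0.120 s replaces it, while one with
                # rtt = 0.180 s does not.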
                rnode_to_be_replaced = rnode
                break
        if rnode_to_be_replaced:
            m_bucket.remove(rnode_to_be_replaced)
            rnode = node_.get_rnode(log_distance)
            m_bucket.add(rnode)
            # num_rnodes is unchanged: one rnode was removed, one added
            self._update_rnode_on_response_received(rnode, rtt)
            return
            
        # Get the worst node in replacement bucket and see whether
        # it's bad enough to be replaced by node_
        worst_rnode = self._worst_rnode(r_bucket.rnodes)
        if worst_rnode \
                and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
            # node_ is a better candidate than worst_rnode
            r_bucket.remove(worst_rnode)
            rnode = node_.get_rnode(log_distance)
            r_bucket.add(rnode)
            self._update_rnode_on_response_received(rnode, rtt)
        return
        
    def on_error_received(self, node_addr):
        # Errors are ignored. Only the sender's address is available here,
        # so the bootstrap-node check used by the other handlers cannot be
        # applied.
        return
    
    def on_timeout(self, node_):
        if self.bootstrapper.is_bootstrap_node(node_):
            return

        log_distance = self.my_node.distance(node_).log
        try:
            sbucket = self.table.get_sbucket(log_distance)
        except IndexError:
            return []  # Got a timeout from myself; just ignore it.
        m_bucket = sbucket.main
        r_bucket = sbucket.replacement
        rnode = m_bucket.get_rnode(node_)
        if rnode:
            # node in routing table: kick it out
            self._update_rnode_on_timeout(rnode)
            m_bucket.remove(rnode)
            self.table.num_rnodes -= 1

            for r_rnode in r_bucket.sorted_by_rtt():
                self._replacement_queue.add(r_rnode)
            if r_bucket.there_is_room():
                r_bucket.add(rnode)
            else:
                worst_rnode = self._worst_rnode(r_bucket.rnodes)
                if worst_rnode:
                    # Replace the worst node in the replacement table
                    r_bucket.remove(worst_rnode)
                    r_bucket.add(rnode)
            # Without this return, the rnode just demoted to the replacement
            # bucket would be found again below and its timeout would be
            # counted twice.
            return []
        # The node is not in the main table; check the replacement table
        rnode = r_bucket.get_rnode(node_)
        if rnode:
            # Node in replacement table: just update rnode
            self._update_rnode_on_timeout(rnode)
        return []
            
    def get_closest_rnodes(self, log_distance, num_nodes, exclude_myself):
        if not num_nodes:
            num_nodes = NODES_PER_BUCKET[log_distance]
        return self.table.get_closest_rnodes(log_distance, num_nodes,
                                             exclude_myself)

    def get_main_rnodes(self):
        return self.table.get_main_rnodes()

    def print_stats(self):
        self.table.print_stats()

    def _update_rnode_on_query_received(self, rnode):
        """Register a query from node.

        You should call this method when receiving a query from this node.

        """
        current_time = time.time()
        rnode.last_action_ts = current_time
        rnode.msgs_since_timeout += 1
        rnode.num_queries += 1
        rnode.add_event(current_time, node.QUERY)
        rnode.last_seen = current_time

    def _update_rnode_on_response_received(self, rnode, rtt):
        """Register a reply from rnode.

        You should call this method when receiving a response from this rnode.

        """
        rnode.rtt = rtt
        current_time = time.time()
        #rnode._reset_refresh_task()
        if rnode.in_quarantine:
            rnode.in_quarantine = \
                rnode.last_action_ts < current_time - QUARANTINE_PERIOD
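            # As written, the rnode leaves quarantine only if its previous
            # action happened within the last QUARANTINE_PERIOD seconds;
            # after a longer silence it remains quarantined.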
                
        rnode.last_action_ts = current_time
        rnode.num_responses += 1
        rnode.add_event(current_time, node.RESPONSE)
        rnode.last_seen = current_time

    def _update_rnode_on_timeout(self, rnode):
        """Register a timeout for this rnode.

        You should call this method when getting a timeout for this node.

        """
        current_time = time.time()
        rnode.last_action_ts = current_time
        rnode.msgs_since_timeout = 0
        rnode.num_timeouts += 1
        rnode.add_event(current_time, node.TIMEOUT)

    def _worst_rnode(self, rnodes):
        max_num_timeouts = -1
        worst_rnode_so_far = None
        for rnode in rnodes:
            num_timeouts = rnode.timeouts_in_a_row()
            if num_timeouts >= max_num_timeouts:
                max_num_timeouts = num_timeouts
                worst_rnode_so_far = rnode
        return worst_rnode_so_far
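    # Illustrative behaviour of _worst_rnode (hypothetical rnodes): for
    # rnodes whose timeouts_in_a_row() return 0, 2 and 5, it returns the
    # one with 5 consecutive timeouts. Ties favour the later rnode because
    # of the >= comparison, and an empty list yields None.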