Example no. 1
    def _update_rnode_on_timeout(self, rnode):
        """Register a timeout for this rnode.

        You should call this method when getting a timeout for this node.

        """
        rnode.last_action_ts = time.time()
        rnode.msgs_since_timeout = 0
        rnode.num_timeouts += 1
        rnode.add_event(time.time(), node.TIMEOUT)
Example no. 2
    def _update_rnode_on_query_received(self, rnode):
        """Register a query from node.

        You should call this method when receiving a query from this node.

        """
        current_time = time.time()
        rnode.last_action_ts = time.time()
        rnode.msgs_since_timeout += 1
        rnode.num_queries += 1
        rnode.add_event(current_time, node.QUERY)
        rnode.last_seen = current_time
Example no. 3
    def _update_rnode_on_response_received(self, rnode, rtt):
        """Register a reply from rnode.

        You should call this method when receiving a response from this rnode.

        """
        rnode.real_rtt = rtt
        current_time = time.time()
        #rnode._reset_refresh_task()
        if rnode.in_quarantine:
            rnode.in_quarantine = \
                rnode.last_action_ts < current_time - QUARANTINE_PERIOD
                
        rnode.last_action_ts = current_time
        rnode.num_responses += 1
        rnode.add_event(time.time(), node.RESPONSE)
        rnode.last_seen = current_time
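These three `_update_rnode_on_*` methods only mutate counters and timestamps on a routing-table entry. As a reference for the state they assume, here is a minimal stand-in for such an entry: the attribute names are taken from the calls above, while the class itself, its defaults, and the bounded event list are assumptions for illustration, not the project's actual RoutingNode.

    import collections
    import time

    class RNodeStateSketch(object):
        """Minimal sketch of the per-node state touched by the update methods."""
        def __init__(self):
            now = time.time()
            self.last_action_ts = now    # last query/response/timeout
            self.last_seen = now         # last query/response
            self.msgs_since_timeout = 0
            self.num_timeouts = 0
            self.num_queries = 0
            self.num_responses = 0
            self.real_rtt = None         # RTT of the last response
            self.in_quarantine = True    # updated on response received
            self._events = collections.deque(maxlen=10)  # (timestamp, event) pairs

        def add_event(self, timestamp, event):
            self._events.append((timestamp, event))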
Example no. 4
 def add(self, node_, log_distance):
     # The caller already checked that there is room in the bucket
     #        print 'received queue', len(self._queue)
     if node_ in self._queued_nodes_set:
         # This node is already queued
         return
     num_nodes_queued = self._nodes_queued_per_bucket[log_distance]
     if num_nodes_queued >= 8:
         # many nodes queued for this bucket already
         return
     self._queued_nodes_set.add(node_)
     self._nodes_queued_per_bucket[log_distance] = (num_nodes_queued + 1)
     self._queue.append((time.time(), node_))
Example no. 5
    def add(self, node_, log_distance):
        # The caller already checked that there is room in the bucket
#        print 'received queue', len(self._queue)
        if node_ in self._queued_nodes_set:
            # This node is already queued
            return
        num_nodes_queued = self._nodes_queued_per_bucket[log_distance]
        if num_nodes_queued >= 8:
            # many nodes queued for this bucket already
            return
        self._queued_nodes_set.add(node_)
        self._nodes_queued_per_bucket[log_distance] = (
            num_nodes_queued + 1)
        self._queue.append((time.time(), node_))
Example no. 6
    def __init__(self, info_hash, queue_size):
        self.info_hash = info_hash
        self.queue_size = queue_size
        self.queue = [_QueuedNode(None, identifier.ID_SIZE_BITS+1, None)]
        # *_ips is used to prevent that many Ids are
        # claimed from a single IP address.
        self.queued_ips = set()
        self.queried_ips = set()
        self.queued_qnodes = []
        self.responded_qnodes = []

#        self.max_queued_qnodes = 16
        self.max_responded_qnodes = 16

        self.last_query_ts = time.time()
Example no. 7
    def __init__(self, info_hash, queue_size):
        self.info_hash = info_hash
        self.queue_size = queue_size
        self.queue = [_QueuedNode(None, identifier.ID_SIZE_BITS + 1, None)]
        # *_ips is used to prevent that many Ids are
        # claimed from a single IP address.
        self.queued_ips = set()
        self.queried_ips = set()
        self.queued_qnodes = []
        self.responded_qnodes = []

        #        self.max_queued_qnodes = 16
        self.max_responded_qnodes = 16

        self.last_query_ts = time.time()
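The comment in this constructor notes that the `queued_ips` / `queried_ips` sets exist to stop a single IP address from claiming many node identities during a lookup. A gate along these lines would be enough; this is a sketch only, the actual check in the lookup code may differ.

    def may_enqueue(node_ip, queued_ips, queried_ips):
        # Sketch: accept at most one queued/queried entry per IP address.
        return node_ip not in queued_ips and node_ip not in queried_ips

    # Usage:
    queued, queried = set(['1.2.3.4']), set()
    print(may_enqueue('1.2.3.4', queued, queried))  # False: IP already queued
    print(may_enqueue('5.6.7.8', queued, queried))  # True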
Example no. 8
 def _pop_nodes_to_query(self, max_nodes):
     if len(self.responded_qnodes) > MARK_INDEX:
         mark = self.responded_qnodes[MARK_INDEX].distance.log
     else:
         mark = identifier.ID_SIZE_BITS
     nodes_to_query = [] 
     for _ in range(max_nodes):
         try:
             qnode = self.queued_qnodes[0]
         except (IndexError):
             break # no more queued nodes left
         if qnode.distance is None or qnode.distance.log < mark:
             self.queried_ips.add(qnode.node.ip)
             nodes_to_query.append(qnode.node)
             del self.queued_qnodes[0]
             self.queued_ips.remove(qnode.node.ip)
     self.last_query_ts = time.time()
     return nodes_to_query
Example no. 9
 def _pop_nodes_to_query(self, max_nodes):
     if len(self.responded_qnodes) > MARK_INDEX:
         mark = self.responded_qnodes[MARK_INDEX].distance.log
     else:
         mark = identifier.ID_SIZE_BITS
     nodes_to_query = []
     for _ in range(max_nodes):
         try:
             qnode = self.queued_qnodes[0]
         except (IndexError):
             break  # no more queued nodes left
         if qnode.distance is None or qnode.distance.log < mark:
             self.queried_ips.add(qnode.node.ip)
             nodes_to_query.append(qnode.node)
             del self.queued_qnodes[0]
             self.queued_ips.remove(qnode.node.ip)
     self.last_query_ts = time.time()
     return nodes_to_query
Example no. 10
 def pop(self, _):
     while self._queue:
         ts, node_ = self._queue[0]
         time_in_queue = time.time() - ts
         if time_in_queue < QUARANTINE_PERIOD:
             return
         # Quarantine period passed
         log_distance = self.table.my_node.log_distance(node_)
         self._queued_nodes_set.remove(node_)
         self._nodes_queued_per_bucket[log_distance] = (
             self._nodes_queued_per_bucket[log_distance] - 1)
         del self._queue[0]
         sbucket = self.table.get_sbucket(log_distance)
         m_bucket = sbucket.main
         if m_bucket.there_is_room():
             # room in main: return it
             return node_
     return
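Read together with the `add` method in examples 4 and 5, this `pop` implements a quarantine queue: a freshly discovered node waits at least QUARANTINE_PERIOD seconds and is only handed back if its main bucket still has room. The stand-alone sketch below reproduces just the timing part; the constant's value is assumed and the bucket-room check is left out.

    import time

    QUARANTINE_PERIOD = 3 * 60  # seconds; assumed value for this sketch

    class QuarantineQueueSketch(object):
        """Hold nodes for QUARANTINE_PERIOD before releasing them (illustration)."""
        def __init__(self):
            self._queue = []        # [(enqueue_ts, node), ...], oldest first
            self._queued = set()

        def add(self, node_):
            if node_ in self._queued:
                return              # already queued
            self._queued.add(node_)
            self._queue.append((time.time(), node_))

        def pop(self):
            # Return the oldest node whose quarantine has expired, else None.
            if not self._queue:
                return None
            ts, node_ = self._queue[0]
            if time.time() - ts < QUARANTINE_PERIOD:
                return None         # still in quarantine
            del self._queue[0]
            self._queued.discard(node_)
            return node_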
Example no. 11
 def _ping_a_staled_rnode(self):
     starting_index = self._next_stale_maintenance_index
     result = None
     while not result:
         # Find a non-empty bucket
         sbucket = self.table.get_sbucket(
             self._next_stale_maintenance_index)
         m_bucket = sbucket.main
         self._next_stale_maintenance_index = (
             self._next_stale_maintenance_index + 1) % (NUM_BUCKETS - 1)
         if m_bucket:
             rnode = m_bucket.get_stalest_rnode()
             if time.time() > rnode.last_seen + QUARANTINE_PERIOD:
                 result = rnode
         if self._next_stale_maintenance_index == starting_index:
             # No node to be pinged in the whole table.
             break
     return result
Example no. 12
    def on_response_received(self, node_, rtt, nodes):
        if self.bootstrapper.is_bootstrap_node(node_):
            return

        if nodes:
            logger.debug('nodes found: %r', nodes)
        self._found_nodes_queue.add(nodes)

        logger.debug('on response received %f', rtt)
        log_distance = self.my_node.distance(node_).log
        try:
            sbucket = self.table.get_sbucket(log_distance)
        except (IndexError):
            return  # Got a response from myself. Just ignore it.
        m_bucket = sbucket.main
        r_bucket = sbucket.replacement
        rnode = m_bucket.get_rnode(node_)
        if node_.ip in m_bucket.ips_in_table:
            rnode = m_bucket.get_rnode(node_)
            if rnode:
                # node in routing table: update rnode
                self._update_rnode_on_response_received(rnode, rtt)
            # This IP is in the table. Stop here to avoid multiple entries
            # with the same IP
            return

        # Now, consider adding this node to the routing table
        rnode = r_bucket.get_rnode(node_)
        if rnode:
            # node in replacement table
            # let's see whether there is room in the main
            self._update_rnode_on_response_received(rnode, rtt)
            #TODO: leave this for the maintenance task
            if m_bucket.there_is_room():
                m_bucket.add(rnode)
                self.table.num_rnodes += 1
                self._update_rnode_on_response_received(rnode, rtt)
                r_bucket.remove(rnode)
            return
        # The node is nowhere
        # Add to main table (if the bucket is not full)
        #TODO: check whether in replacement_mode
        if m_bucket.there_is_room():
            rnode = node_.get_rnode(log_distance)
            m_bucket.add(rnode)
            self.table.num_rnodes += 1
            self._update_rnode_on_response_received(rnode, rtt)
            return
        # The main bucket is full
        # Let's see whether this node's latency is good
        current_time = time.time()
        rnode_to_be_replaced = None
        m_bucket.rnodes.sort(key=attrgetter('rtt'), reverse=True)
        for rnode in m_bucket.rnodes:
            rnode_age = current_time - rnode.bucket_insertion_ts
            if rtt < rnode.rtt * (1 - (rnode_age / 7200)):
                # A rnode can only be replaced when the candidate node's RTT
                # is shorter by a factor that shrinks as the rnode ages in
                # the bucket. For instance, when the rnode has been in the
                # bucket for 30 mins (1800 secs), a candidate's RTT must be
                # under 75% of the rnode's RTT. After two hours, a rnode
                # cannot be replaced by this method.
                #                print 'RTT replacement: newRTT: %f, oldRTT: %f, age: %f' % (
                #                rtt, rnode.rtt, current_time - rnode.bucket_insertion_ts)
                rnode_to_be_replaced = rnode
                break
        if rnode_to_be_replaced:
            m_bucket.remove(rnode_to_be_replaced)
            rnode = node_.get_rnode(log_distance)
            m_bucket.add(rnode)
            # The node count does not change: this rnode replaces the one
            # just removed.
            self.table.num_rnodes += 0
            self._update_rnode_on_response_received(rnode, rtt)
            return

        # Get the worst node in replacement bucket and see whether
        # it's bad enough to be replaced by node_
        worst_rnode = self._worst_rnode(r_bucket.rnodes)
        if worst_rnode \
                and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
            # This node is better candidate than worst_rnode
            r_bucket.remove(worst_rnode)
            rnode = node_.get_rnode(log_distance)
            r_bucket.add(rnode)
            self._update_rnode_on_response_received(rnode, rtt)
        return
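For the RTT-based replacement above, the threshold a candidate must beat is `rnode.rtt * (1 - rnode_age / 7200)`. The short snippet below just evaluates that expression for a few ages, using an assumed resident RTT of 100 ms; it is an illustration of the formula, not code from the project.

    def rtt_replacement_threshold(resident_rtt, rnode_age):
        # RTT (in seconds) a candidate must beat to replace a resident rnode.
        return resident_rtt * (1 - rnode_age / 7200.0)

    for age_s in (0, 1800, 3600, 7200):
        threshold_ms = rtt_replacement_threshold(0.100, age_s) * 1000
        print('age %d s -> candidate RTT must be under %.1f ms' % (age_s, threshold_ms))
    # age 0 s -> candidate RTT must be under 100.0 ms
    # age 1800 s -> candidate RTT must be under 75.0 ms
    # age 3600 s -> candidate RTT must be under 50.0 ms
    # age 7200 s -> candidate RTT must be under 0.0 ms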