def _update_rnode_on_timeout(self, rnode):
    """Register a timeout for this rnode.

    You should call this method when getting a timeout for this node.

    """
    current_time = time.time()
    rnode.last_action_ts = current_time
    rnode.msgs_since_timeout = 0
    rnode.num_timeouts += 1
    rnode.last_events.append((current_time, node.TIMEOUT))
    # Cap the event history at max_last_events entries.
    rnode.last_events = rnode.last_events[:rnode.max_last_events]
def on_response_received(self, node_, rtt, nodes):
    log_distance = self.my_node.log_distance(node_)
    try:
        sbucket = self.table.get_sbucket(log_distance)
    except IndexError:
        return  # Got a response from myself. Just ignore it.
    m_bucket = sbucket.main
    rnode = m_bucket.get_rnode(node_)
    if rnode:
        # node in routing table: update
        self._update_rnode_on_response_received(rnode, rtt)
        if self._maintenance_mode == NORMAL_MODE:
            m_bucket.last_changed_ts = time.time()
        if node_ in self._pinged_q_rnodes:
            # This node is questionable. This response proves that it is
            # alive. Remove it from the questionable dict.
            del self._pinged_q_rnodes[node_]
        return

    # The node is not in main
    if m_bucket.there_is_room():
        rnode = node_.get_rnode(log_distance)
        m_bucket.add(rnode)
        self.table.update_lowest_index(log_distance)
        self.table.num_rnodes += 1
        self._update_rnode_on_response_received(rnode, rtt)
        if self._maintenance_mode == NORMAL_MODE:
            m_bucket.last_changed_ts = time.time()
        return

    # The main bucket is full.
    # If there is a bad node inside the bucket,
    # replace it with the sending node_
    bad_rnode = self._pop_bad_rnode(m_bucket)
    if bad_rnode:
        rnode = node_.get_rnode(log_distance)
        m_bucket.add(rnode)
        self._update_rnode_on_response_received(rnode, rtt)
        if self._maintenance_mode == NORMAL_MODE:
            m_bucket.last_changed_ts = time.time()
        self.table.update_lowest_index(log_distance)
        # A bad node was replaced one-for-one: the node count is unchanged.
        self.table.num_rnodes += 0
        return

    # There are no bad nodes. Ping questionable nodes (if any)
    q_rnodes = self._get_questionable_rnodes(m_bucket)
    queries_to_send = []
    for q_rnode in q_rnodes:
        # (0 timeouts so far, candidate node)
        c_rnode = node_.get_rnode(log_distance)
        self._update_rnode_on_response_received(c_rnode, rtt)
        self._pinged_q_rnodes[q_rnode] = [0, c_rnode]
        queries_to_send.append(Query(self.ping_msg, q_rnode))
    return queries_to_send
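# Hedged note (added commentary, not original code): _pinged_q_rnodes appears
# to map each questionable node being pinged to a [timeouts_so_far, candidate]
# pair. A response from the questionable node (handled above) deletes the
# entry; presumably the timeout path increments the counter and eventually
# promotes the stored candidate rnode into the bucket.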
def _update_rnode_on_query_received(self, rnode):
    """Register a query from node.

    You should call this method when receiving a query from this node.

    """
    current_time = time.time()
    rnode.last_action_ts = current_time
    rnode.msgs_since_timeout += 1
    rnode.num_queries += 1
    rnode.last_events.append((current_time, node.QUERY))
    # Cap the event history at max_last_events entries.
    rnode.last_events = rnode.last_events[:rnode.max_last_events]
    rnode.last_seen = current_time
def _update_rnode_on_response_received(self, rnode, rtt):
    """Register a reply from rnode.

    You should call this method when receiving a response from this rnode.

    """
    rnode.rtt = rtt
    current_time = time.time()
    #rnode._reset_refresh_task()
    if rnode.in_quarantine:
        rnode.in_quarantine = rnode.last_action_ts < (
            current_time - QUARANTINE_PERIOD)
    rnode.last_action_ts = current_time
    rnode.num_responses += 1
    rnode.last_events.append((current_time, node.RESPONSE))
    # Cap the event history at max_last_events entries.
    rnode.last_events = rnode.last_events[:rnode.max_last_events]
    rnode.last_seen = current_time
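# Hedged illustration (added commentary, not original code): the quarantine
# check above clears in_quarantine only when the previous action happened
# within the last QUARANTINE_PERIOD seconds. E.g., assuming
# QUARANTINE_PERIOD = 180: a previous action 60 s ago leaves quarantine
# (last_action_ts >= now - 180), while one 200 s ago stays quarantined.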
def _get_questionable_rnodes(self, m_bucket):
    q_rnodes = []
    for rnode in m_bucket.rnodes:
        inactivity_time = time.time() - rnode.last_seen
        # A node is questionable when it has been silent for longer than
        # REFRESH_PERIOD, or when it has never responded to us at all.
        if inactivity_time > REFRESH_PERIOD:
            q_rnodes.append(rnode)
        elif rnode.num_responses == 0:
            q_rnodes.append(rnode)
    return q_rnodes
def __init__(self, info_hash, queue_size):
    self.info_hash = info_hash
    self.queue_size = queue_size
    self.queue = [_QueuedNode(None, identifier.ID_SIZE_BITS + 1, None)]
    # *_ips is used to prevent many IDs from being claimed
    # from a single IP address.
    self.queued_ips = set()
    self.queried_ips = set()
    self.queued_qnodes = []
    self.responded_qnodes = []
    self.max_queued_qnodes = 16
    self.max_responded_qnodes = 16
    self.last_query_ts = time.time()
def pop(self, _):
    while self._queue:
        ts, node_ = self._queue[0]
        time_in_queue = time.time() - ts
        if time_in_queue < QUARANTINE_PERIOD:
            return
        # Quarantine period passed
        del self._queue[0]
        log_distance = self.table.my_node.log_distance(node_)
        sbucket = self.table.get_sbucket(log_distance)
        m_bucket = sbucket.main
        if m_bucket.there_is_room():
            # room in main: return it
            return node_
    return
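# Hedged usage sketch (added commentary, not original code; the variable name
# found_nodes_queue is hypothetical):
#
#   node_ = found_nodes_queue.pop(None)
#   # Returns None while the head entry is younger than QUARANTINE_PERIOD.
#   # Entries that have aged past the quarantine but whose main bucket is
#   # full are dropped and the scan continues; otherwise the first node with
#   # room in its main bucket is returned.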
def _pop_nodes_to_query(self, max_nodes):
    if len(self.responded_qnodes) > MARK_INDEX:
        mark = self.responded_qnodes[MARK_INDEX].log_distance
    else:
        mark = identifier.ID_SIZE_BITS
    nodes_to_query = []
    for _ in range(max_nodes):
        try:
            qnode = self.queued_qnodes[0]
        except IndexError:
            break  # no more queued nodes left
        if qnode.log_distance < mark:
            self.queried_ips.add(qnode.node.ip)
            nodes_to_query.append(qnode.node)
        del self.queued_qnodes[0]
        self.queued_ips.remove(qnode.node.ip)
    self.last_query_ts = time.time()
    return nodes_to_query
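# Hedged note (added commentary, not original code): queued nodes are popped
# from the head, but only those whose log distance to the target is below the
# mark (the log distance of the MARK_INDEX-th entry of responded_qnodes, which
# appears to be kept sorted by distance) are actually queried; the rest are
# dropped and their IPs released from queued_ips. Until more than MARK_INDEX
# responses have arrived, the mark defaults to ID_SIZE_BITS, so every queued
# node qualifies.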
def _refresh_stale_bucket(self):
    maintenance_lookup_target = None
    current_time = time.time()
    for i in xrange(self.table.lowest_index, NUM_BUCKETS):
        sbucket = self.table.get_sbucket(i)
        m_bucket = sbucket.main
        if not m_bucket:
            continue
        inactivity_time = current_time - m_bucket.last_changed_ts
        if inactivity_time > REFRESH_PERIOD:
            maintenance_lookup_target = self.my_node.id.generate_close_id(i)
            m_bucket.last_changed_ts = current_time
            return maintenance_lookup_target
    self._maintenance_mode = NORMAL_MODE
    return None
def _ping_a_staled_rnode(self):
    # Don't have self._next_stale_maintenance_index lower than
    # lowest_bucket
    starting_index = self._next_stale_maintenance_index
    result = None
    while not result:
        # Find a non-empty bucket
        sbucket = self.table.get_sbucket(
            self._next_stale_maintenance_index)
        m_bucket = sbucket.main
        self._next_stale_maintenance_index = (
            self._next_stale_maintenance_index + 1) % (NUM_BUCKETS - 1)
        if m_bucket:
            rnode = m_bucket.get_stalest_rnode()
            if time.time() > rnode.last_seen + QUARANTINE_PERIOD:
                result = rnode
        if self._next_stale_maintenance_index == starting_index:
            # No node to be pinged in the whole table.
            break
    return result
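# Hedged note (added commentary, not original code): _next_stale_maintenance_index
# walks the buckets round-robin. A bucket's stalest rnode is only selected once
# it has been silent for longer than QUARANTINE_PERIOD, and a full cycle back
# to the starting index without finding a candidate returns None.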
def add(self, node_):
    self._queue.append((time.time(), node_))
def on_response_received(self, node_, rtt, nodes):
    if nodes:
        logger.debug('nodes found: %r', nodes)
        self._found_nodes_queue.add(nodes)
    logger.debug('on response received %f', rtt)

    log_distance = self.my_node.log_distance(node_)
    try:
        sbucket = self.table.get_sbucket(log_distance)
    except IndexError:
        return  # Got a response from myself. Just ignore it.
    m_bucket = sbucket.main
    r_bucket = sbucket.replacement
    rnode = m_bucket.get_rnode(node_)
    if rnode:
        # node in routing table: update
        self._update_rnode_on_response_received(rnode, rtt)
        return

    # The node is not in main
    rnode = r_bucket.get_rnode(node_)
    if rnode:
        # node in replacement table:
        # let's see whether there is room in the main
        self._update_rnode_on_response_received(rnode, rtt)
        #TODO: leave this for the maintenance task
        if m_bucket.there_is_room():
            m_bucket.add(rnode)
            self.table.update_lowest_index(log_distance)
            self.table.num_rnodes += 1
            self._update_rnode_on_response_received(rnode, rtt)
            r_bucket.remove(rnode)
        return

    # The node is nowhere.
    # Add to main table (if the bucket is not full)
    #TODO: check whether in replacement_mode
    if m_bucket.there_is_room():
        rnode = node_.get_rnode(log_distance)
        m_bucket.add(rnode)
        self.table.update_lowest_index(log_distance)
        self.table.num_rnodes += 1
        self._update_rnode_on_response_received(rnode, rtt)
        return

    # The main bucket is full.
    # Let's see whether this node's latency is good
    current_time = time.time()
    rnode_to_be_replaced = None
    for rnode in reversed(m_bucket.rnodes):
        rnode_age = current_time - rnode.bucket_insertion_ts
        if rtt < rnode.rtt * (.5 - (rnode_age / 7200)):
            # A rnode can only be replaced when the candidate node's RTT
            # is (at most) 50% of the rnode's. Over time, this factor
            # decreases. For instance, when the rnode has been in the bucket
            # for 30 mins (1800 secs), a candidate's RTT must be at most
            # 25% of the rnode's RTT (i.e. four times faster). After an
            # hour, a rnode cannot be replaced because of a better RTT.
            logger.debug('RTT replacement: newRTT: %f, oldRTT: %f, age: %f',
                         rtt, rnode.rtt, rnode_age)
            rnode_to_be_replaced = rnode
            break
    if rnode_to_be_replaced:
        m_bucket.remove(rnode_to_be_replaced)
        rnode = node_.get_rnode(log_distance)
        m_bucket.add(rnode)
        # No need to update the table: one node replaced another, so the
        # node count is unchanged.
        self.table.num_rnodes += 0
        self._update_rnode_on_response_received(rnode, rtt)
        return

    # Get the worst node in the replacement bucket and see whether
    # it's bad enough to be replaced by node_
    worst_rnode = self._worst_rnode(r_bucket.rnodes)
    if worst_rnode \
            and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
        # This node is a better candidate than worst_rnode
        r_bucket.remove(worst_rnode)
        rnode = node_.get_rnode(log_distance)
        r_bucket.add(rnode)
        self._update_rnode_on_response_received(rnode, rtt)
        return
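# Hedged sketch (not part of the original class; the helper name is made up
# and the 7200-second horizon mirrors the constant used above): the RTT
# replacement rule accepts a candidate only while its RTT is below a threshold
# that shrinks linearly with the incumbent's age in the bucket: 50% of the
# incumbent's RTT at age 0, 25% after 30 minutes, 0% after one hour.
def _rtt_replacement_threshold(old_rtt, rnode_age):
    # Maximum candidate RTT (in seconds) that would still trigger a
    # replacement of an incumbent with RTT old_rtt and age rnode_age seconds.
    return old_rtt * (.5 - (rnode_age / 7200.))

# Example: an incumbent with a 100 ms RTT that has been in its bucket for
# 1800 s can only be displaced by a candidate answering in under 25 ms.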