def main_loop(self):
    self.next_main_loop_ts = time.time() + EXTRACTION_DELAY
    if time.time() > self.last_msg_ts + 4:  # self.rcrawler.done:
        print 'ind | ok dead | ok dead'
        self.rcrawler.print_result()
        print 'total OK/DEAD', len(self.rcrawler.ok_nodes),
        print len(self.rcrawler.dead_nodes)
        print self.num_msgs, 'messages sent'
        for n in sorted(self.ok_nodes, key=attrgetter('ip')):
            print n
        return

    target = None
    msgs_to_send = []
    # Interleave bootstrap queries: every 5th message while num_msgs < 20,
    # every 10th while < 100, every 20th beyond 100, and whenever no
    # pending nodes are left (note: num_msgs == 100 matches no branch).
    if ((self.num_msgs < 20 and self.num_msgs % 5 == 0) or
            (self.num_msgs < 100 and self.num_msgs % 10 == 0) or
            (self.num_msgs > 100 and self.num_msgs % 20 == 0) or
            (self.num_msgs > 100 and not self.pending_nodes)):
        dst_node, target = self.rcrawler.next_bootstrap_msg()
        if target:
            print 'O',
        else:
            print 'F',
    if not target and self.pending_nodes:
        dst_node = self.pending_nodes.pop(0)
        if dst_node.id.bin_str.startswith(self.rcrawler.target_prefix):
            self.rcrawler.pinged_node_handler(dst_node)
            target = dst_node.id
        else:
            target = self.target
    if target:
        msg = self.msg_f.outgoing_find_node_query(dst_node, target,
                                                  None, self)
        # print 'target', `target`, 'to node', `node_.id`
        # print 'sending query to', extracting_node.node,
        # print extracting_node.node.id.log_distance(TARGET)
        msgs_to_send.append(msg)

    # Take care of timeouts
    (self._next_timeout_ts,
     timeout_queries) = self.querier.get_timeout_queries()
    for related_query in timeout_queries:
        # print 'timeout'
        timeout_node = related_query.dst_node
        self.dead_nodes.add(timeout_node)
        if timeout_node.id.bin_str.startswith(self.rcrawler.target_prefix):
            self.rcrawler.timeout_handler(timeout_node)

    if msgs_to_send:
        timeout_call_ts, datagrams_to_send = self.querier.register_queries(
            msgs_to_send)
        self.last_msg_ts = time.time()
    else:
        datagrams_to_send = []
    self.num_msgs += len(datagrams_to_send)
    if datagrams_to_send and self.num_msgs % PRINT_DOT_EACH == 0:
        # print target.hex
        sys.stdout.write('.')
        sys.stdout.flush()
    return self.next_main_loop_ts, datagrams_to_send
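# The bootstrap-interleaving condition in main_loop above is easier to read
# as a schedule. This hypothetical helper (not part of the crawler) just
# evaluates the same boolean for a range of message counts:
def is_bootstrap_slot(num_msgs, pending=True):
    return ((num_msgs < 20 and num_msgs % 5 == 0) or
            (num_msgs < 100 and num_msgs % 10 == 0) or
            (num_msgs > 100 and num_msgs % 20 == 0) or
            (num_msgs > 100 and not pending))

print [n for n in range(30) if is_bootstrap_slot(n)]
# [0, 5, 10, 15, 20] -> every 5th message below 20, then every 10th
print is_bootstrap_slot(100)
# False -> num_msgs == 100 matches none of the branches (boundary quirk)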
def next(self):
    if self.done:
        return None, None, None
    if self.rcrawlers:
        if self.rcrawlers[self.next_rcrawler].done:
            self.next_rcrawler = self.next_rcrawler ^ 1  # round-robin
            if self.rcrawlers[self.next_rcrawler].done:
                self.done = True
                return None, None, None
        node_, target, rcrawler_obj = self.rcrawlers[
            self.next_rcrawler].next()
        self.next_rcrawler = self.next_rcrawler ^ 1  # round-robin
        return node_, target, rcrawler_obj
    if self.pending_nodes:
        node_ = self.pending_nodes.pop(0)
        self.last_query_ts = time.time()
        return node_, node_.id, self
    if time.time() < self.last_query_ts + 2:
        # wait for timeouts
        return None, None, None
    if self.fix_prefix_len == LEAF_PREFIX_LEN:
        # len(self.ok_nodes[0]) + len(self.ok_nodes[1]) < 6:
        # this is a leaf
        self.done = True
        return None, None, None
    b_node = None
    if len(self.ok_nodes[0]) < 3 or len(self.ok_nodes[1]) < 3:
        # One half of the split has too few nodes: bootstrap it from the
        # other half (cross bootstrap).
        if len(self.ok_nodes[0]) < 3:
            bootstrap_gen = self.ok_nodes[1].__iter__()
        if len(self.ok_nodes[1]) < 3:
            bootstrap_gen = self.ok_nodes[0].__iter__()
        i = 0
        try:
            while i <= self.bootstrap_index:
                i += 1
                b_node = bootstrap_gen.next()
            self.bootstrap_index += 1
        except StopIteration:
            print 'cross bootstrap failed'
            self.leaf = True
            self.done = True
    if b_node:
        print 'cross bootstrap'
        self.last_query_ts = time.time()
        return (b_node,
                b_node.id.generate_close_id(NUM_BITS - self.fix_prefix_len),
                self)
    print 'R SPLIT', self.fix_prefix_len, '>', self.fix_prefix_len + 1
    self.rcrawlers = (
        RCrawler(self.ok_nodes[0], self.dead_nodes[0],
                 self.fix_prefix_len + 1,
                 self.t.set_bit(NUM_BITS - (self.fix_prefix_len + 1), 0)),
        RCrawler(self.ok_nodes[1], self.dead_nodes[1],
                 self.fix_prefix_len + 1,
                 self.t.set_bit(NUM_BITS - (self.fix_prefix_len + 1), 1)))
    return None, None, None
def _update_rnode_on_timeout(self, rnode):
    """Register a timeout for this rnode.

    You should call this method when getting a timeout for this node.
    """
    rnode.last_action_ts = time.time()
    rnode.msgs_since_timeout = 0
    rnode.num_timeouts += 1
    rnode.add_event(time.time(), node.TIMEOUT)
def _on_peers_found(self, start_ts, peers, src_node):
    if peers:
        t = (time.time() - start_ts, len(peers), src_node, peers)
        wx.CallAfter(self.display_on_grid, t)
    else:
        t = (time.time() - start_ts, '-', '-', '-')
        wx.CallAfter(self.display_on_grid, t)
    self.toolbar.EnableTool(1, True)
    self.packets = self.dht.stop_and_get_capture()
    self.toolbar.EnableTool(2, True)
def on_response_received(self, node_, rtt, nodes):
    log_distance = self.my_node.log_distance(node_)
    try:
        sbucket = self.table.get_sbucket(log_distance)
    except IndexError:
        return  # Got a response from myself. Just ignore it.
    m_bucket = sbucket.main
    rnode = m_bucket.get_rnode(node_)
    if rnode:
        # node in routing table: update
        self._update_rnode_on_response_received(rnode, rtt)
        if self._maintenance_mode == NORMAL_MODE:
            m_bucket.last_changed_ts = time.time()
        if node_ in self._pinged_q_rnodes:
            # This node is questionable. This response proves that it is
            # alive. Remove it from the questionable dict.
            del self._pinged_q_rnodes[node_]
        return
    # The node is not in main
    if m_bucket.there_is_room():
        rnode = node_.get_rnode(log_distance)
        m_bucket.add(rnode)
        self.table.update_lowest_index(log_distance)
        self.table.num_rnodes += 1
        self._update_rnode_on_response_received(rnode, rtt)
        if self._maintenance_mode == NORMAL_MODE:
            m_bucket.last_changed_ts = time.time()
        return
    # The main bucket is full.
    # If there is a bad node inside the bucket,
    # replace it with the sending node_
    bad_rnode = self._pop_bad_rnode(m_bucket)
    if bad_rnode:
        rnode = node_.get_rnode(log_distance)
        m_bucket.add(rnode)
        self._update_rnode_on_response_received(rnode, rtt)
        if self._maintenance_mode == NORMAL_MODE:
            m_bucket.last_changed_ts = time.time()
        self.table.update_lowest_index(log_distance)
        self.table.num_rnodes += 0  # one removed, one added: count unchanged
        return
    # There are no bad nodes. Ping questionable nodes (if any)
    q_rnodes = self._get_questionable_rnodes(m_bucket)
    queries_to_send = []
    for q_rnode in q_rnodes:
        # (0 timeouts so far, candidate node)
        c_rnode = node_.get_rnode(log_distance)
        self._update_rnode_on_response_received(c_rnode, rtt)
        self._pinged_q_rnodes[q_rnode] = [0, c_rnode]
        queries_to_send.append(Query(self.ping_msg, q_rnode))
    return queries_to_send
def _update_rnode_on_query_received(self, rnode): """Register a query from node. You should call this method when receiving a query from this node. """ current_time = time.time() rnode.last_action_ts = time.time() rnode.msgs_since_timeout += 1 rnode.num_queries += 1 rnode.add_event(current_time, node.QUERY) rnode.last_seen = current_time
def on_timeout(self, related_query):
    if related_query.experimental_obj:
        elapsed_time = time.time() - related_query.experimental_obj.query_ts
        print 'probe FAILED due to time-out', related_query.experimental_obj.value
        print 'RTT = ', elapsed_time
        self.pinged_ips[related_query.dst_node.ip] = STATUS_FAIL
def _update_rnode_on_response_received(self, rnode, rtt): """Register a reply from rnode. You should call this method when receiving a response from this rnode. """ rnode.rtt = rtt current_time = time.time() # rnode._reset_refresh_task() if rnode.in_quarantine: rnode.in_quarantine = rnode.last_action_ts < current_time - QUARANTINE_PERIOD rnode.last_action_ts = current_time rnode.num_responses += 1 rnode.add_event(time.time(), node.RESPONSE) rnode.last_seen = current_time
def main_loop(self):
    current_time = time.time()
    if current_time > self.last_extraction_ts + 4:
        return  # crawler DONE
    msgs_to_send = []
    only_inrange = len(self.nodes_inrange_w_response) > 4
    extracting_node, step_target = \
        self.extracting_queue.next_node_step_target(only_inrange)
    if step_target:
        msg = self.msg_f.outgoing_find_node_query(
            extracting_node.lookup_node, step_target, None, extracting_node)
        # print 'sending query to', extracting_node.node,
        # print extracting_node.node.id.log_distance(TARGET)
        msgs_to_send.append(msg)
        self.last_extraction_ts = current_time
    # Take care of timeouts
    (self._next_timeout_ts,
     timeout_queries) = self.querier.get_timeout_queries()
    for query in timeout_queries:
        # print 'timeout'
        query.experimental_obj.timeout_handler()
    if msgs_to_send:
        timeout_call_ts, datagrams_to_send = self.querier.register_queries(
            msgs_to_send)
    else:
        datagrams_to_send = []
    self.num_msgs += len(datagrams_to_send)
    if datagrams_to_send and self.num_msgs % 100 == 0:
        sys.stdout.write('.')
        sys.stdout.flush()
    return current_time + .01, datagrams_to_send
def main(options, args):
    my_addr = (options.ip, int(options.port))
    logs_path = options.path
    logging_conf.setup(logs_path, logs_level)
    print 'Using the following plug-ins:'
    print '*', options.routing_m_file
    print '*', options.lookup_m_file
    routing_m_name = '.'.join(os.path.split(options.routing_m_file))[:-3]
    routing_m_mod = __import__(routing_m_name, fromlist=[''])
    lookup_m_name = '.'.join(os.path.split(options.lookup_m_file))[:-3]
    lookup_m_mod = __import__(lookup_m_name, fromlist=[''])
    dht = pymdht.Pymdht(my_addr, logs_path, routing_m_mod, lookup_m_mod)
    print '\nType "exit" to stop the DHT and exit'
    print 'Type an info_hash (in hex digits): ',
    while 1:
        input = sys.stdin.readline()[:-1]
        if input == 'exit':
            dht.stop()
            break
        try:
            info_hash = identifier.Id(input)
        except identifier.IdError:
            print 'Invalid input (%s)' % input
            continue
        print 'Getting peers for info_hash %r' % info_hash
        global start_ts
        start_ts = time.time()
        dht.get_peers(None, info_hash, _on_peers_found)
def main(options, args):
    my_addr = (options.ip, int(options.port))
    logs_path = options.path
    print 'Using the following plug-ins:'
    print '*', options.routing_m_file
    print '*', options.lookup_m_file
    print 'Private DHT name:', options.private_dht_name
    routing_m_name = '.'.join(os.path.split(options.routing_m_file))[:-3]
    routing_m_mod = __import__(routing_m_name, fromlist=[''])
    lookup_m_name = '.'.join(os.path.split(options.lookup_m_file))[:-3]
    lookup_m_mod = __import__(lookup_m_name, fromlist=[''])
    dht = pymdht.Pymdht(my_addr, logs_path,
                        routing_m_mod, lookup_m_mod,
                        options.private_dht_name, logs_level)
    print '\nType "exit" to stop the DHT and exit'
    print 'Type "help" if you need it'
    while 1:
        input = sys.stdin.readline().strip().split()
        if not input:
            continue
        command = input[0]
        if command == 'help':
            print '''
Available commands are:
- help
- fast info_hash bt_port
- exit
- m    Memory information
'''
        elif command == 'exit':
            dht.stop()
            break
        elif command == 'm':
            import guppy
            h = guppy.hpy()
            print h.heap()
        elif command == 'fast':
            if len(input) != 3:
                print 'usage: fast info_hash bt_port'
                continue
            try:
                info_hash = identifier.Id(input[1])
            except identifier.IdError:
                print 'Invalid info_hash (%s)' % input[1]
                continue
            try:
                bt_port = int(input[2])
            except ValueError:
                print 'Invalid bt_port (%r)' % input[2]
                continue
            success, peers = dht.get_peers(time.time(), info_hash,
                                           _on_peers_found, bt_port)
            if not success:
                print 'Lookup failed'
            if peers:
                print '[local] %d peer(s)' % len(peers)
                print peers
def _update_rnode_on_response_received(self, rnode, rtt): """Register a reply from rnode. You should call this method when receiving a response from this rnode. """ rnode.rtt = rtt current_time = time.time() #rnode._reset_refresh_task() if rnode.in_quarantine: rnode.in_quarantine = rnode.last_action_ts < (current_time - QUARANTINE_PERIOD) rnode.last_action_ts = current_time rnode.num_responses += 1 rnode.add_event(time.time(), node.RESPONSE) rnode.last_seen = current_time
def __init__(self, msg_f, my_id, lookup_id, info_hash,
             callback_f, bt_port=0):
    self.msg_f = msg_f
    self.bootstrap_alpha = 4
    self.normal_alpha = 4
    self.normal_m = 1
    self.slowdown_alpha = 4
    self.slowdown_m = 1
    self.start_ts = time.time()
    logger.debug('New lookup (info_hash: %r) %d' % (info_hash, bt_port))
    self._my_id = my_id
    self.lookup_id = lookup_id
    self.callback_f = callback_f
    self._lookup_queue = _LookupQueue(info_hash, 20)
    self.info_hash = info_hash
    self._bt_port = bt_port
    self._lock = threading.RLock()
    self._num_parallel_queries = 0
    self.num_queries = 0
    self.num_responses = 0
    self.num_timeouts = 0
    self.num_errors = 0
    self._running = False
    self._slow_down = False
    self._msg_factory = msg_f.outgoing_get_peers_query
def on_timeout(self, related_query):
    if related_query.experimental_obj:
        elapsed_time = time.time() - related_query.experimental_obj.query_ts
        print 'probe FAILED due to time-out', related_query.experimental_obj.value
        print 'RTT = ', elapsed_time
        self.pinged_ips[related_query.dst_node.ip] = STATUS_FAIL
        self.num_fail += 1
def on_response_received(self, node_, rtt, nodes):
    if nodes:
        logger.debug('nodes found: %r', nodes)
        self._found_nodes_queue.add(nodes)
    logger.debug('on response received %f', rtt)
    log_distance = self.my_node.log_distance(node_)
    try:
        sbucket = self.table.get_sbucket(log_distance)
    except IndexError:
        return  # Got a response from myself. Just ignore it.
    m_bucket = sbucket.main
    r_bucket = sbucket.replacement
    rnode = m_bucket.get_rnode(node_)
    if rnode:
        # node in routing table: update
        self._update_rnode_on_response_received(rnode, rtt)
        return
    # The node is not in main
    rnode = r_bucket.get_rnode(node_)
    if rnode:
        # node in replacement table:
        # let's see whether there is room in the main
        self._update_rnode_on_response_received(rnode, rtt)
        # TODO: leave this for the maintenance task
        if m_bucket.there_is_room():
            m_bucket.add(rnode)
            self.table.update_lowest_index(log_distance)
            self.table.num_rnodes += 1
            self._update_rnode_on_response_received(rnode, rtt)
            r_bucket.remove(rnode)
        return
    # The node is nowhere.
    # Add to main table (if the bucket is not full)
    # TODO: check whether in replacement_mode
    if m_bucket.there_is_room():
        rnode = node_.get_rnode(log_distance)
        m_bucket.add(rnode)
        self.table.update_lowest_index(log_distance)
        self.table.num_rnodes += 1
        self._update_rnode_on_response_received(rnode, rtt)
        return
    # The main bucket is full.
    # Let's see whether this node's latency is good
    current_time = time.time()
    rnode_to_be_replaced = None
    for rnode in reversed(m_bucket.rnodes):
        rnode_age = current_time - rnode.bucket_insertion_ts
        if rtt < rnode.rtt * (1 - (rnode_age / 7200)):
            # An rnode can only be replaced when the candidate node's RTT
            # is shorter by a factor. Over time, this factor decreases:
            # e.g. after 30 mins (1800 secs) in the bucket, a candidate's
            # RTT must be below 75% of the rnode's RTT; after one hour,
            # below 50%. After two hours (7200 secs) the factor reaches
            # zero and an rnode can no longer be replaced because of a
            # better RTT.
            # print 'RTT replacement: newRTT: %f, oldRTT: %f, age: %f' % (
            #     rtt, rnode.rtt, current_time - rnode.bucket_insertion_ts)
            rnode_to_be_replaced = rnode
            break
def __init__(self, value):
    self.value = value
    self.query_ts = time.time()
    print 'Got query at time:', self.query_ts
def _get_questionable_rnodes(self, m_bucket):
    q_rnodes = []
    for rnode in m_bucket.rnodes:
        inactivity_time = time.time() - rnode.last_seen
        # A node is questionable when it has been inactive for too long
        # or has never responded. A single test keeps a node that matches
        # both conditions from being appended twice.
        if inactivity_time > REFRESH_PERIOD or rnode.num_responses == 0:
            q_rnodes.append(rnode)
    return q_rnodes
def on_response_received(self, msg, related_query):
    if self.pinged_ips.get(msg.src_node.ip) == STATUS_PINGED:
        print 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
    if related_query.experimental_obj:
        print "probe OK (%r) (%r)" % (related_query.experimental_obj.value,
                                      msg.src_node)
        self.pinged_ips[msg.src_node.ip] = STATUS_OK
        elapsed_time = time.time() - related_query.experimental_obj.query_ts
        print 'RTT = ', elapsed_time
def _get_questionable_rnodes(self, m_bucket):
    q_rnodes = []
    for rnode in m_bucket.rnodes:
        inactivity_time = time.time() - rnode.last_seen
        if (inactivity_time > REFRESH_PERIOD
                or rnode.num_responses == 0):
            is_questionable = getattr(rnode, 'questionable', False)
            if not is_questionable:
                rnode.questionable = True
                q_rnodes.append(rnode)
    return q_rnodes
def command_user_interface(dht):
    print '\nType "exit" to stop the DHT and exit'
    print 'Type "help" if you need it'
    while 1:
        input = sys.stdin.readline().strip().split()
        if not input:
            continue
        command = input[0]
        if command == 'help':
            print '''
Available commands are:
- help
- fast info_hash bt_port
- exit
- m    Memory information
- r    Print routing table stats
- rr   Print routing table (full)
'''
        elif command == 'exit':
            dht.stop()
            break
        elif command == 'm':
            import guppy
            h = guppy.hpy()
            print h.heap()
        elif command == 'r':
            dht.print_routing_table_stats()
        elif command == 'rr':
            dht.print_routing_table()
        elif command == 'fast':
            if len(input) != 3:
                print 'usage: fast info_hash bt_port'
                continue
            try:
                info_hash = identifier.Id(input[1])
            except identifier.IdError:
                print 'Invalid info_hash (%s)' % input[1]
                continue
            try:
                bt_port = int(input[2])
            except ValueError:
                print 'Invalid bt_port (%r)' % input[2]
                continue
            if 0 < bt_port < MIN_BT_PORT:
                print 'Mmmm, you are using reserved ports (<1024). Try again.'
                continue
            if bt_port > MAX_BT_PORT:
                print "I don't know about you, but I find it difficult",
                print "to represent %d with only two bytes." % bt_port,
                print "Try again."
                continue
            dht.get_peers(time.time(), info_hash, _on_peers_found, bt_port)
        else:
            print 'Invalid input: type help'
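# For reference, a minimal _on_peers_found callback compatible with the
# dht.get_peers(time.time(), info_hash, _on_peers_found, bt_port) calls
# above. The argument roles (the lookup id as passed to get_peers, then
# the peers found so far or None when the lookup ends, then the responding
# node) are inferred from the wx callback earlier in this section; treat
# this as a sketch, not pymdht's documented API.
import time

def _on_peers_found(start_ts, peers, src_node):
    if peers:
        print '%.2f s: %d peer(s) from %r' % (
            time.time() - start_ts, len(peers), src_node)
    else:
        print '%.2f s: lookup finished, no more peers' % (
            time.time() - start_ts)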
def on_response_received(self, node_, rtt, nodes):
    if nodes:
        logger.debug('nodes found: %r', nodes)
        self._found_nodes_queue.add(nodes)
    logger.debug('on response received %f', rtt)
    log_distance = self.my_node.log_distance(node_)
    try:
        sbucket = self.table.get_sbucket(log_distance)
    except IndexError:
        return
    m_bucket = sbucket.main
    r_bucket = sbucket.replacement
    rnode = m_bucket.get_rnode(node_)
    if rnode:
        self._update_rnode_on_response_received(rnode, rtt)
        return
    rnode = r_bucket.get_rnode(node_)
    if rnode:
        self._update_rnode_on_response_received(rnode, rtt)
        if m_bucket.there_is_room():
            m_bucket.add(rnode)
            self.table.update_lowest_index(log_distance)
            self.table.num_rnodes += 1
            self._update_rnode_on_response_received(rnode, rtt)
            r_bucket.remove(rnode)
        return
    if m_bucket.there_is_room():
        rnode = node_.get_rnode(log_distance)
        m_bucket.add(rnode)
        self.table.update_lowest_index(log_distance)
        self.table.num_rnodes += 1
        self._update_rnode_on_response_received(rnode, rtt)
        return
    current_time = time.time()
    rnode_to_be_replaced = None
    for rnode in reversed(m_bucket.rnodes):
        rnode_age = current_time - rnode.bucket_insertion_ts
        if rtt < rnode.rtt * (1 - rnode_age / 7200):
            rnode_to_be_replaced = rnode
            break
    if rnode_to_be_replaced:
        m_bucket.remove(rnode_to_be_replaced)
        rnode = node_.get_rnode(log_distance)
        m_bucket.add(rnode)
        self.table.num_rnodes += 0  # one removed, one added: count unchanged
        self._update_rnode_on_response_received(rnode, rtt)
        return
    worst_rnode = self._worst_rnode(r_bucket.rnodes)
    if worst_rnode and worst_rnode.timeouts_in_a_row() > MAX_NUM_TIMEOUTS:
        # Replace the worst node in the replacement bucket
        r_bucket.remove(worst_rnode)
        rnode = node_.get_rnode(log_distance)
        r_bucket.add(rnode)
        self._update_rnode_on_response_received(rnode, rtt)
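# The replacement test above, rtt < rnode.rtt * (1 - rnode_age / 7200),
# decays linearly: at age 0 any faster candidate wins; at two hours
# (7200 s) RTT-based replacement becomes impossible. Sample points
# (plain floats, no pymdht types):
def rtt_replacement_threshold(old_rtt, rnode_age):
    # candidate must have an RTT strictly below this value
    return old_rtt * (1 - rnode_age / 7200.0)

old_rtt = 0.200  # seconds
for age in (0, 1800, 3600, 7200):
    print age, rtt_replacement_threshold(old_rtt, age)
# 0    0.2   -> any faster candidate wins
# 1800 0.15  -> must be below 75% of the old RTT
# 3600 0.1   -> below 50%
# 7200 0.0   -> no RTT-based replacement possible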
def main(options, args):
    my_addr = (options.ip, int(options.port))
    logs_path = options.path
    print 'Using the following plug-ins:'
    print '*', options.routing_m_file
    print '*', options.lookup_m_file
    print 'Private DHT name:', options.private_dht_name
    routing_m_name = '.'.join(os.path.split(options.routing_m_file))[:-3]
    routing_m_mod = __import__(routing_m_name, fromlist=[''])
    lookup_m_name = '.'.join(os.path.split(options.lookup_m_file))[:-3]
    lookup_m_mod = __import__(lookup_m_name, fromlist=[''])
    dht = pymdht.Pymdht(my_addr, logs_path,
                        routing_m_mod, lookup_m_mod,
                        options.private_dht_name, logs_level)
    print '\nType "exit" to stop the DHT and exit'
    print 'Type "help" if you need it'
    while 1:
        input = sys.stdin.readline().strip().split()
        if not input:
            continue
        command = input[0]
        if command == 'help':
            print '''
Available commands are:
- help
- fast info_hash bt_port
- exit
- m    Memory information
'''
        elif command == 'exit':
            dht.stop()
            break
        elif command == 'm':
            import guppy
            h = guppy.hpy()
            print h.heap()
        elif command == 'fast':
            if len(input) != 3:
                print 'usage: fast info_hash bt_port'
                continue
            try:
                info_hash = identifier.Id(input[1])
            except identifier.IdError:
                print 'Invalid info_hash (%s)' % input[1]
                continue
            try:
                bt_port = int(input[2])
            except ValueError:
                print 'Invalid bt_port (%r)' % input[2]
                continue
            success = dht.get_peers(time.time(), info_hash,
                                    _on_peers_found, bt_port)
            if not success:
                print 'Lookup failed'
def __init__(self, bootstrap_nodes):
    self.target = RandomId()
    self.extracting_queue = ExtractingQueue(self.target)
    for node_ in bootstrap_nodes:
        is_new_node = self.extracting_queue.add_node(node_)
    self.my_id = self._my_id = RandomId()
    self.msg_f = message.MsgFactory(PYMDHT_VERSION, self.my_id, None)
    self.querier = Querier()
    self.last_extraction_ts = time.time()
    self.num_msgs = 0
    self.nodes_inrange_w_response = set()
def add(self, node_, log_distance):
    # The caller already checked that there is room in the bucket
    # print 'received queue', len(self._queue)
    if node_ in self._queued_nodes_set:
        # This node is already queued
        return
    num_nodes_queued = self._nodes_queued_per_bucket[log_distance]
    if num_nodes_queued >= 8:
        # many nodes queued for this bucket already
        return
    self._queued_nodes_set.add(node_)
    self._nodes_queued_per_bucket[log_distance] = num_nodes_queued + 1
    self._queue.append((time.time(), node_))
def __init__(self, info_hash, queue_size):
    self.info_hash = info_hash
    self.queue_size = queue_size
    # Initial entry with log distance ID_SIZE_BITS + 1: farther than any
    # real node.
    self.queue = [_QueuedNode(None, identifier.ID_SIZE_BITS + 1, None)]
    # *_ips is used to prevent many Ids from being
    # claimed from a single IP address.
    self.queued_ips = set()
    self.queried_ips = set()
    self.queued_qnodes = []
    self.responded_qnodes = []
    self.max_queued_qnodes = 16
    self.max_responded_qnodes = 16
    self.last_query_ts = time.time()
def pop(self, _):
    while self._queue:
        ts, node_ = self._queue[0]
        time_in_queue = time.time() - ts
        if time_in_queue < QUARANTINE_PERIOD:
            return
        # Quarantine period passed: dequeue and consider the node
        log_distance = self.table.my_node.log_distance(node_)
        self._queued_nodes_set.remove(node_)
        self._nodes_queued_per_bucket[log_distance] -= 1
        del self._queue[0]
        sbucket = self.table.get_sbucket(log_distance)
        m_bucket = sbucket.main
        if m_bucket.there_is_room():
            return node_
def pop(self, _):
    while self._queue:
        ts, node_ = self._queue[0]
        time_in_queue = time.time() - ts
        if time_in_queue < QUARANTINE_PERIOD:
            return
        # Quarantine period passed
        del self._queue[0]
        log_distance = self.table.my_node.log_distance(node_)
        sbucket = self.table.get_sbucket(log_distance)
        m_bucket = sbucket.main
        if m_bucket.there_is_room():
            # room in main: return it
            return node_
    return
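# Both pop() variants above implement the same admission rule: a node is
# only considered for the main bucket after waiting a full QUARANTINE_PERIOD
# in the FIFO. A toy timeline (all values assumed):
QUARANTINE_PERIOD = 180  # seconds, assumed
queue = [(1000, 'n1'), (1100, 'n2')]  # (enqueue_ts, node)

def head_ready(now):
    # head-of-queue check used by pop(): has quarantine elapsed?
    ts, node_ = queue[0]
    return now - ts >= QUARANTINE_PERIOD

print head_ready(1100)  # False: n1 has waited only 100 s
print head_ready(1200)  # True: n1 has waited 200 s >= QUARANTINE_PERIOD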
def _ping_a_staled_rnode(self):
    starting_index = self._next_stale_maintenance_index
    result = None
    while not result:
        # Round-robin over the buckets, skipping empty ones
        sbucket = self.table.get_sbucket(
            self._next_stale_maintenance_index)
        m_bucket = sbucket.main
        self._next_stale_maintenance_index = (
            self._next_stale_maintenance_index + 1) % (NUM_BUCKETS - 1)
        if m_bucket:
            rnode = m_bucket.get_stalest_rnode()
            if time.time() > rnode.last_seen + QUARANTINE_PERIOD:
                result = rnode
        if self._next_stale_maintenance_index == starting_index:
            # All buckets checked
            break
    return result
def __init__(self, bootstrap_nodes):
    self.target = bootstrap_nodes[0].id
    target_prefix = self.target.get_prefix(START_PREFIX_LEN)
    print target_prefix
    self.rcrawler = RCrawler(target_prefix)
    for n in bootstrap_nodes:
        self.rcrawler.found_node_handler(n)
    self.pending_nodes = bootstrap_nodes
    self.my_id = self._my_id = RandomId()
    self.msg_f = message.MsgFactory(PYMDHT_VERSION, self.my_id, None)
    self.querier = Querier()
    self.next_main_loop_ts = 0
    self.num_msgs = 0
    self.known_nodes = set(bootstrap_nodes)
    self.ok_nodes = set()
    self.dead_nodes = set()
    self.last_msg_ts = time.time()
def _pop_nodes_to_query(self, max_nodes):
    if len(self.responded_qnodes) > MARK_INDEX:
        mark = self.responded_qnodes[MARK_INDEX].distance.log
    else:
        mark = identifier.ID_SIZE_BITS
    nodes_to_query = []
    for _ in range(max_nodes):
        try:
            qnode = self.queued_qnodes[0]
        except IndexError:
            break  # no more queued nodes left
        if qnode.distance is None or qnode.distance.log < mark:
            self.queried_ips.add(qnode.node.ip)
            nodes_to_query.append(qnode.node)
            del self.queued_qnodes[0]
            self.queued_ips.remove(qnode.node.ip)
    self.last_query_ts = time.time()
    return nodes_to_query
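# The 'mark' above is the log distance of the MARK_INDEX-th best responded
# node: a queued node is queried only while it is strictly closer than the
# mark (or its distance is unknown). A toy run with plain integers (the
# MARK_INDEX value and the ordering of responded_qnodes are assumptions):
MARK_INDEX = 2      # assumed constant
ID_SIZE_BITS = 160  # as in identifier.ID_SIZE_BITS
responded_logs = [120, 130, 140, 150]  # assumed sorted, closest first

mark = (responded_logs[MARK_INDEX]
        if len(responded_logs) > MARK_INDEX else ID_SIZE_BITS)
print mark  # 140
for queued_log in (135, 140, 150):
    print queued_log, queued_log < mark
# 135 True  -> closer than the mark: query it
# 140 False -> not strictly closer: skip
# 150 False -> farther: skip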
def _refresh_stale_bucket(self):
    maintenance_lookup_target = None
    current_time = time.time()
    for i in xrange(NUM_BUCKETS):
        sbucket = self.table.get_sbucket(i)
        m_bucket = sbucket.main
        if not m_bucket:
            continue
        inactivity_time = current_time - m_bucket.last_changed_ts
        if inactivity_time > REFRESH_PERIOD:
            # print time.time(), '>>>>>>>>>>>>>>> refreshing bucket %d after %f secs' % (
            #     i, inactivity_time)
            maintenance_lookup_target = self.my_node.id.generate_close_id(i)
            m_bucket.last_changed_ts = current_time
            return maintenance_lookup_target
    self._maintenance_mode = NORMAL_MODE
    return None
def _refresh_stale_bucket(self):
    maintenance_lookup_target = None
    current_time = time.time()
    # This variant starts at the table's lowest index rather than 0.
    for i in xrange(self.table.lowest_index, NUM_BUCKETS):
        sbucket = self.table.get_sbucket(i)
        m_bucket = sbucket.main
        if not m_bucket:
            continue
        inactivity_time = current_time - m_bucket.last_changed_ts
        if inactivity_time > REFRESH_PERIOD:
            # print time.time(), '>>>>>>>>>>>>>>> refreshing bucket %d after %f secs' % (
            #     i, inactivity_time)
            maintenance_lookup_target = self.my_node.id.generate_close_id(i)
            m_bucket.last_changed_ts = current_time
            return maintenance_lookup_target
    self._maintenance_mode = NORMAL_MODE
    return None