Example #1
 def test_timeouts_in_a_row(self):
     rnode = RoutingNode(tc.NODES[0], 1)
     eq_(rnode.timeouts_in_a_row(), 0)
     eq_(rnode.timeouts_in_a_row(True), 0)
     eq_(rnode.timeouts_in_a_row(False), 0)
     # got query
     rnode.last_events.append((time.time(), node.QUERY))
     eq_(rnode.timeouts_in_a_row(), 0)
     eq_(rnode.timeouts_in_a_row(True), 0)
     eq_(rnode.timeouts_in_a_row(False), 0)
     # got timeout
     rnode.last_events.append((time.time(), node.TIMEOUT))
     eq_(rnode.timeouts_in_a_row(), 1)
     eq_(rnode.timeouts_in_a_row(True), 1)
     eq_(rnode.timeouts_in_a_row(False), 1)
     # got query
     rnode.last_events.append((time.time(), node.QUERY))
     eq_(rnode.timeouts_in_a_row(), 0)
     eq_(rnode.timeouts_in_a_row(True), 0)
     eq_(rnode.timeouts_in_a_row(False), 1)
     # got timeout
     rnode.last_events.append((time.time(), node.TIMEOUT))
     eq_(rnode.timeouts_in_a_row(), 1)
     eq_(rnode.timeouts_in_a_row(True), 1)
     eq_(rnode.timeouts_in_a_row(False), 2)
     # got response
     rnode.last_events.append((time.time(), node.RESPONSE))
     eq_(rnode.timeouts_in_a_row(), 0)
     eq_(rnode.timeouts_in_a_row(True), 0)
     eq_(rnode.timeouts_in_a_row(False), 0)
Example #2
 def _try_do_lookup(self):
     queries_to_send = []
     current_time = time.time()
     while self._pending_lookups:
         pending_lookup = self._pending_lookups[0]
         # Drop all pending lookups older than PENDING_LOOKUP_TIMEOUT
         if time.time() > pending_lookup.start_ts + PENDING_LOOKUP_TIMEOUT:
             del self._pending_lookups[0]
         else:
             break
     if self._pending_lookups:
         lookup_obj = self._pending_lookups[0]
     else:
         return queries_to_send
     distance = lookup_obj.info_hash.distance(self._my_id)
     bootstrap_rnodes = self._routing_m.get_closest_rnodes(
         distance.log, 0, True)
     # TODO: get the full bucket
     if bootstrap_rnodes:
         del self._pending_lookups[0]
         # look if I'm tracking this info_hash
         peers = self._tracker.get(lookup_obj.info_hash)
         callback_f = lookup_obj.callback_f
         if peers:
             self._add_cache_peers(lookup_obj.info_hash, peers)
             if callback_f and callable(callback_f):
                 callback_f(lookup_obj.lookup_id, peers, None)
         # do the lookup
         queries_to_send = lookup_obj.start(bootstrap_rnodes)
     else:
         next_lookup_attempt_ts = time.time() + .2
         self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                            next_lookup_attempt_ts)
     return queries_to_send
Example #3
    def test_capture(self):
        self.reactor.start_capture()
        ts1 = time.time()
        time.sleep(tc.TASK_INTERVAL/2)
        # out > DATAGRAM1 (main_loop)
        self.reactor.run_one_step()
        ts2 = time.time()
        incoming_datagram = Datagram(DATA1, tc.SERVER_ADDR)
        self.reactor.s.put_datagram_received(incoming_datagram)
        time.sleep(tc.TASK_INTERVAL/2)
        self.reactor.run_one_step() 
        # in < incoming_datagram (socket)
        # out > DATAGRAM3 (on_datagram_received)
        captured_msgs = self.reactor.stop_and_get_capture()

        eq_(len(captured_msgs), 3)
        for msg in captured_msgs:
            print msg
        assert ts1 < captured_msgs[0][0] < ts2
        eq_(captured_msgs[0][1], tc.SERVER_ADDR)
        eq_(captured_msgs[0][2], True) #outgoing
        eq_(captured_msgs[0][3], DATA1)
        assert captured_msgs[1][0] > ts2
        eq_(captured_msgs[1][1], DATAGRAM1.addr)
        eq_(captured_msgs[1][2], False) #incoming
        eq_(captured_msgs[1][3], DATAGRAM1.data)
        assert captured_msgs[2][0] > captured_msgs[1][0]
        eq_(captured_msgs[2][1], DATAGRAM3.addr)
        eq_(captured_msgs[2][2], True) #outgoing
        eq_(captured_msgs[2][3], DATAGRAM3.data)
Example #4
    def main_loop(self):
        """
        Perform maintenance operations. The main operation is routing table
        maintenance, where stale nodes are added/probed/replaced/removed as
        needed. The routing management module specifies the implementation
        details. This includes keeping track of queries that have not been
        responded to for a long time (timeout) with the help of
        querier.Querier. The routing manager and the lookup manager will be
        informed of those timeouts.

        This method is designed to be used as minitwisted's heartbeat handler.

        """

        queries_to_send = []
        current_ts = time.time()
        #TODO: I think this if should be removed
        # At most, 1 second between calls to main_loop after the first call
        if current_ts >= self._next_main_loop_call_ts:
            self._next_main_loop_call_ts = current_ts + 1
        else:
            # It's too early
            return self._next_main_loop_call_ts, []
        # Retry failed lookup (if any)
        queries_to_send.extend(self._try_do_lookup())
        
        # Take care of timeouts
        if current_ts >= self._next_timeout_ts:
            (self._next_timeout_ts,
             timeout_queries) = self._querier.get_timeout_queries()
            for query in timeout_queries:
                queries_to_send.extend(self._on_timeout(query))

        # Routing table maintenance
        if time.time() >= self._next_maintenance_ts:
            (maintenance_delay,
             queries,
             maintenance_lookup) = self._routing_m.do_maintenance()
            self._next_maintenance_ts = current_ts + maintenance_delay
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               self._next_maintenance_ts)
            queries_to_send.extend(queries)
            if maintenance_lookup:
                target, rnodes = maintenance_lookup
                lookup_obj = self._lookup_m.maintenance_lookup(target)
                queries_to_send.extend(lookup_obj.start(rnodes))
            
        # Auto-save routing table
        if current_ts >= self._next_save_state_ts:
            state.save(self._my_id,
                       self._routing_m.get_main_rnodes(),
                       self.state_filename)
            self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               self._next_maintenance_ts,
                                               self._next_timeout_ts,
                                               self._next_save_state_ts)
        # Return control to reactor
        datagrams_to_send = self._register_queries(queries_to_send)
        return self._next_main_loop_call_ts, datagrams_to_send
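
The docstring above describes main_loop() as a heartbeat handler: the caller invokes it, sends whatever datagrams it returns, and does not call it again until the returned timestamp has passed. A minimal driver sketch of that contract follows; heartbeat and sendto are illustrative names, not part of the original code, and the real caller is minitwisted's reactor, shown in the run_one_step examples below.

import time

def heartbeat(controller, sendto):
    # Call main_loop whenever its scheduled timestamp has passed and push
    # the returned datagrams out through the socket layer.
    next_main_loop_call_ts = time.time()
    while True:
        if time.time() >= next_main_loop_call_ts:
            next_main_loop_call_ts, datagrams_to_send = controller.main_loop()
            for datagram in datagrams_to_send:
                sendto(datagram)
        time.sleep(0.05)  # reactor tick; the real loop also polls the socket
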
Example #5
 def test_timeouts_in_a_row(self):
     rnode = RoutingNode(tc.NODES[0], 1)
     eq_(rnode.timeouts_in_a_row(), 0)
     eq_(rnode.timeouts_in_a_row(True), 0)
     eq_(rnode.timeouts_in_a_row(False), 0)
     # got query
     rnode.add_event(time.time(), node.QUERY)
     eq_(rnode.timeouts_in_a_row(), 0)
     eq_(rnode.timeouts_in_a_row(True), 0)
     eq_(rnode.timeouts_in_a_row(False), 0)
     # got timeout
     rnode.add_event(time.time(), node.TIMEOUT)
     eq_(rnode.timeouts_in_a_row(), 1)
     eq_(rnode.timeouts_in_a_row(True), 1)
     eq_(rnode.timeouts_in_a_row(False), 1)
     # got query
     rnode.add_event(time.time(), node.QUERY)
     eq_(rnode.timeouts_in_a_row(), 0)
     eq_(rnode.timeouts_in_a_row(True), 0)
     eq_(rnode.timeouts_in_a_row(False), 1)
     # got timeout
     rnode.add_event(time.time(), node.TIMEOUT)
     eq_(rnode.timeouts_in_a_row(), 1)
     eq_(rnode.timeouts_in_a_row(True), 1)
     eq_(rnode.timeouts_in_a_row(False), 2)
     # got response
     rnode.add_event(time.time(), node.RESPONSE)
     eq_(rnode.timeouts_in_a_row(), 0)
     eq_(rnode.timeouts_in_a_row(True), 0)
     eq_(rnode.timeouts_in_a_row(False), 0)
Example #6
 def _try_do_lookup(self):
     queries_to_send = []
     current_time = time.time()
     while self._pending_lookups:
         pending_lookup = self._pending_lookups[0]
         # Drop all pending lookups older than PENDING_LOOKUP_TIMEOUT
         if time.time() > pending_lookup.start_ts + PENDING_LOOKUP_TIMEOUT:
             del self._pending_lookups[0]
         else:
             break
     if self._pending_lookups:
         lookup_obj = self._pending_lookups[0]
     else:
         return queries_to_send
     distance = lookup_obj.info_hash.distance(self._my_id)
     bootstrap_rnodes = self._routing_m.get_closest_rnodes(distance.log,
                                                           0,
                                                           True)
     #TODO: get the full bucket
     if bootstrap_rnodes:
         del self._pending_lookups[0]
         # look if I'm tracking this info_hash
         peers = self._tracker.get(lookup_obj.info_hash)
         callback_f = lookup_obj.callback_f
         if peers:
             self._add_cache_peers(lookup_obj.info_hash, peers)
             if callback_f and callable(callback_f):
                 callback_f(lookup_obj.lookup_id, peers, None)
         # do the lookup
         queries_to_send = lookup_obj.start(bootstrap_rnodes)
     else:
         next_lookup_attempt_ts = time.time() + .2
         self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                            next_lookup_attempt_ts)
     return queries_to_send
Example #7
    def main_loop(self):
        """
        Perform maintenance operations. The main operation is routing table
        maintenance, where stale nodes are added/probed/replaced/removed as
        needed. The routing management module specifies the implementation
        details. This includes keeping track of queries that have not been
        responded to for a long time (timeout) with the help of
        querier.Querier. The routing manager and the lookup manager will be
        informed of those timeouts.

        This method is designed to be used as minitwisted's heartbeat handler.

        """

        queries_to_send = []
        current_ts = time.time()
        #TODO: I think this if should be removed
        # At most, 1 second between calls to main_loop after the first call
        if current_ts >= self._next_main_loop_call_ts:
            self._next_main_loop_call_ts = current_ts + 1
        else:
            # It's too early
            return self._next_main_loop_call_ts, []
        # Retry failed lookup (if any)
        queries_to_send.extend(self._try_do_lookup())
        
        # Take care of timeouts
        if current_ts >= self._next_timeout_ts:
            (self._next_timeout_ts,
             timeout_queries) = self._querier.get_timeout_queries()
            for query in timeout_queries:
                queries_to_send.extend(self._on_timeout(query))

        # Routing table maintenance
        if time.time() >= self._next_maintenance_ts:
            (maintenance_delay,
             queries,
             maintenance_lookup) = self._routing_m.do_maintenance()
            self._next_maintenance_ts = current_ts + maintenance_delay
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               self._next_maintenance_ts)
            queries_to_send.extend(queries)
            if maintenance_lookup:
                target, rnodes = maintenance_lookup
                lookup_obj = self._lookup_m.maintenance_lookup(target)
                queries_to_send.extend(lookup_obj.start(rnodes))
            
        # Auto-save routing table
        if current_ts >= self._next_save_state_ts:
            state.save(self._my_id,
                       self._routing_m.get_main_rnodes(),
                       self.state_filename)
            self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               self._next_maintenance_ts,
                                               self._next_timeout_ts,
                                               self._next_save_state_ts)
        # Return control to reactor
        datagrams_to_send = self._register_queries(queries_to_send)
        return self._next_main_loop_call_ts, datagrams_to_send
Example #8
 def _add_cache_peers(self, info_hash, peers):
     oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
     while self._cached_lookups and self._cached_lookups[0][0] < oldest_valid_ts:
         # clean up old entries
         del self._cached_lookups[0]
     if self._cached_lookups and self._cached_lookups[-1][1] == info_hash:
         self._cached_lookups[-1][2].extend(peers)
     else:
         self._cached_lookups.append((time.time(), info_hash, peers))
Example #9
 def _add_cache_peers(self, info_hash, peers):
     oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
     while self._cached_lookups and self._cached_lookups[0][0] < oldest_valid_ts:
         # clean up old entries
         del self._cached_lookups[0]
     if self._cached_lookups and self._cached_lookups[-1][1] == info_hash:
         self._cached_lookups[-1][2].extend(peers)
     else:
         self._cached_lookups.append((time.time(), info_hash, peers))
Example #10
    def _update_rnode_on_timeout(self, rnode):
        """Register a timeout for this rnode.

        You should call this method when getting a timeout for this node.

        """
        rnode.last_action_ts = time.time()
        rnode.msgs_since_timeout = 0
        rnode.num_timeouts += 1
        rnode.add_event(time.time(), node.TIMEOUT)
Example #11
 def mark(self, msg=''):
     if self.disabled: 
         return
     t1 = ptime.time()
     msg2 = "  "+self.msg+" "+msg+" "+"%gms" % ((t1-self.t1)*1000)
     if self.delayed:
         self.msgs.append(msg2)
     else:
         print msg2
     self.t1 = ptime.time()  ## don't measure time it took to print
Example #12
    def _update_rnode_on_timeout(self, rnode):
        """Register a timeout for this rnode.

        You should call this method when getting a timeout for this node.

        """
        rnode.last_action_ts = time.time()
        rnode.msgs_since_timeout = 0
        rnode.num_timeouts += 1
        rnode.add_event(time.time(), node.TIMEOUT)
Example #13
 def mark(self, msg=''):
     if self.disabled:
         return
     t1 = ptime.time()
     msg2 = "  " + self.msg + " " + msg + " " + "%gms" % (
         (t1 - self.t1) * 1000)
     if self.delayed:
         self.msgs.append(msg2)
     else:
         print msg2
     self.t1 = ptime.time()  ## don't measure time it took to print
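
Both mark() variants above follow the same pattern: take a timestamp, print the elapsed time in milliseconds since the previous mark, then reset the timestamp after printing so that printing time is not measured. A self-contained sketch of that pattern, using illustrative names that are not from the original profiler module:

import time

class SimpleMarker(object):
    """Print elapsed milliseconds between successive mark() calls."""

    def __init__(self, msg=''):
        self.msg = msg
        self.t1 = time.time()

    def mark(self, msg=''):
        t1 = time.time()
        print("  %s %s %gms" % (self.msg, msg, (t1 - self.t1) * 1000))
        self.t1 = time.time()  # don't measure the time it took to print
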
Example #14
    def _update_rnode_on_query_received(self, rnode):
        """Register a query from node.

        You should call this method when receiving a query from this node.

        """
        current_time = time.time()
        rnode.last_action_ts = time.time()
        rnode.msgs_since_timeout += 1
        rnode.num_queries += 1
        rnode.add_event(current_time, node.QUERY)
        rnode.last_seen = current_time
Example #15
    def _update_rnode_on_query_received(self, rnode):
        """Register a query from node.

        You should call this method when receiving a query from this node.

        """
        current_time = time.time()
        rnode.last_action_ts = time.time()
        rnode.msgs_since_timeout += 1
        rnode.num_queries += 1
        rnode.add_event(current_time, node.QUERY)
        rnode.last_seen = current_time
Example #16
    def run_one_step(self):
        """Main loop activated by calling self.start()"""

        # Deal with call_asap requests
        # TODO: retry for 5 seconds if no msgs_to_send (inside controller?)
        call_asap_tuple = None
        self._lock.acquire()
        try:
            if self._call_asap_queue:
                call_asap_tuple = self._call_asap_queue.pop(0)
        finally:
            self._lock.release()
        if call_asap_tuple:
            callback_f, args, kwds = call_asap_tuple
            datagrams_to_send = callback_f(*args, **kwds)
            for datagram in datagrams_to_send:
                self._sendto(datagram)

        # Call main_loop
        if time.time() >= self._next_main_loop_call_ts:
            (self._next_main_loop_call_ts,
             datagrams_to_send) = self._main_loop_f()
            for datagram in datagrams_to_send:
                self._sendto(datagram)

        # Get data from the network
        try:
            data, addr = self.s.recvfrom(BUFFER_SIZE)
        except (socket.timeout):
            pass  # timeout
        except (socket.error) as e:
            logger.warning(
                'Got socket.error when receiving data:\n%s' % e)
        else:
            self._add_capture((time.time(), addr, False, data))
            ip_is_blocked = self.floodbarrier_active and \
                self.floodbarrier.ip_blocked(addr[0])
            if ip_is_blocked:
                import sys
#                print >>sys.stderr, '>>>>>>>>>>>>>>>>>>', addr
#                print >>sys.stderr, '>>>>>>>>>>>>>>>>>>', `addr`
                logger.warning("blocked")
#                print >>sys.stderr, '>>>>>>>>>>>>>>>>>> DONE'
                return
            datagram_received = Datagram(data, addr)
            (self._next_main_loop_call_ts,
             datagrams_to_send) = self._on_datagram_received_f(
                 datagram_received)
            for datagram in datagrams_to_send:
                self._sendto(datagram)
Example #17
 def _find_related_query(self, msg):
     addr = msg.src_addr
     try:
         addr_query_list = self._pending[addr]
     except (KeyError):
         logger.warning('No pending queries for %s', addr)
         return  # Ignore response
     for related_query in addr_query_list:
         if related_query.match_response(msg):
             logger.debug(
                 'response node: %s, related query: (%s), delay %f s. %r' % (
                     `addr`,
                     `related_query.query`,
                     time.time() - related_query.sending_ts,
                     related_query.lookup_obj))
             # Do not delete this query (the timeout will delete it)
             return related_query
Example #18
    def run_one_step(self):
        """Main loop activated by calling self.start()"""

        # Deal with call_asap requests
        # TODO: retry for 5 seconds if no msgs_to_send (inside controller?)
        call_asap_tuple = None
        self._lock.acquire()
        try:
            if self._call_asap_queue:
                call_asap_tuple = self._call_asap_queue.pop(0)
        finally:
            self._lock.release()
        if call_asap_tuple:
            callback_f, args, kwds = call_asap_tuple
            datagrams_to_send = callback_f(*args, **kwds)
            for datagram in datagrams_to_send:
                self._sendto(datagram)

        # Call main_loop
        if time.time() >= self._next_main_loop_call_ts:
            (self._next_main_loop_call_ts,
             datagrams_to_send) = self._main_loop_f()
            for datagram in datagrams_to_send:
                self._sendto(datagram)

        # Get data from the network
        try:
            data, addr = self.s.recvfrom(BUFFER_SIZE)
        except (socket.timeout):
            pass  # timeout
        except (socket.error) as e:
            logger.warning('Got socket.error when receiving data:\n%s' % e)
        else:
            self._add_capture((time.time(), addr, False, data))
            ip_is_blocked = self.floodbarrier_active and \
                self.floodbarrier.ip_blocked(addr[0])
            if ip_is_blocked:
                import sys
                #                print >>sys.stderr, '>>>>>>>>>>>>>>>>>>', addr
                #                print >>sys.stderr, '>>>>>>>>>>>>>>>>>>', `addr`
                logger.warning("blocked")
                #                print >>sys.stderr, '>>>>>>>>>>>>>>>>>> DONE'
                return
            datagram_received = Datagram(data, addr)
            (self._next_main_loop_call_ts, datagrams_to_send
             ) = self._on_datagram_received_f(datagram_received)
            for datagram in datagrams_to_send:
                self._sendto(datagram)
Example #19
    def __init__(self, dht_addr, state_filename,
                 routing_m_mod, lookup_m_mod,
                 experimental_m_mod,
                 private_dht_name):
        #TODO: don't do this evil stuff!!!
        message.private_dht_name = private_dht_name

        if size_estimation:
            self._size_estimation_file = open('size_estimation.dat', 'w')
        
        
        self.state_filename = state_filename
        saved_id, saved_bootstrap_nodes = state.load(self.state_filename)
        if saved_id:
            self._my_id = saved_id
        else:
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._querier = Querier()
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, 
                                                       saved_bootstrap_nodes)
        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        self._experimental_m = experimental_m_mod.ExperimentalManager(self._my_node.id) 
                  
        current_ts = time.time()
        self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
        self._next_main_loop_call_ts = current_ts
        self._pending_lookups = []
Example #20
 def _try_do_lookup(self):
     queries_to_send = []
     if self._pending_lookups:
         lookup_obj = self._pending_lookups[0]
     else:
         return queries_to_send
     log_distance = lookup_obj.info_hash.log_distance(self._my_id)
     bootstrap_rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                           0,
                                                           True)
     #TODO: get the full bucket
     if bootstrap_rnodes:
         del self._pending_lookups[0]
         # look if I'm tracking this info_hash
         peers = self._tracker.get(lookup_obj.info_hash)
         callback_f = lookup_obj.callback_f
         if peers and callback_f and callable(callback_f):
             callback_f(lookup_obj.lookup_id, peers)
         # do the lookup
         queries_to_send = lookup_obj.start(bootstrap_rnodes)
     else:
         next_lookup_attempt_ts = time.time() + .2
         self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                            next_lookup_attempt_ts)
     return queries_to_send
Example #21
    def _clean_peer_caches(self):
        oldest_valid_ts = time.time() - CACHE_VALID_PERIOD

        for key, values in self._cached_lookups.items():
            ts, _ = values
            if ts < oldest_valid_ts:
                del self._cached_lookups[key]
Example #22
    def __init__(self, version_label,
                 my_node, conf_path,
                 routing_m_mod, lookup_m_mod,
                 experimental_m_mod,
                 private_dht_name,
                 bootstrap_mode):
        self.bootstrapper = bootstrap.OverlayBootstrapper(conf_path)
        my_addr = my_node.addr
        self._my_id = my_node.id # id indicated by user 
        if not self._my_id:
            self._my_id = identifier.RandomId()  # random id
        self._my_node = Node(my_addr, self._my_id, version=version_label)
        self.msg_f = message.MsgFactory(version_label, self._my_id,
                                        private_dht_name)
        self._querier = Querier()
        self._routing_m = routing_m_mod.RoutingManager(
            self._my_node, self.msg_f, self.bootstrapper)

        self._responder = responder.Responder(self._my_id, self._routing_m,
                                              self.msg_f, bootstrap_mode)
        self._tracker = self._responder._tracker
        
        self._lookup_m = lookup_m_mod.LookupManager(self._my_id, self.msg_f,
                                                    self.bootstrapper)
        self._experimental_m = experimental_m_mod.ExperimentalManager(
            self._my_node.id, self.msg_f) 
                  
        current_ts = time.time()
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
        self._next_main_loop_call_ts = current_ts
        self._cached_lookups = []
Example #23
 def _clean_peer_caches(self):
     oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
     
     for key, values in self._cached_lookups.items():
         ts, _ = values
         if ts < oldest_valid_ts:
             del self._cached_lookups[key]
Example #24
 def __init__(self, checking_period=CHECKING_PERIOD,
              max_packets_per_period=MAX_PACKETS_PER_PERIOD,
              blocking_period=BLOCKING_PERIOD):
     self.checking_period = checking_period
     self.max_packets_per_period = max_packets_per_period
     self.blocking_period = blocking_period
     self.last_half_period_time = time.time()
     self.ip_registers = [HalfPeriodRegister(), HalfPeriodRegister()]
     self.blocked_ips = {}
Example #25
 def put_cached_lookup(self, cached_lookup):
     # first remove expired cached lookups
     for i in range(len(self.cached_lookups), 0, -1):
         if time.time() > (self.cached_lookups[i-1].start_ts +
                           self.validity_time):
             del self.cached_lookups[i-1]
     self.cached_lookups.append(cached_lookup)
Example #26
    def __init__(self, version_label, my_node, conf_path, routing_m_mod,
                 lookup_m_mod, experimental_m_mod, private_dht_name,
                 bootstrap_mode):
        self.bootstrapper = bootstrap.OverlayBootstrapper(conf_path)
        my_addr = my_node.addr
        self._my_id = my_node.id  # id indicated by user
        if not self._my_id:
            self._my_id = identifier.RandomId()  # random id
        self._my_node = Node(my_addr, self._my_id, version=version_label)
        self.msg_f = message.MsgFactory(version_label, self._my_id,
                                        private_dht_name)
        self._querier = Querier()
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       self.msg_f,
                                                       self.bootstrapper)

        self._responder = responder.Responder(self._my_id, self._routing_m,
                                              self.msg_f, bootstrap_mode)
        self._tracker = self._responder._tracker

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id, self.msg_f,
                                                    self.bootstrapper)
        self._experimental_m = experimental_m_mod.ExperimentalManager(
            self._my_node.id, self.msg_f)

        current_ts = time.time()
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
        self._next_main_loop_call_ts = current_ts
        self._cached_lookups = []
Example #27
    def print_table(self):
        header_format = '%6s %40s %10s %15s %5s %4s %8s'
        data_format =   '%6d %40r %10s %15s %5d %4d %9.2f'
        header = header_format % (
            'bucket', 'id', 'version', 'ip', 'port', 'rtt', 'uptime(h)')
        #TODO: format uptime as hh:mm
        thick_line = '=' * 95
        thin_line = '-' * 95
        print thick_line
        print data_format % (-1, self.my_node.id,
                             version_repr(self.my_node.version),
                             self.my_node.addr[0], self.my_node.addr[1],
                             0, 0)
        print thin_line
        print header
        print thin_line

        current_time = time.time()
        for rnode in self.get_main_rnodes():
            if rnode.rtt == 99:
                rtt = rnode.real_rtt
            else:
                rtt = rnode.rtt
            print data_format % (
                self.my_node.id.distance(rnode.id).log,
                rnode.id, version_repr(rnode.version),
                rnode.addr[0], rnode.addr[1],
                rtt * 1000,
                (current_time - rnode.creation_ts)/3600)
        print thin_line
        print header
        print thick_line
Example #28
    def test_complete_coverage(self):

        eq_(self.rt.get_closest_rnodes(76, 8, False), [tc.CLIENT_NODE])
        log_distance = self.my_node.log_distance(tc.SERVER_NODE)
        str(self.rt.get_sbucket(log_distance).main)
        repr(self.rt)

        ok_(Bucket(1) != Bucket(2))

        buckets = [Bucket(2), Bucket(2)]
        buckets[0].add(tc.CLIENT_NODE.get_rnode(1))
        buckets[1].add(tc.CLIENT_NODE.get_rnode(1))
        buckets[0].add(tc.NODES[0].get_rnode(1))
        buckets[1].add(tc.NODES[1].get_rnode(1))
        ok_(buckets[0] != buckets[1])

        eq_(buckets[0].get_freshest_rnode(), tc.NODES[0])
        stalest_rnode = buckets[0].get_stalest_rnode()
        eq_(stalest_rnode, tc.CLIENT_NODE)
        # Dangerous!!!
        stalest_rnode.last_seen = time.time()
        eq_(buckets[0].get_freshest_rnode(), tc.CLIENT_NODE)

        eq_(self.rt.find_next_bucket_with_room_index(tc.CLIENT_NODE), 0)
        eq_(self.rt.find_next_bucket_with_room_index(log_distance=6), 7)
        eq_(self.rt.find_next_bucket_with_room_index(log_distance=106), 107)

        self.rt.print_stats()
Example #29
 def ip_blocked(self, ip):
     current_time = time.time()
     if current_time > self.last_half_period_time + self.checking_period / 2:
         self.half_period_timeout = current_time
         self.ip_registers = [self.ip_registers[1], HalfPeriodRegister()]
         if current_time > self.last_half_period_time + self.checking_period:
             self.ip_registers = [
                 self.ip_registers[1],
                 HalfPeriodRegister()
             ]
     self.ip_registers[1].register_ip(ip)
     num_packets = self.ip_registers[0].get_num_packets(
         ip) + self.ip_registers[1].get_num_packets(ip)
     if num_packets > self.max_packets_per_period:
         logger.debug('Got %d packets: blocking %r...' % (num_packets, ip))
         self.blocked_ips[ip] = current_time + self.blocking_period
         return True
     if ip in self.blocked_ips:
         logger.debug('Ip %r (%d) currently blocked' % (ip, num_packets))
         if current_time > self.blocked_ips[ip]:
             logger.debug('Block for %r (%d) has expired: unblocking...' %
                          (ip, num_packets))
             del self.blocked_ips[ip]
             return False
         else:
             return True
     else:
         return False
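
Read together with the FloodBarrier constructor in Example #24 and the reactor in Example #16, ip_blocked() is called once per received datagram with the sender's IP and returns True when traffic from that host should be dropped. A hedged usage sketch follows; the module and class name (floodbarrier.FloodBarrier) and the socket setup are assumptions for illustration.

import socket

from floodbarrier import FloodBarrier  # assumed module/class name

barrier = FloodBarrier()  # module defaults: CHECKING_PERIOD, etc.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', 7000))
sock.settimeout(0.1)

while True:
    try:
        data, addr = sock.recvfrom(1024)
    except socket.timeout:
        continue
    if barrier.ip_blocked(addr[0]):
        continue  # drop datagrams from hosts that are flooding us
    # ... handle the datagram ...
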
Example #30
    def test_complete_coverage(self):

        eq_(self.rt.get_closest_rnodes(76, 8, False), [tc.CLIENT_NODE])
        log_distance = self.my_node.log_distance(tc.SERVER_NODE)
        str(self.rt.get_sbucket(log_distance).main)
        repr(self.rt)
        
        ok_(Bucket(1) != Bucket(2))

        buckets = [Bucket(2), Bucket(2)]
        buckets[0].add(tc.CLIENT_NODE.get_rnode(1))
        buckets[1].add(tc.CLIENT_NODE.get_rnode(1))
        buckets[0].add(tc.NODES[0].get_rnode(1))
        buckets[1].add(tc.NODES[1].get_rnode(1))
        ok_(buckets[0] != buckets[1])

        eq_(buckets[0].get_freshest_rnode(), tc.NODES[0])
        stalest_rnode = buckets[0].get_stalest_rnode()
        eq_(stalest_rnode, tc.CLIENT_NODE)
        # Dangerous!!!
        stalest_rnode.last_seen = time.time()
        eq_(buckets[0].get_freshest_rnode(), tc.CLIENT_NODE)
            
        eq_(self.rt.find_next_bucket_with_room_index(tc.CLIENT_NODE), 0)
        eq_(self.rt.find_next_bucket_with_room_index(log_distance=6), 7)
        eq_(self.rt.find_next_bucket_with_room_index(log_distance=106), 107)

        self.rt.print_stats()
Example #31
 def _try_do_lookup(self):
     queries_to_send = []
     if self._pending_lookups:
         lookup_obj = self._pending_lookups[0]
     else:
         return queries_to_send
     log_distance = lookup_obj.info_hash.log_distance(self._my_id)
     bootstrap_rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                           0,
                                                           True)
     #TODO: get the full bucket
     if bootstrap_rnodes:
         del self._pending_lookups[0]
         # look if I'm tracking this info_hash
         peers = self._tracker.get(lookup_obj.info_hash)
         callback_f = lookup_obj.callback_f
         if peers and callback_f and callable(callback_f):
             callback_f(lookup_obj.lookup_id, peers, None)
         # do the lookup
         queries_to_send = lookup_obj.start(bootstrap_rnodes)
     else:
         next_lookup_attempt_ts = time.time() + .2
         self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                            next_lookup_attempt_ts)
     return queries_to_send
Example #32
    def run_one_step(self):
        """Main loop activated by calling self.start()"""

        # Deal with call_asap requests
        #TODO: retry for 5 seconds if no msgs_to_send (inside controller?)
        call_asap_tuple = None
        self._lock.acquire()
        try:
            if self._call_asap_queue:
                call_asap_tuple = self._call_asap_queue.pop(0)
        finally:
            self._lock.release()
        if call_asap_tuple:
            callback_f, args, kwds = call_asap_tuple
            datagrams_to_send = callback_f(*args, **kwds)
            for datagram in datagrams_to_send:
                self._sendto(datagram)

        # Call main_loop
        if time.time() >= self._next_main_loop_call_ts:
            (self._next_main_loop_call_ts,
             datagrams_to_send) = self._main_loop_f()
            for datagram in datagrams_to_send:
                self._sendto(datagram)

        # Get data from the network
        try:
            data, addr = self.s.recvfrom(BUFFER_SIZE)
        except (socket.timeout):
            pass #timeout
        except (socket.error) as e:
            logger.warning(
                'Got socket.error when receiving data:\n%s' % e)
Example #33
 def put_cached_lookup(self, cached_lookup):
     # first remove expired cached lookups
     for i in range(len(self.cached_lookups), 0, -1):
         if time.time() > (self.cached_lookups[i - 1].start_ts +
                           self.validity_time):
             del self.cached_lookups[i - 1]
     self.cached_lookups.append(cached_lookup)
Example #34
 def get_timeout_queries(self):
     """
     Return a tuple with two items: (1) timestamp for next timeout, (2)
     list of message.OutgoingQueryBase objects of those queries that have
     timed out.
     
     """
     current_ts = time.time()
     timeout_queries = []
     while self._timeouts:
         timeout_ts, query = self._timeouts[0]
         if current_ts < timeout_ts:
             next_timeout_ts = timeout_ts
             break
         self._timeouts = self._timeouts[1:]
         addr_query_list = self._pending[query.dst_node.addr]
         popped_query = addr_query_list.pop(0)
         assert query == popped_query
         if not addr_query_list:
             # The list is empty. Remove the whole list.
             del self._pending[query.dst_node.addr]
         if not query.got_response:
             timeout_queries.append(query)
     if not self._timeouts:
         next_timeout_ts = current_ts + TIMEOUT_DELAY
     return next_timeout_ts, timeout_queries
Example #35
    def get_timeout_queries(self):
        """
        Return a tuple with two items: (1) timestamp for next timeout, (2)
        list of message.OutgoingQueryBase objects of those queries that have
        timed out.

        """
        current_ts = time.time()
        timeout_queries = []
        while self._timeouts:
            timeout_ts, query = self._timeouts[0]
            if current_ts < timeout_ts:
                next_timeout_ts = timeout_ts
                break
            self._timeouts = self._timeouts[1:]
            addr_query_list = self._pending[query.dst_node.addr]
            popped_query = addr_query_list.pop(0)
            assert query == popped_query
            if not addr_query_list:
                # The list is empty. Remove the whole list.
                del self._pending[query.dst_node.addr]
            if not query.got_response:
                timeout_queries.append(query)
        if not self._timeouts:
            next_timeout_ts = current_ts + TIMEOUT_DELAY
        return next_timeout_ts, timeout_queries
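
The main_loop examples above (Example #4 and Example #7) show the intended calling pattern for get_timeout_queries(): check it only once the previously returned timestamp has passed, then hand every expired query to the timeout handler and collect the follow-up queries it produces. A small sketch of that caller side; handle_timeouts and on_timeout are illustrative names.

import time

def handle_timeouts(querier, on_timeout, next_timeout_ts):
    # Returns the updated next-timeout timestamp plus any follow-up queries
    # produced by the timeout handler (e.g. replacement pings).
    queries_to_send = []
    if time.time() >= next_timeout_ts:
        next_timeout_ts, timeout_queries = querier.get_timeout_queries()
        for query in timeout_queries:
            queries_to_send.extend(on_timeout(query))
    return next_timeout_ts, queries_to_send
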
Example #36
    def __init__(self, dht_addr, state_path,
                 routing_m_mod, lookup_m_mod,
                 private_dht_name):
        #TODO: don't do this evil stuff!!!
        message.private_dht_name = private_dht_name
        
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, 
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        
        self._running = False
Example #37
    def _main_loop(self):
        current_time = time.time()
        # Routing table
        if current_time > self._next_maintenance_ts:
            (maintenance_delay,
             queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = (current_time
                                         + maintenance_delay)
        # Auto-save routing table
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY

        # Schedule next call
        delay = (min(self._next_maintenance_ts, self._next_save_state_ts)
                 - current_time)
        self._reactor.call_later(delay, self._main_loop)
Example #38
    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod):
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, 
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        
        self._running = False
Example #39
    def run_one_step(self):
        """Main loop activated by calling self.start()"""

        # Deal with call_asap requests
        #TODO: retry for 5 seconds if no msgs_to_send (inside controller?)
        call_asap_tuple = None
        self._lock.acquire()
        try:
            if self._call_asap_queue:
                call_asap_tuple = self._call_asap_queue.pop(0)
        finally:
            self._lock.release()
        if call_asap_tuple:
            callback_f, args, kwds = call_asap_tuple
            datagrams_to_send = callback_f(*args, **kwds)
            for datagram in datagrams_to_send:
                self._sendto(datagram)

        # Call main_loop
        if time.time() >= self._next_main_loop_call_ts:
            (self._next_main_loop_call_ts,
             datagrams_to_send) = self._main_loop_f()
            for datagram in datagrams_to_send:
                self._sendto(datagram)

        # Get data from the network
        try:
            data, addr = self.s.recvfrom(BUFFER_SIZE)
        except (socket.timeout):
            pass  #timeout
        except (socket.error) as e:
            logger.warning('Got socket.error when receiving data:\n%s' % e)
Example #40
    def _main_loop(self):
        current_time = time.time()
        # Routing table
        if current_time > self._next_maintenance_ts:
            (maintenance_delay,
             queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = (current_time
                                         + maintenance_delay)
        # Auto-save routing table
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY

        # Schedule next call
        delay = (min(self._next_maintenance_ts, self._next_save_state_ts)
                 - current_time)
        self._reactor.call_later(delay, self._main_loop)
Example #41
    def _update_rnode_on_response_received(self, rnode, rtt):
        """Register a reply from rnode.

        You should call this method when receiving a response from this rnode.

        """
        rnode.rtt = rtt
        current_time = time.time()
        #rnode._reset_refresh_task()
        if rnode.in_quarantine:
            rnode.in_quarantine = \
                rnode.last_action_ts < current_time - QUARANTINE_PERIOD

        rnode.last_action_ts = current_time
        rnode.num_responses += 1
        rnode.add_event(time.time(), node.RESPONSE)
        rnode.last_seen = current_time
Example #42
 def register_query(self, query, timeout_task):
     query.tid = self._next_tid()
     logger.debug('sending to node: %r\n%r' % (query.dstnode, query.msg))
     query.timeout_task = timeout_task
     query.query_ts = time.time()
     self.pending.setdefault(query.dstnode.addr, []).append(query)
     bencoded_msg = query.msg.encode(query.tid)
     return bencoded_msg
Example #43
 def get_stalest_rnode(self):
     oldest_ts = time.time()
     stalest_rnode = None
     for rnode in self.rnodes:
         if rnode.last_seen < oldest_ts:
             oldest_ts = rnode.last_seen
             stalest_rnode = rnode
     return stalest_rnode
Example #44
 def signal(self, *args):
     """Received signal, queue to be forwarded later."""
     if self.block:
         return
     self.waitUntil = time() + self.delay
     self.args = args
     self.timers += 1
     QtCore.QTimer.singleShot((self.delay*1000)+1, self.tryEmit)
Example #45
    def _update_rnode_on_response_received(self, rnode, rtt):
        """Register a reply from rnode.

        You should call this method when receiving a response from this rnode.

        """
        rnode.rtt = rtt
        current_time = time.time()
        #rnode._reset_refresh_task()
        if rnode.in_quarantine:
            rnode.in_quarantine = \
                rnode.last_action_ts < current_time - QUARANTINE_PERIOD
                
        rnode.last_action_ts = current_time
        rnode.num_responses += 1
        rnode.add_event(time.time(), node.RESPONSE)
        rnode.last_seen = current_time
Example #46
 def _find_related_query(self, msg):
     addr = msg.src_addr
     try:
         addr_query_list = self._pending[addr]
     except (KeyError):
         logger.warning('No pending queries for %s', addr)
         return # Ignore response
     for related_query in addr_query_list:
         if related_query.match_response(msg):
             logger.debug(
                 'response node: %s, related query: (%s), delay %f s. %r' % (
                     `addr`,
                     `related_query.query`,
                     time.time() - related_query.sending_ts,
                     related_query.lookup_obj))
             # Do not delete this query (the timeout will delete it)
             return related_query
Example #47
 def get_stalest_rnode(self):
     oldest_ts = time.time()
     stalest_rnode = None
     for rnode in self.rnodes:
         if rnode.last_seen < oldest_ts:
             oldest_ts = rnode.last_seen
             stalest_rnode = rnode
     return stalest_rnode
Example #48
 def register_query(self, query, timeout_task):
     query.tid = self._next_tid()
     logger.debug('sending to node: %r\n%r' % (query.dstnode, query.msg))
     query.timeout_task = timeout_task
     query.query_ts = time.time()
     self.pending.setdefault(query.dstnode.addr, []).append(query)
     bencoded_msg = query.msg.encode(query.tid)
     return bencoded_msg
Example #49
 def register_query(self, query, timeout_task):
     query.tid = self._next_tid()
     logger.debug("sending to node: %r\n%r" % (query.dstnode, query.msg))
     query.timeout_task = timeout_task
     query.query_ts = time.time()
     # if node is not in the dictionary, it will create an empty list
     self.pending.setdefault(query.dstnode.addr, []).append(query)
     bencoded_msg = query.msg.encode(query.tid)
     return bencoded_msg
Example #50
 def _cleanup_key(self, k):
     '''
     Clean up the list as side effect.
     '''
     ts_peers = self._tracker_dict.get(k, None)
     oldest_valid_ts = time.time() - self.validity_period
     while ts_peers and ts_peers[0][0] < oldest_valid_ts:
         del ts_peers[0]
         self.num_peers -= 1
Example #51
 def _cleanup_key(self, k):
     '''
     Clean up the list as side effect.
     '''
     ts_peers = self._tracker_dict.get(k, None)
     oldest_valid_ts = time.time() - self.validity_period
     while ts_peers and ts_peers[0][0] < oldest_valid_ts:
         del ts_peers[0]
         self.num_peers -= 1
Example #52
 def register_query(self, query, timeout_task):
     query.tid = self._next_tid()
     logger.debug('sending to node: %r\n%r' % (query.dstnode, query.msg))
     query.timeout_task = timeout_task
     query.query_ts = time.time()
     # if node is not in the dictionary, it will create an empty list
     self.pending.setdefault(query.dstnode.addr, []).append(query)
     bencoded_msg = query.msg.encode(query.tid)
     return bencoded_msg
Example #53
    def test_adding_and_removing_node(self):
        # The routing table is initially empty
        eq_(self.controller._routing_m.get_main_rnodes(), [])

        q = self.controller.msg_f.outgoing_ping_query(tc.SERVER_NODE)
        expected_ts, expected_datagrams = self.querier2.register_queries([q])
        # main_loop is called by reactor.start()
        # It returns a maintenance ping
        ts, datagrams = self.controller.main_loop()
        #FIXME: assert_almost_equal(ts, expected_ts)
        eq_(len(datagrams), 1)
        eq_(datagrams[0], expected_datagrams[0])
        time.sleep((ts - time.time()) / 2)
        # SERVER_NODE gets msg and replies before the timeout
        tid = self.servers_msg_f.incoming_msg(
            Datagram(datagrams[0].data, tc.CLIENT_ADDR)).tid
        data = self.servers_msg_f.outgoing_ping_response(
            tc.CLIENT_NODE).stamp(tid)
        eq_(self.controller._routing_m.get_main_rnodes(), [])
        datagram = message.Datagram(data, tc.SERVER_ADDR)
        self.controller.on_datagram_received(datagram)
        # SERVER_NODE is added to the routing table
        eq_(self.controller._routing_m.get_main_rnodes(), [tc.SERVER_NODE])

        time.sleep((ts - time.time()))
        # main_loop is called to trigger timeout
        # It returns a maintenance lookup
        ts, datagrams = self.controller.main_loop()
        q = self.controller.msg_f.outgoing_find_node_query(
            tc.SERVER_NODE, self.my_id, None)
        expected_ts, expected_datagrams = self.querier2.register_queries([q])
        #FIXME: assert_almost_equal(ts, expected_ts)
        #FIXME: eq_(len(datagrams), 1)
        #FIXME: eq_(datagrams[0], expected_datagrams[0])

        time.sleep(ts - time.time())
        # main_loop is called to trigger timeout
        # It triggers a timeout (removing SERVER_NODE from the routing table)
        # and returns a maintenance ping
        ts, datagrams = self.controller.main_loop()
        #FIXME: eq_(self.controller._routing_m.get_main_rnodes(), [])
        # No reply for this query
        #this call should trigger timeout
        self.controller.main_loop()
Example #54
    def test_adding_and_removing_node(self):
        # The routing table is initially empty
        eq_(self.controller._routing_m.get_main_rnodes(), [])

        q = self.controller.msg_f.outgoing_ping_query(tc.SERVER_NODE)
        expected_ts, expected_datagrams = self.querier2.register_queries([q])
        # main_loop is called by reactor.start()
        # It returns a maintenance ping
        ts, datagrams = self.controller.main_loop()
        #FIXME: assert_almost_equal(ts, expected_ts)
        eq_(len(datagrams), 1)
        eq_(datagrams[0], expected_datagrams[0])
        time.sleep((ts - time.time()) / 2)
        # SERVER_NODE gets msg and replies before the timeout
        tid = self.servers_msg_f.incoming_msg(
            Datagram(datagrams[0].data, tc.CLIENT_ADDR)).tid
        data = self.servers_msg_f.outgoing_ping_response(
            tc.CLIENT_NODE).stamp(tid)
        eq_(self.controller._routing_m.get_main_rnodes(), [])
        datagram = message.Datagram(data, tc.SERVER_ADDR)
        self.controller.on_datagram_received(datagram)
        # SERVER_NODE is added to the routing table
        eq_(self.controller._routing_m.get_main_rnodes(), [tc.SERVER_NODE])

        time.sleep((ts - time.time()))
        # main_loop is called to trigger timeout
        # It returns a maintenance lookup
        ts, datagrams = self.controller.main_loop() 
        q = self.controller.msg_f.outgoing_find_node_query(tc.SERVER_NODE,
                                                           self.my_id, None)
        expected_ts, expected_datagrams = self.querier2.register_queries([q])
        #FIXME: assert_almost_equal(ts, expected_ts)
        #FIXME: eq_(len(datagrams), 1)
        #FIXME: eq_(datagrams[0], expected_datagrams[0])
        
        time.sleep(ts - time.time())
        # main_loop is called to trigger timeout
        # It triggers a timeout (removing SERVER_NODE from the routing table)
        # and returns a maintenance ping
        ts, datagrams = self.controller.main_loop()
        #FIXME: eq_(self.controller._routing_m.get_main_rnodes(), [])
        # No reply for this query
        #this call should trigger timeout
        self.controller.main_loop()
Example #55
 def __init__(self, delay, callback_fs, *args, **kwds):
     self.delay = delay
     if callable(callback_fs):
         self.callback_fs = [callback_fs]
     else:
         self.callback_fs = callback_fs
     self.args = args
     self.kwds = kwds
     self.call_time = time.time() + self.delay
     self._cancelled = False
Example #56
 def flush(self):
     """If there is a signal queued up, send it now."""
     if self.args is None or self.block:
         return False
     #self.emit(self.signal, *self.args)
     self.sigDelayed.emit(self.args)
     self.args = None
     self.timer.stop()
     self.lastFlushTime = time()
     return True
Example #57
 def __init__(self, msg_f, my_id, lookup_id,
              info_hash, callback_f,
              bt_port=0):
     self._my_id = my_id
     self.lookup_id = lookup_id
     self.info_hash = info_hash
     self.callback_f = callback_f
     self.bt_port = bt_port
     self._msg_factory = msg_f.outgoing_get_peers_query
     self.start_ts = time.time()