Example #1
0
class _TestSocketErrors:
    def _main_loop(self):
        return time.time() + tc.TASK_INTERVAL * 10000, []

    def _main_loop_send(self):
        self.main_loop_send_called = True
        logger.critical('main loop returns datagram!!!!')
        return time.time() + tc.TASK_INTERVAL * 10000, [DATAGRAM1]

    def _callback(self, *args, **kwargs):
        self.callback_fired = True

    def _on_datagram_received(self, datagram):
        return time.time() + 100, []

    def setup(self):
        self.main_loop_send_called = False
        self.callback_fired = False
        self.r = ThreadedReactor(self._main_loop_send, tc.CLIENT_PORT,
                                 self._on_datagram_received)
        self.r.s = _SocketErrorMock()
        #self.r.listen_udp(tc.CLIENT_PORT, lambda x,y:None)

    def test_sendto(self):
        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
        assert not self.main_loop_send_called
        #        self.r.start()
        while not self.r.running:
            time.sleep(tc.TASK_INTERVAL)
        while not self.main_loop_send_called:
            time.sleep(tc.TASK_INTERVAL)
        assert self.r.s.error_raised
        assert self.r.running  # reactor didn't crash

    def _test_recvfrom(self):
        #self.r.start()
        r2 = ThreadedReactor(self._main_loop,
                             tc.CLIENT_PORT,
                             self._on_datagram_received,
                             task_interval=tc.TASK_INTERVAL)
        r2.s = _SocketErrorMock()
        assert not r2.running
        #        r2.start()
        assert r2.running
        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
        # r2 will call recvfrom (which raises socket.error)
        while not r2.s.error_raised:
            time.sleep(tc.TASK_INTERVAL)
        assert r2.running  # the error is ignored
        ok_(not self.callback_fired)
#        r2.stop()

    def _test_sendto_too_large_data_string(self):
        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
        self.r.sendto('z' * 12345, tc.NO_ADDR)

    def teardown(self):
        pass
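Note: the tests above install a _SocketErrorMock as the reactor's socket, but the mock itself is not part of this snippet. Below is a minimal sketch consistent with how it is used (only sendto, recvfrom and the error_raised flag), assuming the reactor treats it like a plain UDP socket; the real helper in the test suite may differ.

import socket

class _SocketErrorMock(object):
    # Hypothetical stand-in: only the attributes the tests touch are
    # sketched (sendto, recvfrom, error_raised); the real mock is not
    # shown in this snippet.

    def __init__(self):
        self.error_raised = False

    def sendto(self, data, addr):
        # Behave like a broken socket: record the failure, then raise.
        self.error_raised = True
        raise socket.error('sendto failed (mock)')

    def recvfrom(self, buffer_size):
        self.error_raised = True
        raise socket.error('recvfrom failed (mock)')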
Example #2
0
    def __init__(self, dht_addr):
        self.my_addr = dht_addr
        self.my_id = identifier.RandomId()
        self.my_node = Node(self.my_addr, self.my_id)
        self.tracker = tracker.Tracker()
        self.token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        self.rpc_m = RPCManager(self.reactor, self.my_addr[1])
        self.querier = Querier(self.rpc_m, self.my_id)
        self.routing_m = RoutingManager(self.my_node, self.querier,
                                        bootstrap_nodes)
        self.responder = Responder(self.my_id, self.routing_m,
                                   self.tracker, self.token_m)

        self.responder.set_on_query_received_callback(
            self.routing_m.on_query_received)
        self.querier.set_on_response_received_callback(
            self.routing_m.on_response_received)
        self.querier.set_on_error_received_callback(
            self.routing_m.on_error_received)
        self.querier.set_on_timeout_callback(self.routing_m.on_timeout)
        self.querier.set_on_nodes_found_callback(self.routing_m.on_nodes_found)

        self.routing_m.do_bootstrap()

        self.rpc_m.add_msg_callback(QUERY,
                                    self.responder.on_query_received)

        self.lookup_m = LookupManager(self.my_id, self.querier,
                                      self.routing_m)
Example #3
0
    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod):
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, 
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        
        self._running = False
Example #4
0
    def __init__(self, dht_addr):
        my_addr = dht_addr
        my_id = identifier.RandomId()
        my_node = Node(my_addr, my_id)
        tracker_ = tracker.Tracker()
        token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        rpc_m = RPCManager(self.reactor, my_addr[1])
        querier_ = Querier(rpc_m, my_id)
        routing_m = RoutingManager(my_node, querier_, bootstrap_nodes)
        responder_ = Responder(my_id, routing_m, tracker_, token_m)

        responder_.set_on_query_received_callback(routing_m.on_query_received)
        querier_.set_on_response_received_callback(
            routing_m.on_response_received)
        querier_.set_on_error_received_callback(routing_m.on_error_received)
        querier_.set_on_timeout_callback(routing_m.on_timeout)
        querier_.set_on_nodes_found_callback(routing_m.on_nodes_found)

        routing_m.do_bootstrap()

        rpc_m.add_msg_callback(QUERY, responder_.on_query_received)

        self.lookup_m = LookupManager(my_id, querier_, routing_m)
        self._routing_m = routing_m
Example #5
0
class _TestError:

    def _main_loop(self):
        return time.time() + 100, []

    def _very_long_callback(self):
        time.sleep(tc.TASK_INTERVAL*15)
        return time.time() + 100, []

    def _on_datagram_received(self, datagram):
        return time.time() + 100, []

    def _crashing_callback(self):
        raise Exception, 'Crash testing'

    def test_failed_join(self):
        self.lock = threading.RLock()
        self.reactor = ThreadedReactor(self._main_loop,
                                       tc.CLIENT_PORT,
                                       self._on_datagram_received,
                                       task_interval=tc.TASK_INTERVAL)
        self.reactor.s = _SocketMock(tc.TASK_INTERVAL)
#        self.reactor.start()
        self.reactor.call_asap(self._very_long_callback)
        time.sleep(tc.TASK_INTERVAL*2)
        assert_raises(Exception, self.reactor.stop)
Example #6
0
class TestSocketError:

    def _main_loop(self):
        return time.time() + tc.TASK_INTERVAL*10000, [DATAGRAM1]

    def _on_datagram_received(self, datagram):
        return
    
    def setup(self):
        self.main_loop_call_counter = 0
        self.callback_values = []
        self.datagrams_received = []
        
        self.reactor = ThreadedReactor(self._main_loop,
                                       tc.CLIENT_PORT,
                                       self._on_datagram_received,
                                       task_interval=tc.TASK_INTERVAL)
        self.reactor.s = _SocketMock()

    def test_socket_error(self):
        self.reactor.s.raise_error_on_next_sendto()
        self.reactor.run_one_step()
        self.reactor.s.raise_error_on_next_recvfrom()
        self.reactor.run_one_step()

    def teardown(self):
        return
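The _SocketMock used by test_socket_error is also not shown; the test only needs a socket stand-in that can be armed to fail exactly once. A sketch of that one-shot behaviour, assuming the reactor calls sendto(data, addr) and recvfrom(buffer_size) during run_one_step; the real mock may do more.

import socket

class _SocketMock(object):
    # Hypothetical sketch covering only what test_socket_error exercises:
    # one-shot failure switches for sendto and recvfrom.

    def __init__(self, task_interval=None):
        self._fail_next_sendto = False
        self._fail_next_recvfrom = False

    def raise_error_on_next_sendto(self):
        self._fail_next_sendto = True

    def raise_error_on_next_recvfrom(self):
        self._fail_next_recvfrom = True

    def sendto(self, data, addr):
        if self._fail_next_sendto:
            self._fail_next_sendto = False
            raise socket.error('sendto failed (mock)')
        return len(data)

    def recvfrom(self, buffer_size):
        if self._fail_next_recvfrom:
            self._fail_next_recvfrom = False
            raise socket.error('recvfrom failed (mock)')
        # Nothing queued in this sketch; a real mock would block or hand
        # back injected datagrams.
        return '', ('0.0.0.0', 0)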
Example #7
0
class _TestSocketErrors:

    def _main_loop(self): 
        return time.time() + tc.TASK_INTERVAL*10000, []
   
    def _main_loop_send(self):
        self.main_loop_send_called = True
        logger.critical('main loop returns datagram!!!!')
        return time.time() + tc.TASK_INTERVAL*10000, [DATAGRAM1]
   
    def _callback(self, *args, **kwargs):
        self.callback_fired = True

    def _on_datagram_received(self, datagram):
        return time.time() + 100, []

    def setup(self):
        self.main_loop_send_called = False
        self.callback_fired = False
        self.r = ThreadedReactor(self._main_loop_send, tc.CLIENT_PORT,
                                 self._on_datagram_received)
        self.r.s = _SocketErrorMock()
        #self.r.listen_udp(tc.CLIENT_PORT, lambda x,y:None)

    def test_sendto(self):
        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
        assert not self.main_loop_send_called
#        self.r.start()
        while not self.r.running:
            time.sleep(tc.TASK_INTERVAL)
        while not self.main_loop_send_called:
            time.sleep(tc.TASK_INTERVAL)
        assert self.r.s.error_raised
        assert self.r.running # reactor didn't crash

    def _test_recvfrom(self):
        #self.r.start()
        r2 = ThreadedReactor(self._main_loop, tc.CLIENT_PORT,
                             self._on_datagram_received,
                             task_interval=tc.TASK_INTERVAL)
        r2.s = _SocketErrorMock()
        assert not r2.running
#        r2.start()
        assert r2.running
        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
        # r2 will call recvfrom (which raises socket.error)
        while not r2.s.error_raised:
            time.sleep(tc.TASK_INTERVAL)
        assert r2.running # the error is ignored
        ok_(not self.callback_fired)
#        r2.stop()

    def _test_sendto_too_large_data_string(self):
        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
        self.r.sendto('z'*12345, tc.NO_ADDR)

    def teardown(self):
        pass
Example #8
0
 def setup(self):
     self.main_loop_call_counter = 0
     self.callback_values = []
     self.datagrams_received = []
     
     self.reactor = ThreadedReactor(self._main_loop,
                                    tc.CLIENT_PORT,
                                    self._on_datagram_received,
                                    task_interval=tc.TASK_INTERVAL)
     self.reactor.s = _SocketMock()
Example #9
0
 def setup(self):
     self.lock = threading.Lock()
     self.datagrams_received = []
     self.callback_order = []
     self.client_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
     self.server_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
     self.client_r.listen_udp(tc.CLIENT_ADDR[1], self.on_datagram_received)
     self.server_r.listen_udp(tc.SERVER_ADDR[1], self.on_datagram_received)
     self.client_r.start()
     self.server_r.start()
Example #10
0
 def setup(self):
     self.lock = threading.Lock()
     self.datagrams_received = []
     self.callback_order = []
     self.client_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
     self.server_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
     self.client_r.listen_udp(tc.CLIENT_ADDR[1], self.on_datagram_received)
     self.server_r.listen_udp(tc.SERVER_ADDR[1], self.on_datagram_received)
     self.client_r.start()
     self.server_r.start()
Example #11
0
 def test_recvfrom(self):
     self.r.start()
     r2 = ThreadedReactor()
     r2.listen_udp(tc.SERVER_ADDR[1], lambda x,y:None)
     logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
     r2.sendto('z', tc.CLIENT_ADDR)
     # self.r will call recvfrom (which raises socket.error)
     time.sleep(tc.TASK_INTERVAL)
     ok_(not self.callback_fired)
     self.r.stop()
Example #12
0
    def test_failed_join(self):
        self.lock = threading.RLock()
        self.reactor = ThreadedReactor(self._main_loop,
                                       tc.CLIENT_PORT,
                                       self._on_datagram_received,
                                       task_interval=tc.TASK_INTERVAL)
        self.reactor.s = _SocketMock(tc.TASK_INTERVAL)
#        self.reactor.start()
        self.reactor.call_asap(self._very_long_callback)
        time.sleep(tc.TASK_INTERVAL*2)
        assert_raises(Exception, self.reactor.stop)
Example #13
0
    def _test_recvfrom(self):
        #self.r.start()
        r2 = ThreadedReactor(self._main_loop, tc.CLIENT_PORT,
                             self._on_datagram_received,
                             task_interval=tc.TASK_INTERVAL)
        r2.s = _SocketErrorMock()
        assert not r2.running
#        r2.start()
        assert r2.running
        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
        # r2 will call recvfrom (which raises socket.error)
        while not r2.s.error_raised:
            time.sleep(tc.TASK_INTERVAL)
        assert r2.running # the error is ignored
        ok_(not self.callback_fired)
Example #14
0
 def test_start_and_stop(self):
     '''
     NOTE:
     This is the only test using real threading
     '''
     self.reactor = ThreadedReactor(self._main_loop,
                                    tc.CLIENT_PORT,
                                    self._on_datagram_received,
                                    task_interval=tc.TASK_INTERVAL)
     ok_(not self.reactor.running)
     self.reactor.start()
     time.sleep(.1)
     ok_(self.reactor.running)
     self.reactor.stop()
     ok_(not self.reactor.running)
Example #15
0
class Controller:
    
    def __init__(self, dht_addr):
        my_addr = dht_addr
        my_id = identifier.RandomId()
        my_node = Node(my_addr, my_id)
        tracker_ = tracker.Tracker()
        token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        rpc_m = RPCManager(self.reactor, my_addr[1])
        querier_ = Querier(rpc_m, my_id)
        routing_m = RoutingManager(my_node, querier_,
                                   bootstrap_nodes)
        responder_ = Responder(my_id, routing_m,
                              tracker_, token_m)

        responder_.set_on_query_received_callback(
            routing_m.on_query_received)
        querier_.set_on_response_received_callback(
            routing_m.on_response_received)
        querier_.set_on_error_received_callback(
            routing_m.on_error_received)
        querier_.set_on_timeout_callback(routing_m.on_timeout)
        querier_.set_on_nodes_found_callback(routing_m.on_nodes_found)

        routing_m.do_bootstrap()

        rpc_m.add_msg_callback(QUERY,
                               responder_.on_query_received)

        self.lookup_m = LookupManager(my_id, querier_,
                                      routing_m)
        self._routing_m = routing_m
        

    def start(self):
        self.reactor.start()

    def stop(self):
        #TODO2: stop each manager
        self.reactor.stop()

    def get_peers(self, info_hash, callback_f, bt_port=None):
        return self.lookup_m.get_peers(info_hash, callback_f, bt_port)

    def print_routing_table_stats(self):
        self._routing_m.print_stats()
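For orientation, the public surface of this Controller is small: start, stop, get_peers and print_routing_table_stats. The usage sketch below is illustrative only; dht_addr, info_hash and on_peers are made-up names, and the shape of the callback is an assumption (the class only forwards callback_f to LookupManager.get_peers).

# Illustrative usage; assumes Controller and identifier are importable
# as in the code above.
controller = Controller(('127.0.0.1', 7000))   # dht_addr: (host, port)
controller.start()

def on_peers(peers):
    # Assumed callback shape; the Controller just forwards it to the
    # lookup manager, so the real signature may differ.
    print 'peers found:', peers

info_hash = identifier.RandomId()   # stand-in for a real torrent infohash
controller.get_peers(info_hash, on_peers)
controller.print_routing_table_stats()
controller.stop()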
Example #16
0
    def __init__(self, dht_addr):
        self.my_addr = dht_addr
        self.my_id = identifier.RandomId()
        self.my_node = Node(self.my_addr, self.my_id)
        self.tracker = tracker.Tracker()
        self.token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        self.rpc_m = RPCManager(self.reactor, self.my_addr[1])
        self.querier = Querier(self.rpc_m, self.my_id)
        self.routing_m = RoutingManager(self.my_node, self.querier,
                                        bootstrap_nodes)
        self.responder = Responder(self.my_id, self.routing_m,
                                   self.tracker, self.token_m)

        self.responder.set_on_query_received_callback(
            self.routing_m.on_query_received)
        self.querier.set_on_response_received_callback(
            self.routing_m.on_response_received)
        self.querier.set_on_error_received_callback(
            self.routing_m.on_error_received)
        self.querier.set_on_timeout_callback(self.routing_m.on_timeout)
        self.querier.set_on_nodes_found_callback(self.routing_m.on_nodes_found)

        self.routing_m.do_bootstrap()

        self.rpc_m.add_msg_callback(QUERY,
                                    self.responder.on_query_received)

        self.lookup_m = LookupManager(self.my_id, self.querier,
                                      self.routing_m)
Example #17
0
    def __init__(self, dht_addr):
        my_addr = dht_addr
        my_id = identifier.RandomId()
        my_node = Node(my_addr, my_id)
        tracker_ = tracker.Tracker()
        token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        rpc_m = RPCManager(self.reactor, my_addr[1])
        querier_ = Querier(rpc_m, my_id)
        routing_m = RoutingManager(my_node, querier_,
                                   bootstrap_nodes)
        responder_ = Responder(my_id, routing_m,
                              tracker_, token_m)

        responder_.set_on_query_received_callback(
            routing_m.on_query_received)
        querier_.set_on_response_received_callback(
            routing_m.on_response_received)
        querier_.set_on_error_received_callback(
            routing_m.on_error_received)
        querier_.set_on_timeout_callback(routing_m.on_timeout)
        querier_.set_on_nodes_found_callback(routing_m.on_nodes_found)

        routing_m.do_bootstrap()

        rpc_m.add_msg_callback(QUERY,
                               responder_.on_query_received)

        self.lookup_m = LookupManager(my_id, querier_,
                                      routing_m)
        self._routing_m = routing_m
Example #18
0
    def __init__(self, dht_addr, state_path,
                 routing_m_mod, lookup_m_mod,
                 private_dht_name):
        #TODO: don't do this evil stuff!!!
        message.private_dht_name = private_dht_name
        
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, 
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        
        self._running = False
Example #19
0
    def setup(self):
        global time
        #TODO: mock time and socket
        #time = minitwisted.time = MockTime()
        #minitwisted.socket = MockSocket()

        self.lock = threading.Lock()
        self.datagrams_received = []
        self.callback_order = []
        self.client_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
        self.server_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
        self.client_s = self.client_r.listen_udp(tc.CLIENT_ADDR[1],
                                                 self.on_datagram_received)
        self.server_s = self.server_r.listen_udp(tc.SERVER_ADDR[1],
                                                 self.on_datagram_received)
        self.client_r.start()
        self.server_r.start()
Example #20
0
 def setup(self):
     global time
     #TODO: mock time and socket
     #time = minitwisted.time = MockTime()
     #minitwisted.socket = MockSocket()
     
     self.lock = threading.Lock()
     self.datagrams_received = []
     self.callback_order = []
     self.client_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
     self.server_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
     self.client_s = self.client_r.listen_udp(tc.CLIENT_ADDR[1],
                                              self.on_datagram_received)
     self.server_s = self.server_r.listen_udp(tc.SERVER_ADDR[1],
                                              self.on_datagram_received)
     self.client_r.start()
     self.server_r.start()
Example #21
0
class Controller:
    
    def __init__(self, dht_addr):
        self.my_addr = dht_addr
        self.my_id = identifier.RandomId()
        self.my_node = Node(self.my_addr, self.my_id)
        self.tracker = tracker.Tracker()
        self.token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        self.rpc_m = RPCManager(self.reactor, self.my_addr[1])
        self.querier = Querier(self.rpc_m, self.my_id)
        self.routing_m = RoutingManager(self.my_node, self.querier,
                                        bootstrap_nodes)
        self.responder = Responder(self.my_id, self.routing_m,
                                   self.tracker, self.token_m)

        self.responder.set_on_query_received_callback(
            self.routing_m.on_query_received)
        self.querier.set_on_response_received_callback(
            self.routing_m.on_response_received)
        self.querier.set_on_error_received_callback(
            self.routing_m.on_error_received)
        self.querier.set_on_timeout_callback(self.routing_m.on_timeout)
        self.querier.set_on_nodes_found_callback(self.routing_m.on_nodes_found)

        self.routing_m.do_bootstrap()

        self.rpc_m.add_msg_callback(QUERY,
                                    self.responder.on_query_received)

        self.lookup_m = LookupManager(self.my_id, self.querier,
                                      self.routing_m)

    def start(self):
        self.reactor.start()

    def stop(self):
        #TODO2: stop each manager
        self.reactor.stop()

    def get_peers(self, info_hash, callback_f, bt_port=None):
        return self.lookup_m.get_peers(info_hash, callback_f, bt_port)
Example #22
0
class Controller:
    
    def __init__(self, dht_addr):
        self.my_addr = dht_addr
        self.my_id = identifier.RandomId()
        self.my_node = Node(self.my_addr, self.my_id)
        self.tracker = tracker.Tracker()
        self.token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        self.rpc_m = RPCManager(self.reactor, self.my_addr[1])
        self.querier = Querier(self.rpc_m, self.my_id)
        self.routing_m = RoutingManager(self.my_node, self.querier,
                                        bootstrap_nodes)
        self.responder = Responder(self.my_id, self.routing_m,
                                   self.tracker, self.token_m)

        self.responder.set_on_query_received_callback(
            self.routing_m.on_query_received)
        self.querier.set_on_response_received_callback(
            self.routing_m.on_response_received)
        self.querier.set_on_error_received_callback(
            self.routing_m.on_error_received)
        self.querier.set_on_timeout_callback(self.routing_m.on_timeout)
        self.querier.set_on_nodes_found_callback(self.routing_m.on_nodes_found)

        self.routing_m.do_bootstrap()

        self.rpc_m.add_msg_callback(QUERY,
                                    self.responder.on_query_received)

        self.lookup_m = LookupManager(self.my_id, self.querier,
                                      self.routing_m)

    def start(self):
        self.reactor.start()

    def stop(self):
        #TODO2: stop each manager
        self.reactor.stop()

    def get_peers(self, info_hash, callback_f, bt_port=None):
        return self.lookup_m.get_peers(info_hash, callback_f, bt_port)
Example #23
0
 def test_listen_upd(self):
     r = ThreadedReactor()
     r.start()
     logger.warning(''.join(
         ('TESTING LOGS ** IGNORE EXPECTED WARNING ** ',
          '(udp_listen has not been called)')))
     self.client_r.sendto(DATA, tc.SERVER_ADDR)
     while 1: #waiting for data
         with self.lock:
             if self.datagrams_received:
                 break
         time.sleep(tc.TASK_INTERVAL)
     with self.lock:
         first_datagram = self.datagrams_received.pop(0)
         logger.debug('first_datagram: %s, %s' % (
                 first_datagram,
                 (DATA, tc.CLIENT_ADDR)))
         assert first_datagram == (DATA, tc.CLIENT_ADDR)
     r.stop()
Example #24
0
class Controller:
    def __init__(self, dht_addr):
        my_addr = dht_addr
        my_id = identifier.RandomId()
        my_node = Node(my_addr, my_id)
        tracker_ = tracker.Tracker()
        token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        rpc_m = RPCManager(self.reactor, my_addr[1])
        querier_ = Querier(rpc_m, my_id)
        routing_m = RoutingManager(my_node, querier_, bootstrap_nodes)
        responder_ = Responder(my_id, routing_m, tracker_, token_m)

        responder_.set_on_query_received_callback(routing_m.on_query_received)
        querier_.set_on_response_received_callback(
            routing_m.on_response_received)
        querier_.set_on_error_received_callback(routing_m.on_error_received)
        querier_.set_on_timeout_callback(routing_m.on_timeout)
        querier_.set_on_nodes_found_callback(routing_m.on_nodes_found)

        routing_m.do_bootstrap()

        rpc_m.add_msg_callback(QUERY, responder_.on_query_received)

        self.lookup_m = LookupManager(my_id, querier_, routing_m)
        self._routing_m = routing_m

    def start(self):
        self.reactor.start()

    def stop(self):
        #TODO2: stop each manager
        self.reactor.stop()

    def get_peers(self, info_hash, callback_f, bt_port=None):
        return self.lookup_m.get_peers(info_hash, callback_f, bt_port)

    def print_routing_table_stats(self):
        self._routing_m.print_stats()
Example #25
0
class TestMinitwistedRealThreading:

    def _main_loop(self):
        return time.time() + 1, []

    def _on_datagram_received(self, datagram):
        return time.time() + 1, []
        
    def test_start_and_stop(self):
        '''
        NOTE:
        This is the only test using real threading
        '''
        self.reactor = ThreadedReactor(self._main_loop,
                                       tc.CLIENT_PORT,
                                       self._on_datagram_received,
                                       task_interval=tc.TASK_INTERVAL)
        ok_(not self.reactor.running)
        self.reactor.start()
        time.sleep(.1)
        ok_(self.reactor.running)
        self.reactor.stop()
        ok_(not self.reactor.running)
Example #26
0
class TestSocketError:
    def _main_loop(self):
        return time.time() + tc.TASK_INTERVAL * 10000, [DATAGRAM1]

    def _on_datagram_received(self, datagram):
        return

    def setup(self):
        self.main_loop_call_counter = 0
        self.callback_values = []
        self.datagrams_received = []

        self.lock = threading.RLock()
        self.reactor = ThreadedReactor(
            self._main_loop, tc.CLIENT_PORT, self._on_datagram_received, task_interval=tc.TASK_INTERVAL
        )
        self.reactor.s = _SocketErrorMock()
        self.reactor.start()

    def test_sendto_socket_error(self):
        time.sleep(tc.TASK_INTERVAL / 5)

    def teardown(self):
        self.reactor.stop()
Example #27
0
 def test_recvfrom(self):
     self.r.start()
     r2 = ThreadedReactor()
     r2.listen_udp(tc.SERVER_ADDR[1], lambda x, y: None)
     logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
     r2.sendto('z', tc.CLIENT_ADDR)
     # self.r will call recvfrom (which raises socket.error)
     time.sleep(tc.TASK_INTERVAL)
     ok_(not self.callback_fired)
     self.r.stop()
Example #28
0
class TestSend:
    def _main_loop(self):
        return time.time() + 100, [DATAGRAM1]

    def _callback(self, value):
        with self.lock:
            self.callback_values.append(value)
        return time.time() + 100, [DATAGRAM2]

    def _on_datagram_received(self, datagram):
        with self.lock:
            self.datagrams_received.append(datagram)
        return time.time() + 100, [DATAGRAM3]

    def _crashing_callback(self):
        raise Exception, "Crash testing"

    def setup(self):
        self.main_loop_call_counter = 0
        self.callback_values = []
        self.datagrams_received = []

        self.lock = threading.RLock()
        self.reactor = ThreadedReactor(
            self._main_loop, tc.CLIENT_PORT, self._on_datagram_received, task_interval=tc.TASK_INTERVAL
        )
        self.reactor.s = _SocketMock()
        self.s = self.reactor.s
        self.reactor.start()

    def test_main_loop_send_data(self):
        time.sleep(tc.TASK_INTERVAL)
        eq_(self.s.get_datagrams_sent(), [DATAGRAM1])
        return

    def test_call_asap_send_data(self):
        time.sleep(tc.TASK_INTERVAL)
        eq_(self.s.get_datagrams_sent(), [DATAGRAM1])
        self.reactor.call_asap(self._callback, 1)
        time.sleep(tc.TASK_INTERVAL * 2)
        eq_(self.s.get_datagrams_sent(), [DATAGRAM1, DATAGRAM2])

    def test_on_datagram_received_send_data(self):
        time.sleep(tc.TASK_INTERVAL)
        eq_(self.s.get_datagrams_sent(), [DATAGRAM1])
        self.s.put_datagram_received(Datagram(DATA1, tc.SERVER_ADDR))
        time.sleep(tc.TASK_INTERVAL / 2)
        eq_(self.s.get_datagrams_sent(), [DATAGRAM1, DATAGRAM3])

    def teardown(self):
        self.reactor.stop()
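TestSend drives the reactor through a _SocketMock that records outgoing datagrams and lets the test inject incoming ones; that mock is not included in the snippet. A minimal sketch consistent with get_datagrams_sent and put_datagram_received, assuming the reactor calls sendto(data, addr) and recvfrom(buffer_size) and that Datagram is the same class the tests already construct:

import socket

class _SocketMock(object):
    # Hypothetical in-memory socket: records what the reactor sends and
    # feeds it datagrams injected by the test. Only the methods used by
    # TestSend are sketched here.

    def __init__(self, task_interval=None):
        self._sent = []
        self._to_receive = []

    def sendto(self, data, addr):
        # Wrap the raw payload back into a Datagram so the tests can
        # compare against DATAGRAM1/DATAGRAM2/DATAGRAM3 directly.
        self._sent.append(Datagram(data, addr))
        return len(data)

    def recvfrom(self, buffer_size):
        if self._to_receive:
            datagram = self._to_receive.pop(0)
            return datagram.data, datagram.addr
        # No injected datagram yet; a real mock would block until one
        # arrives or the task interval expires.
        raise socket.timeout('no datagram queued (mock)')

    def get_datagrams_sent(self):
        return list(self._sent)

    def put_datagram_received(self, datagram):
        self._to_receive.append(datagram)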
Example #29
0
 def test_listen_upd(self):
     r = ThreadedReactor()
     r.start()
     logger.warning(''.join(('TESTING LOGS ** IGNORE EXPECTED WARNING ** ',
                             '(udp_listen has not been called)')))
     self.client_r.sendto(DATA, tc.SERVER_ADDR)
     while 1:  #waiting for data
         with self.lock:
             if self.datagrams_received:
                 break
         time.sleep(tc.TASK_INTERVAL)
     with self.lock:
         first_datagram = self.datagrams_received.pop(0)
         logger.debug('first_datagram: %s, %s' % (first_datagram,
                                                  (DATA, tc.CLIENT_ADDR)))
         assert first_datagram == (DATA, tc.CLIENT_ADDR)
     r.stop()
Example #30
0
class TestMinitwisted:
    def setup(self):
        global time
        #TODO: mock time and socket
        #time = minitwisted.time = MockTime()
        #minitwisted.socket = MockSocket()

        self.lock = threading.Lock()
        self.datagrams_received = []
        self.callback_order = []
        self.client_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
        self.server_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
        self.client_s = self.client_r.listen_udp(tc.CLIENT_ADDR[1],
                                                 self.on_datagram_received)
        self.server_s = self.server_r.listen_udp(tc.SERVER_ADDR[1],
                                                 self.on_datagram_received)
        self.client_r.start()
        self.server_r.start()

    def test_listen_upd(self):
        r = ThreadedReactor()
        r.start()
        logger.warning(''.join(('TESTING LOGS ** IGNORE EXPECTED WARNING ** ',
                                '(udp_listen has not been called)')))
        self.client_r.sendto(DATA, tc.SERVER_ADDR)
        while 1:  #waiting for data
            with self.lock:
                if self.datagrams_received:
                    break
            time.sleep(tc.TASK_INTERVAL)
        with self.lock:
            first_datagram = self.datagrams_received.pop(0)
            logger.debug('first_datagram: %s, %s' % (first_datagram,
                                                     (DATA, tc.CLIENT_ADDR)))
            assert first_datagram == (DATA, tc.CLIENT_ADDR)
        r.stop()

    def test_network_callback(self):
        self.client_r.sendto(DATA, tc.SERVER_ADDR)
        time.sleep(tc.TASK_INTERVAL)
        with self.lock:
            first_datagram = self.datagrams_received.pop(0)
            logger.debug('first_datagram: %s, %s' % (first_datagram,
                                                     (DATA, tc.CLIENT_ADDR)))
            assert first_datagram == (DATA, tc.CLIENT_ADDR)

    def test_block_flood(self):
        from floodbarrier import MAX_PACKETS_PER_PERIOD as FLOOD_LIMIT

        for _ in xrange(FLOOD_LIMIT):
            self.client_r.sendto(DATA, tc.SERVER_ADDR)
        for _ in xrange(10):
            self.client_r.sendto(DATA, tc.SERVER_ADDR)
            logger.warning("TESTING LOGS ** IGNORE EXPECTED WARNING **")
            time.sleep(tc.TASK_INTERVAL)
        return
        ######################################
        with self.lock:
            logger.debug('datagram processed: %d/%d' %
                         (len(self.datagrams_received), FLOOD_LIMIT))
            print len(self.datagrams_received)
            assert len(self.datagrams_received) <= FLOOD_LIMIT

    def test_call_later(self):
        self.client_r.call_later(.13, self.callback_f, 1)
        self.client_r.call_later(.11, self.callback_f, 2)
        self.client_r.call_later(.01, self.callback_f, 3)
        task4 = self.client_r.call_later(.01, self.callback_f, 4)
        task4.cancel()
        time.sleep(.03)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [3])
            self.callback_order = []
        self.client_r.call_now(self.callback_f, 5)
        time.sleep(.03)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [5])
            self.callback_order = []
        task6 = self.client_r.call_later(.03, self.callback_f, 6)
        task6.cancel()
        time.sleep(.1)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [2, 1])

    def test_network_and_delayed(self):
        self.client_r.call_later(.2, self.callback_f, 0)
        self.client_r.call_now(self.callback_f, 1)
        task2 = self.client_r.call_later(.2, self.callback_f, 2)
        with self.lock:
            eq_(self.callback_order, [])
        time.sleep(.1)

        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.callback_order == [1]
            self.callback_order = []
            assert not self.datagrams_received
        self.server_r.sendto(DATA, tc.CLIENT_ADDR)
        time.sleep(.02)  # wait for network interruption
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.callback_order == []
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.datagrams_received.pop(0) == (DATA, tc.SERVER_ADDR)
            task2.cancel()  #inside critical region??
        time.sleep(.1)  # wait for task 0 (task 2 should be cancelled)
        with self.lock:
            assert self.callback_order == [0]
            assert not self.datagrams_received

    def test_sendto_socket_error(self):
        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
        self.client_r.sendto('z', (tc.NO_ADDR[0], 0))

    def teardown(self):
        self.client_r.stop()
        self.server_r.stop()

    def on_datagram_received(self, data, addr):
        with self.lock:
            self.datagrams_received.append((data, addr))

    def callback_f(self, callback_id):
        with self.lock:
            self.callback_order.append(callback_id)
Example #31
0
class TestMinitwisted:
    def _main_loop(self):
        with self.lock:
            self.main_loop_call_counter += 1
        return time.time() + tc.TASK_INTERVAL * 10, []

    def _main_loop_return_datagrams(self):
        return time.time() + tc.TASK_INTERVAL * 10, [DATAGRAM1]

    def _callback(self, value):
        with self.lock:
            self.callback_values.append(value)
        return time.time() + 100, []

    def _very_long_callback(self, value):
        time.sleep(tc.TASK_INTERVAL * 11)

    def _on_datagram_received(self, datagram):
        print "on_datagram", datagram, datagram.data, datagram.addr
        with self.lock:
            self.datagrams_received.append(datagram)
        return time.time() + 100, []

    def _crashing_callback(self):
        raise Exception, "Crash testing"

    def setup(self):
        self.main_loop_call_counter = 0
        self.callback_values = []
        self.datagrams_received = []

        self.lock = threading.RLock()
        self.reactor = ThreadedReactor(
            self._main_loop, tc.CLIENT_PORT, self._on_datagram_received, task_interval=tc.TASK_INTERVAL
        )
        self.reactor.s = _SocketMock()
        self.s = self.reactor.s
        self.reactor.start()

    def test_call_main_loop(self):
        time.sleep(tc.TASK_INTERVAL)
        # main_loop is called right away
        with self.lock:
            # FIXME: this assert fails sometimes!!!!
            eq_(self.main_loop_call_counter, 1)
        time.sleep(0.1 + tc.TASK_INTERVAL)
        with self.lock:
            # FIXME: this crashes when recompiling
            eq_(self.main_loop_call_counter, 2)

    def test_call_asap(self):
        with self.lock:
            eq_(self.callback_values, [])
        self.reactor.call_asap(self._callback, 0)
        time.sleep(tc.TASK_INTERVAL * 2)
        with self.lock:
            eq_(self.callback_values, [0])

        for i in xrange(1, 5):
            self.reactor.call_asap(self._callback, i)
            time.sleep(tc.TASK_INTERVAL * 3)
            with self.lock:
                eq_(self.callback_values, range(i + 1))

    def test_minitwisted_crashed(self):
        self.reactor.call_asap(self._crashing_callback)
        time.sleep(tc.TASK_INTERVAL * 3)
        # from now on, the minitwisted thread is dead
        ok_(not self.reactor.running)

    def test_on_datagram_received_callback(self):
        # This is equivalent to sending a datagram to reactor
        self.s.put_datagram_received(Datagram(DATA1, tc.SERVER_ADDR))
        datagram = Datagram(DATA1, tc.SERVER_ADDR)
        print "--------------", datagram, datagram.data, datagram.addr
        time.sleep(tc.TASK_INTERVAL * 1)
        with self.lock:
            datagram = self.datagrams_received.pop(0)
            print "popped>>>>>>>>>>>>>>>", datagram
            eq_(datagram.data, DATA1)
            eq_(datagram.addr, tc.SERVER_ADDR)

    def test_block_flood(self):
        from floodbarrier import MAX_PACKETS_PER_PERIOD as FLOOD_LIMIT

        for _ in xrange(FLOOD_LIMIT):
            self.s.put_datagram_received(Datagram(DATA1, tc.SERVER_ADDR))
        time.sleep(tc.TASK_INTERVAL * 5)
        with self.lock:
            eq_(len(self.datagrams_received), FLOOD_LIMIT)
        for _ in xrange(10):
            self.s.put_datagram_received(Datagram(DATA1, tc.SERVER_ADDR))
            time.sleep(tc.TASK_INTERVAL * 3)
            with self.lock:
                eq_(len(self.datagrams_received), FLOOD_LIMIT)
                logger.warning("TESTING LOGS ** IGNORE EXPECTED WARNING **")

    def _test_network_and_delayed(self):
        # TODO
        self.client_r.call_later(0.2, self.callback_f, 0)
        self.client_r.call_asap(self.callback_f, 1)
        task2 = self.client_r.call_later(0.2, self.callback_f, 2)
        with self.lock:
            eq_(self.callback_order, [])
        time.sleep(0.1)

        with self.lock:
            logger.debug("callback_order: %s" % self.callback_order)
            assert self.callback_order == [1]
            self.callback_order = []
            assert not self.datagrams_received
        self.server_r.sendto(DATA, tc.CLIENT_ADDR)
        time.sleep(0.02)  # wait for network interruption
        with self.lock:
            logger.debug("callback_order: %s" % self.callback_order)
            assert self.callback_order == []
            logger.debug("callback_order: %s" % self.callback_order)
            datagram = self.datagrams_received.pop(0)
            eq_(datagram.data, DATA)
            eq_(datagram.addr, tc.SERVER_ADDR)
            task2.cancel()  # inside critical region??
        time.sleep(0.1)  # wait for task 0 (task 2 should be cancelled)
        with self.lock:
            assert self.callback_order == [0]
            assert not self.datagrams_received

    def teardown(self):
        self.reactor.stop()
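These tests build Datagram objects directly and only read back their data and addr attributes. If the real class is not at hand, a minimal stand-in consistent with that usage is sketched below; the Datagram shipped with the reactor module may carry more behaviour.

class Datagram(object):
    # Minimal stand-in: pairs a payload with an address, which is all the
    # tests above read back (datagram.data, datagram.addr).

    def __init__(self, data, addr):
        self.data = data
        self.addr = addr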
Example #32
0
class Controller():

    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod, private_dht_name):
        message.private_dht_name = private_dht_name
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()
        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1], self._on_datagram_received)
        self._querier = Querier(self._my_id)
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, bootstrap_nodes)
        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        self._running = False

    def start(self):
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        self._reactor.stop()

    def save_state(self):
        rnodes = self._routing_m.get_main_rnodes()
        f = open(self.state_filename, 'w')
        f.write('%r\n' % self._my_id)
        for rnode in rnodes:
            f.write('%d\t%r\t%s\t%d\t%f\n' % (self._my_id.log_distance(rnode.id),
             rnode.id,
             rnode.addr[0],
             rnode.addr[1],
             rnode.rtt * 1000))

        f.close()

    def load_state(self):
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except IOError:
            return

        try:
            hex_id = f.readline().strip()
            self._my_id = Id(hex_id)
            for line in f:
                _, hex_id, ip, port, _ = line.split()
                addr = (ip, int(port))
                node_ = Node(addr, Id(hex_id))
                self.loaded_nodes.append(node_)

            f.close()
        except:
            self._my_id = None
            self.loaded_nodes = []
            logger.error('state.dat is corrupted')

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port = 0):
        logger.critical('get_peers %d %r' % (bt_port, info_hash))
        if time.time() > self._next_maintenance_ts + 1:
            logger.critical('minitwisted crashed or stopped!')
            return
        peers = self._tracker.get(info_hash)
        if peers:
            callback_f(lookup_id, peers)
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(log_distance, None, True)
        lookup_obj = self._lookup_m.get_peers(lookup_id, info_hash, callback_f, bt_port)
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        return len(lookup_queries_to_send)

    def print_routing_table_stats(self):
        self._routing_m.print_stats()

    def _main_loop(self):
        current_time = time.time()
        if current_time > self._next_maintenance_ts:
            maintenance_delay, queries_to_send, maintenance_lookup_target = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = current_time + maintenance_delay
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        delay = min(self._next_maintenance_ts, self._next_save_state_ts) - current_time
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        try:
            msg = message.IncomingMsg(data, addr)
        except message.MsgError:
            return

        if msg.type == message.QUERY:
            if msg.sender_id == self._my_id:
                logger.debug('Got a msg from myself:\n%r', msg)
                return
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)
            maintenance_queries_to_send = self._routing_m.on_query_received(msg.sender_node)
        elif msg.type == message.RESPONSE:
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                return
            if related_query.lookup_obj:
                if msg.type == message.RESPONSE:
                    lookup_queries_to_send, peers, num_parallel_queries, lookup_done = related_query.lookup_obj.on_response_received(msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            maintenance_queries_to_send = self._routing_m.on_response_received(msg.sender_node, related_query.rtt, msg.all_nodes)
        elif msg.type == message.ERROR:
            related_query = self._querier.on_error_received(msg, addr)
            if not related_query:
                return
            if related_query.lookup_obj:
                peers = None
                lookup_queries_to_send, num_parallel_queries, lookup_done = related_query.lookup_obj.on_error_received(msg, addr)
                self._send_queries(lookup_queries_to_send)
            if related_query.lookup_obj.callback_f:
                lookup_id = related_query.lookup_obj.lookup_id
                if lookup_done:
                    self._announce(related_query.lookup_obj)
                    related_query.lookup_obj.callback_f(lookup_id, None)
            maintenance_queries_to_send = self._routing_m.on_error_received(addr)
        else:
            return
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        if msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance, NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id, rnodes)
        if msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance, NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id, token, nodes=rnodes, peers=peers)
        if msg.query == message.ANNOUNCE_PEER:
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        logger.debug('Invalid QUERY: %r' % msg.query)

    def _on_response_received(self, msg):
        pass

    def _on_timeout(self, addr):
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return
        if related_query.lookup_obj:
            lookup_queries_to_send, num_parallel_queries, lookup_done = related_query.lookup_obj.on_timeout(related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                self._announce(related_query.lookup_obj)
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _announce(self, lookup_obj):
        queries_to_send, announce_to_myself = lookup_obj.announce()
        self._send_queries(queries_to_send)

    def _send_queries(self, queries_to_send, lookup_obj = None):
        if queries_to_send is None:
            return
        for query in queries_to_send:
            timeout_task = self._reactor.call_later(TIMEOUT_DELAY, self._on_timeout, query.dstnode.addr)
            bencoded_query = self._querier.register_query(query, timeout_task)
            self._reactor.sendto(bencoded_query, query.dstnode.addr)
Example #33
0
class Controller():
    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod,
                 private_dht_name):
        message.private_dht_name = private_dht_name
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()
        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        self._querier = Querier(self._my_id)
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       bootstrap_nodes)
        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        self._running = False

    def start(self):
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        self._reactor.stop()

    def save_state(self):
        rnodes = self._routing_m.get_main_rnodes()
        f = open(self.state_filename, 'w')
        f.write('%r\n' % self._my_id)
        for rnode in rnodes:
            f.write('%d\t%r\t%s\t%d\t%f\n' %
                    (self._my_id.log_distance(rnode.id), rnode.id,
                     rnode.addr[0], rnode.addr[1], rnode.rtt * 1000))

        f.close()

    def load_state(self):
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except IOError:
            return

        try:
            hex_id = f.readline().strip()
            self._my_id = Id(hex_id)
            for line in f:
                _, hex_id, ip, port, _ = line.split()
                addr = (ip, int(port))
                node_ = Node(addr, Id(hex_id))
                self.loaded_nodes.append(node_)

            f.close()
        except:
            self._my_id = None
            self.loaded_nodes = []
            logger.error('state.dat is corrupted')

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port=0):
        logger.critical('get_peers %d %r' % (bt_port, info_hash))
        if time.time() > self._next_maintenance_ts + 1:
            logger.critical('minitwisted crashed or stopped!')
            return
        peers = self._tracker.get(info_hash)
        if peers:
            callback_f(lookup_id, peers)
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(
            log_distance, None, True)
        lookup_obj = self._lookup_m.get_peers(lookup_id, info_hash, callback_f,
                                              bt_port)
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        return len(lookup_queries_to_send)

    def print_routing_table_stats(self):
        self._routing_m.print_stats()

    def _main_loop(self):
        current_time = time.time()
        if current_time > self._next_maintenance_ts:
            maintenance_delay, queries_to_send, maintenance_lookup_target = self._routing_m.do_maintenance(
            )
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = current_time + maintenance_delay
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        delay = min(self._next_maintenance_ts,
                    self._next_save_state_ts) - current_time
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        try:
            msg = message.IncomingMsg(data, addr)
        except message.MsgError:
            return

        if msg.type == message.QUERY:
            if msg.sender_id == self._my_id:
                logger.debug('Got a msg from myself:\n%r', msg)
                return
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)
            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.sender_node)
        elif msg.type == message.RESPONSE:
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                return
            if related_query.lookup_obj:
                if msg.type == message.RESPONSE:
                    lookup_queries_to_send, peers, num_parallel_queries, lookup_done = related_query.lookup_obj.on_response_received(
                        msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            maintenance_queries_to_send = self._routing_m.on_response_received(
                msg.sender_node, related_query.rtt, msg.all_nodes)
        elif msg.type == message.ERROR:
            related_query = self._querier.on_error_received(msg, addr)
            if not related_query:
                return
            if related_query.lookup_obj:
                peers = None  # an error msg doesn't have peers
                (lookup_queries_to_send,
                 num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_error_received(
                     msg, addr)
                self._send_queries(lookup_queries_to_send)
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if lookup_done:
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            maintenance_queries_to_send = self._routing_m.on_error_received(
                addr)
        else:
            return
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        if msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(
                log_distance, NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id, rnodes)
        if msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(
                log_distance, NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id,
                                                    token,
                                                    nodes=rnodes,
                                                    peers=peers)
        if msg.query == message.ANNOUNCE_PEER:
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        logger.debug('Invalid QUERY: %r' % msg.query)

    def _on_response_received(self, msg):
        pass

    def _on_timeout(self, addr):
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return
        if related_query.lookup_obj:
            (lookup_queries_to_send,
             num_parallel_queries,
             lookup_done) = related_query.lookup_obj.on_timeout(
                related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                self._announce(related_query.lookup_obj)
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _announce(self, lookup_obj):
        queries_to_send, announce_to_myself = lookup_obj.announce()
        self._send_queries(queries_to_send)

    def _send_queries(self, queries_to_send, lookup_obj=None):
        if queries_to_send is None:
            return
        for query in queries_to_send:
            timeout_task = self._reactor.call_later(TIMEOUT_DELAY,
                                                    self._on_timeout,
                                                    query.dstnode.addr)
            bencoded_query = self._querier.register_query(query, timeout_task)
            self._reactor.sendto(bencoded_query, query.dstnode.addr)
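
A minimal, self-contained sketch of the scheduling pattern _main_loop uses above: run
whichever periodic job is due, then re-arm a single timer for the earliest next deadline.
FakeReactor, MAINTENANCE_DELAY and SAVE_STATE_DELAY below are illustrative stand-ins,
not the project's ThreadedReactor or constants.

import threading
import time

MAINTENANCE_DELAY = 2    # assumed values, for illustration only
SAVE_STATE_DELAY = 10

class FakeReactor(object):
    # stand-in for ThreadedReactor.call_later(): runs f once after delay seconds
    def call_later(self, delay, f):
        t = threading.Timer(delay, f)
        t.daemon = True
        t.start()
        return t

class LoopSketch(object):
    def __init__(self):
        self.reactor = FakeReactor()
        now = time.time()
        self.next_maintenance_ts = now
        self.next_save_state_ts = now + SAVE_STATE_DELAY

    def main_loop(self):
        now = time.time()
        if now > self.next_maintenance_ts:
            # ... routing table maintenance, send queries ...
            self.next_maintenance_ts = now + MAINTENANCE_DELAY
        if now > self.next_save_state_ts:
            # ... save routing table state to disk ...
            self.next_save_state_ts = now + SAVE_STATE_DELAY
        # re-arm the timer for whichever deadline comes first
        delay = min(self.next_maintenance_ts,
                    self.next_save_state_ts) - now
        self.reactor.call_later(delay, self.main_loop)
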
Example #34
0
class Controller:

    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod):
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, 
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        
        self._running = False
        

    def start(self):
        assert not self._running
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        assert self._running
        #TODO2: stop each manager
        self._reactor.stop()

    def save_state(self):
        rnodes = self._routing_m.get_main_rnodes()
        f = open(self.state_filename, 'w')
        f.write('%r\n' % self._my_id)
        for rnode in rnodes:
            f.write('%d\t%r\t%s\t%d\t%f\n' % (
                    self._my_id.log_distance(rnode.id),
                    rnode.id, rnode.addr[0], rnode.addr[1],
                    rnode.rtt * 1000))
        f.close()

    def load_state(self):
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except IOError:
            return
        # the first line contains this node's identifier
        hex_id = f.readline().strip()
        self._my_id = Id(hex_id)
        # the rest of the lines contain routing table nodes
        # FORMAT
        # log_distance hex_id ip port rtt
        for line in f:
            _, hex_id, ip, port, _ = line.split()
            addr = (ip, int(port))
            node_ = Node(addr, Id(hex_id))
            self.loaded_nodes.append(node_)
        f.close()
        
    def get_peers(self, lookup_id, info_hash, callback_f, bt_port=0):
        assert self._running
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                              None,
                                                              True)
        lookup_obj = self._lookup_m.get_peers(info_hash, callback_f, bt_port)
        #TODO: propagate lookup_id to the lookup plugin
        lookup_obj.lookup_id = lookup_id
        ################################################
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        return len(lookup_queries_to_send)
        
    def print_routing_table_stats(self):
        self._routing_m.print_stats()

    def _main_loop(self):
        current_time = time.time()
        # Routing table
        if current_time > self._next_maintenance_ts:
            (maintenance_delay,
             queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = (current_time
                                         + maintenance_delay)
        # Auto-save routing table
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY

        # Schedule next call
        delay = (min(self._next_maintenance_ts, self._next_save_state_ts)
                 - current_time)
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        try:
            msg = message.IncomingMsg(data, addr)
        except message.MsgError:
            return # ignore message
        
        if msg.type == message.QUERY:
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)

            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.sender_node)
            
        elif msg.type in (message.RESPONSE, message.ERROR):
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                # Query timed out or unrequested response
                return
            # lookup related tasks
            if related_query.lookup_obj:
                if msg.type == message.RESPONSE:
                    (lookup_queries_to_send,
                     peers,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_response_received(
                        msg, msg.sender_node)
                else: #ERROR
                    peers = None # an error msg doesn't have peers
                    (lookup_queries_to_send,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_error_received(
                        msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)
                
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        related_query.lookup_obj.callback_f(lookup_id, None)
            # maintenance related tasks
            if msg.type == message.RESPONSE:
                maintenance_queries_to_send = \
                    self._routing_m.on_response_received(
                    msg.sender_node, related_query.rtt, msg.all_nodes)
            else:
                maintenance_queries_to_send = \
                    self._routing_m.on_error_received(
                    msg.sender_node)
        else: # unknown type
            return
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        elif msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                       NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id,
                                                    rnodes)
        elif msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                       NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id,
                                                    token,
                                                    nodes=rnodes,
                                                    peers=peers)
        elif msg.query == message.ANNOUNCE_PEER:
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        else:
            logger.debug('Invalid QUERY: %r' % (msg.query))
            #TODO: maybe send an error back?
        
    def _on_response_received(self, msg):
        pass

    def _on_timeout(self, addr):
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return # timeout cancelled (got response/error already)
        if related_query.lookup_obj:
            (lookup_queries_to_send,
             num_parallel_queries,
             lookup_done
             ) = related_query.lookup_obj.on_timeout(related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _send_queries(self, queries_to_send, lookup_obj=None):
        if queries_to_send is None:
            return
        for query in queries_to_send:
            timeout_task = self._reactor.call_later(TIMEOUT_DELAY,
                                                    self._on_timeout,
                                                    query.dstnode.addr)
            bencoded_query = self._querier.register_query(query, timeout_task)
            self._reactor.sendto(bencoded_query, query.dstnode.addr)
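
A hedged usage sketch of the callback contract get_peers() drives above: callback_f is
called as callback_f(lookup_id, peers), with a list of peers for each batch of results
and with None once the lookup is done. PeerCollector and the commented-out wiring are
illustrative assumptions, not part of the project.

import threading

class PeerCollector(object):
    """Accumulates peers from one lookup and signals when the lookup finishes."""

    def __init__(self):
        self.peers = []
        self.done = threading.Event()

    def __call__(self, lookup_id, peers):
        if peers is None:
            self.done.set()           # lookup finished
        else:
            self.peers.extend(peers)  # partial results keep arriving

# Typical wiring (assumed, for illustration):
# collector = PeerCollector()
# controller.get_peers(lookup_id=1, info_hash=some_info_hash,
#                      callback_f=collector, bt_port=6881)
# collector.done.wait(60)
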
Example #35
0
class TestMinitwisted:

    def _main_loop(self):
        print 'main loop call'
        self.main_loop_call_counter += 1
        return time.time() + self.main_loop_delay, []

    def _main_loop_return_datagrams(self):
        return time.time() + self.main_loop_delay, [DATAGRAM1]

    def _callback(self, value):
        self.callback_values.append(value)
        return []

    def _very_long_callback(self, value):
        time.sleep(tc.TASK_INTERVAL*11)
        return []

    def _on_datagram_received(self, datagram):
        print 'on_datagram', datagram, datagram.data, datagram.addr
        self.datagrams_received.append(datagram)
        return time.time() + 100, []

    def _crashing_callback(self):
        raise CrashError('Crash testing')

    def setup(self):
        time.mock_mode()
        self.main_loop_call_counter = 0
        self.callback_values = []
        self.datagrams_received = []
        
        self.main_loop_delay = MAIN_LOOP_DELAY
        self.reactor = ThreadedReactor(self._main_loop,
                                       tc.CLIENT_PORT,
                                       self._on_datagram_received,
                                       task_interval=tc.TASK_INTERVAL)
        self.reactor.s = _SocketMock()
        #self.reactor.start() >> instead of using start(), we use run_one_step()

    def test_call_main_loop(self):
        eq_(self.main_loop_call_counter, 0)
        self.reactor.run_one_step()
        # main_loop is called right away
        eq_(self.main_loop_call_counter, 1)
        self.reactor.run_one_step()
        # no events
        eq_(self.main_loop_call_counter, 1)
        time.sleep(self.main_loop_delay)
        self.reactor.run_one_step()
        # main_loop is called again after the delay
        eq_(self.main_loop_call_counter, 2)
        
    def test_call_asap(self):
        eq_(self.callback_values, [])
        self.reactor.call_asap(self._callback, 0)
        eq_(self.callback_values, [])  # still nothing
        self.reactor.run_one_step()
        eq_(self.callback_values, [0])  # callback triggered
        for i in xrange(1, 5):
            self.reactor.call_asap(self._callback, i)
            self.reactor.run_one_step()
            eq_(self.callback_values, range(i + 1))
    
    def test_minitwisted_crashed(self):
        self.reactor.call_asap(self._crashing_callback)
        assert_raises(CrashError, self.reactor.run_one_step)

    def test_on_datagram_received_callback(self):
        eq_(self.datagrams_received, [])
        self.reactor.run_one_step()
        eq_(self.datagrams_received, [])
        datagram = Datagram(DATA1, tc.SERVER_ADDR)
        # This is equivalent to sending a datagram to reactor
        self.reactor.s.put_datagram_received(datagram)
        self.reactor.run_one_step()
        eq_(len(self.datagrams_received), 1)
        eq_(self.datagrams_received[0], datagram)

    def test_block_flood(self):
        from floodbarrier import MAX_PACKETS_PER_PERIOD as FLOOD_LIMIT
        for _ in xrange(FLOOD_LIMIT * 2):
            self.reactor.s.put_datagram_received(Datagram(DATA1, tc.SERVER_ADDR))
        for i in xrange(FLOOD_LIMIT): 
            eq_(len(self.datagrams_received), i)
            self.reactor.run_one_step()
        eq_(len(self.datagrams_received), FLOOD_LIMIT)
        for i in xrange(FLOOD_LIMIT):
            eq_(len(self.datagrams_received), FLOOD_LIMIT)
            logger.warning(
                "TESTING LOGS ** IGNORE EXPECTED WARNING **")
            self.reactor.run_one_step()
        eq_(len(self.datagrams_received), FLOOD_LIMIT)

    def test_network_and_callback(self):
        self.reactor.call_asap(self._callback, 1)
        eq_(self.main_loop_call_counter, 0)
        eq_(self.callback_values, [])
        time.sleep(.1)
        self.reactor.run_one_step()
        # call_asap and main_loop triggered
        eq_(self.callback_values, [1])
        eq_(self.main_loop_call_counter, 1)

        self.reactor.s.put_datagram_received(DATAGRAM1)
        eq_(self.datagrams_received, [])
        self.reactor.run_one_step()
        eq_(self.datagrams_received, [DATAGRAM1])

        self.reactor.call_asap(self._callback, 2)
        self.reactor.s.put_datagram_received(DATAGRAM3)
        self.reactor.run_one_step() # receive AND call_asap
        eq_(self.datagrams_received, [DATAGRAM1, DATAGRAM3])
        eq_(self.callback_values, [1, 2])

        
    def teardown(self):
        #self.reactor.stop() >> reactor is not really running
        time.normal_mode()
Example #36
0
class TestMinitwisted:

    def setup(self):
        global time
        #TODO: mock time and socket
        #time = minitwisted.time = MockTime()
        #minitwisted.socket = MockSocket()
        
        self.lock = threading.Lock()
        self.datagrams_received = []
        self.callback_order = []
        self.client_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
        self.server_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
        self.client_s = self.client_r.listen_udp(tc.CLIENT_ADDR[1],
                                                 self.on_datagram_received)
        self.server_s = self.server_r.listen_udp(tc.SERVER_ADDR[1],
                                                 self.on_datagram_received)
        self.client_r.start()
        self.server_r.start()

    def test_listen_udp(self):
        r = ThreadedReactor()
        r.start()
        logger.warning(''.join(
            ('TESTING LOGS ** IGNORE EXPECTED WARNING ** ',
             '(udp_listen has not been called)')))
        self.client_r.sendto(DATA, tc.SERVER_ADDR)
        while 1: #waiting for data
            with self.lock:
                if self.datagrams_received:
                    break
            time.sleep(tc.TASK_INTERVAL)
        with self.lock:
            first_datagram = self.datagrams_received.pop(0)
            logger.debug('first_datagram: %s, %s' % (
                    first_datagram,
                    (DATA, tc.CLIENT_ADDR)))
            eq_(first_datagram, (DATA, tc.CLIENT_ADDR))
        r.stop()
            
    def test_network_callback(self):
        self.client_r.sendto(DATA, tc.SERVER_ADDR)
        time.sleep(tc.TASK_INTERVAL)
        with self.lock:
            first_datagram = self.datagrams_received.pop(0)
            logger.debug('first_datagram: %s, %s' % (
                    first_datagram,
                    (DATA, tc.CLIENT_ADDR)))
            eq_(first_datagram, (DATA, tc.CLIENT_ADDR))

    def test_block_flood(self):
        from floodbarrier import MAX_PACKETS_PER_PERIOD as FLOOD_LIMIT

        for _ in xrange(FLOOD_LIMIT):
            self.client_r.sendto(DATA, tc.SERVER_ADDR)
        for _ in xrange(10):
            self.client_r.sendto(DATA, tc.SERVER_ADDR)
            logger.warning(
                "TESTING LOGS ** IGNORE EXPECTED WARNING **")
            time.sleep(tc.TASK_INTERVAL)
        return  # the checks below are intentionally disabled
######################################
        with self.lock:
            logger.debug('datagram processed: %d/%d' % (
                              len(self.datagrams_received),
                              FLOOD_LIMIT))
            print len(self.datagrams_received)
            assert len(self.datagrams_received) <= FLOOD_LIMIT

    def test_call_later(self):
        self.client_r.call_later(.13, self.callback_f, 1)
        self.client_r.call_later(.11, self.callback_f, 2)
        self.client_r.call_later(.01, self.callback_f, 3)
        task4 = self.client_r.call_later(.01, self.callback_f, 4)
        task4.cancel()
        time.sleep(.03)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [3])
            self.callback_order = []
        self.client_r.call_now(self.callback_f, 5)
        time.sleep(.03)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [5])
            self.callback_order = []
        task6 = self.client_r.call_later(.03, self.callback_f, 6)
        task6.cancel()
        time.sleep(.1)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [2, 1])

    def test_network_and_delayed(self):
        self.client_r.call_later(.2, self.callback_f, 0)
        self.client_r.call_now(self.callback_f, 1)
        task2 = self.client_r.call_later(.2, self.callback_f, 2)
        with self.lock:
            eq_(self.callback_order, [])
        time.sleep(.1)

        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.callback_order == [1]
            self.callback_order = []
            assert not self.datagrams_received
        self.server_r.sendto(DATA, tc.CLIENT_ADDR)
        time.sleep(.02) # wait for network interruption
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.callback_order == []
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.datagrams_received.pop(0) == (DATA, tc.SERVER_ADDR)
            task2.cancel() #inside critical region??
        time.sleep(.1) # wait for task 0 (task 2 should be cancelled)
        with self.lock:
            assert self.callback_order == [0]
            assert not self.datagrams_received

    def test_sendto_socket_error(self): 
        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
        self.client_r.sendto('z', (tc.NO_ADDR[0], 0))

    def teardown(self):
        self.client_r.stop()
        self.server_r.stop()

    def on_datagram_received(self, data, addr):
        with self.lock:
            self.datagrams_received.append((data, addr))

    def callback_f(self, callback_id):
        with self.lock:
            self.callback_order.append(callback_id)
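
The tests above exercise call_later()/call_now() returning cancellable tasks. A minimal
stand-in with the same call_later/call_now shape, built on threading.Timer, is sketched
below; it is illustrative only, since the real ThreadedReactor runs callbacks from its
own loop thread rather than from Timer threads.

import threading

class TinyScheduler(object):
    def call_later(self, delay, f, *args):
        # returns a task whose cancel() prevents the call, as the tests expect
        task = threading.Timer(delay, f, args)
        task.daemon = True
        task.start()
        return task

    def call_now(self, f, *args):
        return self.call_later(0, f, *args)

# sched = TinyScheduler()
# task = sched.call_later(.1, some_callback, 1)
# task.cancel()            # some_callback(1) never runs
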
Example #37
0
    def setup(self):
        self.main_loop_send_called = False
        self.callback_fired = False
        self.r = ThreadedReactor(self._main_loop_send, tc.CLIENT_PORT,
                                 self._on_datagram_received)
        self.r.s = _SocketErrorMock()
Example #38
0
    def test_mock_threaded_reactor(self):
        '''
        Just making sure that the interface is the same

        '''
        r = ThreadedReactor(task_interval=.1)
        rm = ThreadedReactorMock(task_interval=.1)

        r.listen_udp(tc.CLIENT_ADDR[1], lambda x,y:None)
        rm.listen_udp(tc.CLIENT_ADDR[1], lambda x,y:None)

        r.start()
        rm.start()

        r.sendto(DATA, tc.CLIENT_ADDR)
        rm.sendto(DATA, tc.CLIENT_ADDR)
        
        r.call_later(.1, self._callback)
        rm.call_later(.1, self._callback)
#        time.sleep(.002)
        r.stop()
        rm.stop()
Example #39
0
class Controller:

    def __init__(self, dht_addr, state_path,
                 routing_m_mod, lookup_m_mod,
                 private_dht_name):
        #TODO: don't do this evil stuff!!!
        message.private_dht_name = private_dht_name
        
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, 
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        
        self._running = False
        

    def start(self):
        assert not self._running
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        assert self._running
        #TODO2: stop each manager
        self._reactor.stop()

    def save_state(self):
        rnodes = self._routing_m.get_main_rnodes()
        f = open(self.state_filename, 'w')
        f.write('%r\n' % self._my_id)
        for rnode in rnodes:
            f.write('%d\t%r\t%s\t%d\t%f\n' % (
                    self._my_id.log_distance(rnode.id),
                    rnode.id, rnode.addr[0], rnode.addr[1],
                    rnode.rtt * 1000))
        f.close()

    def load_state(self):
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except IOError:
            return
        # the first line contains this node's identifier
        hex_id = f.readline().strip()
        self._my_id = Id(hex_id)
        # the rest of the lines contain routing table nodes
        # FORMAT
        # log_distance hex_id ip port rtt
        for line in f:
            _, hex_id, ip, port, _ = line.split()
            addr = (ip, int(port))
            node_ = Node(addr, Id(hex_id))
            self.loaded_nodes.append(node_)
        f.close()
        
    def get_peers(self, lookup_id, info_hash, callback_f, bt_port=0):
        assert self._running
        # check whether I'm already tracking this info_hash
        local_peers = self._tracker.get(info_hash)
        # do the lookup
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                              None,
                                                              True)
        lookup_obj = self._lookup_m.get_peers(lookup_id, info_hash,
                                              callback_f, bt_port)
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        if not lookup_queries_to_send:
            # There are no nodes in my routing table, announce to myself
            self._announce(lookup_obj)
            # NOTICE: the callback is NOT triggered, zero is returned.
        return len(lookup_queries_to_send), local_peers
        
    def print_routing_table_stats(self):
        self._routing_m.print_stats()

    def _main_loop(self):
        current_time = time.time()
        # Routing table
        if current_time > self._next_maintenance_ts:
            (maintenance_delay,
             queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = (current_time
                                         + maintenance_delay)
        # Auto-save routing table
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY

        # Schedule next call
        delay = (min(self._next_maintenance_ts, self._next_save_state_ts)
                 - current_time)
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        try:
            msg = message.IncomingMsg(data, addr)
        except message.MsgError:
            return # ignore message
        if msg.sender_id == self._my_id:
            logger.debug('Got a msg from myself:\n%r', msg)
            return
        
        if msg.type == message.QUERY:
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)

            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.sender_node)
            
        elif msg.type in (message.RESPONSE, message.ERROR):
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                # Query timed out or unrequested response
                return
            # lookup related tasks
            if related_query.lookup_obj:
                if msg.type == message.RESPONSE:
                    (lookup_queries_to_send,
                     peers,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_response_received(
                        msg, msg.sender_node)
                else: #ERROR
                    peers = None # an error msg doesn't have peers
                    (lookup_queries_to_send,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_error_received(
                        msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)
                
                if related_query.lookup_obj.callback_f:
                    
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            # maintenance related tasks
            if msg.type == message.RESPONSE:
                maintenance_queries_to_send = \
                    self._routing_m.on_response_received(
                    msg.sender_node, related_query.rtt, msg.all_nodes)
            else:
                maintenance_queries_to_send = \
                    self._routing_m.on_error_received(
                    msg.sender_node)
        else: # unknown type
            return
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        elif msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                       NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id,
                                                    rnodes)
        elif msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                       NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id,
                                                    token,
                                                    nodes=rnodes,
                                                    peers=peers)
        elif msg.query == message.ANNOUNCE_PEER:
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        else:
            logger.debug('Invalid QUERY: %r' % (msg.query))
            #TODO: maybe send an error back?
        
    def _on_response_received(self, msg):
        pass

    def _on_timeout(self, addr):
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return # timeout cancelled (got response/error already)
        if related_query.lookup_obj:
            (lookup_queries_to_send,
             num_parallel_queries,
             lookup_done
             ) = related_query.lookup_obj.on_timeout(related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                self._announce(related_query.lookup_obj)
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _announce(self, lookup_obj):
        queries_to_send, announce_to_myself = lookup_obj.announce()
        self._send_queries(queries_to_send)
        '''
        if announce_to_myself:
            self._tracker.put(lookup_obj._info_hash,
                              (self._my_node.addr[0], lookup_obj._bt_port))
        '''
        
    def _send_queries(self, queries_to_send, lookup_obj=None):
        if queries_to_send is None:
            return
        for query in queries_to_send:
            timeout_task = self._reactor.call_later(TIMEOUT_DELAY,
                                                    self._on_timeout,
                                                    query.dstnode.addr)
            bencoded_query = self._querier.register_query(query, timeout_task)
            self._reactor.sendto(bencoded_query, query.dstnode.addr)
Example #40
0
    def test_mock_threaded_reactor(self):
        '''
        Just making sure that the interface is the same

        '''
        r = ThreadedReactor(task_interval=.1)
        rm = ThreadedReactorMock(task_interval=.1)

        r.listen_udp(tc.CLIENT_ADDR[1], lambda x, y: None)
        rm.listen_udp(tc.CLIENT_ADDR[1], lambda x, y: None)

        r.start()
        rm.start()

        r.sendto(DATA, tc.CLIENT_ADDR)
        rm.sendto(DATA, tc.CLIENT_ADDR)

        r.call_later(.1, self._callback)
        rm.call_later(.1, self._callback)
        #        time.sleep(.002)
        r.stop()
        rm.stop()
Example #41
0
class TestSend:
    
    def _main_loop(self):
        return time.time() + MAIN_LOOP_DELAY, [DATAGRAM1]

    def _callback(self, value):
        self.callback_values.append(value)
        return [DATAGRAM2]

    def _on_datagram_received(self, datagram):
        self.datagrams_received.append(datagram)
        return time.time() + MAIN_LOOP_DELAY, [DATAGRAM3]

    def _crashing_callback(self):
        raise CrashError('Crash testing')

    def setup(self):
        self.main_loop_call_counter = 0
        self.callback_values = []
        self.datagrams_received = []
        
        self.reactor = ThreadedReactor(self._main_loop,
                                       tc.CLIENT_PORT,
                                       self._on_datagram_received,
                                       task_interval=tc.TASK_INTERVAL)
        self.reactor.s = _SocketMock()
        
    def test_main_loop_send_data(self):
        eq_(self.reactor.s.get_datagrams_sent(), [])
        self.reactor.run_one_step()
        # main_loop sends DATAGRAM1
        eq_(self.reactor.s.get_datagrams_sent(), [DATAGRAM1])
    
    def test_call_asap_send_data(self):
        self.reactor.run_one_step()
        eq_(self.reactor.s.get_datagrams_sent(), [DATAGRAM1])
        self.reactor.call_asap(self._callback, 1)
        self.reactor.run_one_step()
        eq_(self.reactor.s.get_datagrams_sent(), [DATAGRAM1, DATAGRAM2])
        
    def test_on_datagram_received_send_data(self): 
        self.reactor.run_one_step()
        eq_(self.reactor.s.get_datagrams_sent(), [DATAGRAM1])
        self.reactor.s.put_datagram_received(Datagram(DATA1, tc.SERVER_ADDR))
        self.reactor.run_one_step()
        eq_(self.reactor.s.get_datagrams_sent(), [DATAGRAM1, DATAGRAM3])
        
    def test_capture(self):
        self.reactor.start_capture()
        ts1 = time.time()
        time.sleep(tc.TASK_INTERVAL/2)
        # out > DATAGRAM1 (main_loop)
        self.reactor.run_one_step()
        ts2 = time.time()
        incoming_datagram = Datagram(DATA1, tc.SERVER_ADDR)
        self.reactor.s.put_datagram_received(incoming_datagram)
        time.sleep(tc.TASK_INTERVAL/2)
        self.reactor.run_one_step() 
        # in < incoming_datagram (socket)
        # out > DATAGRAM3 (on_datagram_received)
        captured_msgs = self.reactor.stop_and_get_capture()

        eq_(len(captured_msgs), 3)
        for msg in captured_msgs:
            print msg
        assert ts1 < captured_msgs[0][0] < ts2
        eq_(captured_msgs[0][1], tc.SERVER_ADDR)
        eq_(captured_msgs[0][2], True) #outgoing
        eq_(captured_msgs[0][3], DATA1)
        assert captured_msgs[1][0] > ts2
        eq_(captured_msgs[1][1], DATAGRAM1.addr)
        eq_(captured_msgs[1][2], False) #incoming
        eq_(captured_msgs[1][3], DATAGRAM1.data)
        assert captured_msgs[2][0] > captured_msgs[1][0]
        eq_(captured_msgs[2][1], DATAGRAM3.addr)
        eq_(captured_msgs[2][2], True) #outgoing
        eq_(captured_msgs[2][3], DATAGRAM3.data)
        
    def teardown(self):
        return
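
test_capture above checks capture entries of the form (timestamp, addr, outgoing_flag,
data). A minimal recorder with that shape is sketched below; it is illustrative only,
as the real capture lives inside ThreadedReactor.

import time

class CaptureLog(object):
    def __init__(self):
        self.entries = []

    def record(self, addr, outgoing, data):
        # one tuple per datagram: when, where, direction, payload
        self.entries.append((time.time(), addr, outgoing, data))

# log = CaptureLog()
# log.record(('127.0.0.1', 7000), True, 'ping')    # outgoing datagram
# log.record(('127.0.0.1', 7000), False, 'pong')   # incoming datagram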