def test_mock_threaded_reactor(self):
    """Drive the real reactor and its mock through an identical call
    sequence, real first, mock second, to verify the mock exposes the
    same public interface as ThreadedReactor."""
    real = ThreadedReactor(task_interval=.1)
    mock = ThreadedReactorMock(task_interval=.1)
    # The datagram handler is never exercised here; a no-op suffices.
    real.listen_udp(tc.CLIENT_ADDR[1], lambda data, addr: None)
    mock.listen_udp(tc.CLIENT_ADDR[1], lambda data, addr: None)
    real.start()
    mock.start()
    real.sendto(DATA, tc.CLIENT_ADDR)
    mock.sendto(DATA, tc.CLIENT_ADDR)
    real.call_later(.1, self._callback)
    mock.call_later(.1, self._callback)
    # time.sleep(.002)
    real.stop()
    mock.stop()
def test_mock_threaded_reactor(self):
    """Check interface parity between ThreadedReactor and its mock by
    issuing every operation to both objects in lockstep."""
    reactor = ThreadedReactor(task_interval=.1)
    reactor_mock = ThreadedReactorMock(task_interval=.1)
    # Handler is never invoked in this test; discard any datagram.
    reactor.listen_udp(tc.CLIENT_ADDR[1], lambda data, addr: None)
    reactor_mock.listen_udp(tc.CLIENT_ADDR[1], lambda data, addr: None)
    reactor.start()
    reactor_mock.start()
    reactor.sendto(DATA, tc.CLIENT_ADDR)
    reactor_mock.sendto(DATA, tc.CLIENT_ADDR)
    reactor.call_later(.1, self._callback)
    reactor_mock.call_later(.1, self._callback)
    # time.sleep(.002)
    reactor.stop()
    reactor_mock.stop()
class TestMinitwisted:
    """Integration tests for ThreadedReactor.

    Two live reactors (client and server) exchange real UDP datagrams
    and scheduled callbacks. Callbacks run on the reactor threads, so
    all shared state (datagrams_received, callback_order) is accessed
    under self.lock. Timings are real sleeps; tests are timing-sensitive.
    """

    def setup(self):
        # Fresh reactors and shared state before every test.
        global time
        #TODO: mock time and socket
        #time = minitwisted.time = MockTime()
        #minitwisted.socket = MockSocket()
        self.lock = threading.Lock()
        self.datagrams_received = []
        self.callback_order = []
        self.client_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
        self.server_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
        self.client_s = self.client_r.listen_udp(tc.CLIENT_ADDR[1],
                                                 self.on_datagram_received)
        self.server_s = self.server_r.listen_udp(tc.SERVER_ADDR[1],
                                                 self.on_datagram_received)
        self.client_r.start()
        self.server_r.start()

    def test_listen_upd(self):
        # NOTE(review): name looks like a typo for "udp"; renaming would
        # change test discovery, so it is left untouched.
        r = ThreadedReactor()
        r.start()
        logger.warning(''.join(
                ('TESTING LOGS ** IGNORE EXPECTED WARNING ** ',
                 '(udp_listen has not been called)')))
        self.client_r.sendto(DATA, tc.SERVER_ADDR)
        while 1: #waiting for data
            with self.lock:
                if self.datagrams_received:
                    break
            time.sleep(tc.TASK_INTERVAL)
        with self.lock:
            first_datagram = self.datagrams_received.pop(0)
            logger.debug('first_datagram: %s, %s' % (
                    first_datagram,
                    (DATA, tc.CLIENT_ADDR)))
            # NOTE(review): the comma makes the tuple an assert *message*,
            # not a comparison — this only checks truthiness; `==` was
            # probably intended. Left as-is to avoid changing test outcome.
            assert first_datagram, (DATA, tc.CLIENT_ADDR)
        r.stop()

    def test_network_callback(self):
        # Client sends one datagram; server callback should record it.
        self.client_r.sendto(DATA, tc.SERVER_ADDR)
        time.sleep(tc.TASK_INTERVAL)
        with self.lock:
            first_datagram = self.datagrams_received.pop(0)
            logger.debug('first_datagram: %s, %s' % (
                    first_datagram,
                    (DATA, tc.CLIENT_ADDR)))
            # NOTE(review): same always-pass assert-with-message as above.
            assert first_datagram, (DATA, tc.CLIENT_ADDR)

    def test_block_flood(self):
        from floodbarrier import MAX_PACKETS_PER_PERIOD as FLOOD_LIMIT
        # Send exactly the allowed burst, then 10 packets over the limit.
        for _ in xrange(FLOOD_LIMIT):
            self.client_r.sendto(DATA, tc.SERVER_ADDR)
        for _ in xrange(10):
            self.client_r.sendto(DATA, tc.SERVER_ADDR)
            logger.warning(
                "TESTING LOGS ** IGNORE EXPECTED WARNING **")
        time.sleep(tc.TASK_INTERVAL)
        # The checks below are disabled by this early return.
        return ######################################
        with self.lock:
            logger.debug('datagram processed: %d/%d' % (
                    len(self.datagrams_received),
                    FLOOD_LIMIT))
            print len(self.datagrams_received)
            assert len(self.datagrams_received) <= FLOOD_LIMIT

    def test_call_later(self):
        # Schedule out of order; cancelled tasks (4, 6) must never fire.
        self.client_r.call_later(.13, self.callback_f, 1)
        self.client_r.call_later(.11, self.callback_f, 2)
        self.client_r.call_later(.01, self.callback_f, 3)
        task4 = self.client_r.call_later(.01, self.callback_f, 4)
        task4.cancel()
        time.sleep(.03)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [3])
            self.callback_order = []
        self.client_r.call_now(self.callback_f, 5)
        time.sleep(.03)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [5])
            self.callback_order = []
        task6 = self.client_r.call_later(.03, self.callback_f, 6)
        task6.cancel()
        time.sleep(.1)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [2, 1])

    def test_network_and_delayed(self):
        # Mix delayed tasks, an immediate call, and a network datagram.
        self.client_r.call_later(.2, self.callback_f, 0)
        self.client_r.call_now(self.callback_f, 1)
        task2 = self.client_r.call_later(.2, self.callback_f, 2)
        with self.lock:
            eq_(self.callback_order, [])
        time.sleep(.1)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.callback_order == [1]
            self.callback_order = []
            assert not self.datagrams_received
        self.server_r.sendto(DATA, tc.CLIENT_ADDR)
        time.sleep(.02) # wait for network interruption
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.callback_order == []
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.datagrams_received.pop(0) == (DATA, tc.SERVER_ADDR)
            task2.cancel() #inside critical region??
        time.sleep(.1) # wait for task 0 (task 2 should be cancelled)
        with self.lock:
            assert self.callback_order == [0]
            assert not self.datagrams_received

    def test_sendto_socket_error(self):
        # Port 0 triggers the socket error path; the reactor must not raise.
        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
        self.client_r.sendto('z', (tc.NO_ADDR[0], 0))

    def teardown(self):
        self.client_r.stop()
        self.server_r.stop()

    def on_datagram_received(self, data, addr):
        # Reactor-thread callback: record the datagram under the lock.
        with self.lock:
            self.datagrams_received.append((data, addr))

    def callback_f(self, callback_id):
        # Reactor-thread callback: record firing order under the lock.
        with self.lock:
            self.callback_order.append(callback_id)
class Controller:
    """Coordinate a DHT node: routing table, queries, lookups, persistence.

    Wires together the reactor (network/event loop), the querier, the
    routing and lookup managers, and periodically saves the routing table
    to disk. Fix applied: load_state() now actually calls f.close()
    (was `f.close` without parens — the file handle leaked) and catches
    IOError with idiomatic syntax.
    """

    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod,
                 private_dht_name):
        #TODO: don't do this evil stuff!!!
        # Module-level mutation: every Controller in this process shares it.
        message.private_dht_name = private_dht_name
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            # No saved identity on disk: generate a fresh random node id.
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()
        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        # Prefer nodes recovered from disk; fall back to well-known ones.
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)
        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        self._running = False

    def start(self):
        """Start the reactor and kick off the periodic main loop."""
        assert not self._running
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        """Stop the reactor (and with it all scheduled tasks)."""
        assert self._running
        #TODO2: stop each manager
        self._reactor.stop()

    def save_state(self):
        """Write this node's id and its main routing-table nodes to disk."""
        rnodes = self._routing_m.get_main_rnodes()
        f = open(self.state_filename, 'w')
        f.write('%r\n' % self._my_id)
        for rnode in rnodes:
            # FORMAT: log_distance hex_id ip port rtt (tab-separated)
            f.write('%d\t%r\t%s\t%d\t%f\n' % (
                    self._my_id.log_distance(rnode.id),
                    rnode.id, rnode.addr[0], rnode.addr[1],
                    rnode.rtt * 1000))
        f.close()

    def load_state(self):
        """Load node id and routing nodes from disk into self.loaded_nodes.

        Leaves self._my_id = None and self.loaded_nodes = [] when there is
        no state file.
        """
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except IOError:
            # No state file yet: start with an empty routing table.
            return
        # the first line contains this node's identifier
        hex_id = f.readline().strip()
        self._my_id = Id(hex_id)
        # the rest of the lines contain routing table nodes
        # FORMAT
        # log_distance hex_id ip port rtt
        for line in f:
            _, hex_id, ip, port, _ = line.split()
            addr = (ip, int(port))
            node_ = Node(addr, Id(hex_id))
            self.loaded_nodes.append(node_)
        # BUGFIX: was `f.close` (attribute access, no call) — leaked handle.
        f.close()

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port=0):
        """Start a get_peers lookup.

        Returns (number of queries sent, peers tracked locally).
        """
        assert self._running
        # look if I'm tracking this info_hash
        local_peers = self._tracker.get(info_hash)
        # do the lookup
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                              None, True)
        lookup_obj = self._lookup_m.get_peers(lookup_id, info_hash,
                                              callback_f, bt_port)
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        if not lookup_queries_to_send:
            # There are no nodes in my routing table, announce to myself
            self._announce(lookup_obj)
            # NOTICE: the callback is NOT triggered, zero is returned.
        return len(lookup_queries_to_send), local_peers

    def print_routing_table_stats(self):
        self._routing_m.print_stats()

    def _main_loop(self):
        """Periodic task: routing maintenance, state autosave, reschedule."""
        current_time = time.time()
        # Routing table
        if current_time > self._next_maintenance_ts:
            (maintenance_delay,
             queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = (current_time
                                         + maintenance_delay)
        # Auto-save routing table
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        # Schedule next call
        delay = (min(self._next_maintenance_ts,
                     self._next_save_state_ts)
                 - current_time)
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        """Decode an incoming datagram and dispatch by message type."""
        try:
            msg = message.IncomingMsg(data, addr)
        except message.MsgError:
            return # ignore message
        if msg.sender_id == self._my_id:
            logger.debug('Got a msg from myself:\n%r', msg)
            return
        if msg.type == message.QUERY:
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)
            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.sender_node)
        elif msg.type in (message.RESPONSE, message.ERROR):
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                # Query timed out or unrequested response
                return
            # lookup related tasks
            if related_query.lookup_obj:
                if msg.type == message.RESPONSE:
                    (lookup_queries_to_send,
                     peers,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_response_received(
                        msg, msg.sender_node)
                else: #ERROR
                    peers = None # an error msg doesn't have peers
                    (lookup_queries_to_send,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_error_received(
                        msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        # Lookup finished: announce ourselves, then signal
                        # the caller with a final None.
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            # maintenance related tasks
            if msg.type == message.RESPONSE:
                maintenance_queries_to_send = \
                    self._routing_m.on_response_received(
                        msg.sender_node, related_query.rtt, msg.all_nodes)
            else:
                maintenance_queries_to_send = \
                    self._routing_m.on_error_received(
                        msg.sender_node)
        else: # unknown type
            return
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        """Build the response message for an incoming query (or None)."""
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        elif msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                        NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id, rnodes)
        elif msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                        NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id,
                                                    token,
                                                    nodes=rnodes,
                                                    peers=peers)
        elif msg.query == message.ANNOUNCE_PEER:
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        else:
            logger.debug('Invalid QUERY: %r' % (msg.query))
            #TODO: maybe send an error back?

    def _on_response_received(self, msg):
        pass

    def _on_timeout(self, addr):
        """Handle a query timeout: notify lookup and routing managers."""
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return # timeout cancelled (got response/error already)
        if related_query.lookup_obj:
            (lookup_queries_to_send,
             num_parallel_queries,
             lookup_done
             ) = related_query.lookup_obj.on_timeout(related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                self._announce(related_query.lookup_obj)
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _announce(self, lookup_obj):
        queries_to_send, announce_to_myself = lookup_obj.announce()
        self._send_queries(queries_to_send)
        '''
        if announce_to_myself:
            self._tracker.put(lookup_obj._info_hash,
                              (self._my_node.addr[0], lookup_obj._bt_port))
        '''

    def _send_queries(self, queries_to_send, lookup_obj=None):
        """Register each query with a timeout task and send it out."""
        if queries_to_send is None:
            return
        for query in queries_to_send:
            timeout_task = self._reactor.call_later(TIMEOUT_DELAY,
                                                    self._on_timeout,
                                                    query.dstnode.addr)
            bencoded_query = self._querier.register_query(query, timeout_task)
            self._reactor.sendto(bencoded_query, query.dstnode.addr)
class Controller:
    """Coordinate a DHT node (variant without a private-DHT name).

    Same wiring as the other Controller versions in this file: reactor,
    querier, routing and lookup managers, plus periodic state autosave.
    Fix applied: load_state() now actually calls f.close() (was `f.close`
    without parens — the file handle leaked).
    """

    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod):
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            # No saved identity on disk: generate a fresh random node id.
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()
        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        # Prefer nodes recovered from disk; fall back to well-known ones.
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)
        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        self._running = False

    def start(self):
        """Start the reactor and kick off the periodic main loop."""
        assert not self._running
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        """Stop the reactor (and with it all scheduled tasks)."""
        assert self._running
        #TODO2: stop each manager
        self._reactor.stop()

    def save_state(self):
        """Write this node's id and its main routing-table nodes to disk."""
        rnodes = self._routing_m.get_main_rnodes()
        f = open(self.state_filename, 'w')
        f.write('%r\n' % self._my_id)
        for rnode in rnodes:
            # FORMAT: log_distance hex_id ip port rtt (tab-separated)
            f.write('%d\t%r\t%s\t%d\t%f\n' % (
                    self._my_id.log_distance(rnode.id),
                    rnode.id, rnode.addr[0], rnode.addr[1],
                    rnode.rtt * 1000))
        f.close()

    def load_state(self):
        """Load node id and routing nodes from disk into self.loaded_nodes."""
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except IOError:
            # No state file yet: start with an empty routing table.
            return
        # the first line contains this node's identifier
        hex_id = f.readline().strip()
        self._my_id = Id(hex_id)
        # the rest of the lines contain routing table nodes
        # FORMAT
        # log_distance hex_id ip port rtt
        for line in f:
            _, hex_id, ip, port, _ = line.split()
            addr = (ip, int(port))
            node_ = Node(addr, Id(hex_id))
            self.loaded_nodes.append(node_)
        # BUGFIX: was `f.close` (attribute access, no call) — leaked handle.
        f.close()

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port=0):
        """Start a get_peers lookup; return the number of queries sent."""
        assert self._running
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                              None, True)
        lookup_obj = self._lookup_m.get_peers(info_hash, callback_f, bt_port)
        #TODO: propagate lookup_id to the lookup plugin
        lookup_obj.lookup_id = lookup_id
        ################################################
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        return len(lookup_queries_to_send)

    def print_routing_table_stats(self):
        self._routing_m.print_stats()

    def _main_loop(self):
        """Periodic task: routing maintenance, state autosave, reschedule."""
        current_time = time.time()
        # Routing table
        if current_time > self._next_maintenance_ts:
            (maintenance_delay,
             queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = (current_time
                                         + maintenance_delay)
        # Auto-save routing table
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        # Schedule next call
        delay = (min(self._next_maintenance_ts,
                     self._next_save_state_ts)
                 - current_time)
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        """Decode an incoming datagram and dispatch by message type.

        NOTE(review): unlike the other Controller versions in this file,
        this one does not drop messages whose sender_id equals our own id.
        """
        try:
            msg = message.IncomingMsg(data, addr)
        except message.MsgError:
            return # ignore message
        if msg.type == message.QUERY:
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)
            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.sender_node)
        elif msg.type in (message.RESPONSE, message.ERROR):
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                # Query timed out or unrequested response
                return
            # lookup related tasks
            if related_query.lookup_obj:
                if msg.type == message.RESPONSE:
                    (lookup_queries_to_send,
                     peers,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_response_received(
                        msg, msg.sender_node)
                else: #ERROR
                    peers = None # an error msg doesn't have peers
                    (lookup_queries_to_send,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_error_received(
                        msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        # Final None signals end-of-lookup to the caller.
                        related_query.lookup_obj.callback_f(lookup_id, None)
            # maintenance related tasks
            if msg.type == message.RESPONSE:
                maintenance_queries_to_send = \
                    self._routing_m.on_response_received(
                        msg.sender_node, related_query.rtt, msg.all_nodes)
            else:
                maintenance_queries_to_send = \
                    self._routing_m.on_error_received(
                        msg.sender_node)
        else: # unknown type
            return
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        """Build the response message for an incoming query (or None)."""
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        elif msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                        NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id, rnodes)
        elif msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                        NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id,
                                                    token,
                                                    nodes=rnodes,
                                                    peers=peers)
        elif msg.query == message.ANNOUNCE_PEER:
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        else:
            logger.debug('Invalid QUERY: %r' % (msg.query))
            #TODO: maybe send an error back?

    def _on_response_received(self, msg):
        pass

    def _on_timeout(self, addr):
        """Handle a query timeout: notify lookup and routing managers."""
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return # timeout cancelled (got response/error already)
        if related_query.lookup_obj:
            (lookup_queries_to_send,
             num_parallel_queries,
             lookup_done
             ) = related_query.lookup_obj.on_timeout(related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _send_queries(self, queries_to_send, lookup_obj=None):
        """Register each query with a timeout task and send it out."""
        if queries_to_send is None:
            return
        for query in queries_to_send:
            timeout_task = self._reactor.call_later(TIMEOUT_DELAY,
                                                    self._on_timeout,
                                                    query.dstnode.addr)
            bencoded_query = self._querier.register_query(query, timeout_task)
            self._reactor.sendto(bencoded_query, query.dstnode.addr)
class TestMinitwisted:
    """Integration tests for ThreadedReactor (duplicate of the earlier
    copy in this file).

    Two live reactors (client and server) exchange real UDP datagrams
    and scheduled callbacks. Callbacks run on the reactor threads, so
    all shared state (datagrams_received, callback_order) is accessed
    under self.lock. Timings are real sleeps; tests are timing-sensitive.
    """

    def setup(self):
        # Fresh reactors and shared state before every test.
        global time
        #TODO: mock time and socket
        #time = minitwisted.time = MockTime()
        #minitwisted.socket = MockSocket()
        self.lock = threading.Lock()
        self.datagrams_received = []
        self.callback_order = []
        self.client_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
        self.server_r = ThreadedReactor(task_interval=tc.TASK_INTERVAL)
        self.client_s = self.client_r.listen_udp(tc.CLIENT_ADDR[1],
                                                 self.on_datagram_received)
        self.server_s = self.server_r.listen_udp(tc.SERVER_ADDR[1],
                                                 self.on_datagram_received)
        self.client_r.start()
        self.server_r.start()

    def test_listen_upd(self):
        # NOTE(review): name looks like a typo for "udp"; renaming would
        # change test discovery, so it is left as-is.
        r = ThreadedReactor()
        r.start()
        logger.warning(''.join(
                ('TESTING LOGS ** IGNORE EXPECTED WARNING ** ',
                 '(udp_listen has not been called)')))
        self.client_r.sendto(DATA, tc.SERVER_ADDR)
        while 1: #waiting for data
            with self.lock:
                if self.datagrams_received:
                    break
            time.sleep(tc.TASK_INTERVAL)
        with self.lock:
            first_datagram = self.datagrams_received.pop(0)
            logger.debug('first_datagram: %s, %s' % (
                    first_datagram,
                    (DATA, tc.CLIENT_ADDR)))
            # NOTE(review): the comma makes the tuple an assert *message*,
            # not a comparison — this only checks truthiness; `==` was
            # probably intended. Left as-is to avoid changing test outcome.
            assert first_datagram, (DATA, tc.CLIENT_ADDR)
        r.stop()

    def test_network_callback(self):
        # Client sends one datagram; server callback should record it.
        self.client_r.sendto(DATA, tc.SERVER_ADDR)
        time.sleep(tc.TASK_INTERVAL)
        with self.lock:
            first_datagram = self.datagrams_received.pop(0)
            logger.debug('first_datagram: %s, %s' % (
                    first_datagram,
                    (DATA, tc.CLIENT_ADDR)))
            # NOTE(review): same always-pass assert-with-message as above.
            assert first_datagram, (DATA, tc.CLIENT_ADDR)

    def test_block_flood(self):
        from floodbarrier import MAX_PACKETS_PER_PERIOD as FLOOD_LIMIT
        # Send exactly the allowed burst, then 10 packets over the limit.
        for _ in xrange(FLOOD_LIMIT):
            self.client_r.sendto(DATA, tc.SERVER_ADDR)
        for _ in xrange(10):
            self.client_r.sendto(DATA, tc.SERVER_ADDR)
            logger.warning(
                "TESTING LOGS ** IGNORE EXPECTED WARNING **")
        time.sleep(tc.TASK_INTERVAL)
        # The checks below are disabled by this early return.
        return ######################################
        with self.lock:
            logger.debug('datagram processed: %d/%d' % (
                    len(self.datagrams_received),
                    FLOOD_LIMIT))
            print len(self.datagrams_received)
            assert len(self.datagrams_received) <= FLOOD_LIMIT

    def test_call_later(self):
        # Schedule out of order; cancelled tasks (4, 6) must never fire.
        self.client_r.call_later(.13, self.callback_f, 1)
        self.client_r.call_later(.11, self.callback_f, 2)
        self.client_r.call_later(.01, self.callback_f, 3)
        task4 = self.client_r.call_later(.01, self.callback_f, 4)
        task4.cancel()
        time.sleep(.03)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [3])
            self.callback_order = []
        self.client_r.call_now(self.callback_f, 5)
        time.sleep(.03)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [5])
            self.callback_order = []
        task6 = self.client_r.call_later(.03, self.callback_f, 6)
        task6.cancel()
        time.sleep(.1)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            eq_(self.callback_order, [2, 1])

    def test_network_and_delayed(self):
        # Mix delayed tasks, an immediate call, and a network datagram.
        self.client_r.call_later(.2, self.callback_f, 0)
        self.client_r.call_now(self.callback_f, 1)
        task2 = self.client_r.call_later(.2, self.callback_f, 2)
        with self.lock:
            eq_(self.callback_order, [])
        time.sleep(.1)
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.callback_order == [1]
            self.callback_order = []
            assert not self.datagrams_received
        self.server_r.sendto(DATA, tc.CLIENT_ADDR)
        time.sleep(.02) # wait for network interruption
        with self.lock:
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.callback_order == []
            logger.debug('callback_order: %s' % self.callback_order)
            assert self.datagrams_received.pop(0) == (DATA, tc.SERVER_ADDR)
            task2.cancel() #inside critical region??
        time.sleep(.1) # wait for task 0 (task 2 should be cancelled)
        with self.lock:
            assert self.callback_order == [0]
            assert not self.datagrams_received

    def test_sendto_socket_error(self):
        # Port 0 triggers the socket error path; the reactor must not raise.
        logger.critical('TESTING: IGNORE CRITICAL MESSAGE')
        self.client_r.sendto('z', (tc.NO_ADDR[0], 0))

    def teardown(self):
        self.client_r.stop()
        self.server_r.stop()

    def on_datagram_received(self, data, addr):
        # Reactor-thread callback: record the datagram under the lock.
        with self.lock:
            self.datagrams_received.append((data, addr))

    def callback_f(self, callback_id):
        # Reactor-thread callback: record firing order under the lock.
        with self.lock:
            self.callback_order.append(callback_id)
class Controller():
    """Coordinate a DHT node (later variant: corrupt-state recovery,
    crash detection in get_peers, split RESPONSE/ERROR handling).

    Fixes applied: load_state() no longer uses a bare `except:` (which
    also swallowed SystemExit/KeyboardInterrupt) and now closes the state
    file in a `finally` so the handle cannot leak when parsing raises.
    """

    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod,
                 private_dht_name):
        # Module-level mutation: every Controller in this process shares it.
        message.private_dht_name = private_dht_name
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            # No saved identity on disk: generate a fresh random node id.
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()
        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        self._querier = Querier(self._my_id)
        # Prefer nodes recovered from disk; fall back to well-known ones.
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       bootstrap_nodes)
        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        self._running = False

    def start(self):
        """Start the reactor and kick off the periodic main loop."""
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        """Stop the reactor (and with it all scheduled tasks)."""
        self._reactor.stop()

    def save_state(self):
        """Write this node's id and its main routing-table nodes to disk."""
        rnodes = self._routing_m.get_main_rnodes()
        f = open(self.state_filename, 'w')
        f.write('%r\n' % self._my_id)
        for rnode in rnodes:
            # FORMAT: log_distance hex_id ip port rtt (tab-separated)
            f.write('%d\t%r\t%s\t%d\t%f\n' % (
                    self._my_id.log_distance(rnode.id),
                    rnode.id, rnode.addr[0], rnode.addr[1],
                    rnode.rtt * 1000))
        f.close()

    def load_state(self):
        """Load node id and routing nodes from disk into self.loaded_nodes.

        A missing file leaves empty state; any parse failure resets the
        state and logs the corruption.
        """
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except IOError:
            # No state file yet: start with an empty routing table.
            return
        try:
            # First line: this node's identifier (hex).
            hex_id = f.readline().strip()
            self._my_id = Id(hex_id)
            # Remaining lines: log_distance hex_id ip port rtt
            for line in f:
                _, hex_id, ip, port, _ = line.split()
                addr = (ip, int(port))
                node_ = Node(addr, Id(hex_id))
                self.loaded_nodes.append(node_)
        except Exception:
            # BUGFIX: was a bare `except:`. Any malformed content resets
            # the state so the node bootstraps from scratch.
            self._my_id = None
            self.loaded_nodes = []
            logger.error('state.dat is corrupted')
        finally:
            # BUGFIX: close unconditionally; the old code leaked the
            # handle whenever parsing raised.
            f.close()

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port=0):
        """Start a get_peers lookup; return the number of queries sent.

        Returns None (and sends nothing) when the main loop appears to
        have stopped running. Locally tracked peers are delivered to
        callback_f immediately.
        """
        logger.critical('get_peers %d %r' % (bt_port, info_hash))
        if time.time() > self._next_maintenance_ts + 1:
            # _main_loop has not rescheduled itself recently: it's dead.
            logger.critical('minitwisted crashed or stopped!')
            return
        peers = self._tracker.get(info_hash)
        if peers:
            callback_f(lookup_id, peers)
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(
            log_distance, None, True)
        lookup_obj = self._lookup_m.get_peers(lookup_id, info_hash,
                                              callback_f, bt_port)
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        return len(lookup_queries_to_send)

    def print_routing_table_stats(self):
        self._routing_m.print_stats()

    def _main_loop(self):
        """Periodic task: routing maintenance, state autosave, reschedule."""
        current_time = time.time()
        if current_time > self._next_maintenance_ts:
            (maintenance_delay,
             queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = current_time + maintenance_delay
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        # Schedule next call
        delay = (min(self._next_maintenance_ts,
                     self._next_save_state_ts)
                 - current_time)
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        """Decode an incoming datagram and dispatch by message type."""
        try:
            msg = message.IncomingMsg(data, addr)
        except message.MsgError:
            return
        if msg.type == message.QUERY:
            if msg.sender_id == self._my_id:
                logger.debug('Got a msg from myself:\n%r', msg)
                return
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)
            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.sender_node)
        elif msg.type == message.RESPONSE:
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                # Query timed out or unrequested response
                return
            if related_query.lookup_obj:
                # (a redundant `if msg.type == message.RESPONSE:` guard
                # that was always true inside this branch was removed)
                (lookup_queries_to_send,
                 peers,
                 num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_response_received(
                    msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        # Lookup finished: announce, then final None signal.
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            maintenance_queries_to_send = \
                self._routing_m.on_response_received(
                    msg.sender_node, related_query.rtt, msg.all_nodes)
        elif msg.type == message.ERROR:
            related_query = self._querier.on_error_received(msg, addr)
            if not related_query:
                return
            if related_query.lookup_obj:
                peers = None # an error msg doesn't carry peers
                (lookup_queries_to_send,
                 num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_error_received(
                    msg, addr)
                self._send_queries(lookup_queries_to_send)
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if lookup_done:
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            maintenance_queries_to_send = self._routing_m.on_error_received(
                addr)
        else: # unknown type
            return
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        """Build the response message for an incoming query (or None)."""
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        if msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(
                log_distance, NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id, rnodes)
        if msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(
                log_distance, NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id,
                                                    token,
                                                    nodes=rnodes,
                                                    peers=peers)
        if msg.query == message.ANNOUNCE_PEER:
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        logger.debug('Invalid QUERY: %r' % msg.query)

    def _on_response_received(self, msg):
        pass

    def _on_timeout(self, addr):
        """Handle a query timeout: notify lookup and routing managers."""
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return # timeout cancelled (got response/error already)
        if related_query.lookup_obj:
            (lookup_queries_to_send,
             num_parallel_queries,
             lookup_done) = related_query.lookup_obj.on_timeout(
                related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                self._announce(related_query.lookup_obj)
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _announce(self, lookup_obj):
        queries_to_send, announce_to_myself = lookup_obj.announce()
        self._send_queries(queries_to_send)

    def _send_queries(self, queries_to_send, lookup_obj=None):
        """Register each query with a timeout task and send it out."""
        if queries_to_send is None:
            return
        for query in queries_to_send:
            timeout_task = self._reactor.call_later(TIMEOUT_DELAY,
                                                    self._on_timeout,
                                                    query.dstnode.addr)
            bencoded_query = self._querier.register_query(query, timeout_task)
            self._reactor.sendto(bencoded_query, query.dstnode.addr)
class Controller():

    """DHT node controller.

    Wires together the reactor (UDP I/O and timers), querier, routing
    manager, lookup manager, tracker and token manager; persists the
    routing table to a state file and drives periodic maintenance.
    """

    def __init__(self, dht_addr, state_path,
                 routing_m_mod, lookup_m_mod,
                 private_dht_name):
        # A non-None private_dht_name isolates this node in a private DHT
        # (message module namespaces the wire protocol with it).
        message.private_dht_name = private_dht_name
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            # No usable saved state: start with a fresh random identifier.
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()
        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        self._querier = Querier(self._my_id)
        # Prefer nodes recovered from disk; fall back to hardcoded bootstrap.
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       bootstrap_nodes)
        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        self._running = False

    def start(self):
        """Start the reactor and enter the maintenance loop."""
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        """Stop the reactor (and with it, all scheduled tasks)."""
        self._reactor.stop()

    def save_state(self):
        """Write my id and the main routing-table nodes to the state file.

        Format: first line is repr(my_id); then one node per line as
        log_distance, repr(id), ip, port, rtt-in-ms (tab-separated) —
        the exact layout load_state() parses.
        """
        rnodes = self._routing_m.get_main_rnodes()
        # 'with' guarantees the file is closed even if a write raises
        # (the original leaked the handle on error).
        with open(self.state_filename, 'w') as f:
            f.write('%r\n' % self._my_id)
            for rnode in rnodes:
                f.write('%d\t%r\t%s\t%d\t%f\n' % (
                    self._my_id.log_distance(rnode.id),
                    rnode.id,
                    rnode.addr[0], rnode.addr[1],
                    rnode.rtt * 1000))

    def load_state(self):
        """Load my id and nodes from the state file into self._my_id /
        self.loaded_nodes; on any parse problem, reset both and log.

        A missing file is normal (first run) and leaves the defaults.
        """
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except IOError:
            return  # no saved state yet
        try:
            with f:  # ensures close on every path (original leaked on error)
                hex_id = f.readline().strip()
                self._my_id = Id(hex_id)
                for line in f:
                    _, hex_id, ip, port, _ = line.split()
                    addr = (ip, int(port))
                    node_ = Node(addr, Id(hex_id))
                    self.loaded_nodes.append(node_)
        except Exception:
            # Narrowed from bare 'except:' so KeyboardInterrupt/SystemExit
            # still propagate; any corruption resets to a clean state.
            self._my_id = None
            self.loaded_nodes = []
            logger.error('state.dat is corrupted')

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port=0):
        """Start a get_peers lookup for *info_hash*.

        callback_f(lookup_id, peers) is called with cached peers
        immediately (if any), with peers as they arrive, and finally with
        None when the lookup completes. Returns the number of initial
        queries sent, or None if the reactor appears dead.
        """
        logger.critical('get_peers %d %r' % (bt_port, info_hash))
        if time.time() > self._next_maintenance_ts + 1:
            # Maintenance is overdue: the reactor thread is not running.
            logger.critical('minitwisted crashed or stopped!')
            return
        peers = self._tracker.get(info_hash)
        if peers:
            # Serve locally-tracked peers right away.
            callback_f(lookup_id, peers)
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                              None, True)
        lookup_obj = self._lookup_m.get_peers(lookup_id, info_hash,
                                              callback_f, bt_port)
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        return len(lookup_queries_to_send)

    def print_routing_table_stats(self):
        """Print routing-table statistics (delegates to the routing manager)."""
        self._routing_m.print_stats()

    def _main_loop(self):
        """Periodic task: routing maintenance and state saving.

        Reschedules itself on the reactor for whichever deadline
        (maintenance or save) comes first.
        """
        current_time = time.time()
        if current_time > self._next_maintenance_ts:
            (maintenance_delay, queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = current_time + maintenance_delay
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        delay = (min(self._next_maintenance_ts, self._next_save_state_ts)
                 - current_time)
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        """Reactor callback: decode *data* from *addr* and dispatch it.

        QUERY: answer it and notify the routing manager.
        RESPONSE/ERROR: match it to an outstanding query, advance any
        related lookup, and notify the routing manager. Any maintenance
        queries the routing manager produces are sent at the end.
        """
        try:
            msg = message.IncomingMsg(data, addr)
        except message.MsgError:
            return  # ignore undecodable datagrams
        if msg.type == message.QUERY:
            if msg.sender_id == self._my_id:
                logger.debug('Got a msg from myself:\n%r', msg)
                return
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)
            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.sender_node)
        elif msg.type == message.RESPONSE:
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                return  # unsolicited response
            if related_query.lookup_obj:
                # (Removed a redundant inner 'if msg.type == RESPONSE:'
                # — always true inside this branch.)
                (lookup_queries_to_send, peers, num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_response_received(
                    msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            # Branch-level binding: guarantees the name exists for the
            # final _send_queries even when there is no lookup_obj.
            maintenance_queries_to_send = self._routing_m.on_response_received(
                msg.sender_node, related_query.rtt, msg.all_nodes)
        elif msg.type == message.ERROR:
            related_query = self._querier.on_error_received(msg, addr)
            if not related_query:
                return
            if related_query.lookup_obj:
                peers = None
                (lookup_queries_to_send, num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_error_received(
                    msg, addr)
                self._send_queries(lookup_queries_to_send)
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if lookup_done:
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            maintenance_queries_to_send = self._routing_m.on_error_received(
                addr)
        else:
            return  # unknown message type
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        """Build the outgoing response for an incoming query *msg*.

        Returns an Outgoing*Response for PING / FIND_NODE / GET_PEERS /
        ANNOUNCE_PEER, or None (implicitly) for an unrecognized query.
        """
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        if msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                        NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id, rnodes)
        if msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                        NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id, token,
                                                    nodes=rnodes, peers=peers)
        if msg.query == message.ANNOUNCE_PEER:
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        logger.debug('Invalid QUERY: %r' % msg.query)

    def _on_response_received(self, msg):
        # Intentionally a no-op: responses are handled in
        # _on_datagram_received.
        pass

    def _on_timeout(self, addr):
        """Handle a query timeout for *addr*: update lookup and routing."""
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return
        if related_query.lookup_obj:
            (lookup_queries_to_send, num_parallel_queries,
             lookup_done) = related_query.lookup_obj.on_timeout(
                related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                self._announce(related_query.lookup_obj)
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _announce(self, lookup_obj):
        """Send announce queries produced by a finished lookup."""
        queries_to_send, announce_to_myself = lookup_obj.announce()
        self._send_queries(queries_to_send)

    def _send_queries(self, queries_to_send, lookup_obj=None):
        """Register and send each query, arming a per-query timeout task.

        *queries_to_send* may be None (treated as "nothing to send").
        *lookup_obj* is unused but kept for interface compatibility.
        """
        if queries_to_send is None:
            return
        for query in queries_to_send:
            timeout_task = self._reactor.call_later(TIMEOUT_DELAY,
                                                    self._on_timeout,
                                                    query.dstnode.addr)
            bencoded_query = self._querier.register_query(query, timeout_task)
            self._reactor.sendto(bencoded_query, query.dstnode.addr)