def _on_get_peers(self, query_msg):
    """Answer a get_peers query.

    Returns a response carrying tracked peers when the tracker knows the
    info_hash, otherwise the closest nodes from the routing table.
    """
    new_token = self.token_m.get()
    tracked_peers = self.tracker.get(query_msg.info_hash)
    if not tracked_peers:
        # Nothing tracked for this info_hash: fall back to routing nodes.
        closest = self.routing_m.get_closest_rnodes(query_msg.info_hash)
        return message.OutgoingGetPeersResponse(self.my_id, new_token,
                                                nodes2=closest)
    return message.OutgoingGetPeersResponse(self.my_id, new_token,
                                            peers=tracked_peers)
def _gen_nodes_args(node_, nodes):
    """Fake a nodes2 get_peers response from node_ and decode it back.

    Returns the (incoming_msg, node) pair expected by on_response_received.
    """
    response = message.OutgoingGetPeersResponse(node_.id, tc.TOKEN,
                                                nodes2=nodes)
    decoded = message.IncomingMsg(response.encode(tc.TID))
    decoded.sanitize_response(message.GET_PEERS)
    return decoded, node_
def _get_response(self, msg):
    """Build the response message for an incoming query.

    Handles PING, FIND_NODE, GET_PEERS and ANNOUNCE_PEER queries.
    Returns an Outgoing*Response message, or None (after logging) when
    the query type is unknown.
    """
    if msg.query == message.PING:
        return message.OutgoingPingResponse(self._my_id)
    elif msg.query == message.FIND_NODE:
        log_distance = msg.target.log_distance(self._my_id)
        rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                    NUM_NODES, False)
        return message.OutgoingFindNodeResponse(self._my_id, rnodes)
    elif msg.query == message.GET_PEERS:
        token = self._token_m.get()
        log_distance = msg.info_hash.log_distance(self._my_id)
        rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                    NUM_NODES, False)
        peers = self._tracker.get(msg.info_hash)
        if peers:
            logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id, token,
                                                    nodes=rnodes,
                                                    peers=peers)
        # BUG FIX: previously this branch fell through and returned None
        # when the tracker had no peers for the info_hash. A get_peers
        # query must still be answered with the closest nodes (BEP 5).
        return message.OutgoingGetPeersResponse(self._my_id, token,
                                                nodes=rnodes)
    elif msg.query == message.ANNOUNCE_PEER:
        # The peer's address is the sender's IP plus the announced BT port.
        peer_addr = (msg.sender_addr[0], msg.bt_port)
        self._tracker.put(msg.info_hash, peer_addr)
        return message.OutgoingAnnouncePeerResponse(self._my_id)
    else:
        logger.debug('Invalid QUERY: %r' % (msg.query))
def setup(self):
    """Create one outgoing query and one outgoing response of each type."""
    ping_q = m.OutgoingPingQuery(tc.CLIENT_ID)
    fn_q = m.OutgoingFindNodeQuery(tc.CLIENT_ID, tc.TARGET_ID)
    gp_q = m.OutgoingGetPeersQuery(tc.CLIENT_ID, tc.INFO_HASH)
    ap_q = m.OutgoingAnnouncePeerQuery(tc.CLIENT_ID, tc.INFO_HASH,
                                       tc.BT_PORT, tc.TOKEN)
    self.queries = [ping_q, fn_q, gp_q, ap_q]

    ping_r = m.OutgoingPingResponse(tc.SERVER_ID)
    fn_r = m.OutgoingFindNodeResponse(tc.SERVER_ID, tc.NODES)
    gp_r = m.OutgoingGetPeersResponse(tc.SERVER_ID, tc.TOKEN,
                                      tc.NODES, tc.PEERS)
    ap_r = m.OutgoingAnnouncePeerResponse(tc.SERVER_ID)
    self.responses = [ping_r, fn_r, gp_r, ap_r]
def test_get_peers_nodes(self):
    """Round-trip a get_peers query and a response carrying nodes."""
    # Client side: build and encode the query.
    query = m.OutgoingGetPeersQuery(tc.CLIENT_ID, tc.INFO_HASH)
    wire_query = query.encode(tc.TID)
    # Server side: decode the query and answer with nodes.
    server_msg = m.IncomingMsg(wire_query, tc.CLIENT_ADDR)
    assert server_msg.type is m.QUERY
    response = m.OutgoingGetPeersResponse(tc.SERVER_ID, tc.TOKEN, tc.NODES)
    wire_response = response.encode(server_msg.tid)
    # Client side: decode the response; nodes must survive the round trip.
    client_msg = m.IncomingMsg(wire_response, tc.SERVER_ADDR)
    assert client_msg.type is m.RESPONSE
    #client_msg.sanitize_response(query.query)
    for sent, received in zip(tc.NODES, client_msg.all_nodes):
        assert sent == received
def test_get_peers_peers(self):
    """Round-trip a get_peers query and a response carrying peers."""
    # Client side: build and encode the query.
    query = m.OutgoingGetPeersQuery(tc.CLIENT_ID, tc.INFO_HASH)
    wire_query = query.encode(tc.TID)
    # Server side: decode the query and answer with peers.
    server_msg = m.IncomingMsg(wire_query)
    assert server_msg.type is m.QUERY
    response = m.OutgoingGetPeersResponse(tc.SERVER_ID, tc.TOKEN,
                                          peers=tc.PEERS)
    wire_response = response.encode(server_msg.tid)
    # Client side: decode, sanitize, and compare each (ip, port) pair.
    client_msg = m.IncomingMsg(wire_response)
    assert client_msg.type is m.RESPONSE
    client_msg.sanitize_response(query.query)
    for sent, received in zip(tc.PEERS, client_msg.peers):
        assert sent[0] == received[0]
        assert sent[1] == received[1]
def test_return_response_for_get_peers_when_no_peers(self):
    """Responder answers get_peers with closest nodes when no peers tracked."""
    # Client side: rpc_manager.sendto() encodes the query.
    outgoing = message.OutgoingGetPeersQuery(tc.CLIENT_ID, tc.NODE_ID)
    wire = outgoing.encode(tc.TID)
    # Server side: rpc_manager.datagram_received() decodes it.
    incoming = message.IncomingMsg(wire)
    assert not self.notification_callback_done
    response = self.responder.on_query_received(incoming, tc.CLIENT_ADDR)
    response_data = response.encode(incoming.tid)
    # The responder must have fired the notification callback.
    assert self.notification_callback_done
    expected = message.OutgoingGetPeersResponse(tc.SERVER_ID,
                                                self.token_m.get(),
                                                nodes2=tc.NODES)
    eq_(response_data, expected.encode(incoming.tid))
def test_get_peers_with_response(self):
    """A get_peers response updates the query's RTT and keeps its lookup."""
    # Client builds the query (destination ID unknown); it belongs to a lookup.
    gp_query = message.OutgoingGetPeersQuery(tc.CLIENT_ID, tc.INFO_HASH)
    query = Query(gp_query, node.Node(tc.SERVER_ADDR), LOOKUP_OBJ)
    # Querier.register_query would set a TID and a timeout task.
    query.tid = tc.TID
    query.timeout_task = minitwisted.Task(TIMEOUT_DELAY, None)
    query.query_ts = time.time()
    # The query is sent; the server builds its response.
    server_response = message.OutgoingGetPeersResponse(tc.SERVER_ID,
                                                       nodes=tc.NODES)
    wire = server_response.encode(tc.TID)
    time.sleep(1)  # simulate ~1s of network latency before the reply arrives
    # The client receives and decodes the bencoded message.
    decoded = message.IncomingMsg(wire, tc.SERVER_ADDR)
    query.on_response_received(decoded)
    # RTT should reflect the sleep above (just over one second).
    assert 1 < query.rtt < 1.1
    assert query.lookup_obj is LOOKUP_OBJ
def test_return_response_for_get_peers_when_peers(self):
    """Responder answers get_peers with peers when the tracker has them."""
    # Server's tracker already knows peers for this info_hash.
    for peer in tc.PEERS:
        self.tracker.put(tc.INFO_HASH, peer)
    # Client side: the querier encodes the query.
    outgoing = message.OutgoingGetPeersQuery(tc.CLIENT_ID, tc.INFO_HASH)
    wire = outgoing.encode(tc.TID)
    # Server side: rpc_manager.datagram_received() decodes;
    # rpc then hands the query to the responder.
    incoming = message.IncomingMsg(wire)
    assert not self.notification_callback_done
    response = self.responder.on_query_received(incoming, tc.CLIENT_ADDR)
    response_data = response.encode(incoming.tid)
    assert self.notification_callback_done
    expected = message.OutgoingGetPeersResponse(tc.SERVER_ID,
                                                self.token_m.get(),
                                                peers=tc.PEERS)
    eq_(response_data, expected.encode(tc.TID))
def test_msg_exhanges(self):
    """Exchange every query type with its matching response type.

    Also checks the invalid get_peers response combinations (token with
    neither nodes nor peers, and a bare ID) raise AssertionError.
    """
    def gp_query():
        # Fresh get_peers query for each exchange, as the original did.
        return m.OutgoingGetPeersQuery(tc.CLIENT_ID, tc.INFO_HASH)

    self._exchange_msgs(m.OutgoingPingQuery(tc.CLIENT_ID),
                        m.OutgoingPingResponse(tc.SERVER_ID))
    self._exchange_msgs(
        m.OutgoingFindNodeQuery(tc.CLIENT_ID, tc.TARGET_ID),
        m.OutgoingFindNodeResponse(tc.SERVER_ID, tc.NODES))
    # Valid combinations of token, nodes and peers.
    self._exchange_msgs(gp_query(),
                        m.OutgoingGetPeersResponse(tc.SERVER_ID, tc.TOKEN,
                                                   tc.NODES, tc.PEERS))
    self._exchange_msgs(gp_query(),
                        m.OutgoingGetPeersResponse(tc.SERVER_ID, tc.TOKEN,
                                                   tc.NODES))
    self._exchange_msgs(gp_query(),
                        m.OutgoingGetPeersResponse(tc.SERVER_ID, tc.TOKEN,
                                                   peers=tc.PEERS))
    # Token alone (no nodes, no peers) is rejected.
    assert_raises(AssertionError,
                  m.OutgoingGetPeersResponse, tc.SERVER_ID, tc.TOKEN)
    self._exchange_msgs(gp_query(),
                        m.OutgoingGetPeersResponse(tc.SERVER_ID,
                                                   peers=tc.PEERS))
    self._exchange_msgs(gp_query(),
                        m.OutgoingGetPeersResponse(tc.SERVER_ID,
                                                   nodes=tc.NODES))
    self._exchange_msgs(gp_query(),
                        m.OutgoingGetPeersResponse(tc.SERVER_ID,
                                                   nodes=tc.NODES,
                                                   peers=tc.PEERS))
    # A bare ID (nothing else) is rejected too.
    assert_raises(AssertionError, m.OutgoingGetPeersResponse, tc.SERVER_ID)
    self._exchange_msgs(
        m.OutgoingAnnouncePeerQuery(tc.CLIENT_ID, tc.INFO_HASH,
                                    tc.BT_PORT, tc.TOKEN),
        m.OutgoingAnnouncePeerResponse(tc.SERVER_ID))
def test_nodes2(self):
    """A response with an injected 'nodes2' field still decodes cleanly."""
    msg = m.OutgoingGetPeersResponse(tc.CLIENT_ID, peers=tc.PEERS)
    # Inject the compact nodes2 payload directly into the wire dict.
    msg._dict[m.RESPONSE][m.NODES2] = mt.compact_nodes2(tc.NODES)
    wire = msg.encode(tc.TID)
    m.IncomingMsg(wire, tc.CLIENT_ADDR)
# unknown m.QUERY is not an error at this point # responder will process it and send an errror msg if necesary self.ping_d[m.QUERY] = 'a' m.IncomingMsg(bencode.encode(self.ping_d), tc.CLIENT_ADDR) def test_announce(self): # Port must be integer self.ap_d[m.ARGS][m.PORT] = 'a' assert_raises(m.MsgError, m.IncomingMsg, bencode.encode(self.ap_d), tc.CLIENT_ADDR) b_ping_r = m.OutgoingPingResponse(tc.CLIENT_ID).encode(tc.TID) b_fn2_r = m.OutgoingFindNodeResponse(tc.CLIENT_ID, tc.NODES).encode(tc.TID) b_gp_r = m.OutgoingGetPeersResponse(tc.CLIENT_ID, tc.TOKEN, tc.NODES, peers=tc.PEERS).encode(tc.TID) b_ap_r = m.OutgoingAnnouncePeerResponse(tc.CLIENT_ID).encode(tc.TID) class TestSanitizeResponseError: def setup(self): self.ping_r = m.IncomingMsg(b_ping_r, tc.SERVER_ADDR) self.fn2_r = m.IncomingMsg(b_fn2_r, tc.SERVER_ADDR) self.gp_r = m.IncomingMsg(b_gp_r, tc.SERVER_ADDR) self.ap_r = m.IncomingMsg(b_ap_r, tc.SERVER_ADDR) ''' def _test_sanitize(self): self.ping_r.sanitize_response(m.PING)
def test_complete(self):
    """Walk a get_peers lookup through start / response / timeout events.

    NOTE(review): a bare `return` below the first timeout check makes
    everything after it unreachable (and a second `return` further down
    guards yet another dead tail). The dead sections are kept verbatim;
    they narrate the intended full scenario via the running comments.
    """
    to_send = self.lookup.start()
    # The node won't query itself
    del self.bootstrap_nodes[2]
    expected = [Query(self.get_peers_msg, n) for n in self.bootstrap_nodes]
    for result, expected in zip(to_send, expected):
        eq_(result.msg, expected.msg)
        eq_(result.dstnode, expected.dstnode)
    eq_(self.lookup.num_parallel_queries, 4)
    # Node receives a response from a node
    node_ = tc.NODES_LD_IH[158][1]
    nodes = [tc.NODES_LD_IH[156][5]]
    msg = message.OutgoingGetPeersResponse(node_.id, 'token', nodes)
    msg = msg.encode('Z')
    msg = message.IncomingMsg(msg, node_.addr)
    #print 'nodes2', msg.nodes2
    result = self.lookup.on_response_received(msg, node_)
    expected = [Query(self.get_peers_msg, nodes[0])]
    eq_(result[0].msg, expected[0].msg)
    eq_(result[0].dstnode, expected[0].dstnode)
    eq_(self.lookup.num_parallel_queries, 4)
    # Timeout
    eq_(self.lookup.on_timeout(tc.NODES_LD_IH[159][0]), [])
    eq_(self.lookup.num_parallel_queries, 3)
    return
    # ---- unreachable from here on (see docstring) ----
    """Start sends two parallel queries to the closest
    bootstrap nodes (to the INFO_HASH)
    """
    # Ongoing queries to (sorted: oldest first):
    # 155-4, 157-3,
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 158-1, 159-0
    # Notice 159-2 is kicked out from the queue
    logger.critical("")
    eq_(self.lookup.num_parallel_queries, 2)
    nodes = [tc.NODES_LD_IH[157][5],
             tc.NODES_LD_IH[152][6],
             tc.NODES_LD_IH[158][7]]
    self.lookup.on_response_received(
        *_gen_nodes_args(tc.NODES_LD_IH[157][3], nodes))
    eq_(self.lookup._get_announce_candidates(),
        [tc.NODES_LD_IH[157][3],
         ])
    # This response triggers a new query (to 152-6)
    eq_(self.lookup.num_parallel_queries, 2)
    # Ongoing queries to (sorted: oldest first):
    # 155-4, 152-6
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 157-5, 158-1, 158-7, 159-0
    self.lookup.on_timeout(tc.NODES_LD_IH[155][4])
    eq_(self.lookup.num_parallel_queries, 2)
    # This timeout triggers a new query (to 157-5)
    eq_(self.lookup.num_parallel_queries, 2)
    # Ongoing queries to (sorted: oldest first):
    # 155-4, 157-5
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 158-1, 158-7, 159-0
    self.lookup.on_timeout(tc.NODES_LD_IH[155][4])
    # This timeout triggers a new query (to 158-1)
    eq_(self.lookup.num_parallel_queries, 2)
    # Ongoing queries to (sorted: oldest first):
    # 152-6, 158-1
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 158-7, 159-0
    nodes = [tc.NODES_LD_IH[151][8], tc.NODES_LD_IH[150][9]]
    # NOTE(review): 'on_response' (not 'on_response_received') looks
    # stale relative to the rest of this test; unreachable, never fails.
    self.lookup.on_response(
        *_gen_nodes_args(tc.NODES_LD_IH[152][6], nodes))
    eq_(self.lookup._get_announce_candidates(),
        [tc.NODES_LD_IH[152][6],
         tc.NODES_LD_IH[157][3],
         ])
    # This response triggers a new query (to 150-9)
    eq_(self.lookup.num_parallel_queries, 2)
    # Ongoing queries to (sorted: oldest first):
    # 157-5, 150-9
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 151-8, 158-7, 159-0
    nodes = [tc.NODES_LD_IH[151][10],
             tc.NODES_LD_IH[151][11],
             tc.NODES_LD_IH[156][12],
             tc.NODES_LD_IH[156][13],
             ]
    self.lookup.on_response_received(
        *_gen_nodes_args(tc.NODES_LD_IH[157][5], nodes))
    eq_(self.lookup._get_announce_candidates(),
        [tc.NODES_LD_IH[152][6],
         tc.NODES_LD_IH[157][3],
         tc.NODES_LD_IH[157][5],
         ])
    # This response triggers a new query (to 151-8)
    eq_(self.lookup.num_parallel_queries, 2)
    # Ongoing queries to (sorted: oldest first):
    # 150-9, 151-8
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 151-10, 151-11, 156-12, 156-13
    # Notice that the lookup queue size limit is 4, therefore
    # 158-7 and 159-0 are removed from the queue
    self.lookup.on_error_received(None, tc.NODES_LD_IH[151][8])
    # This error triggers a new query (to 151-8)
    eq_(self.lookup.num_parallel_queries, 2)
    # Ongoing queries to (sorted: oldest first):
    # 150-9, 151-10
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 151-11, 156-12, 156-13
    self.lookup.on_timeout(tc.NODES_LD_IH[151][8])
    # This timeout triggers a new query (to 151-11)
    eq_(self.lookup.num_parallel_queries, 2)
    # Ongoing queries to (sorted: oldest first):
    # 151-10, 151-11
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 156-12, 156-13
    nodes = [tc.NODES_LD_IH[144][14],
             tc.NODES_LD_IH[145][15],
             tc.NODES_LD_IH[145][16],
             tc.NODES_LD_IH[145][17],
             ]
    self.lookup.on_response_received(
        *_gen_nodes_args(tc.NODES_LD_IH[151][10], nodes))
    eq_(self.lookup._get_announce_candidates(),
        [tc.NODES_LD_IH[151][10],
         tc.NODES_LD_IH[152][6],
         tc.NODES_LD_IH[157][3],
         ])
    # This response triggers a new query (to 144-14)
    eq_(self.lookup.num_parallel_queries, 2)
    # Ongoing queries to (sorted: oldest first):
    # 151-11, 144-14
    # Queued nodes to query (sorted by log_distance to info_hash):
    # Notice 156-13 is removed
    # 145-15, 145-16, 145-17, 156-12
    peers = [tc.NO_ADDR]
    ok_(not self.got_peers)
    self.lookup.on_response_received(
        *_gen_peers_args(tc.NODES_LD_IH[144][14], peers))
    eq_(self.lookup._get_announce_candidates(),
        [tc.NODES_LD_IH[144][14],
         tc.NODES_LD_IH[151][10],
         tc.NODES_LD_IH[152][6],
         ])
    ok_(self.got_peers)
    self.got_peers = False
    # The response with peers halves parallelism to 1.
    # No new query is triggered.
    eq_(self.lookup.num_parallel_queries, 1)
    # Ongoing queries to (sorted: oldest first):
    # 151-11
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 145-15, 145-16, 156-12
    self.lookup.on_timeout(tc.NODES_LD_IH[151][11])
    # This timeout triggers a new query (to 145-15)
    eq_(self.lookup.num_parallel_queries, 1)
    # Ongoing queries to (sorted: oldest first):
    # 145-15
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 145-16, 145-17, 156-12
    peers = [tc.NO_ADDR]
    ok_(not self.got_peers)
    self.lookup.on_response_received(
        *_gen_peers_args(tc.NODES_LD_IH[145][15], peers))
    # This response triggers a new query (to 145-16)
    # The parallelism is not halved (remains 1).
    eq_(self.lookup.num_parallel_queries, 1)
    # Ongoing queries to (sorted: oldest first):
    # 145-16
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 145-17, 156-12
    eq_(self.lookup._get_announce_candidates(),
        [tc.NODES_LD_IH[144][14],
         tc.NODES_LD_IH[145][15],
         tc.NODES_LD_IH[151][10],
         ])
    ok_(self.got_peers)
    self.got_peers = False
    self.lookup.on_timeout(tc.NODES_LD_IH[145][16])
    # This timeout triggers a new query (to 145-17)
    eq_(self.lookup.num_parallel_queries, 1)
    # Ongoing queries to (sorted: oldest first):
    # 145-17
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 156-12
    self.lookup.on_timeout(tc.NODES_LD_IH[145][17])
    # This timeout triggers a new query (to 156-12)
    return
    # ---- second unreachable tail ----
    eq_(self.lookup.num_parallel_queries, 1)
    # Ongoing queries to (sorted: oldest first):
    # 156-12
    # Queued nodes to query (sorted by log_distance to info_hash):
    #
    nodes = [tc.NODES_LD_IH[144][18],
             tc.NODES_LD_IH[145][19],
             ]
    self.lookup.on_response_received(
        *_gen_nodes_args(tc.NODES_LD_IH[156][12], nodes))
    eq_(self.lookup._get_announce_candidates(),
        [tc.NODES_LD_IH[144][14],
         tc.NODES_LD_IH[145][15],
         tc.NODES_LD_IH[151][10],
         ])
    # This response triggers a new query (to 144-18)
    eq_(self.lookup.num_parallel_queries, 1)
    # Ongoing queries to (sorted: oldest first):
    # 144-18
    # Queued nodes to query (sorted by log_distance to info_hash):
    # 145-19
    peers = [tc.NO_ADDR]
    ok_(not self.got_peers)
    self.lookup.on_response_received(
        *_gen_peers_args(tc.NODES_LD_IH[144][18], peers))
    eq_(self.lookup._get_announce_candidates(),
        [tc.NODES_LD_IH[144][14],
         tc.NODES_LD_IH[144][18],
         tc.NODES_LD_IH[145][15],
         ])
    ok_(self.got_peers)
    self.got_peers = False
    # This timeout triggers a new query (145-19)
    eq_(self.lookup.num_parallel_queries, 0)
    # Ongoing queries to (sorted: oldest first):
    # 145-19
    # Queued nodes to query (sorted by log_distance to info_hash):
    #
    ok_(not self.lookup.is_done)
    self.lookup.on_timeout(tc.NODES_LD_IH[145][19])
    # THE END
    eq_(self.lookup.num_parallel_queries, 0)
    ok_(self.lookup.is_done)
def _gen_peers_args(node_, peers):
    """Fake a peers get_peers response from node_ and decode it back.

    Returns the (incoming_msg, node) pair expected by on_response_received.
    """
    response = message.OutgoingGetPeersResponse(node_.id, tc.TOKEN,
                                                peers=peers)
    decoded = message.IncomingMsg(response.encode(tc.TID), tc.SERVER_ADDR)
    decoded.sanitize_response(message.GET_PEERS)
    return decoded, node_