async def _make_protocol(self, other_peer, node_id, address, udp_port, tcp_port):
    proto = KademliaProtocol(
        self.loop, PeerManager(self.loop), node_id, address, udp_port, tcp_port
    )
    await self.loop.create_datagram_endpoint(lambda: proto, (address, 4444))
    proto.start()
    return proto, make_kademlia_peer(node_id, address, udp_port=udp_port)
async def test_add_peer_after_handle_request(self):
    with dht_mocks.mock_network_loop(self.loop):
        node_id1 = constants.generate_id()
        node_id2 = constants.generate_id()
        node_id3 = constants.generate_id()
        node_id4 = constants.generate_id()

        peer1 = KademliaProtocol(
            self.loop, PeerManager(self.loop), node_id1, '1.2.3.4', 4444, 3333
        )
        await self.loop.create_datagram_endpoint(lambda: peer1, ('1.2.3.4', 4444))
        peer1.start()

        peer2, peer_2_from_peer_1 = await self._make_protocol(peer1, node_id2, '1.2.3.5', 4444, 3333)
        peer3, peer_3_from_peer_1 = await self._make_protocol(peer1, node_id3, '1.2.3.6', 4444, 3333)
        peer4, peer_4_from_peer_1 = await self._make_protocol(peer1, node_id4, '1.2.3.7', 4444, 3333)

        # peers who reply should be added
        await peer1.get_rpc_peer(peer_2_from_peer_1).ping()
        await asyncio.sleep(0.5)
        self.assertListEqual([peer_2_from_peer_1], peer1.routing_table.get_peers())
        peer1.routing_table.remove_peer(peer_2_from_peer_1)

        # peers not known to be good/bad should be enqueued to maybe-ping
        peer1_from_peer3 = peer3.get_rpc_peer(make_kademlia_peer(node_id1, '1.2.3.4', 4444))
        self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
        pong = await peer1_from_peer3.ping()
        self.assertEqual(b'pong', pong)
        self.assertEqual(1, len(peer1.ping_queue._pending_contacts))
        peer1.ping_queue._pending_contacts.clear()

        # peers who are already known to be good should be added
        peer1_from_peer4 = peer4.get_rpc_peer(make_kademlia_peer(node_id1, '1.2.3.4', 4444))
        peer1.peer_manager.update_contact_triple(node_id4, '1.2.3.7', 4444)
        peer1.peer_manager.report_last_replied('1.2.3.7', 4444)
        self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
        pong = await peer1_from_peer4.ping()
        self.assertEqual(b'pong', pong)
        await asyncio.sleep(0.5)
        self.assertEqual(1, len(peer1.routing_table.get_peers()))
        self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
        peer1.routing_table.buckets[0].peers.clear()

        # peers recently known to be bad should not be added or maybe-pinged
        peer1_from_peer4 = peer4.get_rpc_peer(make_kademlia_peer(node_id1, '1.2.3.4', 4444))
        peer1.peer_manager.update_contact_triple(node_id4, '1.2.3.7', 4444)
        peer1.peer_manager.report_failure('1.2.3.7', 4444)
        peer1.peer_manager.report_failure('1.2.3.7', 4444)
        self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
        pong = await peer1_from_peer4.ping()
        self.assertEqual(b'pong', pong)
        self.assertEqual(0, len(peer1.routing_table.get_peers()))
        self.assertEqual(0, len(peer1.ping_queue._pending_contacts))

        for p in [peer1, peer2, peer3, peer4]:
            p.stop()
            p.disconnect()
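
# A minimal sketch (not part of the test suite) of the three-way contract the
# test above exercises: `peer_manager.peer_is_good` returns True (replied
# recently), None (unknown), or False (recently failed), and the protocol
# reacts by adding the sender, maybe-pinging it, or ignoring it. The names
# `routing_table`, `ping_queue`, and `peer_is_good` come from the code above;
# `handle_sender` and `add_peer` are hypothetical stand-ins for illustration.

def handle_sender(protocol, peer):
    """Illustrative dispatch on sender quality, mirroring the asserts above."""
    is_good = protocol.peer_manager.peer_is_good(peer)
    if is_good:
        # known-good sender: goes straight into the routing table
        protocol.add_peer(peer)
    elif is_good is None:
        # unknown sender: queue a ping; it is only added if it answers
        protocol.ping_queue.enqueue_maybe_ping(peer)
    # is_good is False: recently failed, neither added nor pinged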
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes,
             udp_port: int, internal_udp_port: int, peer_port: int, external_ip: str,
             rpc_timeout: float = constants.rpc_timeout,
             split_buckets_under_index: int = constants.split_buckets_under_index):
    self.loop = loop
    self.internal_udp_port = internal_udp_port
    self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port,
                                     rpc_timeout, split_buckets_under_index)
    self.listening_port: asyncio.DatagramTransport = None
    self.joined = asyncio.Event(loop=self.loop)
    self._join_task: asyncio.Task = None
    self._refresh_task: asyncio.Task = None
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes,
             udp_port: int, internal_udp_port: int, peer_port: int, external_ip: str,
             rpc_timeout: float = constants.RPC_TIMEOUT,
             split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX,
             storage: typing.Optional['SQLiteStorage'] = None):
    self.loop = loop
    self.internal_udp_port = internal_udp_port
    self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port,
                                     rpc_timeout, split_buckets_under_index)
    self.listening_port: asyncio.DatagramTransport = None
    self.joined = asyncio.Event(loop=self.loop)
    self._join_task: asyncio.Task = None
    self._refresh_task: asyncio.Task = None
    self._storage = storage
class Node:
    def __init__(self, loop: asyncio.BaseEventLoop, peer_manager: 'PeerManager', node_id: bytes,
                 udp_port: int, internal_udp_port: int, peer_port: int, external_ip: str,
                 rpc_timeout: float = constants.rpc_timeout,
                 split_buckets_under_index: int = constants.split_buckets_under_index):
        self.loop = loop
        self.internal_udp_port = internal_udp_port
        self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port,
                                         rpc_timeout, split_buckets_under_index)
        self.listening_port: asyncio.DatagramTransport = None
        self.joined = asyncio.Event(loop=self.loop)
        self._join_task: asyncio.Task = None
        self._refresh_task: asyncio.Task = None

    async def refresh_node(self, force_once=False):
        while True:
            # remove peers with expired blob announcements from the datastore
            self.protocol.data_store.removed_expired_peers()

            total_peers: typing.List['KademliaPeer'] = []
            # add all peers in the routing table
            total_peers.extend(self.protocol.routing_table.get_peers())
            # add all the peers who have announced blobs to us
            total_peers.extend(self.protocol.data_store.get_storing_contacts())

            # get ids falling in the midpoint of each bucket that hasn't been recently updated
            node_ids = self.protocol.routing_table.get_refresh_list(0, True)
            # if we have 3 or fewer populated buckets get two random ids in the range of each to try and
            # populate/split the buckets further
            buckets_with_contacts = self.protocol.routing_table.buckets_with_contacts()
            if buckets_with_contacts <= 3:
                for i in range(buckets_with_contacts):
                    node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
                    node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))

            if self.protocol.routing_table.get_peers():
                # if we have node ids to look up, perform the iterative search until we have k results
                while node_ids:
                    peers = await self.peer_search(node_ids.pop())
                    total_peers.extend(peers)
            else:
                if force_once:
                    break
                fut = asyncio.Future(loop=self.loop)
                self.loop.call_later(constants.refresh_interval // 4, fut.set_result, None)
                await fut
                continue

            # ping the set of peers; upon success/failure the routing table and last replied/failed time
            # will be updated
            to_ping = [peer for peer in set(total_peers) if self.protocol.peer_manager.peer_is_good(peer) is not True]
            if to_ping:
                self.protocol.ping_queue.enqueue_maybe_ping(*to_ping, delay=0)

            if force_once:
                break
            fut = asyncio.Future(loop=self.loop)
            self.loop.call_later(constants.refresh_interval, fut.set_result, None)
            await fut

    async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
        hash_value = binascii.unhexlify(blob_hash.encode())
        assert len(hash_value) == constants.hash_length
        peers = await self.peer_search(hash_value)

        if not self.protocol.external_ip:
            raise Exception("Cannot determine external IP")
        log.debug("Store to %i peers", len(peers))
        for peer in peers:
            log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
        stored_to_tup = await asyncio.gather(
            *(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
        )
        stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
        if stored_to:
            log.info("Stored %s to %i of %i attempted peers", binascii.hexlify(hash_value).decode()[:8],
                     len(stored_to), len(peers))
        else:
            log.warning("Failed announcing %s, stored to 0 peers", blob_hash[:8])
        return stored_to

    def stop(self) -> None:
        if self.joined.is_set():
            self.joined.clear()
        if self._join_task:
            self._join_task.cancel()
        if self._refresh_task and not (self._refresh_task.done() or self._refresh_task.cancelled()):
            self._refresh_task.cancel()
        if self.protocol and self.protocol.ping_queue.running:
            self.protocol.ping_queue.stop()
            self.protocol.stop()
        if self.listening_port is not None:
            self.listening_port.close()
        self._join_task = None
        self.listening_port = None
        log.info("Stopped DHT node")

    async def start_listening(self, interface: str = '') -> None:
        if not self.listening_port:
            self.listening_port, _ = await self.loop.create_datagram_endpoint(
                lambda: self.protocol, (interface, self.internal_udp_port)
            )
            log.info("DHT node listening on UDP %s:%i", interface, self.internal_udp_port)
            self.protocol.start()
        else:
            log.warning("Already bound to port %s", self.listening_port)

    async def join_network(self, interface: typing.Optional[str] = '',
                           known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
        if not self.listening_port:
            await self.start_listening(interface)
        self.protocol.ping_queue.start()
        self._refresh_task = self.loop.create_task(self.refresh_node())

        # resolve the known node urls
        known_node_addresses = []
        url_to_addr = {}
        if known_node_urls:
            for host, port in known_node_urls:
                address = await resolve_host(host, port, proto='udp')
                if (address, port) not in known_node_addresses and \
                        (address, port) != (self.protocol.external_ip, self.protocol.udp_port):
                    known_node_addresses.append((address, port))
                    url_to_addr[address] = host

        if known_node_addresses:
            peers = [
                KademliaPeer(self.loop, address, udp_port=port)
                for (address, port) in known_node_addresses
            ]
            while True:
                if not self.protocol.routing_table.get_peers():
                    if self.joined.is_set():
                        self.joined.clear()
                    self.protocol.peer_manager.reset()
                    self.protocol.ping_queue.enqueue_maybe_ping(*peers, delay=0.0)
                    peers.extend(await self.peer_search(self.protocol.node_id, shortlist=peers, count=32))
                    if self.protocol.routing_table.get_peers():
                        self.joined.set()
                        log.info("Joined DHT, %i peers known in %i buckets",
                                 len(self.protocol.routing_table.get_peers()),
                                 self.protocol.routing_table.buckets_with_contacts())
                    else:
                        continue
                await asyncio.sleep(1, loop=self.loop)

        log.info("Joined DHT, %i peers known in %i buckets", len(self.protocol.routing_table.get_peers()),
                 self.protocol.routing_table.buckets_with_contacts())
        self.joined.set()

    def start(self, interface: str, known_node_urls: typing.List[typing.Tuple[str, int]]):
        self._join_task = self.loop.create_task(
            self.join_network(interface=interface, known_node_urls=known_node_urls)
        )

    def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
                                  bottom_out_limit: int = constants.bottom_out_limit,
                                  max_results: int = constants.k) -> IterativeNodeFinder:
        return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
                                   key, bottom_out_limit, max_results, None, shortlist)

    def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
                                   bottom_out_limit: int = 40,
                                   max_results: int = -1) -> IterativeValueFinder:
        return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
                                    key, bottom_out_limit, max_results, None, shortlist)

    async def peer_search(self, node_id: bytes, count=constants.k, max_results=constants.k * 2,
                          bottom_out_limit=20,
                          shortlist: typing.Optional[typing.List['KademliaPeer']] = None
                          ) -> typing.List['KademliaPeer']:
        peers = []
        async for iteration_peers in self.get_iterative_node_finder(
                node_id, shortlist=shortlist, bottom_out_limit=bottom_out_limit, max_results=max_results):
            peers.extend(iteration_peers)
        distance = Distance(node_id)
        peers.sort(key=lambda peer: distance(peer.node_id))
        return peers[:count]

    async def _accumulate_search_junction(self, search_queue: asyncio.Queue, result_queue: asyncio.Queue):
        tasks = []
        try:
            while True:
                blob_hash = await search_queue.get()
                tasks.append(self.loop.create_task(self._value_producer(blob_hash, result_queue)))
        finally:
            for task in tasks:
                task.cancel()

    async def _value_producer(self, blob_hash: str, result_queue: asyncio.Queue):
        async for results in self.get_iterative_value_finder(binascii.unhexlify(blob_hash.encode())):
            result_queue.put_nowait(results)

    def accumulate_peers(self, search_queue: asyncio.Queue,
                         peer_queue: typing.Optional[asyncio.Queue] = None
                         ) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
        q = peer_queue or asyncio.Queue(loop=self.loop)
        return q, self.loop.create_task(self._accumulate_search_junction(search_queue, q))
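
# A rough usage sketch for the Node class above: construct it, join the
# network, announce a blob, and shut down. The IP, ports, bootstrap hostname,
# and the 96-character hex hash are placeholders, not values from the source.

async def run_node_example(loop: asyncio.AbstractEventLoop):
    node = Node(loop, PeerManager(loop), constants.generate_id(),
                udp_port=4444, internal_udp_port=4444, peer_port=3333,
                external_ip='1.2.3.4')
    # schedules join_network() as a background task
    node.start('0.0.0.0', known_node_urls=[('dht.example.com', 4444)])
    await node.joined.wait()  # set once the routing table has peers
    try:
        # announce_blob expects the hex encoding of a 48-byte blob hash
        await node.announce_blob('ab' * 48)
    finally:
        node.stop()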
class Node:
    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes,
                 udp_port: int, internal_udp_port: int, peer_port: int, external_ip: str,
                 rpc_timeout: float = constants.RPC_TIMEOUT,
                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX,
                 storage: typing.Optional['SQLiteStorage'] = None):
        self.loop = loop
        self.internal_udp_port = internal_udp_port
        self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port,
                                         rpc_timeout, split_buckets_under_index)
        self.listening_port: asyncio.DatagramTransport = None
        self.joined = asyncio.Event(loop=self.loop)
        self._join_task: asyncio.Task = None
        self._refresh_task: asyncio.Task = None
        self._storage = storage

    async def refresh_node(self, force_once=False):
        while True:
            # remove peers with expired blob announcements from the datastore
            self.protocol.data_store.removed_expired_peers()

            total_peers: typing.List['KademliaPeer'] = []
            # add all peers in the routing table
            total_peers.extend(self.protocol.routing_table.get_peers())
            # add all the peers who have announced blobs to us
            total_peers.extend(self.protocol.data_store.get_storing_contacts())

            # get ids falling in the midpoint of each bucket that hasn't been recently updated
            node_ids = self.protocol.routing_table.get_refresh_list(0, True)
            # if we have 3 or fewer populated buckets get two random ids in the range of each to try and
            # populate/split the buckets further
            buckets_with_contacts = self.protocol.routing_table.buckets_with_contacts()
            if buckets_with_contacts <= 3:
                for i in range(buckets_with_contacts):
                    node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
                    node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))

            if self.protocol.routing_table.get_peers():
                # if we have node ids to look up, perform the iterative search until we have k results
                while node_ids:
                    peers = await self.peer_search(node_ids.pop())
                    total_peers.extend(peers)
            else:
                if force_once:
                    break
                fut = asyncio.Future(loop=self.loop)
                self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
                await fut
                continue

            # ping the set of peers; upon success/failure the routing table and last replied/failed time
            # will be updated
            to_ping = [peer for peer in set(total_peers) if self.protocol.peer_manager.peer_is_good(peer) is not True]
            if to_ping:
                self.protocol.ping_queue.enqueue_maybe_ping(*to_ping, delay=0)
            if self._storage:
                await self._storage.save_kademlia_peers(self.protocol.routing_table.get_peers())
            if force_once:
                break

            fut = asyncio.Future(loop=self.loop)
            self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
            await fut

    async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
        hash_value = binascii.unhexlify(blob_hash.encode())
        assert len(hash_value) == constants.HASH_LENGTH
        peers = await self.peer_search(hash_value)

        if not self.protocol.external_ip:
            raise Exception("Cannot determine external IP")
        log.debug("Store to %i peers", len(peers))
        for peer in peers:
            log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
        stored_to_tup = await asyncio.gather(
            *(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
        )
        stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
        if stored_to:
            log.debug("Stored %s to %i of %i attempted peers", binascii.hexlify(hash_value).decode()[:8],
                      len(stored_to), len(peers))
        else:
            log.debug("Failed announcing %s, stored to 0 peers", blob_hash[:8])
        return stored_to

    def stop(self) -> None:
        if self.joined.is_set():
            self.joined.clear()
        if self._join_task:
            self._join_task.cancel()
        if self._refresh_task and not (self._refresh_task.done() or self._refresh_task.cancelled()):
            self._refresh_task.cancel()
        if self.protocol and self.protocol.ping_queue.running:
            self.protocol.ping_queue.stop()
            self.protocol.stop()
        if self.listening_port is not None:
            self.listening_port.close()
        self._join_task = None
        self.listening_port = None
        log.info("Stopped DHT node")

    async def start_listening(self, interface: str = '0.0.0.0') -> None:
        if not self.listening_port:
            self.listening_port, _ = await self.loop.create_datagram_endpoint(
                lambda: self.protocol, (interface, self.internal_udp_port)
            )
            log.info("DHT node listening on UDP %s:%i", interface, self.internal_udp_port)
            self.protocol.start()
        else:
            log.warning("Already bound to port %s", self.listening_port)

    async def join_network(self, interface: str = '0.0.0.0',
                           known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
        def peers_from_urls(urls: typing.Optional[typing.List[typing.Tuple[bytes, str, int, int]]]):
            peer_addresses = []
            for node_id, address, udp_port, tcp_port in urls:
                if (node_id, address, udp_port, tcp_port) not in peer_addresses and \
                        (address, udp_port) != (self.protocol.external_ip, self.protocol.udp_port):
                    peer_addresses.append((node_id, address, udp_port, tcp_port))
            return [make_kademlia_peer(*peer_address) for peer_address in peer_addresses]

        if not self.listening_port:
            await self.start_listening(interface)
        self.protocol.ping_queue.start()
        self._refresh_task = self.loop.create_task(self.refresh_node())

        while True:
            if self.protocol.routing_table.get_peers():
                if not self.joined.is_set():
                    self.joined.set()
                    log.info("joined dht, %i peers known in %i buckets",
                             len(self.protocol.routing_table.get_peers()),
                             self.protocol.routing_table.buckets_with_contacts())
            else:
                if self.joined.is_set():
                    self.joined.clear()
                seed_peers = peers_from_urls(
                    await self._storage.get_persisted_kademlia_peers()
                ) if self._storage else []
                if not seed_peers:
                    try:
                        seed_peers.extend(peers_from_urls([
                            (None, await resolve_host(address, udp_port, 'udp'), udp_port, None)
                            for address, udp_port in known_node_urls or []
                        ]))
                    except socket.gaierror:
                        await asyncio.sleep(30, loop=self.loop)
                        continue

                self.protocol.peer_manager.reset()
                self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
                await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)

            await asyncio.sleep(1, loop=self.loop)

    def start(self, interface: str,
              known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
        self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))

    def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
                                  bottom_out_limit: int = constants.BOTTOM_OUT_LIMIT,
                                  max_results: int = constants.K) -> IterativeNodeFinder:
        return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
                                   key, bottom_out_limit, max_results, None, shortlist)

    def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
                                   bottom_out_limit: int = 40,
                                   max_results: int = -1) -> IterativeValueFinder:
        return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
                                    key, bottom_out_limit, max_results, None, shortlist)

    async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
                          bottom_out_limit=20,
                          shortlist: typing.Optional[typing.List['KademliaPeer']] = None
                          ) -> typing.List['KademliaPeer']:
        peers = []
        async for iteration_peers in self.get_iterative_node_finder(
                node_id, shortlist=shortlist, bottom_out_limit=bottom_out_limit, max_results=max_results):
            peers.extend(iteration_peers)
        distance = Distance(node_id)
        peers.sort(key=lambda peer: distance(peer.node_id))
        return peers[:count]

    async def _accumulate_peers_for_value(self, search_queue: asyncio.Queue, result_queue: asyncio.Queue):
        tasks = []
        try:
            while True:
                blob_hash = await search_queue.get()
                tasks.append(self.loop.create_task(self._peers_for_value_producer(blob_hash, result_queue)))
        finally:
            for task in tasks:
                task.cancel()

    async def _peers_for_value_producer(self, blob_hash: str, result_queue: asyncio.Queue):
        async def put_into_result_queue_after_pong(_peer):
            try:
                await self.protocol.get_rpc_peer(_peer).ping()
                result_queue.put_nowait([_peer])
                log.debug("pong from %s:%i for %s", _peer.address, _peer.udp_port, blob_hash)
            except asyncio.TimeoutError:
                pass

        # prioritize peers who reply to a dht ping first
        # this minimizes attempts to make tcp connections to dead or unreachable peers
        async for results in self.get_iterative_value_finder(binascii.unhexlify(blob_hash.encode())):
            to_put = []
            for peer in results:
                if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
                    continue
                is_good = self.protocol.peer_manager.peer_is_good(peer)
                if is_good:
                    # the peer has replied recently over UDP, it can probably be reached on the TCP port
                    to_put.append(peer)
                elif is_good is None:
                    if not peer.udp_port:
                        # TODO: use the same port for TCP and UDP
                        # the udp port must be guessed
                        # default to the ports being the same. if the TCP port appears to be the <=0.48.0
                        # default, including on a network with several nodes, then assume the udp port is
                        # offset proportionally from a starting port of 4444
                        udp_port_to_try = peer.tcp_port
                        if 3400 > peer.tcp_port > 3332:
                            udp_port_to_try = (peer.tcp_port - 3333) + 4444
                        self.loop.create_task(put_into_result_queue_after_pong(
                            make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
                        ))
                    else:
                        self.loop.create_task(put_into_result_queue_after_pong(peer))
                else:
                    # the peer is known to be bad/unreachable, skip trying to connect to it over TCP
                    log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
            if to_put:
                result_queue.put_nowait(to_put)

    def accumulate_peers(self, search_queue: asyncio.Queue,
                         peer_queue: typing.Optional[asyncio.Queue] = None
                         ) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
        queue = peer_queue or asyncio.Queue(loop=self.loop)
        return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
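
# The UDP port guess in `_peers_for_value_producer` above can be factored out
# to show the arithmetic. `guess_udp_port` is a hypothetical helper, but the
# constants (TCP starting port 3333, UDP starting port 4444, the 3333..3399
# window) are taken directly from the code above.

def guess_udp_port(tcp_port: int) -> int:
    """Guess a peer's UDP port from its known TCP port."""
    if 3400 > tcp_port > 3332:
        # pre-0.48.0 style deployment: the nth node on a host used
        # TCP 3333+n and UDP 4444+n, so carry the offset over
        return (tcp_port - 3333) + 4444
    # otherwise default to the ports being the same
    return tcp_port

assert guess_udp_port(3333) == 4444
assert guess_udp_port(3335) == 4446   # offset of 2 carried over
assert guess_udp_port(5567) == 5567   # outside the window: assume same port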
async def test_store_to_peer(self):
    loop = asyncio.get_event_loop()
    with dht_mocks.mock_network_loop(loop):
        node_id1 = constants.generate_id()
        peer1 = KademliaProtocol(loop, PeerManager(loop), node_id1, '1.2.3.4', 4444, 3333)
        peer2 = KademliaProtocol(loop, PeerManager(loop), constants.generate_id(), '1.2.3.5', 4444, 3333)
        await loop.create_datagram_endpoint(lambda: peer1, ('1.2.3.4', 4444))
        await loop.create_datagram_endpoint(lambda: peer2, ('1.2.3.5', 4444))

        peer = make_kademlia_peer(node_id1, '1.2.3.4', udp_port=4444)
        peer2_from_peer1 = make_kademlia_peer(peer2.node_id, peer2.external_ip, udp_port=peer2.udp_port)
        peer2_from_peer1.update_tcp_port(3333)
        peer3 = make_kademlia_peer(constants.generate_id(), '1.2.3.6', udp_port=4444)

        store_result = await peer2.store_to_peer(b'2' * 48, peer)
        self.assertEqual(store_result[0], peer.node_id)
        self.assertEqual(True, store_result[1])
        self.assertEqual(True, peer1.data_store.has_peers_for_blob(b'2' * 48))
        self.assertEqual(False, peer1.data_store.has_peers_for_blob(b'3' * 48))
        self.assertListEqual([peer2_from_peer1], peer1.data_store.get_storing_contacts())

        peer1.data_store.completed_blobs.add(binascii.hexlify(b'2' * 48).decode())
        find_value_response = peer1.node_rpc.find_value(peer3, b'2' * 48)
        self.assertEqual(len(find_value_response[b'contacts']), 0)
        self.assertSetEqual(
            {b'2' * 48, b'token', b'protocolVersion', b'contacts', b'p'}, set(find_value_response.keys())
        )
        self.assertEqual(2, len(find_value_response[b'2' * 48]))
        self.assertEqual(find_value_response[b'2' * 48][0], peer2_from_peer1.compact_address_tcp())
        self.assertDictEqual(bdecode(bencode(find_value_response)), find_value_response)

        find_value_page_above_pages_response = peer1.node_rpc.find_value(peer3, b'2' * 48, page=10)
        self.assertNotIn(b'2' * 48, find_value_page_above_pages_response)

        peer1.stop()
        peer2.stop()
        peer1.disconnect()
        peer2.disconnect()
async def test_update_token(self):
    loop = asyncio.get_event_loop()
    with dht_mocks.mock_network_loop(loop):
        node_id1 = constants.generate_id()
        peer1 = KademliaProtocol(loop, PeerManager(loop), node_id1, '1.2.3.4', 4444, 3333)
        peer2 = KademliaProtocol(loop, PeerManager(loop), constants.generate_id(), '1.2.3.5', 4444, 3333)
        await loop.create_datagram_endpoint(lambda: peer1, ('1.2.3.4', 4444))
        await loop.create_datagram_endpoint(lambda: peer2, ('1.2.3.5', 4444))

        peer = make_kademlia_peer(node_id1, '1.2.3.4', udp_port=4444)
        self.assertEqual(None, peer2.peer_manager.get_node_token(peer.node_id))
        await peer2.get_rpc_peer(peer).find_value(b'1' * 48)
        self.assertNotEqual(None, peer2.peer_manager.get_node_token(peer.node_id))

        peer1.stop()
        peer2.stop()
        peer1.disconnect()
        peer2.disconnect()
async def test_ping(self):
    loop = asyncio.get_event_loop()
    with dht_mocks.mock_network_loop(loop):
        node_id1 = constants.generate_id()
        peer1 = KademliaProtocol(loop, PeerManager(loop), node_id1, '1.2.3.4', 4444, 3333)
        peer2 = KademliaProtocol(loop, PeerManager(loop), constants.generate_id(), '1.2.3.5', 4444, 3333)
        await loop.create_datagram_endpoint(lambda: peer1, ('1.2.3.4', 4444))
        await loop.create_datagram_endpoint(lambda: peer2, ('1.2.3.5', 4444))

        peer = make_kademlia_peer(node_id1, '1.2.3.4', udp_port=4444)
        result = await peer2.get_rpc_peer(peer).ping()
        self.assertEqual(result, b'pong')

        peer1.stop()
        peer2.stop()
        peer1.disconnect()
        peer2.disconnect()
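
# The tests above all run inside `dht_mocks.mock_network_loop`, which (as used
# here) must route UDP datagrams in memory instead of touching the real
# network. A minimal sketch of that idea, assuming transports are keyed by
# (address, port) tuples; this is an illustration, not the real mock.

import contextlib

@contextlib.contextmanager
def in_memory_udp(loop: asyncio.AbstractEventLoop):
    protocols = {}  # (address, port) -> asyncio.DatagramProtocol

    class _Transport(asyncio.DatagramTransport):
        def __init__(self, addr):
            super().__init__()
            self._addr = addr

        def sendto(self, data, dest=None):
            # deliver directly to the destination protocol, tagging the sender
            if dest in protocols:
                protocols[dest].datagram_received(data, self._addr)

        def close(self):
            protocols.pop(self._addr, None)

    async def create_datagram_endpoint(proto_factory, addr):
        protocol = proto_factory()
        transport = _Transport(addr)
        protocols[addr] = protocol
        protocol.connection_made(transport)
        return transport, protocol

    original = loop.create_datagram_endpoint
    loop.create_datagram_endpoint = create_datagram_endpoint
    try:
        yield
    finally:
        loop.create_datagram_endpoint = original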