async def test_popular_blob(self):
    """Announce a blob from many peers and verify find_value pagination.

    Fix: peers seen on earlier pages are accumulated with ``seen.update``.
    The original used ``seen.intersection_update(page_x_set)``, which — since
    the preceding assertion proves the page is disjoint from ``seen`` —
    emptied ``seen`` after the first extra page and made every later
    cross-page disjointness assertion vacuously true.
    """
    peer_count = 150
    addresses = [
        (constants.generate_id(i + 1), socket.inet_ntoa(int(i + 1).to_bytes(length=4, byteorder='big')))
        for i in range(peer_count)
    ]
    blob_hash = b'1' * 48
    async with self._test_network_context(peer_addresses=addresses):
        total_seen = set()
        announced_to = self.nodes[0]
        for i in range(1, peer_count):
            node = self.nodes[i]
            kad_peer = make_kademlia_peer(node.protocol.node_id, node.protocol.external_ip,
                                          node.protocol.udp_port)
            await announced_to.protocol._add_peer(kad_peer)
            peer = node.protocol.get_rpc_peer(
                make_kademlia_peer(announced_to.protocol.node_id, announced_to.protocol.external_ip,
                                   announced_to.protocol.udp_port))
            response = await peer.store(blob_hash)
            self.assertEqual(response, b'OK')
            peers_for_blob = await peer.find_value(blob_hash, 0)
            if i == 1:
                # the first announcer sees no other peers storing the blob yet
                self.assertNotIn(blob_hash, peers_for_blob)
                self.assertEqual(peers_for_blob[b'p'], 0)
            else:
                self.assertEqual(len(peers_for_blob[blob_hash]), min(i - 1, constants.k))
                self.assertEqual(len(announced_to.protocol.data_store.get_peers_for_blob(blob_hash)), i)
            if i - 1 > constants.k:
                self.assertEqual(len(peers_for_blob[b'contacts']), constants.k)
                self.assertEqual(peers_for_blob[b'p'], ((i - 1) // (constants.k + 1)) + 1)
                seen = set(peers_for_blob[blob_hash])
                self.assertEqual(len(seen), constants.k)
                self.assertEqual(len(peers_for_blob[blob_hash]), len(seen))
                for pg in range(1, peers_for_blob[b'p']):
                    page_x = await peer.find_value(blob_hash, pg)
                    self.assertNotIn(b'contacts', page_x)
                    page_x_set = set(page_x[blob_hash])
                    self.assertEqual(len(page_x[blob_hash]), len(page_x_set))
                    self.assertGreater(len(page_x_set), 0)
                    # every page must be disjoint from all previously seen peers
                    self.assertSetEqual(seen.intersection(page_x_set), set())
                    seen.update(page_x_set)  # was seen.intersection_update(page_x_set)
                    total_seen.update(page_x_set)
            else:
                self.assertEqual(len(peers_for_blob[b'contacts']), i - 1)
        self.assertEqual(len(total_seen), peer_count - 2)
async def test_add_peer_after_handle_request(self):
    """Verify how handling an incoming request affects the routing table and ping queue.

    Covers four cases for the requesting contact:
    1. a peer that replies to our ping is added to the routing table
    2. a peer of unknown quality is enqueued for a maybe-ping
    3. a peer already known to be good is added directly
    4. a peer that recently failed is neither added nor enqueued
    """
    with dht_mocks.mock_network_loop(self.loop):
        node_id1 = constants.generate_id()
        node_id2 = constants.generate_id()
        node_id3 = constants.generate_id()
        node_id4 = constants.generate_id()
        peer1 = KademliaProtocol(
            self.loop, PeerManager(self.loop), node_id1, '1.2.3.4', 4444, 3333
        )
        await self.loop.create_datagram_endpoint(lambda: peer1, ('1.2.3.4', 4444))
        peer1.start()
        peer2, peer_2_from_peer_1 = await self._make_protocol(peer1, node_id2, '1.2.3.5', 4444, 3333)
        peer3, peer_3_from_peer_1 = await self._make_protocol(peer1, node_id3, '1.2.3.6', 4444, 3333)
        peer4, peer_4_from_peer_1 = await self._make_protocol(peer1, node_id4, '1.2.3.7', 4444, 3333)

        # peers who reply should be added
        await peer1.get_rpc_peer(peer_2_from_peer_1).ping()
        await asyncio.sleep(0.5)  # give the add-peer task time to run
        self.assertListEqual([peer_2_from_peer_1], peer1.routing_table.get_peers())
        peer1.routing_table.remove_peer(peer_2_from_peer_1)

        # peers not known to be good/bad should be enqueued to maybe-ping
        peer1_from_peer3 = peer3.get_rpc_peer(make_kademlia_peer(node_id1, '1.2.3.4', 4444))
        self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
        pong = await peer1_from_peer3.ping()
        self.assertEqual(b'pong', pong)
        self.assertEqual(1, len(peer1.ping_queue._pending_contacts))
        peer1.ping_queue._pending_contacts.clear()

        # peers who are already good should be added
        peer1_from_peer4 = peer4.get_rpc_peer(make_kademlia_peer(node_id1, '1.2.3.4', 4444))
        peer1.peer_manager.update_contact_triple(node_id4,'1.2.3.7', 4444)
        peer1.peer_manager.report_last_replied('1.2.3.7', 4444)
        self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
        pong = await peer1_from_peer4.ping()
        self.assertEqual(b'pong', pong)
        await asyncio.sleep(0.5)
        self.assertEqual(1, len(peer1.routing_table.get_peers()))
        self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
        peer1.routing_table.buckets[0].peers.clear()

        # peers who are known to be bad recently should not be added or maybe-pinged
        peer1_from_peer4 = peer4.get_rpc_peer(make_kademlia_peer(node_id1, '1.2.3.4', 4444))
        peer1.peer_manager.update_contact_triple(node_id4,'1.2.3.7', 4444)
        # two failures in a row mark the contact as bad
        peer1.peer_manager.report_failure('1.2.3.7', 4444)
        peer1.peer_manager.report_failure('1.2.3.7', 4444)
        self.assertEqual(0, len(peer1.ping_queue._pending_contacts))
        pong = await peer1_from_peer4.ping()
        self.assertEqual(b'pong', pong)
        self.assertEqual(0, len(peer1.routing_table.get_peers()))
        self.assertEqual(0, len(peer1.ping_queue._pending_contacts))

        for p in [peer1, peer2, peer3, peer4]:
            p.stop()
            p.disconnect()
def setUp(self):
    """Prepare an event loop, a peer manager, three node ids and two known contacts."""
    loop = asyncio.get_event_loop()
    self.loop = loop
    self.peer_manager = PeerManager(loop)
    self.node_ids = [generate_id() for _ in range(3)]
    self.first_contact = make_kademlia_peer(self.node_ids[1], '127.0.0.1', udp_port=1000)
    self.second_contact = make_kademlia_peer(self.node_ids[0], '192.168.0.1', udp_port=1000)
async def test_store_to_peer(self):
    """Store a blob announcement on a remote node, then read it back with find_value.

    Exercises store_to_peer end to end over the mocked network and checks the
    wire shape of the find_value response, including the out-of-range page case.
    """
    loop = asyncio.get_event_loop()
    with dht_mocks.mock_network_loop(loop):
        node_id1 = constants.generate_id()
        peer1 = KademliaProtocol(loop, PeerManager(loop), node_id1, '1.2.3.4', 4444, 3333)
        peer2 = KademliaProtocol(loop, PeerManager(loop), constants.generate_id(), '1.2.3.5', 4444, 3333)
        await loop.create_datagram_endpoint(lambda: peer1, ('1.2.3.4', 4444))
        await loop.create_datagram_endpoint(lambda: peer2, ('1.2.3.5', 4444))
        peer = make_kademlia_peer(node_id1, '1.2.3.4', udp_port=4444)
        peer2_from_peer1 = make_kademlia_peer(peer2.node_id, peer2.external_ip, udp_port=peer2.udp_port)
        peer2_from_peer1.update_tcp_port(3333)
        peer3 = make_kademlia_peer(constants.generate_id(), '1.2.3.6', udp_port=4444)

        # peer2 announces the blob to peer1; peer1 should now track peer2 as a host
        store_result = await peer2.store_to_peer(b'2' * 48, peer)
        self.assertEqual(store_result[0], peer.node_id)
        self.assertEqual(True, store_result[1])
        self.assertEqual(True, peer1.data_store.has_peers_for_blob(b'2' * 48))
        self.assertEqual(False, peer1.data_store.has_peers_for_blob(b'3' * 48))
        self.assertListEqual([peer2_from_peer1], peer1.data_store.get_storing_contacts())
        peer1.data_store.completed_blobs.add(binascii.hexlify(b'2' * 48).decode())

        # a find_value from a third peer should return peer2's compact address
        find_value_response = peer1.node_rpc.find_value(peer3, b'2' * 48)
        self.assertEqual(len(find_value_response[b'contacts']), 0)
        self.assertSetEqual(
            {b'2' * 48, b'token', b'protocolVersion', b'contacts', b'p'},
            set(find_value_response.keys()))
        self.assertEqual(2, len(find_value_response[b'2' * 48]))
        self.assertEqual(find_value_response[b'2' * 48][0], peer2_from_peer1.compact_address_tcp())
        # the response must survive a bencode round trip unchanged
        self.assertDictEqual(bdecode(bencode(find_value_response)), find_value_response)

        # requesting a page beyond the last one returns no blob entries
        find_value_page_above_pages_response = peer1.node_rpc.find_value(peer3, b'2' * 48, page=10)
        self.assertNotIn(b'2' * 48, find_value_page_above_pages_response)

        peer1.stop()
        peer2.stop()
        peer1.disconnect()
        peer2.disconnect()
def test_add_peer(self):
    """Exercise KBucket.add_peer: insert, re-insert, port update, capacity limit and LRU move."""
    original = make_kademlia_peer(constants.generate_id(2), "1.2.3.4", udp_port=4444)
    replacement = make_kademlia_peer(constants.generate_id(2), "1.2.3.4", udp_port=4445)
    self.assertListEqual([], self.kbucket.peers)

    # first insertion
    self.kbucket.add_peer(original)
    self.assertListEqual([original], self.kbucket.peers)

    # re-adding the same object keeps a single entry
    self.kbucket.add_peer(original)
    self.assertListEqual([original], self.kbucket.peers)
    self.assertEqual(4444, self.kbucket.peers[0].udp_port)

    # a new object with the same id/address but another port replaces the entry
    self.kbucket.add_peer(replacement)
    self.assertListEqual([replacement], self.kbucket.peers)
    self.assertEqual(4445, self.kbucket.peers[0].udp_port)

    # mutating the stored object's port and re-adding keeps it as the single entry
    replacement.udp_port = 4444
    self.kbucket.add_peer(replacement)
    self.assertListEqual([replacement], self.kbucket.peers)
    self.assertEqual(4444, self.kbucket.peers[0].udp_port)
    self.kbucket.peers.clear()

    # an empty bucket accepts exactly k contacts, in order
    for position in range(constants.k):
        contact = make_kademlia_peer(generate_id(), next(self.address_generator), 4444)
        self.assertTrue(self.kbucket.add_peer(contact))
        self.assertEqual(contact, self.kbucket.peers[position])

    # a full bucket rejects a new contact
    overflow = make_kademlia_peer(generate_id(), next(self.address_generator), 4444)
    self.assertFalse(self.kbucket.add_peer(overflow))

    # re-adding an existing contact moves it to the tail (most recently seen)
    known = self.kbucket.peers[0]
    self.assertTrue(self.kbucket.add_peer(known))
    self.assertEqual(known, self.kbucket.peers[-1])
def handle_request_datagram(self, address: typing.Tuple[str, int], request_datagram: RequestDatagram):
    """Dispatch an incoming RPC request and update routing/ping-queue bookkeeping.

    Known contacts are reused from the routing table; unknown senders get a
    fresh peer object. RPC errors are reported back to the sender as an
    ErrorDatagram; ValueError (a malformed/invalid request) is logged at
    debug level, anything else at warning level.
    """
    # This is an RPC method request
    self.peer_manager.report_last_requested(address[0], address[1])
    try:
        peer = self.routing_table.get_peer(request_datagram.node_id)
    except IndexError:
        # sender is not in the routing table yet
        peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
    try:
        self._handle_rpc(peer, request_datagram)
        # if the contact is not known to be bad (yet) and we haven't yet queried it, send it a ping so that it
        # will be added to our routing table if successful
        is_good = self.peer_manager.peer_is_good(peer)
        if is_good is None:
            self.ping_queue.enqueue_maybe_ping(peer)
        # only add a requesting contact to the routing table if it has replied to one of our requests
        elif is_good is True:
            self.add_peer(peer)
    except ValueError as err:
        log.debug("error raised handling %s request from %s:%i - %s(%s)",
                  request_datagram.method, peer.address, peer.udp_port, str(type(err)), str(err))
        self.send_error(
            peer,
            ErrorDatagram(ERROR_TYPE, request_datagram.rpc_id, self.node_id, str(type(err)).encode(),
                          str(err).encode())
        )
    except Exception as err:
        # unexpected failure while handling the RPC — still reply with an error datagram
        log.warning("error raised handling %s request from %s:%i - %s(%s)",
                    request_datagram.method, peer.address, peer.udp_port, str(type(err)), str(err))
        self.send_error(
            peer,
            ErrorDatagram(ERROR_TYPE, request_datagram.rpc_id, self.node_id, str(type(err)).encode(),
                          str(err).encode())
        )
def test_make_contact_error_cases(self):
    """make_kademlia_peer must reject malformed arguments and reserved addresses.

    Fix: the original asserted the rejection of '198.51.100.2' twice; the
    duplicate is dropped and the repetitive checks are folded into
    data-driven loops, keeping every distinct case.
    """
    # malformed ports, addresses and node ids
    for args in (
        (self.node_ids[1], '1.2.3.4', 100000),    # port above 65535
        (self.node_ids[1], '1.2.3.4.5', 1000),    # too many octets
        (self.node_ids[1], 'this is not an ip', 1000),
        (self.node_ids[1], '1.2.3.4', -1000),     # negative port
        (self.node_ids[1], '1.2.3.4', 0),         # zero port
        (self.node_ids[1], '1.2.3.4', 70000),     # port above 65535
        (b'not valid node id', '1.2.3.4', 1000),  # node id of the wrong length
    ):
        self.assertRaises(ValueError, make_kademlia_peer, *args)

    # reserved / private / special-purpose IPv4 addresses are rejected
    for address in (
        '0.0.0.0', '10.0.0.1', '100.64.0.1', '127.0.0.1', '192.168.0.1',
        '172.16.0.1', '169.254.1.1', '192.0.0.2', '192.0.2.2', '192.88.99.2',
        '198.18.1.1', '198.51.100.2', '203.0.113.4', '255.255.255.255',
    ):
        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], address, 1000)

    # localhost is accepted only when explicitly allowed
    self.assertIsNotNone(make_kademlia_peer(self.node_ids[1], '127.0.0.1', 1000, allow_localhost=True))

    # the whole multicast/reserved 224.0.0.0–255.0.0.0 first-octet range
    for i in range(32):
        self.assertRaises(ValueError, make_kademlia_peer, self.node_ids[1], f"{224 + i}.0.0.0", 1000)
async def test_transfer_stream_bad_first_peer_good_second(self):
    """The download should succeed when the first discovered peer is unreachable
    and a working peer shows up a second later.
    """
    await self.setup_stream(2)
    mock_node = mock.Mock(spec=Node)
    # tcp_port 3334 — nothing is listening there, so this peer fails to serve
    bad_peer = make_kademlia_peer(b'2' * 48, "127.0.0.1", tcp_port=3334, allow_localhost=True)

    def _mock_accumulate_peers(q1, q2):
        # stand-in for Node.accumulate_peers: feed the bad peer immediately,
        # then the real server peer after one second
        async def _task():
            pass
        q2.put_nowait([bad_peer])
        self.loop.call_later(1, q2.put_nowait, [self.server_from_client])
        return q2, self.loop.create_task(_task())

    mock_node.accumulate_peers = _mock_accumulate_peers
    self.stream.downloader.node = mock_node
    await self.stream.save_file()
    await self.stream.finished_writing.wait()
    self.assertTrue(os.path.isfile(self.stream.full_path))
    with open(self.stream.full_path, 'rb') as f:
        self.assertEqual(f.read(), self.stream_bytes)
    await self.stream.stop()
def handle_response_datagram(self, address: typing.Tuple[str, int], response_datagram: ResponseDatagram):
    """Resolve the pending future for an RPC response and update peer state.

    Responses from the wrong address, from ourselves, or claiming our own
    node id fail the future with RemoteException. Valid responses mark the
    sender as replied and add it to the routing table (unless the request
    was already cancelled).
    """
    # Find the message that triggered this response
    if response_datagram.rpc_id in self.sent_messages:
        peer, df, request = self.sent_messages[response_datagram.rpc_id]
        if peer.address != address[0]:
            # reply came from a different address than we sent the request to
            df.set_exception(RemoteException(
                f"response from {address[0]}, expected {peer.address}")
            )
            return
        # We got a result from the RPC
        if peer.node_id == self.node_id:
            df.set_exception(RemoteException("node has our node id"))
            return
        elif response_datagram.node_id == self.node_id:
            df.set_exception(RemoteException("incoming message is from our node id"))
            return
        # rebuild the peer from the response so we pick up the node id it reported
        peer = make_kademlia_peer(response_datagram.node_id, address[0], address[1])
        self.peer_manager.report_last_replied(address[0], address[1])
        self.peer_manager.update_contact_triple(peer.node_id, address[0], address[1])
        if not df.cancelled():
            df.set_result(response_datagram)
            self.add_peer(peer)
        else:
            log.warning("%s:%i replied, but after we cancelled the request attempt",
                        peer.address, peer.udp_port)
    else:
        # If the original message isn't found, it must have timed out
        # TODO: we should probably do something with this...
        pass
async def test_host_same_blob_to_multiple_peers_at_once(self):
    """Two clients download the same blob from the server concurrently; both must verify."""
    blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
    mock_blob_bytes = b'1' * ((2 * 2**20) - 1)

    # build a second, independent client (own dir, storage and blob manager)
    second_client_dir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, second_client_dir)
    second_client_conf = Config()
    second_client_storage = SQLiteStorage(second_client_conf, os.path.join(second_client_dir, "lbrynet.sqlite"))
    second_client_blob_manager = BlobManager(self.loop, second_client_dir, second_client_storage,
                                             second_client_conf)
    server_from_second_client = make_kademlia_peer(b'1' * 48, "127.0.0.1", tcp_port=33333, allow_localhost=True)

    await second_client_storage.open()
    await second_client_blob_manager.setup()
    await self._add_blob_to_server(blob_hash, mock_blob_bytes)
    second_client_blob = second_client_blob_manager.get_blob(blob_hash)

    # download the blob
    await asyncio.gather(
        request_blob(self.loop, second_client_blob, server_from_second_client.address,
                     server_from_second_client.tcp_port, 2, 3),
        self._test_transfer_blob(blob_hash))
    await second_client_blob.verified.wait()
    self.assertTrue(second_client_blob.get_is_verified())
def peers_from_urls(urls: typing.Optional[typing.List[typing.Tuple[bytes, str, int, int]]]):
    """Build KademliaPeer objects from (node_id, address, udp_port, tcp_port) tuples.

    Deduplicates the tuples and skips any entry matching our own external
    ip/udp port. NOTE(review): references ``self`` without taking it as a
    parameter — presumably a nested function closing over an enclosing
    method's ``self``; confirm against the enclosing scope.
    """
    peer_addresses = []
    for node_id, address, udp_port, tcp_port in urls:
        # skip duplicates and our own endpoint
        if (node_id, address, udp_port, tcp_port) not in peer_addresses and \
                (address, udp_port) != (self.protocol.external_ip, self.protocol.udp_port):
            peer_addresses.append((node_id, address, udp_port, tcp_port))
    return [make_kademlia_peer(*peer_address) for peer_address in peer_addresses]
async def test_update_get_peers(self):
    """A kademlia peer saved to storage comes back intact from get_persisted_kademlia_peers."""
    expected = (hashlib.sha384("1234".encode()).digest(), '73.186.148.72', 4444, None)
    await self.storage.save_kademlia_peers([make_kademlia_peer(*expected)])
    persisted = await self.storage.get_persisted_kademlia_peers()
    self.assertTupleEqual(expected, persisted[0])
async def test_host_different_blobs_to_multiple_peers_at_once(self):
    """Serve a content blob and an SD blob to two different clients concurrently."""
    blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
    mock_blob_bytes = b'1' * ((2 * 2 ** 20) - 1)
    sd_hash = "3e2706157a59aaa47ef52bc264fce488078b4026c0b9bab649a8f2fe1ecc5e5cad7182a2bb7722460f856831a1ac0f02"
    # canned stream descriptor JSON (hex-encoded names/keys)
    mock_sd_blob_bytes = b"""{"blobs": [{"blob_hash": "6f53c72de100f6f007aa1b9720632e2d049cc6049e609ad790b556dba262159f739d5a14648d5701afc84b991254206a", "blob_num": 0, "iv": "3b6110c2d8e742bff66e4314863dee7e", "length": 2097152}, {"blob_hash": "18493bc7c5164b00596153859a0faffa45765e47a6c3f12198a4f7be4658111505b7f8a15ed0162306a0672c4a9b505d", "blob_num": 1, "iv": "df973fa64e73b4ff2677d682cdc32d3e", "length": 2097152}, {"blob_num": 2, "iv": "660d2dc2645da7c7d4540a466fcb0c60", "length": 0}], "key": "6465616462656566646561646265656664656164626565666465616462656566", "stream_hash": "22423c6786584974bd6b462af47ecb03e471da0ef372fe85a4e71a78bef7560c4afb0835c689f03916105404653b7bdf", "stream_name": "746573745f66696c65", "stream_type": "lbryfile", "suggested_file_name": "746573745f66696c65"}"""

    # second, independent client (own dir, storage and blob manager)
    second_client_dir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, second_client_dir)
    second_client_conf = Config()
    second_client_storage = SQLiteStorage(second_client_conf, os.path.join(second_client_dir, "lbrynet.sqlite"))
    second_client_blob_manager = BlobManager(
        self.loop, second_client_dir, second_client_storage, second_client_conf
    )
    server_from_second_client = make_kademlia_peer(b'1' * 48, "127.0.0.1", tcp_port=33333, allow_localhost=True)

    await second_client_storage.open()
    await second_client_blob_manager.setup()
    await self._add_blob_to_server(blob_hash, mock_blob_bytes)
    await self._add_blob_to_server(sd_hash, mock_sd_blob_bytes)
    second_client_blob = self.client_blob_manager.get_blob(blob_hash)

    # request both blobs at once: the content blob from the "second client",
    # the sd blob via the regular transfer helper
    await asyncio.gather(
        request_blob(
            self.loop, second_client_blob, server_from_second_client.address,
            server_from_second_client.tcp_port, 2, 3
        ),
        self._test_transfer_blob(sd_hash),
        second_client_blob.verified.wait()
    )
    self.assertTrue(second_client_blob.get_is_verified())
async def _value_producer(self, blob_hash: str, result_queue: asyncio.Queue):
    """Iteratively find peers for `blob_hash` and push responsive ones into `result_queue`."""
    async def _queue_if_pong(candidate):
        # only enqueue the peer if it answers a UDP ping
        try:
            await self.protocol.get_rpc_peer(candidate).ping()
            result_queue.put_nowait([candidate])
        except asyncio.TimeoutError:
            pass

    async for found in self.get_iterative_value_finder(binascii.unhexlify(blob_hash.encode())):
        ready = []
        for peer in found:
            # never hand back our own endpoint
            if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
                continue
            is_good = self.protocol.peer_manager.peer_is_good(peer)
            if is_good:
                ready.append(peer)
            elif is_good is None:
                if not peer.udp_port:
                    # guess a UDP port from the advertised TCP port
                    guessed_udp_port = 4444
                    if 3400 > peer.tcp_port > 3332:
                        guessed_udp_port = (peer.tcp_port - 3333) + 4444
                    elif 4500 > peer.tcp_port > 4443:
                        guessed_udp_port = peer.tcp_port
                    peer = make_kademlia_peer(peer.node_id, peer.address, guessed_udp_port, peer.tcp_port)
                self.loop.create_task(_queue_if_pong(peer))
            else:
                log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
        if ready:
            result_queue.put_nowait(ready)
async def asyncSetUp(self):
    """Create an isolated client and server: temp dirs, storage, blob managers,
    and a BlobServer listening on localhost:33333.
    """
    self.loop = asyncio.get_event_loop()
    self.client_dir = tempfile.mkdtemp()
    self.server_dir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, self.client_dir)
    self.addCleanup(shutil.rmtree, self.server_dir)

    # server side: config, storage, blob manager and the blob server itself
    self.server_config = Config(data_dir=self.server_dir, download_dir=self.server_dir, wallet=self.server_dir,
                                reflector_servers=[])
    self.server_storage = SQLiteStorage(self.server_config, os.path.join(self.server_dir, "lbrynet.sqlite"))
    self.server_blob_manager = BlobManager(self.loop, self.server_dir, self.server_storage, self.server_config)
    self.server = BlobServer(self.loop, self.server_blob_manager, 'bQEaw42GXsgCAGio1nxFncJSyRmnztSCjP')

    # client side mirrors the server setup
    self.client_config = Config(data_dir=self.client_dir, download_dir=self.client_dir, wallet=self.client_dir,
                                reflector_servers=[])
    self.client_storage = SQLiteStorage(self.client_config, os.path.join(self.client_dir, "lbrynet.sqlite"))
    self.client_blob_manager = BlobManager(self.loop, self.client_dir, self.client_storage, self.client_config)
    self.client_peer_manager = PeerManager(self.loop)
    # how the client sees the server (fixed fake node id, localhost tcp 33333)
    self.server_from_client = make_kademlia_peer(b'1' * 48, "127.0.0.1", tcp_port=33333, allow_localhost=True)

    await self.client_storage.open()
    await self.server_storage.open()
    await self.client_blob_manager.setup()
    await self.server_blob_manager.setup()

    self.server.start_server(33333, '127.0.0.1')
    self.addCleanup(self.server.stop_server)
    await self.server.started_listening.wait()
async def _make_protocol(self, other_peer, node_id, address, udp_port, tcp_port):
    """Create, bind and start a KademliaProtocol for a simulated peer.

    Returns the started protocol and a KademliaPeer describing it.
    Fix: the datagram endpoint is bound to the given ``udp_port`` instead of
    a hard-coded 4444, so the bound port always matches what the protocol
    and the returned peer advertise. ``other_peer`` is unused but kept for
    signature compatibility with existing callers.
    """
    proto = KademliaProtocol(
        self.loop, PeerManager(self.loop), node_id, address, udp_port, tcp_port
    )
    await self.loop.create_datagram_endpoint(lambda: proto, (address, udp_port))
    proto.start()
    return proto, make_kademlia_peer(node_id, address, udp_port=udp_port)
def test_peer_is_good_unknown_peer(self):
    # Scenario: peer replied, but caller doesn't know the node_id.
    # Outcome: We can't say it's good or bad.
    # (yes, we COULD tell the node id, but not here. It would be
    # a side effect and the caller is responsible to discover it)
    nameless = make_kademlia_peer(None, '1.2.3.4', 4444)
    for report in (self.peer_manager.report_last_requested, self.peer_manager.report_last_replied):
        report('1.2.3.4', 4444)
    self.assertIsNone(self.peer_manager.peer_is_good(nameless))
async def add_peer(self, node_id, address, add_to_routing_table=True):
    """Spin up an extra DHT node and optionally register it with the main node's routing table."""
    new_node = Node(self.loop, PeerManager(self.loop), node_id, 4444, 4444, 3333, address)
    await new_node.start_listening(address)
    self.nodes[len(self.nodes)] = new_node
    if not add_to_routing_table:
        return
    self.node.protocol.add_peer(
        make_kademlia_peer(new_node.protocol.node_id, new_node.protocol.external_ip,
                           new_node.protocol.udp_port)
    )
def test_remove_peer(self):
    """Removing an unknown contact raises; known contacts are removable one by one."""
    stranger = make_kademlia_peer(generate_id(), next(self.address_generator), 4444)
    self.assertRaises(ValueError, self.kbucket.remove_peer, stranger)

    # populate the bucket with a couple of contacts
    added = []
    for _ in range(constants.k - 2):
        member = make_kademlia_peer(generate_id(), next(self.address_generator), 4444)
        self.assertTrue(self.kbucket.add_peer(member))
        added.append(member)

    # drain them in reverse insertion order
    while added:
        member = added.pop()
        self.assertIn(member, self.kbucket.peers)
        self.kbucket.remove_peer(member)
        self.assertNotIn(member, self.kbucket.peers)
async def test_cant_add_peer_without_a_node_id_gracefully(self):
    """_add_peer must refuse (and log a warning for) a peer whose node id is unknown."""
    loop = asyncio.get_event_loop()
    node = Node(loop, PeerManager(loop), constants.generate_id(), 4444, 4444, 3333, '1.2.3.4')
    anonymous_peer = make_kademlia_peer(None, '1.2.3.4', 5555)
    with self.assertLogs(level='WARNING') as logged:
        added = await node.protocol._add_peer(anonymous_peer)
        self.assertFalse(added)
    self.assertEqual(1, len(logged.output))
    self.assertTrue(logged.output[0].endswith('Tried adding a peer with no node id!'))
async def chain_peer(self, node_id, address):
    """Add a node that only learns about the previous last node, ping through it, return the rpc peer."""
    tail = self.nodes[len(self.nodes) - 1]
    await self.add_peer(node_id, address, False)
    newest = self.nodes[len(self.nodes) - 1]
    rpc_peer = newest.protocol.get_rpc_peer(
        make_kademlia_peer(tail.protocol.node_id, tail.protocol.external_ip, tail.protocol.udp_port)
    )
    await rpc_peer.ping()
    return rpc_peer
def _delayed_add_fixed_peers():
    """Mark the fixed peers as added and queue them (node-id-less, tcp port shifted by one)."""
    self.added_fixed_peers = True
    fixed_peers = [
        make_kademlia_peer(None, host, None, tcp_port=tcp_port + 1, allow_localhost=True)
        for host, tcp_port in addresses
    ]
    self.peer_queue.put_nowait(fixed_peers)
def _test_add_peer_to_blob(self, blob=b'2' * 48, node_id=b'1' * 48, address='1.2.3.4',
                           tcp_port=3333, udp_port=4444):
    """Add a peer for `blob` to the data store, assert it was appended, and return it."""
    added = make_kademlia_peer(node_id, address, udp_port)
    added.update_tcp_port(tcp_port)
    existing = self.data_store.get_peers_for_blob(blob)
    self.data_store.add_peer_to_blob(added, blob)
    self.assertListEqual(existing + [added], self.data_store.get_peers_for_blob(blob))
    return added
async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
    """Track the responding peer and any close peers it returned, then check for results."""
    self._add_active(peer)
    for node_id, address, udp_port in response.get_close_triples():
        try:
            self._add_active(make_kademlia_peer(node_id, address, udp_port))
        except ValueError:
            # the returned address failed validation (e.g. reserved range); don't track it
            log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i",
                        peer.address, peer.udp_port, address, udp_port)
    self.check_result_ready(response)
async def join_network(self, interface: typing.Optional[str] = '',
                       known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
    """Join the DHT: start listening, resolve the seed urls, and keep the node bootstrapped.

    With seed addresses this loops forever, re-bootstrapping from the seeds
    whenever the routing table empties and toggling ``self.joined``
    accordingly. Without seeds it simply marks the node as joined.
    """
    if not self.listening_port:
        await self.start_listening(interface)
    self.protocol.ping_queue.start()
    self._refresh_task = self.loop.create_task(self.refresh_node())

    # resolve the known node urls
    known_node_addresses = []
    url_to_addr = {}
    if known_node_urls:
        for host, port in known_node_urls:
            address = await resolve_host(host, port, proto='udp')
            # skip duplicates and our own endpoint
            if (address, port) not in known_node_addresses and \
                    (address, port) != (self.protocol.external_ip, self.protocol.udp_port):
                known_node_addresses.append((address, port))
                url_to_addr[address] = host

    if known_node_addresses:
        peers = [
            make_kademlia_peer(None, address, port)
            for (address, port) in known_node_addresses
        ]
        # never exits: watches the routing table and re-bootstraps when it empties
        while True:
            if not self.protocol.routing_table.get_peers():
                if self.joined.is_set():
                    self.joined.clear()
                self.protocol.peer_manager.reset()
                self.protocol.ping_queue.enqueue_maybe_ping(*peers, delay=0.0)
                peers.extend(await self.peer_search(self.protocol.node_id, shortlist=peers, count=32))
                if self.protocol.routing_table.get_peers():
                    self.joined.set()
                    log.info(
                        "Joined DHT, %i peers known in %i buckets",
                        len(self.protocol.routing_table.get_peers()),
                        self.protocol.routing_table.buckets_with_contacts())
                else:
                    # still not bootstrapped — retry immediately
                    continue
            # NOTE(review): the `loop` kwarg to asyncio.sleep was removed in py3.10;
            # presumably this targets an older interpreter — confirm.
            await asyncio.sleep(1, loop=self.loop)

    # reached only when no seed addresses were provided
    log.info("Joined DHT, %i peers known in %i buckets",
             len(self.protocol.routing_table.get_peers()),
             self.protocol.routing_table.buckets_with_contacts())
    self.joined.set()
async def _peers_for_value_producer(self, blob_hash: str, result_queue: asyncio.Queue):
    """Iteratively find peers hosting `blob_hash` and feed reachable ones into `result_queue`."""
    async def put_into_result_queue_after_pong(_peer):
        # only surface the peer if it answers a UDP ping
        try:
            await self.protocol.get_rpc_peer(_peer).ping()
            result_queue.put_nowait([_peer])
            log.debug("pong from %s:%i for %s", _peer.address, _peer.udp_port, blob_hash)
        except asyncio.TimeoutError:
            pass

    # prioritize peers who reply to a dht ping first
    # this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers
    async with aclosing(self.get_iterative_value_finder(bytes.fromhex(blob_hash))) as value_finder:
        async for results in value_finder:
            to_put = []
            for peer in results:
                # never hand back our own endpoint
                if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
                    continue
                is_good = self.protocol.peer_manager.peer_is_good(peer)
                if is_good:
                    # the peer has replied recently over UDP, it can probably be reached on the TCP port
                    to_put.append(peer)
                elif is_good is None:
                    if not peer.udp_port:
                        # TODO: use the same port for TCP and UDP
                        # the udp port must be guessed
                        # default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
                        # including on a network with several nodes, then assume the udp port is proportionately
                        # based on a starting port of 4444
                        udp_port_to_try = peer.tcp_port
                        if 3400 > peer.tcp_port > 3332:
                            udp_port_to_try = (peer.tcp_port - 3333) + 4444
                        self.loop.create_task(put_into_result_queue_after_pong(
                            make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)))
                    else:
                        self.loop.create_task(put_into_result_queue_after_pong(peer))
                else:
                    # the peer is known to be bad/unreachable, skip trying to connect to it over TCP
                    log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
            if to_put:
                result_queue.put_nowait(to_put)
async def get_kademlia_peers_from_hosts(
        peer_list: typing.List[typing.Tuple[str, int]]) -> typing.List['KademliaPeer']:
    """Resolve each (host, port) pair and return node-id-less KademliaPeers for them."""
    resolved = []
    for url, port in peer_list:
        address = await resolve_host(url, port, proto='tcp')
        resolved.append((address, port))
    return [
        make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
        for address, port in resolved
    ]
async def test_popular_blob(self):
    """Announce a blob from every node and verify find_value pagination.

    Fix: pages are accumulated into ``seen`` with ``set.update``. The
    original ``seen.intersection_update(page_x_set)`` emptied ``seen`` after
    the first extra page (the preceding assertion proves the page is disjoint
    from ``seen``), so all later cross-page disjointness checks were vacuous.
    """
    peer_count = 150
    blob_hash = constants.generate_id(99999)
    async with self._test_network_context(peer_count=peer_count):
        total_seen = set()
        announced_to = self.nodes.pop(0)
        for i, node in enumerate(self.nodes.values()):
            self.add_peer_to_routing_table(announced_to, node)
            peer = node.protocol.get_rpc_peer(
                make_kademlia_peer(
                    announced_to.protocol.node_id, announced_to.protocol.external_ip,
                    announced_to.protocol.udp_port
                )
            )
            response = await peer.store(blob_hash)
            self.assertEqual(response, b'OK')
            peers_for_blob = await peer.find_value(blob_hash, 0)
            if i == 0:
                # the first announcer sees no other peers for the blob yet
                self.assertNotIn(blob_hash, peers_for_blob)
                self.assertEqual(peers_for_blob[b'p'], 0)
            else:
                self.assertEqual(len(peers_for_blob[blob_hash]), min(i, constants.K))
                self.assertEqual(len(announced_to.protocol.data_store.get_peers_for_blob(blob_hash)), i + 1)
            if i - 1 > constants.K:
                self.assertEqual(len(peers_for_blob[b'contacts']), constants.K)
                self.assertEqual(peers_for_blob[b'p'], (i // (constants.K + 1)) + 1)
                seen = set(peers_for_blob[blob_hash])
                self.assertEqual(len(seen), constants.K)
                self.assertEqual(len(peers_for_blob[blob_hash]), len(seen))
                for pg in range(1, peers_for_blob[b'p']):
                    page_x = await peer.find_value(blob_hash, pg)
                    self.assertNotIn(b'contacts', page_x)
                    page_x_set = set(page_x[blob_hash])
                    self.assertEqual(len(page_x[blob_hash]), len(page_x_set))
                    self.assertGreater(len(page_x_set), 0)
                    # every page must be disjoint from all previously seen peers
                    self.assertSetEqual(seen.intersection(page_x_set), set())
                    seen.update(page_x_set)  # was seen.intersection_update(page_x_set)
                    total_seen.update(page_x_set)
            else:
                self.assertEqual(len(peers_for_blob[b'contacts']), 8)  # we always add 8 on first page
        self.assertEqual(len(total_seen), peer_count - 2)
async def test_split_buckets(self):
    """Fill node 0's routing table from 199 peers and verify the resulting bucket splits.

    ``expected_ranges`` is a module-level constant defined elsewhere in this
    file: the (range_min, range_max) pair expected for each bucket.
    """
    loop = asyncio.get_event_loop()
    peer_addresses = [
        (constants.generate_id(1), '1.2.3.1'),
    ]
    for i in range(2, 200):
        peer_addresses.append((constants.generate_id(i), f'1.2.3.{i}'))
    with dht_mocks.mock_network_loop(loop):
        nodes = {
            i: Node(loop, PeerManager(loop), node_id, 4444, 4444, 3333, address)
            for i, (node_id, address) in enumerate(peer_addresses)
        }
        node_1 = nodes[0]
        for i in range(1, len(peer_addresses)):
            node = nodes[i]
            peer = make_kademlia_peer(node.protocol.node_id, node.protocol.external_ip,
                                      udp_port=node.protocol.udp_port)
            # set all of the peers to good (as to not attempt pinging stale ones during split)
            node_1.protocol.peer_manager.report_last_replied(peer.address, peer.udp_port)
            node_1.protocol.peer_manager.report_last_replied(peer.address, peer.udp_port)
            await node_1.protocol._add_peer(peer)
            # check that bucket 0 is always the one covering the local node id
            self.assertEqual(True, node_1.protocol.routing_table.buckets[0].key_in_range(
                node_1.protocol.node_id))
        self.assertEqual(40, len(node_1.protocol.routing_table.get_peers()))
        self.assertEqual(len(expected_ranges), len(node_1.protocol.routing_table.buckets))
        # the bucket ranges must tile the whole 384-bit keyspace exactly
        covered = 0
        for (expected_min, expected_max), bucket in zip(
                expected_ranges, node_1.protocol.routing_table.buckets):
            self.assertEqual(expected_min, bucket.range_min)
            self.assertEqual(expected_max, bucket.range_max)
            covered += bucket.range_max - bucket.range_min
        self.assertEqual(2**384, covered)
        for node in nodes.values():
            node.stop()
async def test_peer_persistance(self):
    """Peers persisted by one node are restored when a new node reuses its storage."""
    num_nodes = 6
    start_port = 40000
    num_seeds = 2
    external_ip = '127.0.0.1'

    # Start a node
    await self.setup_network(num_nodes, start_port=start_port, seed_nodes=num_seeds)
    await asyncio.gather(*[node.joined.wait() for node in self.nodes])

    node1 = self.nodes[-1]
    peer_args = [(n.protocol.node_id, n.protocol.external_ip, n.protocol.udp_port, n.protocol.peer_port)
                 for n in self.nodes[:num_seeds]]
    peers = [make_kademlia_peer(*args) for args in peer_args]

    # node1 is bootstrapped from the fixed seeds
    self.assertCountEqual(peers, node1.protocol.routing_table.get_peers())

    # Refresh and assert that the peers were persisted
    await node1.refresh_node(True)
    self.assertEqual(len(peer_args), len(await node1._storage.get_persisted_kademlia_peers()))
    node1.stop()

    # Start a fresh node with the same node_id and storage, but no known peers
    node2 = await self.create_node(constants.generate_id(num_nodes - 1), start_port + num_nodes - 1)
    node2._storage = node1._storage
    node2.start(external_ip, [])
    await node2.joined.wait()

    # The peers are restored
    self.assertEqual(num_seeds, len(node2.protocol.routing_table.get_peers()))
    # bucket layout should match the original node's
    for bucket1, bucket2 in zip(node1.protocol.routing_table.buckets,
                                node2.protocol.routing_table.buckets):
        self.assertEqual((bucket1.range_min, bucket1.range_max), (bucket2.range_min, bucket2.range_max))