def test_add_peer(self):
    """Exercise KBucket.add_peer: dedup by id/address, port updates, capacity, and LRU reorder.

    The assertions are order-sensitive: each add_peer call mutates
    self.kbucket and the following asserts pin the resulting state.
    """
    peer = KademliaPeer(None, '1.2.3.4', constants.generate_id(2), udp_port=4444)
    # same node id and address as `peer`, but a different udp port
    peer_update2 = KademliaPeer(None, '1.2.3.4', constants.generate_id(2), udp_port=4445)
    self.assertListEqual([], self.kbucket.peers)
    # add the peer
    self.kbucket.add_peer(peer)
    self.assertListEqual([peer], self.kbucket.peers)
    # re-add it: no duplicate entry is created
    self.kbucket.add_peer(peer)
    self.assertListEqual([peer], self.kbucket.peers)
    self.assertEqual(self.kbucket.peers[0].udp_port, 4444)
    # add a new peer object with the same id and address but a different port:
    # the stored entry is replaced and the new port wins
    self.kbucket.add_peer(peer_update2)
    self.assertListEqual([peer_update2], self.kbucket.peers)
    self.assertEqual(self.kbucket.peers[0].udp_port, 4445)
    # modify the peer object to have a different port
    peer_update2.udp_port = 4444
    self.kbucket.add_peer(peer_update2)
    self.assertListEqual([peer_update2], self.kbucket.peers)
    self.assertEqual(self.kbucket.peers[0].udp_port, 4444)
    self.kbucket.peers.clear()
    # Test if contacts can be added to empty list
    # Add k contacts to bucket
    for i in range(constants.k):
        peer = self.peer_manager.get_kademlia_peer(generate_id(), next(self.address_generator), 4444)
        self.assertTrue(self.kbucket.add_peer(peer))
        self.assertEqual(peer, self.kbucket.peers[i])
    # Test if contact is not added to full list (bucket already holds k peers)
    peer = self.peer_manager.get_kademlia_peer(generate_id(), next(self.address_generator), 4444)
    self.assertFalse(self.kbucket.add_peer(peer))
    # Test if an existing contact is updated correctly if added again:
    # re-adding moves it to the tail (most-recently-seen position)
    existing_peer = self.kbucket.peers[0]
    self.assertTrue(self.kbucket.add_peer(existing_peer))
    self.assertEqual(existing_peer, self.kbucket.peers[-1])
async def test_host_different_blobs_to_multiple_peers_at_once(self):
    """Serve a data blob and an sd blob from the server to two concurrent downloads.

    A second client (own storage + blob manager in a temp dir) is set up, both
    blobs are added to the server, and then a direct request_blob() for the data
    blob runs concurrently with the sd-blob transfer helper.
    """
    blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
    # one byte short of 2 MiB, the maximum blob size
    mock_blob_bytes = b'1' * ((2 * 2**20) - 1)
    sd_hash = "3e2706157a59aaa47ef52bc264fce488078b4026c0b9bab649a8f2fe1ecc5e5cad7182a2bb7722460f856831a1ac0f02"
    # stream descriptor JSON naming the blobs above (hex-encoded fields)
    mock_sd_blob_bytes = b"""{"blobs": [{"blob_hash": "6f53c72de100f6f007aa1b9720632e2d049cc6049e609ad790b556dba262159f739d5a14648d5701afc84b991254206a", "blob_num": 0, "iv": "3b6110c2d8e742bff66e4314863dee7e", "length": 2097152}, {"blob_hash": "18493bc7c5164b00596153859a0faffa45765e47a6c3f12198a4f7be4658111505b7f8a15ed0162306a0672c4a9b505d", "blob_num": 1, "iv": "df973fa64e73b4ff2677d682cdc32d3e", "length": 2097152}, {"blob_num": 2, "iv": "660d2dc2645da7c7d4540a466fcb0c60", "length": 0}], "key": "6465616462656566646561646265656664656164626565666465616462656566", "stream_hash": "22423c6786584974bd6b462af47ecb03e471da0ef372fe85a4e71a78bef7560c4afb0835c689f03916105404653b7bdf", "stream_name": "746573745f66696c65", "stream_type": "lbryfile", "suggested_file_name": "746573745f66696c65"}"""
    # second, independent client with its own storage and blob manager
    second_client_dir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, second_client_dir)
    second_client_storage = SQLiteStorage(Config(), os.path.join(second_client_dir, "lbrynet.sqlite"))
    second_client_blob_manager = BlobFileManager(self.loop, second_client_dir, second_client_storage)
    server_from_second_client = KademliaPeer(self.loop, "127.0.0.1", b'1' * 48, tcp_port=33333)
    await second_client_storage.open()
    await second_client_blob_manager.setup()
    # host both blobs on the server
    await self._add_blob_to_server(blob_hash, mock_blob_bytes)
    await self._add_blob_to_server(sd_hash, mock_sd_blob_bytes)
    # NOTE(review): the blob is fetched through self.client_blob_manager, not the
    # second_client_blob_manager created above — confirm that is intended.
    second_client_blob = self.client_blob_manager.get_blob(blob_hash)
    # download both blobs at once and wait for the data blob to finish writing
    await asyncio.gather(
        request_blob(self.loop, second_client_blob, server_from_second_client.address,
                     server_from_second_client.tcp_port, 2, 3),
        self._test_transfer_blob(sd_hash),
        second_client_blob.finished_writing.wait())
    self.assertEqual(second_client_blob.get_is_verified(), True)
async def test_host_same_blob_to_multiple_peers_at_once(self):
    """Serve the same blob from the server to two concurrent downloads."""
    blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
    # one byte short of 2 MiB, the maximum blob size
    mock_blob_bytes = b'1' * ((2 * 2**20) - 1)
    # second, independent client with its own storage and blob manager
    second_client_dir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, second_client_dir)
    second_client_storage = SQLiteStorage(Config(), os.path.join(second_client_dir, "lbrynet.sqlite"))
    second_client_blob_manager = BlobFileManager(self.loop, second_client_dir, second_client_storage)
    server_from_second_client = KademliaPeer(self.loop, "127.0.0.1", b'1' * 48, tcp_port=33333)
    await second_client_storage.open()
    await second_client_blob_manager.setup()
    await self._add_blob_to_server(blob_hash, mock_blob_bytes)
    # NOTE(review): the blob is fetched through self.client_blob_manager, not the
    # second_client_blob_manager created above — confirm that is intended.
    second_client_blob = self.client_blob_manager.get_blob(blob_hash)
    # download the blob
    await asyncio.gather(
        request_blob(self.loop, second_client_blob, server_from_second_client.address,
                     server_from_second_client.tcp_port, 2, 3),
        self._test_transfer_blob(blob_hash))
    await second_client_blob.finished_writing.wait()
    self.assertEqual(second_client_blob.get_is_verified(), True)
async def asyncSetUp(self):
    """Create temp client/server dirs, storage, blob managers, and start the blob server.

    Statement order matters: storages are opened before the blob managers'
    setup(), and the server starts listening last so tests can connect.
    """
    self.loop = asyncio.get_event_loop()
    # temporary working directories, cleaned up after each test
    self.client_dir = tempfile.mkdtemp()
    self.server_dir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, self.client_dir)
    self.addCleanup(shutil.rmtree, self.server_dir)
    # server side: storage + blob manager + blob server
    self.server_storage = SQLiteStorage(Config(), os.path.join(self.server_dir, "lbrynet.sqlite"))
    self.server_blob_manager = BlobFileManager(self.loop, self.server_dir, self.server_storage)
    self.server = BlobServer(self.loop, self.server_blob_manager, 'bQEaw42GXsgCAGio1nxFncJSyRmnztSCjP')
    # client side: storage + blob manager + peer manager
    self.client_storage = SQLiteStorage(Config(), os.path.join(self.client_dir, "lbrynet.sqlite"))
    self.client_blob_manager = BlobFileManager(self.loop, self.client_dir, self.client_storage)
    self.client_peer_manager = PeerManager(self.loop)
    # how the client addresses the local server (fixed 48-byte node id)
    self.server_from_client = KademliaPeer(self.loop, "127.0.0.1", b'1' * 48, tcp_port=33333)
    await self.client_storage.open()
    await self.server_storage.open()
    await self.client_blob_manager.setup()
    await self.server_blob_manager.setup()
    self.server.start_server(33333, '127.0.0.1')
    await self.server.started_listening.wait()
async def test_transfer_stream_bad_first_peer_good_second(self):
    """Download a stream when the first discovered peer is bad and a good one arrives later.

    The mocked node first yields an unreachable peer, then (after 1s) the real
    server peer; the downloader is expected to recover and finish the stream.

    Fix: removed the unused local ``q = asyncio.Queue()`` — the queue used by
    the mock is the one passed in by the downloader as ``q2``.
    """
    await self.setup_stream(2)
    mock_node = mock.Mock(spec=Node)
    # a peer on a port nothing is listening on
    bad_peer = KademliaPeer(self.loop, "127.0.0.1", b'2' * 48, tcp_port=3334)

    def _mock_accumulate_peers(q1, q2):
        async def _task():
            pass
        # deliver the bad peer immediately, the good one a second later
        q2.put_nowait([bad_peer])
        self.loop.call_later(1, q2.put_nowait, [self.server_from_client])
        return q2, self.loop.create_task(_task())

    mock_node.accumulate_peers = _mock_accumulate_peers
    self.downloader.download(mock_node)
    await self.downloader.stream_finished_event.wait()
    # the output file exists and holds exactly the original stream bytes
    self.assertTrue(os.path.isfile(self.downloader.output_path))
    with open(self.downloader.output_path, 'rb') as f:
        self.assertEqual(f.read(), self.stream_bytes)
async def _add_fixed_peers():
    # Resolve every configured reflector server (sequentially, in config
    # order) and hand the whole batch to the peer queue in one put.
    fixed_peers = []
    for url, port in self.config.reflector_servers:
        resolved_address = await resolve_host(url)
        fixed_peers.append(KademliaPeer(self.loop, address=resolved_address, tcp_port=port + 1))
    self.peer_queue.put_nowait(fixed_peers)
async def test_transfer_stream_bad_first_peer_good_second(self):
    """Download a stream when peer search yields a bad peer first, then a good one.

    The mocked peer-search context manager yields an unreachable peer, then the
    real server peer after a short delay; the downloader must recover and
    produce the full stream bytes.

    Fix: dropped the deprecated ``loop=`` keyword from ``asyncio.sleep`` — it
    raised a DeprecationWarning since Python 3.8 and was removed in 3.10; the
    coroutine already runs on ``self.loop``.
    """
    await self.setup_stream(2)
    mock_node = mock.Mock(spec=Node)
    # a peer on a port nothing is listening on
    bad_peer = KademliaPeer(self.loop, "127.0.0.1", b'2' * 48, tcp_port=3334)

    @contextlib.asynccontextmanager
    async def mock_peer_search(*_):
        async def _gen():
            # bad peer first, good peer a beat later
            await asyncio.sleep(0.05)
            yield [bad_peer]
            await asyncio.sleep(0.1)
            yield [self.server_from_client]
            return
        yield _gen()

    mock_node.stream_peer_search_junction = mock_peer_search
    self.downloader.download(mock_node)
    await self.downloader.stream_finished_event.wait()
    await self.downloader.stop()
    # the output file exists and holds exactly the original stream bytes
    self.assertTrue(os.path.isfile(self.downloader.output_path))
    with open(self.downloader.output_path, 'rb') as f:
        self.assertEqual(f.read(), self.stream_bytes)
async def join_network(self, interface: typing.Optional[str] = '',
                       known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
                       known_node_addresses: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
    """Start listening, ping the seed nodes, and populate the routing table.

    :param interface: interface to bind the UDP listener to
    :param known_node_urls: (hostname, port) seed nodes to resolve and ping
    :param known_node_addresses: (ip, port) seed nodes already resolved
    """
    if not self.listening_port:
        await self.start_listening(interface)
    self.protocol.ping_queue.start()
    self._refresh_task = self.loop.create_task(self.refresh_node())
    # resolve the known node urls
    known_node_addresses = known_node_addresses or []
    url_to_addr = {}
    if known_node_urls:
        for host, port in known_node_urls:
            # only the resolved address (info[0][4][0]) is used below; the
            # 'https' service argument's port in the result is ignored —
            # the DHT port comes from known_node_urls
            info = await self.loop.getaddrinfo(
                host, 'https',
                proto=socket.IPPROTO_TCP,
            )
            if (info[0][4][0], port) not in known_node_addresses:
                known_node_addresses.append((info[0][4][0], port))
            # remember the hostname for nicer log messages
            url_to_addr[info[0][4][0]] = host
    if known_node_addresses:
        # keep pinging the seeds until at least one lands in the routing table
        while not self.protocol.routing_table.get_peers():
            success = False
            # ping the seed nodes, this will set their node ids (since we don't know them ahead of time)
            for address, port in known_node_addresses:
                peer = self.protocol.get_rpc_peer(KademliaPeer(self.loop, address, udp_port=port))
                try:
                    await peer.ping()
                    success = True
                except asyncio.TimeoutError:
                    log.warning("seed node (%s:%i) timed out in %s", url_to_addr.get(address, address),
                                port, round(self.protocol.rpc_timeout, 2))
            if success:
                break
        # now that we have the seed nodes in routing, to an iterative lookup of our own id to populate the buckets
        # in the routing table with good peers who are near us
        async with self.peer_search_junction(self.protocol.node_id, max_results=16) as junction:
            async for peers in junction:
                for peer in peers:
                    try:
                        await self.protocol.get_rpc_peer(peer).ping()
                    except (asyncio.TimeoutError, RemoteException):
                        # unresponsive peers are simply skipped
                        pass
    log.info("Joined DHT, %i peers known in %i buckets", len(self.protocol.routing_table.get_peers()),
             self.protocol.routing_table.buckets_with_contacts())
    self.joined.set()
async def _add_fixed_peers():
    # Resolve the configured reflector servers up front (sequentially, in
    # config order), keeping the original configured port alongside each
    # resolved address.
    addresses = []
    for url, port in self.config.reflector_servers:
        addresses.append((await resolve_host(url, port + 1, proto='tcp'), port))

    # Only delay when the DHT component is active and already knows peers;
    # otherwise enqueue the fixed peers right away.
    dht_has_peers = ('dht' not in self.config.components_to_skip and self.node
                     and len(self.node.protocol.routing_table.get_peers()))
    if dht_has_peers:
        delay = self.config.fixed_peer_delay
    else:
        delay = 0.0

    def _enqueue_fixed_peers():
        self.peer_queue.put_nowait([
            KademliaPeer(self.loop, address=address, tcp_port=port + 1)
            for address, port in addresses
        ])

    self.loop.call_later(delay, _enqueue_fixed_peers)
async def join_network(self, interface: typing.Optional[str] = '',
                       known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
    """Start listening, seed the ping queue, and keep the node joined to the DHT.

    When seed addresses resolve, this loops forever: whenever the routing
    table empties it clears ``self.joined``, resets the peer manager, re-pings
    the seeds, and searches for our own node id to repopulate the buckets.

    :param interface: interface to bind the UDP listener to
    :param known_node_urls: (hostname, port) seed nodes to resolve and ping
    """
    if not self.listening_port:
        await self.start_listening(interface)
    self.protocol.ping_queue.start()
    self._refresh_task = self.loop.create_task(self.refresh_node())
    # resolve the known node urls
    known_node_addresses = []
    url_to_addr = {}
    if known_node_urls:
        for host, port in known_node_urls:
            address = await resolve_host(host, port, proto='udp')
            # skip duplicates and our own external address
            if (address, port) not in known_node_addresses and\
                    (address, port) != (self.protocol.external_ip, self.protocol.udp_port):
                known_node_addresses.append((address, port))
                # remember the hostname for the resolved address
                url_to_addr[address] = host
    if known_node_addresses:
        peers = [
            KademliaPeer(self.loop, address, udp_port=port)
            for (address, port) in known_node_addresses
        ]
        while True:
            if not self.protocol.routing_table.get_peers():
                # lost all peers: mark ourselves un-joined and rebootstrap
                if self.joined.is_set():
                    self.joined.clear()
                self.protocol.peer_manager.reset()
                self.protocol.ping_queue.enqueue_maybe_ping(*peers, delay=0.0)
                # iterative lookup of our own id to repopulate the buckets;
                # any peers found are added to the seed shortlist for next time
                peers.extend(await self.peer_search(self.protocol.node_id, shortlist=peers, count=32))
                if self.protocol.routing_table.get_peers():
                    self.joined.set()
                    log.info("Joined DHT, %i peers known in %i buckets",
                             len(self.protocol.routing_table.get_peers()),
                             self.protocol.routing_table.buckets_with_contacts())
                else:
                    # still no peers: retry immediately, skipping the sleep
                    continue
            await asyncio.sleep(1, loop=self.loop)
    # reached only when there were no seed addresses (the loop above never
    # breaks); declare ourselves joined with whatever the table holds
    log.info("Joined DHT, %i peers known in %i buckets", len(self.protocol.routing_table.get_peers()),
             self.protocol.routing_table.buckets_with_contacts())
    self.joined.set()
async def test_replace_bad_nodes(self):
    """Stopped (bad) peers are evicted from the routing table after a refresh.

    Nine of twenty nodes are stopped after being added to node 0's routing
    table; after ``refresh_node(True)`` only peers from the still-running
    nodes may remain.

    Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``
    (the alias is deprecated and removed in Python 3.12).
    """
    await self.setup_network(20)
    self.assertEqual(len(self.nodes), 20)
    node = self.nodes[0]
    bad_peers = []
    # add nodes 1-9 to node 0's routing table, then stop them so they go bad
    for candidate in self.nodes[1:10]:
        address, port, node_id = candidate.protocol.external_ip, candidate.protocol.udp_port, \
            candidate.protocol.node_id
        peer = KademliaPeer(self.loop, address, node_id, port)
        bad_peers.append(peer)
        node.protocol.add_peer(peer)
        candidate.stop()
    await asyncio.sleep(.3)  # let pending events settle
    # the stopped peers are still in the table before the refresh
    for bad_peer in bad_peers:
        self.assertIn(bad_peer, node.protocol.routing_table.get_peers())
    await node.refresh_node(True)
    await asyncio.sleep(.3)  # let pending events settle
    # after the refresh only peers from the running nodes remain
    good_nodes = {good_node.protocol.node_id for good_node in self.nodes[10:]}
    for peer in node.protocol.routing_table.get_peers():
        self.assertIn(peer.node_id, good_nodes)
async def start(self):
    """Build the StreamManager from already-started components and start it.

    Resolves the configured reflector servers into fixed peers and passes
    them to the StreamManager along with blob/storage/wallet components.
    """
    blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
    storage = self.component_manager.get_component(DATABASE_COMPONENT)
    wallet = self.component_manager.get_component(WALLET_COMPONENT)
    try:
        node = self.component_manager.get_component(DHT_COMPONENT)
    # NOTE(review): catching NameError for a missing DHT component looks
    # unusual — confirm get_component raises NameError when the component
    # was skipped, rather than a dedicated component error.
    except NameError:
        node = None
    log.info('Starting the file manager')
    loop = asyncio.get_event_loop()
    self.stream_manager = StreamManager(
        loop, blob_manager, wallet, storage, node, self.conf.blob_download_timeout,
        self.conf.peer_connect_timeout, [
            # NOTE(review): resolve_host is called here as resolve_host(loop, url) —
            # verify this matches its signature; other call sites pass the
            # hostname first.
            KademliaPeer(loop, address=(await resolve_host(loop, url)), tcp_port=port + 1)
            for url, port in self.conf.reflector_servers
        ], self.conf.reflector_servers
    )
    await self.stream_manager.start()
    log.info('Done setting up file manager')
async def test_peer_search_removes_bad_peers(self):
    # that's an edge case discovered by Tom, but an important one
    # imagine that you only got bad peers and refresh will happen in one hour
    # instead of failing for one hour we should be able to recover by scheduling pings to bad peers we find
    await self.setup_network(2, seed_nodes=2)
    node1, node2 = self.nodes
    node2.stop()
    # forcefully make it a bad peer but dont remove it from routing table
    address, port, node_id = node2.protocol.external_ip, node2.protocol.udp_port, node2.protocol.node_id
    peer = KademliaPeer(self.loop, address, node_id, port)
    # the peer starts out "good"...
    self.assertTrue(node1.protocol.peer_manager.peer_is_good(peer))
    # ...until two failures are reported against its address/port
    node1.protocol.peer_manager.report_failure(node2.protocol.external_ip, node2.protocol.udp_port)
    node1.protocol.peer_manager.report_failure(node2.protocol.external_ip, node2.protocol.udp_port)
    self.assertFalse(node1.protocol.peer_manager.peer_is_good(peer))
    # now a search happens, which removes bad peers while contacting them
    self.assertTrue(node1.protocol.routing_table.get_peers())
    await node1.peer_search(node2.protocol.node_id)
    await asyncio.sleep(.3)  # let pending events settle
    # the bad peer has been dropped, leaving the routing table empty
    self.assertFalse(node1.protocol.routing_table.get_peers())
def _delayed_add_fixed_peers():
    # Record that the fixed peers have been handed over, then enqueue a
    # KademliaPeer for every pre-resolved (address, port) pair in one put.
    self.added_fixed_peers = True
    fixed_peers = []
    for address, port in addresses:
        fixed_peers.append(KademliaPeer(self.loop, address=address, tcp_port=port + 1))
    self.peer_queue.put_nowait(fixed_peers)