Example #1
    async def asyncSetUp(self):
        self.loop = asyncio.get_event_loop()

        self.client_dir = tempfile.mkdtemp()
        self.server_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.client_dir)
        self.addCleanup(shutil.rmtree, self.server_dir)
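        # server-side components: config, storage, blob manager and the blob server under test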
        self.server_config = Config(data_dir=self.server_dir, download_dir=self.server_dir, wallet=self.server_dir,
                                    reflector_servers=[])
        self.server_storage = SQLiteStorage(self.server_config, os.path.join(self.server_dir, "lbrynet.sqlite"))
        self.server_blob_manager = BlobManager(self.loop, self.server_dir, self.server_storage, self.server_config)
        self.server = BlobServer(self.loop, self.server_blob_manager, 'bQEaw42GXsgCAGio1nxFncJSyRmnztSCjP')

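        # client-side counterparts, plus a peer object that points at the server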
        self.client_config = Config(data_dir=self.client_dir, download_dir=self.client_dir, wallet=self.client_dir,
                                    reflector_servers=[])
        self.client_storage = SQLiteStorage(self.client_config, os.path.join(self.client_dir, "lbrynet.sqlite"))
        self.client_blob_manager = BlobManager(self.loop, self.client_dir, self.client_storage, self.client_config)
        self.client_peer_manager = PeerManager(self.loop)
        self.server_from_client = KademliaPeer(self.loop, "127.0.0.1", b'1' * 48, tcp_port=33333)

        await self.client_storage.open()
        await self.server_storage.open()
        await self.client_blob_manager.setup()
        await self.server_blob_manager.setup()

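        # start the blob server on localhost and wait until it is listening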
        self.server.start_server(33333, '127.0.0.1')
        self.addCleanup(self.server.stop_server)
        await self.server.started_listening.wait()
Example #2
    async def test_host_same_blob_to_multiple_peers_at_once(self):
        blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
        mock_blob_bytes = b'1' * ((2 * 2**20) - 1)

        second_client_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, second_client_dir)
        second_client_conf = Config()
        second_client_storage = SQLiteStorage(
            second_client_conf,
            os.path.join(second_client_dir, "lbrynet.sqlite"))
        second_client_blob_manager = BlobManager(self.loop, second_client_dir,
                                                 second_client_storage,
                                                 second_client_conf)
        server_from_second_client = KademliaPeer(self.loop,
                                                 "127.0.0.1",
                                                 b'1' * 48,
                                                 tcp_port=33333)

        await second_client_storage.open()
        await second_client_blob_manager.setup()

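        # put the blob on the server so both clients can request it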
        await self._add_blob_to_server(blob_hash, mock_blob_bytes)

        second_client_blob = second_client_blob_manager.get_blob(blob_hash)

        # download the blob
        await asyncio.gather(
            request_blob(self.loop, second_client_blob,
                         server_from_second_client.address,
                         server_from_second_client.tcp_port, 2, 3),
            self._test_transfer_blob(blob_hash))
        await second_client_blob.verified.wait()
        self.assertEqual(second_client_blob.get_is_verified(), True)
Example #3
    async def test_host_different_blobs_to_multiple_peers_at_once(self):
        blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
        mock_blob_bytes = b'1' * ((2 * 2 ** 20) - 1)

        sd_hash = "3e2706157a59aaa47ef52bc264fce488078b4026c0b9bab649a8f2fe1ecc5e5cad7182a2bb7722460f856831a1ac0f02"
        mock_sd_blob_bytes = b"""{"blobs": [{"blob_hash": "6f53c72de100f6f007aa1b9720632e2d049cc6049e609ad790b556dba262159f739d5a14648d5701afc84b991254206a", "blob_num": 0, "iv": "3b6110c2d8e742bff66e4314863dee7e", "length": 2097152}, {"blob_hash": "18493bc7c5164b00596153859a0faffa45765e47a6c3f12198a4f7be4658111505b7f8a15ed0162306a0672c4a9b505d", "blob_num": 1, "iv": "df973fa64e73b4ff2677d682cdc32d3e", "length": 2097152}, {"blob_num": 2, "iv": "660d2dc2645da7c7d4540a466fcb0c60", "length": 0}], "key": "6465616462656566646561646265656664656164626565666465616462656566", "stream_hash": "22423c6786584974bd6b462af47ecb03e471da0ef372fe85a4e71a78bef7560c4afb0835c689f03916105404653b7bdf", "stream_name": "746573745f66696c65", "stream_type": "lbryfile", "suggested_file_name": "746573745f66696c65"}"""

        second_client_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, second_client_dir)
        second_client_conf = Config()

        second_client_storage = SQLiteStorage(second_client_conf, os.path.join(second_client_dir, "lbrynet.sqlite"))
        second_client_blob_manager = BlobManager(
            self.loop, second_client_dir, second_client_storage, second_client_conf
        )
        server_from_second_client = KademliaPeer(self.loop, "127.0.0.1", b'1' * 48, tcp_port=33333)

        await second_client_storage.open()
        await second_client_blob_manager.setup()

        await self._add_blob_to_server(blob_hash, mock_blob_bytes)
        await self._add_blob_to_server(sd_hash, mock_sd_blob_bytes)

        second_client_blob = second_client_blob_manager.get_blob(blob_hash)

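        # request the data blob and the sd blob from the server concurrently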
        await asyncio.gather(
            request_blob(
                self.loop, second_client_blob, server_from_second_client.address,
                server_from_second_client.tcp_port, 2, 3
            ),
            self._test_transfer_blob(sd_hash),
            second_client_blob.verified.wait()
        )
        self.assertEqual(second_client_blob.get_is_verified(), True)
Example #4
    async def test_transfer_stream_bad_first_peer_good_second(self):
        await self.setup_stream(2)

        mock_node = mock.Mock(spec=Node)
        q = asyncio.Queue()

        bad_peer = KademliaPeer(self.loop,
                                "127.0.0.1",
                                b'2' * 48,
                                tcp_port=3334)

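        # the mocked node yields an unreachable peer first, then the working server one second later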
        def _mock_accumulate_peers(q1, q2):
            async def _task():
                pass

            q2.put_nowait([bad_peer])
            self.loop.call_later(1, q2.put_nowait, [self.server_from_client])
            return q2, self.loop.create_task(_task())

        mock_node.accumulate_peers = _mock_accumulate_peers

        await self.stream.save_file(node=mock_node)
        await self.stream.finished_writing.wait()
        self.assertTrue(os.path.isfile(self.stream.full_path))
        with open(self.stream.full_path, 'rb') as f:
            self.assertEqual(f.read(), self.stream_bytes)
        await self.stream.stop()
Example #5
    async def test_announce_using_helper_function(self):
        info_hash = random.getrandbits(160).to_bytes(20, "big", signed=False)
        queue = asyncio.Queue()
        enqueue_tracker_search(info_hash, queue)
        peers = await queue.get()
        self.assertEqual(peers, [
            KademliaPeer('127.0.0.1', None, None, 4444, allow_localhost=True)
        ])
Example #6
    def test_add_peer(self):
        peer = KademliaPeer(None, '1.2.3.4', constants.generate_id(2), udp_port=4444)
        peer_update2 = KademliaPeer(None, '1.2.3.4', constants.generate_id(2), udp_port=4445)

        self.assertListEqual([], self.kbucket.peers)

        # add the peer
        self.kbucket.add_peer(peer)
        self.assertListEqual([peer], self.kbucket.peers)

        # re-add it
        self.kbucket.add_peer(peer)
        self.assertListEqual([peer], self.kbucket.peers)
        self.assertEqual(self.kbucket.peers[0].udp_port, 4444)

        # add a new peer object with the same id and address but a different port
        self.kbucket.add_peer(peer_update2)
        self.assertListEqual([peer_update2], self.kbucket.peers)
        self.assertEqual(self.kbucket.peers[0].udp_port, 4445)

        # modify the peer object to have a different port
        peer_update2.udp_port = 4444
        self.kbucket.add_peer(peer_update2)
        self.assertListEqual([peer_update2], self.kbucket.peers)
        self.assertEqual(self.kbucket.peers[0].udp_port, 4444)

        self.kbucket.peers.clear()

        # test that contacts can be added to an empty list
        # add k contacts to the bucket
        for i in range(constants.k):
            peer = self.peer_manager.get_kademlia_peer(generate_id(), next(self.address_generator), 4444)
            self.assertTrue(self.kbucket.add_peer(peer))
            self.assertEqual(peer, self.kbucket.peers[i])

        # test that a new contact is not added to a full list
        peer = self.peer_manager.get_kademlia_peer(generate_id(), next(self.address_generator), 4444)
        self.assertFalse(self.kbucket.add_peer(peer))

        # test that re-adding an existing contact moves it to the end of the list
        existing_peer = self.kbucket.peers[0]
        self.assertTrue(self.kbucket.add_peer(existing_peer))
        self.assertEqual(existing_peer, self.kbucket.peers[-1])
Example #7
    async def join_network(self, interface: typing.Optional[str] = '',
                           known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
        if not self.listening_port:
            await self.start_listening(interface)
        self.protocol.ping_queue.start()
        self._refresh_task = self.loop.create_task(self.refresh_node())

        # resolve the known node urls
        known_node_addresses = []
        url_to_addr = {}

        if known_node_urls:
            for host, port in known_node_urls:
                address = await resolve_host(host, port, proto='udp')
                if (address, port) not in known_node_addresses and\
                        (address, port) != (self.protocol.external_ip, self.protocol.udp_port):
                    known_node_addresses.append((address, port))
                    url_to_addr[address] = host

        if known_node_addresses:
            peers = [
                KademliaPeer(self.loop, address, udp_port=port)
                for (address, port) in known_node_addresses
            ]
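            # ping the bootstrap peers and keep searching for our own node id until the routing table has contacts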
            while True:
                if not self.protocol.routing_table.get_peers():
                    if self.joined.is_set():
                        self.joined.clear()
                    self.protocol.peer_manager.reset()
                    self.protocol.ping_queue.enqueue_maybe_ping(*peers,
                                                                delay=0.0)
                    peers.extend(await self.peer_search(self.protocol.node_id,
                                                        shortlist=peers,
                                                        count=32))
                    if self.protocol.routing_table.get_peers():
                        self.joined.set()
                        log.info("Joined DHT, %i peers known in %i buckets",
                                 len(self.protocol.routing_table.get_peers()),
                                 self.protocol.routing_table.buckets_with_contacts())
                    else:
                        continue
                await asyncio.sleep(1, loop=self.loop)

        log.info("Joined DHT, %i peers known in %i buckets",
                 len(self.protocol.routing_table.get_peers()),
                 self.protocol.routing_table.buckets_with_contacts())
        self.joined.set()
Example #8
    async def test_replace_bad_nodes(self):
        await self.setup_network(20)
        self.assertEqual(len(self.nodes), 20)
        node = self.nodes[0]
        bad_peers = []
        for candidate in self.nodes[1:10]:
            address, port, node_id = candidate.protocol.external_ip, candidate.protocol.udp_port, candidate.protocol.node_id
            peer = KademliaPeer(self.loop, address, node_id, port)
            bad_peers.append(peer)
            node.protocol.add_peer(peer)
            candidate.stop()
        await asyncio.sleep(.3)  # let pending events settle
        for bad_peer in bad_peers:
            self.assertIn(bad_peer, node.protocol.routing_table.get_peers())
        await node.refresh_node(True)
        await asyncio.sleep(.3)  # let pending events settle
        good_nodes = {good_node.protocol.node_id for good_node in self.nodes[10:]}
        for peer in node.protocol.routing_table.get_peers():
            self.assertIn(peer.node_id, good_nodes)
Example #9
    async def test_peer_search_removes_bad_peers(self):
        # this is an edge case discovered by Tom, but an important one:
        # imagine we only got bad peers and the next refresh is an hour away;
        # instead of failing for an hour we should recover by scheduling pings to the bad peers we find
        await self.setup_network(2, seed_nodes=2)
        node1, node2 = self.nodes
        node2.stop()
        # forcefully make it a bad peer but don't remove it from the routing table
        address, port, node_id = node2.protocol.external_ip, node2.protocol.udp_port, node2.protocol.node_id
        peer = KademliaPeer(self.loop, address, node_id, port)
        self.assertTrue(node1.protocol.peer_manager.peer_is_good(peer))
        node1.protocol.peer_manager.report_failure(node2.protocol.external_ip, node2.protocol.udp_port)
        node1.protocol.peer_manager.report_failure(node2.protocol.external_ip, node2.protocol.udp_port)
        self.assertFalse(node1.protocol.peer_manager.peer_is_good(peer))

        # now a search happens, which removes bad peers while contacting them
        self.assertTrue(node1.protocol.routing_table.get_peers())
        await node1.peer_search(node2.protocol.node_id)
        await asyncio.sleep(.3)  # let pending events settle
        self.assertFalse(node1.protocol.routing_table.get_peers())
Example #10
    def _delayed_add_fixed_peers():
        self.added_fixed_peers = True
        self.peer_queue.put_nowait([
            KademliaPeer(self.loop, address=address, tcp_port=port + 1)
            for address, port in addresses
        ])