Example #1
    async def asyncSetUp(self):
        self.loop = asyncio.get_event_loop()

        self.client_dir = tempfile.mkdtemp()
        self.server_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.client_dir)
        self.addCleanup(shutil.rmtree, self.server_dir)

        self.server_storage = SQLiteStorage(
            Config(), os.path.join(self.server_dir, "lbrynet.sqlite"))
        self.server_blob_manager = BlobFileManager(self.loop, self.server_dir,
                                                   self.server_storage)
        self.server = BlobServer(self.loop, self.server_blob_manager,
                                 'bQEaw42GXsgCAGio1nxFncJSyRmnztSCjP')

        self.client_storage = SQLiteStorage(
            Config(), os.path.join(self.client_dir, "lbrynet.sqlite"))
        self.client_blob_manager = BlobFileManager(self.loop, self.client_dir,
                                                   self.client_storage)
        self.client_peer_manager = PeerManager(self.loop)
        self.server_from_client = KademliaPeer(self.loop,
                                               "127.0.0.1",
                                               b'1' * 48,
                                               tcp_port=33333)

        await self.client_storage.open()
        await self.server_storage.open()
        await self.client_blob_manager.setup()
        await self.server_blob_manager.setup()

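        # start serving blobs on the same address/port the client-side KademliaPeer points at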
        self.server.start_server(33333, '127.0.0.1')
        await self.server.started_listening.wait()
Example #2
    async def asyncSetUp(self):
        self.loop = asyncio.get_event_loop()
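        # a 32-byte key and ~20 MB of random plaintext to split into blobs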
        self.key = b'deadbeef' * 4
        self.cleartext = os.urandom(20000000)

        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(tmp_dir))
        self.conf = Config()
        self.storage = SQLiteStorage(self.conf, os.path.join(tmp_dir, "lbrynet.sqlite"))
        await self.storage.open()
        self.blob_manager = BlobManager(self.loop, tmp_dir, self.storage, self.conf)
        self.stream_manager = StreamManager(self.loop, Config(), self.blob_manager, None, self.storage, None)

        server_tmp_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(server_tmp_dir))
        self.server_conf = Config()
        self.server_storage = SQLiteStorage(self.server_conf, os.path.join(server_tmp_dir, "lbrynet.sqlite"))
        await self.server_storage.open()
        self.server_blob_manager = BlobManager(self.loop, server_tmp_dir, self.server_storage, self.server_conf)

        download_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(download_dir))

        # create the stream
        file_path = os.path.join(tmp_dir, "test_file")
        with open(file_path, 'wb') as f:
            f.write(self.cleartext)

        self.stream = await self.stream_manager.create_stream(file_path)
Example #3
def get_argument_parser():
    main = ArgumentParser(
        'lbrynet',
        description='An interface to the LBRY Network.',
        allow_abbrev=False,
    )
    main.add_argument('-v',
                      '--version',
                      dest='cli_version',
                      action="store_true",
                      help='Show lbrynet CLI version and exit.')
    main.set_defaults(group=None, command=None)
    CLIConfig.contribute_to_argparse(main)
    sub = main.add_subparsers(metavar='COMMAND')
    start = sub.add_parser(
        'start',
        usage='lbrynet start [--config FILE] [--data-dir DIR] [--wallet-dir DIR] [--download-dir DIR] ...',
        help='Start LBRY Network interface.')
    start.add_argument('--quiet',
                       dest='quiet',
                       action="store_true",
                       help='Disable all console output.')
    start.add_argument(
        '--verbose',
        nargs="*",
        help=('Enable debug output. Optionally specify loggers for which debug output '
              'should selectively be applied.'))
    Config.contribute_to_argparse(start)
    start.set_defaults(command='start',
                       start_parser=start,
                       doc=start.format_help())

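    # build a subparser for each API group, then register every command under its group (or at the top level)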
    api = Daemon.get_api_definitions()
    groups = {}
    for group_name in sorted(api['groups']):
        group_parser = sub.add_parser(group_name,
                                      group_name=group_name,
                                      help=api['groups'][group_name])
        groups[group_name] = group_parser.add_subparsers(metavar='COMMAND')

    nicer_order = ['stop', 'get', 'publish', 'resolve']
    for command_name in sorted(api['commands']):
        if command_name not in nicer_order:
            nicer_order.append(command_name)

    for command_name in nicer_order:
        command = api['commands'][command_name]
        if command['group'] is None:
            add_command_parser(sub, command)
        else:
            add_command_parser(groups[command['group']], command)

    return main
Example #4
    async def test_create_and_decrypt_one_blob_stream(self, corrupt=False):
        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(tmp_dir))
        self.storage = SQLiteStorage(Config(), ":memory:")
        await self.storage.open()
        self.blob_manager = BlobFileManager(self.loop, tmp_dir, self.storage)

        download_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(download_dir))

        # create the stream
        file_path = os.path.join(tmp_dir, "test_file")
        with open(file_path, 'wb') as f:
            f.write(self.cleartext)

        sd = await StreamDescriptor.create_stream(self.loop, tmp_dir, file_path, key=self.key)

        # copy blob files
        sd_hash = sd.calculate_sd_hash()
        shutil.copy(os.path.join(tmp_dir, sd_hash), os.path.join(download_dir, sd_hash))
        for blob_info in sd.blobs:
            if blob_info.blob_hash:
                shutil.copy(os.path.join(tmp_dir, blob_info.blob_hash), os.path.join(download_dir, blob_info.blob_hash))
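                # optionally corrupt a full-size blob: truncate() at position 0 empties the copied file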
                if corrupt and blob_info.length == MAX_BLOB_SIZE:
                    with open(os.path.join(download_dir, blob_info.blob_hash), "rb+") as handle:
                        handle.truncate()
                        handle.flush()

        downloader_storage = SQLiteStorage(Config(), os.path.join(download_dir, "lbrynet.sqlite"))
        await downloader_storage.open()

        # add the blobs to the blob table (this would happen upon a blob download finishing)
        downloader_blob_manager = BlobFileManager(self.loop, download_dir, downloader_storage)
        descriptor = await downloader_blob_manager.get_stream_descriptor(sd_hash)

        # assemble the decrypted file
        assembler = StreamAssembler(self.loop, downloader_blob_manager, descriptor.sd_hash)
        await assembler.assemble_decrypted_stream(download_dir)
        if corrupt:
            return self.assertFalse(os.path.isfile(os.path.join(download_dir, "test_file")))

        with open(os.path.join(download_dir, "test_file"), "rb") as f:
            decrypted = f.read()
        self.assertEqual(decrypted, self.cleartext)
        self.assertEqual(True, self.blob_manager.get_blob(sd_hash).get_is_verified())
        self.assertEqual(True, self.blob_manager.get_blob(descriptor.blobs[0].blob_hash).get_is_verified())
        # it's all blobs + the sd blob - the last (empty) blob, which works out to the same count as descriptor.blobs
        self.assertEqual(len(descriptor.blobs), len(await downloader_storage.get_all_finished_blobs()))
        self.assertEqual(
            [descriptor.sd_hash, descriptor.blobs[0].blob_hash], await downloader_storage.get_blobs_to_announce()
        )

        await downloader_storage.close()
        await self.storage.close()
Example #5
    async def test_create_and_decrypt_one_blob_stream(self):
        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(tmp_dir))
        self.storage = SQLiteStorage(Config(),
                                     os.path.join(tmp_dir, "lbrynet.sqlite"))
        await self.storage.open()
        self.blob_manager = BlobFileManager(self.loop, tmp_dir, self.storage)

        download_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(download_dir))

        # create the stream
        file_path = os.path.join(tmp_dir, "test_file")
        with open(file_path, 'wb') as f:
            f.write(self.cleartext)

        sd = await StreamDescriptor.create_stream(self.loop,
                                                  tmp_dir,
                                                  file_path,
                                                  key=self.key)

        # copy blob files
        sd_hash = sd.calculate_sd_hash()
        shutil.copy(os.path.join(tmp_dir, sd_hash),
                    os.path.join(download_dir, sd_hash))
        for blob_info in sd.blobs:
            if blob_info.blob_hash:
                shutil.copy(os.path.join(tmp_dir, blob_info.blob_hash),
                            os.path.join(download_dir, blob_info.blob_hash))
        downloader_storage = SQLiteStorage(
            Config(), os.path.join(download_dir, "lbrynet.sqlite"))
        await downloader_storage.open()

        # add the blobs to the blob table (this would happen upon a blob download finishing)
        downloader_blob_manager = BlobFileManager(self.loop, download_dir,
                                                  downloader_storage)
        descriptor = await downloader_blob_manager.get_stream_descriptor(
            sd_hash)

        # assemble the decrypted file
        assembler = StreamAssembler(self.loop, downloader_blob_manager,
                                    descriptor.sd_hash)
        await assembler.assemble_decrypted_stream(download_dir)

        with open(os.path.join(download_dir, "test_file"), "rb") as f:
            decrypted = f.read()
        self.assertEqual(decrypted, self.cleartext)
        self.assertEqual(True,
                         self.blob_manager.get_blob(sd_hash).get_is_verified())

        await downloader_storage.close()
        await self.storage.close()
Example #6
    async def test_old_key_sort_sd_blob(self):
        loop = asyncio.get_event_loop()
        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(tmp_dir))
        storage = SQLiteStorage(Config(), ":memory:")
        await storage.open()
        blob_manager = BlobFileManager(loop, tmp_dir, storage)

        sd_bytes = b'{"stream_name": "4f62616d6120446f6e6b65792d322e73746c", "blobs": [{"length": 1153488, "blob_num' \
                   b'": 0, "blob_hash": "9fa32a249ce3f2d4e46b78599800f368b72f2a7f22b81df443c7f6bdbef496bd61b4c0079c7' \
                   b'3d79c8bb9be9a6bf86592", "iv": "0bf348867244019c9e22196339016ea6"}, {"length": 0, "blob_num": 1,' \
                   b' "iv": "9f36abae16955463919b07ed530a3d18"}], "stream_type": "lbryfile", "key": "a03742b87628aa7' \
                   b'228e48f1dcd207e48", "suggested_file_name": "4f62616d6120446f6e6b65792d322e73746c", "stream_hash' \
                   b'": "b43f4b1379780caf60d20aa06ac38fb144df61e514ebfa97537018ba73bce8fe37ae712f473ff0ba0be0eef44e1' \
                   b'60207"}'
        sd_hash = '9313d1807551186126acc3662e74d9de29cede78d4f133349ace846273ef116b9bb86be86c54509eb84840e4b032f6b2'
        stream_hash = 'b43f4b1379780caf60d20aa06ac38fb144df61e514ebfa97537018ba73bce8fe37ae712f473ff0ba0be0eef44e160207'

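        # write the raw sd blob and wait for it to verify against sd_hash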
        blob = blob_manager.get_blob(sd_hash)
        blob.set_length(len(sd_bytes))
        writer = blob.open_for_writing()
        writer.write(sd_bytes)
        await blob.verified.wait()
        descriptor = await StreamDescriptor.from_stream_descriptor_blob(
            loop, blob_manager.blob_dir, blob)
        self.assertEqual(stream_hash, descriptor.get_stream_hash())
        self.assertEqual(sd_hash, descriptor.calculate_old_sort_sd_hash())
        self.assertNotEqual(sd_hash, descriptor.calculate_sd_hash())
Example #7
    async def test_create_blob(self):
        blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
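        # 2 MiB - 1 bytes, one byte under the maximum blob size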
        blob_bytes = b'1' * ((2 * 2**20) - 1)

        loop = asyncio.get_event_loop()
        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(tmp_dir))

        storage = SQLiteStorage(Config(),
                                os.path.join(tmp_dir, "lbrynet.sqlite"))
        blob_manager = BlobFileManager(loop, tmp_dir, storage)

        await storage.open()
        await blob_manager.setup()

        # add the blob on the server
        blob = blob_manager.get_blob(blob_hash, len(blob_bytes))
        self.assertEqual(blob.get_is_verified(), False)
        self.assertNotIn(blob_hash, blob_manager.completed_blob_hashes)

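        # writing the full length should verify the blob and add it to completed_blob_hashes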
        writer = blob.open_for_writing()
        writer.write(blob_bytes)
        await blob.finished_writing.wait()
        self.assertTrue(os.path.isfile(blob.file_path))
        self.assertEqual(blob.get_is_verified(), True)
        self.assertIn(blob_hash, blob_manager.completed_blob_hashes)
Example #8
    async def test_host_different_blobs_to_multiple_peers_at_once(self):
        blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
        mock_blob_bytes = b'1' * ((2 * 2**20) - 1)

        sd_hash = "3e2706157a59aaa47ef52bc264fce488078b4026c0b9bab649a8f2fe1ecc5e5cad7182a2bb7722460f856831a1ac0f02"
        mock_sd_blob_bytes = b"""{"blobs": [{"blob_hash": "6f53c72de100f6f007aa1b9720632e2d049cc6049e609ad790b556dba262159f739d5a14648d5701afc84b991254206a", "blob_num": 0, "iv": "3b6110c2d8e742bff66e4314863dee7e", "length": 2097152}, {"blob_hash": "18493bc7c5164b00596153859a0faffa45765e47a6c3f12198a4f7be4658111505b7f8a15ed0162306a0672c4a9b505d", "blob_num": 1, "iv": "df973fa64e73b4ff2677d682cdc32d3e", "length": 2097152}, {"blob_num": 2, "iv": "660d2dc2645da7c7d4540a466fcb0c60", "length": 0}], "key": "6465616462656566646561646265656664656164626565666465616462656566", "stream_hash": "22423c6786584974bd6b462af47ecb03e471da0ef372fe85a4e71a78bef7560c4afb0835c689f03916105404653b7bdf", "stream_name": "746573745f66696c65", "stream_type": "lbryfile", "suggested_file_name": "746573745f66696c65"}"""

        second_client_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, second_client_dir)

        second_client_storage = SQLiteStorage(
            Config(), os.path.join(second_client_dir, "lbrynet.sqlite"))
        second_client_blob_manager = BlobFileManager(self.loop,
                                                     second_client_dir,
                                                     second_client_storage)
        server_from_second_client = KademliaPeer(self.loop,
                                                 "127.0.0.1",
                                                 b'1' * 48,
                                                 tcp_port=33333)

        await second_client_storage.open()
        await second_client_blob_manager.setup()

        await self._add_blob_to_server(blob_hash, mock_blob_bytes)
        await self._add_blob_to_server(sd_hash, mock_sd_blob_bytes)

        second_client_blob = second_client_blob_manager.get_blob(blob_hash)

        await asyncio.gather(
            request_blob(self.loop, second_client_blob,
                         server_from_second_client.address,
                         server_from_second_client.tcp_port, 2, 3),
            self._test_transfer_blob(sd_hash),
            second_client_blob.finished_writing.wait())
        self.assertEqual(second_client_blob.get_is_verified(), True)
Example #9
    def test_init_with_wrong_overrides(self):
        class FakeRandomComponent:
            component_name = "someComponent"
            depends_on = []

        with self.assertRaises(SyntaxError):
            ComponentManager(Config(), randomComponent=FakeRandomComponent)
Example #10
    async def asyncSetUp(self):
        self.storage = SQLiteStorage(Config(), ':memory:')
        self.blob_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.blob_dir)
        self.blob_manager = BlobFileManager(asyncio.get_event_loop(),
                                            self.blob_dir, self.storage)
        await self.storage.open()
Example #11
async def main(blob_hash: str, url: str):
    conf = Config()
    loop = asyncio.get_running_loop()
    host_url, port = url.split(":")
    try:
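        # ipaddress.ip_address raises ValueError when host_url is not an IP literal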
        host = None
        if ipaddress.ip_address(host_url):
            host = host_url
    except ValueError:
        host = None
    if not host:
        host_info = await loop.getaddrinfo(
            host_url,
            'https',
            proto=socket.IPPROTO_TCP,
        )
        host = host_info[0][4][0]

    storage = SQLiteStorage(conf, os.path.join(conf.data_dir,
                                               "lbrynet.sqlite"))
    blob_manager = BlobManager(loop, os.path.join(conf.data_dir, "blobfiles"),
                               storage)
    await storage.open()
    await blob_manager.setup()

    blob = blob_manager.get_blob(blob_hash)
    success, keep = await request_blob(loop, blob, host, int(port),
                                       conf.peer_connect_timeout,
                                       conf.blob_download_timeout)
    print(
        f"{'downloaded' if success else 'failed to download'} {blob_hash} from {host}:{port}\n"
        f"keep connection: {keep}")
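    # clean up: delete the blob again if the download verified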
    if blob.get_is_verified():
        await blob_manager.delete_blobs([blob.blob_hash])
        print(f"deleted {blob_hash}")
Example #12
    async def test_host_same_blob_to_multiple_peers_at_once(self):
        blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
        mock_blob_bytes = b'1' * ((2 * 2**20) - 1)

        second_client_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, second_client_dir)

        second_client_storage = SQLiteStorage(
            Config(), os.path.join(second_client_dir, "lbrynet.sqlite"))
        second_client_blob_manager = BlobFileManager(self.loop,
                                                     second_client_dir,
                                                     second_client_storage)
        server_from_second_client = KademliaPeer(self.loop,
                                                 "127.0.0.1",
                                                 b'1' * 48,
                                                 tcp_port=33333)

        await second_client_storage.open()
        await second_client_blob_manager.setup()

        await self._add_blob_to_server(blob_hash, mock_blob_bytes)

        second_client_blob = second_client_blob_manager.get_blob(blob_hash)

        # download the blob
        await asyncio.gather(
            request_blob(self.loop, second_client_blob,
                         server_from_second_client.address,
                         server_from_second_client.tcp_port, 2, 3),
            self._test_transfer_blob(blob_hash))
        await second_client_blob.finished_writing.wait()
        self.assertEqual(second_client_blob.get_is_verified(), True)
Example #13
    def setUp(self):
        def noop():
            return None

        test_utils.reset_time(self)
        self.test_daemon = get_test_daemon(Config())
        self.test_daemon.wallet_manager.get_best_blockhash = noop
Example #14
    async def setup_blob_manager(self, save_blobs=True):
        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(tmp_dir))
        self.config = Config(save_blobs=save_blobs)
        self.storage = SQLiteStorage(self.config, os.path.join(tmp_dir, "lbrynet.sqlite"))
        self.blob_manager = BlobManager(self.loop, tmp_dir, self.storage, self.config)
        await self.storage.open()
Example #15
    async def asyncSetUp(self):
        self.tmp_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(self.tmp_dir))
        self.loop = asyncio.get_running_loop()
        self.config = Config()
        self.storage = SQLiteStorage(self.config, ":memory:", self.loop)
        self.blob_manager = BlobManager(self.loop, self.tmp_dir, self.storage, self.config)
        await self.storage.open()
Example #16
async def main(uris=None):
    if not uris:
        uris = await get_frontpage_uris()
    conf = Config()
    try:
        await daemon_rpc(conf, 'status')
    except (ClientConnectorError, ConnectionError):
        print("Could not connect to daemon")
        return 1
    print(f"Checking {len(uris)} uris from the front page")
    print("**********************************************")

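    # keep only the uris that resolve without an error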
    resolvable = []
    for name in uris:
        resolved = await daemon_rpc(conf, 'resolve', uri=name)
        if 'error' not in resolved.get(name, {}):
            resolvable.append(name)

    print(f"{len(resolvable)}/{len(uris)} are resolvable")

    first_byte_times = []
    downloaded_times = []
    failures = []
    download_failures = []

    for uri in resolvable:
        await daemon_rpc(conf, 'file_delete', delete_from_download_dir=True, claim_name=parse_lbry_uri(uri).name)

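    # time 'get' for each resolvable uri, deleting the downloaded file again afterwards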
    for i, uri in enumerate(resolvable):
        start = time.time()
        try:
            await daemon_rpc(conf, 'get', uri)
            first_byte = time.time()
            first_byte_times.append(first_byte - start)
            print(f"{i + 1}/{len(resolvable)} - {first_byte - start} {uri}")
            # downloaded, msg, blobs_in_stream = await wait_for_done(conf, uri)
            # if downloaded:
            #     downloaded_times.append((time.time()-start) / downloaded)
            #     print(f"\tdownloaded {uri} @ {(time.time()-start) / blobs_in_stream} seconds per blob")
            # else:
            #     print(f"\tfailed to download {uri}, got {msg}")
            #     download_failures.append(uri)
        except Exception:
            print(f"{i + 1}/{len(resolvable)} - timeout in {time.time() - start} {uri}")
            failures.append(uri)
        await daemon_rpc(conf, 'file_delete', delete_from_download_dir=True, claim_name=parse_lbry_uri(uri).name)
        await asyncio.sleep(0.1)

    print("**********************************************")
    result = f"Tried to start downloading {len(resolvable)} streams from the front page\n" \
             f"95% confidence time-to-first-byte: {confidence(first_byte_times, 1.984)}\n" \
             f"99% confidence time-to-first-byte:  {confidence(first_byte_times, 2.626)}\n" \
             f"Variance: {variance(first_byte_times)}\n" \
             f"Started {len(first_byte_times)}/{len(resolvable)} streams"
    if failures:
        nt = '\n\t'
        result += f"\nFailures:\n\t{nt.join(failures)}"
    print(result)
Example #17
    def test_max_key_fee_from_args(self):
        parser = argparse.ArgumentParser()
        Config.contribute_to_argparse(parser)

        # default
        args = parser.parse_args([])
        c = Config.create_from_arguments(args)
        self.assertEqual(c.max_key_fee, {'amount': 50.0, 'currency': 'USD'})

        # disabled
        args = parser.parse_args(['--no-max-key-fee'])
        c = Config.create_from_arguments(args)
        self.assertEqual(c.max_key_fee, None)

        # set
        args = parser.parse_args(['--max-key-fee', '1.0', 'BTC'])
        c = Config.create_from_arguments(args)
        self.assertEqual(c.max_key_fee, {'amount': 1.0, 'currency': 'BTC'})
Example #18
    async def test_create_managed_stream_announces(self):
        # setup a blob manager
        storage = SQLiteStorage(Config(), ":memory:")
        await storage.open()
        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(tmp_dir))
        blob_manager = BlobFileManager(self.loop, tmp_dir, storage)
        stream_manager = StreamManager(self.loop, Config(), blob_manager, None, storage, None)
        # create the stream
        download_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(download_dir))
        file_path = os.path.join(download_dir, "test_file")
        with open(file_path, 'wb') as f:
            f.write(b'testtest')

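        # creating the stream should queue the sd blob and the first content blob for announcement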
        stream = await stream_manager.create_stream(file_path)
        self.assertEqual(
            [stream.sd_hash, stream.descriptor.blobs[0].blob_hash],
            await storage.get_blobs_to_announce())
Example #19
    def setUp(self):
        self.component_manager = ComponentManager(
            Config(),
            skip_components=[
                DATABASE_COMPONENT, DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT,
                PEER_PROTOCOL_SERVER_COMPONENT, UPNP_COMPONENT,
                HEADERS_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
            ],
            wallet=FakeDelayedWallet,
            stream_manager=FakeDelayedStreamManager,
            blob_manager=FakeDelayedBlobManager)
Example #20
async def main():
    conf = Config()
    try:
        init_curses()
        c = None
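        # poll the daemon's routing table and refresh the display until 'q' is pressed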
        while c not in [ord('q'), ord('Q')]:
            routing_info = await daemon_rpc(conf, 'routing_table_get')
            refresh(routing_info)
            c = stdscr.getch()
            time.sleep(0.1)
    finally:
        teardown_curses()
Example #21
    async def setup_stream(self, blob_count: int = 10):
        self.stream_bytes = b''
        for _ in range(blob_count):
            self.stream_bytes += os.urandom(MAX_BLOB_SIZE - 1)
        # create the stream
        file_path = os.path.join(self.server_dir, "test_file")
        with open(file_path, 'wb') as f:
            f.write(self.stream_bytes)
        descriptor = await StreamDescriptor.create_stream(self.loop, self.server_blob_manager.blob_dir, file_path)
        self.sd_hash = descriptor.calculate_sd_hash()
        conf = Config(data_dir=self.server_dir, wallet_dir=self.server_dir, download_dir=self.server_dir,
                      reflector_servers=[])
        self.downloader = StreamDownloader(self.loop, conf, self.client_blob_manager, self.sd_hash)
Example #22
    async def setup_node(self, peer_addresses, address, node_id):
        self.nodes: typing.Dict[int, Node] = {}
        self.advance = dht_mocks.get_time_accelerator(self.loop, self.loop.time())
        self.conf = Config()
        self.storage = SQLiteStorage(self.conf, ":memory:", self.loop, self.loop.time)
        await self.storage.open()
        self.peer_manager = PeerManager(self.loop)
        self.node = Node(self.loop, self.peer_manager, node_id, 4444, 4444, 3333, address)
        await self.node.start_listening(address)
        self.blob_announcer = BlobAnnouncer(self.loop, self.node, self.storage)
        for node_id, address in peer_addresses:
            await self.add_peer(node_id, address)
        self.node.joined.set()
Example #23
    def test_linux_defaults(self):
        c = Config()
        self.assertEqual(c.data_dir,
                         os.path.expanduser('~/.local/share/lbry/lbrynet'))
        self.assertEqual(c.wallet_dir,
                         os.path.expanduser('~/.local/share/lbry/lbryum'))
        self.assertEqual(c.download_dir, os.path.expanduser('~/Downloads'))
        self.assertEqual(
            c.config,
            os.path.expanduser(
                '~/.local/share/lbry/lbrynet/daemon_settings.yml'))
        self.assertEqual(c.api_connection_url, 'http://localhost:5279/lbryapi')
        self.assertEqual(
            c.log_file_path,
            os.path.expanduser('~/.local/share/lbry/lbrynet/lbrynet.log'))
Example #24
def get_test_daemon(conf: Config, with_fee=False):
    conf.data_dir = '/tmp'
    rates = {
        'BTCLBC': {'spot': 3.0, 'ts': test_utils.DEFAULT_ISO_TIME + 1},
        'USDBTC': {'spot': 2.0, 'ts': test_utils.DEFAULT_ISO_TIME + 2}
    }
    component_manager = ComponentManager(
        conf, skip_components=[
            DATABASE_COMPONENT, DHT_COMPONENT, WALLET_COMPONENT, UPNP_COMPONENT,
            PEER_PROTOCOL_SERVER_COMPONENT, HASH_ANNOUNCER_COMPONENT,
            EXCHANGE_RATE_MANAGER_COMPONENT, BLOB_COMPONENT, HEADERS_COMPONENT,
            RATE_LIMITER_COMPONENT],
        file_manager=FakeFileManager
    )
    daemon = LBRYDaemon(conf, component_manager=component_manager)
    daemon.payment_rate_manager = OnlyFreePaymentsManager()
    daemon.wallet_manager = mock.Mock(spec=LbryWalletManager)
    daemon.wallet_manager.wallet = mock.Mock(spec=Wallet)
    daemon.wallet_manager.use_encryption = False
    daemon.wallet_manager.network = FakeNetwork()
    daemon.storage = mock.Mock(spec=SQLiteStorage)
    market_feeds = [BTCLBCFeed(), USDBTCFeed()]
    daemon.exchange_rate_manager = DummyExchangeRateManager(market_feeds, rates)
    daemon.stream_manager = component_manager.get_component(FILE_MANAGER_COMPONENT)

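    # fake claim metadata returned by the mocked resolve below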
    metadata = {
        "author": "fake author",
        "language": "en",
        "content_type": "fake/format",
        "description": "fake description",
        "license": "fake license",
        "license_url": "fake license url",
        "nsfw": False,
        "sources": {
            "lbry_sd_hash": 'd2b8b6e907dde95245fe6d144d16c2fdd60c4e0c6463ec98'
                            'b85642d06d8e9414e8fcfdcb7cb13532ec5454fb8fe7f280'
        },
        "thumbnail": "fake thumbnail",
        "title": "fake title",
        "ver": "0.0.3"
    }
    if with_fee:
        metadata.update(
            {"fee": {"USD": {"address": "bQ6BGboPV2SpTMEP7wLNiAcnsZiH8ye6eA", "amount": 0.75}}})
    migrated = smart_decode(json.dumps(metadata))
    daemon._resolve = daemon.resolve = lambda *_: defer.succeed(
        {"test": {'claim': {'value': migrated.claim_dict}}})
    return daemon
Example #25
    def setUp(self):
        test_utils.reset_time(self)
        self.test_daemon = get_test_daemon(Config())
        self.test_daemon.file_manager.lbry_files = self._get_fake_lbry_files()

        self.test_points_paid = [
            2.5, 4.8, 5.9, 5.9, 5.9, 6.1, 7.1, 8.2, 8.4, 9.1
        ]
        self.test_file_names = [
            'add.mp3', 'any.mov', 'day.tiff', 'decade.odt', 'different.json', 'hotel.bmp',
            'might.bmp', 'physical.json', 'remember.mp3', 'than.ppt'
        ]
        self.test_authors = [
            'ashlee27', 'bfrederick', 'brittanyhicks', 'davidsonjeffrey', 'heidiherring',
            'jlewis', 'kswanson', 'michelle50', 'richard64', 'xsteele'
        ]
        return f2d(self.test_daemon.component_manager.start())
Example #26
    def test_init_with_overrides(self):
        class FakeWallet:
            component_name = "wallet"
            depends_on = []

            def __init__(self, component_manager):
                self.component_manager = component_manager

            @property
            def component(self):
                return self

        new_component_manager = ComponentManager(Config(), wallet=FakeWallet)
        fake_wallet = new_component_manager.get_component("wallet")
        # wallet should be an instance of FakeWallet and not WalletComponent from Components.py
        self.assertIsInstance(fake_wallet, FakeWallet)
        self.assertNotIsInstance(fake_wallet, Components.WalletComponent)
Example #27
    def setUp(self):
        self.default_components_sort = [
            [
                Components.HeadersComponent, Components.DatabaseComponent,
                Components.ExchangeRateManagerComponent,
                Components.UPnPComponent
            ],
            [
                Components.BlobComponent, Components.DHTComponent,
                Components.WalletComponent
            ],
            [
                Components.HashAnnouncerComponent,
                Components.PeerProtocolServerComponent,
                Components.StreamManagerComponent,
            ]
        ]
        self.component_manager = ComponentManager(Config())
Example #28
    async def test_sync_blob_manager_on_startup(self):
        loop = asyncio.get_event_loop()
        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(lambda: shutil.rmtree(tmp_dir))

        storage = SQLiteStorage(Config(),
                                os.path.join(tmp_dir, "lbrynet.sqlite"))
        blob_manager = BlobFileManager(loop, tmp_dir, storage)

        # add a blob file
        blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
        blob_bytes = b'1' * ((2 * 2**20) - 1)
        with open(os.path.join(blob_manager.blob_dir, blob_hash), 'wb') as f:
            f.write(blob_bytes)

        # it should not have been added automatically on startup
        await storage.open()
        await blob_manager.setup()
        self.assertSetEqual(blob_manager.completed_blob_hashes, set())

        # make sure we can add the blob
        await blob_manager.blob_completed(
            blob_manager.get_blob(blob_hash, len(blob_bytes)))
        self.assertSetEqual(blob_manager.completed_blob_hashes, {blob_hash})

        # stop the blob manager and restart it, make sure the blob is there
        blob_manager.stop()
        self.assertSetEqual(blob_manager.completed_blob_hashes, set())
        await blob_manager.setup()
        self.assertSetEqual(blob_manager.completed_blob_hashes, {blob_hash})

        # test that the blob is removed upon the next startup after the file being manually deleted
        blob_manager.stop()

        # manually delete the blob file and restart the blob manager
        os.remove(os.path.join(blob_manager.blob_dir, blob_hash))
        await blob_manager.setup()
        self.assertSetEqual(blob_manager.completed_blob_hashes, set())

        # check that the deleted blob was updated in the database
        self.assertEqual('pending', (await storage.run_and_return_one_or_none(
            'select status from blob where blob_hash=?', blob_hash)))
Example #29
    def test_max_key_fee_from_yaml(self):
        with tempfile.TemporaryDirectory() as temp_dir:
            config = os.path.join(temp_dir, 'settings.yml')
            with open(config, 'w') as fd:
                fd.write('max_key_fee: {currency: USD, amount: 1}\n')
            c = Config.create_from_arguments(
                types.SimpleNamespace(config=config))
            self.assertEqual(c.max_key_fee['currency'], 'USD')
            self.assertEqual(c.max_key_fee['amount'], 1)
            with self.assertRaises(InvalidCurrencyError):
                c.max_key_fee = {'currency': 'BCH', 'amount': 1}
            with c.update_config():
                c.max_key_fee = {'currency': 'BTC', 'amount': 1}
            with open(config, 'r') as fd:
                self.assertEqual(
                    fd.read(), 'max_key_fee:\n  amount: 1\n  currency: BTC\n')
            with c.update_config():
                c.max_key_fee = None
            with open(config, 'r') as fd:
                self.assertEqual(fd.read(), 'max_key_fee: null\n')
Example #30
    async def asyncSetUp(self):
        conf = Config()
        conf.data_dir = '/tmp'
        conf.share_usage_data = False
        conf.api = 'localhost:5299'
        conf.components_to_skip = (DATABASE_COMPONENT, BLOB_COMPONENT,
                                   HEADERS_COMPONENT, WALLET_COMPONENT,
                                   DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT,
                                   STREAM_MANAGER_COMPONENT,
                                   PEER_PROTOCOL_SERVER_COMPONENT,
                                   UPNP_COMPONENT,
                                   EXCHANGE_RATE_MANAGER_COMPONENT)
        Daemon.component_attributes = {}
        self.daemon = Daemon(conf)
        await self.daemon.start()