def test_failover_fail(caplog):
    """With a single configured URL, failover fails and logs nothing."""
    daemon = Daemon(coin, urls[0])
    with caplog.at_level(logging.INFO):
        result = daemon.failover()
    # Only one URL is configured, so there is nothing to fail over to.
    assert result is False
    assert daemon.current_url() == urls[0]
    # Plain string: the original used an f-string with no placeholders (F541).
    assert not in_caplog(caplog, 'failing over')
def test_set_urls_one(caplog):
    """A single daemon URL is accepted and announced as the current one."""
    with caplog.at_level(logging.INFO):
        d = Daemon(coin, urls[0])
    # Exactly one URL registered, and it is the active one.
    assert len(d.urls) == 1
    assert d.current_url() == urls[0]
    shown = d.logged_url()
    assert shown == '127.0.0.1:8332/'
    assert in_caplog(caplog, f'daemon #1 at {shown} (current)')
def test_set_urls_two(caplog):
    """A comma-separated pair of URLs registers both, first one current."""
    with caplog.at_level(logging.INFO):
        d = Daemon(coin, ','.join(urls))
    # Both URLs registered; the first is active.
    assert len(d.urls) == 2
    assert d.current_url() == urls[0]
    shown = d.logged_url()
    assert shown == '127.0.0.1:8332/'
    # Both daemons are announced in the log, only #1 marked current.
    assert in_caplog(caplog, f'daemon #1 at {shown} (current)')
    assert in_caplog(caplog, 'daemon #2 at 192.168.0.1:8332')
# Esempio n. 4 (scraper artifact: example header and vote count "0")
    async def serve(self, shutdown_event):
        '''Start the RPC server and wait for the mempool to synchronize.  Then
        start serving external clients.

        shutdown_event is passed through to the SessionManager; this
        coroutine runs until the task group finishes or a task fails.
        '''
        # Hard pin on the aiorpcX minor version; refuse to start otherwise.
        if not (0, 22) <= aiorpcx_version < (0, 23):
            raise RuntimeError('aiorpcX version 0.22.x is required')

        env = self.env
        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
        # Log startup configuration for diagnostics.
        self.logger.info(f'software version: {electrumx.version}')
        self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
        self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
        self.logger.info(f'event loop policy: {env.loop_policy}')
        self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')

        notifications = Notifications()

        # The daemon connection is held open for the lifetime of the server.
        async with Daemon(env.coin, env.daemon_url) as daemon:
            db = DB(env)
            bp = block_proc.BlockProcessor(env, db, daemon, notifications)

            # Set notifications up to implement the MemPoolAPI
            def get_db_height():
                # Wrapped in a function so the mempool always sees the
                # current DB height, not a snapshot.
                return db.state.height

            notifications.height = daemon.height
            notifications.db_height = get_db_height
            notifications.cached_height = daemon.cached_height
            notifications.mempool_hashes = daemon.mempool_hashes
            notifications.raw_transactions = daemon.getrawtransactions
            notifications.lookup_utxos = db.lookup_utxos
            # Register the class (not the instance) as a MemPoolAPI
            # implementation; the attributes above satisfy the protocol.
            MemPoolAPI.register(Notifications)
            mempool = MemPool(env.coin, notifications)

            session_mgr = SessionManager(env, db, bp, daemon, mempool,
                                         shutdown_event)

            # Test daemon authentication, and also ensure it has a cached
            # height.  Do this before entering the task group.
            await daemon.height()

            caught_up_event = Event()
            mempool_event = Event()

            async def wait_for_catchup():
                # Defer merkle-cache population and mempool sync until the
                # block processor has caught up with the chain tip.
                await caught_up_event.wait()
                await group.spawn(db.populate_header_merkle_cache())
                await group.spawn(mempool.keep_synchronized(mempool_event))

            async with TaskGroup() as group:
                await group.spawn(
                    session_mgr.serve(notifications, mempool_event))
                await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
                await group.spawn(bp.check_cache_size_loop())
                await group.spawn(wait_for_catchup())

                # Re-raise the first failure from any spawned task;
                # cancelled tasks are ignored.
                async for task in group:
                    if not task.cancelled():
                        task.result()
def test_set_urls_short():
    """URLs missing scheme, trailing slash, or port are normalized fully."""
    def assert_both_registered(joined):
        # Every variant must yield two URLs with the first one current.
        d = Daemon(coin, joined)
        assert d.current_url() == urls[0]
        assert len(d.urls) == 2

    # Scheme prefix stripped ("host:port/path" form).
    assert_both_registered(
        ','.join('/'.join(u.split('/')[2:]) for u in urls))
    # Trailing slash stripped.
    assert_both_registered(','.join(u[:-1] for u in urls))
    # Port stripped.
    assert_both_registered(','.join(u[:u.rfind(':')] for u in urls))
def test_failover_good(caplog):
    """With two URLs, failover advances to the second and wraps around."""
    d = Daemon(coin, ','.join(urls))
    with caplog.at_level(logging.INFO):
        first = d.failover()
    assert first is True
    assert d.current_url() == urls[1]
    assert in_caplog(caplog, f'failing over to {d.logged_url()}')
    # A second failover wraps back to the first URL.
    second = d.failover()
    assert second is True
    assert d.current_url() == urls[0]
def test_set_urls_bad():
    """Empty and malformed URL strings raise CoinError."""
    for bad_url in ('', 'a'):
        with pytest.raises(CoinError):
            Daemon(coin, bad_url)
# Esempio n. 8 (scraper artifact: example header and vote count "0")
def daemon():
    """Build a Daemon over both test URLs for the Bitcoin coin."""
    url_csv = ','.join(urls)
    return Daemon(Bitcoin, url_csv)