Example #1
    def __init__(self, env):
        '''Initialize everything that doesn't require the event loop.'''
        super().__init__(env)

        if aiorpcx_version < self.AIORPCX_MIN:
            raise RuntimeError('ElectrumX requires aiorpcX >= '
                               f'{version_string(self.AIORPCX_MIN)}')

        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
        self.logger.info(f'software version: {electrumx.version}')
        self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
        self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
        self.logger.info(f'event loop policy: {env.loop_policy}')
        self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')

        notifications = Notifications()
        daemon = env.coin.DAEMON(env)
        BlockProcessor = env.coin.BLOCK_PROCESSOR
        self.bp = BlockProcessor(env, self.tasks, daemon, notifications)
        self.mempool = MemPool(env.coin, self.tasks, daemon, notifications,
                               self.bp.lookup_utxos)
        self.chain_state = ChainState(env, self.tasks, daemon, self.bp,
                                      notifications)
        self.peer_mgr = PeerManager(env, self.tasks, self.chain_state)
        self.session_mgr = SessionManager(env, self.tasks, self.chain_state,
                                          self.mempool, self.peer_mgr,
                                          notifications, self.shutdown_event)
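Note: `aiorpcx_version` above is a version tuple compared element-wise against `AIORPCX_MIN`. A minimal, self-contained sketch of that gate; the `version_string` helper below is a stand-in for the one in electrumx's util module, not its verbatim code:

AIORPCX_MIN = (0, 5, 4)       # hypothetical minimum version tuple
aiorpcx_version = (0, 5, 6)   # stand-in for the installed aiorpcX version

def version_string(ptuple):
    '''Format a version tuple such as (0, 5, 4) as "0.5.4".'''
    return '.'.join(str(part) for part in ptuple)

# Tuples compare element-wise: (0, 5, 4) <= (0, 5, 6) < (0, 6).
if aiorpcx_version < AIORPCX_MIN:
    raise RuntimeError(
        f'ElectrumX requires aiorpcX >= {version_string(AIORPCX_MIN)}')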
Example #2
    async def serve(self, shutdown_event):
        '''Start the RPC server and wait for the mempool to synchronize.  Then
        start serving external clients.
        '''
        if not (0, 22) <= aiorpcx_version < (0, 23):
            raise RuntimeError('aiorpcX version 0.22.x is required')

        env = self.env
        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
        self.logger.info(f'software version: {electrumx.version}')
        self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
        self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
        self.logger.info(f'event loop policy: {env.loop_policy}')
        self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')

        notifications = Notifications()
        Daemon = env.coin.DAEMON
        BlockProcessor = env.coin.BLOCK_PROCESSOR

        async with Daemon(env.coin, env.daemon_url) as daemon:
            db = DB(env)
            bp = BlockProcessor(env, db, daemon, notifications)

            # Set notifications up to implement the MemPoolAPI
            def get_db_height():
                return db.db_height

            notifications.height = daemon.height
            notifications.db_height = get_db_height
            notifications.cached_height = daemon.cached_height
            notifications.mempool_hashes = daemon.mempool_hashes
            notifications.raw_transactions = daemon.getrawtransactions
            notifications.lookup_utxos = db.lookup_utxos
            MemPoolAPI.register(Notifications)
            mempool = MemPool(env.coin, notifications)

            session_mgr = SessionManager(env, db, bp, daemon, mempool,
                                         shutdown_event)

            # Test daemon authentication, and also ensure it has a cached
            # height.  Do this before entering the task group.
            await daemon.height()

            caught_up_event = Event()
            mempool_event = Event()

            async def wait_for_catchup():
                await caught_up_event.wait()
                await group.spawn(db.populate_header_merkle_cache())
                await group.spawn(mempool.keep_synchronized(mempool_event))

            async with TaskGroup() as group:
                await group.spawn(
                    session_mgr.serve(notifications, mempool_event))
                await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
                await group.spawn(wait_for_catchup())

                async for task in group:
                    if not task.cancelled():
                        task.result()
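Note: `MemPoolAPI.register(Notifications)` is `abc.ABCMeta.register`: it declares `Notifications` a virtual subclass, so `isinstance` checks pass without inheritance. A minimal sketch of the mechanism, with class bodies reduced to illustrative stubs:

from abc import ABC

class MemPoolAPI(ABC):
    '''Abstract interface the mempool expects from its host.'''

class Notifications:
    '''Plain class; the attributes the API needs are assigned at runtime.'''

MemPoolAPI.register(Notifications)    # virtual subclass, no inheritance
assert isinstance(Notifications(), MemPoolAPI)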
Example #3
    def __init__(self, env):
        '''Initialize everything that doesn't require the event loop.'''
        super().__init__(env)

        version_string = util.version_string
        if aiorpcx_version < self.AIORPCX_MIN:
            raise RuntimeError('ElectrumX requires aiorpcX >= '
                               f'{version_string(self.AIORPCX_MIN)}')

        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
        self.logger.info(f'software version: {electrumx.version}')
        self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
        self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
        self.logger.info(f'event loop policy: {env.loop_policy}')

        self.coin = env.coin
        self.tasks = TaskSet()
        self.history_cache = pylru.lrucache(256)
        self.header_cache = pylru.lrucache(8)
        self.cache_height = 0
        self.cache_mn_height = 0
        self.mn_cache = pylru.lrucache(256)
        env.max_send = max(350000, env.max_send)

        self.loop = asyncio.get_event_loop()
        self.executor = ThreadPoolExecutor()
        self.loop.set_default_executor(self.executor)

        # The complex objects.  Note PeerManager references self.loop (ugh)
        self.session_mgr = SessionManager(env, self)
        self.daemon = self.coin.DAEMON(env)
        self.bp = self.coin.BLOCK_PROCESSOR(env, self, self.daemon)
        self.mempool = MemPool(self.bp, self)
        self.peer_mgr = PeerManager(env, self)
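The `pylru.lrucache(n)` caches above behave like dicts that evict the least-recently-used entry once `n` keys are held. A short usage sketch (keys and values made up):

import pylru

cache = pylru.lrucache(2)    # capacity of two entries
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3               # evicts 'a', the least recently used key
assert 'a' not in cache and 'b' in cache and 'c' in cache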
Example #4
    async def serve(self, shutdown_event):
        '''Start the RPC server and wait for the mempool to synchronize.  Then
        start serving external clients.
        '''
        if not (0, 7, 1) <= aiorpcx_version < (0, 8):
            raise RuntimeError('aiorpcX version 0.7.x required with x >= 1')

        env = self.env
        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
        self.logger.info(f'software version: {electrumx.version}')
        self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
        self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
        self.logger.info(f'event loop policy: {env.loop_policy}')
        self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')

        notifications = Notifications()
        daemon = env.coin.DAEMON(env)
        BlockProcessor = env.coin.BLOCK_PROCESSOR
        bp = BlockProcessor(env, daemon, notifications)
        mempool = MemPool(env.coin, daemon, notifications, bp.lookup_utxos)
        chain_state = ChainState(env, daemon, bp)
        session_mgr = SessionManager(env, chain_state, mempool, notifications,
                                     shutdown_event)

        caught_up_event = Event()
        serve_externally_event = Event()
        synchronized_event = Event()

        async with TaskGroup() as group:
            await group.spawn(session_mgr.serve(serve_externally_event))
            await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
            await caught_up_event.wait()
            await group.spawn(mempool.keep_synchronized(synchronized_event))
            await synchronized_event.wait()
            serve_externally_event.set()
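Every `serve()` variant follows the same startup ordering: spawn all services into one task group, then gate external serving on the catch-up and mempool-sync events. A stdlib-only sketch of that ordering using `asyncio` (Python 3.11+) in place of aiorpcX's curio-style `TaskGroup`; the task bodies are illustrative:

import asyncio

async def main():
    caught_up = asyncio.Event()
    synchronized = asyncio.Event()
    serve_externally = asyncio.Event()

    async def process_blocks():
        # ... fetch and process blocks until caught up, then:
        caught_up.set()

    async def sync_mempool():
        await caught_up.wait()      # mempool sync starts after catch-up
        # ... synchronize with the daemon mempool, then:
        synchronized.set()

    async with asyncio.TaskGroup() as group:
        group.create_task(process_blocks())
        group.create_task(sync_mempool())
        await synchronized.wait()
        serve_externally.set()      # sessions may now serve external clients

asyncio.run(main())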
Example #5
async def test_mempool_removals():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api, refresh_secs=0.01)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        # Remove half the TXs from the mempool
        start = len(api.ordered_adds) // 2
        for tx_hash in api.ordered_adds[start:]:
            del api.txs[tx_hash]
            del api.raw_txs[tx_hash]
        await event.wait()
        await _test_summaries(mempool, api)
        # hashXs left with no txs should have had their keys removed
        assert all(mempool.hashXs.values())
        # Remove the rest
        api.txs.clear()
        api.raw_txs.clear()
        await event.wait()
        await _test_summaries(mempool, api)
        assert not mempool.hashXs
        assert not mempool.txs
        await group.cancel_remaining()
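`event.wait()` is awaited several times above; this works because `keep_synchronized` pulses the event on each refresh cycle, setting it to wake current waiters and clearing it so the next wait blocks until the following cycle. A minimal `asyncio` sketch of the pulse pattern (the worker body is illustrative):

import asyncio

async def keep_synchronized(event, refresh_secs=0.01):
    while True:
        # ... refresh local state from the daemon here ...
        event.set()                  # wake everyone awaiting this cycle
        event.clear()                # so the next wait() blocks again
        await asyncio.sleep(refresh_secs)

async def demo():
    event = asyncio.Event()
    task = asyncio.create_task(keep_synchronized(event))
    await event.wait()               # one full refresh observed
    await event.wait()               # and another
    task.cancel()

asyncio.run(demo())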
Example #6
async def test_notifications():
    # Tests notifications over a cycle of:
    # 1) A first batch of txs come in
    # 2) A second batch of txs come in
    # 3) A block comes in confirming the first batch only
    api = API()
    api.initialize()
    mempool = MemPool(coin, api, refresh_secs=0.001, log_status_secs=0)
    event = Event()

    n = len(api.ordered_adds) // 2
    raw_txs = api.raw_txs.copy()
    txs = api.txs.copy()
    first_hashes = api.ordered_adds[:n]
    first_touched = api.touched(first_hashes)
    second_hashes = api.ordered_adds[n:]
    second_touched = api.touched(second_hashes)

    async with TaskGroup() as group:
        # First batch enters the mempool
        api.raw_txs = {hash: raw_txs[hash] for hash in first_hashes}
        api.txs = {hash: txs[hash] for hash in first_hashes}
        first_utxos = api.mempool_utxos()
        first_spends = api.mempool_spends()
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        assert len(api.on_mempool_calls) == 1
        touched, height = api.on_mempool_calls[0]
        assert height == api._height == api._cached_height
        assert touched == first_touched
        # Second batch enters the mempool
        api.raw_txs = raw_txs
        api.txs = txs
        await event.wait()
        assert len(api.on_mempool_calls) == 2
        touched, height = api.on_mempool_calls[1]
        assert height == api._height == api._cached_height
        # Touched is incremental
        assert touched == second_touched
        # Block found; first half confirm
        new_height = 2
        api._height = new_height
        api.db_utxos.update(first_utxos)
        for spend in first_spends:
            if is_gen_outpoint(*spend):
                continue
            del api.db_utxos[spend]
        api.raw_txs = {hash: raw_txs[hash] for hash in second_hashes}
        api.txs = {hash: txs[hash] for hash in second_hashes}
        await event.wait()
        assert len(api.on_mempool_calls) == 3
        touched, height = api.on_mempool_calls[2]
        assert height == api._height == api._cached_height == new_height
        assert touched == first_touched
        await group.cancel_remaining()
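The "touched is incremental" assertion holds because each refresh reports only the hashXs affected by txs added to or dropped from the mempool since the previous cycle, not the full set. A simplified sketch of that diff (data shapes are assumptions, not the mempool's internals):

def diff_touched(prev_txs, new_txs, hashxs_of):
    '''hashXs touched by txs added or removed since the last cycle.

    prev_txs and new_txs map tx_hash -> tx; hashxs_of(tx) returns the
    hashXs a tx touches via its inputs and outputs.
    '''
    touched = set()
    for tx_hash in new_txs.keys() - prev_txs.keys():    # arrivals
        touched.update(hashxs_of(new_txs[tx_hash]))
    for tx_hash in prev_txs.keys() - new_txs.keys():    # drops / confirms
        touched.update(hashxs_of(prev_txs[tx_hash]))
    return touched

assert diff_touched({'t1': 'a'}, {'t1': 'a', 't2': 'b'},
                    lambda tx: {tx}) == {'b'}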
Example #7
async def test_daemon_drops_txs():
    # Tests things work if the daemon drops some transactions between
    # returning their hashes and the mempool requesting the raw txs
    api = DropAPI(10)
    api.initialize()
    mempool = MemPool(coin, api, refresh_secs=0.01)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await _test_summaries(mempool, api)
        await group.cancel_remaining()
Example #8
    async def serve(self, shutdown_event):
        '''Start the RPC server and wait for the mempool to synchronize.  Then
        start serving external clients.
        '''
        if not (0, 7, 1) <= aiorpcx_version < (0, 8):
            raise RuntimeError('aiorpcX version 0.7.x required with x >= 1')

        env = self.env
        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
        self.logger.info(f'software version: {electrumx.version}')
        self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
        self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
        self.logger.info(f'event loop policy: {env.loop_policy}')
        self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')

        notifications = Notifications()
        Daemon = env.coin.DAEMON
        BlockProcessor = env.coin.BLOCK_PROCESSOR

        daemon = Daemon(env.coin, env.daemon_url)
        db = DB(env)
        bp = BlockProcessor(env, db, daemon, notifications)

        # Set ourselves up to implement the MemPoolAPI
        self.height = daemon.height
        self.cached_height = daemon.cached_height
        self.mempool_hashes = daemon.mempool_hashes
        self.raw_transactions = daemon.getrawtransactions
        self.lookup_utxos = db.lookup_utxos
        self.on_mempool = notifications.on_mempool
        MemPoolAPI.register(Controller)
        mempool = MemPool(env.coin, self)

        session_mgr = SessionManager(env, db, bp, daemon, mempool,
                                     notifications, shutdown_event)

        # Test daemon authentication, and also ensure it has a cached
        # height.  Do this before entering the task group.
        await daemon.height()

        caught_up_event = Event()
        serve_externally_event = Event()
        synchronized_event = Event()
        async with TaskGroup() as group:
            await group.spawn(session_mgr.serve(serve_externally_event))
            await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
            await caught_up_event.wait()
            await group.spawn(db.populate_header_merkle_cache())
            await group.spawn(mempool.keep_synchronized(synchronized_event))
            await synchronized_event.wait()
            serve_externally_event.set()
Example #9
    def __init__(self, env):
        '''Initialize everything that doesn't require the event loop.'''
        super().__init__(env)
        if aiorpcx_version < self.AIORPCX_MIN:
            raise RuntimeError('ElectrumX requires aiorpcX >= '
                               f'{version_string(self.AIORPCX_MIN)}')

        self.logger.info(f'software version: {self.VERSION}')
        self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
        self.logger.info(f'supported protocol versions: '
                         f'{self.PROTOCOL_MIN}-{self.PROTOCOL_MAX}')
        self.logger.info(f'event loop policy: {env.loop_policy}')

        self.coin = env.coin
        self.servers = {}
        self.tasks = TaskSet()
        self.sessions = set()
        self.cur_group = SessionGroup(0)
        self.txs_sent = 0
        self.next_log_sessions = 0
        self.state = self.CATCHING_UP
        self.max_sessions = env.max_sessions
        self.low_watermark = self.max_sessions * 19 // 20
        self.max_subs = env.max_subs
        # Cache some idea of room to avoid recounting on each subscription
        self.subs_room = 0
        self.next_stale_check = 0
        self.history_cache = pylru.lrucache(256)
        self.header_cache = pylru.lrucache(8)
        self.cache_height = 0
        self.cache_mn_height = 0
        self.mn_cache = pylru.lrucache(256)
        env.max_send = max(350000, env.max_send)
        # Set up the RPC request handlers
        cmds = ('add_peer daemon_url disconnect getinfo groups log peers '
                'reorg sessions stop'.split())
        self.rpc_handlers = {cmd: getattr(self, 'rpc_' + cmd) for cmd in cmds}

        self.loop = asyncio.get_event_loop()
        self.executor = ThreadPoolExecutor()
        self.loop.set_default_executor(self.executor)

        # The complex objects.  Note PeerManager references self.loop (ugh)
        self.daemon = self.coin.DAEMON(env)
        self.bp = self.coin.BLOCK_PROCESSOR(env, self, self.daemon)
        self.mempool = MemPool(self.bp, self)
        self.peer_mgr = PeerManager(env, self)

        # Event triggered when electrumx is listening for incoming requests.
        self.server_listening = asyncio.Event()
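The `rpc_handlers` comprehension builds a name-based dispatch table from bound methods. The same pattern in isolation (command names are illustrative):

class RPCServer:
    def rpc_getinfo(self):
        return {'status': 'ok'}

    def rpc_stop(self):
        return 'stopping'

    def __init__(self):
        cmds = 'getinfo stop'.split()
        # Map each command name to its bound rpc_* method.
        self.rpc_handlers = {cmd: getattr(self, 'rpc_' + cmd) for cmd in cmds}

server = RPCServer()
assert server.rpc_handlers['getinfo']() == {'status': 'ok'}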
Example #10
async def test_keep_synchronized(caplog):
    api = API()
    mempool = MemPool(coin, api)
    event = Event()
    with caplog.at_level(logging.INFO):
        async with TaskGroup() as group:
            await group.spawn(mempool.keep_synchronized, event)
            await event.wait()
            await group.cancel_remaining()

    assert in_caplog(caplog, 'beginning processing of daemon mempool')
    assert in_caplog(caplog, 'compact fee histogram')
    assert in_caplog(caplog, 'synced in ')
    assert in_caplog(caplog, '0 txs touching 0 addresses')
    assert not in_caplog(caplog, 'txs dropped')
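`in_caplog` is a helper from the electrumx test suite. A plausible minimal reconstruction over pytest's `caplog` fixture (an assumption, not the verbatim helper):

def in_caplog(caplog, message):
    # True if any captured log record contains the given substring.
    return any(message in record.getMessage() for record in caplog.records)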
Example #11
async def test_dropped_txs(caplog):
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    # Remove txs whose outputs are spent by other mempool txs
    for prev_hash, prev_idx in api.mempool_spends():
        if prev_hash in api.txs:
            del api.txs[prev_hash]
            del api.raw_txs[prev_hash]

    with caplog.at_level(logging.INFO):
        async with OldTaskGroup() as group:
            await group.spawn(mempool.keep_synchronized, event)
            await event.wait()
            await group.cancel_remaining()

    assert in_caplog(caplog, 'txs dropped')
Example #12
async def test_transaction_summaries(caplog):
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    with caplog.at_level(logging.INFO):
        async with OldTaskGroup() as group:
            await group.spawn(mempool.keep_synchronized, event)
            await event.wait()
            await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.transaction_summaries(os.urandom(HASHX_LEN)) == []
    assert prior_len == len(mempool.hashXs)

    await _test_summaries(mempool, api)
    assert not in_caplog(caplog, 'txs dropped')
Example #13
async def test_compact_fee_histogram():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    histogram = await mempool.compact_fee_histogram()
    assert histogram == []
    bin_size = 1000
    mempool._update_histogram(bin_size)
    histogram = await mempool.compact_fee_histogram()
    assert len(histogram) > 0
    rates, sizes = zip(*histogram)
    assert all(rates[n] < rates[n - 1] for n in range(1, len(rates)))
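The final assertion checks the compacted histogram is sorted by strictly decreasing fee rate; `zip(*histogram)` transposes the (fee rate, size) pairs into parallel tuples. In isolation (values made up):

histogram = [(50, 1200), (20, 3400), (5, 9000)]    # (fee rate, total size)
rates, sizes = zip(*histogram)                     # transpose the pairs
assert all(rates[n] < rates[n - 1] for n in range(1, len(rates)))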
Example #14
async def test_balance_delta():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.balance_delta(os.urandom(HASHX_LEN)) == 0
    assert prior_len == len(mempool.hashXs)

    # Test all hashXs
    deltas = api.balance_deltas()
    for hashX in api.hashXs:
        expected = deltas.get(hashX, 0)
        assert await mempool.balance_delta(hashX) == expected
Example #15
async def test_potential_spends():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.potential_spends(os.urandom(HASHX_LEN)) == set()
    assert prior_len == len(mempool.hashXs)

    # Test all hashXs
    spends = api.spends()
    for hashX in api.hashXs:
        ps = await mempool.potential_spends(hashX)
        assert all(spend in ps for spend in spends[hashX])
Example #16
async def test_unordered_UTXOs():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with TaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.unordered_UTXOs(os.urandom(HASHX_LEN)) == []
    assert prior_len == len(mempool.hashXs)

    # Test all hashXs
    utxos = api.UTXOs()
    for hashX in api.hashXs:
        mempool_result = await mempool.unordered_UTXOs(hashX)
        our_result = utxos.get(hashX, [])
        assert set(our_result) == set(mempool_result)
Example #17
    def __init__(self, env, tasks, shutdown_event):
        self.env = env
        self.tasks = tasks
        self.shutdown_event = shutdown_event
        self.daemon = env.coin.DAEMON(env)
        self.bp = env.coin.BLOCK_PROCESSOR(env, tasks, self.daemon)
        self.mempool = MemPool(env.coin, self, self.tasks,
                               self.bp.add_new_block_callback)
        self.history_cache = pylru.lrucache(256)
        # External interface: pass-throughs for mempool.py
        self.cached_mempool_hashes = self.daemon.cached_mempool_hashes
        self.mempool_refresh_event = self.daemon.mempool_refresh_event
        self.getrawtransactions = self.daemon.getrawtransactions
        self.utxo_lookup = self.bp.db_utxo_lookup
        # External interface: pass-throughs for session.py
        self.force_chain_reorg = self.bp.force_chain_reorg
        self.mempool_fee_histogram = self.mempool.get_fee_histogram
        self.mempool_get_utxos = self.mempool.get_utxos
        self.mempool_potential_spends = self.mempool.potential_spends
        self.mempool_transactions = self.mempool.transactions
        self.mempool_value = self.mempool.value
        self.tx_branch_and_root = self.bp.merkle.branch_and_root
        self.read_headers = self.bp.read_headers
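The block of assignments above is a facade: the class re-exports bound methods of the daemon, block processor and mempool so callers depend on a single object. The idiom reduced to essentials (component names are illustrative):

class Daemon:
    def mempool_hashes(self):
        return []

class ChainState:
    def __init__(self, daemon):
        # A bound method assigned to an attribute is a callable pass-through.
        self.mempool_hashes = daemon.mempool_hashes

state = ChainState(Daemon())
assert state.mempool_hashes() == []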
Example #18
async def test_notifications(caplog):
    # Tests notifications over a cycle of:
    # 1) A first batch of txs come in
    # 2) A second batch of txs come in
    # 3) A block comes in confirming the first batch only
    api = API()
    api.initialize()
    mempool = MemPool(coin, api, refresh_secs=0.001, log_status_secs=0)
    event = Event()

    n = len(api.ordered_adds) // 2
    raw_txs = api.raw_txs.copy()
    txs = api.txs.copy()
    first_hashes = api.ordered_adds[:n]
    first_touched = api.touched(first_hashes)
    second_hashes = api.ordered_adds[n:]
    second_touched = api.touched(second_hashes)

    caplog.set_level(logging.DEBUG)

    async with OldTaskGroup() as group:
        # First batch enters the mempool
        api.raw_txs = {hash: raw_txs[hash] for hash in first_hashes}
        api.txs = {hash: txs[hash] for hash in first_hashes}
        first_utxos = api.mempool_utxos()
        first_spends = api.mempool_spends()
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        assert len(api.on_mempool_calls) == 1
        touched, height = api.on_mempool_calls[0]
        assert height == api._height == api._db_height == api._cached_height
        assert touched == first_touched
        # Second batch enters the mempool
        api.raw_txs = raw_txs
        api.txs = txs
        await event.wait()
        assert len(api.on_mempool_calls) == 2
        touched, height = api.on_mempool_calls[1]
        assert height == api._height == api._db_height == api._cached_height
        # Touched is incremental
        assert touched == second_touched
        # Block found; first half confirm
        new_height = 2
        api._height = new_height
        api.raw_txs = {hash: raw_txs[hash] for hash in second_hashes}
        api.txs = {hash: txs[hash] for hash in second_hashes}
        # Delay the DB update
        assert not in_caplog(caplog, 'waiting for DB to sync')
        async with ignore_after(max(mempool.refresh_secs * 2, 0.5)):
            await event.wait()
        assert in_caplog(caplog, 'waiting for DB to sync')
        assert len(api.on_mempool_calls) == 2
        assert not event.is_set()
        assert api._height == api._cached_height == new_height
        assert touched == second_touched
        # Now update the DB
        api.db_utxos.update(first_utxos)
        api._db_height = new_height
        for spend in first_spends:
            del api.db_utxos[spend]
        await event.wait()
        assert len(api.on_mempool_calls) == 3
        touched, height = api.on_mempool_calls[2]
        assert height == api._db_height == new_height
        assert touched == first_touched
        await group.cancel_remaining()