Example #1
    def __init__(self, env: 'Env', db: 'DB'):
        self.logger = class_logger(__name__, self.__class__.__name__)
        # Initialise the Peer class
        Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS
        self.env = env
        self.db = db

        # Our reported clearnet and Tor Peers, if any
        sclass = env.coin.SESSIONCLS
        self.myselves = [
            Peer(str(service.host), sclass.server_features(env), 'env')
            for service in env.report_services
        ]
        self.server_version_args = sclass.server_version_args()
        # Peers have one entry per hostname.  Once connected, the
        # ip_addr property is either None, an onion peer, or the
        # IP address that was connected to.  Adding a peer will evict
        # any other peers with the same host name or IP address.
        self.peers = set()
        self.permit_onion_peer_time = time.time()
        self.proxy = None
        self.group = OldTaskGroup()
        self.recent_peer_adds = {}
        # Blacklist of peers, refreshed periodically
        self.blacklist = set()
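
The eviction behaviour described in the comment above ("adding a peer will evict any other peers with the same host name or IP address") can be sketched in isolation. This is an editorial illustration only, not the actual PeerManager code; FakePeer and add_peer_with_eviction are hypothetical names.

from dataclasses import dataclass
from typing import Optional

@dataclass(frozen=True)
class FakePeer:
    '''Hypothetical stand-in for a peer: host name plus optional connected IP.'''
    host: str
    ip_addr: Optional[str] = None

def add_peer_with_eviction(peers: set, new_peer: FakePeer) -> set:
    # Evict existing peers sharing the new peer's host name or IP address,
    # then add the new peer; return the evicted entries.
    evicted = {p for p in peers
               if p.host == new_peer.host
               or (p.ip_addr is not None and p.ip_addr == new_peer.ip_addr)}
    peers -= evicted
    peers.add(new_peer)
    return evicted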
Example #2
async def test_mempool_removals():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api, refresh_secs=0.01)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        # Remove half the TXs from the mempool
        start = len(api.ordered_adds) // 2
        for tx_hash in api.ordered_adds[start:]:
            del api.txs[tx_hash]
            del api.raw_txs[tx_hash]
        await event.wait()
        await _test_summaries(mempool, api)
        # hashXs with no remaining txs should have had their keys destroyed
        assert all(mempool.hashXs.values())
        # Remove the rest
        api.txs.clear()
        api.raw_txs.clear()
        await event.wait()
        await _test_summaries(mempool, api)
        assert not mempool.hashXs
        assert not mempool.txs
        await group.cancel_remaining()
Example #3
    async def serve(self, shutdown_event):
        '''Start the RPC server and wait for the mempool to synchronize.  Then
        start serving external clients.
        '''
        if not (0, 22, 0) <= aiorpcx_version < (0, 23):
            raise RuntimeError('aiorpcX version 0.22.x is required')

        env = self.env
        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
        self.logger.info(f'software version: {electrumx.version}')
        self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
        self.logger.info(f'event loop policy: {env.loop_policy}')
        self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')

        notifications = Notifications()
        Daemon = env.coin.DAEMON
        BlockProcessor = env.coin.BLOCK_PROCESSOR

        async with Daemon(env.coin, env.daemon_url) as daemon:
            db = DB(env)
            bp = BlockProcessor(env, db, daemon, notifications)

            # Set notifications up to implement the MemPoolAPI
            def get_db_height():
                return db.db_height
            notifications.height = daemon.height
            notifications.db_height = get_db_height
            notifications.cached_height = daemon.cached_height
            notifications.mempool_hashes = daemon.mempool_hashes
            notifications.raw_transactions = daemon.getrawtransactions
            notifications.lookup_utxos = db.lookup_utxos
            MemPoolAPI.register(Notifications)
            mempool = MemPool(env.coin, notifications)

            session_mgr = SessionManager(env, db, bp, daemon, mempool,
                                         shutdown_event)

            # Test daemon authentication, and also ensure it has a cached
            # height.  Do this before entering the task group.
            await daemon.height()

            caught_up_event = Event()
            mempool_event = Event()

            async def wait_for_catchup():
                await caught_up_event.wait()
                await group.spawn(db.populate_header_merkle_cache())
                await group.spawn(mempool.keep_synchronized(mempool_event))

            async with OldTaskGroup() as group:
                await group.spawn(session_mgr.serve(notifications, mempool_event))
                await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
                await group.spawn(wait_for_catchup())
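
The MemPoolAPI.register(Notifications) call above uses Python's abc virtual-subclass registration: Notifications is never made a real subclass of MemPoolAPI, but isinstance and issubclass checks succeed afterwards. A minimal standalone sketch of that mechanism follows; the class names here are illustrative, not ElectrumX's.

from abc import ABC, abstractmethod

class StorageAPI(ABC):
    '''Hypothetical ABC standing in for MemPoolAPI.'''
    @abstractmethod
    def height(self) -> int: ...

class Notifier:
    '''Unrelated class standing in for Notifications; it merely provides height().'''
    def height(self) -> int:
        return 0

# register() creates a "virtual subclass": no inheritance and no method checks,
# but isinstance/issubclass now return True.
StorageAPI.register(Notifier)
assert issubclass(Notifier, StorageAPI)
assert isinstance(Notifier(), StorageAPI)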
Example #4
async def test_daemon_drops_txs():
    # Test that things still work if the daemon drops some transactions between
    # returning their hashes and the mempool requesting the raw txs
    api = DropAPI(10)
    api.initialize()
    mempool = MemPool(coin, api, refresh_secs=0.01)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await _test_summaries(mempool, api)
        await group.cancel_remaining()
Example #5
async def test_keep_synchronized(caplog):
    api = API()
    mempool = MemPool(coin, api)
    event = Event()
    with caplog.at_level(logging.INFO):
        async with OldTaskGroup() as group:
            await group.spawn(mempool.keep_synchronized, event)
            await event.wait()
            await group.cancel_remaining()

    assert in_caplog(caplog, 'beginning processing of daemon mempool')
    assert in_caplog(caplog, 'compact fee histogram')
    assert in_caplog(caplog, 'synced in ')
    assert in_caplog(caplog, '0 txs')
    assert in_caplog(caplog, 'touching 0 addresses')
    assert not in_caplog(caplog, 'txs dropped')
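
The tests above rely on an in_caplog helper that is not shown in these excerpts. A plausible minimal implementation, assuming pytest's caplog fixture, is:

def in_caplog(caplog, message):
    # True when any captured log record contains the given substring.
    return any(message in record.getMessage() for record in caplog.records)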
Example #6
async def test_dropped_txs(caplog):
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    # Remove txs that other mempool txs spend from, leaving dangling spends
    for prev_hash, prev_idx in api.mempool_spends():
        if prev_hash in api.txs:
            del api.txs[prev_hash]

    with caplog.at_level(logging.INFO):
        async with OldTaskGroup() as group:
            await group.spawn(mempool.keep_synchronized, event)
            await event.wait()
            await group.cancel_remaining()

    assert in_caplog(caplog, 'txs dropped')
Example #7
    async def _process_mempool(self, all_hashes, touched, mempool_height):
        # Re-sync with the new set of hashes
        txs = self.txs
        hashXs = self.hashXs

        if mempool_height != self.api.db_height():
            raise DBSyncError

        # First handle txs that have disappeared
        for tx_hash in (set(txs) - all_hashes):
            tx = txs.pop(tx_hash)
            tx_hashXs = {hashX for hashX, value in tx.in_pairs}
            tx_hashXs.update(hashX for hashX, value in tx.out_pairs)
            for hashX in tx_hashXs:
                hashXs[hashX].remove(tx_hash)
                if not hashXs[hashX]:
                    del hashXs[hashX]
            touched |= tx_hashXs

        # Process new transactions
        new_hashes = list(all_hashes.difference(txs))
        if new_hashes:
            group = OldTaskGroup()
            for hashes in chunks(new_hashes, 200):
                coro = self._fetch_and_accept(hashes, all_hashes, touched)
                await group.spawn(coro)
            if mempool_height != self.api.db_height():
                raise DBSyncError

            tx_map = {}
            utxo_map = {}
            async for task in group:
                deferred, unspent = task.result()
                tx_map.update(deferred)
                utxo_map.update(unspent)

            prior_count = 0
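            # Loop to a fixed point: txs deferred because they spend outputs of
            # other not-yet-accepted mempool txs may become acceptable on a
            # later pass, once those txs have been accepted.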
            # FIXME: this is not particularly efficient
            while tx_map and len(tx_map) != prior_count:
                prior_count = len(tx_map)
                tx_map, utxo_map = self._accept_transactions(tx_map, utxo_map,
                                                             touched)
            if tx_map:
                self.logger.error(f'{len(tx_map)} txs dropped')

        return touched
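
As used above, chunks() simply yields fixed-size slices of the new-hash list so the raw-tx fetches are batched in groups of 200. A minimal equivalent for reference (not necessarily the exact ElectrumX implementation):

def chunks(items, size):
    '''Yield successive slices of items, each of length at most size.'''
    for i in range(0, len(items), size):
        yield items[i:i + size]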
Example #8
async def test_transaction_summaries(caplog):
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    with caplog.at_level(logging.INFO):
        async with OldTaskGroup() as group:
            await group.spawn(mempool.keep_synchronized, event)
            await event.wait()
            await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.transaction_summaries(os.urandom(HASHX_LEN)) == []
    assert prior_len == len(mempool.hashXs)

    await _test_summaries(mempool, api)
    assert not in_caplog(caplog, 'txs dropped')
Example #9
async def test_compact_fee_histogram():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    histogram = await mempool.compact_fee_histogram()
    assert histogram == []
    bin_size = 1000
    mempool._update_histogram(bin_size)
    histogram = await mempool.compact_fee_histogram()
    assert len(histogram) > 0
    rates, sizes = zip(*histogram)
    assert all(rates[n] < rates[n - 1] for n in range(1, len(rates)))
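
For context, a compact fee histogram as served to Electrum clients is a list of (fee_rate, virtual_size) pairs ordered by strictly decreasing fee rate, which is what the final assertion checks. A hand-written example that satisfies it (illustrative values only):

histogram = [(50.0, 120_000), (10.0, 450_000), (2.0, 900_000), (1.0, 3_000_000)]
rates, sizes = zip(*histogram)
assert all(rates[n] < rates[n - 1] for n in range(1, len(rates)))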
Example #10
async def test_potential_spends():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.potential_spends(os.urandom(HASHX_LEN)) == set()
    assert prior_len == len(mempool.hashXs)

    # Test all hashXs
    spends = api.spends()
    for hashX in api.hashXs:
        ps = await mempool.potential_spends(hashX)
        assert all(spend in ps for spend in spends[hashX])
Example #11
async def test_balance_delta():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.balance_delta(os.urandom(HASHX_LEN)) == 0
    assert prior_len == len(mempool.hashXs)

    # Test all hashXs
    deltas = api.balance_deltas()
    for hashX in api.hashXs:
        expected = deltas.get(hashX, 0)
        assert await mempool.balance_delta(hashX) == expected
Example #12
    async def fetch_and_process_blocks(self, caught_up_event):
        '''Fetch, process and index blocks from the daemon.

        Sets caught_up_event when first caught up.  Flushes to disk
        and shuts down cleanly if cancelled.

        This matters mainly during initial sync: if ElectrumX is asked
        to shut down after a large number of blocks have been processed
        but not yet written to disk, it should write them to disk
        before exiting, as otherwise a significant amount of work would
        be lost.
        '''
        self._caught_up_event = caught_up_event
        await self._first_open_dbs()
        try:
            async with OldTaskGroup() as group:
                await group.spawn(self.prefetcher.main_loop(self.height))
                await group.spawn(self._process_prefetched_blocks())
        # Don't flush for arbitrary exceptions as they might be a cause or consequence of
        # corrupted data
        except CancelledError:
            self.logger.info('flushing to DB for a clean shutdown...')
            await self.flush(True)
Example #13
async def test_unordered_UTXOs():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.unordered_UTXOs(os.urandom(HASHX_LEN)) == []
    assert prior_len == len(mempool.hashXs)

    # Test all hashXs
    utxos = api.UTXOs()
    for hashX in api.hashXs:
        mempool_result = await mempool.unordered_UTXOs(hashX)
        our_result = utxos.get(hashX, [])
        assert set(our_result) == {
            dataclasses.astuple(mr)
            for mr in mempool_result
        }
Example #14
async def test_notifications(caplog):
    # Tests notifications over a cycle of:
    # 1) A first batch of txs comes in
    # 2) A second batch of txs comes in
    # 3) A block comes in confirming the first batch only
    api = API()
    api.initialize()
    mempool = MemPool(coin, api, refresh_secs=0.001, log_status_secs=0)
    event = Event()

    n = len(api.ordered_adds) // 2
    raw_txs = api.raw_txs.copy()
    txs = api.txs.copy()
    first_hashes = api.ordered_adds[:n]
    first_touched = api.touched(first_hashes)
    second_hashes = api.ordered_adds[n:]
    second_touched = api.touched(second_hashes)

    caplog.set_level(logging.DEBUG)

    async with OldTaskGroup() as group:
        # First batch enters the mempool
        api.raw_txs = {hash: raw_txs[hash] for hash in first_hashes}
        api.txs = {hash: txs[hash] for hash in first_hashes}
        first_utxos = api.mempool_utxos()
        first_spends = api.mempool_spends()
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        assert len(api.on_mempool_calls) == 1
        touched, height = api.on_mempool_calls[0]
        assert height == api._height == api._db_height == api._cached_height
        assert touched == first_touched
        # Second batch enters the mempool
        api.raw_txs = raw_txs
        api.txs = txs
        await event.wait()
        assert len(api.on_mempool_calls) == 2
        touched, height = api.on_mempool_calls[1]
        assert height == api._height == api._db_height == api._cached_height
        # Touched is incremental
        assert touched == second_touched
        # Block found; the first half confirms
        new_height = 2
        api._height = new_height
        api.raw_txs = {hash: raw_txs[hash] for hash in second_hashes}
        api.txs = {hash: txs[hash] for hash in second_hashes}
        # Delay the DB update
        assert not in_caplog(caplog, 'waiting for DB to sync')
        async with ignore_after(max(mempool.refresh_secs * 2, 0.5)):
            await event.wait()
        assert in_caplog(caplog, 'waiting for DB to sync')
        assert len(api.on_mempool_calls) == 2
        assert not event.is_set()
        assert api._height == api._cached_height == new_height
        assert touched == second_touched
        # Now update the DB
        api.db_utxos.update(first_utxos)
        api._db_height = new_height
        for spend in first_spends:
            del api.db_utxos[spend]
        await event.wait()
        assert len(api.on_mempool_calls) == 3
        touched, height = api.on_mempool_calls[2]
        assert height == api._db_height == new_height
        assert touched == first_touched
        await group.cancel_remaining()
Example #15
    async def keep_synchronized(self, synchronized_event):
        '''Keep the mempool synchronized with the daemon.'''
        async with OldTaskGroup() as group:
            await group.spawn(self._refresh_hashes(synchronized_event))
            await group.spawn(self._refresh_histogram(synchronized_event))
            await group.spawn(self._logging(synchronized_event))
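
OldTaskGroup here is aiorpcX's task group; the pattern (spawn several long-running coroutines and let the enclosing async with supervise them) mirrors the standard library's asyncio.TaskGroup in Python 3.11+. A rough stdlib-only sketch of the same shape, with placeholder workers rather than ElectrumX code:

import asyncio

async def refresh_hashes(event: asyncio.Event):
    # Placeholder worker: signal once, then idle.
    event.set()
    await asyncio.sleep(3600)

async def refresh_histogram(event: asyncio.Event):
    await asyncio.sleep(3600)

async def keep_synchronized(event: asyncio.Event):
    # All workers run until cancelled; if one fails, the group cancels the rest.
    async with asyncio.TaskGroup() as group:
        group.create_task(refresh_hashes(event))
        group.create_task(refresh_histogram(event))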
Example #16
    async def _verify_peer(self, session, peer):
        # store IP address for peer
        if not peer.is_tor:
            address = session.remote_address()
            if address and isinstance(address.host,
                                      (IPv4Address, IPv6Address)):
                peer.ip_addr = str(address.host)

        if self._is_blacklisted(peer):
            raise BadPeerError('blacklisted')

        # Bucket good recent peers; forbid many servers from similar IPs
        # FIXME there's a race here, when verifying multiple peers
        #       that belong to the same bucket ~simultaneously
        recent_peers = self._get_recent_good_peers()
        if peer in recent_peers:
            recent_peers.remove(peer)
        onion_peers = []
        buckets = defaultdict(list)
        for other_peer in recent_peers:
            if other_peer.is_tor:
                onion_peers.append(other_peer)
            else:
                buckets[other_peer.bucket_for_internal_purposes()].append(
                    other_peer)
        if peer.is_tor:
            # keep number of onion peers below half of all peers,
            # but up to 100 is OK regardless
            if len(onion_peers) > len(recent_peers) // 2 >= 100:
                raise BadPeerError('too many onion peers already')
        else:
            bucket = peer.bucket_for_internal_purposes()
            if buckets[bucket]:
                raise BadPeerError(
                    f'too many peers already in bucket {bucket}')

        # server.version goes first
        message = 'server.version'
        try:
            result = await session.send_request(message,
                                                self.server_version_args)
        except asyncio.CancelledError:
            raise BadPeerError('terminated before server.version response')

        assert_good(message, result, list)

        # Protocol version 1.1 returns a pair with the version first
        if len(result) != 2 or not all(isinstance(x, str) for x in result):
            raise BadPeerError(f'bad server.version result: {result}')
        server_version, _protocol_version = result
        peer.server_version = server_version
        peer.features['server_version'] = server_version

        async with OldTaskGroup() as g:
            await g.spawn(self._send_headers_subscribe(session))
            await g.spawn(self._send_server_features(session, peer))
            peers_task = await g.spawn(
                self._send_peers_subscribe(session, peer))

        # Process reported peers if remote peer is good
        peers = peers_task.result()
        await self._note_peers(peers)

        features = self._features_to_register(peer, peers)
        if features:
            self.logger.info(f'registering ourself with {peer}')
            # We only care to wait for the response
            try:
                await session.send_request('server.add_peer', [features])
            except asyncio.CancelledError:
                raise BadPeerError(
                    'terminated before server.add_peer response')
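
assert_good() above is a small helper not shown in these excerpts; judging by how it is called, it validates the type of an RPC result and rejects the peer otherwise. A plausible minimal implementation, assuming BadPeerError from the surrounding module:

def assert_good(message, result, instance):
    # Reject peers whose RPC replies have an unexpected type.
    if not isinstance(result, instance):
        raise BadPeerError(f'{message} returned bad result type '
                           f'{type(result).__name__}')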