Example #1
    def __init__(self, env, db, bp, daemon, mempool, shutdown_event):
        env.max_send = max(350000, env.max_send)
        self.env = env
        self.db = db
        self.bp = bp
        self.daemon = daemon
        self.mempool = mempool
        self.peer_mgr = PeerManager(env, db)
        self.shutdown_event = shutdown_event
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.servers = {}
        self.sessions = set()
        self.max_subs = env.max_subs
        self.cur_group = SessionGroup(0)
        self.txs_sent = 0
        self.start_time = time.time()
        self.history_cache = pylru.lrucache(256)
        self.notified_height = None
        # Cache some idea of room to avoid recounting on each subscription
        self.subs_room = 0
        # Event triggered when electrumx is listening for incoming requests.
        self.server_listening = Event()
        self.session_event = Event()

        # Set up the RPC request handlers
        cmds = ('add_peer daemon_url disconnect getinfo groups log peers '
                'query reorg sessions stop'.split())
        LocalRPC.request_handlers = {cmd: getattr(self, 'rpc_' + cmd)
                                     for cmd in cmds}
Example #2
async def test_mempool_removals():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api, refresh_secs=0.01)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        # Remove the second half of the TXs from the mempool
        start = len(api.ordered_adds) // 2
        for tx_hash in api.ordered_adds[start:]:
            del api.txs[tx_hash]
            del api.raw_txs[tx_hash]
        await event.wait()
        await _test_summaries(mempool, api)
        # hashXs with no remaining txs should have had their keys removed
        assert all(mempool.hashXs.values())
        # Remove the rest
        api.txs.clear()
        api.raw_txs.clear()
        await event.wait()
        await _test_summaries(mempool, api)
        assert not mempool.hashXs
        assert not mempool.txs
        await group.cancel_remaining()
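
The repeated event.wait() calls only make sense if keep_synchronized pulses the event (set, then clear) after each refresh, so every wait blocks until the next sync completes. A self-contained sketch of that pulse pattern, assuming plain asyncio in place of the snippet's classes:

import asyncio
from asyncio import Event

async def synchronizer(event, cycles=3):
    for _ in range(cycles):
        await asyncio.sleep(0.01)   # stand-in for one mempool refresh
        event.set()                 # wake everyone currently waiting...
        event.clear()               # ...and make the next wait() block again

async def main():
    event = Event()
    task = asyncio.create_task(synchronizer(event))
    for cycle in range(3):
        await event.wait()          # returns once per pulse
        print(f'synced cycle {cycle}')
    await task

asyncio.run(main())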
Example #3
    async def _note_peers(self, peers, limit=2, check_ports=False, source=None):
        '''Add a limited number of peers that are not already present.'''
        new_peers = []
        match_set = self.peers.copy()
        for peer in peers:
            if not peer.is_public or (peer.is_tor and not self.proxy):
                continue

            matches = peer.matches(match_set)
            if matches:
                if check_ports:
                    for match in matches:
                        if match.check_ports(peer):
                            self.logger.info(f'ports changed for {peer}')
                            match.retry_event.set()
            else:
                match_set.add(peer)
                new_peers.append(peer)

        if new_peers:
            source = source or new_peers[0].source
            if limit:
                random.shuffle(new_peers)
                use_peers = new_peers[:limit]
            else:
                use_peers = new_peers
            for peer in use_peers:
                self.logger.info(f'accepted new peer {peer} from {source}')
                peer.retry_event = Event()
                self.peers.add(peer)
                await self.group.spawn(self._monitor_peer(peer))

        return True
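
Each accepted peer is given its own retry_event, and setting it (as in the ports-changed branch above) presumably wakes that peer's monitor task for an immediate retry. A stripped-down sketch of such a per-peer wakeup; the monitor loop here is a hypothetical stand-in for _monitor_peer:

import asyncio
from asyncio import Event

async def monitor_peer(name, retry_event, retry_secs=30):
    while True:
        print(f'checking {name}')
        # Sleep until the retry interval elapses or someone sets the event.
        try:
            await asyncio.wait_for(retry_event.wait(), retry_secs)
        except asyncio.TimeoutError:
            pass
        retry_event.clear()

async def main():
    retry_event = Event()
    task = asyncio.create_task(monitor_peer('peer0', retry_event))
    await asyncio.sleep(0.01)
    retry_event.set()          # force an immediate re-check
    await asyncio.sleep(0.01)
    task.cancel()

asyncio.run(main())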
Example #4
    async def discover_peers(self):
        '''Perform peer maintenance.  This includes

          1) Forgetting unreachable peers.
          2) Verifying connectivity of new peers.
          3) Retrying old peers at regular intervals.
        '''
        if self.env.peer_discovery != self.env.PD_ON:
            self.logger.info('peer discovery is disabled')
            return

        self.logger.info(f'beginning peer discovery. Force use of '
                         f'proxy: {self.env.force_proxy}')
        forever = Event()
        async with self.group as group:
            await group.spawn(forever.wait())
            await group.spawn(self._refresh_blacklist())
            await group.spawn(self._detect_proxy())
            await group.spawn(self._import_peers())
            # Consume tasks as they complete, logging unexpected failures
            async for task in group:
                if not task.cancelled():
                    try:
                        task.result()
                    except Exception:
                        self.logger.exception('task failed unexpectedly')
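
Spawning forever.wait() parks a task that never finishes on its own, keeping the group alive alongside the finite maintenance tasks until the whole group is cancelled. A rough asyncio-only sketch of that keep-alive trick, with plain tasks standing in for the snippet's task group:

import asyncio
from asyncio import Event

async def refresh_blacklist():
    await asyncio.sleep(0.01)   # stand-in for a finite maintenance task

async def main():
    forever = Event()
    keep_alive = asyncio.create_task(forever.wait())   # never finishes unaided
    maintenance = asyncio.create_task(refresh_blacklist())
    done, pending = await asyncio.wait({keep_alive, maintenance}, timeout=0.1)
    assert maintenance in done and keep_alive in pending
    keep_alive.cancel()

asyncio.run(main())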
Example #5
async def test_notifications():
    # Tests notifications over a cycle of:
    # 1) A first batch of txs comes in
    # 2) A second batch of txs comes in
    # 3) A block comes in confirming the first batch only
    api = API()
    api.initialize()
    mempool = MemPool(coin, api, refresh_secs=0.001, log_status_secs=0)
    event = Event()

    n = len(api.ordered_adds) // 2
    raw_txs = api.raw_txs.copy()
    txs = api.txs.copy()
    first_hashes = api.ordered_adds[:n]
    first_touched = api.touched(first_hashes)
    second_hashes = api.ordered_adds[n:]
    second_touched = api.touched(second_hashes)

    async with TaskGroup() as group:
        # First batch enters the mempool
        api.raw_txs = {hash: raw_txs[hash] for hash in first_hashes}
        api.txs = {hash: txs[hash] for hash in first_hashes}
        first_utxos = api.mempool_utxos()
        first_spends = api.mempool_spends()
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        assert len(api.on_mempool_calls) == 1
        touched, height = api.on_mempool_calls[0]
        assert height == api._height == api._cached_height
        assert touched == first_touched
        # Second batch enters the mempool
        api.raw_txs = raw_txs
        api.txs = txs
        await event.wait()
        assert len(api.on_mempool_calls) == 2
        touched, height = api.on_mempool_calls[1]
        assert height == api._height == api._cached_height
        # Touched is incremental
        assert touched == second_touched
        # Block found; first half confirm
        new_height = 2
        api._height = new_height
        api.db_utxos.update(first_utxos)
        for spend in first_spends:
            if is_gen_outpoint(*spend):
                continue
            del api.db_utxos[spend]
        api.raw_txs = {hash: raw_txs[hash] for hash in second_hashes}
        api.txs = {hash: txs[hash] for hash in second_hashes}
        await event.wait()
        assert len(api.on_mempool_calls) == 3
        touched, height = api.on_mempool_calls[2]
        assert height == api._height == api._cached_height == new_height
        assert touched == first_touched
        await group.cancel_remaining()
Example #6
    def __init__(self, merkle, source_func):
        '''Initialise a cache of hashes taken from source_func:

           async def source_func(index, count):
              ...
        '''
        self.merkle = merkle
        self.source_func = source_func
        self.length = 0
        self.depth_higher = 0
        self.initialized = Event()
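
The docstring fixes the source_func contract but not how the cache is driven. A toy illustration of supplying such a coroutine and signalling readiness through the initialized event; DemoCache and fetch_hashes are hypothetical stand-ins, not the real classes:

import asyncio
from asyncio import Event

async def fetch_hashes(index, count):
    # Toy source_func matching the documented signature.
    return [f'hash-{i}' for i in range(index, index + count)]

class DemoCache:
    def __init__(self, source_func):
        self.source_func = source_func
        self.length = 0
        self.hashes = []
        self.initialized = Event()

    async def initialize(self, length):
        # Fetch the initial hashes, then signal that the cache is usable.
        self.hashes = await self.source_func(0, length)
        self.length = length
        self.initialized.set()

async def main():
    cache = DemoCache(fetch_hashes)
    await cache.initialize(4)
    await cache.initialized.wait()   # returns immediately once set
    print(cache.hashes)

asyncio.run(main())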
Example #7
async def test_daemon_drops_txs():
    # Tests things work if the daemon drops some transactions between
    # returning their hashes and the mempool requesting the raw txs
    api = DropAPI(10)
    api.initialize()
    mempool = MemPool(coin, api, refresh_secs=0.01)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await _test_summaries(mempool, api)
        await group.cancel_remaining()
Example #8
async def test_keep_synchronized(caplog):
    api = API()
    mempool = MemPool(coin, api)
    event = Event()
    with caplog.at_level(logging.INFO):
        async with TaskGroup() as group:
            await group.spawn(mempool.keep_synchronized, event)
            await event.wait()
            await group.cancel_remaining()

    assert in_caplog(caplog, 'beginning processing of daemon mempool')
    assert in_caplog(caplog, 'compact fee histogram')
    assert in_caplog(caplog, 'synced in ')
    assert in_caplog(caplog, '0 txs touching 0 addresses')
    assert not in_caplog(caplog, 'txs dropped')
Example #9
async def test_dropped_txs(caplog):
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    # Remove TXs whose outputs are spent by other mempool txs
    for prev_hash, prev_idx in api.mempool_spends():
        if prev_hash in api.txs:
            del api.txs[prev_hash]

    with caplog.at_level(logging.INFO):
        async with OldTaskGroup() as group:
            await group.spawn(mempool.keep_synchronized, event)
            await event.wait()
            await group.cancel_remaining()

    assert in_caplog(caplog, 'txs dropped')
Example #10
async def test_transaction_summaries(caplog):
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    with caplog.at_level(logging.INFO):
        async with OldTaskGroup() as group:
            await group.spawn(mempool.keep_synchronized, event)
            await event.wait()
            await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.transaction_summaries(os.urandom(HASHX_LEN)) == []
    assert prior_len == len(mempool.hashXs)

    await _test_summaries(mempool, api)
    assert not in_caplog(caplog, 'txs dropped')
Example #11
async def test_compact_fee_histogram():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    histogram = await mempool.compact_fee_histogram()
    assert histogram == []
    bin_size = 1000
    mempool._update_histogram(bin_size)
    histogram = await mempool.compact_fee_histogram()
    assert len(histogram) > 0
    rates, sizes = zip(*histogram)
    assert all(rates[n] < rates[n - 1] for n in range(1, len(rates)))
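
The final assert encodes the expected shape of the result: a list of (fee rate, size) pairs whose rates strictly decrease. The same invariant checked standalone, with made-up numbers:

# Hypothetical compacted histogram: (fee rate, total size) pairs.
histogram = [(50.0, 12_000), (20.0, 34_000), (5.0, 90_000)]
rates, sizes = zip(*histogram)
assert all(rates[n] < rates[n - 1] for n in range(1, len(rates)))   # strictly decreasing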
Example #12
    async def discover_peers(self):
        '''Perform peer maintenance.  This includes

          1) Forgetting unreachable peers.
          2) Verifying connectivity of new peers.
          3) Retrying old peers at regular intervals.
        '''
        if self.env.peer_discovery != self.env.PD_ON:
            self.logger.info('peer discovery is disabled')
            return

        self.logger.info(f'beginning peer discovery. Force use of '
                         f'proxy: {self.env.force_proxy}')
        forever = Event()
        async with self.group as group:
            await group.spawn(forever.wait())
            await group.spawn(self._refresh_blacklist())
            await group.spawn(self._detect_proxy())
            await group.spawn(self._import_peers())
Example #13
    async def _note_peers(self,
                          peers,
                          limit=2,
                          check_ports=False,
                          source=None):
        '''Add a limited number of peers that are not already present.'''
        new_peers = []
        match_set = self.peers.copy()
        for peer in peers:
            if not peer.is_public or (peer.is_tor and not self.proxy):
                continue

            matches = peer.matches(match_set)
            if matches:
                if check_ports:
                    for match in matches:
                        if match.check_ports(peer):
                            self.logger.info(f'ports changed for {peer}')
                            # Retry connecting to the peer: first with the existing
                            # ports, then with the new ones. Note that check_ports
                            # above has the side effect of temporarily storing the
                            # new ports. If we manage to connect, 'server.features'
                            # is called and this peer's ports are updated to the
                            # returned values.
                            match.retry_event.set()
            else:
                match_set.add(peer)
                new_peers.append(peer)

        if new_peers:
            source = source or new_peers[0].source
            if limit:
                random.shuffle(new_peers)
                use_peers = new_peers[:limit]
            else:
                use_peers = new_peers
            for peer in use_peers:
                self.logger.info(f'accepted new peer {peer} from {source}')
                peer.retry_event = Event()
                self.peers.add(peer)
                await self.group.spawn(self._monitor_peer(peer))

        return True
Example #14
    async def _note_peers(self, peers, limit=2, check_ports=False,
                          source=None):
        '''Add a limited number of peers that are not already present.'''
        new_peers = []
        match_set = self.peers.copy()
        for peer in peers:
            if not peer.is_public:
                continue
            if peer.is_tor:
                if not self.proxy:
                    # silently ignore tor if no tor proxy
                    continue
                if not self.env.peer_discovery_tor:
                    self.logger.warning(f'refusing peer "{peer}" (tor peer discovery is disabled)')
                    continue
            banned_suffix = self.session_mgr.does_peer_match_hostname_ban(peer)
            if banned_suffix:
                self.logger.warning(f'refusing peer "{peer}" (banned: {banned_suffix})')
                continue

            matches = peer.matches(match_set)
            if not matches:
                new_peers.append(peer)
                match_set.add(peer)
            elif check_ports:
                for match in matches:
                    if match.check_ports(peer):
                        self.logger.info(f'ports changed for {peer}')
                        match.retry_event.set()

        if new_peers:
            source = source or new_peers[0].source
            if limit:
                random.shuffle(new_peers)
                use_peers = new_peers[:limit]
            else:
                use_peers = new_peers
            for peer in use_peers:
                self.logger.info(f'accepted new peer {peer} from {source}')
                peer.retry_event = Event()
                self.peers.add(peer)
                await self.group.spawn(self._monitor_peer(peer))
Example #15
async def test_potential_spends():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.potential_spends(os.urandom(HASHX_LEN)) == set()
    assert prior_len == len(mempool.hashXs)

    # Test all hashXs
    spends = api.spends()
    for hashX in api.hashXs:
        ps = await mempool.potential_spends(hashX)
        assert all(spend in ps for spend in spends[hashX])
Example #16
async def test_balance_delta():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with OldTaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.balance_delta(os.urandom(HASHX_LEN)) == 0
    assert prior_len == len(mempool.hashXs)

    # Test all hashXs
    deltas = api.balance_deltas()
    for hashX in api.hashXs:
        expected = deltas.get(hashX, 0)
        assert await mempool.balance_delta(hashX) == expected
Example #17
async def test_unordered_UTXOs():
    api = API()
    api.initialize()
    mempool = MemPool(coin, api)
    event = Event()
    async with TaskGroup() as group:
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        await group.cancel_remaining()

    # Check the default dict is handled properly
    prior_len = len(mempool.hashXs)
    assert await mempool.unordered_UTXOs(os.urandom(HASHX_LEN)) == []
    assert prior_len == len(mempool.hashXs)

    # Test all hashXs
    utxos = api.UTXOs()
    for hashX in api.hashXs:
        mempool_result = await mempool.unordered_UTXOs(hashX)
        our_result = utxos.get(hashX, [])
        assert set(our_result) == set(mempool_result)
Example #18
    def _event(self, request, request_id):
        event = Event()
        self._requests[request_id] = (request, event)
        return event
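
The helper pairs each outstanding request with an Event so the sender can block until the matching response arrives. A self-contained sketch of how such a registry might be used end to end; the PendingRequests class and its deliver method are illustrative guesses, not the snippet's real API:

import asyncio
from asyncio import Event

class PendingRequests:
    def __init__(self):
        self._requests = {}

    def _event(self, request, request_id):
        # Same pattern as the snippet: park the request next to an Event
        # the sender can await until the matching response arrives.
        event = Event()
        self._requests[request_id] = (request, event)
        return event

    def deliver(self, request_id):
        # Hypothetical responder side: look up the entry and wake the waiter.
        _request, event = self._requests.pop(request_id)
        event.set()

async def main():
    pending = PendingRequests()
    event = pending._event({'method': 'ping'}, request_id=1)
    asyncio.get_running_loop().call_later(0.01, pending.deliver, 1)
    await event.wait()
    print('response received for request 1')

asyncio.run(main())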
Example #19
async def test_notifications(caplog):
    # Tests notifications over a cycle of:
    # 1) A first batch of txs comes in
    # 2) A second batch of txs comes in
    # 3) A block comes in confirming the first batch only
    api = API()
    api.initialize()
    mempool = MemPool(coin, api, refresh_secs=0.001, log_status_secs=0)
    event = Event()

    n = len(api.ordered_adds) // 2
    raw_txs = api.raw_txs.copy()
    txs = api.txs.copy()
    first_hashes = api.ordered_adds[:n]
    first_touched = api.touched(first_hashes)
    second_hashes = api.ordered_adds[n:]
    second_touched = api.touched(second_hashes)

    caplog.set_level(logging.DEBUG)

    async with OldTaskGroup() as group:
        # First batch enters the mempool
        api.raw_txs = {hash: raw_txs[hash] for hash in first_hashes}
        api.txs = {hash: txs[hash] for hash in first_hashes}
        first_utxos = api.mempool_utxos()
        first_spends = api.mempool_spends()
        await group.spawn(mempool.keep_synchronized, event)
        await event.wait()
        assert len(api.on_mempool_calls) == 1
        touched, height = api.on_mempool_calls[0]
        assert height == api._height == api._db_height == api._cached_height
        assert touched == first_touched
        # Second batch enters the mempool
        api.raw_txs = raw_txs
        api.txs = txs
        await event.wait()
        assert len(api.on_mempool_calls) == 2
        touched, height = api.on_mempool_calls[1]
        assert height == api._height == api._db_height == api._cached_height
        # Touched is incremental
        assert touched == second_touched
        # Block found; first half confirm
        new_height = 2
        api._height = new_height
        api.raw_txs = {hash: raw_txs[hash] for hash in second_hashes}
        api.txs = {hash: txs[hash] for hash in second_hashes}
        # Delay the DB update
        assert not in_caplog(caplog, 'waiting for DB to sync')
        async with ignore_after(max(mempool.refresh_secs * 2, 0.5)):
            await event.wait()
        assert in_caplog(caplog, 'waiting for DB to sync')
        assert len(api.on_mempool_calls) == 2
        assert not event.is_set()
        assert api._height == api._cached_height == new_height
        assert touched == second_touched
        # Now update the DB
        api.db_utxos.update(first_utxos)
        api._db_height = new_height
        for spend in first_spends:
            del api.db_utxos[spend]
        await event.wait()
        assert len(api.on_mempool_calls) == 3
        touched, height = api.on_mempool_calls[2]
        assert height == api._db_height == new_height
        assert touched == first_touched
        await group.cancel_remaining()
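
ignore_after (an aiorpcx helper) bounds how long the block waits without raising on timeout. For readers without aiorpcx, a rough asyncio equivalent of that guard might look like this:

import asyncio

async def wait_bounded(event, timeout):
    # Rough stand-in for aiorpcx's ignore_after around event.wait():
    # give the event a bounded chance to fire, swallowing the timeout.
    try:
        await asyncio.wait_for(event.wait(), timeout)
    except asyncio.TimeoutError:
        pass

async def main():
    event = asyncio.Event()
    await wait_bounded(event, 0.05)   # event never set; times out quietly
    print('continued past the bounded wait')

asyncio.run(main())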