Example 1
async def fetch_witnesses(
        peer: ETHProxyPeer,
        block_hash: Hash32,
        block_number: BlockNumber,
        event_bus: EndpointAPI,
        database_ipc_path: pathlib.Path,
        metrics_registry: MetricsRegistry,
        logger: ExtendedDebugLogger,
) -> Tuple[Hash32, ...]:
    """
    Fetch witness hashes for the given block from the given peer and emit a
    CollectMissingTrieNodes event to trigger the download of the trie nodes they refer to.
    """
    block_str = f"<Block #{block_number}-0x{humanize_hash(block_hash)}>"
    try:
        logger.debug(
            "Attempting to fetch witness hashes for %s from %s", block_str, peer)
        witness_hashes = await peer.wit_api.get_block_witness_hashes(block_hash)
    except asyncio.TimeoutError:
        logger.debug(
            "Timed out trying to fetch witnesses for %s from %s", block_str, peer)
        return tuple()
    except Exception as err:
        logger.warning(
            "Error fetching witnesses for %s from %s: %s", block_str, peer, err)
        return tuple()
    else:
        if witness_hashes:
            logger.debug(
                "Got witness hashes for %s, asking BeamSyncer to fetch them", block_str)
            # XXX: Consider using urgent=False if the new block is more than a couple of blocks
            # ahead of our tip, as otherwise, once beam sync starts to fall behind, it may be
            # more difficult to catch up.
            urgent = True
            try:
                with trio.fail_after(1):
                    # Sometimes we get a NewBlock/NewBlockHashes msg before the BeamSyncer service
                    # has started, and there will be no subscribers to CollectMissingTrieNodes in
                    # that case. This ensures we wait for it to start before attempting to fire
                    # CollectMissingTrieNodes events.
                    await event_bus.wait_until_any_endpoint_subscribed_to(CollectMissingTrieNodes)
            except trio.TooSlowError:
                logger.warning(
                    "No subscribers for CollectMissingTrieNodes, cannot fetch witnesses for %s",
                    block_str,
                )
                return witness_hashes
            await event_bus.broadcast(CollectMissingTrieNodes(witness_hashes, urgent, block_number))
            base_db = DBClient.connect(database_ipc_path)
            with base_db:
                wit_db = AsyncWitnessDB(base_db)
                wit_db.persist_witness_hashes(block_hash, witness_hashes)
        else:
            logger.debug(
                "%s announced %s but doesn't have witness hashes for it. "
                "This could be a peer that does not support the wit protocol, though",
                peer,
                block_str,
            )
        return witness_hashes
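
A minimal usage sketch of the helper above, mirroring the call site in Example 9; apart from fetch_witnesses itself, every name here (proxy_peer, new_block_hash, new_block_number, event_bus, boot_info, metrics_registry, logger) is an assumption supplied by the calling service, not part of the original code:

async def handle_new_block_announcement(
        proxy_peer, new_block_hash, new_block_number,
        event_bus, boot_info, metrics_registry, logger):
    # Hypothetical caller reacting to a NewBlock announcement.
    witness_hashes = await fetch_witnesses(
        proxy_peer,
        new_block_hash,
        new_block_number,
        event_bus,
        boot_info.trinity_config.database_ipc_path,
        metrics_registry,
        logger,
    )
    if not witness_hashes:
        # The peer timed out, errored, or had no witness for this block; beam
        # import will fall back to fetching trie nodes on demand.
        logger.debug("No witness hashes fetched for block #%d", new_block_number)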
Example 2
    async def import_block(self, block: BlockAPI) -> BlockImportResult:
        self.logger.debug(
            "Beam importing %s (%d txns, %s gas) ...",
            block.header,
            len(block.transactions),
            f'{block.header.gas_used:,d}',
        )

        if not isinstance(self.metrics_registry, NoopMetricsRegistry):
            wit_db = AsyncWitnessDB(self._db)
            try:
                wit_hashes = wit_db.get_witness_hashes(block.hash)
            except WitnessHashesUnavailable:
                self.logger.info(
                    "No witness hashes for block %s. Import will be slow",
                    block)
                self.metrics_registry.counter(
                    'trinity.sync/block_witness_hashes_missing').inc()
            else:
                block_witness_uncollected = self._state_downloader._get_unique_missing_hashes(
                    wit_hashes)
                self.logger.debug(
                    "Missing %d nodes out of %d from witness of block %s",
                    len(block_witness_uncollected), len(wit_hashes), block)
                if block_witness_uncollected:
                    self.metrics_registry.counter(
                        'trinity.sync/block_witness_incomplete').inc()
                else:
                    self.metrics_registry.counter(
                        'trinity.sync/block_witness_complete').inc()

        parent_header = await self._chain.coro_get_block_header_by_hash(
            block.header.parent_hash)
        new_account_nodes, collection_time = await self._load_address_state(
            block.header,
            parent_header.state_root,
            block.transactions,
        )
        self._preloaded_account_state += new_account_nodes
        self._preloaded_account_time += collection_time

        import_timer = Timer()
        import_done = await self._event_bus.request(
            DoStatelessBlockImport(block))
        self._import_time += import_timer.elapsed

        if not import_done.completed:
            raise ValidationError(
                "Block import was cancelled, probably a shutdown")
        if import_done.exception:
            raise ValidationError(
                "Block import failed") from import_done.exception
        if import_done.block.hash != block.hash:
            raise ValidationError(
                f"Requsted {block} to be imported, but ran {import_done.block}"
            )
        self._blocks_imported += 1
        self._log_stats()
        return import_done.result
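
A hedged sketch of how a caller might drive the method above; the import_blocks wrapper and its arguments are illustrative assumptions, not the project's actual sync loop:

async def import_blocks(importer, blocks):
    # Hypothetical driver: `importer` is an instance of the class that defines
    # import_block above.
    for block in blocks:
        try:
            await importer.import_block(block)
        except ValidationError:
            # Raised when the import was cancelled (likely a shutdown), failed
            # outright, or imported a different block than requested.
            raise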
Example 3
async def test_witness_history_on_repeat_blocks():
    """
    Repeated blocks should not consume more slots in the limited history of block witnesses
    """
    wit_db = AsyncWitnessDB(MemoryDB())
    hash1 = Hash32Factory()
    hash1_witnesses = tuple(Hash32Factory.create_batch(5))
    await wit_db.coro_persist_witness_hashes(hash1, hash1_witnesses)

    hash2 = Hash32Factory()
    await wit_db.coro_persist_witness_hashes(hash2, tuple(Hash32Factory.create_batch(5)))

    # *almost* push the first witness out of history
    for _ in range(wit_db._max_witness_history - 2):
        await wit_db.coro_persist_witness_hashes(Hash32Factory(), Hash32Factory.create_batch(2))

    # It should still be there...
    loaded_hashes = await wit_db.coro_get_witness_hashes(hash1)
    assert set(loaded_hashes) == set(hash1_witnesses)

    # Add one more new witness, for an existing block
    await wit_db.coro_persist_witness_hashes(hash2, Hash32Factory.create_batch(2))

    # That new witness should *not* consume a block slot in history, so the first hash's
    #   witness should still be available.
    loaded_hashes = await wit_db.coro_get_witness_hashes(hash1)
    assert set(loaded_hashes) == set(hash1_witnesses)
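
The behaviour this test (and the related tests below) pins down can be illustrated with a minimal in-memory sketch. This is an assumption-laden illustration of the expected semantics only, not the actual AsyncWitnessDB implementation, which persists to a key-value database:

from collections import OrderedDict
from typing import Set, Tuple


class WitnessHistorySketch:
    """Illustrative only: bounded block -> witness-hash history."""

    # The real limit lives on AsyncWitnessDB; 100 is an arbitrary stand-in.
    _max_witness_history = 100

    def __init__(self) -> None:
        self._history: "OrderedDict[bytes, Set[bytes]]" = OrderedDict()

    def persist_witness_hashes(self, block_hash: bytes, hashes: Tuple[bytes, ...]) -> None:
        if block_hash in self._history:
            # Re-persisting an existing block unions the hashes and does not
            # consume an extra history slot (see Examples 3 and 8).
            self._history[block_hash] |= set(hashes)
            return
        self._history[block_hash] = set(hashes)
        if len(self._history) > self._max_witness_history:
            # Evict the oldest block once the history limit is exceeded
            # (see Example 7).
            self._history.popitem(last=False)

    def get_witness_hashes(self, block_hash: bytes) -> Tuple[bytes, ...]:
        if block_hash not in self._history:
            raise KeyError(block_hash)  # stands in for WitnessHashesUnavailable
        return tuple(self._history[block_hash])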
Example 4
    async def _ensure_witness(self, event: FetchBlockWitness) -> None:
        block_hash = event.hash
        block_number = event.number
        block_str = f"Block #{block_number}-0x{humanize_hash(block_hash)}"
        self.logger.debug("Attempting to fetch witness for %s", block_str)
        try:
            existing_wit = AsyncWitnessDB(
                self.base_db).get_witness_hashes(block_hash)
        except WitnessHashesUnavailable:
            pass
        else:
            self.logger.debug(
                "Already have witness hashes for %s, not fetching again",
                block_str)
            await self.event_bus.broadcast(BlockWitnessResult(existing_wit),
                                           event.broadcast_config())
            return

        witness_hashes: Tuple[Hash32, ...] = tuple()
        if event.preferred_peer is not None:
            try:
                preferred_peer = cast(
                    ETHPeer,
                    self.peer_pool.connected_nodes[event.preferred_peer])
            except KeyError:
                # This means the peer has disconnected since we fired the FetchBlockWitness event.
                pass
            else:
                if hasattr(preferred_peer, 'wit_api'):
                    witness_hashes = await _fetch_witness(
                        preferred_peer, block_hash, block_number,
                        self.event_bus, self.base_db, self.metrics_registry,
                        self.logger)
                else:
                    self.logger.debug(
                        "%s does not support the wit protocol, can't fetch witness for %s",
                        preferred_peer, block_str)
        else:
            queried_peers: List[ETHPeer] = []
            while not witness_hashes:
                pending_peers = [
                    peer for peer in self.peer_pool.connected_nodes.values()
                    if hasattr(peer, 'wit_api') and peer not in queried_peers
                ]
                if not pending_peers:
                    self.logger.debug(
                        "None of our peers have witness hashes for %s",
                        block_str)
                    break
                peer = cast(ETHPeer, pending_peers[0])
                queried_peers.append(peer)
                witness_hashes = await _fetch_witness(
                    peer, block_hash, block_number, self.event_bus,
                    self.base_db, self.metrics_registry, self.logger)

        await self.event_bus.broadcast(BlockWitnessResult(witness_hashes),
                                       event.broadcast_config())
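
The requesting side of this handler is not shown above; a hedged sketch of what it might look like, where the FetchBlockWitness constructor signature is an assumption inferred from the attributes the handler reads off the event:

async def request_block_witness(event_bus, block_hash, block_number):
    # Hypothetical requester for the handler above. The constructor signature
    # of FetchBlockWitness (and whether preferred_peer may be omitted) is an
    # assumption, not taken from the original code.
    return await event_bus.request(
        FetchBlockWitness(block_hash, block_number, preferred_peer=None))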
Example 5
async def test_persisting_and_looking_up():
    wit_db = AsyncWitnessDB(AtomicDB())

    hash1 = Hash32Factory()
    with pytest.raises(WitnessHashesUnavailable):
        await wit_db.coro_get_witness_hashes(hash1)

    hash1_witnesses = tuple(Hash32Factory.create_batch(5))
    await wit_db.coro_persist_witness_hashes(hash1, hash1_witnesses)
    assert await wit_db.coro_get_witness_hashes(hash1) == hash1_witnesses
Example 6
async def test_witness_eviction_on_repeat_blocks():
    """
    After witnesses are persisted twice for the same block, make sure that eviction
    does not cause any failures.
    """
    wit_db = AsyncWitnessDB(MemoryDB())
    hash_ = Hash32Factory()
    await wit_db.coro_persist_witness_hashes(hash_, Hash32Factory.create_batch(2))
    await wit_db.coro_persist_witness_hashes(hash_, Hash32Factory.create_batch(2))
    for _ in range(wit_db._max_witness_history):
        await wit_db.coro_persist_witness_hashes(Hash32Factory(), Hash32Factory.create_batch(2))
Example 7
async def test_witness_for_recent_blocks():
    wit_db = AsyncWitnessDB(AtomicDB())
    hash1 = Hash32Factory()
    hash1_witnesses = tuple(Hash32Factory.create_batch(5))
    await wit_db.coro_persist_witness_hashes(hash1, hash1_witnesses)

    # *almost* push the first witness out of history
    for _ in range(wit_db._max_witness_history - 1):
        await wit_db.coro_persist_witness_hashes(Hash32Factory(), Hash32Factory.create_batch(2))

    # It should still be there...
    assert await wit_db.coro_get_witness_hashes(hash1) == hash1_witnesses

    # Until one more new witness is added.
    await wit_db.coro_persist_witness_hashes(Hash32Factory(), Hash32Factory.create_batch(2))

    # Now the old witness has been flushed out of the history
    with pytest.raises(WitnessHashesUnavailable):
        await wit_db.coro_get_witness_hashes(hash1)

    assert len(wit_db._get_recent_blocks_with_witnesses()) == wit_db._max_witness_history
Example 8
async def test_witness_union():
    wit_db = AsyncWitnessDB(MemoryDB())
    hash1 = Hash32Factory()
    hash1_witnesses_unique1 = set(Hash32Factory.create_batch(3))
    hash1_witnesses_unique2 = set(Hash32Factory.create_batch(3))
    hash1_witnesses_both = set(Hash32Factory.create_batch(2))
    hash1_witnesses1 = tuple(hash1_witnesses_unique1 | hash1_witnesses_both)
    hash1_witnesses2 = tuple(hash1_witnesses_unique2 | hash1_witnesses_both)

    await wit_db.coro_persist_witness_hashes(hash1, hash1_witnesses1)
    await wit_db.coro_persist_witness_hashes(hash1, hash1_witnesses2)

    stored_hashes = await wit_db.coro_get_witness_hashes(hash1)

    expected = hash1_witnesses_unique1 | hash1_witnesses_both | hash1_witnesses_unique2
    assert set(stored_hashes) == expected
Example 9
    async def _fetch_witnesses(
            self, peer: ETHProxyPeer, block_hash: Hash32, block_number: BlockNumber) -> None:
        base_db = DBClient.connect(self._boot_info.trinity_config.database_ipc_path)
        with base_db:
            try:
                AsyncWitnessDB(base_db).get_witness_hashes(block_hash)
            except WitnessHashesUnavailable:
                pass
            else:
                block_str = f"Block #{block_number}-0x{humanize_hash(block_hash)}"
                self.logger.debug(
                    "Already have witness hashes for %s, not fetching again", block_str)
                return

        await fetch_witnesses(
            peer, block_hash, block_number, self._event_bus,
            self._boot_info.trinity_config.database_ipc_path,
            self._metrics_registry, self.logger)
Example 10
async def test_proxy_peer_requests(request, event_bus, other_event_bus,
                                   event_loop, chaindb_20, client_and_server):
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers(
        [client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=server_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(client_event_bus,
                                       client_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        await stack.enter_async_context(
            run_peer_pool_event_server(server_event_bus,
                                       server_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        base_db = chaindb_20.db
        await stack.enter_async_context(
            background_asyncio_service(
                ETHRequestServer(
                    server_event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(base_db),
                )))
        await stack.enter_async_context(
            background_asyncio_service(
                WitRequestServer(
                    server_event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    base_db,
                )))

        client_proxy_peer_pool = ETHProxyPeerPool(
            client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(client_proxy_peer_pool))

        proxy_peer_pool = ETHProxyPeerPool(server_event_bus,
                                           TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(
            client_peer.session)

        headers = await proxy_peer.eth_api.get_block_headers(0, 1, 0, False)

        assert len(headers) == 1
        block_header = headers[0]
        assert block_header.block_number == 0

        receipts = await proxy_peer.eth_api.get_receipts(headers)
        assert len(receipts) == 1
        receipt = receipts[0]
        assert receipt[1][0] == block_header.receipt_root

        block_bundles = await proxy_peer.eth_api.get_block_bodies(headers)
        assert len(block_bundles) == 1
        first_bundle = block_bundles[0]
        assert first_bundle[1][0] == block_header.transaction_root

        node_data = await proxy_peer.eth_api.get_node_data(
            (block_header.state_root, ))
        assert node_data[0][0] == block_header.state_root

        block_hash = block_header.hash
        node_hashes = tuple(Hash32Factory.create_batch(5))
        # Populate the server's witness DB so that it can reply to our request.
        wit_db = AsyncWitnessDB(base_db)
        wit_db.persist_witness_hashes(block_hash, node_hashes)
        response = await proxy_peer.wit_api.get_block_witness_hashes(block_hash)
        assert set(response) == set(node_hashes)
Example 11
async def _fetch_witness(
    peer: ETHPeer,
    block_hash: Hash32,
    block_number: BlockNumber,
    event_bus: EndpointAPI,
    db: DatabaseAPI,
    metrics_registry: MetricsRegistry,
    logger: ExtendedDebugLogger,
) -> Tuple[Hash32, ...]:
    """
    Fetch witness hashes for the given block from the given peer, emit a CollectMissingTrieNodes
    event to trigger the download of the trie nodes they refer to, and wait for those missing
    trie nodes to arrive.

    Returns the trie node hashes for the block witness, or an empty tuple if we cannot fetch them.
    """
    block_str = f"<Block #{block_number}-0x{humanize_hash(block_hash)}>"
    try:
        logger.debug("Asking %s for witness hashes for %s", peer, block_str)
        witness_hashes = await peer.wit_api.get_block_witness_hashes(block_hash)
    except asyncio.TimeoutError:
        logger.debug("Timed out trying to fetch witness hashes for %s from %s",
                     block_str, peer)
        return tuple()
    except Exception as err:
        logger.warning("Error fetching witness hashes for %s from %s: %s",
                       block_str, peer, err)
        return tuple()
    else:
        if witness_hashes:
            logger.debug(
                "Got witness hashes for %s, asking BeamSyncer to fetch trie nodes",
                block_str)
            # XXX: Consider using urgent=False if the new block is more than a couple of blocks
            # ahead of our tip, as otherwise, once beam sync starts to fall behind, it may be
            # more difficult to catch up.
            urgent = True
            try:
                # These events are handled by BeamSyncer, which gets restarted whenever we pivot,
                # so we sometimes have to wait a bit before we can fire those events. And we use
                # a long timeout because we want to be sure we fetch the witness once we have the
                # node hashes for it.
                await asyncio.wait_for(
                    event_bus.wait_until_any_endpoint_subscribed_to(
                        CollectMissingTrieNodes),
                    timeout=5,
                )
            except asyncio.TimeoutError:
                logger.warning(
                    "No subscribers for CollectMissingTrieNodes, cannot fetch witness for %s",
                    block_str,
                )
                return witness_hashes
            wit_db = AsyncWitnessDB(db)
            wit_db.persist_witness_hashes(block_hash, witness_hashes)
            result = await event_bus.request(
                CollectMissingTrieNodes(witness_hashes, urgent, block_number))
            logger.debug(
                "Collected %d missing trie nodes from %s witness",
                result.num_nodes_collected,
                block_str,
            )
        else:
            logger.debug("Got empty witness hashes for %s from %s", block_str,
                         peer)
        return witness_hashes
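
The XXX note above suggests downgrading urgency for blocks far ahead of the local tip; a minimal helper sketch of that idea, where the tip_number argument and the 2-block threshold are assumptions:

def witness_collection_is_urgent(block_number: int, tip_number: int,
                                 max_urgent_distance: int = 2) -> bool:
    # Illustrative helper for the XXX note: treat trie-node collection as
    # urgent only when the announced block is within a couple of blocks of
    # our current tip.
    return block_number - tip_number <= max_urgent_distance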