Example #1
async def persist_headers(
    logger: ExtendedDebugLogger,
    db: BaseAsyncHeaderDB,
    syncer: BaseHeaderChainSyncer[TChainPeer],
    exit_condition: Callable[[Sequence[BlockHeaderAPI]],
                             Awaitable[bool]] = _always_false,
) -> AsyncIterator[HeaderPersistInfo]:
    """
    Persist each batch of headers emitted by the syncer, yielding a
    HeaderPersistInfo per batch. Stop as soon as exit_condition returns
    True for a batch (that batch is not persisted).
    """

    async for headers in syncer.new_sync_headers(HEADER_QUEUE_SIZE_TARGET):

        # Ensure the new batch properly extends the locally known chain.
        syncer._chain.validate_chain_extension(headers)

        timer = Timer()

        should_stop = await exit_condition(headers)
        if should_stop:
            break

        new_canon_headers, old_canon_headers = await db.coro_persist_header_chain(headers)

        logger.debug(
            "Header import details: %s..%s, old canon: %s..%s, new canon: %s..%s",
            headers[0],
            headers[-1],
            old_canon_headers[0] if old_canon_headers else None,
            old_canon_headers[-1] if old_canon_headers else None,
            new_canon_headers[0] if new_canon_headers else None,
            new_canon_headers[-1] if new_canon_headers else None,
        )
        yield HeaderPersistInfo(
            imported_headers=headers,
            old_canon_headers=old_canon_headers,
            new_canon_headers=new_canon_headers,
            elapsed_time=timer.elapsed,
        )
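A minimal consumption sketch; the logger, db, and syncer arguments are assumed to come from an existing Trinity setup, and sync_until / reached_target are hypothetical names:

async def sync_until(logger, db, syncer, target_block_number):
    # Hypothetical exit condition: stop before persisting the batch whose
    # last header reaches the target height.
    async def reached_target(headers):
        return headers[-1].block_number >= target_block_number

    async for info in persist_headers(
            logger, db, syncer, exit_condition=reached_target):
        logger.info(
            "Persisted %d headers in %.2fs",
            len(info.imported_headers),
            info.elapsed_time,
        )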
Example #2
def _extract_nodes_from_payload(
    sender: AddressAPI,
    payload: List[Tuple[str, bytes, bytes, bytes]],
    logger: ExtendedDebugLogger,
) -> Iterator[NodeAPI]:
    """
    Yield a Node for each endpoint in the payload, skipping any whose
    relayed address fails validation against the sender.
    """
    for ip, udp_port, tcp_port, node_id in payload:
        address = Address.from_endpoint(ip, udp_port, tcp_port)
        if check_relayed_addr(sender, address):
            yield Node(keys.PublicKey(node_id), address)
        else:
            logger.debug(
                "Skipping invalid address %s relayed by %s", address, sender)
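A hedged usage sketch; sender, payload, and logger are assumed to come from the discovery service that decoded the neighbours message:

# Collect only the nodes whose relayed addresses pass validation; the
# generator logs and skips the rest.
valid_nodes = tuple(_extract_nodes_from_payload(sender, payload, logger))
logger.debug("Extracted %d valid nodes from payload", len(valid_nodes))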
Example #3
async def fetch_witnesses(
    peer: ETHProxyPeer,
    block_hash: Hash32,
    block_number: BlockNumber,
    event_bus: EndpointAPI,
    database_ipc_path: pathlib.Path,
    metrics_registry: MetricsRegistry,
    logger: ExtendedDebugLogger,
) -> Tuple[Hash32, ...]:
    """
    Fetch witness hashes for the given block from the given peer and emit a
    CollectMissingTrieNodes event to trigger the download of the trie nodes referred by them.
    """
    block_str = f"Block #{block_number}-0x{humanize_hash(block_hash)}"
    try:
        logger.debug("Attempting to fetch witness hashes for %s from %s",
                     block_str, peer)
        witness_hashes = await peer.wit_api.get_block_witness_hashes(block_hash
                                                                     )
    except asyncio.TimeoutError:
        logger.debug("Timed out trying to fetch witnesses for %s from %s",
                     block_str, peer)
        return tuple()
    except Exception as err:
        logger.warning("Error fetching witnesses for %s from %s: %s",
                       block_str, peer, err)
        return tuple()
    else:
        if witness_hashes:
            metrics_registry.counter(
                'trinity.sync/block_witness_hashes.hit').inc()
            logger.debug(
                "Got witness hashes for %s, asking BeamSyncer to fetch them",
                block_str)
            # XXX: Consider using urgent=False if the new block is more than a couple of blocks
            # ahead of our tip, as otherwise when beam sync starts to fall behind it may be
            # harder to catch up.
            urgent = True
            try:
                with trio.fail_after(1):
                    # Sometimes we get a NewBlock/NewBlockHashes msg before the BeamSyncer service
                    # has started, and there will be no subscribers to CollectMissingTrieNodes in
                    # that case. This ensures we wait for it to start before attempting to fire
                    # CollectMissingTrieNodes events.
                    await event_bus.wait_until_any_endpoint_subscribed_to(
                        CollectMissingTrieNodes)
            except trio.TooSlowError:
                logger.warning(
                    "No subscribers for CollectMissingTrieNodes, cannot fetch witnesses for %s",
                    block_str,
                )
                return witness_hashes
            await event_bus.broadcast(
                CollectMissingTrieNodes(witness_hashes, urgent, block_number))
            base_db = DBClient.connect(database_ipc_path)
            with base_db:
                wit_db = AsyncWitnessDB(base_db)
                wit_db.persist_witness_hashes(block_hash, witness_hashes)
        else:
            metrics_registry.counter(
                'trinity.sync/block_witness_hashes.miss').inc()
            logger.debug(
                "%s announced %s but doesn't have witness hashes for it", peer,
                block_str)
        return witness_hashes
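Note that fetch_witnesses only fires the CollectMissingTrieNodes event and returns; it does not wait for the trie nodes themselves (contrast with _fetch_witness in Example #4). A minimal sketch of running it as a background task, assuming a trio nursery is already open and all arguments are in scope:

# Hedged sketch: run the fetch in the background so block processing is not
# blocked on witness retrieval. `nursery` is an assumed open trio nursery.
nursery.start_soon(
    fetch_witnesses,
    peer, block_hash, block_number,
    event_bus, database_ipc_path, metrics_registry, logger,
)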
Example #4
async def _fetch_witness(
    peer: ETHPeer,
    block_hash: Hash32,
    block_number: BlockNumber,
    event_bus: EndpointAPI,
    db: DatabaseAPI,
    metrics_registry: MetricsRegistry,
    logger: ExtendedDebugLogger,
) -> Tuple[Hash32, ...]:
    """
    Fetch witness hashes for the given block from the given peer, emit a CollectMissingTrieNodes
    event to trigger the download of the trie nodes referred by them and wait for the missing
    trie nodes to arrive.

    Returns the trie node hashes for the block witness, or an empty tuple if we cannot fetch them.
    """
    block_str = f"<Block #{block_number}-0x{humanize_hash(block_hash)}>"
    try:
        logger.debug("Asking %s for witness hashes for %s", peer, block_str)
        witness_hashes = await peer.wit_api.get_block_witness_hashes(block_hash)
    except asyncio.TimeoutError:
        logger.debug("Timed out trying to fetch witness hashes for %s from %s",
                     block_str, peer)
        return tuple()
    except Exception as err:
        logger.warning("Error fetching witness hashes for %s from %s: %s",
                       block_str, peer, err)
        return tuple()
    else:
        if witness_hashes:
            logger.debug(
                "Got witness hashes for %s, asking BeamSyncer to fetch trie nodes",
                block_str)
            # XXX: Consider using urgent=False if the new block is more than a couple of blocks
            # ahead of our tip, as otherwise when beam sync starts to fall behind it may be
            # harder to catch up.
            urgent = True
            try:
                # These events are handled by BeamSyncer, which gets restarted whenever we pivot,
                # so we sometimes have to wait a bit before we can fire those events. And we use
                # a long timeout because we want to be sure we fetch the witness once we have the
                # node hashes for it.
                await asyncio.wait_for(
                    event_bus.wait_until_any_endpoint_subscribed_to(
                        CollectMissingTrieNodes),
                    timeout=5,
                )
            except asyncio.TimeoutError:
                logger.warning(
                    "No subscribers for CollectMissingTrieNodes, cannot fetch witness for %s",
                    block_str,
                )
                return witness_hashes
            wit_db = AsyncWitnessDB(db)
            wit_db.persist_witness_hashes(block_hash, witness_hashes)
            result = await event_bus.request(
                CollectMissingTrieNodes(witness_hashes, urgent, block_number))
            logger.debug(
                "Collected %d missing trie nodes from %s witness",
                result.num_nodes_collected,
                block_str,
            )
        else:
            logger.debug("Got empty witness hashes for %s from %s", block_str,
                         peer)
        return witness_hashes
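Unlike fetch_witnesses in Example #3, this coroutine blocks until the trie nodes have been collected via event_bus.request, so callers may want an overall deadline. A hedged sketch, assuming an asyncio loop and that all arguments are already in scope (the 30-second bound is an arbitrary choice):

try:
    witness_hashes = await asyncio.wait_for(
        _fetch_witness(peer, block_hash, block_number,
                       event_bus, db, metrics_registry, logger),
        timeout=30,  # assumed bound; _fetch_witness has no overall deadline
    )
except asyncio.TimeoutError:
    witness_hashes = tuple()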