Example #1
    async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
        config = boot_info.trinity_config
        db = DBClient.connect(config.database_ipc_path)

        if boot_info.args.disable_discovery:
            discovery_service: async_service.Service = StaticDiscoveryService(
                event_bus,
                config.preferred_nodes,
            )
        else:
            vm_config = config.get_app_config(Eth1AppConfig).get_chain_config().vm_configuration
            headerdb = TrioHeaderDB(db)
            eth_cap_provider = functools.partial(generate_eth_cap_enr_field, vm_config, headerdb)
            socket = trio.socket.socket(family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM)
            await socket.bind(("0.0.0.0", config.port))
            base_db = LevelDB(config.node_db_dir)
            node_db = NodeDB(default_identity_scheme_registry, base_db)
            discovery_service = PreferredNodeDiscoveryService(
                config.nodekey,
                config.port,
                config.port,
                config.bootstrap_nodes,
                config.preferred_nodes,
                event_bus,
                socket,
                node_db,
                (eth_cap_provider,),
            )

        try:
            with db:
                await async_service.run_trio_service(discovery_service)
        except Exception:
            event_bus.broadcast_nowait(ShutdownRequest("Discovery ended unexpectedly"))
            raise
Example #2
    async def launch_sync(cls, node: Node[BasePeer],
                          strategy: BaseSyncStrategy, boot_info: BootInfo,
                          event_bus: EndpointAPI) -> None:
        await node.get_manager().wait_started()
        await strategy.sync(boot_info.args, cls.logger, node.get_chain(),
                            node.base_db, node.get_peer_pool(), event_bus,
                            node.master_cancel_token)

        if strategy.shutdown_node_on_halt:
            cls.logger.error("Sync ended unexpectedly. Shutting down trinity")
            event_bus.broadcast_nowait(
                ShutdownRequest("Sync ended unexpectedly"))
Example #3
def _broadcast_import_complete(
        event_bus: EndpointAPI, block: BlockAPI,
        broadcast_config: BroadcastConfig,
        future: 'asyncio.Future[BlockImportResult]') -> None:
    # Report the outcome of a finished block-import future; if the future was
    # cancelled, the result and exception fields are both left as None.
    completed = not future.cancelled()
    event_bus.broadcast_nowait(
        StatelessBlockImportDone(
            block,
            completed,
            future.result() if completed else None,
            future.exception() if completed else None,
        ),
        broadcast_config,
    )
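
One plausible way to wire this helper up is as a done-callback, so the broadcast fires as soon as the import future settles. This is a sketch only: `schedule_import_broadcast` is a hypothetical name, and Examples #7 and #11 below show the pattern actually used, calling the helper directly after awaiting the future.

import functools

# Hypothetical wiring sketch. add_done_callback passes the finished future as the
# callback's single argument, which partial turns into the helper's final
# `future` parameter.
def schedule_import_broadcast(event_bus, block, broadcast_config, import_future):
    import_future.add_done_callback(
        functools.partial(_broadcast_import_complete, event_bus, block, broadcast_config)
    )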
Example #4
    def on_ready(self, manager_eventbus: EndpointAPI) -> None:
        light_mode = self.boot_info.args.sync_mode == SYNC_LIGHT
        is_enabled = self.boot_info.args.tx_pool and not light_mode

        unsupported = self.boot_info.args.tx_pool and light_mode

        if is_enabled and not unsupported:
            self.start()
        elif unsupported:
            unsupported_msg = "Transaction pool not available in light mode"
            self.logger.error(unsupported_msg)
            manager_eventbus.broadcast_nowait(
                ShutdownRequest(unsupported_msg))
Example #5
async def _do_mock_response(request_type: Type[BaseEvent], response: BaseEvent,
                            event_bus: EndpointAPI,
                            ready: asyncio.Event) -> None:
    # Signal that the mock is ready, then answer the first `request_type` event
    # with `response` and stop listening.
    ready.set()
    async for req in event_bus.stream(request_type):
        await event_bus.broadcast(response, req.broadcast_config())
        break
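
A minimal sketch of how a test might drive this helper. `GetFooRequest`/`GetFooResponse` and the two connected endpoint fixtures (`server_bus`, `client_bus`) are hypothetical; the subscription wait mirrors the one used in Example #14.

# Sketch only: the event classes and endpoint fixtures below are hypothetical.
async def test_mock_response(server_bus, client_bus):
    ready = asyncio.Event()
    asyncio.ensure_future(
        _do_mock_response(GetFooRequest, GetFooResponse(result=42), server_bus, ready)
    )
    await ready.wait()
    # Make sure the mock's stream() subscription has propagated before requesting.
    await client_bus.wait_until_any_endpoint_subscribed_to(GetFooRequest)
    response = await client_bus.request(GetFooRequest())
    assert response.result == 42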
Example #6
    def on_ready(self, manager_eventbus: EndpointAPI) -> None:
        if self.boot_info.args.disable_networkdb_component:
            self.logger.warning("Network Database disabled via CLI flag")
            # Allow this component to be disabled for extreme cases such as the
            # user swapping in an equivalent experimental version.
            return
        else:
            try:
                get_tracking_database(get_networkdb_path(self.boot_info.trinity_config))
            except BadDatabaseError as err:
                manager_eventbus.broadcast_nowait(ShutdownRequest(
                    "Error loading network database. Try removing the database "
                    f"with the `remove-network-db` command:\n{err}"
                ))
            else:
                self.start()
Example #7
    async def serve(self, event_bus: EndpointAPI,
                    beam_chain: BeamChain) -> None:
        """
        Listen to DoStatelessBlockImport events, and import block when received.
        Reply with StatelessBlockImportDone when import is complete.
        """

        loop = asyncio.get_event_loop()
        async for event in event_bus.stream(DoStatelessBlockImport):
            # launch in new thread, so we don't block the event loop!
            import_completion = loop.run_in_executor(
                # Maybe build the pausing chain inside the new process?
                None,
                partial_import_block(beam_chain, event.block),
            )

            # Wrapped in `asyncio.shield` because we want to keep the service from
            #   shutting down until the block import is complete.
            # In the tests, for example, we cancel() this service and await it, so that
            #   we know the in-progress block import is complete. Then below, we do not
            #   send back the import completion (so the import server won't get
            #   triggered again).
            await asyncio.shield(import_completion)

            if self.manager.is_running:
                _broadcast_import_complete(
                    event_bus,
                    event.block,
                    event.broadcast_config(),
                    import_completion,  # type: ignore
                )
            else:
                break
Example #8
    async def serve(
            self,
            event_bus: EndpointAPI,
            beam_chain: BeamChain) -> None:
        """
        Listen to DoStatelessBlockPreview events, and execute the transactions to prefill
        all the needed state data.
        """
        with futures.ThreadPoolExecutor(
            max_workers=MAX_SPECULATIVE_EXECUTIONS_PER_PROCESS,
            thread_name_prefix="trinity-spec-exec-",
        ) as speculative_thread_executor:

            async for event in event_bus.stream(DoStatelessBlockPreview):
                if event.header.block_number % NUM_PREVIEW_SHARDS != self._shard_num:
                    continue

                self.logger.debug(
                    "DoStatelessBlockPreview-%d is previewing new block: %s",
                    self._shard_num,
                    event.header,
                )
                # Parallel Execution:
                # Run a complete block end-to-end
                asyncio.get_event_loop().run_in_executor(
                    # Maybe build the pausing chain inside the new process,
                    # so we can use process pool?
                    None,
                    partial_trigger_missing_state_downloads(
                        beam_chain,
                        event.header,
                        event.transactions,
                    )
                )

                # Speculative Execution:
                # Split transactions into groups by sender, and run them independently.
                # This effectively assumes that the transactions by each sender are not
                #   affected by any other transactions in the block. This is often true,
                #   so it helps speed up the search for data.
                # Being able to retrieve this predicted data in parallel, asking for more
                #   trie nodes in each GetNodeData request, can make the difference
                #   between keeping up with the network and falling behind.
                transaction_groups = groupby(attrgetter('sender'), event.transactions)
                for sender_transactions in transaction_groups.values():
                    asyncio.get_event_loop().run_in_executor(
                        speculative_thread_executor,
                        partial_speculative_execute(
                            beam_chain,
                            event.header,
                            sender_transactions,
                        )
                    )
Example #9
    async def _auto_connect_new_announced_endpoints(
        self,
        endpoint: EndpointAPI,
    ) -> None:
        """
        Connect the given endpoint to all new endpoints on the given stream
        """
        async for ev in endpoint.stream(AvailableEndpointsUpdated):
            # We only connect to Endpoints that appear after our own Endpoint in the set.
            # This ensures that we don't try to connect to an Endpoint while that remote
            # Endpoint also wants to connect to us.
            endpoints_to_connect_to = tuple(
                connection_config
                for index, val in enumerate(ev.available_endpoints)
                if val.name == endpoint.name
                for connection_config in ev.available_endpoints[index:]
                if not endpoint.is_connected_to(connection_config.name))
            if not endpoints_to_connect_to:
                continue

            endpoint_names = ",".join(
                (config.name for config in endpoints_to_connect_to))
            self.logger.debug(
                "EventBus Endpoint %s connecting to other Endpoints: %s",
                endpoint.name,
                endpoint_names,
            )
            try:
                await endpoint.connect_to_endpoints(*endpoints_to_connect_to)
            except Exception as e:
                self.logger.warning(
                    "Failed to connect %s to one of %s: %s",
                    endpoint.name,
                    endpoint_names,
                    e,
                )
                raise
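
The nested comprehension above boils down to "find our own entry in the announced set, then take everything from that position onward that we are not already connected to". A self-contained illustration of just that slicing logic, with plain strings standing in for the `ConnectionConfig` objects (treating our own name as already connected is purely an illustration choice):

# Plain-string stand-ins for ConnectionConfig objects; illustration only.
available = ("alice", "bob", "carol", "dave")
own_name = "bob"
already_connected = {"bob", "carol"}

to_connect = tuple(
    name
    for index, val in enumerate(available)
    if val == own_name
    for name in available[index:]
    if name not in already_connected
)
assert to_connect == ("dave",)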
Example #10
    async def run_process(self, event_bus: EndpointAPI) -> None:
        try:
            if self._boot_info.profile:
                with profiler(f'profile_{self.get_endpoint_name()}'):
                    await self.do_run(event_bus)
            else:
                await self.do_run(event_bus)
        except (trio.Cancelled, trio.MultiError):
            # These are expected when trinity is terminating because of a Ctrl-C
            raise
        except BaseException:
            # Leaving trinity running after a component crashes can lead to unexpected
            # behavior that'd be hard to debug/reproduce, so for now we shut it down if
            # any component crashes unexpectedly.
            event_bus.broadcast_nowait(
                ShutdownRequest(f"Unexpected error in {self}"))
            # Because of an issue in the ComponentManager (see comment in
            # _cleanup_component_task), when a component crashes and requests trinity to
            # shut down, there's still a chance its exception could be lost, so we log it
            # here as well.
            self.logger.exception(
                "Unexpected error in component %s, shutting down trinity",
                self)
            raise
Example #11
    async def serve(
            self,
            event_bus: EndpointAPI,
            beam_chain: BaseAsyncChain) -> None:
        """
        Listen to DoStatelessBlockImport events, and import block when received.
        Reply with StatelessBlockImportDone when import is complete.
        """

        async for event in self.wait_iter(event_bus.stream(DoStatelessBlockImport)):
            # launch in new thread, so we don't block the event loop!
            import_completion = self.get_event_loop().run_in_executor(
                # Maybe build the pausing chain inside the new process?
                None,
                partial(
                    beam_chain.import_block,
                    event.block,
                    perform_validation=True,
                ),
            )

            # Intentionally don't use .wait() below, because we want to keep the service
            #   from shutting down until the block import is complete.
            # In the tests, for example, we cancel() this service and await it, so that
            #   we know the in-progress block import is complete. Then below, we do not
            #   send back the import completion (so the import server won't get
            #   triggered again).
            await import_completion

            if self.is_running:
                _broadcast_import_complete(  # type: ignore
                    event_bus,
                    event.block,
                    event.broadcast_config(),
                    import_completion,
                )
            else:
                break
Example #12
async def execute_with_retries(
        event_bus: EndpointAPI, func: Func, params: Any,
        chain: Union[AsyncChainAPI, BaseAsyncBeaconChainDB]) -> Any:
    """
    Run ``func(*params)`` and return its result. If it fails because of missing state
    and beam sync (or anything that responds to the CollectMissingAccount family of
    events) is running, ask it to fetch the missing data and retry before giving up.
    """
    retryable = is_retryable(func)

    for iteration in itertools.count():
        try:
            return await func(*params)
        except MissingAccountTrieNode as exc:
            if not retryable:
                raise

            if iteration > MAX_RETRIES:
                raise Exception(
                    f"Failed to collect all necessary state after {MAX_RETRIES} attempts"
                ) from exc

            if not event_bus.is_any_endpoint_subscribed_to(
                    CollectMissingAccount):
                raise

            requested_header = await check_requested_block_age(
                chain, func, params)

            await event_bus.request(
                CollectMissingAccount(
                    exc.missing_node_hash,
                    exc.address_hash,
                    exc.state_root_hash,
                    urgent=True,
                    block_number=requested_header.block_number,
                ))
        except MissingBytecode as exc:
            if not retryable:
                raise

            if iteration > MAX_RETRIES:
                raise Exception(
                    f"Failed to collect all necessary state after {MAX_RETRIES} attempts"
                ) from exc

            if not event_bus.is_any_endpoint_subscribed_to(
                    CollectMissingBytecode):
                raise

            requested_header = await check_requested_block_age(
                chain, func, params)

            await event_bus.request(
                CollectMissingBytecode(
                    bytecode_hash=exc.missing_code_hash,
                    urgent=True,
                    block_number=requested_header.block_number,
                ))
        except MissingStorageTrieNode as exc:
            if not retryable:
                raise

            if iteration > MAX_RETRIES:
                raise Exception(
                    f"Failed to collect all necessary state after {MAX_RETRIES} attempts"
                ) from exc

            if not event_bus.is_any_endpoint_subscribed_to(
                    CollectMissingStorage):
                raise

            requested_header = await check_requested_block_age(
                chain, func, params)

            await event_bus.request(
                CollectMissingStorage(
                    missing_node_hash=exc.missing_node_hash,
                    storage_key=exc.requested_key,
                    storage_root_hash=exc.storage_root_hash,
                    account_address=exc.account_address,
                    urgent=True,
                    block_number=requested_header.block_number,
                ))
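
A sketch of a call site for this helper, assuming a hypothetical retryable coroutine `get_balance(address, header)`; the helper runs it, and on a missing-state exception asks beam sync to fill the gap and retries.

# Hypothetical call site: `get_balance` and its params are placeholders for any
# coroutine that may raise MissingAccountTrieNode, MissingBytecode, etc.
async def rpc_get_balance(event_bus, chain, address, header):
    params = (address, header)
    return await execute_with_retries(event_bus, get_balance, params, chain)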
Example #13
    async def _handle_sync_status_requests(self,
                                           event_bus: EndpointAPI) -> None:
        async for req in self.wait_iter(event_bus.stream(SyncingRequest)):
            await event_bus.broadcast(
                SyncingResponse(*self._get_sync_status()),
                req.broadcast_config())
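
The requesting side of this pair is not shown above; a minimal sketch (the helper name is hypothetical, but the `event_bus.request(...)` call is the same one used in Examples #12 and #14):

# Hypothetical caller: ask whichever component serves SyncingRequest for its status.
async def get_sync_status(event_bus: EndpointAPI) -> SyncingResponse:
    return await event_bus.request(SyncingRequest())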
Example #14
async def _fetch_witness(
    peer: ETHPeer,
    block_hash: Hash32,
    block_number: BlockNumber,
    event_bus: EndpointAPI,
    db: DatabaseAPI,
    metrics_registry: MetricsRegistry,
    logger: ExtendedDebugLogger,
) -> Tuple[Hash32, ...]:
    """
    Fetch witness hashes for the given block from the given peer, emit a CollectMissingTrieNodes
    event to trigger the download of the trie nodes referred to by them, and wait for the missing
    trie nodes to arrive.

    Returns the trie node hashes for the block witness, or an empty tuple if we cannot fetch them.
    """
    block_str = f"<Block #{block_number}-0x{humanize_hash(block_hash)}>"
    try:
        logger.debug("Asking %s for witness hashes for %s", peer, block_str)
        witness_hashes = await peer.wit_api.get_block_witness_hashes(block_hash)
    except asyncio.TimeoutError:
        logger.debug("Timed out trying to fetch witness hashes for %s from %s",
                     block_str, peer)
        return tuple()
    except Exception as err:
        logger.warning("Error fetching witness hashes for %s from %s: %s",
                       block_str, peer, err)
        return tuple()
    else:
        if witness_hashes:
            logger.debug(
                "Got witness hashes for %s, asking BeamSyncer to fetch trie nodes",
                block_str)
            # XXX: Consider using urgent=False if the new block is more than a couple of blocks
            # ahead of our tip; otherwise, once beam sync starts to fall behind, it may be more
            # difficult to catch up.
            urgent = True
            try:
                # These events are handled by BeamSyncer, which gets restarted whenever we pivot,
                # so we sometimes have to wait a bit before we can fire those events. And we use
                # a long timeout because we want to be sure we fetch the witness once we have the
                # node hashes for it.
                await asyncio.wait_for(
                    event_bus.wait_until_any_endpoint_subscribed_to(
                        CollectMissingTrieNodes),
                    timeout=5,
                )
            except asyncio.TimeoutError:
                logger.warning(
                    "No subscribers for CollectMissingTrieNodes, cannot fetch witness for %s",
                    block_str,
                )
                return witness_hashes
            wit_db = AsyncWitnessDB(db)
            wit_db.persist_witness_hashes(block_hash, witness_hashes)
            result = await event_bus.request(
                CollectMissingTrieNodes(witness_hashes, urgent, block_number))
            logger.debug(
                "Collected %d missing trie nodes from %s witness",
                result.num_nodes_collected,
                block_str,
            )
        else:
            logger.debug("Got empty witness hashes for %s from %s", block_str,
                         peer)
        return witness_hashes