Example 1
    async def import_block(
        self, block: BaseBlock
    ) -> Tuple[BaseBlock, Tuple[BaseBlock, ...], Tuple[BaseBlock, ...]]:
        self.logger.info("Beam importing %s (%d txns) ...", block.header,
                         len(block.transactions))

        parent_header = await self._chain.coro_get_block_header_by_hash(
            block.header.parent_hash)
        new_account_nodes, collection_time = await self._load_address_state(
            block.header,
            parent_header.state_root,
            block.transactions,
        )
        self._preloaded_account_state += new_account_nodes
        self._preloaded_account_time += collection_time

        import_timer = Timer()
        import_done = await self._event_bus.request(
            DoStatelessBlockImport(block))
        self._import_time += import_timer.elapsed

        if not import_done.completed:
            raise ValidationError(
                "Block import was cancelled, probably a shutdown")
        if import_done.exception:
            raise ValidationError(
                "Block import failed") from import_done.exception
        if import_done.block.hash != block.hash:
            raise ValidationError(
                f"Requsted {block} to be imported, but ran {import_done.block}"
            )
        self._blocks_imported += 1
        self._log_stats()
        return import_done.result
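
All of the examples in this section time work with a small `Timer` helper that is not itself shown. The following is a minimal sketch of such a utility, inferred from the `elapsed` / `pop_elapsed()` usage in the snippets; the real implementation in the source tree may differ:

    import time

    class Timer:
        """Measure elapsed wall-clock time from a monotonic clock."""

        def __init__(self) -> None:
            self._start = time.monotonic()

        @property
        def elapsed(self) -> float:
            # Seconds since construction (or since the last pop_elapsed()).
            return time.monotonic() - self._start

        def pop_elapsed(self) -> float:
            # Return the elapsed time, then restart the timer.
            elapsed = self.elapsed
            self._start = time.monotonic()
            return elapsed
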
Example 2
    async def _import_ready_blocks(self) -> None:
        """
        Wait for block bodies to be downloaded, then import the blocks.
        """
        await self.wait(self._got_first_header.wait())
        while self.is_operational:
            timer = Timer()

            # wait for block bodies to become ready for execution
            completed_headers = await self.wait(
                self._block_import_tracker.ready_tasks())

            if self._block_import_tracker.has_ready_tasks():
                # Even after clearing out a big batch, there is no available capacity, so
                # pause any coroutines that might wait for capacity
                self._db_buffer_capacity.clear()
            else:
                # There is available capacity, let any waiting coroutines continue
                self._db_buffer_capacity.set()

            await self._import_blocks(completed_headers)

            head = await self.wait(self.db.coro_get_canonical_head())
            self.logger.info(
                "Synced chain segment with %d blocks in %.2f seconds, new head: %s",
                len(completed_headers),
                timer.elapsed,
                head,
            )
Example 3
    async def _import_blocks(self, headers: Tuple[BlockHeader, ...]) -> None:
        """
        Import the blocks for the corresponding headers

        :param headers: headers that have the block bodies downloaded
        """
        unimported_blocks = self._headers_to_blocks(headers)

        for block in unimported_blocks:
            timer = Timer()
            _, new_canonical_blocks, old_canonical_blocks = await self.wait(
                self._block_importer.import_block(block)
            )

            if new_canonical_blocks == (block,):
                # simple import of a single new block.
                self.logger.info("Imported block %d (%d txs) in %.2f seconds",
                                 block.number, len(block.transactions), timer.elapsed)
            elif not new_canonical_blocks:
                # imported block from a fork.
                self.logger.info("Imported non-canonical block %d (%d txs) in %.2f seconds",
                                 block.number, len(block.transactions), timer.elapsed)
            elif old_canonical_blocks:
                self.logger.info(
                    "Chain Reorganization: Imported block %d (%d txs) in %.2f "
                    "seconds, %d blocks discarded and %d new canonical blocks added",
                    block.number,
                    len(block.transactions),
                    timer.elapsed,
                    len(old_canonical_blocks),
                    len(new_canonical_blocks),
                )
            else:
                raise Exception("Invariant: unreachable code path")
Example 4
    async def _load_address_state(self,
                                  header: BlockHeader,
                                  parent_state_root: Hash32,
                                  transactions: Tuple[BaseTransaction, ...],
                                  urgent: bool = True) -> Tuple[int, float]:
        """
        Load all state needed to read transaction account status.
        """

        address_timer = Timer()
        num_accounts, new_account_nodes = await self._request_address_nodes(
            header,
            parent_state_root,
            transactions,
            urgent,
        )
        collection_time = address_timer.elapsed

        self.logger.debug(
            "Previewed %s state for %d addresses in %.2fs; got %d trie nodes; urgent? %r",
            header,
            num_accounts,
            collection_time,
            new_account_nodes,
            urgent,
        )

        return new_account_nodes, collection_time
Example 5
    async def _import_ready_blocks(self) -> None:
        """
        Wait for block bodies to be downloaded, then import the blocks.
        """
        await self.wait(self._got_first_header.wait())
        while self.is_operational:
            timer = Timer()

            # This tracker waits for all prerequisites to be complete, and returns headers in
            # order, so that each header's parent is already persisted.
            get_ready_coro = self._block_import_tracker.ready_tasks(
                BLOCK_IMPORT_QUEUE_SIZE_TARGET)
            completed_headers = await self.wait(get_ready_coro)

            if self._block_import_tracker.has_ready_tasks():
                # Even after clearing out a big batch, there is no available capacity, so
                # pause any coroutines that might wait for capacity
                self._db_buffer_capacity.clear()
            else:
                # There is available capacity, let any waiting coroutines continue
                self._db_buffer_capacity.set()

            await self._import_blocks(completed_headers)

            head = await self.wait(self.db.coro_get_canonical_head())
            self.logger.info(
                "Synced chain segment with %d blocks in %.2f seconds, new head: %s",
                len(completed_headers),
                timer.elapsed,
                head,
            )
Example 6
    def _import_block() -> BlockImportResult:
        t = Timer()
        beam_chain.clear_first_vm()
        try:
            reorg_info = beam_chain.import_block(block,
                                                 perform_validation=True)
        except StateUnretrievable as exc:
            import_time = t.elapsed

            vm = beam_chain.get_first_vm()
            beam_stats = vm.get_beam_stats()
            vm.logger.debug(
                "Beam pivot over %s (%d txns) because %r after %.1fs, %%exec %.0f, stats: %s",
                block.header,
                len(block.transactions),
                exc,
                import_time,
                100 * (import_time - beam_stats.data_pause_time) / import_time,
                beam_stats,
            )
            raise
        else:
            import_time = t.elapsed

            vm = beam_chain.get_first_vm()
            beam_stats = vm.get_beam_stats()
            beam_chain.logger.debug(
                "BeamImport %s (%d txns) total time: %.1f s, %%exec %.0f, stats: %s",
                block.header,
                len(block.transactions),
                import_time,
                100 * (import_time - beam_stats.data_pause_time) / import_time,
                beam_stats,
            )
            return reorg_info
Example 7
async def persist_headers(
    logger: ExtendedDebugLogger,
    db: BaseAsyncHeaderDB,
    syncer: BaseHeaderChainSyncer[TChainPeer],
    exit_condition: Callable[[Sequence[BlockHeaderAPI]],
                             Awaitable[bool]] = _always_false
) -> AsyncIterator[HeaderPersistInfo]:

    async for headers in syncer.new_sync_headers(HEADER_QUEUE_SIZE_TARGET):

        syncer._chain.validate_chain_extension(headers)

        timer = Timer()

        should_stop = await exit_condition(headers)
        if should_stop:
            break

        new_canon_headers, old_canon_headers = await db.coro_persist_header_chain(
            headers)

        logger.debug(
            "Header import details: %s..%s, old canon: %s..%s, new canon: %s..%s",
            headers[0],
            headers[-1],
            old_canon_headers[0] if len(old_canon_headers) else None,
            old_canon_headers[-1] if len(old_canon_headers) else None,
            new_canon_headers[0] if len(new_canon_headers) else None,
            new_canon_headers[-1] if len(new_canon_headers) else None,
        )
        yield HeaderPersistInfo(imported_headers=headers,
                                old_canon_headers=old_canon_headers,
                                new_canon_headers=new_canon_headers,
                                elapsed_time=timer.elapsed)
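
Since `persist_headers` is an async generator, a hypothetical consumer might drive it like this; the `run_header_sync` wrapper and its log message are illustrative, not from the source:

    async def run_header_sync(logger, db, syncer) -> None:
        # Each yielded HeaderPersistInfo describes one persisted batch of headers.
        async for info in persist_headers(logger, db, syncer):
            logger.info(
                "Persisted headers %s..%s in %.2fs",
                info.imported_headers[0],
                info.imported_headers[-1],
                info.elapsed_time,
            )
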
Example 8
    async def _persist_headers(self) -> None:
        async for headers in self._header_syncer.new_sync_headers(
                HEADER_QUEUE_SIZE_TARGET):

            self._header_syncer._chain.validate_chain_extension(headers)

            timer = Timer()

            exited = await self._exit_if_checkpoint(headers)
            if exited:
                break

            new_canon_headers, old_canon_headers = await self._db.coro_persist_header_chain(
                headers)

            head = await self._db.coro_get_canonical_head()

            self.logger.info(
                "Imported %d headers in %0.2f seconds, new head: %s",
                len(headers),
                timer.elapsed,
                head,
            )
            self.logger.debug(
                "Header import details: %s..%s, old canon: %s..%s, new canon: %s..%s",
                headers[0],
                headers[-1],
                old_canon_headers[0] if len(old_canon_headers) else None,
                old_canon_headers[-1] if len(old_canon_headers) else None,
                new_canon_headers[0] if len(new_canon_headers) else None,
                new_canon_headers[-1] if len(new_canon_headers) else None,
            )
Example 9
    def _trigger_missing_state_downloads() -> None:
        vm = beam_chain.get_vm(header)
        unused_header = header.copy(gas_used=0)

        # this won't actually save the results, but all we need to do is generate the trie requests
        t = Timer()
        try:
            _, receipts, _ = vm.apply_all_transactions(transactions,
                                                       unused_header)
        except ValidationError as exc:
            preview_time = t.elapsed
            vm.logger.debug(
                "Speculative transactions %s failed for %s after %.1fs: %s",
                transactions,
                header,
                preview_time,
                exc,
            )
        else:
            preview_time = t.elapsed

            beam_stats = vm.get_beam_stats()
            vm.logger.debug2(
                "Speculative transaction (%d/%d gas) for %s in %.1f s, %%exec %.0f, stats: %s",
                sum(r.gas_used for r in receipts),
                sum(txn.gas for txn in transactions),
                header,
                preview_time,
                100 * (preview_time - beam_stats.data_pause_time) /
                preview_time,
                beam_stats,
            )
Example 10
    async def import_block(self, block: BlockAPI) -> BlockImportResult:
        self.logger.debug(
            "Beam importing %s (%d txns, %s gas) ...",
            block.header,
            len(block.transactions),
            f'{block.header.gas_used:,d}',
        )

        if not isinstance(self.metrics_registry, NoopMetricsRegistry):
            wit_db = AsyncWitnessDB(self._db)
            try:
                wit_hashes = wit_db.get_witness_hashes(block.hash)
            except WitnessHashesUnavailable:
                self.logger.info(
                    "No witness hashes for block %s. Import will be slow",
                    block)
                self.metrics_registry.counter(
                    'trinity.sync/block_witness_hashes_missing').inc()
            else:
                block_witness_uncollected = self._state_downloader._get_unique_missing_hashes(
                    wit_hashes)
                self.logger.debug(
                    "Missing %d nodes out of %d from witness of block %s",
                    len(block_witness_uncollected), len(wit_hashes), block)
                if block_witness_uncollected:
                    self.metrics_registry.counter(
                        'trinity.sync/block_witness_incomplete').inc()
                else:
                    self.metrics_registry.counter(
                        'trinity.sync/block_witness_complete').inc()

        parent_header = await self._chain.coro_get_block_header_by_hash(
            block.header.parent_hash)
        new_account_nodes, collection_time = await self._load_address_state(
            block.header,
            parent_header.state_root,
            block.transactions,
        )
        self._preloaded_account_state += new_account_nodes
        self._preloaded_account_time += collection_time

        import_timer = Timer()
        import_done = await self._event_bus.request(
            DoStatelessBlockImport(block))
        self._import_time += import_timer.elapsed

        if not import_done.completed:
            raise ValidationError(
                "Block import was cancelled, probably a shutdown")
        if import_done.exception:
            raise ValidationError(
                "Block import failed") from import_done.exception
        if import_done.block.hash != block.hash:
            raise ValidationError(
                f"Requsted {block} to be imported, but ran {import_done.block}"
            )
        self._blocks_imported += 1
        self._log_stats()
        return import_done.result
Example 11
    async def _periodically_report_progress(self) -> None:
        try:
            # _work_queue is only defined in python 3.8 -- don't report the stat otherwise
            threadpool_queue = self._threadpool._work_queue  # type: ignore
        except AttributeError:
            threadpool_queue = None

        while self.manager.is_running:
            self._time_on_urgent = 0
            interval_timer = Timer()
            await asyncio.sleep(self._report_interval)

            if threadpool_queue is not None:
                threadpool_queue_len = threadpool_queue.qsize()
            else:
                threadpool_queue_len = "?"

            msg = "all=%d  " % self._total_processed_nodes
            msg += "urgent=%d  " % self._urgent_processed_nodes
            # The percent of time spent in the last interval waiting on an urgent node
            #   from the queen peer:
            msg += "crit=%.0f%%  " % (100 * self._time_on_urgent /
                                      interval_timer.elapsed)
            msg += "pred=%d  " % self._predictive_processed_nodes
            msg += "all/sec=%d  " % (self._total_processed_nodes /
                                     self._timer.elapsed)
            msg += "urgent/sec=%d  " % (self._urgent_processed_nodes /
                                        self._timer.elapsed)
            msg += "urg_reqs=%d  " % (self._urgent_requests)
            msg += "pred_reqs=%d  " % (self._predictive_requests)
            msg += "timeouts=%d" % self._total_timeouts
            msg += "  u_pend=%d" % self._node_tasks.num_pending()
            msg += "  u_prog=%d" % self._node_tasks.num_in_progress()
            msg += "  p_pend=%d" % self._maybe_useful_nodes.num_pending()
            msg += "  p_prog=%d" % self._maybe_useful_nodes.num_in_progress()
            msg += "  p_wait=%d" % len(self._preview_events)
            msg += "  p_woke=%d" % self._predictive_found_nodes_woke_up
            msg += "  p_found=%d" % self._predictive_found_nodes_during_timeout
            msg += "  thread_Q=20+%s" % threadpool_queue_len
            self.logger.debug("beam-sync: %s", msg)

            self._predictive_found_nodes_woke_up = 0
            self._predictive_found_nodes_during_timeout = 0

            # log peer counts
            show_top_n_peers = 5
            self.logger.debug(
                "beam-queen-usage-top-%d: urgent=%s, predictive=%s, spread=%d, reserve_pred=%d",
                show_top_n_peers,
                [(str(peer.remote), num) for peer, num in self.
                 _num_urgent_requests_by_peer.most_common(show_top_n_peers)],
                [(str(peer.remote), num)
                 for peer, num in self._num_predictive_requests_by_peer.
                 most_common(show_top_n_peers)],
                self._spread_factor,
                self._min_predictive_peers,
            )
            self._num_urgent_requests_by_peer.clear()
            self._num_predictive_requests_by_peer.clear()
Example 12
    def __init__(self, head: BlockHeader) -> None:
        # The `head` from the previous time we reported stats
        self.prev_head = head
        # The latest `head` we have synced
        self.latest_head = head

        # A `Timer` object to report elapsed time between reports
        self.timer = Timer()

        # EMA of the blocks per second
        self.blocks_per_second_ema = EMA(initial_value=0, smoothing_factor=0.05)

        # EMA of the transactions per second
        self.transactions_per_second_ema = EMA(initial_value=0, smoothing_factor=0.05)

        # Number of transactions processed
        self.num_transactions = 0
Example 13
    async def _get_nodes_from_peer(self, peer: ETHPeer,
                                   node_hashes: Tuple[Hash32, ...],
                                   urgent_batch_id: int,
                                   urgent_node_hashes: Tuple[Hash32, ...],
                                   predictive_node_hashes: Tuple[Hash32, ...],
                                   predictive_batch_id: int) -> None:
        if urgent_batch_id is not None:
            urgent_timer = Timer()

        nodes = await self._request_nodes(peer, node_hashes)

        urgent_nodes = {
            node_hash: node
            for node_hash, node in nodes if node_hash in urgent_node_hashes
        }
        predictive_nodes = {
            node_hash: node
            for node_hash, node in nodes if node_hash in predictive_node_hashes
        }
        if len(urgent_nodes) + len(predictive_nodes) < len(nodes):
            raise ValidationError(
                f"All nodes must be either urgent or predictive")

        if len(urgent_nodes) == 0 and urgent_batch_id is not None:
            self.logger.debug("%s returned no urgent nodes from %r", peer,
                              urgent_node_hashes)

        # batch all DB writes into one, for performance
        with self._db.atomic_batch() as batch:
            for node_hash, node in nodes:
                batch[node_hash] = node

        if urgent_batch_id is not None:
            self._node_tasks.complete(urgent_batch_id,
                                      tuple(urgent_nodes.keys()))

        if predictive_batch_id is not None:
            self._maybe_useful_nodes.complete(predictive_batch_id,
                                              tuple(predictive_nodes.keys()))

        self._urgent_processed_nodes += len(urgent_nodes)
        for node_hash in predictive_nodes.keys():
            if node_hash not in urgent_node_hashes:
                self._predictive_processed_nodes += 1
        self._total_processed_nodes += len(nodes)

        if len(nodes):
            for new_data in self._new_data_events:
                new_data.set()

        if urgent_batch_id is not None:
            time_on_urgent = urgent_timer.elapsed
            self.logger.debug(
                "beam-rtt: got %d/%d urgent nodes in %.3fs with %d/%d predictive from %s (%s)",
                len(urgent_nodes), len(urgent_node_hashes), time_on_urgent,
                len(predictive_nodes), len(predictive_node_hashes),
                peer.remote, urgent_node_hashes[0][:2].hex())
            self._time_on_urgent += time_on_urgent
Example 14
    async def _find_urgent_nodes(
            self,
            queen: ETHPeer,
            urgent_hashes: Tuple[Hash32, ...],
            batch_id: int) -> None:

        # Generate and schedule the tasks to request the urgent node(s) from multiple peers
        knights = tuple(self._queen_tracker.pop_knights())
        urgent_requests = [
            create_task(
                self._get_nodes(peer, urgent_hashes, urgent=True),
                name=f"BeamDownloader._get_nodes({peer.remote}, ...)",
            )
            for peer in (queen,) + knights
        ]

        # Process the returned nodes, in the order they complete
        urgent_timer = Timer()
        async with cleanup_tasks(*urgent_requests):
            for result_coro in asyncio.as_completed(urgent_requests):
                nodes_returned, new_nodes, peer = await result_coro
                time_on_urgent = urgent_timer.elapsed

                # After the first peer returns something, cancel all other pending tasks
                if len(nodes_returned) > 0:
                    # Stop waiting for other peer responses
                    break
                elif peer == queen:
                    self.logger.debug("queen %s returned 0 urgent nodes of %r", peer, urgent_hashes)
                    # Wait for the next peer response

        # Log the received urgent nodes
        if peer == queen:
            log_header = "beam-queen-urgent-rtt"
        else:
            log_header = "spread-beam-urgent-rtt"
        self.logger.debug(
            "%s: got %d/%d +%d nodes in %.3fs from %s (%s)",
            log_header,
            len(nodes_returned),
            len(urgent_hashes),
            len(new_nodes),
            time_on_urgent,
            peer.remote,
            urgent_hashes[0][:2].hex(),
        )

        # Stat updates
        self._total_processed_nodes += len(new_nodes)
        self._urgent_processed_nodes += len(new_nodes)
        self._time_on_urgent += time_on_urgent

        # Complete the task in the TaskQueue
        self._node_tasks.complete(batch_id, tuple(node_hash for node_hash, _ in nodes_returned))

        # Re-insert the peers for the next request
        for knight in knights:
            self._queen_tracker.insert_peer(knight)
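
The `cleanup_tasks` helper used above appears to be an async context manager that guarantees the losing request tasks are torn down once a winner breaks out of the loop. A minimal sketch under that assumption (the actual helper may differ):

    import asyncio
    from contextlib import asynccontextmanager
    from typing import Any, AsyncIterator

    @asynccontextmanager
    async def cleanup_tasks(*tasks: "asyncio.Task[Any]") -> AsyncIterator[None]:
        # Ensure no task outlives the block, even on exception or early break.
        try:
            yield
        finally:
            for task in tasks:
                if not task.done():
                    task.cancel()
            # Wait for the cancellations to settle, swallowing CancelledError.
            await asyncio.gather(*tasks, return_exceptions=True)
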
Example 15
    def _trigger_missing_state_downloads() -> None:
        vm = beam_chain.get_vm(header)
        unused_header = header.copy(gas_used=0)

        # this won't actually save the results, but all we need to do is generate the trie requests
        t = Timer()
        try:
            vm.apply_all_transactions(transactions, unused_header)
            # Making the state root can also trigger missing trie nodes, so keep it in the try block
            vm.state.make_state_root()
            # We don't need to persist trie nodes here, they are persisted by the BeamDownloader.
        except StateUnretrievable as exc:
            preview_time = t.elapsed
            beam_stats = vm.get_beam_stats()
            vm.logger.debug(
                "Beam pivot over all %d txns for %s b/c %r after %.1fs, %%exec %.0f, stats: %s",
                len(transactions),
                header,
                exc,
                preview_time,
                100 * (preview_time - beam_stats.data_pause_time) /
                preview_time,
                beam_stats,
            )
        except ValidationError as exc:
            preview_time = t.elapsed
            vm.logger.debug(
                "Preview of all %d transactions failed for %s after %.1fs: %s",
                len(transactions),
                header,
                preview_time,
                exc,
            )
        except BrokenPipeError:
            # BrokenPipeError subclasses OSError, so it must be caught first
            # or this handler would be unreachable.
            vm.logger.info(
                "Got BrokenPipeError while Beam Sync previewed execution of %s",
                header)
            vm.logger.debug("BrokenPipeError trace for %s",
                            header,
                            exc_info=True)
        except OSError:
            vm.logger.info(
                "Got OSError while Beam Sync previewed execution of %s",
                header)
        else:
            preview_time = t.elapsed

            beam_stats = vm.get_beam_stats()
            vm.logger.debug(
                "Previewed %d transactions for %s in %.1f s, %%exec %.0f, stats: %s",
                len(transactions),
                header,
                preview_time,
                100 * (preview_time - beam_stats.data_pause_time) /
                preview_time,
                beam_stats,
            )
Example 16
    async def _persist_headers(self) -> None:
        async for headers in self._header_syncer.new_sync_headers():
            timer = Timer()
            await self.wait(self._db.coro_persist_header_chain(headers))

            head = await self.wait(self._db.coro_get_canonical_head())
            self.logger.info(
                "Imported %d headers in %0.2f seconds, new head: %s",
                len(headers), timer.elapsed, head)
Example 17
class ChainSyncPerformanceTracker:
    def __init__(self, head: BlockHeader) -> None:
        # The `head` from the previous time we reported stats
        self.prev_head = head
        # The latest `head` we have synced
        self.latest_head = head

        # A `Timer` object to report elapsed time between reports
        self.timer = Timer()

        # EMA of the blocks per second
        self.blocks_per_second_ema = EMA(initial_value=0,
                                         smoothing_factor=0.05)

        # EMA of the transactions per second
        self.transactions_per_second_ema = EMA(initial_value=0,
                                               smoothing_factor=0.05)

        # Number of transactions processed
        self.num_transactions = 0

    def record_transactions(self, count: int) -> None:
        self.num_transactions += count

    def set_latest_head(self, head: BlockHeader) -> None:
        self.latest_head = head

    def report(self) -> ChainSyncStats:
        elapsed = self.timer.pop_elapsed()

        num_blocks = self.latest_head.block_number - self.prev_head.block_number
        blocks_per_second = num_blocks / elapsed
        transactions_per_second = self.num_transactions / elapsed

        self.blocks_per_second_ema.update(blocks_per_second)
        self.transactions_per_second_ema.update(transactions_per_second)

        stats = ChainSyncStats(
            prev_head=self.prev_head,
            latest_head=self.latest_head,
            elapsed=elapsed,
            num_blocks=num_blocks,
            blocks_per_second=self.blocks_per_second_ema.value,
            num_transactions=self.num_transactions,
            transactions_per_second=self.transactions_per_second_ema.value,
        )

        # reset the counters
        self.num_transactions = 0
        self.prev_head = self.latest_head

        return stats
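
The tracker smooths its rates with an `EMA` helper. A minimal sketch of such an exponential-moving-average class, inferred from the `initial_value` / `smoothing_factor` arguments and the `update()` / `value` usage above (the actual implementation may differ):

    class EMA:
        """Exponential moving average: value <- (1 - a) * value + a * sample."""

        def __init__(self, initial_value: float, smoothing_factor: float) -> None:
            if not 0 < smoothing_factor <= 1:
                raise ValueError("smoothing_factor must be in (0, 1]")
            self._value = initial_value
            self._alpha = smoothing_factor

        def update(self, sample: float) -> None:
            # Weight the new sample by alpha, the running value by (1 - alpha).
            self._value = (1 - self._alpha) * self._value + self._alpha * sample

        @property
        def value(self) -> float:
            return self._value
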
Example 18
 async def _loop_monitoring_task(self, _: EndpointAPI) -> None:
     while True:
         timer = Timer()
         await asyncio.sleep(self.loop_monitoring_wakeup_interval)
         delay = timer.elapsed - self.loop_monitoring_wakeup_interval
         self.logger.debug2("Loop monitoring task called; delay=%.3fs", delay)
         if delay > self.loop_monitoring_max_delay:
             pending_tasks = len([task for task in asyncio.all_tasks() if not task.done()])
             self.logger.warning(
                 "Event loop blocked or overloaded: delay=%.3fs, tasks=%d",
                 delay,
                 pending_tasks,
             )
Example 19
 async def _loop_monitoring_task(self, _: EndpointAPI) -> None:
     while True:
         timer = Timer()
         await trio.sleep(self.loop_monitoring_wakeup_interval)
         delay = timer.elapsed - self.loop_monitoring_wakeup_interval
         self.logger.debug2("Loop monitoring task called; delay=%.3fs", delay)
         if delay > self.loop_monitoring_max_delay:
             stats = trio.hazmat.current_statistics()
             self.logger.warning(
                 "Event loop blocked or overloaded: delay=%.3fs, tasks=%d, stats=%s",
                 delay,
                 stats.tasks_living,
                 stats.io_statistics,
             )
Example 20
    async def _import_blocks(self, headers: Tuple[BlockHeader, ...]) -> None:
        """
        Import the blocks for the corresponding headers

        :param headers: headers that have the block bodies downloaded
        """
        for header in headers:
            vm_class = self.chain.get_vm_class(header)
            block_class = vm_class.get_block_class()

            if _is_body_empty(header):
                transactions: List[BaseTransaction] = []
                uncles: List[BlockHeader] = []
            else:
                body = self._pending_bodies.pop(header)
                tx_class = block_class.get_transaction_class()
                transactions = [
                    tx_class.from_base_transaction(tx)
                    for tx in body.transactions
                ]
                uncles = body.uncles

            block = block_class(header, transactions, uncles)
            timer = Timer()
            _, new_canonical_blocks, old_canonical_blocks = await self.wait(
                self.chain.coro_import_block(block, perform_validation=True))

            if new_canonical_blocks == (block,):
                # simple import of a single new block.
                self.logger.info("Imported block %d (%d txs) in %.2f seconds",
                                 block.number, len(transactions),
                                 timer.elapsed)
            elif not new_canonical_blocks:
                # imported block from a fork.
                self.logger.info(
                    "Imported non-canonical block %d (%d txs) in %.2f seconds",
                    block.number, len(transactions), timer.elapsed)
            elif old_canonical_blocks:
                self.logger.info(
                    "Chain Reorganization: Imported block %d (%d txs) in %.2f "
                    "seconds, %d blocks discarded and %d new canonical blocks added",
                    block.number,
                    len(transactions),
                    timer.elapsed,
                    len(old_canonical_blocks),
                    len(new_canonical_blocks),
                )
            else:
                raise Exception("Invariant: unreachable code path")
Example 21
    async def ensure_nodes_present(self,
                                   node_hashes: Collection[Hash32],
                                   urgent: bool = True) -> int:
        """
        Wait until the nodes that are the preimages of `node_hashes` are available in the database.
        If one is not available in the first check, request it from peers.

        :param urgent: Should this node be downloaded urgently? If False, download as backfill

        Note that if your ultimate goal is account or storage data, it's probably better to use
        download_account or download_storage. This method is useful for other
        scenarios, like bytecode lookups or intermediate node lookups.

        :return: how many nodes had to be downloaded
        """
        if urgent:
            t = Timer()
            num_nodes_found = await self._wait_for_nodes(
                node_hashes,
                self._node_tasks,
                BLOCK_IMPORT_MISSING_STATE_TIMEOUT,
            )
            # If it took too long to get a single urgent node, then increase the "spread" factor
            if (len(node_hashes) == 1
                    and t.elapsed > MAX_ACCEPTABLE_WAIT_FOR_URGENT_NODE):
                new_spread_factor = clamp(
                    0,
                    self._max_spread_beam_factor(),
                    self._spread_factor + 1,
                )
                if new_spread_factor != self._spread_factor:
                    self.logger.debug(
                        "spread-beam-update: Urgent node latency=%.3fs, update factor %d to %d",
                        t.elapsed,
                        self._spread_factor,
                        new_spread_factor,
                    )
                    self._queen_tracker.set_desired_knight_count(
                        new_spread_factor)
                    self._spread_factor = new_spread_factor
        else:
            num_nodes_found = await self._wait_for_nodes(
                node_hashes,
                self._maybe_useful_nodes,
                BLOCK_IMPORT_MISSING_STATE_TIMEOUT,
            )

        return num_nodes_found
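
For context, a hypothetical caller of `ensure_nodes_present` might look like the following; the `load_bytecode` wrapper and the `downloader` / `code_hash` names are illustrative, not from the source:

    async def load_bytecode(downloader, code_hash: Hash32) -> None:
        # Block until the bytecode preimage is in the database, downloading
        # it urgently from peers if it is missing.
        num_downloaded = await downloader.ensure_nodes_present(
            (code_hash,),
            urgent=True,
        )
        if num_downloaded:
            downloader.logger.debug(
                "Downloaded %d missing node(s) for bytecode lookup",
                num_downloaded,
            )
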
Example 22
    async def _import_block(self, block: BlockAPI) -> None:
        timer = Timer()
        _, new_canonical_blocks, old_canonical_blocks = await self.wait(
            self._block_importer.import_block(block)
        )
        # how much is the imported block's header behind the current time?
        lag = time.time() - block.header.timestamp
        humanized_lag = humanize_seconds(lag)

        if new_canonical_blocks == (block,):
            # simple import of a single new block.

            # decide whether to log to info or debug, based on log rate
            if self._import_log_limiter.can_take(1):
                log_fn = self.logger.info
                self._import_log_limiter.take_nowait(1)
            else:
                log_fn = self.logger.debug
            log_fn(
                "Imported block %d (%d txs) in %.2f seconds, with %s lag",
                block.number,
                len(block.transactions),
                timer.elapsed,
                humanized_lag,
            )
        elif not new_canonical_blocks:
            # imported block from a fork.
            self.logger.info(
                "Imported non-canonical block %d (%d txs) in %.2f seconds, with %s lag",
                block.number,
                len(block.transactions),
                timer.elapsed,
                humanized_lag,
            )
        elif old_canonical_blocks:
            self.logger.info(
                "Chain Reorganization: Imported block %d (%d txs) in %.2f seconds, "
                "%d blocks discarded and %d new canonical blocks added, with %s lag",
                block.number,
                len(block.transactions),
                timer.elapsed,
                len(old_canonical_blocks),
                len(new_canonical_blocks),
                humanized_lag,
            )
        else:
            raise Exception("Invariant: unreachable code path")
Example 23
    def _import_block() -> BlockImportResult:
        t = Timer()
        beam_chain.clear_first_vm()
        reorg_info = beam_chain.import_block(block, perform_validation=True)
        import_time = t.elapsed

        vm = beam_chain.get_first_vm()
        beam_stats = vm.get_beam_stats()
        beam_chain.logger.debug(
            "BeamImport %s (%d txns) total time: %.1f s, %%exec %.0f, stats: %s",
            block.header,
            len(block.transactions),
            import_time,
            100 * (import_time - beam_stats.data_pause_time) / import_time,
            beam_stats,
        )
        return reorg_info
Example 24
    async def _persist_headers(self) -> None:
        async for headers in self._header_syncer.new_sync_headers():

            self._header_syncer._chain.validate_chain_extension(headers)

            timer = Timer()
            new_canonical_headers, _ = await self._db.coro_persist_header_chain(
                headers)

            if len(new_canonical_headers) > 0:
                head = new_canonical_headers[-1]
            else:
                head = await self._db.coro_get_canonical_head()

            self.logger.info(
                "Imported %d headers in %0.2f seconds, new head: %s",
                len(headers), timer.elapsed, head)
Example 25
    async def _import_blocks(self, headers: Tuple[BlockHeader, ...]) -> None:
        """
        Import the blocks for the corresponding headers

        :param headers: headers that have the block bodies downloaded
        """
        unimported_blocks = self._headers_to_blocks(headers)

        for block in unimported_blocks:
            timer = Timer()
            _, new_canonical_blocks, old_canonical_blocks = await self.wait(
                self._block_importer.import_block(block))

            if new_canonical_blocks == (block,):
                # simple import of a single new block.

                # decide whether to log to info or debug, based on log rate
                if self._import_log_limiter.can_take(1):
                    log_fn = self.logger.info
                    self._import_log_limiter.take_nowait(1)
                else:
                    log_fn = self.logger.debug
                log_fn(
                    "Imported block %d (%d txs) in %.2f seconds, with %s lag",
                    block.number,
                    len(block.transactions),
                    timer.elapsed,
                    humanize_seconds(time.time() - block.header.timestamp),
                )
            elif not new_canonical_blocks:
                # imported block from a fork.
                self.logger.info(
                    "Imported non-canonical block %d (%d txs) in %.2f seconds",
                    block.number, len(block.transactions), timer.elapsed)
            elif old_canonical_blocks:
                self.logger.info(
                    "Chain Reorganization: Imported block %d (%d txs) in %.2f "
                    "seconds, %d blocks discarded and %d new canonical blocks added",
                    block.number,
                    len(block.transactions),
                    timer.elapsed,
                    len(old_canonical_blocks),
                    len(new_canonical_blocks),
                )
            else:
                raise Exception("Invariant: unreachable code path")
Example 26
    async def get_queen_peer(self) -> ETHPeer:
        """
        Wait until a queen peer is designated, then return it.
        """
        t = Timer()
        while self._queen_peer is None:
            await self._queen_updated.wait()
            self._queen_updated.clear()

        queen_starve_time = t.elapsed
        if queen_starve_time > WARN_AFTER_QUEEN_STARVED:
            self.logger.info(
                "Blocked for %.2fs waiting for queen=%s",
                queen_starve_time,
                self._queen_peer,
            )

        return self._queen_peer
Example 27
    def _trigger_missing_state_downloads() -> None:
        vm = beam_chain.get_vm(header)
        unused_header = header.copy(gas_used=0)

        # this won't actually save the results, but all we need to do is generate the trie requests
        t = Timer()
        vm.apply_all_transactions(transactions, unused_header)
        vm.state.make_state_root()
        preview_time = t.elapsed

        beam_stats = vm.get_beam_stats()
        vm.logger.debug(
            "Previewed %d transactions for %s in %.1f s, %%exec %.0f, stats: %s",
            len(transactions),
            header,
            preview_time,
            100 * (preview_time - beam_stats.data_pause_time) / preview_time,
            beam_stats,
        )
Example 28
    async def _import_ready_blocks(self) -> None:
        """
        Wait for block bodies to be downloaded, then import the blocks.
        """
        while self.is_operational:
            timer = Timer()

            # wait for block bodies to become ready for execution
            completed_headers = await self.wait(self._block_import_tracker.ready_tasks())

            await self._import_blocks(completed_headers)

            head = await self.wait(self.db.coro_get_canonical_head())
            self.logger.info(
                "Synced chain segment with %d blocks in %.2f seconds, new head: %s",
                len(completed_headers),
                timer.elapsed,
                head,
            )
Example 29
    async def _persist_headers(self) -> None:
        async for headers in self._header_syncer.new_sync_headers(
                HEADER_QUEUE_SIZE_TARGET):
            timer = Timer()

            exited = await self._exit_if_checkpoint(headers)
            if exited:
                break

            await self._persist_header_chain(headers)

            head = await self.wait(self._db.coro_get_canonical_head())

            self.logger.info(
                "Imported %d headers in %0.2f seconds, new head: %s",
                len(headers),
                timer.elapsed,
                head,
            )
Example 30
    async def _periodically_report_progress(self) -> None:
        while self.manager.is_running:
            self._time_on_urgent = 0
            interval_timer = Timer()
            await asyncio.sleep(self._report_interval)

            msg = "all=%d  " % self._total_processed_nodes
            msg += "urgent=%d  " % self._urgent_processed_nodes
            # The percent of time spent in the last interval waiting on an urgent node
            #   from the queen peer:
            msg += "crit=%.0f%%  " % (100 * self._time_on_urgent /
                                      interval_timer.elapsed)
            msg += "pred=%d  " % self._predictive_processed_nodes
            msg += "all/sec=%d  " % (self._total_processed_nodes /
                                     self._timer.elapsed)
            msg += "urgent/sec=%d  " % (self._urgent_processed_nodes /
                                        self._timer.elapsed)
            msg += "urg_reqs=%d  " % (self._urgent_requests)
            msg += "pred_reqs=%d  " % (self._predictive_requests)
            msg += "timeouts=%d" % self._total_timeouts
            msg += "  u_pend=%d" % self._node_tasks.num_pending()
            msg += "  u_prog=%d" % self._node_tasks.num_in_progress()
            msg += "  p_pend=%d" % self._maybe_useful_nodes.num_pending()
            msg += "  p_prog=%d" % self._maybe_useful_nodes.num_in_progress()
            self.logger.debug("beam-sync: %s", msg)

            # log peer counts
            show_top_n_peers = 5
            self.logger.debug(
                "beam-queen-usage-top-%d: urgent=%s, predictive=%s, spread=%d, reserve_pred=%d",
                show_top_n_peers,
                [(str(peer.remote), num) for peer, num in self.
                 _num_urgent_requests_by_peer.most_common(show_top_n_peers)],
                [(str(peer.remote), num)
                 for peer, num in self._num_predictive_requests_by_peer.
                 most_common(show_top_n_peers)],
                self._spread_factor,
                self._min_predictive_peers,
            )
            self._num_urgent_requests_by_peer.clear()
            self._num_predictive_requests_by_peer.clear()