Example #1
        def transaction_applied_hook(
                self,
                transaction_index: int,
                transactions: Sequence[SignedTransactionAPI],
                base_header: BlockHeaderAPI,
                partial_header: BlockHeaderAPI,
                computation: ComputationAPI,
                receipt: ReceiptAPI) -> None:

            num_transactions = len(transactions)

            now = time.monotonic()
            if urgent:  # `urgent` is bound in the enclosing scope, not shown in this excerpt
                # The currently-importing block
                if transaction_index == num_transactions - 1:
                    logger = self.logger.info
                    log_header = "Beamed"
                elif now - self.last_log_time > MIN_GAS_LOG_WAIT:
                    logger = self.logger.info
                    log_header = "Beaming"
                else:
                    # Logged an update too recently, skipping...
                    return
            else:
                # Don't log anything for preview executions, for now
                return

            beam_stats = self.get_beam_stats()
            fraction_complete = partial_header.gas_used / base_header.gas_used
            if fraction_complete:
                total_est = beam_stats.data_pause_time / fraction_complete
                est_time = humanize_seconds(total_est - beam_stats.data_pause_time)
            else:
                est_time = "?"

            logger(
                "%s: #%d txn %d/%d, rtt: %.3fs, wait: %s, nodes: %d, gas: %s/%s (%.1f%%) ETA %s",
                log_header,
                base_header.block_number,
                transaction_index + 1,
                num_transactions,
                beam_stats.avg_rtt,
                humanize_seconds(beam_stats.data_pause_time),
                beam_stats.num_nodes,
                f"{partial_header.gas_used:,d}",
                f"{base_header.gas_used:,d}",
                100 * fraction_complete,
                est_time,
            )
            self.last_log_time = now
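The ETA math above projects a total from the fraction of block gas already executed, then subtracts the wait time already spent. A minimal sketch of that arithmetic with made-up numbers (not taken from any real block):

    # Hypothetical values, for illustration only
    data_pause_time = 8.0      # seconds spent waiting on remote data so far
    fraction_complete = 0.25   # partial_header.gas_used / base_header.gas_used
    total_est = data_pause_time / fraction_complete  # projected total: 32.0s
    remaining = total_est - data_pause_time          # ETA shown in the log: 24.0s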
Example #2
    async def _periodically_report_stats(self) -> None:
        while self.is_operational:
            inbound_peers = len([
                peer for peer in self.connected_nodes.values() if peer.inbound
            ])
            self.logger.info("Connected peers: %d inbound, %d outbound",
                             inbound_peers,
                             (len(self.connected_nodes) - inbound_peers))
            subscribers = len(self._subscribers)
            if subscribers:
                longest_queue = max(self._subscribers,
                                    key=operator.attrgetter('queue_size'))
                self.logger.info("Peer subscribers: %d, longest queue: %s(%d)",
                                 subscribers, longest_queue.__class__.__name__,
                                 longest_queue.queue_size)

            self.logger.debug("== Peer details == ")
            for peer in self.connected_nodes.values():
                if not peer.is_running:
                    self.logger.warning(
                        "%s is no longer alive but has not been removed from pool",
                        peer)
                    continue
                most_received_type, count = max(peer.received_msgs.items(),
                                                key=operator.itemgetter(1))
                self.logger.debug(
                    "%s: uptime=%s, received_msgs=%d, most_received=%s(%d)",
                    peer, humanize_seconds(peer.uptime),
                    peer.received_msgs_count, most_received_type, count)
                self.logger.debug("client_version_string='%s'",
                                  peer.client_version_string)
                for line in peer.get_extra_stats():
                    self.logger.debug("    %s", line)
            self.logger.debug("== End peer details == ")
            await self.sleep(self._report_interval)
Example #3
    def generate_genesis_state(cls, get_genesis_time: Callable[[], Timestamp],
                               network_dir: Path, keymap: Dict[BLSPubkey, int],
                               clients: Tuple[Client, ...]) -> None:
        logger = cls.get_logger()
        state_machine_class = XiaoLongBaoStateMachine
        # NOTE: see https://github.com/ethereum/trinity/issues/786
        override_lengths(XiaoLongBaoStateMachine.config)

        # Since create_mock_genesis takes a long time, update the real genesis_time later
        dummy_time = Timestamp(int(time.time()))
        state, _ = create_mock_genesis(
            pubkeys=cast(
                Sequence[BLSPubkey],
                keymap.keys(),
            ),
            config=state_machine_class.config,
            keymap=keymap,
            genesis_block_class=state_machine_class.block_class,
            genesis_time=dummy_time,
        )
        genesis_time = get_genesis_time()
        logger.info(
            "Genesis time will be %s from now",
            humanize_seconds(genesis_time - int(time.time())),
        )
        state = state.copy(genesis_time=genesis_time)
        # The output here can be trusted, so use unsafe mode for performance
        yaml = YAML(typ='unsafe')
        with open(network_dir / GENESIS_FILE, "w") as f:
            yaml.dump(to_formatted_dict(state), f)

        # Distribute genesis file to clients
        for client in clients:
            with open(client.client_dir / GENESIS_FILE, "w") as f:
                yaml.dump(to_formatted_dict(state), f)
Example #4
    def generate_genesis_state(cls, get_genesis_time: Callable[[], Timestamp],
                               network_dir: Path, keymap: Dict[Any, Any],
                               clients: Tuple[Client, ...]) -> None:
        logger = cls.get_logger()
        state_machine_class = XiaoLongBaoStateMachine

        # Since create_mock_genesis takes a long time, update the real genesis_time later
        dummy_time = Timestamp(int(time.time()))
        state, _ = create_mock_genesis(
            num_validators=len(keymap.keys()),
            config=state_machine_class.config,
            keymap=keymap,
            genesis_block_class=state_machine_class.block_class,
            genesis_time=dummy_time,
        )
        genesis_time = get_genesis_time()
        logger.info(
            "Genesis time will be %s from now",
            humanize_seconds(genesis_time - int(time.time())),
        )
        state = state.copy(genesis_time=genesis_time)
        # The output here can be trusted, so use unsafe mode for performance
        yaml = YAML(typ='unsafe')
        with open(network_dir / GENESIS_FILE, "w") as f:
            yaml.dump(to_formatted_dict(state), f)

        # Distribute genesis file to clients
        for client in clients:
            with open(client.client_dir / GENESIS_FILE, "w") as f:
                yaml.dump(to_formatted_dict(state), f)
Example #5
    async def _display_stats(self) -> None:
        while self.is_operational:
            await self.sleep(5)
            self.logger.debug(
                "(in progress, queued, max size) of bodies, receipts: %r. Write capacity? %s",
                [(q.num_in_progress(), len(q), q._maxsize) for q in (
                    self._block_body_tasks,
                    self._receipt_tasks,
                )],
                "yes" if self._db_buffer_capacity.is_set() else "no",
            )

            stats = self.tracker.report()
            utcnow = int(datetime.datetime.utcnow().timestamp())
            head_age = utcnow - stats.latest_head.timestamp
            self.logger.info(
                ("blks=%-4d  "
                 "txs=%-5d  "
                 "bps=%-3d  "
                 "tps=%-4d  "
                 "elapsed=%0.1f  "
                 "head=#%d %s  "
                 "age=%s"),
                stats.num_blocks,
                stats.num_transactions,
                stats.blocks_per_second,
                stats.transactions_per_second,
                stats.elapsed,
                stats.latest_head.block_number,
                humanize_hash(stats.latest_head.hash),
                humanize_seconds(head_age),
            )
Example #6
    def deregister_peer(self, peer: BasePeer) -> None:
        """
        At disconnection we check whether our session with the peer was long
        enough to warrant recording statistics about them in our peer database.
        """
        # prevent circular import
        from trinity.protocol.common.peer import BaseChainPeer
        peer = cast(BaseChainPeer, peer)
        if peer.disconnect_reason is None:
            # we don't care about peers that don't properly disconnect
            self.logger.debug(
                'Not tracking disconnecting peer %s[%s] missing disconnect reason',
                peer,
                'inbound' if peer.inbound else 'outbound',
            )
            return
        elif peer.uptime < MIN_QUALIFYING_UPTIME:
            # we don't register a peer who connects for less than
            # `MIN_QUALIFYING_UPTIME` as having been a successful connection.
            self.logger.debug(
                'Not tracking disconnecting peer %s[%s][%s] due to insufficient uptime (%s < %s)',
                peer,
                peer.disconnect_reason,
                'inbound' if peer.inbound else 'outbound',
                humanize_seconds(peer.uptime),
                humanize_seconds(MIN_QUALIFYING_UPTIME),
            )
            return
        else:
            self.logger.debug(
                'Tracking disconnecting peer %s[%s][%s] with uptime: %s',
                peer,
                'inbound' if peer.inbound else 'outbound',
                peer.disconnect_reason,
                humanize_seconds(peer.uptime),
            )

        self.track_peer_connection(
            remote=peer.remote,
            is_outbound=not peer.inbound,
            last_connected_at=datetime.datetime.utcnow(),
            genesis_hash=peer.genesis_hash,
            protocol=peer.sub_proto.name,
            protocol_version=peer.sub_proto.version,
            network_id=peer.network_id,
        )
Example #7
    def __str__(self) -> str:
        avg_rtt = self.avg_rtt

        wait_time = humanize_seconds(self.data_pause_time)

        return (
            f"BeamStat: accts={self.num_accounts}, "
            f"a_nodes={self.num_account_nodes}, codes={self.num_bytecodes}, "
            f"strg={self.num_storages}, s_nodes={self.num_storage_nodes}, "
            f"nodes={self.num_nodes}, rtt={avg_rtt:.3f}s, wait={wait_time}")
Example #8
    async def handle_blacklist_command(self) -> None:
        async for command in self.event_bus.stream(BlacklistEvent):
            self.logger.debug2(
                'Received blacklist command: remote: %s | timeout: %s | reason: %s',
                command.remote,
                humanize_seconds(command.timeout_seconds),
                command.reason,
            )
            self.tracker.record_blacklist(command.remote,
                                          command.timeout_seconds,
                                          command.reason)
Example #9
    def _create_record(self, remote: NodeAPI, expires_at: datetime.datetime, reason: str) -> None:
        record = BlacklistRecord(node_id=to_hex(remote.id), expires_at=expires_at, reason=reason)
        self.session.add(record)
        # mypy doesn't know about the type of the `commit()` function
        self.session.commit()  # type: ignore

        usable_delta = expires_at - datetime.datetime.utcnow()
        self.logger.debug(
            '%s will not be retried for %s because %s',
            remote,
            humanize_seconds(usable_delta.total_seconds()),
            reason,
        )
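Example #9 converts a datetime.timedelta to seconds before humanizing it. A standalone sketch of that conversion (the two-hour window is an arbitrary illustration):

    import datetime

    expires_at = datetime.datetime.utcnow() + datetime.timedelta(hours=2)
    usable_delta = expires_at - datetime.datetime.utcnow()
    seconds_remaining = usable_delta.total_seconds()  # a float, just under 7200.0
    # humanize_seconds(seconds_remaining) would render this as roughly "1h59m59s"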
Example #10
    async def _import_block(self, block: BlockAPI) -> None:
        timer = Timer()
        _, new_canonical_blocks, old_canonical_blocks = await self.wait(
            self._block_importer.import_block(block)
        )
        # how much is the imported block's header behind the current time?
        lag = time.time() - block.header.timestamp
        humanized_lag = humanize_seconds(lag)

        if new_canonical_blocks == (block,):
            # simple import of a single new block.

            # decide whether to log to info or debug, based on log rate
            if self._import_log_limiter.can_take(1):
                log_fn = self.logger.info
                self._import_log_limiter.take_nowait(1)
            else:
                log_fn = self.logger.debug
            log_fn(
                "Imported block %d (%d txs) in %.2f seconds, with %s lag",
                block.number,
                len(block.transactions),
                timer.elapsed,
                humanized_lag,
            )
        elif not new_canonical_blocks:
            # imported block from a fork.
            self.logger.info(
                "Imported non-canonical block %d (%d txs) in %.2f seconds, with %s lag",
                block.number,
                len(block.transactions),
                timer.elapsed,
                humanized_lag,
            )
        elif old_canonical_blocks:
            self.logger.info(
                "Chain Reorganization: Imported block %d (%d txs) in %.2f seconds, "
                "%d blocks discarded and %d new canonical blocks added, with %s lag",
                block.number,
                len(block.transactions),
                timer.elapsed,
                len(old_canonical_blocks),
                len(new_canonical_blocks),
                humanized_lag,
            )
        else:
            raise Exception("Invariant: unreachable code path")
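Examples #10 and #12 pick INFO vs. DEBUG through a rate limiter so that rapid block imports don't flood the log. A self-contained sketch of the can_take/take_nowait pattern (this TokenBucket is a hypothetical stand-in, not Trinity's actual limiter):

    import time

    class TokenBucket:
        """Hypothetical limiter mirroring the can_take/take_nowait calls above."""

        def __init__(self, rate: float, capacity: float) -> None:
            self._rate = rate          # tokens replenished per second
            self._capacity = capacity
            self._tokens = capacity
            self._last = time.monotonic()

        def _refill(self) -> None:
            now = time.monotonic()
            elapsed = now - self._last
            self._tokens = min(self._capacity, self._tokens + elapsed * self._rate)
            self._last = now

        def can_take(self, amount: float = 1) -> bool:
            self._refill()
            return self._tokens >= amount

        def take_nowait(self, amount: float = 1) -> None:
            self._tokens -= amount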
Example #11
    def _create_record(self, remote: Node, timeout_seconds: int,
                       reason: str) -> None:
        uri = remote.uri()
        expires_at = datetime.datetime.utcnow() + datetime.timedelta(
            seconds=timeout_seconds)

        record = BlacklistRecord(uri=uri, expires_at=expires_at, reason=reason)
        self.session.add(record)
        # mypy doesn't know about the type of the `commit()` function
        self.session.commit()  # type: ignore

        self.logger.debug(
            '%s will not be retried for %s because %s',
            remote,
            humanize_seconds(timeout_seconds),
            reason,
        )
Example #12
    async def _import_blocks(self, headers: Tuple[BlockHeader, ...]) -> None:
        """
        Import the blocks for the corresponding headers

        :param headers: headers that have the block bodies downloaded
        """
        unimported_blocks = self._headers_to_blocks(headers)

        for block in unimported_blocks:
            timer = Timer()
            _, new_canonical_blocks, old_canonical_blocks = await self.wait(
                self._block_importer.import_block(block))

            if new_canonical_blocks == (block, ):
                # simple import of a single new block.

                # decide whether to log to info or debug, based on log rate
                if self._import_log_limiter.can_take(1):
                    log_fn = self.logger.info
                    self._import_log_limiter.take_nowait(1)
                else:
                    log_fn = self.logger.debug
                log_fn(
                    "Imported block %d (%d txs) in %.2f seconds, with %s lag",
                    block.number,
                    len(block.transactions),
                    timer.elapsed,
                    humanize_seconds(time.time() - block.header.timestamp),
                )
            elif not new_canonical_blocks:
                # imported block from a fork.
                self.logger.info(
                    "Imported non-canonical block %d (%d txs) in %.2f seconds",
                    block.number, len(block.transactions), timer.elapsed)
            elif old_canonical_blocks:
                self.logger.info(
                    "Chain Reorganization: Imported block %d (%d txs) in %.2f "
                    "seconds, %d blocks discarded and %d new canonical blocks added",
                    block.number,
                    len(block.transactions),
                    timer.elapsed,
                    len(old_canonical_blocks),
                    len(new_canonical_blocks),
                )
            else:
                raise Exception("Invariant: unreachable code path")
Example #13
    async def _periodically_report_stats(self) -> None:
        while self.manager.is_running:
            inbound_peers = len([
                peer for peer in self.connected_nodes.values() if peer.inbound
            ])
            self.logger.info("Connected peers: %d inbound, %d outbound",
                             inbound_peers,
                             (len(self.connected_nodes) - inbound_peers))
            subscribers = len(self._subscribers)
            if subscribers:
                longest_queue = max(self._subscribers,
                                    key=operator.attrgetter('queue_size'))
                self.logger.debug(
                    "Peer subscribers: %d, longest queue: %s(%d)", subscribers,
                    longest_queue.__class__.__name__, longest_queue.queue_size)

            self.logger.debug("== Peer details == ")
            # make a copy, because we might edit the original during iteration
            peers = tuple(self.connected_nodes.values())
            for peer in peers:
                if not peer.get_manager().is_running:
                    self.logger.debug(
                        "%s is no longer alive but had not been removed from pool",
                        peer)
                    continue
                self.logger.debug(
                    "%s: uptime=%s, received_msgs=%d",
                    peer,
                    humanize_seconds(peer.uptime),
                    peer.received_msgs_count,
                )
                self.logger.debug(
                    "client_version_string='%s'",
                    peer.safe_client_version_string,
                )
                try:
                    for line in peer.get_extra_stats():
                        self.logger.debug("    %s", line)
                except (UnknownAPI, PeerConnectionLost) as exc:
                    self.logger.debug("    Failure during stats lookup: %r",
                                      exc)

            self.log_extra_stats()
            self.logger.debug("== End peer details == ")
            await asyncio.sleep(self._report_stats_interval)
Example #14
    async def should_connect_to(self, remote: Node) -> bool:
        try:
            record = self._get_record(remote.uri())
        except NoResultFound:
            return True

        now = datetime.datetime.utcnow()
        if now < record.expires_at:
            delta = record.expires_at - now
            self.logger.debug(
                'skipping %s, it failed because "%s" and is not usable for %s',
                remote,
                record.reason,
                humanize_seconds(delta.total_seconds()),
            )
            return False

        return True
Example #15
    async def _periodically_report_stats(self) -> None:
        while self.is_operational:
            inbound_peers = len([
                peer for peer in self.connected_nodes.values() if peer.inbound
            ])
            self.logger.info("Connected peers: %d inbound, %d outbound",
                             inbound_peers,
                             (len(self.connected_nodes) - inbound_peers))
            subscribers = len(self._subscribers)
            if subscribers:
                longest_queue = max(self._subscribers,
                                    key=operator.attrgetter('queue_size'))
                self.logger.debug(
                    "Peer subscribers: %d, longest queue: %s(%d)", subscribers,
                    longest_queue.__class__.__name__, longest_queue.queue_size)

            self.logger.debug("== Peer details == ")
            # make a copy, because we might edit the original during iteration
            peers = tuple(self.connected_nodes.values())
            for peer in peers:
                if not peer.is_running:
                    self.logger.debug(
                        "%s is no longer alive but had not been removed from pool",
                        peer)
                    continue
                self.logger.debug(
                    "%s: uptime=%s, received_msgs=%d",
                    peer,
                    humanize_seconds(peer.uptime),
                    peer.received_msgs_count,
                )
                self.logger.debug(
                    "client_version_string='%s'",
                    peer.p2p_api.safe_client_version_string,
                )
                if not hasattr(peer, "eth_api"):
                    self.logger.warning("Huh? %s doesn't have an eth API",
                                        peer)
                for line in peer.get_extra_stats():
                    self.logger.debug("    %s", line)
            self.logger.debug("== End peer details == ")
            await self.sleep(self._report_interval)
Example #16
    def _update_record(self, remote: NodeAPI, expires_at: datetime.datetime, reason: str) -> None:
        record = self._get_record(remote.id)

        if expires_at > record.expires_at:
            # only update expiration if it is further in the future than the existing expiration
            record.expires_at = expires_at
        record.reason = reason
        record.error_count += 1

        self.session.add(record)
        # mypy doesn't know about the type of the `commit()` function
        self.session.commit()  # type: ignore

        usable_delta = expires_at - datetime.datetime.utcnow()
        self.logger.debug(
            '%s will not be retried for %s because %s',
            remote,
            humanize_seconds(usable_delta.total_seconds()),
            reason,
        )
Example #17
    def _update_record(self, remote: Node, timeout_seconds: int,
                       reason: str) -> None:
        uri = remote.uri()
        record = self._get_record(uri)
        record.error_count += 1

        adjusted_timeout_seconds = int(timeout_seconds *
                                       math.sqrt(record.error_count))
        record.expires_at += datetime.timedelta(
            seconds=adjusted_timeout_seconds)
        record.reason = reason

        self.session.add(record)
        # mypy doesn't know about the type of the `commit()` function
        self.session.commit()  # type: ignore

        self.logger.debug(
            '%s will not be retried for %s because %s',
            remote,
            humanize_seconds(adjusted_timeout_seconds),
            reason,
        )
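Example #17 stretches the blacklist window by the square root of the accumulated error count, so repeat offenders wait progressively longer without the timeout exploding. The growth curve, assuming a base timeout of 60 seconds (an arbitrary figure):

    import math

    timeout_seconds = 60  # assumed base timeout, for illustration
    for error_count in (1, 4, 9, 16):
        adjusted = int(timeout_seconds * math.sqrt(error_count))
        print(error_count, adjusted)  # 1 -> 60, 4 -> 120, 9 -> 180, 16 -> 240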
Example #18
    async def fulfill_prerequisites(self) -> None:
        try:
            checkpoint = await self._db.coro_get_block_header_by_hash(
                self._checkpoint.block_hash)
        except HeaderNotFound:
            pass
        else:
            self.logger.debug("Found checkpoint header %s locally", checkpoint)
            self.min_block_number = checkpoint.block_number

            if await self._are_prerequisites_complete(checkpoint):
                self.logger.debug(
                    "Found all needed checkpoint headers locally, skipping download"
                )
                return

        max_attempts = 1000

        for _attempt in range(max_attempts):
            try:
                peer = self._peer_pool.highest_td_peer
            except NoConnectedPeers:
                # Feels appropriate to wait a little longer while we aren't connected
                # to any peers yet.
                self.logger.debug(
                    "No peers are available to fulfill checkpoint prerequisites"
                )
                await asyncio.sleep(2)
                continue

            try:
                headers = await peer.chain_api.get_block_headers(
                    self._checkpoint.block_hash,
                    max_headers=FULL_BLOCKS_NEEDED_TO_START_BEAM,
                    skip=0,
                    reverse=False,
                )
            except NON_RESPONSE_FROM_PEERS as exc:
                # Nothing to do here. The ExchangeManager will disconnect if appropriate
                # and eventually lead us to a better peer.
                self.logger.debug(
                    "%s did not return checkpoint prerequisites: %r", peer,
                    exc)
                # Release the event loop so that "gone" peers don't keep getting returned here
                await asyncio.sleep(0)
                continue
            except PeerConnectionLost as exc:
                self.logger.debug(
                    "%s gone during checkpoint prerequisite request: %s", peer,
                    exc)
                # Wait until peer is fully disconnected before continuing, so we don't reattempt
                # with the same peer repeatedly.
                await peer.disconnect(DisconnectReason.DISCONNECT_REQUESTED)
                continue

            if not headers:
                self.logger.debug(
                    "Disconnecting from %s. Returned no header while resolving checkpoint",
                    peer)
                await peer.disconnect(DisconnectReason.USELESS_PEER)
            else:
                header = headers[0]

                distance_shortage = FULL_BLOCKS_NEEDED_TO_START_BEAM - len(
                    headers)
                if distance_shortage > 0:

                    if len(headers) == 1:
                        # We are exactly at the tip, spin another round so we can make a better ETA
                        self.logger.info(
                            "Checkpoint is too near the chain tip for Beam Sync to launch. "
                            "Beam Sync needs %d more headers to launch. Instead of waiting, "
                            "you can quit and restart with an older checkpoint.",
                            distance_shortage,
                        )
                        await asyncio.sleep(10)
                        continue

                    block_durations = tuple(current.timestamp -
                                            previous.timestamp
                                            for previous, current in zip(
                                                headers[:-1], headers[1:]))

                    avg_blocktime = sum(block_durations) / len(block_durations)
                    wait_seconds = distance_shortage * avg_blocktime

                    self.logger.info(
                        "Checkpoint is too near the chain tip for Beam Sync to launch. "
                        "Beam Sync needs %d more headers to launch. Instead of waiting, "
                        "you can quit and restart with an older checkpoint."
                        "The wait time is roughly %s.",
                        distance_shortage,
                        humanize_seconds(wait_seconds),
                    )

                    await asyncio.sleep(
                        min(wait_seconds, self.MAXIMUM_RETRY_SLEEP))
                    continue

                self.min_block_number = header.block_number
                await self._db.coro_persist_checkpoint_header(
                    header, self._checkpoint.score)

                self.logger.debug(
                    "Successfully fulfilled checkpoint prereqs with %s: %s",
                    peer,
                    header,
                )
                return

            await asyncio.sleep(0.05)

        raise asyncio.TimeoutError(
            f"Failed to get checkpoint header within {max_attempts} attempts")
Example #19
    async def _periodically_advertise_content(self) -> None:
        await self._network.routing_table_ready()

        send_channel, receive_channel = trio.open_memory_channel[ContentKey](
            self._concurrency)

        for _ in range(self._concurrency):
            self.manager.run_daemon_task(self._broadcast_worker,
                                         receive_channel)

        async for _ in every(30 * 60):
            start_at = trio.current_time()

            total_keys = len(self.content_storage)
            if not total_keys:
                continue

            first_key = first(
                self.content_storage.iter_closest(
                    NodeID(secrets.token_bytes(32))))

            self.logger.info(
                "content-processing-starting: total=%d  start=%s",
                total_keys,
                first_key.hex(),
            )

            processed_keys = 0

            last_key = first_key
            has_wrapped_around = False

            while self.manager.is_running:
                elapsed = trio.current_time() - start_at
                content_keys = tuple(
                    take(
                        self._concurrency * 2,
                        self.content_storage.enumerate_keys(
                            start_key=last_key),
                    ))

                # TODO: We need to adjust the
                # `ContentStorageAPI.enumerate_keys` to allow a
                # non-inclusive left bound so we can query all the keys
                # **after** the last key we processed.
                if content_keys and content_keys[0] == last_key:
                    content_keys = content_keys[1:]

                if not content_keys:
                    last_key = None
                    has_wrapped_around = True
                    continue

                for content_key in content_keys:
                    await send_channel.send(content_key)

                last_key = content_keys[-1]
                if has_wrapped_around and last_key >= first_key:
                    break

                processed_keys += len(content_keys)
                progress = processed_keys * 100 / total_keys

                self.logger.debug(
                    "content-processing: progress=%0.1f  processed=%d  "
                    "total=%d  at=%s  elapsed=%s",
                    progress,
                    processed_keys,
                    total_keys,
                    "None" if last_key is None else last_key.hex(),
                    humanize_seconds(int(elapsed)),
                )

            self.logger.info(
                "content-processing-finished: processed=%d/%d  elapsed=%s",
                processed_keys,
                total_keys,
                humanize_seconds(int(elapsed)),
            )
Example #20
# The parametrize decorator was not captured in this excerpt; the cases below
# are illustrative and assume eth_utils-style output.
@pytest.mark.parametrize('seconds,expected', ((0, '0s'), (1, '1s'), (61, '1m1s')))
def test_humanize_seconds(seconds, expected):
    actual = humanize_seconds(seconds)
    assert actual == expected
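For reference, humanize_seconds ships with eth_utils. A quick check of its output format (the exact strings depend on the eth_utils version, so treat these as illustrative):

    from eth_utils import humanize_seconds

    humanize_seconds(1)      # '1s'
    humanize_seconds(61)     # '1m1s'
    humanize_seconds(90061)  # e.g. '1d1h1m', keeping the most significant units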