async def _display_stats(self) -> None:
    """Periodically (every 5s) log download-queue depth and header/block
    throughput stats while this service is running.

    Reads queue state from ``self._block_body_tasks`` / ``self._receipt_tasks``
    and throughput numbers from ``self.tracker.report()``.
    """
    while self.is_operational:
        await self.sleep(5)
        self.logger.debug(
            "(in progress, queued, max size) of bodies, receipts: %r",
            [(q.num_in_progress(), len(q), q._maxsize) for q in (
                self._block_body_tasks,
                self._receipt_tasks,
            )],
        )

        stats = self.tracker.report()
        # Use an aware UTC datetime here. The previous
        # `datetime.utcnow().timestamp()` is a classic bug: utcnow() returns a
        # *naive* datetime and .timestamp() interprets naive values as *local*
        # time, skewing head_age by the machine's UTC offset.
        utcnow = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
        head_age = utcnow - stats.latest_head.timestamp
        self.logger.info(
            (
                "blks=%-4d "
                "txs=%-5d "
                "bps=%-3d "
                "tps=%-4d "
                "elapsed=%0.1f "
                "head=#%d %s "
                "age=%s"
            ),
            stats.num_blocks,
            stats.num_transactions,
            stats.blocks_per_second,
            stats.transactions_per_second,
            stats.elapsed,
            stats.latest_head.block_number,
            humanize_hash(stats.latest_head.hash),
            humanize_elapsed(head_age),
        )
async def _full_skeleton_sync(
        self, skeleton_syncer: SkeletonSyncer[TChainPeer]) -> None:
    """Consume skeleton header segments from *skeleton_syncer*, register each
    with the stitcher, and fill or verify the gaps between consecutive
    segments.

    :param skeleton_syncer: source of sparse ("skeleton") header segments
        produced by the leading peer
    :raises ValidationError: if a gap between segments exceeds
        ``MAX_HEADERS_FETCH`` or segments overlap/are out of order
        (negative gap)
    """
    skeleton_generator = skeleton_syncer.next_skeleton_segment()
    try:
        first_segment = await skeleton_generator.__anext__()
    except StopAsyncIteration:
        # Syncer finished/was cancelled before yielding anything; nothing to do.
        self.logger.debug(
            "Skeleton %s was cancelled before first header was returned",
            skeleton_syncer.peer,
        )
        return

    self.logger.debug(
        "Skeleton syncer asserts that parent (%s) of the first header (%s) is already present",
        humanize_hash(first_segment[0].parent_hash),
        first_segment[0],
    )
    # The parent of the first skeleton header must already be in our DB;
    # mark it as a finished dependency so the stitcher can anchor on it.
    first_parent = await self._db.coro_get_block_header_by_hash(
        first_segment[0].parent_hash)
    try:
        self._stitcher.set_finished_dependency(first_parent)
    except DuplicateTasks:
        # the first header of this segment was already registered: no problem, carry on
        pass

    self._stitcher.register_tasks(first_segment, ignore_duplicates=True)
    previous_segment = first_segment
    async for segment in self.wait_iter(skeleton_generator):
        self._stitcher.register_tasks(segment, ignore_duplicates=True)

        # Number of missing headers between the end of the previous segment
        # and the start of this one.
        gap_length = segment[0].block_number - previous_segment[
            -1].block_number - 1
        if gap_length > MAX_HEADERS_FETCH:
            raise ValidationError(
                f"Header skeleton gap of {gap_length} > {MAX_HEADERS_FETCH}"
            )
        elif gap_length == 0:
            # no need to fill in when there is no gap, just verify against previous header
            await self.wait(
                self._chain.coro_validate_chain(
                    previous_segment[-1],
                    segment,
                    SEAL_CHECK_RANDOM_SAMPLE_RATE,
                ))
        elif gap_length < 0:
            # Segments overlap or arrived out of order — the skeleton is invalid.
            raise ValidationError(
                f"Invalid headers: {gap_length} gap from {previous_segment} to {segment}"
            )
        else:
            # if the header filler is overloaded, this will pause
            await self.wait(
                self._meat.schedule_segment(
                    previous_segment[-1],
                    gap_length,
                    skeleton_syncer.peer,
                ))
        previous_segment = segment
async def _fetch_segment(self, peer: TChainPeer,
                         parent_header: BlockHeader,
                         length: int) -> Tuple[BlockHeader, ...]:
    """Download *length* headers following *parent_header* from *peer*,
    validate them, hand them to the stitcher, and return them.

    Returns an empty tuple when the response is missing, too short, or does
    not link to *parent_header*. Disconnects *peer* (and returns an empty
    tuple) when the segment fails chain validation.

    :raises ValidationError: if *length* exceeds the peer's advertised maximum
    """
    # Guard: never request more headers than the peer is willing to serve.
    if length > peer.max_headers_fetch:
        raise ValidationError(
            f"Can't request {length} headers, because peer maximum is {peer.max_headers_fetch}"
        )

    headers = await self._request_headers(
        peer, parent_header.block_number + 1, length)

    # Bail out early on an empty response...
    if not headers:
        return tuple()

    # ...or a segment that does not link to the expected parent.
    if headers[0].parent_hash != parent_header.hash:
        # Segment doesn't match leading peer, drop this peer
        # Eventually, we'll do something smarter, in case the leading peer is the divergent one
        self.logger.warning(
            "%s returned segment starting %s & parent %s, doesn't match %s, ignoring result...",
            peer,
            headers[0],
            humanize_hash(headers[0].parent_hash),
            parent_header,
        )
        return tuple()

    # ...or a short response.
    if len(headers) != length:
        self.logger.debug(
            "Ignoring %d headers from %s, because wanted %d",
            len(headers),
            peer,
            length,
        )
        return tuple()

    try:
        await self.wait(
            self._chain.coro_validate_chain(
                parent_header,
                headers,
                SEAL_CHECK_RANDOM_SAMPLE_RATE,
            ))
    except ValidationError as e:
        # Invalid headers are grounds for dropping the peer entirely.
        self.logger.warning(
            "Received invalid header segment from %s against known parent %s, "
            "disconnecting: %s",
            peer,
            parent_header,
            e,
        )
        await peer.disconnect(DisconnectReason.subprotocol_error)
        return tuple()

    # stitch headers together in order, ignoring duplicates
    self._stitcher.register_tasks(headers, ignore_duplicates=True)
    return headers
def test_humanize_hash(hash32, expected):
    """Verify that ``humanize_hash`` renders *hash32* as *expected*."""
    rendered = humanize_hash(hash32)
    assert rendered == expected