def __init__(self,
             chain: AsyncChain,
             db: AsyncHeaderDB,
             peer_pool: PeerPool,
             token: CancelToken = None) -> None:
    """Wire up the header syncer over *chain*/*db* and the given peer pool.

    A dedicated completion token is created so that finishing the sync can
    cancel this service; when the caller supplies *token*, the completion
    token is chained under it so either one can stop the service.
    """
    self.complete_token = CancelToken(
        'trinity.sync.common.BaseHeaderChainSyncer.SyncCompleted')
    # Either the caller's token (with ours chained under it) or ours alone.
    root_token = (
        self.complete_token
        if token is None
        else token.chain(self.complete_token)
    )
    super().__init__(root_token)
    self.chain, self.db, self.peer_pool = chain, db, peer_pool
    self._handler = PeerRequestHandler(self.db, self.logger, self.cancel_token)
    self._syncing = False
    self._sync_complete = asyncio.Event()
    self._sync_requests: asyncio.Queue[HeaderRequestingPeer] = asyncio.Queue()
    # Pending-queue size: large enough that processing consumers are never
    # starved, small enough to bound wasteful over-requesting before
    # post-processing can catch up.
    max_buffered_headers = ETHPeer.max_headers_fetch * 8
    self.header_queue = TaskQueue(max_buffered_headers, attrgetter('block_number'))
def __init__(self,
             chain: AsyncChain,
             db: AsyncHeaderDB,
             peer_pool: PeerPool,
             token: CancelToken = None) -> None:
    """Initialize sync state for *chain*/*db* served by *peer_pool*."""
    super().__init__(token)
    self.chain, self.db, self.peer_pool = chain, db, peer_pool
    self._handler = PeerRequestHandler(self.db, self.logger, self.cancel_token)
    # No sync running yet; completion is signalled through the event below.
    self._syncing = False
    self._sync_complete = asyncio.Event()
    # Peers queued up as candidates to sync from.
    self._sync_requests: asyncio.Queue[HeaderRequestingPeer] = asyncio.Queue()
def __init__(self,
             chaindb: AsyncChainDB,
             account_db: AsyncBaseDB,
             root_hash: bytes,
             peer_pool: PeerPool,
             token: CancelToken = None) -> None:
    """Set up a state sync rooted at *root_hash*, downloading from *peer_pool*."""
    super().__init__(token)
    self.chaindb = chaindb
    self.peer_pool = peer_pool
    self.root_hash = root_hash
    self.scheduler = StateSync(root_hash, account_db, self.logger)
    self._handler = PeerRequestHandler(self.chaindb, self.logger, self.cancel_token)
    # NOTE(review): _reply_timeout is presumably a class-level attribute — it is
    # not visible in this chunk; confirm against the class definition.
    self.request_tracker = TrieNodeRequestTracker(self._reply_timeout, self.logger)
    # For each peer, the set of trie-node hashes that peer reported missing.
    self._peer_missing_nodes: Dict[ETHPeer, Set[Hash32]] = collections.defaultdict(set)
def __init__(self,
             chain: AsyncChain,
             db: AsyncHeaderDB,
             peer_pool: BaseChainPeerPool,
             token: CancelToken = None) -> None:
    """Initialize header syncing over *chain*/*db* with a chain-tip monitor."""
    super().__init__(token)
    self.chain, self.db, self.peer_pool = chain, db, peer_pool
    self._handler = PeerRequestHandler(self.db, self.logger, self.cancel_token)
    # The per-peer syncer and last sync target start out unset.
    self._peer_header_syncer: 'PeerHeaderSyncer' = None
    self._last_target_header_hash = None
    self._tip_monitor = self.tip_monitor_class(peer_pool, token=self.cancel_token)
    # Pending-queue size: big enough to keep the processing consumers fed,
    # small enough to avoid wasteful over-requesting before post-processing
    # can happen.
    max_buffered_headers = ETHPeer.max_headers_fetch * 8
    self.header_queue = TaskQueue(max_buffered_headers, attrgetter('block_number'))
def __init__(self,
             chaindb: AsyncChainDB,
             account_db: AsyncBaseDB,
             root_hash: bytes,
             peer_pool: PeerPool,
             token: CancelToken = None) -> None:
    """Set up a full state download rooted at *root_hash*.

    Visited-node bookkeeping is kept in an on-disk LevelDB cache (inside a
    temporary directory) rather than in memory.
    """
    super().__init__(token)
    self.chaindb = chaindb
    self.peer_pool = peer_pool
    self.root_hash = root_hash
    # We use a LevelDB instance for the nodes cache because a full state
    # download, if run uninterrupted, will visit more than 180M nodes, making
    # an in-memory cache unfeasible.
    self._nodes_cache_dir = tempfile.TemporaryDirectory(prefix="pyevm-state-sync-cache")
    # Fix: TemporaryDirectory.name is a str, and typing.cast performs no
    # runtime conversion — the previous cast(Path, ...) only silenced the type
    # checker while still passing a str. Construct a real Path instead.
    self.scheduler = StateSync(
        root_hash,
        account_db,
        LevelDB(Path(self._nodes_cache_dir.name)),
        self.logger,
    )
    self._handler = PeerRequestHandler(self.chaindb, self.logger, self.cancel_token)
    # NOTE(review): _reply_timeout is presumably a class-level attribute — it is
    # not visible in this chunk; confirm against the class definition.
    self.request_tracker = TrieNodeRequestTracker(self._reply_timeout, self.logger)
    # For each peer, the set of trie-node hashes that peer reported missing.
    self._peer_missing_nodes: Dict[ETHPeer, Set[Hash32]] = collections.defaultdict(set)