async def _sync(self, peer: ETHPeer) -> None:
    head = await self.chaindb.coro_get_canonical_head()
    head_td = await self.chaindb.coro_get_score(head.hash)
    if peer.head_td <= head_td:
        self.logger.info(
            "Head TD (%d) announced by %s not higher than ours (%d), not syncing",
            peer.head_td, peer, head_td)
        return

    self.logger.info("Starting sync with %s", peer)
    # FIXME: Fetch a batch of headers, in reverse order, starting from our current head, and
    # find the common ancestor between our chain and the peer's.
    start_at = max(0, head.block_number - eth.MAX_HEADERS_FETCH)
    while not self._sync_complete.is_set():
        self.logger.info("Fetching chain segment starting at #%d", start_at)
        peer.sub_proto.send_get_block_headers(start_at, eth.MAX_HEADERS_FETCH, reverse=False)
        try:
            headers = await wait_with_token(
                self._new_headers.get(),
                peer.wait_until_finished(),
                token=self.cancel_token,
                timeout=self._reply_timeout)
        except TimeoutError:
            self.logger.warn("Timeout waiting for header batch from %s, aborting sync", peer)
            await peer.stop()
            break

        if peer.is_finished():
            self.logger.info("%s disconnected, aborting sync", peer)
            break

        self.logger.info("Got headers segment starting at #%d", start_at)
        # TODO: Process headers for consistency.
        head_number = await self._process_headers(peer, headers)
        start_at = head_number + 1
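
# The sync loop above races the header queue against peer.wait_until_finished()
# via a wait_with_token() helper whose definition lives elsewhere in the p2p
# package. The version below is only a sketch of the expected behaviour: the
# CancelToken.wait() coroutine it relies on is an assumption, and the
# OperationCancelled exception (caught by the later revisions) is defined here
# just to keep the sketch self-contained.
import asyncio
from typing import Any, Awaitable


class OperationCancelled(Exception):
    # Assumed to mirror the exception raised by the real p2p cancel-token code.
    pass


async def wait_with_token(*awaitables: Awaitable[Any], token: Any, timeout: float = None) -> Any:
    # Schedule every awaitable plus the cancel token's wait() coroutine, and
    # return the result of whichever finishes first.
    futures = [asyncio.ensure_future(awaitable) for awaitable in awaitables]
    token_future = asyncio.ensure_future(token.wait())
    done, pending = await asyncio.wait(
        futures + [token_future], timeout=timeout, return_when=asyncio.FIRST_COMPLETED)
    for future in pending:
        future.cancel()
    if not done:
        # asyncio.wait() signals a timeout by returning an empty 'done' set
        # rather than raising, so convert that into the expected exception.
        raise TimeoutError()
    if token_future in done:
        raise OperationCancelled("CancelToken triggered while waiting")
    return done.pop().result()
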
async def _sync(self, peer: ETHPeer) -> None:
    head = await self.chaindb.coro_get_canonical_head()
    head_td = await self.chaindb.coro_get_score(head.hash)
    if peer.head_td <= head_td:
        self.logger.debug(
            "Head TD (%d) announced by %s not higher than ours (%d), not syncing",
            peer.head_td, peer, head_td)
        return

    # FIXME: Fetch a batch of headers, in reverse order, starting from our current head, and
    # find the common ancestor between our chain and the peer's.
    start_at = max(0, head.block_number - eth.MAX_HEADERS_FETCH)
    self.logger.debug("Asking %s for header batch starting at %d", peer, start_at)
    peer.sub_proto.send_get_block_headers(start_at, eth.MAX_HEADERS_FETCH, reverse=False)
    max_consecutive_timeouts = 3
    consecutive_timeouts = 0
    while True:
        try:
            headers = await wait_with_token(
                self._new_headers.get(),
                peer.wait_until_finished(),
                token=self.cancel_token,
                timeout=3)
        except OperationCancelled:
            break
        except TimeoutError:
            self.logger.debug("Timeout waiting for header batch from %s", peer)
            consecutive_timeouts += 1
            if consecutive_timeouts > max_consecutive_timeouts:
                self.logger.debug(
                    "Too many consecutive timeouts waiting for header batch, aborting sync "
                    "with %s", peer)
                break
            continue

        if peer.is_finished():
            self.logger.debug("%s disconnected, stopping sync", peer)
            break

        consecutive_timeouts = 0
        if headers[-1].block_number <= start_at:
            self.logger.debug(
                "Ignoring headers from %d to %d as they've been processed already",
                headers[0].block_number, headers[-1].block_number)
            continue

        # TODO: Process headers for consistency.
        for header in headers:
            await self.chaindb.coro_persist_header(header)
            start_at = header.block_number

        self._body_requests.put_nowait(headers)
        self._receipt_requests.put_nowait(headers)

        self.logger.debug("Asking %s for header batch starting at %d", peer, start_at)
        # TODO: Instead of requesting sequential batches from a single peer, request a header
        # skeleton and make concurrent requests, using as many peers as possible, to fill the
        # skeleton.
        peer.sub_proto.send_get_block_headers(start_at, eth.MAX_HEADERS_FETCH, reverse=False)
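
# This revision stops processing headers inline and instead hands each batch to
# self._body_requests / self._receipt_requests for separate downloader tasks to
# consume. A consumer for the body queue might look like the sketch below;
# get_idle_peer() is an invented name for whatever peer-selection helper the
# syncer would use (it does not appear in the snippets above), and the batching
# is deliberately naive.
async def _body_downloader(self) -> None:
    while True:
        headers = await self._body_requests.get()
        peer = await self.get_idle_peer()  # hypothetical helper, not in the code above
        self.logger.debug("Requesting %d block bodies from %s", len(headers), peer)
        # GetBlockBodies takes the hashes of the blocks whose bodies we want.
        peer.sub_proto.send_get_block_bodies([header.hash for header in headers])
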
async def _sync(self, peer: ETHPeer) -> None:
    head = await self.chaindb.coro_get_canonical_head()
    head_td = await self.chaindb.coro_get_score(head.hash)
    if peer.head_td <= head_td:
        self.logger.info(
            "Head TD (%d) announced by %s not higher than ours (%d), not syncing",
            peer.head_td, peer, head_td)
        return

    # FIXME: Fetch a batch of headers, in reverse order, starting from our current head, and
    # find the common ancestor between our chain and the peer's.
    start_at = max(0, head.block_number - eth.MAX_HEADERS_FETCH)
    self.logger.debug("Asking %s for header batch starting at %d", peer, start_at)
    peer.sub_proto.send_get_block_headers(start_at, eth.MAX_HEADERS_FETCH, reverse=False)
    while True:
        # TODO: Consider stalling header fetching if there are more than X blocks/receipts
        # pending, to avoid timeouts caused by us not being able to process (decode/store)
        # blocks/receipts fast enough.
        try:
            headers = await wait_with_token(
                self._new_headers.get(),
                peer.wait_until_finished(),
                token=self.cancel_token,
                timeout=self._reply_timeout)
        except OperationCancelled:
            break
        except TimeoutError:
            self.logger.warn("Timeout waiting for header batch from %s, aborting sync", peer)
            await peer.stop()
            break

        if peer.is_finished():
            self.logger.info("%s disconnected, aborting sync", peer)
            break

        # TODO: Process headers for consistency.
        for header in headers:
            await self.chaindb.coro_persist_header(header)
            start_at = header.block_number + 1

        self._body_requests.put_nowait(headers)
        self._receipt_requests.put_nowait(headers)

        self.logger.debug("Asking %s for header batch starting at %d", peer, start_at)
        # TODO: Instead of requesting sequential batches from a single peer, request a header
        # skeleton and make concurrent requests, using as many peers as possible, to fill the
        # skeleton.
        peer.sub_proto.send_get_block_headers(start_at, eth.MAX_HEADERS_FETCH, reverse=False)
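
# The TODO at the top of the loop above suggests stalling header fetching when
# too many blocks/receipts are still pending. One minimal way to get that
# back-pressure, shown here purely as a sketch (the MAX_PENDING_BATCHES limit
# is an assumption, not from the original code), is to make the hand-off queues
# bounded and await put() instead of calling put_nowait():
import asyncio


class ChainSyncer:  # minimal sketch, assuming the queues live on the syncer

    MAX_PENDING_BATCHES = 10  # assumed limit

    def __init__(self) -> None:
        # Bounded queues give back-pressure for free: put() suspends once the
        # queue is full, which stalls the header-fetching loop until the
        # downloaders catch up.
        self._body_requests: asyncio.Queue = asyncio.Queue(maxsize=self.MAX_PENDING_BATCHES)
        self._receipt_requests: asyncio.Queue = asyncio.Queue(maxsize=self.MAX_PENDING_BATCHES)

    async def _enqueue_block_parts(self, headers) -> None:
        # Would replace the put_nowait() calls in the sync loop above.
        await self._body_requests.put(headers)
        await self._receipt_requests.put(headers)
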
async def _sync(self, peer: ETHPeer) -> None:
    head = await self.chaindb.coro_get_canonical_head()
    head_td = await self.chaindb.coro_get_score(head.hash)
    if peer.head_td <= head_td:
        self.logger.info(
            "Head TD (%d) announced by %s not higher than ours (%d), not syncing",
            peer.head_td, peer, head_td)
        return

    self.logger.info("Starting sync with %s", peer)
    # FIXME: Fetch a batch of headers, in reverse order, starting from our current head, and
    # find the common ancestor between our chain and the peer's.
    start_at = max(0, head.block_number - eth.MAX_HEADERS_FETCH)
    while True:
        self.logger.info("Fetching chain segment starting at #%d", start_at)
        peer.sub_proto.send_get_block_headers(start_at, eth.MAX_HEADERS_FETCH, reverse=False)
        try:
            headers = await wait_with_token(
                self._new_headers.get(),
                peer.wait_until_finished(),
                token=self.cancel_token,
                timeout=self._reply_timeout)
        except TimeoutError:
            self.logger.warn("Timeout waiting for header batch from %s, aborting sync", peer)
            await peer.stop()
            break

        if peer.is_finished():
            self.logger.info("%s disconnected, aborting sync", peer)
            break

        self.logger.info("Got headers segment starting at #%d", start_at)

        # TODO: Process headers for consistency.

        await self._download_block_parts(
            [header for header in headers if not _is_body_empty(header)],
            self.request_bodies,
            self._downloaded_bodies,
            _body_key,
            'body')

        self.logger.info("Got block bodies for chain segment starting at #%d", start_at)

        missing_receipts = [header for header in headers if not _is_receipts_empty(header)]
        # Post-Byzantium blocks may have identical receipt roots (e.g. when they have the same
        # number of transactions and all succeed/failed: ropsten blocks 2503212 and 2503284),
        # so we do this to avoid requesting the same receipts multiple times.
        missing_receipts = list(unique(missing_receipts, key=_receipts_key))
        await self._download_block_parts(
            missing_receipts,
            self.request_receipts,
            self._downloaded_receipts,
            _receipts_key,
            'receipt')

        self.logger.info("Got block receipts for chain segment starting at #%d", start_at)

        for header in headers:
            await self.chaindb.coro_persist_header(header)
            start_at = header.block_number + 1

        self.logger.info("Imported chain segment, new head: #%d", start_at - 1)
        head = await self.chaindb.coro_get_canonical_head()
        if head.hash == peer.head_hash:
            self.logger.info("Chain sync with %s completed", peer)
            self._sync_complete.set()
            break
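
# The final revision filters headers with _is_body_empty()/_is_receipts_empty()
# and deduplicates with _body_key()/_receipts_key(), none of which are shown in
# the snippet (unique() above behaves like cytoolz.unique, which drops items
# with a repeated key). The definitions below are a sketch of what those
# helpers plausibly look like; BLANK_ROOT_HASH and EMPTY_UNCLE_HASH (the roots
# of an empty trie and an empty uncle list) are assumed to come from
# evm.constants.
from typing import Tuple

from evm.constants import BLANK_ROOT_HASH, EMPTY_UNCLE_HASH
from evm.rlp.headers import BlockHeader


def _is_body_empty(header: BlockHeader) -> bool:
    # A block with no transactions and no uncles has no body worth fetching.
    return header.transaction_root == BLANK_ROOT_HASH and header.uncles_hash == EMPTY_UNCLE_HASH


def _is_receipts_empty(header: BlockHeader) -> bool:
    return header.receipt_root == BLANK_ROOT_HASH


def _body_key(header: BlockHeader) -> Tuple[bytes, bytes]:
    # Two headers with the same transaction root and uncles hash share a body.
    return (header.transaction_root, header.uncles_hash)


def _receipts_key(header: BlockHeader) -> bytes:
    # Distinct post-Byzantium blocks can share a receipt root, hence the
    # unique(missing_receipts, key=_receipts_key) call above.
    return header.receipt_root
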