Example #1
def block_ranges(start_block: BlockNumber,
                 last_block: Optional[BlockNumber],
                 step: int = 5) -> Iterable[Tuple[BlockNumber, BlockNumber]]:
    """Returns 2-tuple ranges describing ranges of block from start_block to last_block

       Ranges do not overlap to facilitate use as ``toBlock``, ``fromBlock``
       json-rpc arguments, which are both inclusive.
    """

    if last_block is not None and start_block > last_block:
        raise TypeError("Incompatible start and stop arguments.",
                        "Start must be less than or equal to stop.")

    return (
        (BlockNumber(from_block), BlockNumber(to_block - 1))
        for from_block, to_block in segment_count(start_block, last_block + 1, step)
    )
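A minimal sketch of the resulting behavior, assuming `segment_count` yields half-open (start, end) segments of at most `step` blocks; the stand-in helper below is hypothetical, not the library's implementation:

from typing import Iterable, Tuple

def segment_count(start: int, stop: int, step: int) -> Iterable[Tuple[int, int]]:
    # Hypothetical stand-in: split [start, stop) into step-sized half-open segments.
    for left in range(start, stop, step):
        yield left, min(left + step, stop)

# With block_ranges above, each segment end becomes inclusive:
# list(block_ranges(0, 12, 5)) == [(0, 4), (5, 9), (10, 12)]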
Example #2
 def validate_seal(self, header: BlockHeader) -> None:
     """
     Validate the seal on the given header.
     """
     VM = self.get_vm_class_for_block_number(
         BlockNumber(header.block_number))
     VM.validate_seal(header)
Example #3
    def query_eth_highest_block(self) -> BlockNumber:
        """ Attempts to query an external service for the block height

        Returns the highest blockNumber

        May raise RemoteError if querying fails
        """

        url = 'https://api.blockcypher.com/v1/eth/main'
        log.debug('Querying blockcypher for ETH highest block', url=url)
        eth_resp: Optional[Dict[str, str]]
        try:
            eth_resp = request_get_dict(url)
        except (RemoteError, UnableToDecryptRemoteData, requests.exceptions.RequestException):
            eth_resp = None

        block_number: Optional[int]
        if eth_resp and 'height' in eth_resp:
            block_number = int(eth_resp['height'])
            log.debug('ETH highest block result', block=block_number)
        else:
            block_number = self.etherscan.get_latest_block_number()
            log.debug('ETH highest block result', block=block_number)

        return BlockNumber(block_number)
Example #4
    def __init__(self, db: AtomicDatabaseAPI, peer_pool: ETHPeerPool,
                 queen_tracker: QueenTrackerAPI,
                 event_bus: EndpointAPI) -> None:
        self.logger = get_logger('trinity.sync.beam.BeamDownloader')
        self._db = db
        self._trie_db = HexaryTrie(db)
        self._event_bus = event_bus

        # Track the needed node data that is urgent and important:
        buffer_size = MAX_STATE_FETCH * REQUEST_BUFFER_MULTIPLIER
        self._node_tasks = TaskQueue[Hash32](buffer_size, lambda task: 0)

        # list of events waiting on new data
        self._new_data_event: asyncio.Event = asyncio.Event()
        self._preview_events = {}

        self._peer_pool = peer_pool

        # Track node data for upcoming blocks
        self._block_number_lookup = defaultdict(lambda: BlockNumber(0))
        self._maybe_useful_nodes = TaskQueue[Hash32](
            buffer_size,
            # Prefer trie nodes from earliest blocks
            lambda node_hash: self._block_number_lookup[node_hash],
        )

        self._num_urgent_requests_by_peer = Counter()
        self._num_predictive_requests_by_peer = Counter()

        self._queen_tracker = queen_tracker
        self._threadpool = ThreadPoolExecutor()
        asyncio.get_event_loop().set_default_executor(self._threadpool)
Example #5
 def get_ancestors(self, limit: int, header: BlockHeader) -> Iterator[BaseBlock]:
     """
     Return up to `limit` ancestor blocks of the given header, newest first.
     """
     lower_limit = max(header.block_number - limit, 0)
     for n in reversed(range(lower_limit, header.block_number)):
         yield self.get_canonical_block_by_number(BlockNumber(n))
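For illustration (objects are hypothetical): with header.block_number == 5 and limit == 3, the generator yields canonical blocks 4, 3 and 2, newest first, while the max(..., 0) clamp stops the walk at genesis.

# Hypothetical chain and header, shown only to illustrate the iteration order:
ancestors = [block.number for block in chain.get_ancestors(3, header_at_5)]
assert ancestors == [4, 3, 2]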
Example #6
 def create_header_from_parent(
         self,
         parent_header: BlockHeaderAPI,
         **header_params: HeaderParams) -> BlockHeaderAPI:
     vm_class = self.get_vm_class_for_block_number(
         block_number=BlockNumber(parent_header.block_number + 1),
     )
     return vm_class.create_header_from_parent(parent_header, **header_params)
Example #7
def test_estimate_block_number_for_period():
    timestamp = maya.now().epoch
    period = timestamp // SECONDS_PER_PERIOD

    three_periods_back = period - 3
    ten_periods_back = period - 10
    latest_block_number = BlockNumber(12345678)

    now = maya.now()
    now_epoch = now.epoch
    # ensure the same time is used in method and in test
    with patch.object(maya, 'now', return_value=maya.MayaDT(epoch=now_epoch)):
        block_number_for_three_periods_back = estimate_block_number_for_period(period=three_periods_back,
                                                                               seconds_per_period=SECONDS_PER_PERIOD,
                                                                               latest_block=latest_block_number)
        block_number_for_ten_periods_back = estimate_block_number_for_period(period=ten_periods_back,
                                                                             seconds_per_period=SECONDS_PER_PERIOD,
                                                                             latest_block=latest_block_number)

    for past_period, block_number_for_past_period in ((three_periods_back, block_number_for_three_periods_back),
                                                      (ten_periods_back, block_number_for_ten_periods_back)):
        start_of_past_period = maya.MayaDT(epoch=(past_period * SECONDS_PER_PERIOD))
        diff_in_seconds = int((now - start_of_past_period).total_seconds())
        diff_in_blocks = diff_in_seconds // AVERAGE_BLOCK_TIME_IN_SECONDS

        assert block_number_for_past_period < latest_block_number
        assert block_number_for_past_period == (latest_block_number - diff_in_blocks)
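The assertions pin down the arithmetic: the estimate walks back from the latest block by the elapsed seconds divided by the average block time. A minimal sketch consistent with this test (the library's actual implementation may differ):

import maya

AVERAGE_BLOCK_TIME_IN_SECONDS = 14  # assumed value; the test imports its own constant

def estimate_block_number_for_period(period: int,
                                     seconds_per_period: int,
                                     latest_block: int) -> int:
    # Seconds elapsed since the start of the given period...
    period_start_epoch = period * seconds_per_period
    diff_in_seconds = int(maya.now().epoch - period_start_epoch)
    # ...converted into a block count at the average block time.
    diff_in_blocks = diff_in_seconds // AVERAGE_BLOCK_TIME_IN_SECONDS
    return latest_block - diff_in_blocks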
Example #8
 def test_eth_getBlockByNumber_earliest(
     self, web3: "Web3", empty_block: BlockData
 ) -> None:
     genesis_block = web3.eth.getBlock(BlockNumber(0))
     block = web3.eth.getBlock('earliest')
     assert block['number'] == 0
     assert block['hash'] == genesis_block['hash']
Example #9
def get_discv5_topic(trinity_config: TrinityConfig, protocol: Type[Protocol]) -> bytes:
    db_manager = create_db_consumer_manager(trinity_config.database_ipc_path)

    header_db = db_manager.get_headerdb()  # type: ignore
    genesis_hash = header_db.get_canonical_block_hash(BlockNumber(GENESIS_BLOCK_NUMBER))

    return get_v5_topic(protocol, genesis_hash)
Example #10
 async def _are_prerequisites_complete(self, checkpoint: BlockHeaderAPI) -> bool:
     block_numbers_to_download = range(
         checkpoint.block_number + 1,  # we already have the checkpoint, we can skip it
         checkpoint.block_number + FULL_BLOCKS_NEEDED_TO_START_BEAM,
     )
     last_header = checkpoint
     for block_int in block_numbers_to_download:
         block_num = BlockNumber(block_int)
         try:
             next_header = await self._db.coro_get_canonical_block_header_by_number(block_num)
         except HeaderNotFound:
             self.logger.debug(
                 "Checkpoint validation header at #%d, parent %s, is missing. "
                 "Downloading from peers...",
                 block_num,
                 last_header,
             )
             return False
         else:
             if next_header.parent_hash != last_header.hash:
                 self.logger.warning(
                     "Checkpoint %s is not on the local canonical chain, which has "
                     "%s following %s. Forcing the checkpoint to be canonical...",
                     checkpoint,
                     next_header,
                     last_header,
                 )
                 # re-download from checkpoint to assert that the checkpoint is canonical
                 return False
             else:
                 self.logger.debug("Validated checkpoint %s locally", next_header)
                 last_header = next_header
     else:
         # The loop returns early on any missing or mismatched header; reaching
         # this else clause means every header in the range was validated.
         return True
Example #11
    async def do_run(self, event_bus: EndpointAPI) -> None:
        trinity_config = self._boot_info.trinity_config
        beacon_app_config = trinity_config.get_app_config(BeaconAppConfig)
        chain_config = beacon_app_config.get_chain_config()
        base_db = DBClient.connect(trinity_config.database_ipc_path)

        # TODO: For now we use fake eth1 monitor.
        # if boot_info.args.eth1client_rpc:
        #     w3: Web3 = Web3.HTTPProvider(boot_info.args.eth1client_rpc)
        # else:
        #     w3: Web3 = None

        # TODO: For now we use fake eth1 monitor. So we load validators data from
        # interop setting and hardcode the deposit data into fake eth1 data provider.
        chain = chain_config.beacon_chain_class(base_db,
                                                chain_config.genesis_config)
        config = chain.get_state_machine().config
        key_set = load_yaml_at(
            Path(
                "eth2/beacon/scripts/quickstart_state/keygen_16_validators.yaml"
            ))
        pubkeys, privkeys, withdrawal_credentials = create_keypair_and_mock_withdraw_credentials(
            config,
            key_set  # type: ignore
        )
        initial_deposits = (create_mock_deposit_data(
            config=config,
            pubkey=pubkey,
            privkey=privkey,
            withdrawal_credentials=withdrawal_credential,
        ) for pubkey, privkey, withdrawal_credential in zip(
            pubkeys, privkeys, withdrawal_credentials))

        # Set the timestamp of the start block early enough that the eth1 monitor
        # can query up to 2 * `ETH1_FOLLOW_DISTANCE` blocks at the beginning.
        start_block_timestamp = (chain_config.genesis_time -
                                 3 * ETH1_FOLLOW_DISTANCE * AVERAGE_BLOCK_TIME)
        with base_db:
            fake_eth1_data_provider = FakeEth1DataProvider(
                start_block_number=START_BLOCK_NUMBER,
                start_block_timestamp=Timestamp(start_block_timestamp),
                num_deposits_per_block=NUM_DEPOSITS_PER_BLOCK,
                initial_deposits=tuple(initial_deposits),
            )

            eth1_monitor_service: Service = Eth1Monitor(
                eth1_data_provider=fake_eth1_data_provider,
                num_blocks_confirmed=NUM_BLOCKS_CONFIRMED,
                polling_period=POLLING_PERIOD,
                start_block_number=BlockNumber(START_BLOCK_NUMBER - 1),
                event_bus=event_bus,
                base_db=base_db,
            )

            try:
                await TrioManager.run_service(eth1_monitor_service)
            except Exception:
                await event_bus.broadcast(
                    ShutdownRequest("Eth1 Monitor ended unexpectedly"))
                raise
Example #12
def monitoring_service_mock() -> Generator[MonitoringService, None, None]:
    web3_mock = Web3Mock()

    mock_udc = Mock(address=bytes([8] * 20))
    mock_udc.functions.effectiveBalance.return_value.call.return_value = 10000
    mock_udc.functions.token.return_value.call.return_value = to_checksum_address(
        bytes([7] * 20))
    ms = MonitoringService(
        web3=web3_mock,
        private_key=PrivateKey(
            decode_hex(
                "3a1076bf45ab87712ad64ccb3b10217737f7faacbf2872e88fdd9a537d8fe266"
            )),
        db_filename=":memory:",
        contracts={
            CONTRACT_TOKEN_NETWORK_REGISTRY: Mock(address=bytes([9] * 20)),
            CONTRACT_USER_DEPOSIT: mock_udc,
            CONTRACT_MONITORING_SERVICE: Mock(address=bytes([1] * 20)),
            CONTRACT_SERVICE_REGISTRY: Mock(address=bytes([2] * 20)),
        },
        sync_start_block=BlockNumber(0),
        required_confirmations=BlockTimeout(0),
        poll_interval=0,
    )

    yield ms
Example #13
    def __init__(
        self,
        *,
        w3: Web3,
        deposit_contract_address: Address,
        deposit_contract_abi: Dict[str, Any],
        num_blocks_confirmed: int,
        polling_period: float,
        start_block_number: BlockNumber,
        event_bus: EndpointAPI,
        base_db: AtomicDatabaseAPI,
    ) -> None:
        self._w3 = w3
        self._deposit_contract = self._w3.eth.contract(
            address=deposit_contract_address, abi=deposit_contract_abi)
        self._deposit_event_abi = (
            self._deposit_contract.events.DepositEvent._get_event_abi())
        self._deposit_event_topic = encode_hex(
            event_abi_to_log_topic(self._deposit_event_abi))
        self._num_blocks_confirmed = num_blocks_confirmed
        self._polling_period = polling_period
        self._event_bus = event_bus
        self._db: BaseDepositDataDB = ListCachedDepositDataDB(
            base_db, BlockNumber(start_block_number - 1))

        self._block_timestamp_to_number = OrderedDict()
Example #14
 def _get_discv5_topic(self) -> bytes:
     genesis_hash = self.headerdb.get_canonical_block_hash(
         BlockNumber(GENESIS_BLOCK_NUMBER))
     # For now DiscoveryByTopicProtocol supports a single topic, so we use the latest version
     # of our supported protocols.
     proto = self.peer_class._supported_sub_protocols[-1]
     return get_v5_topic(proto, genesis_hash)
Example #15
def iter_latest_block_ranges(
    w3: "Web3",
    from_block: BlockNumber,
    to_block: Optional[Union[BlockNumber, LatestBlockParam]] = None,
) -> Iterable[Tuple[Optional[BlockNumber], Optional[BlockNumber]]]:
    """Returns an iterator unloading ranges of available blocks

    starting from `fromBlock` to the latest mined block,
    until reaching toBlock. e.g.:


    >>> blocks_to_filter = iter_latest_block_ranges(w3, 0, 50)
    >>> next(blocks_to_filter)  # latest block number = 11
    (0, 11)
    >>> next(blocks_to_filter)  # latest block number = 45
    (12, 45)
    >>> next(blocks_to_filter)  # latest block number = 50
    (46, 50)
    """
    for latest_block in iter_latest_block(w3, to_block):
        if latest_block is None:
            yield (None, None)
        elif from_block > latest_block:
            yield (None, None)
        else:
            yield (from_block, latest_block)
            from_block = BlockNumber(latest_block + 1)
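A hedged usage sketch (the loop body and the getLogs call are illustrative, not from the source): each yielded range can feed a log filter directly, and (None, None) means no new blocks have been mined yet.

for from_block, to_block in iter_latest_block_ranges(w3, BlockNumber(0), BlockNumber(50)):
    if from_block is None:
        continue  # nothing new mined yet
    logs = w3.eth.getLogs({"fromBlock": from_block, "toBlock": to_block})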
Example #16
def _w3_get_block(w3: Web3, *args: Any, **kwargs: Any) -> Eth1Block:
    block_dict = w3.eth.getBlock(*args, **kwargs)
    return Eth1Block(
        block_hash=Hash32(block_dict["hash"]),
        number=BlockNumber(block_dict["number"]),
        timestamp=Timestamp(block_dict["timestamp"]),
    )
Example #17
    def from_genesis(cls,
                     base_db: AtomicDatabaseAPI,
                     genesis_params: Dict[str, HeaderParams],
                     genesis_state: AccountState = None) -> 'BaseChain':
        genesis_vm_class = cls.get_vm_class_for_block_number(BlockNumber(0))

        pre_genesis_header = BlockHeader(difficulty=0, block_number=-1, gas_limit=0)
        chain_context = ChainContext(cls.chain_id)
        state = genesis_vm_class.build_state(base_db, pre_genesis_header, chain_context)

        if genesis_state is None:
            genesis_state = {}

        # mutation
        apply_state_dict(state, genesis_state)
        state.persist()

        if 'state_root' not in genesis_params:
            # If the genesis state_root was not specified, use the value
            # computed from the initialized state database.
            genesis_params = assoc(genesis_params, 'state_root', state.state_root)
        elif genesis_params['state_root'] != state.state_root:
            # If the genesis state_root was specified, validate that it matches
            # the computed state from the initialized state database.
            raise ValidationError(
                "The provided genesis state root does not match the computed "
                f"genesis state root.  Got {state.state_root}.  "
                f"Expected {genesis_params['state_root']}"
            )

        genesis_header = BlockHeader(**genesis_params)
        return cls.from_genesis_header(base_db, genesis_header)
Example #18
    def _get_next_gap(self) -> BlockRange:
        gaps, future_tip_block = self._db.get_chain_gaps()
        header_gaps, future_tip_header = self._db.get_header_chain_gaps()
        try:
            actionable_gap = self.get_topmost_actionable_gap(gaps, header_gaps)

        except NoActionableGap:
            # We do not have gaps in the chain of blocks but we may still have a gap from the last
            # block up until the highest consecutive written header.
            if len(header_gaps) > 0:
                # The header chain has gaps, find out the lowest missing header
                lowest_missing_header, _ = header_gaps[0]
            else:
                # It doesn't have gaps, so the future_tip_header is the lowest missing header
                lowest_missing_header = future_tip_header

            highest_consecutive_header = lowest_missing_header - 1
            if highest_consecutive_header >= future_tip_block:
                # The header before the lowest missing header is the highest consecutive header
                # that exists in the db and it is higher than the future tip block. That's a gap
                # we can try to close.
                return future_tip_block, BlockNumber(highest_consecutive_header)
            else:
                raise ValidationError("No gaps in the chain of blocks")
        else:
            return actionable_gap
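A walk-through under assumed values: if blocks are written through #100 but headers run contiguously through #150, there is no actionable block gap, yet the header chain is ahead of the block chain, and that stretch is returned as closable.

# Illustrative values, not from the source:
# gaps == (), future_tip_block == 101 (blocks exist through #100)
# header_gaps == (), future_tip_header == 151 (headers exist through #150)
# -> lowest_missing_header == 151, highest_consecutive_header == 150 >= 101,
#    so _get_next_gap() returns (101, 150).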
Example #19
    async def get_handshakers(self) -> Tuple[HandshakerAPI, ...]:
        headerdb = self.context.headerdb
        wait = self.cancel_token.cancellable_wait

        head = await wait(headerdb.coro_get_canonical_head())
        total_difficulty = await wait(headerdb.coro_get_score(head.hash))
        genesis_hash = await wait(
            headerdb.coro_get_canonical_block_hash(BlockNumber(GENESIS_BLOCK_NUMBER))
        )
        handshake_params_kwargs = dict(
            network_id=self.context.network_id,
            head_td=total_difficulty,
            head_hash=head.hash,
            head_number=head.block_number,
            genesis_hash=genesis_hash,
            serve_headers=True,
            # TODO: these should be configurable to allow us to serve this data.
            serve_chain_since=None,
            serve_state_since=None,
            serve_recent_state=None,
            serve_recent_chain=None,
            tx_relay=None,
            flow_control_bl=None,
            flow_control_mcr=None,
            flow_control_mrr=None,
            announce_type=None,
        )
        v1_handshake_params = LESHandshakeParams(version=1, **handshake_params_kwargs)
        v2_handshake_params = LESHandshakeParams(version=2, **handshake_params_kwargs)

        return (
            LESV1Handshaker(handshake_params=v1_handshake_params),
            LESV2Handshaker(handshake_params=v2_handshake_params),
        )
Example #20
    def test_eth_getLogs_with_logs_topic_args(
        self,
        web3: "Web3",
        block_with_txn_with_log: BlockData,
        emitter_contract_address: ChecksumAddress,
        txn_hash_with_log: HexStr,
    ) -> None:
        def assert_contains_log(result: Sequence[LogReceipt]) -> None:
            assert len(result) == 1
            log_entry = result[0]
            assert log_entry['blockNumber'] == block_with_txn_with_log['number']
            assert log_entry['blockHash'] == block_with_txn_with_log['hash']
            assert log_entry['logIndex'] == 0
            assert is_same_address(log_entry['address'], emitter_contract_address)
            assert log_entry['transactionIndex'] == 0
            assert log_entry['transactionHash'] == HexBytes(txn_hash_with_log)

        # Test with None event sig

        filter_params: FilterParams = {
            "fromBlock": BlockNumber(0),
            "topics": [
                None,
                HexStr('0x000000000000000000000000000000000000000000000000000000000000d431'),
            ],
        }

        result = web3.eth.getLogs(filter_params)
        assert_contains_log(result)

        # Test with None indexed arg
        filter_params = {
            "fromBlock": BlockNumber(0),
            "topics": [
                HexStr('0x057bc32826fbe161da1c110afcdcae7c109a8b69149f727fc37a603c60ef94ca'),
                None,
            ],
        }
        result = web3.eth.getLogs(filter_params)
        assert_contains_log(result)
Example #21
def validate_forkid(
    forkid: ForkID,
    genesis_hash: Hash32,
    head: BlockNumber,
    fork_blocks: Tuple[BlockNumber, ...],
) -> None:
    """
    Validate the given ForkID against our current state.

    Validation rules are described at
      https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md#validation-rules
    """

    fork_blocks_list = list(fork_blocks)
    checksums = [binascii.crc32(genesis_hash)]
    for block_number in fork_blocks_list:
        block_number_as_bytes = block_number.to_bytes(8, 'big')
        checksums.append(binascii.crc32(block_number_as_bytes, checksums[-1]))

    fork_blocks_list.append(BlockNumber(sys.maxsize))
    for i, block_number in enumerate(fork_blocks_list):
        if head > block_number:
            # Our head is beyond this fork, so continue. We have a dummy fork block as the last
            # item in fork_blocks to ensure this check fails eventually.
            continue

        # Found the first unpassed fork block, check if our current state matches
        # the remote checksum (rule #1).
        if _crc_to_bytes(checksums[i]) == forkid.hash:
            # Fork checksum matched, check if a remote future fork block already passed
            # locally without the local node being aware of it (rule #1a).
            if forkid.next > 0 and head >= forkid.next:
                raise LocalChainIncompatibleOrStale("rule 1a")
            # Haven't passed locally a remote-only fork, accept the connection (rule #1b).
            return

        # We're in different forks currently, check if the remote checksum is a subset of our
        # local forks (rule #2).
        for b, checksum in itertools.zip_longest(fork_blocks_list[:i],
                                                 checksums[:i]):
            if _crc_to_bytes(checksum) == forkid.hash:
                # Remote checksum is a subset, validate based on the announced next fork
                if b != forkid.next:
                    raise RemoteChainIsStale()
                return

        # Remote chain is not a subset of our local one, check if it's a superset by
        # any chance, signalling that we're simply out of sync (rule #3).
        for checksum in checksums[i:]:
            if _crc_to_bytes(checksum) == forkid.hash:
                # Remote checksum is a superset, ignore upcoming forks
                return

        # No exact, subset or superset match. We are on differing chains, reject.
        raise LocalChainIncompatibleOrStale("different chains")

    # Something is very wrong if we get here, but better to accept than reject.
    logging.getLogger('p2p').error("Impossible forkid validation for %s",
                                   forkid)
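The checksum comparisons assume `_crc_to_bytes` packs the unsigned 32-bit CRC into four big-endian bytes, matching the EIP-2124 FORK_HASH encoding; a one-line sketch under that assumption:

import struct

def _crc_to_bytes(crc: int) -> bytes:
    # Pack the CRC32 checksum as 4 big-endian bytes, per EIP-2124.
    return struct.pack('>I', crc)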
Example #22
    async def _fetch_segment(self, peer: TChainPeer,
                             parent_header: BlockHeader,
                             length: int) -> Tuple[BlockHeader, ...]:
        if length > peer.max_headers_fetch:
            raise ValidationError(
                f"Can't request {length} headers, because peer maximum is {peer.max_headers_fetch}"
            )

        headers = await self._request_headers(
            peer,
            BlockNumber(parent_header.block_number + 1),
            length,
        )
        if not headers:
            return tuple()
        elif headers[0].parent_hash != parent_header.hash:
            # Segment doesn't match leading peer, drop this peer
            # Eventually, we'll do something smarter, in case the leading peer is the divergent one
            self.logger.warning(
                "%s returned segment starting %s & parent %s, doesn't match %s, ignoring result...",
                peer,
                headers[0],
                humanize_hash(headers[0].parent_hash),
                parent_header,
            )
            return tuple()
        elif len(headers) != length:
            self.logger.debug(
                "Ignoring %d headers from %s, because wanted %d",
                len(headers),
                peer,
                length,
            )
            return tuple()
        else:
            try:
                await self.wait(
                    self._chain.coro_validate_chain(
                        parent_header,
                        headers,
                        SEAL_CHECK_RANDOM_SAMPLE_RATE,
                    ))
            except ValidationError as e:
                self.logger.warning(
                    "Received invalid header segment from %s against known parent %s: %s",
                    peer,
                    parent_header,
                    e,
                )
                return tuple()
            else:
                # stitch headers together in order, ignoring duplicates
                self._stitcher.register_tasks(headers, ignore_duplicates=True)
                if self.sync_progress:
                    last_received_header = headers[-1]
                    self.sync_progress = self.sync_progress.update_current_block(
                        last_received_header.block_number,
                    )
                return headers
Example #23
    def _get_closest_eth1_voting_period_start_block(
            self, timestamp: Timestamp) -> BlockNumber:
        """
        Find the timestamp in `self._block_timestamp_to_number` which is the largest timestamp
        smaller than `timestamp`.
        Assuming `self._block_timestamp_to_number` is in ascending order, the most naive
        way to find the timestamp is to traverse from its tail.
        """
        # Compare with the largest recorded block timestamp first before querying
        # for the latest block.
        # If timestamp larger than largest block timestamp, request block from eth1 provider.
        if (self._largest_block_timestamp is None
                or timestamp > self._largest_block_timestamp):
            try:
                block = self._eth1_data_provider.get_block("latest")
            except BlockNotFound:
                raise Eth1MonitorValidationError("Fail to get latest block")
            if block.timestamp <= timestamp:
                return block.number
            else:
                block_number = block.number
                # Try the latest `self._num_blocks_confirmed` blocks until we give up
                for i in range(1, self._num_blocks_confirmed + 1):
                    lookback_number = block_number - i
                    if lookback_number < 0:
                        break
                    else:
                        shifted_block = BlockNumber(lookback_number)
                    block = self._eth1_data_provider.get_block(shifted_block)
                    if block.timestamp <= timestamp:
                        return block.number
                raise Eth1BlockNotFound(
                    "Cannot find block with timestamp closest "
                    "to voting period start timestamp: %s",
                    timestamp,
                )
        else:
            # NOTE: This could be done by binary search with web3 queries.
            # Given that the current block number is around `9000000`, it is unclear whether
            # doing it through web3 with `log(9000000, 2)` ~= 24 `getBlock` queries is
            # worthwhile; that is expensive compared to computing the result from the
            # cached data, which involves no queries at all.

            # Binary search for the right-most timestamp smaller than `timestamp`.
            all_timestamps = tuple(self._block_timestamp_to_number.keys())
            target_timestamp_index = bisect.bisect_right(
                all_timestamps, timestamp)
            # Though `index < 0` should never happen, check it for safety.
            if target_timestamp_index <= 0:
                raise Eth1BlockNotFound(
                    "Failed to find the closest eth1 voting period start block to "
                    f"timestamp {timestamp}")
            else:
                # `bisect.bisect_right` returns the index we should insert `timestamp` into
                # `all_timestamps`, to make `all_timestamps` still in order. The element we are
                # looking for is actually `index - 1`
                index = target_timestamp_index - 1
                target_key = all_timestamps[index]
                return self._block_timestamp_to_number[target_key]
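The bisect step can be checked standalone: bisect_right returns the insertion point, so the element at index - 1 is the right-most recorded timestamp not exceeding the target (values are illustrative):

import bisect

all_timestamps = (100, 110, 120, 130)
index = bisect.bisect_right(all_timestamps, 125) - 1
assert all_timestamps[index] == 120  # right-most timestamp <= 125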
Example #24
    async def _find_launch_headers(self, peer: TChainPeer) -> Tuple[BlockHeaderAPI, ...]:
        """
        When getting started with a peer, find exactly where the headers start differing from the
        current database of headers by requesting contiguous headers from peer. Return the first
        headers returned that are missing from the local db.

        It is possible that it will be unreasonable to find the exact starting header. For example,
        the canonical head may update while waiting for a response from the skeleton peer. In
        that case, return a *stale* header that we already know about, and there will be some
        duplicate header downloads.
        """
        newest_matching_header = await self._find_newest_matching_skeleton_header(peer)

        # This next gap will have at least one header that's new to us, because it overlaps
        # with the skeleton header that is next in the previous skeleton request, and
        # we chose the starting skeleton header so it goes past our canonical head
        start_num = BlockNumber(newest_matching_header.block_number + 1)
        launch_headers = await self._fetch_headers_from(peer, start_num, skip=0)

        if len(launch_headers) == 0:
            raise ValidationError(
                f"{peer} gave 0 headers when seeking common meat ancestors from {start_num}"
            )

        # identify headers that are not already stored locally
        completed_headers, new_headers = await skip_complete_headers(
            launch_headers, self._is_header_imported)

        if completed_headers:
            self.logger.debug(
                "During header sync launch, skipping over (%d) already stored headers %s: %s..%s",
                len(completed_headers),
                humanize_integer_sequence(h.block_number for h in completed_headers),
                completed_headers[0],
                completed_headers[-1],
            )

        if len(new_headers) == 0:
            self.logger.debug(
                "Canonical head updated while finding new head from %s, returning old %s instead",
                peer,
                launch_headers[-1],
            )
            return (launch_headers[-1], )
        else:
            try:
                launch_parent = await self._db.coro_get_block_header_by_hash(
                    new_headers[0].parent_hash)
            except HeaderNotFound as exc:
                raise ValidationError(
                    f"First header {new_headers[0]} did not have parent in DB"
                ) from exc
            # validate new headers against the parent in the database
            await self._chain.coro_validate_chain(
                launch_parent,
                new_headers,
                SEAL_CHECK_RANDOM_SAMPLE_RATE,
            )
            return new_headers
Example #25
    async def get_starting_block_number(self) -> BlockNumber:
        head = await self._db.coro_get_canonical_head()

        # When we start the sync with a peer, we always request up to MAX_REORG_DEPTH extra
        # headers before our current head's number, in case there were chain reorgs since the last
        # time _sync() was called. All of the extra headers that are already present in our DB
        # will be discarded so we don't unnecessarily process them again.
        return BlockNumber(max(GENESIS_BLOCK_NUMBER, head.block_number - MAX_SKELETON_REORG_DEPTH))
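For illustration (numbers assumed): with the canonical head at #1000 and MAX_SKELETON_REORG_DEPTH == 64, syncing starts at block #936; near genesis, max() clamps the result to GENESIS_BLOCK_NUMBER so the start block never goes negative.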
Example #26
def block_hashes_in_range(
        w3: "Web3",
        block_range: Tuple[BlockNumber, BlockNumber]) -> Iterable[Hash32]:
    from_block, to_block = block_range
    if from_block is None or to_block is None:
        return
    for block_number in range(from_block, to_block + 1):
        yield getattr(w3.eth.getBlock(BlockNumber(block_number)), "hash", None)
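Both ends of the tuple are inclusive, mirroring the fromBlock/toBlock JSON-RPC convention. A hedged usage sketch (block numbers are illustrative):

# Hashes of blocks 10, 11 and 12, in order:
hashes = list(block_hashes_in_range(w3, (BlockNumber(10), BlockNumber(12))))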
Example #27
    async def handle(self, connection: ConnectionAPI, cmd: NewBlock) -> None:
        header = cmd.payload.block.header
        # The announced total difficulty includes the new block itself, so subtracting
        # the header's difficulty yields the total difficulty of its parent.
        actual_td = cmd.payload.total_difficulty - header.difficulty

        if actual_td > self.head_td:
            # Track the parent as the head, since the new block itself is not validated here.
            self._head_hash = header.parent_hash
            self._head_td = actual_td
            self._head_number = BlockNumber(header.block_number - 1)
Example #28
 def validate_block(self, block: BlockAPI) -> None:
     if block.is_genesis:
         raise ValidationError("Cannot validate genesis block this way")
     VM_class = self.get_vm_class_for_block_number(BlockNumber(block.number))
     parent_header = self.get_block_header_by_hash(block.header.parent_hash)
     VM_class.validate_header(block.header, parent_header, check_seal=True)
     self.validate_uncles(block)
     self.validate_gaslimit(block.header)
Example #29
 def get_block(self, arg: Union[Hash32, int, str]) -> Optional[Eth1Block]:
     block_dict = self.w3.eth.getBlock(arg)
     if block_dict is None:
         raise BlockNotFound
     return Eth1Block(
         block_hash=Hash32(block_dict["hash"]),
         number=BlockNumber(block_dict["number"]),
         timestamp=Timestamp(block_dict["timestamp"]),
     )
Example #30
    def test_eth_getLogs_with_logs_none_topic_args(self, web3: "Web3") -> None:
        # Test with None overflowing
        filter_params: FilterParams = {
            "fromBlock": BlockNumber(0),
            "topics": [None, None, None],
        }

        result = web3.eth.getLogs(filter_params)
        assert len(result) == 0
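Each None entry in a topics list is a positional wildcard, but a log must carry at least as many topics as the list has entries, so [None, None, None] matches only logs with three or more topics; the assertion expects the test chain to contain none.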