Example #1
    async def _find_launch_headers(self, peer: TChainPeer) -> Tuple[BlockHeaderAPI, ...]:
        """
        When getting started with a peer, find exactly where the headers start differing from the
        current database of headers by requesting contiguous headers from the peer. Return the
        first returned headers that are missing from the local db.

        It is possible that it will be unreasonable to find the exact starting header. For example,
        the canonical head may update while waiting for a response from the skeleton peer. In
        that case, return a *stale* header that we already know about, and there will be some
        duplicate header downloads.
        """
        newest_matching_header = await self._find_newest_matching_skeleton_header(peer)

        # This next gap will have at least one header that's new to us, because it overlaps
        # with the skeleton header that is next in the previous skeleton request, and
        # we chose the starting skeleton header so it goes past our canonical head
        start_num = BlockNumber(newest_matching_header.block_number + 1)
        launch_headers = await self._fetch_headers_from(peer, start_num, skip=0)

        if len(launch_headers) == 0:
            raise ValidationError(
                f"{peer} gave 0 headers when seeking common meat ancestors from {start_num}"
            )

        # identify headers that are not already stored locally
        completed_headers, new_headers = await skip_complete_headers(
            launch_headers, self._is_header_imported)

        if completed_headers:
            self.logger.debug(
                "During header sync launch, skipping over (%d) already stored headers %s: %s..%s",
                len(completed_headers),
                humanize_integer_sequence(h.block_number for h in completed_headers),
                completed_headers[0],
                completed_headers[-1],
            )

        if len(new_headers) == 0:
            self.logger.debug(
                "Canonical head updated while finding new head from %s, returning old %s instead",
                peer,
                launch_headers[-1],
            )
            return (launch_headers[-1], )
        else:
            try:
                launch_parent = await self._db.coro_get_block_header_by_hash(
                    new_headers[0].parent_hash)
            except HeaderNotFound as exc:
                raise ValidationError(
                    f"First header {new_headers[0]} did not have parent in DB"
                ) from exc
            # validate new headers against the parent in the database
            await self._chain.coro_validate_chain(
                launch_parent,
                new_headers,
                SEAL_CHECK_RANDOM_SAMPLE_RATE,
            )
            return new_headers
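A minimal sketch of what `skip_complete_headers` is assumed to do here, inferred only from its usage above (the real helper may differ): split off the leading run of already-imported headers and return the remainder as new.

from typing import Awaitable, Callable, Sequence, Tuple, TypeVar

THeader = TypeVar("THeader")

async def skip_complete_headers(
        headers: Sequence[THeader],
        is_imported: Callable[[THeader], Awaitable[bool]],
) -> Tuple[Tuple[THeader, ...], Tuple[THeader, ...]]:
    # Count the leading prefix of headers that the local db already has.
    split = 0
    for header in headers:
        if await is_imported(header):
            split += 1
        else:
            break
    return tuple(headers[:split]), tuple(headers[split:])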
Example #2
def validate_forkid(
    forkid: ForkID,
    genesis_hash: Hash32,
    head: BlockNumber,
    fork_blocks: Tuple[BlockNumber, ...],
) -> None:
    """
    Validate the given ForkID against our current state.

    Validation rules are described at
      https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md#validation-rules
    """

    fork_blocks_list = list(fork_blocks)
    checksums = [binascii.crc32(genesis_hash)]
    for block_number in fork_blocks_list:
        block_number_as_bytes = block_number.to_bytes(8, 'big')
        checksums.append(binascii.crc32(block_number_as_bytes, checksums[-1]))

    fork_blocks_list.append(BlockNumber(sys.maxsize))
    for i, block_number in enumerate(fork_blocks_list):
        if head > block_number:
            # Our head is beyond this fork, so continue. We have a dummy fork block as the last
            # item in fork_blocks_list to ensure this check fails eventually.
            continue

        # Found the first unpassed fork block, check if our current state matches
        # the remote checksum (rule #1).
        if _crc_to_bytes(checksums[i]) == forkid.hash:
            # Fork checksum matched, check if a remote future fork block already passed
            # locally without the local node being aware of it (rule #1a).
            if forkid.next > 0 and head >= forkid.next:
                raise LocalChainIncompatibleOrStale("rule 1a")
            # Haven't passed locally a remote-only fork, accept the connection (rule #1b).
            return

        # We're in different forks currently, check if the remote checksum is a subset of our
        # local forks (rule #2).
        for b, checksum in itertools.zip_longest(fork_blocks_list[:i],
                                                 checksums[:i]):
            if _crc_to_bytes(checksum) == forkid.hash:
                # Remote checksum is a subset, validate based on the announced next fork
                if b != forkid.next:
                    raise RemoteChainIsStale()
                return

        # Remote chain is not a subset of our local one, check if it's a superset by
        # any chance, signalling that we're simply out of sync (rule #3).
        for checksum in checksums[i:]:
            if _crc_to_bytes(checksum) == forkid.hash:
                # Remote checksum is a superset, ignore upcoming forks
                return

        # No exact, subset or superset match. We are on differing chains, reject.
        raise LocalChainIncompatibleOrStale("different chains")

    # Something is very wrong if we get here, but better to accept than reject.
    logging.getLogger('p2p').error("Impossible forkid validation for %s",
                                   forkid)
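The rolling CRC32 fork checksum above can be traced by hand. A runnable sketch with placeholder values (not real chain data):

import binascii

genesis_hash = b"\x00" * 32          # placeholder genesis hash
fork_blocks = (1150000, 1920000)     # hypothetical fork block numbers

checksum = binascii.crc32(genesis_hash)
for block_number in fork_blocks:
    # Each fork block is folded in as an 8-byte big-endian integer,
    # seeding crc32 with the previous checksum, exactly as in the loop above.
    checksum = binascii.crc32(block_number.to_bytes(8, 'big'), checksum)

fork_hash = checksum.to_bytes(4, 'big')  # what _crc_to_bytes presumably returns
print(fork_hash.hex())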
Example #3
    def _get_closest_eth1_voting_period_start_block(
            self, timestamp: Timestamp) -> BlockNumber:
        """
        Find the timestamp in `self._block_timestamp_to_number` which is the largest timestamp
        smaller than `timestamp`.
        Assuming `self._block_timestamp_to_number` is in ascending order, the naive way to find
        the timestamp would be to traverse from its tail; a binary search is used here instead.
        """
        # Compare with the largest recorded block timestamp before querying
        # for the latest block.
        # If `timestamp` is larger than the largest recorded block timestamp,
        # request the block from the eth1 provider.
        if (self._largest_block_timestamp is None
                or timestamp > self._largest_block_timestamp):
            try:
                block = self._eth1_data_provider.get_block("latest")
            except BlockNotFound:
                raise Eth1MonitorValidationError("Failed to get latest block")
            if block.timestamp <= timestamp:
                return block.number
            else:
                block_number = block.number
                # Try the latest `self._num_blocks_confirmed` blocks until we give up
                for i in range(1, self._num_blocks_confirmed + 1):
                    lookback_number = block_number - i
                    if lookback_number < 0:
                        break
                    else:
                        shifted_block = BlockNumber(lookback_number)
                    block = self._eth1_data_provider.get_block(shifted_block)
                    if block.timestamp <= timestamp:
                        return block.number
                raise Eth1BlockNotFound(
                    "Cannot find block with timestamp closest to "
                    f"voting period start timestamp: {timestamp}"
                )
        else:
            # NOTE: This could be done with a binary search over web3 queries.
            # Given that the current block number is around `9000000`, it is not
            # clear that is worthwhile: `log2(9000000)` ~= 24 `getBlock` queries
            # is quite expensive compared to computing it from the cached data,
            # which involves no queries at all.

            # Binary search for the right-most timestamp smaller than `timestamp`.
            all_timestamps = tuple(self._block_timestamp_to_number.keys())
            target_timestamp_index = bisect.bisect_right(
                all_timestamps, timestamp)
            # `target_timestamp_index == 0` means every recorded timestamp is
            # larger than `timestamp`; treat that as not found.
            if target_timestamp_index <= 0:
                raise Eth1BlockNotFound(
                    "Failed to find the closest eth1 voting period start block to "
                    f"timestamp {timestamp}")
            else:
                # `bisect.bisect_right` returns the index we should insert `timestamp` into
                # `all_timestamps`, to make `all_timestamps` still in order. The element we are
                # looking for is actually `index - 1`
                index = target_timestamp_index - 1
                target_key = all_timestamps[index]
                return self._block_timestamp_to_number[target_key]
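The bisect lookup above can be checked in isolation; a sketch with made-up timestamps:

import bisect

all_timestamps = (100, 160, 220, 280)  # ascending, as assumed above
timestamp = 250

# bisect_right returns the insertion point; the element at index - 1 is the
# right-most recorded timestamp <= the target.
index = bisect.bisect_right(all_timestamps, timestamp) - 1
assert index >= 0, "every recorded timestamp is larger than the target"
print(all_timestamps[index])  # 220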
Example #4
    async def _fetch_segment(self, peer: TChainPeer,
                             parent_header: BlockHeader,
                             length: int) -> Tuple[BlockHeader, ...]:
        if length > peer.max_headers_fetch:
            raise ValidationError(
                f"Can't request {length} headers, because peer maximum is {peer.max_headers_fetch}"
            )

        headers = await self._request_headers(
            peer,
            BlockNumber(parent_header.block_number + 1),
            length,
        )
        if not headers:
            return tuple()
        elif headers[0].parent_hash != parent_header.hash:
            # Segment doesn't match leading peer, drop this peer
            # Eventually, we'll do something smarter, in case the leading peer is the divergent one
            self.logger.warning(
                "%s returned segment starting %s & parent %s, doesn't match %s, ignoring result...",
                peer,
                headers[0],
                humanize_hash(headers[0].parent_hash),
                parent_header,
            )
            return tuple()
        elif len(headers) != length:
            self.logger.debug(
                "Ignoring %d headers from %s, because wanted %d",
                len(headers),
                peer,
                length,
            )
            return tuple()
        else:
            try:
                await self.wait(
                    self._chain.coro_validate_chain(
                        parent_header,
                        headers,
                        SEAL_CHECK_RANDOM_SAMPLE_RATE,
                    ))
            except ValidationError as e:
                self.logger.warning(
                    "Received invalid header segment from %s against known parent %s, "
                    ": %s",
                    peer,
                    parent_header,
                    e,
                )
                return tuple()
            else:
                # stitch headers together in order, ignoring duplicates
                self._stitcher.register_tasks(headers, ignore_duplicates=True)
                if self.sync_progress:
                    last_received_header = headers[-1]
                    self.sync_progress = self.sync_progress.update_current_block(
                        last_received_header.block_number)
                return headers
Example #5
 def get_ancestors(self, limit: int,
                   header: BlockHeader) -> Iterator[BaseBlock]:
     """
     Return `limit` number of ancestor blocks from the current canonical head.
     """
     lower_limit = max(header.block_number - limit, 0)
     for n in reversed(range(lower_limit, header.block_number)):
         yield self.get_canonical_block_by_number(BlockNumber(n))
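For illustration, the iteration order of the generator above with made-up numbers: a header at block 10 with limit 3 yields ancestor numbers newest-first.

header_block_number, limit = 10, 3  # hypothetical values
lower_limit = max(header_block_number - limit, 0)
print(list(reversed(range(lower_limit, header_block_number))))  # [9, 8, 7]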
Example #6
def block_hashes_in_range(
        w3: "Web3", block_range: Tuple[BlockNumber,
                                       BlockNumber]) -> Iterable[Hash32]:
    from_block, to_block = block_range
    if from_block is None or to_block is None:
        return
    for block_number in range(from_block, to_block + 1):
        yield getattr(w3.eth.getBlock(BlockNumber(block_number)), "hash", None)
Example #7
    async def handle(self, connection: ConnectionAPI, cmd: NewBlock) -> None:
        header = cmd.payload.block.header
        actual_td = cmd.payload.total_difficulty - header.difficulty

        if actual_td > self.head_td:
            self._head_hash = header.parent_hash
            self._head_td = actual_td
            self._head_number = BlockNumber(header.block_number - 1)
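A worked check of the arithmetic above: the announced total difficulty includes the new block itself, so subtracting the header's own difficulty leaves the parent's total difficulty (numbers are made up).

total_difficulty = 1000500   # TD announced with the NewBlock message
header_difficulty = 500      # difficulty of the announced block itself
parent_td = total_difficulty - header_difficulty
assert parent_td == 1000000  # tracked as actual_td / _head_td above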
Example #8
 def validate_block(self, block: BlockAPI) -> None:
     if block.is_genesis:
         raise ValidationError("Cannot validate genesis block this way")
     VM_class = self.get_vm_class_for_block_number(BlockNumber(block.number))
     parent_header = self.get_block_header_by_hash(block.header.parent_hash)
     VM_class.validate_header(block.header, parent_header, check_seal=True)
     self.validate_uncles(block)
     self.validate_gaslimit(block.header)
Example #9
    async def get_starting_block_number(self) -> BlockNumber:
        head = await self._db.coro_get_canonical_head()

        # When we start the sync with a peer, we always request up to MAX_SKELETON_REORG_DEPTH
        # extra headers before our current head's number, in case there were chain reorgs since
        # the last time _sync() was called. All of the extra headers that are already present in
        # our DB will be discarded so we don't unnecessarily process them again.
        return BlockNumber(max(GENESIS_BLOCK_NUMBER, head.block_number - MAX_SKELETON_REORG_DEPTH))
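The max() clamp above matters near the start of the chain; a quick sketch with assumed constant values:

GENESIS_BLOCK_NUMBER = 0        # assumed value
MAX_SKELETON_REORG_DEPTH = 128  # assumed value

head_block_number = 100
# The lookback would go negative here, so it clamps to genesis:
assert max(GENESIS_BLOCK_NUMBER, head_block_number - MAX_SKELETON_REORG_DEPTH) == 0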
Example #10
 def _get_discv5_topic(self) -> bytes:
     genesis_hash = self.headerdb.get_canonical_block_hash(
         BlockNumber(GENESIS_BLOCK_NUMBER))
     # For now DiscoveryByTopicProtocol supports a single topic, so we use the latest version
     # of our supported protocols.
     proto = self.peer_pool.peer_factory_class.peer_class._supported_sub_protocols[-1]
     return get_v5_topic(proto, genesis_hash)
Example #11
def get_discv5_topic(trinity_config: TrinityConfig, protocol: Type[Protocol]) -> bytes:
    db_manager = create_db_manager(trinity_config.database_ipc_path)
    db_manager.connect()

    header_db = db_manager.get_headerdb()  # type: ignore
    genesis_hash = header_db.get_canonical_block_hash(BlockNumber(GENESIS_BLOCK_NUMBER))

    return get_v5_topic(protocol, genesis_hash)
Example #12
    async def do_run(self, event_bus: EndpointAPI) -> None:
        trinity_config = self._boot_info.trinity_config
        beacon_app_config = trinity_config.get_app_config(BeaconAppConfig)
        chain_config = beacon_app_config.get_chain_config()
        genesis_state = chain_config._genesis_state
        base_db = DBClient.connect(trinity_config.database_ipc_path)

        # TODO: For now we use fake eth1 monitor.
        # if boot_info.args.eth1client_rpc:
        #     w3: Web3 = Web3.HTTPProvider(boot_info.args.eth1client_rpc)
        # else:
        #     w3: Web3 = None

        # TODO: For now we use fake eth1 monitor. So we load validators data from
        # interop setting and hardcode the deposit data into fake eth1 data provider.
        chain = chain_config.beacon_chain_class.from_genesis(base_db, genesis_state)
        config = chain.get_state_machine().config
        key_set = load_yaml_at(
            Path("eth2/beacon/scripts/quickstart_state/keygen_16_validators.yaml")
        )
        pubkeys, privkeys, withdrawal_credentials = create_keypair_and_mock_withdraw_credentials(
            config, key_set  # type: ignore
        )
        initial_deposits = (
            create_mock_deposit_data(
                config=config,
                pubkey=pubkey,
                privkey=privkey,
                withdrawal_credentials=withdrawal_credential,
            )
            for pubkey, privkey, withdrawal_credential in zip(
                pubkeys, privkeys, withdrawal_credentials
            )
        )

        # Set the timestamp of the start block early enough that the eth1 monitor
        # can query up to 2 * `ETH1_FOLLOW_DISTANCE` blocks at the beginning.
        start_block_timestamp = (
            chain_config.genesis_time - 3 * ETH1_FOLLOW_DISTANCE * AVERAGE_BLOCK_TIME
        )
        with base_db:
            fake_eth1_data_provider = FakeEth1DataProvider(
                start_block_number=START_BLOCK_NUMBER,
                start_block_timestamp=Timestamp(start_block_timestamp),
                num_deposits_per_block=NUM_DEPOSITS_PER_BLOCK,
                initial_deposits=tuple(initial_deposits),
            )

            eth1_monitor_service: Service = Eth1Monitor(
                eth1_data_provider=fake_eth1_data_provider,
                num_blocks_confirmed=NUM_BLOCKS_CONFIRMED,
                polling_period=POLLING_PERIOD,
                start_block_number=BlockNumber(START_BLOCK_NUMBER - 1),
                event_bus=event_bus,
                base_db=base_db,
            )

            await TrioManager.run_service(eth1_monitor_service)
Example #13
 async def _new_blocks(self) -> AsyncGenerator[Eth1Block, None]:
     """
     Keep polling latest blocks, and yield the blocks whose number is
     `latest_block.number - self._num_blocks_confirmed`.
     """
     highest_processed_block_number = BlockNumber(self._start_block_number -
                                                  1)
     while True:
         block = _w3_get_block(self._w3, "latest")
         target_block_number = BlockNumber(block.number -
                                           self._num_blocks_confirmed)
         if target_block_number > highest_processed_block_number:
             # From `highest_processed_block_number` to `target_block_number`
             for block_number in range(highest_processed_block_number + 1,
                                       target_block_number + 1):
                 yield _w3_get_block(self._w3, block_number)
             highest_processed_block_number = target_block_number
         await trio.sleep(self._polling_period)
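The confirmation window above can be traced with made-up numbers: only blocks at least `num_blocks_confirmed` behind the latest block are yielded, each exactly once.

latest_block_number = 1000
num_blocks_confirmed = 6
highest_processed_block_number = 990

target_block_number = latest_block_number - num_blocks_confirmed  # 994
# One polling pass yields blocks 991..994, inclusive:
print(list(range(highest_processed_block_number + 1, target_block_number + 1)))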
Example #14
async def get_header(chain: BaseAsyncChain, at_block: Union[str, int]) -> BlockHeader:
    if at_block == 'pending':
        raise NotImplementedError("RPC interface does not support the 'pending' block at this time")
    elif at_block == 'latest':
        at_header = chain.get_canonical_head()
    elif at_block == 'earliest':
        # TODO find if genesis block can be non-zero. Why does 'earliest' option even exist?
        block = await chain.coro_get_canonical_block_by_number(BlockNumber(0))
        at_header = block.header
    # mypy doesn't have user defined type guards yet
    # https://github.com/python/mypy/issues/5206
    elif is_integer(at_block) and at_block >= 0:  # type: ignore
        block = await chain.coro_get_canonical_block_by_number(BlockNumber(at_block))
        at_header = block.header
    else:
        raise TypeError("Unrecognized block reference: %r" % at_block)

    return at_header
Example #15
def get_discv5_topic(trinity_config: TrinityConfig,
                     protocol: Type[Protocol]) -> bytes:
    db = DBClient.connect(trinity_config.database_ipc_path)

    header_db = HeaderDB(db)
    genesis_hash = header_db.get_canonical_block_hash(
        BlockNumber(GENESIS_BLOCK_NUMBER))

    return get_v5_topic(protocol, genesis_hash)
Example #16
def block_ranges(
    start_block: BlockNumber, last_block: Optional[BlockNumber], step: int = 5
) -> Iterable[Tuple[BlockNumber, BlockNumber]]:
    """Returns 2-tuple ranges describing ranges of block from start_block to last_block

       Ranges do not overlap to facilitate use as ``toBlock``, ``fromBlock``
       json-rpc arguments, which are both inclusive.
    """
    if last_block is not None and start_block > last_block:
        raise TypeError(
            "Incompatible start and stop arguments: "
            "start must be less than or equal to stop.")

    return (
        (BlockNumber(from_block), BlockNumber(to_block - 1))
        for from_block, to_block
        in segment_count(start_block, last_block + 1, step)
    )
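A sketch of the expected output, assuming `segment_count` yields half-open [start, stop) windows of `step` blocks (its real behavior is not shown here); the `- 1` turns them into inclusive fromBlock/toBlock pairs:

def _segments(start: int, stop: int, step: int):
    # stand-in for segment_count, for illustration only
    for lo in range(start, stop, step):
        yield lo, min(lo + step, stop)

print([(lo, hi - 1) for lo, hi in _segments(0, 11 + 1, 5)])
# [(0, 4), (5, 9), (10, 11)]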
Example #17
 async def request_stuff() -> None:
     nonlocal peer_pool
     # Request some stuff from ropsten's block 2440319
     # (https://ropsten.etherscan.io/block/2440319), just as a basic test.
     peer = peer_pool.highest_td_peer
     if peer_class == ETHPeer:
         peer = cast(ETHPeer, peer)
         headers = await peer.eth_api.get_block_headers(
             BlockNumber(2440319), max_headers=100)
         hashes = tuple(header.hash for header in headers)
         peer.eth_api.send_get_block_bodies(hashes)
         peer.eth_api.send_get_receipts(hashes)
     else:
         peer = cast(LESPeer, peer)
         headers = await peer.les_api.get_block_headers(
             BlockNumber(2440319), max_headers=100)
         hashes = tuple(header.hash for header in headers)
         peer.les_api.send_get_block_bodies(list(hashes))
         peer.les_api.send_get_receipts(hashes[:1])
Example #18
 def get_block(self, arg: Union[Hash32, int, str]) -> Optional[Eth1Block]:
     block_dict = self.w3.eth.getBlock(arg)
     if block_dict is None:
         raise BlockNotFound
     return Eth1Block(
         block_hash=Hash32(block_dict["hash"]),
         number=BlockNumber(block_dict["number"]),
         timestamp=Timestamp(block_dict["timestamp"]),
     )
Example #19
    def test_eth_getLogs_with_logs_none_topic_args(self, web3: "Web3") -> None:
        # Test with topic args that are all None
        filter_params: FilterParams = {
            "fromBlock": BlockNumber(0),
            "topics": [None, None, None],
        }

        result = web3.eth.getLogs(filter_params)
        assert len(result) == 0
Example #20
 def get_block(self, arg: Union[Hash32, int, str]) -> Eth1Block:
     block_dict = self.w3.eth.getBlock(arg)
     if block_dict is None:
         raise BlockNotFound
     return Eth1Block(
         block_hash=Hash32(block_dict["hash"]),
         parent_hash=block_dict["parentHash"],
         number=BlockNumber(block_dict["number"]),
         timestamp=Timestamp(block_dict["timestamp"]),
     )
Example #21
 def get_topmost_actionable_gap(self,
                                gaps: Tuple[BlockRange, ...],
                                header_gaps: Tuple[BlockRange, ...]) -> BlockRange:
     """
     Return the most recent gap of blocks, capped at
     ``_max_backfill_block_bodies_at_once``, for which all headers, including
     the one preceding the gap, exist in the DB.
     """
     for gap in gaps[::-1]:
         if gap[1] - gap[0] > self._max_backfill_block_bodies_at_once:
             gap = (BlockNumber(gap[1] - self._max_backfill_block_bodies_at_once), gap[1])
         # We want to be sure the header preceding the block gap is in DB
         gap_with_prev_block = (BlockNumber(gap[0] - 1), gap[1])
         for header_gap in header_gaps[::-1]:
             if not self._have_empty_intersection(gap_with_prev_block, header_gap):
                 break
         else:
             return gap
     else:
         raise NoActionableGap
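`_have_empty_intersection` is not shown in the snippet; for inclusive (low, high) block ranges it is presumably equivalent to this sketch:

from typing import Tuple

BlockRange = Tuple[int, int]  # inclusive (low, high), assumed

def _have_empty_intersection(a: BlockRange, b: BlockRange) -> bool:
    # Two inclusive ranges are disjoint iff one ends before the other starts.
    return a[1] < b[0] or b[1] < a[0]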
Example #22
    def test_eth_getLogs_with_logs(
        self,
        web3: "Web3",
        block_with_txn_with_log: BlockData,
        emitter_contract_address: ChecksumAddress,
        txn_hash_with_log: HexStr,
    ) -> None:
        def assert_contains_log(result: Sequence[LogReceipt]) -> None:
            assert len(result) == 1
            log_entry = result[0]
            assert log_entry['blockNumber'] == block_with_txn_with_log[
                'number']
            assert log_entry['blockHash'] == block_with_txn_with_log['hash']
            assert log_entry['logIndex'] == 0
            assert is_same_address(log_entry['address'],
                                   emitter_contract_address)
            assert log_entry['transactionIndex'] == 0
            assert log_entry['transactionHash'] == HexBytes(txn_hash_with_log)

        # Test with block range

        # the range includes the block where the log resides in
        filter_params: FilterParams = {
            "fromBlock": block_with_txn_with_log['number'],
            "toBlock": block_with_txn_with_log['number'],
        }
        result = web3.eth.getLogs(filter_params)
        assert_contains_log(result)

        # specify only `fromBlock`; by default `toBlock` should be 'latest'
        filter_params = {
            "fromBlock": BlockNumber(0),
        }
        result = web3.eth.getLogs(filter_params)
        assert_contains_log(result)

        # Test with `address`

        # filter with emitter_contract.address
        filter_params = {
            "fromBlock": BlockNumber(0),
            "address": emitter_contract_address,
        }
Example #23
 def get_block(self,
               arg: web3.types.BlockIdentifier) -> Optional[Eth1Block]:
     block_dict = self.w3.eth.getBlock(arg)
     if block_dict is None:
         raise BlockNotFound
     return Eth1Block(
         block_hash=Hash32(block_dict["hash"]),
         number=BlockNumber(block_dict["number"]),
         timestamp=Timestamp(block_dict["timestamp"]),
     )
Example #24
def test_web3_retries_block_not_found(
        patched_web3: Web3, succeed_at: int,
        requests_responses: responses.RequestsMock):
    requests_responses.add_callback(
        responses.POST,
        "http://domain/",
        callback=_make_json_rpc_null_response(succeed_at))

    result = patched_web3.eth.getBlock(BlockNumber(1))
    assert result["number"] == 1
Example #25
 async def backfill_logs(self,
                         from_block: BlockNumber,
                         to_block: BlockNumber,
                         dest: trio.MemorySendChannel,
                         step_slowdown: float = 1.5,
                         step_fail_wait: float = 20.0,
                         step_block_count: int = 1024) -> None:
     """
     Backfill deposit logs for the given block range, sending batches (lists) of DepositLog
     to dest. Optionally slow down steps by step_slowdown seconds, or change the number of
     blocks scanned per step.

     :param from_block: Starting point (inclusive)
     :param to_block: End point (exclusive)
     :param dest: A Trio memory channel to send batches (lists) of DepositLog entries to.
     :param step_slowdown: Sleep the given number of seconds between steps, to avoid rate-limits/stress.
     :param step_fail_wait: How long to wait before retrying a failed step.
     :param step_block_count: The number of blocks to scan at a time for logs.
     """
     curr_dep_count = 0
     curr_block_num = from_block
     while curr_block_num < to_block:
         next_block_num = min(curr_block_num + step_block_count, to_block)
         try:
             next_dep_count = self.get_deposit_count(
                 BlockNumber(next_block_num))
             print(
                 f"deposit count {next_dep_count} at block #{next_block_num}"
             )
             if next_dep_count > curr_dep_count:
                 logs = self.get_logs(BlockNumber(curr_block_num),
                                      BlockNumber(next_block_num))
                 print(
                     f"fetched {len(logs)} logs from block {curr_block_num} to {next_block_num}"
                 )
                 if len(logs) > 0:
                     await dest.send(logs)
             # Remember the latest count so the next window only fetches new logs.
             curr_dep_count = next_dep_count
         except Exception as e:
             print(f"warning: eth1 get-log step in backfill failed: {e}")
             print(f"waiting {step_fail_wait} seconds to try again")
             await trio.sleep(step_fail_wait)
             continue
         await trio.sleep(step_slowdown)
         curr_block_num += step_block_count
Example #26
    def to_block(self) -> BlockNumber:
        if self._to_block is None:
            to_block = self.w3.eth.block_number
        elif self._to_block == "latest":
            to_block = self.w3.eth.block_number
        elif is_hex(self._to_block):
            to_block = BlockNumber(hex_to_integer(self._to_block))  # type: ignore
        else:
            to_block = cast(BlockNumber, self._to_block)

        return to_block
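For the hex branch above, `hex_to_integer` presumably behaves like int(value, 16); a quick illustration:

raw = "0x10d4f"      # hypothetical hex block identifier
print(int(raw, 16))  # 68943, wrapped in BlockNumber by to_block()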
Example #27
 def from_headerdb(cls, headerdb: HeaderDB,
                   **kwargs: Any) -> ETHHandshakeParams:
     head = headerdb.get_canonical_head()
     head_score = headerdb.get_score(head.hash)
     # TODO: https://github.com/ethereum/py-evm/issues/1847
     genesis = headerdb.get_canonical_block_header_by_number(
         BlockNumber(GENESIS_BLOCK_NUMBER))
     return cls(head_hash=head.hash,
                genesis_hash=genesis.hash,
                total_difficulty=head_score,
                **kwargs)
Example #28
 def create_header_from_parent(
         self, parent_header: BlockHeaderAPI,
         **header_params: HeaderParams) -> BlockHeaderAPI:
     """
     Passthrough helper to the VM class of the block descending from the
     given header.
     """
     return self.get_vm_class_for_block_number(
         block_number=BlockNumber(parent_header.block_number + 1),
     ).create_header_from_parent(parent_header, **header_params)
Example #29
def test_logfilter_with_nonexistent_event(web3: Web3) -> None:
    """ Try to create a LogFilter with a nonexistent event """

    with pytest.raises(ValueError):
        LogFilter(
            web3=web3,
            abi=[],
            address=to_checksum_address("0xfake"),
            event_name="ev0",
            from_block=BlockNumber(0),
            to_block="latest",
        )
Example #30
    async def _download_blocks(self, before_header: BlockHeaderAPI) -> None:
        """
        When importing a block, we need to validate uncles against the previous
        six blocks, so download those bodies and persist them to the database.
        """
        parents_needed = FULL_BLOCKS_NEEDED_TO_START_BEAM

        self.logger.info(
            "Downloading %d block bodies for uncle validation, before %s",
            parents_needed,
            before_header,
        )

        # select the recent ancestors to sync block bodies for
        parent_headers = tuple(
            reversed([
                header
                async for header in self._get_ancestors(parents_needed,
                                                        header=before_header)
            ]))

        # identify starting tip and headers with possible uncle conflicts for validation
        if len(parent_headers) < parents_needed:
            self.logger.info(
                "Collecting %d blocks to genesis for uncle validation",
                len(parent_headers),
            )
            sync_from_tip = await self._chain.coro_get_canonical_block_header_by_number(
                BlockNumber(0))
            uncle_conflict_headers = parent_headers
        else:
            sync_from_tip = parent_headers[0]
            uncle_conflict_headers = parent_headers[1:]

        # check if we already have the blocks for the uncle conflict headers
        if await self._all_verification_bodies_present(uncle_conflict_headers):
            self.logger.debug("All needed block bodies are already available")
        else:
            # tell the header syncer to emit those headers
            self._manual_header_syncer.emit(uncle_conflict_headers)

            # tell the fast syncer which tip to start from
            self._fast_syncer.set_starting_tip(sync_from_tip)

            # run the fast syncer (which downloads block bodies and then exits)
            self.logger.info("Getting recent block data for uncle validation")
            async with background_asyncio_service(
                    self._fast_syncer) as manager:
                await manager.wait_finished()

        # When this completes, we have all the uncles needed to validate
        self.logger.info(
            "Have all data needed for Beam validation, continuing...")