Example #1
    def _persist_header_chain(
        cls, db: DatabaseAPI, headers: Iterable[BlockHeaderAPI]
    ) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
        headers_iterator = iter(headers)

        try:
            first_header = first(headers_iterator)
        except StopIteration:
            return tuple(), tuple()

        is_genesis = first_header.parent_hash == GENESIS_PARENT_HASH
        if not is_genesis and not cls._header_exists(db,
                                                     first_header.parent_hash):
            raise ParentNotFound(
                "Cannot persist block header ({}) with unknown parent ({})".format(
                    encode_hex(first_header.hash),
                    encode_hex(first_header.parent_hash),
                )
            )

        if is_genesis:
            score = 0
        else:
            score = cls._get_score(db, first_header.parent_hash)

        curr_chain_head = first_header
        db.set(
            curr_chain_head.hash,
            rlp.encode(curr_chain_head),
        )
        score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        orig_headers_seq = concat([(first_header, ), headers_iterator])
        for parent, child in sliding_window(2, orig_headers_seq):
            if parent.hash != child.parent_hash:
                raise ValidationError(
                    "Non-contiguous chain. Expected {} to have {} as parent but was {}"
                    .format(
                        encode_hex(child.hash),
                        encode_hex(parent.hash),
                        encode_hex(child.parent_hash),
                    ))

            curr_chain_head = child
            db.set(
                curr_chain_head.hash,
                rlp.encode(curr_chain_head),
            )

            score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        try:
            previous_canonical_head = cls._get_canonical_head(db).hash
            head_score = cls._get_score(db, previous_canonical_head)
        except CanonicalHeadNotFound:
            return cls._set_as_canonical_chain_head(db, curr_chain_head.hash)

        if score > head_score:
            return cls._set_as_canonical_chain_head(db, curr_chain_head.hash)

        return tuple(), tuple()
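The contiguity walk above is a reusable toolz idiom: the first element is consumed off the iterator for the parent check, then concat re-prepends it so sliding_window(2, ...) yields every (parent, child) pair exactly once. A minimal, self-contained sketch of just that mechanic, assuming only toolz and using integers as stand-in headers (check_contiguous is a hypothetical name):

from toolz import concat, sliding_window

def check_contiguous(items):
    it = iter(items)
    first_item = next(it)  # peeked off the iterator, like first_header above
    # Re-attach the consumed element, then walk consecutive pairs.
    for parent, child in sliding_window(2, concat([(first_item,), it])):
        if child != parent + 1:  # stand-in for the parent_hash check
            raise ValueError(f"Non-contiguous: {parent} -> {child}")

check_contiguous([1, 2, 3])    # passes
# check_contiguous([1, 2, 4])  # would raise ValueError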
Example #2
 def get_logs(self) -> List[LogReceipt]:
     return list(
         concat(
             get_logs_multipart(self.w3,
                                self.from_block,
                                self.to_block,
                                self.address,
                                self.topics,
                                max_blocks=MAX_BLOCK_REQUEST)))
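Stripped of web3 specifics, the pattern is: a generator yields one batch of logs per block sub-range, and concat flattens the batches into a single stream before list() materializes it. A sketch with a hypothetical chunked fetcher standing in for get_logs_multipart:

from toolz import concat

def fetch_in_chunks(start, stop, chunk):
    # Stand-in for get_logs_multipart: one batch per block sub-range.
    for lo in range(start, stop + 1, chunk):
        hi = min(lo + chunk - 1, stop)
        yield [f"log@{n}" for n in range(lo, hi + 1)]

logs = list(concat(fetch_in_chunks(1, 10, 4)))
assert logs == [f"log@{n}" for n in range(1, 11)]  # three batches, one flat list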
Example #3
 def _get_all_package_ids(self) -> Iterable[Tuple[bytes]]:
     num_packages = self._num_package_ids()
     # Logic here b/c Solidity Reference Registry implementation returns ids in reverse order
     package_ids = [
         self.registry.functions.getAllPackageIds(index, (index + 4)).call()[0]
         for index in range(0, num_packages, 4)
     ]
     for package_id in concat([x[::-1] for x in package_ids]):
         yield package_id
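The trick here is worth isolating: the registry contract returns each 4-item page in reverse order, so each page is flipped with [::-1] and concat then chains the pages into one forward-ordered stream (Example #5 below reuses the same shape for release ids). A sketch with hypothetical integer ids, assuming only toolz:

from toolz import concat

pages = [[4, 3, 2, 1], [8, 7, 6, 5], [10, 9]]  # each page arrives reversed
ordered = list(concat(page[::-1] for page in pages))
assert ordered == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]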
Example #4
def process_bytecode(link_refs: Dict[str, Any], bytecode: bytes) -> HexStr:
    """
    Replace link_refs in bytecode with 0's.
    """
    all_offsets = [y for x in link_refs.values() for y in x.values()]
    # Link ref validation.
    validate_link_ref_fns = (
        validate_link_ref(ref["start"] * 2, ref["length"] * 2)
        for ref in concat(all_offsets)
    )
    pipe(bytecode, *validate_link_ref_fns)
    # Convert link_refs in bytecode to 0's
    link_fns = (
        replace_link_ref_in_bytecode(ref["start"] * 2, ref["length"] * 2)
        for ref in concat(all_offsets)
    )
    processed_bytecode = pipe(bytecode, *link_fns)
    return add_0x_prefix(processed_bytecode)
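The pipe calls are the other notable idiom here: replace_link_ref_in_bytecode(start, length) returns a one-argument function, and toolz.pipe threads the bytecode through every generated function in turn. A minimal analogue, assuming only toolz, with a hypothetical zero_out helper standing in for the real link-ref replacer:

from toolz import pipe

def zero_out(start, length):
    # Return a function that blanks code[start:start+length] with '0's.
    def replacer(code: str) -> str:
        return code[:start] + "0" * length + code[start + length:]
    return replacer

fns = (zero_out(offset, 2) for offset in (2, 6))
assert pipe("abcdefgh", *fns) == "ab00ef00"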
Example #5
 def _get_all_release_ids(self,
                          package_name: str) -> Iterable[Tuple[bytes]]:
     num_releases = self._num_release_ids(package_name)
     # Logic here b/c Solidity Reference Registry implementation returns ids in reverse order
     release_ids = [
         self.registry.functions.getAllReleaseIds(package_name, index,
                                                  (index + 4)).call()[0]
         for index in range(0, num_releases, 4)
     ]
     for release_id in concat([x[::-1] for x in release_ids]):
         yield release_id
Example #6
    def __new__(  # type: ignore
        mcs,
        name: str,
        bases: Tuple[type],
        namespace: Dict[str, Any],
        normalizers: Optional[Dict[str, Any]] = None
    ) -> Type['PropertyCheckingFactory']:
        all_bases = set(concat(base.__mro__ for base in bases))
        all_keys = set(concat(base.__dict__.keys() for base in all_bases))

        for key in namespace:
            verify_attr(name, key, all_keys)

        if normalizers:
            processed_namespace = apply_formatters_to_dict(
                normalizers,
                namespace,
            )
        else:
            processed_namespace = namespace

        return super().__new__(mcs, name, bases, processed_namespace)
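The two concat calls compose as follows: the first flattens every base's __mro__ into one set of ancestor classes, and the second flattens those ancestors' __dict__ keys into the full set of attribute names a subclass could legitimately override. A self-contained illustration of just that collection step (verify_attr is project-specific, so it is omitted):

from toolz import concat

class A:
    x = 1

class B(A):
    y = 2

bases = (B,)
all_bases = set(concat(base.__mro__ for base in bases))  # {B, A, object}
all_keys = set(concat(base.__dict__.keys() for base in all_bases))
assert {"x", "y"} <= all_keys  # plus object's built-in attribute names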
Example #7
 def _get_filter_changes(self) -> Iterator[List[LogReceipt]]:
     for start, stop in iter_latest_block_ranges(self.w3, self.from_block,
                                                 self.to_block):
         if None in (start, stop):
             yield []
         else:
             yield list(
                 concat(
                     get_logs_multipart(self.w3,
                                        start,
                                        stop,
                                        self.address,
                                        self.topics,
                                        max_blocks=MAX_BLOCK_REQUEST)))
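Compared with Example #2, the range source here can report "nothing new yet", and the None guard yields an empty batch instead of issuing a request. The control flow, sketched with a hypothetical range source and fetcher in place of iter_latest_block_ranges and get_logs_multipart:

from toolz import concat

def poll_batches(ranges, fetch):
    # ranges yields (start, stop) pairs; (None, None) means no new blocks yet.
    for start, stop in ranges:
        if None in (start, stop):
            yield []  # keep the polling cadence without issuing a request
        else:
            yield list(concat(fetch(start, stop)))

fake_ranges = [(1, 3), (None, None), (4, 5)]
fetch = lambda a, b: ([n] for n in range(a, b + 1))
assert list(poll_batches(fake_ranges, fetch)) == [[1, 2, 3], [], [4, 5]]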
Example #8
def state_definition_to_dict(state_definition: GeneralState) -> AccountState:
    """Convert a state definition to the canonical dict form.

    State can either be defined in the canonical form, or as a list of sub states that are then
    merged to one. Sub states can either be given as dictionaries themselves, or as tuples where
    the last element is the value and all others the keys for this value in the nested state
    dictionary. Example:

    ```
        [
            ("0xaabb", "balance", 3),
            ("0xaabb", "storage", {
                4: 5,
            }),
            "0xbbcc", {
                "balance": 6,
                "nonce": 7
            }
        ]
    ```
    """
    if isinstance(state_definition, Mapping):
        state_dict = state_definition
    elif isinstance(state_definition, Iterable):
        state_dicts = [
            assoc_in(
                {},
                state_item[:-1],
                state_item[-1]
            ) if not isinstance(state_item, Mapping) else state_item
            for state_item
            in state_definition
        ]
        if not is_cleanly_mergable(*state_dicts):
            raise ValidationError("Some state item is defined multiple times")
        state_dict = deep_merge(*state_dicts)
    else:
        raise TypeError("State definition must either be a mapping or a sequence")

    seen_keys = set(concat(d.keys() for d in state_dict.values()))
    bad_keys = seen_keys - set(["balance", "nonce", "storage", "code"])
    if bad_keys:
        raise ValidationError(
            "State definition contains the following invalid account fields: {}".format(
                ", ".join(bad_keys)
            )
        )

    return state_dict
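A usage sketch, assuming the helpers the function relies on (toolz assoc_in/deep_merge and friends) are in scope, with the hypothetical account values from the docstring:

state = state_definition_to_dict([
    ("0xaabb", "balance", 3),
    ("0xaabb", "storage", {4: 5}),
    ("0xbbcc", {"balance": 6, "nonce": 7}),
])
assert state == {
    "0xaabb": {"balance": 3, "storage": {4: 5}},
    "0xbbcc": {"balance": 6, "nonce": 7},
}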
Example #9
def test_ensure_update_eth1_vote_if_exists(genesis_state, config,
                                           vote_offsets):
    # half of the voting period slots is one vote short of a majority
    threshold = config.SLOTS_PER_ETH1_VOTING_PERIOD // 2
    data_votes = tuple(
        concat((Eth1Data(block_hash=(i).to_bytes(32, "little")), ) *
               (threshold + offset) for i, offset in enumerate(vote_offsets)))
    state = genesis_state

    for vote in data_votes:
        state = process_eth1_data(
            state, BeaconBlock(body=BeaconBlockBody(eth1_data=vote)), config)

    if not vote_offsets:
        assert state.eth1_data == genesis_state.eth1_data

    # we should update the 'latest' entry if we have a majority
    for offset in vote_offsets:
        if offset <= 0:
            assert genesis_state.eth1_data == state.eth1_data
        else:
            assert state.eth1_data == data_votes[0]
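The fixture construction is another concat idiom worth isolating: each Eth1Data entry is repeated (threshold + offset) times and the repetitions are flattened into one tuple of votes. Shape only, with integers standing in for Eth1Data:

from toolz import concat

threshold = 3
vote_offsets = (1, -1)
votes = tuple(concat((i,) * (threshold + off) for i, off in enumerate(vote_offsets)))
assert votes == (0, 0, 0, 0, 1, 1)  # vote 0 clears the threshold, vote 1 falls short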
Example #10
File: chain.py Project: nrryuya/trinity
    async def _process_receipts(
            self, peer: ETHPeer,
            all_headers: Tuple[BlockHeader, ...]) -> Tuple[BlockHeader, ...]:
        """
        Downloads and persists the receipts for the given set of block headers.
        Some receipts may be trivial, having a blank root hash, and will not be requested.

        :param peer: to issue the receipt request to
        :param all_headers: attempt to get receipts for as many of these headers as possible
        :return: the headers for receipts that were successfully downloaded (or were trivial)
        """
        # Post-Byzantium blocks may have identical receipt roots (e.g. when they have the same
        # number of transactions and all succeed/failed: ropsten blocks 2503212 and 2503284),
        # so we do this to avoid requesting the same receipts multiple times.

        # combine headers with the same receipt root, so we can mark them as completed, later
        receipt_root_to_headers = groupby(attrgetter('receipt_root'),
                                          all_headers)

        # Ignore headers that have an empty receipt root
        trivial_headers = tuple(
            receipt_root_to_headers.pop(BLANK_ROOT_HASH, tuple()))

        # pick one of the headers for each missing receipt root
        unique_headers_needed = tuple(
            first(headers)
            for root, headers in receipt_root_to_headers.items())

        if not unique_headers_needed:
            return trivial_headers

        receipt_bundles = await self._request_receipts(peer,
                                                       unique_headers_needed)

        if not receipt_bundles:
            return trivial_headers

        try:
            await self._validate_receipts(unique_headers_needed,
                                          receipt_bundles)
        except ValidationError as err:
            self.logger.info(
                "Disconnecting from %s: sent invalid receipt: %s",
                peer,
                err,
            )
            await peer.disconnect(DisconnectReason.bad_protocol)
            return trivial_headers

        # process all of the returned receipts, storing their trie data
        # dicts in the database
        receipts, trie_roots_and_data_dicts = zip(*receipt_bundles)
        receipt_roots, trie_data_dicts = zip(*trie_roots_and_data_dicts)
        for trie_data in trie_data_dicts:
            await self.wait(self.db.coro_persist_trie_data_dict(trie_data))

        # Identify which headers have the receipt roots that are now complete.
        completed_header_groups = tuple(
            headers for root, headers in receipt_root_to_headers.items()
            if root in receipt_roots)
        newly_completed_headers = tuple(concat(completed_header_groups))

        self.logger.debug(
            "Got receipts for %d/%d headers from %s, with %d trivial headers",
            len(newly_completed_headers),
            len(all_headers) - len(trivial_headers),
            peer,
            len(trivial_headers),
        )
        return newly_completed_headers + trivial_headers
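The dedup-then-regroup bookkeeping is the part worth a standalone sketch: groupby buckets headers by receipt root, first picks one representative per root to request, and concat flattens the buckets whose roots came back. With hypothetical namedtuple headers, assuming only toolz and the standard library:

from collections import namedtuple
from operator import attrgetter
from toolz import concat, first, groupby

Header = namedtuple("Header", "number receipt_root")
headers = (Header(1, "r1"), Header(2, "r1"), Header(3, "r2"))

by_root = groupby(attrgetter("receipt_root"), headers)
unique_needed = tuple(first(group) for group in by_root.values())  # one request per root
fetched_roots = ("r1",)  # pretend only r1's receipts arrived
completed = tuple(concat(by_root[root] for root in fetched_roots))
assert [h.number for h in completed] == [1, 2]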
Example #11
    def _persist_header_chain(
        cls,
        db: DatabaseAPI,
        headers: Iterable[BlockHeaderAPI],
        genesis_parent_hash: Hash32,
    ) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
        headers_iterator = iter(headers)

        try:
            first_header = first(headers_iterator)
        except StopIteration:
            return tuple(), tuple()

        is_genesis = first_header.parent_hash == genesis_parent_hash
        if not is_genesis and not cls._header_exists(db,
                                                     first_header.parent_hash):
            raise ParentNotFound(
                f"Cannot persist block header ({encode_hex(first_header.hash)}) "
                f"with unknown parent ({encode_hex(first_header.parent_hash)})"
            )

        if is_genesis:
            score = 0
        else:
            score = cls._get_score(db, first_header.parent_hash)

        curr_chain_head = first_header
        db.set(
            curr_chain_head.hash,
            rlp.encode(curr_chain_head),
        )
        score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        base_gaps = cls._get_header_chain_gaps(db)
        gap_info = cls._update_header_chain_gaps(db, curr_chain_head,
                                                 base_gaps)
        gaps = cls._handle_gap_change(db, gap_info, curr_chain_head,
                                      genesis_parent_hash)

        orig_headers_seq = concat([(first_header, ), headers_iterator])
        for parent, child in sliding_window(2, orig_headers_seq):
            if parent.hash != child.parent_hash:
                raise ValidationError(
                    f"Non-contiguous chain. Expected {encode_hex(child.hash)} "
                    f"to have {encode_hex(parent.hash)} as parent "
                    f"but was {encode_hex(child.parent_hash)}")

            curr_chain_head = child
            db.set(
                curr_chain_head.hash,
                rlp.encode(curr_chain_head),
            )

            score = cls._set_hash_scores_to_db(db, curr_chain_head, score)
            gap_info = cls._update_header_chain_gaps(db, curr_chain_head, gaps)
            gaps = cls._handle_gap_change(db, gap_info, curr_chain_head,
                                          genesis_parent_hash)
        try:
            previous_canonical_head = cls._get_canonical_head_hash(db)
            head_score = cls._get_score(db, previous_canonical_head)
        except CanonicalHeadNotFound:
            return cls._set_as_canonical_chain_head(db, curr_chain_head,
                                                    genesis_parent_hash)

        if score > head_score:
            return cls._set_as_canonical_chain_head(db, curr_chain_head,
                                                    genesis_parent_hash)

        return tuple(), tuple()