Example 1
    def _persist_uncles(db: DatabaseAPI,
                        uncles: Tuple[BlockHeaderAPI, ...]) -> Hash32:

        uncles_hash = keccak(rlp.encode(uncles))
        db.set(uncles_hash,
               rlp.encode(uncles, sedes=rlp.sedes.CountableList(HeaderSedes)))
        return cast(Hash32, uncles_hash)
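A counterpart read is shown below only as a hedged sketch: the helper name and its error handling are assumptions, not part of this listing. It decodes the stored blob with the same CountableList(HeaderSedes) sedes used to write it.

def _get_uncles(db: DatabaseAPI, uncles_hash: Hash32) -> Tuple[BlockHeaderAPI, ...]:
    # Assumed reverse lookup: fetch the RLP blob written by _persist_uncles and
    # decode it with the sedes that encoded it. A missing key raises KeyError
    # here; a real implementation may translate that into a domain error.
    encoded_uncles = db[uncles_hash]
    return tuple(rlp.decode(encoded_uncles, sedes=rlp.sedes.CountableList(HeaderSedes)))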
Example 2
 def _add_block_hash_tree_root_to_signing_root_lookup(
     db: DatabaseAPI, block: BaseBeaconBlock
 ) -> None:
     key = SchemaV1.make_block_hash_tree_root_to_signing_root_lookup_key(
         block.hash_tree_root
     )
     db.set(key, block.signing_root)
Example 3
    def _update_header_chain_gaps(cls,
                                  db: DatabaseAPI,
                                  persisting_header: BlockHeaderAPI,
                                  base_gaps: ChainGaps = None) -> GapInfo:
        # The only reason we overwrite this here is to be able to detect when the HeaderDB
        # de-canonicalizes an uncle that should cause us to re-open a block gap.
        gap_change, gaps = super()._update_header_chain_gaps(
            db, persisting_header, base_gaps)

        if gap_change is not GapChange.NoChange or persisting_header.block_number == 0:
            return gap_change, gaps

        # We have written a header for a block number that we already had a header for.
        # This might be a sign of a de-canonicalized uncle.
        current_gaps = cls._get_chain_gaps(db)
        if not is_block_number_in_gap(persisting_header.block_number,
                                      current_gaps):
            # ChainDB believes we have that block. If the header has changed, we need to re-open
            # a gap for the corresponding block.
            old_canonical_header = cls._get_canonical_block_header_by_number(
                db, persisting_header.block_number)
            if old_canonical_header != persisting_header:
                updated_gaps = reopen_gap(persisting_header.block_number,
                                          current_gaps)
                db.set(SchemaV1.make_chain_gaps_lookup_key(),
                       rlp.encode(updated_gaps, sedes=chain_gaps))

        return gap_change, gaps
Example 4
 def _add_block_slot_to_root_lookup(db: DatabaseAPI, block: BaseBeaconBlock) -> None:
     """
     Set a record in the database to allow looking up this block by its
     block slot.
     """
     block_slot_to_root_key = SchemaV1.make_block_slot_to_root_lookup_key(block.slot)
     db.set(block_slot_to_root_key, block.message.hash_tree_root)
Example 5
 def _persist_canonical_epoch_info(db: DatabaseAPI, state: BeaconState) -> None:
     epoch_info = EpochInfo(
         previous_justified_checkpoint=state.previous_justified_checkpoint,
         current_justified_checkpoint=state.current_justified_checkpoint,
         finalized_checkpoint=state.finalized_checkpoint,
     )
     db.set(SchemaV1.make_canonical_epoch_info_lookup_key(), ssz.encode(epoch_info))
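The stored record can be read back by decoding with the EpochInfo container as the sedes; a minimal sketch under that assumption (helper name assumed):

def _get_canonical_epoch_info(db: DatabaseAPI) -> EpochInfo:
    # Assumed counterpart: decode the SSZ blob written above. A missing key
    # raises KeyError here; a real implementation may raise a domain error.
    encoded = db[SchemaV1.make_canonical_epoch_info_lookup_key()]
    return ssz.decode(encoded, EpochInfo)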
Example 6
    def _persist_header_chain(
        cls, db: DatabaseAPI, headers: Iterable[BlockHeaderAPI]
    ) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
        headers_iterator = iter(headers)

        try:
            first_header = first(headers_iterator)
        except StopIteration:
            return tuple(), tuple()

        is_genesis = first_header.parent_hash == GENESIS_PARENT_HASH
        if not is_genesis and not cls._header_exists(db,
                                                     first_header.parent_hash):
            raise ParentNotFound(
                "Cannot persist block header ({}) with unknown parent ({})".
                format(encode_hex(first_header.hash),
                       encode_hex(first_header.parent_hash)))

        if is_genesis:
            score = 0
        else:
            score = cls._get_score(db, first_header.parent_hash)

        curr_chain_head = first_header
        db.set(
            curr_chain_head.hash,
            rlp.encode(curr_chain_head),
        )
        score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        orig_headers_seq = concat([(first_header, ), headers_iterator])
        for parent, child in sliding_window(2, orig_headers_seq):
            if parent.hash != child.parent_hash:
                raise ValidationError(
                    "Non-contiguous chain. Expected {} to have {} as parent but was {}"
                    .format(
                        encode_hex(child.hash),
                        encode_hex(parent.hash),
                        encode_hex(child.parent_hash),
                    ))

            curr_chain_head = child
            db.set(
                curr_chain_head.hash,
                rlp.encode(curr_chain_head),
            )

            score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        try:
            previous_canonical_head = cls._get_canonical_head(db).hash
            head_score = cls._get_score(db, previous_canonical_head)
        except CanonicalHeadNotFound:
            return cls._set_as_canonical_chain_head(db, curr_chain_head.hash)

        if score > head_score:
            return cls._set_as_canonical_chain_head(db, curr_chain_head.hash)

        return tuple(), tuple()
Example 7
 def _add_block_root_to_slot_lookup(db: DatabaseAPI, block: BaseBeaconBlock) -> None:
     """
     Set a record in the database to allow looking up the slot number by its
     block root.
     """
     block_root_to_slot_key = SchemaV1.make_block_root_to_slot_lookup_key(
         block.message.hash_tree_root
     )
     db.set(block_root_to_slot_key, ssz.encode(block.slot, sedes=ssz.sedes.uint64))
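The reverse lookup decodes the slot with the same uint64 sedes; a hedged sketch (helper name assumed):

def _get_block_slot_by_root(db: DatabaseAPI, block_root: Hash32) -> int:
    # Assumed counterpart: read the SSZ-encoded slot written above.
    encoded_slot = db[SchemaV1.make_block_root_to_slot_lookup_key(block_root)]
    return ssz.decode(encoded_slot, sedes=ssz.sedes.uint64)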
Example 8
 def _persist_checkpoint_header(cls, db: DatabaseAPI,
                                header: BlockHeaderAPI, score: int) -> None:
     db.set(
         header.hash,
         rlp.encode(header),
     )
     previous_score = score - header.difficulty
     cls._set_hash_scores_to_db(db, header, previous_score)
     cls._set_as_canonical_chain_head(db, header.hash, header.parent_hash)
Example 9
 def _remove_transaction_from_canonical_chain(
         db: DatabaseAPI, transaction_hash: Hash32) -> None:
     """
     Removes the transaction specified by the given hash from the canonical
     chain.
     """
     db.delete(
         SchemaV1.make_transaction_hash_to_block_lookup_key(
             transaction_hash))
Example 10
    def test_database_api_delete(self, db: DatabaseAPI) -> None:
        db[b'key-1'] = b'value-1'

        assert b'key-1' in db

        db.delete(b'key-1')

        assert not db.exists(b'key-1')
        assert b'key-1' not in db
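Tests like this rely only on the mapping-style surface of DatabaseAPI (item access, __contains__, exists, set, delete). A dict-backed stand-in, given as an illustrative sketch rather than the project's real in-memory database, is enough to run them:

class DictDB:
    """Illustrative in-memory database; not the library's implementation."""

    def __init__(self) -> None:
        self._data: dict = {}

    def set(self, key: bytes, value: bytes) -> None:
        self._data[key] = value

    def __setitem__(self, key: bytes, value: bytes) -> None:
        self.set(key, value)

    def __getitem__(self, key: bytes) -> bytes:
        return self._data[key]

    def __contains__(self, key: bytes) -> bool:
        return key in self._data

    def exists(self, key: bytes) -> bool:
        return key in self._data

    def delete(self, key: bytes) -> None:
        # Deleting a missing key is a no-op, matching the behaviour exercised
        # by test_database_api_delete_missing_key later in this listing.
        self._data.pop(key, None)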
Example 11
    def _set_hash_scores_to_db(cls, db: DatabaseAPI, header: BlockHeaderAPI,
                               score: int) -> int:
        new_score = score + header.difficulty

        db.set(
            SchemaV1.make_block_hash_to_score_lookup_key(header.hash),
            rlp.encode(new_score, sedes=rlp.sedes.big_endian_int),
        )

        return new_score
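Reading a score back decodes with the same big_endian_int sedes; a hedged sketch of the reverse helper (name and error handling assumed):

def _get_score(db: DatabaseAPI, block_hash: Hash32) -> int:
    # Assumed counterpart: load the RLP-encoded score written above. A missing
    # key raises KeyError here; a real helper may raise HeaderNotFound instead.
    encoded_score = db[SchemaV1.make_block_hash_to_score_lookup_key(block_hash)]
    return rlp.decode(encoded_score, sedes=rlp.sedes.big_endian_int)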
Example 12
 def _add_attestations_root_to_block_lookup(db: DatabaseAPI,
                                            block: BaseBeaconBlock) -> None:
     root = block.signing_root
     for index, attestation in enumerate(block.body.attestations):
         attestation_key = AttestationKey(root, index)
         db.set(
             SchemaV1.make_attestation_root_to_block_lookup_key(
                 attestation.hash_tree_root),
             ssz.encode(attestation_key),
         )
Example 13
 def _set_block_score_to_db(db: DatabaseAPI, block: BaseBeaconBlock,
                            score: int) -> int:
     # NOTE if we change the score serialization, we will likely need to
     # patch up the fork choice logic.
     # For now, we treat the score serialization as fixed.
     db.set(
         SchemaV1.make_block_root_to_score_lookup_key(block.signing_root),
         ssz.encode(score, sedes=ssz.sedes.uint64),
     )
     return score
Example 14
 def _set_block_score_to_db(
     db: DatabaseAPI, block: BaseBeaconBlock, score: BaseScore
 ) -> BaseScore:
     # NOTE if we change the score serialization, we will likely need to
     # patch up the fork choice logic.
     # For now, we treat the score serialization as fixed.
     db.set(
         SchemaV1.make_block_root_to_score_lookup_key(block.message.hash_tree_root),
         score.serialize(),
     )
     return score
Example 15
 def _add_block_number_to_hash_lookup(db: DatabaseAPI,
                                      header: BlockHeaderAPI) -> None:
     """
     Sets a record in the database to allow looking up this header by its
     block number.
     """
     block_number_to_hash_key = SchemaV1.make_block_number_to_hash_lookup_key(
         header.block_number)
     db.set(
         block_number_to_hash_key,
         rlp.encode(header.hash, sedes=rlp.sedes.binary),
     )
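A matching read decodes the stored hash with rlp.sedes.binary; sketched here on the assumption that the helper mirrors the other canonical-lookup helpers in this listing:

def _get_canonical_block_hash(db: DatabaseAPI, block_number: BlockNumber) -> Hash32:
    # Assumed counterpart to the lookup written above; KeyError propagates for
    # a block number with no canonical hash recorded.
    key = SchemaV1.make_block_number_to_hash_lookup_key(block_number)
    return cast(Hash32, rlp.decode(db[key], sedes=rlp.sedes.binary))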
Example 16
 def _add_block_slot_to_root_lookup(db: DatabaseAPI,
                                    block: BaseBeaconBlock) -> None:
     """
     Set a record in the database to allow looking up this block by its
     block slot.
     """
     block_slot_to_root_key = SchemaV1.make_block_slot_to_root_lookup_key(
         block.slot)
     db.set(
         block_slot_to_root_key,
         ssz.encode(block.signing_root, sedes=ssz.sedes.bytes32),
     )
Example 17
    def _decanonicalize_descendant_orphans(
            cls,
            db: DatabaseAPI,
            header: BlockHeaderAPI,
            checkpoints: Tuple[Hash32, ...]) -> None:

        # Determine if any children need to be de-canonicalized because they are not children of
        #   the new chain head
        new_gaps = starting_gaps = cls._get_header_chain_gaps(db)

        child_number = BlockNumber(header.block_number + 1)
        try:
            child = cls._get_canonical_block_header_by_number(db, child_number)
        except HeaderNotFound:
            # There is no canonical block here
            next_invalid_child = None
        else:
            if child.parent_hash != header.hash:
                if child.hash in checkpoints:
                    raise CheckpointsMustBeCanonical(
                        f"Trying to decanonicalize {child} while making {header} the chain tip"
                    )
                else:
                    next_invalid_child = child
            else:
                next_invalid_child = None

        while next_invalid_child:
            # decanonicalize, and add gap for tracking
            db.delete(SchemaV1.make_block_number_to_hash_lookup_key(child_number))
            new_gaps = reopen_gap(child_number, new_gaps)

            # find next child
            child_number = BlockNumber(child_number + 1)
            try:
                # All contiguous children must now be made invalid
                next_invalid_child = cls._get_canonical_block_header_by_number(db, child_number)
            except HeaderNotFound:
                # Found the end of this streak of canonical blocks
                break
            else:
                if next_invalid_child.hash in checkpoints:
                    raise CheckpointsMustBeCanonical(
                        f"Trying to decanonicalize {next_invalid_child} while making {header} the"
                        " chain tip"
                    )

        if new_gaps != starting_gaps:
            db.set(
                SchemaV1.make_header_chain_gaps_lookup_key(),
                rlp.encode(new_gaps, sedes=chain_gaps)
            )
Example 18
    def _persist_checkpoint_header(
            cls,
            db: DatabaseAPI,
            header: BlockHeaderAPI,
            score: int
    ) -> None:
        db.set(
            header.hash,
            rlp.encode(header),
        )

        # Add new checkpoint header
        previous_checkpoints = cls._get_checkpoints(db)
        new_checkpoints = previous_checkpoints + (header.hash,)
        db.set(
            SchemaV1.make_checkpoint_headers_key(),
            b''.join(new_checkpoints),
        )

        previous_score = score - header.difficulty
        cls._set_hash_scores_to_db(db, header, previous_score)
        cls._set_as_canonical_chain_head(db, header, GENESIS_PARENT_HASH)
        _, gaps = cls._update_header_chain_gaps(db, header)

        # check if the parent block number exists, and is not a match for checkpoint.parent_hash
        parent_block_num = BlockNumber(header.block_number - 1)
        try:
            parent_hash = cls._get_canonical_block_hash(db, parent_block_num)
        except HeaderNotFound:
            # no parent to check
            pass
        else:
            # User is asserting that the checkpoint must be canonical, so if the parent doesn't
            # match, then the parent must not be canonical, and should be de-canonicalized.
            if parent_hash != header.parent_hash:
                # does the correct header exist in the database?
                try:
                    true_parent = cls._get_block_header_by_hash(db, header.parent_hash)
                except HeaderNotFound:
                    # True parent unavailable, just delete the now non-canonical one
                    cls._decanonicalize_single(db, parent_block_num, gaps)
                else:
                    # True parent should have already been canonicalized during
                    #   _set_as_canonical_chain_head()
                    raise ValidationError(
                        f"Why was a non-matching parent header {parent_hash!r} left as canonical "
                        f"after _set_as_canonical_chain_head() and {true_parent} is available?"
                    )

        cls._decanonicalize_descendant_orphans(db, header, new_checkpoints)
Example 19
 def _add_transaction_to_canonical_chain(db: DatabaseAPI,
                                         transaction_hash: Hash32,
                                         block_header: BlockHeaderAPI,
                                         index: int) -> None:
     """
     :param bytes transaction_hash: the hash of the transaction to add the lookup for
     :param block_header: The header of the block with the txn that is in the canonical chain
     :param int index: the position of the transaction in the block
     - add lookup from transaction hash to the block number and index that the body is stored at
     - remove transaction hash to body lookup in the pending pool
     """
     transaction_key = TransactionKey(block_header.block_number, index)
     db.set(
         SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash),
         rlp.encode(transaction_key),
     )
Example 20
    def _decanonicalize_single(cls, db: DatabaseAPI, block_num: BlockNumber,
                               base_gaps: ChainGaps) -> ChainGaps:
        """
        A single block number was found to no longer be canonical. At the time of
        writing, this only happens because it does not link up with a checkpoint header.
        So de-canonicalize this block number and insert a gap in the tracked
        chain gaps.
        """

        db.delete(SchemaV1.make_block_number_to_hash_lookup_key(block_num))

        new_gaps = reopen_gap(block_num, base_gaps)
        if new_gaps != base_gaps:
            db.set(SchemaV1.make_header_chain_gaps_lookup_key(),
                   rlp.encode(new_gaps, sedes=chain_gaps))
        return new_gaps
Example 21
    def _update_chain_gaps(cls,
                           db: DatabaseAPI,
                           persisted_block: BlockAPI,
                           base_gaps: ChainGaps = None) -> GapInfo:

        # If we make many updates in a row, we can avoid reloading the integrity info by
        # continuously caching it and providing it as a parameter to this API
        if base_gaps is None:
            base_gaps = cls._get_chain_gaps(db)

        gap_change, gaps = fill_gap(persisted_block.number, base_gaps)
        if gap_change is not GapChange.NoChange:
            db.set(SchemaV1.make_chain_gaps_lookup_key(),
                   rlp.encode(gaps, sedes=chain_gaps))

        return gap_change, gaps
Example 22
 def _get_checkpoints(cls, db: DatabaseAPI) -> Tuple[Hash32, ...]:
     concatenated_checkpoints = db.get(
         SchemaV1.make_checkpoint_headers_key())
     if concatenated_checkpoints is None:
         return ()
     else:
         return tuple(
             Hash32(concatenated_checkpoints[index:index + 32])
             for index in range(0, len(concatenated_checkpoints), 32))
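The packing this relies on is plain concatenation of 32-byte roots (see the write side in Example 18). A tiny round-trip illustration with made-up values:

packed = b''.join((b'\x11' * 32, b'\x22' * 32))
roots = tuple(packed[index:index + 32] for index in range(0, len(packed), 32))
assert roots == (b'\x11' * 32, b'\x22' * 32)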
Example 23
    def _set_as_canonical_chain_head(
        cls,
        db: DatabaseAPI,
        block_hash: Hash32,
    ) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI,
                                                 ...]]:  # noqa: E501
        try:
            header = cls._get_block_header_by_hash(db, block_hash)
        except HeaderNotFound:
            raise ValueError(
                "Cannot use unknown block hash as canonical head: {}".format(
                    block_hash))

        new_canonical_headers = tuple(
            reversed(cls._find_new_ancestors(db, header)))
        old_canonical_headers = []

        # remove transaction lookups for blocks that are no longer canonical
        for h in new_canonical_headers:
            try:
                old_hash = cls._get_canonical_block_hash(db, h.block_number)
            except HeaderNotFound:
                # no old block, and no more possible
                break
            else:
                old_header = cls._get_block_header_by_hash(db, old_hash)
                old_canonical_headers.append(old_header)
                try:
                    for transaction_hash in cls._get_block_transaction_hashes(
                            db, old_header):
                        cls._remove_transaction_from_canonical_chain(
                            db, transaction_hash)
                except MissingTrieNode:
                    # If the transactions were never stored for the (now) non-canonical chain,
                    #   then you don't need to remove them from the canonical chain lookup.
                    pass

        for h in new_canonical_headers:
            cls._add_block_number_to_hash_lookup(db, h)

        db.set(SchemaV1.make_canonical_head_hash_lookup_key(), header.hash)

        return new_canonical_headers, tuple(old_canonical_headers)
Example 24
    def _set_as_canonical_chain_head(
        cls,
        db: DatabaseAPI,
        block_root: SigningRoot,
        block_class: Type[BaseBeaconBlock],
    ) -> Tuple[Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
        """
        Set the canonical chain HEAD to the block as specified by the
        given block root.

        :return: a tuple of the blocks that are newly in the canonical chain, and the blocks that
            are no longer in the canonical chain
        """
        try:
            block = cls._get_block_by_root(db, block_root, block_class)
        except BlockNotFound:
            raise ValueError(
                "Cannot use unknown block root as canonical head: {block_root.hex()}"
            )

        new_canonical_blocks = tuple(
            reversed(cls._find_new_ancestors(db, block, block_class)))
        old_canonical_blocks = []

        for block in new_canonical_blocks:
            try:
                old_canonical_root = cls._get_canonical_block_root(
                    db, block.slot)
            except BlockNotFound:
                # no old_canonical block, and no more possible
                break
            else:
                old_canonical_block = cls._get_block_by_root(
                    db, old_canonical_root, block_class)
                old_canonical_blocks.append(old_canonical_block)

        for block in new_canonical_blocks:
            cls._add_block_slot_to_root_lookup(db, block)

        db.set(SchemaV1.make_canonical_head_root_lookup_key(),
               block.signing_root)

        return new_canonical_blocks, tuple(old_canonical_blocks)
Example 25
    def _set_as_canonical_chain_head(
        cls,
        db: DatabaseAPI,
        block_hash: Hash32,
        genesis_parent_hash: Hash32,
    ) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
        """
        Sets the canonical chain HEAD to the block header as specified by the
        given block hash.

        :return: a tuple of the headers that are newly in the canonical chain, and the headers that
            are no longer in the canonical chain
        """
        try:
            header = cls._get_block_header_by_hash(db, block_hash)
        except HeaderNotFound:
            raise ValueError(
                "Cannot use unknown block hash as canonical head: {}".format(
                    block_hash))

        new_canonical_headers = tuple(
            reversed(cls._find_new_ancestors(db, header, genesis_parent_hash)))
        old_canonical_headers = []

        for h in new_canonical_headers:
            try:
                old_canonical_hash = cls._get_canonical_block_hash(
                    db, h.block_number)
            except HeaderNotFound:
                # no old_canonical block, and no more possible
                break
            else:
                old_canonical_header = cls._get_block_header_by_hash(
                    db, old_canonical_hash)
                old_canonical_headers.append(old_canonical_header)

        for h in new_canonical_headers:
            cls._add_block_number_to_hash_lookup(db, h)

        db.set(SchemaV1.make_canonical_head_hash_lookup_key(), header.hash)

        return new_canonical_headers, tuple(old_canonical_headers)
Example 26
    def _set_as_canonical_chain_head(
        cls,
        db: DatabaseAPI,
        header: BlockHeaderAPI,
        genesis_parent_hash: Hash32,
    ) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
        """
        Sets the canonical chain HEAD to the given block header.

        :return: a tuple of the headers that are newly in the canonical chain, and the headers that
            are no longer in the canonical chain
        :raises CheckpointsMustBeCanonical: if trying to set a head that would
            de-canonicalize a checkpoint
        """
        try:
            current_canonical_head = cls._get_canonical_head_hash(db)
        except CanonicalHeadNotFound:
            current_canonical_head = None

        new_canonical_headers: Tuple[BlockHeaderAPI, ...]
        old_canonical_headers: Tuple[BlockHeaderAPI, ...]

        if current_canonical_head and header.parent_hash == current_canonical_head:
            # the calls to _find_new_ancestors and _find_headers_to_decanonicalize are
            # relatively expensive, it's better to skip them in this case, where we're
            # extending the canonical chain by a header
            new_canonical_headers = (header, )
            old_canonical_headers = ()
            cls._add_block_number_to_hash_lookup(db, header)
        else:
            (
                new_canonical_headers,
                old_canonical_headers,
            ) = cls._canonicalize_header(db, header, genesis_parent_hash)

        db.set(SchemaV1.make_canonical_head_hash_lookup_key(), header.hash)

        return new_canonical_headers, old_canonical_headers
Example 27
    def _persist_block_chain(
        cls,
        db: DatabaseAPI,
        blocks: Iterable[BaseBeaconBlock],
        block_class: Type[BaseBeaconBlock],
        fork_choice_scorings: Iterable[ForkChoiceScoringFn],
    ) -> Tuple[Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
        blocks_iterator = iter(blocks)
        scorings_iterator = iter(fork_choice_scorings)

        try:
            first_block = first(blocks_iterator)
            first_scoring = first(scorings_iterator)
        except StopIteration:
            return tuple(), tuple()

        try:
            previous_canonical_head = cls._get_canonical_head(
                db, block_class).signing_root
            head_score = cls._get_score(db, previous_canonical_head)
        except CanonicalHeadNotFound:
            no_canonical_head = True
        else:
            no_canonical_head = False

        is_genesis = first_block.is_genesis
        if not is_genesis and not cls._block_exists(db,
                                                    first_block.parent_root):
            raise ParentNotFound(
                "Cannot persist block ({}) with unknown parent ({})".format(
                    encode_hex(first_block.signing_root),
                    encode_hex(first_block.parent_root),
                ))

        score = first_scoring(first_block)

        curr_block_head = first_block
        db.set(curr_block_head.signing_root, ssz.encode(curr_block_head))
        cls._add_block_root_to_slot_lookup(db, curr_block_head)
        cls._set_block_score_to_db(db, curr_block_head, score)
        cls._add_attestations_root_to_block_lookup(db, curr_block_head)

        orig_blocks_seq = concat([(first_block, ), blocks_iterator])

        for parent, child in sliding_window(2, orig_blocks_seq):
            if parent.signing_root != child.parent_root:
                raise ValidationError(
                    "Non-contiguous chain. Expected {} to have {} as parent but was {}"
                    .format(
                        encode_hex(child.signing_root),
                        encode_hex(parent.signing_root),
                        encode_hex(child.parent_root),
                    ))

            curr_block_head = child
            db.set(curr_block_head.signing_root, ssz.encode(curr_block_head))
            cls._add_block_root_to_slot_lookup(db, curr_block_head)
            cls._add_attestations_root_to_block_lookup(db, curr_block_head)

            # NOTE: len(scorings_iterator) should equal len(blocks_iterator)
            try:
                next_scoring = next(scorings_iterator)
            except StopIteration:
                raise MissingForkChoiceScoringFns

            score = next_scoring(curr_block_head)
            cls._set_block_score_to_db(db, curr_block_head, score)

        if no_canonical_head:
            return cls._set_as_canonical_chain_head(
                db, curr_block_head.signing_root, block_class)

        if score > head_score:
            return cls._set_as_canonical_chain_head(
                db, curr_block_head.signing_root, block_class)
        else:
            return tuple(), tuple()
Example 28
 def test_database_api_delete_missing_key(self, db: DatabaseAPI) -> None:
     assert b'key-1' not in db
     db.delete(b'key-1')
Example 29
    def _persist_header_chain(
        cls,
        db: DatabaseAPI,
        headers: Iterable[BlockHeaderAPI],
        genesis_parent_hash: Hash32,
    ) -> Tuple[Tuple[BlockHeaderAPI, ...], Tuple[BlockHeaderAPI, ...]]:
        headers_iterator = iter(headers)

        try:
            first_header = first(headers_iterator)
        except StopIteration:
            return tuple(), tuple()

        is_genesis = first_header.parent_hash == genesis_parent_hash
        if not is_genesis and not cls._header_exists(db,
                                                     first_header.parent_hash):
            raise ParentNotFound(
                f"Cannot persist block header ({encode_hex(first_header.hash)}) "
                f"with unknown parent ({encode_hex(first_header.parent_hash)})"
            )

        if is_genesis:
            score = 0
        else:
            score = cls._get_score(db, first_header.parent_hash)

        curr_chain_head = first_header
        db.set(
            curr_chain_head.hash,
            rlp.encode(curr_chain_head),
        )
        score = cls._set_hash_scores_to_db(db, curr_chain_head, score)

        base_gaps = cls._get_header_chain_gaps(db)
        gap_info = cls._update_header_chain_gaps(db, curr_chain_head,
                                                 base_gaps)
        gaps = cls._handle_gap_change(db, gap_info, curr_chain_head,
                                      genesis_parent_hash)

        orig_headers_seq = concat([(first_header, ), headers_iterator])
        for parent, child in sliding_window(2, orig_headers_seq):
            if parent.hash != child.parent_hash:
                raise ValidationError(
                    f"Non-contiguous chain. Expected {encode_hex(child.hash)} "
                    f"to have {encode_hex(parent.hash)} as parent "
                    f"but was {encode_hex(child.parent_hash)}")

            curr_chain_head = child
            db.set(
                curr_chain_head.hash,
                rlp.encode(curr_chain_head),
            )

            score = cls._set_hash_scores_to_db(db, curr_chain_head, score)
            gap_info = cls._update_header_chain_gaps(db, curr_chain_head, gaps)
            gaps = cls._handle_gap_change(db, gap_info, curr_chain_head,
                                          genesis_parent_hash)
        try:
            previous_canonical_head = cls._get_canonical_head_hash(db)
            head_score = cls._get_score(db, previous_canonical_head)
        except CanonicalHeadNotFound:
            return cls._set_as_canonical_chain_head(db, curr_chain_head,
                                                    genesis_parent_hash)

        if score > head_score:
            return cls._set_as_canonical_chain_head(db, curr_chain_head,
                                                    genesis_parent_hash)

        return tuple(), tuple()
Example 30
 def _write_signed_block(cls, db: DatabaseAPI,
                         block: BaseSignedBeaconBlock) -> None:
     # TODO key by block root or signed block root?
     db.set(block.message.hash_tree_root, ssz.encode(block))
     cls._add_block_root_to_slot_lookup(db, block)
     cls._add_attestations_root_to_block_lookup(db, block.message)
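A read path for the same record, given only as a hedged sketch (helper name assumed; the TODO above suggests the keying scheme may still change), decodes the stored SSZ blob with the signed-block class:

def _read_signed_block(db: DatabaseAPI, block_root: Hash32,
                       block_class: Type[BaseSignedBeaconBlock]) -> BaseSignedBeaconBlock:
    # Assumed counterpart to _write_signed_block: the blob was keyed by
    # block.message.hash_tree_root and encoded with ssz.encode(block).
    return ssz.decode(db[block_root], block_class)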