def _set_block_scores_to_db(cls, db: BaseDB, block: BaseBeaconBlock) -> None:
    """Record the fork-choice score for ``block`` under its root."""
    # TODO: stub scoring until the fork choice rule is implemented — the
    # block's slot stands in for its score.
    lookup_key = SchemaV1.make_block_root_to_score_lookup_key(block.root)
    db.set(
        lookup_key,
        rlp.encode(block.slot, sedes=rlp.sedes.big_endian_int),
    )
def _set_block_scores_to_db(cls, db: BaseDB, block: BaseBeaconBlock) -> int:
    """Record the fork-choice score for ``block`` (keyed by its signed root)
    and return the score that was written."""
    # TODO: stub until fork choice is implemented — slot doubles as score.
    stub_score = block.slot
    lookup_key = SchemaV1.make_block_root_to_score_lookup_key(block.signed_root)
    db.set(lookup_key, ssz.encode(stub_score, sedes=ssz.sedes.uint64))
    return stub_score
def _set_deletatable_state(
        db: BaseDB,
        deletable_state_roots: Iterable[Hash32]) -> None:
    """Persist the collection of state roots that are eligible for deletion.

    (The "deletatable" typo in the name is kept intentionally: callers
    reference the function by this exact name.)
    """
    encoded = rlp.encode(deletable_state_roots, sedes=CountableList(hash32))
    db.set(SchemaV1.make_deletable_state_roots_lookup_key(), encoded)
def _set_hash_scores_to_db(cls, db: BaseDB, header: BlockHeader, score: int) -> int:
    """Store the cumulative difficulty score for ``header`` and return it.

    ``score`` is the parent's total; the header's own difficulty is added
    before the value is written.
    """
    total_score = score + header.difficulty
    db.set(
        SchemaV1.make_block_hash_to_score_lookup_key(header.hash),
        rlp.encode(total_score, sedes=rlp.sedes.big_endian_int),
    )
    return total_score
def _add_attestations_root_to_block_lookup(db: BaseDB, block: BaseBeaconBlock) -> None:
    """Index every attestation in ``block`` by its hash tree root, mapping it
    back to the containing block's signing root and its position in the body."""
    block_root = block.signing_root
    for position, attestation in enumerate(block.body.attestations):
        lookup_key = SchemaV1.make_attestation_root_to_block_lookup_key(
            attestation.hash_tree_root)
        db.set(lookup_key, ssz.encode(AttestationKey(block_root, position)))
def _persist_block_chain(
        cls,
        db: BaseDB,
        blocks: Iterable[BaseBeaconBlock]
) -> Tuple[Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
    """
    Persist ``blocks`` and their (stub) scores; promote the chain to the
    canonical head when its accumulated score beats the current head's.

    :return: (blocks newly canonical, blocks no longer canonical); both
        empty when nothing was persisted or the chain does not win.
    :raises ValidationError: if the blocks are not parent→child contiguous.
    :raises ParentNotFound: if the first block's parent is unknown.
    """
    try:
        first_block = first(blocks)
    except StopIteration:
        # Empty input: nothing to persist.
        return tuple(), tuple()
    else:
        # NOTE(review): if ``blocks`` is a one-shot iterator, ``first``
        # above already consumed its first element, so the window check and
        # the persistence loop below would silently skip it — confirm
        # callers pass a re-iterable sequence.
        for parent, child in sliding_window(2, blocks):
            if parent.hash != child.parent_hash:
                raise ValidationError(
                    "Non-contiguous chain. Expected {} to have {} as parent but was {}"
                    .format(
                        encode_hex(child.hash),
                        encode_hex(parent.hash),
                        encode_hex(child.parent_hash),
                    ))
        is_genesis = first_block.parent_hash == GENESIS_PARENT_HASH
        if not is_genesis and not cls._block_exists(
                db, first_block.parent_hash):
            raise ParentNotFound(
                "Cannot persist block ({}) with unknown parent ({})".
                format(encode_hex(first_block.hash),
                       encode_hex(first_block.parent_hash)))
        if is_genesis:
            score = 0
        else:
            # Continue accumulating from the parent chain's score.
            score = cls._get_score(db, first_block.parent_hash)
        for block in blocks:
            db.set(
                block.hash,
                rlp.encode(block),
            )
            # TODO: It's a stub before we implement fork choice rule
            score += block.slot_number
            db.set(
                SchemaV1.make_block_hash_to_score_lookup_key(block.hash),
                rlp.encode(score, sedes=rlp.sedes.big_endian_int),
            )
        try:
            previous_canonical_head = cls._get_canonical_head(db).hash
            head_score = cls._get_score(db, previous_canonical_head)
        except CanonicalHeadNotFound:
            # No head yet: the persisted tip becomes canonical.
            return cls._set_as_canonical_chain_head(db, block.hash)
        if score > head_score:
            return cls._set_as_canonical_chain_head(db, block.hash)
        else:
            return tuple(), tuple()
def _persist_header_chain(
        cls,
        db: BaseDB,
        headers: Iterable[BlockHeader]
) -> Tuple[Tuple[BlockHeader, ...], Tuple[BlockHeader, ...]]:
    """
    Persist ``headers`` with cumulative difficulty scores; promote the chain
    to the canonical head when its total score beats the current head's.

    :return: (headers newly canonical, headers no longer canonical); both
        empty when nothing was persisted or the chain does not win.
    :raises ValidationError: if the headers are not parent→child contiguous.
    :raises ParentNotFound: if the first header's parent is unknown.
    """
    try:
        first_header = first(headers)
    except StopIteration:
        # Empty input: nothing to persist.
        return tuple(), tuple()
    else:
        # NOTE(review): if ``headers`` is a one-shot iterator, ``first``
        # above already consumed its first element, so the checks and the
        # persistence loop below would skip it — confirm callers pass a
        # re-iterable sequence.
        for parent, child in sliding_window(2, headers):
            if parent.hash != child.parent_hash:
                raise ValidationError(
                    "Non-contiguous chain. Expected {} to have {} as parent but was {}".format(
                        encode_hex(child.hash),
                        encode_hex(parent.hash),
                        encode_hex(child.parent_hash),
                    )
                )
        is_genesis = first_header.parent_hash == GENESIS_PARENT_HASH
        if not is_genesis and not cls._header_exists(db, first_header.parent_hash):
            raise ParentNotFound(
                "Cannot persist block header ({}) with unknown parent ({})".format(
                    encode_hex(first_header.hash),
                    encode_hex(first_header.parent_hash)))
        if is_genesis:
            score = 0
        else:
            # Continue accumulating from the parent chain's score.
            score = cls._get_score(db, first_header.parent_hash)
        for header in headers:
            db.set(
                header.hash,
                rlp.encode(header),
            )
            score += header.difficulty
            db.set(
                SchemaV1.make_block_hash_to_score_lookup_key(header.hash),
                rlp.encode(score, sedes=rlp.sedes.big_endian_int),
            )
        try:
            previous_canonical_head = cls._get_canonical_head(db).hash
            head_score = cls._get_score(db, previous_canonical_head)
        except CanonicalHeadNotFound:
            # No head yet: the persisted tip becomes canonical.
            return cls._set_as_canonical_chain_head(db, header.hash)
        if score > head_score:
            return cls._set_as_canonical_chain_head(db, header.hash)
        else:
            return tuple(), tuple()
def _add_block_slot_to_root_lookup(db: BaseDB, block: BaseBeaconBlock) -> None:
    """Record a slot → block-root mapping so the block can be looked up by
    its slot."""
    db.set(
        SchemaV1.make_block_slot_to_root_lookup_key(block.slot),
        ssz.encode(block.root, sedes=ssz.sedes.bytes_sedes),
    )
def _add_block_slot_to_hash_lookup(db: BaseDB, block: BaseBeaconBlock) -> None:
    """Record a slot-number → block-hash mapping so the block can be looked
    up by its slot."""
    lookup_key = SchemaV1.make_block_slot_to_hash_lookup_key(block.slot_number)
    db.set(lookup_key, rlp.encode(block.hash, sedes=rlp.sedes.binary))
def _add_block_root_to_slot_lookup(db: BaseDB, block: BaseBeaconBlock) -> None:
    """Record a block-root → slot mapping so the slot number can be looked
    up by block root."""
    lookup_key = SchemaV1.make_block_root_to_slot_lookup_key(block.root)
    db.set(lookup_key, rlp.encode(block.slot, sedes=rlp.sedes.big_endian_int))
def _add_block_root_to_slot_lookup(db: BaseDB, block: BaseBeaconBlock) -> None:
    """Record a signed-block-root → slot mapping so the slot number can be
    looked up by block root."""
    lookup_key = SchemaV1.make_block_root_to_slot_lookup_key(block.signed_root)
    db.set(lookup_key, ssz.encode(block.slot, sedes=ssz.sedes.uint64))
def _add_block_number_to_hash_lookup(db: BaseDB, header: BlockHeader) -> None:
    """Record a block-number → header-hash mapping so the header can be
    looked up by its block number."""
    lookup_key = SchemaV1.make_block_number_to_hash_lookup_key(
        header.block_number)
    db.set(lookup_key, rlp.encode(header.hash, sedes=rlp.sedes.binary))
def _set_block_score_to_db(
        db: BaseDB,
        block: BaseBeaconBlock,
        score: int,
) -> int:
    """Persist ``score`` for ``block`` (keyed by signing root) and return it.

    NOTE: the score serialization is treated as fixed for now; changing it
    would likely require patching the fork choice logic as well.
    """
    lookup_key = SchemaV1.make_block_root_to_score_lookup_key(
        block.signing_root)
    db.set(lookup_key, ssz.encode(score, sedes=ssz.sedes.uint64))
    return score
def _add_crystallized_to_active_state_lookup(
        cls, db: BaseDB, active_state: ActiveState,
        crystallized_state_root: Hash32) -> None:
    """Record a crystallized-state-root → active-state-hash mapping so the
    active state can be looked up from its crystallized state root."""
    lookup_key = SchemaV1.make_crystallized_to_active_state_root_lookup_key(
        crystallized_state_root,
    )
    db.set(lookup_key, rlp.encode(active_state.hash, sedes=rlp.sedes.binary))
def _get_deletable_state_roots(db: BaseDB) -> Tuple[Hash32, ...]:
    """
    Return the stored deletable state roots, initializing the record to an
    empty list on first access.

    Fixed: the return annotation was ``Tuple[Hash32]`` (a 1-tuple), but the
    stored value is variable-length — annotate as ``Tuple[Hash32, ...]``.
    """
    lookup_key = SchemaV1.make_deletable_state_roots_lookup_key()
    if not db.exists(lookup_key):
        # Seed the record so the decode below always succeeds.
        db.set(
            lookup_key,
            rlp.encode((), sedes=CountableList(hash32)),
        )
    deletable_state_roots = rlp.decode(db[lookup_key], sedes=CountableList(hash32))
    return deletable_state_roots
def _add_transaction_to_canonical_chain(db: BaseDB, transaction_hash: Hash32,
                                        block_header: BlockHeader,
                                        index: int) -> None:
    """
    Index ``transaction_hash`` to the canonical block and position that hold
    the transaction body.

    :param transaction_hash: hash of the transaction to add the lookup for
    :param block_header: header of the canonical block containing the txn
    :param index: position of the transaction within the block
    """
    lookup_key = SchemaV1.make_transaction_hash_to_block_lookup_key(
        transaction_hash)
    db.set(lookup_key, rlp.encode(TransactionKey(block_header.block_number, index)))
def _set_as_canonical_chain_head(
        cls,
        db: BaseDB,
        block_root: Hash32,
        block_class: Type[BaseBeaconBlock]
) -> Tuple[Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
    """
    Set the canonical chain HEAD to the block as specified by the given
    block root.

    :return: a tuple of the blocks that are newly in the canonical chain, and
        the blocks that are no longer in the canonical chain
    :raises ValueError: if ``block_root`` does not refer to a known block
    """
    try:
        block = cls._get_block_by_root(db, block_root, block_class)
    except BlockNotFound:
        raise ValueError(
            "Cannot use unknown block root as canonical head: {}".format(
                block_root))
    # Walk back from the new head to the first already-canonical ancestor;
    # reversed so the oldest newly-canonical block comes first.
    new_canonical_blocks = tuple(
        reversed(cls._find_new_ancestors(db, block, block_class)))
    old_canonical_blocks = []
    # NOTE(review): the loops below rebind ``block``; the final ``db.set``
    # relies on the last element of ``new_canonical_blocks`` being the new
    # head (or, when that tuple is empty, on the originally-fetched block).
    for block in new_canonical_blocks:
        try:
            old_canonical_root = cls._get_canonical_block_root(
                db, block.slot)
        except BlockNotFound:
            # no old_canonical block, and no more possible
            break
        else:
            old_canonical_block = cls._get_block_by_root(
                db, old_canonical_root, block_class)
            old_canonical_blocks.append(old_canonical_block)
    # Point each affected slot at the newly canonical block.
    for block in new_canonical_blocks:
        cls._add_block_slot_to_root_lookup(db, block)
    db.set(SchemaV1.make_canonical_head_root_lookup_key(), block.signed_root)
    return new_canonical_blocks, tuple(old_canonical_blocks)
def _set_as_canonical_chain_head(
        cls,
        db: BaseDB,
        block_hash: Hash32
) -> Tuple[Tuple[BlockHeader, ...], Tuple[BlockHeader, ...]]:
    """
    Sets the canonical chain HEAD to the block header as specified by the
    given block hash.

    :return: a tuple of the headers that are newly in the canonical chain, and
        the headers that are no longer in the canonical chain
    :raises ValueError: if ``block_hash`` does not refer to a known header
    """
    try:
        header = cls._get_block_header_by_hash(db, block_hash)
    except HeaderNotFound:
        raise ValueError(
            "Cannot use unknown block hash as canonical head: {}".format(
                block_hash))
    # Walk back from the new head to the first already-canonical ancestor;
    # reversed so the oldest newly-canonical header comes first.
    new_canonical_headers = tuple(
        reversed(cls._find_new_ancestors(db, header)))
    old_canonical_headers = []
    for h in new_canonical_headers:
        try:
            old_canonical_hash = cls._get_canonical_block_hash(
                db, h.block_number)
        except HeaderNotFound:
            # no old_canonical block, and no more possible
            break
        else:
            old_canonical_header = cls._get_block_header_by_hash(
                db, old_canonical_hash)
            old_canonical_headers.append(old_canonical_header)
    # Point each affected block number at the newly canonical header.
    for h in new_canonical_headers:
        cls._add_block_number_to_hash_lookup(db, h)
    db.set(SchemaV1.make_canonical_head_hash_lookup_key(), header.hash)
    return new_canonical_headers, tuple(old_canonical_headers)
def _add_slot_to_crystallized_state_lookup(
        cls, db: BaseDB, crystallized_state: CrystallizedState) -> None:
    """
    Set a record in the database to allow looking up this crystallized state
    by its last state recalculation slot.

    If it's a fork, store the old state root in `deletable_state_roots`.
    """
    slot_to_hash_key = SchemaV1.make_slot_to_crystallized_state_lookup_key(
        crystallized_state.last_state_recalc)
    if db.exists(slot_to_hash_key):
        # A state is already recorded for this slot — we are on a fork, so
        # remember the replaced root for later pruning.
        deletable_state_roots = cls._get_deletable_state_roots(db)
        replaced_state_root = rlp.decode(db[slot_to_hash_key],
                                         sedes=rlp.sedes.binary)
        cls._set_deletatable_state(
            db,
            deletable_state_roots + (replaced_state_root, ),
        )
    db.set(
        slot_to_hash_key,
        rlp.encode(crystallized_state.hash, sedes=rlp.sedes.binary),
    )
def _set_as_canonical_chain_head(
        cls,
        db: BaseDB,
        block_hash: Hash32,
) -> Tuple[Tuple[BlockHeader, ...], Tuple[BlockHeader, ...]]:
    """
    Set the canonical chain HEAD to the header identified by ``block_hash``,
    removing transaction lookups for blocks displaced from the chain.

    :return: (headers newly canonical, headers no longer canonical)
    :raises ValueError: if ``block_hash`` does not refer to a known header
    """
    try:
        header = cls._get_block_header_by_hash(db, block_hash)
    except HeaderNotFound:
        # BUGFIX: previously this formatted ``header.hash``, but ``header``
        # is unbound when the lookup itself raises, producing an
        # UnboundLocalError instead of the intended ValueError.  Report the
        # argument ``block_hash`` (consistent with the sibling variants).
        raise ValueError(
            "Cannot use unknown block hash as canonical head: {}".format(
                block_hash))
    # Walk back from the new head to the first already-canonical ancestor;
    # reversed so the oldest newly-canonical header comes first.
    new_canonical_headers = tuple(
        reversed(cls._find_new_ancestors(db, header)))
    old_canonical_headers = []
    # remove transaction lookups for blocks that are no longer canonical
    for h in new_canonical_headers:
        try:
            old_hash = cls._get_canonical_block_hash(db, h.block_number)
        except HeaderNotFound:
            # no old block, and no more possible
            break
        else:
            old_header = cls._get_block_header_by_hash(db, old_hash)
            old_canonical_headers.append(old_header)
            for transaction_hash in cls._get_block_transaction_hashes(
                    db, old_header):
                cls._remove_transaction_from_canonical_chain(
                    db, transaction_hash)
    # Point each affected block number at the newly canonical header.
    for h in new_canonical_headers:
        cls._add_block_number_to_hash_lookup(db, h)
    db.set(SchemaV1.make_canonical_head_hash_lookup_key(), header.hash)
    return new_canonical_headers, tuple(old_canonical_headers)
def _persist_block_chain(
        cls,
        db: BaseDB,
        blocks: Iterable[BaseBeaconBlock],
        block_class: Type[BaseBeaconBlock]
) -> Tuple[Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
    """
    Persist ``blocks`` with their (stub) scores; promote the chain tip to
    canonical head when the chain outscores the current head.

    :return: (blocks newly canonical, blocks no longer canonical); both
        empty when nothing was persisted or the chain does not win.
    :raises ValidationError: if the blocks are not parent→child contiguous.
    :raises ParentNotFound: if the first block's parent is unknown.
    """
    blocks_iterator = iter(blocks)
    try:
        first_block = first(blocks_iterator)
    except StopIteration:
        # Empty input: nothing to persist.
        return tuple(), tuple()
    is_genesis = first_block.parent_root == GENESIS_PARENT_HASH
    if not is_genesis and not cls._block_exists(db, first_block.parent_root):
        raise ParentNotFound(
            "Cannot persist block ({}) with unknown parent ({})".format(
                encode_hex(first_block.root),
                encode_hex(first_block.parent_root)))
    if is_genesis:
        score = 0
    else:
        score = cls._get_score(db, first_block.parent_root)
    # NOTE(review): ``score`` is never updated by the loop below (the
    # per-block scores are written by ``_set_block_scores_to_db``), so the
    # final head comparison uses the parent's score — confirm intended.
    curr_block_head = first_block
    db.set(
        curr_block_head.root,
        rlp.encode(curr_block_head),
    )
    cls._set_block_scores_to_db(db, curr_block_head)
    # Re-attach the consumed first block so the sliding window covers
    # every parent/child pair.
    orig_blocks_seq = concat([(first_block, ), blocks_iterator])
    for parent, child in sliding_window(2, orig_blocks_seq):
        if parent.root != child.parent_root:
            raise ValidationError(
                "Non-contiguous chain. Expected {} to have {} as parent but was {}"
                .format(
                    encode_hex(child.root),
                    encode_hex(parent.root),
                    encode_hex(child.parent_root),
                ))
        curr_block_head = child
        db.set(
            curr_block_head.root,
            rlp.encode(curr_block_head),
        )
        cls._set_block_scores_to_db(db, curr_block_head)
    try:
        previous_canonical_head = cls._get_canonical_head(db, block_class).root
        head_score = cls._get_score(db, previous_canonical_head)
    except CanonicalHeadNotFound:
        # No head yet: the persisted tip becomes canonical.
        return cls._set_as_canonical_chain_head(db, curr_block_head.root, block_class)
    if score > head_score:
        return cls._set_as_canonical_chain_head(db, curr_block_head.root, block_class)
    else:
        return tuple(), tuple()
def _persist_state(cls, db: BaseDB, state: BeaconState) -> None:
    """Store the RLP-encoded ``state`` keyed by its hash."""
    encoded_state = rlp.encode(state)
    db.set(state.hash, encoded_state)
def _remove_transaction_from_canonical_chain(db: BaseDB,
                                             transaction_hash: Hash32) -> None:
    """Drop the canonical-chain lookup entry for ``transaction_hash``."""
    lookup_key = SchemaV1.make_transaction_hash_to_block_lookup_key(
        transaction_hash)
    db.delete(lookup_key)
def _persist_state(cls, db: BaseDB, state: BeaconState) -> None:
    """Store the SSZ-encoded ``state`` keyed by its root."""
    encoded_state = ssz.encode(state)
    db.set(state.root, encoded_state)
def _persist_block_chain(
        cls,
        db: BaseDB,
        blocks: Iterable[BaseBeaconBlock],
        block_class: Type[BaseBeaconBlock],
        fork_choice_scorings: Iterable[ForkChoiceScoringFn],
) -> Tuple[Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
    """
    Persist ``blocks``, scoring each with its matching fork-choice scoring
    function; promote the chain tip to canonical head when its score beats
    the current head's.

    ``fork_choice_scorings`` must yield one scoring function per block.

    :return: (blocks newly canonical, blocks no longer canonical); both
        empty when nothing was persisted or the chain does not win.
    :raises ValidationError: if the blocks are not parent→child contiguous.
    :raises ParentNotFound: if the first block's parent is unknown.
    :raises MissingForkChoiceScoringFns: if scorings run out before blocks.
    """
    blocks_iterator = iter(blocks)
    scorings_iterator = iter(fork_choice_scorings)
    try:
        first_block = first(blocks_iterator)
        first_scoring = first(scorings_iterator)
    except StopIteration:
        # Empty input: nothing to persist.
        return tuple(), tuple()
    try:
        previous_canonical_head = cls._get_canonical_head(
            db, block_class).signing_root
        head_score = cls._get_score(db, previous_canonical_head)
    except CanonicalHeadNotFound:
        no_canonical_head = True
    else:
        no_canonical_head = False
    is_genesis = first_block.is_genesis
    if not is_genesis and not cls._block_exists(db, first_block.parent_root):
        raise ParentNotFound(
            "Cannot persist block ({}) with unknown parent ({})".format(
                encode_hex(first_block.signing_root),
                encode_hex(first_block.parent_root),
            ))
    score = first_scoring(first_block)
    curr_block_head = first_block
    db.set(
        curr_block_head.signing_root,
        ssz.encode(curr_block_head),
    )
    cls._add_block_root_to_slot_lookup(db, curr_block_head)
    cls._set_block_score_to_db(db, curr_block_head, score)
    cls._add_attestations_root_to_block_lookup(db, curr_block_head)
    # Re-attach the consumed first block so the sliding window covers
    # every parent/child pair.
    orig_blocks_seq = concat([(first_block, ), blocks_iterator])
    for parent, child in sliding_window(2, orig_blocks_seq):
        if parent.signing_root != child.parent_root:
            raise ValidationError(
                "Non-contiguous chain. Expected {} to have {} as parent but was {}"
                .format(
                    encode_hex(child.signing_root),
                    encode_hex(parent.signing_root),
                    encode_hex(child.parent_root),
                ))
        curr_block_head = child
        db.set(
            curr_block_head.signing_root,
            ssz.encode(curr_block_head),
        )
        cls._add_block_root_to_slot_lookup(db, curr_block_head)
        cls._add_attestations_root_to_block_lookup(db, curr_block_head)
        # NOTE: len(scorings_iterator) should equal len(blocks_iterator)
        try:
            next_scoring = next(scorings_iterator)
        except StopIteration:
            raise MissingForkChoiceScoringFns
        score = next_scoring(curr_block_head)
        cls._set_block_score_to_db(db, curr_block_head, score)
    if no_canonical_head:
        # No head yet: the persisted tip becomes canonical.
        return cls._set_as_canonical_chain_head(
            db, curr_block_head.signing_root, block_class)
    if score > head_score:
        return cls._set_as_canonical_chain_head(
            db, curr_block_head.signing_root, block_class)
    else:
        return tuple(), tuple()
def _persist_uncles(db: BaseDB, uncles: Tuple[BlockHeader]) -> Hash32:
    """Store ``uncles`` under the keccak hash of their RLP encoding.

    Returns the hash used as the database key.
    """
    uncles_key = keccak(rlp.encode(uncles))
    db.set(uncles_key,
           rlp.encode(uncles, sedes=rlp.sedes.CountableList(BlockHeader)))
    return uncles_key
def _persist_block_chain(
        cls,
        db: BaseDB,
        blocks: Iterable[BaseBeaconBlock],
        block_class: Type[BaseBeaconBlock]
) -> Tuple[Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
    """
    Persist ``blocks``, updating slot and score lookups; promote the chain
    tip to canonical head when its score beats the current head's.

    :return: (blocks newly canonical, blocks no longer canonical); both
        empty when nothing was persisted or the chain does not win.
    :raises ValidationError: if the blocks are not parent→child contiguous.
    :raises ParentNotFound: if the first block's parent is unknown.
    """
    blocks_iterator = iter(blocks)
    try:
        first_block = first(blocks_iterator)
    except StopIteration:
        # Empty input: nothing to persist.
        return tuple(), tuple()
    try:
        previous_canonical_head = cls._get_canonical_head(
            db, block_class).signed_root
        head_score = cls._get_score(db, previous_canonical_head)
    except CanonicalHeadNotFound:
        no_canonical_head = True
    else:
        no_canonical_head = False
    is_genesis = first_block.previous_block_root == GENESIS_PARENT_HASH
    if not is_genesis and not cls._block_exists(
            db, first_block.previous_block_root):
        raise ParentNotFound(
            "Cannot persist block ({}) with unknown parent ({})".format(
                encode_hex(first_block.signed_root),
                encode_hex(first_block.previous_block_root),
            ))
    if is_genesis:
        score = 0
        # TODO: this should probably be done as part of the fork choice rule processing
        db.set(
            SchemaV1.make_finalized_head_root_lookup_key(),
            first_block.signed_root,
        )
    else:
        score = first_block.slot
    curr_block_head = first_block
    db.set(
        curr_block_head.signed_root,
        ssz.encode(curr_block_head),
    )
    cls._add_block_root_to_slot_lookup(db, curr_block_head)
    cls._set_block_scores_to_db(db, curr_block_head)
    # Re-attach the consumed first block so the sliding window covers
    # every parent/child pair.
    orig_blocks_seq = concat([(first_block, ), blocks_iterator])
    for parent, child in sliding_window(2, orig_blocks_seq):
        if parent.signed_root != child.previous_block_root:
            raise ValidationError(
                "Non-contiguous chain. Expected {} to have {} as parent but was {}"
                .format(
                    encode_hex(child.signed_root),
                    encode_hex(parent.signed_root),
                    encode_hex(child.previous_block_root),
                ))
        curr_block_head = child
        db.set(
            curr_block_head.signed_root,
            ssz.encode(curr_block_head),
        )
        cls._add_block_root_to_slot_lookup(db, curr_block_head)
        score = cls._set_block_scores_to_db(db, curr_block_head)
    if no_canonical_head:
        # No head yet: the persisted tip becomes canonical.
        return cls._set_as_canonical_chain_head(
            db, curr_block_head.signed_root, block_class)
    if score > head_score:
        return cls._set_as_canonical_chain_head(
            db, curr_block_head.signed_root, block_class)
    else:
        return tuple(), tuple()