def sp_sub_slot_total_iters(self, constants: ConsensusConstants) -> uint128:
    if self.overflow:
        return uint128(self.total_iters - self.ip_iters(constants) - self.sub_slot_iters)
    else:
        return uint128(self.total_iters - self.ip_iters(constants))
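# For an overflow block, the signage point falls in the sub-slot before the one containing the
# infusion point, so an extra sub_slot_iters is subtracted. A minimal arithmetic sketch with
# illustrative values (real values come from the block record):
total_iters = 5_000_000      # total iterations at the infusion point (illustrative)
ip_iters = 120_000           # iterations into the IP's sub-slot (illustrative)
sub_slot_iters = 1_000_000   # iterations per sub-slot (illustrative)

non_overflow_sp_slot_start = total_iters - ip_iters                  # 4_880_000
overflow_sp_slot_start = total_iters - ip_iters - sub_slot_iters     # 3_880_000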
async def get_confirmed_spendable_balance_for_wallet(self, wallet_id: int, unspent_records=None) -> uint128:
    """
    Returns the balance amount of all coins that are spendable.
    """
    spendable: Set[WalletCoinRecord] = await self.get_spendable_coins_for_wallet(wallet_id, unspent_records)

    spendable_amount: uint128 = uint128(0)
    for record in spendable:
        spendable_amount = uint128(spendable_amount + record.coin.amount)

    return spendable_amount
async def get_confirmed_balance_for_wallet(
    self, wallet_id: int, unspent_coin_records: Optional[Set[WalletCoinRecord]] = None
) -> uint128:
    """
    Returns the confirmed balance, including coinbase rewards that are not spendable.
    """
    if unspent_coin_records is None:
        unspent_coin_records = await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
    amount: uint128 = uint128(0)
    for record in unspent_coin_records:
        amount = uint128(amount + record.coin.amount)
    self.log.info(f"Confirmed balance amount is {amount}")
    return uint128(amount)
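# Usage sketch (assumption: a wallet-state-manager-like object `wsm` exposing the two coroutines
# above). The confirmed balance counts every unspent coin; the spendable variant excludes coins
# that cannot be spent yet (e.g. maturing coinbase rewards), so their difference is the locked
# amount.
async def print_balances(wsm, wallet_id: int) -> None:
    confirmed = await wsm.get_confirmed_balance_for_wallet(wallet_id)
    spendable = await wsm.get_confirmed_spendable_balance_for_wallet(wallet_id)
    locked = confirmed - spendable  # confirmed but not yet spendable
    assert locked >= 0
    print(f"confirmed={confirmed} spendable={spendable} locked={locked}")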
async def get_unconfirmed_balance(
    self, wallet_id, unspent_coin_records: Optional[Set[WalletCoinRecord]] = None
) -> uint128:
    """
    Returns the balance, including coinbase rewards that are not spendable, and unconfirmed
    transactions.
    """
    confirmed = await self.get_confirmed_balance_for_wallet(wallet_id, unspent_coin_records)
    unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
    removal_amount: int = 0
    addition_amount: int = 0

    for record in unconfirmed_tx:
        for removal in record.removals:
            removal_amount += removal.amount
        for addition in record.additions:
            # This is change or a self transaction
            if await self.does_coin_belong_to_wallet(addition, wallet_id):
                addition_amount += addition.amount

    result = confirmed - removal_amount + addition_amount
    return uint128(result)
async def get_network_space(self, request: Dict) -> Optional[Dict]:
    """
    Retrieves an estimate of total space validating the chain
    between two block header hashes.
    """
    if "newer_block_header_hash" not in request or "older_block_header_hash" not in request:
        raise ValueError("Invalid request. newer_block_header_hash and older_block_header_hash required")
    newer_block_hex = request["newer_block_header_hash"]
    older_block_hex = request["older_block_header_hash"]

    if newer_block_hex == older_block_hex:
        raise ValueError("New and old must not be the same")

    newer_block_bytes = hexstr_to_bytes(newer_block_hex)
    older_block_bytes = hexstr_to_bytes(older_block_hex)

    newer_block = await self.service.block_store.get_block_record(newer_block_bytes)
    if newer_block is None:
        raise ValueError("Newer block not found")
    older_block = await self.service.block_store.get_block_record(older_block_bytes)
    if older_block is None:
        raise ValueError("Older block not found")
    delta_weight = newer_block.weight - older_block.weight

    delta_iters = newer_block.total_iters - older_block.total_iters
    weight_div_iters = delta_weight / delta_iters
    additional_difficulty_constant = self.service.constants.DIFFICULTY_CONSTANT_FACTOR
    eligible_plots_filter_multiplier = 2 ** self.service.constants.NUMBER_ZERO_BITS_PLOT_FILTER
    network_space_bytes_estimate = (
        UI_ACTUAL_SPACE_CONSTANT_FACTOR
        * weight_div_iters
        * additional_difficulty_constant
        * eligible_plots_filter_multiplier
    )
    return {"space": uint128(int(network_space_bytes_estimate))}
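# Worked sketch of the estimate above with illustrative numbers (placeholders, not mainnet
# constants): weight-per-iteration is scaled by the difficulty constant factor, then by the
# plot-filter multiplier, because only ~1 in 2**NUMBER_ZERO_BITS_PLOT_FILTER plots passes the
# filter for a given signage point.
UI_ACTUAL_SPACE_CONSTANT_FACTOR = 0.762  # illustrative
delta_weight = 2_000                     # illustrative
delta_iters = 8_000_000_000              # illustrative
difficulty_constant_factor = 2 ** 67     # illustrative
plot_filter_multiplier = 2 ** 9          # illustrative

space_bytes = (
    UI_ACTUAL_SPACE_CONSTANT_FACTOR
    * (delta_weight / delta_iters)
    * difficulty_constant_factor
    * plot_filter_multiplier
)
print(f"estimated network space: {space_bytes / 1e15:.1f} PB")  # ~14.4 PB with these numbers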
def test_roundtrip(self):
    def roundtrip(v):
        s = io.BytesIO()
        v.stream(s)
        s.seek(0)
        cls = type(v)
        v2 = cls.parse(s)
        assert v2 == v

    # int512 is special. it uses 65 bytes to allow positive and negative "uint512"
    roundtrip(
        int512(
            0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF  # noqa: E501
        )
    )
    roundtrip(
        int512(
            -0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF  # noqa: E501
        )
    )

    roundtrip(uint128(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))
    roundtrip(uint128(0))
    roundtrip(uint64(0xFFFFFFFFFFFFFFFF))
    roundtrip(uint64(0))
    roundtrip(int64(0x7FFFFFFFFFFFFFFF))
    roundtrip(int64(-0x8000000000000000))
    roundtrip(uint32(0xFFFFFFFF))
    roundtrip(uint32(0))
    roundtrip(int32(0x7FFFFFFF))
    roundtrip(int32(-0x80000000))
    roundtrip(uint16(0xFFFF))
    roundtrip(uint16(0))
    roundtrip(int16(0x7FFF))
    roundtrip(int16(-0x8000))
    roundtrip(uint8(0xFF))
    roundtrip(uint8(0))
    roundtrip(int8(0x7F))
    roundtrip(int8(-0x80))
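# The test pins the core invariant parse(stream(v)) == v for every sized-int type. A small
# property-style sketch of the same idea for one type (iteration count and bounds illustrative):
import io
import random

for _ in range(1000):
    v = uint64(random.randint(0, 0xFFFFFFFFFFFFFFFF))
    buf = io.BytesIO()
    v.stream(buf)
    buf.seek(0)
    assert uint64.parse(buf) == v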
async def _sync(self) -> None:
    """
    Wallet has fallen far behind (or is starting up for the first time), and must be synced
    up to the LCA of the blockchain.
    """
    if self.wallet_state_manager is None or self.backup_initialized is False or self.server is None:
        return None

    highest_weight: uint128 = uint128(0)
    peak_height: uint32 = uint32(0)
    peak: Optional[HeaderBlock] = None
    potential_peaks: List[
        Tuple[bytes32, HeaderBlock]
    ] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()

    self.log.info(f"Have collected {len(potential_peaks)} potential peaks")

    for header_hash, potential_peak_block in potential_peaks:
        if potential_peak_block.weight > highest_weight:
            highest_weight = potential_peak_block.weight
            peak_height = potential_peak_block.height
            peak = potential_peak_block

    if peak_height is None or peak_height == 0:
        return None

    if self.wallet_state_manager.peak is not None and highest_weight <= self.wallet_state_manager.peak.weight:
        self.log.info("Not performing sync, already caught up.")
        return None

    peers: List[WSChiaConnection] = self.server.get_full_node_connections()
    if len(peers) == 0:
        self.log.info("No peers to sync to")
        return None

    async with self.wallet_state_manager.blockchain.lock:
        fork_height = None
        if peak is not None:
            fork_height = self.wallet_state_manager.sync_store.get_potential_fork_point(peak.header_hash)
            assert fork_height is not None
            # This is the fork point in SES in the case where no fork was detected
            peers = self.server.get_full_node_connections()
            fork_height = await check_fork_next_block(
                self.wallet_state_manager.blockchain, fork_height, peers, wallet_next_block_check
            )

        if fork_height is None:
            fork_height = uint32(0)
        await self.wallet_state_manager.blockchain.warmup(fork_height)
        await self.batch_sync_to_peak(fork_height, peak)
def __init__(self, constants: ConsensusConstants):
    self.state_type: StateType = StateType.FIRST_SUB_SLOT
    self.peak: Optional[timelord_protocol.NewPeakTimelord] = None
    self.subslot_end: Optional[EndOfSubSlotBundle] = None
    self.last_ip: uint64 = uint64(0)
    self.deficit: uint8 = constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
    self.sub_epoch_summary: Optional[SubEpochSummary] = None
    self.constants: ConsensusConstants = constants
    self.last_weight: uint128 = uint128(0)
    self.last_height: uint32 = uint32(0)
    self.total_iters: uint128 = uint128(0)
    self.last_challenge_sb_or_eos_total_iters = uint128(0)
    self.last_block_total_iters: Optional[uint128] = None
    self.last_peak_challenge: bytes32 = constants.GENESIS_CHALLENGE
    self.difficulty: uint64 = constants.DIFFICULTY_STARTING
    self.sub_slot_iters: uint64 = constants.SUB_SLOT_ITERS_STARTING
    self.reward_challenge_cache: List[Tuple[bytes32, uint128]] = [(constants.GENESIS_CHALLENGE, uint128(0))]
    self.new_epoch = False
    self.passed_ses_height_but_not_yet_included = False
    self.infused_ses = False
async def get_unconfirmed_balance(
    self, wallet_id, unspent_coin_records: Optional[Set[WalletCoinRecord]] = None
) -> uint128:
    """
    Returns the balance, including coinbase rewards that are not spendable, and unconfirmed
    transactions.
    """
    confirmed = await self.get_confirmed_balance_for_wallet(wallet_id, unspent_coin_records)
    unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
    removal_amount = 0

    for record in unconfirmed_tx:
        removal_amount += record.amount
        removal_amount += record.fee_amount

    result = confirmed - removal_amount
    return uint128(result)
def get_recent_reward_challenges(self) -> List[Tuple[bytes32, uint128]]:
    peak = self.get_peak()
    if peak is None:
        return []
    recent_rc: List[Tuple[bytes32, uint128]] = []
    curr: Optional[BlockRecord] = peak
    while curr is not None and len(recent_rc) < 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
        if curr != peak:
            recent_rc.append((curr.reward_infusion_new_challenge, curr.total_iters))
        if curr.first_in_sub_slot:
            assert curr.finished_reward_slot_hashes is not None
            sub_slot_total_iters = curr.ip_sub_slot_total_iters(self.constants)
            # Start from the most recent
            for rc in reversed(curr.finished_reward_slot_hashes):
                recent_rc.append((rc, sub_slot_total_iters))
                sub_slot_total_iters = uint128(sub_slot_total_iters - curr.sub_slot_iters)
        curr = self.try_block_record(curr.prev_hash)
    return list(reversed(recent_rc))
async def get_unconfirmed_balance(self, unspent_records=None) -> uint128:
    confirmed = await self.get_confirmed_balance(unspent_records)
    unconfirmed_tx: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
        self.id()
    )
    addition_amount = 0
    removal_amount = 0

    for record in unconfirmed_tx:
        if record.type is TransactionType.INCOMING_TX:
            addition_amount += record.amount
        else:
            removal_amount += record.amount

    result = confirmed - removal_amount + addition_amount

    self.log.info(f"Unconfirmed balance for cc wallet {self.id()} is {result}")
    return uint128(result)
def get_heaviest_peak(self) -> Optional[Tuple[bytes32, uint32, uint128]]:
    """
    Returns: the header_hash, height, and weight of the heaviest block that one of our peers
    has notified us of.
    """
    if len(self.peer_to_peak) == 0:
        return None
    heaviest_peak_hash: Optional[bytes32] = None
    heaviest_peak_weight: uint128 = uint128(0)
    heaviest_peak_height: Optional[uint32] = None
    for peer_id, (peak_hash, height, weight) in self.peer_to_peak.items():
        if peak_hash not in self.peak_to_peer:
            continue
        if heaviest_peak_hash is None or weight > heaviest_peak_weight:
            heaviest_peak_hash = peak_hash
            heaviest_peak_weight = weight
            heaviest_peak_height = height
    assert heaviest_peak_hash is not None and heaviest_peak_weight is not None and heaviest_peak_height is not None
    return heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight
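# Usage sketch (assumption: a sync-store-like object exposing the peer_to_peak and peak_to_peer
# mappings read above; hashes and peer ids are illustrative). The heaviest advertised peak wins,
# and peaks no longer tracked in peak_to_peer are skipped.
store.peer_to_peak = {
    "peer_a": (peak_hash_a, uint32(1000), uint128(50_000)),
    "peer_b": (peak_hash_b, uint32(1002), uint128(50_900)),
}
store.peak_to_peer = {peak_hash_a: {"peer_a"}, peak_hash_b: {"peer_b"}}

result = store.get_heaviest_peak()
if result is not None:
    header_hash, height, weight = result  # peer_b's peak, the heavier of the two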
def initialize_genesis_sub_slot(self):
    self.clear_slots()
    self.finished_sub_slots = [(None, [None] * self.constants.NUM_SPS_SUB_SLOT, uint128(0))]
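# Each finished_sub_slots entry is a (sub_slot, signage_points, total_iters) triple; the genesis
# entry has no end-of-sub-slot bundle and an empty signage-point table. Sketch of its shape
# (assumes a store initialized as above):
eos, signage_points, slot_total_iters = store.finished_sub_slots[0]
assert eos is None                                # no EOS bundle before genesis
assert all(sp is None for sp in signage_points)   # NUM_SPS_SUB_SLOT empty slots
assert slot_total_iters == uint128(0)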
async def _check_for_new_ip(self, iter_to_look_for: uint64):
    if len(self.unfinished_blocks) == 0:
        return None
    infusion_iters = [
        iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.INFUSION_POINT
    ]
    for iteration in infusion_iters:
        if iteration != iter_to_look_for:
            continue
        proofs_with_iter = [
            (chain, info, proof)
            for chain, info, proof, label in self.proofs_finished
            if info.number_of_iterations == iteration and label == self.num_resets
        ]
        if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
            chain_count = 3
        else:
            chain_count = 2
        if len(proofs_with_iter) == chain_count:
            block = None
            ip_iters = None
            for unfinished_block in self.unfinished_blocks:
                try:
                    _, ip_iters = iters_from_block(
                        self.constants,
                        unfinished_block.reward_chain_block,
                        self.last_state.get_sub_slot_iters(),
                        self.last_state.get_difficulty(),
                    )
                except Exception as e:
                    log.error(f"Error {e}")
                    continue
                if ip_iters - self.last_state.get_last_ip() == iteration:
                    block = unfinished_block
                    break
            assert ip_iters is not None
            if block is not None:
                ip_total_iters = self.last_state.get_total_iters() + iteration
                challenge = block.reward_chain_block.get_hash()
                icc_info: Optional[VDFInfo] = None
                icc_proof: Optional[VDFProof] = None
                cc_info: Optional[VDFInfo] = None
                cc_proof: Optional[VDFProof] = None
                rc_info: Optional[VDFInfo] = None
                rc_proof: Optional[VDFProof] = None
                for chain, info, proof in proofs_with_iter:
                    if chain == Chain.CHALLENGE_CHAIN:
                        cc_info = info
                        cc_proof = proof
                    if chain == Chain.REWARD_CHAIN:
                        rc_info = info
                        rc_proof = proof
                    if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                        icc_info = info
                        icc_proof = proof
                if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                    log.error(f"Insufficient VDF proofs for infusion point ch: {challenge} iterations:{iteration}")
                    return None
                rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
                if rc_info.challenge != rc_challenge:
                    assert rc_challenge is not None
                    log.warning(
                        f"Do not have correct challenge {rc_challenge.hex()} "
                        f"has {rc_info.challenge}, partial hash {block.reward_chain_block.get_hash()}"
                    )
                    # This proof is on an outdated challenge, so don't use it
                    continue
                self.iters_finished.add(iter_to_look_for)
                self.last_active_time = time.time()
                log.debug(f"Generated infusion point for challenge: {challenge} iterations: {iteration}.")
                overflow = is_overflow_block(self.constants, block.reward_chain_block.signage_point_index)
                if not self.last_state.can_infuse_block(overflow):
                    log.warning("Too many blocks, or overflow in new epoch, cannot infuse, discarding")
                    return None
                cc_info = dataclasses.replace(cc_info, number_of_iterations=ip_iters)
                response = timelord_protocol.NewInfusionPointVDF(
                    challenge,
                    cc_info,
                    cc_proof,
                    rc_info,
                    rc_proof,
                    icc_info,
                    icc_proof,
                )
                msg = make_msg(ProtocolMessageTypes.new_infusion_point_vdf, response)
                if self.server is not None:
                    await self.server.send_to_all([msg], NodeType.FULL_NODE)
                self.proofs_finished = self._clear_proof_list(iteration)
                if (
                    self.last_state.get_last_block_total_iters() is None
                    and not self.last_state.state_type == StateType.FIRST_SUB_SLOT
                ):
                    # We don't know when the last block was, so we can't make peaks
                    return None
                sp_total_iters = (
                    ip_total_iters
                    - ip_iters
                    + calculate_sp_iters(
                        self.constants,
                        block.sub_slot_iters,
                        block.reward_chain_block.signage_point_index,
                    )
                    - (block.sub_slot_iters if overflow else 0)
                )
                if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                    is_transaction_block = True
                    height: uint32 = uint32(0)
                else:
                    last_block_ti = self.last_state.get_last_block_total_iters()
                    assert last_block_ti is not None
                    is_transaction_block = last_block_ti < sp_total_iters
                    height = uint32(self.last_state.get_height() + 1)
                if height < 5:
                    # Don't directly update our state for the first few blocks, because we cannot
                    # validate whether the pre-farm is correct
                    return None
                new_reward_chain_block = RewardChainBlock(
                    uint128(self.last_state.get_weight() + block.difficulty),
                    height,
                    uint128(ip_total_iters),
                    block.reward_chain_block.signage_point_index,
                    block.reward_chain_block.pos_ss_cc_challenge_hash,
                    block.reward_chain_block.proof_of_space,
                    block.reward_chain_block.challenge_chain_sp_vdf,
                    block.reward_chain_block.challenge_chain_sp_signature,
                    cc_info,
                    block.reward_chain_block.reward_chain_sp_vdf,
                    block.reward_chain_block.reward_chain_sp_signature,
                    rc_info,
                    icc_info,
                    is_transaction_block,
                )
                if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                    # Genesis
                    new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                elif overflow and self.last_state.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                    if self.last_state.peak is not None:
                        assert self.last_state.subslot_end is None
                        # This means the previous block is also an overflow block, and did not manage
                        # to lower the deficit, therefore we cannot lower it either. (new slot)
                        new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                    else:
                        # This means we are the first infusion in this sub-slot. This may be a new
                        # slot or not.
                        assert self.last_state.subslot_end is not None
                        if self.last_state.subslot_end.infused_challenge_chain is None:
                            # There is no ICC, which means we are not finishing a slot. We can
                            # reduce the deficit.
                            new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                        else:
                            # There is an ICC, which means we are finishing a slot. Different slot,
                            # so can't change the deficit
                            new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                else:
                    new_deficit = max(self.last_state.deficit - 1, 0)
                if new_deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
                    last_csb_or_eos = ip_total_iters
                else:
                    last_csb_or_eos = self.last_state.last_challenge_sb_or_eos_total_iters
                if self.last_state.just_infused_sub_epoch_summary():
                    new_sub_epoch_summary = None
                    passed_ses_height_but_not_yet_included = False
                else:
                    new_sub_epoch_summary = block.sub_epoch_summary
                    if new_reward_chain_block.height % self.constants.SUB_EPOCH_BLOCKS == 0:
                        passed_ses_height_but_not_yet_included = True
                    else:
                        passed_ses_height_but_not_yet_included = (
                            self.last_state.get_passed_ses_height_but_not_yet_included()
                        )
                self.new_peak = timelord_protocol.NewPeakTimelord(
                    new_reward_chain_block,
                    block.difficulty,
                    uint8(new_deficit),
                    block.sub_slot_iters,
                    new_sub_epoch_summary,
                    self.last_state.reward_challenge_cache,
                    uint128(last_csb_or_eos),
                    passed_ses_height_but_not_yet_included,
                )
                await self._handle_new_peak()
                # Break so we alternate between checking SP and IP
                break
async def validate_block_body(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    block_store: BlockStore,
    coin_store: CoinStore,
    peak: Optional[BlockRecord],
    block: Union[FullBlock, UnfinishedBlock],
    height: uint32,
    npc_result: Optional[NPCResult],
    fork_point_with_peak: Optional[uint32],
    get_block_generator: Callable,
    validate_signature=True,
) -> Tuple[Optional[Err], Optional[NPCResult]]:
    """
    This assumes the header block has been completely validated.
    Validates the transactions and body of the block.
    Returns None for the first value if everything validates correctly, or an Err if something
    does not validate. For the second value, returns a CostResult only if validation succeeded,
    and there are transactions. In other cases it returns None. The NPC result is the result of
    running the generator with the previous generators refs. It is only present for transaction
    blocks which have spent coins.
    """
    if isinstance(block, FullBlock):
        assert height == block.height
    prev_transaction_block_height: uint32 = uint32(0)

    # 1. For non transaction-blocks: foliage block, transaction filter, transactions info, and
    # generator must be empty. If it is a block but not a transaction block, there is no body to
    # validate. Check that all fields are None
    if block.foliage.foliage_transaction_block_hash is None:
        if (
            block.foliage_transaction_block is not None
            or block.transactions_info is not None
            or block.transactions_generator is not None
        ):
            return Err.NOT_BLOCK_BUT_HAS_DATA, None

        prev_tb: BlockRecord = blocks.block_record(block.prev_header_hash)
        while not prev_tb.is_transaction_block:
            prev_tb = blocks.block_record(prev_tb.prev_hash)
        assert prev_tb.timestamp is not None
        if len(block.transactions_generator_ref_list) > 0:
            return Err.NOT_BLOCK_BUT_HAS_DATA, None

        return None, None  # This means the block is valid

    # All checks below this point correspond to transaction blocks
    # 2. For blocks, foliage block, transactions info must not be empty
    if block.foliage_transaction_block is None or block.transactions_info is None:
        return Err.IS_TRANSACTION_BLOCK_BUT_NO_DATA, None
    assert block.foliage_transaction_block is not None

    # keeps track of the reward coins that need to be incorporated
    expected_reward_coins: Set[Coin] = set()

    # 3. The transaction info hash in the Foliage block must match the transaction info
    if block.foliage_transaction_block.transactions_info_hash != std_hash(block.transactions_info):
        return Err.INVALID_TRANSACTIONS_INFO_HASH, None

    # 4. The foliage block hash in the foliage block must match the foliage block
    if block.foliage.foliage_transaction_block_hash != std_hash(block.foliage_transaction_block):
        return Err.INVALID_FOLIAGE_BLOCK_HASH, None

    # 5. The reward claims must be valid for the previous blocks, and current block fees
    # If height == 0, expected_reward_coins will be left empty
    if height > 0:
        # Add reward claims for all blocks from the prev prev block, until the prev block
        # (including the latter)
        prev_transaction_block = blocks.block_record(block.foliage_transaction_block.prev_transaction_block_hash)
        prev_transaction_block_height = prev_transaction_block.height
        assert prev_transaction_block.fees is not None
        pool_coin = create_pool_coin(
            prev_transaction_block_height,
            prev_transaction_block.pool_puzzle_hash,
            calculate_pool_reward(prev_transaction_block.height),
            constants.GENESIS_CHALLENGE,
        )
        farmer_coin = create_farmer_coin(
            prev_transaction_block_height,
            prev_transaction_block.farmer_puzzle_hash,
            uint64(calculate_base_farmer_reward(prev_transaction_block.height) + prev_transaction_block.fees),
            constants.GENESIS_CHALLENGE,
        )
        # Adds the previous block
        expected_reward_coins.add(pool_coin)
        expected_reward_coins.add(farmer_coin)

        # For the second block in the chain, don't go back further
        if prev_transaction_block.height > 0:
            curr_b = blocks.block_record(prev_transaction_block.prev_hash)
            while not curr_b.is_transaction_block:
                expected_reward_coins.add(
                    create_pool_coin(
                        curr_b.height,
                        curr_b.pool_puzzle_hash,
                        calculate_pool_reward(curr_b.height),
                        constants.GENESIS_CHALLENGE,
                    )
                )
                expected_reward_coins.add(
                    create_farmer_coin(
                        curr_b.height,
                        curr_b.farmer_puzzle_hash,
                        calculate_base_farmer_reward(curr_b.height),
                        constants.GENESIS_CHALLENGE,
                    )
                )
                curr_b = blocks.block_record(curr_b.prev_hash)

    if set(block.transactions_info.reward_claims_incorporated) != expected_reward_coins:
        return Err.INVALID_REWARD_COINS, None
    if len(block.transactions_info.reward_claims_incorporated) != len(expected_reward_coins):
        return Err.INVALID_REWARD_COINS, None

    removals: List[bytes32] = []
    coinbase_additions: List[Coin] = list(expected_reward_coins)
    additions: List[Coin] = []
    npc_list: List[NPC] = []
    removals_puzzle_dic: Dict[bytes32, bytes32] = {}
    cost: uint64 = uint64(0)

    # In header validation we check that timestamp is not more than 10 minutes into the future
    # 6. No transactions before INITIAL_TRANSACTION_FREEZE timestamp
    # (this test has been removed)

    # 7a. The generator root must be the hash of the serialized bytes of
    #     the generator for this block (or zeroes if no generator)
    if block.transactions_generator is not None:
        if std_hash(bytes(block.transactions_generator)) != block.transactions_info.generator_root:
            return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None
    else:
        if block.transactions_info.generator_root != bytes([0] * 32):
            return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None

    # 8a. The generator_ref_list must be the hash of the serialized bytes of
    #     the generator ref list for this block (or 'one' bytes [0x01] if no generator)
    # 8b. The generator ref list length must be less than or equal to MAX_GENERATOR_REF_LIST_SIZE entries
    # 8c. The generator ref list must not point to a height >= this block's height
    if block.transactions_generator_ref_list in (None, []):
        if block.transactions_info.generator_refs_root != bytes([1] * 32):
            return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
    else:
        # If we have a generator reference list, we must have a generator
        if block.transactions_generator is None:
            return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None

        # The generator_refs_root must be the hash of the concatenation of the List[uint32]
        generator_refs_hash = std_hash(b"".join([bytes(i) for i in block.transactions_generator_ref_list]))
        if block.transactions_info.generator_refs_root != generator_refs_hash:
            return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
        if len(block.transactions_generator_ref_list) > constants.MAX_GENERATOR_REF_LIST_SIZE:
            return Err.TOO_MANY_GENERATOR_REFS, None
        if any([index >= height for index in block.transactions_generator_ref_list]):
            return Err.FUTURE_GENERATOR_REFS, None

    if block.transactions_generator is not None:
        # Get List of names removed, puzzles hashes for removed coins and conditions created
        assert npc_result is not None
        cost = calculate_cost_of_program(block.transactions_generator, npc_result, constants.COST_PER_BYTE)
        npc_list = npc_result.npc_list

        # 7. Check that cost <= MAX_BLOCK_COST_CLVM
        log.debug(
            f"Cost: {cost} max: {constants.MAX_BLOCK_COST_CLVM} "
            f"percent full: {round(100 * (cost / constants.MAX_BLOCK_COST_CLVM), 2)}%"
        )
        if cost > constants.MAX_BLOCK_COST_CLVM:
            return Err.BLOCK_COST_EXCEEDS_MAX, None

        # 8. The CLVM program must not return any errors
        if npc_result.error is not None:
            return Err(npc_result.error), None

        for npc in npc_list:
            removals.append(npc.coin_name)
            removals_puzzle_dic[npc.coin_name] = npc.puzzle_hash
        additions = additions_for_npc(npc_list)
    else:
        assert npc_result is None

    # 9. Check that the correct cost is in the transactions info
    if block.transactions_info.cost != cost:
        return Err.INVALID_BLOCK_COST, None

    additions_dic: Dict[bytes32, Coin] = {}
    # 10. Check additions for max coin amount
    # Be careful to check for 64 bit overflows in other languages. This is the max 64 bit unsigned integer
    # We will not even reach here because Coins do type checking (uint64)
    for coin in additions + coinbase_additions:
        additions_dic[coin.name()] = coin
        if coin.amount < 0:
            return Err.COIN_AMOUNT_NEGATIVE, None
        if coin.amount > constants.MAX_COIN_AMOUNT:
            return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None

    # 11. Validate addition and removal roots
    root_error = validate_block_merkle_roots(
        block.foliage_transaction_block.additions_root,
        block.foliage_transaction_block.removals_root,
        additions + coinbase_additions,
        removals,
    )
    if root_error:
        return root_error, None

    # 12. The additions and removals must result in the correct filter
    byte_array_tx: List[bytes32] = []
    for coin in additions + coinbase_additions:
        # TODO: address hint error and remove ignore
        #       error: Argument 1 to "append" of "list" has incompatible type "bytearray";
        #       expected "bytes32"  [arg-type]
        byte_array_tx.append(bytearray(coin.puzzle_hash))  # type: ignore[arg-type]
    for coin_name in removals:
        # TODO: address hint error and remove ignore
        #       error: Argument 1 to "append" of "list" has incompatible type "bytearray";
        #       expected "bytes32"  [arg-type]
        byte_array_tx.append(bytearray(coin_name))  # type: ignore[arg-type]

    bip158: PyBIP158 = PyBIP158(byte_array_tx)
    encoded_filter = bytes(bip158.GetEncoded())
    filter_hash = std_hash(encoded_filter)

    if filter_hash != block.foliage_transaction_block.filter_hash:
        return Err.INVALID_TRANSACTIONS_FILTER_HASH, None

    # 13. Check for duplicate outputs in additions
    addition_counter = collections.Counter(_.name() for _ in additions + coinbase_additions)
    for k, v in addition_counter.items():
        if v > 1:
            return Err.DUPLICATE_OUTPUT, None

    # 14. Check for duplicate spends inside block
    removal_counter = collections.Counter(removals)
    for k, v in removal_counter.items():
        if v > 1:
            return Err.DOUBLE_SPEND, None

    # 15. Check if removals exist and were not previously spent. (unspent_db + diff_store + this_block)
    # The fork point is the last block in common between the peak chain and the chain of `block`
    if peak is None or height == 0:
        fork_h: int = -1
    elif fork_point_with_peak is not None:
        fork_h = fork_point_with_peak
    else:
        fork_h = find_fork_point_in_chain(blocks, peak, blocks.block_record(block.prev_header_hash))

    # Get additions and removals since (after) fork_h but not including this block
    # The values include: the coin that was added, the height of the block in which it was confirmed,
    # and the timestamp of the block in which it was confirmed
    additions_since_fork: Dict[bytes32, Tuple[Coin, uint32, uint64]] = {}  # This includes coinbase additions
    removals_since_fork: Set[bytes32] = set()

    # For height 0, there are no additions and removals before this block, so we can skip
    if height > 0:
        # First, get all the blocks in the fork > fork_h, < block.height
        prev_block: Optional[FullBlock] = await block_store.get_full_block(block.prev_header_hash)
        reorg_blocks: Dict[uint32, FullBlock] = {}
        curr: Optional[FullBlock] = prev_block
        assert curr is not None
        while curr.height > fork_h:
            if curr.height == 0:
                break
            curr = await block_store.get_full_block(curr.prev_header_hash)
            assert curr is not None
            reorg_blocks[curr.height] = curr
        if fork_h != -1:
            assert len(reorg_blocks) == height - fork_h - 1

        curr = prev_block
        assert curr is not None
        while curr.height > fork_h:
            # Coin store doesn't contain coins from fork, we have to run generator for each block in fork
            if curr.transactions_generator is not None:
                # These blocks are in the past and therefore assumed to be valid, so
                # get_block_generator won't raise
                curr_block_generator: Optional[BlockGenerator] = await get_block_generator(curr)
                assert curr_block_generator is not None and curr.transactions_info is not None
                curr_npc_result = get_name_puzzle_conditions(
                    curr_block_generator,
                    min(constants.MAX_BLOCK_COST_CLVM, curr.transactions_info.cost),
                    cost_per_byte=constants.COST_PER_BYTE,
                    mempool_mode=False,
                )
                removals_in_curr, additions_in_curr = tx_removals_and_additions(curr_npc_result.npc_list)
            else:
                removals_in_curr = []
                additions_in_curr = []

            for c_name in removals_in_curr:
                assert c_name not in removals_since_fork
                removals_since_fork.add(c_name)
            for c in additions_in_curr:
                assert c.name() not in additions_since_fork
                assert curr.foliage_transaction_block is not None
                additions_since_fork[c.name()] = (c, curr.height, curr.foliage_transaction_block.timestamp)

            for coinbase_coin in curr.get_included_reward_coins():
                assert coinbase_coin.name() not in additions_since_fork
                assert curr.foliage_transaction_block is not None
                additions_since_fork[coinbase_coin.name()] = (
                    coinbase_coin,
                    curr.height,
                    curr.foliage_transaction_block.timestamp,
                )
            if curr.height == 0:
                break
            curr = reorg_blocks[curr.height - 1]
            assert curr is not None

    removal_coin_records: Dict[bytes32, CoinRecord] = {}
    for rem in removals:
        if rem in additions_dic:
            # Ephemeral coin
            rem_coin: Coin = additions_dic[rem]
            new_unspent: CoinRecord = CoinRecord(
                rem_coin,
                height,
                height,
                False,
                block.foliage_transaction_block.timestamp,
            )
            removal_coin_records[new_unspent.name] = new_unspent
        else:
            unspent = await coin_store.get_coin_record(rem)
            if unspent is not None and unspent.confirmed_block_index <= fork_h:
                # Spending something in the current chain, confirmed before fork
                # (We ignore all coins confirmed after fork)
                if unspent.spent == 1 and unspent.spent_block_index <= fork_h:
                    # Check for coins spent in an ancestor block
                    return Err.DOUBLE_SPEND, None
                removal_coin_records[unspent.name] = unspent
            else:
                # This coin is not in the current heaviest chain, so it must be in the fork
                if rem not in additions_since_fork:
                    # Check for spending a coin that does not exist in this fork
                    log.error(f"Err.UNKNOWN_UNSPENT: COIN ID: {rem} NPC RESULT: {npc_result}")
                    return Err.UNKNOWN_UNSPENT, None
                new_coin, confirmed_height, confirmed_timestamp = additions_since_fork[rem]
                new_coin_record: CoinRecord = CoinRecord(
                    new_coin,
                    confirmed_height,
                    uint32(0),
                    False,
                    confirmed_timestamp,
                )
                removal_coin_records[new_coin_record.name] = new_coin_record

            # This check applies to both coins created before fork (pulled from coin_store),
            # and coins created after fork (additions_since_fork)
            if rem in removals_since_fork:
                # This coin was spent in the fork
                return Err.DOUBLE_SPEND_IN_FORK, None

    removed = 0
    for unspent in removal_coin_records.values():
        removed += unspent.coin.amount

    added = 0
    for coin in additions:
        added += coin.amount

    # 16. Check that the total coin amount for added is <= removed
    if removed < added:
        return Err.MINTING_COIN, None

    fees = removed - added
    assert fees >= 0

    assert_fee_sum: uint128 = uint128(0)
    for npc in npc_list:
        if ConditionOpcode.RESERVE_FEE in npc.condition_dict:
            fee_list: List[ConditionWithArgs] = npc.condition_dict[ConditionOpcode.RESERVE_FEE]
            for cvp in fee_list:
                fee = int_from_bytes(cvp.vars[0])
                if fee < 0:
                    return Err.RESERVE_FEE_CONDITION_FAILED, None
                assert_fee_sum = uint128(assert_fee_sum + fee)

    # 17. Check that the assert fee sum <= fees, and that each reserved fee is non-negative
    if fees < assert_fee_sum:
        return Err.RESERVE_FEE_CONDITION_FAILED, None

    # 18. Check that the fee amount + farmer reward < maximum coin amount
    if fees + calculate_base_farmer_reward(height) > constants.MAX_COIN_AMOUNT:
        return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None

    # 19. Check that the computed fees are equal to the fees in the block header
    if block.transactions_info.fees != fees:
        return Err.INVALID_BLOCK_FEE_AMOUNT, None

    # 20. Verify that removed coin puzzle_hashes match with calculated puzzle_hashes
    for unspent in removal_coin_records.values():
        if unspent.coin.puzzle_hash != removals_puzzle_dic[unspent.name]:
            return Err.WRONG_PUZZLE_HASH, None

    # 21. Verify conditions
    for npc in npc_list:
        assert height is not None
        unspent = removal_coin_records[npc.coin_name]
        error = mempool_check_conditions_dict(
            unspent,
            npc.condition_dict,
            prev_transaction_block_height,
            block.foliage_transaction_block.timestamp,
        )
        if error:
            return error, None

    # create hash_key list for aggsig check
    pairs_pks, pairs_msgs = pkm_pairs(npc_list, constants.AGG_SIG_ME_ADDITIONAL_DATA)

    # 22. Verify aggregated signature
    # TODO: move this to pre_validate_blocks_multiprocessing so we can sync faster
    if not block.transactions_info.aggregated_signature:
        return Err.BAD_AGGREGATE_SIGNATURE, None

    # The pairing cache is not useful while syncing as each pairing is seen
    # only once, so the extra effort of populating it is not justified.
    # However, we force caching of pairings just for unfinished blocks
    # as the cache is likely to be useful when validating the corresponding
    # finished blocks later.
    if validate_signature:
        force_cache: bool = isinstance(block, UnfinishedBlock)
        if not cached_bls.aggregate_verify(
            pairs_pks, pairs_msgs, block.transactions_info.aggregated_signature, force_cache
        ):
            return Err.BAD_AGGREGATE_SIGNATURE, None

    return None, npc_result
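# Checks 16-19 above reduce to conservation arithmetic: a block may destroy value (fees) but
# never mint it, and reserved fees must be covered. A toy example of the same accounting with
# illustrative amounts:
removed = 1_000_000   # sum of all removal amounts (coins spent)
added = 990_000       # sum of all addition amounts (excluding reward claims)

assert removed >= added          # otherwise Err.MINTING_COIN
fees = removed - added           # 10_000 available as fees
assert_fee_sum = 8_000           # total of RESERVE_FEE conditions in the block
assert fees >= assert_fee_sum    # otherwise Err.RESERVE_FEE_CONDITION_FAILED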
async def _sync(self) -> None:
    """
    Wallet has fallen far behind (or is starting up for the first time), and must be synced
    up to the LCA of the blockchain.
    """
    if self.wallet_state_manager is None or self.backup_initialized is False or self.server is None:
        return None

    highest_weight: uint128 = uint128(0)
    peak_height: uint32 = uint32(0)
    peak: Optional[HeaderBlock] = None
    potential_peaks: List[
        Tuple[bytes32, HeaderBlock]
    ] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()

    self.log.info(f"Have collected {len(potential_peaks)} potential peaks")

    for header_hash, potential_peak_block in potential_peaks:
        if potential_peak_block.weight > highest_weight:
            highest_weight = potential_peak_block.weight
            peak_height = potential_peak_block.height
            peak = potential_peak_block

    if peak_height is None or peak_height == 0:
        return None

    if self.wallet_state_manager.peak is not None and highest_weight <= self.wallet_state_manager.peak.weight:
        self.log.info("Not performing sync, already caught up.")
        return None

    peers: List[WSChiaConnection] = self.server.get_full_node_connections()
    if len(peers) == 0:
        self.log.info("No peers to sync to")
        return None

    async with self.wallet_state_manager.blockchain.lock:
        fork_height = None
        if peak is not None:
            fork_height = self.wallet_state_manager.sync_store.get_potential_fork_point(peak.header_hash)
            our_peak_height = self.wallet_state_manager.blockchain.get_peak_height()
            ses_heights = self.wallet_state_manager.blockchain.get_ses_heights()
            if len(ses_heights) > 2 and our_peak_height is not None:
                ses_heights.sort()
                max_fork_ses_height = ses_heights[-3]
                # This is the fork point in SES in the case where no fork was detected
                if (
                    self.wallet_state_manager.blockchain.get_peak_height() is not None
                    and fork_height == max_fork_ses_height
                ):
                    peers = self.server.get_full_node_connections()
                    for peer in peers:
                        # Grab a block at peak + 1 and check if fork point is actually our current height
                        potential_height = uint32(our_peak_height + 1)
                        block_response: Optional[Any] = await peer.request_header_blocks(
                            wallet_protocol.RequestHeaderBlocks(potential_height, potential_height)
                        )
                        if block_response is not None and isinstance(
                            block_response, wallet_protocol.RespondHeaderBlocks
                        ):
                            our_peak = self.wallet_state_manager.blockchain.get_peak()
                            if (
                                our_peak is not None
                                and block_response.header_blocks[0].prev_header_hash == our_peak.header_hash
                            ):
                                fork_height = our_peak_height
                            break

        if fork_height is None:
            fork_height = uint32(0)
        await self.wallet_state_manager.blockchain.warmup(fork_height)

        batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
        advanced_peak = False
        for i in range(max(0, fork_height - 1), peak_height, batch_size):
            start_height = i
            end_height = min(peak_height, start_height + batch_size)
            peers = self.server.get_full_node_connections()
            added = False
            for peer in peers:
                try:
                    added, advanced_peak = await self.fetch_blocks_and_validate(
                        peer, uint32(start_height), uint32(end_height), None if advanced_peak else fork_height
                    )
                    if added:
                        break
                except Exception as e:
                    await peer.close()
                    exc = traceback.format_exc()
                    self.log.error(f"Error while trying to fetch from peer:{e} {exc}")
            if not added:
                raise RuntimeError(f"Was not able to add blocks {start_height}-{end_height}")

            peak = self.wallet_state_manager.blockchain.get_peak()
            assert peak is not None
            self.wallet_state_manager.blockchain.clean_block_record(
                min(
                    end_height - self.constants.BLOCKS_CACHE_SIZE,
                    peak.height - self.constants.BLOCKS_CACHE_SIZE,
                )
            )
async def _sync(self) -> None:
    """
    Wallet has fallen far behind (or is starting up for the first time), and must be synced
    up to the LCA of the blockchain.
    """
    if self.wallet_state_manager is None or self.backup_initialized is False or self.server is None:
        return

    highest_weight: uint128 = uint128(0)
    peak_height: uint32 = uint32(0)
    peak: Optional[HeaderBlock] = None
    potential_peaks: List[
        Tuple[bytes32, HeaderBlock]
    ] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()

    self.log.info(f"Have collected {len(potential_peaks)} potential peaks")

    for header_hash, potential_peak_block in potential_peaks:
        if potential_peak_block.weight > highest_weight:
            highest_weight = potential_peak_block.weight
            peak_height = potential_peak_block.height
            peak = potential_peak_block

    if peak_height is None or peak_height == 0:
        return

    if self.wallet_state_manager.peak is not None and highest_weight <= self.wallet_state_manager.peak.weight:
        self.log.info("Not performing sync, already caught up.")
        return

    peers: List[WSChiaConnection] = self.server.get_full_node_connections()
    if len(peers) == 0:
        self.log.info("No peers to sync to")
        return

    await self.wallet_state_manager.blockchain.lock.acquire()
    try:
        fork_height = None
        if peak is not None:
            fork_height = self.wallet_state_manager.sync_store.get_potential_fork_point(peak.header_hash)
        if fork_height is None:
            fork_height = uint32(0)
        await self.wallet_state_manager.blockchain.warmup(fork_height)

        batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
        advanced_peak = False
        for i in range(max(0, fork_height - 1), peak_height, batch_size):
            start_height = i
            end_height = min(peak_height, start_height + batch_size)
            peers = self.server.get_full_node_connections()
            added = False
            for peer in peers:
                try:
                    added, advanced_peak = await self.fetch_blocks_and_validate(
                        peer, uint32(start_height), uint32(end_height), None if advanced_peak else fork_height
                    )
                    if added:
                        break
                except Exception as e:
                    await peer.close()
                    exc = traceback.format_exc()
                    self.log.error(f"Error while trying to fetch from peer:{e} {exc}")
            if not added:
                raise RuntimeError(f"Was not able to add blocks {start_height}-{end_height}")

            peak = self.wallet_state_manager.blockchain.get_peak()
            assert peak is not None
            self.wallet_state_manager.blockchain.clean_block_record(
                min(
                    end_height - self.constants.BLOCKS_CACHE_SIZE,
                    peak.height - self.constants.BLOCKS_CACHE_SIZE,
                )
            )
    finally:
        self.wallet_state_manager.blockchain.lock.release()
async def _sync(self) -> None:
    """
    Wallet has fallen far behind (or is starting up for the first time), and must be synced
    up to the LCA of the blockchain.
    """
    if self.wallet_state_manager is None or self.backup_initialized is False or self.server is None:
        return None

    highest_weight: uint128 = uint128(0)
    peak_height: uint32 = uint32(0)
    peak: Optional[HeaderBlock] = None
    potential_peaks: List[
        Tuple[bytes32, HeaderBlock]
    ] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()

    self.log.info(f"Have collected {len(potential_peaks)} potential peaks")

    for header_hash, potential_peak_block in potential_peaks:
        if potential_peak_block.weight > highest_weight:
            highest_weight = potential_peak_block.weight
            peak_height = potential_peak_block.height
            peak = potential_peak_block

    if peak_height is None or peak_height == 0:
        return None

    if self.wallet_state_manager.peak is not None and highest_weight <= self.wallet_state_manager.peak.weight:
        self.log.info("Not performing sync, already caught up.")
        return None

    peers: List[WSChiaConnection] = self.server.get_full_node_connections()
    if len(peers) == 0:
        self.log.info("No peers to sync to")
        return None

    async with self.wallet_state_manager.blockchain.lock:
        fork_height = None
        if peak is not None:
            fork_height = self.wallet_state_manager.sync_store.get_potential_fork_point(peak.header_hash)
            our_peak_height = self.wallet_state_manager.blockchain.get_peak_height()
            ses_heights = self.wallet_state_manager.blockchain.get_ses_heights()
            if len(ses_heights) > 2 and our_peak_height is not None:
                ses_heights.sort()
                max_fork_ses_height = ses_heights[-3]
                # This is the fork point in SES in the case where no fork was detected
                if (
                    self.wallet_state_manager.blockchain.get_peak_height() is not None
                    and fork_height == max_fork_ses_height
                ):
                    peers = self.server.get_full_node_connections()
                    for peer in peers:
                        # Grab a block at peak + 1 and check if fork point is actually our current height
                        potential_height = uint32(our_peak_height + 1)
                        block_response: Optional[Any] = await peer.request_header_blocks(
                            wallet_protocol.RequestHeaderBlocks(potential_height, potential_height)
                        )
                        if block_response is not None and isinstance(
                            block_response, wallet_protocol.RespondHeaderBlocks
                        ):
                            our_peak = self.wallet_state_manager.blockchain.get_peak()
                            if (
                                our_peak is not None
                                and block_response.header_blocks[0].prev_header_hash == our_peak.header_hash
                            ):
                                fork_height = our_peak_height
                            break

        if fork_height is None:
            fork_height = uint32(0)
        await self.wallet_state_manager.blockchain.warmup(fork_height)
        await self.batch_sync_to_peak(fork_height, peak)
def ip_sub_slot_total_iters(self, constants: ConsensusConstants) -> uint128:
    return uint128(self.total_iters - self.ip_iters(constants))
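# Invariant relating the two helpers above (sketch; assumes a block_record with the fields they
# use): for non-overflow blocks the SP and IP share a sub-slot, while for overflow blocks the SP
# sub-slot starts exactly one sub-slot earlier.
ip_start = block_record.ip_sub_slot_total_iters(constants)
sp_start = block_record.sp_sub_slot_total_iters(constants)
if block_record.overflow:
    assert sp_start == ip_start - block_record.sub_slot_iters
else:
    assert sp_start == ip_start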
async def run_add_block_benchmark(version: int):
    verbose: bool = "--verbose" in sys.argv
    db_wrapper: DBWrapper = await setup_db("block-store-benchmark.db", version)

    # keep track of benchmark total time
    all_test_time = 0.0

    prev_block = bytes32([0] * 32)
    prev_ses_hash = bytes32([0] * 32)

    header_hashes = []

    try:
        block_store = await BlockStore.create(db_wrapper)

        block_height = 1
        timestamp = uint64(1631794488)
        weight = uint128(10)
        iters = uint128(123456)
        sp_index = uint8(0)
        deficit = uint8(0)
        sub_slot_iters = uint64(10)
        required_iters = uint64(100)
        transaction_block_counter = 0
        prev_transaction_block = bytes32([0] * 32)
        prev_transaction_height = uint32(0)
        total_time = 0.0
        ses_counter = 0

        if verbose:
            print("profiling add_full_block", end="")

        for height in range(block_height, block_height + NUM_ITERS):
            is_transaction = transaction_block_counter == 0
            fees = uint64(random.randint(0, 150000))
            farmer_coin, pool_coin = rewards(uint32(height))
            reward_claims_incorporated = [farmer_coin, pool_coin]

            # TODO: increase fidelity by setting these as well
            finished_challenge_slot_hashes = None
            finished_infused_challenge_slot_hashes = None
            finished_reward_slot_hashes = None

            sub_epoch_summary_included = None
            if ses_counter == 0:
                sub_epoch_summary_included = SubEpochSummary(
                    prev_ses_hash,
                    rand_hash(),
                    uint8(random.randint(0, 255)),  # num_blocks_overflow: uint8
                    None,  # new_difficulty: Optional[uint64]
                    None,  # new_sub_slot_iters: Optional[uint64]
                )

            has_pool_pk = random.randint(0, 1)

            proof_of_space = ProofOfSpace(
                rand_hash(),  # challenge
                rand_g1() if has_pool_pk else None,
                rand_hash() if not has_pool_pk else None,
                rand_g1(),  # plot_public_key
                uint8(32),
                rand_bytes(8 * 32),
            )

            reward_chain_block = RewardChainBlock(
                weight,
                uint32(height),
                iters,
                sp_index,
                rand_hash(),  # pos_ss_cc_challenge_hash
                proof_of_space,
                None if sp_index == 0 else rand_vdf(),
                rand_g2(),  # challenge_chain_sp_signature
                rand_vdf(),  # challenge_chain_ip_vdf
                rand_vdf() if sp_index != 0 else None,  # reward_chain_sp_vdf
                rand_g2(),  # reward_chain_sp_signature
                rand_vdf(),  # reward_chain_ip_vdf
                rand_vdf() if deficit < 16 else None,
                is_transaction,
            )

            pool_target = PoolTarget(
                rand_hash(),  # puzzle_hash
                uint32(0),  # max_height
            )

            foliage_block_data = FoliageBlockData(
                rand_hash(),  # unfinished_reward_block_hash
                pool_target,
                rand_g2() if has_pool_pk else None,  # pool_signature
                rand_hash(),  # farmer_reward_puzzle_hash
                bytes32([0] * 32),  # extension_data
            )

            foliage = Foliage(
                prev_block,
                rand_hash(),  # reward_block_hash
                foliage_block_data,
                rand_g2(),  # foliage_block_data_signature
                rand_hash() if is_transaction else None,  # foliage_transaction_block_hash
                rand_g2() if is_transaction else None,  # foliage_transaction_block_signature
            )

            foliage_transaction_block = (
                None
                if not is_transaction
                else FoliageTransactionBlock(
                    prev_transaction_block,
                    timestamp,
                    rand_hash(),  # filter_hash
                    rand_hash(),  # additions_root
                    rand_hash(),  # removals_root
                    rand_hash(),  # transactions_info_hash
                )
            )

            transactions_info = (
                None
                if not is_transaction
                else TransactionsInfo(
                    rand_hash(),  # generator_root
                    rand_hash(),  # generator_refs_root
                    rand_g2(),  # aggregated_signature
                    fees,
                    uint64(random.randint(0, 12000000000)),  # cost
                    reward_claims_incorporated,
                )
            )

            full_block = FullBlock(
                [],  # finished_sub_slots
                reward_chain_block,
                rand_vdf_proof() if sp_index > 0 else None,  # challenge_chain_sp_proof
                rand_vdf_proof(),  # challenge_chain_ip_proof
                rand_vdf_proof() if sp_index > 0 else None,  # reward_chain_sp_proof
                rand_vdf_proof(),  # reward_chain_ip_proof
                rand_vdf_proof() if deficit < 4 else None,  # infused_challenge_chain_ip_proof
                foliage,
                foliage_transaction_block,
                transactions_info,
                None if is_transaction else SerializedProgram.from_bytes(clvm_generator),  # transactions_generator
                [],  # transactions_generator_ref_list
            )

            header_hash = full_block.header_hash

            record = BlockRecord(
                header_hash,
                prev_block,
                uint32(height),
                weight,
                iters,
                sp_index,
                rand_class_group_element(),
                None if deficit > 3 else rand_class_group_element(),
                rand_hash(),  # reward_infusion_new_challenge
                rand_hash(),  # challenge_block_info_hash
                sub_slot_iters,
                rand_hash(),  # pool_puzzle_hash
                rand_hash(),  # farmer_puzzle_hash
                required_iters,
                deficit,
                deficit == 16,
                prev_transaction_height,
                timestamp if is_transaction else None,
                prev_transaction_block if prev_transaction_block != bytes32([0] * 32) else None,
                None if fees == 0 else fees,
                reward_claims_incorporated,
                finished_challenge_slot_hashes,
                finished_infused_challenge_slot_hashes,
                finished_reward_slot_hashes,
                sub_epoch_summary_included,
            )

            start = time()
            await block_store.add_full_block(header_hash, full_block, record, False)
            await block_store.set_in_chain([(header_hash,)])
            header_hashes.append(header_hash)
            await block_store.set_peak(header_hash)
            await db_wrapper.db.commit()

            stop = time()
            total_time += stop - start

            # 19 seconds per block
            timestamp = uint64(timestamp + 19)
            weight = uint128(weight + 10)
            iters = uint128(iters + 123456)
            sp_index = uint8((sp_index + 1) % 64)
            deficit = uint8((deficit + 3) % 17)
            ses_counter = (ses_counter + 1) % 384
            prev_block = header_hash

            # every 33 blocks is a transaction block
            transaction_block_counter = (transaction_block_counter + 1) % 33

            if is_transaction:
                prev_transaction_block = header_hash
                prev_transaction_height = uint32(height)

            if ses_counter == 0:
                prev_ses_hash = header_hash

            if verbose:
                print(".", end="")
                sys.stdout.flush()
        block_height += NUM_ITERS

        if verbose:
            print("")
        print(f"{total_time:0.4f}s, add_full_block")
        all_test_time += total_time

        total_time = 0.0
        if verbose:
            print("profiling get_full_block")
        random.shuffle(header_hashes)
        start = time()
        for h in header_hashes:
            block = await block_store.get_full_block(h)
            assert block.header_hash == h
        stop = time()
        total_time += stop - start

        print(f"{total_time:0.4f}s, get_full_block")
        all_test_time += total_time

        total_time = 0.0
        if verbose:
            print("profiling get_full_block_bytes")
        start = time()
        for h in header_hashes:
            block = await block_store.get_full_block_bytes(h)
            assert len(block) > 0
        stop = time()
        total_time += stop - start

        print(f"{total_time:0.4f}s, get_full_block_bytes")
        all_test_time += total_time

        total_time = 0.0
        if verbose:
            print("profiling get_full_blocks_at")
        start = time()
        for h in range(1, block_height):
            blocks = await block_store.get_full_blocks_at([h])
            assert len(blocks) == 1
            assert blocks[0].height == h
        stop = time()
        total_time += stop - start

        print(f"{total_time:0.4f}s, get_full_blocks_at")
        all_test_time += total_time

        total_time = 0.0
        if verbose:
            print("profiling get_block_records_by_hash")
        start = time()
        for h in header_hashes:
            blocks = await block_store.get_block_records_by_hash([h])
            assert len(blocks) == 1
            assert blocks[0].header_hash == h
        stop = time()
        total_time += stop - start

        print(f"{total_time:0.4f}s, get_block_records_by_hash")
        all_test_time += total_time

        total_time = 0.0
        if verbose:
            print("profiling get_blocks_by_hash")
        start = time()
        for h in header_hashes:
            blocks = await block_store.get_blocks_by_hash([h])
            assert len(blocks) == 1
            assert blocks[0].header_hash == h
        stop = time()
        total_time += stop - start

        print(f"{total_time:0.4f}s, get_blocks_by_hash")
        all_test_time += total_time

        total_time = 0.0
        if verbose:
            print("profiling get_block_record")
        start = time()
        for h in header_hashes:
            blocks = await block_store.get_block_record(h)
            assert blocks.header_hash == h
        stop = time()
        total_time += stop - start

        print(f"{total_time:0.4f}s, get_block_record")
        all_test_time += total_time

        total_time = 0.0
        if verbose:
            print("profiling get_block_records_in_range")
        start = time()
        for i in range(100):
            h = random.randint(1, block_height - 100)
            blocks = await block_store.get_block_records_in_range(h, h + 99)
            assert len(blocks) == 100
        stop = time()
        total_time += stop - start

        print(f"{total_time:0.4f}s, get_block_records_in_range")
        all_test_time += total_time

        total_time = 0.0
        if verbose:
            print("profiling get_block_records_close_to_peak")
        start = time()
        blocks, peak = await block_store.get_block_records_close_to_peak(99)
        assert len(blocks) == 100
        stop = time()
        total_time += stop - start

        print(f"{total_time:0.4f}s, get_block_records_close_to_peak")
        all_test_time += total_time

        total_time = 0.0
        if verbose:
            print("profiling is_fully_compactified")
        start = time()
        for h in header_hashes:
            compactified = await block_store.is_fully_compactified(h)
            assert compactified is False
        stop = time()
        total_time += stop - start

        print(f"{total_time:0.4f}s, is_fully_compactified")
        all_test_time += total_time

        total_time = 0.0
        if verbose:
            print("profiling get_random_not_compactified")
        start = time()
        for i in range(1, 5000):
            blocks = await block_store.get_random_not_compactified(100)
            assert len(blocks) == 100
        stop = time()
        total_time += stop - start

        print(f"{total_time:0.4f}s, get_random_not_compactified")
        all_test_time += total_time

        print(f"all tests completed in {all_test_time:0.4f}s")

        db_size = os.path.getsize(Path("block-store-benchmark.db"))
        print(f"database size: {db_size/1000000:.3f} MB")

    finally:
        await db_wrapper.db.close()
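# Sketch of a driver for the benchmark above (assumption: the real script's entry point and
# supported schema versions may differ; this just shows how the coroutine would be invoked):
import asyncio

if __name__ == "__main__":
    for version in (1, 2):
        print(f"=== benchmarking DB version {version} ===")
        asyncio.run(run_add_block_benchmark(version))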
def set_state(self, state: Union[timelord_protocol.NewPeakTimelord, EndOfSubSlotBundle]):
    if isinstance(state, timelord_protocol.NewPeakTimelord):
        self.state_type = StateType.PEAK
        self.peak = state
        self.subslot_end = None
        _, self.last_ip = iters_from_block(
            self.constants,
            state.reward_chain_block,
            state.sub_slot_iters,
            state.difficulty,
        )
        self.deficit = state.deficit
        self.sub_epoch_summary = state.sub_epoch_summary
        self.last_weight = state.reward_chain_block.weight
        self.last_height = state.reward_chain_block.height
        self.total_iters = state.reward_chain_block.total_iters
        self.last_peak_challenge = state.reward_chain_block.get_hash()
        self.difficulty = state.difficulty
        self.sub_slot_iters = state.sub_slot_iters
        if state.reward_chain_block.is_transaction_block:
            self.last_block_total_iters = self.total_iters
        self.reward_challenge_cache = state.previous_reward_challenges
        self.last_challenge_sb_or_eos_total_iters = self.peak.last_challenge_sb_or_eos_total_iters
        self.new_epoch = False
        if (self.peak.reward_chain_block.height + 1) % self.constants.SUB_EPOCH_BLOCKS == 0:
            self.passed_ses_height_but_not_yet_included = True
        else:
            self.passed_ses_height_but_not_yet_included = state.passes_ses_height_but_not_yet_included
        log.warning(f"Signage point index: {self.peak.reward_chain_block.signage_point_index}")
    elif isinstance(state, EndOfSubSlotBundle):
        self.state_type = StateType.END_OF_SUB_SLOT
        if self.peak is not None:
            self.total_iters = uint128(self.total_iters - self.get_last_ip() + self.sub_slot_iters)
        else:
            self.total_iters = uint128(self.total_iters + self.sub_slot_iters)
        self.peak = None
        self.subslot_end = state
        self.last_ip = uint64(0)
        self.deficit = state.reward_chain.deficit
        if state.challenge_chain.new_difficulty is not None:
            assert state.challenge_chain.new_sub_slot_iters is not None
            self.difficulty = state.challenge_chain.new_difficulty
            self.sub_slot_iters = state.challenge_chain.new_sub_slot_iters
            self.new_epoch = True
        else:
            self.new_epoch = False
        if state.challenge_chain.subepoch_summary_hash is not None:
            self.infused_ses = True
            self.passed_ses_height_but_not_yet_included = False
        else:
            self.infused_ses = False
            self.passed_ses_height_but_not_yet_included = self.passed_ses_height_but_not_yet_included
        self.last_challenge_sb_or_eos_total_iters = self.total_iters
    else:
        self.passed_ses_height_but_not_yet_included = self.passed_ses_height_but_not_yet_included
        self.new_epoch = False

    self.reward_challenge_cache.append((self.get_challenge(Chain.REWARD_CHAIN), self.total_iters))
    log.info(f"Updated timelord peak to {self.get_challenge(Chain.REWARD_CHAIN)}, total iters: {self.total_iters}")
    while len(self.reward_challenge_cache) > 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
        self.reward_challenge_cache.pop(0)
def new_finished_sub_slot(
    self,
    eos: EndOfSubSlotBundle,
    blocks: BlockchainInterface,
    peak: Optional[BlockRecord],
    peak_full_block: Optional[FullBlock],
) -> Optional[List[timelord_protocol.NewInfusionPointVDF]]:
    """
    Returns None if not added. Returns a list if added; the list contains all infusion points
    that depended on this sub slot.
    """
    assert len(self.finished_sub_slots) >= 1
    assert (peak is None) == (peak_full_block is None)

    last_slot, _, last_slot_iters = self.finished_sub_slots[-1]

    cc_challenge: bytes32 = (
        last_slot.challenge_chain.get_hash() if last_slot is not None else self.constants.GENESIS_CHALLENGE
    )
    rc_challenge: bytes32 = (
        last_slot.reward_chain.get_hash() if last_slot is not None else self.constants.GENESIS_CHALLENGE
    )
    icc_challenge: Optional[bytes32] = None
    icc_iters: Optional[uint64] = None

    # Skip if already present
    for slot, _, _ in self.finished_sub_slots:
        if slot == eos:
            return []

    if eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge != cc_challenge:
        # This slot does not append to our next slot
        # This prevents other peers from appending fake VDFs to our cache
        return None

    if peak is None:
        sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
    else:
        sub_slot_iters = peak.sub_slot_iters

    total_iters = uint128(last_slot_iters + sub_slot_iters)

    if peak is not None and peak.total_iters > last_slot_iters:
        # Peak is in this slot
        rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
        cc_start_element = peak.challenge_vdf_output
        iters = uint64(total_iters - peak.total_iters)
        if peak.reward_infusion_new_challenge != rc_challenge:
            # We don't have this challenge hash yet
            if rc_challenge not in self.future_eos_cache:
                self.future_eos_cache[rc_challenge] = []
            self.future_eos_cache[rc_challenge].append(eos)
            self.future_cache_key_times[rc_challenge] = int(time.time())
            log.info(f"Don't have challenge hash {rc_challenge}, caching EOS")
            return None

        if peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
            icc_start_element = None
        elif peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
            icc_start_element = ClassgroupElement.get_default_element()
        else:
            icc_start_element = peak.infused_challenge_vdf_output

        if peak.deficit < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
            curr = peak
            while not curr.first_in_sub_slot and not curr.is_challenge_block(self.constants):
                curr = blocks.block_record(curr.prev_hash)
            if curr.is_challenge_block(self.constants):
                icc_challenge = curr.challenge_block_info_hash
                icc_iters = uint64(total_iters - curr.total_iters)
            else:
                assert curr.finished_infused_challenge_slot_hashes is not None
                icc_challenge = curr.finished_infused_challenge_slot_hashes[-1]
                icc_iters = sub_slot_iters
            assert icc_challenge is not None

        if can_finish_sub_and_full_epoch(
            self.constants,
            blocks,
            peak.height,
            peak.prev_hash,
            peak.deficit,
            peak.sub_epoch_summary_included is not None,
        )[0]:
            assert peak_full_block is not None
            ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
                self.constants, blocks, peak.required_iters, peak_full_block, True
            )
            if ses is not None:
                if eos.challenge_chain.subepoch_summary_hash != ses.get_hash():
                    log.warning(f"SES not correct {ses.get_hash(), eos.challenge_chain}")
                    return None
            else:
                if eos.challenge_chain.subepoch_summary_hash is not None:
                    log.warning("SES not correct, should be None")
                    return None
    else:
        # This is on an empty slot
        cc_start_element = ClassgroupElement.get_default_element()
        icc_start_element = ClassgroupElement.get_default_element()
        iters = sub_slot_iters
        icc_iters = sub_slot_iters

        # The icc should only be present if the previous slot had an icc too,
        # and not deficit 0 (just finished slot)
        icc_challenge = (
            last_slot.infused_challenge_chain.get_hash()
            if last_slot is not None
            and last_slot.infused_challenge_chain is not None
            and last_slot.reward_chain.deficit != self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
            else None
        )

    # Validate cc VDF
    partial_cc_vdf_info = VDFInfo(
        cc_challenge,
        iters,
        eos.challenge_chain.challenge_chain_end_of_slot_vdf.output,
    )
    # The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak
    if eos.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(
        partial_cc_vdf_info,
        number_of_iterations=sub_slot_iters,
    ):
        return None
    if (
        not eos.proofs.challenge_chain_slot_proof.normalized_to_identity
        and not eos.proofs.challenge_chain_slot_proof.is_valid(
            self.constants,
            cc_start_element,
            partial_cc_vdf_info,
        )
    ):
        return None
    if (
        eos.proofs.challenge_chain_slot_proof.normalized_to_identity
        and not eos.proofs.challenge_chain_slot_proof.is_valid(
            self.constants,
            ClassgroupElement.get_default_element(),
            eos.challenge_chain.challenge_chain_end_of_slot_vdf,
        )
    ):
        return None

    # Validate reward chain VDF
    if not eos.proofs.reward_chain_slot_proof.is_valid(
        self.constants,
        ClassgroupElement.get_default_element(),
        eos.reward_chain.end_of_slot_vdf,
        VDFInfo(rc_challenge, iters, eos.reward_chain.end_of_slot_vdf.output),
    ):
        return None

    if icc_challenge is not None:
        assert icc_start_element is not None
        assert icc_iters is not None
        assert eos.infused_challenge_chain is not None
        assert eos.proofs.infused_challenge_chain_slot_proof is not None
        partial_icc_vdf_info = VDFInfo(
            icc_challenge,
            iters,
            eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
        )
        # The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak
        if eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(
            partial_icc_vdf_info,
            number_of_iterations=icc_iters,
        ):
            return None
        if (
            not eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
            and not eos.proofs.infused_challenge_chain_slot_proof.is_valid(
                self.constants, icc_start_element, partial_icc_vdf_info
            )
        ):
            return None
        if (
            eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
            and not eos.proofs.infused_challenge_chain_slot_proof.is_valid(
                self.constants,
                ClassgroupElement.get_default_element(),
                eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
            )
        ):
            return None
    else:
        # This is the first sub slot and it's empty, therefore there is no ICC
        if eos.infused_challenge_chain is not None or eos.proofs.infused_challenge_chain_slot_proof is not None:
            return None

    self.finished_sub_slots.append((eos, [None] * self.constants.NUM_SPS_SUB_SLOT, total_iters))

    new_ips: List[timelord_protocol.NewInfusionPointVDF] = []
    for ip in self.future_ip_cache.get(eos.reward_chain.get_hash(), []):
        new_ips.append(ip)

    return new_ips
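# Toy illustration (with a stand-in VDFInfo) of the dataclasses.replace pattern used
# in the validation above: the locally computed info covers only the delta iterations
# since the peak, while the EOS message must carry the full sub-slot count, so the
# comparison swaps in number_of_iterations before testing equality.
import dataclasses


@dataclasses.dataclass(frozen=True)
class ToyVDFInfo:
    challenge: bytes
    number_of_iterations: int
    output: bytes


partial = ToyVDFInfo(b"cc-challenge", 1234, b"out")  # delta since the peak
received = ToyVDFInfo(b"cc-challenge", 10000, b"out")  # full sub-slot iters
assert received == dataclasses.replace(partial, number_of_iterations=10000)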
async def test_basic_store(self, empty_blockchain, normalized_to_identity: bool = False): blockchain = empty_blockchain blocks = bt.get_consecutive_blocks( 10, seed=b"1234", normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) store = await FullNodeStore.create(test_constants) unfinished_blocks = [] for block in blocks: unfinished_blocks.append( UnfinishedBlock( block.finished_sub_slots, block.reward_chain_block.get_unfinished(), block.challenge_chain_sp_proof, block.reward_chain_sp_proof, block.foliage, block.foliage_transaction_block, block.transactions_info, block.transactions_generator, [], )) # Add/get candidate block assert store.get_candidate_block( unfinished_blocks[0].get_hash()) is None for height, unf_block in enumerate(unfinished_blocks): store.add_candidate_block(unf_block.get_hash(), height, unf_block) assert store.get_candidate_block( unfinished_blocks[4].get_hash()) == unfinished_blocks[4] store.clear_candidate_blocks_below(uint32(8)) assert store.get_candidate_block( unfinished_blocks[5].get_hash()) is None assert store.get_candidate_block( unfinished_blocks[8].get_hash()) is not None # Test seen unfinished blocks h_hash_1 = bytes32(token_bytes(32)) assert not store.seen_unfinished_block(h_hash_1) assert store.seen_unfinished_block(h_hash_1) store.clear_seen_unfinished_blocks() assert not store.seen_unfinished_block(h_hash_1) # Add/get unfinished block for height, unf_block in enumerate(unfinished_blocks): assert store.get_unfinished_block(unf_block.partial_hash) is None store.add_unfinished_block( height, unf_block, PreValidationResult(None, uint64(123532), None)) assert store.get_unfinished_block( unf_block.partial_hash) == unf_block store.remove_unfinished_block(unf_block.partial_hash) assert store.get_unfinished_block(unf_block.partial_hash) is None blocks = bt.get_consecutive_blocks( 1, skip_slots=5, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, ) sub_slots = blocks[0].finished_sub_slots assert len(sub_slots) == 5 assert (store.get_finished_sub_slots( BlockCache({}), None, sub_slots[0].challenge_chain.challenge_chain_end_of_slot_vdf. 
challenge, ) == []) # Test adding non-connecting sub-slots genesis assert store.get_sub_slot(test_constants.GENESIS_CHALLENGE) is None assert store.get_sub_slot( sub_slots[0].challenge_chain.get_hash()) is None assert store.get_sub_slot( sub_slots[1].challenge_chain.get_hash()) is None assert store.new_finished_sub_slot(sub_slots[1], {}, None, None) is None assert store.new_finished_sub_slot(sub_slots[2], {}, None, None) is None # Test adding sub-slots after genesis assert store.new_finished_sub_slot(sub_slots[0], {}, None, None) is not None assert store.get_sub_slot( sub_slots[0].challenge_chain.get_hash())[0] == sub_slots[0] assert store.get_sub_slot( sub_slots[1].challenge_chain.get_hash()) is None assert store.new_finished_sub_slot(sub_slots[1], {}, None, None) is not None for i in range(len(sub_slots)): assert store.new_finished_sub_slot(sub_slots[i], {}, None, None) is not None assert store.get_sub_slot( sub_slots[i].challenge_chain.get_hash())[0] == sub_slots[i] assert store.get_finished_sub_slots( BlockCache({}), None, sub_slots[-1].challenge_chain.get_hash()) == sub_slots assert store.get_finished_sub_slots(BlockCache( {}), None, std_hash(b"not a valid hash")) is None assert (store.get_finished_sub_slots( BlockCache({}), None, sub_slots[-2].challenge_chain.get_hash()) == sub_slots[:-1]) # Test adding genesis peak await blockchain.receive_block(blocks[0]) peak = blockchain.get_peak() peak_full_block = await blockchain.get_full_peak() if peak.overflow: store.new_peak(peak, peak_full_block, sub_slots[-2], sub_slots[-1], False, {}) else: store.new_peak(peak, peak_full_block, None, sub_slots[-1], False, {}) assert store.get_sub_slot( sub_slots[0].challenge_chain.get_hash()) is None assert store.get_sub_slot( sub_slots[1].challenge_chain.get_hash()) is None assert store.get_sub_slot( sub_slots[2].challenge_chain.get_hash()) is None if peak.overflow: assert store.get_sub_slot( sub_slots[3].challenge_chain.get_hash())[0] == sub_slots[3] else: assert store.get_sub_slot( sub_slots[3].challenge_chain.get_hash()) is None assert store.get_sub_slot( sub_slots[4].challenge_chain.get_hash())[0] == sub_slots[4] assert (store.get_finished_sub_slots( blockchain, peak, sub_slots[-1].challenge_chain.get_hash(), ) == []) # Test adding non genesis peak directly blocks = bt.get_consecutive_blocks( 2, skip_slots=2, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) blocks = bt.get_consecutive_blocks( 3, block_list_input=blocks, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) for block in blocks: await blockchain.receive_block(block) sb = blockchain.block_record(block.header_hash) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( block.header_hash) res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, False, blockchain) assert res[0] is None # Add reorg blocks blocks_reorg = bt.get_consecutive_blocks( 20, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) for block in blocks_reorg: res, _, _ = await blockchain.receive_block(block) if res == ReceiveBlockResult.NEW_PEAK: sb = 
blockchain.block_record(block.header_hash) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( block.header_hash) res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, True, blockchain) assert res[0] is None # Add slots to the end blocks_2 = bt.get_consecutive_blocks( 1, block_list_input=blocks_reorg, skip_slots=2, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) for slot in blocks_2[-1].finished_sub_slots: store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak(), await blockchain.get_full_peak()) assert store.get_sub_slot( sub_slots[3].challenge_chain.get_hash()) is None assert store.get_sub_slot( sub_slots[4].challenge_chain.get_hash()) is None # Test adding signage point peak = blockchain.get_peak() ss_start_iters = peak.ip_sub_slot_total_iters(test_constants) for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA): sp = get_signage_point( test_constants, blockchain, peak, ss_start_iters, uint8(i), [], peak.sub_slot_iters, ) assert store.new_signage_point(i, blockchain, peak, peak.sub_slot_iters, sp) blocks = blocks_reorg while True: blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) res, _, _ = await blockchain.receive_block(blocks[-1]) if res == ReceiveBlockResult.NEW_PEAK: sb = blockchain.block_record(blocks[-1].header_hash) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( blocks[-1].header_hash) res = store.new_peak(sb, blocks[-1], sp_sub_slot, ip_sub_slot, True, blockchain) assert res[0] is None if sb.overflow and sp_sub_slot is not None: assert sp_sub_slot != ip_sub_slot break peak = blockchain.get_peak() assert peak.overflow # Overflow peak should result in 2 finished sub slots assert len(store.finished_sub_slots) == 2 # Add slots to the end, except for the last one, which we will use to test invalid SP blocks_2 = bt.get_consecutive_blocks( 1, block_list_input=blocks, skip_slots=3, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) for slot in blocks_2[-1].finished_sub_slots[:-1]: store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak(), await blockchain.get_full_peak()) finished_sub_slots = blocks_2[-1].finished_sub_slots assert len(store.finished_sub_slots) == 4 # Test adding signage points for overflow blocks (sp_sub_slot) ss_start_iters = peak.sp_sub_slot_total_iters(test_constants) # for i in range(peak.signage_point_index, test_constants.NUM_SPS_SUB_SLOT): # if i < peak.signage_point_index: # continue # latest = peak # while latest.total_iters > peak.sp_total_iters(test_constants): # latest = blockchain.blocks[latest.prev_hash] # sp = get_signage_point( # test_constants, # blockchain.blocks, # latest, # ss_start_iters, # uint8(i), # [], # peak.sub_slot_iters, # ) # assert store.new_signage_point(i, blockchain.blocks, peak, peak.sub_slot_iters, sp) # Test adding signage points for overflow blocks (ip_sub_slot) for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA): sp = get_signage_point( 
test_constants, blockchain, peak, peak.ip_sub_slot_total_iters(test_constants), uint8(i), [], peak.sub_slot_iters, ) assert store.new_signage_point(i, blockchain, peak, peak.sub_slot_iters, sp) # Test adding future signage point, a few slots forward (good) saved_sp_hash = None for slot_offset in range(1, len(finished_sub_slots)): for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA, ): sp = get_signage_point( test_constants, blockchain, peak, peak.ip_sub_slot_total_iters(test_constants) + slot_offset * peak.sub_slot_iters, uint8(i), finished_sub_slots[:slot_offset], peak.sub_slot_iters, ) assert sp.cc_vdf is not None saved_sp_hash = sp.cc_vdf.output.get_hash() assert store.new_signage_point(i, blockchain, peak, peak.sub_slot_iters, sp) # Test adding future signage point (bad) for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA): sp = get_signage_point( test_constants, blockchain, peak, peak.ip_sub_slot_total_iters(test_constants) + len(finished_sub_slots) * peak.sub_slot_iters, uint8(i), finished_sub_slots[:len(finished_sub_slots)], peak.sub_slot_iters, ) assert not store.new_signage_point(i, blockchain, peak, peak.sub_slot_iters, sp) # Test adding past signage point sp = SignagePoint( blocks[1].reward_chain_block.challenge_chain_sp_vdf, blocks[1].challenge_chain_sp_proof, blocks[1].reward_chain_block.reward_chain_sp_vdf, blocks[1].reward_chain_sp_proof, ) assert not store.new_signage_point( blocks[1].reward_chain_block.signage_point_index, {}, peak, blockchain.block_record( blocks[1].header_hash).sp_sub_slot_total_iters(test_constants), sp, ) # Get signage point by index assert (store.get_signage_point_by_index( finished_sub_slots[0].challenge_chain.get_hash(), 4, finished_sub_slots[0].reward_chain.get_hash(), ) is not None) assert (store.get_signage_point_by_index( finished_sub_slots[0].challenge_chain.get_hash(), 4, std_hash(b"1")) is None) # Get signage point by hash assert store.get_signage_point(saved_sp_hash) is not None assert store.get_signage_point(std_hash(b"2")) is None # Test adding signage points before genesis store.initialize_genesis_sub_slot() assert len(store.finished_sub_slots) == 1 for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA): sp = get_signage_point( test_constants, BlockCache({}, {}), None, uint128(0), uint8(i), [], peak.sub_slot_iters, ) assert store.new_signage_point(i, {}, None, peak.sub_slot_iters, sp) blocks_3 = bt.get_consecutive_blocks( 1, skip_slots=2, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) for slot in blocks_3[-1].finished_sub_slots: store.new_finished_sub_slot(slot, {}, None, None) assert len(store.finished_sub_slots) == 3 finished_sub_slots = blocks_3[-1].finished_sub_slots for slot_offset in range(1, len(finished_sub_slots) + 1): for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA, ): sp = get_signage_point( test_constants, BlockCache({}, {}), None, slot_offset * peak.sub_slot_iters, uint8(i), finished_sub_slots[:slot_offset], peak.sub_slot_iters, ) assert store.new_signage_point(i, {}, None, peak.sub_slot_iters, sp) # Test adding signage points after genesis blocks_4 = bt.get_consecutive_blocks( 1, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, 
normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) blocks_5 = bt.get_consecutive_blocks( 1, block_list_input=blocks_4, skip_slots=1, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) # If this is not the case, fix test to find a block that is assert (blocks_4[-1].reward_chain_block.signage_point_index < test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA) await blockchain.receive_block(blocks_4[-1]) sb = blockchain.block_record(blocks_4[-1].header_hash) store.new_peak(sb, blocks_4[-1], None, None, False, blockchain) for i in range( sb.signage_point_index + test_constants.NUM_SP_INTERVALS_EXTRA, test_constants.NUM_SPS_SUB_SLOT, ): if is_overflow_block(test_constants, uint8(i)): finished_sub_slots = blocks_5[-1].finished_sub_slots else: finished_sub_slots = [] sp = get_signage_point( test_constants, blockchain, sb, uint128(0), uint8(i), finished_sub_slots, peak.sub_slot_iters, ) assert store.new_signage_point(i, empty_blockchain, sb, peak.sub_slot_iters, sp) # Test future EOS cache store.initialize_genesis_sub_slot() blocks = bt.get_consecutive_blocks( 1, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) await blockchain.receive_block(blocks[-1]) while True: blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) await blockchain.receive_block(blocks[-1]) sb = blockchain.block_record(blocks[-1].header_hash) if sb.first_in_sub_slot: break assert len(blocks) >= 2 dependant_sub_slots = blocks[-1].finished_sub_slots peak = blockchain.get_peak() peak_full_block = await blockchain.get_full_peak() for block in blocks[:-2]: sb = blockchain.block_record(block.header_hash) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( block.header_hash) peak = sb peak_full_block = block res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, False, blockchain) assert res[0] is None assert store.new_finished_sub_slot(dependant_sub_slots[0], blockchain, peak, peak_full_block) is None block = blocks[-2] sb = blockchain.block_record(block.header_hash) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( block.header_hash) res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, False, blockchain) assert res[0] == dependant_sub_slots[0] assert res[1] == res[2] == [] # Test future IP cache store.initialize_genesis_sub_slot() blocks = bt.get_consecutive_blocks( 60, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, ) for block in blocks[:5]: await blockchain.receive_block(block) sb = blockchain.block_record(block.header_hash) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( block.header_hash) res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, False, blockchain) assert res[0] is None case_0, case_1 = False, False for i in range(5, len(blocks) - 1): prev_block = blocks[i] 
block = blocks[i + 1] new_ip = NewInfusionPointVDF( block.reward_chain_block.get_unfinished().get_hash(), block.reward_chain_block.challenge_chain_ip_vdf, block.challenge_chain_ip_proof, block.reward_chain_block.reward_chain_ip_vdf, block.reward_chain_ip_proof, block.reward_chain_block.infused_challenge_chain_ip_vdf, block.infused_challenge_chain_ip_proof, ) store.add_to_future_ip(new_ip) await blockchain.receive_block(prev_block) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( prev_block.header_hash) sb = blockchain.block_record(prev_block.header_hash) res = store.new_peak(sb, prev_block, sp_sub_slot, ip_sub_slot, False, blockchain) if len(block.finished_sub_slots) == 0: case_0 = True assert res[2] == [new_ip] else: case_1 = True assert res[2] == [] found_ips = [] for ss in block.finished_sub_slots: found_ips += store.new_finished_sub_slot( ss, blockchain, sb, prev_block) assert found_ips == [new_ip] # If flaky, increase the number of blocks created assert case_0 and case_1
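# Hypothetical wiring for the test above (assumption: the method lives on a test
# class, here called TestFullNodeStore, and an `empty_blockchain` fixture exists).
# Parametrizing normalized_to_identity exercises both the regular and the
# normalized-to-identity (compact) VDF proof paths in one run.
import pytest


@pytest.mark.asyncio
@pytest.mark.parametrize("normalized_to_identity", [False, True])
async def test_basic_store_parametrized(empty_blockchain, normalized_to_identity: bool):
    await TestFullNodeStore().test_basic_store(empty_blockchain, normalized_to_identity)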
"c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" )), G2Element( bytes.fromhex( "c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" )), ) ### FULL NODE PROTOCOL. new_peak = full_node_protocol.NewPeak( bytes32( bytes.fromhex( "8a346e8dc02e9b44c0571caa74fd99f163d4c5d7deae9f8ddb00528721493f7a") ), uint32(2653549198), uint128(196318552117141200341240034145143439804), uint32(928039765), bytes32( bytes.fromhex( "dd421c55d4edaeeb3ad60e80d73c2005a1b275c381c7e418915200d7467711b5") ), ) new_transaction = full_node_protocol.NewTransaction( bytes32( bytes.fromhex( "e4fe833328d4e82f9c57bc1fc2082c9b63da23e46927522cb5a073f9f0979b6a") ), uint64(13950654730705425115), uint64(10674036971945712700), )
def next_sub_epoch_summary(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    required_iters: uint64,
    block: Union[UnfinishedBlock, FullBlock],
    can_finish_soon: bool = False,
) -> Optional[SubEpochSummary]:
    """
    Returns the sub-epoch summary that can be included in the block after `block`, if one should be
    included. `block` must be eligible to be the last block in the epoch; if it is not, returns None.
    Assumes that there is a new slot ending after `block`.

    Args:
        constants: consensus constants being used for this chain
        blocks: interface to cached SBR
        required_iters: required iters of the proof of space in block
        block: the (potentially) last block in the new epoch
        can_finish_soon: this is useful when sending SES to timelords. We might not be able to finish
            it, but we will soon (within MAX_SUB_SLOT_BLOCKS)

    Returns:
        object: the new sub-epoch summary
    """
    signage_point_index = block.reward_chain_block.signage_point_index
    prev_b: Optional[BlockRecord] = blocks.try_block_record(block.prev_header_hash)
    if prev_b is None or prev_b.height == 0:
        return None

    if len(block.finished_sub_slots) > 0 and block.finished_sub_slots[0].challenge_chain.new_difficulty is not None:
        # We just included a sub-epoch summary
        return None

    assert prev_b is not None
    # This is the ssi of the current block
    sub_slot_iters = get_next_sub_slot_iters_and_difficulty(
        constants, len(block.finished_sub_slots) > 0, prev_b, blocks
    )[0]
    overflow = is_overflow_block(constants, signage_point_index)

    if (
        len(block.finished_sub_slots) > 0
        and block.finished_sub_slots[0].challenge_chain.subepoch_summary_hash is not None
    ):
        return None

    if can_finish_soon:
        deficit: uint8 = uint8(0)  # Assume that our deficit will go to zero soon
        can_finish_se = True
        if height_can_be_first_in_epoch(constants, uint32(prev_b.height + 2)):
            can_finish_epoch = True
            if (prev_b.height + 2) % constants.SUB_EPOCH_BLOCKS > 1:
                curr: BlockRecord = prev_b
                while curr.height % constants.SUB_EPOCH_BLOCKS > 0:
                    if (
                        curr.sub_epoch_summary_included is not None
                        and curr.sub_epoch_summary_included.new_difficulty is not None
                    ):
                        can_finish_epoch = False
                    curr = blocks.block_record(curr.prev_hash)

                if (
                    curr.sub_epoch_summary_included is not None
                    and curr.sub_epoch_summary_included.new_difficulty is not None
                ):
                    can_finish_epoch = False
        elif height_can_be_first_in_epoch(constants, uint32(prev_b.height + constants.MAX_SUB_SLOT_BLOCKS + 2)):
            can_finish_epoch = True
        else:
            can_finish_epoch = False
    else:
        deficit = calculate_deficit(
            constants,
            uint32(prev_b.height + 1),
            prev_b,
            overflow,
            len(block.finished_sub_slots),
        )
        can_finish_se, can_finish_epoch = can_finish_sub_and_full_epoch(
            constants,
            blocks,
            uint32(prev_b.height + 1),
            prev_b.header_hash if prev_b is not None else None,
            deficit,
            False,
        )

    # can't finish se, no summary
    if not can_finish_se:
        return None

    next_difficulty = None
    next_sub_slot_iters = None

    # if can finish epoch, new difficulty and ssi
    if can_finish_epoch:
        sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
        ip_iters = calculate_ip_iters(constants, sub_slot_iters, signage_point_index, required_iters)

        next_difficulty = _get_next_difficulty(
            constants,
            blocks,
            block.prev_header_hash,
            uint32(prev_b.height + 1),
            uint64(prev_b.weight - blocks.block_record(prev_b.prev_hash).weight),
            deficit,
            False,  # Already checked above
            True,
            uint128(block.total_iters - ip_iters + sp_iters - (sub_slot_iters if overflow else 0)),
            True,
        )
        next_sub_slot_iters = _get_next_sub_slot_iters(
            constants,
            blocks,
            block.prev_header_hash,
            uint32(prev_b.height + 1),
            sub_slot_iters,
            deficit,
            False,  # Already checked above
            True,
            uint128(block.total_iters - ip_iters + sp_iters - (sub_slot_iters if overflow else 0)),
            True,
        )

    return make_sub_epoch_summary(
        constants,
        blocks,
        uint32(prev_b.height + 2),
        prev_b,
        next_difficulty,
        next_sub_slot_iters,
    )
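# Worked example (illustrative numbers) of the signage-point total-iters expression
# passed to _get_next_difficulty / _get_next_sub_slot_iters above:
#     block.total_iters - ip_iters + sp_iters - (sub_slot_iters if overflow else 0)
# Rewinding ip_iters lands on the slot start, plus sp_iters reaches the signage point;
# for an overflow block the signage point belongs to the previous sub-slot, so one
# whole sub-slot is removed as well.
def sp_total_iters(total_iters: int, ip_iters: int, sp_iters: int, sub_slot_iters: int, overflow: bool) -> int:
    return total_iters - ip_iters + sp_iters - (sub_slot_iters if overflow else 0)


# Non-overflow: infusion at 5000 with ip_iters=400 and sp_iters=100 -> SP at 4700.
assert sp_total_iters(5000, 400, 100, 4096, False) == 4700
# Overflow: the same block's SP sits one sub-slot (4096 iters) earlier.
assert sp_total_iters(5000, 400, 100, 4096, True) == 604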
def set_state(self, state: Union[timelord_protocol.NewPeakTimelord, EndOfSubSlotBundle]): if isinstance(state, timelord_protocol.NewPeakTimelord): self.state_type = StateType.PEAK self.peak = state self.subslot_end = None _, self.last_ip = iters_from_block( self.constants, state.reward_chain_block, state.sub_slot_iters, state.difficulty, ) self.deficit = state.deficit self.sub_epoch_summary = state.sub_epoch_summary self.last_weight = state.reward_chain_block.weight self.last_height = state.reward_chain_block.height self.total_iters = state.reward_chain_block.total_iters self.last_peak_challenge = state.reward_chain_block.get_hash() self.difficulty = state.difficulty self.sub_slot_iters = state.sub_slot_iters if state.reward_chain_block.is_transaction_block: self.last_block_total_iters = self.total_iters self.reward_challenge_cache = state.previous_reward_challenges self.last_challenge_sb_or_eos_total_iters = self.peak.last_challenge_sb_or_eos_total_iters self.new_epoch = False if (self.peak.reward_chain_block.height + 1) % self.constants.SUB_EPOCH_BLOCKS == 0: self.passed_ses_height_but_not_yet_included = True else: self.passed_ses_height_but_not_yet_included = state.passes_ses_height_but_not_yet_included elif isinstance(state, EndOfSubSlotBundle): self.state_type = StateType.END_OF_SUB_SLOT if self.peak is not None: self.total_iters = uint128(self.total_iters - self.get_last_ip() + self.sub_slot_iters) else: self.total_iters = uint128(self.total_iters + self.sub_slot_iters) self.peak = None self.subslot_end = state self.last_ip = uint64(0) self.deficit = state.reward_chain.deficit if state.challenge_chain.new_difficulty is not None: assert state.challenge_chain.new_sub_slot_iters is not None self.difficulty = state.challenge_chain.new_difficulty self.sub_slot_iters = state.challenge_chain.new_sub_slot_iters self.new_epoch = True else: self.new_epoch = False if state.challenge_chain.subepoch_summary_hash is not None: self.infused_ses = True self.passed_ses_height_but_not_yet_included = False else: self.infused_ses = False # Since we have a new sub slot which is not an end of subepoch, # we will use the last value that we saw for # passed_ses_height_but_not_yet_included self.last_challenge_sb_or_eos_total_iters = self.total_iters else: assert False # TODO: address hint error and remove ignore # error: Argument 1 to "append" of "list" has incompatible type "Tuple[Optional[bytes32], uint128]"; # expected "Tuple[bytes32, uint128]" [arg-type] self.reward_challenge_cache.append( (self.get_challenge(Chain.REWARD_CHAIN), self.total_iters)) # type: ignore[arg-type] # noqa: E501 log.info( f"Updated timelord peak to {self.get_challenge(Chain.REWARD_CHAIN)}, total iters: {self.total_iters}" ) while len(self.reward_challenge_cache ) > 2 * self.constants.MAX_SUB_SLOT_BLOCKS: self.reward_challenge_cache.pop(0)
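# A small sketch of the pruning invariant at the end of set_state: the reward
# challenge cache is bounded by two sub-slots' worth of entries
# (2 * MAX_SUB_SLOT_BLOCKS), dropping the oldest first. Plain ints stand in for the
# (bytes32, uint128) pairs the real cache holds.
def prune_reward_challenge_cache(cache: list, max_sub_slot_blocks: int) -> list:
    while len(cache) > 2 * max_sub_slot_blocks:
        cache.pop(0)  # oldest entries sit at the front
    return cache


assert prune_reward_challenge_cache(list(range(10)), 3) == [4, 5, 6, 7, 8, 9]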
async def test_basic_store(self, empty_blockchain, normalized_to_identity: bool = False): blockchain = empty_blockchain blocks = bt.get_consecutive_blocks( 10, seed=b"1234", normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) store = FullNodeStore(test_constants) unfinished_blocks = [] for block in blocks: unfinished_blocks.append( UnfinishedBlock( block.finished_sub_slots, block.reward_chain_block.get_unfinished(), block.challenge_chain_sp_proof, block.reward_chain_sp_proof, block.foliage, block.foliage_transaction_block, block.transactions_info, block.transactions_generator, [], )) # Add/get candidate block assert store.get_candidate_block( unfinished_blocks[0].get_hash()) is None for height, unf_block in enumerate(unfinished_blocks): store.add_candidate_block(unf_block.get_hash(), uint32(height), unf_block) candidate = store.get_candidate_block(unfinished_blocks[4].get_hash()) assert candidate is not None assert candidate[1] == unfinished_blocks[4] store.clear_candidate_blocks_below(uint32(8)) assert store.get_candidate_block( unfinished_blocks[5].get_hash()) is None assert store.get_candidate_block( unfinished_blocks[8].get_hash()) is not None # Test seen unfinished blocks h_hash_1 = bytes32(token_bytes(32)) assert not store.seen_unfinished_block(h_hash_1) assert store.seen_unfinished_block(h_hash_1) store.clear_seen_unfinished_blocks() assert not store.seen_unfinished_block(h_hash_1) # Add/get unfinished block for height, unf_block in enumerate(unfinished_blocks): assert store.get_unfinished_block(unf_block.partial_hash) is None store.add_unfinished_block( uint32(height), unf_block, PreValidationResult(None, uint64(123532), None, False)) assert store.get_unfinished_block( unf_block.partial_hash) == unf_block store.remove_unfinished_block(unf_block.partial_hash) assert store.get_unfinished_block(unf_block.partial_hash) is None blocks = bt.get_consecutive_blocks( 1, skip_slots=5, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, ) sub_slots = blocks[0].finished_sub_slots assert len(sub_slots) == 5 assert (store.get_finished_sub_slots( BlockCache({}), None, sub_slots[0].challenge_chain.challenge_chain_end_of_slot_vdf. 
challenge, ) == []) # Test adding non-connecting sub-slots genesis assert store.get_sub_slot(test_constants.GENESIS_CHALLENGE) is None assert store.get_sub_slot( sub_slots[0].challenge_chain.get_hash()) is None assert store.get_sub_slot( sub_slots[1].challenge_chain.get_hash()) is None assert store.new_finished_sub_slot(sub_slots[1], blockchain, None, None) is None assert store.new_finished_sub_slot(sub_slots[2], blockchain, None, None) is None # Test adding sub-slots after genesis assert store.new_finished_sub_slot(sub_slots[0], blockchain, None, None) is not None sub_slot = store.get_sub_slot(sub_slots[0].challenge_chain.get_hash()) assert sub_slot is not None assert sub_slot[0] == sub_slots[0] assert store.get_sub_slot( sub_slots[1].challenge_chain.get_hash()) is None assert store.new_finished_sub_slot(sub_slots[1], blockchain, None, None) is not None for i in range(len(sub_slots)): assert store.new_finished_sub_slot(sub_slots[i], blockchain, None, None) is not None slot_i = store.get_sub_slot( sub_slots[i].challenge_chain.get_hash()) assert slot_i is not None assert slot_i[0] == sub_slots[i] assert store.get_finished_sub_slots( BlockCache({}), None, sub_slots[-1].challenge_chain.get_hash()) == sub_slots assert store.get_finished_sub_slots(BlockCache( {}), None, std_hash(b"not a valid hash")) is None assert (store.get_finished_sub_slots( BlockCache({}), None, sub_slots[-2].challenge_chain.get_hash()) == sub_slots[:-1]) # Test adding genesis peak await _validate_and_add_block(blockchain, blocks[0]) peak = blockchain.get_peak() peak_full_block = await blockchain.get_full_peak() if peak.overflow: store.new_peak(peak, peak_full_block, sub_slots[-2], sub_slots[-1], None, blockchain) else: store.new_peak(peak, peak_full_block, None, sub_slots[-1], None, blockchain) assert store.get_sub_slot( sub_slots[0].challenge_chain.get_hash()) is None assert store.get_sub_slot( sub_slots[1].challenge_chain.get_hash()) is None assert store.get_sub_slot( sub_slots[2].challenge_chain.get_hash()) is None if peak.overflow: slot_3 = store.get_sub_slot( sub_slots[3].challenge_chain.get_hash()) assert slot_3 is not None assert slot_3[0] == sub_slots[3] else: assert store.get_sub_slot( sub_slots[3].challenge_chain.get_hash()) is None slot_4 = store.get_sub_slot(sub_slots[4].challenge_chain.get_hash()) assert slot_4 is not None assert slot_4[0] == sub_slots[4] assert (store.get_finished_sub_slots( blockchain, peak, sub_slots[-1].challenge_chain.get_hash(), ) == []) # Test adding non genesis peak directly blocks = bt.get_consecutive_blocks( 2, skip_slots=2, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) blocks = bt.get_consecutive_blocks( 3, block_list_input=blocks, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) for block in blocks: await _validate_and_add_block_no_error(blockchain, block) sb = blockchain.block_record(block.header_hash) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( block.header_hash) res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain) assert res.added_eos is None # Add reorg blocks blocks_reorg = bt.get_consecutive_blocks( 20, normalized_to_identity_cc_eos=normalized_to_identity, 
normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) for block in blocks_reorg: peak = blockchain.get_peak() assert peak is not None await _validate_and_add_block_no_error(blockchain, block) if blockchain.get_peak().header_hash == block.header_hash: sb = blockchain.block_record(block.header_hash) fork = find_fork_point_in_chain( blockchain, peak, blockchain.block_record(sb.header_hash)) if fork > 0: fork_block = blockchain.height_to_block_record(fork) else: fork_block = None sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( block.header_hash) res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, fork_block, blockchain) assert res.added_eos is None # Add slots to the end blocks_2 = bt.get_consecutive_blocks( 1, block_list_input=blocks_reorg, skip_slots=2, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) for slot in blocks_2[-1].finished_sub_slots: store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak(), await blockchain.get_full_peak()) assert store.get_sub_slot( sub_slots[3].challenge_chain.get_hash()) is None assert store.get_sub_slot( sub_slots[4].challenge_chain.get_hash()) is None # Test adding signage point peak = blockchain.get_peak() ss_start_iters = peak.ip_sub_slot_total_iters(test_constants) for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA): sp = get_signage_point( test_constants, blockchain, peak, ss_start_iters, uint8(i), [], peak.sub_slot_iters, ) assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp) blocks = blocks_reorg while True: blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) await _validate_and_add_block(blockchain, blocks[-1]) if blockchain.get_peak().header_hash == blocks[-1].header_hash: sb = blockchain.block_record(blocks[-1].header_hash) fork = find_fork_point_in_chain( blockchain, peak, blockchain.block_record(sb.header_hash)) if fork > 0: fork_block = blockchain.height_to_block_record(fork) else: fork_block = None sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( blocks[-1].header_hash) res = store.new_peak(sb, blocks[-1], sp_sub_slot, ip_sub_slot, fork_block, blockchain) assert res.added_eos is None if sb.overflow and sp_sub_slot is not None: assert sp_sub_slot != ip_sub_slot break peak = blockchain.get_peak() assert peak.overflow # Overflow peak should result in 2 finished sub slots assert len(store.finished_sub_slots) == 2 # Add slots to the end, except for the last one, which we will use to test invalid SP blocks_2 = bt.get_consecutive_blocks( 1, block_list_input=blocks, skip_slots=3, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) for slot in blocks_2[-1].finished_sub_slots[:-1]: store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak(), await blockchain.get_full_peak()) finished_sub_slots = blocks_2[-1].finished_sub_slots assert len(store.finished_sub_slots) 
== 4 # Test adding signage points for overflow blocks (sp_sub_slot) ss_start_iters = peak.sp_sub_slot_total_iters(test_constants) # for i in range(peak.signage_point_index, test_constants.NUM_SPS_SUB_SLOT): # if i < peak.signage_point_index: # continue # latest = peak # while latest.total_iters > peak.sp_total_iters(test_constants): # latest = blockchain.blocks[latest.prev_hash] # sp = get_signage_point( # test_constants, # blockchain.blocks, # latest, # ss_start_iters, # uint8(i), # [], # peak.sub_slot_iters, # ) # assert store.new_signage_point(i, blockchain.blocks, peak, peak.sub_slot_iters, sp) # Test adding signage points for overflow blocks (ip_sub_slot) for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA): sp = get_signage_point( test_constants, blockchain, peak, peak.ip_sub_slot_total_iters(test_constants), uint8(i), [], peak.sub_slot_iters, ) assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp) # Test adding future signage point, a few slots forward (good) saved_sp_hash = None for slot_offset in range(1, len(finished_sub_slots)): for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA, ): sp = get_signage_point( test_constants, blockchain, peak, peak.ip_sub_slot_total_iters(test_constants) + slot_offset * peak.sub_slot_iters, uint8(i), finished_sub_slots[:slot_offset], peak.sub_slot_iters, ) assert sp.cc_vdf is not None saved_sp_hash = sp.cc_vdf.output.get_hash() assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp) # Test adding future signage point (bad) for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA): sp = get_signage_point( test_constants, blockchain, peak, peak.ip_sub_slot_total_iters(test_constants) + len(finished_sub_slots) * peak.sub_slot_iters, uint8(i), finished_sub_slots[:len(finished_sub_slots)], peak.sub_slot_iters, ) assert not store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp) # Test adding past signage point sp = SignagePoint( blocks[1].reward_chain_block.challenge_chain_sp_vdf, blocks[1].challenge_chain_sp_proof, blocks[1].reward_chain_block.reward_chain_sp_vdf, blocks[1].reward_chain_sp_proof, ) assert not store.new_signage_point( blocks[1].reward_chain_block.signage_point_index, blockchain, peak, blockchain.block_record( blocks[1].header_hash).sp_sub_slot_total_iters(test_constants), sp, ) # Get signage point by index assert (store.get_signage_point_by_index( finished_sub_slots[0].challenge_chain.get_hash(), uint8(4), finished_sub_slots[0].reward_chain.get_hash(), ) is not None) assert (store.get_signage_point_by_index( finished_sub_slots[0].challenge_chain.get_hash(), uint8(4), std_hash(b"1")) is None) # Get signage point by hash # TODO: address hint error and remove ignore # error: Argument 1 to "get_signage_point" of "FullNodeStore" has incompatible type "Optional[bytes32]"; # expected "bytes32" [arg-type] assert store.get_signage_point( saved_sp_hash) is not None # type: ignore[arg-type] assert store.get_signage_point(std_hash(b"2")) is None # Test adding signage points before genesis store.initialize_genesis_sub_slot() assert len(store.finished_sub_slots) == 1 for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA): sp = get_signage_point( test_constants, BlockCache({}, {}), None, uint128(0), uint8(i), [], peak.sub_slot_iters, ) assert store.new_signage_point(uint8(i), blockchain, None, peak.sub_slot_iters, sp) blocks_3 
= bt.get_consecutive_blocks( 1, skip_slots=2, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) for slot in blocks_3[-1].finished_sub_slots: store.new_finished_sub_slot(slot, blockchain, None, None) assert len(store.finished_sub_slots) == 3 finished_sub_slots = blocks_3[-1].finished_sub_slots for slot_offset in range(1, len(finished_sub_slots) + 1): for i in range( 1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA, ): sp = get_signage_point( test_constants, BlockCache({}, {}), None, slot_offset * peak.sub_slot_iters, uint8(i), finished_sub_slots[:slot_offset], peak.sub_slot_iters, ) assert store.new_signage_point(uint8(i), blockchain, None, peak.sub_slot_iters, sp) # Test adding signage points after genesis blocks_4 = bt.get_consecutive_blocks( 1, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) blocks_5 = bt.get_consecutive_blocks( 1, block_list_input=blocks_4, skip_slots=1, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) # If this is not the case, fix test to find a block that is assert (blocks_4[-1].reward_chain_block.signage_point_index < test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA) await _validate_and_add_block( blockchain, blocks_4[-1], expected_result=ReceiveBlockResult.ADDED_AS_ORPHAN) sb = blockchain.block_record(blocks_4[-1].header_hash) store.new_peak(sb, blocks_4[-1], None, None, None, blockchain) for i in range( sb.signage_point_index + test_constants.NUM_SP_INTERVALS_EXTRA, test_constants.NUM_SPS_SUB_SLOT, ): if is_overflow_block(test_constants, uint8(i)): finished_sub_slots = blocks_5[-1].finished_sub_slots else: finished_sub_slots = [] sp = get_signage_point( test_constants, blockchain, sb, uint128(0), uint8(i), finished_sub_slots, peak.sub_slot_iters, ) assert store.new_signage_point(uint8(i), empty_blockchain, sb, peak.sub_slot_iters, sp) # Test future EOS cache store.initialize_genesis_sub_slot() blocks = bt.get_consecutive_blocks( 1, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) await _validate_and_add_block_no_error(blockchain, blocks[-1]) while True: blocks = bt.get_consecutive_blocks( 1, block_list_input=blocks, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, ) await _validate_and_add_block_no_error(blockchain, blocks[-1]) sb = blockchain.block_record(blocks[-1].header_hash) if sb.first_in_sub_slot: break assert len(blocks) >= 2 dependant_sub_slots = blocks[-1].finished_sub_slots peak = blockchain.get_peak() peak_full_block = await blockchain.get_full_peak() for block in blocks[:-2]: sb = blockchain.block_record(block.header_hash) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( block.header_hash) peak = sb peak_full_block = block res = store.new_peak(sb, block, 
sp_sub_slot, ip_sub_slot, None, blockchain) assert res.added_eos is None assert store.new_finished_sub_slot(dependant_sub_slots[0], blockchain, peak, peak_full_block) is None block = blocks[-2] sb = blockchain.block_record(block.header_hash) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( block.header_hash) res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain) assert res.added_eos == dependant_sub_slots[0] assert res.new_signage_points == res.new_infusion_points == [] # Test future IP cache store.initialize_genesis_sub_slot() blocks = bt.get_consecutive_blocks( 60, normalized_to_identity_cc_ip=normalized_to_identity, normalized_to_identity_cc_sp=normalized_to_identity, normalized_to_identity_cc_eos=normalized_to_identity, normalized_to_identity_icc_eos=normalized_to_identity, ) for block in blocks[:5]: await _validate_and_add_block_no_error(blockchain, block) sb = blockchain.block_record(block.header_hash) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( block.header_hash) res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain) assert res.added_eos is None case_0, case_1 = False, False for i in range(5, len(blocks) - 1): prev_block = blocks[i] block = blocks[i + 1] new_ip = NewInfusionPointVDF( block.reward_chain_block.get_unfinished().get_hash(), block.reward_chain_block.challenge_chain_ip_vdf, block.challenge_chain_ip_proof, block.reward_chain_block.reward_chain_ip_vdf, block.reward_chain_ip_proof, block.reward_chain_block.infused_challenge_chain_ip_vdf, block.infused_challenge_chain_ip_proof, ) store.add_to_future_ip(new_ip) await _validate_and_add_block_no_error(blockchain, prev_block) sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots( prev_block.header_hash) sb = blockchain.block_record(prev_block.header_hash) res = store.new_peak(sb, prev_block, sp_sub_slot, ip_sub_slot, None, blockchain) if len(block.finished_sub_slots) == 0: case_0 = True assert res.new_infusion_points == [new_ip] else: case_1 = True assert res.new_infusion_points == [] found_ips: List[timelord_protocol.NewInfusionPointVDF] = [] for ss in block.finished_sub_slots: ipvdf = store.new_finished_sub_slot( ss, blockchain, sb, prev_block) assert ipvdf is not None found_ips += ipvdf assert found_ips == [new_ip] # If flaky, increase the number of blocks created assert case_0 and case_1 # Try to get two blocks in the same slot, such that we have # SP, B2 SP .... SP B1 # i2 ......... 
i1 # Then do a reorg up to B2, removing all signage points after B2, but not before log.warning(f"Adding blocks up to {blocks[-1]}") for block in blocks: await _validate_and_add_block_no_error(blockchain, block) log.warning(f"Starting loop") while True: log.warning("Looping") blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=1) await _validate_and_add_block_no_error(blockchain, blocks[-1]) peak = blockchain.get_peak() sub_slots = await blockchain.get_sp_and_ip_sub_slots( peak.header_hash) store.new_peak(peak, blocks[-1], sub_slots[0], sub_slots[1], None, blockchain) blocks = bt.get_consecutive_blocks( 2, block_list_input=blocks, guarantee_transaction_block=True) i3 = blocks[-3].reward_chain_block.signage_point_index i2 = blocks[-2].reward_chain_block.signage_point_index i1 = blocks[-1].reward_chain_block.signage_point_index if (len(blocks[-2].finished_sub_slots) == len( blocks[-1].finished_sub_slots) == 0 and not is_overflow_block(test_constants, signage_point_index=i2) and not is_overflow_block(test_constants, signage_point_index=i1) and i2 > i3 + 3 and i1 > (i2 + 3)): # We hit all the conditions that we want all_sps: List[Optional[SignagePoint]] = [ None ] * test_constants.NUM_SPS_SUB_SLOT def assert_sp_none(sp_index: int, is_none: bool): sp_to_check: Optional[SignagePoint] = all_sps[sp_index] assert sp_to_check is not None assert sp_to_check.cc_vdf is not None fetched = store.get_signage_point( sp_to_check.cc_vdf.output.get_hash()) assert (fetched is None) == is_none if fetched is not None: assert fetched == sp_to_check for i in range(i3 + 1, test_constants.NUM_SPS_SUB_SLOT - 3): finished_sub_slots = [] sp = get_signage_point( test_constants, blockchain, peak, uint128(peak.ip_sub_slot_total_iters(bt.constants)), uint8(i), finished_sub_slots, peak.sub_slot_iters, ) all_sps[i] = sp assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp) # Adding a new peak clears all SPs after that peak await _validate_and_add_block_no_error(blockchain, blocks[-2]) peak = blockchain.get_peak() sub_slots = await blockchain.get_sp_and_ip_sub_slots( peak.header_hash) store.new_peak(peak, blocks[-2], sub_slots[0], sub_slots[1], None, blockchain) assert_sp_none(i2, False) assert_sp_none(i2 + 1, False) assert_sp_none(i1, True) assert_sp_none(i1 + 1, True) assert_sp_none(i1 + 4, True) for i in range(i2, test_constants.NUM_SPS_SUB_SLOT): if is_overflow_block(test_constants, uint8(i)): blocks_alt = bt.get_consecutive_blocks( 1, block_list_input=blocks[:-1], skip_slots=1) finished_sub_slots = blocks_alt[-1].finished_sub_slots else: finished_sub_slots = [] sp = get_signage_point( test_constants, blockchain, peak, uint128(peak.ip_sub_slot_total_iters(bt.constants)), uint8(i), finished_sub_slots, peak.sub_slot_iters, ) all_sps[i] = sp assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp) assert_sp_none(i2, False) assert_sp_none(i2 + 1, False) assert_sp_none(i1, False) assert_sp_none(i1 + 1, False) assert_sp_none(i1 + 4, False) await _validate_and_add_block_no_error(blockchain, blocks[-1]) peak = blockchain.get_peak() sub_slots = await blockchain.get_sp_and_ip_sub_slots( peak.header_hash) # Do a reorg, which should remove everything after B2 store.new_peak( peak, blocks[-1], sub_slots[0], sub_slots[1], (await blockchain.get_block_records_at([blocks[-2].height]))[0], blockchain, ) assert_sp_none(i2, False) assert_sp_none(i2 + 1, False) assert_sp_none(i1, True) assert_sp_none(i1 + 1, True) assert_sp_none(i1 + 4, True) break else: for block 
in blocks[-2:]: await _validate_and_add_block_no_error(blockchain, block)
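# The reorg paths in the test above convert a fork height into an optional fork
# block before calling store.new_peak. A minimal sketch of that conversion;
# `blockchain` is anything exposing height_to_block_record, and heights <= 0 are
# treated as "no usable fork block", matching the test's handling.
from typing import Any, Optional


def fork_block_for_new_peak(blockchain: Any, fork_height: int) -> Optional[Any]:
    if fork_height > 0:
        return blockchain.height_to_block_record(fork_height)
    return None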
def create_unfinished_block(
    constants: ConsensusConstants,
    sub_slot_start_total_iters: uint128,
    sub_slot_iters: uint64,
    signage_point_index: uint8,
    sp_iters: uint64,
    ip_iters: uint64,
    proof_of_space: ProofOfSpace,
    slot_cc_challenge: bytes32,
    farmer_reward_puzzle_hash: bytes32,
    pool_target: PoolTarget,
    get_plot_signature: Callable[[bytes32, G1Element], G2Element],
    get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
    signage_point: SignagePoint,
    timestamp: uint64,
    blocks: BlockchainInterface,
    seed: bytes = b"",
    block_generator: Optional[BlockGenerator] = None,
    aggregate_sig: G2Element = G2Element(),
    additions: Optional[List[Coin]] = None,
    removals: Optional[List[Coin]] = None,
    prev_block: Optional[BlockRecord] = None,
    finished_sub_slots_input: Optional[List[EndOfSubSlotBundle]] = None,
) -> UnfinishedBlock:
    """
    Creates a new unfinished block using all the information available at the signage point. This will
    have to be modified using information from the infusion point.

    Args:
        constants: consensus constants being used for this chain
        sub_slot_start_total_iters: the starting sub-slot iters at the signage point sub-slot
        sub_slot_iters: sub-slot-iters at the infusion point epoch
        signage_point_index: signage point index of the block to create
        sp_iters: sp_iters of the block to create
        ip_iters: ip_iters of the block to create
        proof_of_space: proof of space of the block to create
        slot_cc_challenge: challenge hash at the sp sub-slot
        farmer_reward_puzzle_hash: where to pay out farmer rewards
        pool_target: where to pay out pool rewards
        get_plot_signature: function that returns signature corresponding to plot public key
        get_pool_signature: function that returns signature corresponding to pool public key
        signage_point: signage point information (VDFs)
        timestamp: timestamp to add to the foliage block, if created
        seed: seed to randomize chain
        block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transactions (or infinity element)
        additions: Coins added in spend_bundle
        removals: Coins removed in spend_bundle
        prev_block: previous block (already in chain) from the signage point
        blocks: dictionary from header hash to SBR of all included SBR
        finished_sub_slots_input: finished_sub_slots at the signage point

    Returns:
        the new unfinished block
    """
    if finished_sub_slots_input is None:
        finished_sub_slots: List[EndOfSubSlotBundle] = []
    else:
        finished_sub_slots = finished_sub_slots_input.copy()
    overflow: bool = sp_iters > ip_iters
    total_iters_sp: uint128 = uint128(sub_slot_start_total_iters + sp_iters)
    is_genesis: bool = prev_block is None

    new_sub_slot: bool = len(finished_sub_slots) > 0

    cc_sp_hash: Optional[bytes32] = slot_cc_challenge

    # Only enters this if statement if we are in testing mode (making VDF proofs here)
    if signage_point.cc_vdf is not None:
        assert signage_point.rc_vdf is not None
        cc_sp_hash = signage_point.cc_vdf.output.get_hash()
        rc_sp_hash = signage_point.rc_vdf.output.get_hash()
    else:
        if new_sub_slot:
            rc_sp_hash = finished_sub_slots[-1].reward_chain.get_hash()
        else:
            if is_genesis:
                rc_sp_hash = constants.GENESIS_CHALLENGE
            else:
                assert prev_block is not None
                assert blocks is not None
                curr = prev_block
                while not curr.first_in_sub_slot:
                    curr = blocks.block_record(curr.prev_hash)
                assert curr.finished_reward_slot_hashes is not None
                rc_sp_hash = curr.finished_reward_slot_hashes[-1]
        signage_point = SignagePoint(None, None, None, None)

    cc_sp_signature: Optional[G2Element] = get_plot_signature(cc_sp_hash, proof_of_space.plot_public_key)
    rc_sp_signature: Optional[G2Element] = get_plot_signature(rc_sp_hash, proof_of_space.plot_public_key)
    assert cc_sp_signature is not None
    assert rc_sp_signature is not None
    assert blspy.AugSchemeMPL.verify(proof_of_space.plot_public_key, cc_sp_hash, cc_sp_signature)

    total_iters = uint128(sub_slot_start_total_iters + ip_iters + (sub_slot_iters if overflow else 0))

    rc_block = RewardChainBlockUnfinished(
        total_iters,
        signage_point_index,
        slot_cc_challenge,
        proof_of_space,
        signage_point.cc_vdf,
        cc_sp_signature,
        signage_point.rc_vdf,
        rc_sp_signature,
    )
    if additions is None:
        additions = []
    if removals is None:
        removals = []
    (foliage, foliage_transaction_block, transactions_info,) = create_foliage(
        constants,
        rc_block,
        block_generator,
        aggregate_sig,
        additions,
        removals,
        prev_block,
        blocks,
        total_iters_sp,
        timestamp,
        farmer_reward_puzzle_hash,
        pool_target,
        get_plot_signature,
        get_pool_signature,
        seed,
    )
    return UnfinishedBlock(
        finished_sub_slots,
        rc_block,
        signage_point.cc_proof,
        signage_point.rc_proof,
        foliage,
        foliage_transaction_block,
        transactions_info,
        block_generator.program if block_generator else None,
        block_generator.block_height_list() if block_generator else [],
    )
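# In create_unfinished_block, overflow is inferred as sp_iters > ip_iters: the
# infusion point of an overflow block lands near the start of a sub-slot while its
# signage point occurred late in the previous one, and total_iters then includes a
# full extra sub_slot_iters. Illustrative check with toy numbers (a real sub-slot
# is far larger):
def is_overflow(sp_iters: int, ip_iters: int) -> bool:
    return sp_iters > ip_iters


assert not is_overflow(sp_iters=100, ip_iters=400)  # normal: SP precedes IP in-slot
assert is_overflow(sp_iters=3900, ip_iters=250)  # overflow: SP late in prior slot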
async def get_blockchain_state(self, _request: Dict): """ Returns a summary of the node's view of the blockchain. """ if self.service.initialized is False: res: Dict = { "blockchain_state": { "peak": None, "genesis_challenge_initialized": self.service.initialized, "sync": { "sync_mode": False, "synced": False, "sync_tip_height": 0, "sync_progress_height": 0, }, "difficulty": 0, "sub_slot_iters": 0, "space": 0, "mempool_size": 0, }, } return res peak: Optional[BlockRecord] = self.service.blockchain.get_peak() if peak is not None and peak.height > 0: difficulty = uint64( peak.weight - self.service.blockchain.block_record(peak.prev_hash).weight) sub_slot_iters = peak.sub_slot_iters else: difficulty = self.service.constants.DIFFICULTY_STARTING sub_slot_iters = self.service.constants.SUB_SLOT_ITERS_STARTING sync_mode: bool = self.service.sync_store.get_sync_mode( ) or self.service.sync_store.get_long_sync() sync_tip_height: Optional[uint32] = uint32(0) if sync_mode: if self.service.sync_store.get_sync_target_height() is not None: sync_tip_height = self.service.sync_store.get_sync_target_height( ) assert sync_tip_height is not None if peak is not None: sync_progress_height: uint32 = peak.height # Don't display we're syncing towards 0, instead show 'Syncing height/height' # until sync_store retrieves the correct number. if sync_tip_height == uint32(0): sync_tip_height = peak.height else: sync_progress_height = uint32(0) else: sync_progress_height = uint32(0) if peak is not None and peak.height > 1: newer_block_hex = peak.header_hash.hex() # Average over the last day header_hash = self.service.blockchain.height_to_hash( uint32(max(1, peak.height - 4608))) assert header_hash is not None older_block_hex = header_hash.hex() space = await self.get_network_space({ "newer_block_header_hash": newer_block_hex, "older_block_header_hash": older_block_hex }) else: space = {"space": uint128(0)} if self.service.mempool_manager is not None: mempool_size = len(self.service.mempool_manager.mempool.spends) else: mempool_size = 0 if self.service.server is not None: is_connected = len( self.service.server.get_full_node_connections()) > 0 else: is_connected = False synced = await self.service.synced() and is_connected assert space is not None response: Dict = { "blockchain_state": { "peak": peak, "genesis_challenge_initialized": self.service.initialized, "sync": { "sync_mode": sync_mode, "synced": synced, "sync_tip_height": sync_tip_height, "sync_progress_height": sync_progress_height, }, "difficulty": difficulty, "sub_slot_iters": sub_slot_iters, "space": space["space"], "mempool_size": mempool_size, }, } self.cached_blockchain_state = dict(response["blockchain_state"]) return response
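# A minimal consumer sketch for the get_blockchain_state response shape above,
# e.g. for a CLI progress line. The field names mirror the dict built by the
# handler; the percentage formula is an illustration, not part of the RPC.
from typing import Dict


def sync_progress_line(blockchain_state: Dict) -> str:
    sync = blockchain_state["sync"]
    if not sync["sync_mode"]:
        return "Synced" if sync["synced"] else "Not synced"
    tip = sync["sync_tip_height"]
    progress = sync["sync_progress_height"]
    pct = (100 * progress // tip) if tip else 0
    return f"Syncing {progress}/{tip} ({pct}%)"


assert sync_progress_line(
    {"sync": {"sync_mode": True, "synced": False, "sync_tip_height": 200, "sync_progress_height": 50}}
) == "Syncing 50/200 (25%)"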
def unfinished_block_to_full_block(
    unfinished_block: UnfinishedBlock,
    cc_ip_vdf: VDFInfo,
    cc_ip_proof: VDFProof,
    rc_ip_vdf: VDFInfo,
    rc_ip_proof: VDFProof,
    icc_ip_vdf: Optional[VDFInfo],
    icc_ip_proof: Optional[VDFProof],
    finished_sub_slots: List[EndOfSubSlotBundle],
    prev_block: Optional[BlockRecord],
    blocks: BlockchainInterface,
    total_iters_sp: uint128,
    difficulty: uint64,
) -> FullBlock:
    """
    Converts an unfinished block to a finished block. Includes all the infusion point VDFs as well as tweaking
    other properties (height, weight, sub-slots, etc.)

    Args:
        unfinished_block: the unfinished block to finish
        cc_ip_vdf: the challenge chain vdf info at the infusion point
        cc_ip_proof: the challenge chain proof
        rc_ip_vdf: the reward chain vdf info at the infusion point
        rc_ip_proof: the reward chain proof
        icc_ip_vdf: the infused challenge chain vdf info at the infusion point
        icc_ip_proof: the infused challenge chain proof
        finished_sub_slots: finished sub slots from the prev block to the infusion point
        prev_block: prev block from the infusion point
        blocks: dictionary from header hash to SBR of all included SBR
        total_iters_sp: total iters at the signage point
        difficulty: difficulty at the infusion point
    """
    # Replace things that need to be replaced, since foliage blocks did not necessarily have the latest information
    if prev_block is None:
        is_transaction_block = True
        new_weight = uint128(difficulty)
        new_height = uint32(0)
        new_foliage = unfinished_block.foliage
        new_foliage_transaction_block = unfinished_block.foliage_transaction_block
        new_tx_info = unfinished_block.transactions_info
        new_generator = unfinished_block.transactions_generator
        new_generator_ref_list = unfinished_block.transactions_generator_ref_list
    else:
        is_transaction_block, _ = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
        new_weight = uint128(prev_block.weight + difficulty)
        new_height = uint32(prev_block.height + 1)
        if is_transaction_block:
            new_fbh = unfinished_block.foliage.foliage_transaction_block_hash
            new_fbs = unfinished_block.foliage.foliage_transaction_block_signature
            new_foliage_transaction_block = unfinished_block.foliage_transaction_block
            new_tx_info = unfinished_block.transactions_info
            new_generator = unfinished_block.transactions_generator
            new_generator_ref_list = unfinished_block.transactions_generator_ref_list
        else:
            new_fbh = None
            new_fbs = None
            new_foliage_transaction_block = None
            new_tx_info = None
            new_generator = None
            new_generator_ref_list = []
        assert (new_fbh is None) == (new_fbs is None)
        new_foliage = replace(
            unfinished_block.foliage,
            prev_block_hash=prev_block.header_hash,
            foliage_transaction_block_hash=new_fbh,
            foliage_transaction_block_signature=new_fbs,
        )
    ret = FullBlock(
        finished_sub_slots,
        RewardChainBlock(
            new_weight,
            new_height,
            unfinished_block.reward_chain_block.total_iters,
            unfinished_block.reward_chain_block.signage_point_index,
            unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash,
            unfinished_block.reward_chain_block.proof_of_space,
            unfinished_block.reward_chain_block.challenge_chain_sp_vdf,
            unfinished_block.reward_chain_block.challenge_chain_sp_signature,
            cc_ip_vdf,
            unfinished_block.reward_chain_block.reward_chain_sp_vdf,
            unfinished_block.reward_chain_block.reward_chain_sp_signature,
            rc_ip_vdf,
            icc_ip_vdf,
            is_transaction_block,
        ),
        unfinished_block.challenge_chain_sp_proof,
        cc_ip_proof,
        unfinished_block.reward_chain_sp_proof,
        rc_ip_proof,
        icc_ip_proof,
        new_foliage,
        new_foliage_transaction_block,
        new_tx_info,
        new_generator,
        new_generator_ref_list,
    )
    return recursive_replace(
        ret,
        "foliage.reward_block_hash",
        ret.reward_chain_block.get_hash(),
    )