def test_count_significant_bits(self):
    # Table-driven check of count_significant_bits: each entry is
    # (input value, expected significant-bit count). Per the expectations
    # below, leading zeros never count, the sign is ignored (magnitude is
    # measured), and trailing zero bits below the lowest set bit are excluded.
    cases = [
        (0b0001, 1),
        (0b00010, 1),
        (0b01010, 3),
        (-0b01010, 3),
        (0b0, 0),
        (0b1, 1),
        (0b1000010101010000, 12),
    ]
    for value, expected in cases:
        assert count_significant_bits(value) == expected
def _get_next_difficulty(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    prev_header_hash: bytes32,
    height: uint32,
    current_difficulty: uint64,
    deficit: uint8,
    block_at_height_included_ses: bool,
    new_slot: bool,
    signage_point_total_iters: uint128,
    skip_epoch_check=False,
) -> uint64:
    """
    Returns the difficulty of the next block that extends onto block.
    Used to calculate the number of iterations. WARNING: assumes that the block at height is not the first
    block in a sub-epoch.

    Args:
        constants: consensus constants being used for this chain
        blocks: dictionary from header hash to SBR of all included SBR
        prev_header_hash: header hash of the previous block
        height: the block height of the block to look at
        current_difficulty: difficulty at the infusion point of the block at height
        deficit: deficit of block at height height
        block_at_height_included_ses: whether the block at height included a sub-epoch summary
        new_slot: whether or not there is a new slot after height
        signage_point_total_iters: signage point iters of the block at height
        skip_epoch_check: don't check correct epoch
    """
    next_height: uint32 = uint32(height + 1)

    if next_height < constants.EPOCH_BLOCKS:
        # We are in the first epoch: difficulty is fixed at the starting constant.
        return uint64(constants.DIFFICULTY_STARTING)

    if not blocks.contains_block(prev_header_hash):
        raise ValueError(f"Header hash {prev_header_hash} not in blocks")

    prev_b: BlockRecord = blocks.block_record(prev_header_hash)

    # If we are in the same slot as previous block (or the epoch cannot finish here),
    # the difficulty does not change.
    if not skip_epoch_check:
        _, can_finish_epoch = can_finish_sub_and_full_epoch(
            constants, blocks, height, prev_header_hash, deficit, block_at_height_included_ses
        )
        if not new_slot or not can_finish_epoch:
            return current_difficulty

    last_block_prev: BlockRecord = _get_second_to_last_transaction_block_in_previous_epoch(constants, blocks, prev_b)

    # This gets the last transaction block before this block's signage point. Assuming the block at height height
    # is the last block infused in the epoch: If this block ends up being a
    # transaction block, then last_block_curr will be the second to last tx block in the epoch. If this block
    # is not a transaction block, that means there was exactly one other tx block included in between our signage
    # point and infusion point, and therefore last_block_curr is the second to last as well.
    last_block_curr = prev_b
    while last_block_curr.total_iters > signage_point_total_iters or not last_block_curr.is_transaction_block:
        last_block_curr = blocks.block_record(last_block_curr.prev_hash)

    # Only transaction blocks carry timestamps; both endpoints are tx blocks by construction above.
    assert last_block_curr.timestamp is not None
    assert last_block_prev.timestamp is not None
    actual_epoch_time: uint64 = uint64(last_block_curr.timestamp - last_block_prev.timestamp)

    # Difficulty of the previous block = its weight increment over its parent.
    old_difficulty = uint64(prev_b.weight - blocks.block_record(prev_b.prev_hash).weight)

    # Terms are rearranged so there is only one division (floor division happens last,
    # minimizing rounding error).
    new_difficulty_precise = uint64(
        (last_block_curr.weight - last_block_prev.weight)
        * constants.SUB_SLOT_TIME_TARGET
        // (constants.SLOT_BLOCKS_TARGET * actual_epoch_time)
    )

    # Only change by a max factor, to prevent attacks, as in greenpaper, and must be at least 1
    max_diff = uint64(constants.DIFFICULTY_CHANGE_MAX_FACTOR * old_difficulty)
    min_diff = uint64(old_difficulty // constants.DIFFICULTY_CHANGE_MAX_FACTOR)
    if new_difficulty_precise >= old_difficulty:
        new_difficulty_precise = uint64(min(new_difficulty_precise, max_diff))
    else:
        new_difficulty_precise = uint64(max([uint64(1), new_difficulty_precise, min_diff]))
    # NOTE: here clamping happens before bit truncation; the sibling sub_blocks
    # version below truncates first — presumably an intentional version difference.
    new_difficulty = truncate_to_significant_bits(new_difficulty_precise, constants.SIGNIFICANT_BITS)
    assert count_significant_bits(new_difficulty) <= constants.SIGNIFICANT_BITS
    return uint64(new_difficulty)
def _get_next_sub_slot_iters(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    prev_header_hash: bytes32,
    height: uint32,
    curr_sub_slot_iters: uint64,
    deficit: uint8,
    block_at_height_included_ses: bool,
    new_slot: bool,
    signage_point_total_iters: uint128,
    skip_epoch_check=False,
) -> uint64:
    """
    Returns the slot iterations required for the next block after the one at height, where new_slot is true
    iff the next block will be in the next slot. WARNING: assumes that the block at height is not the first
    block in a sub-epoch.

    Args:
        constants: consensus constants being used for this chain
        blocks: dictionary from header hash to SBR of all included SBR
        prev_header_hash: header hash of the previous block
        height: the block height of the block to look at
        curr_sub_slot_iters: sub-slot iters at the infusion point of the block at height
        deficit: deficit of block at height height
        block_at_height_included_ses: whether the block at height included a sub-epoch summary
        new_slot: whether or not there is a new slot after height
        signage_point_total_iters: signage point iters of the block at height
        skip_epoch_check: don't check correct epoch
    """
    next_height: uint32 = uint32(height + 1)

    if next_height < constants.EPOCH_BLOCKS:
        # First epoch uses the hard-coded starting sub-slot iterations.
        return uint64(constants.SUB_SLOT_ITERS_STARTING)

    if not blocks.contains_block(prev_header_hash):
        raise ValueError(f"Header hash {prev_header_hash} not in blocks")

    prev_b: BlockRecord = blocks.block_record(prev_header_hash)

    # If we are in the same epoch (or the epoch cannot finish here), return same ssi.
    if not skip_epoch_check:
        _, can_finish_epoch = can_finish_sub_and_full_epoch(
            constants, blocks, height, prev_header_hash, deficit, block_at_height_included_ses
        )
        if not new_slot or not can_finish_epoch:
            return curr_sub_slot_iters

    last_block_prev: BlockRecord = _get_second_to_last_transaction_block_in_previous_epoch(constants, blocks, prev_b)

    # This gets the last transaction block before this block's signage point. Assuming the block at height height
    # is the last block infused in the epoch: If this block ends up being a
    # transaction block, then last_block_curr will be the second to last tx block in the epoch. If this block
    # is not a transaction block, that means there was exactly one other tx block included in between our signage
    # point and infusion point, and therefore last_block_curr is the second to last as well.
    last_block_curr = prev_b
    while last_block_curr.total_iters > signage_point_total_iters or not last_block_curr.is_transaction_block:
        last_block_curr = blocks.block_record(last_block_curr.prev_hash)

    # Both endpoints are transaction blocks, so their timestamps are present.
    assert last_block_curr.timestamp is not None and last_block_prev.timestamp is not None

    # This is computed as the iterations per second in last epoch, times the target number of seconds per slot.
    new_ssi_precise: uint64 = uint64(
        constants.SUB_SLOT_TIME_TARGET
        * (last_block_curr.total_iters - last_block_prev.total_iters)
        // (last_block_curr.timestamp - last_block_prev.timestamp)
    )

    # Only change by a max factor as a sanity check, and never below NUM_SPS_SUB_SLOT.
    max_ssi = uint64(constants.DIFFICULTY_CHANGE_MAX_FACTOR * last_block_curr.sub_slot_iters)
    min_ssi = uint64(last_block_curr.sub_slot_iters // constants.DIFFICULTY_CHANGE_MAX_FACTOR)
    if new_ssi_precise >= last_block_curr.sub_slot_iters:
        new_ssi_precise = uint64(min(new_ssi_precise, max_ssi))
    else:
        new_ssi_precise = uint64(max([constants.NUM_SPS_SUB_SLOT, new_ssi_precise, min_ssi]))

    new_ssi = truncate_to_significant_bits(new_ssi_precise, constants.SIGNIFICANT_BITS)
    # Round down to a multiple of NUM_SPS_SUB_SLOT so signage points divide the sub slot evenly.
    new_ssi = uint64(new_ssi - new_ssi % constants.NUM_SPS_SUB_SLOT)  # Must divide the sub slot
    assert count_significant_bits(new_ssi) <= constants.SIGNIFICANT_BITS
    return new_ssi
def get_next_min_iters(
    constants: Dict,
    headers: Dict[bytes32, Header],
    height_to_hash: Dict[uint32, bytes32],
    block: Union[FullBlock, HeaderBlock],
) -> uint64:
    """
    Returns the VDF speed in iterations per seconds, to be used for the next block. This depends on
    the number of iterations of the last epoch, and changes at the same block as the difficulty.

    Args:
        constants: consensus constants dict for this chain
        headers: map from header hash to Header for all known headers
        height_to_hash: height -> header hash map for the canonical (LCA) chain
        block: the block to compute the next min_iters after
    """
    next_height: uint32 = uint32(block.height + 1)
    if next_height < constants["DIFFICULTY_EPOCH"]:
        # First epoch has a hardcoded vdf speed.
        return constants["MIN_ITERS_STARTING"]

    prev_block_header: Header = headers[block.prev_header_hash]
    proof_of_space = block.proof_of_space
    difficulty = get_next_difficulty(constants, headers, height_to_hash, prev_block_header)
    iterations = uint64(block.header.data.total_iters - prev_block_header.data.total_iters)
    prev_min_iters = calculate_min_iters_from_iterations(proof_of_space, difficulty, iterations)

    if next_height % constants["DIFFICULTY_EPOCH"] != constants["DIFFICULTY_DELAY"]:
        # Not at a point where ips would change, so return the previous ips.
        # TODO: cache this for efficiency
        return prev_min_iters

    # min iters (along with difficulty) will change in this block, so we need to calculate the new one.
    # The calculation is (iters_2 - iters_1) // epoch size
    # 1 and 2 correspond to height_1 and height_2, being the last block of the second to last, and last
    # block of the last epochs. Basically, it's total iterations per block on average.

    # Height1 is the last block 2 epochs ago, so we can include the iterations taken for mining first block in epoch
    height1 = uint32(next_height - constants["DIFFICULTY_EPOCH"] - constants["DIFFICULTY_DELAY"] - 1)
    # Height2 is the last block in the previous epoch
    height2 = uint32(next_height - constants["DIFFICULTY_DELAY"] - 1)

    block1: Optional[Header] = None
    block2: Optional[Header] = None

    # We need to backtrack until we merge with the LCA chain, so we can use the height_to_hash dict.
    # This is important if we are on a fork, or beyond the LCA.
    curr: Optional[Header] = block.header
    assert curr is not None
    while (
        curr.height not in height_to_hash
        or height_to_hash[curr.height] != curr.header_hash
    ):
        if curr.height == height1:
            block1 = curr
        elif curr.height == height2:
            block2 = curr
        curr = headers.get(curr.prev_header_hash, None)
        assert curr is not None

    # Once we are before the fork point (and before the LCA), we can use the height_to_hash map.
    if block1 is None and height1 >= 0:
        # height1 could be -1, for the first difficulty calculation
        block1 = headers.get(height_to_hash[height1], None)
    if block2 is None:
        block2 = headers.get(height_to_hash[height2], None)
    assert block2 is not None

    if block1 is not None:
        iters1 = block1.data.total_iters
    else:
        # In the case of height == -1, iters = 0
        iters1 = uint64(0)

    iters2 = block2.data.total_iters

    min_iters_precise = uint64(
        (iters2 - iters1)
        // (constants["DIFFICULTY_EPOCH"] * constants["MIN_ITERS_PROPORTION"])
    )
    # Truncate to SIGNIFICANT_BITS so the consensus value has a bounded representation.
    min_iters = uint64(
        truncate_to_significant_bits(min_iters_precise, constants["SIGNIFICANT_BITS"])
    )
    assert count_significant_bits(min_iters) <= constants["SIGNIFICANT_BITS"]
    return min_iters
def get_next_difficulty(
    constants: Dict,
    headers: Dict[bytes32, Header],
    height_to_hash: Dict[uint32, bytes32],
    block: Header,
) -> uint64:
    """
    Returns the difficulty of the next block that extends onto block.
    Used to calculate the number of iterations. When changing this, also change the implementation
    in wallet_state_manager.py.

    Args:
        constants: consensus constants dict for this chain
        headers: map from header hash to Header for all known headers
        height_to_hash: height -> header hash map for the canonical (LCA) chain
        block: the header to compute the next difficulty after

    NOTE(review): this function recurses into itself (via Tc/Tp below); recursion
    terminates because each recursive call targets a strictly earlier height.
    """
    next_height: uint32 = uint32(block.height + 1)
    if next_height < constants["DIFFICULTY_EPOCH"]:
        # We are in the first epoch
        return uint64(constants["DIFFICULTY_STARTING"])

    # Epochs are defined as intervals of DIFFICULTY_EPOCH blocks, inclusive and indexed at 0.
    # For example, [0-2047], [2048-4095], etc. The difficulty changes DIFFICULTY_DELAY into the
    # epoch, as opposed to the first block (as in Bitcoin).
    elif next_height % constants["DIFFICULTY_EPOCH"] != constants["DIFFICULTY_DELAY"]:
        # Not at a point where difficulty would change: difficulty is the weight delta to the parent.
        prev_block: Header = headers[block.prev_header_hash]
        return uint64(block.weight - prev_block.weight)

    #       old diff            curr diff            new diff
    # ----------|-----|----------------------|-----|-----...
    #           h1    h2                     h3    i-1
    # Height1 is the last block 2 epochs ago, so we can include the time to mine 1st block in previous epoch
    height1 = uint32(next_height - constants["DIFFICULTY_EPOCH"] - constants["DIFFICULTY_DELAY"] - 1)
    # Height2 is the DIFFICULTY DELAYth block in the previous epoch
    height2 = uint32(next_height - constants["DIFFICULTY_EPOCH"] - 1)
    # Height3 is the last block in the previous epoch
    height3 = uint32(next_height - constants["DIFFICULTY_DELAY"] - 1)
    # h1 to h2 timestamps are mined on previous difficulty, while and h2 to h3 timestamps are mined on the
    # current difficulty

    block1, block2, block3 = None, None, None

    # We need to backtrack until we merge with the LCA chain, so we can use the height_to_hash dict.
    # This is important if we are on a fork, or beyond the LCA.
    curr: Optional[Header] = block
    assert curr is not None
    while (
        curr.height not in height_to_hash
        or height_to_hash[curr.height] != curr.header_hash
    ):
        if curr.height == height1:
            block1 = curr
        elif curr.height == height2:
            block2 = curr
        elif curr.height == height3:
            block3 = curr
        curr = headers.get(curr.prev_header_hash, None)
        assert curr is not None

    # Once we are before the fork point (and before the LCA), we can use the height_to_hash map.
    if not block1 and height1 >= 0:
        # height1 could be -1, for the first difficulty calculation
        block1 = headers[height_to_hash[height1]]
    if not block2:
        block2 = headers[height_to_hash[height2]]
    if not block3:
        block3 = headers[height_to_hash[height3]]
    assert block2 is not None and block3 is not None

    # Current difficulty parameter (diff of block h = i - 1)
    Tc = get_next_difficulty(constants, headers, height_to_hash, headers[block.prev_header_hash])

    # Previous difficulty parameter (diff of block h = i - 2048 - 1)
    Tp = get_next_difficulty(constants, headers, height_to_hash, headers[block2.prev_header_hash])
    if block1:
        timestamp1 = block1.data.timestamp  # i - 512 - 1
    else:
        # In the case of height == -1, there is no timestamp here, so assume the genesis block
        # took constants["BLOCK_TIME_TARGET"] seconds to mine.
        genesis = headers[height_to_hash[uint32(0)]]
        timestamp1 = genesis.data.timestamp - constants["BLOCK_TIME_TARGET"]
    timestamp2 = block2.data.timestamp  # i - 2048 + 512 - 1
    timestamp3 = block3.data.timestamp  # i - 512 - 1

    # Numerator fits in 128 bits, so big int is not necessary.
    # We multiply by the denominators here, so we only have one fraction in the end (avoiding floating point).
    term1 = (
        constants["DIFFICULTY_DELAY"]
        * Tp
        * (timestamp3 - timestamp2)
        * constants["BLOCK_TIME_TARGET"]
    )
    term2 = (
        (constants["DIFFICULTY_WARP_FACTOR"] - 1)
        * (constants["DIFFICULTY_EPOCH"] - constants["DIFFICULTY_DELAY"])
        * Tc
        * (timestamp2 - timestamp1)
        * constants["BLOCK_TIME_TARGET"]
    )

    # Round down after the division
    new_difficulty_precise: uint64 = uint64(
        (term1 + term2)
        // (
            constants["DIFFICULTY_WARP_FACTOR"]
            * (timestamp3 - timestamp2)
            * (timestamp2 - timestamp1)
        )
    )
    # Take only DIFFICULTY_SIGNIFICANT_BITS significant bits
    new_difficulty = uint64(
        truncate_to_significant_bits(new_difficulty_precise, constants["SIGNIFICANT_BITS"])
    )
    assert count_significant_bits(new_difficulty) <= constants["SIGNIFICANT_BITS"]

    # Only change by a max factor, to prevent attacks, as in greenpaper, and must be at least 1
    max_diff = uint64(
        truncate_to_significant_bits(
            constants["DIFFICULTY_FACTOR"] * Tc,
            constants["SIGNIFICANT_BITS"],
        )
    )
    min_diff = uint64(
        truncate_to_significant_bits(
            Tc // constants["DIFFICULTY_FACTOR"],
            constants["SIGNIFICANT_BITS"],
        )
    )
    if new_difficulty >= Tc:
        return min(new_difficulty, max_diff)
    else:
        return max([uint64(1), new_difficulty, min_diff])
async def validate_unfinished_block_header(
    constants: ConsensusConstants,
    headers: Dict[bytes32, Header],
    height_to_hash: Dict[uint32, bytes32],
    block_header: Header,
    proof_of_space: ProofOfSpace,
    prev_header_block: Optional[HeaderBlock],
    pre_validated: bool = False,
    pos_quality_string: bytes32 = None,
) -> Tuple[Optional[Err], Optional[uint64]]:
    """
    Block validation algorithm. Returns the number of VDF iterations that this block's
    proof of time must have, if the candidate block is fully valid (except for proof of
    time). The same as validate_block, but without proof of time and challenge validation.
    If the block is invalid, an error code is returned.

    Does NOT validate transactions and fees.

    Returns:
        (error, iterations): error is None on success; iterations is None on failure.

    NOTE(review): the numbered checks below skip 6, 9 and 14-16 — presumably removed
    in earlier revisions; the numbering is kept for cross-referencing.
    """
    if not pre_validated:
        # 1. The hash of the proof of space must match header_data.proof_of_space_hash
        if proof_of_space.get_hash() != block_header.data.proof_of_space_hash:
            return (Err.INVALID_POSPACE_HASH, None)

        # 2. The coinbase signature must be valid, according to the pool public key
        # TODO: change numbers

        # 3. Check harvester signature of header data is valid based on harvester key
        validates = blspy.AugSchemeMPL.verify(
            proof_of_space.plot_public_key,
            block_header.data.get_hash(),
            block_header.plot_signature,
        )
        if not validates:
            return (Err.INVALID_PLOT_SIGNATURE, None)

    # 4. If not genesis, the previous block must exist
    if prev_header_block is not None and block_header.prev_header_hash not in headers:
        return (Err.DOES_NOT_EXTEND, None)

    # 5. If not genesis, the timestamp must be >= the average timestamp of last 11 blocks
    # and less than 2 hours in the future (if block height < 11, average all previous blocks).
    # Average is the sum, int divided by the number of timestamps
    if prev_header_block is not None:
        last_timestamps: List[uint64] = []
        curr = prev_header_block.header
        while len(last_timestamps) < constants.NUMBER_OF_TIMESTAMPS:
            last_timestamps.append(curr.data.timestamp)
            fetched = headers.get(curr.prev_header_hash, None)
            if not fetched:
                break
            curr = fetched
        if len(last_timestamps) != constants.NUMBER_OF_TIMESTAMPS:
            # For blocks 1 to 10, average timestamps of all previous blocks
            assert curr.height == 0
        prev_time: uint64 = uint64(int(sum(last_timestamps) // len(last_timestamps)))
        if block_header.data.timestamp < prev_time:
            return (Err.TIMESTAMP_TOO_FAR_IN_PAST, None)
        if block_header.data.timestamp > time.time() + constants.MAX_FUTURE_TIME:
            return (Err.TIMESTAMP_TOO_FAR_IN_FUTURE, None)

    # 7. Extension data must be valid, if any is present

    # Compute challenge of parent
    challenge_hash: bytes32
    if prev_header_block is not None:
        challenge: Challenge = prev_header_block.challenge
        challenge_hash = challenge.get_hash()
        # 8. Check challenge hash of prev is the same as in pos
        if challenge_hash != proof_of_space.challenge_hash:
            return (Err.INVALID_POSPACE_CHALLENGE, None)

    # 10. The proof of space must be valid on the challenge
    # (skipped if the caller already supplied a pre-computed quality string)
    if pos_quality_string is None:
        pos_quality_string = proof_of_space.verify_and_get_quality_string(
            constants.NUMBER_ZERO_BITS_CHALLENGE_SIG
        )
        if not pos_quality_string:
            return (Err.INVALID_POSPACE, None)

    if prev_header_block is not None:
        # 11. If not genesis, the height on the previous block must be one less than on this block
        if block_header.height != prev_header_block.height + 1:
            return (Err.INVALID_HEIGHT, None)
    else:
        # 12. If genesis, the height must be 0
        if block_header.height != 0:
            return (Err.INVALID_HEIGHT, None)

    # 13. The pool max height must be valid
    if (
        block_header.data.pool_target.max_height != 0
        and block_header.data.pool_target.max_height < block_header.height
    ):
        return (Err.INVALID_POOL_TARGET, None)

    difficulty: uint64
    if prev_header_block is not None:
        difficulty = get_next_difficulty(constants, headers, height_to_hash, prev_header_block.header)
        min_iters = get_next_min_iters(constants, headers, height_to_hash, prev_header_block)
    else:
        difficulty = uint64(constants.DIFFICULTY_STARTING)
        min_iters = uint64(constants.MIN_ITERS_STARTING)

    number_of_iters: uint64 = calculate_iterations_quality(
        pos_quality_string,
        proof_of_space.size,
        difficulty,
        min_iters,
    )

    assert count_significant_bits(difficulty) <= constants.SIGNIFICANT_BITS
    assert count_significant_bits(min_iters) <= constants.SIGNIFICANT_BITS

    if prev_header_block is not None:
        # 17. If not genesis, the total weight must be the parent weight + difficulty
        if block_header.weight != prev_header_block.weight + difficulty:
            return (Err.INVALID_WEIGHT, None)

        # 18. If not genesis, the total iters must be parent iters + number_iters
        if (
            block_header.data.total_iters
            != prev_header_block.header.data.total_iters + number_of_iters
        ):
            return (Err.INVALID_TOTAL_ITERS, None)
    else:
        # 19. If genesis, the total weight must be starting difficulty
        if block_header.weight != difficulty:
            return (Err.INVALID_WEIGHT, None)

        # 20. If genesis, the total iters must be number iters
        if block_header.data.total_iters != number_of_iters:
            return (Err.INVALID_TOTAL_ITERS, None)

    return (None, number_of_iters)
async def validate_unfinished_block_header(
    constants: Dict,
    headers: Dict[bytes32, Header],
    height_to_hash: Dict[uint32, bytes32],
    block_header: Header,
    proof_of_space: ProofOfSpace,
    prev_header_block: Optional[HeaderBlock],
    pre_validated: bool = False,
    pos_quality_string: bytes32 = None,
) -> Tuple[Optional[Err], Optional[uint64]]:
    """
    Block validation algorithm. Returns the number of VDF iterations that this block's
    proof of time must have, if the candidate block is fully valid (except for proof of
    time). The same as validate_block, but without proof of time and challenge validation.
    If the block is invalid, an error code is returned.

    Does NOT validate transactions and fees.

    Returns:
        (error, iterations): error is None on success; iterations is None on failure.

    NOTE(review): this is an older dict-constants variant of the ConsensusConstants
    version above; checks 6, 9 and 14-16 are absent here as well.
    """
    if not pre_validated:
        # 1. The hash of the proof of space must match header_data.proof_of_space_hash
        if proof_of_space.get_hash() != block_header.data.proof_of_space_hash:
            return (Err.INVALID_POSPACE_HASH, None)

        # 2. The coinbase signature must be valid, according to the pool public key
        pair = block_header.data.coinbase_signature.PkMessagePair(
            proof_of_space.pool_pubkey,
            block_header.data.coinbase.name(),
        )
        if not block_header.data.coinbase_signature.validate([pair]):
            return (Err.INVALID_COINBASE_SIGNATURE, None)

        # 3. Check harvester signature of header data is valid based on harvester key
        if not block_header.harvester_signature.verify(
            [blspy.Util.hash256(block_header.data.get_hash())],
            [proof_of_space.plot_pubkey],
        ):
            return (Err.INVALID_HARVESTER_SIGNATURE, None)

    # 4. If not genesis, the previous block must exist
    if prev_header_block is not None and block_header.prev_header_hash not in headers:
        return (Err.DOES_NOT_EXTEND, None)

    # 5. If not genesis, the timestamp must be >= the average timestamp of last 11 blocks
    # and less than 2 hours in the future (if block height < 11, average all previous blocks).
    # Average is the sum, int divided by the number of timestamps
    if prev_header_block is not None:
        last_timestamps: List[uint64] = []
        curr = prev_header_block.header
        while len(last_timestamps) < constants["NUMBER_OF_TIMESTAMPS"]:
            last_timestamps.append(curr.data.timestamp)
            fetched = headers.get(curr.prev_header_hash, None)
            if not fetched:
                break
            curr = fetched
        if len(last_timestamps) != constants["NUMBER_OF_TIMESTAMPS"]:
            # For blocks 1 to 10, average timestamps of all previous blocks
            assert curr.height == 0
        prev_time: uint64 = uint64(int(sum(last_timestamps) // len(last_timestamps)))
        if block_header.data.timestamp < prev_time:
            return (Err.TIMESTAMP_TOO_FAR_IN_PAST, None)
        if block_header.data.timestamp > time.time() + constants["MAX_FUTURE_TIME"]:
            return (Err.TIMESTAMP_TOO_FAR_IN_FUTURE, None)

    # 7. Extension data must be valid, if any is present

    # Compute challenge of parent
    challenge_hash: bytes32
    if prev_header_block is not None:
        challenge: Challenge = prev_header_block.challenge
        challenge_hash = challenge.get_hash()
        # 8. Check challenge hash of prev is the same as in pos
        if challenge_hash != proof_of_space.challenge_hash:
            return (Err.INVALID_POSPACE_CHALLENGE, None)

    # 10. The proof of space must be valid on the challenge
    # (skipped if the caller already supplied a pre-computed quality string)
    if pos_quality_string is None:
        pos_quality_string = proof_of_space.verify_and_get_quality_string()
        if not pos_quality_string:
            return (Err.INVALID_POSPACE, None)

    if prev_header_block is not None:
        # 11. If not genesis, the height on the previous block must be one less than on this block
        if block_header.height != prev_header_block.height + 1:
            return (Err.INVALID_HEIGHT, None)
    else:
        # 12. If genesis, the height must be 0
        if block_header.height != 0:
            return (Err.INVALID_HEIGHT, None)

    # 13. The coinbase reward must match the block schedule
    coinbase_reward = calculate_block_reward(block_header.height)
    if coinbase_reward != block_header.data.coinbase.amount:
        return (Err.INVALID_COINBASE_AMOUNT, None)

    # 13b. The coinbase parent id must be the height
    if block_header.data.coinbase.parent_coin_info != block_header.height.to_bytes(32, "big"):
        return (Err.INVALID_COINBASE_PARENT, None)

    # 13c. The fees coin parent id must be hash(hash(height))
    if block_header.data.fees_coin.parent_coin_info != std_hash(std_hash(uint32(block_header.height))):
        return (Err.INVALID_FEES_COIN_PARENT, None)

    difficulty: uint64
    if prev_header_block is not None:
        difficulty = get_next_difficulty(constants, headers, height_to_hash, prev_header_block.header)
        min_iters = get_next_min_iters(constants, headers, height_to_hash, prev_header_block)
    else:
        difficulty = uint64(constants["DIFFICULTY_STARTING"])
        min_iters = uint64(constants["MIN_ITERS_STARTING"])

    number_of_iters: uint64 = calculate_iterations_quality(
        pos_quality_string,
        proof_of_space.size,
        difficulty,
        min_iters,
    )

    assert count_significant_bits(difficulty) <= constants["SIGNIFICANT_BITS"]
    assert count_significant_bits(min_iters) <= constants["SIGNIFICANT_BITS"]

    if prev_header_block is not None:
        # 17. If not genesis, the total weight must be the parent weight + difficulty
        if block_header.weight != prev_header_block.weight + difficulty:
            return (Err.INVALID_WEIGHT, None)

        # 18. If not genesis, the total iters must be parent iters + number_iters
        if (
            block_header.data.total_iters
            != prev_header_block.header.data.total_iters + number_of_iters
        ):
            return (Err.INVALID_TOTAL_ITERS, None)
    else:
        # 19. If genesis, the total weight must be starting difficulty
        if block_header.weight != difficulty:
            return (Err.INVALID_WEIGHT, None)

        # 20. If genesis, the total iters must be number iters
        if block_header.data.total_iters != number_of_iters:
            return (Err.INVALID_TOTAL_ITERS, None)

    return (None, number_of_iters)
def get_next_difficulty(
    constants: ConsensusConstants,
    sub_blocks: Dict[bytes32, SubBlockRecord],
    height_to_hash: Dict[uint32, bytes32],
    prev_header_hash: bytes32,
    sub_block_height: uint32,
    current_difficulty: uint64,
    deficit: uint8,
    new_slot: bool,
    signage_point_total_iters: uint128,
    skip_epoch_check=False,
) -> uint64:
    """
    Returns the difficulty of the next sub-block that extends onto sub-block.
    Used to calculate the number of iterations. When changing this, also change the implementation
    in wallet_state_manager.py.

    Args:
        constants: consensus constants being used for this chain
        sub_blocks: dictionary from header hash to SBR of all included SBR
        height_to_hash: sub-block height to header hash map for sub-blocks in peak path
        prev_header_hash: header hash of the previous sub-block
        sub_block_height: the sub-block height of the sub-block to look at
        current_difficulty: difficulty at the infusion point of the sub_block at sub_block_height
        deficit: deficit of the sub_block at sub_block_height
        new_slot: whether or not there is a new slot after sub_block_height
        signage_point_total_iters: signage point iters of the sub_block at sub_block_height
        skip_epoch_check: don't check correct epoch
    """
    next_sub_block_height: uint32 = uint32(sub_block_height + 1)

    if next_sub_block_height < (constants.EPOCH_SUB_BLOCKS - constants.MAX_SUB_SLOT_SUB_BLOCKS):
        # We are in the first epoch
        return uint64(constants.DIFFICULTY_STARTING)

    if prev_header_hash not in sub_blocks:
        raise ValueError(f"Header hash {prev_header_hash} not in sub blocks")

    prev_sb: SubBlockRecord = sub_blocks[prev_header_hash]

    # If we are in the same slot as previous sub-block (or the epoch cannot finish here),
    # return same difficulty.
    if not skip_epoch_check:
        _, can_finish_epoch = can_finish_sub_and_full_epoch(
            constants, sub_block_height, deficit, sub_blocks, prev_header_hash, False
        )
        if not new_slot or not can_finish_epoch:
            return current_difficulty

    last_block_prev: SubBlockRecord = _get_last_block_in_previous_epoch(
        constants, height_to_hash, sub_blocks, prev_sb
    )

    # Ensure we get a block for the last block as well, and that it is before the signage point
    last_block_curr = prev_sb
    while last_block_curr.total_iters > signage_point_total_iters or not last_block_curr.is_block:
        last_block_curr = sub_blocks[last_block_curr.prev_hash]

    # Both endpoints are blocks (is_block), so timestamps are present.
    assert last_block_curr.timestamp is not None
    assert last_block_prev.timestamp is not None
    actual_epoch_time: uint64 = uint64(last_block_curr.timestamp - last_block_prev.timestamp)

    # Difficulty of the previous sub-block = its weight increment over its parent.
    old_difficulty = uint64(prev_sb.weight - sub_blocks[prev_sb.prev_hash].weight)

    # Terms are rearranged so there is only one division.
    new_difficulty_precise = (
        (last_block_curr.weight - last_block_prev.weight)
        * constants.SUB_SLOT_TIME_TARGET
        // (constants.SLOT_SUB_BLOCKS_TARGET * actual_epoch_time)
    )
    # Take only DIFFICULTY_SIGNIFICANT_BITS significant bits
    new_difficulty = uint64(
        truncate_to_significant_bits(new_difficulty_precise, constants.SIGNIFICANT_BITS)
    )
    assert count_significant_bits(new_difficulty) <= constants.SIGNIFICANT_BITS

    # Only change by a max factor, to prevent attacks, as in greenpaper, and must be at least 1
    max_diff = uint64(
        truncate_to_significant_bits(
            constants.DIFFICULTY_FACTOR * old_difficulty,
            constants.SIGNIFICANT_BITS,
        )
    )
    min_diff = uint64(
        truncate_to_significant_bits(
            old_difficulty // constants.DIFFICULTY_FACTOR,
            constants.SIGNIFICANT_BITS,
        )
    )
    if new_difficulty >= old_difficulty:
        return min(new_difficulty, max_diff)
    else:
        return max([uint64(1), new_difficulty, min_diff])
def get_next_sub_slot_iters(
    constants: ConsensusConstants,
    sub_blocks: Dict[bytes32, SubBlockRecord],
    height_to_hash: Dict[uint32, bytes32],
    prev_header_hash: bytes32,
    sub_block_height: uint32,
    curr_sub_slot_iters: uint64,
    deficit: uint8,
    new_slot: bool,
    signage_point_total_iters: uint128,
    skip_epoch_check=False,
) -> uint64:
    """
    Returns the slot iterations required for the next block after the one at sub_block_height,
    where new_slot is true iff the next block will be in the next slot.

    Args:
        constants: consensus constants being used for this chain
        sub_blocks: dictionary from header hash to SBR of all included SBR
        height_to_hash: sub-block height to header hash map for sub-blocks in peak path
        prev_header_hash: header hash of the previous sub-block
        sub_block_height: the sub-block height of the sub-block to look at
        curr_sub_slot_iters: sub-slot iters at the infusion point of the sub_block at sub_block_height
        deficit: deficit of the sub_block at sub_block_height
        new_slot: whether or not there is a new slot after sub_block_height
        signage_point_total_iters: signage point iters of the sub_block at sub_block_height
        skip_epoch_check: don't check correct epoch
    """
    next_sub_block_height: uint32 = uint32(sub_block_height + 1)

    if next_sub_block_height < (constants.EPOCH_SUB_BLOCKS - constants.MAX_SUB_SLOT_SUB_BLOCKS):
        # First epoch uses the hard-coded starting sub-slot iterations.
        return uint64(constants.SUB_SLOT_ITERS_STARTING)

    if prev_header_hash not in sub_blocks:
        raise ValueError(f"Header hash {prev_header_hash} not in sub blocks")

    prev_sb: SubBlockRecord = sub_blocks[prev_header_hash]

    # If we are in the same epoch (or the epoch cannot finish here), return same ssi.
    if not skip_epoch_check:
        _, can_finish_epoch = can_finish_sub_and_full_epoch(
            constants, sub_block_height, deficit, sub_blocks, prev_header_hash, False
        )
        if not new_slot or not can_finish_epoch:
            return curr_sub_slot_iters

    last_block_prev: SubBlockRecord = _get_last_block_in_previous_epoch(
        constants, height_to_hash, sub_blocks, prev_sb
    )

    # Ensure we get a block for the last block as well, and that it is before the signage point
    last_block_curr = prev_sb
    while last_block_curr.total_iters > signage_point_total_iters or not last_block_curr.is_block:
        last_block_curr = sub_blocks[last_block_curr.prev_hash]

    # Both endpoints are blocks (is_block), so timestamps are present.
    assert last_block_curr.timestamp is not None and last_block_prev.timestamp is not None

    # This is computed as the iterations per second in last epoch, times the target number of seconds per slot.
    new_ssi_precise: uint64 = uint64(
        constants.SUB_SLOT_TIME_TARGET
        * (last_block_curr.total_iters - last_block_prev.total_iters)
        // (last_block_curr.timestamp - last_block_prev.timestamp)
    )
    new_ssi = uint64(
        truncate_to_significant_bits(new_ssi_precise, constants.SIGNIFICANT_BITS)
    )

    # Only change by a max factor as a sanity check, and never below NUM_SPS_SUB_SLOT.
    max_ssi = uint64(
        truncate_to_significant_bits(
            constants.DIFFICULTY_FACTOR * last_block_curr.sub_slot_iters,
            constants.SIGNIFICANT_BITS,
        )
    )
    min_ssi = uint64(
        truncate_to_significant_bits(
            last_block_curr.sub_slot_iters // constants.DIFFICULTY_FACTOR,
            constants.SIGNIFICANT_BITS,
        )
    )
    if new_ssi >= last_block_curr.sub_slot_iters:
        new_ssi = min(new_ssi, max_ssi)
    else:
        new_ssi = uint64(max([constants.NUM_SPS_SUB_SLOT, new_ssi, min_ssi]))

    # Round down to a multiple of NUM_SPS_SUB_SLOT so signage points divide the sub slot evenly.
    new_ssi = uint64(new_ssi - new_ssi % constants.NUM_SPS_SUB_SLOT)  # Must divide the sub slot
    assert count_significant_bits(new_ssi) <= constants.SIGNIFICANT_BITS
    return new_ssi