async def get_sub_block_records_close_to_peak(
    self, blocks_n: int
) -> Tuple[Dict[bytes32, SubBlockRecord], Optional[bytes32]]:
    """
    Returns a dictionary of all sub-block records with height >= (peak height - blocks_n),
    as well as the header hash of the peak. Returns ({}, None) when no peak is stored.
    """
    res = await self.db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1")
    row = await res.fetchone()
    await res.close()
    if row is None:
        # No peak recorded yet: empty chain state.
        return {}, None
    header_hash_bytes, peak_height = row
    peak: bytes32 = bytes32(bytes.fromhex(header_hash_bytes))
    # Parameterized query instead of f-string interpolation into SQL.
    cursor = await self.db.execute(
        "SELECT header_hash, block from block_records WHERE height >= ?",
        (peak_height - blocks_n,),
    )
    rows = await cursor.fetchall()
    await cursor.close()
    ret: Dict[bytes32, SubBlockRecord] = {}
    for row in rows:
        header_hash_bytes, sub_block_bytes = row
        # Wrap in bytes32 so keys match the annotated Dict[bytes32, ...] and the
        # bytes32 `peak` value above (previously plain bytes were inserted).
        ret[bytes32(bytes.fromhex(header_hash_bytes))] = SubBlockRecord.from_bytes(sub_block_bytes)
    return ret, peak
async def get_blockchain_state(self) -> Dict:
    """
    Fetch the node's blockchain state over RPC, deserializing the peak
    sub-block record in place when one is present.
    """
    response = await self.fetch("get_blockchain_state", {})
    state = response["blockchain_state"]
    peak_json = state["peak"]
    if peak_json is not None:
        state["peak"] = SubBlockRecord.from_json_dict(peak_json)
    return state
async def get_sub_block_record_by_sub_height(self, sub_height) -> Optional[SubBlockRecord]:
    """
    Fetch the sub-block record at the given sub-height over RPC.
    Returns None if the RPC call fails or no record exists at that height.
    """
    try:
        response = await self.fetch("get_sub_block_record_by_sub_height", {"sub_height": sub_height})
        # Guard against a null record, consistent with get_sub_block_record;
        # previously a null payload raised outside the try instead of returning None.
        if response["sub_block_record"] is None:
            return None
    except Exception:
        return None
    return SubBlockRecord.from_json_dict(response["sub_block_record"])
async def get_sub_block_record(self, header_hash: bytes32) -> Optional[SubBlockRecord]:
    """
    Look up a single sub-block record in the database by header hash.
    Returns None when no matching row exists.
    """
    cursor = await self.db.execute(
        "SELECT sub_block from sub_block_records WHERE header_hash=?",
        (header_hash.hex(),),
    )
    row = await cursor.fetchone()
    await cursor.close()
    if row is None:
        return None
    return SubBlockRecord.from_bytes(row[0])
async def get_sub_block_record(self, header_hash) -> Optional[SubBlockRecord]:
    """
    Fetch a sub-block record by header hash over RPC.
    Returns None if the call fails or the node has no such record.
    """
    try:
        response = await self.fetch("get_sub_block_record", {"header_hash": header_hash.hex()})
        record_json = response["sub_block_record"]
    except Exception:
        return None
    if record_json is None:
        return None
    return SubBlockRecord.from_json_dict(record_json)
def get_icc(
    constants,
    vdf_end_total_iters: uint128,
    finished_sub_slots: List[EndOfSubSlotBundle],
    latest_sub_block: SubBlockRecord,
    sub_blocks: Dict[bytes32, SubBlockRecord],
    sub_slot_start_total_iters: uint128,
    deficit: uint8,
) -> Tuple[Optional[VDFInfo], Optional[VDFProof]]:
    """
    Computes the infused challenge chain (ICC) VDF info and proof ending at
    vdf_end_total_iters, or (None, None) when no ICC VDF applies at this point.

    The previous deficit is taken from the last finished sub slot if any were
    finished, otherwise from the latest sub block.
    """
    if len(finished_sub_slots) == 0:
        prev_deficit = latest_sub_block.deficit
    else:
        prev_deficit = finished_sub_slots[-1].reward_chain.deficit

    if deficit == prev_deficit == constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK:
        # new slot / overflow sb to new slot / overflow sb
        return None, None

    if deficit == (prev_deficit - 1) == (constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK - 1):
        # new slot / overflow sb to challenge sb
        return None, None

    if len(finished_sub_slots) != 0:
        # A sub slot was just finished: the ICC VDF starts from the default
        # element at the last finished slot's infused challenge chain hash.
        last_ss = finished_sub_slots[-1]
        assert last_ss.infused_challenge_chain is not None
        assert finished_sub_slots[-1].reward_chain.deficit <= (constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK - 1)
        return get_vdf_info_and_proof(
            constants,
            ClassgroupElement.get_default_element(),
            last_ss.infused_challenge_chain.get_hash(),
            uint64(vdf_end_total_iters - sub_slot_start_total_iters),
        )

    # No finished sub slots: walk back to the most recent challenge sub block
    # or the first sub block in the current sub slot.
    curr = latest_sub_block  # curr deficit is 0, 1, 2, 3, or 4
    while not curr.is_challenge_sub_block(constants) and not curr.first_in_sub_slot:
        curr = sub_blocks[curr.prev_hash]

    icc_iters = uint64(vdf_end_total_iters - latest_sub_block.total_iters)
    if latest_sub_block.is_challenge_sub_block(constants):
        # A challenge sub block restarts the ICC VDF from the default element.
        icc_input = ClassgroupElement.get_default_element()
    else:
        icc_input = latest_sub_block.infused_challenge_vdf_output

    if curr.is_challenge_sub_block(constants):  # Deficit 4
        icc_challenge_hash = curr.challenge_block_info_hash
    else:
        assert curr.finished_infused_challenge_slot_hashes is not None
        # First sub block in sub slot has deficit 0,1,2 or 3
        icc_challenge_hash = curr.finished_infused_challenge_slot_hashes[-1]

    return get_vdf_info_and_proof(
        constants,
        icc_input,
        icc_challenge_hash,
        icc_iters,
    )
def batch_pre_validate_sub_blocks(
    constants_dict: Dict,
    sub_blocks_pickled: Dict[bytes, bytes],
    header_blocks_pickled: List[bytes],
    transaction_generators: List[Optional[bytes]],
    check_filter: bool,
    expected_difficulty: List[uint64],
    expected_sub_slot_iters: List[uint64],
) -> List[bytes]:
    """
    Pre-validates a batch of serialized header blocks (suitable for running in a
    worker process) and returns one serialized PreValidationResult per block.
    A block that raises during validation yields an UNKNOWN-error result.
    """
    assert len(header_blocks_pickled) == len(transaction_generators)
    # Rehydrate consensus constants and the sub-block lookup table from their
    # picklable representations.
    constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
    sub_blocks = {key: SubBlockRecord.from_bytes(value) for key, value in sub_blocks_pickled.items()}
    results: List[PreValidationResult] = []
    for idx in range(len(header_blocks_pickled)):
        try:
            header_block: HeaderBlock = HeaderBlock.from_bytes(header_blocks_pickled[idx])
            generator: Optional[bytes] = transaction_generators[idx]
            required_iters, error = validate_finished_header_block(
                constants,
                sub_blocks,
                header_block,
                check_filter,
                expected_difficulty[idx],
                expected_sub_slot_iters[idx],
            )
            error_int: Optional[uint16] = uint16(error.code.value) if error is not None else None
            cost_result = None
            # Only compute the generator cost for blocks that validated cleanly.
            if not error and generator is not None:
                cost_result = calculate_cost_of_program(
                    Program.from_bytes(generator), constants.CLVM_COST_RATIO_CONSTANT
                )
            results.append(PreValidationResult(error_int, required_iters, cost_result))
        except Exception:
            error_stack = traceback.format_exc()
            log.error(f"Exception: {error_stack}")
            results.append(PreValidationResult(uint16(Err.UNKNOWN.value), None, None))
    return [bytes(r) for r in results]
async def get_sub_block_records(
    self,
) -> Tuple[Dict[bytes32, SubBlockRecord], Optional[bytes32]]:
    """
    Returns a dictionary with all sub blocks, as well as the header hash of
    the peak, if present.
    """
    cursor = await self.db.execute("SELECT * from block_records")
    all_rows = await cursor.fetchall()
    await cursor.close()
    records: Dict[bytes32, SubBlockRecord] = {}
    peak_hash: Optional[bytes32] = None
    for db_row in all_rows:
        hh = bytes.fromhex(db_row[0])
        records[hh] = SubBlockRecord.from_bytes(db_row[3])
        if db_row[5]:
            # Sanity check: the table must contain at most one peak row.
            assert peak_hash is None
            peak_hash = hh
    return records, peak_hash
async def get_sub_block_records_in_range(
    self,
    start: int,
    stop: int,
) -> Dict[bytes32, SubBlockRecord]:
    """
    Returns a dictionary with all sub blocks whose height is between start and
    stop (both inclusive), if present.
    """
    # Parameterized query instead of f-string interpolation into SQL.
    cursor = await self.db.execute(
        "SELECT header_hash, block from block_records WHERE height >= ? and height <= ?",
        (start, stop),
    )
    rows = await cursor.fetchall()
    await cursor.close()
    ret: Dict[bytes32, SubBlockRecord] = {}
    for row in rows:
        header_hash = bytes.fromhex(row[0])
        ret[header_hash] = SubBlockRecord.from_bytes(row[1])
    return ret
async def get_sub_block_records_close_to_peak(
    self, blocks_n: int
) -> Tuple[Dict[bytes32, SubBlockRecord], Optional[bytes32]]:
    """
    Returns a dictionary with all sub_blocks that have height >= peak
    height - blocks_n, as well as the peak header hash. Returns ({}, None)
    when no peak is stored.
    """
    # Select the columns explicitly instead of SELECT * with magic indices,
    # matching the other close-to-peak query in this file.
    res = await self.db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1")
    peak_row = await res.fetchone()
    await res.close()
    if peak_row is None:
        return {}, None
    peak_hash_hex, peak_height = peak_row
    # Parameterized query instead of f-string interpolation into SQL.
    cursor = await self.db.execute(
        "SELECT header_hash, block from block_records WHERE height >= ?",
        (peak_height - blocks_n,),
    )
    rows = await cursor.fetchall()
    await cursor.close()
    ret: Dict[bytes32, SubBlockRecord] = {}
    for row in rows:
        header_hash = bytes.fromhex(row[0])
        ret[header_hash] = SubBlockRecord.from_bytes(row[1])
    # Wrap in bytes32 so the returned peak matches the annotated
    # Optional[bytes32] (previously plain bytes were returned).
    return ret, bytes32(bytes.fromhex(peak_hash_hex))
def block_to_sub_block_record(
    constants: ConsensusConstants,
    sub_blocks: Dict[bytes32, SubBlockRecord],
    height_to_hash: Dict[uint32, bytes32],
    required_iters: uint64,
    full_block: Optional[Union[FullBlock, HeaderBlock]],
    header_block: Optional[HeaderBlock],
):
    """
    Builds a SubBlockRecord from either a FullBlock or a HeaderBlock
    (exactly one of full_block / header_block must be provided).

    Derives the block height, sub-slot iterations, deficit, finished-slot
    hashes, and (when present) the sub-epoch summary from the block and its
    predecessor in `sub_blocks`.
    """
    # Exactly one of the two inputs is used; HeaderBlock exposes the same
    # fields this function reads.
    if full_block is None:
        assert header_block is not None
        block: Union[HeaderBlock, FullBlock] = header_block
    else:
        block = full_block

    if block.sub_block_height == 0:
        # Genesis: no predecessor, starting sub-slot iterations.
        prev_sb: Optional[SubBlockRecord] = None
        sub_slot_iters: uint64 = uint64(constants.SUB_SLOT_ITERS_STARTING)
        height = 0
    else:
        prev_sb = sub_blocks[block.prev_header_hash]
        assert prev_sb is not None
        # (Transaction-)block height only advances when the previous sub block
        # was a block.
        if prev_sb.is_block:
            height = prev_sb.height + 1
        else:
            height = prev_sb.height
        sub_slot_iters = get_next_sub_slot_iters(
            constants,
            sub_blocks,
            height_to_hash,
            prev_sb.prev_hash,
            prev_sb.sub_block_height,
            prev_sb.sub_slot_iters,
            prev_sb.deficit,
            len(block.finished_sub_slots) > 0,
            prev_sb.sp_total_iters(constants),
        )

    overflow = is_overflow_sub_block(constants, block.reward_chain_sub_block.signage_point_index)
    deficit = calculate_deficit(
        constants,
        block.sub_block_height,
        prev_sb,
        overflow,
        len(block.finished_sub_slots),
    )

    # Foliage-block and transaction fields are only present on (transaction)
    # blocks; otherwise recorded as None.
    prev_block_hash = block.foliage_block.prev_block_hash if block.foliage_block is not None else None
    timestamp = block.foliage_block.timestamp if block.foliage_block is not None else None
    fees = block.transactions_info.fees if block.transactions_info is not None else None
    # reward_claims_incorporated = (
    #     block.transactions_info.reward_claims_incorporated if block.transactions_info is not None else None
    # )

    if len(block.finished_sub_slots) > 0:
        # Record the hashes of every sub slot finished just before this block.
        finished_challenge_slot_hashes: Optional[List[bytes32]] = [
            sub_slot.challenge_chain.get_hash() for sub_slot in block.finished_sub_slots
        ]
        finished_reward_slot_hashes: Optional[List[bytes32]] = [
            sub_slot.reward_chain.get_hash() for sub_slot in block.finished_sub_slots
        ]
        finished_infused_challenge_slot_hashes: Optional[List[bytes32]] = [
            sub_slot.infused_challenge_chain.get_hash()
            for sub_slot in block.finished_sub_slots
            if sub_slot.infused_challenge_chain is not None
        ]
    elif block.sub_block_height == 0:
        # Genesis uses the constant first-slot challenges.
        finished_challenge_slot_hashes = [constants.FIRST_CC_CHALLENGE]
        finished_reward_slot_hashes = [constants.FIRST_RC_CHALLENGE]
        finished_infused_challenge_slot_hashes = None
    else:
        finished_challenge_slot_hashes = None
        finished_reward_slot_hashes = None
        finished_infused_challenge_slot_hashes = None

    # If any finished sub slot carries a sub-epoch summary hash, recompute the
    # summary and verify it matches.
    found_ses_hash: Optional[bytes32] = None
    ses: Optional[SubEpochSummary] = None
    if len(block.finished_sub_slots) > 0:
        for sub_slot in block.finished_sub_slots:
            if sub_slot.challenge_chain.subepoch_summary_hash is not None:
                found_ses_hash = sub_slot.challenge_chain.subepoch_summary_hash
    if found_ses_hash:
        assert prev_sb is not None
        assert len(block.finished_sub_slots) > 0
        ses = make_sub_epoch_summary(
            constants,
            sub_blocks,
            block.sub_block_height,
            sub_blocks[prev_sb.prev_hash],
            block.finished_sub_slots[0].challenge_chain.new_difficulty,
            block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters,
        )
        assert ses.get_hash() == found_ses_hash

    cbi = ChallengeBlockInfo(
        block.reward_chain_sub_block.proof_of_space,
        block.reward_chain_sub_block.challenge_chain_sp_vdf,
        block.reward_chain_sub_block.challenge_chain_sp_signature,
        block.reward_chain_sub_block.challenge_chain_ip_vdf,
    )

    if block.reward_chain_sub_block.infused_challenge_chain_ip_vdf is not None:
        icc_output: Optional[ClassgroupElement] = block.reward_chain_sub_block.infused_challenge_chain_ip_vdf.output
    else:
        icc_output = None

    return SubBlockRecord(
        block.header_hash,
        block.prev_header_hash,
        block.sub_block_height,
        uint32(height),
        block.weight,
        block.total_iters,
        block.reward_chain_sub_block.signage_point_index,
        block.reward_chain_sub_block.challenge_chain_ip_vdf.output,
        icc_output,
        block.reward_chain_sub_block.get_hash(),
        cbi.get_hash(),
        sub_slot_iters,
        block.foliage_sub_block.foliage_sub_block_data.pool_target.puzzle_hash,
        block.foliage_sub_block.foliage_sub_block_data.farmer_reward_puzzle_hash,
        required_iters,
        deficit,
        overflow,
        timestamp,
        prev_block_hash,
        fees,
        # reward_claims_incorporated,
        finished_challenge_slot_hashes,
        finished_infused_challenge_slot_hashes,
        finished_reward_slot_hashes,
        ses,
    )
def new_peak(
    self,
    peak: SubBlockRecord,
    sp_sub_slot: Optional[EndOfSubSlotBundle],  # None if not overflow, or in first/second slot
    ip_sub_slot: Optional[EndOfSubSlotBundle],  # None if in first slot
    reorg: bool,
    sub_blocks: Dict[bytes32, SubBlockRecord],
) -> Tuple[Optional[EndOfSubSlotBundle], List[SignagePoint], List[timelord_protocol.NewInfusionPointVDF]]:
    """
    If the peak is an overflow block, must provide two sub-slots: one for the current sub-slot and one for
    the prev sub-slot (since we still might get more sub-blocks with an sp in the previous sub-slot)

    Prunes the finished-sub-slot cache around the new peak, then drains the
    future EOS / SP / IP caches keyed by the peak's reward infusion challenge,
    returning any entries that are now valid.
    """
    assert len(self.finished_sub_slots) >= 1
    new_finished_sub_slots = []
    total_iters_peak = peak.ip_sub_slot_total_iters(self.constants)
    ip_sub_slot_found = False
    if not reorg:
        # This is a new peak that adds to the last peak. We can clear data in old sub-slots. (and new ones)
        for index, (sub_slot, sps, total_iters) in enumerate(self.finished_sub_slots):
            if sub_slot == sp_sub_slot:
                # In the case of a peak overflow sub-block (or first ss), the previous sub-slot is added
                if sp_sub_slot is None:
                    # This is a non-overflow sub block
                    if (
                        ip_sub_slot is not None
                        and ip_sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
                        == self.constants.FIRST_CC_CHALLENGE
                    ):
                        new_finished_sub_slots.append((sub_slot, sps, total_iters))
                        continue
                else:
                    # Overflow sub block
                    new_finished_sub_slots.append((sub_slot, sps, total_iters))
                    continue
            if sub_slot == ip_sub_slot:
                ip_sub_slot_found = True
                new_finished_sub_slots.append((sub_slot, sps, total_iters))
        self.finished_sub_slots = new_finished_sub_slots
    if reorg or not ip_sub_slot_found:
        # This is either a reorg, which means some sub-blocks are reverted, or this sub slot is not in our current
        # cache, delete the entire cache and add this sub slot.
        self.clear_slots()
        if peak.overflow:
            # An overflow peak also needs the previous sub-slot cached, since
            # more sub-blocks with signage points in it may still arrive.
            prev_sub_slot_total_iters = peak.sp_sub_slot_total_iters(self.constants)
            assert total_iters_peak != prev_sub_slot_total_iters
            self.finished_sub_slots = [(
                sp_sub_slot,
                [None] * self.constants.NUM_SPS_SUB_SLOT,
                prev_sub_slot_total_iters,
            )]
        log.info(
            f"5. Adding sub slot {ip_sub_slot is None}, total iters: {total_iters_peak}"
        )
        self.finished_sub_slots.append((
            ip_sub_slot,
            [None] * self.constants.NUM_SPS_SUB_SLOT,
            total_iters_peak,
        ))
    # Drain future caches keyed on the peak's reward infusion challenge,
    # keeping only entries that validate against the new peak.
    new_eos: Optional[EndOfSubSlotBundle] = None
    new_sps: List[SignagePoint] = []
    new_ips: List[timelord_protocol.NewInfusionPointVDF] = []
    for eos in self.future_eos_cache.get(peak.reward_infusion_new_challenge, []):
        if self.new_finished_sub_slot(eos, sub_blocks, peak) is not None:
            new_eos = eos
            break
    # This cache is not currently being used
    for sp in self.future_sp_cache.get(peak.reward_infusion_new_challenge, []):
        assert sp.cc_vdf is not None
        index = uint8(sp.cc_vdf.number_of_iterations // peak.sub_slot_iters)
        if self.new_signage_point(index, sub_blocks, peak, peak.sub_slot_iters, sp):
            new_sps.append(sp)
    for ip in self.future_ip_cache.get(peak.reward_infusion_new_challenge, []):
        new_ips.append(ip)
    self.future_eos_cache.pop(peak.reward_infusion_new_challenge, [])
    self.future_sp_cache.pop(peak.reward_infusion_new_challenge, [])
    self.future_ip_cache.pop(peak.reward_infusion_new_challenge, [])
    return new_eos, new_sps, new_ips