async def load_blocks_dont_validate(
    blocks,
) -> Tuple[
    Dict[bytes32, HeaderBlock],
    Dict[uint32, bytes32],
    Dict[bytes32, BlockRecord],
    Dict[uint32, SubEpochSummary],
]:
    header_cache: Dict[bytes32, HeaderBlock] = {}
    height_to_hash: Dict[uint32, bytes32] = {}
    sub_blocks: Dict[bytes32, BlockRecord] = {}
    # Keyed by height, not header hash, so the annotation uses uint32
    sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
    prev_block = None
    difficulty = test_constants.DIFFICULTY_STARTING
    block: FullBlock
    for block in blocks:
        if block.height > 0:
            assert prev_block is not None
            difficulty = block.reward_chain_block.weight - prev_block.weight

        if block.reward_chain_block.challenge_chain_sp_vdf is None:
            assert block.reward_chain_block.signage_point_index == 0
            cc_sp: bytes32 = block.reward_chain_block.pos_ss_cc_challenge_hash
        else:
            cc_sp = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()

        quality_string: Optional[bytes32] = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
            test_constants,
            block.reward_chain_block.pos_ss_cc_challenge_hash,
            cc_sp,
        )
        assert quality_string is not None

        required_iters: uint64 = calculate_iterations_quality(
            test_constants.DIFFICULTY_CONSTANT_FACTOR,
            quality_string,
            block.reward_chain_block.proof_of_space.size,
            difficulty,
            cc_sp,
        )

        # TODO: address hint error and remove ignore
        #       error: Argument 2 to "BlockCache" has incompatible type "Dict[uint32, bytes32]"; expected
        #       "Optional[Dict[bytes32, HeaderBlock]]"  [arg-type]
        sub_block = block_to_block_record(
            test_constants,
            BlockCache(sub_blocks, height_to_hash),  # type: ignore[arg-type]
            required_iters,
            block,
            None,
        )
        sub_blocks[block.header_hash] = sub_block
        height_to_hash[block.height] = block.header_hash
        header_cache[block.header_hash] = get_block_header(block, [], [])
        if sub_block.sub_epoch_summary_included is not None:
            sub_epoch_summaries[block.height] = sub_block.sub_epoch_summary_included
        prev_block = block
    return header_cache, height_to_hash, sub_blocks, sub_epoch_summaries
def do_simulation(days: int, diff: int, k: int, num: int):
    # `ssi` (the sub-slot iterations constant) and `token_bytes` are assumed to be
    # defined at module scope; random 32-byte strings stand in for quality strings
    # and signage point hashes.
    successes = 0
    for i in range(9216 * days):  # 9216 signage points per day
        for j in range(num):
            s = calculate_iterations_quality(2**67, token_bytes(32), k, diff, token_bytes(32)) < (ssi // 64)
            if s:
                successes += 1
    return successes
def do_simulation(days: int, diff: int, k: int, num: int):
    successes = 0
    for i in range(int(9216 * days)):
        for j in range(num):
            # Plot filter
            if random.random() < (1.0 / 512.0):
                s = calculate_iterations_quality(2**67, token_bytes(32), k, diff, token_bytes(32)) < (ssi // 64)
                if s:
                    successes += 1
    return successes
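# A closed-form sanity check for the two Monte Carlo simulations above. This is a sketch,
# not part of the original code. It assumes the standard Chia formula
#     required_iters = difficulty * DIFFICULTY_CONSTANT_FACTOR * (u / 2**256) / expected_plot_size(k)
# where u is the 256-bit sp quality string treated as uniform, so that
#     P(required_iters < ssi // 64) ~= (ssi // 64) * expected_plot_size(k) / (difficulty * DIFFICULTY_CONSTANT_FACTOR)
# The `ssi` parameter and the 2**67 constant factor mirror the simulation code above; the
# first variant omits the plot filter, so drop the 1/512 factor when comparing against it.
def expected_successes(days: float, diff: int, k: int, num: int, ssi: int, dcf: int = 2**67) -> float:
    expected_plot_size = ((2 * k) + 1) * (2 ** (k - 1))  # nominal plot size, per chia's _expected_plot_size
    p_win_per_sp = min(1.0, (ssi // 64) * expected_plot_size / (diff * dcf))
    # 9216 signage points per day, 1/512 of plots pass the plot filter
    return 9216 * days * num * (1.0 / 512.0) * p_win_per_sp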
def test_win_percentage(self):
    """
    Tests that the percentage of blocks won is proportional to the space of each farmer,
    with the assumption that all farmers have access to the same VDF speed.
    """
    farmer_ks = {
        uint8(32): 100,
        uint8(33): 100,
        uint8(34): 100,
        uint8(35): 100,
        uint8(36): 100,
    }
    farmer_space = {k: _expected_plot_size(uint8(k)) * count for k, count in farmer_ks.items()}
    total_space = sum(farmer_space.values())
    percentage_space = {k: float(sp / total_space) for k, sp in farmer_space.items()}
    wins = {k: 0 for k in farmer_ks.keys()}
    total_slots = 50
    num_sps = 16
    sp_interval_iters = uint64(100000000 // 32)
    difficulty = uint64(500000000000)

    for slot_index in range(total_slots):
        total_wins_in_slot = 0
        for sp_index in range(num_sps):
            sp_hash = std_hash(slot_index.to_bytes(4, "big") + sp_index.to_bytes(4, "big"))
            for k, count in farmer_ks.items():
                for farmer_index in range(count):
                    quality = std_hash(
                        slot_index.to_bytes(4, "big") + k.to_bytes(1, "big") + bytes(farmer_index)
                    )
                    required_iters = calculate_iterations_quality(2**25, quality, k, difficulty, sp_hash)
                    if required_iters < sp_interval_iters:
                        wins[k] += 1
                        total_wins_in_slot += 1

    win_percentage = {k: wins[k] / sum(wins.values()) for k in farmer_ks.keys()}
    for k in farmer_ks.keys():
        # Win rate is proportional to percentage of space
        assert abs(win_percentage[k] - percentage_space[k]) < 0.01
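# Why the assertion above should hold, as a sketch under the same assumption about the
# iterations formula: each plot's win probability per signage point is proportional to
# expected_plot_size(k), with the same proportionality constant
# (sp_interval_iters / (difficulty * difficulty_constant_factor)) for every farmer.
# Summing over a farmer's plots, the expected share of wins equals the farmer's share of
# total space, which is exactly `percentage_space[k]` in the test:
def expected_win_share(farmer_ks: dict) -> dict:
    # Analytic counterpart of `percentage_space` computed in the test above
    space = {k: ((2 * k) + 1) * (2 ** (k - 1)) * count for k, count in farmer_ks.items()}
    total = sum(space.values())
    return {k: s / total for k, s in space.items()}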
def iters_from_block(
    constants,
    reward_chain_block: Union[RewardChainBlock, RewardChainBlockUnfinished],
    sub_slot_iters: uint64,
    difficulty: uint64,
) -> Tuple[uint64, uint64]:
    if reward_chain_block.challenge_chain_sp_vdf is None:
        assert reward_chain_block.signage_point_index == 0
        cc_sp: bytes32 = reward_chain_block.pos_ss_cc_challenge_hash
    else:
        cc_sp = reward_chain_block.challenge_chain_sp_vdf.output.get_hash()

    quality_string: Optional[bytes32] = reward_chain_block.proof_of_space.verify_and_get_quality_string(
        constants,
        reward_chain_block.pos_ss_cc_challenge_hash,
        cc_sp,
    )
    assert quality_string is not None

    required_iters: uint64 = calculate_iterations_quality(
        constants.DIFFICULTY_CONSTANT_FACTOR,
        quality_string,
        reward_chain_block.proof_of_space.size,
        difficulty,
        cc_sp,
    )
    return (
        calculate_sp_iters(constants, sub_slot_iters, reward_chain_block.signage_point_index),
        calculate_ip_iters(
            constants,
            sub_slot_iters,
            reward_chain_block.signage_point_index,
            required_iters,
        ),
    )
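# For reference, a sketch of how the two returned values relate, based on chia's
# pot_iterations module; treat the exact constant names as assumptions:
#     sp_interval_iters = sub_slot_iters // NUM_SPS_SUB_SLOT            # 64 signage points per sub-slot
#     sp_iters          = signage_point_index * sp_interval_iters
#     ip_iters          = (sp_iters + NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters) % sub_slot_iters
# That is, the infusion point trails the signage point by a fixed gap plus the
# quality-dependent required_iters, wrapping around within the sub-slot.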
async def process_partial(
    self,
    partial: SubmitPartial,
    time_received_partial: uint64,
    balance: uint64,
    curr_difficulty: uint64,
) -> Dict:
    if partial.payload.suggested_difficulty < self.min_difficulty:
        return {
            "error_code": PoolErr.INVALID_DIFFICULTY.value,
            "error_message": f"Invalid difficulty {partial.payload.suggested_difficulty}. "
            f"minimum: {self.min_difficulty}",
            "points_balance": balance,
            "curr_difficulty": curr_difficulty,
        }

    # Validate signatures
    pk1: G1Element = partial.payload.owner_public_key
    m1: bytes = partial.payload.rewards_target
    pk2: G1Element = partial.payload.proof_of_space.plot_public_key
    m2: bytes = partial.payload.get_hash()
    valid_sig = AugSchemeMPL.aggregate_verify(
        [pk1, pk2], [m1, m2], partial.rewards_and_partial_aggregate_signature
    )
    if not valid_sig:
        return {
            "error_code": PoolErr.INVALID_SIGNATURE.value,
            "error_message": f"The aggregate signature is invalid {partial.rewards_and_partial_aggregate_signature}",
            "points_balance": balance,
            "difficulty": curr_difficulty,
        }

    if partial.payload.proof_of_space.pool_contract_puzzle_hash != await self.calculate_p2_singleton_ph(partial):
        return {
            "error_code": PoolErr.INVALID_P2_SINGLETON_PUZZLE_HASH.value,
            "error_message": f"Invalid pool contract puzzle hash "
            f"{partial.payload.proof_of_space.pool_contract_puzzle_hash}",
            "points_balance": balance,
            "difficulty": curr_difficulty,
        }

    if partial.payload.end_of_sub_slot:
        response = await self.node_rpc_client.get_recent_signage_point_or_eos(None, partial.payload.sp_hash)
    else:
        response = await self.node_rpc_client.get_recent_signage_point_or_eos(partial.payload.sp_hash, None)

    if response is None or response["reverted"]:
        return {
            "error_code": PoolErr.NOT_FOUND.value,
            "error_message": f"Did not find signage point or EOS {partial.payload.sp_hash}, {response}",
            "points_balance": balance,
            "difficulty": curr_difficulty,
        }
    node_time_received_sp = response["time_received"]
    signage_point: Optional[SignagePoint] = response.get("signage_point", None)
    end_of_sub_slot: Optional[EndOfSubSlotBundle] = response.get("eos", None)

    if time_received_partial - node_time_received_sp > self.partial_time_limit:
        return {
            "error_code": PoolErr.TOO_LATE.value,
            "error_message": f"Received partial in {time_received_partial - node_time_received_sp}. "
            f"Make sure your proof of space lookups are fast, and network connectivity is good. Response "
            f"must happen in less than {self.partial_time_limit} seconds. NAS or network farming can be an "
            f"issue",
            "points_balance": balance,
            "curr_difficulty": curr_difficulty,
        }

    # Validate the proof
    if signage_point is not None:
        challenge_hash: bytes32 = signage_point.cc_vdf.challenge
    else:
        challenge_hash = end_of_sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.get_hash()

    quality_string: Optional[bytes32] = partial.payload.proof_of_space.verify_and_get_quality_string(
        self.constants, challenge_hash, partial.payload.sp_hash
    )
    if quality_string is None:
        return {
            "error_code": PoolErr.INVALID_PROOF.value,
            "error_message": f"Invalid proof of space {partial.payload.sp_hash}",
            "points_balance": balance,
            "curr_difficulty": curr_difficulty,
        }

    required_iters: uint64 = calculate_iterations_quality(
        self.constants.DIFFICULTY_CONSTANT_FACTOR,
        quality_string,
        partial.payload.proof_of_space.size,
        curr_difficulty,
        partial.payload.sp_hash,
    )

    if required_iters >= self.iters_limit:
        return {
            "error_code": PoolErr.PROOF_NOT_GOOD_ENOUGH.value,
            "error_message": f"Proof of space has required iters {required_iters}, too high for difficulty "
            f"{curr_difficulty}",
            "points_balance": balance,
            "curr_difficulty": curr_difficulty,
        }

    await self.pending_point_partials.put((partial, time_received_partial, curr_difficulty))

    return {"points_balance": balance, "curr_difficulty": curr_difficulty}
async def new_proof_of_space(
    self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
    """
    This is a response from the harvester, for a NewChallenge. Here we check if the proof
    of space is sufficiently good, and if so, we ask for the whole proof.
    """
    if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
        self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
        self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))

    max_pos_per_sp = 5
    if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
        # This will likely never happen for any farmer with less than 10% of global space
        # It's meant to make testnets more stable
        self.farmer.log.info(
            f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
            f"{new_proof_of_space.sp_hash}"
        )
        return

    if new_proof_of_space.sp_hash not in self.farmer.sps:
        self.farmer.log.warning(
            f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
        )
        return

    sps = self.farmer.sps[new_proof_of_space.sp_hash]
    for sp in sps:
        computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
            self.farmer.constants,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
        )
        if computed_quality_string is None:
            self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
            return

        self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1

        required_iters: uint64 = calculate_iterations_quality(
            self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
            computed_quality_string,
            new_proof_of_space.proof.size,
            sp.difficulty,
            new_proof_of_space.sp_hash,
        )
        # Double check that the iters are good
        assert required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters)

        # Proceed at getting the signatures for this PoSpace
        request = harvester_protocol.RequestSignatures(
            new_proof_of_space.plot_identifier,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
            [sp.challenge_chain_sp, sp.reward_chain_sp],
        )

        if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
            self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = [
                (
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.proof,
                )
            ]
        else:
            self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
                (
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.proof,
                )
            )
        self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
        self.farmer.quality_str_to_identifiers[computed_quality_string] = (
            new_proof_of_space.plot_identifier,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
            peer.peer_node_id,
        )
        self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))

        return make_msg(ProtocolMessageTypes.request_signatures, request)
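# Numeric illustration of the farmer's "good enough" check above (hypothetical values,
# not from the original code): with 64 signage points per sub-slot, a proof is eligible
# for a block only when its quality-derived required_iters is below one signage point
# interval.
sub_slot_iters = 100_000_000
sp_interval_iters = sub_slot_iters // 64  # what calculate_sp_interval_iters computes, assuming NUM_SPS_SUB_SLOT == 64
assert sp_interval_iters == 1_562_500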
async def pre_validate_blocks_multiprocessing(
    constants: ConsensusConstants,
    constants_json: Dict,
    block_records: BlockchainInterface,
    blocks: Sequence[Union[FullBlock, HeaderBlock]],
    pool: ProcessPoolExecutor,
    check_filter: bool,
    npc_results: Dict[uint32, NPCResult],
    get_block_generator: Optional[Callable],
    batch_size: int,
    wp_summaries: Optional[List[SubEpochSummary]] = None,
) -> Optional[List[PreValidationResult]]:
    """
    This method must be called under the blockchain lock.
    If all the full blocks pass pre-validation (only the headers are validated), returns
    the list of required iters. If any validation issue occurs, returns None.

    Args:
        check_filter:
        constants_json:
        pool:
        constants:
        block_records:
        blocks: list of full blocks to validate (must be connected to current chain)
        npc_results:
        get_block_generator:
    """
    prev_b: Optional[BlockRecord] = None
    # Collects all the recent blocks (up to the previous sub-epoch)
    recent_blocks: Dict[bytes32, BlockRecord] = {}
    recent_blocks_compressed: Dict[bytes32, BlockRecord] = {}
    num_sub_slots_found = 0
    num_blocks_seen = 0
    if blocks[0].height > 0:
        if not block_records.contains_block(blocks[0].prev_header_hash):
            return [PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None)]
        curr = block_records.block_record(blocks[0].prev_header_hash)
        num_sub_slots_to_look_for = 3 if curr.overflow else 2
        while (
            curr.sub_epoch_summary_included is None
            or num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS
            or num_sub_slots_found < num_sub_slots_to_look_for
        ) and curr.height > 0:
            if num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS or num_sub_slots_found < num_sub_slots_to_look_for:
                recent_blocks_compressed[curr.header_hash] = curr

            if curr.first_in_sub_slot:
                assert curr.finished_challenge_slot_hashes is not None
                num_sub_slots_found += len(curr.finished_challenge_slot_hashes)
            recent_blocks[curr.header_hash] = curr
            if curr.is_transaction_block:
                num_blocks_seen += 1
            curr = block_records.block_record(curr.prev_hash)
        recent_blocks[curr.header_hash] = curr
        recent_blocks_compressed[curr.header_hash] = curr
    block_record_was_present = []
    for block in blocks:
        block_record_was_present.append(block_records.contains_block(block.header_hash))

    diff_ssis: List[Tuple[uint64, uint64]] = []
    for block in blocks:
        if block.height != 0:
            assert block_records.contains_block(block.prev_header_hash)
            if prev_b is None:
                prev_b = block_records.block_record(block.prev_header_hash)

        sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
            constants, len(block.finished_sub_slots) > 0, prev_b, block_records
        )

        overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
        challenge = get_block_challenge(constants, block, BlockCache(recent_blocks), prev_b is None, overflow, False)
        if block.reward_chain_block.challenge_chain_sp_vdf is None:
            cc_sp_hash: bytes32 = challenge
        else:
            cc_sp_hash = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
        q_str: Optional[bytes32] = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
            constants, challenge, cc_sp_hash
        )
        if q_str is None:
            for i, block_i in enumerate(blocks):
                if not block_record_was_present[i] and block_records.contains_block(block_i.header_hash):
                    block_records.remove_block_record(block_i.header_hash)
            return None

        required_iters: uint64 = calculate_iterations_quality(
            constants.DIFFICULTY_CONSTANT_FACTOR,
            q_str,
            block.reward_chain_block.proof_of_space.size,
            difficulty,
            cc_sp_hash,
        )

        block_rec = block_to_block_record(
            constants,
            block_records,
            required_iters,
            block,
            None,
        )

        if block_rec.sub_epoch_summary_included is not None and wp_summaries is not None:
            idx = int(block.height / constants.SUB_EPOCH_BLOCKS) - 1
            next_ses = wp_summaries[idx]
            if not block_rec.sub_epoch_summary_included.get_hash() == next_ses.get_hash():
                log.error("sub_epoch_summary does not match wp sub_epoch_summary list")
                return None

        # Makes sure to not override the valid blocks already in block_records
        if not block_records.contains_block(block_rec.header_hash):
            block_records.add_block_record(block_rec)  # Temporarily add block to dict
            recent_blocks[block_rec.header_hash] = block_rec
            recent_blocks_compressed[block_rec.header_hash] = block_rec
        else:
            recent_blocks[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
            recent_blocks_compressed[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
        prev_b = block_rec
        diff_ssis.append((difficulty, sub_slot_iters))

    block_dict: Dict[bytes32, Union[FullBlock, HeaderBlock]] = {}
    for i, block in enumerate(blocks):
        block_dict[block.header_hash] = block
        if not block_record_was_present[i]:
            block_records.remove_block_record(block.header_hash)

    recent_sb_compressed_pickled = {bytes(k): bytes(v) for k, v in recent_blocks_compressed.items()}
    npc_results_pickled = {}
    for k, v in npc_results.items():
        npc_results_pickled[k] = bytes(v)
    futures = []
    # Pool of workers to validate blocks concurrently
    for i in range(0, len(blocks), batch_size):
        end_i = min(i + batch_size, len(blocks))
        blocks_to_validate = blocks[i:end_i]
        if any([len(block.finished_sub_slots) > 0 for block in blocks_to_validate]):
            final_pickled = {bytes(k): bytes(v) for k, v in recent_blocks.items()}
        else:
            final_pickled = recent_sb_compressed_pickled
        b_pickled: Optional[List[bytes]] = None
        hb_pickled: Optional[List[bytes]] = None
        previous_generators: List[Optional[bytes]] = []
        for block in blocks_to_validate:
            # We ONLY add blocks which are in the past, based on header hashes (which are validated later) to the
            # prev blocks dict. This is important since these blocks are assumed to be valid and are used as previous
            # generator references
            prev_blocks_dict: Dict[uint32, Union[FullBlock, HeaderBlock]] = {}
            curr_b: Union[FullBlock, HeaderBlock] = block

            while curr_b.prev_header_hash in block_dict:
                curr_b = block_dict[curr_b.prev_header_hash]
                prev_blocks_dict[curr_b.header_hash] = curr_b

            if isinstance(block, FullBlock):
                assert get_block_generator is not None
                if b_pickled is None:
                    b_pickled = []
                b_pickled.append(bytes(block))
                try:
                    block_generator: Optional[BlockGenerator] = await get_block_generator(block, prev_blocks_dict)
                except ValueError:
                    return None
                if block_generator is not None:
                    previous_generators.append(bytes(block_generator))
                else:
                    previous_generators.append(None)
            else:
                if hb_pickled is None:
                    hb_pickled = []
                hb_pickled.append(bytes(block))

        futures.append(
            asyncio.get_running_loop().run_in_executor(
                pool,
                batch_pre_validate_blocks,
                constants_json,
                final_pickled,
                b_pickled,
                hb_pickled,
                previous_generators,
                npc_results_pickled,
                check_filter,
                [diff_ssis[j][0] for j in range(i, end_i)],
                [diff_ssis[j][1] for j in range(i, end_i)],
            )
        )
    # Collect all results into one flat list
    return [
        PreValidationResult.from_bytes(result)
        for batch_result in (await asyncio.gather(*futures))
        for result in batch_result
    ]
def blocking_lookup(filename: Path, plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
    # Uses the DiskProver object to lookup qualities. This is a blocking call,
    # so it should be run in a thread pool.
    # Note: `self` and `new_challenge` are captured from the enclosing scope; this is a
    # nested helper inside the harvester's signage point handler.
    try:
        plot_id = plot_info.prover.get_id()
        sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
            plot_id,
            new_challenge.challenge_hash,
            new_challenge.sp_hash,
        )
        try:
            quality_strings = plot_info.prover.get_qualities_for_challenge(sp_challenge_hash)
        except Exception as e:
            self.harvester.log.error(f"Error using prover object {e}")
            self.harvester.log.error(
                f"File: {filename} Plot ID: {plot_id.hex()}, "
                f"challenge: {sp_challenge_hash}, plot_info: {plot_info}"
            )
            return []

        responses: List[Tuple[bytes32, ProofOfSpace]] = []
        if quality_strings is not None:
            difficulty = new_challenge.difficulty
            sub_slot_iters = new_challenge.sub_slot_iters
            if plot_info.pool_contract_puzzle_hash is not None:
                # If we are pooling, override the difficulty and sub slot iters with the pool threshold info.
                # This will mean more proofs actually get found, but they are only submitted to the pool,
                # not the blockchain
                for pool_difficulty in new_challenge.pool_difficulties:
                    if pool_difficulty.pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash:
                        difficulty = pool_difficulty.difficulty
                        sub_slot_iters = pool_difficulty.sub_slot_iters

            # Found proofs of space (on average 1 is expected per plot)
            for index, quality_str in enumerate(quality_strings):
                required_iters: uint64 = calculate_iterations_quality(
                    self.harvester.constants.DIFFICULTY_CONSTANT_FACTOR,
                    quality_str,
                    plot_info.prover.get_size(),
                    difficulty,
                    new_challenge.sp_hash,
                )
                sp_interval_iters = calculate_sp_interval_iters(self.harvester.constants, sub_slot_iters)
                if required_iters < sp_interval_iters:
                    # Found a very good proof of space! will fetch the whole proof from disk,
                    # then send to farmer
                    try:
                        proof_xs = plot_info.prover.get_full_proof(sp_challenge_hash, index)
                    except Exception as e:
                        self.harvester.log.error(f"Exception fetching full proof for {filename}. {e}")
                        self.harvester.log.error(
                            f"File: {filename} Plot ID: {plot_id.hex()}, challenge: {sp_challenge_hash}, "
                            f"plot_info: {plot_info}"
                        )
                        continue

                    # Look up local_sk from plot to save locked memory
                    (
                        pool_public_key_or_puzzle_hash,
                        farmer_public_key,
                        local_master_sk,
                    ) = parse_plot_info(plot_info.prover.get_memo())
                    local_sk = master_sk_to_local_sk(local_master_sk)
                    include_taproot = plot_info.pool_contract_puzzle_hash is not None
                    plot_public_key = ProofOfSpace.generate_plot_public_key(
                        local_sk.get_g1(), farmer_public_key, include_taproot
                    )
                    responses.append(
                        (
                            quality_str,
                            ProofOfSpace(
                                sp_challenge_hash,
                                plot_info.pool_public_key,
                                plot_info.pool_contract_puzzle_hash,
                                plot_public_key,
                                uint8(plot_info.prover.get_size()),
                                proof_xs,
                            ),
                        )
                    )
        return responses
    except Exception as e:
        self.harvester.log.error(f"Unknown error: {e}")
        return []
async def new_proof_of_space(
    self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
    """
    This is a response from the harvester, for a NewChallenge. Here we check if the proof
    of space is sufficiently good, and if so, we ask for the whole proof.
    """
    if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
        self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
        self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))

    max_pos_per_sp = 5
    if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
        # This will likely never happen for any farmer with less than 10% of global space
        # It's meant to make testnets more stable
        self.farmer.log.info(
            f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
            f"{new_proof_of_space.sp_hash}"
        )
        return None

    if new_proof_of_space.sp_hash not in self.farmer.sps:
        self.farmer.log.warning(
            f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
        )
        return None

    sps = self.farmer.sps[new_proof_of_space.sp_hash]
    for sp in sps:
        computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
            self.farmer.constants,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
        )
        if computed_quality_string is None:
            self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
            return None

        self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1

        required_iters: uint64 = calculate_iterations_quality(
            self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
            computed_quality_string,
            new_proof_of_space.proof.size,
            sp.difficulty,
            new_proof_of_space.sp_hash,
        )

        # If the iters are good enough to make a block, proceed with the block making flow
        if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
            # Proceed at getting the signatures for this PoSpace
            request = harvester_protocol.RequestSignatures(
                new_proof_of_space.plot_identifier,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
                [sp.challenge_chain_sp, sp.reward_chain_sp],
            )

            if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
                self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = []
            self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
                (
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.proof,
                )
            )
            self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
            self.farmer.quality_str_to_identifiers[computed_quality_string] = (
                new_proof_of_space.plot_identifier,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
                peer.peer_node_id,
            )
            self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))

            await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))

        p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
        if p2_singleton_puzzle_hash is not None:
            # Otherwise, send the proof of space to the pool
            # When we win a block, we also send the partial to the pool
            if p2_singleton_puzzle_hash not in self.farmer.pool_state:
                self.farmer.log.info(f"Did not find pool info for {p2_singleton_puzzle_hash}")
                return
            pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
            pool_url = pool_state_dict["pool_config"].pool_url
            if pool_url == "":
                return

            if pool_state_dict["current_difficulty"] is None:
                self.farmer.log.warning(
                    f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
                    f"check communication with the pool, skipping this partial to {pool_url}."
                )
                return

            required_iters = calculate_iterations_quality(
                self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
                computed_quality_string,
                new_proof_of_space.proof.size,
                pool_state_dict["current_difficulty"],
                new_proof_of_space.sp_hash,
            )
            if required_iters >= calculate_sp_interval_iters(
                self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
            ):
                self.farmer.log.info(
                    f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
                )
                return

            authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
            if authentication_token_timeout is None:
                self.farmer.log.warning(
                    f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
                    f", check communication with the pool."
                )
                return

            # Submit partial to pool
            is_eos = new_proof_of_space.signage_point_index == 0

            payload = PostPartialPayload(
                pool_state_dict["pool_config"].launcher_id,
                get_current_authentication_token(authentication_token_timeout),
                new_proof_of_space.proof,
                new_proof_of_space.sp_hash,
                is_eos,
                peer.peer_node_id,
            )

            # The plot key is 2/2 so we need the harvester's half of the signature
            m_to_sign = payload.get_hash()
            request = harvester_protocol.RequestSignatures(
                new_proof_of_space.plot_identifier,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
                [m_to_sign],
            )
            response: Any = await peer.request_signatures(request)
            if not isinstance(response, harvester_protocol.RespondSignatures):
                self.farmer.log.error(f"Invalid response from harvester: {response}")
                return

            assert len(response.message_signatures) == 1

            plot_signature: Optional[G2Element] = None
            for sk in self.farmer.get_private_keys():
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, True)
                    assert agg_pk == new_proof_of_space.proof.plot_public_key
                    sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
                    taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                    taproot_sig: G2Element = AugSchemeMPL.sign(taproot_sk, m_to_sign, agg_pk)

                    plot_signature = AugSchemeMPL.aggregate(
                        [sig_farmer, response.message_signatures[0][1], taproot_sig]
                    )
                    assert AugSchemeMPL.verify(agg_pk, m_to_sign, plot_signature)
            authentication_pk = pool_state_dict["pool_config"].authentication_public_key
            # The original `if bytes(authentication_pk) is None:` could never be True; check the key itself
            if authentication_pk is None:
                self.farmer.log.error(f"No authentication sk for {authentication_pk}")
                return
            authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(authentication_pk)]
            authentication_signature = AugSchemeMPL.sign(authentication_sk, m_to_sign)

            assert plot_signature is not None
            agg_sig: G2Element = AugSchemeMPL.aggregate([plot_signature, authentication_signature])

            post_partial_request: PostPartialRequest = PostPartialRequest(payload, agg_sig)
            post_partial_body = json.dumps(post_partial_request.to_json_dict())
            self.farmer.log.info(
                f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
            )
            pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
            pool_state_dict["points_found_24h"].append((time.time(), pool_state_dict["current_difficulty"]))
            headers = {
                "content-type": "application/json;",
            }
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.post(f"{pool_url}/partial", data=post_partial_body, headers=headers) as resp:
                        if resp.ok:
                            pool_response: Dict = json.loads(await resp.text())
                            self.farmer.log.info(f"Pool response: {pool_response}")
                            if "error_code" in pool_response:
                                self.farmer.log.error(
                                    f"Error in pooling: "
                                    f"{pool_response['error_code'], pool_response['error_message']}"
                                )
                                pool_state_dict["pool_errors_24h"].append(pool_response)
                                if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
                                    self.farmer.log.error(
                                        "Partial not good enough, forcing pool farmer update to "
                                        "get our current difficulty."
                                    )
                                    pool_state_dict["next_farmer_update"] = 0
                                    await self.farmer.update_pool_state()
                            else:
                                new_difficulty = pool_response["new_difficulty"]
                                pool_state_dict["points_acknowledged_since_start"] += new_difficulty
                                pool_state_dict["points_acknowledged_24h"].append((time.time(), new_difficulty))
                                pool_state_dict["current_difficulty"] = new_difficulty
                        else:
                            self.farmer.log.error(f"Error sending partial to {pool_url}, {resp.status}")
            except Exception as e:
                self.farmer.log.error(f"Error connecting to pool: {e}")
                return

            return
async def process_partial(
    self,
    partial: PostPartialRequest,
    farmer_record: FarmerRecord,
    time_received_partial: uint64,
) -> Dict:
    # Validate signatures
    message: bytes32 = partial.payload.get_hash()
    pk1: G1Element = partial.payload.proof_of_space.plot_public_key
    pk2: G1Element = farmer_record.authentication_public_key
    valid_sig = AugSchemeMPL.aggregate_verify([pk1, pk2], [message, message], partial.aggregate_signature)
    if not valid_sig:
        return error_dict(
            PoolErrorCode.INVALID_SIGNATURE,
            f"The aggregate signature is invalid {partial.aggregate_signature}",
        )

    # TODO (chia-dev): Check DB p2_singleton_puzzle_hash and compare
    # if partial.payload.proof_of_space.pool_contract_puzzle_hash != p2_singleton_puzzle_hash:
    #     return error_dict(
    #         PoolErrorCode.INVALID_P2_SINGLETON_PUZZLE_HASH,
    #         f"Invalid plot pool contract puzzle hash {partial.payload.proof_of_space.pool_contract_puzzle_hash}",
    #     )

    async def get_signage_point_or_eos():
        if partial.payload.end_of_sub_slot:
            return await self.node_rpc_client.get_recent_signage_point_or_eos(None, partial.payload.sp_hash)
        else:
            return await self.node_rpc_client.get_recent_signage_point_or_eos(partial.payload.sp_hash, None)

    response = await get_signage_point_or_eos()
    if response is None:
        # Try again after 10 seconds in case we just didn't yet receive the signage point
        await asyncio.sleep(10)
        response = await get_signage_point_or_eos()

    if response is None or response["reverted"]:
        return error_dict(
            PoolErrorCode.NOT_FOUND,
            f"Did not find signage point or EOS {partial.payload.sp_hash}, {response}",
        )
    node_time_received_sp = response["time_received"]
    signage_point: Optional[SignagePoint] = response.get("signage_point", None)
    end_of_sub_slot: Optional[EndOfSubSlotBundle] = response.get("eos", None)

    if time_received_partial - node_time_received_sp > self.partial_time_limit:
        return error_dict(
            PoolErrorCode.TOO_LATE,
            f"Received partial in {time_received_partial - node_time_received_sp}. "
            f"Make sure your proof of space lookups are fast, and network connectivity is good. "
            f"Response must happen in less than {self.partial_time_limit} seconds. NAS or network"
            f" farming can be an issue",
        )

    # Validate the proof
    if signage_point is not None:
        challenge_hash: bytes32 = signage_point.cc_vdf.challenge
    else:
        challenge_hash = end_of_sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.get_hash()

    quality_string: Optional[bytes32] = partial.payload.proof_of_space.verify_and_get_quality_string(
        self.constants, challenge_hash, partial.payload.sp_hash
    )
    if quality_string is None:
        return error_dict(
            PoolErrorCode.INVALID_PROOF,
            f"Invalid proof of space {partial.payload.sp_hash}",
        )

    current_difficulty = farmer_record.difficulty
    required_iters: uint64 = calculate_iterations_quality(
        self.constants.DIFFICULTY_CONSTANT_FACTOR,
        quality_string,
        partial.payload.proof_of_space.size,
        current_difficulty,
        partial.payload.sp_hash,
    )

    if required_iters >= self.iters_limit:
        return error_dict(
            PoolErrorCode.PROOF_NOT_GOOD_ENOUGH,
            f"Proof of space has required iters {required_iters}, too high for difficulty "
            f"{current_difficulty}",
        )

    await self.pending_point_partials.put((partial, time_received_partial, current_difficulty))

    async with self.store.lock:
        # Obtains the new record in case we just updated difficulty
        farmer_record: Optional[FarmerRecord] = await self.store.get_farmer_record(partial.payload.launcher_id)
        if farmer_record is not None:
            current_difficulty = farmer_record.difficulty

        # Decide whether to update the difficulty
        recent_partials = await self.store.get_recent_partials(
            partial.payload.launcher_id, self.number_of_partials_target
        )

        # Only update the difficulty if we meet certain conditions
        new_difficulty: uint64 = get_new_difficulty(
            recent_partials,
            int(self.number_of_partials_target),
            int(self.time_target),
            current_difficulty,
            time_received_partial,
            self.min_difficulty,
        )

        if current_difficulty != new_difficulty:
            await self.store.update_difficulty(partial.payload.launcher_id, new_difficulty)

    return PostPartialResponse(current_difficulty).to_json_dict()