async def _messages_to_resend(self) -> List[Tuple[Message, Set[bytes32]]]:
    if self.wallet_state_manager is None or self.backup_initialized is False or self._shut_down:
        return []
    messages: List[Tuple[Message, Set[bytes32]]] = []

    records: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_not_sent()

    for record in records:
        if record.spend_bundle is None:
            continue
        msg = make_msg(
            ProtocolMessageTypes.send_transaction,
            wallet_protocol.SendTransaction(record.spend_bundle),
        )
        already_sent = set()
        for peer, status, _ in record.sent_to:
            if status == MempoolInclusionStatus.SUCCESS.value:
                already_sent.add(bytes32.from_hexstr(peer))
        messages.append((msg, already_sent))

    return messages
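# A brief illustrative sketch (a hypothetical caller, not part of the original) of how the
# (message, already_sent) pairs returned above can be consumed: resend each transaction only
# to peers whose node id is not in the already-sent set.
async def resend_unsent_sketch(messages_to_resend, peers) -> None:
    for msg, already_sent in messages_to_resend:
        for peer in peers:
            if peer.peer_node_id in already_sent:
                continue
            await peer.send_message(msg)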
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
    if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
        self.farmer.log.error(f"Do not have quality string {full_node_request.quality_string}")
        return None

    (plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
        full_node_request.quality_string
    ]
    request = harvester_protocol.RequestSignatures(
        plot_identifier,
        challenge_hash,
        sp_hash,
        [full_node_request.foliage_block_data_hash, full_node_request.foliage_transaction_block_hash],
    )

    msg = make_msg(ProtocolMessageTypes.request_signatures, request)
    await self.farmer.server.send_to_specific([msg], node_id)
async def handshake_task():
    # Wait until the task in `Farmer._start` is done so that we have keys available for the handshake. Bail out
    # early if we need to shut down or if the harvester is no longer connected.
    while not self.started and not self._shut_down and peer in self.server.get_connections():
        await asyncio.sleep(1)

    if self._shut_down:
        log.debug("handshake_task: shutdown")
        self.harvester_handshake_task = None
        return

    if peer not in self.server.get_connections():
        log.debug("handshake_task: disconnected")
        self.harvester_handshake_task = None
        return

    # Send a handshake to the harvester
    handshake = harvester_protocol.HarvesterHandshake(
        self.get_public_keys(),
        self.pool_public_keys,
    )
    msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
    await peer.send_message(msg)

    self.harvester_handshake_task = None
async def on_connect(peer: ws.WSChiaConnection):
    msg = make_msg(
        ProtocolMessageTypes.request_peers_introducer,
        introducer_protocol.RequestPeersIntroducer(),
    )
    await peer.send_message(msg)
async def perform_handshake(self, network_id: str, protocol_version: str, server_port: int, local_type: NodeType):
    if self.is_outbound:
        outbound_handshake = make_msg(
            ProtocolMessageTypes.handshake,
            Handshake(
                network_id,
                protocol_version,
                chia_full_version_str(),
                uint16(server_port),
                uint8(local_type.value),
                [(uint16(Capability.BASE.value), "1")],
            ),
        )
        assert outbound_handshake is not None
        await self._send_message(outbound_handshake)
        inbound_handshake_msg = await self._read_one_message()
        if inbound_handshake_msg is None:
            raise ProtocolError(Err.INVALID_HANDSHAKE)
        inbound_handshake = Handshake.from_bytes(inbound_handshake_msg.data)

        # Handle case of invalid ProtocolMessageType
        try:
            message_type: ProtocolMessageTypes = ProtocolMessageTypes(inbound_handshake_msg.type)
        except Exception:
            raise ProtocolError(Err.INVALID_HANDSHAKE)

        if message_type != ProtocolMessageTypes.handshake:
            raise ProtocolError(Err.INVALID_HANDSHAKE)

        if inbound_handshake.network_id != network_id:
            raise ProtocolError(Err.INCOMPATIBLE_NETWORK_ID)

        self.peer_server_port = inbound_handshake.server_port
        self.connection_type = NodeType(inbound_handshake.node_type)

    else:
        try:
            message = await self._read_one_message()
        except Exception:
            raise ProtocolError(Err.INVALID_HANDSHAKE)

        if message is None:
            raise ProtocolError(Err.INVALID_HANDSHAKE)

        # Handle case of invalid ProtocolMessageType
        try:
            message_type = ProtocolMessageTypes(message.type)
        except Exception:
            raise ProtocolError(Err.INVALID_HANDSHAKE)

        if message_type != ProtocolMessageTypes.handshake:
            raise ProtocolError(Err.INVALID_HANDSHAKE)

        inbound_handshake = Handshake.from_bytes(message.data)
        if inbound_handshake.network_id != network_id:
            raise ProtocolError(Err.INCOMPATIBLE_NETWORK_ID)

        outbound_handshake = make_msg(
            ProtocolMessageTypes.handshake,
            Handshake(
                network_id,
                protocol_version,
                chia_full_version_str(),
                uint16(server_port),
                uint8(local_type.value),
                [(uint16(Capability.BASE.value), "1")],
            ),
        )
        await self._send_message(outbound_handshake)
        self.peer_server_port = inbound_handshake.server_port
        self.connection_type = NodeType(inbound_handshake.node_type)

    self.outbound_task = asyncio.create_task(self.outbound_handler())
    self.inbound_task = asyncio.create_task(self.inbound_handler())
    return True
async def _do_process_communication(
    self,
    chain: Chain,
    challenge: bytes32,
    initial_form: ClassgroupElement,
    ip: str,
    reader: asyncio.StreamReader,
    writer: asyncio.StreamWriter,
    # Data specific only when running in bluebox mode.
    bluebox_iteration: Optional[uint64] = None,
    header_hash: Optional[bytes32] = None,
    height: Optional[uint32] = None,
    field_vdf: Optional[uint8] = None,
    # Labels a proof to the current state only
    proof_label: Optional[int] = None,
):
    disc: int = create_discriminant(challenge, self.constants.DISCRIMINANT_SIZE_BITS)

    try:
        # Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
        # the timelord tells the vdf_client what to execute.
        async with self.lock:
            if self.sanitizer_mode:
                writer.write(b"S")
            else:
                if self.config["fast_algorithm"]:
                    # Run n-wesolowski (fast) algorithm.
                    writer.write(b"N")
                else:
                    # Run two-wesolowski (slow) algorithm.
                    writer.write(b"T")
            await writer.drain()

        prefix = str(len(str(disc)))
        if len(prefix) == 1:
            prefix = "00" + prefix
        if len(prefix) == 2:
            prefix = "0" + prefix
        async with self.lock:
            writer.write((prefix + str(disc)).encode())
            await writer.drain()

        # Send initial_form prefixed with its length.
        async with self.lock:
            writer.write(bytes([len(initial_form.data)]) + initial_form.data)
            await writer.drain()
        try:
            ok = await reader.readexactly(2)
        except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
            log.warning(f"{type(e)} {e}")
            async with self.lock:
                self.vdf_failures.append((chain, proof_label))
                self.vdf_failures_count += 1
            return None

        if ok.decode() != "OK":
            return None

        log.debug("Got handshake with VDF client.")
        if not self.sanitizer_mode:
            async with self.lock:
                self.allows_iters.append(chain)
        else:
            async with self.lock:
                assert chain is Chain.BLUEBOX
                assert bluebox_iteration is not None
                prefix = str(len(str(bluebox_iteration)))
                if len(str(bluebox_iteration)) < 10:
                    prefix = "0" + prefix
                iter_str = prefix + str(bluebox_iteration)
                writer.write(iter_str.encode())
                await writer.drain()

        # Listen to the client until "STOP" is received.
        while True:
            try:
                data = await reader.readexactly(4)
            except (
                asyncio.IncompleteReadError,
                ConnectionResetError,
                Exception,
            ) as e:
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    self.vdf_failures.append((chain, proof_label))
                    self.vdf_failures_count += 1
                break

            msg = ""
            try:
                msg = data.decode()
            except Exception:
                pass
            if msg == "STOP":
                log.debug(f"Stopped client running on ip {ip}.")
                async with self.lock:
                    writer.write(b"ACK")
                    await writer.drain()
                break
            else:
                try:
                    # This must be a proof, 4 bytes is length prefix
                    length = int.from_bytes(data, "big")
                    proof = await reader.readexactly(length)
                    stdout_bytes_io: io.BytesIO = io.BytesIO(bytes.fromhex(proof.decode()))
                except (
                    asyncio.IncompleteReadError,
                    ConnectionResetError,
                    Exception,
                ) as e:
                    log.warning(f"{type(e)} {e}")
                    async with self.lock:
                        self.vdf_failures.append((chain, proof_label))
                        self.vdf_failures_count += 1
                    break

                iterations_needed = uint64(int.from_bytes(stdout_bytes_io.read(8), "big", signed=True))

                y_size_bytes = stdout_bytes_io.read(8)
                y_size = uint64(int.from_bytes(y_size_bytes, "big", signed=True))

                y_bytes = stdout_bytes_io.read(y_size)
                witness_type = uint8(int.from_bytes(stdout_bytes_io.read(1), "big", signed=True))
                proof_bytes: bytes = stdout_bytes_io.read()

                # Verifies our own proof just in case
                form_size = ClassgroupElement.get_size(self.constants)
                output = ClassgroupElement.from_bytes(y_bytes[:form_size])
                if not self.sanitizer_mode:
                    time_taken = time.time() - self.chain_start_time[chain]
                    ips = int(iterations_needed / time_taken * 10) / 10
                    log.info(
                        f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
                        f" iters, "
                        f"Estimated IPS: {ips}, Chain: {chain}"
                    )

                vdf_info: VDFInfo = VDFInfo(
                    challenge,
                    iterations_needed,
                    output,
                )
                vdf_proof: VDFProof = VDFProof(
                    witness_type,
                    proof_bytes,
                    self.sanitizer_mode,
                )

                if not vdf_proof.is_valid(self.constants, initial_form, vdf_info):
                    log.error("Invalid proof of time!")
                if not self.sanitizer_mode:
                    async with self.lock:
                        assert proof_label is not None
                        self.proofs_finished.append((chain, vdf_info, vdf_proof, proof_label))
                else:
                    async with self.lock:
                        writer.write(b"010")
                        await writer.drain()
                    assert header_hash is not None
                    assert field_vdf is not None
                    assert height is not None
                    response = timelord_protocol.RespondCompactProofOfTime(
                        vdf_info, vdf_proof, header_hash, height, field_vdf
                    )
                    if self.server is not None:
                        message = make_msg(ProtocolMessageTypes.respond_compact_proof_of_time, response)
                        await self.server.send_to_all([message], NodeType.FULL_NODE)
    except ConnectionResetError as e:
        log.debug(f"Connection reset with VDF client {e}")
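# A minimal, self-contained sketch (helper names are hypothetical, not part of the codebase) of
# the framing the method above uses on the timelord <-> vdf_client stream: the discriminant is
# sent as ASCII digits preceded by a three-character, zero-padded decimal length, and each proof
# from the client arrives as a 4-byte big-endian length followed by a hex-encoded payload that is
# parsed field by field.
import io


def encode_discriminant(disc: int) -> bytes:
    # e.g. a 309-digit discriminant is framed as b"309" + b"<309 ASCII digits>"
    disc_str = str(disc)
    return f"{len(disc_str):03d}{disc_str}".encode()


def parse_proof_blob(blob: bytes):
    # Layout used above: 8 bytes iterations, 8 bytes y_size, y_size bytes of classgroup output,
    # 1 byte witness type, and the remaining bytes are the witness/proof.
    buf = io.BytesIO(blob)
    iterations_needed = int.from_bytes(buf.read(8), "big", signed=True)
    y_size = int.from_bytes(buf.read(8), "big", signed=True)
    y_bytes = buf.read(y_size)
    witness_type = int.from_bytes(buf.read(1), "big", signed=True)
    proof_bytes = buf.read()
    return iterations_needed, y_bytes, witness_type, proof_bytes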
async def _check_for_end_of_subslot(self, iter_to_look_for: uint64):
    left_subslot_iters = [
        iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.END_OF_SUBSLOT
    ]
    if len(left_subslot_iters) == 0:
        return None
    if left_subslot_iters[0] != iter_to_look_for:
        return None
    chains_finished = [
        (chain, info, proof)
        for chain, info, proof, label in self.proofs_finished
        if info.number_of_iterations == left_subslot_iters[0] and label == self.num_resets
    ]
    if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
        chain_count = 3
    else:
        chain_count = 2
    if len(chains_finished) == chain_count:
        icc_ip_vdf: Optional[VDFInfo] = None
        icc_ip_proof: Optional[VDFProof] = None
        cc_vdf: Optional[VDFInfo] = None
        cc_proof: Optional[VDFProof] = None
        rc_vdf: Optional[VDFInfo] = None
        rc_proof: Optional[VDFProof] = None
        for chain, info, proof in chains_finished:
            if chain == Chain.CHALLENGE_CHAIN:
                cc_vdf = info
                cc_proof = proof
            if chain == Chain.REWARD_CHAIN:
                rc_vdf = info
                rc_proof = proof
            if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                icc_ip_vdf = info
                icc_ip_proof = proof
        assert cc_proof is not None and rc_proof is not None and cc_vdf is not None and rc_vdf is not None

        rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
        if rc_vdf.challenge != rc_challenge:
            assert rc_challenge is not None
            log.warning(f"Do not have correct challenge {rc_challenge.hex()} has" f" {rc_vdf.challenge}")
            # This proof is on an outdated challenge, so don't use it
            return None

        log.debug("Collected end of subslot vdfs.")
        self.iters_finished.add(iter_to_look_for)
        self.last_active_time = time.time()
        iters_from_sub_slot_start = cc_vdf.number_of_iterations + self.last_state.get_last_ip()
        cc_vdf = dataclasses.replace(cc_vdf, number_of_iterations=iters_from_sub_slot_start)
        if icc_ip_vdf is not None:
            if self.last_state.peak is not None:
                total_iters = (
                    self.last_state.get_total_iters()
                    - self.last_state.get_last_ip()
                    + self.last_state.get_sub_slot_iters()
                )
            else:
                total_iters = self.last_state.get_total_iters() + self.last_state.get_sub_slot_iters()
            iters_from_cb = uint64(total_iters - self.last_state.last_challenge_sb_or_eos_total_iters)
            if iters_from_cb > self.last_state.sub_slot_iters:
                log.error(f"{self.last_state.peak}")
                log.error(f"{self.last_state.subslot_end}")
                assert False
            assert iters_from_cb <= self.last_state.sub_slot_iters
            icc_ip_vdf = dataclasses.replace(icc_ip_vdf, number_of_iterations=iters_from_cb)

        icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = (
            None if icc_ip_vdf is None else InfusedChallengeChainSubSlot(icc_ip_vdf)
        )
        if self.last_state.get_deficit() == 0:
            assert icc_sub_slot is not None
            icc_sub_slot_hash = icc_sub_slot.get_hash()
        else:
            icc_sub_slot_hash = None
        next_ses: Optional[SubEpochSummary] = self.last_state.get_next_sub_epoch_summary()
        if next_ses is not None:
            log.info(f"Including sub epoch summary {next_ses}")
            ses_hash = next_ses.get_hash()
            new_sub_slot_iters = next_ses.new_sub_slot_iters
            new_difficulty = next_ses.new_difficulty
        else:
            ses_hash = None
            new_sub_slot_iters = None
            new_difficulty = None
        cc_sub_slot = ChallengeChainSubSlot(cc_vdf, icc_sub_slot_hash, ses_hash, new_sub_slot_iters, new_difficulty)
        eos_deficit: uint8 = (
            self.last_state.get_deficit()
            if self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK > self.last_state.get_deficit() > 0
            else self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
        )
        rc_sub_slot = RewardChainSubSlot(
            rc_vdf,
            cc_sub_slot.get_hash(),
            icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
            eos_deficit,
        )
        eos_bundle = EndOfSubSlotBundle(
            cc_sub_slot,
            icc_sub_slot,
            rc_sub_slot,
            SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
        )
        if self.server is not None:
            msg = make_msg(
                ProtocolMessageTypes.new_end_of_sub_slot_vdf,
                timelord_protocol.NewEndOfSubSlotVDF(eos_bundle),
            )
            await self.server.send_to_all([msg], NodeType.FULL_NODE)
        log.info(
            f"Built end of subslot bundle. cc hash: {eos_bundle.challenge_chain.get_hash()}. New_difficulty: "
            f"{eos_bundle.challenge_chain.new_difficulty} New ssi: {eos_bundle.challenge_chain.new_sub_slot_iters}"
        )
        if next_ses is None or next_ses.new_difficulty is None:
            self.unfinished_blocks = self.overflow_blocks.copy()
        else:
            # No overflow blocks in a new epoch
            self.unfinished_blocks = []
        self.overflow_blocks = []
        self.new_subslot_end = eos_bundle
        await self._handle_subslot_end()
async def _check_for_new_ip(self, iter_to_look_for: uint64):
    if len(self.unfinished_blocks) == 0:
        return None
    infusion_iters = [
        iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.INFUSION_POINT
    ]
    for iteration in infusion_iters:
        if iteration != iter_to_look_for:
            continue
        proofs_with_iter = [
            (chain, info, proof)
            for chain, info, proof, label in self.proofs_finished
            if info.number_of_iterations == iteration and label == self.num_resets
        ]
        if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
            chain_count = 3
        else:
            chain_count = 2
        if len(proofs_with_iter) == chain_count:
            block = None
            ip_iters = None
            for unfinished_block in self.unfinished_blocks:
                try:
                    _, ip_iters = iters_from_block(
                        self.constants,
                        unfinished_block.reward_chain_block,
                        self.last_state.get_sub_slot_iters(),
                        self.last_state.get_difficulty(),
                    )
                except Exception as e:
                    log.error(f"Error {e}")
                    continue
                if ip_iters - self.last_state.get_last_ip() == iteration:
                    block = unfinished_block
                    break
            assert ip_iters is not None
            if block is not None:
                ip_total_iters = self.last_state.get_total_iters() + iteration
                challenge = block.reward_chain_block.get_hash()
                icc_info: Optional[VDFInfo] = None
                icc_proof: Optional[VDFProof] = None
                cc_info: Optional[VDFInfo] = None
                cc_proof: Optional[VDFProof] = None
                rc_info: Optional[VDFInfo] = None
                rc_proof: Optional[VDFProof] = None
                for chain, info, proof in proofs_with_iter:
                    if chain == Chain.CHALLENGE_CHAIN:
                        cc_info = info
                        cc_proof = proof
                    if chain == Chain.REWARD_CHAIN:
                        rc_info = info
                        rc_proof = proof
                    if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                        icc_info = info
                        icc_proof = proof
                if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                    log.error(f"Insufficient VDF proofs for infusion point ch: {challenge} iterations:{iteration}")
                    return None

                rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
                if rc_info.challenge != rc_challenge:
                    assert rc_challenge is not None
                    log.warning(
                        f"Do not have correct challenge {rc_challenge.hex()} "
                        f"has {rc_info.challenge}, partial hash {block.reward_chain_block.get_hash()}"
                    )
                    # This proof is on an outdated challenge, so don't use it
                    continue

                self.iters_finished.add(iter_to_look_for)
                self.last_active_time = time.time()
                log.debug(f"Generated infusion point for challenge: {challenge} iterations: {iteration}.")

                overflow = is_overflow_block(self.constants, block.reward_chain_block.signage_point_index)

                if not self.last_state.can_infuse_block(overflow):
                    log.warning("Too many blocks, or overflow in new epoch, cannot infuse, discarding")
                    return None

                cc_info = dataclasses.replace(cc_info, number_of_iterations=ip_iters)
                response = timelord_protocol.NewInfusionPointVDF(
                    challenge,
                    cc_info,
                    cc_proof,
                    rc_info,
                    rc_proof,
                    icc_info,
                    icc_proof,
                )
                msg = make_msg(ProtocolMessageTypes.new_infusion_point_vdf, response)
                if self.server is not None:
                    await self.server.send_to_all([msg], NodeType.FULL_NODE)

                self.proofs_finished = self._clear_proof_list(iteration)

                if (
                    self.last_state.get_last_block_total_iters() is None
                    and not self.last_state.state_type == StateType.FIRST_SUB_SLOT
                ):
                    # We don't know when the last block was, so we can't make peaks
                    return None

                sp_total_iters = (
                    ip_total_iters
                    - ip_iters
                    + calculate_sp_iters(
                        self.constants,
                        block.sub_slot_iters,
                        block.reward_chain_block.signage_point_index,
                    )
                    - (block.sub_slot_iters if overflow else 0)
                )
                if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                    is_transaction_block = True
                    height: uint32 = uint32(0)
                else:
                    last_block_ti = self.last_state.get_last_block_total_iters()
                    assert last_block_ti is not None
                    is_transaction_block = last_block_ti < sp_total_iters
                    height = uint32(self.last_state.get_height() + 1)

                if height < 5:
                    # Don't directly update our state for the first few blocks, because we cannot validate
                    # whether the pre-farm is correct
                    return None

                new_reward_chain_block = RewardChainBlock(
                    uint128(self.last_state.get_weight() + block.difficulty),
                    height,
                    uint128(ip_total_iters),
                    block.reward_chain_block.signage_point_index,
                    block.reward_chain_block.pos_ss_cc_challenge_hash,
                    block.reward_chain_block.proof_of_space,
                    block.reward_chain_block.challenge_chain_sp_vdf,
                    block.reward_chain_block.challenge_chain_sp_signature,
                    cc_info,
                    block.reward_chain_block.reward_chain_sp_vdf,
                    block.reward_chain_block.reward_chain_sp_signature,
                    rc_info,
                    icc_info,
                    is_transaction_block,
                )
                if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                    # Genesis
                    new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                elif overflow and self.last_state.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                    if self.last_state.peak is not None:
                        assert self.last_state.subslot_end is None
                        # This means the previous block is also an overflow block, and did not manage
                        # to lower the deficit, therefore we cannot lower it either. (new slot)
                        new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                    else:
                        # This means we are the first infusion in this sub-slot. This may be a new slot or not.
                        assert self.last_state.subslot_end is not None
                        if self.last_state.subslot_end.infused_challenge_chain is None:
                            # There is no ICC, which means we are not finishing a slot. We can reduce the deficit.
                            new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                        else:
                            # There is an ICC, which means we are finishing a slot. Different slot, so can't change
                            # the deficit
                            new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                else:
                    new_deficit = max(self.last_state.deficit - 1, 0)

                if new_deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
                    last_csb_or_eos = ip_total_iters
                else:
                    last_csb_or_eos = self.last_state.last_challenge_sb_or_eos_total_iters

                if self.last_state.just_infused_sub_epoch_summary():
                    new_sub_epoch_summary = None
                    passed_ses_height_but_not_yet_included = False
                else:
                    new_sub_epoch_summary = block.sub_epoch_summary
                    if new_reward_chain_block.height % self.constants.SUB_EPOCH_BLOCKS == 0:
                        passed_ses_height_but_not_yet_included = True
                    else:
                        passed_ses_height_but_not_yet_included = (
                            self.last_state.get_passed_ses_height_but_not_yet_included()
                        )

                self.new_peak = timelord_protocol.NewPeakTimelord(
                    new_reward_chain_block,
                    block.difficulty,
                    uint8(new_deficit),
                    block.sub_slot_iters,
                    new_sub_epoch_summary,
                    self.last_state.reward_challenge_cache,
                    uint128(last_csb_or_eos),
                    passed_ses_height_but_not_yet_included,
                )

                await self._handle_new_peak()
            # Break so we alternate between checking SP and IP
            break
async def _check_for_new_sp(self, iter_to_look_for: uint64):
    signage_iters = [
        iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.SIGNAGE_POINT
    ]
    if len(signage_iters) == 0:
        return None
    to_remove = []
    for potential_sp_iters, signage_point_index in self.signage_point_iters:
        if potential_sp_iters not in signage_iters or potential_sp_iters != iter_to_look_for:
            continue
        signage_iter = potential_sp_iters
        proofs_with_iter = [
            (chain, info, proof)
            for chain, info, proof, label in self.proofs_finished
            if info.number_of_iterations == signage_iter and label == self.num_resets
        ]
        # Wait for both cc and rc to have the signage point.
        if len(proofs_with_iter) == 2:
            cc_info: Optional[VDFInfo] = None
            cc_proof: Optional[VDFProof] = None
            rc_info: Optional[VDFInfo] = None
            rc_proof: Optional[VDFProof] = None
            for chain, info, proof in proofs_with_iter:
                if chain == Chain.CHALLENGE_CHAIN:
                    cc_info = info
                    cc_proof = proof
                if chain == Chain.REWARD_CHAIN:
                    rc_info = info
                    rc_proof = proof
            if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                log.error(f"Insufficient signage point data {signage_iter}")
                continue
            self.iters_finished.add(iter_to_look_for)
            self.last_active_time = time.time()

            rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
            if rc_info.challenge != rc_challenge:
                assert rc_challenge is not None
                log.warning(f"SP: Do not have correct challenge {rc_challenge.hex()}" f" has {rc_info.challenge}")
                # This proof is on an outdated challenge, so don't use it
                continue

            iters_from_sub_slot_start = cc_info.number_of_iterations + self.last_state.get_last_ip()
            response = timelord_protocol.NewSignagePointVDF(
                signage_point_index,
                dataclasses.replace(cc_info, number_of_iterations=iters_from_sub_slot_start),
                cc_proof,
                rc_info,
                rc_proof,
            )
            if self.server is not None:
                msg = make_msg(ProtocolMessageTypes.new_signage_point_vdf, response)
                await self.server.send_to_all([msg], NodeType.FULL_NODE)
            # Cleanup the signage point from memory.
            to_remove.append((signage_iter, signage_point_index))

            self.proofs_finished = self._clear_proof_list(signage_iter)
            # Send the next 3 signage point to the chains.
            next_iters_count = 0
            for next_sp, k in self.signage_point_iters:
                for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
                    if next_sp not in self.iters_submitted[chain] and next_sp not in self.iters_to_submit[chain]:
                        self.iters_to_submit[chain].append(next_sp)
                self.iteration_to_proof_type[next_sp] = IterationType.SIGNAGE_POINT
                next_iters_count += 1
                if next_iters_count == 3:
                    break
            # Break so we alternate between checking SP and IP
            break
    for r in to_remove:
        self.signage_point_iters.remove(r)
async def new_proof_of_space(
    self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
    """
    This is a response from the harvester, for a NewChallenge. Here we check if the proof
    of space is sufficiently good, and if so, we ask for the whole proof.
    """
    if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
        self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
        self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))

    max_pos_per_sp = 5
    if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
        # This will likely never happen for any farmer with less than 10% of global space
        # It's meant to make testnets more stable
        self.farmer.log.info(
            f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
            f"{new_proof_of_space.sp_hash}"
        )
        return

    if new_proof_of_space.sp_hash not in self.farmer.sps:
        self.farmer.log.warning(
            f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
        )
        return

    sps = self.farmer.sps[new_proof_of_space.sp_hash]
    for sp in sps:
        computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
            self.farmer.constants,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
        )
        if computed_quality_string is None:
            self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
            return

        self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1

        required_iters: uint64 = calculate_iterations_quality(
            self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
            computed_quality_string,
            new_proof_of_space.proof.size,
            sp.difficulty,
            new_proof_of_space.sp_hash,
        )
        # Double check that the iters are good
        assert required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters)

        # Proceed at getting the signatures for this PoSpace
        request = harvester_protocol.RequestSignatures(
            new_proof_of_space.plot_identifier,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
            [sp.challenge_chain_sp, sp.reward_chain_sp],
        )

        if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
            self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = [
                (
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.proof,
                )
            ]
        else:
            self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
                (
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.proof,
                )
            )
        self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
        self.farmer.quality_str_to_identifiers[computed_quality_string] = (
            new_proof_of_space.plot_identifier,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
            peer.peer_node_id,
        )
        self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))

        return make_msg(ProtocolMessageTypes.request_signatures, request)
async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
    """
    There are two cases: receiving signatures for sps, or receiving signatures for the block.
    """
    if response.sp_hash not in self.farmer.sps:
        self.farmer.log.warning(f"Do not have challenge hash {response.challenge_hash}")
        return

    is_sp_signatures: bool = False
    sps = self.farmer.sps[response.sp_hash]
    signage_point_index = sps[0].signage_point_index
    found_sp_hash_debug = False
    for sp_candidate in sps:
        if response.sp_hash == response.message_signatures[0][0]:
            found_sp_hash_debug = True
            if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
                is_sp_signatures = True
    if found_sp_hash_debug:
        assert is_sp_signatures

    pospace = None
    for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
        if plot_identifier == response.plot_identifier:
            pospace = candidate_pospace
    assert pospace is not None

    computed_quality_string = pospace.verify_and_get_quality_string(
        self.farmer.constants, response.challenge_hash, response.sp_hash
    )
    if computed_quality_string is None:
        self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
        return

    if is_sp_signatures:
        (
            challenge_chain_sp,
            challenge_chain_sp_harv_sig,
        ) = response.message_signatures[0]
        reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
        for sk in self.farmer.get_private_keys():
            pk = sk.get_g1()
            if pk == response.farmer_pk:
                agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk)
                assert agg_pk == pospace.plot_public_key
                farmer_share_cc_sp = AugSchemeMPL.sign(sk, challenge_chain_sp, agg_pk)
                agg_sig_cc_sp = AugSchemeMPL.aggregate([challenge_chain_sp_harv_sig, farmer_share_cc_sp])
                assert AugSchemeMPL.verify(agg_pk, challenge_chain_sp, agg_sig_cc_sp)

                # This means it passes the sp filter
                farmer_share_rc_sp = AugSchemeMPL.sign(sk, reward_chain_sp, agg_pk)
                agg_sig_rc_sp = AugSchemeMPL.aggregate([reward_chain_sp_harv_sig, farmer_share_rc_sp])
                assert AugSchemeMPL.verify(agg_pk, reward_chain_sp, agg_sig_rc_sp)

                if pospace.pool_public_key is not None:
                    assert pospace.pool_contract_puzzle_hash is None
                    pool_pk = bytes(pospace.pool_public_key)
                    if pool_pk not in self.farmer.pool_sks_map:
                        self.farmer.log.error(
                            f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                        )
                        return
                    pool_target: Optional[PoolTarget] = PoolTarget(self.farmer.pool_target, uint32(0))
                    assert pool_target is not None
                    pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
                        self.farmer.pool_sks_map[pool_pk], bytes(pool_target)
                    )
                else:
                    assert pospace.pool_contract_puzzle_hash is not None
                    pool_target = None
                    pool_target_signature = None

                request = farmer_protocol.DeclareProofOfSpace(
                    response.challenge_hash,
                    challenge_chain_sp,
                    signage_point_index,
                    reward_chain_sp,
                    pospace,
                    agg_sig_cc_sp,
                    agg_sig_rc_sp,
                    self.farmer.farmer_target,
                    pool_target,
                    pool_target_signature,
                )
                self.farmer.state_changed("proof", {"proof": request, "passed_filter": True})
                msg = make_msg(ProtocolMessageTypes.declare_proof_of_space, request)
                await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
                return

    else:
        # This is a response with block signatures
        for sk in self.farmer.get_private_keys():
            (
                foliage_block_data_hash,
                foliage_sig_harvester,
            ) = response.message_signatures[0]
            (
                foliage_transaction_block_hash,
                foliage_transaction_block_sig_harvester,
            ) = response.message_signatures[1]
            pk = sk.get_g1()
            if pk == response.farmer_pk:
                agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk)
                assert agg_pk == pospace.plot_public_key
                foliage_sig_farmer = AugSchemeMPL.sign(sk, foliage_block_data_hash, agg_pk)
                foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(sk, foliage_transaction_block_hash, agg_pk)
                foliage_agg_sig = AugSchemeMPL.aggregate([foliage_sig_harvester, foliage_sig_farmer])
                foliage_block_agg_sig = AugSchemeMPL.aggregate(
                    [foliage_transaction_block_sig_harvester, foliage_transaction_block_sig_farmer]
                )
                assert AugSchemeMPL.verify(agg_pk, foliage_block_data_hash, foliage_agg_sig)
                assert AugSchemeMPL.verify(agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)

                request_to_nodes = farmer_protocol.SignedValues(
                    computed_quality_string,
                    foliage_agg_sig,
                    foliage_block_agg_sig,
                )

                msg = make_msg(ProtocolMessageTypes.signed_values, request_to_nodes)
                await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
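# A minimal standalone sketch (an illustration, not farmer code) of the 2-of-2 signing pattern
# used above: harvester and farmer each sign the same message with the aggregate plot public key
# prepended (the three-argument AugSchemeMPL.sign form), and the aggregate of the two half
# signatures verifies under the sum of the two public keys. The seeds and message below are
# arbitrary example values.
from blspy import AugSchemeMPL, G2Element

local_sk = AugSchemeMPL.key_gen(bytes([1] * 32))    # harvester-held half
farmer_sk = AugSchemeMPL.key_gen(bytes([2] * 32))   # farmer-held half
agg_pk = local_sk.get_g1() + farmer_sk.get_g1()     # aggregate "plot public key"

message = b"example foliage hash"
sig_local = AugSchemeMPL.sign(local_sk, message, agg_pk)
sig_farmer = AugSchemeMPL.sign(farmer_sk, message, agg_pk)
agg_sig: G2Element = AugSchemeMPL.aggregate([sig_local, sig_farmer])
assert AugSchemeMPL.verify(agg_pk, message, agg_sig)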
async def new_signage_point_harvester(
    self, new_challenge: harvester_protocol.NewSignagePointHarvester, peer: WSChiaConnection
):
    """
    The harvester receives a new signage point from the farmer, this happens at the start of each slot.
    The harvester does a few things:
    1. The harvester applies the plot filter for each of the plots, to select the proportion which are eligible
    for this signage point and challenge.
    2. The harvester gets the qualities for each plot. This is approximately 7 reads per plot which qualifies.
    Note that each plot may have 0, 1, 2, etc qualities for that challenge: but on average it will have 1.
    3. Checks the required_iters for each quality and the given signage point, to see which are eligible for
    inclusion (required_iters < sp_interval_iters).
    4. Looks up the full proof of space in the plot for each quality, approximately 64 reads per quality
    5. Returns the proof of space to the farmer
    """
    if len(self.harvester.pool_public_keys) == 0 or len(self.harvester.farmer_public_keys) == 0:
        # This means that we have not received the handshake yet
        return None

    start = time.time()
    assert len(new_challenge.challenge_hash) == 32

    # Refresh plots to see if there are any new ones
    if start - self.harvester.last_load_time > self.harvester.plot_load_frequency:
        await self.harvester.refresh_plots()
        self.harvester.last_load_time = time.time()

    loop = asyncio.get_running_loop()

    def blocking_lookup(filename: Path, plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
        # Uses the DiskProver object to lookup qualities. This is a blocking call,
        # so it should be run in a thread pool.
        try:
            plot_id = plot_info.prover.get_id()
            sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
                plot_id,
                new_challenge.challenge_hash,
                new_challenge.sp_hash,
            )
            try:
                quality_strings = plot_info.prover.get_qualities_for_challenge(sp_challenge_hash)
            except Exception as e:
                self.harvester.log.error(f"Error using prover object {e}")
                self.harvester.log.error(
                    f"File: {filename} Plot ID: {plot_id.hex()}, "
                    f"challenge: {sp_challenge_hash}, plot_info: {plot_info}"
                )
                return []

            responses: List[Tuple[bytes32, ProofOfSpace]] = []
            if quality_strings is not None:
                difficulty = new_challenge.difficulty
                sub_slot_iters = new_challenge.sub_slot_iters
                if plot_info.pool_contract_puzzle_hash is not None:
                    # If we are pooling, override the difficulty and sub slot iters with the pool threshold info.
                    # This will mean more proofs actually get found, but they are only submitted to the pool,
                    # not the blockchain
                    for pool_difficulty in new_challenge.pool_difficulties:
                        if pool_difficulty.pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash:
                            difficulty = pool_difficulty.difficulty
                            sub_slot_iters = pool_difficulty.sub_slot_iters

                # Found proofs of space (on average 1 is expected per plot)
                for index, quality_str in enumerate(quality_strings):
                    required_iters: uint64 = calculate_iterations_quality(
                        self.harvester.constants.DIFFICULTY_CONSTANT_FACTOR,
                        quality_str,
                        plot_info.prover.get_size(),
                        difficulty,
                        new_challenge.sp_hash,
                    )
                    sp_interval_iters = calculate_sp_interval_iters(self.harvester.constants, sub_slot_iters)
                    if required_iters < sp_interval_iters:
                        # Found a very good proof of space! will fetch the whole proof from disk,
                        # then send to farmer
                        try:
                            proof_xs = plot_info.prover.get_full_proof(sp_challenge_hash, index)
                        except Exception as e:
                            self.harvester.log.error(f"Exception fetching full proof for {filename}. {e}")
                            self.harvester.log.error(
                                f"File: {filename} Plot ID: {plot_id.hex()}, challenge: {sp_challenge_hash}, "
                                f"plot_info: {plot_info}"
                            )
                            continue

                        # Look up local_sk from plot to save locked memory
                        (
                            pool_public_key_or_puzzle_hash,
                            farmer_public_key,
                            local_master_sk,
                        ) = parse_plot_info(plot_info.prover.get_memo())
                        local_sk = master_sk_to_local_sk(local_master_sk)
                        include_taproot = plot_info.pool_contract_puzzle_hash is not None
                        plot_public_key = ProofOfSpace.generate_plot_public_key(
                            local_sk.get_g1(), farmer_public_key, include_taproot
                        )
                        responses.append(
                            (
                                quality_str,
                                ProofOfSpace(
                                    sp_challenge_hash,
                                    plot_info.pool_public_key,
                                    plot_info.pool_contract_puzzle_hash,
                                    plot_public_key,
                                    uint8(plot_info.prover.get_size()),
                                    proof_xs,
                                ),
                            )
                        )
            return responses
        except Exception as e:
            self.harvester.log.error(f"Unknown error: {e}")
            return []

    async def lookup_challenge(
        filename: Path, plot_info: PlotInfo
    ) -> Tuple[Path, List[harvester_protocol.NewProofOfSpace]]:
        # Executes a DiskProverLookup in a thread pool, and returns responses
        all_responses: List[harvester_protocol.NewProofOfSpace] = []
        if self.harvester._is_shutdown:
            return filename, []
        proofs_of_space_and_q: List[Tuple[bytes32, ProofOfSpace]] = await loop.run_in_executor(
            self.harvester.executor, blocking_lookup, filename, plot_info
        )
        for quality_str, proof_of_space in proofs_of_space_and_q:
            all_responses.append(
                harvester_protocol.NewProofOfSpace(
                    new_challenge.challenge_hash,
                    new_challenge.sp_hash,
                    quality_str.hex() + str(filename.resolve()),
                    proof_of_space,
                    new_challenge.signage_point_index,
                )
            )
        return filename, all_responses

    awaitables = []
    passed = 0
    total = 0
    for try_plot_filename, try_plot_info in self.harvester.provers.items():
        try:
            if try_plot_filename.exists():
                # Passes the plot filter (does not check sp filter yet though, since we have not reached sp)
                # This is being executed at the beginning of the slot
                total += 1
                if ProofOfSpace.passes_plot_filter(
                    self.harvester.constants,
                    try_plot_info.prover.get_id(),
                    new_challenge.challenge_hash,
                    new_challenge.sp_hash,
                ):
                    passed += 1
                    awaitables.append(lookup_challenge(try_plot_filename, try_plot_info))
        except Exception as e:
            self.harvester.log.error(f"Error plot file {try_plot_filename} may no longer exist {e}")

    # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
    total_proofs_found = 0
    for filename_sublist_awaitable in asyncio.as_completed(awaitables):
        filename, sublist = await filename_sublist_awaitable
        time_taken = time.time() - start
        if time_taken > 5:
            self.harvester.log.warning(
                f"Looking up qualities on {filename} took: {time_taken}. This should be below 5 seconds "
                f"to minimize risk of losing rewards."
            )
        else:
            pass
            # If you want additional logs, uncomment the following line
            # self.harvester.log.debug(f"Looking up qualities on {filename} took: {time_taken}")
        for response in sublist:
            total_proofs_found += 1
            msg = make_msg(ProtocolMessageTypes.new_proof_of_space, response)
            await peer.send_message(msg)

    now = uint64(int(time.time()))
    farming_info = FarmingInfo(
        new_challenge.challenge_hash,
        new_challenge.sp_hash,
        now,
        uint32(passed),
        uint32(total_proofs_found),
        uint32(total),
    )
    pass_msg = make_msg(ProtocolMessageTypes.farming_info, farming_info)
    await peer.send_message(pass_msg)

    self.harvester.log.info(
        f"{len(awaitables)} plots were eligible for farming {new_challenge.challenge_hash.hex()[:10]}..."
        f" Found {total_proofs_found} proofs. Time: {time.time() - start:.5f} s. "
        f"Total {len(self.harvester.provers)} plots"
    )
async def new_proof_of_space(
    self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
    """
    This is a response from the harvester, for a NewChallenge. Here we check if the proof
    of space is sufficiently good, and if so, we ask for the whole proof.
    """
    if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
        self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
        self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))

    max_pos_per_sp = 5
    if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
        # This will likely never happen for any farmer with less than 10% of global space
        # It's meant to make testnets more stable
        self.farmer.log.info(
            f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
            f"{new_proof_of_space.sp_hash}"
        )
        return None

    if new_proof_of_space.sp_hash not in self.farmer.sps:
        self.farmer.log.warning(
            f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
        )
        return None

    sps = self.farmer.sps[new_proof_of_space.sp_hash]
    for sp in sps:
        computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
            self.farmer.constants,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
        )
        if computed_quality_string is None:
            self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
            return None

        self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1

        required_iters: uint64 = calculate_iterations_quality(
            self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
            computed_quality_string,
            new_proof_of_space.proof.size,
            sp.difficulty,
            new_proof_of_space.sp_hash,
        )

        # If the iters are good enough to make a block, proceed with the block making flow
        if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
            # Proceed at getting the signatures for this PoSpace
            request = harvester_protocol.RequestSignatures(
                new_proof_of_space.plot_identifier,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
                [sp.challenge_chain_sp, sp.reward_chain_sp],
            )

            if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
                self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = []

            self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
                (
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.proof,
                )
            )
            self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
            self.farmer.quality_str_to_identifiers[computed_quality_string] = (
                new_proof_of_space.plot_identifier,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
                peer.peer_node_id,
            )
            self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))

            await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))

        p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
        if p2_singleton_puzzle_hash is not None:
            # Otherwise, send the proof of space to the pool
            # When we win a block, we also send the partial to the pool
            if p2_singleton_puzzle_hash not in self.farmer.pool_state:
                self.farmer.log.info(f"Did not find pool info for {p2_singleton_puzzle_hash}")
                return
            pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
            pool_url = pool_state_dict["pool_config"].pool_url
            if pool_url == "":
                return

            if pool_state_dict["current_difficulty"] is None:
                self.farmer.log.warning(
                    f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
                    f"check communication with the pool, skipping this partial to {pool_url}."
                )
                return

            required_iters = calculate_iterations_quality(
                self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
                computed_quality_string,
                new_proof_of_space.proof.size,
                pool_state_dict["current_difficulty"],
                new_proof_of_space.sp_hash,
            )
            if required_iters >= calculate_sp_interval_iters(
                self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
            ):
                self.farmer.log.info(
                    f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
                )
                return

            authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
            if authentication_token_timeout is None:
                self.farmer.log.warning(
                    f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
                    f", check communication with the pool."
                )
                return

            # Submit partial to pool
            is_eos = new_proof_of_space.signage_point_index == 0
            payload = PostPartialPayload(
                pool_state_dict["pool_config"].launcher_id,
                get_current_authentication_token(authentication_token_timeout),
                new_proof_of_space.proof,
                new_proof_of_space.sp_hash,
                is_eos,
                peer.peer_node_id,
            )

            # The plot key is 2/2 so we need the harvester's half of the signature
            m_to_sign = payload.get_hash()
            request = harvester_protocol.RequestSignatures(
                new_proof_of_space.plot_identifier,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
                [m_to_sign],
            )
            response: Any = await peer.request_signatures(request)
            if not isinstance(response, harvester_protocol.RespondSignatures):
                self.farmer.log.error(f"Invalid response from harvester: {response}")
                return

            assert len(response.message_signatures) == 1

            plot_signature: Optional[G2Element] = None
            for sk in self.farmer.get_private_keys():
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, True)
                    assert agg_pk == new_proof_of_space.proof.plot_public_key
                    sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
                    taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                    taproot_sig: G2Element = AugSchemeMPL.sign(taproot_sk, m_to_sign, agg_pk)

                    plot_signature = AugSchemeMPL.aggregate(
                        [sig_farmer, response.message_signatures[0][1], taproot_sig]
                    )
                    assert AugSchemeMPL.verify(agg_pk, m_to_sign, plot_signature)

            authentication_pk = pool_state_dict["pool_config"].authentication_public_key
            if bytes(authentication_pk) is None:
                self.farmer.log.error(f"No authentication sk for {authentication_pk}")
                return
            authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(authentication_pk)]
            authentication_signature = AugSchemeMPL.sign(authentication_sk, m_to_sign)

            assert plot_signature is not None
            agg_sig: G2Element = AugSchemeMPL.aggregate([plot_signature, authentication_signature])

            post_partial_request: PostPartialRequest = PostPartialRequest(payload, agg_sig)
            post_partial_body = json.dumps(post_partial_request.to_json_dict())
            self.farmer.log.info(
                f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
            )
            pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
            pool_state_dict["points_found_24h"].append((time.time(), pool_state_dict["current_difficulty"]))
            headers = {
                "content-type": "application/json;",
            }
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.post(f"{pool_url}/partial", data=post_partial_body, headers=headers) as resp:
                        if resp.ok:
                            pool_response: Dict = json.loads(await resp.text())
                            self.farmer.log.info(f"Pool response: {pool_response}")
                            if "error_code" in pool_response:
                                self.farmer.log.error(
                                    f"Error in pooling: "
                                    f"{pool_response['error_code'], pool_response['error_message']}"
                                )
                                pool_state_dict["pool_errors_24h"].append(pool_response)
                                if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
                                    self.farmer.log.error(
                                        "Partial not good enough, forcing pool farmer update to "
                                        "get our current difficulty."
                                    )
                                    pool_state_dict["next_farmer_update"] = 0
                                    await self.farmer.update_pool_state()
                            else:
                                new_difficulty = pool_response["new_difficulty"]
                                pool_state_dict["points_acknowledged_since_start"] += new_difficulty
                                pool_state_dict["points_acknowledged_24h"].append((time.time(), new_difficulty))
                                pool_state_dict["current_difficulty"] = new_difficulty
                        else:
                            self.farmer.log.error(f"Error sending partial to {pool_url}, {resp.status}")
            except Exception as e:
                self.farmer.log.error(f"Error connecting to pool: {e}")
                return

            return
async def test_percentage_limits(self):
    r = RateLimiter(60, 40)
    new_peak_message = make_msg(ProtocolMessageTypes.new_peak, bytes([1] * 40))
    for i in range(50):
        assert r.process_msg_and_check(new_peak_message)

    saw_disconnect = False
    for i in range(50):
        response = r.process_msg_and_check(new_peak_message)
        if not response:
            saw_disconnect = True
    assert saw_disconnect

    r = RateLimiter(60, 40)
    block_message = make_msg(ProtocolMessageTypes.respond_block, bytes([1] * 1024 * 1024))
    for i in range(5):
        assert r.process_msg_and_check(block_message)

    saw_disconnect = False
    for i in range(5):
        response = r.process_msg_and_check(block_message)
        if not response:
            saw_disconnect = True
    assert saw_disconnect

    # Aggregate percentage limit count
    r = RateLimiter(60, 40)
    message_1 = make_msg(ProtocolMessageTypes.request_additions, bytes([1] * 5 * 1024))
    message_2 = make_msg(ProtocolMessageTypes.request_removals, bytes([1] * 1024))
    message_3 = make_msg(ProtocolMessageTypes.respond_additions, bytes([1] * 1024))

    for i in range(180):
        assert r.process_msg_and_check(message_1)
    for i in range(180):
        assert r.process_msg_and_check(message_2)

    saw_disconnect = False
    for i in range(100):
        response = r.process_msg_and_check(message_3)
        if not response:
            saw_disconnect = True
    assert saw_disconnect

    # Aggregate percentage limit max total size
    r = RateLimiter(60, 40)
    message_4 = make_msg(ProtocolMessageTypes.respond_proof_of_weight, bytes([1] * 18 * 1024 * 1024))
    message_5 = make_msg(ProtocolMessageTypes.respond_blocks, bytes([1] * 24 * 1024 * 1024))

    for i in range(2):
        assert r.process_msg_and_check(message_4)

    saw_disconnect = False
    for i in range(2):
        response = r.process_msg_and_check(message_5)
        if not response:
            saw_disconnect = True
    assert saw_disconnect
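# A minimal, self-contained sketch (hypothetical class, not the RateLimiter under test) of the
# idea the test above exercises: each message type has a per-period count and size budget, and
# the limiter only admits messages while usage stays under `percentage_of_limit` percent of
# those budgets.
from dataclasses import dataclass, field
from typing import Dict, Tuple


@dataclass
class PercentageRateLimiterSketch:
    percentage_of_limit: int  # e.g. 40 means only 40% of each budget may be used
    limits: Dict[str, Tuple[int, int]]  # message type -> (max count, max total bytes) per period
    counts: Dict[str, int] = field(default_factory=dict)
    sizes: Dict[str, int] = field(default_factory=dict)

    def process_msg_and_check(self, msg_type: str, size: int) -> bool:
        max_count, max_bytes = self.limits[msg_type]
        new_count = self.counts.get(msg_type, 0) + 1
        new_size = self.sizes.get(msg_type, 0) + size
        allowed = (
            new_count <= max_count * self.percentage_of_limit // 100
            and new_size <= max_bytes * self.percentage_of_limit // 100
        )
        self.counts[msg_type] = new_count
        self.sizes[msg_type] = new_size
        return allowed


# Example: with a 40% cap on a budget of 100 messages, the 41st message is rejected.
limiter = PercentageRateLimiterSketch(40, {"new_peak": (100, 1024 * 1024)})
assert all(limiter.process_msg_and_check("new_peak", 40) for _ in range(40))
assert not limiter.process_msg_and_check("new_peak", 40)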
async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
    try:
        pool_difficulties: List[PoolDifficulty] = []
        for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
            if pool_dict["pool_config"].pool_url == "":
                # Self pooling
                continue

            if pool_dict["current_difficulty"] is None:
                self.farmer.log.warning(
                    f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
                    f"check communication with the pool, skipping this signage point, pool: "
                    f"{pool_dict['pool_config'].pool_url} "
                )
                continue
            pool_difficulties.append(
                PoolDifficulty(
                    pool_dict["current_difficulty"],
                    self.farmer.constants.POOL_SUB_SLOT_ITERS,
                    p2_singleton_puzzle_hash,
                )
            )
        message = harvester_protocol.NewSignagePointHarvester(
            new_signage_point.challenge_hash,
            new_signage_point.difficulty,
            new_signage_point.sub_slot_iters,
            new_signage_point.signage_point_index,
            new_signage_point.challenge_chain_sp,
            pool_difficulties,
        )

        msg = make_msg(ProtocolMessageTypes.new_signage_point_harvester, message)
        await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
        if new_signage_point.challenge_chain_sp not in self.farmer.sps:
            self.farmer.sps[new_signage_point.challenge_chain_sp] = []
    finally:
        # Age out old 24h information for every signage point regardless
        # of any failures. Note that this still lets old data remain if
        # the client isn't receiving signage points.
        cutoff_24h = time.time() - (24 * 60 * 60)
        for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
            for key in ["points_found_24h", "points_acknowledged_24h"]:
                if key not in pool_dict:
                    continue

                pool_dict[key] = strip_old_entries(pairs=pool_dict[key], before=cutoff_24h)

    if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
        self.farmer.log.debug(f"Duplicate signage point {new_signage_point.signage_point_index}")
        return

    self.farmer.sps[new_signage_point.challenge_chain_sp].append(new_signage_point)
    self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(int(time.time()))
    self.farmer.state_changed("new_signage_point", {"sp_hash": new_signage_point.challenge_chain_sp})
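# A minimal sketch (an assumption about the helper's behaviour, not its actual source) of what
# `strip_old_entries(pairs=..., before=...)` is used for above: dropping (timestamp, value)
# pairs whose timestamp falls before the 24-hour cutoff.
from typing import List, Tuple


def strip_old_entries_sketch(pairs: List[Tuple[float, int]], before: float) -> List[Tuple[float, int]]:
    return [(ts, value) for ts, value in pairs if ts >= before]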