async def request_signatures(self, request: harvester_protocol.RequestSignatures):
    """
    The farmer requests a signature on the header hash, for one of the proofs that we found.
    A signature is created on the header hash using the harvester private key. This can also
    be used for pooling.
    """
    plot_filename = Path(request.plot_identifier[64:]).resolve()
    with self.harvester.plot_manager:
        try:
            plot_info = self.harvester.plot_manager.plots[plot_filename]
        except KeyError:
            self.harvester.log.warning(f"KeyError plot {plot_filename} does not exist.")
            return None

        # Look up local_sk from plot to save locked memory
        (
            pool_public_key_or_puzzle_hash,
            farmer_public_key,
            local_master_sk,
        ) = parse_plot_info(plot_info.prover.get_memo())

    local_sk = master_sk_to_local_sk(local_master_sk)

    if isinstance(pool_public_key_or_puzzle_hash, G1Element):
        include_taproot = False
    else:
        assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
        include_taproot = True

    agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_public_key, include_taproot)

    # This is only a partial signature. When combined with the farmer's half, it will
    # form a complete PrependSignature.
    message_signatures: List[Tuple[bytes32, G2Element]] = []
    for message in request.messages:
        signature: G2Element = AugSchemeMPL.sign(local_sk, message, agg_pk)
        message_signatures.append((message, signature))

    response: harvester_protocol.RespondSignatures = harvester_protocol.RespondSignatures(
        request.plot_identifier,
        request.challenge_hash,
        request.sp_hash,
        local_sk.get_g1(),
        farmer_public_key,
        message_signatures,
    )

    return make_msg(ProtocolMessageTypes.respond_signatures, response)
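# The handler above returns only the harvester's half of each 2/2 signature. Below is a
# minimal sketch of how the farmer's side would complete one, using the same blspy calls
# that appear in respond_signatures further down; `farmer_sk`, `message`, and
# `harvester_half` are hypothetical placeholders, and `agg_pk` is the aggregate plot
# public key computed as above.
def complete_plot_signature_sketch(farmer_sk, message, harvester_half, agg_pk) -> G2Element:
    # Both halves are prepend-signed against the aggregate key, so the aggregated
    # signature verifies directly under agg_pk.
    farmer_half = AugSchemeMPL.sign(farmer_sk, message, agg_pk)
    full_signature = AugSchemeMPL.aggregate([harvester_half, farmer_half])
    assert AugSchemeMPL.verify(agg_pk, message, full_signature)
    return full_signature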
async def create_pool_plot(self, p2_singleton_puzzle_hash: bytes32, shutil=None) -> bytes32:
    plot_dir = get_plot_dir()
    temp_dir = get_plot_tmp_dir()
    args = Namespace()
    args.size = 22
    args.num = 1
    args.buffer = 100
    args.tmp_dir = temp_dir
    args.tmp2_dir = plot_dir
    args.final_dir = plot_dir
    args.plotid = None
    args.memo = None
    args.buckets = 0
    args.stripe_size = 2000
    args.num_threads = 0
    args.nobitfield = False
    args.exclude_final_dir = False
    args.list_duplicates = False
    test_private_keys = [AugSchemeMPL.key_gen(std_hash(b"test_pool_rpc"))]
    plot_public_key = ProofOfSpace.generate_plot_public_key(
        master_sk_to_local_sk(test_private_keys[0]).get_g1(), bt.farmer_pk, True
    )
    plot_id = ProofOfSpace.calculate_plot_id_ph(p2_singleton_puzzle_hash, plot_public_key)
    try:
        plot_keys = PlotKeys(bt.farmer_pk, None, encode_puzzle_hash(p2_singleton_puzzle_hash, "txch"))
        await create_plots(
            args,
            plot_keys,
            bt.root_path,
            use_datetime=False,
            test_private_keys=test_private_keys,
        )
    except KeyboardInterrupt:
        shutil.rmtree(plot_dir, ignore_errors=True)
        raise
    await bt.setup_plots()
    return plot_id
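# A hedged usage note for the test helper above. The `shutil` parameter is an injected
# module handle (the helper only needs it for cleanup on interrupt), so a caller would
# pass in the stdlib module; `self` stands for the test's setup object, an assumption
# about the surrounding harness:
#
#     import shutil
#     plot_id = await self.create_pool_plot(p2_singleton_puzzle_hash, shutil)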
def process_file(file_path: Path) -> Optional[PlotInfo]:
    if not self._refreshing_enabled:
        return None
    filename_str = str(file_path)
    if self.match_str is not None and self.match_str not in filename_str:
        return None
    if (
        file_path in self.failed_to_open_filenames
        and (time.time() - self.failed_to_open_filenames[file_path]) < self.refresh_parameter.retry_invalid_seconds
    ):
        # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
        return None
    if file_path in self.plots:
        return self.plots[file_path]
    entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
    if entry is not None:
        loaded_parent, duplicates = entry
        if str(file_path.parent) in duplicates:
            log.debug(f"Skip duplicated plot {str(file_path)}")
            return None
    try:
        if not file_path.exists():
            return None

        prover = DiskProver(str(file_path))
        log.debug(f"process_file {str(file_path)}")

        expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
        stat_info = file_path.stat()

        # TODO: consider checking if the file was just written to (which would mean that the file is still
        # being copied). A segfault might happen in this edge case.

        if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
            log.warning(
                f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
            )
            return None

        cache_entry = self.cache.get(prover.get_id())
        if cache_entry is None:
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that have the correct keys associated with them
            if farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None

            pool_public_key: Optional[G1Element] = None
            pool_contract_puzzle_hash: Optional[bytes32] = None
            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None

            # If a plot is in `no_key_filenames` the keys were missing in earlier refresh cycles. We can remove
            # the current plot from that list if it's in there, since we passed the key checks above.
            if file_path in self.no_key_filenames:
                self.no_key_filenames.remove(file_path)

            local_sk = master_sk_to_local_sk(local_master_sk)

            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
            )

            cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
            self.cache.update(prover.get_id(), cache_entry)

        with self.plot_filename_paths_lock:
            paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
            if paths is None:
                paths = (str(Path(prover.get_filename()).parent), set())
                self.plot_filename_paths[file_path.name] = paths
            else:
                paths[1].add(str(Path(prover.get_filename()).parent))
                log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
                return None

        new_plot_info: PlotInfo = PlotInfo(
            prover,
            cache_entry.pool_public_key,
            cache_entry.pool_contract_puzzle_hash,
            cache_entry.plot_public_key,
            stat_info.st_size,
            stat_info.st_mtime,
        )

        with counter_lock:
            result.loaded.append(new_plot_info)

        if file_path in self.failed_to_open_filenames:
            del self.failed_to_open_filenames[file_path]

    except Exception as e:
        tb = traceback.format_exc()
        log.error(f"Failed to open file {file_path}. {e} {tb}")
        self.failed_to_open_filenames[file_path] = int(time.time())
        return None

    log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")

    if self.show_memo:
        plot_memo: bytes32
        if pool_contract_puzzle_hash is None:
            plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
        else:
            plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
        plot_memo_str: str = plot_memo.hex()
        log.info(f"Memo: {plot_memo_str}")

    return new_plot_info
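# A minimal sketch of the duplicate tracking used above, under the assumption that
# `plot_filename_paths` maps a plot file name to a tuple of (parent directory the plot
# was first loaded from, set of other parent directories holding copies). The names
# here are illustrative, not part of the PlotManager API.
plot_filename_paths_demo: Dict[str, Tuple[str, Set[str]]] = {}

def register_demo_copy(path: Path) -> bool:
    """Return True if `path` is the first copy of this file name seen, else record a duplicate."""
    entry = plot_filename_paths_demo.get(path.name)
    if entry is None:
        plot_filename_paths_demo[path.name] = (str(path.parent), set())
        return True
    entry[1].add(str(path.parent))  # every later parent directory is treated as a duplicate
    return False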
def process_file(directory: Path, filename: Path) -> Tuple[int, Dict]:
    new_provers: Dict[Path, PlotInfo] = {}
    nonlocal changed
    filename_str = str(filename)
    if match_str is not None and match_str not in filename_str:
        return 0, new_provers
    if filename.exists():
        if filename in failed_to_open_filenames and (time.time() - failed_to_open_filenames[filename]) < 1200:
            # Try once every 20 minutes to open the file
            return 0, new_provers
        if filename in provers:
            try:
                stat_info = filename.stat()
            except Exception as e:
                log.error(f"Failed to open file {filename}. {e}")
                return 0, new_provers
            if stat_info.st_mtime == provers[filename].time_modified:
                with plot_ids_lock:
                    if provers[filename].prover.get_id() in plot_ids:
                        log.warning(f"Have multiple copies of the plot {filename}, not adding it.")
                        return 0, new_provers
                    plot_ids.add(provers[filename].prover.get_id())
                new_provers[filename] = provers[filename]
                return stat_info.st_size, new_provers
        try:
            prover = DiskProver(str(filename))
            expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
            stat_info = filename.stat()

            # TODO: consider checking if the file was just written to (which would mean that the file is still
            # being copied). A segfault might happen in this edge case.

            if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                log.warning(
                    f"Not farming plot {filename}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                    f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                )
                return 0, new_provers

            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that have the correct keys associated with them
            if farmer_public_keys is not None and farmer_public_key not in farmer_public_keys:
                log.warning(f"Plot {filename} has a farmer public key that is not in the farmer's pk list.")
                no_key_filenames.add(filename)
                if not open_no_key_filenames:
                    return 0, new_provers

            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
                pool_contract_puzzle_hash = None
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_public_key = None
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if (
                pool_public_keys is not None
                and pool_public_key is not None
                and pool_public_key not in pool_public_keys
            ):
                log.warning(f"Plot {filename} has a pool public key that is not in the farmer's pool pk list.")
                no_key_filenames.add(filename)
                if not open_no_key_filenames:
                    return 0, new_provers

            stat_info = filename.stat()
            local_sk = master_sk_to_local_sk(local_master_sk)
            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_public_key)

            with plot_ids_lock:
                if prover.get_id() in plot_ids:
                    log.warning(f"Have multiple copies of the plot {filename}, not adding it.")
                    return 0, new_provers
                plot_ids.add(prover.get_id())

            new_provers[filename] = PlotInfo(
                prover,
                pool_public_key,
                pool_contract_puzzle_hash,
                plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
                directory,
            )

            changed = True
        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file {filename}. {e} {tb}")
            failed_to_open_filenames[filename] = int(time.time())
            return 0, new_provers

        log.info(f"Found plot {filename} of size {new_provers[filename].prover.get_size()}")

        if show_memo:
            plot_memo: bytes32
            if pool_contract_puzzle_hash is None:
                plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
            else:
                plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
            plot_memo_str: str = plot_memo.hex()
            log.info(f"Memo: {plot_memo_str}")

        return stat_info.st_size, new_provers
    return 0, new_provers
async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
    """
    There are two cases: receiving signatures for sps, or receiving signatures for the block.
    """
    if response.sp_hash not in self.farmer.sps:
        self.farmer.log.warning(f"Do not have challenge hash {response.challenge_hash}")
        return
    is_sp_signatures: bool = False
    sps = self.farmer.sps[response.sp_hash]
    signage_point_index = sps[0].signage_point_index
    found_sp_hash_debug = False
    for sp_candidate in sps:
        if response.sp_hash == response.message_signatures[0][0]:
            found_sp_hash_debug = True
            if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
                is_sp_signatures = True
    if found_sp_hash_debug:
        assert is_sp_signatures

    pospace = None
    for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
        if plot_identifier == response.plot_identifier:
            pospace = candidate_pospace
    assert pospace is not None

    computed_quality_string = pospace.verify_and_get_quality_string(
        self.farmer.constants, response.challenge_hash, response.sp_hash
    )
    if computed_quality_string is None:
        self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
        return

    if is_sp_signatures:
        (
            challenge_chain_sp,
            challenge_chain_sp_harv_sig,
        ) = response.message_signatures[0]
        reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
        for sk in self.farmer.get_private_keys():
            pk = sk.get_g1()
            if pk == response.farmer_pk:
                agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk)
                assert agg_pk == pospace.plot_public_key
                farmer_share_cc_sp = AugSchemeMPL.sign(sk, challenge_chain_sp, agg_pk)
                agg_sig_cc_sp = AugSchemeMPL.aggregate([challenge_chain_sp_harv_sig, farmer_share_cc_sp])
                assert AugSchemeMPL.verify(agg_pk, challenge_chain_sp, agg_sig_cc_sp)

                # This means it passes the sp filter
                farmer_share_rc_sp = AugSchemeMPL.sign(sk, reward_chain_sp, agg_pk)
                agg_sig_rc_sp = AugSchemeMPL.aggregate([reward_chain_sp_harv_sig, farmer_share_rc_sp])
                assert AugSchemeMPL.verify(agg_pk, reward_chain_sp, agg_sig_rc_sp)

                if pospace.pool_public_key is not None:
                    assert pospace.pool_contract_puzzle_hash is None
                    pool_pk = bytes(pospace.pool_public_key)
                    if pool_pk not in self.farmer.pool_sks_map:
                        self.farmer.log.error(
                            f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                        )
                        return
                    pool_target: Optional[PoolTarget] = PoolTarget(self.farmer.pool_target, uint32(0))
                    assert pool_target is not None
                    pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
                        self.farmer.pool_sks_map[pool_pk], bytes(pool_target)
                    )
                else:
                    assert pospace.pool_contract_puzzle_hash is not None
                    pool_target = None
                    pool_target_signature = None

                request = farmer_protocol.DeclareProofOfSpace(
                    response.challenge_hash,
                    challenge_chain_sp,
                    signage_point_index,
                    reward_chain_sp,
                    pospace,
                    agg_sig_cc_sp,
                    agg_sig_rc_sp,
                    self.farmer.farmer_target,
                    pool_target,
                    pool_target_signature,
                )
                self.farmer.state_changed("proof", {"proof": request, "passed_filter": True})
                msg = make_msg(ProtocolMessageTypes.declare_proof_of_space, request)
                await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
                return
    else:
        # This is a response with block signatures
        for sk in self.farmer.get_private_keys():
            (
                foliage_block_data_hash,
                foliage_sig_harvester,
            ) = response.message_signatures[0]
            (
                foliage_transaction_block_hash,
                foliage_transaction_block_sig_harvester,
            ) = response.message_signatures[1]
            pk = sk.get_g1()
            if pk == response.farmer_pk:
                agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk)
                assert agg_pk == pospace.plot_public_key
                foliage_sig_farmer = AugSchemeMPL.sign(sk, foliage_block_data_hash, agg_pk)
                foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(sk, foliage_transaction_block_hash, agg_pk)
                foliage_agg_sig = AugSchemeMPL.aggregate([foliage_sig_harvester, foliage_sig_farmer])
                foliage_block_agg_sig = AugSchemeMPL.aggregate(
                    [foliage_transaction_block_sig_harvester, foliage_transaction_block_sig_farmer]
                )
                assert AugSchemeMPL.verify(agg_pk, foliage_block_data_hash, foliage_agg_sig)
                assert AugSchemeMPL.verify(agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)

                request_to_nodes = farmer_protocol.SignedValues(
                    computed_quality_string,
                    foliage_agg_sig,
                    foliage_block_agg_sig,
                )

                msg = make_msg(ProtocolMessageTypes.signed_values, request_to_nodes)
                await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
def blocking_lookup(filename: Path, plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
    # Uses the DiskProver object to lookup qualities. This is a blocking call,
    # so it should be run in a thread pool.
    try:
        plot_id = plot_info.prover.get_id()
        sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
            plot_id,
            new_challenge.challenge_hash,
            new_challenge.sp_hash,
        )
        try:
            quality_strings = plot_info.prover.get_qualities_for_challenge(sp_challenge_hash)
        except Exception as e:
            self.harvester.log.error(f"Error using prover object {e}")
            self.harvester.log.error(
                f"File: {filename} Plot ID: {plot_id.hex()}, "
                f"challenge: {sp_challenge_hash}, plot_info: {plot_info}"
            )
            return []

        responses: List[Tuple[bytes32, ProofOfSpace]] = []
        if quality_strings is not None:
            difficulty = new_challenge.difficulty
            sub_slot_iters = new_challenge.sub_slot_iters
            if plot_info.pool_contract_puzzle_hash is not None:
                # If we are pooling, override the difficulty and sub slot iters with the pool threshold info.
                # This will mean more proofs actually get found, but they are only submitted to the pool,
                # not the blockchain
                for pool_difficulty in new_challenge.pool_difficulties:
                    if pool_difficulty.pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash:
                        difficulty = pool_difficulty.difficulty
                        sub_slot_iters = pool_difficulty.sub_slot_iters

            # Found proofs of space (on average 1 is expected per plot)
            for index, quality_str in enumerate(quality_strings):
                required_iters: uint64 = calculate_iterations_quality(
                    self.harvester.constants.DIFFICULTY_CONSTANT_FACTOR,
                    quality_str,
                    plot_info.prover.get_size(),
                    difficulty,
                    new_challenge.sp_hash,
                )
                sp_interval_iters = calculate_sp_interval_iters(self.harvester.constants, sub_slot_iters)
                if required_iters < sp_interval_iters:
                    # Found a very good proof of space! will fetch the whole proof from disk,
                    # then send to farmer
                    try:
                        proof_xs = plot_info.prover.get_full_proof(sp_challenge_hash, index)
                    except Exception as e:
                        self.harvester.log.error(f"Exception fetching full proof for {filename}. {e}")
                        self.harvester.log.error(
                            f"File: {filename} Plot ID: {plot_id.hex()}, challenge: {sp_challenge_hash}, "
                            f"plot_info: {plot_info}"
                        )
                        continue

                    # Look up local_sk from plot to save locked memory
                    (
                        pool_public_key_or_puzzle_hash,
                        farmer_public_key,
                        local_master_sk,
                    ) = parse_plot_info(plot_info.prover.get_memo())
                    local_sk = master_sk_to_local_sk(local_master_sk)
                    include_taproot = plot_info.pool_contract_puzzle_hash is not None
                    plot_public_key = ProofOfSpace.generate_plot_public_key(
                        local_sk.get_g1(), farmer_public_key, include_taproot
                    )
                    responses.append(
                        (
                            quality_str,
                            ProofOfSpace(
                                sp_challenge_hash,
                                plot_info.pool_public_key,
                                plot_info.pool_contract_puzzle_hash,
                                plot_public_key,
                                uint8(plot_info.prover.get_size()),
                                proof_xs,
                            ),
                        )
                    )
        return responses
    except Exception as e:
        self.harvester.log.error(f"Unknown error: {e}")
        return []
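# A minimal sketch of the qualification test above: a quality string only becomes a
# submittable proof when its required_iters lands inside one signage point interval.
# Everything here reuses calls already present in blocking_lookup; `constants`,
# `quality_str`, and the numeric inputs are hypothetical stand-ins.
def proof_qualifies_sketch(constants, quality_str, k, difficulty, sp_hash, sub_slot_iters) -> bool:
    required_iters = calculate_iterations_quality(
        constants.DIFFICULTY_CONSTANT_FACTOR, quality_str, k, difficulty, sp_hash
    )
    sp_interval_iters = calculate_sp_interval_iters(constants, sub_slot_iters)
    # Smaller required_iters means a better proof. The pool_difficulties override above
    # substitutes the pool's (typically far lower) difficulty, so more proofs pass this
    # test and can be submitted as partials.
    return required_iters < sp_interval_iters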
async def new_proof_of_space(
    self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
    """
    This is a response from the harvester, for a NewChallenge. Here we check if the proof
    of space is sufficiently good, and if so, we ask for the whole proof.
    """
    if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
        self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
        self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))

    max_pos_per_sp = 5
    if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
        # This will likely never happen for any farmer with less than 10% of global space
        # It's meant to make testnets more stable
        self.farmer.log.info(
            f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
            f"{new_proof_of_space.sp_hash}"
        )
        return None

    if new_proof_of_space.sp_hash not in self.farmer.sps:
        self.farmer.log.warning(
            f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
        )
        return None

    sps = self.farmer.sps[new_proof_of_space.sp_hash]
    for sp in sps:
        computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
            self.farmer.constants,
            new_proof_of_space.challenge_hash,
            new_proof_of_space.sp_hash,
        )
        if computed_quality_string is None:
            self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
            return None

        self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1

        required_iters: uint64 = calculate_iterations_quality(
            self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
            computed_quality_string,
            new_proof_of_space.proof.size,
            sp.difficulty,
            new_proof_of_space.sp_hash,
        )

        # If the iters are good enough to make a block, proceed with the block making flow
        if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
            # Proceed to get the signatures for this PoSpace
            request = harvester_protocol.RequestSignatures(
                new_proof_of_space.plot_identifier,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
                [sp.challenge_chain_sp, sp.reward_chain_sp],
            )

            if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
                self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = []
            self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
                (
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.proof,
                )
            )
            self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
            self.farmer.quality_str_to_identifiers[computed_quality_string] = (
                new_proof_of_space.plot_identifier,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
                peer.peer_node_id,
            )
            self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))

            await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))

        p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
        if p2_singleton_puzzle_hash is not None:
            # Otherwise, send the proof of space to the pool
            # When we win a block, we also send the partial to the pool
            if p2_singleton_puzzle_hash not in self.farmer.pool_state:
                self.farmer.log.info(f"Did not find pool info for {p2_singleton_puzzle_hash}")
                return
            pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
            pool_url = pool_state_dict["pool_config"].pool_url
            if pool_url == "":
                return

            if pool_state_dict["current_difficulty"] is None:
                self.farmer.log.warning(
                    f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
                    f"check communication with the pool, skipping this partial to {pool_url}."
                )
                return

            required_iters = calculate_iterations_quality(
                self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
                computed_quality_string,
                new_proof_of_space.proof.size,
                pool_state_dict["current_difficulty"],
                new_proof_of_space.sp_hash,
            )
            if required_iters >= calculate_sp_interval_iters(
                self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
            ):
                self.farmer.log.info(
                    f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
                )
                return

            authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
            if authentication_token_timeout is None:
                self.farmer.log.warning(
                    f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
                    f", check communication with the pool."
                )
                return

            # Submit partial to pool
            is_eos = new_proof_of_space.signage_point_index == 0
            payload = PostPartialPayload(
                pool_state_dict["pool_config"].launcher_id,
                get_current_authentication_token(authentication_token_timeout),
                new_proof_of_space.proof,
                new_proof_of_space.sp_hash,
                is_eos,
                peer.peer_node_id,
            )

            # The plot key is 2/2 so we need the harvester's half of the signature
            m_to_sign = payload.get_hash()
            request = harvester_protocol.RequestSignatures(
                new_proof_of_space.plot_identifier,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
                [m_to_sign],
            )
            response: Any = await peer.request_signatures(request)
            if not isinstance(response, harvester_protocol.RespondSignatures):
                self.farmer.log.error(f"Invalid response from harvester: {response}")
                return

            assert len(response.message_signatures) == 1

            plot_signature: Optional[G2Element] = None
            for sk in self.farmer.get_private_keys():
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, True)
                    assert agg_pk == new_proof_of_space.proof.plot_public_key
                    sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
                    taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                    taproot_sig: G2Element = AugSchemeMPL.sign(taproot_sk, m_to_sign, agg_pk)

                    plot_signature = AugSchemeMPL.aggregate(
                        [sig_farmer, response.message_signatures[0][1], taproot_sig]
                    )
                    assert AugSchemeMPL.verify(agg_pk, m_to_sign, plot_signature)

            authentication_pk = pool_state_dict["pool_config"].authentication_public_key
            if authentication_pk is None:
                self.farmer.log.error(f"No authentication sk for {authentication_pk}")
                return
            authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(authentication_pk)]
            authentication_signature = AugSchemeMPL.sign(authentication_sk, m_to_sign)

            assert plot_signature is not None
            agg_sig: G2Element = AugSchemeMPL.aggregate([plot_signature, authentication_signature])

            post_partial_request: PostPartialRequest = PostPartialRequest(payload, agg_sig)
            post_partial_body = json.dumps(post_partial_request.to_json_dict())
            self.farmer.log.info(
                f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
            )
            pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
            pool_state_dict["points_found_24h"].append((time.time(), pool_state_dict["current_difficulty"]))
            headers = {
                "content-type": "application/json;",
            }
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.post(f"{pool_url}/partial", data=post_partial_body, headers=headers) as resp:
                        if resp.ok:
                            pool_response: Dict = json.loads(await resp.text())
                            self.farmer.log.info(f"Pool response: {pool_response}")
                            if "error_code" in pool_response:
                                self.farmer.log.error(
                                    f"Error in pooling: "
                                    f"{pool_response['error_code'], pool_response['error_message']}"
                                )
                                pool_state_dict["pool_errors_24h"].append(pool_response)
                                if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
                                    self.farmer.log.error(
                                        "Partial not good enough, forcing pool farmer update to "
                                        "get our current difficulty."
                                    )
                                    pool_state_dict["next_farmer_update"] = 0
                                    await self.farmer.update_pool_state()
                            else:
                                new_difficulty = pool_response["new_difficulty"]
                                pool_state_dict["points_acknowledged_since_start"] += new_difficulty
                                pool_state_dict["points_acknowledged_24h"].append((time.time(), new_difficulty))
                                pool_state_dict["current_difficulty"] = new_difficulty
                        else:
                            self.farmer.log.error(f"Error sending partial to {pool_url}, {resp.status}")
            except Exception as e:
                self.farmer.log.error(f"Error connecting to pool: {e}")
                return

            return
def create_plots(args, root_path, use_datetime=True, test_private_keys: Optional[List] = None):
    config_filename = config_path_for_filename(root_path, "config.yaml")
    config = load_config(root_path, config_filename)

    if args.tmp2_dir is None:
        args.tmp2_dir = args.tmp_dir

    farmer_public_key: G1Element
    if args.farmer_public_key is not None:
        farmer_public_key = G1Element.from_bytes(bytes.fromhex(args.farmer_public_key))
    else:
        farmer_public_key = get_farmer_public_key(args.alt_fingerprint)

    pool_public_key: Optional[G1Element] = None
    pool_contract_puzzle_hash: Optional[bytes32] = None
    if args.pool_public_key is not None:
        if args.pool_contract_address is not None:
            raise RuntimeError("Choose one of pool_contract_address and pool_public_key")
        pool_public_key = G1Element.from_bytes(bytes.fromhex(args.pool_public_key))
    else:
        if args.pool_contract_address is None:
            # If nothing is set, farms to the provided key (or the first key)
            pool_public_key = get_pool_public_key(args.alt_fingerprint)
        else:
            # If the pool contract puzzle hash is set, use that
            pool_contract_puzzle_hash = decode_puzzle_hash(args.pool_contract_address)

    assert (pool_public_key is None) != (pool_contract_puzzle_hash is None)

    num = args.num

    if args.size < config["min_mainnet_k_size"] and test_private_keys is None:
        log.warning(f"Creating plots with size k={args.size}, which is less than the minimum required for mainnet")
    if args.size < 22:
        log.warning("k under 22 is not supported. Increasing k to 22")
        args.size = 22

    if pool_public_key is not None:
        log.info(
            f"Creating {num} plots of size {args.size}, pool public key: "
            f"{bytes(pool_public_key).hex()} farmer public key: {bytes(farmer_public_key).hex()}"
        )
    else:
        assert pool_contract_puzzle_hash is not None
        log.info(
            f"Creating {num} plots of size {args.size}, pool contract address: "
            f"{args.pool_contract_address} farmer public key: {bytes(farmer_public_key).hex()}"
        )

    tmp_dir_created = False
    if not args.tmp_dir.exists():
        mkdir(args.tmp_dir)
        tmp_dir_created = True

    tmp2_dir_created = False
    if not args.tmp2_dir.exists():
        mkdir(args.tmp2_dir)
        tmp2_dir_created = True

    mkdir(args.final_dir)

    finished_filenames = []
    for i in range(num):
        # Generate a random master secret key
        if test_private_keys is not None:
            assert len(test_private_keys) == num
            sk: PrivateKey = test_private_keys[i]
        else:
            sk = AugSchemeMPL.key_gen(token_bytes(32))

        # The plot public key is the combination of the harvester and farmer keys
        # New plots will also include a taproot of the keys, for extensibility
        include_taproot: bool = pool_contract_puzzle_hash is not None
        plot_public_key = ProofOfSpace.generate_plot_public_key(
            master_sk_to_local_sk(sk).get_g1(), farmer_public_key, include_taproot
        )

        # The plot id is based on the harvester, farmer, and pool keys
        if pool_public_key is not None:
            plot_id: bytes32 = ProofOfSpace.calculate_plot_id_pk(pool_public_key, plot_public_key)
            plot_memo: bytes32 = stream_plot_info_pk(pool_public_key, farmer_public_key, sk)
        else:
            assert pool_contract_puzzle_hash is not None
            plot_id = ProofOfSpace.calculate_plot_id_ph(pool_contract_puzzle_hash, plot_public_key)
            plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, sk)

        if args.plotid is not None:
            log.info(f"Debug plot ID: {args.plotid}")
            plot_id = bytes32(bytes.fromhex(args.plotid))

        if args.memo is not None:
            log.info(f"Debug memo: {args.memo}")
            plot_memo = bytes.fromhex(args.memo)

        # Log the memo, which can be useful for dev debugging
        plot_memo_str: str = plot_memo.hex()
        log.info(f"Memo: {plot_memo_str}")

        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")

        if use_datetime:
            filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
        else:
            filename = f"plot-k{args.size}-{plot_id}.plot"
        full_path: Path = args.final_dir / filename

        resolved_final_dir: str = str(Path(args.final_dir).resolve())
        plot_directories_list: str = config["harvester"]["plot_directories"]

        if args.exclude_final_dir:
            log.info(f"NOT adding directory {resolved_final_dir} to harvester for farming")
            if resolved_final_dir in plot_directories_list:
                log.warning(f"Directory {resolved_final_dir} already exists for harvester, please remove it manually")
        else:
            if resolved_final_dir not in plot_directories_list:
                # Adds the directory to the plot directories if it is not present
                log.info(f"Adding directory {resolved_final_dir} to harvester for farming")
                config = add_plot_directory(resolved_final_dir, root_path)

        if not full_path.exists():
            log.info(f"Starting plot {i + 1}/{num}")
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                plot_memo,
                plot_id,
                args.buffer,
                args.buckets,
                args.stripe_size,
                args.num_threads,
                args.nobitfield,
            )
            finished_filenames.append(filename)
        else:
            log.info(f"Plot {filename} already exists")

    log.info("Summary:")

    if tmp_dir_created:
        try:
            args.tmp_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty.")

    if tmp2_dir_created:
        try:
            args.tmp2_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty.")

    log.info(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log.info(filename)
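# A hedged usage sketch for create_plots above. The Namespace fields mirror the ones
# the create_pool_plot test helper earlier in this file sets; the values are
# illustrative and `my_root_path` is a hypothetical chia root directory.
def make_one_test_plot_sketch(my_root_path: Path) -> None:
    args = Namespace()
    args.size = 22                     # k-size; anything under 22 is bumped up to 22
    args.num = 1
    args.buffer = 100                  # plotting memory, in MiB
    args.tmp_dir = Path("/tmp/plot-tmp")
    args.tmp2_dir = None               # None falls back to tmp_dir inside create_plots
    args.final_dir = Path("/tmp/plots")
    args.plotid = None                 # hex string only for debug plots
    args.memo = None
    args.buckets = 0
    args.stripe_size = 2000
    args.num_threads = 0
    args.nobitfield = False
    args.exclude_final_dir = False
    args.farmer_public_key = None      # None -> derived from the local keychain
    args.pool_public_key = None
    args.pool_contract_address = None  # exactly one of pool pk / contract address ends up set
    args.alt_fingerprint = None
    create_plots(args, my_root_path, use_datetime=True)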
def process_file(file_path: Path) -> Dict:
    new_provers: Dict[Path, PlotInfo] = {}
    filename_str = str(file_path)
    if self.match_str is not None and self.match_str not in filename_str:
        return new_provers
    if file_path.exists():
        if (
            file_path in self.failed_to_open_filenames
            and (time.time() - self.failed_to_open_filenames[file_path]) < 1200
        ):
            # Try once every 20 minutes to open the file
            return new_provers
        if file_path in self.plots:
            try:
                stat_info = file_path.stat()
            except Exception as e:
                log.error(f"Failed to open file {file_path}. {e}")
                return new_provers
            if stat_info.st_mtime == self.plots[file_path].time_modified:
                new_provers[file_path] = self.plots[file_path]
                return new_provers

        entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
        if entry is not None:
            loaded_parent, duplicates = entry
            if str(file_path.parent) in duplicates:
                log.debug(f"Skip duplicated plot {str(file_path)}")
                return new_provers
        try:
            with counter_lock:
                if result.processed_files >= self.refresh_parameter.batch_size:
                    result.remaining_files += 1
                    return new_provers
                result.processed_files += 1

            prover = DiskProver(str(file_path))
            log.debug(f"process_file {str(file_path)}")

            expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
            stat_info = file_path.stat()

            # TODO: consider checking if the file was just written to (which would mean that the file is still
            # being copied). A segfault might happen in this edge case.

            if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                log.warning(
                    f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                    f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                )
                return new_provers

            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that have the correct keys associated with them
            if self.farmer_public_keys is not None and farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
                pool_contract_puzzle_hash = None
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_public_key = None
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if (
                self.pool_public_keys is not None
                and pool_public_key is not None
                and pool_public_key not in self.pool_public_keys
            ):
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            stat_info = file_path.stat()
            local_sk = master_sk_to_local_sk(local_master_sk)

            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
            )

            with self.plot_filename_paths_lock:
                if file_path.name not in self.plot_filename_paths:
                    self.plot_filename_paths[file_path.name] = (str(Path(prover.get_filename()).parent), set())
                else:
                    self.plot_filename_paths[file_path.name][1].add(str(Path(prover.get_filename()).parent))
                if len(self.plot_filename_paths[file_path.name][1]) > 0:
                    log.warning(
                        f"Have multiple copies of the plot {file_path} in "
                        f"{self.plot_filename_paths[file_path.name][1]}."
                    )
                    return new_provers

            new_provers[file_path] = PlotInfo(
                prover,
                pool_public_key,
                pool_contract_puzzle_hash,
                plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
            )

            with counter_lock:
                result.loaded_plots += 1
                result.loaded_size += stat_info.st_size
        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file {file_path}. {e} {tb}")
            self.failed_to_open_filenames[file_path] = int(time.time())
            return new_provers

        log.info(f"Found plot {file_path} of size {new_provers[file_path].prover.get_size()}")

        if self.show_memo:
            plot_memo: bytes32
            if pool_contract_puzzle_hash is None:
                plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
            else:
                plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
            plot_memo_str: str = plot_memo.hex()
            log.info(f"Memo: {plot_memo_str}")

        return new_provers
    return new_provers
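# A minimal sketch of how the batch counters above could drive a refresh loop: each
# pass processes at most `batch_size` files and counts the rest as remaining, so a
# caller can keep iterating until nothing is left. The loop shape is an assumption,
# not the actual PlotManager refresh thread; only `batch_size`, `processed_files`,
# and `remaining_files` come from the code above.
def refresh_in_batches_sketch(all_paths: List[Path], batch_size: int) -> None:
    remaining = list(all_paths)
    while remaining:
        batch, remaining = remaining[:batch_size], remaining[batch_size:]
        for path in batch:
            process_file(path)  # the function defined above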
def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Dict[Path, int],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    match_str: Optional[str],
    show_memo: bool,
    root_path: Path,
    open_no_key_filenames=False,
) -> Tuple[bool, Dict[Path, PlotInfo], Dict[Path, int], Set[Path]]:
    start_time = time.time()
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    log.info(f'Searching directories {config_file["plot_directories"]}')

    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    total_size = 0
    new_provers: Dict[Path, PlotInfo] = {}
    plot_ids: Set[bytes32] = set()

    if match_str is not None:
        log.info(f'Only loading plots that contain "{match_str}" in the file or directory name')

    for filename in all_filenames:
        filename_str = str(filename)
        if match_str is not None and match_str not in filename_str:
            continue
        if filename.exists():
            if filename in failed_to_open_filenames and (time.time() - failed_to_open_filenames[filename]) < 1200:
                # Try once every 20 minutes to open the file
                continue
            if filename in provers:
                try:
                    stat_info = filename.stat()
                except Exception as e:
                    log.error(f"Failed to open file {filename}. {e}")
                    continue
                if stat_info.st_mtime == provers[filename].time_modified:
                    total_size += stat_info.st_size
                    new_provers[filename] = provers[filename]
                    plot_ids.add(provers[filename].prover.get_id())
                    continue
            try:
                prover = DiskProver(str(filename))
                expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
                stat_info = filename.stat()

                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.

                if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot {filename}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    continue

                if prover.get_id() in plot_ids:
                    log.warning(f"Have multiple copies of the plot {filename}, not adding it.")
                    continue

                (
                    pool_public_key_or_puzzle_hash,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())

                # Only use plots that have the correct keys associated with them
                if farmer_public_keys is not None and farmer_public_key not in farmer_public_keys:
                    log.warning(f"Plot {filename} has a farmer public key that is not in the farmer's pk list.")
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                    pool_public_key = pool_public_key_or_puzzle_hash
                    pool_contract_puzzle_hash = None
                else:
                    assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                    pool_public_key = None
                    pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

                if (
                    pool_public_keys is not None
                    and pool_public_key is not None
                    and pool_public_key not in pool_public_keys
                ):
                    log.warning(f"Plot {filename} has a pool public key that is not in the farmer's pool pk list.")
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key
                )
                new_provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    pool_contract_puzzle_hash,
                    plot_public_key,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                plot_ids.add(prover.get_id())
                total_size += stat_info.st_size
                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file {filename}. {e} {tb}")
                failed_to_open_filenames[filename] = int(time.time())
                continue

            log.info(f"Found plot {filename} of size {new_provers[filename].prover.get_size()}")

            if show_memo:
                plot_memo: bytes32
                if pool_contract_puzzle_hash is None:
                    plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
                else:
                    plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
                plot_memo_str: str = plot_memo.hex()
                log.info(f"Memo: {plot_memo_str}")

    log.info(
        f"Loaded a total of {len(new_provers)} plots of size {total_size / (1024 ** 4)} TiB, in"
        f" {time.time()-start_time} seconds"
    )
    return changed, new_provers, failed_to_open_filenames, no_key_filenames
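# A hedged usage sketch for load_plots above, assuming `my_root_path` points at a chia
# root directory with a harvester config. Passing empty caches means every plot gets
# (re)opened; on later calls, pass the previous return values back in so unchanged
# plots and recently failed files are skipped.
changed, provers, failed, no_key = load_plots(
    provers={},
    failed_to_open_filenames={},
    farmer_public_keys=None,  # None skips the farmer key check
    pool_public_keys=None,    # None skips the pool key check
    match_str=None,
    show_memo=False,
    root_path=my_root_path,
)
if changed:
    log.info(f"Plot set changed: now tracking {len(provers)} plots")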