def _add_plot(
    self, str_path: str, plot_sk: PrivateKey, pool_pk: Optional[PublicKey]
) -> bool:
    plot_config = load_config(self.root_path, "plots.yaml")

    if pool_pk is None:
        # Recover the pool key: the candidate that reproduces this plot's id is the right one
        pr = DiskProver(str_path)
        for pool_pk_cand in self.pool_pubkeys:
            if (
                ProofOfSpace.calculate_plot_seed(
                    pool_pk_cand, plot_sk.get_public_key()
                )
                == pr.get_id()
            ):
                pool_pk = pool_pk_cand
                break
        if pool_pk is None:
            return False

    plot_config["plots"][str_path] = {
        "sk": bytes(plot_sk).hex(),
        "pool_pk": bytes(pool_pk).hex(),
    }
    save_config(self.root_path, "plots.yaml", plot_config)
    self._refresh_plots()
    return True

def test_faulty_plot_doesnt_crash(self):
    if Path("myplot.dat").exists():
        Path("myplot.dat").unlink()
    if Path("myplotbad.dat").exists():
        Path("myplotbad.dat").unlink()
    plot_id: bytes = bytes([i for i in range(32, 64)])
    pl = DiskPlotter()
    pl.create_plot_disk(
        ".",
        ".",
        ".",
        "myplot.dat",
        21,
        bytes([1, 2, 3, 4, 5]),
        plot_id,
        300,
        32,
        8192,
        8,
        False,
    )
    f = open("myplot.dat", "rb")
    all_data = bytearray(f.read())
    f.close()
    assert len(all_data) > 20000000
    # Corrupt a stretch of the plot with random bytes
    all_data_bad = (
        all_data[:20000000] + bytearray(token_bytes(10000)) + all_data[20100000:]
    )
    f_bad = open("myplotbad.dat", "wb")
    f_bad.write(all_data_bad)
    f_bad.close()

    pr = DiskProver(str(Path("myplotbad.dat")))

    iterations: int = 50000
    v = Verifier()
    successes = 0
    failures = 0
    for i in range(iterations):
        if i % 100 == 0:
            print(i)
        challenge = sha256(i.to_bytes(4, "big")).digest()
        try:
            for index, quality in enumerate(
                pr.get_qualities_for_challenge(challenge)
            ):
                proof = pr.get_full_proof(challenge, index)
                computed_quality = v.validate_proof(
                    plot_id, pr.get_size(), challenge, proof
                )
                if computed_quality == quality:
                    successes += 1
                else:
                    print("Did not validate")
                    failures += 1
        except Exception as e:
            print(f"Exception: {e}")
            failures += 1
    print(f"Successes: {successes}")
    print(f"Failures: {failures}")

def main():
    """
    Script for checking all plots in the plots.yaml file.
    Specify a number of challenges to test for each plot.
    """
    parser = argparse.ArgumentParser(description="Chia plot checking script.")
    parser.add_argument(
        "-n", "--num", help="Number of challenges", type=int, default=1000
    )
    args = parser.parse_args()

    v = Verifier()
    if os.path.isfile(plot_config_filename):
        plot_config = safe_load(open(plot_config_filename, "r"))
        for plot_filename, plot_info in plot_config["plots"].items():
            plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed(
                PublicKey.from_bytes(bytes.fromhex(plot_info["pool_pk"])),
                PrivateKey.from_bytes(
                    bytes.fromhex(plot_info["sk"])
                ).get_public_key(),
            )
            # Try the relative path first
            full_path: str = os.path.join(plot_root, plot_filename)
            if not os.path.isfile(full_path):
                # Fall back to the absolute path
                full_path = plot_filename
                if not os.path.isfile(full_path):
                    print(f"Plot file {full_path} not found.")
                    continue
            pr = DiskProver(full_path)

            total_proofs = 0
            try:
                for i in range(args.num):
                    challenge = sha256(i.to_bytes(32, "big")).digest()
                    for index, quality in enumerate(
                        pr.get_qualities_for_challenge(challenge)
                    ):
                        proof = pr.get_full_proof(challenge, index)
                        total_proofs += 1
                        ver_quality = v.validate_proof(
                            plot_seed, pr.get_size(), challenge, proof
                        )
                        assert quality == ver_quality
            except BaseException as e:
                print(
                    f"{type(e)}: {e} error in proving/verifying for plot {plot_filename}"
                )
            print(
                f"{plot_filename}: Proofs {total_proofs} / {args.num}, "
                f"{round(total_proofs/float(args.num), 4)}"
            )
    else:
        print(f"No plot file found at {plot_config_filename}")

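# A minimal usage sketch for the checker above. Hedged: the script name
# "check_plots.py" is a hypothetical stand-in, and plot_config_filename /
# plot_root are module-level globals the script is assumed to define.
#
#   python check_plots.py --num 500
#
# Each plot listed in plots.yaml then prints
# "<filename>: Proofs <found> / <num>, <ratio>"; over many challenges a healthy
# plot's ratio should hover near 1.0, since a plot yields roughly one proof per
# challenge on average.
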
async def harvester_handshake(
    self, harvester_handshake: harvester_protocol.HarvesterHandshake
):
    """
    Handshake between the harvester and farmer. The harvester receives the pool public keys,
    which must be put into the plots, before the plotting process begins. We cannot
    use any plots which don't have one of the pool keys.
    """
    for partial_filename, plot_config in self.plot_config["plots"].items():
        if "plot_root" in self.config:
            filename = os.path.join(self.config["plot_root"], partial_filename)
        else:
            filename = os.path.join(ROOT_DIR, "plots", partial_filename)
        pool_pubkey = PublicKey.from_bytes(bytes.fromhex(plot_config["pool_pk"]))

        # Only use plots that have the correct pools associated with them
        if pool_pubkey in harvester_handshake.pool_pubkeys:
            if os.path.isfile(filename):
                self.provers[partial_filename] = DiskProver(filename)
            else:
                log.warning(f"Plot at {filename} does not exist.")
        else:
            log.warning(
                f"Plot {filename} has a pool key that is not in the farmer's pool_pk list."
            )

async def request_proof_of_space(
    self, request: harvester_protocol.RequestProofOfSpace
):
    """
    The farmer requests a proof of space, for one of the plots.
    We look up the correct plot based on the plot id and response number, lookup the proof,
    and return it.
    """
    response: Optional[harvester_protocol.RespondProofOfSpace] = None
    challenge_hash = request.challenge_hash
    filename = Path(request.plot_id).resolve()
    index = request.response_number
    proof_xs: bytes
    plot_info = self.provers[filename]

    try:
        try:
            proof_xs = plot_info.prover.get_full_proof(challenge_hash, index)
        except RuntimeError:
            # The prover object may be stale (e.g. the plot was replaced); reopen it
            prover = DiskProver(str(filename))
            self.provers[filename] = PlotInfo(
                prover,
                plot_info.pool_public_key,
                plot_info.farmer_public_key,
                plot_info.plot_public_key,
                plot_info.local_sk,
                plot_info.file_size,
                plot_info.time_modified,
            )
            proof_xs = self.provers[filename].prover.get_full_proof(
                challenge_hash, index
            )
    except KeyError:
        log.warning(f"KeyError plot {filename} does not exist.")

    plot_info = self.provers[filename]
    plot_public_key = ProofOfSpace.generate_plot_public_key(
        plot_info.local_sk.get_g1(), plot_info.farmer_public_key
    )

    proof_of_space: ProofOfSpace = ProofOfSpace(
        challenge_hash,
        plot_info.pool_public_key,
        plot_public_key,
        uint8(self.provers[filename].prover.get_size()),
        proof_xs,
    )
    response = harvester_protocol.RespondProofOfSpace(
        request.plot_id,
        request.response_number,
        proof_of_space,
    )
    if response:
        yield OutboundMessage(
            NodeType.FARMER,
            Message("respond_proof_of_space", response),
            Delivery.RESPOND,
        )

def blocking_lookup(filename: Path, prover: DiskProver) -> Optional[List]:
    # Uses the DiskProver object to lookup qualities. This is a blocking call,
    # so it should be run in a threadpool.
    try:
        quality_strings = prover.get_qualities_for_challenge(
            new_challenge.challenge_hash
        )
    except RuntimeError:
        log.error("Error using prover object. Reinitializing prover object.")
        try:
            # Retry with a freshly opened prover, and keep it for future lookups
            self.provers[filename] = DiskProver(str(filename))
            quality_strings = self.provers[filename].get_qualities_for_challenge(
                new_challenge.challenge_hash
            )
        except RuntimeError:
            log.error(f"Retry-Error using prover object on {filename}. Giving up.")
            quality_strings = None
    return quality_strings

async def request_proof_of_space(
    self, request: harvester_protocol.RequestProofOfSpace
):
    """
    The farmer requests a proof of space, for one of the proofs that we found.
    We look up the correct plot based on the quality, lookup the proof, and return it.
    """
    response: Optional[harvester_protocol.RespondProofOfSpace] = None
    try:
        # Using the quality string, find the right plot and index from our solutions
        challenge_hash, filename, index = self.challenge_hashes[
            request.quality_string
        ]
    except KeyError:
        log.warning(f"Quality string {request.quality_string} not found")
        return
    if index is not None:
        proof_xs: bytes
        try:
            try:
                proof_xs = self.provers[filename].get_full_proof(
                    challenge_hash, index
                )
            except RuntimeError:
                self.provers[filename] = DiskProver(str(filename))
                proof_xs = self.provers[filename].get_full_proof(
                    challenge_hash, index
                )
        except KeyError:
            log.warning(f"KeyError plot {filename} does not exist.")
        pool_pubkey = PublicKey.from_bytes(
            bytes.fromhex(self.plot_config["plots"][filename]["pool_pk"])
        )
        plot_pubkey = PrivateKey.from_bytes(
            bytes.fromhex(self.plot_config["plots"][filename]["sk"])
        ).get_public_key()
        proof_of_space: ProofOfSpace = ProofOfSpace(
            challenge_hash,
            pool_pubkey,
            plot_pubkey,
            uint8(self.provers[filename].get_size()),
            proof_xs,
        )

        response = harvester_protocol.RespondProofOfSpace(
            request.quality_string, proof_of_space
        )
    if response:
        yield OutboundMessage(
            NodeType.FARMER,
            Message("respond_proof_of_space", response),
            Delivery.RESPOND,
        )

def test_k_21(self):
    challenge: bytes = bytes([i for i in range(0, 32)])
    plot_seed: bytes = bytes(
        [
            5, 104, 52, 4, 51, 55, 23, 84, 91, 10, 111, 12, 13, 222, 151, 16,
            228, 211, 254, 45, 92, 198, 204, 10, 9, 10, 11, 129, 139, 171, 15, 23,
        ]
    )
    pl = DiskPlotter()
    pl.create_plot_disk(
        ".", ".", ".", "myplot.dat", 21, bytes([1, 2, 3, 4, 5]), plot_seed, 2 * 1024
    )
    pl = None
    pr = DiskProver(str(Path("myplot.dat")))

    total_proofs: int = 0
    iterations: int = 5000
    v = Verifier()
    for i in range(iterations):
        if i % 100 == 0:
            print(i)
        challenge = sha256(i.to_bytes(4, "big")).digest()
        for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
            proof = pr.get_full_proof(challenge, index)
            assert len(proof) == 8 * pr.get_size()
            computed_quality = v.validate_proof(
                plot_seed, pr.get_size(), challenge, proof
            )
            assert computed_quality == quality
            total_proofs += 1

    print(
        f"total proofs {total_proofs} out of {iterations}: "
        f"{total_proofs / iterations}"
    )
    assert total_proofs == 4647
    pr = None
    Path("myplot.dat").unlink()

async def new_challenge(self, new_challenge: harvester_protocol.NewChallenge):
    """
    The harvester receives a new challenge from the farmer, and looks up the quality
    for any proofs of space that are found in the plots. If proofs are found, a
    ChallengeResponse message is sent for each of the proofs found.
    """
    challenge_size = len(new_challenge.challenge_hash)
    if challenge_size != 32:
        raise ValueError(
            f"Invalid challenge size {challenge_size}, 32 was expected"
        )
    all_responses = []
    for filename, prover in self.provers.items():
        try:
            quality_strings = prover.get_qualities_for_challenge(
                new_challenge.challenge_hash
            )
        except RuntimeError:
            log.error(
                f"Error using prover object on {filename}. Reinitializing prover object."
            )
            quality_strings = None
            try:
                # Retry with a freshly opened prover, and keep it for future lookups
                self.provers[filename] = DiskProver(filename)
                quality_strings = self.provers[
                    filename
                ].get_qualities_for_challenge(new_challenge.challenge_hash)
            except RuntimeError:
                log.error(
                    f"Retry-Error using prover object on {filename}. Giving up."
                )
                quality_strings = None
        if quality_strings is not None:
            for index, quality_str in enumerate(quality_strings):
                quality = ProofOfSpace.quality_str_to_quality(
                    new_challenge.challenge_hash, quality_str
                )
                self.challenge_hashes[quality] = (
                    new_challenge.challenge_hash,
                    filename,
                    uint8(index),
                )
                response: harvester_protocol.ChallengeResponse = harvester_protocol.ChallengeResponse(
                    new_challenge.challenge_hash, quality, prover.get_size()
                )
                all_responses.append(response)
    for response in all_responses:
        yield OutboundMessage(
            NodeType.FARMER,
            Message("challenge_response", response),
            Delivery.RESPOND,
        )

def load_plots(
    config_file: Dict,
    plot_config_file: Dict,
    pool_pubkeys: Optional[List[PublicKey]],
    root_path: Path,
) -> Tuple[Dict[str, DiskProver], List[str], List[str]]:
    provers: Dict[str, DiskProver] = {}
    failed_to_open_filenames: List[str] = []
    not_found_filenames: List[str] = []
    for partial_filename_str, plot_config in plot_config_file["plots"].items():
        plot_root = path_from_root(root_path, config_file.get("plot_root", "."))
        partial_filename = plot_root / partial_filename_str
        potential_filenames = [
            partial_filename,
            path_from_root(plot_root, partial_filename_str),
        ]
        pool_pubkey = PublicKey.from_bytes(bytes.fromhex(plot_config["pool_pk"]))

        # Only use plots that have the correct pools associated with them
        if pool_pubkeys is not None and pool_pubkey not in pool_pubkeys:
            log.warning(
                f"Plot {partial_filename} has a pool key that is not in the farmer's pool_pk list."
            )
            continue

        found = False
        failed_to_open = False
        for filename in potential_filenames:
            if filename.exists():
                try:
                    provers[partial_filename_str] = DiskProver(str(filename))
                except Exception as e:
                    log.error(f"Failed to open file {filename}. {e}")
                    failed_to_open = True
                    failed_to_open_filenames.append(partial_filename_str)
                    break
                log.info(
                    f"Loaded plot {filename} of size {provers[partial_filename_str].get_size()}"
                )
                found = True
                break
        if not found and not failed_to_open:
            log.warning(f"Plot at {potential_filenames} does not exist.")
            not_found_filenames.append(partial_filename_str)
    return (provers, failed_to_open_filenames, not_found_filenames)

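# A minimal sketch of driving the load_plots() above from harvester start-up.
# Hedged: the config filenames and the summary log line are illustrative
# assumptions, not taken from the source.
#
# config = load_config(root_path, "config.yaml", "harvester")
# plot_config = load_config(root_path, "plots.yaml")
# # Passing pool_pubkeys=None skips the pool-key filter entirely.
# provers, failed, missing = load_plots(config, plot_config, None, root_path)
# log.info(
#     f"{len(provers)} plots loaded, {len(failed)} failed to open, "
#     f"{len(missing)} not found"
# )
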
async def lookup_challenge(
    filename: Path, prover: DiskProver
) -> List[harvester_protocol.ChallengeResponse]:
    # Executes a DiskProver lookup in a threadpool, and returns responses
    all_responses: List[harvester_protocol.ChallengeResponse] = []
    quality_strings = await loop.run_in_executor(
        self.executor, blocking_lookup, filename, prover
    )
    if quality_strings is not None:
        for index, quality_str in enumerate(quality_strings):
            response: harvester_protocol.ChallengeResponse = harvester_protocol.ChallengeResponse(
                new_challenge.challenge_hash,
                str(filename),
                uint8(index),
                quality_str,
                prover.get_size(),
            )
            all_responses.append(response)
    return all_responses

async def harvester_handshake(
    self, harvester_handshake: harvester_protocol.HarvesterHandshake
):
    """
    Handshake between the harvester and farmer. The harvester receives the pool public keys,
    which must be put into the plots, before the plotting process begins. We cannot
    use any plots which don't have one of the pool keys.
    """
    for partial_filename_str, plot_config in self.plot_config["plots"].items():
        plot_root = path_from_root(
            DEFAULT_ROOT_PATH, self.config.get("plot_root", ".")
        )
        partial_filename = plot_root / partial_filename_str
        potential_filenames = [
            partial_filename,
            path_from_root(plot_root, partial_filename_str),
        ]
        pool_pubkey = PublicKey.from_bytes(bytes.fromhex(plot_config["pool_pk"]))

        # Only use plots that have the correct pools associated with them
        if pool_pubkey not in harvester_handshake.pool_pubkeys:
            log.warning(
                f"Plot {partial_filename} has a pool key that is not in the farmer's pool_pk list."
            )
            continue

        found = False
        failed_to_open = False
        for filename in potential_filenames:
            if filename.exists():
                try:
                    self.provers[partial_filename_str] = DiskProver(str(filename))
                except ValueError:
                    log.error(f"Failed to open file {filename}.")
                    failed_to_open = True
                    break
                log.info(
                    f"Farming plot {filename} of size {self.provers[partial_filename_str].get_size()}"
                )
                found = True
                break
        if not found and not failed_to_open:
            log.warning(f"Plot at {potential_filenames} does not exist.")

def load_plots(
    config_file: Dict, plot_config_file: Dict, pool_pubkeys: List[PublicKey]
) -> Dict[str, DiskProver]:
    # Keys are the partial filename strings from plots.yaml
    provers: Dict[str, DiskProver] = {}
    for partial_filename_str, plot_config in plot_config_file["plots"].items():
        plot_root = path_from_root(
            DEFAULT_ROOT_PATH, config_file.get("plot_root", ".")
        )
        partial_filename = plot_root / partial_filename_str
        potential_filenames = [
            partial_filename,
            path_from_root(plot_root, partial_filename_str),
        ]
        pool_pubkey = PublicKey.from_bytes(bytes.fromhex(plot_config["pool_pk"]))

        # Only use plots that have the correct pools associated with them
        if pool_pubkey not in pool_pubkeys:
            log.warning(
                f"Plot {partial_filename} has a pool key that is not in the farmer's pool_pk list."
            )
            continue

        found = False
        failed_to_open = False
        for filename in potential_filenames:
            if filename.exists():
                try:
                    provers[partial_filename_str] = DiskProver(str(filename))
                except ValueError as e:
                    log.error(f"Failed to open file {filename}. {e}")
                    failed_to_open = True
                    break
                log.info(
                    f"Loaded plot {filename} of size {provers[partial_filename_str].get_size()}"
                )
                found = True
                break
        if not found and not failed_to_open:
            log.warning(f"Plot at {potential_filenames} does not exist.")
    return provers

async def harvester_handshake(
    self, harvester_handshake: harvester_protocol.HarvesterHandshake
):
    """
    Handshake between the harvester and farmer. The harvester receives the pool public keys,
    which must be put into the plots, before the plotting process begins. We cannot
    use any plots which don't have one of the pool keys.
    """
    for partial_filename, plot_config in self.plot_config["plots"].items():
        potential_filenames = [partial_filename]
        if "plot_root" in self.config:
            potential_filenames.append(
                os.path.join(self.config["plot_root"], partial_filename)
            )
        else:
            potential_filenames.append(
                os.path.join(ROOT_DIR, "plots", partial_filename)
            )
        pool_pubkey = PublicKey.from_bytes(bytes.fromhex(plot_config["pool_pk"]))

        # Only use plots that have the correct pools associated with them
        if pool_pubkey not in harvester_handshake.pool_pubkeys:
            log.warning(
                f"Plot {partial_filename} has a pool key that is not in the farmer's pool_pk list."
            )
            continue

        found = False
        for filename in potential_filenames:
            if os.path.isfile(filename):
                self.provers[partial_filename] = DiskProver(filename)
                log.info(
                    f"Farming plot {filename} of size {self.provers[partial_filename].get_size()}"
                )
                found = True
                break
        if not found:
            log.warning(f"Plot at {potential_filenames} does not exist.")

class Harvester:
    config: Dict
    plot_config: Dict
    provers: Dict[str, DiskProver]
    failed_to_open_filenames: List[str]
    not_found_filenames: List[str]
    challenge_hashes: Dict[bytes32, Tuple[bytes32, str, uint8]]
    pool_pubkeys: List[PublicKey]
    root_path: Path
    _plot_notification_task: asyncio.Future
    _is_shutdown: bool
    executor: concurrent.futures.ThreadPoolExecutor
    state_changed_callback: Optional[Callable]

    def __init__(self, config: Dict, plot_config: Dict, root_path: Path):
        self.config = config
        self.plot_config = plot_config
        self.root_path = root_path

        # From filename to prover
        self.provers = {}
        self.failed_to_open_filenames = []
        self.not_found_filenames = []

        # From quality string to (challenge_hash, filename, index)
        self.challenge_hashes = {}
        self._plot_notification_task = asyncio.ensure_future(
            self._plot_notification()
        )
        self._is_shutdown = False
        self.global_connections: Optional[PeerConnections] = None
        self.pool_pubkeys = []
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=10)
        self.state_changed_callback = None
        self.server = None

    def _set_state_changed_callback(self, callback: Callable):
        self.state_changed_callback = callback
        if self.global_connections is not None:
            self.global_connections.set_state_changed_callback(callback)

    def _state_changed(self, change: str):
        if self.state_changed_callback is not None:
            self.state_changed_callback(change)

    async def _plot_notification(self):
        """
        Log the plot filenames to console periodically
        """
        counter = 1
        while not self._is_shutdown:
            if counter % 600 == 0:
                found = False
                for filename, prover in self.provers.items():
                    log.info(f"Farming plot {filename} of size {prover.get_size()}")
                    found = True
                if not found:
                    log.warning(
                        "Not farming any plots on this harvester. Check your configuration."
                    )
            await asyncio.sleep(1)
            counter += 1

    def _get_plots(self) -> Tuple[List[Dict], List[str], List[str]]:
        response_plots: List[Dict] = []
        for path, prover in self.provers.items():
            plot_pk = PrivateKey.from_bytes(
                bytes.fromhex(self.plot_config["plots"][path]["sk"])
            ).get_public_key()
            pool_pk = PublicKey.from_bytes(
                bytes.fromhex(self.plot_config["plots"][path]["pool_pk"])
            )
            response_plots.append(
                {
                    "filename": str(path),
                    "size": prover.get_size(),
                    "plot-seed": prover.get_id(),
                    "memo": prover.get_memo(),
                    "plot_pk": bytes(plot_pk),
                    "pool_pk": bytes(pool_pk),
                }
            )
        return (
            response_plots,
            self.failed_to_open_filenames,
            self.not_found_filenames,
        )

    def _refresh_plots(self, reload_config_file=True):
        if reload_config_file:
            self.plot_config = load_config(self.root_path, "plots.yaml")
        (
            self.provers,
            self.failed_to_open_filenames,
            self.not_found_filenames,
        ) = load_plots(
            self.config, self.plot_config, self.pool_pubkeys, self.root_path
        )
        self._state_changed("plots")

    def _delete_plot(self, str_path: str):
        if str_path in self.provers:
            del self.provers[str_path]
        plot_root = path_from_root(self.root_path, self.config.get("plot_root", "."))

        # Remove absolute and relative paths
        if Path(str_path).exists():
            Path(str_path).unlink()
        if (plot_root / Path(str_path)).exists():
            (plot_root / Path(str_path)).unlink()

        try:
            # Removes the plot from plots.yaml
            plot_config = load_config(self.root_path, "plots.yaml")
            if str_path in plot_config["plots"]:
                del plot_config["plots"][str_path]
            save_config(self.root_path, "plots.yaml", plot_config)
            self.plot_config = plot_config
        except (FileNotFoundError, KeyError) as e:
            log.warning(f"Could not remove {str_path} {e}")
            return False
        self._state_changed("plots")
        return True

    def _add_plot(
        self, str_path: str, plot_sk: PrivateKey, pool_pk: Optional[PublicKey]
    ) -> bool:
        plot_config = load_config(self.root_path, "plots.yaml")

        if pool_pk is None:
            # Recover the pool key: the candidate that reproduces this plot's id is the right one
            pr = DiskProver(str_path)
            for pool_pk_cand in self.pool_pubkeys:
                if (
                    ProofOfSpace.calculate_plot_seed(
                        pool_pk_cand, plot_sk.get_public_key()
                    )
                    == pr.get_id()
                ):
                    pool_pk = pool_pk_cand
                    break
            if pool_pk is None:
                return False

        plot_config["plots"][str_path] = {
            "sk": bytes(plot_sk).hex(),
            "pool_pk": bytes(pool_pk).hex(),
        }
        save_config(self.root_path, "plots.yaml", plot_config)
        self._refresh_plots()
        return True

    def set_global_connections(self, global_connections: Optional[PeerConnections]):
        self.global_connections = global_connections

    def set_server(self, server):
        self.server = server

    def _shutdown(self):
        self._is_shutdown = True
        self.executor.shutdown(wait=True)

    async def _await_shutdown(self):
        await self._plot_notification_task

    @api_request
    async def harvester_handshake(
        self, harvester_handshake: harvester_protocol.HarvesterHandshake
    ):
        """
        Handshake between the harvester and farmer. The harvester receives the pool public keys,
        which must be put into the plots, before the plotting process begins. We cannot
        use any plots which don't have one of the pool keys.
        """
        self.pool_pubkeys = harvester_handshake.pool_pubkeys
        self._refresh_plots(reload_config_file=False)
        if len(self.provers) == 0:
            log.warning(
                "Not farming any plots on this harvester. Check your configuration."
            )

    @api_request
    async def new_challenge(self, new_challenge: harvester_protocol.NewChallenge):
        """
        The harvester receives a new challenge from the farmer, and looks up the quality string
        for any proofs of space that are found in the plots. If proofs are found, a
        ChallengeResponse message is sent for each of the proofs found.
        """
        start = time.time()
        challenge_size = len(new_challenge.challenge_hash)
        if challenge_size != 32:
            raise ValueError(
                f"Invalid challenge size {challenge_size}, 32 was expected"
            )

        loop = asyncio.get_running_loop()

        def blocking_lookup(filename: Path, prover: DiskProver) -> Optional[List]:
            # Uses the DiskProver object to lookup qualities. This is a blocking call,
            # so it should be run in a threadpool.
            try:
                quality_strings = prover.get_qualities_for_challenge(
                    new_challenge.challenge_hash
                )
            except RuntimeError:
                log.error("Error using prover object. Reinitializing prover object.")
                try:
                    # Retry with a freshly opened prover, and keep it for future lookups
                    self.provers[filename] = DiskProver(str(filename))
                    quality_strings = self.provers[
                        filename
                    ].get_qualities_for_challenge(new_challenge.challenge_hash)
                except RuntimeError:
                    log.error(
                        f"Retry-Error using prover object on {filename}. Giving up."
                    )
                    quality_strings = None
            return quality_strings

        async def lookup_challenge(
            filename: str, prover: DiskProver
        ) -> List[harvester_protocol.ChallengeResponse]:
            # Executes a DiskProver lookup in a threadpool, and returns responses
            all_responses: List[harvester_protocol.ChallengeResponse] = []
            quality_strings = await loop.run_in_executor(
                self.executor, blocking_lookup, filename, prover
            )
            if quality_strings is not None:
                for index, quality_str in enumerate(quality_strings):
                    self.challenge_hashes[quality_str] = (
                        new_challenge.challenge_hash,
                        filename,
                        uint8(index),
                    )
                    response: harvester_protocol.ChallengeResponse = harvester_protocol.ChallengeResponse(
                        new_challenge.challenge_hash, quality_str, prover.get_size()
                    )
                    all_responses.append(response)
            return all_responses

        awaitables = [
            lookup_challenge(filename, prover)
            for filename, prover in self.provers.items()
        ]

        # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
        for sublist_awaitable in asyncio.as_completed(awaitables):
            for response in await sublist_awaitable:
                yield OutboundMessage(
                    NodeType.FARMER,
                    Message("challenge_response", response),
                    Delivery.RESPOND,
                )
        log.info(
            f"Time taken to lookup qualities in {len(self.provers)} plots: {time.time() - start}"
        )

    @api_request
    async def request_proof_of_space(
        self, request: harvester_protocol.RequestProofOfSpace
    ):
        """
        The farmer requests a proof of space, for one of the proofs that we found.
        We look up the correct plot based on the quality, lookup the proof, and return it.
        """
        response: Optional[harvester_protocol.RespondProofOfSpace] = None
        try:
            # Using the quality string, find the right plot and index from our solutions
            challenge_hash, filename, index = self.challenge_hashes[
                request.quality_string
            ]
        except KeyError:
            log.warning(f"Quality string {request.quality_string} not found")
            return
        if index is not None:
            proof_xs: bytes
            try:
                try:
                    proof_xs = self.provers[filename].get_full_proof(
                        challenge_hash, index
                    )
                except RuntimeError:
                    self.provers[filename] = DiskProver(str(filename))
                    proof_xs = self.provers[filename].get_full_proof(
                        challenge_hash, index
                    )
            except KeyError:
                log.warning(f"KeyError plot {filename} does not exist.")
            pool_pubkey = PublicKey.from_bytes(
                bytes.fromhex(self.plot_config["plots"][filename]["pool_pk"])
            )
            plot_pubkey = PrivateKey.from_bytes(
                bytes.fromhex(self.plot_config["plots"][filename]["sk"])
            ).get_public_key()
            proof_of_space: ProofOfSpace = ProofOfSpace(
                challenge_hash,
                pool_pubkey,
                plot_pubkey,
                uint8(self.provers[filename].get_size()),
                proof_xs,
            )

            response = harvester_protocol.RespondProofOfSpace(
                request.quality_string, proof_of_space
            )
        if response:
            yield OutboundMessage(
                NodeType.FARMER,
                Message("respond_proof_of_space", response),
                Delivery.RESPOND,
            )

    @api_request
    async def request_header_signature(
        self, request: harvester_protocol.RequestHeaderSignature
    ):
        """
        The farmer requests a signature on the header hash, for one of the proofs that we found.
        A signature is created on the header hash using the plot private key.
        """
        if request.quality_string not in self.challenge_hashes:
            return

        _, filename, _ = self.challenge_hashes[request.quality_string]

        plot_sk = PrivateKey.from_bytes(
            bytes.fromhex(self.plot_config["plots"][filename]["sk"])
        )
        header_hash_signature: PrependSignature = plot_sk.sign_prepend(
            request.header_hash
        )
        assert header_hash_signature.verify(
            [Util.hash256(request.header_hash)], [plot_sk.get_public_key()]
        )

        response: harvester_protocol.RespondHeaderSignature = harvester_protocol.RespondHeaderSignature(
            request.quality_string,
            header_hash_signature,
        )
        yield OutboundMessage(
            NodeType.FARMER,
            Message("respond_header_signature", response),
            Delivery.RESPOND,
        )

    @api_request
    async def request_partial_proof(
        self, request: harvester_protocol.RequestPartialProof
    ):
        """
        The farmer requests a signature on the farmer_target, for one of the proofs that we found.
        We look up the correct plot based on the quality, lookup the proof, and sign
        the farmer target hash using the plot private key. This will be used as a pool share.
        """
        _, filename, _ = self.challenge_hashes[request.quality_string]
        plot_sk = PrivateKey.from_bytes(
            bytes.fromhex(self.plot_config["plots"][filename]["sk"])
        )
        farmer_target_signature: PrependSignature = plot_sk.sign_prepend(
            request.farmer_target_hash
        )

        response: harvester_protocol.RespondPartialProof = harvester_protocol.RespondPartialProof(
            request.quality_string, farmer_target_signature
        )
        yield OutboundMessage(
            NodeType.FARMER,
            Message("respond_partial_proof", response),
            Delivery.RESPOND,
        )

def process_file(file_path: Path) -> Optional[PlotInfo]:
    if not self._refreshing_enabled:
        return None
    filename_str = str(file_path)
    if self.match_str is not None and self.match_str not in filename_str:
        return None
    if (
        file_path in self.failed_to_open_filenames
        and (time.time() - self.failed_to_open_filenames[file_path])
        < self.refresh_parameter.retry_invalid_seconds
    ):
        # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
        return None
    if file_path in self.plots:
        return self.plots[file_path]
    entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(
        file_path.name
    )
    if entry is not None:
        loaded_parent, duplicates = entry
        if str(file_path.parent) in duplicates:
            log.debug(f"Skip duplicated plot {str(file_path)}")
            return None
    try:
        if not file_path.exists():
            return None

        prover = DiskProver(str(file_path))

        log.debug(f"process_file {str(file_path)}")

        expected_size = (
            _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
        )
        stat_info = file_path.stat()

        # TODO: consider checking if the file was just written to (which would mean that the file is still
        # being copied). A segfault might happen in this edge case.

        if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
            log.warning(
                f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
            )
            return None

        cache_entry = self.cache.get(prover.get_id())
        if cache_entry is None:
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that have the correct keys associated with them
            if farmer_public_key not in self.farmer_public_keys:
                log.warning(
                    f"Plot {file_path} has a farmer public key that is not in the farmer's pk list."
                )
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None

            pool_public_key: Optional[G1Element] = None
            pool_contract_puzzle_hash: Optional[bytes32] = None
            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if (
                pool_public_key is not None
                and pool_public_key not in self.pool_public_keys
            ):
                log.warning(
                    f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list."
                )
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None

            # If a plot is in `no_key_filenames` the keys were missing in earlier refresh cycles. We can remove
            # the current plot from that list if it's in there since we passed the key checks above.
            if file_path in self.no_key_filenames:
                self.no_key_filenames.remove(file_path)

            local_sk = master_sk_to_local_sk(local_master_sk)

            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(),
                farmer_public_key,
                pool_contract_puzzle_hash is not None,
            )

            cache_entry = CacheEntry(
                pool_public_key, pool_contract_puzzle_hash, plot_public_key
            )
            self.cache.update(prover.get_id(), cache_entry)

        with self.plot_filename_paths_lock:
            paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(
                file_path.name
            )
            if paths is None:
                paths = (str(Path(prover.get_filename()).parent), set())
                self.plot_filename_paths[file_path.name] = paths
            else:
                paths[1].add(str(Path(prover.get_filename()).parent))
                log.warning(
                    f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}."
                )
                return None

        new_plot_info: PlotInfo = PlotInfo(
            prover,
            cache_entry.pool_public_key,
            cache_entry.pool_contract_puzzle_hash,
            cache_entry.plot_public_key,
            stat_info.st_size,
            stat_info.st_mtime,
        )

        with counter_lock:
            result.loaded.append(new_plot_info)

        if file_path in self.failed_to_open_filenames:
            del self.failed_to_open_filenames[file_path]

    except Exception as e:
        tb = traceback.format_exc()
        log.error(f"Failed to open file {file_path}. {e} {tb}")
        self.failed_to_open_filenames[file_path] = int(time.time())
        return None
    log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")

    if self.show_memo:
        plot_memo: bytes32
        if pool_contract_puzzle_hash is None:
            plot_memo = stream_plot_info_pk(
                pool_public_key, farmer_public_key, local_master_sk
            )
        else:
            plot_memo = stream_plot_info_ph(
                pool_contract_puzzle_hash, farmer_public_key, local_master_sk
            )
        plot_memo_str: str = plot_memo.hex()
        log.info(f"Memo: {plot_memo_str}")

    return new_plot_info

from chiapos import DiskProver, DiskPlotter, Verifier
from hashlib import sha256
import os

challenge: bytes = bytes([i for i in range(0, 32)])
plot_id: bytes = bytes([
    5, 104, 52, 4, 51, 55, 23, 84, 91, 10, 111, 12, 13, 222, 151, 16,
    228, 211, 254, 45, 92, 198, 204, 10, 9, 10, 11, 129, 139, 171, 15, 23,
])
filename = "./myplot.dat"
pl = DiskPlotter()
pl.create_plot_disk(filename, 21, bytes([1, 2, 3, 4, 5]), plot_id)
pr = DiskProver(filename)

total_proofs: int = 0
iterations: int = 5000

v = Verifier()
for i in range(iterations):
    challenge = sha256(i.to_bytes(4, "big")).digest()
    for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
        proof = pr.get_full_proof(challenge, index)
        total_proofs += 1
        ver_quality = v.validate_proof(plot_id, 21, challenge, proof)
        assert quality == ver_quality

os.remove(filename)

# The original print was truncated here; completed by analogy with the k=21 test above
print(f"total proofs {total_proofs} out of {iterations}: {total_proofs / iterations}")

def _create_block(
    self,
    test_constants: Dict,
    challenge_hash: bytes32,
    height: uint32,
    prev_header_hash: bytes32,
    prev_iters: uint64,
    prev_weight: uint64,
    timestamp: uint64,
    difficulty: uint64,
    ips: uint64,
    seed: bytes,
) -> FullBlock:
    """
    Creates a block with the specified details. Uses the stored plots to create a proof of space,
    and also evaluates the VDF for the proof of time.
    """
    prover = None
    plot_pk = None
    plot_sk = None
    qualities: List[bytes] = []
    for pn in range(num_plots):
        # Allow passing in seed, to create reorgs and different chains
        seeded_pn = (pn + 17 * int.from_bytes(seed, "big")) % num_plots
        filename = self.filenames[seeded_pn]
        plot_pk = plot_pks[seeded_pn]
        plot_sk = plot_sks[seeded_pn]
        prover = DiskProver(os.path.join(self.plot_dir, filename))
        qualities = prover.get_qualities_for_challenge(challenge_hash)
        if len(qualities) > 0:
            break

    assert prover
    assert plot_pk
    assert plot_sk

    if len(qualities) == 0:
        raise NoProofsOfSpaceFound("No proofs for this challenge")

    proof_xs: bytes = prover.get_full_proof(challenge_hash, 0)
    proof_of_space: ProofOfSpace = ProofOfSpace(
        challenge_hash, pool_pk, plot_pk, k, [uint8(b) for b in proof_xs]
    )
    number_iters: uint64 = pot_iterations.calculate_iterations(
        proof_of_space, difficulty, ips, test_constants["MIN_BLOCK_TIME"]
    )

    disc: int = create_discriminant(
        challenge_hash, test_constants["DISCRIMINANT_SIZE_BITS"]
    )
    start_x: ClassGroup = ClassGroup.from_ab_discriminant(2, 1, disc)
    y_cl, proof_bytes = create_proof_of_time_nwesolowski(
        disc, start_x, number_iters, disc, n_wesolowski
    )
    output = ClassgroupElement(y_cl[0], y_cl[1])

    proof_of_time = ProofOfTime(
        challenge_hash,
        number_iters,
        output,
        n_wesolowski,
        [uint8(b) for b in proof_bytes],
    )

    coinbase: CoinbaseInfo = CoinbaseInfo(
        height,
        block_rewards.calculate_block_reward(uint32(height)),
        coinbase_target,
    )
    coinbase_sig: PrependSignature = pool_sk.sign_prepend(bytes(coinbase))
    fees_target: FeesTarget = FeesTarget(fee_target, uint64(0))
    solutions_generator: bytes32 = sha256(seed).digest()
    cost = uint64(0)
    body: Body = Body(
        coinbase, coinbase_sig, fees_target, None, solutions_generator, cost
    )

    header_data: HeaderData = HeaderData(
        prev_header_hash,
        timestamp,
        bytes([0] * 32),
        proof_of_space.get_hash(),
        body.get_hash(),
        bytes([0] * 32),
    )

    header_hash_sig: PrependSignature = plot_sk.sign_prepend(header_data.get_hash())

    header: Header = Header(header_data, header_hash_sig)

    challenge = Challenge(
        challenge_hash,
        proof_of_space.get_hash(),
        proof_of_time.get_hash(),
        height,
        uint64(prev_weight + difficulty),
        uint64(prev_iters + number_iters),
    )
    header_block = HeaderBlock(proof_of_space, proof_of_time, challenge, header)

    full_block: FullBlock = FullBlock(header_block, body)

    return full_block

def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Set[Path],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    root_path: Path,
    open_no_key_filenames=False,
) -> Tuple[bool, Dict[Path, PlotInfo], Set[Path], Set[Path]]:
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    log.info(f'Searching directories {config_file["plot_directories"]}')

    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    total_size = 0

    for filename in all_filenames:
        if filename in provers:
            stat_info = filename.stat()
            if stat_info.st_mtime == provers[filename].time_modified:
                total_size += stat_info.st_size
                continue
        if filename in failed_to_open_filenames:
            continue
        if filename.exists():
            try:
                prover = DiskProver(str(filename))
                (
                    pool_public_key,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())

                # Only use plots that have the correct keys associated with them
                if (
                    farmer_public_keys is not None
                    and farmer_public_key not in farmer_public_keys
                ):
                    log.warning(
                        f"Plot {filename} has a farmer public key that is not in the farmer's pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                if (
                    pool_public_keys is not None
                    and pool_public_key not in pool_public_keys
                ):
                    log.warning(
                        f"Plot {filename} has a pool public key that is not in the farmer's pool pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key
                )
                provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    farmer_public_key,
                    plot_public_key,
                    local_sk,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                total_size += stat_info.st_size
                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file {filename}. {e} {tb}")
                failed_to_open_filenames.add(filename)
                continue
            log.info(
                f"Found plot {filename} of size {provers[filename].prover.get_size()}"
            )

    log.info(
        f"Loaded a total of {len(provers)} plots of size {total_size / (1024 ** 4)} TiB"
    )
    return (changed, provers, failed_to_open_filenames, no_key_filenames)

def process_file(directory: Path, filename: Path) -> Tuple[int, Dict]:
    new_provers: Dict[Path, PlotInfo] = {}
    nonlocal changed
    filename_str = str(filename)
    if match_str is not None and match_str not in filename_str:
        return 0, new_provers
    if filename.exists():
        if (
            filename in failed_to_open_filenames
            and (time.time() - failed_to_open_filenames[filename]) < 1200
        ):
            # Try once every 20 minutes to open the file
            return 0, new_provers
        if filename in provers:
            try:
                stat_info = filename.stat()
            except Exception as e:
                log.error(f"Failed to open file {filename}. {e}")
                return 0, new_provers
            if stat_info.st_mtime == provers[filename].time_modified:
                with plot_ids_lock:
                    if provers[filename].prover.get_id() in plot_ids:
                        log.warning(
                            f"Have multiple copies of the plot {filename}, not adding it."
                        )
                        return 0, new_provers
                    plot_ids.add(provers[filename].prover.get_id())
                new_provers[filename] = provers[filename]
                return stat_info.st_size, new_provers
        try:
            prover = DiskProver(str(filename))

            expected_size = (
                _expected_plot_size(prover.get_size())
                * UI_ACTUAL_SPACE_CONSTANT_FACTOR
            )
            stat_info = filename.stat()

            # TODO: consider checking if the file was just written to (which would mean that the file is still
            # being copied). A segfault might happen in this edge case.

            if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                log.warning(
                    f"Not farming plot {filename}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                    f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                )
                return 0, new_provers

            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that have the correct keys associated with them
            if (
                farmer_public_keys is not None
                and farmer_public_key not in farmer_public_keys
            ):
                log.warning(
                    f"Plot {filename} has a farmer public key that is not in the farmer's pk list."
                )
                no_key_filenames.add(filename)
                if not open_no_key_filenames:
                    return 0, new_provers

            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
                pool_contract_puzzle_hash = None
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_public_key = None
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if (
                pool_public_keys is not None
                and pool_public_key is not None
                and pool_public_key not in pool_public_keys
            ):
                log.warning(
                    f"Plot {filename} has a pool public key that is not in the farmer's pool pk list."
                )
                no_key_filenames.add(filename)
                if not open_no_key_filenames:
                    return 0, new_provers

            stat_info = filename.stat()
            local_sk = master_sk_to_local_sk(local_master_sk)
            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key
            )

            with plot_ids_lock:
                if prover.get_id() in plot_ids:
                    log.warning(
                        f"Have multiple copies of the plot {filename}, not adding it."
                    )
                    return 0, new_provers
                plot_ids.add(prover.get_id())

            new_provers[filename] = PlotInfo(
                prover,
                pool_public_key,
                pool_contract_puzzle_hash,
                plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
                directory,
            )

            changed = True
        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file {filename}. {e} {tb}")
            failed_to_open_filenames[filename] = int(time.time())
            return 0, new_provers
        log.info(
            f"Found plot {filename} of size {new_provers[filename].prover.get_size()}"
        )

        if show_memo:
            plot_memo: bytes32
            if pool_contract_puzzle_hash is None:
                plot_memo = stream_plot_info_pk(
                    pool_public_key, farmer_public_key, local_master_sk
                )
            else:
                plot_memo = stream_plot_info_ph(
                    pool_contract_puzzle_hash, farmer_public_key, local_master_sk
                )
            plot_memo_str: str = plot_memo.hex()
            log.info(f"Memo: {plot_memo_str}")

        return stat_info.st_size, new_provers
    return 0, new_provers

def _create_block(
    self,
    test_constants: Dict,
    challenge_hash: bytes32,
    height: uint32,
    prev_header_hash: bytes32,
    prev_iters: uint64,
    prev_weight: uint128,
    timestamp: uint64,
    difficulty: uint64,
    min_iters: uint64,
    seed: bytes,
    genesis: bool = False,
    reward_puzzlehash: bytes32 = None,
    transactions: Program = None,
    aggsig: BLSSignature = None,
    fees: uint64 = uint64(0),
) -> FullBlock:
    """
    Creates a block with the specified details. Uses the stored plots to create a proof of space,
    and also evaluates the VDF for the proof of time.
    """
    selected_prover = None
    selected_plot_sk = None
    selected_pool_sk = None
    selected_proof_index = 0
    plots = list(self.plot_config["plots"].items())
    selected_quality: Optional[bytes] = None
    best_quality = 0
    if self.use_any_pos:
        for i in range(len(plots) * 3):
            # Allow passing in seed, to create reorgs and different chains
            random.seed(seed + i.to_bytes(4, "big"))
            seeded_pn = random.randint(0, len(plots) - 1)
            pool_sk = PrivateKey.from_bytes(
                bytes.fromhex(plots[seeded_pn][1]["pool_sk"])
            )
            plot_sk = PrivateKey.from_bytes(
                bytes.fromhex(plots[seeded_pn][1]["sk"])
            )
            prover = DiskProver(plots[seeded_pn][0])
            qualities = prover.get_qualities_for_challenge(challenge_hash)
            if len(qualities) > 0:
                if self.use_any_pos:
                    selected_quality = qualities[0]
                    selected_prover = prover
                    selected_pool_sk = pool_sk
                    selected_plot_sk = plot_sk
                break
    else:
        for i in range(len(plots)):
            pool_sk = PrivateKey.from_bytes(bytes.fromhex(plots[i][1]["pool_sk"]))
            plot_sk = PrivateKey.from_bytes(bytes.fromhex(plots[i][1]["sk"]))
            prover = DiskProver(plots[i][0])
            qualities = prover.get_qualities_for_challenge(challenge_hash)
            j = 0
            for quality in qualities:
                qual_int = int.from_bytes(quality, "big", signed=False)
                if qual_int > best_quality:
                    best_quality = qual_int
                    selected_quality = quality
                    selected_prover = prover
                    selected_pool_sk = pool_sk
                    selected_plot_sk = plot_sk
                    selected_proof_index = j
                j += 1

    assert selected_prover
    assert selected_pool_sk
    assert selected_plot_sk
    pool_pk = selected_pool_sk.get_public_key()
    plot_pk = selected_plot_sk.get_public_key()
    if selected_quality is None:
        raise RuntimeError("No proofs for this challenge")

    proof_xs: bytes = selected_prover.get_full_proof(
        challenge_hash, selected_proof_index
    )
    proof_of_space: ProofOfSpace = ProofOfSpace(
        challenge_hash, pool_pk, plot_pk, selected_prover.get_size(), proof_xs
    )

    number_iters: uint64 = pot_iterations.calculate_iterations(
        proof_of_space, difficulty, min_iters
    )
    # print("Doing iters", number_iters)
    int_size = (test_constants["DISCRIMINANT_SIZE_BITS"] + 16) >> 4

    result = prove(
        challenge_hash, test_constants["DISCRIMINANT_SIZE_BITS"], number_iters
    )

    output = ClassgroupElement(
        int512(int.from_bytes(result[0:int_size], "big", signed=True)),
        int512(int.from_bytes(result[int_size : 2 * int_size], "big", signed=True)),
    )
    proof_bytes = result[2 * int_size : 4 * int_size]

    proof_of_time = ProofOfTime(
        challenge_hash,
        number_iters,
        output,
        self.n_wesolowski,
        proof_bytes,
    )

    if not reward_puzzlehash:
        reward_puzzlehash = self.fee_target

    # Use the extension data to create different blocks based on header hash
    extension_data: bytes32 = bytes32([random.randint(0, 255) for _ in range(32)])
    cost = uint64(0)

    coinbase_reward = block_rewards.calculate_block_reward(height)
    fee_reward = uint64(block_rewards.calculate_base_fee(height) + fees)

    coinbase_coin, coinbase_signature = create_coinbase_coin_and_signature(
        height, reward_puzzlehash, coinbase_reward, selected_pool_sk
    )

    parent_coin_name = std_hash(std_hash(height))
    fees_coin = Coin(parent_coin_name, reward_puzzlehash, uint64(fee_reward))

    # Create filter
    byte_array_tx: List[bytes32] = []
    tx_additions: List[Coin] = []
    tx_removals: List[bytes32] = []
    encoded = None
    if transactions:
        error, npc_list, _ = get_name_puzzle_conditions(transactions)
        additions: List[Coin] = additions_for_npc(npc_list)
        for coin in additions:
            tx_additions.append(coin)
            byte_array_tx.append(bytearray(coin.puzzle_hash))
        for npc in npc_list:
            tx_removals.append(npc.coin_name)
            byte_array_tx.append(bytearray(npc.coin_name))

        bip158: PyBIP158 = PyBIP158(byte_array_tx)
        encoded = bytes(bip158.GetEncoded())

    removal_merkle_set = MerkleSet()
    addition_merkle_set = MerkleSet()

    # Create removal Merkle set
    for coin_name in tx_removals:
        removal_merkle_set.add_already_hashed(coin_name)

    # Create addition Merkle set
    puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}
    for coin in tx_additions:
        if coin.puzzle_hash in puzzlehash_coin_map:
            puzzlehash_coin_map[coin.puzzle_hash].append(coin)
        else:
            puzzlehash_coin_map[coin.puzzle_hash] = [coin]

    # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
    for puzzle, coins in puzzlehash_coin_map.items():
        addition_merkle_set.add_already_hashed(puzzle)
        addition_merkle_set.add_already_hashed(hash_coin_list(coins))

    additions_root = addition_merkle_set.get_root()
    removal_root = removal_merkle_set.get_root()

    generator_hash = (
        transactions.get_tree_hash()
        if transactions is not None
        else bytes32([0] * 32)
    )
    filter_hash = std_hash(encoded) if encoded is not None else bytes32([0] * 32)

    header_data: HeaderData = HeaderData(
        height,
        prev_header_hash,
        timestamp,
        filter_hash,
        proof_of_space.get_hash(),
        uint128(prev_weight + difficulty),
        uint64(prev_iters + number_iters),
        additions_root,
        removal_root,
        coinbase_coin,
        coinbase_signature,
        fees_coin,
        aggsig,
        cost,
        extension_data,
        generator_hash,
    )

    header_hash_sig: PrependSignature = selected_plot_sk.sign_prepend(
        header_data.get_hash()
    )

    header: Header = Header(header_data, header_hash_sig)

    full_block: FullBlock = FullBlock(
        proof_of_space, proof_of_time, header, transactions, encoded
    )

    return full_block

def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Dict[Path, int],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    match_str: Optional[str],
    root_path: Path,
    open_no_key_filenames=False,
) -> Tuple[bool, Dict[Path, PlotInfo], Dict[Path, int], Set[Path]]:
    start_time = time.time()
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    log.info(f'Searching directories {config_file["plot_directories"]}')

    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    total_size = 0
    new_provers: Dict[Path, PlotInfo] = {}

    if match_str is not None:
        log.info(
            f'Only loading plots that contain "{match_str}" in the file or directory name'
        )

    for filename in all_filenames:
        filename_str = str(filename)
        if match_str is not None and match_str not in filename_str:
            continue
        if filename.exists():
            if (
                filename in failed_to_open_filenames
                and (time.time() - failed_to_open_filenames[filename]) < 1200
            ):
                # Try once every 20 minutes to open the file
                continue
            if filename in provers:
                stat_info = filename.stat()
                if stat_info.st_mtime == provers[filename].time_modified:
                    total_size += stat_info.st_size
                    new_provers[filename] = provers[filename]
                    continue
            try:
                prover = DiskProver(str(filename))

                expected_size = (
                    _expected_plot_size(prover.get_size())
                    * UI_ACTUAL_SPACE_CONSTANT_FACTOR
                    / 2.0
                )
                stat_info = filename.stat()

                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.

                if (
                    prover.get_size() >= 30
                    and stat_info.st_size < 0.98 * expected_size
                ):
                    log.warning(
                        f"Not farming plot {filename}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    continue

                (
                    pool_public_key,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())

                # Only use plots that have the correct keys associated with them
                if (
                    farmer_public_keys is not None
                    and farmer_public_key not in farmer_public_keys
                ):
                    log.warning(
                        f"Plot {filename} has a farmer public key that is not in the farmer's pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                if (
                    pool_public_keys is not None
                    and pool_public_key not in pool_public_keys
                ):
                    log.warning(
                        f"Plot {filename} has a pool public key that is not in the farmer's pool pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key
                )
                new_provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    farmer_public_key,
                    plot_public_key,
                    local_sk,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                total_size += stat_info.st_size
                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file {filename}. {e} {tb}")
                failed_to_open_filenames[filename] = int(time.time())
                continue
            log.info(
                f"Found plot {filename} of size {new_provers[filename].prover.get_size()}"
            )

    log.info(
        f"Loaded a total of {len(new_provers)} plots of size {total_size / (1024 ** 4)} TiB, in"
        f" {time.time()-start_time} seconds"
    )
    return changed, new_provers, failed_to_open_filenames, no_key_filenames

class Harvester:
    config: Dict
    provers: Dict[Path, PlotInfo]
    failed_to_open_filenames: Set[Path]
    no_key_filenames: Set[Path]
    farmer_public_keys: List[G1Element]
    pool_public_keys: List[G1Element]
    cached_challenges: List[harvester_protocol.NewChallenge]
    root_path: Path
    _is_shutdown: bool
    executor: concurrent.futures.ThreadPoolExecutor
    state_changed_callback: Optional[Callable]
    constants: Dict
    _refresh_lock: asyncio.Lock

    def __init__(self, root_path: Path, override_constants={}):
        self.root_path = root_path

        # From filename to prover
        self.provers = {}
        self.failed_to_open_filenames = set()
        self.no_key_filenames = set()

        self._is_shutdown = False
        self.global_connections: Optional[PeerConnections] = None
        self.farmer_public_keys = []
        self.pool_public_keys = []
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=10)
        self.state_changed_callback = None
        self.server = None
        self.constants = consensus_constants.copy()
        self.cached_challenges = []
        for key, value in override_constants.items():
            self.constants[key] = value

    async def _start(self):
        self._refresh_lock = asyncio.Lock()

    def _close(self):
        self._is_shutdown = True
        self.executor.shutdown(wait=True)

    async def _await_closed(self):
        pass

    def _set_state_changed_callback(self, callback: Callable):
        self.state_changed_callback = callback
        if self.global_connections is not None:
            self.global_connections.set_state_changed_callback(callback)

    def _state_changed(self, change: str):
        if self.state_changed_callback is not None:
            self.state_changed_callback(change)

    def _get_plots(self) -> Tuple[List[Dict], List[str], List[str]]:
        response_plots: List[Dict] = []
        for path, plot_info in self.provers.items():
            prover = plot_info.prover
            response_plots.append({
                "filename": str(path),
                "size": prover.get_size(),
                "plot-seed": prover.get_id(),
                "pool_public_key": plot_info.pool_public_key,
                "farmer_public_key": plot_info.farmer_public_key,
                "plot_public_key": plot_info.plot_public_key,
                "local_sk": plot_info.local_sk,
                "file_size": plot_info.file_size,
                "time_modified": plot_info.time_modified,
            })
        return (
            response_plots,
            [str(s) for s in self.failed_to_open_filenames],
            [str(s) for s in self.no_key_filenames],
        )

    async def _refresh_plots(self):
        async with self._refresh_lock:
            (
                changed,
                self.provers,
                self.failed_to_open_filenames,
                self.no_key_filenames,
            ) = load_plots(
                self.provers,
                self.failed_to_open_filenames,
                self.farmer_public_keys,
                self.pool_public_keys,
                self.root_path,
            )
        if changed:
            self._state_changed("plots")

    def _delete_plot(self, str_path: str):
        path = Path(str_path).resolve()
        if path in self.provers:
            del self.provers[path]

        # Remove absolute and relative paths
        if path.exists():
            path.unlink()

        self._state_changed("plots")
        return True

    async def _add_plot_directory(self, str_path: str) -> bool:
        config = load_config(self.root_path, "config.yaml")
        if str(Path(str_path).resolve()) not in config["harvester"]["plot_directories"]:
            config["harvester"]["plot_directories"].append(str(Path(str_path).resolve()))
            save_config(self.root_path, "config.yaml", config)
        await self._refresh_plots()
        return True

    def _set_global_connections(self, global_connections: Optional[PeerConnections]):
        self.global_connections = global_connections

    def _set_server(self, server):
        self.server = server

    @api_request
    async def harvester_handshake(
        self, harvester_handshake: harvester_protocol.HarvesterHandshake
    ):
        """
        Handshake between the harvester and farmer. The harvester receives the pool public
        keys, as well as the farmer public keys, which must be put into the plots before the
        plotting process begins. We cannot use any plots which have different keys in them.
        """
        self.farmer_public_keys = harvester_handshake.farmer_public_keys
        self.pool_public_keys = harvester_handshake.pool_public_keys

        await self._refresh_plots()

        if len(self.provers) == 0:
            log.warning("Not farming any plots on this harvester. Check your configuration.")
            return

        for new_challenge in self.cached_challenges:
            async for msg in self.new_challenge(new_challenge):
                yield msg
        self.cached_challenges = []
        self._state_changed("plots")

    @api_request
    async def new_challenge(self, new_challenge: harvester_protocol.NewChallenge):
        """
        The harvester receives a new challenge from the farmer, and looks up the quality string
        for any proofs of space that are found in the plots. If proofs are found, a
        ChallengeResponse message is sent for each of the proofs found.
        """
        if len(self.pool_public_keys) == 0 or len(self.farmer_public_keys) == 0:
            self.cached_challenges = self.cached_challenges[:5]
            self.cached_challenges.insert(0, new_challenge)
            return

        start = time.time()
        assert len(new_challenge.challenge_hash) == 32

        # Refresh plots to see if there are any new ones
        await self._refresh_plots()

        loop = asyncio.get_running_loop()

        def blocking_lookup(filename: Path, prover: DiskProver) -> Optional[List]:
            # Uses the DiskProver object to lookup qualities. This is a blocking call,
            # so it should be run in a threadpool.
            try:
                quality_strings = prover.get_qualities_for_challenge(
                    new_challenge.challenge_hash)
            except RuntimeError:
                log.error("Error using prover object. Reinitializing prover object.")
                try:
                    # Reinitialize into a local prover rather than self.prover, so that
                    # concurrent lookups do not clobber each other's state.
                    prover = DiskProver(str(filename))
                    quality_strings = prover.get_qualities_for_challenge(
                        new_challenge.challenge_hash)
                except RuntimeError:
                    log.error(f"Retry-Error using prover object on {filename}. Giving up.")
                    quality_strings = None
            return quality_strings

        async def lookup_challenge(
            filename: Path, prover: DiskProver
        ) -> List[harvester_protocol.ChallengeResponse]:
            # Executes a DiskProver lookup in a threadpool, and returns responses
            all_responses: List[harvester_protocol.ChallengeResponse] = []
            quality_strings = await loop.run_in_executor(
                self.executor, blocking_lookup, filename, prover)
            if quality_strings is not None:
                for index, quality_str in enumerate(quality_strings):
                    response: harvester_protocol.ChallengeResponse = harvester_protocol.ChallengeResponse(
                        new_challenge.challenge_hash,
                        str(filename),
                        uint8(index),
                        quality_str,
                        prover.get_size(),
                    )
                    all_responses.append(response)
            return all_responses

        awaitables = []
        for filename, plot_info in self.provers.items():
            if ProofOfSpace.can_create_proof(
                plot_info.prover.get_id(),
                new_challenge.challenge_hash,
                self.constants["NUMBER_ZERO_BITS_CHALLENGE_SIG"],
            ):
                awaitables.append(lookup_challenge(filename, plot_info.prover))

        # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
        total_proofs_found = 0
        for sublist_awaitable in asyncio.as_completed(awaitables):
            for response in await sublist_awaitable:
                total_proofs_found += 1
                yield OutboundMessage(
                    NodeType.FARMER,
                    Message("challenge_response", response),
                    Delivery.RESPOND,
                )
        log.info(
            f"{len(awaitables)} plots were eligible for farming {new_challenge.challenge_hash.hex()[:10]}..."
            f" Found {total_proofs_found} proofs. Time: {time.time() - start}. "
            f"Total {len(self.provers)} plots"
        )

    @api_request
    async def request_proof_of_space(self, request: harvester_protocol.RequestProofOfSpace):
        """
        The farmer requests a proof of space, for one of the plots.
        We look up the correct plot based on the plot id and response number, look up the
        proof, and return it.
        """
        response: Optional[harvester_protocol.RespondProofOfSpace] = None
        challenge_hash = request.challenge_hash
        filename = Path(request.plot_id).resolve()
        index = request.response_number
        proof_xs: bytes
        plot_info = self.provers[filename]

        try:
            try:
                proof_xs = plot_info.prover.get_full_proof(challenge_hash, index)
            except RuntimeError:
                prover = DiskProver(str(filename))
                self.provers[filename] = PlotInfo(
                    prover,
                    plot_info.pool_public_key,
                    plot_info.farmer_public_key,
                    plot_info.plot_public_key,
                    plot_info.local_sk,
                    plot_info.file_size,
                    plot_info.time_modified,
                )
                proof_xs = self.provers[filename].prover.get_full_proof(challenge_hash, index)
        except KeyError:
            log.warning(f"KeyError plot {filename} does not exist.")
            # Without the plot we cannot build a proof; bail out instead of
            # continuing with an unbound proof_xs.
            return

        plot_info = self.provers[filename]
        plot_public_key = ProofOfSpace.generate_plot_public_key(
            plot_info.local_sk.get_g1(), plot_info.farmer_public_key)

        proof_of_space: ProofOfSpace = ProofOfSpace(
            challenge_hash,
            plot_info.pool_public_key,
            plot_public_key,
            uint8(self.provers[filename].prover.get_size()),
            proof_xs,
        )
        response = harvester_protocol.RespondProofOfSpace(
            request.plot_id,
            request.response_number,
            proof_of_space,
        )
        if response:
            yield OutboundMessage(
                NodeType.FARMER,
                Message("respond_proof_of_space", response),
                Delivery.RESPOND,
            )

    @api_request
    async def request_signature(self, request: harvester_protocol.RequestSignature):
        """
        The farmer requests a signature on the header hash, for one of the proofs that we
        found. A signature is created on the header hash using the harvester private key.
        This can also be used for pooling.
        """
        plot_info = self.provers[Path(request.plot_id).resolve()]

        local_sk = plot_info.local_sk
        agg_pk = ProofOfSpace.generate_plot_public_key(
            local_sk.get_g1(), plot_info.farmer_public_key)

        # This is only a partial signature. When combined with the farmer's half, it will
        # form a complete PrependSignature.
        signature: G2Element = AugSchemeMPL.sign(local_sk, request.message, agg_pk)

        response: harvester_protocol.RespondSignature = harvester_protocol.RespondSignature(
            request.plot_id,
            request.message,
            local_sk.get_g1(),
            plot_info.farmer_public_key,
            signature,
        )

        yield OutboundMessage(
            NodeType.FARMER,
            Message("respond_signature", response),
            Delivery.RESPOND,
        )
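# A minimal sketch (not part of the harvester) of how the plot filter above cuts
# down disk lookups: only plots whose id hashes favorably against the challenge
# are eligible, so on average 1 in 2**NUMBER_ZERO_BITS_CHALLENGE_SIG plots is
# actually read from disk. The sha256 construction and bit-prefix check here are
# assumptions about what ProofOfSpace.can_create_proof does internally; treat
# this as illustrative, not as the consensus rule.
from hashlib import sha256


def passes_plot_filter_sketch(plot_id: bytes, challenge_hash: bytes, filter_bits: int) -> bool:
    # Hash the plot id together with the challenge, and require the top
    # `filter_bits` bits of the 256-bit digest to be zero.
    digest = sha256(plot_id + challenge_hash).digest()
    value = int.from_bytes(digest, "big")
    return value >> (256 - filter_bits) == 0


# Usage: with filter_bits=3, roughly 1 in 8 plots is looked up per challenge.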
class Harvester:
    config: Dict
    plot_config: Dict
    provers: Dict[Path, DiskProver]
    challenge_hashes: Dict[bytes32, Tuple[bytes32, Path, uint8]]
    _plot_notification_task: asyncio.Task
    _is_shutdown: bool
    executor: concurrent.futures.ThreadPoolExecutor

    @staticmethod
    async def create(config: Dict, plot_config: Dict):
        self = Harvester()
        self.config = config
        self.plot_config = plot_config

        # From filename to prover
        self.provers = {}

        # From quality string to (challenge_hash, filename, index)
        self.challenge_hashes = {}
        self._plot_notification_task = asyncio.create_task(self._plot_notification())
        self._is_shutdown = False
        self.server = None
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=10)
        return self

    async def _plot_notification(self):
        """
        Log the plot filenames to console periodically
        """
        counter = 1
        while not self._is_shutdown:
            if counter % 600 == 0:
                found = False
                for filename, prover in self.provers.items():
                    log.info(f"Farming plot {filename} of size {prover.get_size()}")
                    found = True
                if not found:
                    log.warning(
                        "Not farming any plots on this harvester. Check your configuration."
                    )
            await asyncio.sleep(1)
            counter += 1

    def set_server(self, server):
        self.server = server

    def _start_bg_tasks(self):
        """
        Start a background task that checks connection and reconnects periodically to the farmer.
        """
        farmer_peer = PeerInfo(self.config["farmer_peer"]["host"],
                               self.config["farmer_peer"]["port"])

        async def connection_check():
            while not self._is_shutdown:
                if self.server is not None:
                    farmer_retry = True
                    for connection in self.server.global_connections.get_connections():
                        if connection.get_peer_info() == farmer_peer:
                            farmer_retry = False
                    if farmer_retry:
                        log.info(f"Reconnecting to farmer {farmer_peer}")
                        if not await self.server.start_client(farmer_peer, None, auth=True):
                            await asyncio.sleep(1)
                await asyncio.sleep(30)

        self.reconnect_task = asyncio.create_task(connection_check())

    def _shutdown(self):
        self._is_shutdown = True
        self.executor.shutdown(wait=True)

    async def _await_shutdown(self):
        await self._plot_notification_task

    @api_request
    async def harvester_handshake(
        self, harvester_handshake: harvester_protocol.HarvesterHandshake
    ):
        """
        Handshake between the harvester and farmer. The harvester receives the pool public
        keys, which must be put into the plots, before the plotting process begins. We cannot
        use any plots which don't have one of the pool keys.
        """
        self.provers = load_plots(self.config, self.plot_config,
                                  harvester_handshake.pool_pubkeys)
        if len(self.provers) == 0:
            log.warning("Not farming any plots on this harvester. Check your configuration.")

    @api_request
    async def new_challenge(self, new_challenge: harvester_protocol.NewChallenge):
        """
        The harvester receives a new challenge from the farmer, and looks up the quality string
        for any proofs of space that are found in the plots. If proofs are found, a
        ChallengeResponse message is sent for each of the proofs found.
        """
        start = time.time()
        challenge_size = len(new_challenge.challenge_hash)
        if challenge_size != 32:
            raise ValueError(f"Invalid challenge size {challenge_size}, 32 was expected")

        loop = asyncio.get_running_loop()

        def blocking_lookup(filename: Path, prover: DiskProver) -> Optional[List]:
            # Uses the DiskProver object to lookup qualities. This is a blocking call,
            # so it should be run in a threadpool.
            try:
                quality_strings = prover.get_qualities_for_challenge(
                    new_challenge.challenge_hash)
            except RuntimeError:
                log.error("Error using prover object. Reinitializing prover object.")
                try:
                    # Reinitialize into a local prover instead of self.prover, so the
                    # retry does not leak shared state across concurrent lookups.
                    prover = DiskProver(str(filename))
                    quality_strings = prover.get_qualities_for_challenge(
                        new_challenge.challenge_hash)
                except RuntimeError:
                    log.error(f"Retry-Error using prover object on {filename}. Giving up.")
                    quality_strings = None
            return quality_strings

        async def lookup_challenge(
            filename: Path, prover: DiskProver
        ) -> List[harvester_protocol.ChallengeResponse]:
            # Executes a DiskProver lookup in a threadpool, and returns responses
            all_responses: List[harvester_protocol.ChallengeResponse] = []
            quality_strings = await loop.run_in_executor(
                self.executor, blocking_lookup, filename, prover)
            if quality_strings is not None:
                for index, quality_str in enumerate(quality_strings):
                    self.challenge_hashes[quality_str] = (
                        new_challenge.challenge_hash,
                        filename,
                        uint8(index),
                    )
                    response: harvester_protocol.ChallengeResponse = harvester_protocol.ChallengeResponse(
                        new_challenge.challenge_hash, quality_str, prover.get_size())
                    all_responses.append(response)
            return all_responses

        awaitables = [
            lookup_challenge(filename, prover)
            for filename, prover in self.provers.items()
        ]

        # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
        for sublist_awaitable in asyncio.as_completed(awaitables):
            for response in await sublist_awaitable:
                yield OutboundMessage(
                    NodeType.FARMER,
                    Message("challenge_response", response),
                    Delivery.RESPOND,
                )
        log.info(
            f"Time taken to lookup qualities in {len(self.provers)} plots: {time.time() - start}"
        )

    @api_request
    async def request_proof_of_space(self, request: harvester_protocol.RequestProofOfSpace):
        """
        The farmer requests a proof of space, for one of the proofs that we found.
        We look up the correct plot based on the quality, look up the proof, and return it.
        """
        response: Optional[harvester_protocol.RespondProofOfSpace] = None
        try:
            # Using the quality string, find the right plot and index from our solutions
            challenge_hash, filename, index = self.challenge_hashes[request.quality_string]
        except KeyError:
            log.warning(f"Quality string {request.quality_string} not found")
            return
        if index is not None:
            proof_xs: bytes
            try:
                proof_xs = self.provers[filename].get_full_proof(challenge_hash, index)
            except RuntimeError:
                self.provers[filename] = DiskProver(str(filename))
                proof_xs = self.provers[filename].get_full_proof(challenge_hash, index)

            pool_pubkey = PublicKey.from_bytes(
                bytes.fromhex(self.plot_config["plots"][filename]["pool_pk"]))
            plot_pubkey = PrivateKey.from_bytes(
                bytes.fromhex(self.plot_config["plots"][filename]["sk"])).get_public_key()
            proof_of_space: ProofOfSpace = ProofOfSpace(
                challenge_hash,
                pool_pubkey,
                plot_pubkey,
                uint8(self.provers[filename].get_size()),
                proof_xs,
            )

            response = harvester_protocol.RespondProofOfSpace(
                request.quality_string, proof_of_space)
        if response:
            yield OutboundMessage(
                NodeType.FARMER,
                Message("respond_proof_of_space", response),
                Delivery.RESPOND,
            )

    @api_request
    async def request_header_signature(
            self, request: harvester_protocol.RequestHeaderSignature):
        """
        The farmer requests a signature on the header hash, for one of the proofs that we
        found. A signature is created on the header hash using the plot private key.
        """
        if request.quality_string not in self.challenge_hashes:
            return

        _, filename, _ = self.challenge_hashes[request.quality_string]

        plot_sk = PrivateKey.from_bytes(
            bytes.fromhex(self.plot_config["plots"][filename]["sk"]))
        header_hash_signature: PrependSignature = plot_sk.sign_prepend(request.header_hash)
        assert header_hash_signature.verify(
            [Util.hash256(request.header_hash)], [plot_sk.get_public_key()])

        response: harvester_protocol.RespondHeaderSignature = harvester_protocol.RespondHeaderSignature(
            request.quality_string,
            header_hash_signature,
        )
        yield OutboundMessage(
            NodeType.FARMER,
            Message("respond_header_signature", response),
            Delivery.RESPOND,
        )

    @api_request
    async def request_partial_proof(self, request: harvester_protocol.RequestPartialProof):
        """
        The farmer requests a signature on the farmer_target, for one of the proofs that we
        found. We look up the correct plot based on the quality, and sign the farmer target
        hash using the plot private key. This will be used as a pool share.
        """
        _, filename, _ = self.challenge_hashes[request.quality_string]
        plot_sk = PrivateKey.from_bytes(
            bytes.fromhex(self.plot_config["plots"][filename]["sk"]))
        farmer_target_signature: PrependSignature = plot_sk.sign_prepend(
            request.farmer_target_hash)

        response: harvester_protocol.RespondPartialProof = harvester_protocol.RespondPartialProof(
            request.quality_string, farmer_target_signature)
        yield OutboundMessage(
            NodeType.FARMER,
            Message("respond_partial_proof", response),
            Delivery.RESPOND,
        )
def blocking_lookup(
        filename: Path, plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
    # Uses the DiskProver object to lookup qualities. This is a blocking call,
    # so it should be run in a thread pool.
    try:
        sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
            plot_info.prover.get_id(),
            new_challenge.challenge_hash,
            new_challenge.sp_hash,
        )
        try:
            quality_strings = plot_info.prover.get_qualities_for_challenge(
                sp_challenge_hash)
        except Exception as e:
            self.harvester.log.error(
                f"Error using prover object. Reinitializing prover object. {e}")
            try:
                self.harvester.provers[filename] = dataclasses.replace(
                    plot_info, prover=DiskProver(str(filename)))
                # Retry with the reinitialized prover, not the stale plot_info.prover
                quality_strings = self.harvester.provers[
                    filename].prover.get_qualities_for_challenge(sp_challenge_hash)
            except Exception as e:
                self.harvester.log.error(f"Error reinitializing plot {filename}. {e}")
                return []

        responses: List[Tuple[bytes32, ProofOfSpace]] = []
        if quality_strings is not None:
            # Found proofs of space (on average 1 is expected per plot)
            for index, quality_str in enumerate(quality_strings):
                required_iters: uint64 = calculate_iterations_quality(
                    quality_str,
                    plot_info.prover.get_size(),
                    new_challenge.difficulty,
                    new_challenge.sp_hash,
                )
                sp_interval_iters = calculate_sp_interval_iters(
                    self.harvester.constants, new_challenge.sub_slot_iters)
                if required_iters < sp_interval_iters:
                    # Found a very good proof of space! Will fetch the whole proof
                    # from disk, then send to farmer
                    try:
                        proof_xs = plot_info.prover.get_full_proof(
                            sp_challenge_hash, index)
                    except RuntimeError:
                        self.harvester.log.error(
                            f"Exception fetching full proof for {filename}")
                        continue

                    plot_public_key = ProofOfSpace.generate_plot_public_key(
                        plot_info.local_sk.get_g1(), plot_info.farmer_public_key)
                    responses.append((
                        quality_str,
                        ProofOfSpace(
                            sp_challenge_hash,
                            plot_info.pool_public_key,
                            None,
                            plot_public_key,
                            uint8(plot_info.prover.get_size()),
                            proof_xs,
                        ),
                    ))
        return responses
    except Exception as e:
        self.harvester.log.error(f"Unknown error: {e}")
        return []
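# A hedged sketch of the eligibility test above: a proof is only sent to the
# farmer when its required_iters (derived from the quality string, plot size,
# difficulty, and signage point) fits inside one signage-point interval. The
# 64-way split of the sub-slot matches chia's NUM_SPS_SUB_SLOT, but treat both
# the constant and the function names here as illustrative simplifications.
def proof_is_eligible_sketch(required_iters: int, sub_slot_iters: int,
                             num_sps_sub_slot: int = 64) -> bool:
    # One signage-point interval is the sub-slot split into num_sps_sub_slot pieces.
    sp_interval_iters = sub_slot_iters // num_sps_sub_slot
    return required_iters < sp_interval_iters

# Usage: with sub_slot_iters = 2**26 and 64 signage points, only proofs needing
# fewer than 2**20 iterations qualify for a full disk fetch.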
def test_k_21(self):
    challenge: bytes = bytes([i for i in range(0, 32)])
    plot_seed: bytes = bytes(
        [
            5, 104, 52, 4, 51, 55, 23, 84, 91, 10, 111, 12, 13, 222, 151, 16,
            228, 211, 254, 45, 92, 198, 204, 10, 9, 10, 11, 129, 139, 171, 15, 23,
        ]
    )

    pl = DiskPlotter()
    pl.create_plot_disk(
        ".", ".", ".", "myplot.dat", 21, bytes([1, 2, 3, 4, 5]), plot_seed, 300, 32, 8192, 8
    )
    pl = None

    pr = DiskProver(str(Path("myplot.dat")))

    total_proofs: int = 0
    iterations: int = 5000
    v = Verifier()
    for i in range(iterations):
        if i % 100 == 0:
            print(i)
        challenge = sha256(i.to_bytes(4, "big")).digest()
        for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
            proof = pr.get_full_proof(challenge, index)
            assert len(proof) == 8 * pr.get_size()
            computed_quality = v.validate_proof(
                plot_seed, pr.get_size(), challenge, proof
            )
            assert computed_quality == quality
            total_proofs += 1

    print(f"total proofs {total_proofs} out of {iterations} {total_proofs / iterations}")
    assert total_proofs > 4000
    assert total_proofs < 6000
    pr = None
    sha256_plot_hash = sha256()
    with open("myplot.dat", "rb") as f:
        # Read and update hash string value in blocks of 4K
        for byte_block in iter(lambda: f.read(4096), b""):
            sha256_plot_hash.update(byte_block)
        plot_hash = str(sha256_plot_hash.hexdigest())
    assert plot_hash == "80e32f560f3a4347760d6baae8d16fbaf484948088bff05c51bdcc24b7bc40d9"
    print(f"\nPlotfile asserted sha256: {plot_hash}\n")
    Path("myplot.dat").unlink()
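# Why the 4000-6000 bounds in the test above are safe, sketched with a simple
# Poisson model: each challenge yields one proof on average, so 5000 challenges
# yield a count that is approximately Poisson(5000), with standard deviation
# sqrt(5000) ~ 71. The asserted window is about +/- 14 standard deviations wide,
# so a correctly built plot essentially never fails it. The Poisson assumption
# is a modeling choice for illustration, not something the test itself states.
import math

mean = 5000.0
std_dev = math.sqrt(mean)  # ~70.7
lower, upper = 4000, 6000
print((lower - mean) / std_dev, (upper - mean) / std_dev)  # ~ -14.1, +14.1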
def process_file(file_path: Path) -> Dict:
    new_provers: Dict[Path, PlotInfo] = {}
    filename_str = str(file_path)
    if self.match_str is not None and self.match_str not in filename_str:
        return new_provers
    if file_path.exists():
        if (
            file_path in self.failed_to_open_filenames
            and (time.time() - self.failed_to_open_filenames[file_path]) < 1200
        ):
            # Skip files that failed to open recently; retry once every 20 minutes
            return new_provers
        if file_path in self.plots:
            try:
                stat_info = file_path.stat()
            except Exception as e:
                log.error(f"Failed to open file {file_path}. {e}")
                return new_provers
            if stat_info.st_mtime == self.plots[file_path].time_modified:
                new_provers[file_path] = self.plots[file_path]
                return new_provers
        entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
        if entry is not None:
            loaded_parent, duplicates = entry
            if str(file_path.parent) in duplicates:
                log.debug(f"Skip duplicated plot {str(file_path)}")
                return new_provers
        try:
            with counter_lock:
                if result.processed_files >= self.refresh_parameter.batch_size:
                    result.remaining_files += 1
                    return new_provers
                result.processed_files += 1

            prover = DiskProver(str(file_path))

            log.debug(f"process_file {str(file_path)}")
            expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
            stat_info = file_path.stat()

            # TODO: consider checking if the file was just written to (which would mean that the file is still
            # being copied). A segfault might happen in this edge case.

            if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                log.warning(
                    f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024 ** 3)} GiB, but expected"
                    f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                )
                return new_provers

            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that have the correct keys associated with them
            if self.farmer_public_keys is not None and farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
                pool_contract_puzzle_hash = None
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_public_key = None
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if (
                self.pool_public_keys is not None
                and pool_public_key is not None
                and pool_public_key not in self.pool_public_keys
            ):
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            stat_info = file_path.stat()
            local_sk = master_sk_to_local_sk(local_master_sk)

            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
            )

            with self.plot_filename_paths_lock:
                if file_path.name not in self.plot_filename_paths:
                    self.plot_filename_paths[file_path.name] = (str(Path(prover.get_filename()).parent), set())
                else:
                    self.plot_filename_paths[file_path.name][1].add(str(Path(prover.get_filename()).parent))
                if len(self.plot_filename_paths[file_path.name][1]) > 0:
                    log.warning(
                        f"Have multiple copies of the plot {file_path} in "
                        f"{self.plot_filename_paths[file_path.name][1]}."
                    )
                    return new_provers

            new_provers[file_path] = PlotInfo(
                prover,
                pool_public_key,
                pool_contract_puzzle_hash,
                plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
            )

            with counter_lock:
                result.loaded_plots += 1
                result.loaded_size += stat_info.st_size
        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file {file_path}. {e} {tb}")
            self.failed_to_open_filenames[file_path] = int(time.time())
            return new_provers
        log.info(f"Found plot {file_path} of size {new_provers[file_path].prover.get_size()}")

        if self.show_memo:
            plot_memo: bytes32
            if pool_contract_puzzle_hash is None:
                plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
            else:
                plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
            plot_memo_str: str = plot_memo.hex()
            log.info(f"Memo: {plot_memo_str}")

        return new_provers
    return new_provers
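# A short sketch of the expected-size check used in process_file. The standard
# approximation for a k-size plot is ((2 * k) + 1) * 2 ** (k - 1) bytes, which
# _expected_plot_size is assumed to compute; UI_ACTUAL_SPACE_CONSTANT_FACTOR
# (~0.78 in the chia codebase) scales that down to the size of an actual plot
# file. Treat the exact constants as assumptions for illustration.
def expected_plot_size_bytes(k: int) -> int:
    # Roughly 2**(k-1) entries at about 2k + 1 bits each, expressed in bytes.
    return ((2 * k) + 1) * (2 ** (k - 1))

# Usage: the raw formula gives ~130 GiB for k=32; scaled by ~0.78 that is
# ~101.4 GiB, close to a real k=32 plot file. A file under 98% of the scaled
# size is treated as still being copied and skipped.
print(expected_plot_size_bytes(32) / (1024 ** 3))         # ~130.0
print(0.78 * expected_plot_size_bytes(32) / (1024 ** 3))  # ~101.4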