async def request_signatures(self, request: harvester_protocol.RequestSignatures):
    """
    Sign the farmer's messages with this plot's local key.

    The farmer requests a signature on the header hash for one of the proofs
    we found. The signature is produced with the plot's local secret key
    (re-derived from the master key in the plot memo) and is only a partial
    signature: the farmer combines it with its own half. Also used for pooling.

    Returns a respond_signatures message, or None when the plot is unknown.
    """
    # Everything after the first 64 characters of the identifier is the plot file path.
    plot_filename = Path(request.plot_identifier[64:]).resolve()
    with self.harvester.plot_manager:
        try:
            plot_info = self.harvester.plot_manager.plots[plot_filename]
        except KeyError:
            self.harvester.log.warning(f"KeyError plot {plot_filename} does not exist.")
            return None

    # Re-derive local_sk from the plot memo instead of keeping it in locked memory.
    (
        pool_public_key_or_puzzle_hash,
        farmer_public_key,
        local_master_sk,
    ) = parse_plot_info(plot_info.prover.get_memo())
    local_sk = master_sk_to_local_sk(local_master_sk)

    # Plots bound to a pool contract store a puzzle hash (bytes32) instead of a
    # pool public key; those need the taproot tweak in the aggregate key.
    include_taproot = not isinstance(pool_public_key_or_puzzle_hash, G1Element)
    if include_taproot:
        assert isinstance(pool_public_key_or_puzzle_hash, bytes32)

    agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_public_key, include_taproot)

    # These are only partial signatures; combined with the farmer's half they
    # form a complete PrependSignature.
    message_signatures: List[Tuple[bytes32, G2Element]] = [
        (message, AugSchemeMPL.sign(local_sk, message, agg_pk)) for message in request.messages
    ]

    response: harvester_protocol.RespondSignatures = harvester_protocol.RespondSignatures(
        request.plot_identifier,
        request.challenge_hash,
        request.sp_hash,
        local_sk.get_g1(),
        farmer_public_key,
        message_signatures,
    )

    return make_msg(ProtocolMessageTypes.respond_signatures, response)
def process_file(file_path: Path) -> Optional[PlotInfo]:
    """
    Load (or re-use) a single plot file during a refresh cycle.

    Returns the PlotInfo for the plot, or None when the file is filtered out,
    was recently failing to open, is a duplicate, has keys unknown to the
    farmer, or fails to load. Side effects: updates self.cache,
    self.plot_filename_paths, self.no_key_filenames,
    self.failed_to_open_filenames and appends to the shared `result.loaded`
    list under `counter_lock`.
    """
    # Bail out immediately if the manager was asked to stop refreshing.
    if not self._refreshing_enabled:
        return None
    filename_str = str(file_path)
    # Optional substring filter (e.g. from `chia plots check -g`).
    if self.match_str is not None and self.match_str not in filename_str:
        return None
    # Back off on files that failed to open recently.
    if (
        file_path in self.failed_to_open_filenames
        and (time.time() - self.failed_to_open_filenames[file_path]) < self.refresh_parameter.retry_invalid_seconds
    ):
        # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
        return None
    # Already loaded in a previous cycle: re-use the existing PlotInfo.
    if file_path in self.plots:
        return self.plots[file_path]
    # Skip paths already recorded as duplicates of a loaded plot with the same name.
    entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
    if entry is not None:
        loaded_parent, duplicates = entry
        if str(file_path.parent) in duplicates:
            log.debug(f"Skip duplicated plot {str(file_path)}")
            return None
    try:
        if not file_path.exists():
            return None
        prover = DiskProver(str(file_path))
        log.debug(f"process_file {str(file_path)}")
        expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
        stat_info = file_path.stat()
        # TODO: consider checking if the file was just written to (which would mean that the file is still
        # being copied). A segfault might happen in this edge case.
        # Undersized files (below 98% of the expected size for k>=30) are assumed
        # to still be copying and are skipped for now.
        if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
            log.warning(
                f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
            )
            return None
        # The derived-key data is cached per plot ID; only parse the memo on a cache miss.
        cache_entry = self.cache.get(prover.get_id())
        if cache_entry is None:
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())
            # Only use plots that correct keys associated with them
            if farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None
            # The memo holds either a pool public key (OG plot) or a pool
            # contract puzzle hash (NFT plot).
            pool_public_key: Optional[G1Element] = None
            pool_contract_puzzle_hash: Optional[bytes32] = None
            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
            if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None
            # If a plot is in `no_key_filenames` the keys were missing in earlier refresh cycles. We can remove
            # the current plot from that list if its in there since we passed the key checks above.
            if file_path in self.no_key_filenames:
                self.no_key_filenames.remove(file_path)
            local_sk = master_sk_to_local_sk(local_master_sk)
            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
            )
            cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
            self.cache.update(prover.get_id(), cache_entry)
        # Track which directories hold a plot of this filename; a second
        # directory means this path is a duplicate and is not loaded.
        with self.plot_filename_paths_lock:
            paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
            if paths is None:
                paths = (str(Path(prover.get_filename()).parent), set())
                self.plot_filename_paths[file_path.name] = paths
            else:
                paths[1].add(str(Path(prover.get_filename()).parent))
                log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
                return None
        new_plot_info: PlotInfo = PlotInfo(
            prover,
            cache_entry.pool_public_key,
            cache_entry.pool_contract_puzzle_hash,
            cache_entry.plot_public_key,
            stat_info.st_size,
            stat_info.st_mtime,
        )
        with counter_lock:
            result.loaded.append(new_plot_info)
        # The plot loaded fine now, so clear any stale failure timestamp.
        if file_path in self.failed_to_open_filenames:
            del self.failed_to_open_filenames[file_path]
    except Exception as e:
        tb = traceback.format_exc()
        log.error(f"Failed to open file {file_path}. {e} {tb}")
        self.failed_to_open_filenames[file_path] = int(time.time())
        return None
    log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")
    # Debug aid: re-serialize and print the plot memo when requested.
    if self.show_memo:
        plot_memo: bytes32
        if pool_contract_puzzle_hash is None:
            plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
        else:
            plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
        plot_memo_str: str = plot_memo.hex()
        log.info(f"Memo: {plot_memo_str}")
        # NOTE(review): this branch reads pool_contract_puzzle_hash / local_master_sk,
        # which are only bound on a cache miss above — presumably show_memo runs are
        # always cold-cache; confirm against the caller.
    return new_plot_info
def check_plots(root_path, num, challenge_start, grep_string, list_duplicates, debug_show_memo):
    """
    Load every plot configured under `root_path` and verify each with `num`
    challenges, logging per-plot timing, proof counts, and a final summary.

    Args:
        root_path: Chia root directory containing config.yaml.
        num: Challenges per plot (None -> 30; 0 -> only list/scan, no proving).
        challenge_start: Optional starting challenge index.
        grep_string: Optional substring filter for plot filenames.
        list_duplicates: If True, scan plot directories for duplicate plot IDs.
        debug_show_memo: If True, log each plot's memo while loading.
    """
    config = load_config(root_path, "config.yaml")
    plot_refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(100, 100, 1)
    plot_manager: PlotManager = PlotManager(
        root_path,
        match_str=grep_string,
        show_memo=debug_show_memo,
        open_no_key_filenames=True,
        refresh_parameter=plot_refresh_parameter,
        refresh_callback=plot_refresh_callback,
    )
    # Normalize the challenge count: minimum 5, default 30, 0 means "don't prove".
    if num is not None:
        if num == 0:
            log.warning("Not opening plot files")
        else:
            if num < 5:
                log.warning(f"{num} challenges is too low, setting it to the minimum of 5")
                num = 5
            if num < 30:
                log.warning("Use 30 challenges (our default) for balance of speed and accurate results")
    else:
        num = 30

    # Challenge index range [num_start, num_end).
    if challenge_start is not None:
        num_start = challenge_start
        num_end = num_start + num
    else:
        num_start = 0
        num_end = num
    challenges = num_end - num_start

    if list_duplicates:
        log.warning("Checking for duplicate Plot IDs")
        log.info("Plot filenames expected to end with -[64 char plot ID].plot")

    if list_duplicates:
        all_filenames: List[Path] = []
        for paths in get_plot_filenames(root_path).values():
            all_filenames += paths
        find_duplicate_plot_IDs(all_filenames)

    if num == 0:
        return None

    parallel_read: bool = config["harvester"].get("parallel_read", True)

    v = Verifier()
    log.info(f"Loading plots in config.yaml using plot_manager loading code (parallel read: {parallel_read})\n")
    # Prompts interactively if the keyring is protected by a master passphrase. To use the daemon
    # for keychain access, KeychainProxy/connect_to_keychain should be used instead of Keychain.
    kc: Keychain = Keychain()
    plot_manager.set_public_keys(
        [master_sk_to_farmer_sk(sk).get_g1() for sk, _ in kc.get_all_private_keys()],
        [G1Element.from_bytes(bytes.fromhex(pk)) for pk in config["farmer"]["pool_public_keys"]],
    )
    plot_manager.start_refreshing()

    # Block until the initial refresh has walked all plot directories.
    while plot_manager.needs_refresh():
        sleep(1)
    plot_manager.stop_refreshing()

    if plot_manager.plot_count() > 0:
        log.info("")
        log.info("")
        log.info(f"Starting to test each plot with {num} challenges each\n")
    total_good_plots: Counter = Counter()
    total_bad_plots = 0
    total_size = 0
    bad_plots_list: List[Path] = []

    with plot_manager:
        for plot_path, plot_info in plot_manager.plots.items():
            pr = plot_info.prover
            log.info(f"Testing plot {plot_path} k={pr.get_size()}")
            log.info(f"\tPool public key: {plot_info.pool_public_key}")

            # Look up local_sk from plot to save locked memory
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(pr.get_memo())
            local_sk = master_sk_to_local_sk(local_master_sk)
            log.info(f"\tFarmer public key: {farmer_public_key}")
            log.info(f"\tLocal sk: {local_sk}")
            total_proofs = 0
            caught_exception: bool = False
            for i in range(num_start, num_end):
                # Deterministic challenge derived from the challenge index.
                challenge = std_hash(i.to_bytes(32, "big"))
                # Some plot errors cause get_qualities_for_challenge to throw a RuntimeError
                try:
                    quality_start_time = int(round(time() * 1000))
                    for index, quality_str in enumerate(pr.get_qualities_for_challenge(challenge)):
                        quality_spent_time = int(round(time() * 1000)) - quality_start_time
                        if quality_spent_time > 5000:
                            log.warning(
                                f"\tLooking up qualities took: {quality_spent_time} ms. This should be below 5 seconds "
                                f"to minimize risk of losing rewards."
                            )
                        else:
                            log.info(f"\tLooking up qualities took: {quality_spent_time} ms.")
                        # Other plot errors cause get_full_proof or validate_proof to throw an AssertionError
                        try:
                            proof_start_time = int(round(time() * 1000))
                            proof = pr.get_full_proof(challenge, index, parallel_read)
                            proof_spent_time = int(round(time() * 1000)) - proof_start_time
                            if proof_spent_time > 15000:
                                log.warning(
                                    f"\tFinding proof took: {proof_spent_time} ms. This should be below 15 seconds "
                                    f"to minimize risk of losing rewards."
                                )
                            else:
                                log.info(f"\tFinding proof took: {proof_spent_time} ms")
                            total_proofs += 1
                            # Cross-check the full proof against the quality string.
                            ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size(), challenge, proof)
                            assert quality_str == ver_quality_str
                        except AssertionError as e:
                            log.error(f"{type(e)}: {e} error in proving/verifying for plot {plot_path}")
                            caught_exception = True
                        # Reset the quality timer so the next iteration is timed on its own.
                        quality_start_time = int(round(time() * 1000))
                except KeyboardInterrupt:
                    log.warning("Interrupted, closing")
                    return None
                except SystemExit:
                    log.warning("System is shutting down.")
                    return None
                except Exception as e:
                    log.error(f"{type(e)}: {e} error in getting challenge qualities for plot {plot_path}")
                    caught_exception = True
                # Stop challenging this plot after the first hard failure.
                if caught_exception is True:
                    break
            if total_proofs > 0 and caught_exception is False:
                log.info(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
                total_good_plots[pr.get_size()] += 1
                total_size += plot_path.stat().st_size
            else:
                total_bad_plots += 1
                log.error(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
                bad_plots_list.append(plot_path)
    log.info("")
    log.info("")
    log.info("Summary")
    total_plots: int = sum(list(total_good_plots.values()))
    log.info(f"Found {total_plots} valid plots, total size {total_size / (1024 * 1024 * 1024 * 1024):.5f} TiB")
    for (k, count) in sorted(dict(total_good_plots).items()):
        log.info(f"{count} plots of size {k}")
    grand_total_bad = total_bad_plots + len(plot_manager.failed_to_open_filenames)
    if grand_total_bad > 0:
        log.warning(f"{grand_total_bad} invalid plots found:")
        for bad_plot_path in bad_plots_list:
            log.warning(f"{bad_plot_path}")
    if len(plot_manager.no_key_filenames) > 0:
        log.warning(
            f"There are {len(plot_manager.no_key_filenames)} plots with a farmer or pool public key that "
            f"is not on this machine. The farmer private key must be in the keychain in order to "
            f"farm them, use 'chia keys' to transfer keys. The pool public keys must be in the config.yaml"
        )
def process_file(file_path: Path) -> Dict:
    """
    Load a single plot file and return {file_path: PlotInfo} on success.

    Returns an empty dict when the file is filtered out, skipped (recent
    failure, unchanged cached entry handled separately, duplicate, batch limit
    reached), has keys unknown to the farmer, or fails to open. Side effects:
    updates the shared `result` counters under `counter_lock`, and mutates
    self.plot_filename_paths, self.no_key_filenames and
    self.failed_to_open_filenames.
    """
    new_provers: Dict[Path, PlotInfo] = {}
    filename_str = str(file_path)
    # Optional substring filter (e.g. from `chia plots check -g`).
    if self.match_str is not None and self.match_str not in filename_str:
        return new_provers
    if file_path.exists():
        # FIX: the comparison was `> 1200`, which contradicts the comment below and
        # the newer refresh code (`< retry_invalid_seconds` -> skip): it hot-retried
        # a failing plot for 20 minutes and then never again. Skip while the last
        # failure is *recent* so the file is retried once every 20 minutes.
        if (
            file_path in self.failed_to_open_filenames
            and (time.time() - self.failed_to_open_filenames[file_path]) < 1200
        ):
            # Try once every 20 minutes to open the file
            return new_provers
        # Already loaded: keep the cached PlotInfo if the mtime is unchanged.
        if file_path in self.plots:
            try:
                stat_info = file_path.stat()
            except Exception as e:
                log.error(f"Failed to open file {file_path}. {e}")
                return new_provers
            if stat_info.st_mtime == self.plots[file_path].time_modified:
                new_provers[file_path] = self.plots[file_path]
                return new_provers
        # Skip paths already recorded as duplicates of a loaded plot with the same name.
        entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
        if entry is not None:
            loaded_parent, duplicates = entry
            if str(file_path.parent) in duplicates:
                log.debug(f"Skip duplicated plot {str(file_path)}")
                return new_provers
        try:
            # Respect the per-refresh batch size; surplus files roll over to the next batch.
            with counter_lock:
                if result.processed_files >= self.refresh_parameter.batch_size:
                    result.remaining_files += 1
                    return new_provers
                result.processed_files += 1

            prover = DiskProver(str(file_path))

            log.debug(f"process_file {str(file_path)}")

            expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
            stat_info = file_path.stat()

            # TODO: consider checking if the file was just written to (which would mean that the file is still
            # being copied). A segfault might happen in this edge case.

            # Undersized files (below 98% of the expected size for k>=30) are
            # assumed to still be copying and are skipped for now.
            if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                log.warning(
                    f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                    f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                )
                return new_provers

            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that correct keys associated with them
            if self.farmer_public_keys is not None and farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            # The memo holds either a pool public key (OG plot) or a pool
            # contract puzzle hash (NFT plot).
            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
                pool_contract_puzzle_hash = None
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_public_key = None
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if (
                self.pool_public_keys is not None
                and pool_public_key is not None
                and pool_public_key not in self.pool_public_keys
            ):
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            stat_info = file_path.stat()
            local_sk = master_sk_to_local_sk(local_master_sk)

            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
            )

            # Track which directories hold a plot of this filename; any second
            # directory makes this path a duplicate and it is not loaded.
            with self.plot_filename_paths_lock:
                if file_path.name not in self.plot_filename_paths:
                    self.plot_filename_paths[file_path.name] = (str(Path(prover.get_filename()).parent), set())
                else:
                    self.plot_filename_paths[file_path.name][1].add(str(Path(prover.get_filename()).parent))
                if len(self.plot_filename_paths[file_path.name][1]) > 0:
                    log.warning(
                        f"Have multiple copies of the plot {file_path} in "
                        f"{self.plot_filename_paths[file_path.name][1]}."
                    )
                    return new_provers

            new_provers[file_path] = PlotInfo(
                prover,
                pool_public_key,
                pool_contract_puzzle_hash,
                plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
            )

            with counter_lock:
                result.loaded_plots += 1
                result.loaded_size += stat_info.st_size

        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file {file_path}. {e} {tb}")
            self.failed_to_open_filenames[file_path] = int(time.time())
            return new_provers
        log.info(f"Found plot {file_path} of size {new_provers[file_path].prover.get_size()}")

        # Debug aid: re-serialize and print the plot memo when requested.
        if self.show_memo:
            plot_memo: bytes32
            if pool_contract_puzzle_hash is None:
                plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
            else:
                plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
            plot_memo_str: str = plot_memo.hex()
            log.info(f"Memo: {plot_memo_str}")

        return new_provers
    return new_provers
def blocking_lookup(filename: Path, plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
    """
    Look up proof qualities for one plot against the current signage point and
    return (quality_string, ProofOfSpace) pairs for every winning proof.

    Uses the DiskProver object to lookup qualities. This is a blocking call,
    so it should be run in a thread pool. Any error is logged and yields an
    empty list rather than propagating.
    """
    # NOTE(review): the `filename` parameter is unused in this body, and the log
    # messages below print the literal text "(unknown)" where a filename would be
    # expected — presumably placeholders that replaced f-string interpolations of
    # `filename`; verify against the upstream source.
    try:
        plot_id = plot_info.prover.get_id()
        # Per-plot challenge derived from the signage point for this plot ID.
        sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
            plot_id,
            new_challenge.challenge_hash,
            new_challenge.sp_hash,
        )
        try:
            quality_strings = plot_info.prover.get_qualities_for_challenge(sp_challenge_hash)
        except Exception as e:
            self.harvester.log.error(f"Error using prover object {e}")
            self.harvester.log.error(
                f"File: (unknown) Plot ID: {plot_id.hex()}, "
                f"challenge: {sp_challenge_hash}, plot_info: {plot_info}"
            )
            return []

        responses: List[Tuple[bytes32, ProofOfSpace]] = []
        if quality_strings is not None:
            difficulty = new_challenge.difficulty
            sub_slot_iters = new_challenge.sub_slot_iters
            if plot_info.pool_contract_puzzle_hash is not None:
                # If we are pooling, override the difficulty and sub slot iters with the pool threshold info.
                # This will mean more proofs actually get found, but they are only submitted to the pool,
                # not the blockchain
                for pool_difficulty in new_challenge.pool_difficulties:
                    if pool_difficulty.pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash:
                        difficulty = pool_difficulty.difficulty
                        sub_slot_iters = pool_difficulty.sub_slot_iters

            # Found proofs of space (on average 1 is expected per plot)
            for index, quality_str in enumerate(quality_strings):
                required_iters: uint64 = calculate_iterations_quality(
                    self.harvester.constants.DIFFICULTY_CONSTANT_FACTOR,
                    quality_str,
                    plot_info.prover.get_size(),
                    difficulty,
                    new_challenge.sp_hash,
                )
                sp_interval_iters = calculate_sp_interval_iters(self.harvester.constants, sub_slot_iters)
                # A proof qualifies when its required iterations fit in the signage point interval.
                if required_iters < sp_interval_iters:
                    # Found a very good proof of space! will fetch the whole proof from disk,
                    # then send to farmer
                    try:
                        proof_xs = plot_info.prover.get_full_proof(
                            sp_challenge_hash, index, self.harvester.parallel_read
                        )
                    except Exception as e:
                        self.harvester.log.error(f"Exception fetching full proof for (unknown). {e}")
                        self.harvester.log.error(
                            f"File: (unknown) Plot ID: {plot_id.hex()}, challenge: {sp_challenge_hash}, "
                            f"plot_info: {plot_info}"
                        )
                        continue

                    # Look up local_sk from plot to save locked memory
                    (
                        pool_public_key_or_puzzle_hash,
                        farmer_public_key,
                        local_master_sk,
                    ) = parse_plot_info(plot_info.prover.get_memo())
                    local_sk = master_sk_to_local_sk(local_master_sk)
                    # Pool-contract plots need the taproot tweak in the plot public key.
                    include_taproot = plot_info.pool_contract_puzzle_hash is not None
                    plot_public_key = ProofOfSpace.generate_plot_public_key(
                        local_sk.get_g1(), farmer_public_key, include_taproot
                    )
                    responses.append(
                        (
                            quality_str,
                            ProofOfSpace(
                                sp_challenge_hash,
                                plot_info.pool_public_key,
                                plot_info.pool_contract_puzzle_hash,
                                plot_public_key,
                                uint8(plot_info.prover.get_size()),
                                proof_xs,
                            ),
                        )
                    )
        return responses
    except Exception as e:
        self.harvester.log.error(f"Unknown error: {e}")
        return []