async def request_signatures(self, request: harvester_protocol.RequestSignatures):
    """
    The farmer requests a signature on the header hash, for one of the proofs that we found.
    A signature is created on the header hash using the harvester private key. This can also
    be used for pooling.
    """
    plot_filename = Path(request.plot_identifier[64:]).resolve()
    with self.harvester.plot_manager:
        try:
            plot_info = self.harvester.plot_manager.plots[plot_filename]
        except KeyError:
            self.harvester.log.warning(f"KeyError plot {plot_filename} does not exist.")
            return None

        # Look up local_sk from plot to save locked memory
        (
            pool_public_key_or_puzzle_hash,
            farmer_public_key,
            local_master_sk,
        ) = parse_plot_info(plot_info.prover.get_memo())
        local_sk = master_sk_to_local_sk(local_master_sk)

        if isinstance(pool_public_key_or_puzzle_hash, G1Element):
            include_taproot = False
        else:
            assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
            include_taproot = True

        agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_public_key, include_taproot)

        # This is only a partial signature. When combined with the farmer's half, it will
        # form a complete PrependSignature.
        message_signatures: List[Tuple[bytes32, G2Element]] = []
        for message in request.messages:
            signature: G2Element = AugSchemeMPL.sign(local_sk, message, agg_pk)
            message_signatures.append((message, signature))

        response: harvester_protocol.RespondSignatures = harvester_protocol.RespondSignatures(
            request.plot_identifier,
            request.challenge_hash,
            request.sp_hash,
            local_sk.get_g1(),
            farmer_public_key,
            message_signatures,
        )

        return make_msg(ProtocolMessageTypes.respond_signatures, response)
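# --- Illustrative sketch, not part of the handler above ---
# Shows how the harvester partial signature produced in request_signatures could be combined
# with the farmer's half. This assumes the no-taproot case, where the plot public key is
# simply local_pk + farmer_pk; `farmer_sk`, `agg_pk`, `message` and `harvester_partial` are
# hypothetical stand-ins used only for this example.
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey


def combine_plot_signature(
    farmer_sk: PrivateKey, agg_pk: G1Element, message: bytes, harvester_partial: G2Element
) -> G2Element:
    # The farmer signs the same message, also prepending the aggregate plot public key.
    farmer_partial: G2Element = AugSchemeMPL.sign(farmer_sk, message, agg_pk)
    full_signature: G2Element = AugSchemeMPL.aggregate([farmer_partial, harvester_partial])
    # Without a taproot component, agg_pk == local_pk + farmer_pk, so the aggregate
    # verifies directly under the plot public key.
    assert AugSchemeMPL.verify(agg_pk, message, full_signature)
    return full_signature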
async def create_pool_plot(self, p2_singleton_puzzle_hash: bytes32, shutil=None) -> bytes32:
    plot_dir = get_plot_dir()
    temp_dir = get_plot_tmp_dir()
    args = Namespace()
    args.size = 22
    args.num = 1
    args.buffer = 100
    args.tmp_dir = temp_dir
    args.tmp2_dir = plot_dir
    args.final_dir = plot_dir
    args.plotid = None
    args.memo = None
    args.buckets = 0
    args.stripe_size = 2000
    args.num_threads = 0
    args.nobitfield = False
    args.exclude_final_dir = False
    args.list_duplicates = False
    test_private_keys = [AugSchemeMPL.key_gen(std_hash(b"test_pool_rpc"))]
    plot_public_key = ProofOfSpace.generate_plot_public_key(
        master_sk_to_local_sk(test_private_keys[0]).get_g1(), bt.farmer_pk, True
    )
    plot_id = ProofOfSpace.calculate_plot_id_ph(p2_singleton_puzzle_hash, plot_public_key)
    try:
        plot_keys = PlotKeys(bt.farmer_pk, None, encode_puzzle_hash(p2_singleton_puzzle_hash, "txch"))
        await create_plots(
            args,
            plot_keys,
            bt.root_path,
            use_datetime=False,
            test_private_keys=test_private_keys,
        )
    except KeyboardInterrupt:
        shutil.rmtree(plot_dir, ignore_errors=True)
        raise
    await bt.setup_plots()
    return plot_id
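# --- Illustrative sketch, not part of the test helper above ---
# Since create_pool_plot passes use_datetime=False, the resulting file should follow the
# plot-k{size}-{plot_id}.plot naming used by create_plots later in this section. A small,
# hypothetical helper for locating the file of the returned plot_id (the id is rendered as
# 64 hex characters in the file name):
from pathlib import Path


def expected_plot_path(plot_dir: Path, size: int, plot_id: bytes) -> Path:
    return plot_dir / f"plot-k{size}-{plot_id.hex()}.plot"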
def process_file(file_path: Path) -> Optional[PlotInfo]:
    if not self._refreshing_enabled:
        return None
    filename_str = str(file_path)
    if self.match_str is not None and self.match_str not in filename_str:
        return None
    if (
        file_path in self.failed_to_open_filenames
        and (time.time() - self.failed_to_open_filenames[file_path]) < self.refresh_parameter.retry_invalid_seconds
    ):
        # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
        return None
    if file_path in self.plots:
        return self.plots[file_path]
    entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
    if entry is not None:
        loaded_parent, duplicates = entry
        if str(file_path.parent) in duplicates:
            log.debug(f"Skip duplicated plot {str(file_path)}")
            return None
    try:
        if not file_path.exists():
            return None

        prover = DiskProver(str(file_path))
        log.debug(f"process_file {str(file_path)}")

        expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
        stat_info = file_path.stat()

        # TODO: consider checking if the file was just written to (which would mean that the file is still
        # being copied). A segfault might happen in this edge case.

        if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
            log.warning(
                f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
            )
            return None

        cache_entry = self.cache.get(prover.get_id())
        if cache_entry is None:
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that have the correct keys associated with them
            if farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None

            pool_public_key: Optional[G1Element] = None
            pool_contract_puzzle_hash: Optional[bytes32] = None
            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None

            # If a plot is in `no_key_filenames` the keys were missing in earlier refresh cycles. We can remove
            # the current plot from that list if it's in there since we passed the key checks above.
            if file_path in self.no_key_filenames:
                self.no_key_filenames.remove(file_path)

            local_sk = master_sk_to_local_sk(local_master_sk)

            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
            )

            cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
            self.cache.update(prover.get_id(), cache_entry)

        with self.plot_filename_paths_lock:
            paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
            if paths is None:
                paths = (str(Path(prover.get_filename()).parent), set())
                self.plot_filename_paths[file_path.name] = paths
            else:
                paths[1].add(str(Path(prover.get_filename()).parent))
                log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
                return None

        new_plot_info: PlotInfo = PlotInfo(
            prover,
            cache_entry.pool_public_key,
            cache_entry.pool_contract_puzzle_hash,
            cache_entry.plot_public_key,
            stat_info.st_size,
            stat_info.st_mtime,
        )

        with counter_lock:
            result.loaded.append(new_plot_info)

        if file_path in self.failed_to_open_filenames:
            del self.failed_to_open_filenames[file_path]

    except Exception as e:
        tb = traceback.format_exc()
        log.error(f"Failed to open file {file_path}. {e} {tb}")
        self.failed_to_open_filenames[file_path] = int(time.time())
        return None

    log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")

    if self.show_memo:
        plot_memo: bytes32
        if pool_contract_puzzle_hash is None:
            plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
        else:
            plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
        plot_memo_str: str = plot_memo.hex()
        log.info(f"Memo: {plot_memo_str}")

    return new_plot_info
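# --- Illustrative sketch, not part of process_file above ---
# What parse_plot_info is assumed to decode: the plot memo is either
#   48-byte pool public key + 48-byte farmer public key + 32-byte local master sk (128 bytes), or
#   32-byte pool contract puzzle hash + 48-byte farmer public key + 32-byte local master sk (112 bytes).
# This is a sketch under that assumption, not the project's actual implementation.
from typing import Tuple, Union

from blspy import G1Element, PrivateKey


def parse_plot_info_sketch(memo: bytes) -> Tuple[Union[G1Element, bytes], G1Element, PrivateKey]:
    if len(memo) == 48 + 48 + 32:
        # Plot farmed to a pool public key
        first: Union[G1Element, bytes] = G1Element.from_bytes(memo[:48])
        rest = memo[48:]
    elif len(memo) == 32 + 48 + 32:
        # Plot farmed to a pool contract puzzle hash
        first = memo[:32]
        rest = memo[32:]
    else:
        raise ValueError(f"Unexpected memo length {len(memo)}")
    farmer_public_key = G1Element.from_bytes(rest[:48])
    local_master_sk = PrivateKey.from_bytes(rest[48:])
    return first, farmer_public_key, local_master_sk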
def process_file(directory: Path, filename: Path) -> Tuple[int, Dict]:
    new_provers: Dict[Path, PlotInfo] = {}
    nonlocal changed
    filename_str = str(filename)
    if match_str is not None and match_str not in filename_str:
        return 0, new_provers
    if filename.exists():
        if filename in failed_to_open_filenames and (time.time() - failed_to_open_filenames[filename]) < 1200:
            # Try once every 20 minutes to open the file
            return 0, new_provers
        if filename in provers:
            try:
                stat_info = filename.stat()
            except Exception as e:
                log.error(f"Failed to open file {filename}. {e}")
                return 0, new_provers
            if stat_info.st_mtime == provers[filename].time_modified:
                with plot_ids_lock:
                    if provers[filename].prover.get_id() in plot_ids:
                        log.warning(f"Have multiple copies of the plot {filename}, not adding it.")
                        return 0, new_provers
                    plot_ids.add(provers[filename].prover.get_id())
                new_provers[filename] = provers[filename]
                return stat_info.st_size, new_provers
        try:
            prover = DiskProver(str(filename))

            expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
            stat_info = filename.stat()

            # TODO: consider checking if the file was just written to (which would mean that the file is still
            # being copied). A segfault might happen in this edge case.

            if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                log.warning(
                    f"Not farming plot {filename}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                    f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                )
                return 0, new_provers

            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that have the correct keys associated with them
            if farmer_public_keys is not None and farmer_public_key not in farmer_public_keys:
                log.warning(f"Plot {filename} has a farmer public key that is not in the farmer's pk list.")
                no_key_filenames.add(filename)
                if not open_no_key_filenames:
                    return 0, new_provers

            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
                pool_contract_puzzle_hash = None
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_public_key = None
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if (
                pool_public_keys is not None
                and pool_public_key is not None
                and pool_public_key not in pool_public_keys
            ):
                log.warning(f"Plot {filename} has a pool public key that is not in the farmer's pool pk list.")
                no_key_filenames.add(filename)
                if not open_no_key_filenames:
                    return 0, new_provers

            stat_info = filename.stat()
            local_sk = master_sk_to_local_sk(local_master_sk)
            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_public_key)

            with plot_ids_lock:
                if prover.get_id() in plot_ids:
                    log.warning(f"Have multiple copies of the plot {filename}, not adding it.")
                    return 0, new_provers
                plot_ids.add(prover.get_id())

            new_provers[filename] = PlotInfo(
                prover,
                pool_public_key,
                pool_contract_puzzle_hash,
                plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
                directory,
            )

            changed = True
        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file {filename}. {e} {tb}")
            failed_to_open_filenames[filename] = int(time.time())
            return 0, new_provers

        log.info(f"Found plot {filename} of size {new_provers[filename].prover.get_size()}")

        if show_memo:
            plot_memo: bytes32
            if pool_contract_puzzle_hash is None:
                plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
            else:
                plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
            plot_memo_str: str = plot_memo.hex()
            log.info(f"Memo: {plot_memo_str}")

        return stat_info.st_size, new_provers
    return 0, new_provers
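# --- Illustrative sketch, not part of the loading code above ---
# The "0.98 * expected_size" check above relies on _expected_plot_size. A commonly cited
# approximation for the nominal size of a k-size plot is ((2 * k) + 1) * 2 ** (k - 1) bytes;
# this sketch assumes that formula and is offered for illustration only. The loader then
# multiplies it by UI_ACTUAL_SPACE_CONSTANT_FACTOR before comparing against st_size.
def _expected_plot_size_sketch(k: int) -> int:
    return ((2 * k) + 1) * (2 ** (k - 1))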
def check_plots(root_path, num, challenge_start, grep_string, list_duplicates, debug_show_memo):
    config = load_config(root_path, "config.yaml")
    if num is not None:
        if num == 0:
            log.warning("Not opening plot files")
        else:
            if num < 5:
                log.warning(f"{num} challenges is too low, setting it to the minimum of 5")
                num = 5
            if num < 30:
                log.warning("Use 30 challenges (our default) for balance of speed and accurate results")
    else:
        num = 30

    if challenge_start is not None:
        num_start = challenge_start
        num_end = num_start + num
    else:
        num_start = 0
        num_end = num
    challenges = num_end - num_start

    if grep_string is not None:
        match_str = grep_string
    else:
        match_str = None
    if list_duplicates:
        log.warning("Checking for duplicate Plot IDs")
        log.info("Plot filenames expected to end with -[64 char plot ID].plot")
    show_memo: bool = debug_show_memo

    if list_duplicates:
        plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config["harvester"])
        all_filenames: List[Path] = []
        for paths in plot_filenames.values():
            all_filenames += paths
        find_duplicate_plot_IDs(all_filenames)

    if num == 0:
        return None

    v = Verifier()
    log.info("Loading plots in config.yaml using plot_tools loading code\n")
    kc: Keychain = Keychain()
    pks = [master_sk_to_farmer_sk(sk).get_g1() for sk, _ in kc.get_all_private_keys()]
    pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in config["farmer"]["pool_public_keys"]]
    _, provers, failed_to_open_filenames, no_key_filenames = load_plots(
        {},
        {},
        pks,
        pool_public_keys,
        match_str,
        show_memo,
        root_path,
        open_no_key_filenames=True,
    )
    if len(provers) > 0:
        log.info("")
        log.info("")
        log.info(f"Starting to test each plot with {num} challenges each\n")
    total_good_plots: Counter = Counter()
    total_bad_plots = 0
    total_size = 0
    bad_plots_list: List[Path] = []

    for plot_path, plot_info in provers.items():
        pr = plot_info.prover
        log.info(f"Testing plot {plot_path} k={pr.get_size()}")
        log.info(f"\tPool public key: {plot_info.pool_public_key}")

        # Look up local_sk from plot to save locked memory
        (
            pool_public_key_or_puzzle_hash,
            farmer_public_key,
            local_master_sk,
        ) = parse_plot_info(pr.get_memo())
        local_sk = master_sk_to_local_sk(local_master_sk)
        log.info(f"\tFarmer public key: {farmer_public_key}")
        log.info(f"\tLocal sk: {local_sk}")
        total_proofs = 0
        caught_exception: bool = False
        for i in range(num_start, num_end):
            challenge = std_hash(i.to_bytes(32, "big"))
            # Some plot errors cause get_qualities_for_challenge to throw a RuntimeError
            try:
                for index, quality_str in enumerate(pr.get_qualities_for_challenge(challenge)):
                    # Other plot errors cause get_full_proof or validate_proof to throw an AssertionError
                    try:
                        proof = pr.get_full_proof(challenge, index)
                        total_proofs += 1
                        ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size(), challenge, proof)
                        assert quality_str == ver_quality_str
                    except AssertionError as e:
                        log.error(f"{type(e)}: {e} error in proving/verifying for plot {plot_path}")
                        caught_exception = True
            except KeyboardInterrupt:
                log.warning("Interrupted, closing")
                return None
            except SystemExit:
                log.warning("System is shutting down.")
                return None
            except Exception as e:
                log.error(f"{type(e)}: {e} error in getting challenge qualities for plot {plot_path}")
                caught_exception = True
            if caught_exception is True:
                break
        if total_proofs > 0 and caught_exception is False:
            log.info(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
            total_good_plots[pr.get_size()] += 1
            total_size += plot_path.stat().st_size
        else:
            total_bad_plots += 1
            log.error(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
            bad_plots_list.append(plot_path)

    log.info("")
    log.info("")
    log.info("Summary")
    total_plots: int = sum(list(total_good_plots.values()))
    log.info(f"Found {total_plots} valid plots, total size {total_size / (1024 * 1024 * 1024 * 1024):.5f} TiB")
    for (k, count) in sorted(dict(total_good_plots).items()):
        log.info(f"{count} plots of size {k}")
    grand_total_bad = total_bad_plots + len(failed_to_open_filenames)
    if grand_total_bad > 0:
        log.warning(f"{grand_total_bad} invalid plots found:")
        for bad_plot_path in bad_plots_list:
            log.warning(f"{bad_plot_path}")
    if len(no_key_filenames) > 0:
        log.warning(
            f"There are {len(no_key_filenames)} plots with a farmer or pool public key that "
            f"is not on this machine. The farmer private key must be in the keychain in order to "
            f"farm them, use 'chia keys' to transfer keys. The pool public keys must be in the config.yaml"
        )
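# --- Illustrative sketch, not part of check_plots above ---
# find_duplicate_plot_IDs is called above but not shown in this excerpt. Given the logged
# expectation that "Plot filenames expected to end with -[64 char plot ID].plot", a minimal
# version might group file names by that trailing ID. This is a hypothetical sketch, not the
# project's actual helper.
from collections import defaultdict
from pathlib import Path
from typing import Dict, List


def find_duplicate_plot_ids_sketch(all_filenames: List[Path]) -> Dict[str, List[Path]]:
    by_id: Dict[str, List[Path]] = defaultdict(list)
    for path in all_filenames:
        # path.stem drops ".plot"; the last 64 characters are assumed to be the hex plot ID.
        by_id[path.stem[-64:]].append(path)
    duplicates = {plot_id: paths for plot_id, paths in by_id.items() if len(paths) > 1}
    for plot_id, paths in duplicates.items():
        log.warning(f"Duplicate plot ID {plot_id} found in: {paths}")
    return duplicates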
def blocking_lookup(filename: Path, plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
    # Uses the DiskProver object to lookup qualities. This is a blocking call,
    # so it should be run in a thread pool.
    try:
        plot_id = plot_info.prover.get_id()
        sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
            plot_id,
            new_challenge.challenge_hash,
            new_challenge.sp_hash,
        )
        try:
            quality_strings = plot_info.prover.get_qualities_for_challenge(sp_challenge_hash)
        except Exception as e:
            self.harvester.log.error(f"Error using prover object {e}")
            self.harvester.log.error(
                f"File: {filename} Plot ID: {plot_id.hex()}, "
                f"challenge: {sp_challenge_hash}, plot_info: {plot_info}"
            )
            return []

        responses: List[Tuple[bytes32, ProofOfSpace]] = []
        if quality_strings is not None:
            difficulty = new_challenge.difficulty
            sub_slot_iters = new_challenge.sub_slot_iters
            if plot_info.pool_contract_puzzle_hash is not None:
                # If we are pooling, override the difficulty and sub slot iters with the pool threshold info.
                # This will mean more proofs actually get found, but they are only submitted to the pool,
                # not the blockchain
                for pool_difficulty in new_challenge.pool_difficulties:
                    if pool_difficulty.pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash:
                        difficulty = pool_difficulty.difficulty
                        sub_slot_iters = pool_difficulty.sub_slot_iters

            # Found proofs of space (on average 1 is expected per plot)
            for index, quality_str in enumerate(quality_strings):
                required_iters: uint64 = calculate_iterations_quality(
                    self.harvester.constants.DIFFICULTY_CONSTANT_FACTOR,
                    quality_str,
                    plot_info.prover.get_size(),
                    difficulty,
                    new_challenge.sp_hash,
                )
                sp_interval_iters = calculate_sp_interval_iters(self.harvester.constants, sub_slot_iters)
                if required_iters < sp_interval_iters:
                    # Found a very good proof of space! Will fetch the whole proof from disk,
                    # then send to farmer
                    try:
                        proof_xs = plot_info.prover.get_full_proof(sp_challenge_hash, index)
                    except Exception as e:
                        self.harvester.log.error(f"Exception fetching full proof for {filename}. {e}")
                        self.harvester.log.error(
                            f"File: {filename} Plot ID: {plot_id.hex()}, challenge: {sp_challenge_hash}, "
                            f"plot_info: {plot_info}"
                        )
                        continue

                    # Look up local_sk from plot to save locked memory
                    (
                        pool_public_key_or_puzzle_hash,
                        farmer_public_key,
                        local_master_sk,
                    ) = parse_plot_info(plot_info.prover.get_memo())
                    local_sk = master_sk_to_local_sk(local_master_sk)
                    include_taproot = plot_info.pool_contract_puzzle_hash is not None
                    plot_public_key = ProofOfSpace.generate_plot_public_key(
                        local_sk.get_g1(), farmer_public_key, include_taproot
                    )
                    responses.append(
                        (
                            quality_str,
                            ProofOfSpace(
                                sp_challenge_hash,
                                plot_info.pool_public_key,
                                plot_info.pool_contract_puzzle_hash,
                                plot_public_key,
                                uint8(plot_info.prover.get_size()),
                                proof_xs,
                            ),
                        )
                    )
        return responses
    except Exception as e:
        self.harvester.log.error(f"Unknown error: {e}")
        return []
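# --- Illustrative sketch, not part of blocking_lookup above ---
# The comment in blocking_lookup says it is a blocking call that should run in a thread pool.
# A minimal way to do that from surrounding async code, assuming a ThreadPoolExecutor is
# available on the harvester (`self.harvester.executor` is a hypothetical name for this sketch):
import asyncio


async def lookup_in_thread_pool(self, filename, plot_info):
    loop = asyncio.get_running_loop()
    # run_in_executor keeps the event loop responsive while the DiskProver does disk I/O.
    return await loop.run_in_executor(self.harvester.executor, blocking_lookup, filename, plot_info)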
def check_plots(root_path, num, challenge_start, grep_string, list_duplicates, debug_show_memo):
    config = load_config(root_path, "config.yaml")
    plot_refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(100, 100, 1)
    plot_manager: PlotManager = PlotManager(
        root_path,
        match_str=grep_string,
        show_memo=debug_show_memo,
        open_no_key_filenames=True,
        refresh_parameter=plot_refresh_parameter,
        refresh_callback=plot_refresh_callback,
    )
    if num is not None:
        if num == 0:
            log.warning("Not opening plot files")
        else:
            if num < 5:
                log.warning(f"{num} challenges is too low, setting it to the minimum of 5")
                num = 5
            if num < 30:
                log.warning("Use 30 challenges (our default) for balance of speed and accurate results")
    else:
        num = 30

    if challenge_start is not None:
        num_start = challenge_start
        num_end = num_start + num
    else:
        num_start = 0
        num_end = num
    challenges = num_end - num_start

    if list_duplicates:
        log.warning("Checking for duplicate Plot IDs")
        log.info("Plot filenames expected to end with -[64 char plot ID].plot")

    if list_duplicates:
        all_filenames: List[Path] = []
        for paths in get_plot_filenames(root_path).values():
            all_filenames += paths
        find_duplicate_plot_IDs(all_filenames)

    if num == 0:
        return None

    parallel_read: bool = config["harvester"].get("parallel_read", True)

    v = Verifier()
    log.info(f"Loading plots in config.yaml using plot_manager loading code (parallel read: {parallel_read})\n")
    # Prompts interactively if the keyring is protected by a master passphrase. To use the daemon
    # for keychain access, KeychainProxy/connect_to_keychain should be used instead of Keychain.
    kc: Keychain = Keychain()
    plot_manager.set_public_keys(
        [master_sk_to_farmer_sk(sk).get_g1() for sk, _ in kc.get_all_private_keys()],
        [G1Element.from_bytes(bytes.fromhex(pk)) for pk in config["farmer"]["pool_public_keys"]],
    )
    plot_manager.start_refreshing()

    while plot_manager.needs_refresh():
        sleep(1)

    plot_manager.stop_refreshing()

    if plot_manager.plot_count() > 0:
        log.info("")
        log.info("")
        log.info(f"Starting to test each plot with {num} challenges each\n")
    total_good_plots: Counter = Counter()
    total_bad_plots = 0
    total_size = 0
    bad_plots_list: List[Path] = []

    with plot_manager:
        for plot_path, plot_info in plot_manager.plots.items():
            pr = plot_info.prover
            log.info(f"Testing plot {plot_path} k={pr.get_size()}")
            log.info(f"\tPool public key: {plot_info.pool_public_key}")

            # Look up local_sk from plot to save locked memory
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(pr.get_memo())
            local_sk = master_sk_to_local_sk(local_master_sk)
            log.info(f"\tFarmer public key: {farmer_public_key}")
            log.info(f"\tLocal sk: {local_sk}")
            total_proofs = 0
            caught_exception: bool = False
            for i in range(num_start, num_end):
                challenge = std_hash(i.to_bytes(32, "big"))
                # Some plot errors cause get_qualities_for_challenge to throw a RuntimeError
                try:
                    quality_start_time = int(round(time() * 1000))
                    for index, quality_str in enumerate(pr.get_qualities_for_challenge(challenge)):
                        quality_spent_time = int(round(time() * 1000)) - quality_start_time
                        if quality_spent_time > 5000:
                            log.warning(
                                f"\tLooking up qualities took: {quality_spent_time} ms. This should be below 5 seconds "
                                f"to minimize risk of losing rewards."
                            )
                        else:
                            log.info(f"\tLooking up qualities took: {quality_spent_time} ms.")

                        # Other plot errors cause get_full_proof or validate_proof to throw an AssertionError
                        try:
                            proof_start_time = int(round(time() * 1000))
                            proof = pr.get_full_proof(challenge, index, parallel_read)
                            proof_spent_time = int(round(time() * 1000)) - proof_start_time
                            if proof_spent_time > 15000:
                                log.warning(
                                    f"\tFinding proof took: {proof_spent_time} ms. This should be below 15 seconds "
                                    f"to minimize risk of losing rewards."
                                )
                            else:
                                log.info(f"\tFinding proof took: {proof_spent_time} ms")

                            total_proofs += 1
                            ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size(), challenge, proof)
                            assert quality_str == ver_quality_str
                        except AssertionError as e:
                            log.error(f"{type(e)}: {e} error in proving/verifying for plot {plot_path}")
                            caught_exception = True
                        quality_start_time = int(round(time() * 1000))
                except KeyboardInterrupt:
                    log.warning("Interrupted, closing")
                    return None
                except SystemExit:
                    log.warning("System is shutting down.")
                    return None
                except Exception as e:
                    log.error(f"{type(e)}: {e} error in getting challenge qualities for plot {plot_path}")
                    caught_exception = True
                if caught_exception is True:
                    break
            if total_proofs > 0 and caught_exception is False:
                log.info(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
                total_good_plots[pr.get_size()] += 1
                total_size += plot_path.stat().st_size
            else:
                total_bad_plots += 1
                log.error(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
                bad_plots_list.append(plot_path)

    log.info("")
    log.info("")
    log.info("Summary")
    total_plots: int = sum(list(total_good_plots.values()))
    log.info(f"Found {total_plots} valid plots, total size {total_size / (1024 * 1024 * 1024 * 1024):.5f} TiB")
    for (k, count) in sorted(dict(total_good_plots).items()):
        log.info(f"{count} plots of size {k}")
    grand_total_bad = total_bad_plots + len(plot_manager.failed_to_open_filenames)
    if grand_total_bad > 0:
        log.warning(f"{grand_total_bad} invalid plots found:")
        for bad_plot_path in bad_plots_list:
            log.warning(f"{bad_plot_path}")
    if len(plot_manager.no_key_filenames) > 0:
        log.warning(
            f"There are {len(plot_manager.no_key_filenames)} plots with a farmer or pool public key that "
            f"is not on this machine. The farmer private key must be in the keychain in order to "
            f"farm them, use 'chia keys' to transfer keys. The pool public keys must be in the config.yaml"
        )
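# --- Illustrative sketch, not part of check_plots above ---
# plot_refresh_callback is passed to PlotManager above but not defined in this excerpt. A
# minimal callback might just log refresh progress; the result fields used here
# (loaded_plots, remaining_files) mirror the counters seen in process_file and are
# assumptions for this sketch.
def plot_refresh_callback_sketch(refresh_result) -> None:
    log.info(
        f"Plot refresh batch done: loaded {refresh_result.loaded_plots} plots, "
        f"{refresh_result.remaining_files} files remaining"
    )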
def create_plots(args, root_path, use_datetime=True, test_private_keys: Optional[List] = None):
    config_filename = config_path_for_filename(root_path, "config.yaml")
    config = load_config(root_path, config_filename)

    if args.tmp2_dir is None:
        args.tmp2_dir = args.tmp_dir

    farmer_public_key: G1Element
    if args.farmer_public_key is not None:
        farmer_public_key = G1Element.from_bytes(bytes.fromhex(args.farmer_public_key))
    else:
        farmer_public_key = get_farmer_public_key(args.alt_fingerprint)

    pool_public_key: Optional[G1Element] = None
    pool_contract_puzzle_hash: Optional[bytes32] = None
    if args.pool_public_key is not None:
        if args.pool_contract_address is not None:
            raise RuntimeError("Choose one of pool_contract_address and pool_public_key")
        pool_public_key = G1Element.from_bytes(bytes.fromhex(args.pool_public_key))
    else:
        if args.pool_contract_address is None:
            # If nothing is set, farms to the provided key (or the first key)
            pool_public_key = get_pool_public_key(args.alt_fingerprint)
        else:
            # If the pool contract puzzle hash is set, use that
            pool_contract_puzzle_hash = decode_puzzle_hash(args.pool_contract_address)

    assert (pool_public_key is None) != (pool_contract_puzzle_hash is None)
    num = args.num

    if args.size < config["min_mainnet_k_size"] and test_private_keys is None:
        log.warning(f"Creating plots with size k={args.size}, which is less than the minimum required for mainnet")
    if args.size < 22:
        log.warning("k under 22 is not supported. Increasing k to 22")
        args.size = 22

    if pool_public_key is not None:
        log.info(
            f"Creating {num} plots of size {args.size}, pool public key: "
            f"{bytes(pool_public_key).hex()} farmer public key: {bytes(farmer_public_key).hex()}"
        )
    else:
        assert pool_contract_puzzle_hash is not None
        log.info(
            f"Creating {num} plots of size {args.size}, pool contract address: "
            f"{args.pool_contract_address} farmer public key: {bytes(farmer_public_key).hex()}"
        )

    tmp_dir_created = False
    if not args.tmp_dir.exists():
        mkdir(args.tmp_dir)
        tmp_dir_created = True

    tmp2_dir_created = False
    if not args.tmp2_dir.exists():
        mkdir(args.tmp2_dir)
        tmp2_dir_created = True

    mkdir(args.final_dir)

    finished_filenames = []
    for i in range(num):
        # Generate a random master secret key
        if test_private_keys is not None:
            assert len(test_private_keys) == num
            sk: PrivateKey = test_private_keys[i]
        else:
            sk = AugSchemeMPL.key_gen(token_bytes(32))

        # The plot public key is the combination of the harvester and farmer keys
        # New plots will also include a taproot of the keys, for extensibility
        include_taproot: bool = pool_contract_puzzle_hash is not None
        plot_public_key = ProofOfSpace.generate_plot_public_key(
            master_sk_to_local_sk(sk).get_g1(), farmer_public_key, include_taproot
        )

        # The plot id is based on the harvester, farmer, and pool keys
        if pool_public_key is not None:
            plot_id: bytes32 = ProofOfSpace.calculate_plot_id_pk(pool_public_key, plot_public_key)
            plot_memo: bytes32 = stream_plot_info_pk(pool_public_key, farmer_public_key, sk)
        else:
            assert pool_contract_puzzle_hash is not None
            plot_id = ProofOfSpace.calculate_plot_id_ph(pool_contract_puzzle_hash, plot_public_key)
            plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, sk)

        if args.plotid is not None:
            log.info(f"Debug plot ID: {args.plotid}")
            plot_id = bytes32(bytes.fromhex(args.plotid))

        if args.memo is not None:
            log.info(f"Debug memo: {args.memo}")
            plot_memo = bytes.fromhex(args.memo)

        # Log the memo, which is useful for dev debugging
        plot_memo_str: str = plot_memo.hex()
        log.info(f"Memo: {plot_memo_str}")

        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")

        if use_datetime:
            filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
        else:
            filename = f"plot-k{args.size}-{plot_id}.plot"
        full_path: Path = args.final_dir / filename

        resolved_final_dir: str = str(Path(args.final_dir).resolve())
        plot_directories_list: str = config["harvester"]["plot_directories"]

        if args.exclude_final_dir:
            log.info(f"NOT adding directory {resolved_final_dir} to harvester for farming")
            if resolved_final_dir in plot_directories_list:
                log.warning(f"Directory {resolved_final_dir} already exists for harvester, please remove it manually")
        else:
            if resolved_final_dir not in plot_directories_list:
                # Adds the directory to the plot directories if it is not present
                log.info(f"Adding directory {resolved_final_dir} to harvester for farming")
                config = add_plot_directory(resolved_final_dir, root_path)

        if not full_path.exists():
            log.info(f"Starting plot {i + 1}/{num}")
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                plot_memo,
                plot_id,
                args.buffer,
                args.buckets,
                args.stripe_size,
                args.num_threads,
                args.nobitfield,
            )
            finished_filenames.append(filename)
        else:
            log.info(f"Plot {filename} already exists")

    log.info("Summary:")

    if tmp_dir_created:
        try:
            args.tmp_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty.")

    if tmp2_dir_created:
        try:
            args.tmp2_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty.")

    log.info(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log.info(filename)
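# --- Illustrative usage sketch, not part of create_plots above ---
# Building an argparse.Namespace by hand, mirroring the fields that create_plots (and the
# create_pool_plot test helper earlier in this section) actually read. Paths and key values
# here are placeholders.
from argparse import Namespace
from pathlib import Path

args = Namespace()
args.size = 22
args.num = 1
args.buffer = 100
args.tmp_dir = Path("/tmp/plot-tmp")   # placeholder path
args.tmp2_dir = None                   # falls back to tmp_dir when None
args.final_dir = Path("/tmp/plots")    # placeholder path
args.plotid = None
args.memo = None
args.buckets = 0
args.stripe_size = 2000
args.num_threads = 0
args.nobitfield = False
args.exclude_final_dir = False
args.farmer_public_key = None          # resolved from the keychain when None
args.pool_public_key = None
args.pool_contract_address = None
args.alt_fingerprint = None

# create_plots(args, root_path) would then derive the keys, plot id and memo, and drive
# DiskPlotter.create_plot_disk as shown above.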
def process_file(file_path: Path) -> Dict:
    new_provers: Dict[Path, PlotInfo] = {}
    filename_str = str(file_path)
    if self.match_str is not None and self.match_str not in filename_str:
        return new_provers
    if file_path.exists():
        if (
            file_path in self.failed_to_open_filenames
            and (time.time() - self.failed_to_open_filenames[file_path]) < 1200
        ):
            # Try once every 20 minutes to open the file
            return new_provers
        if file_path in self.plots:
            try:
                stat_info = file_path.stat()
            except Exception as e:
                log.error(f"Failed to open file {file_path}. {e}")
                return new_provers
            if stat_info.st_mtime == self.plots[file_path].time_modified:
                new_provers[file_path] = self.plots[file_path]
                return new_provers

        entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
        if entry is not None:
            loaded_parent, duplicates = entry
            if str(file_path.parent) in duplicates:
                log.debug(f"Skip duplicated plot {str(file_path)}")
                return new_provers
        try:
            with counter_lock:
                if result.processed_files >= self.refresh_parameter.batch_size:
                    result.remaining_files += 1
                    return new_provers
                result.processed_files += 1

            prover = DiskProver(str(file_path))
            log.debug(f"process_file {str(file_path)}")

            expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
            stat_info = file_path.stat()

            # TODO: consider checking if the file was just written to (which would mean that the file is still
            # being copied). A segfault might happen in this edge case.

            if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                log.warning(
                    f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                    f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                )
                return new_provers

            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that have the correct keys associated with them
            if self.farmer_public_keys is not None and farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
                pool_contract_puzzle_hash = None
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_public_key = None
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if (
                self.pool_public_keys is not None
                and pool_public_key is not None
                and pool_public_key not in self.pool_public_keys
            ):
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            stat_info = file_path.stat()
            local_sk = master_sk_to_local_sk(local_master_sk)

            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
            )

            with self.plot_filename_paths_lock:
                if file_path.name not in self.plot_filename_paths:
                    self.plot_filename_paths[file_path.name] = (str(Path(prover.get_filename()).parent), set())
                else:
                    self.plot_filename_paths[file_path.name][1].add(str(Path(prover.get_filename()).parent))
                if len(self.plot_filename_paths[file_path.name][1]) > 0:
                    log.warning(
                        f"Have multiple copies of the plot {file_path} in "
                        f"{self.plot_filename_paths[file_path.name][1]}."
                    )
                    return new_provers

            new_provers[file_path] = PlotInfo(
                prover,
                pool_public_key,
                pool_contract_puzzle_hash,
                plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
            )

            with counter_lock:
                result.loaded_plots += 1
                result.loaded_size += stat_info.st_size
        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file {file_path}. {e} {tb}")
            self.failed_to_open_filenames[file_path] = int(time.time())
            return new_provers

        log.info(f"Found plot {file_path} of size {new_provers[file_path].prover.get_size()}")

        if self.show_memo:
            plot_memo: bytes32
            if pool_contract_puzzle_hash is None:
                plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
            else:
                plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
            plot_memo_str: str = plot_memo.hex()
            log.info(f"Memo: {plot_memo_str}")

        return new_provers
    return new_provers
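# --- Illustrative note, not part of process_file above ---
# The duplicate tracking above keeps plot_filename_paths as
#   Dict[plot file name -> (directory it was first loaded from, set of other directories the
#   same file name was later seen in)].
# A tiny hypothetical helper for turning that into a report of all duplicate locations:
from typing import Dict, List, Set, Tuple


def duplicate_locations(plot_filename_paths: Dict[str, Tuple[str, Set[str]]]) -> Dict[str, List[str]]:
    return {name: [loaded_from, *extra] for name, (loaded_from, extra) in plot_filename_paths.items() if extra}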
def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Dict[Path, int],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    match_str: Optional[str],
    show_memo: bool,
    root_path: Path,
    open_no_key_filenames=False,
) -> Tuple[bool, Dict[Path, PlotInfo], Dict[Path, int], Set[Path]]:
    start_time = time.time()
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    log.info(f'Searching directories {config_file["plot_directories"]}')

    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    total_size = 0
    new_provers: Dict[Path, PlotInfo] = {}
    plot_ids: Set[bytes32] = set()

    if match_str is not None:
        log.info(f'Only loading plots that contain "{match_str}" in the file or directory name')

    for filename in all_filenames:
        filename_str = str(filename)
        if match_str is not None and match_str not in filename_str:
            continue
        if filename.exists():
            if filename in failed_to_open_filenames and (time.time() - failed_to_open_filenames[filename]) < 1200:
                # Try once every 20 minutes to open the file
                continue
            if filename in provers:
                try:
                    stat_info = filename.stat()
                except Exception as e:
                    log.error(f"Failed to open file {filename}. {e}")
                    continue
                if stat_info.st_mtime == provers[filename].time_modified:
                    total_size += stat_info.st_size
                    new_provers[filename] = provers[filename]
                    plot_ids.add(provers[filename].prover.get_id())
                    continue
            try:
                prover = DiskProver(str(filename))

                expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
                stat_info = filename.stat()

                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.

                if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot {filename}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    continue

                if prover.get_id() in plot_ids:
                    log.warning(f"Have multiple copies of the plot {filename}, not adding it.")
                    continue

                (
                    pool_public_key_or_puzzle_hash,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())

                # Only use plots that have the correct keys associated with them
                if farmer_public_keys is not None and farmer_public_key not in farmer_public_keys:
                    log.warning(f"Plot {filename} has a farmer public key that is not in the farmer's pk list.")
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                    pool_public_key = pool_public_key_or_puzzle_hash
                    pool_contract_puzzle_hash = None
                else:
                    assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                    pool_public_key = None
                    pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

                if (
                    pool_public_keys is not None
                    and pool_public_key is not None
                    and pool_public_key not in pool_public_keys
                ):
                    log.warning(f"Plot {filename} has a pool public key that is not in the farmer's pool pk list.")
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key
                )
                new_provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    pool_contract_puzzle_hash,
                    plot_public_key,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                plot_ids.add(prover.get_id())
                total_size += stat_info.st_size
                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file {filename}. {e} {tb}")
                failed_to_open_filenames[filename] = int(time.time())
                continue

            log.info(f"Found plot {filename} of size {new_provers[filename].prover.get_size()}")

            if show_memo:
                plot_memo: bytes32
                if pool_contract_puzzle_hash is None:
                    plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
                else:
                    plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
                plot_memo_str: str = plot_memo.hex()
                log.info(f"Memo: {plot_memo_str}")

    log.info(
        f"Loaded a total of {len(new_provers)} plots of size {total_size / (1024 ** 4)} TiB, in"
        f" {time.time()-start_time} seconds"
    )
    return changed, new_provers, failed_to_open_filenames, no_key_filenames
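# --- Illustrative usage sketch, not part of load_plots above ---
# How a caller such as the old check_plots (earlier in this section) consumes the return
# value: a changed flag, the refreshed prover map, the failure timestamps, and the set of
# plots whose keys are not available locally. `root_path` is a placeholder here.
changed, provers, failed_to_open_filenames, no_key_filenames = load_plots(
    {},      # previously loaded provers
    {},      # previous failure timestamps
    None,    # farmer_public_keys: None disables the farmer key filter
    None,    # pool_public_keys: None disables the pool key filter
    None,    # match_str
    False,   # show_memo
    root_path,
    open_no_key_filenames=True,
)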