def test_k_21(self):
    challenge: bytes = bytes([i for i in range(0, 32)])
    plot_seed: bytes = bytes(
        [
            5, 104, 52, 4, 51, 55, 23, 84, 91, 10, 111, 12, 13,
            222, 151, 16, 228, 211, 254, 45, 92, 198, 204, 10, 9,
            10, 11, 129, 139, 171, 15, 23,
        ]
    )
    pl = DiskPlotter()
    pl.create_plot_disk(".", ".", ".", "myplot.dat", 21, bytes([1, 2, 3, 4, 5]), plot_seed, 2 * 1024)
    pl = None

    pr = DiskProver(str(Path("myplot.dat")))

    total_proofs: int = 0
    iterations: int = 5000
    v = Verifier()
    for i in range(iterations):
        if i % 100 == 0:
            print(i)
        challenge = sha256(i.to_bytes(4, "big")).digest()
        for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
            proof = pr.get_full_proof(challenge, index)
            assert len(proof) == 8 * pr.get_size()
            computed_quality = v.validate_proof(plot_seed, pr.get_size(), challenge, proof)
            assert computed_quality == quality
            total_proofs += 1

    print(
        f"total proofs {total_proofs} out of {iterations} "
        f"{total_proofs / iterations}"
    )
    assert total_proofs == 4647
    pr = None
    Path("myplot.dat").unlink()
def test_faulty_plot_doesnt_crash(self):
    if Path("myplot.dat").exists():
        Path("myplot.dat").unlink()
    if Path("myplotbad.dat").exists():
        Path("myplotbad.dat").unlink()
    plot_id: bytes = bytes([i for i in range(32, 64)])
    pl = DiskPlotter()
    pl.create_plot_disk(
        ".",
        ".",
        ".",
        "myplot.dat",
        21,
        bytes([1, 2, 3, 4, 5]),
        plot_id,
        300,
        32,
        8192,
        8,
        False,
    )
    f = open("myplot.dat", "rb")
    all_data = bytearray(f.read())
    f.close()
    assert len(all_data) > 20000000
    all_data_bad = (
        all_data[:20000000] + bytearray(token_bytes(10000)) + all_data[20100000:]
    )
    f_bad = open("myplotbad.dat", "wb")
    f_bad.write(all_data_bad)
    f_bad.close()

    pr = DiskProver(str(Path("myplotbad.dat")))

    iterations: int = 50000
    v = Verifier()
    successes = 0
    failures = 0
    for i in range(iterations):
        if i % 100 == 0:
            print(i)
        challenge = sha256(i.to_bytes(4, "big")).digest()
        try:
            for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
                proof = pr.get_full_proof(challenge, index)
                computed_quality = v.validate_proof(plot_id, pr.get_size(), challenge, proof)
                if computed_quality == quality:
                    successes += 1
                else:
                    print("Did not validate")
                    failures += 1
        except Exception as e:
            print(f"Exception: {e}")
            failures += 1
    print(f"Successes: {successes}")
    print(f"Failures: {failures}")
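# A minimal round-trip sketch distilled from the two tests above: create a plot,
# look up the qualities for one challenge, fetch the full proof at each index,
# and verify it. The `chiapos` import path, the output filename, and the single
# hard-coded challenge are assumptions made for this sketch; the plotting
# parameters mirror the call in test_faulty_plot_doesnt_crash.
from hashlib import sha256
from pathlib import Path

from chiapos import DiskPlotter, DiskProver, Verifier  # assumed binding module

plot_id = bytes(range(32))         # illustrative 32-byte plot id
memo = bytes([1, 2, 3, 4, 5])      # illustrative memo, as in the tests above
DiskPlotter().create_plot_disk(
    ".", ".", ".", "sketch.dat", 21, memo, plot_id, 300, 32, 8192, 8, False
)

prover = DiskProver(str(Path("sketch.dat")))
verifier = Verifier()
challenge = sha256((0).to_bytes(4, "big")).digest()
for index, quality in enumerate(prover.get_qualities_for_challenge(challenge)):
    proof = prover.get_full_proof(challenge, index)
    # validate_proof recomputes the quality string from the proof itself
    assert verifier.validate_proof(plot_id, prover.get_size(), challenge, proof) == quality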
def main(): """ Script for checking all plots in the plots.yaml file. Specify a number of challenge to test for each plot. """ parser = argparse.ArgumentParser(description="Chia plot checking script.") parser.add_argument("-n", "--num", help="Number of challenges", type=int, default=1000) args = parser.parse_args() v = Verifier() if os.path.isfile(plot_config_filename): plot_config = safe_load(open(plot_config_filename, "r")) for plot_filename, plot_info in plot_config["plots"].items(): plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed( PublicKey.from_bytes(bytes.fromhex(plot_info["pool_pk"])), PrivateKey.from_bytes(bytes.fromhex( plot_info["sk"])).get_public_key(), ) if not os.path.isfile(plot_filename): # Tries relative path full_path: str = os.path.join(plot_root, plot_filename) if not os.path.isfile(full_path): # Tries absolute path full_path: str = plot_filename if not os.path.isfile(full_path): print(f"Plot file {full_path} not found.") continue pr = DiskProver(full_path) else: pr = DiskProver(plot_filename) total_proofs = 0 try: for i in range(args.num): challenge = sha256(i.to_bytes(32, "big")).digest() for index, quality in enumerate( pr.get_qualities_for_challenge(challenge)): proof = pr.get_full_proof(challenge, index) total_proofs += 1 ver_quality = v.validate_proof(plot_seed, pr.get_size(), challenge, proof) assert quality == ver_quality except BaseException as e: print( f"{type(e)}: {e} error in proving/verifying for plot {plot_filename}" ) print( f"{plot_filename}: Proofs {total_proofs} / {args.num}, {round(total_proofs/float(args.num), 4)}" ) else: print(f"Not plot file found at {plot_config_filename}")
def main(): """ Script for checking all plots in the plots.yaml file. Specify a number of challenge to test for each plot. """ parser = argparse.ArgumentParser( description="Exodus plot checking script.") parser.add_argument("-n", "--num", help="Number of challenges", type=int, default=100) args = parser.parse_args() root_path = DEFAULT_ROOT_PATH plot_config = load_config(root_path, plot_config_filename) config = load_config(root_path, config_filename) initialize_logging("%(name)-22s", {"log_stdout": True}, root_path) log = logging.getLogger(__name__) v = Verifier() log.info("Loading plots in plots.yaml using harvester loading code\n") provers, _, _ = load_plots(config["harvester"], plot_config, None, root_path) log.info( f"\n\nStarting to test each plot with {args.num} challenges each\n") for plot_path, pr in provers.items(): total_proofs = 0 try: for i in range(args.num): challenge = std_hash(i.to_bytes(32, "big")) for index, quality_str in enumerate( pr.get_qualities_for_challenge(challenge)): proof = pr.get_full_proof(challenge, index) total_proofs += 1 ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size(), challenge, proof) assert quality_str == ver_quality_str except BaseException as e: if isinstance(e, KeyboardInterrupt): log.warning("Interrupted, closing") return log.error( f"{type(e)}: {e} error in proving/verifying for plot {plot_path}" ) if total_proofs > 0: log.info( f"{plot_path}: Proofs {total_proofs} / {args.num}, {round(total_proofs/float(args.num), 4)}" ) else: log.error( f"{plot_path}: Proofs {total_proofs} / {args.num}, {round(total_proofs/float(args.num), 4)}" )
def check_plots(args, root_path):
    config = load_config(root_path, "config.yaml")
    if args.num is not None:
        num = args.num
    else:
        num = 20
    if args.grep_string is not None:
        match_str = args.grep_string
    else:
        match_str = None

    v = Verifier()
    log.info("Loading plots in config.yaml using plot_tools loading code\n")
    kc: Keychain = Keychain()
    pks = [master_sk_to_farmer_sk(sk).get_g1() for sk, _ in kc.get_all_private_keys()]
    pool_public_keys = [
        G1Element.from_bytes(bytes.fromhex(pk))
        for pk in config["farmer"]["pool_public_keys"]
    ]
    _, provers, failed_to_open_filenames, no_key_filenames = load_plots(
        {},
        {},
        pks,
        pool_public_keys,
        match_str,
        root_path,
        open_no_key_filenames=True,
    )
    if len(provers) > 0:
        log.info("")
        log.info("")
        log.info(f"Starting to test each plot with {num} challenges each\n")
    total_good_plots: Counter = Counter()
    total_bad_plots = 0
    total_size = 0

    for plot_path, plot_info in provers.items():
        pr = plot_info.prover
        log.info(f"Testing plot {plot_path} k={pr.get_size()}")
        log.info(f"\tPool public key: {plot_info.pool_public_key}")
        log.info(f"\tFarmer public key: {plot_info.farmer_public_key}")
        log.info(f"\tLocal sk: {plot_info.local_sk}")
        total_proofs = 0
        try:
            for i in range(num):
                challenge = std_hash(i.to_bytes(32, "big"))
                for index, quality_str in enumerate(pr.get_qualities_for_challenge(challenge)):
                    proof = pr.get_full_proof(challenge, index)
                    total_proofs += 1
                    ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size(), challenge, proof)
                    assert quality_str == ver_quality_str
        except BaseException as e:
            if isinstance(e, KeyboardInterrupt):
                log.warning("Interrupted, closing")
                return
            log.error(f"{type(e)}: {e} error in proving/verifying for plot {plot_path}")
        if total_proofs > 0:
            log.info(f"\tProofs {total_proofs} / {num}, {round(total_proofs/float(num), 4)}")
            total_good_plots[pr.get_size()] += 1
            total_size += plot_path.stat().st_size
        else:
            total_bad_plots += 1
            log.error(f"\tProofs {total_proofs} / {num}, {round(total_proofs/float(num), 4)}")

    log.info("")
    log.info("")
    log.info("Summary")
    total_plots: int = sum(list(total_good_plots.values()))
    log.info(
        f"Found {total_plots} valid plots, total size "
        f"{total_size / (1024 * 1024 * 1024 * 1024):.5f} TiB"
    )
    for (k, count) in sorted(dict(total_good_plots).items()):
        log.info(f"{count} plots of size {k}")
    grand_total_bad = total_bad_plots + len(failed_to_open_filenames)
    if grand_total_bad > 0:
        log.warning(f"{grand_total_bad} invalid plots")
    if len(no_key_filenames) > 0:
        log.warning(
            f"There are {len(no_key_filenames)} plots with a farmer or pool public key that "
            f"is not on this machine. The farmer private key must be in the keychain in order to "
            f"farm them, use 'chia keys' to transfer keys. The pool public keys must be in the config.yaml"
        )
def check_plots(root_path, num, challenge_start, grep_string, list_duplicates, debug_show_memo):
    config = load_config(root_path, "config.yaml")
    if num is not None:
        if num == 0:
            log.warning("Not opening plot files")
        else:
            if num < 5:
                log.warning(f"{num} challenges is too low, setting it to the minimum of 5")
                num = 5
            if num < 30:
                log.warning("Use 30 challenges (our default) for balance of speed and accurate results")
    else:
        num = 30

    if challenge_start is not None:
        num_start = challenge_start
        num_end = num_start + num
    else:
        num_start = 0
        num_end = num
    challenges = num_end - num_start

    if grep_string is not None:
        match_str = grep_string
    else:
        match_str = None
    if list_duplicates:
        log.warning("Checking for duplicate Plot IDs")
        log.info("Plot filenames expected to end with -[64 char plot ID].plot")
    show_memo: bool = debug_show_memo

    if list_duplicates:
        plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config["harvester"])
        all_filenames: List[Path] = []
        for paths in plot_filenames.values():
            all_filenames += paths
        find_duplicate_plot_IDs(all_filenames)

    if num == 0:
        return None

    v = Verifier()
    log.info("Loading plots in config.yaml using plot_tools loading code\n")
    kc: Keychain = Keychain()
    pks = [master_sk_to_farmer_sk(sk).get_g1() for sk, _ in kc.get_all_private_keys()]
    pool_public_keys = [
        G1Element.from_bytes(bytes.fromhex(pk))
        for pk in config["farmer"]["pool_public_keys"]
    ]
    _, provers, failed_to_open_filenames, no_key_filenames = load_plots(
        {},
        {},
        pks,
        pool_public_keys,
        match_str,
        show_memo,
        root_path,
        open_no_key_filenames=True,
    )
    if len(provers) > 0:
        log.info("")
        log.info("")
        log.info(f"Starting to test each plot with {num} challenges each\n")
    total_good_plots: Counter = Counter()
    total_bad_plots = 0
    total_size = 0
    bad_plots_list: List[Path] = []

    for plot_path, plot_info in provers.items():
        pr = plot_info.prover
        log.info(f"Testing plot {plot_path} k={pr.get_size()}")
        log.info(f"\tPool public key: {plot_info.pool_public_key}")
        # Look up local_sk from plot to save locked memory
        (
            pool_public_key_or_puzzle_hash,
            farmer_public_key,
            local_master_sk,
        ) = parse_plot_info(pr.get_memo())
        local_sk = master_sk_to_local_sk(local_master_sk)
        log.info(f"\tFarmer public key: {farmer_public_key}")
        log.info(f"\tLocal sk: {local_sk}")
        total_proofs = 0
        caught_exception: bool = False
        for i in range(num_start, num_end):
            challenge = std_hash(i.to_bytes(32, "big"))
            # Some plot errors cause get_qualities_for_challenge to throw a RuntimeError
            try:
                for index, quality_str in enumerate(pr.get_qualities_for_challenge(challenge)):
                    # Other plot errors cause get_full_proof or validate_proof to throw an AssertionError
                    try:
                        proof = pr.get_full_proof(challenge, index)
                        total_proofs += 1
                        ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size(), challenge, proof)
                        assert quality_str == ver_quality_str
                    except AssertionError as e:
                        log.error(f"{type(e)}: {e} error in proving/verifying for plot {plot_path}")
                        caught_exception = True
            except KeyboardInterrupt:
                log.warning("Interrupted, closing")
                return None
            except SystemExit:
                log.warning("System is shutting down.")
                return None
            except Exception as e:
                log.error(f"{type(e)}: {e} error in getting challenge qualities for plot {plot_path}")
                caught_exception = True
            if caught_exception is True:
                break
        if total_proofs > 0 and caught_exception is False:
            log.info(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
            total_good_plots[pr.get_size()] += 1
            total_size += plot_path.stat().st_size
        else:
            total_bad_plots += 1
            log.error(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
            bad_plots_list.append(plot_path)

    log.info("")
    log.info("")
    log.info("Summary")
    total_plots: int = sum(list(total_good_plots.values()))
    log.info(
        f"Found {total_plots} valid plots, total size "
        f"{total_size / (1024 * 1024 * 1024 * 1024):.5f} TiB"
    )
    for (k, count) in sorted(dict(total_good_plots).items()):
        log.info(f"{count} plots of size {k}")
    grand_total_bad = total_bad_plots + len(failed_to_open_filenames)
    if grand_total_bad > 0:
        log.warning(f"{grand_total_bad} invalid plots found:")
        for bad_plot_path in bad_plots_list:
            log.warning(f"{bad_plot_path}")
    if len(no_key_filenames) > 0:
        log.warning(
            f"There are {len(no_key_filenames)} plots with a farmer or pool public key that "
            f"is not on this machine. The farmer private key must be in the keychain in order to "
            f"farm them, use 'chia keys' to transfer keys. The pool public keys must be in the config.yaml"
        )
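# A sketch of driving the check_plots function above directly from a small
# script rather than through a CLI command. DEFAULT_ROOT_PATH is used by the
# earlier checker script; the module path it is imported from here, and the
# idea of calling check_plots directly, are assumptions made for this example.
from src.util.default_root import DEFAULT_ROOT_PATH  # assumed import path

check_plots(
    root_path=DEFAULT_ROOT_PATH,
    num=30,                  # 30 challenges per plot (the documented default)
    challenge_start=None,    # start challenges from index 0
    grep_string=None,        # no filename filter, check every plot
    list_duplicates=False,   # skip the duplicate plot-ID scan
    debug_show_memo=False,   # do not print plot memos
)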
def test_k_21(self):
    challenge: bytes = bytes([i for i in range(0, 32)])
    plot_seed: bytes = bytes(
        [
            5, 104, 52, 4, 51, 55, 23, 84, 91, 10, 111, 12, 13,
            222, 151, 16, 228, 211, 254, 45, 92, 198, 204, 10, 9,
            10, 11, 129, 139, 171, 15, 23,
        ]
    )

    pl = DiskPlotter()
    pl.create_plot_disk(
        ".", ".", ".", "myplot.dat", 21, bytes([1, 2, 3, 4, 5]), plot_seed, 300, 32, 8192, 8
    )
    pl = None

    pr = DiskProver(str(Path("myplot.dat")))

    total_proofs: int = 0
    iterations: int = 5000
    v = Verifier()
    for i in range(iterations):
        if i % 100 == 0:
            print(i)
        challenge = sha256(i.to_bytes(4, "big")).digest()
        for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
            proof = pr.get_full_proof(challenge, index)
            assert len(proof) == 8 * pr.get_size()
            computed_quality = v.validate_proof(
                plot_seed, pr.get_size(), challenge, proof
            )
            assert computed_quality == quality
            total_proofs += 1

    print(
        f"total proofs {total_proofs} out of {iterations} "
        f"{total_proofs / iterations}"
    )
    assert total_proofs > 4000
    assert total_proofs < 6000
    pr = None

    sha256_plot_hash = sha256()
    with open("myplot.dat", "rb") as f:
        # Read and update hash string value in blocks of 4K
        for byte_block in iter(lambda: f.read(4096), b""):
            sha256_plot_hash.update(byte_block)
        plot_hash = str(sha256_plot_hash.hexdigest())

    assert plot_hash == "80e32f560f3a4347760d6baae8d16fbaf484948088bff05c51bdcc24b7bc40d9"
    print(f"\nPlotfile asserted sha256: {plot_hash}\n")

    Path("myplot.dat").unlink()
def check_plots(root_path, num, challenge_start, grep_string, list_duplicates, debug_show_memo):
    config = load_config(root_path, "config.yaml")
    plot_refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(100, 100, 1)
    plot_manager: PlotManager = PlotManager(
        root_path,
        match_str=grep_string,
        show_memo=debug_show_memo,
        open_no_key_filenames=True,
        refresh_parameter=plot_refresh_parameter,
        refresh_callback=plot_refresh_callback,
    )
    if num is not None:
        if num == 0:
            log.warning("Not opening plot files")
        else:
            if num < 5:
                log.warning(f"{num} challenges is too low, setting it to the minimum of 5")
                num = 5
            if num < 30:
                log.warning("Use 30 challenges (our default) for balance of speed and accurate results")
    else:
        num = 30

    if challenge_start is not None:
        num_start = challenge_start
        num_end = num_start + num
    else:
        num_start = 0
        num_end = num
    challenges = num_end - num_start

    if list_duplicates:
        log.warning("Checking for duplicate Plot IDs")
        log.info("Plot filenames expected to end with -[64 char plot ID].plot")

    if list_duplicates:
        all_filenames: List[Path] = []
        for paths in get_plot_filenames(root_path).values():
            all_filenames += paths
        find_duplicate_plot_IDs(all_filenames)

    if num == 0:
        return None

    parallel_read: bool = config["harvester"].get("parallel_read", True)

    v = Verifier()
    log.info(f"Loading plots in config.yaml using plot_manager loading code (parallel read: {parallel_read})\n")
    # Prompts interactively if the keyring is protected by a master passphrase. To use the daemon
    # for keychain access, KeychainProxy/connect_to_keychain should be used instead of Keychain.
    kc: Keychain = Keychain()
    plot_manager.set_public_keys(
        [master_sk_to_farmer_sk(sk).get_g1() for sk, _ in kc.get_all_private_keys()],
        [G1Element.from_bytes(bytes.fromhex(pk)) for pk in config["farmer"]["pool_public_keys"]],
    )
    plot_manager.start_refreshing()

    while plot_manager.needs_refresh():
        sleep(1)

    plot_manager.stop_refreshing()

    if plot_manager.plot_count() > 0:
        log.info("")
        log.info("")
        log.info(f"Starting to test each plot with {num} challenges each\n")
    total_good_plots: Counter = Counter()
    total_bad_plots = 0
    total_size = 0
    bad_plots_list: List[Path] = []

    with plot_manager:
        for plot_path, plot_info in plot_manager.plots.items():
            pr = plot_info.prover
            log.info(f"Testing plot {plot_path} k={pr.get_size()}")
            log.info(f"\tPool public key: {plot_info.pool_public_key}")

            # Look up local_sk from plot to save locked memory
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(pr.get_memo())
            local_sk = master_sk_to_local_sk(local_master_sk)
            log.info(f"\tFarmer public key: {farmer_public_key}")
            log.info(f"\tLocal sk: {local_sk}")

            total_proofs = 0
            caught_exception: bool = False
            for i in range(num_start, num_end):
                challenge = std_hash(i.to_bytes(32, "big"))
                # Some plot errors cause get_qualities_for_challenge to throw a RuntimeError
                try:
                    quality_start_time = int(round(time() * 1000))
                    for index, quality_str in enumerate(pr.get_qualities_for_challenge(challenge)):
                        quality_spent_time = int(round(time() * 1000)) - quality_start_time
                        if quality_spent_time > 5000:
                            log.warning(
                                f"\tLooking up qualities took: {quality_spent_time} ms. This should be below 5 seconds "
                                f"to minimize risk of losing rewards."
                            )
                        else:
                            log.info(f"\tLooking up qualities took: {quality_spent_time} ms.")

                        # Other plot errors cause get_full_proof or validate_proof to throw an AssertionError
                        try:
                            proof_start_time = int(round(time() * 1000))
                            proof = pr.get_full_proof(challenge, index, parallel_read)
                            proof_spent_time = int(round(time() * 1000)) - proof_start_time
                            if proof_spent_time > 15000:
                                log.warning(
                                    f"\tFinding proof took: {proof_spent_time} ms. This should be below 15 seconds "
                                    f"to minimize risk of losing rewards."
                                )
                            else:
                                log.info(f"\tFinding proof took: {proof_spent_time} ms")
                            total_proofs += 1
                            ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size(), challenge, proof)
                            assert quality_str == ver_quality_str
                        except AssertionError as e:
                            log.error(f"{type(e)}: {e} error in proving/verifying for plot {plot_path}")
                            caught_exception = True
                        quality_start_time = int(round(time() * 1000))
                except KeyboardInterrupt:
                    log.warning("Interrupted, closing")
                    return None
                except SystemExit:
                    log.warning("System is shutting down.")
                    return None
                except Exception as e:
                    log.error(f"{type(e)}: {e} error in getting challenge qualities for plot {plot_path}")
                    caught_exception = True
                if caught_exception is True:
                    break
            if total_proofs > 0 and caught_exception is False:
                log.info(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
                total_good_plots[pr.get_size()] += 1
                total_size += plot_path.stat().st_size
            else:
                total_bad_plots += 1
                log.error(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
                bad_plots_list.append(plot_path)

    log.info("")
    log.info("")
    log.info("Summary")
    total_plots: int = sum(list(total_good_plots.values()))
    log.info(f"Found {total_plots} valid plots, total size {total_size / (1024 * 1024 * 1024 * 1024):.5f} TiB")
    for (k, count) in sorted(dict(total_good_plots).items()):
        log.info(f"{count} plots of size {k}")
    grand_total_bad = total_bad_plots + len(plot_manager.failed_to_open_filenames)
    if grand_total_bad > 0:
        log.warning(f"{grand_total_bad} invalid plots found:")
        for bad_plot_path in bad_plots_list:
            log.warning(f"{bad_plot_path}")
    if len(plot_manager.no_key_filenames) > 0:
        log.warning(
            f"There are {len(plot_manager.no_key_filenames)} plots with a farmer or pool public key that "
            f"is not on this machine. The farmer private key must be in the keychain in order to "
            f"farm them, use 'chia keys' to transfer keys. The pool public keys must be in the config.yaml"
        )
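# plot_refresh_callback is referenced by the PlotManager constructor above but
# is not defined in this excerpt. A minimal stand-in might look like the sketch
# below; its single-argument signature and the idea of simply logging the
# refresh result are assumptions made for this example, not taken from the code
# above.
def plot_refresh_callback(refresh_result):
    # Log whatever progress information the refresh result object carries.
    log.info(f"Plot refresh update: {refresh_result}")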
import os
from hashlib import sha256

from chiapos import DiskPlotter, DiskProver, Verifier

challenge: bytes = bytes([i for i in range(0, 32)])
plot_id: bytes = bytes(
    [
        5, 104, 52, 4, 51, 55, 23, 84, 91, 10, 111, 12, 13,
        222, 151, 16, 228, 211, 254, 45, 92, 198, 204, 10, 9,
        10, 11, 129, 139, 171, 15, 23,
    ]
)
filename = "./myplot.dat"

pl = DiskPlotter()
pl.create_plot_disk(filename, 21, bytes([1, 2, 3, 4, 5]), plot_id)
pr = DiskProver(filename)

total_proofs: int = 0
iterations: int = 5000
v = Verifier()
for i in range(iterations):
    challenge = sha256(i.to_bytes(4, "big")).digest()
    for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
        proof = pr.get_full_proof(challenge, index)
        total_proofs += 1
        ver_quality = v.validate_proof(plot_id, 21, challenge, proof)
        assert quality == ver_quality

os.remove(filename)

print(
    f"total proofs {total_proofs} out of {iterations} "
    f"{total_proofs / iterations}"
)