def main():
    """
    Script for checking all plots in the plots.yaml file.
    Specify a number of challenge to test for each plot.
    """
    parser = argparse.ArgumentParser(description="Chia plot checking script.")
    parser.add_argument("-n", "--num", help="Number of challenges", type=int, default=1000)
    args = parser.parse_args()
    v = Verifier()
    if os.path.isfile(plot_config_filename):
        # Use a context manager so the config file handle is always closed.
        with open(plot_config_filename, "r") as config_fh:
            plot_config = safe_load(config_fh)
        for plot_filename, plot_info in plot_config["plots"].items():
            # Plot seed is derived from the pool pk and the plot's own pk.
            plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed(
                PublicKey.from_bytes(bytes.fromhex(plot_info["pool_pk"])),
                PrivateKey.from_bytes(bytes.fromhex(plot_info["sk"])).get_public_key(),
            )
            if not os.path.isfile(plot_filename):
                # Tries relative path
                full_path: str = os.path.join(plot_root, plot_filename)
                if not os.path.isfile(full_path):
                    # Tries absolute path (annotation only on first binding;
                    # re-annotating the same name is a typing error)
                    full_path = plot_filename
                    if not os.path.isfile(full_path):
                        print(f"Plot file {full_path} not found.")
                        continue
                pr = DiskProver(full_path)
            else:
                pr = DiskProver(plot_filename)
            total_proofs = 0
            try:
                for i in range(args.num):
                    challenge = sha256(i.to_bytes(32, "big")).digest()
                    for index, quality in enumerate(
                            pr.get_qualities_for_challenge(challenge)):
                        proof = pr.get_full_proof(challenge, index)
                        total_proofs += 1
                        ver_quality = v.validate_proof(plot_seed, pr.get_size(),
                                                       challenge, proof)
                        assert quality == ver_quality
            # Exception (not BaseException) so Ctrl-C still aborts the script.
            except Exception as e:
                print(
                    f"{type(e)}: {e} error in proving/verifying for plot {plot_filename}"
                )
            print(
                f"{plot_filename}: Proofs {total_proofs} / {args.num}, {round(total_proofs/float(args.num), 4)}"
            )
    else:
        print(f"No plot file found at {plot_config_filename}")
def test_k_21(self):
    # End-to-end smoke test: create a small k=21 plot, then prove and verify
    # against 5000 deterministic challenges.
    challenge: bytes = bytes([i for i in range(0, 32)])
    # Fixed 32-byte plot seed so the generated plot (and hence the total
    # proof count asserted below) is deterministic.
    plot_seed: bytes = bytes([5, 104, 52, 4, 51, 55, 23, 84, 91, 10, 111,
                              12, 13, 222, 151, 16, 228, 211, 254, 45, 92,
                              198, 204, 10, 9, 10, 11, 129, 139, 171, 15, 23])
    pl = DiskPlotter()
    pl.create_plot_disk(".", ".", ".", "myplot.dat", 21,
                        bytes([1, 2, 3, 4, 5]), plot_seed, 2*1024)
    # Drop the plotter so its handle on the plot file is released before
    # the prover opens it.
    pl = None
    pr = DiskProver(str(Path("myplot.dat")))
    total_proofs: int = 0
    iterations: int = 5000
    v = Verifier()
    for i in range(iterations):
        if i % 100 == 0:
            print(i)
        challenge = sha256(i.to_bytes(4, "big")).digest()
        for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
            proof = pr.get_full_proof(challenge, index)
            # A full proof is 64 x-values of k bits each: 8*k bytes.
            assert len(proof) == 8*pr.get_size()
            computed_quality = v.validate_proof(plot_seed, pr.get_size(),
                                                challenge, proof)
            assert computed_quality == quality
            total_proofs += 1
    print(f"total proofs {total_proofs} out of {iterations}\
        {total_proofs / iterations}")
    # Exact count is reproducible because the plot seed is fixed.
    assert total_proofs == 4647
    # Release the prover's file handle before deleting the plot.
    pr = None
    Path("myplot.dat").unlink()
async def harvester_handshake(
        self, harvester_handshake: harvester_protocol.HarvesterHandshake):
    """
    Handshake between the harvester and farmer. The harvester receives the
    pool public keys, which must be put into the plots, before the plotting
    process begins. We cannot use any plots which don't have one of the
    pool keys.
    """
    for partial_filename, plot_config in self.plot_config["plots"].items():
        # Resolve the plot path: configured plot_root wins, otherwise the
        # default "plots" directory under ROOT_DIR.
        if "plot_root" in self.config:
            filename = os.path.join(self.config["plot_root"], partial_filename)
        else:
            filename = os.path.join(ROOT_DIR, "plots", partial_filename)
        pool_pubkey = PublicKey.from_bytes(
            bytes.fromhex(plot_config["pool_pk"]))
        # Only use plots that correct pools associated with them
        if pool_pubkey in harvester_handshake.pool_pubkeys:
            if os.path.isfile(filename):
                self.provers[partial_filename] = DiskProver(filename)
            else:
                # log.warn is a deprecated alias; use log.warning.
                log.warning(f"Plot at (unknown) does not exist.")
        else:
            log.warning(
                f"Plot (unknown) has a pool key that is not in the farmer's pool_pk list."
            )
def _add_plot(
    self, str_path: str, plot_sk: PrivateKey, pool_pk: Optional[PublicKey]
) -> bool:
    """
    Register the plot at str_path in plots.yaml and refresh the plot list.

    If pool_pk is None, it is recovered by finding which known pool pubkey,
    combined with the plot sk, reproduces the plot's on-disk id. Returns
    False when no matching pool key is found, True on success.
    """
    plot_config = load_config(self.root_path, "plots.yaml")
    if pool_pk is None:
        # Hoisted out of the loop: neither the prover nor the plot's own
        # public key depends on the candidate pool key, so open the plot
        # and derive the key once instead of once per candidate.
        prover = DiskProver(str_path)
        plot_id = prover.get_id()
        plot_pubkey = plot_sk.get_public_key()
        for pool_pk_cand in self.pool_pubkeys:
            if ProofOfSpace.calculate_plot_seed(pool_pk_cand, plot_pubkey) == plot_id:
                pool_pk = pool_pk_cand
                break
    if pool_pk is None:
        return False
    plot_config["plots"][str_path] = {
        "sk": bytes(plot_sk).hex(),
        "pool_pk": bytes(pool_pk).hex(),
    }
    save_config(self.root_path, "plots.yaml", plot_config)
    self._refresh_plots()
    return True
def test_faulty_plot_doesnt_crash(self):
    # Corrupt a 10 KB region in the middle of a valid plot and check that
    # lookups fail per-challenge (counted as failures) instead of crashing
    # the whole process.
    if Path("myplot.dat").exists():
        Path("myplot.dat").unlink()
    if Path("myplotbad.dat").exists():
        Path("myplotbad.dat").unlink()
    plot_id: bytes = bytes([i for i in range(32, 64)])
    pl = DiskPlotter()
    pl.create_plot_disk(
        ".",
        ".",
        ".",
        "myplot.dat",
        21,
        bytes([1, 2, 3, 4, 5]),
        plot_id,
        300,
        32,
        8192,
        8,
        False,
    )
    f = open("myplot.dat", "rb")
    all_data = bytearray(f.read())
    f.close()
    # The corrupted window below assumes the plot is large enough.
    assert len(all_data) > 20000000
    # Replace bytes [20000000, 20100000) with 10000 random bytes — note the
    # splice also shortens the file by 90000 bytes, which is part of the fault.
    all_data_bad = all_data[:20000000] + bytearray(
        token_bytes(10000)) + all_data[20100000:]
    f_bad = open("myplotbad.dat", "wb")
    f_bad.write(all_data_bad)
    f_bad.close()
    pr = DiskProver(str(Path("myplotbad.dat")))
    iterations: int = 50000
    v = Verifier()
    successes = 0
    failures = 0
    for i in range(iterations):
        if i % 100 == 0:
            print(i)
        challenge = sha256(i.to_bytes(4, "big")).digest()
        try:
            for index, quality in enumerate(
                    pr.get_qualities_for_challenge(challenge)):
                proof = pr.get_full_proof(challenge, index)
                computed_quality = v.validate_proof(
                    plot_id, pr.get_size(), challenge, proof)
                if computed_quality == quality:
                    successes += 1
                else:
                    print("Did not validate")
                    failures += 1
        # Corruption may surface as an exception from the prover; that is
        # the acceptable failure mode this test exercises.
        except Exception as e:
            print(f"Exception: {e}")
            failures += 1
    print(f"Successes: {successes}")
    print(f"Failures: {failures}")
async def request_proof_of_space(
        self, request: harvester_protocol.RequestProofOfSpace):
    """
    The farmer requests a proof of space, for one of the plots.
    We look up the correct plot based on the plot id and response number,
    lookup the proof, and return it.
    """
    response: Optional[harvester_protocol.RespondProofOfSpace] = None
    challenge_hash = request.challenge_hash
    filename = Path(request.plot_id).resolve()
    index = request.response_number
    proof_xs: bytes
    plot_info = self.provers[filename]
    try:
        try:
            proof_xs = plot_info.prover.get_full_proof(
                challenge_hash, index)
        except RuntimeError:
            # The prover can become invalid (e.g. stale handle); rebuild it
            # and retry once with a fresh DiskProver.
            prover = DiskProver(str(filename))
            self.provers[filename] = PlotInfo(
                prover,
                plot_info.pool_public_key,
                plot_info.farmer_public_key,
                plot_info.plot_public_key,
                plot_info.local_sk,
                plot_info.file_size,
                plot_info.time_modified,
            )
            proof_xs = self.provers[filename].prover.get_full_proof(
                challenge_hash, index)
    except KeyError:
        log.warning(f"KeyError plot (unknown) does not exist.")
        # BUGFIX: bail out here. Previously execution fell through and the
        # code below raised NameError on the unbound proof_xs.
        return
    plot_info = self.provers[filename]
    plot_public_key = ProofOfSpace.generate_plot_public_key(
        plot_info.local_sk.get_g1(), plot_info.farmer_public_key)
    proof_of_space: ProofOfSpace = ProofOfSpace(
        challenge_hash,
        plot_info.pool_public_key,
        plot_public_key,
        uint8(self.provers[filename].prover.get_size()),
        proof_xs,
    )
    response = harvester_protocol.RespondProofOfSpace(
        request.plot_id,
        request.response_number,
        proof_of_space,
    )
    if response:
        yield OutboundMessage(
            NodeType.FARMER,
            Message("respond_proof_of_space", response),
            Delivery.RESPOND,
        )
async def request_proof_of_space(
    self, request: harvester_protocol.RequestProofOfSpace
):
    """
    The farmer requests a signature on the header hash, for one of the proofs
    that we found. We look up the correct plot based on the quality, lookup
    the proof, and return it.
    """
    response: Optional[harvester_protocol.RespondProofOfSpace] = None
    try:
        # Using the quality string, find the right plot and index from our solutions
        challenge_hash, filename, index = self.challenge_hashes[
            request.quality_string
        ]
    except KeyError:
        log.warning(f"Quality string {request.quality_string} not found")
        return
    if index is not None:
        proof_xs: bytes
        try:
            try:
                proof_xs = self.provers[filename].get_full_proof(
                    challenge_hash, index
                )
            except RuntimeError:
                # Stale prover handle: reopen the plot and retry once.
                self.provers[filename] = DiskProver(str(filename))
                proof_xs = self.provers[filename].get_full_proof(
                    challenge_hash, index
                )
        except KeyError:
            log.warning(f"KeyError plot (unknown) does not exist.")
            # BUGFIX: bail out here. Previously execution fell through and
            # built a ProofOfSpace from the unbound proof_xs → NameError.
            return
        pool_pubkey = PublicKey.from_bytes(
            bytes.fromhex(self.plot_config["plots"][filename]["pool_pk"])
        )
        plot_pubkey = PrivateKey.from_bytes(
            bytes.fromhex(self.plot_config["plots"][filename]["sk"])
        ).get_public_key()
        proof_of_space: ProofOfSpace = ProofOfSpace(
            challenge_hash,
            pool_pubkey,
            plot_pubkey,
            uint8(self.provers[filename].get_size()),
            proof_xs,
        )
        response = harvester_protocol.RespondProofOfSpace(
            request.quality_string, proof_of_space
        )
    if response:
        yield OutboundMessage(
            NodeType.FARMER,
            Message("respond_proof_of_space", response),
            Delivery.RESPOND,
        )
async def new_challenge(self, new_challenge: harvester_protocol.NewChallenge):
    """
    The harvester receives a new challenge from the farmer, and looks up the
    quality for any proofs of space that are found in the plots. If proofs
    are found, a ChallengeResponse message is sent for each of the proofs
    found.
    """
    challenge_size = len(new_challenge.challenge_hash)
    if challenge_size != 32:
        raise ValueError(
            f"Invalid challenge size {challenge_size}, 32 was expected")
    all_responses = []
    for filename, prover in self.provers.items():
        try:
            quality_strings = prover.get_qualities_for_challenge(
                new_challenge.challenge_hash)
        except RuntimeError:
            log.error(
                f"Error using prover object on (unknown). Reinitializing prover object."
            )
            quality_strings = None
            try:
                # BUGFIX: retry with the freshly-constructed prover. The old
                # code rebuilt self.provers[filename] but then retried on the
                # stale `prover`, so the retry always hit the same failure.
                prover = DiskProver(filename)
                self.provers[filename] = prover
                quality_strings = prover.get_qualities_for_challenge(
                    new_challenge.challenge_hash)
            except RuntimeError:
                log.error(
                    f"Retry-Error using prover object on (unknown). Giving up."
                )
                quality_strings = None
        if quality_strings is not None:
            for index, quality_str in enumerate(quality_strings):
                quality = ProofOfSpace.quality_str_to_quality(
                    new_challenge.challenge_hash, quality_str)
                self.challenge_hashes[quality] = (
                    new_challenge.challenge_hash,
                    filename,
                    uint8(index),
                )
                response: harvester_protocol.ChallengeResponse = harvester_protocol.ChallengeResponse(
                    new_challenge.challenge_hash, quality, prover.get_size())
                all_responses.append(response)
    for response in all_responses:
        yield OutboundMessage(
            NodeType.FARMER,
            Message("challenge_response", response),
            Delivery.RESPOND,
        )
def load_plots(
    config_file: Dict,
    plot_config_file: Dict,
    pool_pubkeys: Optional[List[PublicKey]],
    root_path: Path,
) -> Tuple[Dict[str, DiskProver], List[str], List[str]]:
    """
    Open every plot listed in plots.yaml whose pool key is acceptable.

    Returns (provers, failed_to_open_filenames, not_found_filenames), all
    keyed/listed by the yaml partial filename string. When pool_pubkeys is
    None, no pool-key filtering is applied.
    """
    provers: Dict[str, DiskProver] = {}
    failed_to_open_filenames: List[str] = []
    not_found_filenames: List[str] = []
    for partial_filename_str, plot_config in plot_config_file["plots"].items():
        plot_root = path_from_root(root_path, config_file.get("plot_root", "."))
        partial_filename = plot_root / partial_filename_str
        # Two candidate locations: under plot_root, or resolved relative to
        # plot_root itself (covers absolute partial paths).
        potential_filenames = [
            partial_filename,
            path_from_root(plot_root, partial_filename_str),
        ]
        pool_pubkey = PublicKey.from_bytes(
            bytes.fromhex(plot_config["pool_pk"]))
        # Only use plots that correct pools associated with them
        if pool_pubkeys is not None and pool_pubkey not in pool_pubkeys:
            log.warning(
                f"Plot {partial_filename} has a pool key that is not in the farmer's pool_pk list."
            )
            continue
        found = False
        failed_to_open = False
        for filename in potential_filenames:
            if filename.exists():
                try:
                    provers[partial_filename_str] = DiskProver(str(filename))
                except Exception as e:
                    log.error(f"Failed to open file (unknown). {e}")
                    failed_to_open = True
                    failed_to_open_filenames.append(partial_filename_str)
                    break
                log.info(
                    f"Loaded plot (unknown) of size {provers[partial_filename_str].get_size()}"
                )
                found = True
                break
        if not found and not failed_to_open:
            log.warning(f"Plot at {potential_filenames} does not exist.")
            not_found_filenames.append(partial_filename_str)
    return (provers, failed_to_open_filenames, not_found_filenames)
async def harvester_handshake(
        self, harvester_handshake: harvester_protocol.HarvesterHandshake):
    """
    Handshake between the harvester and farmer. The harvester receives the pool public keys,
    which must be put into the plots, before the plotting process begins. We cannot
    use any plots which don't have one of the pool keys.
    """
    for partial_filename_str, plot_config in self.plot_config[
            "plots"].items():
        plot_root = path_from_root(DEFAULT_ROOT_PATH,
                                   self.config.get("plot_root", "."))
        partial_filename = plot_root / partial_filename_str
        # Two candidate locations for the plot file.
        potential_filenames = [
            partial_filename,
            path_from_root(plot_root, partial_filename_str),
        ]
        pool_pubkey = PublicKey.from_bytes(
            bytes.fromhex(plot_config["pool_pk"]))
        # Only use plots that correct pools associated with them
        if pool_pubkey not in harvester_handshake.pool_pubkeys:
            log.warning(
                f"Plot {partial_filename} has a pool key that is not in the farmer's pool_pk list."
            )
            continue
        found = False
        failed_to_open = False
        for filename in potential_filenames:
            if filename.exists():
                try:
                    self.provers[partial_filename_str] = DiskProver(
                        str(filename))
                except ValueError:
                    log.error(f"Failed to open file (unknown).")
                    failed_to_open = True
                    break
                log.info(
                    f"Farming plot (unknown) of size {self.provers[partial_filename_str].get_size()}"
                )
                found = True
                break
        if not found and not failed_to_open:
            log.warning(f"Plot at {potential_filenames} does not exist.")
def blocking_lookup(filename: Path, prover: DiskProver) -> Optional[List]:
    # Uses the DiskProver object to lookup qualities. This is a blocking call,
    # so it should be run in a threadpool.
    # NOTE(review): relies on `self` and `new_challenge` from the enclosing
    # scope — this is a closure inside the challenge handler.
    try:
        quality_strings = prover.get_qualities_for_challenge(
            new_challenge.challenge_hash
        )
    except RuntimeError:
        log.error("Error using prover object. Reinitializing prover object.")
        try:
            # BUGFIX: rebind `prover` to the new DiskProver before retrying.
            # Previously the retry used the stale prover that had just
            # failed, so the reinitialization had no effect.
            prover = DiskProver(str(filename))
            self.provers[filename] = prover
            quality_strings = prover.get_qualities_for_challenge(
                new_challenge.challenge_hash
            )
        except RuntimeError:
            log.error(
                f"Retry-Error using prover object on (unknown). Giving up."
            )
            quality_strings = None
    return quality_strings
def load_plots(config_file: Dict, plot_config_file: Dict,
               pool_pubkeys: List[PublicKey]) -> Dict[str, DiskProver]:
    """
    Open every plot in plots.yaml whose pool key is in pool_pubkeys.

    Returns a dict of DiskProver objects keyed by the yaml partial-filename
    string (annotation corrected: keys are str, not Path).
    """
    provers: Dict[str, DiskProver] = {}
    for partial_filename_str, plot_config in plot_config_file["plots"].items():
        plot_root = path_from_root(DEFAULT_ROOT_PATH,
                                   config_file.get("plot_root", "."))
        partial_filename = plot_root / partial_filename_str
        # Two candidate locations for the plot file.
        potential_filenames = [
            partial_filename,
            path_from_root(plot_root, partial_filename_str),
        ]
        pool_pubkey = PublicKey.from_bytes(
            bytes.fromhex(plot_config["pool_pk"]))
        # Only use plots that correct pools associated with them
        if pool_pubkey not in pool_pubkeys:
            log.warning(
                f"Plot {partial_filename} has a pool key that is not in the farmer's pool_pk list."
            )
            continue
        found = False
        failed_to_open = False
        for filename in potential_filenames:
            if filename.exists():
                try:
                    provers[partial_filename_str] = DiskProver(str(filename))
                except ValueError as e:
                    log.error(f"Failed to open file (unknown). {e}")
                    failed_to_open = True
                    break
                log.info(
                    f"Loaded plot (unknown) of size {provers[partial_filename_str].get_size()}"
                )
                found = True
                break
        if not found and not failed_to_open:
            log.warning(f"Plot at {potential_filenames} does not exist.")
    return provers
async def harvester_handshake(
    self, harvester_handshake: harvester_protocol.HarvesterHandshake
):
    """
    Handshake between the harvester and farmer. The harvester receives the pool public keys,
    which must be put into the plots, before the plotting process begins. We cannot
    use any plots which don't have one of the pool keys.
    """
    for partial_filename, plot_config in self.plot_config["plots"].items():
        # Candidate paths: the raw yaml entry, plus it joined under
        # plot_root (or the default ROOT_DIR/plots).
        potential_filenames = [partial_filename]
        if "plot_root" in self.config:
            potential_filenames.append(
                os.path.join(self.config["plot_root"], partial_filename)
            )
        else:
            potential_filenames.append(
                os.path.join(ROOT_DIR, "plots", partial_filename)
            )
        pool_pubkey = PublicKey.from_bytes(bytes.fromhex(plot_config["pool_pk"]))
        # Only use plots that correct pools associated with them
        if pool_pubkey not in harvester_handshake.pool_pubkeys:
            log.warning(
                f"Plot {partial_filename} has a pool key that is not in the farmer's pool_pk list."
            )
            continue
        found = False
        for filename in potential_filenames:
            if os.path.isfile(filename):
                self.provers[partial_filename] = DiskProver(filename)
                log.info(
                    f"Farming plot (unknown) of size {self.provers[partial_filename].get_size()}"
                )
                found = True
                break
        if not found:
            log.warning(f"Plot at {potential_filenames} does not exist.")
def _create_block(
    self,
    test_constants: Dict,
    challenge_hash: bytes32,
    height: uint32,
    prev_header_hash: bytes32,
    prev_iters: uint64,
    prev_weight: uint64,
    timestamp: uint64,
    difficulty: uint64,
    ips: uint64,
    seed: bytes,
) -> FullBlock:
    """
    Creates a block with the specified details. Uses the stored plots to create a proof of space,
    and also evaluates the VDF for the proof of time.
    """
    # NOTE(review): num_plots, plot_pks, plot_sks, pool_pk, k, n_wesolowski,
    # pool_sk, coinbase_target and fee_target are free names resolved from
    # the enclosing module/class scope — confirm against the full file.
    prover = None
    plot_pk = None
    plot_sk = None
    qualities: List[bytes] = []
    for pn in range(num_plots):
        # Allow passing in seed, to create reorgs and different chains
        seeded_pn = (pn + 17 * int.from_bytes(seed, "big")) % num_plots
        filename = self.filenames[seeded_pn]
        plot_pk = plot_pks[seeded_pn]
        plot_sk = plot_sks[seeded_pn]
        prover = DiskProver(os.path.join(self.plot_dir, filename))
        qualities = prover.get_qualities_for_challenge(challenge_hash)
        if len(qualities) > 0:
            break
    assert prover
    assert plot_pk
    assert plot_sk
    if len(qualities) == 0:
        raise NoProofsOfSpaceFound("No proofs for this challenge")
    # Always use the first proof found for this challenge.
    proof_xs: bytes = prover.get_full_proof(challenge_hash, 0)
    proof_of_space: ProofOfSpace = ProofOfSpace(
        challenge_hash, pool_pk, plot_pk, k, [uint8(b) for b in proof_xs])
    number_iters: uint64 = pot_iterations.calculate_iterations(
        proof_of_space, difficulty, ips, test_constants["MIN_BLOCK_TIME"])
    # Proof of time: run the VDF for number_iters over the class group
    # derived from this challenge.
    disc: int = create_discriminant(
        challenge_hash, test_constants["DISCRIMINANT_SIZE_BITS"])
    start_x: ClassGroup = ClassGroup.from_ab_discriminant(2, 1, disc)
    y_cl, proof_bytes = create_proof_of_time_nwesolowski(
        disc, start_x, number_iters, disc, n_wesolowski)
    output = ClassgroupElement(y_cl[0], y_cl[1])
    proof_of_time = ProofOfTime(
        challenge_hash,
        number_iters,
        output,
        n_wesolowski,
        [uint8(b) for b in proof_bytes],
    )
    coinbase: CoinbaseInfo = CoinbaseInfo(
        height,
        block_rewards.calculate_block_reward(uint32(height)),
        coinbase_target,
    )
    coinbase_sig: PrependSignature = pool_sk.sign_prepend(bytes(coinbase))
    fees_target: FeesTarget = FeesTarget(fee_target, uint64(0))
    # Placeholder generator hash derived from the seed; no real transactions.
    solutions_generator: bytes32 = sha256(seed).digest()
    cost = uint64(0)
    body: Body = Body(coinbase, coinbase_sig, fees_target, None,
                      solutions_generator, cost)
    header_data: HeaderData = HeaderData(
        prev_header_hash,
        timestamp,
        bytes([0] * 32),
        proof_of_space.get_hash(),
        body.get_hash(),
        bytes([0] * 32),
    )
    header_hash_sig: PrependSignature = plot_sk.sign_prepend(
        header_data.get_hash())
    header: Header = Header(header_data, header_hash_sig)
    challenge = Challenge(
        challenge_hash,
        proof_of_space.get_hash(),
        proof_of_time.get_hash(),
        height,
        uint64(prev_weight + difficulty),
        uint64(prev_iters + number_iters),
    )
    header_block = HeaderBlock(proof_of_space, proof_of_time, challenge,
                               header)
    full_block: FullBlock = FullBlock(header_block, body)
    return full_block
def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Dict[Path, int],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    match_str: Optional[str],
    root_path: Path,
    open_no_key_filenames=False,
) -> Tuple[bool, Dict[Path, PlotInfo], Dict[Path, int], Set[Path]]:
    """
    Scan the configured plot directories and (re)load every valid plot.

    Previously-loaded plots whose mtime is unchanged are reused; files that
    failed to open are retried at most every 20 minutes. Returns
    (changed, new_provers, failed_to_open_filenames, no_key_filenames).
    """
    start_time = time.time()
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    log.info(f'Searching directories {config_file["plot_directories"]}')
    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    total_size = 0
    new_provers: Dict[Path, PlotInfo] = {}
    if match_str is not None:
        log.info(
            f'Only loading plots that contain "{match_str}" in the file or directory name'
        )
    for filename in all_filenames:
        filename_str = str(filename)
        if match_str is not None and match_str not in filename_str:
            continue
        if filename.exists():
            if filename in failed_to_open_filenames and (
                    time.time() - failed_to_open_filenames[filename]) < 1200:
                # Try once every 20 minutes to open the file
                continue
            if filename in provers:
                stat_info = filename.stat()
                # Unchanged mtime: keep the already-open prover.
                if stat_info.st_mtime == provers[filename].time_modified:
                    total_size += stat_info.st_size
                    new_provers[filename] = provers[filename]
                    continue
            try:
                prover = DiskProver(str(filename))
                expected_size = _expected_plot_size(
                    prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR / 2.0
                stat_info = filename.stat()
                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.
                if prover.get_size(
                ) >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot (unknown). Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    continue
                (
                    pool_public_key,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())
                # Only use plots that correct keys associated with them
                if farmer_public_keys is not None and farmer_public_key not in farmer_public_keys:
                    log.warning(
                        f"Plot (unknown) has a farmer public key that is not in the farmer's pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue
                if pool_public_keys is not None and pool_public_key not in pool_public_keys:
                    log.warning(
                        f"Plot (unknown) has a pool public key that is not in the farmer's pool pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue
                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key)
                new_provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    farmer_public_key,
                    plot_public_key,
                    local_sk,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                total_size += stat_info.st_size
                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file (unknown). {e} {tb}")
                failed_to_open_filenames[filename] = int(time.time())
                continue
            log.info(
                f"Found plot (unknown) of size {new_provers[filename].prover.get_size()}"
            )
    log.info(
        f"Loaded a total of {len(new_provers)} plots of size {total_size / (1024 ** 4)} TiB, in"
        f" {time.time()-start_time} seconds")
    return changed, new_provers, failed_to_open_filenames, no_key_filenames
def process_file(directory: Path, filename: Path) -> Tuple[int, Dict]:
    """
    Load a single plot file; returns (size_in_bytes, new_provers_for_it).

    Closure over the enclosing load: uses match_str, provers, plot_ids,
    plot_ids_lock, key lists and no_key_filenames from the outer scope, and
    mutates the outer `changed` via nonlocal.
    """
    new_provers: Dict[Path, PlotInfo] = {}
    nonlocal changed
    filename_str = str(filename)
    if match_str is not None and match_str not in filename_str:
        return 0, new_provers
    if filename.exists():
        if filename in failed_to_open_filenames and (
                time.time() - failed_to_open_filenames[filename]) < 1200:
            # Try once every 20 minutes to open the file
            return 0, new_provers
        if filename in provers:
            try:
                stat_info = filename.stat()
            except Exception as e:
                log.error(f"Failed to open file (unknown). {e}")
                return 0, new_provers
            # Unchanged mtime: reuse the open prover, but still de-dupe by
            # plot id under the lock (plots may be copied to several dirs).
            if stat_info.st_mtime == provers[filename].time_modified:
                with plot_ids_lock:
                    if provers[filename].prover.get_id() in plot_ids:
                        log.warning(
                            f"Have multiple copies of the plot (unknown), not adding it."
                        )
                        return 0, new_provers
                    plot_ids.add(provers[filename].prover.get_id())
                new_provers[filename] = provers[filename]
                return stat_info.st_size, new_provers
        try:
            prover = DiskProver(str(filename))
            expected_size = _expected_plot_size(
                prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
            stat_info = filename.stat()
            # TODO: consider checking if the file was just written to (which would mean that the file is still
            # being copied). A segfault might happen in this edge case.
            if prover.get_size(
            ) >= 30 and stat_info.st_size < 0.98 * expected_size:
                log.warning(
                    f"Not farming plot (unknown). Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                    f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                )
                return 0, new_provers
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())
            # Only use plots that correct keys associated with them
            if farmer_public_keys is not None and farmer_public_key not in farmer_public_keys:
                log.warning(
                    f"Plot (unknown) has a farmer public key that is not in the farmer's pk list."
                )
                no_key_filenames.add(filename)
                if not open_no_key_filenames:
                    return 0, new_provers
            # The memo carries either a pool pubkey (OG plots) or a pool
            # contract puzzle hash (portable plots).
            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
                pool_contract_puzzle_hash = None
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_public_key = None
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
            if (pool_public_keys is not None and pool_public_key is not None
                    and pool_public_key not in pool_public_keys):
                log.warning(
                    f"Plot (unknown) has a pool public key that is not in the farmer's pool pk list."
                )
                no_key_filenames.add(filename)
                if not open_no_key_filenames:
                    return 0, new_provers
            stat_info = filename.stat()
            local_sk = master_sk_to_local_sk(local_master_sk)
            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key)
            with plot_ids_lock:
                if prover.get_id() in plot_ids:
                    log.warning(
                        f"Have multiple copies of the plot (unknown), not adding it."
                    )
                    return 0, new_provers
                plot_ids.add(prover.get_id())
            new_provers[filename] = PlotInfo(
                prover,
                pool_public_key,
                pool_contract_puzzle_hash,
                plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
                directory,
            )
            changed = True
        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file (unknown). {e} {tb}")
            failed_to_open_filenames[filename] = int(time.time())
            return 0, new_provers
        log.info(
            f"Found plot (unknown) of size {new_provers[filename].prover.get_size()}"
        )
        if show_memo:
            plot_memo: bytes32
            if pool_contract_puzzle_hash is None:
                plot_memo = stream_plot_info_pk(pool_public_key,
                                                farmer_public_key,
                                                local_master_sk)
            else:
                plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash,
                                                farmer_public_key,
                                                local_master_sk)
            plot_memo_str: str = plot_memo.hex()
            log.info(f"Memo: {plot_memo_str}")
        return stat_info.st_size, new_provers
    return 0, new_provers
# Standalone chiapos smoke script: create a k=21 plot, prove against 5000
# challenges, verify every proof, then delete the plot.
from chiapos import DiskProver, DiskPlotter, Verifier
from hashlib import sha256
import secrets
import os

challenge: bytes = bytes([i for i in range(0, 32)])
# Fixed 32-byte plot id so the generated plot is deterministic.
plot_id: bytes = bytes([
    5, 104, 52, 4, 51, 55, 23, 84, 91, 10, 111, 12, 13, 222, 151, 16, 228,
    211, 254, 45, 92, 198, 204, 10, 9, 10, 11, 129, 139, 171, 15, 23
])
filename = "./myplot.dat"
pl = DiskPlotter()
pl.create_plot_disk(filename, 21, bytes([1, 2, 3, 4, 5]), plot_id)
pr = DiskProver(filename)
total_proofs: int = 0
iterations: int = 5000
v = Verifier()
for i in range(iterations):
    challenge = sha256(i.to_bytes(4, "big")).digest()
    for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
        proof = pr.get_full_proof(challenge, index)
        total_proofs += 1
        # k=21 is hard-coded here to match the create_plot_disk call above.
        ver_quality = v.validate_proof(plot_id, 21, challenge, proof)
        assert (quality == ver_quality)
os.remove(filename)
# NOTE(review): the trailing print statement is truncated in this chunk (the
# f-string continues on a line not visible here); left byte-identical.
print(f"total proofs {total_proofs} out of {iterations}\
def blocking_lookup(
        filename: Path,
        plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
    # Uses the DiskProver object to lookup qualities. This is a blocking call,
    # so it should be run in a thread pool.
    # NOTE(review): closure — `self` and `new_challenge` come from the
    # enclosing handler scope.
    try:
        sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
            plot_info.prover.get_id(),
            new_challenge.challenge_hash,
            new_challenge.sp_hash,
        )
        try:
            quality_strings = plot_info.prover.get_qualities_for_challenge(
                sp_challenge_hash)
        except Exception as e:
            self.harvester.log.error(
                f"Error using prover object. Reinitializing prover object. {e}"
            )
            try:
                # BUGFIX: rebind plot_info to the replacement before
                # retrying. Previously the retry (and the later full-proof
                # fetch) still used the stale prover that had just failed.
                plot_info = dataclasses.replace(
                    plot_info, prover=DiskProver(str(filename)))
                self.harvester.provers[filename] = plot_info
                quality_strings = plot_info.prover.get_qualities_for_challenge(
                    sp_challenge_hash)
            except Exception as e:
                self.harvester.log.error(
                    f"Error reinitializing plot (unknown). {e}")
                return []
        responses: List[Tuple[bytes32, ProofOfSpace]] = []
        if quality_strings is not None:
            # Found proofs of space (on average 1 is expected per plot)
            for index, quality_str in enumerate(quality_strings):
                required_iters: uint64 = calculate_iterations_quality(
                    quality_str,
                    plot_info.prover.get_size(),
                    new_challenge.difficulty,
                    new_challenge.sp_hash,
                )
                sp_interval_iters = calculate_sp_interval_iters(
                    self.harvester.constants, new_challenge.sub_slot_iters)
                if required_iters < sp_interval_iters:
                    # Found a very good proof of space! will fetch the whole proof from disk,
                    # then send to farmer
                    try:
                        proof_xs = plot_info.prover.get_full_proof(
                            sp_challenge_hash, index)
                    except RuntimeError:
                        self.harvester.log.error(
                            f"Exception fetching full proof for (unknown)")
                        continue
                    plot_public_key = ProofOfSpace.generate_plot_public_key(
                        plot_info.local_sk.get_g1(),
                        plot_info.farmer_public_key)
                    responses.append((
                        quality_str,
                        ProofOfSpace(
                            sp_challenge_hash,
                            plot_info.pool_public_key,
                            None,
                            plot_public_key,
                            uint8(plot_info.prover.get_size()),
                            proof_xs,
                        ),
                    ))
        return responses
    except Exception as e:
        self.harvester.log.error(f"Unknown error: {e}")
        return []
def test_k_21(self):
    # End-to-end test: plot with k=21, prove/verify 5000 challenges, then
    # check the plot file's sha256 so the plot format itself is pinned.
    challenge: bytes = bytes([i for i in range(0, 32)])
    # Fixed plot seed so plot contents (and their sha256) are deterministic.
    plot_seed: bytes = bytes(
        [
            5,
            104,
            52,
            4,
            51,
            55,
            23,
            84,
            91,
            10,
            111,
            12,
            13,
            222,
            151,
            16,
            228,
            211,
            254,
            45,
            92,
            198,
            204,
            10,
            9,
            10,
            11,
            129,
            139,
            171,
            15,
            23,
        ]
    )
    pl = DiskPlotter()
    pl.create_plot_disk(
        ".", ".", ".", "myplot.dat", 21, bytes([1, 2, 3, 4, 5]), plot_seed, 300, 32, 8192, 8
    )
    # Drop the plotter so its file handle is released before proving.
    pl = None
    pr = DiskProver(str(Path("myplot.dat")))
    total_proofs: int = 0
    iterations: int = 5000
    v = Verifier()
    for i in range(iterations):
        if i % 100 == 0:
            print(i)
        challenge = sha256(i.to_bytes(4, "big")).digest()
        for index, quality in enumerate(pr.get_qualities_for_challenge(challenge)):
            proof = pr.get_full_proof(challenge, index)
            # A full proof is 64 x-values of k bits each: 8*k bytes.
            assert len(proof) == 8 * pr.get_size()
            computed_quality = v.validate_proof(
                plot_seed, pr.get_size(), challenge, proof
            )
            assert computed_quality == quality
            total_proofs += 1
    print(
        f"total proofs {total_proofs} out of {iterations}\
        {total_proofs / iterations}"
    )
    # Proof count should be near the expected ~1 proof/challenge.
    assert total_proofs > 4000
    assert total_proofs < 6000
    # Release the prover's handle before hashing/deleting the file.
    pr = None
    sha256_plot_hash = sha256()
    with open("myplot.dat", "rb") as f:
        # Read and update hash string value in blocks of 4K
        for byte_block in iter(lambda: f.read(4096), b""):
            sha256_plot_hash.update(byte_block)
        plot_hash = str(sha256_plot_hash.hexdigest())
    assert (
        plot_hash
        == "80e32f560f3a4347760d6baae8d16fbaf484948088bff05c51bdcc24b7bc40d9"
    )
    print(f"\nPlotfile asserted sha256: {plot_hash}\n")
    Path("myplot.dat").unlink()
def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Set[Path],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    root_path: Path,
    open_no_key_filenames=False,
) -> Tuple[bool, Dict[Path, PlotInfo], Set[Path], Set[Path]]:
    """
    Scan the configured plot directories and load any new/changed plots
    into `provers` (mutated in place).

    Returns (changed, provers, failed_to_open_filenames, no_key_filenames).
    Key filters are skipped when the corresponding key list is None; with
    open_no_key_filenames=True, plots with unknown keys are still loaded.
    """
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    log.info(f'Searching directories {config_file["plot_directories"]}')
    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    total_size = 0
    for filename in all_filenames:
        if filename in provers:
            stat_info = filename.stat()
            # Unchanged mtime: keep the already-open prover.
            if stat_info.st_mtime == provers[filename].time_modified:
                total_size += stat_info.st_size
                continue
        # Files that failed before are never retried in this version.
        if filename in failed_to_open_filenames:
            continue
        if filename.exists():
            try:
                prover = DiskProver(str(filename))
                (
                    pool_public_key,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())
                # Only use plots that correct keys associated with them
                if (farmer_public_keys is not None
                        and farmer_public_key not in farmer_public_keys):
                    log.warning(
                        f"Plot (unknown) has a farmer public key that is not in the farmer's pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue
                if (pool_public_keys is not None
                        and pool_public_key not in pool_public_keys):
                    log.warning(
                        f"Plot (unknown) has a pool public key that is not in the farmer's pool pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue
                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key)
                provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    farmer_public_key,
                    plot_public_key,
                    local_sk,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                total_size += stat_info.st_size
                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file (unknown). {e} {tb}")
                failed_to_open_filenames.add(filename)
                continue
            log.info(
                f"Found plot (unknown) of size {provers[filename].prover.get_size()}"
            )
    log.info(
        f"Loaded a total of {len(provers)} plots of size {total_size / (1024 ** 4)} TB"
    )
    return (changed, provers, failed_to_open_filenames, no_key_filenames)
def _create_block(
    self,
    test_constants: Dict,
    challenge_hash: bytes32,
    height: uint32,
    prev_header_hash: bytes32,
    prev_iters: uint64,
    prev_weight: uint128,
    timestamp: uint64,
    difficulty: uint64,
    min_iters: uint64,
    seed: bytes,
    genesis: bool = False,  # NOTE(review): not read anywhere in this body — confirm callers rely on it
    reward_puzzlehash: bytes32 = None,
    transactions: Program = None,
    aggsig: BLSSignature = None,
    fees: uint64 = uint64(0),
) -> FullBlock:
    """
    Creates a block with the specified details. Uses the stored plots to
    create a proof of space, and also evaluates the VDF for the proof of time.

    Plot selection: with self.use_any_pos, a plot is picked pseudo-randomly
    (seeded from `seed`, so the same seed reproduces the same chain/reorg);
    otherwise every plot's qualities are scanned and the numerically largest
    quality wins.
    """
    selected_prover = None
    selected_plot_sk = None
    selected_pool_sk = None
    selected_proof_index = 0
    plots = list(self.plot_config["plots"].items())
    selected_quality: Optional[bytes] = None
    best_quality = 0
    if self.use_any_pos:
        # Try up to 3x the plot count so a quality-less draw can be retried.
        for i in range(len(plots) * 3):
            # Allow passing in seed, to create reorgs and different chains
            random.seed(seed + i.to_bytes(4, "big"))
            seeded_pn = random.randint(0, len(plots) - 1)
            pool_sk = PrivateKey.from_bytes(
                bytes.fromhex(plots[seeded_pn][1]["pool_sk"])
            )
            plot_sk = PrivateKey.from_bytes(
                bytes.fromhex(plots[seeded_pn][1]["sk"])
            )
            prover = DiskProver(plots[seeded_pn][0])
            qualities = prover.get_qualities_for_challenge(challenge_hash)
            if len(qualities) > 0:
                # Inner check is redundant (we are already in the
                # use_any_pos branch) but preserved as written.
                if self.use_any_pos:
                    selected_quality = qualities[0]
                    selected_prover = prover
                    selected_pool_sk = pool_sk
                    selected_plot_sk = plot_sk
                    break
    else:
        # Deterministic selection: best (largest) quality across all plots.
        for i in range(len(plots)):
            pool_sk = PrivateKey.from_bytes(bytes.fromhex(plots[i][1]["pool_sk"]))
            plot_sk = PrivateKey.from_bytes(bytes.fromhex(plots[i][1]["sk"]))
            prover = DiskProver(plots[i][0])
            qualities = prover.get_qualities_for_challenge(challenge_hash)
            j = 0
            for quality in qualities:
                qual_int = int.from_bytes(quality, "big", signed=False)
                if qual_int > best_quality:
                    best_quality = qual_int
                    selected_quality = quality
                    selected_prover = prover
                    selected_pool_sk = pool_sk
                    selected_plot_sk = plot_sk
                    selected_proof_index = j
                j += 1

    assert selected_prover
    assert selected_pool_sk
    assert selected_plot_sk
    pool_pk = selected_pool_sk.get_public_key()
    plot_pk = selected_plot_sk.get_public_key()
    if selected_quality is None:
        raise RuntimeError("No proofs for this challenge")

    # Build the proof of space from the winning plot and proof index.
    proof_xs: bytes = selected_prover.get_full_proof(
        challenge_hash, selected_proof_index
    )
    proof_of_space: ProofOfSpace = ProofOfSpace(
        challenge_hash, pool_pk, plot_pk, selected_prover.get_size(), proof_xs
    )
    number_iters: uint64 = pot_iterations.calculate_iterations(
        proof_of_space, difficulty, min_iters
    )
    # print("Doing iters", number_iters)
    # Classgroup elements are fixed-width big-endian ints; +16 bits of
    # headroom, >>4 converts bits to bytes.
    int_size = (test_constants["DISCRIMINANT_SIZE_BITS"] + 16) >> 4

    # Evaluate the VDF (proof of time): result packs a, b, then the proof.
    result = prove(
        challenge_hash, test_constants["DISCRIMINANT_SIZE_BITS"], number_iters
    )

    output = ClassgroupElement(
        int512(int.from_bytes(result[0:int_size], "big", signed=True,)),
        int512(
            int.from_bytes(result[int_size : 2 * int_size], "big", signed=True,)
        ),
    )
    proof_bytes = result[2 * int_size : 4 * int_size]

    proof_of_time = ProofOfTime(
        challenge_hash, number_iters, output, self.n_wesolowski, proof_bytes,
    )

    if not reward_puzzlehash:
        reward_puzzlehash = self.fee_target

    # Use the extension data to create different blocks based on header hash
    extension_data: bytes32 = bytes32([random.randint(0, 255) for _ in range(32)])
    cost = uint64(0)

    coinbase_reward = block_rewards.calculate_block_reward(height)
    fee_reward = uint64(block_rewards.calculate_base_fee(height) + fees)

    coinbase_coin, coinbase_signature = create_coinbase_coin_and_signature(
        height, reward_puzzlehash, coinbase_reward, selected_pool_sk
    )

    parent_coin_name = std_hash(std_hash(height))
    fees_coin = Coin(parent_coin_name, reward_puzzlehash, uint64(fee_reward))

    # Create filter
    byte_array_tx: List[bytes32] = []
    tx_additions: List[Coin] = []
    tx_removals: List[bytes32] = []
    encoded = None
    if transactions:
        # NOTE(review): `error` from get_name_puzzle_conditions is ignored
        # here — confirm failures are acceptable in this test harness.
        error, npc_list, _ = get_name_puzzle_conditions(transactions)
        additions: List[Coin] = additions_for_npc(npc_list)
        for coin in additions:
            tx_additions.append(coin)
            byte_array_tx.append(bytearray(coin.puzzle_hash))
        for npc in npc_list:
            tx_removals.append(npc.coin_name)
            byte_array_tx.append(bytearray(npc.coin_name))
        bip158: PyBIP158 = PyBIP158(byte_array_tx)
        encoded = bytes(bip158.GetEncoded())

    removal_merkle_set = MerkleSet()
    addition_merkle_set = MerkleSet()

    # Create removal Merkle set
    for coin_name in tx_removals:
        removal_merkle_set.add_already_hashed(coin_name)

    # Create addition Merkle set
    puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}
    for coin in tx_additions:
        if coin.puzzle_hash in puzzlehash_coin_map:
            puzzlehash_coin_map[coin.puzzle_hash].append(coin)
        else:
            puzzlehash_coin_map[coin.puzzle_hash] = [coin]

    # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
    for puzzle, coins in puzzlehash_coin_map.items():
        addition_merkle_set.add_already_hashed(puzzle)
        addition_merkle_set.add_already_hashed(hash_coin_list(coins))

    additions_root = addition_merkle_set.get_root()
    removal_root = removal_merkle_set.get_root()

    generator_hash = (
        transactions.get_tree_hash()
        if transactions is not None
        else bytes32([0] * 32)
    )
    filter_hash = std_hash(encoded) if encoded is not None else bytes32([0] * 32)

    header_data: HeaderData = HeaderData(
        height,
        prev_header_hash,
        timestamp,
        filter_hash,
        proof_of_space.get_hash(),
        uint128(prev_weight + difficulty),
        uint64(prev_iters + number_iters),
        additions_root,
        removal_root,
        coinbase_coin,
        coinbase_signature,
        fees_coin,
        aggsig,
        cost,
        extension_data,
        generator_hash,
    )

    # The header is signed with the plot key so farmers can't be impersonated.
    header_hash_sig: PrependSignature = selected_plot_sk.sign_prepend(
        header_data.get_hash()
    )

    header: Header = Header(header_data, header_hash_sig)
    full_block: FullBlock = FullBlock(
        proof_of_space, proof_of_time, header, transactions, encoded
    )

    return full_block
def process_file(file_path: Path) -> Optional[PlotInfo]:
    """
    Validate and load a single plot file during a refresh cycle.

    Returns the (possibly cached) PlotInfo for the file, or None when the
    file is skipped: refresh disabled, match filter miss, recent open
    failure, known duplicate, missing keys, or still being copied.
    Side effects: mutates self.failed_to_open_filenames, self.no_key_filenames,
    self.cache, self.plot_filename_paths, and appends to result.loaded.
    """
    if not self._refreshing_enabled:
        return None
    filename_str = str(file_path)
    if self.match_str is not None and self.match_str not in filename_str:
        return None
    if (
        file_path in self.failed_to_open_filenames
        and (time.time() - self.failed_to_open_filenames[file_path])
        < self.refresh_parameter.retry_invalid_seconds
    ):
        # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
        return None
    # Already loaded in a previous cycle — reuse as-is.
    if file_path in self.plots:
        return self.plots[file_path]
    # Duplicate detection: a plot with this basename already loaded from
    # another directory is skipped.
    entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
    if entry is not None:
        loaded_parent, duplicates = entry
        if str(file_path.parent) in duplicates:
            log.debug(f"Skip duplicated plot {str(file_path)}")
            return None
    try:
        if not file_path.exists():
            return None

        prover = DiskProver(str(file_path))

        log.debug(f"process_file {str(file_path)}")

        expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
        stat_info = file_path.stat()

        # TODO: consider checking if the file was just written to (which would mean that the file is still
        # being copied). A segfault might happen in this edge case.

        # A file markedly smaller than the expected size for its k is
        # assumed to be an in-progress copy and is skipped for now.
        if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
            log.warning(
                f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
            )
            return None

        cache_entry = self.cache.get(prover.get_id())
        if cache_entry is None:
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that correct keys associated with them
            if farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None

            # The memo carries either a pool public key (OG plot) or a pool
            # contract puzzle hash (NFT plot) — exactly one is set.
            pool_public_key: Optional[G1Element] = None
            pool_contract_puzzle_hash: Optional[bytes32] = None
            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None

            # If a plot is in `no_key_filenames` the keys were missing in earlier refresh cycles. We can remove
            # the current plot from that list if its in there since we passed the key checks above.
            if file_path in self.no_key_filenames:
                self.no_key_filenames.remove(file_path)

            local_sk = master_sk_to_local_sk(local_master_sk)

            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
            )

            cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
            self.cache.update(prover.get_id(), cache_entry)

        # Record (or detect) the plot's parent directory under the lock; a
        # second copy of the same basename is logged and not loaded.
        with self.plot_filename_paths_lock:
            paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
            if paths is None:
                paths = (str(Path(prover.get_filename()).parent), set())
                self.plot_filename_paths[file_path.name] = paths
            else:
                paths[1].add(str(Path(prover.get_filename()).parent))
                log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
                return None

        new_plot_info: PlotInfo = PlotInfo(
            prover,
            cache_entry.pool_public_key,
            cache_entry.pool_contract_puzzle_hash,
            cache_entry.plot_public_key,
            stat_info.st_size,
            stat_info.st_mtime,
        )

        with counter_lock:
            result.loaded.append(new_plot_info)

        # A previously failing file that now loads cleanly is forgiven.
        if file_path in self.failed_to_open_filenames:
            del self.failed_to_open_filenames[file_path]

    except Exception as e:
        tb = traceback.format_exc()
        log.error(f"Failed to open file {file_path}. {e} {tb}")
        self.failed_to_open_filenames[file_path] = int(time.time())
        return None
    log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")

    if self.show_memo:
        # NOTE(review): on the cache-hit path above, pool_contract_puzzle_hash,
        # pool_public_key, farmer_public_key and local_master_sk are never
        # bound, so this branch would raise NameError — confirm show_memo is
        # only used with a cold cache.
        plot_memo: bytes32
        if pool_contract_puzzle_hash is None:
            plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
        else:
            plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
        plot_memo_str: str = plot_memo.hex()
        log.info(f"Memo: {plot_memo_str}")

    return new_plot_info
def process_file(file_path: Path) -> Dict:
    """
    Validate and load a single plot file, returning a dict with at most one
    entry mapping file_path -> PlotInfo (empty when the file is skipped:
    filter miss, recent open failure, cached-and-unchanged handled via the
    returned entry, duplicate, batch limit reached, missing keys, or still
    being copied).

    Side effects: mutates self.failed_to_open_filenames, self.no_key_filenames,
    self.plot_filename_paths, and the shared refresh `result` counters.
    """
    new_provers: Dict[Path, PlotInfo] = {}
    filename_str = str(file_path)
    if self.match_str is not None and self.match_str not in filename_str:
        return new_provers
    if file_path.exists():
        # Back off after a failure: skip while the failure is still recent.
        # BUG FIX: the comparison was `> 1200`, which retried continuously
        # for 20 minutes and then skipped the file forever — the inverse of
        # the "try once every 20 minutes" intent and of the sibling
        # process_file's `< retry_invalid_seconds` check.
        if (
            file_path in self.failed_to_open_filenames
            and (time.time() - self.failed_to_open_filenames[file_path]) < 1200
        ):
            # Try once every 20 minutes to open the file
            return new_provers
        if file_path in self.plots:
            try:
                stat_info = file_path.stat()
            except Exception as e:
                log.error(f"Failed to open file {file_path}. {e}")
                return new_provers
            # Unchanged since last load: reuse the cached PlotInfo.
            if stat_info.st_mtime == self.plots[file_path].time_modified:
                new_provers[file_path] = self.plots[file_path]
                return new_provers
        # Duplicate detection: skip a basename already loaded elsewhere.
        entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
        if entry is not None:
            loaded_parent, duplicates = entry
            if str(file_path.parent) in duplicates:
                log.debug(f"Skip duplicated plot {str(file_path)}")
                return new_provers
        try:
            # Respect the per-refresh batch size; overflow is counted so the
            # caller knows another batch is needed.
            with counter_lock:
                if result.processed_files >= self.refresh_parameter.batch_size:
                    result.remaining_files += 1
                    return new_provers
                result.processed_files += 1

            prover = DiskProver(str(file_path))

            log.debug(f"process_file {str(file_path)}")

            expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
            stat_info = file_path.stat()

            # TODO: consider checking if the file was just written to (which would mean that the file is still
            # being copied). A segfault might happen in this edge case.

            # Much smaller than expected for its k => assume in-progress copy.
            if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                log.warning(
                    f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                    f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                )
                return new_provers

            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that correct keys associated with them
            if self.farmer_public_keys is not None and farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            # The memo carries either a pool public key (OG plot) or a pool
            # contract puzzle hash (NFT plot) — exactly one is set.
            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
                pool_contract_puzzle_hash = None
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_public_key = None
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if (
                self.pool_public_keys is not None
                and pool_public_key is not None
                and pool_public_key not in self.pool_public_keys
            ):
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            stat_info = file_path.stat()
            local_sk = master_sk_to_local_sk(local_master_sk)

            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
            )

            # Record (or detect) the plot's parent directory under the lock;
            # a second copy of the same basename is logged and not loaded.
            with self.plot_filename_paths_lock:
                if file_path.name not in self.plot_filename_paths:
                    self.plot_filename_paths[file_path.name] = (str(Path(prover.get_filename()).parent), set())
                else:
                    self.plot_filename_paths[file_path.name][1].add(str(Path(prover.get_filename()).parent))
                if len(self.plot_filename_paths[file_path.name][1]) > 0:
                    log.warning(
                        f"Have multiple copies of the plot {file_path} in "
                        f"{self.plot_filename_paths[file_path.name][1]}."
                    )
                    return new_provers

            new_provers[file_path] = PlotInfo(
                prover,
                pool_public_key,
                pool_contract_puzzle_hash,
                plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
            )

            with counter_lock:
                result.loaded_plots += 1
                result.loaded_size += stat_info.st_size

        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file {file_path}. {e} {tb}")
            self.failed_to_open_filenames[file_path] = int(time.time())
            return new_provers
        log.info(f"Found plot {file_path} of size {new_provers[file_path].prover.get_size()}")

        if self.show_memo:
            plot_memo: bytes32
            if pool_contract_puzzle_hash is None:
                plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
            else:
                plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
            plot_memo_str: str = plot_memo.hex()
            log.info(f"Memo: {plot_memo_str}")

        return new_provers
    return new_provers