async def request_proof_of_space(
        self, request: harvester_protocol.RequestProofOfSpace):
    """
    The farmer requests a proof of space, for one of the plots.
    We look up the correct plot based on the plot id and response number,
    lookup the proof, and return it.
    """
    response: Optional[harvester_protocol.RespondProofOfSpace] = None
    challenge_hash = request.challenge_hash
    filename = Path(request.plot_id).resolve()
    index = request.response_number
    proof_xs: bytes
    plot_info = self.provers[filename]

    try:
        try:
            proof_xs = plot_info.prover.get_full_proof(challenge_hash, index)
        except RuntimeError:
            # The prover handle can go stale (e.g. the plot file was moved or
            # re-opened underneath us); re-open the plot once and retry.
            prover = DiskProver(str(filename))
            self.provers[filename] = PlotInfo(
                prover,
                plot_info.pool_public_key,
                plot_info.farmer_public_key,
                plot_info.plot_public_key,
                plot_info.local_sk,
                plot_info.file_size,
                plot_info.time_modified,
            )
            proof_xs = self.provers[filename].prover.get_full_proof(
                challenge_hash, index)
    except KeyError:
        # BUG FIX: previously this fell through with proof_xs unbound, which
        # raised UnboundLocalError below. Log and abort instead.
        log.warning(f"KeyError plot {filename} does not exist.")
        return

    plot_info = self.provers[filename]
    plot_public_key = ProofOfSpace.generate_plot_public_key(
        plot_info.local_sk.get_g1(), plot_info.farmer_public_key)

    proof_of_space: ProofOfSpace = ProofOfSpace(
        challenge_hash,
        plot_info.pool_public_key,
        plot_public_key,
        uint8(self.provers[filename].prover.get_size()),
        proof_xs,
    )
    response = harvester_protocol.RespondProofOfSpace(
        request.plot_id,
        request.response_number,
        proof_of_space,
    )
    if response:
        yield OutboundMessage(
            NodeType.FARMER,
            Message("respond_proof_of_space", response),
            Delivery.RESPOND,
        )
def get_pospaces_for_challenge(
    self,
    constants: ConsensusConstants,
    challenge_hash: bytes32,
    signage_point: bytes32,
    seed: bytes,
    difficulty: uint64,
    sub_slot_iters: uint64,
) -> List[Tuple[uint64, ProofOfSpace]]:
    """
    Find every proof of space among the stored plots that is eligible for
    this challenge/signage point and whose required iterations fall within
    the current signage-point interval.

    Returns a list of (required_iters, proof_of_space) pairs. With 10%
    probability (seeded by ``seed``) one proof is dropped, to produce
    different chains from different seeds.
    """
    found_proofs: List[Tuple[uint64, ProofOfSpace]] = []
    # Deterministic plot ordering (sorted by path string) so the result is
    # reproducible for a given seed.
    plots: List[PlotInfo] = [
        plot_info for _, plot_info in sorted(
            list(self.plots.items()), key=lambda x: str(x[0]))
    ]
    random.seed(seed)
    for plot_info in plots:
        plot_id = plot_info.prover.get_id()
        # Only a small fraction of plots pass the filter for each challenge.
        if ProofOfSpace.passes_plot_filter(constants, plot_id,
                                           challenge_hash, signage_point):
            new_challenge: bytes32 = ProofOfSpace.calculate_pos_challenge(
                plot_id, challenge_hash, signage_point)
            qualities = plot_info.prover.get_qualities_for_challenge(
                new_challenge)
            for proof_index, quality_str in enumerate(qualities):
                required_iters = calculate_iterations_quality(
                    quality_str,
                    plot_info.prover.get_size(),
                    difficulty,
                    signage_point,
                )
                # Keep only proofs fast enough for this sp interval.
                if required_iters < calculate_sp_interval_iters(
                        constants, sub_slot_iters):
                    proof_xs: bytes = plot_info.prover.get_full_proof(
                        new_challenge, proof_index)
                    plot_pk = ProofOfSpace.generate_plot_public_key(
                        plot_info.local_sk.get_g1(),
                        plot_info.farmer_public_key,
                    )
                    proof_of_space: ProofOfSpace = ProofOfSpace(
                        new_challenge,
                        plot_info.pool_public_key,
                        None,
                        plot_pk,
                        plot_info.prover.get_size(),
                        proof_xs,
                    )
                    found_proofs.append((required_iters, proof_of_space))
    random_sample = found_proofs
    if len(found_proofs) >= 1:
        if random.random() < 0.1:
            # Removes some proofs of space to create "random" chains, based on the seed
            random_sample = random.sample(found_proofs,
                                          len(found_proofs) - 1)
    return random_sample
async def request_signature(self, request: harvester_protocol.RequestSignature):
    """
    Sign the farmer-supplied header hash with this harvester's local key.

    The farmer requests a signature on the header hash for one of the proofs
    we found. The half-signature produced here is later combined with the
    farmer's half into a complete PrependSignature; this can also be used
    for pooling.
    """
    plot_info = self.provers[Path(request.plot_id).resolve()]
    harvester_sk = plot_info.local_sk

    # Aggregate plot public key: local (harvester) key + farmer key.
    aggregate_pk = ProofOfSpace.generate_plot_public_key(
        harvester_sk.get_g1(), plot_info.farmer_public_key)

    # This is only a partial signature. When combined with the farmer's half, it will
    # form a complete PrependSignature.
    partial_sig: G2Element = AugSchemeMPL.sign(
        harvester_sk, request.message, aggregate_pk)

    response: harvester_protocol.RespondSignature = harvester_protocol.RespondSignature(
        request.plot_id,
        request.message,
        harvester_sk.get_g1(),
        plot_info.farmer_public_key,
        partial_sig,
    )
    yield OutboundMessage(
        NodeType.FARMER,
        Message("respond_signature", response),
        Delivery.RESPOND,
    )
def _add_plot(
    self, str_path: str, plot_sk: PrivateKey, pool_pk: Optional[PublicKey]
) -> bool:
    """
    Register a plot file in plots.yaml and refresh the in-memory plot set.

    If ``pool_pk`` is None, it is discovered by matching the plot file's id
    against each known pool pubkey candidate. Returns False when no
    candidate matches; True once the config is saved.
    """
    plot_config = load_config(self.root_path, "plots.yaml")

    if pool_pk is None:
        # PERF FIX: the prover (and thus the plot id) does not depend on the
        # candidate pool key, so open the plot once instead of per iteration.
        pr = DiskProver(str_path)
        plot_id = pr.get_id()
        for pool_pk_cand in self.pool_pubkeys:
            if (
                ProofOfSpace.calculate_plot_seed(
                    pool_pk_cand, plot_sk.get_public_key()
                )
                == plot_id
            ):
                pool_pk = pool_pk_cand
                break
        if pool_pk is None:
            # No known pool key produces this plot's seed.
            return False

    plot_config["plots"][str_path] = {
        "sk": bytes(plot_sk).hex(),
        "pool_pk": bytes(pool_pk).hex(),
    }
    save_config(self.root_path, "plots.yaml", plot_config)
    self._refresh_plots()
    return True
async def respond_signature(self, response: harvester_protocol.RespondSignature):
    """
    Receives a signature on a block header hash, which is required for
    submitting a block to the blockchain.
    """
    header_hash = response.message
    # NOTE(review): annotation corrected from bytes32 — the stored value is a
    # ProofOfSpace (its .plot_public_key and .get_hash() are used below).
    proof_of_space: ProofOfSpace = self.header_hash_to_pos[header_hash]
    validates: bool = False
    for sk in self._get_private_keys():
        pk = sk.get_g1()
        if pk == response.farmer_pk:
            # Rebuild the aggregate plot key from the harvester's local pk and
            # this farmer pk; it must match the proof of space's key.
            agg_pk = ProofOfSpace.generate_plot_public_key(
                response.local_pk, pk)
            assert agg_pk == proof_of_space.plot_public_key
            # Combine the harvester's partial signature with the farmer share
            # into a full aggregate signature, then verify it.
            farmer_share = AugSchemeMPL.sign(sk, header_hash, agg_pk)
            agg_sig = AugSchemeMPL.aggregate(
                [response.message_signature, farmer_share]
            )
            validates = AugSchemeMPL.verify(agg_pk, header_hash, agg_sig)
            if validates:
                break
    # agg_sig is only bound when some key matched; the assert guards its use.
    assert validates
    pos_hash: bytes32 = proof_of_space.get_hash()
    request = farmer_protocol.HeaderSignature(pos_hash, header_hash, agg_sig)
    yield OutboundMessage(
        NodeType.FULL_NODE, Message("header_signature", request),
        Delivery.BROADCAST
    )
def __init__(self):
    """
    Create the genesis test plots on disk (under tests/plots), skipping any
    that already exist. On Ctrl-C, partially written plots are removed so a
    rerun starts from a clean state.

    NOTE(review): relies on names from the enclosing scope — ``plot_pks``,
    ``pool_pk``, ``num_plots`` and ``k`` — confirm against the full file.
    """
    # One plot seed per plot public key, all paired with the same pool key.
    plot_seeds: List[bytes32] = [
        ProofOfSpace.calculate_plot_seed(pool_pk, plot_pk)
        for plot_pk in plot_pks
    ]
    self.plot_dir = os.path.join("tests", "plots")
    # Deterministic filenames (index-derived hash suffix) so existing plots
    # can be detected and reused across runs.
    self.filenames: List[str] = [
        "genesis-plots-" + str(k)
        + sha256(int.to_bytes(i, 4, "big")).digest().hex() + ".dat"
        for i in range(num_plots)
    ]
    done_filenames = set()
    try:
        for pn, filename in enumerate(self.filenames):
            if not os.path.exists(os.path.join(self.plot_dir, filename)):
                plotter = DiskPlotter()
                plotter.create_plot_disk(
                    self.plot_dir,
                    self.plot_dir,
                    filename,
                    k,
                    b"genesis",
                    plot_seeds[pn],
                )
                done_filenames.add(filename)
    except KeyboardInterrupt:
        # Delete plots that were started but not finished, then abort.
        for filename in self.filenames:
            if filename not in done_filenames and os.path.exists(
                    os.path.join(self.plot_dir, filename)):
                os.remove(os.path.join(self.plot_dir, filename))
        sys.exit(1)
async def test_invalid_pos_hash(self, initial_blockchain):
    """
    A block whose header commits to the hash of a tampered proof of space
    must be rejected with INVALID_POSPACE_HASH.
    """
    blocks, b = initial_blockchain

    # Flip one byte of an otherwise valid proof to invalidate it.
    bad_pos_proof = bytearray([i for i in blocks[9].proof_of_space.proof])
    bad_pos_proof[0] = uint8((bad_pos_proof[0] + 1) % 256)
    bad_pos = ProofOfSpace(
        blocks[9].proof_of_space.challenge_hash,
        blocks[9].proof_of_space.pool_public_key,
        blocks[9].proof_of_space.plot_public_key,
        blocks[9].proof_of_space.size,
        bytes(bad_pos_proof),
    )
    # Rebuild the header data, substituting the tampered proof's hash while
    # keeping every other field from the original block.
    new_header_data = HeaderData(
        blocks[9].header.data.height,
        blocks[9].header.data.prev_header_hash,
        blocks[9].header.data.timestamp,
        blocks[9].header.data.filter_hash,
        bad_pos.get_hash(),
        blocks[9].header.data.weight,
        blocks[9].header.data.total_iters,
        blocks[9].header.data.additions_root,
        blocks[9].header.data.removals_root,
        blocks[9].header.data.farmer_rewards_puzzle_hash,
        blocks[9].header.data.total_transaction_fees,
        blocks[9].header.data.pool_target,
        blocks[9].header.data.aggregated_signature,
        blocks[9].header.data.cost,
        blocks[9].header.data.extension_data,
        blocks[9].header.data.generator_hash,
    )

    # Proof of space has invalid
    block_bad = FullBlock(
        blocks[9].proof_of_space,
        blocks[9].proof_of_time,
        Header(
            new_header_data,
            bt.get_plot_signature(
                new_header_data, blocks[9].proof_of_space.plot_public_key
            ),
        ),
        blocks[9].transactions_generator,
        blocks[9].transactions_filter,
    )
    result, removed, error_code = await b.receive_block(block_bad)
    assert result == ReceiveBlockResult.INVALID_BLOCK
    assert error_code == Err.INVALID_POSPACE_HASH
def main():
    """
    Script for checking all plots in the plots.yaml file.
    Specify a number of challenge to test for each plot.
    """
    parser = argparse.ArgumentParser(description="Chia plot checking script.")
    parser.add_argument("-n", "--num", help="Number of challenges",
                        type=int, default=1000)
    args = parser.parse_args()

    v = Verifier()
    if os.path.isfile(plot_config_filename):
        # FIX: close the config file handle (was a bare open() left to GC).
        with open(plot_config_filename, "r") as f:
            plot_config = safe_load(f)
        for plot_filename, plot_info in plot_config["plots"].items():
            # The plot seed is derived from the pool pk and the plot sk's pk.
            plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed(
                PublicKey.from_bytes(bytes.fromhex(plot_info["pool_pk"])),
                PrivateKey.from_bytes(bytes.fromhex(
                    plot_info["sk"])).get_public_key(),
            )
            if not os.path.isfile(plot_filename):
                # Tries relative path
                full_path: str = os.path.join(plot_root, plot_filename)
                if not os.path.isfile(full_path):
                    # Tries absolute path
                    full_path = plot_filename
                    if not os.path.isfile(full_path):
                        print(f"Plot file {full_path} not found.")
                        continue
                pr = DiskProver(full_path)
            else:
                pr = DiskProver(plot_filename)

            total_proofs = 0
            try:
                for i in range(args.num):
                    challenge = sha256(i.to_bytes(32, "big")).digest()
                    for index, quality in enumerate(
                            pr.get_qualities_for_challenge(challenge)):
                        proof = pr.get_full_proof(challenge, index)
                        total_proofs += 1
                        # Every returned proof must verify to the same quality.
                        ver_quality = v.validate_proof(plot_seed,
                                                       pr.get_size(),
                                                       challenge, proof)
                        assert quality == ver_quality
            except BaseException as e:
                # NOTE(review): BaseException kept (matches original
                # best-effort behavior for a CLI diagnostic tool).
                print(
                    f"{type(e)}: {e} error in proving/verifying for plot {plot_filename}"
                )
            print(
                f"{plot_filename}: Proofs {total_proofs} / {args.num}, {round(total_proofs/float(args.num), 4)}"
            )
    else:
        # FIX: corrected message typo ("Not plot file" -> "No plot file").
        print(f"No plot file found at {plot_config_filename}")
async def request_proof_of_space(
    self, request: harvester_protocol.RequestProofOfSpace
):
    """
    The farmer requests a signature on the header hash, for one of the proofs that we found.
    We look up the correct plot based on the quality, lookup the proof, and return it.
    """
    response: Optional[harvester_protocol.RespondProofOfSpace] = None
    try:
        # Using the quality string, find the right plot and index from our solutions
        challenge_hash, filename, index = self.challenge_hashes[
            request.quality_string
        ]
    except KeyError:
        log.warning(f"Quality string {request.quality_string} not found")
        return
    if index is not None:
        proof_xs: bytes
        try:
            try:
                proof_xs = self.provers[filename].get_full_proof(
                    challenge_hash, index
                )
            except RuntimeError:
                # Stale prover handle: re-open the plot file and retry once.
                self.provers[filename] = DiskProver(str(filename))
                proof_xs = self.provers[filename].get_full_proof(
                    challenge_hash, index
                )
        except KeyError:
            # BUG FIX: previously this fell through with proof_xs unbound,
            # raising UnboundLocalError below. Log and abort instead.
            log.warning(f"KeyError plot {filename} does not exist.")
            return
        pool_pubkey = PublicKey.from_bytes(
            bytes.fromhex(self.plot_config["plots"][filename]["pool_pk"])
        )
        plot_pubkey = PrivateKey.from_bytes(
            bytes.fromhex(self.plot_config["plots"][filename]["sk"])
        ).get_public_key()
        proof_of_space: ProofOfSpace = ProofOfSpace(
            challenge_hash,
            pool_pubkey,
            plot_pubkey,
            uint8(self.provers[filename].get_size()),
            proof_xs,
        )
        response = harvester_protocol.RespondProofOfSpace(
            request.quality_string, proof_of_space
        )
    if response:
        yield OutboundMessage(
            NodeType.FARMER,
            Message("respond_proof_of_space", response),
            Delivery.RESPOND,
        )
def calculate_iterations(
    proof_of_space: ProofOfSpace,
    difficulty: uint64,
    min_iterations: uint64,
) -> uint64:
    """
    Convenience wrapper around calculate_iterations_quality: derives the
    quality string from the proof of space itself, then delegates the
    iteration-count computation to it.
    """
    quality_str: bytes32 = proof_of_space.verify_and_get_quality_string()
    return calculate_iterations_quality(
        quality_str,
        proof_of_space.size,
        difficulty,
        min_iterations,
    )
async def new_challenge(self, new_challenge: harvester_protocol.NewChallenge):
    """
    The harvester receives a new challenge from the farmer, and looks up the quality
    for any proofs of space that are are found in the plots. If proofs are found, a
    ChallengeResponse message is sent for each of the proofs found.
    """
    challenge_size = len(new_challenge.challenge_hash)
    if challenge_size != 32:
        raise ValueError(
            f"Invalid challenge size {challenge_size}, 32 was expected")

    all_responses = []
    for filename, prover in self.provers.items():
        try:
            quality_strings = prover.get_qualities_for_challenge(
                new_challenge.challenge_hash)
        except RuntimeError:
            log.error(
                f"Error using prover object on {filename}. Reinitializing prover object."
            )
            quality_strings = None
            try:
                # BUG FIX: rebind `prover` so the retry (and get_size() below)
                # uses the freshly opened DiskProver; previously the broken
                # old prover object was queried again.
                prover = DiskProver(filename)
                self.provers[filename] = prover
                quality_strings = prover.get_qualities_for_challenge(
                    new_challenge.challenge_hash)
            except RuntimeError:
                log.error(
                    f"Retry-Error using prover object on {filename}. Giving up."
                )
                quality_strings = None
        if quality_strings is not None:
            for index, quality_str in enumerate(quality_strings):
                quality = ProofOfSpace.quality_str_to_quality(
                    new_challenge.challenge_hash, quality_str)
                # Remember where each quality came from, so the farmer can
                # request the full proof later.
                self.challenge_hashes[quality] = (
                    new_challenge.challenge_hash,
                    filename,
                    uint8(index),
                )
                response: harvester_protocol.ChallengeResponse = harvester_protocol.ChallengeResponse(
                    new_challenge.challenge_hash, quality, prover.get_size())
                all_responses.append(response)

    for response in all_responses:
        yield OutboundMessage(
            NodeType.FARMER,
            Message("challenge_response", response),
            Delivery.RESPOND,
        )
def get_plot_signature(self, m: bytes32, plot_pk: G1Element) -> G2Element:
    """
    Return the aggregate (harvester + farmer) signature over ``m`` for the
    plot whose aggregate public key equals ``plot_pk``.

    Raises ValueError when no known plot matches.
    """
    farmer_sk = master_sk_to_farmer_sk(self.all_sks[0])
    for plot_info in self.plots.values():
        candidate_pk = ProofOfSpace.generate_plot_public_key(
            plot_info.local_sk.get_g1(), plot_info.farmer_public_key)
        if candidate_pk != plot_pk:
            continue
        # Both halves are signed against the aggregate key, then combined.
        harvester_half = AugSchemeMPL.sign(plot_info.local_sk, m, candidate_pk)
        farmer_half = AugSchemeMPL.sign(farmer_sk, m, candidate_pk)
        return AugSchemeMPL.aggregate([harvester_half, farmer_half])
    raise ValueError(f"Do not have key {plot_pk}")
def test_can_create_proof(self):
    """
    Tests that the chance of getting a correct proof is roughly 1/256.
    """
    num_trials = 40000
    successes = 0
    for _ in range(num_trials):
        # Fresh random challenge and plot id per trial.
        challenge_hash = token_bytes(32)
        plot_id = token_bytes(32)
        passed = ProofOfSpace.can_create_proof(plot_id, challenge_hash, 8)
        successes += 1 if passed else 0
    # Observed rate should be within tolerance of the expected 1/256.
    assert abs((successes * 256 / num_trials) - 1) < 0.3
def calculate_min_iters_from_iterations(
    proof_of_space: ProofOfSpace,
    difficulty: uint64,
    iterations: uint64,
) -> uint64:
    """
    Using the total number of iterations on a block (which is encoded in the block) along
    with other details, we can calculate the constant factor in iterations, which is not
    written into the block.

    Asserts that the derived constant factor is at least 1.
    """
    # Quality is derived (and verified) from the proof of space itself.
    quality: bytes32 = proof_of_space.verify_and_get_quality_string()
    # The difficulty-and-quality-dependent portion of the block's iterations;
    # subtracting it from the recorded total leaves the constant factor.
    iters_rounded = (int(difficulty) << 32) // quality_str_to_quality(
        quality, proof_of_space.size)
    min_iterations = uint64(iterations - iters_rounded)
    assert min_iterations >= 1
    return min_iterations
def get_plot_signature(self, header_data: HeaderData,
                       plot_pk: G1Element) -> Optional[G2Element]:
    """
    Return the aggregate (harvester + farmer) signature over the header
    data's hash for the plot whose aggregate public key equals ``plot_pk``,
    or None when no known plot matches.
    """
    farmer_sk = master_sk_to_farmer_sk(self.all_sks[0][0])
    for plot_info in self.plots.values():
        candidate_pk = ProofOfSpace.generate_plot_public_key(
            plot_info.local_sk.get_g1(), plot_info.farmer_public_key)
        if candidate_pk != plot_pk:
            continue
        msg = header_data.get_hash()
        # Both halves are signed against the aggregate key, then combined.
        harvester_half = AugSchemeMPL.sign(plot_info.local_sk, msg,
                                           candidate_pk)
        farmer_half = AugSchemeMPL.sign(farmer_sk, msg, candidate_pk)
        return AugSchemeMPL.aggregate([harvester_half, farmer_half])
    return None
def test_can_create_proof(self):
    """
    Tests that the chance of passing the plot filter is roughly
    1/target_filter.
    """
    num_trials = 50000
    target_filter = 2 ** DEFAULT_CONSTANTS.NUMBER_ZERO_BITS_PLOT_FILTER
    successes = 0
    for _ in range(num_trials):
        # Fresh random inputs per trial.
        challenge_hash = token_bytes(32)
        plot_id = token_bytes(32)
        sp_output = token_bytes(32)
        passed = ProofOfSpace.passes_plot_filter(
            DEFAULT_CONSTANTS, plot_id, challenge_hash, sp_output)
        successes += 1 if passed else 0
    # Observed rate should be within tolerance of 1/target_filter.
    assert abs((successes * target_filter / num_trials) - 1) < 0.3
def calculate_ips_from_iterations(
    proof_of_space: ProofOfSpace,
    difficulty: uint64,
    iterations: uint64,
    min_block_time: uint64,
) -> uint64:
    """
    Using the total number of iterations on a block (which is encoded in the block) along
    with other details, we can calculate the VDF speed (iterations per second) used to
    compute the constant factor in iterations, which is not written into the block.

    Asserts that the resulting rate is a whole number >= 1.
    """
    quality: bytes32 = proof_of_space.verify_and_get_quality()
    # Expected iterations grow with difficulty and quality, and shrink with
    # the expected plot size for this k; Decimal avoids float rounding error.
    dec_iters = Decimal(int(difficulty) << 32) * (
        _quality_to_decimal(quality) / _expected_plot_size(proof_of_space.size)
    )
    # Round up, so the remainder attributed to min_iterations is conservative.
    iters_rounded = int(dec_iters.to_integral_exact(rounding=ROUND_UP))
    min_iterations = uint64(iterations - iters_rounded)
    ips = min_iterations / min_block_time
    assert ips >= 1
    # The rate must divide evenly into an integer iterations-per-second.
    assert uint64(int(ips)) == ips
    return uint64(int(ips))
async def test_invalid_pos(self, initial_blockchain):
    """
    A block containing a tampered proof of space must be rejected as an
    invalid block.
    """
    blocks, b = initial_blockchain

    # Flip one byte of an otherwise valid proof to invalidate it.
    bad_pos = [i for i in blocks[9].header_block.proof_of_space.proof]
    bad_pos[0] = uint8((bad_pos[0] + 1) % 256)
    # Proof of space invalid
    block_bad = FullBlock(
        HeaderBlock(
            ProofOfSpace(
                blocks[9].header_block.proof_of_space.challenge_hash,
                blocks[9].header_block.proof_of_space.pool_pubkey,
                blocks[9].header_block.proof_of_space.plot_pubkey,
                blocks[9].header_block.proof_of_space.size,
                bad_pos,
            ),
            blocks[9].header_block.proof_of_time,
            blocks[9].header_block.challenge,
            blocks[9].header_block.header,
        ),
        blocks[9].body,
    )
    assert (await b.receive_block(block_bad)) == ReceiveBlockResult.INVALID_BLOCK
async def request_signatures(
        self, request: harvester_protocol.RequestSignatures):
    """
    The farmer requests a signature on the header hash, for one of the proofs that we found.
    A signature is created on the header hash using the harvester private key. This can also
    be used for pooling.
    """
    # The first 64 characters of the identifier are stripped to recover the
    # plot path (presumably a hex plot-id prefix — TODO confirm with caller).
    plot_filename = Path(request.plot_identifier[64:]).resolve()
    try:
        plot_info = self.harvester.provers[plot_filename]
    except KeyError:
        self.harvester.log.warning(
            f"KeyError plot {plot_filename} does not exist.")
        return
    local_sk = plot_info.local_sk
    agg_pk = ProofOfSpace.generate_plot_public_key(
        local_sk.get_g1(), plot_info.farmer_public_key)

    # This is only a partial signature. When combined with the farmer's half, it will
    # form a complete PrependSignature.
    message_signatures: List[Tuple[bytes32, G2Element]] = []
    for message in request.messages:
        signature: G2Element = AugSchemeMPL.sign(local_sk, message, agg_pk)
        message_signatures.append((message, signature))

    response: harvester_protocol.RespondSignatures = harvester_protocol.RespondSignatures(
        request.plot_identifier,
        request.challenge_hash,
        request.sp_hash,
        local_sk.get_g1(),
        plot_info.farmer_public_key,
        message_signatures,
    )

    msg = Message("respond_signatures", response)
    return msg
def main():
    """
    Script for creating plots and adding them to the plot config file.
    """
    root_path = DEFAULT_ROOT_PATH
    plot_config_filename = config_path_for_filename(root_path, "plots.yaml")
    key_config_filename = config_path_for_filename(root_path, "keys.yaml")

    parser = argparse.ArgumentParser(description="Chia plotting script.")
    parser.add_argument("-k", "--size", help="Plot size", type=int,
                        default=20)
    parser.add_argument("-n", "--num_plots", help="Number of plots",
                        type=int, default=10)
    parser.add_argument("-i", "--index", help="First plot index",
                        type=int, default=0)
    parser.add_argument("-p", "--pool_pub_key",
                        help="Hex public key of pool", type=str, default="")
    parser.add_argument(
        "-t",
        "--tmp_dir",
        help=
        "Temporary directory for plotting files (relative to final directory)",
        type=Path,
        default=Path("./plots.tmp"),
    )
    new_plots_root = path_from_root(
        root_path,
        load_config(root_path,
                    "config.yaml").get("harvester",
                                       {}).get("new_plot_root", "plots"),
    )
    parser.add_argument(
        "-d",
        "--final_dir",
        help="Final directory for plots (relative or absolute)",
        type=Path,
        default=new_plots_root,
    )

    # We need the keys file, to access pool keys (if the exist), and the sk_seed.
    args = parser.parse_args()
    if not key_config_filename.exists():
        raise RuntimeError("Keys not generated. Run chia-generate-keys")

    # The seed is what will be used to generate a private key for each plot
    key_config = load_config(root_path, key_config_filename)
    sk_seed: bytes = bytes.fromhex(key_config["sk_seed"])

    pool_pk: PublicKey
    if len(args.pool_pub_key) > 0:
        # Use the provided pool public key, useful for using an external pool
        pool_pk = PublicKey.from_bytes(bytes.fromhex(args.pool_pub_key))
    else:
        # Use the pool public key from the config, useful for solo farming
        pool_sk = PrivateKey.from_bytes(
            bytes.fromhex(key_config["pool_sks"][0]))
        pool_pk = pool_sk.get_public_key()

    print(
        f"Creating {args.num_plots} plots, from index {args.index} to "
        f"{args.index + args.num_plots - 1}, of size {args.size}, sk_seed {sk_seed.hex()} ppk {pool_pk}"
    )

    # The temporary plotting directory lives under the final directory.
    tmp_dir = args.final_dir / args.tmp_dir
    mkdir(tmp_dir)
    mkdir(args.final_dir)
    for i in range(args.index, args.index + args.num_plots):
        # Generate a sk based on the seed, plot size (k), and index
        sk: PrivateKey = PrivateKey.from_seed(sk_seed +
                                              args.size.to_bytes(1, "big") +
                                              i.to_bytes(4, "big"))

        # The plot seed is based on the pool and plot pks
        plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed(
            pool_pk, sk.get_public_key())
        filename: str = f"plot-{i}-{args.size}-{plot_seed}.dat"
        full_path: Path = args.final_dir / filename
        if not full_path.exists():
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(tmp_dir),
                str(args.final_dir),
                filename,
                args.size,
                bytes([]),
                plot_seed,
            )
        else:
            # NOTE(review): f-string has no placeholder — probably meant to
            # interpolate {full_path}; confirm before changing the message.
            print(f"Plot (unknown) already exists")

        # Updates the config if necessary.
        plot_config = load_config(root_path, plot_config_filename)
        plot_config_plots_new = deepcopy(plot_config.get("plots", []))
        relative_path = make_path_relative(full_path, root_path)
        # Register the plot only if neither its relative nor absolute path
        # is already present in the config.
        if (relative_path not in plot_config_plots_new
                and full_path not in plot_config_plots_new):
            plot_config_plots_new[str(full_path)] = {
                "sk": bytes(sk).hex(),
                "pool_pk": bytes(pool_pk).hex(),
            }
        plot_config["plots"].update(plot_config_plots_new)

        # Dumps the new config to disk.
        save_config(root_path, plot_config_filename, plot_config)
    # Best-effort cleanup: rmdir only succeeds when the tmp dir is empty.
    try:
        tmp_dir.rmdir()
    except Exception:
        print(f"warning: couldn't delete {tmp_dir}")
def _create_block(
    self,
    test_constants: ConsensusConstants,
    challenge_hash: bytes32,
    height: uint32,
    prev_header_hash: bytes32,
    prev_iters: uint64,
    prev_weight: uint128,
    timestamp: uint64,
    difficulty: int,
    min_iters: int,
    seed: bytes,
    genesis: bool = False,
    reward_puzzlehash: bytes32 = None,
    transactions: Program = None,
    aggsig: G2Element = None,
    fees: uint64 = uint64(0),
) -> FullBlock:
    """
    Creates a block with the specified details. Uses the stored plots to
    create a proof of space, and also evaluates the VDF for the proof of
    time.
    """
    selected_plot_info = None
    selected_proof_index = 0
    selected_quality: Optional[bytes] = None
    best_quality = 0
    # Deterministic plot ordering (sorted by path string) for reproducibility.
    plots = [
        pinfo for _, pinfo in sorted(list(self.plots.items()),
                                     key=lambda x: str(x[0]))
    ]
    if self.use_any_pos:
        random.seed(seed)
        for i in range(len(plots) * 3):
            # Allow passing in seed, to create reorgs and different chains
            seeded_pn = random.randint(0, len(plots) - 1)
            plot_info = plots[seeded_pn]
            plot_id = plot_info.prover.get_id()
            ccp = ProofOfSpace.can_create_proof(
                plot_id,
                challenge_hash,
                test_constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
            )
            if not ccp:
                continue
            qualities = plot_info.prover.get_qualities_for_challenge(
                challenge_hash)
            if len(qualities) > 0:
                selected_plot_info = plot_info
                selected_quality = qualities[0]
                break
    else:
        # Otherwise, pick the proof with the highest quality over all plots.
        for i in range(len(plots)):
            plot_info = plots[i]
            j = 0
            plot_id = plot_info.prover.get_id()
            ccp = ProofOfSpace.can_create_proof(
                plot_id,
                challenge_hash,
                test_constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
            )
            if not ccp:
                continue
            qualities = plot_info.prover.get_qualities_for_challenge(
                challenge_hash)
            for quality in qualities:
                qual_int = int.from_bytes(quality, "big", signed=False)
                if qual_int > best_quality:
                    best_quality = qual_int
                    selected_quality = quality
                    selected_plot_info = plot_info
                    selected_proof_index = j
                j += 1

    assert selected_plot_info is not None
    if selected_quality is None:
        raise RuntimeError("No proofs for this challenge")

    # Build the proof of space from the selected plot.
    proof_xs: bytes = selected_plot_info.prover.get_full_proof(
        challenge_hash, selected_proof_index)
    plot_pk = ProofOfSpace.generate_plot_public_key(
        selected_plot_info.local_sk.get_g1(),
        selected_plot_info.farmer_public_key,
    )
    proof_of_space: ProofOfSpace = ProofOfSpace(
        challenge_hash,
        selected_plot_info.pool_public_key,
        plot_pk,
        selected_plot_info.prover.get_size(),
        proof_xs,
    )
    number_iters: uint64 = pot_iterations.calculate_iterations(
        proof_of_space,
        difficulty,
        min_iters,
        test_constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
    )
    if self.real_plots:
        print(f"Performing {number_iters} VDF iterations")

    # Run the VDF and unpack the classgroup output and proof from the
    # fixed-width byte layout.
    int_size = (test_constants.DISCRIMINANT_SIZE_BITS + 16) >> 4
    result = prove(challenge_hash, test_constants.DISCRIMINANT_SIZE_BITS,
                   number_iters)
    output = ClassgroupElement(
        int512(int.from_bytes(
            result[0:int_size],
            "big",
            signed=True,
        )),
        int512(
            int.from_bytes(
                result[int_size:2 * int_size],
                "big",
                signed=True,
            )),
    )
    proof_bytes = result[2 * int_size:4 * int_size]
    proof_of_time = ProofOfTime(
        challenge_hash,
        number_iters,
        output,
        uint8(0),
        proof_bytes,
    )

    # Use the extension data to create different blocks based on header hash
    extension_data: bytes32 = bytes32(
        [random.randint(0, 255) for _ in range(32)])
    cost = uint64(0)
    fee_reward = uint64(block_rewards.calculate_base_fee(height) + fees)
    std_hash(std_hash(height))

    # Create filter
    byte_array_tx: List[bytes32] = []
    tx_additions: List[Coin] = []
    tx_removals: List[bytes32] = []
    if transactions:
        error, npc_list, _ = get_name_puzzle_conditions(transactions)
        additions: List[Coin] = additions_for_npc(npc_list)
        for coin in additions:
            tx_additions.append(coin)
            byte_array_tx.append(bytearray(coin.puzzle_hash))
        for npc in npc_list:
            tx_removals.append(npc.coin_name)
            byte_array_tx.append(bytearray(npc.coin_name))
    farmer_ph = self.farmer_ph
    pool_ph = self.pool_ph
    if reward_puzzlehash is not None:
        farmer_ph = reward_puzzlehash
        pool_ph = reward_puzzlehash
    byte_array_tx.append(bytearray(farmer_ph))
    byte_array_tx.append(bytearray(pool_ph))
    bip158: PyBIP158 = PyBIP158(byte_array_tx)
    encoded = bytes(bip158.GetEncoded())

    removal_merkle_set = MerkleSet()
    addition_merkle_set = MerkleSet()

    # Create removal Merkle set
    for coin_name in tx_removals:
        removal_merkle_set.add_already_hashed(coin_name)

    # Create addition Merkle set
    puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}
    cb_reward = calculate_block_reward(height)
    cb_coin = create_coinbase_coin(height, pool_ph, cb_reward)
    fees_coin = create_fees_coin(height, farmer_ph, fee_reward)
    for coin in tx_additions + [cb_coin, fees_coin]:
        if coin.puzzle_hash in puzzlehash_coin_map:
            puzzlehash_coin_map[coin.puzzle_hash].append(coin)
        else:
            puzzlehash_coin_map[coin.puzzle_hash] = [coin]

    # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
    for puzzle, coins in puzzlehash_coin_map.items():
        addition_merkle_set.add_already_hashed(puzzle)
        addition_merkle_set.add_already_hashed(hash_coin_list(coins))

    additions_root = addition_merkle_set.get_root()
    removal_root = removal_merkle_set.get_root()

    generator_hash = (transactions.get_tree_hash()
                      if transactions is not None else bytes32([0] * 32))
    filter_hash = std_hash(encoded)

    pool_target = PoolTarget(pool_ph, uint32(height))
    pool_target_signature = self.get_pool_key_signature(
        pool_target, proof_of_space.pool_public_key)
    assert pool_target_signature is not None
    final_aggsig: G2Element = pool_target_signature
    if aggsig is not None:
        final_aggsig = AugSchemeMPL.aggregate([final_aggsig, aggsig])

    header_data: HeaderData = HeaderData(
        height,
        prev_header_hash,
        timestamp,
        filter_hash,
        proof_of_space.get_hash(),
        uint128(prev_weight + difficulty),
        uint64(prev_iters + number_iters),
        additions_root,
        removal_root,
        farmer_ph,
        fee_reward,
        pool_target,
        final_aggsig,
        cost,
        extension_data,
        generator_hash,
    )

    header_hash_sig: G2Element = self.get_plot_signature(header_data, plot_pk)

    header: Header = Header(header_data, header_hash_sig)
    full_block: FullBlock = FullBlock(proof_of_space, proof_of_time, header,
                                      transactions, encoded)

    return full_block
async def validate_unfinished_block_header(
    constants: ConsensusConstants,
    headers: Dict[bytes32, Header],
    height_to_hash: Dict[uint32, bytes32],
    block_header: Header,
    proof_of_space: ProofOfSpace,
    prev_header_block: Optional[HeaderBlock],
    pre_validated: bool = False,
    pos_quality_string: bytes32 = None,
) -> Tuple[Optional[Err], Optional[uint64]]:
    """
    Block validation algorithm. Returns the number of VDF iterations that this block's
    proof of time must have, if the candidate block is fully valid (except for proof of
    time). The same as validate_block, but without proof of time and challenge validation.
    If the block is invalid, an error code is returned.

    Does NOT validate transactions and fees.
    """
    if not pre_validated:
        # 1. The hash of the proof of space must match header_data.proof_of_space_hash
        if proof_of_space.get_hash() != block_header.data.proof_of_space_hash:
            return (Err.INVALID_POSPACE_HASH, None)

        # 2. The coinbase signature must be valid, according the the pool public key
        # TODO: change numbers

        # 3. Check harvester signature of header data is valid based on harvester key
        validates = blspy.AugSchemeMPL.verify(
            proof_of_space.plot_public_key,
            block_header.data.get_hash(),
            block_header.plot_signature,
        )
        if not validates:
            return (Err.INVALID_PLOT_SIGNATURE, None)

    # 4. If not genesis, the previous block must exist
    if prev_header_block is not None and block_header.prev_header_hash not in headers:
        return (Err.DOES_NOT_EXTEND, None)

    # 5. If not genesis, the timestamp must be >= the average timestamp of last 11 blocks
    # and less than 2 hours in the future (if block height < 11, average all previous blocks).
    # Average is the sum, int diveded by the number of timestamps
    if prev_header_block is not None:
        last_timestamps: List[uint64] = []
        curr = prev_header_block.header
        # Walk back through ancestors collecting timestamps.
        while len(last_timestamps) < constants.NUMBER_OF_TIMESTAMPS:
            last_timestamps.append(curr.data.timestamp)
            fetched = headers.get(curr.prev_header_hash, None)
            if not fetched:
                break
            curr = fetched
        if len(last_timestamps) != constants.NUMBER_OF_TIMESTAMPS:
            # For blocks 1 to 10, average timestamps of all previous blocks
            assert curr.height == 0
        prev_time: uint64 = uint64(
            int(sum(last_timestamps) // len(last_timestamps)))
        if block_header.data.timestamp < prev_time:
            return (Err.TIMESTAMP_TOO_FAR_IN_PAST, None)
        if block_header.data.timestamp > time.time(
        ) + constants.MAX_FUTURE_TIME:
            return (Err.TIMESTAMP_TOO_FAR_IN_FUTURE, None)

    # 7. Extension data must be valid, if any is present

    # Compute challenge of parent
    challenge_hash: bytes32
    if prev_header_block is not None:
        challenge: Challenge = prev_header_block.challenge
        challenge_hash = challenge.get_hash()
        # 8. Check challenge hash of prev is the same as in pos
        if challenge_hash != proof_of_space.challenge_hash:
            return (Err.INVALID_POSPACE_CHALLENGE, None)

    # 10. The proof of space must be valid on the challenge
    if pos_quality_string is None:
        pos_quality_string = proof_of_space.verify_and_get_quality_string(
            constants.NUMBER_ZERO_BITS_CHALLENGE_SIG)
        if not pos_quality_string:
            return (Err.INVALID_POSPACE, None)

    if prev_header_block is not None:
        # 11. If not genesis, the height on the previous block must be one less than on this block
        if block_header.height != prev_header_block.height + 1:
            return (Err.INVALID_HEIGHT, None)
    else:
        # 12. If genesis, the height must be 0
        if block_header.height != 0:
            return (Err.INVALID_HEIGHT, None)

    # 13. The pool max height must be valid
    if (block_header.data.pool_target.max_height != 0
            and block_header.data.pool_target.max_height <
            block_header.height):
        return (Err.INVALID_POOL_TARGET, None)

    difficulty: uint64
    if prev_header_block is not None:
        difficulty = get_next_difficulty(constants, headers, height_to_hash,
                                         prev_header_block.header)
        min_iters = get_next_min_iters(constants, headers, height_to_hash,
                                       prev_header_block)
    else:
        # Genesis block: use the chain's starting parameters.
        difficulty = uint64(constants.DIFFICULTY_STARTING)
        min_iters = uint64(constants.MIN_ITERS_STARTING)

    # The proof of time for this block must have this many iterations.
    number_of_iters: uint64 = calculate_iterations_quality(
        pos_quality_string,
        proof_of_space.size,
        difficulty,
        min_iters,
    )

    assert count_significant_bits(difficulty) <= constants.SIGNIFICANT_BITS
    assert count_significant_bits(min_iters) <= constants.SIGNIFICANT_BITS

    if prev_header_block is not None:
        # 17. If not genesis, the total weight must be the parent weight + difficulty
        if block_header.weight != prev_header_block.weight + difficulty:
            return (Err.INVALID_WEIGHT, None)

        # 18. If not genesis, the total iters must be parent iters + number_iters
        if (block_header.data.total_iters !=
                prev_header_block.header.data.total_iters + number_of_iters):
            return (Err.INVALID_TOTAL_ITERS, None)
    else:
        # 19. If genesis, the total weight must be starting difficulty
        if block_header.weight != difficulty:
            return (Err.INVALID_WEIGHT, None)

        # 20. If genesis, the total iters must be number iters
        if block_header.data.total_iters != number_of_iters:
            return (Err.INVALID_TOTAL_ITERS, None)

    return (None, number_of_iters)
def main():
    """
    Script for creating plots and adding them to the plot config file.

    Parses plot size/count, key material and directories from the command
    line, derives one private key per plot from an sk_seed, writes each plot
    with DiskPlotter, and records the plot's secret key and pool public key
    in plots.yaml so the harvester can farm it.
    """
    root_path = DEFAULT_ROOT_PATH
    plot_config_filename = config_path_for_filename(root_path, "plots.yaml")

    parser = argparse.ArgumentParser(description="Chia plotting script.")
    parser.add_argument("-k", "--size", help="Plot size", type=int, default=26)
    parser.add_argument(
        "-n", "--num_plots", help="Number of plots", type=int, default=1
    )
    parser.add_argument(
        "-i", "--index", help="First plot index", type=int, default=None
    )
    parser.add_argument(
        "-p", "--pool_pub_key", help="Hex public key of pool", type=str, default=""
    )
    parser.add_argument(
        "-s", "--sk_seed", help="Secret key seed in hex", type=str, default=None
    )
    parser.add_argument(
        "-t",
        "--tmp_dir",
        help="Temporary directory for plotting files",
        type=Path,
        default=Path("."),
    )
    parser.add_argument(
        "-2",
        "--tmp2_dir",
        help="Second temporary directory for plotting files",
        type=Path,
        default=Path("."),
    )
    # Default final directory comes from the harvester section of config.yaml.
    new_plots_root = path_from_root(
        root_path,
        load_config(root_path, "config.yaml")
        .get("harvester", {})
        .get("new_plot_root", "plots"),
    )
    parser.add_argument(
        "-d",
        "--final_dir",
        help="Final directory for plots (relative or absolute)",
        type=Path,
        default=new_plots_root,
    )
    args = parser.parse_args()

    # -i only made sense when the seed was fixed; with a randomly generated
    # sk_seed it is rejected rather than silently ignored.
    if args.sk_seed is None and args.index is not None:
        log(
            f"You have specified the -i (index) argument without the -s (sk_seed) argument."
            f" The program has changes, so that the sk_seed is now generated randomly, so -i is no longer necessary."
            f" Please run the program without -i."
        )
        quit()

    if args.index is None:
        args.index = 0

    # The seed is what will be used to generate a private key for each plot
    if args.sk_seed is not None:
        sk_seed: bytes = bytes.fromhex(args.sk_seed)
        log(f"Using the provided sk_seed {sk_seed.hex()}.")
    else:
        sk_seed = token_bytes(32)
        log(
            f"Using sk_seed {sk_seed.hex()}. Note that sk seed is now generated randomly, as opposed "
            f"to from keys.yaml. If you want to use a specific seed, use the -s argument."
        )

    pool_pk: PublicKey
    if len(args.pool_pub_key) > 0:
        # Use the provided pool public key, useful for using an external pool
        pool_pk = PublicKey.from_bytes(bytes.fromhex(args.pool_pub_key))
    else:
        # Use the pool public key from the config, useful for solo farming
        keychain = Keychain()
        all_public_keys = keychain.get_all_public_keys()
        if len(all_public_keys) == 0:
            raise RuntimeError(
                "There are no private keys in the keychain, so we cannot create a plot. "
                "Please generate keys using 'chia keys generate' or pass in a pool pk with -p"
            )
        pool_pk = all_public_keys[0].get_public_key()

    log(
        f"Creating {args.num_plots} plots, from index {args.index} to "
        f"{args.index + args.num_plots - 1}, of size {args.size}, sk_seed {sk_seed.hex()} ppk {pool_pk}"
    )

    mkdir(args.tmp_dir)
    mkdir(args.tmp2_dir)
    mkdir(args.final_dir)
    finished_filenames = []
    for i in range(args.index, args.index + args.num_plots):
        # Generate a sk based on the seed, plot size (k), and index
        sk: PrivateKey = PrivateKey.from_seed(
            sk_seed + args.size.to_bytes(1, "big") + i.to_bytes(4, "big")
        )

        # The plot seed is based on the pool and plot pks
        plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed(
            pool_pk, sk.get_public_key()
        )
        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")
        filename: str = f"plot-k{args.size}-{dt_string}-{plot_seed}.dat"
        full_path: Path = args.final_dir / filename

        # Skip plots whose seed already appears in plots.yaml; the config is
        # re-read on every iteration so it reflects the latest saved state.
        plot_config = load_config(root_path, plot_config_filename)
        plot_config_plots_new = deepcopy(plot_config.get("plots", []))
        filenames = [Path(k).name for k in plot_config_plots_new.keys()]
        already_in_config = any(plot_seed.hex() in fname for fname in filenames)
        if already_in_config:
            log(f"Plot (unknown) already exists (in config)")
            continue

        if not full_path.exists():
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                bytes([]),
                plot_seed,
            )
            finished_filenames.append(filename)
        else:
            log(f"Plot (unknown) already exists")

        # Updates the config if necessary.
        plot_config = load_config(root_path, plot_config_filename)
        plot_config_plots_new = deepcopy(plot_config.get("plots", []))
        plot_config_plots_new[str(full_path)] = {
            "sk": bytes(sk).hex(),
            "pool_pk": bytes(pool_pk).hex(),
        }
        plot_config["plots"].update(plot_config_plots_new)

        # Dumps the new config to disk.
        save_config(root_path, plot_config_filename, plot_config)

    log("")
    log("Summary:")
    # Temp dirs are removed only if empty; a non-empty dir is left in place.
    try:
        args.tmp_dir.rmdir()
    except Exception:
        log(
            f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty."
        )
    try:
        args.tmp2_dir.rmdir()
    except Exception:
        log(
            f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty."
        )
    log(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log(filename)
def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Dict[Path, int],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    match_str: Optional[str],
    root_path: Path,
    open_no_key_filenames: bool = False,
) -> Tuple[bool, Dict[Path, PlotInfo], Dict[Path, int], Set[Path]]:
    """
    Scan the harvester's configured plot directories and (re)build the map of
    usable plots.

    Args:
        provers: previously loaded plots; an entry is reused when the file's
            mtime is unchanged, avoiding a re-open.
        failed_to_open_filenames: plot path -> unix time of last failed open;
            such files are retried at most once every 20 minutes.
        farmer_public_keys: if not None, plots whose farmer key is not in this
            list are flagged as "no key" plots.
        pool_public_keys: same, for the pool key.
        match_str: if not None, only paths containing this substring are loaded.
        root_path: chia root used to locate config.yaml.
        open_no_key_filenames: when True, plots with unrecognized keys are
            still opened and included (they are always recorded in the
            no-key set either way).

    Returns:
        (changed, new_provers, failed_to_open_filenames, no_key_filenames),
        where `changed` is True if at least one plot was newly opened.
    """
    start_time = time.time()
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    log.info(f'Searching directories {config_file["plot_directories"]}')

    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    total_size = 0
    new_provers: Dict[Path, PlotInfo] = {}

    if match_str is not None:
        log.info(
            f'Only loading plots that contain "{match_str}" in the file or directory name'
        )

    for filename in all_filenames:
        filename_str = str(filename)
        if match_str is not None and match_str not in filename_str:
            continue
        if filename.exists():
            if filename in failed_to_open_filenames and (
                time.time() - failed_to_open_filenames[filename]) < 1200:
                # Try once every 20 minutes to open the file
                continue
            if filename in provers:
                stat_info = filename.stat()
                # Unchanged mtime: keep the cached PlotInfo instead of re-opening.
                if stat_info.st_mtime == provers[filename].time_modified:
                    total_size += stat_info.st_size
                    new_provers[filename] = provers[filename]
                    continue
            try:
                prover = DiskProver(str(filename))
                expected_size = _expected_plot_size(
                    prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR / 2.0
                stat_info = filename.stat()

                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.

                # A file much smaller than expected for its k implies it is
                # still being copied; skip it (only enforced for k >= 30).
                if prover.get_size(
                ) >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot (unknown). Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    continue

                (
                    pool_public_key,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())
                # Only use plots that correct keys associated with them
                if farmer_public_keys is not None and farmer_public_key not in farmer_public_keys:
                    log.warning(
                        f"Plot (unknown) has a farmer public key that is not in the farmer's pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                if pool_public_keys is not None and pool_public_key not in pool_public_keys:
                    log.warning(
                        f"Plot (unknown) has a pool public key that is not in the farmer's pool pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
                # The plot public key combines the local (harvester) key and
                # the farmer key.
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key)
                new_provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    farmer_public_key,
                    plot_public_key,
                    local_sk,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                total_size += stat_info.st_size
                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file (unknown). {e} {tb}")
                # Record the failure time so this file is not retried for 20 minutes.
                failed_to_open_filenames[filename] = int(time.time())
                continue
            log.info(
                f"Found plot (unknown) of size {new_provers[filename].prover.get_size()}"
            )

    log.info(
        f"Loaded a total of {len(new_provers)} plots of size {total_size / (1024 ** 4)} TiB, in"
        f" {time.time()-start_time} seconds")
    return changed, new_provers, failed_to_open_filenames, no_key_filenames
async def test1(self, simulation):
    """
    End-to-end test of the farmer and harvester RPC servers.

    Starts both RPC servers, then exercises get_connections /
    get_latest_challenges on the farmer and get_plots / add_plot /
    delete_plot on the harvester (with and without an explicit pool pk).

    Fix: cleanup (closing both clients and both RPC servers) now runs in a
    single ``finally`` block. Previously it was duplicated between an
    ``except AssertionError`` branch and the success path, so any
    non-assertion exception leaked the servers and clients, and a failure
    during client creation raised NameError in the cleanup code.
    """
    test_rpc_port = uint16(21522)
    test_rpc_port_2 = uint16(21523)
    full_node_1, _, harvester, farmer, _, _, _ = simulation

    def stop_node_cb():
        pass

    def stop_node_cb_2():
        pass

    rpc_cleanup = await start_farmer_rpc_server(farmer, stop_node_cb, test_rpc_port)
    rpc_cleanup_2 = await start_harvester_rpc_server(
        harvester, stop_node_cb_2, test_rpc_port_2)
    client = await FarmerRpcClient.create(test_rpc_port)
    client_2 = await HarvesterRpcClient.create(test_rpc_port_2)
    try:
        await asyncio.sleep(3)
        assert len(await client.get_connections()) == 2

        challenges = await client.get_latest_challenges()
        assert len(challenges) > 0

        res = await client_2.get_plots()
        num_plots = len(res["plots"])
        assert num_plots > 0

        # Create a small test plot, add it through the RPC, verify the plot
        # count goes up by one, then delete it and verify the count drops back.
        plot_dir = get_plot_dir()
        plotter = DiskPlotter()
        pool_pk = harvester.pool_pubkeys[0]
        plot_sk = PrivateKey.from_seed(b"Farmer harvester rpc test seed")
        plot_seed = ProofOfSpace.calculate_plot_seed(
            pool_pk, plot_sk.get_public_key())
        filename = "test_farmer_harvester_rpc_plot.dat"
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename,
            18,
            b"genesis",
            plot_seed,
            2 * 1024,
        )
        await client_2.add_plot(str(plot_dir / filename), plot_sk)

        res_2 = await client_2.get_plots()
        assert len(res_2["plots"]) == num_plots + 1

        await client_2.delete_plot(str(plot_dir / filename))
        res_3 = await client_2.get_plots()
        assert len(res_3["plots"]) == num_plots

        # Same round-trip, but passing the pool public key explicitly.
        filename = "test_farmer_harvester_rpc_plot_2.dat"
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename,
            18,
            b"genesis",
            plot_seed,
            2 * 1024,
        )
        await client_2.add_plot(str(plot_dir / filename), plot_sk, pool_pk)
        assert len((await client_2.get_plots())["plots"]) == num_plots + 1
        await client_2.delete_plot(str(plot_dir / filename))
        assert len((await client_2.get_plots())["plots"]) == num_plots
    finally:
        # Checks that the RPC manages to stop the node, on success AND failure.
        client.close()
        client_2.close()
        await client.await_closed()
        await client_2.await_closed()
        await rpc_cleanup()
        await rpc_cleanup_2()
async def new_signage_point(
        self, new_challenge: harvester_protocol.NewSignagePoint,
        peer: WSChiaConnection):
    """
    The harvester receives a new signage point from the farmer, this happens at the start of each slot.
    The harvester does a few things:
    1. The harvester applies the plot filter for each of the plots, to select the proportion which are eligible
    for this signage point and challenge.
    2. The harvester gets the qualities for each plot. This is approximately 7 reads per plot which qualifies.
    Note that each plot may have 0, 1, 2, etc qualities for that challenge: but on average it will have 1.
    3. Checks the required_iters for each quality and the given signage point, to see which are eligible for
    inclusion (required_iters < sp_interval_iters).
    4. Looks up the full proof of space in the plot for each quality, approximately 64 reads per quality
    5. Returns the proof of space to the farmer
    """
    if len(self.harvester.pool_public_keys) == 0 or len(
            self.harvester.farmer_public_keys) == 0:
        # This means that we have not received the handshake yet
        return

    start = time.time()
    assert len(new_challenge.challenge_hash) == 32

    # Refresh plots to see if there are any new ones (at most every 2 minutes)
    if start - self.harvester.last_load_time > 120:
        await self.harvester.refresh_plots()
        self.harvester.last_load_time = time.time()

    loop = asyncio.get_running_loop()

    def blocking_lookup(
            filename: Path,
            plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
        # Uses the DiskProver object to lookup qualities. This is a blocking call,
        # so it should be run in a thread pool.
        try:
            sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
                plot_info.prover.get_id(),
                new_challenge.challenge_hash,
                new_challenge.sp_hash,
            )
            try:
                quality_strings = plot_info.prover.get_qualities_for_challenge(
                    sp_challenge_hash)
            except Exception as e:
                self.harvester.log.error(
                    f"Error using prover object. Reinitializing prover object. {e}"
                )
                try:
                    self.harvester.provers[filename] = dataclasses.replace(
                        plot_info, prover=DiskProver(str(filename)))
                    # NOTE(review): this retry still uses the OLD
                    # plot_info.prover (the one that just failed), not the
                    # freshly created DiskProver stored above — confirm intent.
                    quality_strings = plot_info.prover.get_qualities_for_challenge(
                        sp_challenge_hash)
                except Exception as e:
                    self.harvester.log.error(
                        f"Error reinitializing plot (unknown). {e}")
                    return []

            responses: List[Tuple[bytes32, ProofOfSpace]] = []
            if quality_strings is not None:
                # Found proofs of space (on average 1 is expected per plot)
                for index, quality_str in enumerate(quality_strings):
                    required_iters: uint64 = calculate_iterations_quality(
                        quality_str,
                        plot_info.prover.get_size(),
                        new_challenge.difficulty,
                        new_challenge.sp_hash,
                    )
                    sp_interval_iters = calculate_sp_interval_iters(
                        self.harvester.constants,
                        new_challenge.sub_slot_iters)
                    if required_iters < sp_interval_iters:
                        # Found a very good proof of space! will fetch the whole proof from disk,
                        # then send to farmer
                        try:
                            proof_xs = plot_info.prover.get_full_proof(
                                sp_challenge_hash, index)
                        except RuntimeError:
                            self.harvester.log.error(
                                f"Exception fetching full proof for (unknown)"
                            )
                            continue

                        plot_public_key = ProofOfSpace.generate_plot_public_key(
                            plot_info.local_sk.get_g1(),
                            plot_info.farmer_public_key)
                        responses.append((
                            quality_str,
                            ProofOfSpace(
                                sp_challenge_hash,
                                plot_info.pool_public_key,
                                None,
                                plot_public_key,
                                uint8(plot_info.prover.get_size()),
                                proof_xs,
                            ),
                        ))
            return responses
        except Exception as e:
            self.harvester.log.error(f"Unknown error: {e}")
            return []

    async def lookup_challenge(
        filename: Path, plot_info: PlotInfo
    ) -> List[harvester_protocol.NewProofOfSpace]:
        # Executes a DiskProverLookup in a thread pool, and returns responses
        all_responses: List[harvester_protocol.NewProofOfSpace] = []
        if self.harvester._is_shutdown:
            return []
        proofs_of_space_and_q: List[Tuple[
            bytes32, ProofOfSpace]] = await loop.run_in_executor(
                self.harvester.executor, blocking_lookup, filename, plot_info)
        for quality_str, proof_of_space in proofs_of_space_and_q:
            all_responses.append(
                harvester_protocol.NewProofOfSpace(
                    new_challenge.challenge_hash,
                    new_challenge.sp_hash,
                    # Plot identifier: quality hex concatenated with the
                    # resolved plot path.
                    quality_str.hex() + str(filename.resolve()),
                    proof_of_space,
                    new_challenge.signage_point_index,
                ))
        return all_responses

    awaitables = []
    for try_plot_filename, try_plot_info in self.harvester.provers.items():
        if try_plot_filename.exists():
            # Passes the plot filter (does not check sp filter yet though, since we have not reached sp)
            # This is being executed at the beginning of the slot
            if ProofOfSpace.passes_plot_filter(
                    self.harvester.constants,
                    try_plot_info.prover.get_id(),
                    new_challenge.challenge_hash,
                    new_challenge.sp_hash,
            ):
                awaitables.append(
                    lookup_challenge(try_plot_filename, try_plot_info))

    # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
    total_proofs_found = 0
    for sublist_awaitable in asyncio.as_completed(awaitables):
        for response in await sublist_awaitable:
            total_proofs_found += 1
            msg = Message("new_proof_of_space", response)
            await peer.send_message(msg)
    self.harvester.log.info(
        f"{len(awaitables)} plots were eligible for farming {new_challenge.challenge_hash.hex()[:10]}..."
        f" Found {total_proofs_found} proofs. Time: {time.time() - start}. "
        f"Total {len(self.harvester.provers)} plots")
def blocking_lookup(
        filename: Path,
        plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
    """
    Look up proof-of-space qualities (and full proofs for winning qualities)
    for a single plot. Blocking; intended to run in a thread pool via
    run_in_executor. Reads ``new_challenge`` and ``self`` from the enclosing
    scope.

    Returns a list of (quality_string, ProofOfSpace) pairs, or [] on error.
    """
    try:
        sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
            plot_info.prover.get_id(),
            new_challenge.challenge_hash,
            new_challenge.sp_hash,
        )
        try:
            quality_strings = plot_info.prover.get_qualities_for_challenge(
                sp_challenge_hash)
        except Exception as e:
            self.harvester.log.error(
                f"Error using prover object. Reinitializing prover object. {e}"
            )
            try:
                # Re-open the plot file and retry with the NEW prover.
                # (Fix: previously the retry reused plot_info.prover — the
                # prover that had just failed — so reinitialization had no
                # effect; compare request_proof_of_space, which retries via
                # the stored new prover.)
                reinitialized = dataclasses.replace(
                    plot_info, prover=DiskProver(str(filename)))
                self.harvester.provers[filename] = reinitialized
                plot_info = reinitialized
                quality_strings = plot_info.prover.get_qualities_for_challenge(
                    sp_challenge_hash)
            except Exception as e:
                self.harvester.log.error(
                    f"Error reinitializing plot (unknown). {e}")
                return []

        responses: List[Tuple[bytes32, ProofOfSpace]] = []
        if quality_strings is not None:
            # Found proofs of space (on average 1 is expected per plot)
            for index, quality_str in enumerate(quality_strings):
                required_iters: uint64 = calculate_iterations_quality(
                    quality_str,
                    plot_info.prover.get_size(),
                    new_challenge.difficulty,
                    new_challenge.sp_hash,
                )
                sp_interval_iters = calculate_sp_interval_iters(
                    self.harvester.constants, new_challenge.sub_slot_iters)
                if required_iters < sp_interval_iters:
                    # Found a very good proof of space! will fetch the whole proof from disk,
                    # then send to farmer
                    try:
                        proof_xs = plot_info.prover.get_full_proof(
                            sp_challenge_hash, index)
                    except RuntimeError:
                        self.harvester.log.error(
                            f"Exception fetching full proof for (unknown)"
                        )
                        continue

                    plot_public_key = ProofOfSpace.generate_plot_public_key(
                        plot_info.local_sk.get_g1(),
                        plot_info.farmer_public_key)
                    responses.append((
                        quality_str,
                        ProofOfSpace(
                            sp_challenge_hash,
                            plot_info.pool_public_key,
                            None,
                            plot_public_key,
                            uint8(plot_info.prover.get_size()),
                            proof_xs,
                        ),
                    ))
        return responses
    except Exception as e:
        self.harvester.log.error(f"Unknown error: {e}")
        return []
async def new_challenge(self, new_challenge: harvester_protocol.NewChallenge):
    """
    The harvester receives a new challenge from the farmer, and looks up the quality
    string for any proofs of space that are found in the plots. If proofs are found, a
    ChallengeResponse message is sent for each of the proofs found.
    """
    if len(self.pool_public_keys) == 0 or len(
            self.farmer_public_keys) == 0:
        # Handshake not received yet: remember the challenge (most recent
        # first, keeping at most 6 entries) so it can be processed later.
        self.cached_challenges = self.cached_challenges[:5]
        self.cached_challenges.insert(0, new_challenge)
        return

    start = time.time()
    assert len(new_challenge.challenge_hash) == 32

    # Refresh plots to see if there are any new ones
    await self._refresh_plots()

    loop = asyncio.get_running_loop()

    def blocking_lookup(filename: Path,
                        prover: DiskProver) -> Optional[List]:
        # Uses the DiskProver object to lookup qualities. This is a blocking call,
        # so it should be run in a threadpool.
        try:
            quality_strings = prover.get_qualities_for_challenge(
                new_challenge.challenge_hash)
        except RuntimeError:
            log.error(
                "Error using prover object. Reinitializing prover object.")
            try:
                # NOTE(review): the fresh prover is stored on self.prover (a
                # single attribute), not back into self.provers[filename], so
                # later lookups for this plot still get the old prover —
                # confirm whether self.provers should be updated instead.
                self.prover = DiskProver(str(filename))
                quality_strings = self.prover.get_qualities_for_challenge(
                    new_challenge.challenge_hash)
            except RuntimeError:
                log.error(
                    f"Retry-Error using prover object on (unknown). Giving up."
                )
                quality_strings = None
        return quality_strings

    async def lookup_challenge(
        filename: Path, prover: DiskProver
    ) -> List[harvester_protocol.ChallengeResponse]:
        # Executes a DiskProverLookup in a threadpool, and returns responses
        all_responses: List[harvester_protocol.ChallengeResponse] = []
        quality_strings = await loop.run_in_executor(
            self.executor, blocking_lookup, filename, prover)
        if quality_strings is not None:
            # One ChallengeResponse per quality found in this plot.
            for index, quality_str in enumerate(quality_strings):
                response: harvester_protocol.ChallengeResponse = harvester_protocol.ChallengeResponse(
                    new_challenge.challenge_hash,
                    str(filename),
                    uint8(index),
                    quality_str,
                    prover.get_size(),
                )
                all_responses.append(response)
        return all_responses

    awaitables = []
    for filename, plot_info in self.provers.items():
        # Plot filter: only a fraction of plots are eligible per challenge.
        if ProofOfSpace.can_create_proof(
                plot_info.prover.get_id(),
                new_challenge.challenge_hash,
                self.constants["NUMBER_ZERO_BITS_CHALLENGE_SIG"],
        ):
            awaitables.append(lookup_challenge(filename, plot_info.prover))

    # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
    total_proofs_found = 0
    for sublist_awaitable in asyncio.as_completed(awaitables):
        for response in await sublist_awaitable:
            total_proofs_found += 1
            yield OutboundMessage(
                NodeType.FARMER,
                Message("challenge_response", response),
                Delivery.RESPOND,
            )
    log.info(
        f"{len(awaitables)} plots were eligible for farming {new_challenge.challenge_hash.hex()[:10]}..."
        f" Found {total_proofs_found} proofs. Time: {time.time() - start}. "
        f"Total {len(self.provers)} plots")
def main():
    """
    Script for creating plots and adding them to the plot config file.

    Derives one private key per plot from the sk_seed stored in the key
    config file, creates each plot with DiskPlotter, and records the plot's
    secret key and pool public key in the plot config file.
    """
    parser = argparse.ArgumentParser(description="Chia plotting script.")
    parser.add_argument("-k", "--size", help="Plot size", type=int, default=20)
    parser.add_argument("-n",
                        "--num_plots",
                        help="Number of plots",
                        type=int,
                        default=10)
    parser.add_argument("-p",
                        "--pool_pub_key",
                        help="Hex public key of pool",
                        type=str,
                        default="")
    parser.add_argument(
        "-t",
        "--tmp_dir",
        help="Temporary directory for plotting files (relative or absolute)",
        type=str,
        default="./plots",
    )
    parser.add_argument(
        "-d",
        "--final_dir",
        help="Final directory for plots (relative or absolute)",
        type=str,
        default="./plots",
    )

    # We need the keys file, to access pool keys (if they exist), and the sk_seed.
    args = parser.parse_args()
    # NOTE: key_config_filename and plot_config_filename are module-level
    # names defined elsewhere in this file.
    if not os.path.isfile(key_config_filename):
        raise RuntimeError(
            "Keys not generated. Run python3 ./scripts/regenerate_keys.py.")

    # The seed is what will be used to generate a private key for each plot
    key_config = safe_load(open(key_config_filename, "r"))
    sk_seed: bytes = bytes.fromhex(key_config["sk_seed"])

    pool_pk: PublicKey
    if len(args.pool_pub_key) > 0:
        # Use the provided pool public key, useful for using an external pool
        pool_pk = PublicKey.from_bytes(bytes.fromhex(args.pool_pub_key))
    else:
        # Use the pool public key from the config, useful for solo farming
        pool_sk = PrivateKey.from_bytes(
            bytes.fromhex(key_config["pool_sks"][0]))
        pool_pk = pool_sk.get_public_key()

    print(
        f"Creating {args.num_plots} plots of size {args.size}, sk_seed {sk_seed.hex()} ppk {pool_pk}"
    )

    for i in range(args.num_plots):
        # Generate a sk based on the seed, plot size (k), and index
        sk: PrivateKey = PrivateKey.from_seed(
            sk_seed + args.size.to_bytes(1, "big") + i.to_bytes(4, "big"))

        # The plot seed is based on the pool and plot pks
        plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed(
            pool_pk, sk.get_public_key())
        filename: str = f"plot-{i}-{args.size}-{plot_seed}.dat"
        full_path: str = os.path.join(args.final_dir, filename)
        if os.path.isfile(full_path):
            print(f"Plot (unknown) already exists")
        else:
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(args.tmp_dir, args.final_dir, filename,
                                     args.size, bytes([]), plot_seed)

        # Updates the config if necessary.
        if os.path.isfile(plot_config_filename):
            plot_config = safe_load(open(plot_config_filename, "r"))
        else:
            plot_config = {"plots": {}}
        plot_config_plots_new = deepcopy(plot_config["plots"])
        if full_path not in plot_config_plots_new:
            plot_config_plots_new[full_path] = {
                "sk": bytes(sk).hex(),
                "pool_pk": bytes(pool_pk).hex(),
            }
        plot_config["plots"].update(plot_config_plots_new)

        # Dumps the new config to disk.
        with open(plot_config_filename, "w") as f:
            safe_dump(plot_config, f)
def create_plots(args, root_path, use_datetime=True, test_private_keys: Optional[List] = None):
    """
    Create ``args.num`` plots and register their final directory with the
    harvester config.

    Args:
        args: parsed CLI namespace (size, num, tmp/tmp2/final dirs, key
            overrides, plotter tuning options, plotid/memo debug overrides).
        root_path: chia root used to locate and update config.yaml.
        use_datetime: when True, embed a timestamp in the plot filename.
        test_private_keys: if given, one PrivateKey per plot (tests only);
            otherwise a fresh master key is generated per plot.

    Fix: when ``args.pool_public_key`` is provided, the hex string is now
    deserialized with ``G1Element.from_bytes`` — previously the raw bytes
    were assigned directly, which does not satisfy the declared G1Element
    type (contrast the farmer_public_key branch) and breaks
    ``ProofOfSpace.calculate_plot_id_pk`` / ``bytes(pool_public_key)`` below.
    """
    config_filename = config_path_for_filename(root_path, "config.yaml")
    config = load_config(root_path, config_filename)

    if args.tmp2_dir is None:
        args.tmp2_dir = args.tmp_dir

    farmer_public_key: G1Element
    if args.farmer_public_key is not None:
        farmer_public_key = G1Element.from_bytes(
            bytes.fromhex(args.farmer_public_key))
    else:
        farmer_public_key = get_farmer_public_key(args.alt_fingerprint)

    pool_public_key: G1Element
    if args.pool_public_key is not None:
        # Deserialize into a G1Element, mirroring the farmer key branch.
        pool_public_key = G1Element.from_bytes(
            bytes.fromhex(args.pool_public_key))
    else:
        pool_public_key = get_pool_public_key(args.alt_fingerprint)
    if args.num is not None:
        num = args.num
    else:
        num = 1

    if args.size < config["min_mainnet_k_size"] and test_private_keys is None:
        log.warning(
            f"Creating plots with size k={args.size}, which is less than the minimum required for mainnet"
        )
    if args.size < 22:
        log.warning("k under 22 is not supported. Increasing k to 22")
        args.size = 22
    log.info(
        f"Creating {num} plots of size {args.size}, pool public key: "
        f"{bytes(pool_public_key).hex()} farmer public key: {bytes(farmer_public_key).hex()}"
    )

    # Track which temp dirs we created, so we only try to remove those.
    tmp_dir_created = False
    if not args.tmp_dir.exists():
        mkdir(args.tmp_dir)
        tmp_dir_created = True

    tmp2_dir_created = False
    if not args.tmp2_dir.exists():
        mkdir(args.tmp2_dir)
        tmp2_dir_created = True

    mkdir(args.final_dir)

    finished_filenames = []
    for i in range(num):
        # Generate a random master secret key
        if test_private_keys is not None:
            assert len(test_private_keys) == num
            sk: PrivateKey = test_private_keys[i]
        else:
            sk = AugSchemeMPL.key_gen(token_bytes(32))

        # The plot public key is the combination of the harvester and farmer keys
        plot_public_key = ProofOfSpace.generate_plot_public_key(
            master_sk_to_local_sk(sk).get_g1(), farmer_public_key)

        # The plot id is based on the harvester, farmer, and pool keys
        plot_id: bytes32 = ProofOfSpace.calculate_plot_id_pk(
            pool_public_key, plot_public_key)
        if args.plotid is not None:
            log.info(f"Debug plot ID: {args.plotid}")
            plot_id = bytes32(bytes.fromhex(args.plotid))

        plot_memo: bytes32 = stream_plot_info(pool_public_key,
                                              farmer_public_key, sk)
        if args.memo is not None:
            log.info(f"Debug memo: {args.memo}")
            plot_memo = bytes.fromhex(args.memo)

        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")

        if use_datetime:
            filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
        else:
            filename = f"plot-k{args.size}-{plot_id}.plot"
        full_path: Path = args.final_dir / filename

        resolved_final_dir: str = str(Path(args.final_dir).resolve())
        plot_directories_list: str = config["harvester"]["plot_directories"]

        if args.exclude_final_dir:
            log.info(
                f"NOT adding directory {resolved_final_dir} to harvester for farming"
            )
            if resolved_final_dir in plot_directories_list:
                log.warning(
                    f"Directory {resolved_final_dir} already exists for harvester, please remove it manually"
                )
        else:
            if resolved_final_dir not in plot_directories_list:
                # Adds the directory to the plot directories if it is not present
                log.info(
                    f"Adding directory {resolved_final_dir} to harvester for farming"
                )
                config = add_plot_directory(resolved_final_dir, root_path)

        if not full_path.exists():
            log.info(f"Starting plot {i + 1}/{num}")
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                plot_memo,
                plot_id,
                args.buffer,
                args.buckets,
                args.stripe_size,
                args.num_threads,
                args.nobitfield,
            )
            finished_filenames.append(filename)
        else:
            log.info(f"Plot (unknown) already exists")

    log.info("Summary:")

    # Temp dirs are removed only if this run created them and they are empty.
    if tmp_dir_created:
        try:
            args.tmp_dir.rmdir()
        except Exception:
            log.info(
                f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty."
            )

    if tmp2_dir_created:
        try:
            args.tmp2_dir.rmdir()
        except Exception:
            log.info(
                f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty."
            )

    log.info(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log.info(filename)