def process_file(file_path: Path) -> Optional[PlotInfo]:
    if not self._refreshing_enabled:
        return None
    filename_str = str(file_path)
    if self.match_str is not None and self.match_str not in filename_str:
        return None
    if (
        file_path in self.failed_to_open_filenames
        and (time.time() - self.failed_to_open_filenames[file_path])
        < self.refresh_parameter.retry_invalid_seconds
    ):
        # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
        return None

    if file_path in self.plots:
        return self.plots[file_path]

    entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
    if entry is not None:
        loaded_parent, duplicates = entry
        if str(file_path.parent) in duplicates:
            log.debug(f"Skip duplicated plot {str(file_path)}")
            return None
    try:
        if not file_path.exists():
            return None

        prover = DiskProver(str(file_path))

        log.debug(f"process_file {str(file_path)}")

        expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
        stat_info = file_path.stat()

        # TODO: consider checking if the file was just written to (which would mean that the file is still
        # being copied). A segfault might happen in this edge case.

        if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
            log.warning(
                f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024 ** 3)} GiB, but expected"
                f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
            )
            return None

        cache_entry = self.cache.get(prover.get_id())
        if cache_entry is None:
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that have the correct keys associated with them
            if farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None

            pool_public_key: Optional[G1Element] = None
            pool_contract_puzzle_hash: Optional[bytes32] = None
            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None

            # If a plot is in `no_key_filenames` the keys were missing in earlier refresh cycles. We can remove
            # the current plot from that list if it's in there since we passed the key checks above.
            if file_path in self.no_key_filenames:
                self.no_key_filenames.remove(file_path)

            local_sk = master_sk_to_local_sk(local_master_sk)

            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
            )

            cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
            self.cache.update(prover.get_id(), cache_entry)

        with self.plot_filename_paths_lock:
            paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
            if paths is None:
                paths = (str(Path(prover.get_filename()).parent), set())
                self.plot_filename_paths[file_path.name] = paths
            else:
                paths[1].add(str(Path(prover.get_filename()).parent))
                log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
                return None

        new_plot_info: PlotInfo = PlotInfo(
            prover,
            cache_entry.pool_public_key,
            cache_entry.pool_contract_puzzle_hash,
            cache_entry.plot_public_key,
            stat_info.st_size,
            stat_info.st_mtime,
        )

        with counter_lock:
            result.loaded.append(new_plot_info)

        if file_path in self.failed_to_open_filenames:
            del self.failed_to_open_filenames[file_path]

    except Exception as e:
        tb = traceback.format_exc()
        log.error(f"Failed to open file {file_path}. {e} {tb}")
        self.failed_to_open_filenames[file_path] = int(time.time())
        return None
    log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")

    if self.show_memo:
        plot_memo: bytes32
        if pool_contract_puzzle_hash is None:
            plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
        else:
            plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
        plot_memo_str: str = plot_memo.hex()
        log.info(f"Memo: {plot_memo_str}")

    return new_plot_info
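
# The size check in process_file above compares the on-disk size against 98% of the
# expected size for the plot's k value. A minimal sketch of that arithmetic, assuming
# the `_expected_plot_size` formula ((2k + 1) * 2^(k - 1) bytes) and
# UI_ACTUAL_SPACE_CONSTANT_FACTOR = 0.762 from chia's pos_quality module; treat both
# constants as assumptions, not a definitive reimplementation:

UI_ACTUAL_SPACE_CONSTANT_FACTOR = 0.762  # assumed constant


def _expected_plot_size(k: int) -> int:
    # Expected raw plot size in bytes for a given k value
    return ((2 * k) + 1) * (2 ** (k - 1))


def passes_size_check(k: int, st_size: int) -> bool:
    # Mirrors the guard above: only plots with k >= 30 are checked, and a file
    # smaller than 98% of the expected size is assumed to still be copying.
    expected = _expected_plot_size(k) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
    return k < 30 or st_size >= 0.98 * expected


# Example: under these assumed constants a k=32 plot is expected to be ~99 GiB on disk
print(_expected_plot_size(32) * UI_ACTUAL_SPACE_CONSTANT_FACTOR / (1024 ** 3))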
async def test1(self, simulation):
    test_rpc_port = uint16(21522)
    test_rpc_port_2 = uint16(21523)
    harvester, farmer_api = simulation

    def stop_node_cb():
        pass

    def stop_node_cb_2():
        pass

    config = bt.config
    hostname = config["self_hostname"]
    daemon_port = config["daemon_port"]

    farmer_rpc_api = FarmerRpcApi(farmer_api.farmer)
    harvester_rpc_api = HarvesterRpcApi(harvester)

    rpc_cleanup = await start_rpc_server(
        farmer_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port,
        stop_node_cb,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    rpc_cleanup_2 = await start_rpc_server(
        harvester_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port_2,
        stop_node_cb_2,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )

    try:
        client = await FarmerRpcClient.create(self_hostname, test_rpc_port, bt.root_path, config)
        client_2 = await HarvesterRpcClient.create(self_hostname, test_rpc_port_2, bt.root_path, config)

        async def have_connections():
            return len(await client.get_connections()) > 0

        await time_out_assert(15, have_connections, True)

        assert (await client.get_signage_point(std_hash(b"2"))) is None
        assert len(await client.get_signage_points()) == 0

        async def have_signage_points():
            return len(await client.get_signage_points()) > 0

        sp = farmer_protocol.NewSignagePoint(
            std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
        )
        await farmer_api.new_signage_point(sp)
        await time_out_assert(5, have_signage_points, True)
        assert (await client.get_signage_point(std_hash(b"2"))) is not None

        async def have_plots():
            return len((await client_2.get_plots())["plots"]) > 0

        await time_out_assert(5, have_plots, True)

        res = await client_2.get_plots()
        num_plots = len(res["plots"])
        assert num_plots > 0
        plot_dir = get_plot_dir() / "subdir"
        plot_dir.mkdir(parents=True, exist_ok=True)
        plot_dir_sub = get_plot_dir() / "subdir" / "subsubdir"
        plot_dir_sub.mkdir(parents=True, exist_ok=True)
        plotter = DiskPlotter()
        filename = "test_farmer_harvester_rpc_plot.plot"
        filename_2 = "test_farmer_harvester_rpc_plot2.plot"
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename,
            18,
            stream_plot_info_pk(bt.pool_pk, bt.farmer_pk, AugSchemeMPL.key_gen(bytes([4] * 32))),
            token_bytes(32),
            128,
            0,
            2000,
            0,
            False,
        )

        # Making a plot with a puzzle hash encoded into it instead of pk
        plot_id_2 = token_bytes(32)
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )

        # Making the same plot, in a different dir. This should not be farmed
        plotter.create_plot_disk(
            str(plot_dir_sub),
            str(plot_dir_sub),
            str(plot_dir_sub),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )

        res_2 = await client_2.get_plots()
        assert len(res_2["plots"]) == num_plots

        # Test farmer get_harvesters
        async def test_get_harvesters():
            farmer_res = await client.get_harvesters()
            if len(list(farmer_res["harvesters"])) != 1:
                return False
            if len(list(farmer_res["harvesters"][0]["plots"])) != num_plots:
                return False
            return True

        await time_out_assert(30, test_get_harvesters)

        expected_result: PlotRefreshResult = PlotRefreshResult()

        def test_refresh_callback(refresh_result: PlotRefreshResult):
            assert refresh_result.loaded_plots == expected_result.loaded_plots
            assert refresh_result.removed_plots == expected_result.removed_plots
            assert refresh_result.processed_files == expected_result.processed_files
            assert refresh_result.remaining_files == expected_result.remaining_files

        harvester.plot_manager.set_refresh_callback(test_refresh_callback)

        async def test_case(
            trigger, expect_loaded, expect_removed, expect_processed, expected_directories, expect_total_plots
        ):
            expected_result.loaded_plots = expect_loaded
            expected_result.removed_plots = expect_removed
            expected_result.processed_files = expect_processed
            await trigger
            harvester.plot_manager.trigger_refresh()
            assert len(await client_2.get_plot_directories()) == expected_directories
            await time_out_assert(5, harvester.plot_manager.needs_refresh, value=False)
            result = await client_2.get_plots()
            assert len(result["plots"]) == expect_total_plots
            assert len(harvester.plot_manager.cache) == expect_total_plots
            assert len(harvester.plot_manager.failed_to_open_filenames) == 0

        # Add plot_dir with two new plots
        await test_case(
            client_2.add_plot_directory(str(plot_dir)),
            expect_loaded=2,
            expect_removed=0,
            expect_processed=2,
            expected_directories=2,
            expect_total_plots=num_plots + 2,
        )
        # Add plot_dir_sub with one duplicate
        await test_case(
            client_2.add_plot_directory(str(plot_dir_sub)),
            expect_loaded=0,
            expect_removed=0,
            expect_processed=1,
            expected_directories=3,
            expect_total_plots=num_plots + 2,
        )
        # Delete one plot
        await test_case(
            client_2.delete_plot(str(plot_dir / filename)),
            expect_loaded=0,
            expect_removed=1,
            expect_processed=0,
            expected_directories=3,
            expect_total_plots=num_plots + 1,
        )
        # Remove directory with the duplicate
        await test_case(
            client_2.remove_plot_directory(str(plot_dir_sub)),
            expect_loaded=0,
            expect_removed=1,
            expect_processed=0,
            expected_directories=2,
            expect_total_plots=num_plots + 1,
        )
        # Re-add the directory with the duplicate for other tests
        await test_case(
            client_2.add_plot_directory(str(plot_dir_sub)),
            expect_loaded=0,
            expect_removed=0,
            expect_processed=1,
            expected_directories=3,
            expect_total_plots=num_plots + 1,
        )
        # Remove the directory which has the duplicated plot loaded. This removes the duplicated plot from plot_dir
        # and in the same run loads the plot from plot_dir_sub which is no longer seen as a duplicate.
        await test_case(
            client_2.remove_plot_directory(str(plot_dir)),
            expect_loaded=1,
            expect_removed=1,
            expect_processed=1,
            expected_directories=2,
            expect_total_plots=num_plots + 1,
        )
        # Re-add the directory. Now the plot seen as a duplicate is from plot_dir, not from plot_dir_sub like before
        await test_case(
            client_2.add_plot_directory(str(plot_dir)),
            expect_loaded=0,
            expect_removed=0,
            expect_processed=1,
            expected_directories=3,
            expect_total_plots=num_plots + 1,
        )
        # Remove the duplicated plot
        await test_case(
            client_2.delete_plot(str(plot_dir / filename_2)),
            expect_loaded=0,
            expect_removed=1,
            expect_processed=0,
            expected_directories=3,
            expect_total_plots=num_plots + 1,
        )
        # Remove the directory with the loaded plot which is no longer a duplicate
        await test_case(
            client_2.remove_plot_directory(str(plot_dir_sub)),
            expect_loaded=0,
            expect_removed=1,
            expect_processed=0,
            expected_directories=2,
            expect_total_plots=num_plots,
        )
        # Remove the directory which contains all other plots
        await test_case(
            client_2.remove_plot_directory(str(get_plot_dir())),
            expect_loaded=0,
            expect_removed=20,
            expect_processed=0,
            expected_directories=1,
            expect_total_plots=0,
        )
        # Recover the plots to test caching
        # First make sure the cache gets written if required and new plots are loaded
        await test_case(
            client_2.add_plot_directory(str(get_plot_dir())),
            expect_loaded=20,
            expect_removed=0,
            expect_processed=20,
            expected_directories=2,
            expect_total_plots=20,
        )
        assert harvester.plot_manager.cache.path().exists()
        unlink(harvester.plot_manager.cache.path())
        # Should not write the cache again on shutdown because it didn't change
        assert not harvester.plot_manager.cache.path().exists()
        harvester.plot_manager.stop_refreshing()
        assert not harvester.plot_manager.cache.path().exists()
        # Manually trigger `save_cache` and make sure it creates a new cache file
        harvester.plot_manager.cache.save()
        assert harvester.plot_manager.cache.path().exists()

        expected_result.loaded_plots = 20
        expected_result.removed_plots = 0
        expected_result.processed_files = 20
        expected_result.remaining_files = 0
        plot_manager: PlotManager = PlotManager(harvester.root_path, test_refresh_callback)
        plot_manager.start_refreshing()
        assert len(harvester.plot_manager.cache) == len(plot_manager.cache)
        await time_out_assert(5, plot_manager.needs_refresh, value=False)
        for path, plot_info in harvester.plot_manager.plots.items():
            assert path in plot_manager.plots
            assert plot_manager.plots[path].prover.get_filename() == plot_info.prover.get_filename()
            assert plot_manager.plots[path].prover.get_id() == plot_info.prover.get_id()
            assert plot_manager.plots[path].prover.get_memo() == plot_info.prover.get_memo()
            assert plot_manager.plots[path].prover.get_size() == plot_info.prover.get_size()
            assert plot_manager.plots[path].pool_public_key == plot_info.pool_public_key
            assert plot_manager.plots[path].pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash
            assert plot_manager.plots[path].plot_public_key == plot_info.plot_public_key
            assert plot_manager.plots[path].file_size == plot_info.file_size
            assert plot_manager.plots[path].time_modified == plot_info.time_modified

        assert harvester.plot_manager.plot_filename_paths == plot_manager.plot_filename_paths
        assert harvester.plot_manager.failed_to_open_filenames == plot_manager.failed_to_open_filenames
        assert harvester.plot_manager.no_key_filenames == plot_manager.no_key_filenames

        plot_manager.stop_refreshing()
        # Modify the content of the plot_manager.dat
        with open(harvester.plot_manager.cache.path(), "r+b") as file:
            file.write(b"\xff\xff")  # Sets Cache.version to 65535

        # Make sure it just loads the plots normally if it fails to load the cache
        plot_manager = PlotManager(harvester.root_path, test_refresh_callback)
        plot_manager.cache.load()
        assert len(plot_manager.cache) == 0
        plot_manager.set_public_keys(
            harvester.plot_manager.farmer_public_keys, harvester.plot_manager.pool_public_keys
        )
        expected_result.loaded_plots = 20
        expected_result.removed_plots = 0
        expected_result.processed_files = 20
        expected_result.remaining_files = 0
        plot_manager.start_refreshing()
        await time_out_assert(5, plot_manager.needs_refresh, value=False)
        assert len(plot_manager.plots) == len(harvester.plot_manager.plots)
        plot_manager.stop_refreshing()

        # Test re-trying if processing a plot failed
        # First save the plot
        retry_test_plot = Path(plot_dir_sub / filename_2).resolve()
        retry_test_plot_save = Path(plot_dir_sub / "save").resolve()
        copy(retry_test_plot, retry_test_plot_save)
        # Invalidate the plot
        with open(plot_dir_sub / filename_2, "r+b") as file:
            file.write(bytes(100))
        # Add it and validate it fails to load
        await harvester.add_plot_directory(str(plot_dir_sub))
        expected_result.loaded_plots = 0
        expected_result.removed_plots = 0
        expected_result.processed_files = 1
        expected_result.remaining_files = 0
        harvester.plot_manager.start_refreshing()
        await time_out_assert(5, harvester.plot_manager.needs_refresh, value=False)
        assert retry_test_plot in harvester.plot_manager.failed_to_open_filenames
        # Make sure the file stays in `failed_to_open_filenames` and doesn't get loaded or processed in the next
        # update round
        expected_result.loaded_plots = 0
        expected_result.processed_files = 0
        harvester.plot_manager.trigger_refresh()
        await time_out_assert(5, harvester.plot_manager.needs_refresh, value=False)
        assert retry_test_plot in harvester.plot_manager.failed_to_open_filenames
        # Now decrease the retry timeout, restore the valid plot file and make sure it properly loads now
        harvester.plot_manager.refresh_parameter.retry_invalid_seconds = 0
        move(retry_test_plot_save, retry_test_plot)
        expected_result.loaded_plots = 1
        expected_result.processed_files = 1
        harvester.plot_manager.trigger_refresh()
        await time_out_assert(5, harvester.plot_manager.needs_refresh, value=False)
        assert retry_test_plot not in harvester.plot_manager.failed_to_open_filenames

        targets_1 = await client.get_reward_targets(False)
        assert "have_pool_sk" not in targets_1
        assert "have_farmer_sk" not in targets_1

        targets_2 = await client.get_reward_targets(True)
        assert targets_2["have_pool_sk"] and targets_2["have_farmer_sk"]

        new_ph: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.farmer_master_sk, uint32(10)).get_g1())
        new_ph_2: bytes32 = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(bt.pool_master_sk, uint32(472)).get_g1()
        )
        await client.set_reward_targets(encode_puzzle_hash(new_ph, "xch"), encode_puzzle_hash(new_ph_2, "xch"))

        targets_3 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_3["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_3["pool_target"]) == new_ph_2
        assert targets_3["have_pool_sk"] and targets_3["have_farmer_sk"]

        new_ph_3: bytes32 = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(bt.pool_master_sk, uint32(1888)).get_g1()
        )
        await client.set_reward_targets(None, encode_puzzle_hash(new_ph_3, "xch"))
        targets_4 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_4["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_4["pool_target"]) == new_ph_3
        assert not targets_4["have_pool_sk"] and targets_3["have_farmer_sk"]

        root_path = farmer_api.farmer._root_path
        config = load_config(root_path, "config.yaml")
        assert config["farmer"]["xch_target_address"] == encode_puzzle_hash(new_ph, "xch")
        assert config["pool"]["xch_target_address"] == encode_puzzle_hash(new_ph_3, "xch")

        new_ph_3_encoded = encode_puzzle_hash(new_ph_3, "xch")
        added_char = new_ph_3_encoded + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, added_char)

        replaced_char = new_ph_3_encoded[0:-1] + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, replaced_char)

        assert len((await client.get_pool_state())["pool_state"]) == 0
        all_sks = farmer_api.farmer.local_keychain.get_all_private_keys()
        auth_sk = master_sk_to_pooling_authentication_sk(all_sks[0][0], 2, 1)
        pool_list = [
            {
                "launcher_id": "ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa",
                "authentication_public_key": bytes(auth_sk.get_g1()).hex(),
                "owner_public_key": "84c3fcf9d5581c1ddc702cb0f3b4a06043303b334dd993ab42b2c320ebfa98e5ce558448615b3f69638ba92cf7f43da5",
                "payout_instructions": "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8",
                "pool_url": "localhost",
                "p2_singleton_puzzle_hash": "16e4bac26558d315cded63d4c5860e98deb447cc59146dd4de06ce7394b14f17",
                "target_puzzle_hash": "344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58",
            }
        ]
        config["pool"]["pool_list"] = pool_list
        save_config(root_path, "config.yaml", config)
        await farmer_api.farmer.update_pool_state()

        pool_state = (await client.get_pool_state())["pool_state"]
        assert len(pool_state) == 1
        assert (
            pool_state[0]["pool_config"]["payout_instructions"]
            == "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8"
        )
        await client.set_payout_instructions(hexstr_to_bytes(pool_state[0]["pool_config"]["launcher_id"]), "1234vy")
        await farmer_api.farmer.update_pool_state()
        pool_state = (await client.get_pool_state())["pool_state"]
        assert pool_state[0]["pool_config"]["payout_instructions"] == "1234vy"
    finally:
        # Checks that the RPC manages to stop the node
        client.close()
        client_2.close()
        await client.await_closed()
        await client_2.await_closed()
        await rpc_cleanup()
        await rpc_cleanup_2()
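
# The test above leans heavily on `time_out_assert` to poll a (possibly async)
# predicate until it returns the expected value or a timeout expires. A minimal
# sketch of such a polling helper, assuming that behavior; the real helper in the
# test utilities may differ in signature and polling interval:

import asyncio
import time


async def time_out_assert_sketch(timeout: float, function, value=True, *args, **kwargs):
    # Poll `function` until it returns `value`, failing the test after `timeout` seconds
    start = time.time()
    while time.time() - start < timeout:
        result = function(*args, **kwargs)
        if asyncio.iscoroutine(result):
            result = await result
        if result == value:
            return
        await asyncio.sleep(0.05)  # hypothetical polling interval
    assert False, f"Timed out waiting for {function} to return {value}"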
async def create_plots(args, keys: PlotKeys, root_path, use_datetime=True, test_private_keys: Optional[List] = None):
    config_filename = config_path_for_filename(root_path, "config.yaml")
    config = load_config(root_path, config_filename)

    if args.tmp2_dir is None:
        args.tmp2_dir = args.tmp_dir

    assert (keys.pool_public_key is None) != (keys.pool_contract_puzzle_hash is None)

    num = args.num

    if args.size < config["min_mainnet_k_size"] and test_private_keys is None:
        log.warning(f"Creating plots with size k={args.size}, which is less than the minimum required for mainnet")
    if args.size < 22:
        log.warning("k under 22 is not supported. Increasing k to 22")
        args.size = 22

    if keys.pool_public_key is not None:
        log.info(
            f"Creating {num} plots of size {args.size}, pool public key: "
            f"{bytes(keys.pool_public_key).hex()} farmer public key: {bytes(keys.farmer_public_key).hex()}"
        )
    else:
        assert keys.pool_contract_puzzle_hash is not None
        log.info(
            f"Creating {num} plots of size {args.size}, pool contract address: "
            f"{keys.pool_contract_address} farmer public key: {bytes(keys.farmer_public_key).hex()}"
        )

    tmp_dir_created = False
    if not args.tmp_dir.exists():
        mkdir(args.tmp_dir)
        tmp_dir_created = True

    tmp2_dir_created = False
    if not args.tmp2_dir.exists():
        mkdir(args.tmp2_dir)
        tmp2_dir_created = True

    mkdir(args.final_dir)

    finished_filenames = []
    for i in range(num):
        # Generate a random master secret key
        if test_private_keys is not None:
            assert len(test_private_keys) == num
            sk: PrivateKey = test_private_keys[i]
        else:
            sk = AugSchemeMPL.key_gen(token_bytes(32))

        # The plot public key is the combination of the harvester and farmer keys
        # New plots will also include a taproot of the keys, for extensibility
        include_taproot: bool = keys.pool_contract_puzzle_hash is not None
        plot_public_key = ProofOfSpace.generate_plot_public_key(
            master_sk_to_local_sk(sk).get_g1(), keys.farmer_public_key, include_taproot
        )

        # The plot id is based on the harvester, farmer, and pool keys
        if keys.pool_public_key is not None:
            plot_id: bytes32 = ProofOfSpace.calculate_plot_id_pk(keys.pool_public_key, plot_public_key)
            plot_memo: bytes32 = stream_plot_info_pk(keys.pool_public_key, keys.farmer_public_key, sk)
        else:
            assert keys.pool_contract_puzzle_hash is not None
            plot_id = ProofOfSpace.calculate_plot_id_ph(keys.pool_contract_puzzle_hash, plot_public_key)
            plot_memo = stream_plot_info_ph(keys.pool_contract_puzzle_hash, keys.farmer_public_key, sk)

        if args.plotid is not None:
            log.info(f"Debug plot ID: {args.plotid}")
            plot_id = bytes32(bytes.fromhex(args.plotid))

        if args.memo is not None:
            log.info(f"Debug memo: {args.memo}")
            plot_memo = bytes.fromhex(args.memo)

        # Print the memo for dev debugging
        plot_memo_str: str = plot_memo.hex()
        log.info(f"Memo: {plot_memo_str}")

        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")

        if use_datetime:
            filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
        else:
            filename = f"plot-k{args.size}-{plot_id}.plot"
        full_path: Path = args.final_dir / filename

        resolved_final_dir: str = str(Path(args.final_dir).resolve())
        plot_directories_list: str = config["harvester"]["plot_directories"]

        if args.exclude_final_dir:
            log.info(f"NOT adding directory {resolved_final_dir} to harvester for farming")
            if resolved_final_dir in plot_directories_list:
                log.warning(f"Directory {resolved_final_dir} already exists for harvester, please remove it manually")
        else:
            if resolved_final_dir not in plot_directories_list:
                # Adds the directory to the plot directories if it is not present
                log.info(f"Adding directory {resolved_final_dir} to harvester for farming")
                config = add_plot_directory(root_path, resolved_final_dir)

        if not full_path.exists():
            log.info(f"Starting plot {i + 1}/{num}")
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                plot_memo,
                plot_id,
                args.buffer,
                args.buckets,
                args.stripe_size,
                args.num_threads,
                args.nobitfield,
            )
            finished_filenames.append(filename)
        else:
            log.info(f"Plot {filename} already exists")

    log.info("Summary:")

    if tmp_dir_created:
        try:
            args.tmp_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty.")

    if tmp2_dir_created:
        try:
            args.tmp2_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty.")

    log.info(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log.info(filename)
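
# create_plots names files as `plot-k{size}-{datetime}-{plot_id}.plot` (or without the
# datetime component when use_datetime is False). A small illustrative parser for the
# datetime form; `parse_plot_filename` is a hypothetical helper, not part of the
# plotting code, and it assumes the plot id renders as 64 lowercase hex characters:

import re
from typing import Optional, Tuple


def parse_plot_filename(filename: str) -> Optional[Tuple[int, str, str]]:
    # Returns (k, datetime string, plot id hex) for names produced with use_datetime=True
    match = re.fullmatch(
        r"plot-k(\d+)-(\d{4}-\d{2}-\d{2}-\d{2}-\d{2})-([0-9a-f]{64})\.plot", filename
    )
    if match is None:
        return None
    return int(match.group(1)), match.group(2), match.group(3)


# Example: parse_plot_filename("plot-k32-2021-05-31-12-00-<64 hex chars>.plot")
# would return (32, "2021-05-31-12-00", "<64 hex chars>").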
def process_file(file_path: Path) -> Dict:
    new_provers: Dict[Path, PlotInfo] = {}
    filename_str = str(file_path)
    if self.match_str is not None and self.match_str not in filename_str:
        return new_provers
    if file_path.exists():
        if (
            file_path in self.failed_to_open_filenames
            and (time.time() - self.failed_to_open_filenames[file_path]) < 1200
        ):
            # Try once every 20 minutes to open the file
            return new_provers
        if file_path in self.plots:
            try:
                stat_info = file_path.stat()
            except Exception as e:
                log.error(f"Failed to open file {file_path}. {e}")
                return new_provers
            if stat_info.st_mtime == self.plots[file_path].time_modified:
                new_provers[file_path] = self.plots[file_path]
                return new_provers
        entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
        if entry is not None:
            loaded_parent, duplicates = entry
            if str(file_path.parent) in duplicates:
                log.debug(f"Skip duplicated plot {str(file_path)}")
                return new_provers
        try:
            with counter_lock:
                if result.processed_files >= self.refresh_parameter.batch_size:
                    result.remaining_files += 1
                    return new_provers
                result.processed_files += 1

            prover = DiskProver(str(file_path))

            log.debug(f"process_file {str(file_path)}")

            expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
            stat_info = file_path.stat()

            # TODO: consider checking if the file was just written to (which would mean that the file is still
            # being copied). A segfault might happen in this edge case.

            if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                log.warning(
                    f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024 ** 3)} GiB, but expected"
                    f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                )
                return new_provers

            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(prover.get_memo())

            # Only use plots that have the correct keys associated with them
            if self.farmer_public_keys is not None and farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                pool_public_key = pool_public_key_or_puzzle_hash
                pool_contract_puzzle_hash = None
            else:
                assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                pool_public_key = None
                pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

            if (
                self.pool_public_keys is not None
                and pool_public_key is not None
                and pool_public_key not in self.pool_public_keys
            ):
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return new_provers

            stat_info = file_path.stat()
            local_sk = master_sk_to_local_sk(local_master_sk)

            plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
            )

            with self.plot_filename_paths_lock:
                if file_path.name not in self.plot_filename_paths:
                    self.plot_filename_paths[file_path.name] = (str(Path(prover.get_filename()).parent), set())
                else:
                    self.plot_filename_paths[file_path.name][1].add(str(Path(prover.get_filename()).parent))
                if len(self.plot_filename_paths[file_path.name][1]) > 0:
                    log.warning(
                        f"Have multiple copies of the plot {file_path} in "
                        f"{self.plot_filename_paths[file_path.name][1]}."
                    )
                    return new_provers

            new_provers[file_path] = PlotInfo(
                prover,
                pool_public_key,
                pool_contract_puzzle_hash,
                plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
            )

            with counter_lock:
                result.loaded_plots += 1
                result.loaded_size += stat_info.st_size

        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file {file_path}. {e} {tb}")
            self.failed_to_open_filenames[file_path] = int(time.time())
            return new_provers
        log.info(f"Found plot {file_path} of size {new_provers[file_path].prover.get_size()}")

        if self.show_memo:
            plot_memo: bytes32
            if pool_contract_puzzle_hash is None:
                plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
            else:
                plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
            plot_memo_str: str = plot_memo.hex()
            log.info(f"Memo: {plot_memo_str}")

        return new_provers
    return new_provers
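
# Both versions of process_file track duplicates through `plot_filename_paths`, which
# maps a plot's file name to a tuple of (parent directory it was first loaded from,
# set of other parent directories holding copies). A standalone sketch of that
# bookkeeping, assuming this shape; it is an illustration, not the manager's API:

from pathlib import Path
from typing import Dict, Set, Tuple

plot_filename_paths: Dict[str, Tuple[str, Set[str]]] = {}


def register_plot(file_path: Path) -> bool:
    # Returns True if this path is the loaded copy, False if it is a duplicate
    entry = plot_filename_paths.get(file_path.name)
    if entry is None:
        plot_filename_paths[file_path.name] = (str(file_path.parent), set())
        return True
    loaded_parent, duplicates = entry
    if str(file_path.parent) == loaded_parent:
        return True
    duplicates.add(str(file_path.parent))
    return False


# Example: the second registration of the same file name from another directory is
# recorded as a duplicate, which is what the refresh skips on later passes.
assert register_plot(Path("/plots/a/plot-k32-x.plot"))
assert not register_plot(Path("/plots/b/plot-k32-x.plot"))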
async def test1(self, simulation):
    test_rpc_port = uint16(21522)
    test_rpc_port_2 = uint16(21523)
    harvester_service, farmer_service = simulation
    harvester = harvester_service._node
    farmer_api = farmer_service._api

    def stop_node_cb():
        pass

    def stop_node_cb_2():
        pass

    config = bt.config
    hostname = config["self_hostname"]
    daemon_port = config["daemon_port"]

    farmer_rpc_api = FarmerRpcApi(farmer_api.farmer)
    harvester_rpc_api = HarvesterRpcApi(harvester)

    rpc_cleanup = await start_rpc_server(
        farmer_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port,
        stop_node_cb,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    rpc_cleanup_2 = await start_rpc_server(
        harvester_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port_2,
        stop_node_cb_2,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )

    try:
        client = await FarmerRpcClient.create(self_hostname, test_rpc_port, bt.root_path, config)
        client_2 = await HarvesterRpcClient.create(self_hostname, test_rpc_port_2, bt.root_path, config)

        await validate_get_routes(client, farmer_rpc_api)
        await validate_get_routes(client_2, harvester_rpc_api)

        async def have_connections():
            return len(await client.get_connections()) > 0

        await time_out_assert(15, have_connections, True)

        assert (await client.get_signage_point(std_hash(b"2"))) is None
        assert len(await client.get_signage_points()) == 0

        async def have_signage_points():
            return len(await client.get_signage_points()) > 0

        sp = farmer_protocol.NewSignagePoint(
            std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
        )
        await farmer_api.new_signage_point(sp)
        await time_out_assert(5, have_signage_points, True)
        assert (await client.get_signage_point(std_hash(b"2"))) is not None

        async def have_plots():
            return len((await client_2.get_plots())["plots"]) > 0

        await time_out_assert(5, have_plots, True)

        res = await client_2.get_plots()
        num_plots = len(res["plots"])
        assert num_plots > 0
        plot_dir = get_plot_dir() / "subdir"
        plot_dir.mkdir(parents=True, exist_ok=True)
        plot_dir_sub = get_plot_dir() / "subdir" / "subsubdir"
        plot_dir_sub.mkdir(parents=True, exist_ok=True)
        plotter = DiskPlotter()
        filename = "test_farmer_harvester_rpc_plot.plot"
        filename_2 = "test_farmer_harvester_rpc_plot2.plot"
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename,
            18,
            stream_plot_info_pk(bt.pool_pk, bt.farmer_pk, AugSchemeMPL.key_gen(bytes([4] * 32))),
            token_bytes(32),
            128,
            0,
            2000,
            0,
            False,
        )

        # Making a plot with a puzzle hash encoded into it instead of pk
        plot_id_2 = token_bytes(32)
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )

        # Making the same plot, in a different dir. This should not be farmed
        plotter.create_plot_disk(
            str(plot_dir_sub),
            str(plot_dir_sub),
            str(plot_dir_sub),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )

        res_2 = await client_2.get_plots()
        assert len(res_2["plots"]) == num_plots

        # Reset the cache and force it to update every second to make sure the farmer gets the most recent data
        update_interval_before = farmer_api.farmer.update_harvester_cache_interval
        farmer_api.farmer.update_harvester_cache_interval = 1
        farmer_api.farmer.harvester_cache = {}

        # Test farmer get_harvesters
        async def test_get_harvesters():
            harvester.plot_manager.trigger_refresh()
            await time_out_assert(5, harvester.plot_manager.needs_refresh, value=False)
            farmer_res = await client.get_harvesters()
            if len(list(farmer_res["harvesters"])) != 1:
                log.error(f"test_get_harvesters: invalid harvesters {list(farmer_res['harvesters'])}")
                return False
            if len(list(farmer_res["harvesters"][0]["plots"])) != num_plots:
                log.error(f"test_get_harvesters: invalid plots {list(farmer_res['harvesters'])}")
                return False
            return True

        await time_out_assert_custom_interval(30, 1, test_get_harvesters)

        # Reset the cache and restore the update interval to avoid hitting the rate limit
        farmer_api.farmer.update_harvester_cache_interval = update_interval_before
        farmer_api.farmer.harvester_cache = {}

        targets_1 = await client.get_reward_targets(False)
        assert "have_pool_sk" not in targets_1
        assert "have_farmer_sk" not in targets_1

        targets_2 = await client.get_reward_targets(True)
        assert targets_2["have_pool_sk"] and targets_2["have_farmer_sk"]

        new_ph: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.farmer_master_sk, uint32(10)).get_g1())
        new_ph_2: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.pool_master_sk, uint32(472)).get_g1())
        await client.set_reward_targets(encode_puzzle_hash(new_ph, "xch"), encode_puzzle_hash(new_ph_2, "xch"))

        targets_3 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_3["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_3["pool_target"]) == new_ph_2
        assert targets_3["have_pool_sk"] and targets_3["have_farmer_sk"]

        new_ph_3: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.pool_master_sk, uint32(1888)).get_g1())
        await client.set_reward_targets(None, encode_puzzle_hash(new_ph_3, "xch"))
        targets_4 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_4["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_4["pool_target"]) == new_ph_3
        assert not targets_4["have_pool_sk"] and targets_3["have_farmer_sk"]

        root_path = farmer_api.farmer._root_path
        config = load_config(root_path, "config.yaml")
        assert config["farmer"]["xch_target_address"] == encode_puzzle_hash(new_ph, "xch")
        assert config["pool"]["xch_target_address"] == encode_puzzle_hash(new_ph_3, "xch")

        new_ph_3_encoded = encode_puzzle_hash(new_ph_3, "xch")
        added_char = new_ph_3_encoded + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, added_char)

        replaced_char = new_ph_3_encoded[0:-1] + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, replaced_char)

        assert len((await client.get_pool_state())["pool_state"]) == 0
        all_sks = farmer_api.farmer.local_keychain.get_all_private_keys()
        auth_sk = master_sk_to_pooling_authentication_sk(all_sks[0][0], 2, 1)
        pool_list = [
            {
                "launcher_id": "ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa",
                "authentication_public_key": bytes(auth_sk.get_g1()).hex(),
                "owner_public_key": "84c3fcf9d5581c1ddc702cb0f3b4a06043303b334dd993ab42b2c320ebfa98e5ce558448615b3f69638ba92cf7f43da5",  # noqa
                "payout_instructions": "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8",
                "pool_url": "localhost",
                "p2_singleton_puzzle_hash": "16e4bac26558d315cded63d4c5860e98deb447cc59146dd4de06ce7394b14f17",
                "target_puzzle_hash": "344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58",
            }
        ]
        config["pool"]["pool_list"] = pool_list
        save_config(root_path, "config.yaml", config)
        await farmer_api.farmer.update_pool_state()

        pool_state = (await client.get_pool_state())["pool_state"]
        assert len(pool_state) == 1
        assert (
            pool_state[0]["pool_config"]["payout_instructions"]
            == "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8"
        )
        await client.set_payout_instructions(hexstr_to_bytes(pool_state[0]["pool_config"]["launcher_id"]), "1234vy")
        await farmer_api.farmer.update_pool_state()
        pool_state = (await client.get_pool_state())["pool_state"]
        assert pool_state[0]["pool_config"]["payout_instructions"] == "1234vy"

        now = time.time()
        # Big arbitrary point values are used so they are unlikely to accidentally collide.
        before_24h = (now - (25 * 60 * 60), 29984713)
        since_24h = (now - (23 * 60 * 60), 93049817)
        for p2_singleton_puzzle_hash, pool_dict in farmer_api.farmer.pool_state.items():
            for key in ["points_found_24h", "points_acknowledged_24h"]:
                pool_dict[key].insert(0, since_24h)
                pool_dict[key].insert(0, before_24h)

        sp = farmer_protocol.NewSignagePoint(
            std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
        )
        await farmer_api.new_signage_point(sp)

        client_pool_state = await client.get_pool_state()
        for pool_dict in client_pool_state["pool_state"]:
            for key in ["points_found_24h", "points_acknowledged_24h"]:
                assert pool_dict[key][0] == list(since_24h)
    finally:
        # Checks that the RPC manages to stop the node
        client.close()
        client_2.close()
        await client.await_closed()
        await client_2.await_closed()
        await rpc_cleanup()
        await rpc_cleanup_2()
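
# The final block of the test above implies that the farmer prunes the
# `points_found_24h` / `points_acknowledged_24h` lists down to entries from the last
# 24 hours when a new signage point arrives: the 25h-old entry disappears while the
# 23h-old entry survives. A minimal sketch of that pruning, assuming entries are
# (timestamp, points) tuples; `prune_24h_window` is an illustrative helper inferred
# from the assertions, not the farmer's actual function:

import time
from typing import List, Optional, Tuple


def prune_24h_window(entries: List[Tuple[float, int]], now: Optional[float] = None) -> List[Tuple[float, int]]:
    # Keep only entries whose timestamp falls within the last 24 hours
    if now is None:
        now = time.time()
    return [entry for entry in entries if now - entry[0] < 24 * 60 * 60]


# Mirrors the test data: the 25h-old entry is dropped, the 23h-old entry survives.
now = time.time()
window = prune_24h_window([(now - 25 * 3600, 29984713), (now - 23 * 3600, 93049817)], now)
assert window == [(now - 23 * 3600, 93049817)]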