def add_private_key(self, mnemonic: str, passphrase: str) -> PrivateKey:
    """
    Adds a private key to the keychain, with the given entropy and passphrase. The
    keychain itself will store the public key, and the entropy bytes, but not the
    passphrase.

    mnemonic: BIP-39 style mnemonic; converted both to a seed (with the passphrase)
        and back to its entropy bytes for storage.
    passphrase: used only to derive the seed; never persisted.
    Returns the derived PrivateKey. If a key with the same public-key fingerprint
    is already stored, the derived key is returned without writing anything.
    """
    seed = mnemonic_to_seed(mnemonic, passphrase)
    entropy = bytes_from_mnemonic(mnemonic)
    # NOTE(review): the free index is computed before the duplicate check; on a
    # duplicate add it is simply unused — harmless, but confirm no caller relies
    # on index allocation side effects.
    index = self._get_free_private_key_index()
    key = AugSchemeMPL.key_gen(seed)
    fingerprint = key.get_g1().get_fingerprint()

    if fingerprint in [pk.get_fingerprint() for pk in self.get_all_public_keys()]:
        # Prevents duplicate add
        return key

    # Stored payload is the 48-byte public key hex followed by the entropy hex.
    keyring.set_password(
        self._get_service(),
        self._get_private_key_user(index),
        bytes(key.get_g1()).hex() + entropy.hex(),
    )
    return key
def create_plots(args, root_path, use_datetime=True, test_private_keys: Optional[List] = None):
    """
    Create ``args.num`` plots of size k=``args.size`` in ``args.final_dir``.

    Resolves the farmer and pool public keys (from args, or the keychain via the
    ``get_*_public_key`` helpers), enforces minimum k-sizes, optionally registers
    the final directory with the harvester config, then runs the disk plotter once
    per plot. Temporary directories created here are removed afterwards if empty.

    args: argparse-style namespace carrying all plotting parameters.
    root_path: chia root directory containing config.yaml.
    use_datetime: when True, embed a timestamp in plot filenames; False yields
        deterministic filenames (used by tests to avoid replotting).
    test_private_keys: optional list of fixed master keys, one per plot (tests).
    """
    config_filename = config_path_for_filename(root_path, "config.yaml")
    config = load_config(root_path, config_filename)

    if args.tmp2_dir is None:
        args.tmp2_dir = args.tmp_dir

    farmer_public_key: G1Element
    if args.farmer_public_key is not None:
        farmer_public_key = G1Element.from_bytes(bytes.fromhex(args.farmer_public_key))
    else:
        farmer_public_key = get_farmer_public_key(args.alt_fingerprint)

    pool_public_key: G1Element
    if args.pool_public_key is not None:
        # Fix: deserialize into a G1Element as annotated. Previously the raw hex
        # bytes were assigned, inconsistent with the farmer_public_key branch and
        # with every downstream use that expects a G1Element.
        pool_public_key = G1Element.from_bytes(bytes.fromhex(args.pool_public_key))
    else:
        pool_public_key = get_pool_public_key(args.alt_fingerprint)

    num = args.num if args.num is not None else 1

    if args.size < config["min_mainnet_k_size"] and test_private_keys is None:
        log.warning(f"Creating plots with size k={args.size}, which is less than the minimum required for mainnet")
    if args.size < 22:
        log.warning("k under 22 is not supported. Increasing k to 22")
        args.size = 22

    log.info(
        f"Creating {num} plots of size {args.size}, pool public key: "
        f"{bytes(pool_public_key).hex()} farmer public key: {bytes(farmer_public_key).hex()}"
    )

    # Track which temp dirs we created so only those are cleaned up at the end.
    tmp_dir_created = False
    if not args.tmp_dir.exists():
        mkdir(args.tmp_dir)
        tmp_dir_created = True

    tmp2_dir_created = False
    if not args.tmp2_dir.exists():
        mkdir(args.tmp2_dir)
        tmp2_dir_created = True

    mkdir(args.final_dir)

    finished_filenames = []
    for i in range(num):
        # Generate a random master secret key (or use the supplied test key)
        if test_private_keys is not None:
            assert len(test_private_keys) == num
            sk: PrivateKey = test_private_keys[i]
        else:
            sk = AugSchemeMPL.key_gen(token_bytes(32))

        # The plot public key is the combination of the harvester and farmer keys
        plot_public_key = ProofOfSpace.generate_plot_public_key(master_sk_to_local_sk(sk).get_g1(), farmer_public_key)

        # The plot id is based on the harvester, farmer, and pool keys
        plot_id: bytes32 = ProofOfSpace.calculate_plot_id_pk(pool_public_key, plot_public_key)
        if args.plotid is not None:
            log.info(f"Debug plot ID: {args.plotid}")
            plot_id = bytes32(bytes.fromhex(args.plotid))

        plot_memo: bytes32 = stream_plot_info(pool_public_key, farmer_public_key, sk)
        if args.memo is not None:
            log.info(f"Debug memo: {args.memo}")
            plot_memo = bytes.fromhex(args.memo)

        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")

        if use_datetime:
            filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
        else:
            filename = f"plot-k{args.size}-{plot_id}.plot"
        full_path: Path = args.final_dir / filename

        resolved_final_dir: str = str(Path(args.final_dir).resolve())
        plot_directories_list: str = config["harvester"]["plot_directories"]

        if args.exclude_final_dir:
            log.info(f"NOT adding directory {resolved_final_dir} to harvester for farming")
            if resolved_final_dir in plot_directories_list:
                log.warning(f"Directory {resolved_final_dir} already exists for harvester, please remove it manually")
        else:
            if resolved_final_dir not in plot_directories_list:
                # Adds the directory to the plot directories if it is not present
                log.info(f"Adding directory {resolved_final_dir} to harvester for farming")
                config = add_plot_directory(resolved_final_dir, root_path)

        if not full_path.exists():
            log.info(f"Starting plot {i + 1}/{num}")
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                plot_memo,
                plot_id,
                args.buffer,
                args.buckets,
                args.stripe_size,
                args.num_threads,
                args.nobitfield,
            )
            finished_filenames.append(filename)
        else:
            # Fix: log the actual filename; the previous message was an f-string
            # with no interpolation, logging the literal "(unknown)".
            log.info(f"Plot {filename} already exists")

    log.info("Summary:")

    if tmp_dir_created:
        try:
            args.tmp_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty.")

    if tmp2_dir_created:
        try:
            args.tmp2_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty.")

    log.info(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log.info(filename)
def test_readme():
    """
    Exercise the bls-signatures README example end to end: key generation,
    Aug-scheme sign/verify, (de)serialization, aggregation, the Pop scheme
    (proofs of possession, fast aggregate verify, key aggregation), and
    hardened/unhardened EIP-2333-style child key derivation.

    NOTE(review): this test intentionally mirrors the README code sample, so it
    should be kept textually close to that document.
    """
    # Fixed 32-byte seed so the whole test is deterministic.
    seed: bytes = bytes([
        0, 50, 6, 244, 24, 199, 1, 25, 52, 88, 192, 19, 18, 12, 89, 6,
        220, 18, 102, 58, 209, 82, 12, 62, 89, 110, 182, 9, 44, 20, 254, 22,
    ])
    sk: PrivateKey = AugSchemeMPL.key_gen(seed)
    pk: G1Element = sk.get_g1()

    message: bytes = bytes([1, 2, 3, 4, 5])
    signature: G2Element = AugSchemeMPL.sign(sk, message)

    ok: bool = AugSchemeMPL.verify(pk, message, signature)
    assert ok

    # Serialization round trip: sk 32 bytes, pk 48 bytes, signature 96 bytes.
    sk_bytes: bytes = bytes(sk)  # 32 bytes
    pk_bytes: bytes = bytes(pk)  # 48 bytes
    signature_bytes: bytes = bytes(signature)  # 96 bytes
    print(sk_bytes.hex(), pk_bytes.hex(), signature_bytes.hex())

    sk = PrivateKey.from_bytes(sk_bytes)
    pk = G1Element.from_bytes(pk_bytes)
    signature = G2Element.from_bytes(signature_bytes)

    # Two more keys from variations of the seed, for aggregation tests.
    seed = bytes([1]) + seed[1:]
    sk1: PrivateKey = AugSchemeMPL.key_gen(seed)
    seed = bytes([2]) + seed[1:]
    sk2: PrivateKey = AugSchemeMPL.key_gen(seed)
    message2: bytes = bytes([1, 2, 3, 4, 5, 6, 7])

    pk1: G1Element = sk1.get_g1()
    sig1: G2Element = AugSchemeMPL.sign(sk1, message)

    pk2: G1Element = sk2.get_g1()
    sig2: G2Element = AugSchemeMPL.sign(sk2, message2)

    # Aggregate two signatures over different messages (Aug scheme).
    agg_sig: G2Element = AugSchemeMPL.aggregate([sig1, sig2])

    ok = AugSchemeMPL.aggregate_verify([pk1, pk2], [message, message2], agg_sig)
    assert ok

    # Aggregation also works on top of an existing aggregate.
    seed = bytes([3]) + seed[1:]
    sk3: PrivateKey = AugSchemeMPL.key_gen(seed)
    pk3: G1Element = sk3.get_g1()
    message3: bytes = bytes([100, 2, 254, 88, 90, 45, 23])
    sig3: G2Element = AugSchemeMPL.sign(sk3, message3)

    agg_sig_final: G2Element = AugSchemeMPL.aggregate([agg_sig, sig3])
    ok = AugSchemeMPL.aggregate_verify([pk1, pk2, pk3], [message, message2, message3], agg_sig_final)
    assert ok

    # Pop scheme: same message for everyone, plus proofs of possession.
    pop_sig1: G2Element = PopSchemeMPL.sign(sk1, message)
    pop_sig2: G2Element = PopSchemeMPL.sign(sk2, message)
    pop_sig3: G2Element = PopSchemeMPL.sign(sk3, message)
    pop1: G2Element = PopSchemeMPL.pop_prove(sk1)
    pop2: G2Element = PopSchemeMPL.pop_prove(sk2)
    pop3: G2Element = PopSchemeMPL.pop_prove(sk3)

    ok = PopSchemeMPL.pop_verify(pk1, pop1)
    assert ok
    ok = PopSchemeMPL.pop_verify(pk2, pop2)
    assert ok
    ok = PopSchemeMPL.pop_verify(pk3, pop3)
    assert ok

    pop_sig_agg: G2Element = PopSchemeMPL.aggregate([pop_sig1, pop_sig2, pop_sig3])
    ok = PopSchemeMPL.fast_aggregate_verify([pk1, pk2, pk3], message, pop_sig_agg)
    assert ok

    # With PoP, public keys and secret keys can themselves be aggregated.
    pop_agg_pk: G1Element = pk1 + pk2 + pk3
    ok = PopSchemeMPL.verify(pop_agg_pk, message, pop_sig_agg)
    assert ok

    pop_agg_sk: PrivateKey = PrivateKey.aggregate([sk1, sk2, sk3])
    ok = PopSchemeMPL.sign(pop_agg_sk, message) == pop_sig_agg
    assert ok

    # Hardened child key derivation (secret-key only).
    master_sk: PrivateKey = AugSchemeMPL.key_gen(seed)
    child: PrivateKey = AugSchemeMPL.derive_child_sk(master_sk, 152)
    grandchild: PrivateKey = AugSchemeMPL.derive_child_sk(child, 952)

    # Unhardened derivation: public-key derivation must match secret-key derivation.
    master_pk: G1Element = master_sk.get_g1()
    child_u: PrivateKey = AugSchemeMPL.derive_child_sk_unhardened(master_sk, 22)
    grandchild_u: PrivateKey = AugSchemeMPL.derive_child_sk_unhardened(child_u, 0)

    child_u_pk: G1Element = AugSchemeMPL.derive_child_pk_unhardened(master_pk, 22)
    grandchild_u_pk: G1Element = AugSchemeMPL.derive_child_pk_unhardened(child_u_pk, 0)

    ok = grandchild_u_pk == grandchild_u.get_g1()
    assert ok
def test_basic_add_delete(self):
    """
    End-to-end keychain state machine test: empty state, add/duplicate-add,
    delete by fingerprint, delete-all, and passphrase-filtered retrieval.
    Each assertion depends on the keychain state left by the previous calls,
    so statement order matters throughout.
    """
    kc: Keychain = Keychain(testing=True)
    kc.delete_all_keys()

    # Fresh keychain: no keys, first free slot is 0.
    assert kc._get_free_private_key_index() == 0
    assert len(kc.get_all_private_keys()) == 0
    assert kc.get_first_private_key() is None
    assert kc.get_first_public_key() is None

    mnemonic = generate_mnemonic()
    entropy = bytes_from_mnemonic(mnemonic)
    # Mnemonic <-> entropy must round-trip.
    assert bytes_to_mnemonic(entropy) == mnemonic
    mnemonic_2 = generate_mnemonic()

    kc.add_private_key(mnemonic, "")
    assert kc._get_free_private_key_index() == 1
    assert len(kc.get_all_private_keys()) == 1

    kc.add_private_key(mnemonic_2, "")
    kc.add_private_key(mnemonic_2, "")  # checks to not add duplicates
    assert kc._get_free_private_key_index() == 2
    assert len(kc.get_all_private_keys()) == 2

    assert kc._get_free_private_key_index() == 2
    assert len(kc.get_all_private_keys()) == 2
    assert len(kc.get_all_public_keys()) == 2
    assert kc.get_all_private_keys()[0] == kc.get_first_private_key()
    assert kc.get_all_public_keys()[0] == kc.get_first_public_key()

    assert len(kc.get_all_private_keys()) == 2

    # Deleting the key derived from `mnemonic` frees slot 0 again.
    seed_2 = mnemonic_to_seed(mnemonic, "")
    seed_key_2 = AugSchemeMPL.key_gen(seed_2)
    kc.delete_key_by_fingerprint(seed_key_2.get_g1().get_fingerprint())
    assert kc._get_free_private_key_index() == 0
    assert len(kc.get_all_private_keys()) == 1

    kc.delete_all_keys()
    assert kc._get_free_private_key_index() == 0
    assert len(kc.get_all_private_keys()) == 0

    # Keys added with a passphrase are only recoverable when the matching
    # passphrase is supplied to get_all_private_keys / get_first_private_key.
    kc.add_private_key(bytes_to_mnemonic(token_bytes(32)), "my passphrase")
    kc.add_private_key(bytes_to_mnemonic(token_bytes(32)), "")
    kc.add_private_key(bytes_to_mnemonic(token_bytes(32)), "third passphrase")

    assert len(kc.get_all_public_keys()) == 3
    # Default call only tries the empty passphrase, so just one key decodes.
    assert len(kc.get_all_private_keys()) == 1
    assert len(kc.get_all_private_keys(["my passphrase", ""])) == 2
    assert len(kc.get_all_private_keys(["my passphrase", "", "third passphrase", "another"])) == 3
    assert len(kc.get_all_private_keys(["my passhrase wrong"])) == 0

    assert kc.get_first_private_key() is not None
    assert kc.get_first_private_key(["bad passphrase"]) is None
    assert kc.get_first_public_key() is not None

    kc.delete_all_keys()
    kc.add_private_key(bytes_to_mnemonic(token_bytes(32)), "my passphrase")
    # Public keys remain retrievable even without knowing the passphrase.
    assert kc.get_first_public_key() is not None
async def test1(self, simulation):
    """
    Integration test of the farmer and harvester RPC servers against a running
    simulation. Covers: connection/signage-point queries, plot creation and
    duplicate handling, directory add/remove refresh semantics, plot-manager
    cache persistence and corruption recovery, invalid-plot retry, reward
    target get/set, and pool-state configuration.

    NOTE(review): every section depends on state left behind by the previous
    one (plot counts, directories, expected_result fields), so statement order
    must be preserved exactly.
    """
    test_rpc_port = uint16(21522)
    test_rpc_port_2 = uint16(21523)
    harvester, farmer_api = simulation

    def stop_node_cb():
        pass

    def stop_node_cb_2():
        pass

    config = bt.config
    hostname = config["self_hostname"]
    daemon_port = config["daemon_port"]

    farmer_rpc_api = FarmerRpcApi(farmer_api.farmer)
    harvester_rpc_api = HarvesterRpcApi(harvester)

    # Start one RPC server for the farmer and one for the harvester.
    rpc_cleanup = await start_rpc_server(
        farmer_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port,
        stop_node_cb,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    rpc_cleanup_2 = await start_rpc_server(
        harvester_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port_2,
        stop_node_cb_2,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )

    try:
        client = await FarmerRpcClient.create(self_hostname, test_rpc_port, bt.root_path, config)
        client_2 = await HarvesterRpcClient.create(self_hostname, test_rpc_port_2, bt.root_path, config)

        async def have_connections():
            return len(await client.get_connections()) > 0

        await time_out_assert(15, have_connections, True)

        # --- Signage points -------------------------------------------------
        assert (await client.get_signage_point(std_hash(b"2"))) is None
        assert len(await client.get_signage_points()) == 0

        async def have_signage_points():
            return len(await client.get_signage_points()) > 0

        sp = farmer_protocol.NewSignagePoint(
            std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
        )
        await farmer_api.new_signage_point(sp)

        await time_out_assert(5, have_signage_points, True)
        assert (await client.get_signage_point(std_hash(b"2"))) is not None

        # --- Plot creation --------------------------------------------------
        async def have_plots():
            return len((await client_2.get_plots())["plots"]) > 0

        await time_out_assert(5, have_plots, True)

        res = await client_2.get_plots()
        num_plots = len(res["plots"])
        assert num_plots > 0
        plot_dir = get_plot_dir() / "subdir"
        plot_dir.mkdir(parents=True, exist_ok=True)
        plot_dir_sub = get_plot_dir() / "subdir" / "subsubdir"
        plot_dir_sub.mkdir(parents=True, exist_ok=True)
        plotter = DiskPlotter()
        filename = "test_farmer_harvester_rpc_plot.plot"
        filename_2 = "test_farmer_harvester_rpc_plot2.plot"
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename,
            18,
            stream_plot_info_pk(bt.pool_pk, bt.farmer_pk, AugSchemeMPL.key_gen(bytes([4] * 32))),
            token_bytes(32),
            128,
            0,
            2000,
            0,
            False,
        )

        # Making a plot with a puzzle hash encoded into it instead of pk
        plot_id_2 = token_bytes(32)
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )

        # Making the same plot, in a different dir. This should not be farmed
        plotter.create_plot_disk(
            str(plot_dir_sub),
            str(plot_dir_sub),
            str(plot_dir_sub),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )

        res_2 = await client_2.get_plots()
        assert len(res_2["plots"]) == num_plots

        # Test farmer get_harvesters
        async def test_get_harvesters():
            farmer_res = await client.get_harvesters()
            if len(list(farmer_res["harvesters"])) != 1:
                return False
            if len(list(farmer_res["harvesters"][0]["plots"])) != num_plots:
                return False
            return True

        await time_out_assert(30, test_get_harvesters)

        # --- Refresh callback plumbing -------------------------------------
        # expected_result is mutated before each refresh; the callback checks
        # the refresh outcome against it.
        expected_result: PlotRefreshResult = PlotRefreshResult()

        def test_refresh_callback(refresh_result: PlotRefreshResult):
            assert refresh_result.loaded_plots == expected_result.loaded_plots
            assert refresh_result.removed_plots == expected_result.removed_plots
            assert refresh_result.processed_files == expected_result.processed_files
            assert refresh_result.remaining_files == expected_result.remaining_files

        harvester.plot_manager.set_refresh_callback(test_refresh_callback)

        async def test_case(
            trigger, expect_loaded, expect_removed, expect_processed, expected_directories, expect_total_plots
        ):
            # Arm the callback expectations, fire the RPC trigger, then refresh
            # and verify the resulting totals.
            expected_result.loaded_plots = expect_loaded
            expected_result.removed_plots = expect_removed
            expected_result.processed_files = expect_processed
            await trigger
            harvester.plot_manager.trigger_refresh()
            assert len(await client_2.get_plot_directories()) == expected_directories
            await time_out_assert(5, harvester.plot_manager.needs_refresh, value=False)
            result = await client_2.get_plots()
            assert len(result["plots"]) == expect_total_plots
            assert len(harvester.plot_manager.cache) == expect_total_plots
            assert len(harvester.plot_manager.failed_to_open_filenames) == 0

        # Add plot_dir with two new plots
        await test_case(
            client_2.add_plot_directory(str(plot_dir)),
            expect_loaded=2,
            expect_removed=0,
            expect_processed=2,
            expected_directories=2,
            expect_total_plots=num_plots + 2,
        )
        # Add plot_dir_sub with one duplicate
        await test_case(
            client_2.add_plot_directory(str(plot_dir_sub)),
            expect_loaded=0,
            expect_removed=0,
            expect_processed=1,
            expected_directories=3,
            expect_total_plots=num_plots + 2,
        )
        # Delete one plot
        await test_case(
            client_2.delete_plot(str(plot_dir / filename)),
            expect_loaded=0,
            expect_removed=1,
            expect_processed=0,
            expected_directories=3,
            expect_total_plots=num_plots + 1,
        )
        # Remove directory with the duplicate
        await test_case(
            client_2.remove_plot_directory(str(plot_dir_sub)),
            expect_loaded=0,
            expect_removed=1,
            expect_processed=0,
            expected_directories=2,
            expect_total_plots=num_plots + 1,
        )
        # Re-add the directory with the duplicate for other tests
        await test_case(
            client_2.add_plot_directory(str(plot_dir_sub)),
            expect_loaded=0,
            expect_removed=0,
            expect_processed=1,
            expected_directories=3,
            expect_total_plots=num_plots + 1,
        )
        # Remove the directory which has the duplicated plot loaded. This removes the duplicated plot from plot_dir
        # and in the same run loads the plot from plot_dir_sub which is not longer seen as duplicate.
        await test_case(
            client_2.remove_plot_directory(str(plot_dir)),
            expect_loaded=1,
            expect_removed=1,
            expect_processed=1,
            expected_directories=2,
            expect_total_plots=num_plots + 1,
        )
        # Re-add the directory now the plot seen as duplicate is from plot_dir, not from plot_dir_sub like before
        await test_case(
            client_2.add_plot_directory(str(plot_dir)),
            expect_loaded=0,
            expect_removed=0,
            expect_processed=1,
            expected_directories=3,
            expect_total_plots=num_plots + 1,
        )
        # Remove the duplicated plot
        await test_case(
            client_2.delete_plot(str(plot_dir / filename_2)),
            expect_loaded=0,
            expect_removed=1,
            expect_processed=0,
            expected_directories=3,
            expect_total_plots=num_plots + 1,
        )
        # Remove the directory with the loaded plot which is not longer a duplicate
        await test_case(
            client_2.remove_plot_directory(str(plot_dir_sub)),
            expect_loaded=0,
            expect_removed=1,
            expect_processed=0,
            expected_directories=2,
            expect_total_plots=num_plots,
        )
        # Remove the directory which contains all other plots
        await test_case(
            client_2.remove_plot_directory(str(get_plot_dir())),
            expect_loaded=0,
            expect_removed=20,
            expect_processed=0,
            expected_directories=1,
            expect_total_plots=0,
        )
        # Recover the plots to test caching
        # First make sure cache gets written if required and new plots are loaded
        await test_case(
            client_2.add_plot_directory(str(get_plot_dir())),
            expect_loaded=20,
            expect_removed=0,
            expect_processed=20,
            expected_directories=2,
            expect_total_plots=20,
        )

        # --- Cache persistence ---------------------------------------------
        assert harvester.plot_manager.cache.path().exists()
        unlink(harvester.plot_manager.cache.path())
        # Should not write the cache again on shutdown because it didn't change
        assert not harvester.plot_manager.cache.path().exists()
        harvester.plot_manager.stop_refreshing()
        assert not harvester.plot_manager.cache.path().exists()
        # Manually trigger `save_cache` and make sure it creates a new cache file
        harvester.plot_manager.cache.save()
        assert harvester.plot_manager.cache.path().exists()

        # A fresh PlotManager must load the same plots/cache as the harvester's.
        expected_result.loaded_plots = 20
        expected_result.removed_plots = 0
        expected_result.processed_files = 20
        expected_result.remaining_files = 0
        plot_manager: PlotManager = PlotManager(harvester.root_path, test_refresh_callback)
        plot_manager.start_refreshing()
        assert len(harvester.plot_manager.cache) == len(plot_manager.cache)
        await time_out_assert(5, plot_manager.needs_refresh, value=False)
        for path, plot_info in harvester.plot_manager.plots.items():
            assert path in plot_manager.plots
            assert plot_manager.plots[path].prover.get_filename() == plot_info.prover.get_filename()
            assert plot_manager.plots[path].prover.get_id() == plot_info.prover.get_id()
            assert plot_manager.plots[path].prover.get_memo() == plot_info.prover.get_memo()
            assert plot_manager.plots[path].prover.get_size() == plot_info.prover.get_size()
            assert plot_manager.plots[path].pool_public_key == plot_info.pool_public_key
            assert plot_manager.plots[path].pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash
            assert plot_manager.plots[path].plot_public_key == plot_info.plot_public_key
            assert plot_manager.plots[path].file_size == plot_info.file_size
            assert plot_manager.plots[path].time_modified == plot_info.time_modified

        assert harvester.plot_manager.plot_filename_paths == plot_manager.plot_filename_paths
        assert harvester.plot_manager.failed_to_open_filenames == plot_manager.failed_to_open_filenames
        assert harvester.plot_manager.no_key_filenames == plot_manager.no_key_filenames
        plot_manager.stop_refreshing()

        # Modify the content of the plot_manager.dat
        with open(harvester.plot_manager.cache.path(), "r+b") as file:
            file.write(b"\xff\xff")  # Sets Cache.version to 65535
        # Make sure it just loads the plots normally if it fails to load the cache
        plot_manager = PlotManager(harvester.root_path, test_refresh_callback)
        plot_manager.cache.load()
        assert len(plot_manager.cache) == 0
        plot_manager.set_public_keys(
            harvester.plot_manager.farmer_public_keys, harvester.plot_manager.pool_public_keys
        )
        expected_result.loaded_plots = 20
        expected_result.removed_plots = 0
        expected_result.processed_files = 20
        expected_result.remaining_files = 0
        plot_manager.start_refreshing()
        await time_out_assert(5, plot_manager.needs_refresh, value=False)
        assert len(plot_manager.plots) == len(harvester.plot_manager.plots)
        plot_manager.stop_refreshing()

        # Test re-trying if processing a plot failed
        # First save the plot
        retry_test_plot = Path(plot_dir_sub / filename_2).resolve()
        retry_test_plot_save = Path(plot_dir_sub / "save").resolve()
        copy(retry_test_plot, retry_test_plot_save)
        # Invalidate the plot
        with open(plot_dir_sub / filename_2, "r+b") as file:
            file.write(bytes(100))
        # Add it and validate it fails to load
        await harvester.add_plot_directory(str(plot_dir_sub))
        expected_result.loaded_plots = 0
        expected_result.removed_plots = 0
        expected_result.processed_files = 1
        expected_result.remaining_files = 0
        harvester.plot_manager.start_refreshing()
        await time_out_assert(5, harvester.plot_manager.needs_refresh, value=False)
        assert retry_test_plot in harvester.plot_manager.failed_to_open_filenames
        # Make sure the file stays in `failed_to_open_filenames` and doesn't get loaded or processed in the next
        # update round
        expected_result.loaded_plots = 0
        expected_result.processed_files = 0
        harvester.plot_manager.trigger_refresh()
        await time_out_assert(5, harvester.plot_manager.needs_refresh, value=False)
        assert retry_test_plot in harvester.plot_manager.failed_to_open_filenames
        # Now decrease the re-try timeout, restore the valid plot file and make sure it properly loads now
        harvester.plot_manager.refresh_parameter.retry_invalid_seconds = 0
        move(retry_test_plot_save, retry_test_plot)
        expected_result.loaded_plots = 1
        expected_result.processed_files = 1
        harvester.plot_manager.trigger_refresh()
        await time_out_assert(5, harvester.plot_manager.needs_refresh, value=False)
        assert retry_test_plot not in harvester.plot_manager.failed_to_open_filenames

        # --- Reward targets -------------------------------------------------
        targets_1 = await client.get_reward_targets(False)
        assert "have_pool_sk" not in targets_1
        assert "have_farmer_sk" not in targets_1
        targets_2 = await client.get_reward_targets(True)
        assert targets_2["have_pool_sk"] and targets_2["have_farmer_sk"]

        new_ph: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.farmer_master_sk, uint32(10)).get_g1())
        new_ph_2: bytes32 = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(bt.pool_master_sk, uint32(472)).get_g1()
        )

        await client.set_reward_targets(encode_puzzle_hash(new_ph, "xch"), encode_puzzle_hash(new_ph_2, "xch"))
        targets_3 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_3["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_3["pool_target"]) == new_ph_2
        assert targets_3["have_pool_sk"] and targets_3["have_farmer_sk"]

        new_ph_3: bytes32 = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(bt.pool_master_sk, uint32(1888)).get_g1()
        )
        await client.set_reward_targets(None, encode_puzzle_hash(new_ph_3, "xch"))
        targets_4 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_4["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_4["pool_target"]) == new_ph_3
        assert not targets_4["have_pool_sk"] and targets_3["have_farmer_sk"]

        # Targets must also be persisted to config.yaml.
        root_path = farmer_api.farmer._root_path
        config = load_config(root_path, "config.yaml")
        assert config["farmer"]["xch_target_address"] == encode_puzzle_hash(new_ph, "xch")
        assert config["pool"]["xch_target_address"] == encode_puzzle_hash(new_ph_3, "xch")

        # Invalid bech32 addresses must be rejected.
        new_ph_3_encoded = encode_puzzle_hash(new_ph_3, "xch")
        added_char = new_ph_3_encoded + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, added_char)
        replaced_char = new_ph_3_encoded[0:-1] + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, replaced_char)

        # --- Pool state ------------------------------------------------------
        assert len((await client.get_pool_state())["pool_state"]) == 0
        all_sks = farmer_api.farmer.local_keychain.get_all_private_keys()
        auth_sk = master_sk_to_pooling_authentication_sk(all_sks[0][0], 2, 1)
        pool_list = [
            {
                "launcher_id": "ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa",
                "authentication_public_key": bytes(auth_sk.get_g1()).hex(),
                "owner_public_key": "84c3fcf9d5581c1ddc702cb0f3b4a06043303b334dd993ab42b2c320ebfa98e5ce558448615b3f69638ba92cf7f43da5",
                "payout_instructions": "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8",
                "pool_url": "localhost",
                "p2_singleton_puzzle_hash": "16e4bac26558d315cded63d4c5860e98deb447cc59146dd4de06ce7394b14f17",
                "target_puzzle_hash": "344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58",
            }
        ]
        config["pool"]["pool_list"] = pool_list
        save_config(root_path, "config.yaml", config)
        await farmer_api.farmer.update_pool_state()

        pool_state = (await client.get_pool_state())["pool_state"]
        assert len(pool_state) == 1
        assert (
            pool_state[0]["pool_config"]["payout_instructions"]
            == "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8"
        )
        await client.set_payout_instructions(hexstr_to_bytes(pool_state[0]["pool_config"]["launcher_id"]), "1234vy")
        await farmer_api.farmer.update_pool_state()
        pool_state = (await client.get_pool_state())["pool_state"]
        assert pool_state[0]["pool_config"]["payout_instructions"] == "1234vy"
    finally:
        # Checks that the RPC manages to stop the node
        client.close()
        client_2.close()
        await client.await_closed()
        await client_2.await_closed()
        await rpc_cleanup()
        await rpc_cleanup_2()
def rand_g1() -> G1Element:
    """Return a pseudo-random G1 element: the public key of a freshly generated secret key."""
    return AugSchemeMPL.key_gen(rand_bytes(96)).get_g1()
# NOTE(review): these prints reference names (one_greater, one_equal, ...) that are
# not defined in this chunk — presumably assigned by benchmarking code above this
# view; confirm against the full file.
print(f"cost of one_greater is: {one_greater}")
print(f"cost of one_equal is: {one_equal}")
print(f"cost of one_if is: {one_if}")
print(f"cost of one_sha256 is: {one_sha256}")
print(f"cost of one_pubkey_for_exp is: {one_pubkey_for_exp}")
print(f"cost of one_point_add is: {one_point_add}")


if __name__ == "__main__":
    """
    Naive way to calculate cost ratio between vByte and CLVM cost unit.
    AggSig has assigned cost of 20vBytes, simple CLVM program is benchmarked against it.
    """
    wallet_tool = WalletTool()
    benchmark_all_operators()
    # Deterministic master key so repeated runs benchmark identical material.
    secret_key: PrivateKey = AugSchemeMPL.key_gen(bytes([2] * 32))
    puzzles = []
    solutions = []
    private_keys = []
    # NOTE(review): public_keys is never appended to below — confirm whether the
    # list is used later in the file or is dead.
    public_keys = []

    # Build 1000 (puzzle, solution, key) triples from derived wallet keys.
    for i in range(0, 1000):
        private_key: PrivateKey = master_sk_to_wallet_sk(secret_key, uint32(i))
        public_key = private_key.public_key()
        solution = wallet_tool.make_solution(
            {ConditionOpcode.ASSERT_MY_COIN_ID: [ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [token_bytes()])]}
        )
        puzzle = puzzle_for_pk(bytes(public_key))
        puzzles.append(puzzle)
        solutions.append(solution)
        private_keys.append(private_key)
def __init__(
    self,
    root_path: Optional[Path] = None,
    real_plots: bool = False,
):
    """
    Build the test harness state.

    root_path: chia root directory; when None a TemporaryDirectory is created
        and used (kept alive via self._tempdir).
    real_plots: when False, a throwaway test keychain is populated with
        deterministic farmer/pool keys and 40 small (k=18) test plots are
        created; when True, the user's existing keychain and plots are used.
    """
    self._tempdir = None
    if root_path is None:
        self._tempdir = tempfile.TemporaryDirectory()
        root_path = Path(self._tempdir.name)
    self.root_path = root_path
    self.real_plots = real_plots

    if not real_plots:
        create_default_chia_config(root_path)
        initialize_ssl(root_path)
        # No real plots supplied, so we will use the small test plots
        self.use_any_pos = True
        self.keychain = Keychain("testing-1.8.0", True)
        self.keychain.delete_all_keys()
        # Deterministic mnemonics derived from fixed strings, so every test run
        # produces the same farmer/pool keys (and thus reusable plots).
        self.farmer_master_sk = self.keychain.add_private_key(
            bytes_to_mnemonic(std_hash(b"block_tools farmer key")), ""
        )
        self.pool_master_sk = self.keychain.add_private_key(
            bytes_to_mnemonic(std_hash(b"block_tools pool key")), ""
        )
        self.farmer_pk = master_sk_to_farmer_sk(self.farmer_master_sk).get_g1()
        self.pool_pk = master_sk_to_pool_sk(self.pool_master_sk).get_g1()

        plot_dir = get_plot_dir()
        mkdir(plot_dir)
        temp_dir = plot_dir / "tmp"
        mkdir(temp_dir)
        args = Namespace()
        # Can't go much lower than 18, since plots start having no solutions
        args.size = 18
        # Uses many plots for testing, in order to guarantee proofs of space at every height
        args.num = 40
        args.buffer = 32
        args.farmer_public_key = bytes(self.farmer_pk).hex()
        args.pool_public_key = bytes(self.pool_pk).hex()
        args.tmp_dir = temp_dir
        args.tmp2_dir = plot_dir
        args.final_dir = plot_dir
        args.plotid = None
        args.memo = None
        # One fixed key per plot keeps plot IDs (and filenames) deterministic.
        test_private_keys = [AugSchemeMPL.key_gen(std_hash(bytes([i]))) for i in range(args.num)]
        try:
            # No datetime in the filename, to get deterministic filenames and not replot
            create_plots(
                args,
                root_path,
                use_datetime=False,
                test_private_keys=test_private_keys,
            )
        except KeyboardInterrupt:
            # Partial plots are useless; remove them before bailing out.
            shutil.rmtree(plot_dir, ignore_errors=True)
            sys.exit(1)
    else:
        initialize_ssl(root_path)
        self.keychain = Keychain()
        self.use_any_pos = False
        sk_and_ent = self.keychain.get_first_private_key()
        assert sk_and_ent is not None
        # With real plots both roles use the first key in the keychain.
        self.farmer_master_sk = sk_and_ent[0]
        self.pool_master_sk = sk_and_ent[0]

    self.farmer_ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(self.farmer_master_sk, uint32(0)).get_g1())
    self.pool_ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(self.pool_master_sk, uint32(0)).get_g1())

    self.all_sks = self.keychain.get_all_private_keys()
    self.pool_pubkeys: List[G1Element] = [master_sk_to_pool_sk(sk).get_g1() for sk, _ in self.all_sks]

    farmer_pubkeys: List[G1Element] = [master_sk_to_farmer_sk(sk).get_g1() for sk, _ in self.all_sks]
    if len(self.pool_pubkeys) == 0 or len(farmer_pubkeys) == 0:
        raise RuntimeError("Keys not generated. Run `chia generate keys`")

    _, self.plots, _, _ = load_plots({}, {}, farmer_pubkeys, self.pool_pubkeys, root_path)
def test_basic_add_delete(self):
    """
    Keychain state-machine test (user/service-named variant): empty state,
    misspelled-mnemonic rejection, add/duplicate-add, delete by fingerprint,
    delete-all, and passphrase-filtered retrieval. Assertions depend on state
    left by the preceding calls, so order matters.
    """
    kc: Keychain = Keychain(user="******", service="chia-testing-1.8.0")
    kc.delete_all_keys()

    # Fresh keychain: no keys, first free slot is 0.
    assert kc._get_free_private_key_index() == 0
    assert len(kc.get_all_private_keys()) == 0
    assert kc.get_first_private_key() is None
    assert kc.get_first_public_key() is None

    mnemonic = generate_mnemonic()
    entropy = bytes_from_mnemonic(mnemonic)
    # Mnemonic <-> entropy must round-trip.
    assert bytes_to_mnemonic(entropy) == mnemonic
    mnemonic_2 = generate_mnemonic()

    # misspelled words in the mnemonic
    bad_mnemonic = mnemonic.split(" ")
    bad_mnemonic[6] = "ZZZZZZ"
    self.assertRaisesRegex(
        ValueError,
        "'ZZZZZZ' is not in the mnemonic dictionary; may be misspelled",
        bytes_from_mnemonic,
        " ".join(bad_mnemonic),
    )

    kc.add_private_key(mnemonic, "")
    assert kc._get_free_private_key_index() == 1
    assert len(kc.get_all_private_keys()) == 1

    kc.add_private_key(mnemonic_2, "")
    kc.add_private_key(mnemonic_2, "")  # checks to not add duplicates
    assert kc._get_free_private_key_index() == 2
    assert len(kc.get_all_private_keys()) == 2

    assert kc._get_free_private_key_index() == 2
    assert len(kc.get_all_private_keys()) == 2
    assert len(kc.get_all_public_keys()) == 2
    assert kc.get_all_private_keys()[0] == kc.get_first_private_key()
    assert kc.get_all_public_keys()[0] == kc.get_first_public_key()

    assert len(kc.get_all_private_keys()) == 2

    # Deleting the key derived from `mnemonic` frees slot 0 again.
    seed_2 = mnemonic_to_seed(mnemonic, "")
    seed_key_2 = AugSchemeMPL.key_gen(seed_2)
    kc.delete_key_by_fingerprint(seed_key_2.get_g1().get_fingerprint())
    assert kc._get_free_private_key_index() == 0
    assert len(kc.get_all_private_keys()) == 1

    kc.delete_all_keys()
    assert kc._get_free_private_key_index() == 0
    assert len(kc.get_all_private_keys()) == 0

    # Keys added with a passphrase only decode when that passphrase is supplied.
    kc.add_private_key(bytes_to_mnemonic(token_bytes(32)), "my passphrase")
    kc.add_private_key(bytes_to_mnemonic(token_bytes(32)), "")
    kc.add_private_key(bytes_to_mnemonic(token_bytes(32)), "third passphrase")

    assert len(kc.get_all_public_keys()) == 3
    # Default call only tries the empty passphrase, so just one key decodes.
    assert len(kc.get_all_private_keys()) == 1
    assert len(kc.get_all_private_keys(["my passphrase", ""])) == 2
    assert len(kc.get_all_private_keys(["my passphrase", "", "third passphrase", "another"])) == 3
    assert len(kc.get_all_private_keys(["my passhrase wrong"])) == 0

    assert kc.get_first_private_key() is not None
    assert kc.get_first_private_key(["bad passphrase"]) is None
    assert kc.get_first_public_key() is not None

    kc.delete_all_keys()
    kc.add_private_key(bytes_to_mnemonic(token_bytes(32)), "my passphrase")
    # Public keys remain retrievable without the passphrase.
    assert kc.get_first_public_key() is not None
def create_plots(args, root_path, use_datetime=True, test_private_keys: Optional[List] = None):
    """Create ``args.num`` plot files in ``args.final_dir``.

    For each plot, a fresh master secret key is generated (or taken from
    ``test_private_keys`` when testing), combined with the farmer key into a
    plot public key, and handed to the DiskPlotter. The final directory is
    registered with the harvester config if it is not already known.

    Args:
        args: parsed CLI namespace (tmp_dir, tmp2_dir, final_dir, size, num,
            farmer_public_key, pool_public_key, plotid, memo, buffer, buckets,
            stripe_size, num_threads, ...).
        root_path: chia root directory holding config.yaml.
        use_datetime: include a timestamp in generated plot filenames.
        test_private_keys: optional fixed keys, one per plot, for tests.
    """
    config_filename = config_path_for_filename(root_path, "config.yaml")
    if args.tmp2_dir is None:
        args.tmp2_dir = args.final_dir

    farmer_public_key: G1Element
    if args.farmer_public_key is not None:
        farmer_public_key = G1Element.from_bytes(bytes.fromhex(args.farmer_public_key))
    else:
        farmer_public_key = get_default_farmer_public_key()

    pool_public_key: G1Element
    if args.pool_public_key is not None:
        # Bug fix: previously the hex string was left as raw bytes; it must be
        # parsed into a G1Element, mirroring the farmer key branch above.
        pool_public_key = G1Element.from_bytes(bytes.fromhex(args.pool_public_key))
    else:
        pool_public_key = get_default_pool_public_key()

    num = args.num if args.num is not None else 1
    log.info(
        f"Creating {num} plots of size {args.size}, pool public key: "
        f"{bytes(pool_public_key).hex()} farmer public key: {bytes(farmer_public_key).hex()}"
    )

    # Track which temp dirs we created so we only remove those at the end.
    tmp_dir_created = False
    if not args.tmp_dir.exists():
        mkdir(args.tmp_dir)
        tmp_dir_created = True
    tmp2_dir_created = False
    if not args.tmp2_dir.exists():
        mkdir(args.tmp2_dir)
        tmp2_dir_created = True
    mkdir(args.final_dir)

    finished_filenames = []
    config = load_config(root_path, config_filename)
    plot_filenames = get_plot_filenames(config["harvester"])
    for i in range(num):
        # Generate a random master secret key
        if test_private_keys is not None:
            assert len(test_private_keys) == num
            sk: PrivateKey = test_private_keys[i]
        else:
            sk = AugSchemeMPL.key_gen(token_bytes(32))

        # The plot public key is the combination of the harvester and farmer keys
        plot_public_key = ProofOfSpace.generate_plot_public_key(
            master_sk_to_local_sk(sk).get_g1(), farmer_public_key
        )

        # The plot id is based on the harvester, farmer, and pool keys
        plot_id: bytes32 = ProofOfSpace.calculate_plot_id(pool_public_key, plot_public_key)
        if args.plotid is not None:
            log.info(f"Debug plot ID: {args.plotid}")
            plot_id = bytes32(bytes.fromhex(args.plotid))

        # The memo embeds the keys needed to later prove/farm this plot.
        # Note: this is NOT 32 bytes (pool pk + farmer pk + sk), hence `bytes`.
        plot_memo: bytes = stream_plot_info(pool_public_key, farmer_public_key, sk)
        if args.memo is not None:
            log.info(f"Debug memo: {args.memo}")
            plot_memo = bytes.fromhex(args.memo)

        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")
        if use_datetime:
            filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
        else:
            filename = f"plot-k{args.size}-{plot_id}.plot"
        full_path: Path = args.final_dir / filename

        if args.final_dir.resolve() not in plot_filenames:
            if str(args.final_dir.resolve()) not in config["harvester"]["plot_directories"]:
                # Adds the directory to the plot directories if it is not present
                config = add_plot_directory(str(args.final_dir.resolve()), root_path)

        if not full_path.exists():
            log.info(f"Starting plot {i + 1}/{num}")
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                plot_memo,
                plot_id,
                args.buffer,
                args.buckets,
                args.stripe_size,
                args.num_threads,
            )
            finished_filenames.append(filename)
        else:
            log.info("Plot (unknown) already exists")

    log.info("Summary:")
    if tmp_dir_created:
        try:
            args.tmp_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty.")
    if tmp2_dir_created:
        try:
            args.tmp2_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty.")
    log.info(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log.info(filename)
async def test1(self, simulation):
    """Exercise the farmer and harvester RPC servers end to end.

    Starts both RPC servers, checks challenge/plot queries, creates a new
    plot on disk, and verifies add/delete of plots and plot directories.

    Fix: cleanup previously ran only in an ``except AssertionError`` handler
    (leaking clients/servers on any other exception) and was duplicated on
    the success path; it now runs once in ``finally``, matching the sibling
    RPC tests. Clients are created before the ``try`` so the ``finally``
    block never references an unbound name.
    """
    test_rpc_port = uint16(21522)
    test_rpc_port_2 = uint16(21523)
    harvester, farmer = simulation

    def stop_node_cb():
        pass

    def stop_node_cb_2():
        pass

    config = load_config(bt.root_path, "config.yaml")
    hostname = config["self_hostname"]
    daemon_port = config["daemon_port"]
    farmer_rpc_api = FarmerRpcApi(farmer)
    harvester_rpc_api = HarvesterRpcApi(harvester)
    rpc_cleanup = await start_rpc_server(
        farmer_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port,
        stop_node_cb,
        connect_to_daemon=False,
    )
    rpc_cleanup_2 = await start_rpc_server(
        harvester_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port_2,
        stop_node_cb_2,
        connect_to_daemon=False,
    )
    client = await FarmerRpcClient.create("localhost", test_rpc_port)
    client_2 = await HarvesterRpcClient.create("localhost", test_rpc_port_2)
    try:
        async def have_connections():
            return len(await client.get_connections()) > 0

        await time_out_assert(5, have_connections, True)
        await client.get_latest_challenges()

        async def have_challenges():
            return len(await client.get_latest_challenges()) > 0

        await time_out_assert(5, have_challenges, True)

        async def have_plots():
            return len((await client_2.get_plots())["plots"]) > 0

        await time_out_assert(5, have_plots, True)
        res = await client_2.get_plots()
        num_plots = len(res["plots"])
        assert num_plots > 0

        # Create one real plot in a fresh subdirectory, then verify it only
        # becomes visible after its directory is registered.
        plot_dir = get_plot_dir() / "subdir"
        plot_dir.mkdir(parents=True, exist_ok=True)
        plotter = DiskPlotter()
        filename = "test_farmer_harvester_rpc_plot.plot"
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename,
            18,
            stream_plot_info(bt.pool_pk, bt.farmer_pk, AugSchemeMPL.key_gen(bytes([4] * 32))),
            token_bytes(32),
            128,
        )
        res_2 = await client_2.get_plots()
        assert len(res_2["plots"]) == num_plots

        print(await client_2.get_plot_directories())
        assert len(await client_2.get_plot_directories()) == 1
        await client_2.add_plot_directory(str(plot_dir))
        assert len(await client_2.get_plot_directories()) == 2
        res_2 = await client_2.get_plots()
        assert len(res_2["plots"]) == num_plots + 1

        await client_2.delete_plot(str(plot_dir / filename))
        res_3 = await client_2.get_plots()
        assert len(res_3["plots"]) == num_plots
        await client_2.remove_plot_directory(str(plot_dir))
        print(await client_2.get_plot_directories())
        assert len(await client_2.get_plot_directories()) == 1
    finally:
        # Checks that the RPC manages to stop the node
        client.close()
        client_2.close()
        await client.await_closed()
        await client_2.await_closed()
        await rpc_cleanup()
        await rpc_cleanup_2()
async def test1(self, simulation):
    """Farmer/harvester RPC integration test (signage-point variant).

    Starts both RPC servers, injects a signage point through the farmer API,
    then exercises plot creation, directory add/remove, and plot deletion
    through the harvester RPC client. Cleanup runs in ``finally``.
    """
    test_rpc_port = uint16(21522)
    test_rpc_port_2 = uint16(21523)
    harvester, farmer_api = simulation

    def stop_node_cb():
        pass

    def stop_node_cb_2():
        pass

    config = bt.config
    hostname = config["self_hostname"]
    daemon_port = config["daemon_port"]
    farmer_rpc_api = FarmerRpcApi(farmer_api.farmer)
    harvester_rpc_api = HarvesterRpcApi(harvester)
    rpc_cleanup = await start_rpc_server(
        farmer_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port,
        stop_node_cb,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    rpc_cleanup_2 = await start_rpc_server(
        harvester_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port_2,
        stop_node_cb_2,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    try:
        client = await FarmerRpcClient.create(self_hostname, test_rpc_port, bt.root_path, config)
        client_2 = await HarvesterRpcClient.create(self_hostname, test_rpc_port_2, bt.root_path, config)

        async def have_connections():
            return len(await client.get_connections()) > 0

        await time_out_assert(15, have_connections, True)

        # No signage points known before one is injected.
        assert (await client.get_signage_point(std_hash(b"2"))) is None
        assert len(await client.get_signage_points()) == 0

        async def have_signage_points():
            return len(await client.get_signage_points()) > 0

        sp = farmer_protocol.NewSignagePoint(
            std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
        )
        await farmer_api.new_signage_point(sp)
        await time_out_assert(5, have_signage_points, True)
        assert (await client.get_signage_point(std_hash(b"2"))) is not None

        async def have_plots():
            return len((await client_2.get_plots())["plots"]) > 0

        await time_out_assert(5, have_plots, True)
        res = await client_2.get_plots()
        num_plots = len(res["plots"])
        assert num_plots > 0

        # Create one plot in an unregistered subdirectory; it must not be
        # visible until that directory is added to the harvester.
        plot_dir = get_plot_dir() / "subdir"
        plot_dir.mkdir(parents=True, exist_ok=True)
        plotter = DiskPlotter()
        filename = "test_farmer_harvester_rpc_plot.plot"
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename,
            18,
            stream_plot_info(bt.pool_pk, bt.farmer_pk, AugSchemeMPL.key_gen(bytes([4] * 32))),
            token_bytes(32),
            128,
            0,
            2000,
            0,
            False,
        )
        res_2 = await client_2.get_plots()
        assert len(res_2["plots"]) == num_plots

        assert len(await client_2.get_plot_directories()) == 1
        await client_2.add_plot_directory(str(plot_dir))
        assert len(await client_2.get_plot_directories()) == 2
        res_2 = await client_2.get_plots()
        assert len(res_2["plots"]) == num_plots + 1

        await client_2.delete_plot(str(plot_dir / filename))
        res_3 = await client_2.get_plots()
        assert len(res_3["plots"]) == num_plots
        await client_2.remove_plot_directory(str(plot_dir))
        assert len(await client_2.get_plot_directories()) == 1
    finally:
        # Checks that the RPC manages to stop the node
        client.close()
        client_2.close()
        await client.await_closed()
        await client_2.await_closed()
        await rpc_cleanup()
        await rpc_cleanup_2()
async def create_plots(
    args, keys: PlotKeys, root_path, use_datetime=True, test_private_keys: Optional[List] = None
) -> Tuple[Dict[bytes32, Path], Dict[bytes32, Path]]:
    """Create ``args.num`` plots, supporting both pool-key and pool-contract plots.

    Exactly one of ``keys.pool_public_key`` / ``keys.pool_contract_puzzle_hash``
    must be set; the choice determines the plot-id and memo encoding (pk vs ph
    variants) and whether a key taproot is included.

    Args:
        args: parsed CLI namespace (dirs, size, num, plotid/memo debug
            overrides, plotter tuning flags, exclude_final_dir, ...).
        keys: resolved farmer/pool keys for this run.
        root_path: chia root directory holding config.yaml.
        use_datetime: include a timestamp in generated plot filenames.
        test_private_keys: optional fixed keys, one per plot, for tests.

    Returns:
        (created_plots, existing_plots): plot_id -> final path for plots this
        call created vs. plots that already existed on disk.
    """
    config_filename = config_path_for_filename(root_path, "config.yaml")
    config = load_config(root_path, config_filename)
    if args.tmp2_dir is None:
        args.tmp2_dir = args.tmp_dir
    # Exactly one pooling mode must be selected.
    assert (keys.pool_public_key is None) != (keys.pool_contract_puzzle_hash is None)
    num = args.num
    if args.size < config["min_mainnet_k_size"] and test_private_keys is None:
        log.warning(f"Creating plots with size k={args.size}, which is less than the minimum required for mainnet")
    if args.size < 22:
        log.warning("k under 22 is not supported. Increasing k to 22")
        args.size = 22
    if keys.pool_public_key is not None:
        log.info(
            f"Creating {num} plots of size {args.size}, pool public key: "
            f"{bytes(keys.pool_public_key).hex()} farmer public key: {bytes(keys.farmer_public_key).hex()}"
        )
    else:
        assert keys.pool_contract_puzzle_hash is not None
        log.info(
            f"Creating {num} plots of size {args.size}, pool contract address: "
            f"{keys.pool_contract_address} farmer public key: {bytes(keys.farmer_public_key).hex()}"
        )
    # Track which temp dirs we created so we only remove those at the end.
    tmp_dir_created = False
    if not args.tmp_dir.exists():
        mkdir(args.tmp_dir)
        tmp_dir_created = True
    tmp2_dir_created = False
    if not args.tmp2_dir.exists():
        mkdir(args.tmp2_dir)
        tmp2_dir_created = True
    mkdir(args.final_dir)
    created_plots: Dict[bytes32, Path] = {}
    existing_plots: Dict[bytes32, Path] = {}
    for i in range(num):
        # Generate a random master secret key
        if test_private_keys is not None:
            assert len(test_private_keys) == num
            sk: PrivateKey = test_private_keys[i]
        else:
            sk = AugSchemeMPL.key_gen(token_bytes(32))
        # The plot public key is the combination of the harvester and farmer keys
        # New plots will also include a taproot of the keys, for extensibility
        include_taproot: bool = keys.pool_contract_puzzle_hash is not None
        plot_public_key = ProofOfSpace.generate_plot_public_key(
            master_sk_to_local_sk(sk).get_g1(), keys.farmer_public_key, include_taproot
        )
        # The plot id is based on the harvester, farmer, and pool keys
        if keys.pool_public_key is not None:
            plot_id: bytes32 = ProofOfSpace.calculate_plot_id_pk(keys.pool_public_key, plot_public_key)
            plot_memo: bytes32 = stream_plot_info_pk(keys.pool_public_key, keys.farmer_public_key, sk)
        else:
            assert keys.pool_contract_puzzle_hash is not None
            plot_id = ProofOfSpace.calculate_plot_id_ph(keys.pool_contract_puzzle_hash, plot_public_key)
            plot_memo = stream_plot_info_ph(keys.pool_contract_puzzle_hash, keys.farmer_public_key, sk)
        # Debug overrides: force a specific plot id / memo from the CLI.
        if args.plotid is not None:
            log.info(f"Debug plot ID: {args.plotid}")
            plot_id = bytes32(bytes.fromhex(args.plotid))
        if args.memo is not None:
            log.info(f"Debug memo: {args.memo}")
            plot_memo = bytes32.fromhex(args.memo)
        # Uncomment next two lines if memo is needed for dev debug
        plot_memo_str: str = plot_memo.hex()
        log.info(f"Memo: {plot_memo_str}")
        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")
        if use_datetime:
            filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
        else:
            filename = f"plot-k{args.size}-{plot_id}.plot"
        full_path: Path = args.final_dir / filename
        resolved_final_dir: str = str(Path(args.final_dir).resolve())
        plot_directories_list: str = config["harvester"]["plot_directories"]
        if args.exclude_final_dir:
            log.info(f"NOT adding directory {resolved_final_dir} to harvester for farming")
            if resolved_final_dir in plot_directories_list:
                log.warning(f"Directory {resolved_final_dir} already exists for harvester, please remove it manually")
        else:
            if resolved_final_dir not in plot_directories_list:
                # Adds the directory to the plot directories if it is not present
                log.info(f"Adding directory {resolved_final_dir} to harvester for farming")
                config = add_plot_directory(root_path, resolved_final_dir)
        if not full_path.exists():
            log.info(f"Starting plot {i + 1}/{num}")
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                plot_memo,
                plot_id,
                args.buffer,
                args.buckets,
                args.stripe_size,
                args.num_threads,
                args.nobitfield,
            )
            created_plots[plot_id] = full_path
        else:
            log.info(f"Plot (unknown) already exists")
            existing_plots[plot_id] = full_path
    log.info("Summary:")
    # Best-effort removal of temp dirs we created; rmdir fails if non-empty.
    if tmp_dir_created:
        try:
            args.tmp_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty.")
    if tmp2_dir_created:
        try:
            args.tmp2_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty.")
    log.info(f"Created a total of {len(created_plots)} new plots")
    for created_path in created_plots.values():
        log.info(created_path.name)
    return created_plots, existing_plots
from blspy import AugSchemeMPL from src.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_for_pk from src.util.ints import uint32 from src.wallet.derive_keys import master_sk_to_wallet_sk MASTER_KEY = AugSchemeMPL.key_gen(bytes([1] * 32)) def puzzle_program_for_index(index: uint32): return puzzle_for_pk( bytes(master_sk_to_wallet_sk(MASTER_KEY, index).get_g1()))
async def test1(self, simulation):
    """Farmer/harvester RPC integration test (pool-state variant).

    Covers: signage points, pk- and ph-encoded plots (including a duplicate
    plot id in a subdirectory that must not be double-farmed), farmer
    ``get_plots`` aggregation, reward-target get/set with validation, pool
    config/state propagation, and ``set_payout_instructions``.
    """
    test_rpc_port = uint16(21522)
    test_rpc_port_2 = uint16(21523)
    harvester, farmer_api = simulation

    def stop_node_cb():
        pass

    def stop_node_cb_2():
        pass

    config = bt.config
    hostname = config["self_hostname"]
    daemon_port = config["daemon_port"]
    farmer_rpc_api = FarmerRpcApi(farmer_api.farmer)
    harvester_rpc_api = HarvesterRpcApi(harvester)
    rpc_cleanup = await start_rpc_server(
        farmer_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port,
        stop_node_cb,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    rpc_cleanup_2 = await start_rpc_server(
        harvester_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port_2,
        stop_node_cb_2,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    try:
        client = await FarmerRpcClient.create(self_hostname, test_rpc_port, bt.root_path, config)
        client_2 = await HarvesterRpcClient.create(self_hostname, test_rpc_port_2, bt.root_path, config)

        async def have_connections():
            return len(await client.get_connections()) > 0

        await time_out_assert(15, have_connections, True)

        assert (await client.get_signage_point(std_hash(b"2"))) is None
        assert len(await client.get_signage_points()) == 0

        async def have_signage_points():
            return len(await client.get_signage_points()) > 0

        sp = farmer_protocol.NewSignagePoint(
            std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
        )
        await farmer_api.new_signage_point(sp)
        await time_out_assert(5, have_signage_points, True)
        assert (await client.get_signage_point(std_hash(b"2"))) is not None

        async def have_plots():
            return len((await client_2.get_plots())["plots"]) > 0

        await time_out_assert(5, have_plots, True)
        res = await client_2.get_plots()
        num_plots = len(res["plots"])
        assert num_plots > 0

        plot_dir = get_plot_dir() / "subdir"
        plot_dir.mkdir(parents=True, exist_ok=True)
        plot_dir_sub = get_plot_dir() / "subdir" / "subsubdir"
        plot_dir_sub.mkdir(parents=True, exist_ok=True)
        plotter = DiskPlotter()
        filename = "test_farmer_harvester_rpc_plot.plot"
        filename_2 = "test_farmer_harvester_rpc_plot2.plot"
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename,
            18,
            stream_plot_info_pk(bt.pool_pk, bt.farmer_pk, AugSchemeMPL.key_gen(bytes([4] * 32))),
            token_bytes(32),
            128,
            0,
            2000,
            0,
            False,
        )
        # Making a plot with a puzzle hash encoded into it instead of pk
        plot_id_2 = token_bytes(32)
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )
        # Making the same plot, in a different dir. This should not be farmed
        plotter.create_plot_disk(
            str(plot_dir_sub),
            str(plot_dir_sub),
            str(plot_dir_sub),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )
        res_2 = await client_2.get_plots()
        assert len(res_2["plots"]) == num_plots

        # Test farmer get_plots
        farmer_res = await client.get_plots()
        assert len(list(farmer_res.values())[0]["plots"]) == num_plots

        assert len(await client_2.get_plot_directories()) == 1
        await client_2.add_plot_directory(str(plot_dir))
        await client_2.add_plot_directory(str(plot_dir_sub))
        assert len(await client_2.get_plot_directories()) == 3
        res_2 = await client_2.get_plots()
        # Only 2 new plots are farmed: the duplicate plot id counts once.
        assert len(res_2["plots"]) == num_plots + 2
        await client_2.delete_plot(str(plot_dir / filename))
        await client_2.delete_plot(str(plot_dir / filename_2))
        await client_2.refresh_plots()
        res_3 = await client_2.get_plots()
        assert len(res_3["plots"]) == num_plots + 1
        await client_2.remove_plot_directory(str(plot_dir))
        assert len(await client_2.get_plot_directories()) == 2

        # Reward targets: secret-key flags only appear when requested.
        targets_1 = await client.get_reward_targets(False)
        assert "have_pool_sk" not in targets_1
        assert "have_farmer_sk" not in targets_1
        targets_2 = await client.get_reward_targets(True)
        assert targets_2["have_pool_sk"] and targets_2["have_farmer_sk"]
        new_ph: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.farmer_master_sk, uint32(10)).get_g1())
        new_ph_2: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.pool_master_sk, uint32(472)).get_g1())
        await client.set_reward_targets(encode_puzzle_hash(new_ph, "xch"), encode_puzzle_hash(new_ph_2, "xch"))
        targets_3 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_3["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_3["pool_target"]) == new_ph_2
        assert targets_3["have_pool_sk"] and targets_3["have_farmer_sk"]
        # An address derived at index 1888 is beyond the scanned key range, so
        # have_pool_sk flips to False after it is set.
        new_ph_3: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.pool_master_sk, uint32(1888)).get_g1())
        await client.set_reward_targets(None, encode_puzzle_hash(new_ph_3, "xch"))
        targets_4 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_4["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_4["pool_target"]) == new_ph_3
        assert not targets_4["have_pool_sk"] and targets_3["have_farmer_sk"]
        # Changes must have been persisted to config.yaml.
        root_path = farmer_api.farmer._root_path
        config = load_config(root_path, "config.yaml")
        assert config["farmer"]["xch_target_address"] == encode_puzzle_hash(new_ph, "xch")
        assert config["pool"]["xch_target_address"] == encode_puzzle_hash(new_ph_3, "xch")
        # Malformed bech32 addresses must be rejected.
        new_ph_3_encoded = encode_puzzle_hash(new_ph_3, "xch")
        added_char = new_ph_3_encoded + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, added_char)
        replaced_char = new_ph_3_encoded[0:-1] + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, replaced_char)

        # Pool state: inject a pool config, reload, and verify it round-trips.
        assert len((await client.get_pool_state())["pool_state"]) == 0
        all_sks = farmer_api.farmer.keychain.get_all_private_keys()
        auth_sk = master_sk_to_pooling_authentication_sk(all_sks[0][0], 2, 1)
        pool_list = [
            {
                "launcher_id": "ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa",
                "authentication_public_key": bytes(auth_sk.get_g1()).hex(),
                "owner_public_key": "84c3fcf9d5581c1ddc702cb0f3b4a06043303b334dd993ab42b2c320ebfa98e5ce558448615b3f69638ba92cf7f43da5",
                "payout_instructions": "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8",
                "pool_url": "localhost",
                "p2_singleton_puzzle_hash": "16e4bac26558d315cded63d4c5860e98deb447cc59146dd4de06ce7394b14f17",
                "target_puzzle_hash": "344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58",
            }
        ]
        config["pool"]["pool_list"] = pool_list
        save_config(root_path, "config.yaml", config)
        await farmer_api.farmer.update_pool_state()
        pool_state = (await client.get_pool_state())["pool_state"]
        assert len(pool_state) == 1
        assert (
            pool_state[0]["pool_config"]["payout_instructions"]
            == "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8"
        )
        await client.set_payout_instructions(hexstr_to_bytes(pool_state[0]["pool_config"]["launcher_id"]), "1234vy")
        pool_state = (await client.get_pool_state())["pool_state"]
        assert pool_state[0]["pool_config"]["payout_instructions"] == "1234vy"
    finally:
        # Checks that the RPC manages to stop the node
        client.close()
        client_2.close()
        await client.await_closed()
        await client_2.await_closed()
        await rpc_cleanup()
        await rpc_cleanup_2()
async def test_puzzle_store(self):
    """Exercise WalletPuzzleStore: derivation-path insertion and lookups.

    Inserts 1000 index pairs (two wallets per index), then verifies puzzle
    hash existence, pubkey/puzzle-hash index lookups, derivation-record
    retrieval, and the used-up-to bookkeeping.

    Fixes: cleanup (clear DB, close, unlink) previously appeared twice — once
    in an ``except`` handler and once on the success path — and re-raised
    with ``raise e`` (which rewrites the traceback origin); it now runs once
    in ``finally`` with a bare ``raise`` preserving the original traceback.
    Dead code (a no-op ``[t for t in WalletType]`` and its commented-out
    twin) has been removed.
    """
    db_filename = Path("puzzle_store_test.db")
    if db_filename.exists():
        db_filename.unlink()
    con = await aiosqlite.connect(db_filename)
    db = await WalletPuzzleStore.create(con)
    try:
        # Two records per index: one STANDARD_WALLET (wallet id 1) and one
        # RATE_LIMITED (wallet id 2), with random puzzle hashes and pubkeys.
        derivation_recs = []
        for i in range(1000):
            derivation_recs.append(
                DerivationRecord(
                    uint32(i),
                    token_bytes(32),
                    AugSchemeMPL.key_gen(token_bytes(32)).get_g1(),
                    WalletType.STANDARD_WALLET,
                    uint32(1),
                )
            )
            derivation_recs.append(
                DerivationRecord(
                    uint32(i),
                    token_bytes(32),
                    AugSchemeMPL.key_gen(token_bytes(32)).get_g1(),
                    WalletType.RATE_LIMITED,
                    uint32(2),
                )
            )
        # Everything should be absent before any paths are added.
        assert await db.puzzle_hash_exists(derivation_recs[0].puzzle_hash) is False
        assert await db.index_for_pubkey(derivation_recs[0].pubkey) is None
        assert await db.index_for_puzzle_hash(derivation_recs[2].puzzle_hash) is None
        assert await db.wallet_info_for_puzzle_hash(derivation_recs[2].puzzle_hash) is None
        assert len((await db.get_all_puzzle_hashes())) == 0
        assert await db.get_last_derivation_path() is None
        assert await db.get_unused_derivation_path() is None
        assert await db.get_derivation_record(0, 2) is None

        await db.add_derivation_paths(derivation_recs)

        assert await db.puzzle_hash_exists(derivation_recs[0].puzzle_hash) is True

        # one_of_puzzle_hashes_exists: any known hash in the list suffices.
        phs_1 = [derivation_recs[0].puzzle_hash]
        phs_2 = [32 * bytes([1]), derivation_recs[0].puzzle_hash]
        phs_3 = [derivation_recs[0].puzzle_hash, 32 * bytes([1])]
        phs_4 = [32 * bytes([1]), 32 * bytes([2])]
        phs_5 = []
        assert await db.one_of_puzzle_hashes_exists(phs_1) is True
        assert await db.one_of_puzzle_hashes_exists(phs_2) is True
        assert await db.one_of_puzzle_hashes_exists(phs_3) is True
        assert await db.one_of_puzzle_hashes_exists(phs_4) is False
        assert await db.one_of_puzzle_hashes_exists(phs_5) is False

        # Lookups: recs[4] is index 2 (third pair), recs[2] is index 1.
        assert await db.index_for_pubkey(derivation_recs[4].pubkey) == 2
        assert await db.index_for_puzzle_hash(derivation_recs[2].puzzle_hash) == 1
        assert await db.wallet_info_for_puzzle_hash(derivation_recs[2].puzzle_hash) == (
            derivation_recs[2].wallet_id,
            derivation_recs[2].wallet_type,
        )
        assert len((await db.get_all_puzzle_hashes())) == 2000
        assert await db.get_last_derivation_path() == 999
        assert await db.get_unused_derivation_path() == 0
        assert await db.get_derivation_record(0, 2) == derivation_recs[1]

        # Indeces up to 250
        await db.set_used_up_to(249)
        assert await db.get_unused_derivation_path() == 250
    finally:
        await db._clear_database()
        await db.close()
        db_filename.unlink()
async def test1(self, two_nodes):
    """Full node RPC integration test.

    Feeds blocks into node 1, then exercises the FullNodeRpcClient surface:
    blockchain state, block/record queries, coin-record queries (by puzzle
    hash, parent id, spent/unspent, height ranges), mempool queries around a
    pushed spend bundle, and peer-connection management against node 2.
    """
    num_blocks = 5
    test_rpc_port = uint16(21522)
    nodes, _ = two_nodes
    full_node_api_1, full_node_api_2 = nodes
    server_1 = full_node_api_1.full_node.server
    server_2 = full_node_api_2.full_node.server

    def stop_node_cb():
        full_node_api_1._close()
        server_1.close_all()

    full_node_rpc_api = FullNodeRpcApi(full_node_api_1.full_node)
    config = bt.config
    hostname = config["self_hostname"]
    daemon_port = config["daemon_port"]
    rpc_cleanup = await start_rpc_server(
        full_node_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port,
        stop_node_cb,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    try:
        client = await FullNodeRpcClient.create(self_hostname, test_rpc_port, bt.root_path, config)

        # Fresh chain: no peak yet.
        state = await client.get_blockchain_state()
        assert state["peak"] is None
        assert not state["sync"]["sync_mode"]
        assert state["difficulty"] > 0
        assert state["sub_slot_iters"] > 0

        blocks = bt.get_consecutive_blocks(num_blocks)
        blocks = bt.get_consecutive_blocks(num_blocks, block_list_input=blocks, guarantee_transaction_block=True)

        assert len(await client.get_unfinished_block_headers()) == 0
        assert len((await client.get_block_records(0, 100))) == 0
        for block in blocks:
            # Overflow blocks exclude the last finished sub-slot from the
            # unfinished block's sub-slot list.
            if is_overflow_block(test_constants, block.reward_chain_block.signage_point_index):
                finished_ss = block.finished_sub_slots[:-1]
            else:
                finished_ss = block.finished_sub_slots
            unf = UnfinishedBlock(
                finished_ss,
                block.reward_chain_block.get_unfinished(),
                block.challenge_chain_sp_proof,
                block.reward_chain_sp_proof,
                block.foliage,
                block.foliage_transaction_block,
                block.transactions_info,
                block.transactions_generator,
                [],
            )
            await full_node_api_1.full_node.respond_unfinished_block(
                full_node_protocol.RespondUnfinishedBlock(unf), None
            )
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block), None)

        assert len(await client.get_unfinished_block_headers()) > 0
        assert len(await client.get_all_block(0, 2)) == 2
        state = await client.get_blockchain_state()

        block = await client.get_block(state["peak"].header_hash)
        assert block == blocks[-1]
        assert (await client.get_block(bytes([1] * 32))) is None
        assert (await client.get_block_record_by_height(2)).header_hash == blocks[2].header_hash
        assert len((await client.get_block_records(0, 100))) == num_blocks * 2
        assert (await client.get_block_record_by_height(100)) is None

        # Coin-record queries on the peak's reward coins.
        ph = list(blocks[-1].get_included_reward_coins())[0].puzzle_hash
        coins = await client.get_coin_records_by_puzzle_hash(ph)
        print(coins)
        assert len(coins) >= 1
        pid = list(blocks[-1].get_included_reward_coins())[0].parent_coin_info
        pid_2 = list(blocks[-1].get_included_reward_coins())[1].parent_coin_info
        coins = await client.get_coin_records_by_parent_ids([pid, pid_2])
        print(coins)
        assert len(coins) == 2
        additions, removals = await client.get_additions_and_removals(blocks[-1].header_hash)
        assert len(additions) >= 2 and len(removals) == 0

        # Farm reward coins to wallet-controlled puzzle hashes, then spend one.
        wallet = WalletTool(full_node_api_1.full_node.constants)
        wallet_receiver = WalletTool(full_node_api_1.full_node.constants, AugSchemeMPL.key_gen(std_hash(b"123123")))
        ph = wallet.get_new_puzzlehash()
        ph_2 = wallet.get_new_puzzlehash()
        ph_receiver = wallet_receiver.get_new_puzzlehash()
        assert len(await client.get_coin_records_by_puzzle_hash(ph)) == 0
        assert len(await client.get_coin_records_by_puzzle_hash(ph_receiver)) == 0
        blocks = bt.get_consecutive_blocks(
            2,
            block_list_input=blocks,
            guarantee_transaction_block=True,
            farmer_reward_puzzle_hash=ph,
            pool_reward_puzzle_hash=ph,
        )
        for block in blocks[-2:]:
            # NOTE(review): unlike the earlier loop this call omits the peer
            # argument (`, None`) — confirm respond_block's signature.
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        assert len(await client.get_coin_records_by_puzzle_hash(ph)) == 2
        assert len(await client.get_coin_records_by_puzzle_hash(ph_receiver)) == 0
        coin_to_spend = list(blocks[-1].get_included_reward_coins())[0]
        spend_bundle = wallet.generate_signed_transaction(coin_to_spend.amount, ph_receiver, coin_to_spend)

        # Mempool: empty before push, exactly one item after.
        assert len(await client.get_all_mempool_items()) == 0
        assert len(await client.get_all_mempool_tx_ids()) == 0
        assert (await client.get_mempool_item_by_tx_id(spend_bundle.name())) is None
        await client.push_tx(spend_bundle)
        coin = spend_bundle.additions()[0]
        assert len(await client.get_all_mempool_items()) == 1
        assert len(await client.get_all_mempool_tx_ids()) == 1
        assert (
            SpendBundle.from_json_dict(list((await client.get_all_mempool_items()).values())[0]["spend_bundle"])
            == spend_bundle
        )
        assert (await client.get_all_mempool_tx_ids())[0] == spend_bundle.name()
        assert (
            SpendBundle.from_json_dict((await client.get_mempool_item_by_tx_id(spend_bundle.name()))["spend_bundle"])
            == spend_bundle
        )
        assert (await client.get_coin_record_by_name(coin.name())) is None

        # Farm a block to confirm the spend; coin becomes queryable.
        await full_node_api_1.farm_new_transaction_block(FarmNewBlockProtocol(ph_2))
        assert (await client.get_coin_record_by_name(coin.name())).coin == coin
        assert len(await client.get_coin_records_by_puzzle_hash(ph_receiver)) == 1
        assert len(list(filter(lambda cr: not cr.spent, (await client.get_coin_records_by_puzzle_hash(ph))))) == 3
        assert len(await client.get_coin_records_by_puzzle_hashes([ph_receiver, ph])) == 5
        # (include_spent_coins, start_height, end_height) filters:
        assert len(await client.get_coin_records_by_puzzle_hash(ph, False)) == 3
        assert len(await client.get_coin_records_by_puzzle_hash(ph, True)) == 4
        assert len(await client.get_coin_records_by_puzzle_hash(ph, True, 0, 100)) == 4
        assert len(await client.get_coin_records_by_puzzle_hash(ph, True, 50, 100)) == 0
        assert len(await client.get_coin_records_by_puzzle_hash(ph, True, 0, blocks[-1].height + 1)) == 2
        assert len(await client.get_coin_records_by_puzzle_hash(ph, True, 0, 1)) == 0

        # Peer-connection management against the second node.
        assert len(await client.get_connections()) == 0
        await client.open_connection(self_hostname, server_2._port)

        async def num_connections():
            return len(await client.get_connections())

        await time_out_assert(10, num_connections, 1)
        connections = await client.get_connections()
        # NOTE(review): compares a NodeType enum to `.value` — only valid if
        # NodeType is an IntEnum; confirm, else compare to NodeType.FULL_NODE.
        assert NodeType(connections[0]["type"]) == NodeType.FULL_NODE.value
        assert len(await client.get_connections(NodeType.FULL_NODE)) == 1
        assert len(await client.get_connections(NodeType.FARMER)) == 0
        await client.close_connection(connections[0]["node_id"])
        await time_out_assert(10, num_connections, 0)
    finally:
        # Checks that the RPC manages to stop the node
        client.close()
        await client.await_closed()
        await rpc_cleanup()
async def test1(self, simulation):
    """Farmer/harvester RPC integration test (reward-target variant).

    Same plot/directory coverage as the pool-state variant, but ends with
    reward-target get/set and bech32 address validation instead of pool
    state checks.
    """
    test_rpc_port = uint16(21522)
    test_rpc_port_2 = uint16(21523)
    harvester, farmer_api = simulation

    def stop_node_cb():
        pass

    def stop_node_cb_2():
        pass

    config = bt.config
    hostname = config["self_hostname"]
    daemon_port = config["daemon_port"]
    farmer_rpc_api = FarmerRpcApi(farmer_api.farmer)
    harvester_rpc_api = HarvesterRpcApi(harvester)
    rpc_cleanup = await start_rpc_server(
        farmer_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port,
        stop_node_cb,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    rpc_cleanup_2 = await start_rpc_server(
        harvester_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port_2,
        stop_node_cb_2,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    try:
        client = await FarmerRpcClient.create(self_hostname, test_rpc_port, bt.root_path, config)
        client_2 = await HarvesterRpcClient.create(self_hostname, test_rpc_port_2, bt.root_path, config)

        async def have_connections():
            return len(await client.get_connections()) > 0

        await time_out_assert(15, have_connections, True)

        assert (await client.get_signage_point(std_hash(b"2"))) is None
        assert len(await client.get_signage_points()) == 0

        async def have_signage_points():
            return len(await client.get_signage_points()) > 0

        sp = farmer_protocol.NewSignagePoint(
            std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
        )
        await farmer_api.new_signage_point(sp)
        await time_out_assert(5, have_signage_points, True)
        assert (await client.get_signage_point(std_hash(b"2"))) is not None

        async def have_plots():
            return len((await client_2.get_plots())["plots"]) > 0

        await time_out_assert(5, have_plots, True)
        res = await client_2.get_plots()
        num_plots = len(res["plots"])
        assert num_plots > 0

        plot_dir = get_plot_dir() / "subdir"
        plot_dir.mkdir(parents=True, exist_ok=True)
        plot_dir_sub = get_plot_dir() / "subdir" / "subsubdir"
        plot_dir_sub.mkdir(parents=True, exist_ok=True)
        plotter = DiskPlotter()
        filename = "test_farmer_harvester_rpc_plot.plot"
        filename_2 = "test_farmer_harvester_rpc_plot2.plot"
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename,
            18,
            stream_plot_info_pk(bt.pool_pk, bt.farmer_pk, AugSchemeMPL.key_gen(bytes([4] * 32))),
            token_bytes(32),
            128,
            0,
            2000,
            0,
            False,
        )
        # Making a plot with a puzzle hash encoded into it instead of pk
        plot_id_2 = token_bytes(32)
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )
        # Making the same plot, in a different dir. This should not be farmed
        plotter.create_plot_disk(
            str(plot_dir_sub),
            str(plot_dir_sub),
            str(plot_dir_sub),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )
        res_2 = await client_2.get_plots()
        assert len(res_2["plots"]) == num_plots

        assert len(await client_2.get_plot_directories()) == 1
        await client_2.add_plot_directory(str(plot_dir))
        await client_2.add_plot_directory(str(plot_dir_sub))
        assert len(await client_2.get_plot_directories()) == 3
        res_2 = await client_2.get_plots()
        # Duplicate plot id in the subdirectory is only counted once.
        assert len(res_2["plots"]) == num_plots + 2
        await client_2.delete_plot(str(plot_dir / filename))
        await client_2.delete_plot(str(plot_dir / filename_2))
        await client_2.refresh_plots()
        res_3 = await client_2.get_plots()
        assert len(res_3["plots"]) == num_plots + 1
        await client_2.remove_plot_directory(str(plot_dir))
        assert len(await client_2.get_plot_directories()) == 2

        # Reward targets: secret-key flags only appear when requested.
        targets_1 = await client.get_reward_targets(False)
        assert "have_pool_sk" not in targets_1
        assert "have_farmer_sk" not in targets_1
        targets_2 = await client.get_reward_targets(True)
        assert targets_2["have_pool_sk"] and targets_2["have_farmer_sk"]
        new_ph: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.farmer_master_sk, uint32(10)).get_g1())
        new_ph_2: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.pool_master_sk, uint32(472)).get_g1())
        await client.set_reward_targets(encode_puzzle_hash(new_ph, "xch"), encode_puzzle_hash(new_ph_2, "xch"))
        targets_3 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_3["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_3["pool_target"]) == new_ph_2
        assert targets_3["have_pool_sk"] and targets_3["have_farmer_sk"]
        # Index 1888 is beyond the scanned key range, so have_pool_sk flips.
        new_ph_3: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.pool_master_sk, uint32(1888)).get_g1())
        await client.set_reward_targets(None, encode_puzzle_hash(new_ph_3, "xch"))
        targets_4 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_4["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_4["pool_target"]) == new_ph_3
        assert not targets_4["have_pool_sk"] and targets_3["have_farmer_sk"]
        # Changes must have been persisted to config.yaml.
        root_path = farmer_api.farmer._root_path
        config = load_config(root_path, "config.yaml")
        assert config["farmer"]["xch_target_address"] == encode_puzzle_hash(new_ph, "xch")
        assert config["pool"]["xch_target_address"] == encode_puzzle_hash(new_ph_3, "xch")
        # Malformed bech32 addresses must be rejected.
        new_ph_3_encoded = encode_puzzle_hash(new_ph_3, "xch")
        added_char = new_ph_3_encoded + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, added_char)
        replaced_char = new_ph_3_encoded[0:-1] + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, replaced_char)
    finally:
        # Checks that the RPC manages to stop the node
        client.close()
        client_2.close()
        await client.await_closed()
        await client_2.await_closed()
        await rpc_cleanup()
        await rpc_cleanup_2()
def generate_taproot_sk(local_pk: G1Element, farmer_pk: G1Element) -> PrivateKey:
    """Derive the deterministic taproot secret key for a plot.

    The key is generated from a hash committing to both the local (harvester)
    and farmer public keys: the aggregate point followed by each individual
    key, so the same key pair always yields the same taproot secret key.
    """
    combined: G1Element = local_pk + farmer_pk
    commitment: bytes = bytes(combined) + bytes(local_pk) + bytes(farmer_pk)
    return AugSchemeMPL.key_gen(std_hash(commitment))
def rand_g2() -> G2Element:
    """Return a pseudo-random G2 element.

    Produced by signing a fixed message with a freshly generated random
    secret key, so the result is a valid signature-group element.
    """
    return AugSchemeMPL.sign(AugSchemeMPL.key_gen(rand_bytes(96)), b"foobar")
async def test1(self, simulation):
    """End-to-end test of the farmer and harvester RPC servers.

    Spins up RPC servers for the farmer and harvester from the ``simulation``
    fixture, then exercises, in order:
      * signage-point submission and retrieval,
      * plot listing, plus creation of extra plots (pk- and ph-encoded, and a
        duplicate in a sub-directory that must not be farmed),
      * the farmer's ``get_harvesters`` cache refresh,
      * reward-target get/set, including config.yaml persistence and rejection
        of malformed bech32 addresses,
      * pool-state configuration, payout-instruction updates, and pruning of
        points older than 24 hours on a new signage point.

    The ``finally`` block verifies the RPC clients/servers shut down cleanly.
    """
    test_rpc_port = uint16(21522)
    test_rpc_port_2 = uint16(21523)
    harvester_service, farmer_service = simulation
    harvester = harvester_service._node
    farmer_api = farmer_service._api

    def stop_node_cb():
        pass

    def stop_node_cb_2():
        pass

    config = bt.config
    hostname = config["self_hostname"]
    daemon_port = config["daemon_port"]
    farmer_rpc_api = FarmerRpcApi(farmer_api.farmer)
    harvester_rpc_api = HarvesterRpcApi(harvester)
    rpc_cleanup = await start_rpc_server(
        farmer_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port,
        stop_node_cb,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    rpc_cleanup_2 = await start_rpc_server(
        harvester_rpc_api,
        hostname,
        daemon_port,
        test_rpc_port_2,
        stop_node_cb_2,
        bt.root_path,
        config,
        connect_to_daemon=False,
    )
    try:
        client = await FarmerRpcClient.create(self_hostname, test_rpc_port, bt.root_path, config)
        client_2 = await HarvesterRpcClient.create(self_hostname, test_rpc_port_2, bt.root_path, config)
        await validate_get_routes(client, farmer_rpc_api)
        await validate_get_routes(client_2, harvester_rpc_api)

        async def have_connections():
            return len(await client.get_connections()) > 0

        await time_out_assert(15, have_connections, True)

        # No signage points exist until one is injected below.
        assert (await client.get_signage_point(std_hash(b"2"))) is None
        assert len(await client.get_signage_points()) == 0

        async def have_signage_points():
            return len(await client.get_signage_points()) > 0

        sp = farmer_protocol.NewSignagePoint(
            std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
        )
        await farmer_api.new_signage_point(sp)
        await time_out_assert(5, have_signage_points, True)
        assert (await client.get_signage_point(std_hash(b"2"))) is not None

        async def have_plots():
            return len((await client_2.get_plots())["plots"]) > 0

        await time_out_assert(5, have_plots, True)

        res = await client_2.get_plots()
        num_plots = len(res["plots"])
        assert num_plots > 0
        plot_dir = get_plot_dir() / "subdir"
        plot_dir.mkdir(parents=True, exist_ok=True)
        plot_dir_sub = get_plot_dir() / "subdir" / "subsubdir"
        plot_dir_sub.mkdir(parents=True, exist_ok=True)
        plotter = DiskPlotter()
        filename = "test_farmer_harvester_rpc_plot.plot"
        filename_2 = "test_farmer_harvester_rpc_plot2.plot"
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename,
            18,
            stream_plot_info_pk(bt.pool_pk, bt.farmer_pk, AugSchemeMPL.key_gen(bytes([4] * 32))),
            token_bytes(32),
            128,
            0,
            2000,
            0,
            False,
        )

        # Making a plot with a puzzle hash encoded into it instead of pk
        plot_id_2 = token_bytes(32)
        plotter.create_plot_disk(
            str(plot_dir),
            str(plot_dir),
            str(plot_dir),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )

        # Making the same plot, in a different dir. This should not be farmed
        plotter.create_plot_disk(
            str(plot_dir_sub),
            str(plot_dir_sub),
            str(plot_dir_sub),
            filename_2,
            18,
            stream_plot_info_ph(std_hash(b"random ph"), bt.farmer_pk, AugSchemeMPL.key_gen(bytes([5] * 32))),
            plot_id_2,
            128,
            0,
            2000,
            0,
            False,
        )

        # New plots are in unregistered directories, so the count is unchanged.
        res_2 = await client_2.get_plots()
        assert len(res_2["plots"]) == num_plots

        # Reset cache and force updates cache every second to make sure the
        # farmer gets the most recent data
        update_interval_before = farmer_api.farmer.update_harvester_cache_interval
        farmer_api.farmer.update_harvester_cache_interval = 1
        farmer_api.farmer.harvester_cache = {}

        # Test farmer get_harvesters
        async def test_get_harvesters():
            harvester.plot_manager.trigger_refresh()
            await time_out_assert(5, harvester.plot_manager.needs_refresh, value=False)
            farmer_res = await client.get_harvesters()
            if len(list(farmer_res["harvesters"])) != 1:
                log.error(f"test_get_harvesters: invalid harvesters {list(farmer_res['harvesters'])}")
                return False
            if len(list(farmer_res["harvesters"][0]["plots"])) != num_plots:
                log.error(f"test_get_harvesters: invalid plots {list(farmer_res['harvesters'])}")
                return False
            return True

        await time_out_assert_custom_interval(30, 1, test_get_harvesters)

        # Reset cache and reset update interval to avoid hitting the rate limit
        farmer_api.farmer.update_harvester_cache_interval = update_interval_before
        farmer_api.farmer.harvester_cache = {}

        # Without search_for_private_key the sk fields are absent entirely.
        targets_1 = await client.get_reward_targets(False)
        assert "have_pool_sk" not in targets_1
        assert "have_farmer_sk" not in targets_1
        targets_2 = await client.get_reward_targets(True)
        assert targets_2["have_pool_sk"] and targets_2["have_farmer_sk"]

        new_ph: bytes32 = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(bt.farmer_master_sk, uint32(10)).get_g1())
        new_ph_2: bytes32 = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(bt.pool_master_sk, uint32(472)).get_g1())
        await client.set_reward_targets(
            encode_puzzle_hash(new_ph, "xch"), encode_puzzle_hash(new_ph_2, "xch"))
        targets_3 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_3["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_3["pool_target"]) == new_ph_2
        assert targets_3["have_pool_sk"] and targets_3["have_farmer_sk"]

        # Index 1888 is beyond the derived-key search range, so the pool sk
        # should no longer be found.
        new_ph_3: bytes32 = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(bt.pool_master_sk, uint32(1888)).get_g1())
        await client.set_reward_targets(None, encode_puzzle_hash(new_ph_3, "xch"))
        targets_4 = await client.get_reward_targets(True)
        assert decode_puzzle_hash(targets_4["farmer_target"]) == new_ph
        assert decode_puzzle_hash(targets_4["pool_target"]) == new_ph_3
        # Fix: check targets_4 (the state after the update), not the stale
        # targets_3 response, for the farmer sk flag.
        assert not targets_4["have_pool_sk"] and targets_4["have_farmer_sk"]

        # The new targets must also have been persisted to config.yaml.
        root_path = farmer_api.farmer._root_path
        config = load_config(root_path, "config.yaml")
        assert config["farmer"]["xch_target_address"] == encode_puzzle_hash(new_ph, "xch")
        assert config["pool"]["xch_target_address"] == encode_puzzle_hash(new_ph_3, "xch")

        # Malformed bech32 addresses (extra char, corrupted checksum) are rejected.
        new_ph_3_encoded = encode_puzzle_hash(new_ph_3, "xch")
        added_char = new_ph_3_encoded + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, added_char)
        replaced_char = new_ph_3_encoded[0:-1] + "a"
        with pytest.raises(ValueError):
            await client.set_reward_targets(None, replaced_char)

        assert len((await client.get_pool_state())["pool_state"]) == 0
        all_sks = farmer_api.farmer.local_keychain.get_all_private_keys()
        auth_sk = master_sk_to_pooling_authentication_sk(all_sks[0][0], 2, 1)
        pool_list = [{
            "launcher_id": "ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa",
            "authentication_public_key": bytes(auth_sk.get_g1()).hex(),
            "owner_public_key": "84c3fcf9d5581c1ddc702cb0f3b4a06043303b334dd993ab42b2c320ebfa98e5ce558448615b3f69638ba92cf7f43da5",  # noqa
            "payout_instructions": "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8",
            "pool_url": "localhost",
            "p2_singleton_puzzle_hash": "16e4bac26558d315cded63d4c5860e98deb447cc59146dd4de06ce7394b14f17",
            "target_puzzle_hash": "344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58",
        }]
        config["pool"]["pool_list"] = pool_list
        save_config(root_path, "config.yaml", config)
        await farmer_api.farmer.update_pool_state()

        pool_state = (await client.get_pool_state())["pool_state"]
        assert len(pool_state) == 1
        assert (
            pool_state[0]["pool_config"]["payout_instructions"]
            == "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8"
        )
        await client.set_payout_instructions(
            hexstr_to_bytes(pool_state[0]["pool_config"]["launcher_id"]), "1234vy")
        await farmer_api.farmer.update_pool_state()
        pool_state = (await client.get_pool_state())["pool_state"]
        assert pool_state[0]["pool_config"]["payout_instructions"] == "1234vy"

        now = time.time()
        # Big arbitrary numbers used to be unlikely to accidentally collide.
        before_24h = (now - (25 * 60 * 60), 29984713)
        since_24h = (now - (23 * 60 * 60), 93049817)
        for pool_dict in farmer_api.farmer.pool_state.values():
            for key in ["points_found_24h", "points_acknowledged_24h"]:
                pool_dict[key].insert(0, since_24h)
                pool_dict[key].insert(0, before_24h)

        # A new signage point should prune entries older than 24h, leaving
        # since_24h at the front of each list.
        sp = farmer_protocol.NewSignagePoint(
            std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
        )
        await farmer_api.new_signage_point(sp)
        client_pool_state = await client.get_pool_state()
        for pool_dict in client_pool_state["pool_state"]:
            for key in ["points_found_24h", "points_acknowledged_24h"]:
                assert pool_dict[key][0] == list(since_24h)
    finally:
        # Checks that the RPC manages to stop the node
        client.close()
        client_2.close()
        await client.await_closed()
        await client_2.await_closed()
        await rpc_cleanup()
        await rpc_cleanup_2()