def initialize_logging(prefix: str, logging_config: Dict):
    log_path = path_from_root(DEFAULT_ROOT_PATH, logging_config.get("log_filename", "log/debug.log"))
    mkdir(str(log_path.parent))
    if logging_config["log_stdout"]:
        handler = colorlog.StreamHandler()
        handler.setFormatter(
            colorlog.ColoredFormatter(
                f"{prefix}: %(log_color)s%(levelname)-8s%(reset)s %(asctime)s.%(msecs)03d %(message)s",
                datefmt="%H:%M:%S",
                reset=True,
            )
        )
        logger = colorlog.getLogger()
        logger.addHandler(handler)
    else:
        print(f"Starting process and logging to {log_path}. Run with & to run in the background.")
        logging.basicConfig(
            filename=log_path,
            filemode="a",
            format=f"{prefix}: %(levelname)-8s %(asctime)s.%(msecs)03d %(message)s",
            datefmt="%H:%M:%S",
        )
        logger = logging.getLogger()
    logger.setLevel(logging.INFO)
def launch_plotter(root_path, service_name, service_array):
    # we need to pass on the possibly altered CHIA_ROOT
    os.environ["CHIA_ROOT"] = str(root_path)
    service_executable = executable_for_service(service_array[0])

    # Swap service name with name of executable
    service_array[0] = service_executable
    startupinfo = None
    if os.name == "nt":
        startupinfo = subprocess.STARTUPINFO()  # type: ignore
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # type: ignore
    plotter_path = plotter_log_path(root_path)
    if plotter_path.parent.exists():
        if plotter_path.exists():
            plotter_path.unlink()
    else:
        mkdir(plotter_path.parent)
    outfile = open(plotter_path.resolve(), "w")
    log.info(f"Service array: {service_array}")
    process = subprocess.Popen(service_array, shell=False, stdout=outfile, startupinfo=startupinfo)

    pid_path = pid_path_for_service(root_path, service_name)
    try:
        mkdir(pid_path.parent)
        with open(pid_path, "w") as f:
            f.write(f"{process.pid}\n")
    except Exception:
        pass
    return process, pid_path
def service_kwargs_for_full_node_simulator(
    root_path: Path,
    config: Dict,
    consensus_constants: ConsensusConstants,
    bt: BlockTools,
) -> Dict:
    mkdir(path_from_root(root_path, config["database_path"]).parent)
    genesis_challenge = bytes32(
        bytes.fromhex(config["network_genesis_challenges"][config["selected_network"]])
    )

    node = FullNode(
        config,
        root_path=root_path,
        consensus_constants=consensus_constants,
        name=SERVICE_NAME,
    )

    peer_api = FullNodeSimulator(node, bt)

    kwargs = dict(
        root_path=root_path,
        node=node,
        peer_api=peer_api,
        node_type=NodeType.FULL_NODE,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        on_connect_callback=node.on_connect,
        rpc_info=(FullNodeRpcApi, config["rpc_port"]),
        network_id=genesis_challenge,
    )
    return kwargs
def service_kwargs_for_full_node_simulator(root_path: Path, config: Dict, bt: BlockTools) -> Dict:
    mkdir(path_from_root(root_path, config["database_path"]).parent)
    constants = bt.constants

    node = FullNode(
        config,
        root_path=root_path,
        consensus_constants=constants,
        name=SERVICE_NAME,
    )

    peer_api = FullNodeSimulator(node, bt)

    kwargs = dict(
        root_path=root_path,
        node=node,
        peer_api=peer_api,
        node_type=NodeType.FULL_NODE,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        on_connect_callback=node.on_connect,
        rpc_info=(FullNodeRpcApi, config["rpc_port"]),
        network_id=constants.GENESIS_CHALLENGE,
    )
    return kwargs
def launch_service(root_path, service_command):
    """
    Launch a child process.
    """
    # set up CHIA_ROOT
    # invoke correct script
    # save away PID

    # we need to pass on the possibly altered CHIA_ROOT
    os.environ["CHIA_ROOT"] = str(root_path)

    # Insert the proper executable for this service
    service_array = service_command.split()
    service_executable = executable_for_service(service_array[0])
    service_array[0] = service_executable

    startupinfo = None
    if os.name == "nt":
        startupinfo = subprocess.STARTUPINFO()  # type: ignore
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # type: ignore
    process = subprocess.Popen(service_array, shell=False, startupinfo=startupinfo)
    pid_path = pid_path_for_service(root_path, service_command)
    try:
        mkdir(pid_path.parent)
        with open(pid_path, "w") as f:
            f.write(f"{process.pid}\n")
    except Exception:
        pass
    return process, pid_path
def create_default_chia_config(root_path: Path) -> None:
    for filename in ["config.yaml"]:
        default_config_file_data = initial_config_file(filename)
        path = config_path_for_filename(root_path, filename)
        mkdir(path.parent)
        with open(path, "w") as f:
            f.write(default_config_file_data)
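# A small usage sketch (not part of the original code) for create_default_chia_config()
# above, writing a fresh config into a throwaway root. It assumes, as the plots.yaml
# handling elsewhere in this file suggests, that config_path_for_filename() resolves
# to root_path / "config" / filename.
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    root = Path(tmp)
    create_default_chia_config(root)
    assert (root / "config" / "config.yaml").exists()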
def initialize_logging(prefix: str, logging_config: Dict, root_path: Path):
    log_path = path_from_root(root_path, logging_config.get("log_filename", "log/debug.log"))
    mkdir(str(log_path.parent))
    if logging_config["log_stdout"]:
        handler = colorlog.StreamHandler()
        handler.setFormatter(
            colorlog.ColoredFormatter(
                f"{prefix}: %(log_color)s%(levelname)-8s%(reset)s %(asctime)s.%(msecs)03d %(message)s",
                datefmt="%H:%M:%S",
                reset=True,
            )
        )
        logger = colorlog.getLogger()
        logger.addHandler(handler)
    else:
        logger = logging.getLogger()
        # The rotating handler carries the format itself; calling logging.basicConfig
        # with a filename here as well would install a second FileHandler on the same
        # file and write every record twice.
        handler = RotatingFileHandler(log_path, maxBytes=20000000, backupCount=7)
        handler.setFormatter(
            logging.Formatter(
                fmt=f"{prefix}: %(levelname)-8s %(asctime)s.%(msecs)03d %(message)s",
                datefmt="%H:%M:%S",
            )
        )
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)
def persistent_blocks(
    num_of_blocks: int,
    db_name: str,
    seed: bytes = b"",
    empty_sub_slots=0,
    normalized_to_identity: bool = False,
):
    # try loading from disk; if not found, create a new blocks db file
    # TODO hash fixtures.py and blocktool.py, add to path, delete if the files changed
    block_path_dir = Path("~/.chia/blocks").expanduser()
    file_path = Path(f"~/.chia/blocks/{db_name}").expanduser()
    if not block_path_dir.exists():
        mkdir(block_path_dir.parent)
        mkdir(block_path_dir)

    if file_path.exists():
        try:
            bytes_list = file_path.read_bytes()
            block_bytes_list: List[bytes] = pickle.loads(bytes_list)
            blocks: List[FullBlock] = []
            for block_bytes in block_bytes_list:
                blocks.append(FullBlock.from_bytes(block_bytes))
            if len(blocks) == num_of_blocks:
                print(f"\n loaded {file_path} with {len(blocks)} blocks")
                return blocks
        except EOFError:
            print("\n error reading db file")

    return new_test_db(file_path, num_of_blocks, seed, empty_sub_slots, normalized_to_identity)
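# The cache read by persistent_blocks() above is just a pickled list of
# serialized FullBlocks. A minimal sketch of the matching write side, assuming
# FullBlock round-trips through bytes()/from_bytes() as the reader implies;
# save_block_cache is a hypothetical helper, not part of the original code.
import pickle
from pathlib import Path
from typing import List


def save_block_cache(file_path: Path, blocks: List["FullBlock"]) -> None:
    # serialize each block, then pickle the whole list in one write
    block_bytes_list: List[bytes] = [bytes(block) for block in blocks]
    file_path.write_bytes(pickle.dumps(block_bytes_list))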
def service_kwargs_for_full_node_simulator(
    root_path: Path,
    config: Dict,
    consensus_constants: ConsensusConstants,
    bt: BlockTools,
) -> Dict:
    mkdir(path_from_root(root_path, config["database_path"]).parent)

    api = FullNodeSimulator(
        config,
        root_path=root_path,
        consensus_constants=consensus_constants,
        name=SERVICE_NAME,
        bt=bt,
    )

    kwargs = dict(
        root_path=root_path,
        api=api,
        node_type=NodeType.FULL_NODE,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        on_connect_callback=api._on_connect,
        rpc_info=(FullNodeRpcApi, config["rpc_port"]),
    )
    return kwargs
def init_plots(self, root_path):
    plot_dir = get_plot_dir()
    mkdir(plot_dir)
    temp_dir = plot_dir / "tmp"
    mkdir(temp_dir)
    args = Namespace()
    # Can't go much lower than 20, since plots start having no solutions and become buggy
    args.size = 22
    # Use many plots for testing, in order to guarantee proofs of space at every height
    args.num = 20
    args.buffer = 100
    args.farmer_public_key = bytes(self.farmer_pk).hex()
    args.pool_public_key = bytes(self.pool_pk).hex()
    args.tmp_dir = temp_dir
    args.tmp2_dir = plot_dir
    args.final_dir = plot_dir
    args.plotid = None
    args.memo = None
    args.buckets = 0
    args.stripe_size = 2000
    args.num_threads = 0
    args.nobitfield = False
    args.exclude_final_dir = False
    test_private_keys = [AugSchemeMPL.key_gen(std_hash(i.to_bytes(2, "big"))) for i in range(args.num)]
    try:
        # No datetime in the filename, to get deterministic filenames and avoid re-plotting
        create_plots(
            args,
            root_path,
            use_datetime=False,
            test_private_keys=test_private_keys,
        )
    except KeyboardInterrupt:
        shutil.rmtree(plot_dir, ignore_errors=True)
        sys.exit(1)
async def _start(
    self,
    fingerprint: Optional[int] = None,
    new_wallet: bool = False,
    backup_file: Optional[Path] = None,
    skip_backup_import: bool = False,
) -> bool:
    private_key = self.get_key_for_fingerprint(fingerprint)
    if private_key is None:
        return False

    db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
    path = path_from_root(self.root_path, f"{self.config['database_path']}-{db_path_key_suffix}")
    mkdir(path.parent)

    self.wallet_state_manager = await WalletStateManager.create(private_key, self.config, path, self.constants)
    self.wsm_close_task = None

    assert self.wallet_state_manager is not None

    backup_settings: BackupInitialized = self.wallet_state_manager.user_settings.get_backup_settings()
    if backup_settings.user_initialized is False:
        if new_wallet is True:
            await self.wallet_state_manager.user_settings.user_created_new_wallet()
            self.wallet_state_manager.new_wallet = True
        elif skip_backup_import is True:
            await self.wallet_state_manager.user_settings.user_skipped_backup_import()
        elif backup_file is not None:
            await self.wallet_state_manager.import_backup_info(backup_file)
        else:
            self.backup_initialized = False
            await self.wallet_state_manager.close_all_stores()
            self.wallet_state_manager = None
            return False

    self.backup_initialized = True
    if backup_file is not None:
        json_dict = open_backup_file(backup_file, self.wallet_state_manager.private_key)
        if "start_height" in json_dict["data"]:
            start_height = json_dict["data"]["start_height"]
            self.config["starting_height"] = max(0, start_height - self.config["start_height_buffer"])
        else:
            self.config["starting_height"] = 0
    else:
        self.config["starting_height"] = 0

    if self.state_changed_callback is not None:
        self.wallet_state_manager.set_callback(self.state_changed_callback)

    self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
    self._shut_down = False

    self.peer_task = asyncio.create_task(self._periodically_check_full_node())
    self.sync_event = asyncio.Event()
    self.sync_task = asyncio.create_task(self.sync_job())
    self.log.info("Started sync_job")
    self.logged_in_fingerprint = fingerprint
    return True
def generate(args, parser):
    root_path = args.root_path
    keys_yaml = "keys.yaml"
    key_config_filename = config_path_for_filename(root_path, keys_yaml)
    if args.keys != ["keys"]:
        parser.print_help()
        print("\nTry `chia generate keys`")
        return 1
    if key_config_filename.exists():
        # If the file exists, warn the user
        yn = input(
            f"The keys file {key_config_filename} already exists. Are you sure"
            f" you want to override the keys? Plots might become invalid. (y/n): "
        )
        if not (yn.lower() == "y" or yn.lower() == "yes"):
            return 1
    else:
        # Create the file if it doesn't exist
        mkdir(key_config_filename.parent)
        open(key_config_filename, "a").close()

    key_config = load_config(root_path, keys_yaml)
    if key_config is None:
        key_config = {}

    wallet_target = None
    if args.wallet:
        wallet_sk = ExtendedPrivateKey.from_seed(token_bytes(32))
        wallet_target = create_puzzlehash_for_pk(
            BLSPublicKey(bytes(wallet_sk.public_child(0).get_public_key()))
        )
        key_config["wallet_sk"] = bytes(wallet_sk).hex()
        key_config["wallet_target"] = wallet_target.hex()
        save_config(root_path, keys_yaml, key_config)
    if args.harvester:
        # Replaces the harvester's sk seed. Used to generate plot private keys, which are
        # used to sign farmed blocks.
        key_config["sk_seed"] = token_bytes(32).hex()
        save_config(root_path, keys_yaml, key_config)
    if args.pool:
        # Replaces the pool's keys and targets. Only useful if running a pool, or doing
        # solo farming. The pool target allows spending of the coinbase.
        pool_sks = [PrivateKey.from_seed(token_bytes(32)) for _ in range(2)]
        if wallet_target is None:
            pool_target = create_puzzlehash_for_pk(
                BLSPublicKey(bytes(pool_sks[0].get_public_key()))
            )
        else:
            pool_target = wallet_target
        key_config["pool_sks"] = [bytes(pool_sk).hex() for pool_sk in pool_sks]
        key_config["pool_target"] = pool_target.hex()
        save_config(root_path, keys_yaml, key_config)
    if args.pooltarget:
        # Compute a new pool target and save it to the config
        assert "wallet_target" in key_config
        key_config["pool_target"] = key_config["wallet_target"]
        save_config(root_path, keys_yaml, key_config)
def copy_files_rec(old_path: Path, new_path: Path):
    if old_path.is_file():
        print(f"{new_path}")
        mkdir(new_path.parent)
        shutil.copy(old_path, new_path)
    elif old_path.is_dir():
        for old_path_child in old_path.iterdir():
            new_path_child = new_path / old_path_child.name
            copy_files_rec(old_path_child, new_path_child)
async def initialize_address_manager(self):
    mkdir(self.peer_db_path.parent)
    self.connection = await aiosqlite.connect(self.peer_db_path)
    self.address_manager_store = await AddressManagerStore.create(self.connection)
    if not await self.address_manager_store.is_empty():
        self.address_manager = await self.address_manager_store.deserialize()
    else:
        await self.address_manager_store.clear()
        self.address_manager = AddressManager()
    self.server.set_received_message_callback(self.update_peer_timestamp_on_message)
def create_default_chia_config(root_path: Path) -> None:
    for filename in ["config.yaml"]:
        default_config_file_data = initial_config_file(filename)
        path = config_path_for_filename(root_path, filename)
        mkdir(path.parent)
        with open(path, "w") as f:
            f.write(default_config_file_data)
    plot_yaml_path: Path = root_path / "config" / "plots.yaml"
    if not plot_yaml_path.exists():
        save_config(root_path, "plots.yaml", {"plots": {}})
async def initialize_address_manager(self):
    mkdir(self.peer_db_path.parent)
    self.connection = await aiosqlite.connect(self.peer_db_path)
    self.address_manager_store = await AddressManagerStore.create(self.connection)
    if not await self.address_manager_store.is_empty():
        self.address_manager = await self.address_manager_store.deserialize()
    else:
        await self.address_manager_store.clear()
        self.address_manager = AddressManager()
def copy_files_rec(old_path: Path, new_path: Path):
    if old_path.is_file():
        print(f"{new_path}")
        mkdir(new_path.parent)
        shutil.copy(old_path, new_path)
    elif old_path.is_dir():
        for old_path_child in old_path.iterdir():
            new_path_child = new_path / old_path_child.name
            copy_files_rec(old_path_child, new_path_child)
    else:
        print(f"{old_path} not found, skipping")
def initialize_logging(service_name: str, logging_config: Dict, root_path: Path):
    log_path = path_from_root(root_path, logging_config.get("log_filename", "log/debug.log"))
    log_date_format = "%Y-%m-%dT%H:%M:%S"

    mkdir(str(log_path.parent))
    file_name_length = 33 - len(service_name)
    if logging_config["log_stdout"]:
        handler = colorlog.StreamHandler()
        handler.setFormatter(
            colorlog.ColoredFormatter(
                f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: "
                f"%(log_color)s%(levelname)-8s%(reset)s %(message)s",
                datefmt=log_date_format,
                reset=True,
            )
        )
        logger = colorlog.getLogger()
        logger.addHandler(handler)
    else:
        logger = logging.getLogger()
        handler = ConcurrentRotatingFileHandler(log_path, "a", maxBytes=20 * 1024 * 1024, backupCount=7)
        handler.setFormatter(
            logging.Formatter(
                fmt=f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: %(levelname)-8s %(message)s",
                datefmt=log_date_format,
            )
        )
        logger.addHandler(handler)

    if "log_level" in logging_config:
        if logging_config["log_level"] == "CRITICAL":
            logger.setLevel(logging.CRITICAL)
        elif logging_config["log_level"] == "ERROR":
            logger.setLevel(logging.ERROR)
        elif logging_config["log_level"] == "WARNING":
            logger.setLevel(logging.WARNING)
        elif logging_config["log_level"] == "INFO":
            logger.setLevel(logging.INFO)
        elif logging_config["log_level"] == "DEBUG":
            logger.setLevel(logging.DEBUG)
            logging.getLogger("aiosqlite").setLevel(logging.INFO)  # Too much logging on debug level
            logging.getLogger("websockets").setLevel(logging.INFO)  # Too much logging on debug level
        else:
            logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.INFO)
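# The level-selection chain above can be collapsed with a lookup, since the
# logging module exposes its level constants by name. A sketch, not the
# project's code; set_log_level is a hypothetical helper and logging_config is
# assumed to be the same dict used above.
import logging


def set_log_level(logger: logging.Logger, logging_config: dict) -> None:
    # unknown or missing level names fall back to INFO, matching the if/elif chain
    level = getattr(logging, logging_config.get("log_level", "INFO"), logging.INFO)
    logger.setLevel(level)
    if level == logging.DEBUG:
        # these libraries are too chatty at DEBUG, as noted above
        logging.getLogger("aiosqlite").setLevel(logging.INFO)
        logging.getLogger("websockets").setLevel(logging.INFO)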
async def create(
    config: Dict,
    private_key: ExtendedPrivateKey,
    root_path: Path,
    name: str = None,
    override_constants: Dict = {},
    local_test: bool = False,
):
    self = WalletNode()
    self.config = config
    self.constants = consensus_constants.copy()
    self.root_path = root_path
    self.local_test = local_test
    for key, value in override_constants.items():
        self.constants[key] = value
    if name:
        self.log = logging.getLogger(name)
    else:
        self.log = logging.getLogger(__name__)

    db_path_key_suffix = str(private_key.get_public_key().get_fingerprint())
    path = path_from_root(self.root_path, f"{config['database_path']}-{db_path_key_suffix}")
    mkdir(path.parent)
    self.wallet_state_manager = await WalletStateManager.create(private_key, config, path, self.constants)
    self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)

    # Normal operation data
    self.cached_blocks = {}
    self.future_block_hashes = {}

    # Sync data
    self._shut_down = False
    self.proof_hashes = []
    self.header_hashes = []
    self.header_hashes_error = False
    self.short_sync_threshold = 15
    self.potential_blocks_received = {}
    self.potential_header_hashes = {}

    self.server = None
    self.tasks = []

    return self
def migrate_from(
    old_root: Path,
    new_root: Path,
    manifest: List[str],
    do_not_migrate_settings: List[str],
):
    """
    Copy all the files in "manifest" to the new config directory.
    """
    if old_root == new_root:
        print("old root path is the same as the new path, exiting")
        return 1
    if not old_root.is_dir():
        print(f"{old_root} not found - this is ok if you did not install this version.")
        return 0
    print(f"\n{old_root} found")
    print(f"Copying files from {old_root} to {new_root}\n")
    not_found = []
    for f in manifest:
        old_path = old_root / f
        new_path = new_root / f
        if old_path.is_file():
            print(f"{new_path}")
            mkdir(new_path.parent)
            shutil.copy(old_path, new_path)
        else:
            not_found.append(f)
            print(f"{old_path} not found, skipping")

    # update config yaml with new keys
    config: Dict = load_config(new_root, "config.yaml")
    config_str: str = initial_config_file("config.yaml")
    default_config: Dict = yaml.safe_load(config_str)
    flattened_keys = unflatten_properties({k: "" for k in do_not_migrate_settings})
    dict_add_new_default(config, default_config, flattened_keys)

    save_config(new_root, "config.yaml", config)

    # migrate plots
    # for now, we simply leave them where they are
    # and make what may have been relative paths absolute

    if "config/trusted.key" in not_found:
        initialize_ssl(new_root)

    return 1
def start_service(root_path, service):
    # set up CHIA_ROOT
    # invoke correct script
    # save away PID

    # we need to pass on the possibly altered CHIA_ROOT
    os.environ["CHIA_ROOT"] = str(root_path)
    process = subprocess.Popen(service, shell=True)
    pid_path = pid_path_for_service(root_path, service)
    try:
        mkdir(pid_path.parent)
        with open(pid_path, "w") as f:
            f.write(f"{process.pid}\n")
        print(f"wrote pid to {pid_path}")
    except Exception:
        print(f"can't write PID file for {process} at {pid_path}")
    return process, pid_path
def launch_service(root_path, service_command):
    """
    Launch a child process.
    """
    # set up CHIA_ROOT
    # invoke correct script
    # save away PID

    # we need to pass on the possibly altered CHIA_ROOT
    os.environ["CHIA_ROOT"] = str(root_path)

    # Insert the proper executable for this service
    service_array = service_command.split()
    service_name = service_array[0]
    service_executable = executable_for_service(service_name)
    service_array[0] = service_executable

    startupinfo = None
    if os.name == "nt":
        startupinfo = subprocess.STARTUPINFO()  # type: ignore
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # type: ignore

    if service_name == "chia-create-plots":
        plotter_path = plotter_log_path(root_path)
        if plotter_path.parent.exists():
            if plotter_path.exists():
                plotter_path.unlink()
        else:
            mkdir(plotter_path.parent)
        outfile = open(plotter_path.resolve(), "w")
        process = subprocess.Popen(service_array, shell=False, stdout=outfile, startupinfo=startupinfo)
    else:
        process = subprocess.Popen(service_array, shell=False, startupinfo=startupinfo)

    pid_path = pid_path_for_service(root_path, service_command)
    try:
        mkdir(pid_path.parent)
        with open(pid_path, "w") as f:
            f.write(f"{process.pid}\n")
    except Exception:
        pass
    return process, pid_path
async def create(
    config: Dict,
    key_config: Dict,
    name: str = None,
    override_constants: Dict = {},
):
    self = WalletNode()
    self.config = config
    self.key_config = key_config
    self.constants = consensus_constants.copy()
    for key, value in override_constants.items():
        self.constants[key] = value
    if name:
        self.log = logging.getLogger(name)
    else:
        self.log = logging.getLogger(__name__)

    path = path_from_root(DEFAULT_ROOT_PATH, config["database_path"])
    mkdir(path.parent)

    self.wallet_state_manager = await WalletStateManager.create(key_config, config, path, self.constants)
    self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)

    # Normal operation data
    self.cached_blocks = {}
    self.future_block_hashes = {}

    # Sync data
    self._shut_down = False
    self.proof_hashes = []
    self.header_hashes = []
    self.header_hashes_error = False
    self.short_sync_threshold = 15
    self.potential_blocks_received = {}
    self.potential_header_hashes = {}

    self.server = None

    return self
def singleton(lockfile, text="semaphore"):
    """
    Open a lockfile exclusively.
    """
    if not lockfile.parent.exists():
        mkdir(lockfile.parent)

    try:
        if has_fcntl:
            f = open(lockfile, "w")
            fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
        else:
            if lockfile.exists():
                lockfile.unlink()
            fd = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            f = open(fd, "w")
        f.write(text)
    except IOError:
        return None
    return f
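# A hedged usage sketch for singleton() above: the caller must keep the returned
# file object referenced for the life of the process, since dropping it closes
# the file and releases the fcntl lock. The lockfile path here is purely
# illustrative, not a path the original code uses.
from pathlib import Path

lock = singleton(Path("~/.chia/run/daemon.lock").expanduser())
if lock is None:
    print("another instance already holds the lock; exiting")
else:
    print("lock acquired; keep `lock` alive until shutdown")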
def launch_service(root_path: Path, service_command) -> Tuple[subprocess.Popen, Path]:
    """
    Launch a child process.
    """
    # set up CHIA_ROOT
    # invoke correct script
    # save away PID

    # we need to pass on the possibly altered CHIA_ROOT
    os.environ["CHIA_ROOT"] = str(root_path)

    # Insert the proper executable for this service
    service_array = service_command.split()
    service_executable = executable_for_service(service_array[0])
    service_array[0] = service_executable

    startupinfo = None
    if os.name == "nt":
        startupinfo = subprocess.STARTUPINFO()  # type: ignore
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # type: ignore

    # CREATE_NEW_PROCESS_GROUP allows graceful shutdown on windows, by CTRL_BREAK_EVENT signal
    if sys.platform == "win32" or sys.platform == "cygwin":
        creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
    else:
        creationflags = 0
    process = subprocess.Popen(
        service_array,
        shell=False,
        startupinfo=startupinfo,
        creationflags=creationflags,
    )
    pid_path = pid_path_for_service(root_path, service_command)
    try:
        mkdir(pid_path.parent)
        with open(pid_path, "w") as f:
            f.write(f"{process.pid}\n")
    except Exception:
        pass
    return process, pid_path
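# A minimal usage sketch for launch_service() above, assuming root_path is a
# configured Path; the service command string is illustrative. On POSIX,
# terminate() sends SIGTERM; on Windows, the CREATE_NEW_PROCESS_GROUP flag set
# above is what makes a CTRL_BREAK_EVENT based shutdown possible instead.
process, pid_path = launch_service(root_path, "chia_full_node")
# ... service runs ...
process.terminate()
process.wait()
if pid_path.exists():
    pid_path.unlink()  # drop the now-stale PID file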
def service_kwargs_for_full_node(root_path):
    service_name = "full_node"
    config = load_config_cli(root_path, "config.yaml", "full_node")
    db_path = path_from_root(root_path, config["simulator_database_path"])
    mkdir(db_path.parent)
    config["database_path"] = config["simulator_database_path"]

    api = FullNodeSimulator(
        config,
        root_path=root_path,
        consensus_constants=test_constants,
        name=service_name,
        bt=BlockTools(),
    )

    async def start_callback():
        await api._start()

    def stop_callback():
        api._close()

    async def await_closed_callback():
        await api._await_closed()

    kwargs = dict(
        root_path=root_path,
        api=api,
        node_type=NodeType.FULL_NODE,
        advertised_port=config["port"],
        service_name=service_name,
        server_listen_ports=[config["port"]],
        on_connect_callback=api._on_connect,
        start_callback=start_callback,
        stop_callback=stop_callback,
        await_closed_callback=await_closed_callback,
        rpc_info=(FullNodeRpcApi, config["rpc_port"]),
    )
    return kwargs
async def _start(self, public_key_fingerprint: Optional[int] = None) -> bool:
    self._shut_down = False
    private_keys = self.keychain.get_all_private_keys()
    if len(private_keys) == 0:
        self.log.warning("No keys present. Create keys with the UI, or with the 'chia keys' program.")
        return False

    private_key: Optional[PrivateKey] = None
    if public_key_fingerprint is not None:
        for sk, _ in private_keys:
            if sk.get_g1().get_fingerprint() == public_key_fingerprint:
                private_key = sk
                break
    else:
        private_key = private_keys[0][0]

    if private_key is None:
        raise RuntimeError(f"Invalid fingerprint {public_key_fingerprint}")

    db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
    path = path_from_root(self.root_path, f"{self.config['database_path']}-{db_path_key_suffix}")
    mkdir(path.parent)
    self.wallet_state_manager = await WalletStateManager.create(private_key, self.config, path, self.constants)
    assert self.wallet_state_manager is not None
    if self.state_changed_callback is not None:
        self.wallet_state_manager.set_callback(self.state_changed_callback)

    self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
    return True
def main():
    """
    Script for creating plots and adding them to the plot config file.
    """
    root_path = DEFAULT_ROOT_PATH
    plot_config_filename = config_path_for_filename(root_path, "plots.yaml")
    key_config_filename = config_path_for_filename(root_path, "keys.yaml")

    parser = argparse.ArgumentParser(description="Chia plotting script.")
    parser.add_argument("-k", "--size", help="Plot size", type=int, default=20)
    parser.add_argument("-n", "--num_plots", help="Number of plots", type=int, default=10)
    parser.add_argument("-i", "--index", help="First plot index", type=int, default=0)
    parser.add_argument("-p", "--pool_pub_key", help="Hex public key of pool", type=str, default="")
    parser.add_argument(
        "-t",
        "--tmp_dir",
        help="Temporary directory for plotting files (relative to final directory)",
        type=Path,
        default=Path("./plots.tmp"),
    )
    new_plots_root = path_from_root(
        root_path,
        load_config(root_path, "config.yaml").get("harvester", {}).get("new_plot_root", "plots"),
    )
    parser.add_argument(
        "-d",
        "--final_dir",
        help="Final directory for plots (relative or absolute)",
        type=Path,
        default=new_plots_root,
    )

    # We need the keys file, to access pool keys (if they exist), and the sk_seed.
    args = parser.parse_args()
    if not key_config_filename.exists():
        raise RuntimeError("Keys not generated. Run chia-generate-keys")

    # The seed is what will be used to generate a private key for each plot
    key_config = load_config(root_path, key_config_filename)
    sk_seed: bytes = bytes.fromhex(key_config["sk_seed"])

    pool_pk: PublicKey
    if len(args.pool_pub_key) > 0:
        # Use the provided pool public key, useful for using an external pool
        pool_pk = PublicKey.from_bytes(bytes.fromhex(args.pool_pub_key))
    else:
        # Use the pool public key from the config, useful for solo farming
        pool_sk = PrivateKey.from_bytes(bytes.fromhex(key_config["pool_sks"][0]))
        pool_pk = pool_sk.get_public_key()

    print(
        f"Creating {args.num_plots} plots, from index {args.index} to "
        f"{args.index + args.num_plots - 1}, of size {args.size}, sk_seed {sk_seed.hex()} ppk {pool_pk}"
    )

    tmp_dir = args.final_dir / args.tmp_dir
    mkdir(tmp_dir)
    mkdir(args.final_dir)
    for i in range(args.index, args.index + args.num_plots):
        # Generate a sk based on the seed, plot size (k), and index
        sk: PrivateKey = PrivateKey.from_seed(sk_seed + args.size.to_bytes(1, "big") + i.to_bytes(4, "big"))

        # The plot seed is based on the pool and plot pks
        plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed(pool_pk, sk.get_public_key())
        filename: str = f"plot-{i}-{args.size}-{plot_seed}.dat"
        full_path: Path = args.final_dir / filename
        if not full_path.exists():
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(tmp_dir),
                str(args.final_dir),
                filename,
                args.size,
                bytes([]),
                plot_seed,
            )
        else:
            print(f"Plot {filename} already exists")

        # Updates the config if necessary.
        plot_config = load_config(root_path, plot_config_filename)
        plot_config_plots_new = deepcopy(plot_config.get("plots", {}))
        relative_path = make_path_relative(full_path, root_path)
        if relative_path not in plot_config_plots_new and full_path not in plot_config_plots_new:
            plot_config_plots_new[str(full_path)] = {
                "sk": bytes(sk).hex(),
                "pool_pk": bytes(pool_pk).hex(),
            }
        plot_config["plots"].update(plot_config_plots_new)

        # Dumps the new config to disk.
        save_config(root_path, plot_config_filename, plot_config)
    try:
        tmp_dir.rmdir()
    except Exception:
        print(f"warning: couldn't delete {tmp_dir}")
def __init__(
    self,
    root_path: Optional[Path] = None,
    real_plots: bool = False,
):
    self._tempdir = None
    if root_path is None:
        self._tempdir = tempfile.TemporaryDirectory()
        root_path = Path(self._tempdir.name)
    self.root_path = root_path
    self.real_plots = real_plots

    if not real_plots:
        create_default_chia_config(root_path)
        initialize_ssl(root_path)
        # No real plots supplied, so we will use the small test plots
        self.use_any_pos = True
        self.keychain = Keychain("testing-1.8.0", True)
        self.keychain.delete_all_keys()
        self.farmer_master_sk = self.keychain.add_private_key(
            bytes_to_mnemonic(std_hash(b"block_tools farmer key")), ""
        )
        self.pool_master_sk = self.keychain.add_private_key(
            bytes_to_mnemonic(std_hash(b"block_tools pool key")), ""
        )
        self.farmer_pk = master_sk_to_farmer_sk(self.farmer_master_sk).get_g1()
        self.pool_pk = master_sk_to_pool_sk(self.pool_master_sk).get_g1()

        plot_dir = get_plot_dir()
        mkdir(plot_dir)
        temp_dir = plot_dir / "tmp"
        mkdir(temp_dir)
        args = Namespace()
        # Can't go much lower than 18, since plots start having no solutions
        args.size = 18
        # Use many plots for testing, in order to guarantee proofs of space at every height
        args.num = 40
        args.buffer = 32
        args.farmer_public_key = bytes(self.farmer_pk).hex()
        args.pool_public_key = bytes(self.pool_pk).hex()
        args.tmp_dir = temp_dir
        args.tmp2_dir = plot_dir
        args.final_dir = plot_dir
        args.plotid = None
        args.memo = None
        test_private_keys = [AugSchemeMPL.key_gen(std_hash(bytes([i]))) for i in range(args.num)]
        try:
            # No datetime in the filename, to get deterministic filenames and avoid re-plotting
            create_plots(
                args,
                root_path,
                use_datetime=False,
                test_private_keys=test_private_keys,
            )
        except KeyboardInterrupt:
            shutil.rmtree(plot_dir, ignore_errors=True)
            sys.exit(1)
    else:
        initialize_ssl(root_path)
        self.keychain = Keychain()
        self.use_any_pos = False
        sk_and_ent = self.keychain.get_first_private_key()
        assert sk_and_ent is not None
        self.farmer_master_sk = sk_and_ent[0]
        self.pool_master_sk = sk_and_ent[0]

    self.farmer_ph = create_puzzlehash_for_pk(
        master_sk_to_wallet_sk(self.farmer_master_sk, uint32(0)).get_g1()
    )
    self.pool_ph = create_puzzlehash_for_pk(
        master_sk_to_wallet_sk(self.pool_master_sk, uint32(0)).get_g1()
    )

    self.all_sks = self.keychain.get_all_private_keys()
    self.pool_pubkeys: List[G1Element] = [master_sk_to_pool_sk(sk).get_g1() for sk, _ in self.all_sks]

    farmer_pubkeys: List[G1Element] = [master_sk_to_farmer_sk(sk).get_g1() for sk, _ in self.all_sks]
    if len(self.pool_pubkeys) == 0 or len(farmer_pubkeys) == 0:
        raise RuntimeError("Keys not generated. Run `chia generate keys`")

    _, self.plots, _, _ = load_plots({}, {}, farmer_pubkeys, self.pool_pubkeys, root_path)
def get_plot_dir():
    cache_path = Path(os.path.expanduser(os.getenv("CHIA_ROOT", "~/.chia/"))) / "test-plots"
    mkdir(cache_path)
    return cache_path
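# A quick check (illustrative, not original code) of how CHIA_ROOT steers
# get_plot_dir() above: pointing the env var at a temporary directory makes
# the test-plots cache land there.
import os
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    os.environ["CHIA_ROOT"] = tmp
    assert get_plot_dir() == Path(tmp) / "test-plots"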