async def main():
    """Entry point for the introducer service: configure, start the server, and wait for shutdown."""
    root_path = DEFAULT_ROOT_PATH
    # Whole config for network-wide settings; service-specific section for the introducer.
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "introducer")
    initialize_logging("Introducer %(name)-21s", config["logging"])
    log = logging.getLogger(__name__)
    setproctitle("chia_introducer")
    introducer = Introducer(config)
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    # Both values must be present in config.yaml for the server to operate.
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(config["port"], introducer, NodeType.INTRODUCER, ping_interval, network_id)
    introducer.set_server(server)
    _ = await server.start_server(None, config)
    # Close all connections on SIGINT/SIGTERM so await_closed() below returns.
    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, server.close_all)
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, server.close_all)
    await server.await_closed()
    log.info("Introducer fully closed.")
def handler(args, parser):
    """Validate the CLI invocation and dispatch the plots subcommand to its implementation."""
    # Guard clauses: no subcommand given, or no initialized config directory.
    if args.command is None or len(args.command) < 1:
        help_message()
        parser.exit(1)
    root_path: Path = args.root_path
    if not root_path.is_dir():
        raise RuntimeError(
            "Please initialize (or migrate) your config directory with chia init."
        )
    initialize_logging("", {"log_stdout": True}, root_path)
    command = args.command
    if command not in command_list:
        help_message()
        parser.exit(1)
    # Dispatch on the validated subcommand name.
    if command == "create":
        create_plots(args, root_path)
    elif command == "check":
        check_plots(args, root_path)
    elif command in ("add", "remove"):
        # Both directory commands take the same path argument.
        str_path = args.final_dir
        if command == "add":
            add_plot_directory(str_path, root_path)
        else:
            remove_plot_directory(str_path, root_path)
    elif command == "show":
        show(root_path)
async def main():
    """Start the harvester: load configs and plots, connect to the farmer, and run until closed."""
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "harvester")
    try:
        plot_config = load_config(root_path, "plots.yaml")
    except FileNotFoundError:
        # Plots are a hard requirement; tell the user how to create them.
        raise RuntimeError("Plots not generated. Run chia-create-plots")
    initialize_logging("Harvester %(name)-22s", config["logging"])
    log = logging.getLogger(__name__)
    setproctitle("chia_harvester")
    harvester = Harvester(config, plot_config)
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(config["port"], harvester, NodeType.HARVESTER, ping_interval, network_id)
    _ = await server.start_server(None, config)
    # Close all connections on SIGINT/SIGTERM so await_closed() below returns.
    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, server.close_all)
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, server.close_all)
    # The harvester dials out to its configured farmer peer.
    peer_info = PeerInfo(harvester.config["farmer_peer"]["host"], harvester.config["farmer_peer"]["port"])
    _ = await server.start_client(peer_info, None, config)
    await server.await_closed()
    harvester._shutdown()
    await harvester._await_shutdown()
    log.info("Harvester fully closed.")
def __init__(
    self,
    root_path,
    api: Any,
    node_type: NodeType,
    advertised_port: int,
    service_name: str,
    server_listen_ports: Optional[List[int]] = None,
    connect_peers: Optional[List[PeerInfo]] = None,
    on_connect_callback: Optional[OutboundMessage] = None,
    rpc_start_callback_port: Optional[Tuple[Callable, int]] = None,
    start_callback: Optional[Callable] = None,
    stop_callback: Optional[Callable] = None,
    await_closed_callback: Optional[Callable] = None,
    periodic_introducer_poll: Optional[Tuple[PeerInfo, int, int]] = None,
):
    """Configure logging and the process title, then build the ChiaServer for this service.

    Fix: `server_listen_ports` and `connect_peers` previously used mutable
    default arguments (`= []`), which are shared across every call of this
    constructor. They now default to None and are normalized to fresh lists,
    which is backward-compatible for all callers.
    """
    if server_listen_ports is None:
        server_listen_ports = []
    if connect_peers is None:
        connect_peers = []
    net_config = load_config(root_path, "config.yaml")
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    # Both must be present in config.yaml for the server to operate.
    assert ping_interval is not None
    assert network_id is not None
    self._node_type = node_type
    proctitle_name = f"chia_{service_name}"
    setproctitle(proctitle_name)
    self._log = logging.getLogger(service_name)
    config = load_config_cli(root_path, "config.yaml", service_name)
    initialize_logging(f"{service_name:<30s}", config["logging"], root_path)
    self._rpc_start_callback_port = rpc_start_callback_port
    self._server = ChiaServer(
        config["port"],
        api,
        node_type,
        ping_interval,
        network_id,
        root_path,
        config,
    )
    # Hand the server to the API object via whichever setter it exposes.
    # (Renamed the loop variable from "_", which conventionally means "unused".)
    for setter_name in ("set_server", "_set_server"):
        setter = getattr(api, setter_name, None)
        if setter:
            setter(self._server)
    self._connect_peers = connect_peers
    self._server_listen_ports = server_listen_ports
    self._api = api
    self._task = None
    self._is_stopping = False
    self._periodic_introducer_poll = periodic_introducer_poll
    self._on_connect_callback = on_connect_callback
    self._start_callback = start_callback
    self._stop_callback = stop_callback
    self._await_closed_callback = await_closed_callback
    # NOTE(review): advertised_port is accepted but unused in this constructor;
    # presumably consumed elsewhere in the class — confirm.
async def main():
    """Start the (legacy) harvester: load key/plot configs, connect to the farmer, run until closed."""
    config = load_config_cli("config.yaml", "harvester")
    try:
        key_config = load_config("keys.yaml")
    except FileNotFoundError:
        raise RuntimeError(
            "Keys not generated. Run python3 ./scripts/regenerate_keys.py.")
    try:
        plot_config = load_config("plots.yaml")
    except FileNotFoundError:
        raise RuntimeError(
            "Plots not generated. Run python3.7 ./scripts/create_plots.py.")
    initialize_logging("Harvester %(name)-22s", config["logging"])
    log = logging.getLogger(__name__)
    setproctitle("chia_harvester")
    harvester = Harvester(config, key_config, plot_config)
    server = ChiaServer(config["port"], harvester, NodeType.HARVESTER)
    _ = await server.start_server(config["port"], None)
    # Close all connections on SIGINT/SIGTERM so await_closed() below returns.
    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, server.close_all)
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, server.close_all)
    # The harvester dials out to its configured farmer peer.
    peer_info = PeerInfo(harvester.config["farmer_peer"]["host"],
                         harvester.config["farmer_peer"]["port"])
    _ = await server.start_client(peer_info, None)
    await server.await_closed()
    harvester._shutdown()
    await harvester._await_shutdown()
    log.info("Harvester fully closed.")
async def async_run_daemon(root_path):
    """Start the chia daemon: print SSL cert paths as JSON on stdout, then run the websocket server."""
    chia_init(root_path)
    config = load_config(root_path, "config.yaml")
    setproctitle("chia_daemon")
    initialize_logging("daemon", config["logging"], root_path)
    lockfile = singleton(daemon_launch_lock_path(root_path))
    # SSL material for the daemon's websocket endpoint.
    crt_path = root_path / config["daemon_ssl"]["private_crt"]
    key_path = root_path / config["daemon_ssl"]["private_key"]
    ca_crt_path = root_path / config["private_ssl_ca"]["crt"]
    ca_key_path = root_path / config["private_ssl_ca"]["key"]
    sys.stdout.flush()
    # Emit the cert locations on stdout so the launching process can connect securely.
    json_msg = dict_to_json_str({
        "message": "cert_path",
        "success": True,
        "cert": f"{crt_path}",
        "key": f"{key_path}",
        "ca_crt": f"{ca_crt_path}",
    })
    sys.stdout.write("\n" + json_msg + "\n")
    sys.stdout.flush()
    if lockfile is None:
        # NOTE(review): the cert JSON above is printed even when another daemon
        # is already launching; confirm callers tolerate that.
        print("daemon: already launching")
        return 2

    # TODO: clean this up, ensuring lockfile isn't removed until the listen port is open
    create_server_for_daemon(root_path)
    ws_server = WebSocketServer(root_path, ca_crt_path, ca_key_path, crt_path, key_path)
    await ws_server.start()
async def main():
    """Run the (legacy) timelord: connect to the full node and push discriminant-queue messages."""
    config = load_config_cli("config.yaml", "timelord")
    initialize_logging("Timelord %(name)-23s", config["logging"])
    log = logging.getLogger(__name__)
    setproctitle("chia_timelord")
    timelord = Timelord(config)
    server = ChiaServer(config["port"], timelord, NodeType.TIMELORD)
    _ = await server.start_server(config["host"], None)

    def signal_received():
        # Close network connections and kick off the timelord's async shutdown.
        server.close_all()
        asyncio.create_task(timelord._shutdown())

    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, signal_received)
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, signal_received)
    full_node_peer = PeerInfo(
        timelord.config["full_node_peer"]["host"],
        timelord.config["full_node_peer"]["port"],
    )
    await asyncio.sleep(1)  # Prevents TCP simultaneous connect with full node
    await server.start_client(full_node_peer, None)
    # Forward timelord messages until the discriminant queue is closed.
    async for msg in timelord._manage_discriminant_queue():
        server.push_message(msg)
    await server.await_closed()
    log.info("Timelord fully closed.")
async def async_main():
    """Start the farmer service with an optional RPC server and wait for shutdown."""
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "farmer")
    # TODO: Remove once we have pool server
    config_pool = load_config_cli(root_path, "config.yaml", "pool")
    initialize_logging("Farmer %(name)-25s", config["logging"], root_path)
    log = logging.getLogger(__name__)
    setproctitle("chia_farmer")
    keychain = Keychain()
    farmer = Farmer(config, config_pool, keychain)
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        farmer,
        NodeType.FARMER,
        ping_interval,
        network_id,
        root_path,
        config,
    )
    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT, server.close_all)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, server.close_all)
    except NotImplementedError:
        # Signal handlers are unavailable on some platforms (e.g. Windows event loops).
        log.info("signal handlers unsupported")
    _ = await server.start_server(farmer._on_connect)
    farmer.set_server(server)
    rpc_cleanup = None
    if config["start_rpc_server"]:
        # Starts the RPC server
        rpc_cleanup = await start_farmer_rpc_server(farmer, server.close_all, config["rpc_port"])
    await asyncio.sleep(10)  # Allows full node to startup
    farmer._start_bg_tasks()
    await server.await_closed()
    farmer._shut_down = True
    # Waits for the rpc server to close
    if rpc_cleanup is not None:
        await rpc_cleanup()
    log.info("Closed RPC server.")
    log.info("Farmer fully closed.")
async def async_main():
    """Start the timelord: run the VDF client server and process the discriminant queue."""
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "timelord")
    initialize_logging("Timelord %(name)-23s", config["logging"], root_path)
    log = logging.getLogger(__name__)
    setproctitle("chia_timelord")
    timelord = Timelord(config, constants)
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        timelord,
        NodeType.TIMELORD,
        ping_interval,
        network_id,
        DEFAULT_ROOT_PATH,
        config,
    )
    timelord.set_server(server)
    # TCP server that external VDF client processes connect to.
    # NOTE(review): the explicit loop= argument is deprecated in newer asyncio.
    coro = asyncio.start_server(
        timelord._handle_client,
        config["vdf_server"]["host"],
        config["vdf_server"]["port"],
        loop=asyncio.get_running_loop(),
    )

    def stop_all():
        # Close network connections and shut down the timelord.
        server.close_all()
        timelord._shutdown()

    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT, stop_all)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, stop_all)
    except NotImplementedError:
        log.info("signal handlers unsupported")
    await asyncio.sleep(10)  # Allows full node to startup
    peer_info = PeerInfo(config["full_node_peer"]["host"], config["full_node_peer"]["port"])
    bg_task = start_timelord_bg_task(server, peer_info, log)
    vdf_server = asyncio.ensure_future(coro)
    await timelord._manage_discriminant_queue()
    log.info("Closed discriminant queue.")
    log.info("Shutdown timelord.")
    await server.await_closed()
    # Cancel the VDF server and background task after all connections are gone.
    vdf_server.cancel()
    bg_task.cancel()
    log.info("Timelord fully closed.")
async def main():
    """Run the timelord: serve VDF clients, forward queue messages, and shut down cleanly on signals."""
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "timelord")
    initialize_logging("Timelord %(name)-23s", config["logging"])
    log = logging.getLogger(__name__)
    setproctitle("chia_timelord")
    timelord = Timelord(config, constants)
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(config["port"], timelord, NodeType.TIMELORD, ping_interval, network_id)
    _ = await server.start_server(None, config)
    timelord_shutdown_task: Optional[asyncio.Task] = None
    # TCP server that external VDF client processes connect to.
    coro = asyncio.start_server(
        timelord._handle_client,
        config["vdf_server"]["host"],
        config["vdf_server"]["port"],
        loop=asyncio.get_running_loop(),
    )

    def signal_received():
        # Capture the shutdown task so it can be awaited after the queue drains.
        nonlocal timelord_shutdown_task
        server.close_all()
        timelord_shutdown_task = asyncio.create_task(timelord._shutdown())

    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, signal_received)
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, signal_received)
    full_node_peer = PeerInfo(
        timelord.config["full_node_peer"]["host"],
        timelord.config["full_node_peer"]["port"],
    )
    await asyncio.sleep(1)  # Prevents TCP simultaneous connect with full node
    await server.start_client(full_node_peer, None, config)
    vdf_server = asyncio.ensure_future(coro)
    # Forward timelord messages until the discriminant queue is closed.
    async for msg in timelord._manage_discriminant_queue():
        server.push_message(msg)
    log.info("Closed discriminant queue.")
    if timelord_shutdown_task is not None:
        await timelord_shutdown_task
    log.info("Shutdown timelord.")
    await server.await_closed()
    vdf_server.cancel()
    log.info("Timelord fully closed.")
def main():
    """
    Script for checking all plots in the plots.yaml file.
    Specify a number of challenge to test for each plot.
    """
    # Fixed stale "Exodus" branding in the user-visible description.
    parser = argparse.ArgumentParser(description="Chia plot checking script.")
    parser.add_argument("-n", "--num", help="Number of challenges", type=int, default=100)
    args = parser.parse_args()

    root_path = DEFAULT_ROOT_PATH
    plot_config = load_config(root_path, plot_config_filename)
    config = load_config(root_path, config_filename)
    initialize_logging("%(name)-22s", {"log_stdout": True}, root_path)
    log = logging.getLogger(__name__)
    v = Verifier()
    log.info("Loading plots in plots.yaml using harvester loading code\n")
    provers, _, _ = load_plots(config["harvester"], plot_config, None, root_path)
    log.info(
        f"\n\nStarting to test each plot with {args.num} challenges each\n")
    for plot_path, pr in provers.items():
        total_proofs = 0
        try:
            for i in range(args.num):
                challenge = std_hash(i.to_bytes(32, "big"))
                for index, quality_str in enumerate(
                        pr.get_qualities_for_challenge(challenge)):
                    # Each quality must round-trip: the full proof must verify
                    # back to the same quality string.
                    proof = pr.get_full_proof(challenge, index)
                    total_proofs += 1
                    ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size(), challenge, proof)
                    assert quality_str == ver_quality_str
        except KeyboardInterrupt:
            # Narrowed from the original blanket `except BaseException` with an
            # isinstance check: handle user interruption explicitly and let
            # SystemExit/GeneratorExit propagate instead of being logged as errors.
            log.warning("Interrupted, closing")
            return
        except Exception as e:
            log.error(
                f"{type(e)}: {e} error in proving/verifying for plot {plot_path}"
            )
        # One summary line per plot; error level signals zero passing proofs.
        summary = f"{plot_path}: Proofs {total_proofs} / {args.num}, {round(total_proofs/float(args.num), 4)}"
        if total_proofs > 0:
            log.info(summary)
        else:
            log.error(summary)
async def async_main():
    """Start the harvester service with an optional RPC server and wait for shutdown."""
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "harvester")
    try:
        plot_config = load_config(root_path, "plots.yaml")
    except FileNotFoundError:
        # Plots are a hard requirement; tell the user how to create them.
        raise RuntimeError("Plots not generated. Run chia-create-plots")
    initialize_logging("Harvester %(name)-22s", config["logging"], root_path)
    log = logging.getLogger(__name__)
    setproctitle("chia_harvester")
    harvester = await Harvester.create(config, plot_config, root_path)
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        harvester,
        NodeType.HARVESTER,
        ping_interval,
        network_id,
        DEFAULT_ROOT_PATH,
        config,
    )
    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT, server.close_all)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, server.close_all)
    except NotImplementedError:
        # Signal handlers are unavailable on some platforms (e.g. Windows event loops).
        log.info("signal handlers unsupported")
    rpc_cleanup = None
    if config["start_rpc_server"]:
        # Starts the RPC server
        rpc_cleanup = await start_harvester_rpc_server(
            harvester, server.close_all, config["rpc_port"]
        )
    harvester.set_server(server)
    await asyncio.sleep(1)
    harvester._start_bg_tasks()
    await server.await_closed()
    harvester._shutdown()
    await harvester._await_shutdown()
    # Waits for the rpc server to close
    if rpc_cleanup is not None:
        await rpc_cleanup()
    log.info("Closed RPC server.")
    log.info("Harvester fully closed.")
async def test_weight_proof_bad_peak_hash(self, default_1000_blocks):
    """A weight proof requested for an unknown/invalid peak hash must be rejected (None)."""
    blocks = default_1000_blocks
    header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(
        blocks)
    wpf = WeightProofHandler(
        test_constants,
        BlockCache(sub_blocks, height_to_hash, header_cache, summaries),
    )
    wpf.log.setLevel(logging.INFO)
    initialize_logging("", {"log_stdout": True}, DEFAULT_ROOT_PATH)
    # Deliberately garbage bytes that cannot match any known peak hash.
    wp = await wpf.get_proof_of_weight(b"sadgfhjhgdgsfadfgh")
    assert wp is None
def __init__(self, keychain: Keychain, root_path: Path):
    """Load the wallet configuration, set up logging, and initialize instance state."""
    self.config = load_config_cli(root_path, "config.yaml", "wallet")
    initialize_logging("Wallet %(name)-25s", self.config["logging"], root_path)
    # Under the test configuration, point at a throwaway database file.
    if self.config["testing"] is True:
        self.config["database_path"] = "test_db_wallet.db"
    self.log = log
    self.root_path = root_path
    self.keychain = keychain
    self.shut_down = False
    # Connection/worker objects are created lazily later on.
    self.websocket = None
    self.wallet_node: Optional[WalletNode] = None
    self.trade_manager: Optional[TradeManager] = None
async def main():
    """Launch the SSH-based full node UI and block until it has fully closed."""
    config = load_config_cli("config.yaml", "ui")
    initialize_logging("UI %(name)-29s", config["logging"])
    setproctitle("chia_full_node_ui")
    await_all_closed, ui_close_cb = await start_ssh_server(
        config["port"], config["ssh_filename"], config["rpc_port"]
    )

    def request_close():
        # Close the UI without restart (False) when a termination signal arrives.
        ui_close_cb(False)

    loop = asyncio.get_running_loop()
    loop.add_signal_handler(signal.SIGINT, request_close)
    loop.add_signal_handler(signal.SIGTERM, request_close)
    await await_all_closed()
async def async_run_daemon(root_path):
    """Initialize the chia root, acquire the daemon singleton lock, and run the websocket server.

    Fix: removed the leftover debug statement `log.info("before start")` — it
    referenced a `log` name not defined in this function and carried no
    useful information.
    """
    chia_init(root_path)
    config = load_config(root_path, "config.yaml")
    initialize_logging("daemon", config["logging"], root_path)
    lockfile = singleton(daemon_launch_lock_path(root_path))
    if lockfile is None:
        # Another daemon instance is already starting; bail out with a nonzero code.
        print("daemon: already launching")
        return 2

    # TODO: clean this up, ensuring lockfile isn't removed until the listen port is open
    create_server_for_daemon(root_path)
    ws_server = WebSocketServer(root_path)
    await ws_server.start()
async def async_main():
    """Start the farmer: verify keys exist, start the server, and run until closed."""
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "farmer")
    try:
        check_keys(root_path)
        key_config = load_config(root_path, "keys.yaml")
    except FileNotFoundError:
        # Keys are a hard requirement; tell the user how to create them.
        raise RuntimeError("Keys not generated. Run `chia generate keys`")
    initialize_logging("Farmer %(name)-25s", config["logging"], root_path)
    log = logging.getLogger(__name__)
    setproctitle("chia_farmer")
    farmer = Farmer(config, key_config)
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        farmer,
        NodeType.FARMER,
        ping_interval,
        network_id,
        root_path,
        config,
    )
    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT, server.close_all)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, server.close_all)
    except NotImplementedError:
        # Signal handlers are unavailable on some platforms (e.g. Windows event loops).
        log.info("signal handlers unsupported")
    _ = await server.start_server(farmer._on_connect)
    farmer.set_server(server)
    await asyncio.sleep(10)  # Allows full node to startup
    farmer._start_bg_tasks()
    await server.await_closed()
    farmer._shut_down = True
    log.info("Farmer fully closed.")
def main():
    """Entry point for the (legacy) timelord launcher: spawn VDF processes and clean up on exit.

    Fix: wrapped add_signal_handler in try/except NotImplementedError, matching
    the newer launcher version in this codebase — the call raises on platforms
    without signal-handler support (e.g. Windows event loops) and previously
    crashed the launcher there.
    """
    setproctitle("chia_timelord_launcher")
    config = load_config(DEFAULT_ROOT_PATH, "config.yaml", "timelord_launcher")
    initialize_logging("Launcher %(name)-23s", config["logging"])

    def signal_received():
        # Tear down the spawned VDF subprocesses asynchronously.
        asyncio.create_task(kill_processes())

    loop = asyncio.get_event_loop()
    try:
        loop.add_signal_handler(signal.SIGINT, signal_received)
        loop.add_signal_handler(signal.SIGTERM, signal_received)
    except NotImplementedError:
        log.info("signal handlers unsupported")
    try:
        loop.run_until_complete(spawn_all_processes(config))
    finally:
        log.info("Launcher fully closed.")
        loop.close()
async def main():
    """Run the (legacy) introducer service until its server is closed."""
    config = load_config_cli("config.yaml", "introducer")
    initialize_logging("Introducer %(name)-21s", config["logging"])
    log = logging.getLogger(__name__)
    setproctitle("chia_introducer")
    introducer = Introducer(config)
    server = ChiaServer(config["port"], introducer, NodeType.INTRODUCER)
    introducer.set_server(server)
    _ = await server.start_server(config["host"], None)
    # Close all connections on SIGINT/SIGTERM so await_closed() below returns.
    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, server.close_all)
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, server.close_all)
    await server.await_closed()
    log.info("Introducer fully closed.")
async def main():
    """Run the (legacy) farmer: handshake with harvesters and connect to harvester/full node peers."""
    config = load_config_cli("config.yaml", "farmer")
    try:
        key_config = load_config("keys.yaml")
    except FileNotFoundError:
        raise RuntimeError(
            "Keys not generated. Run python3 ./scripts/regenerate_keys.py.")
    initialize_logging("Farmer %(name)-25s", config["logging"])
    log = logging.getLogger(__name__)
    setproctitle("chia_farmer")
    farmer = Farmer(config, key_config)
    harvester_peer = PeerInfo(config["harvester_peer"]["host"],
                              config["harvester_peer"]["port"])
    full_node_peer = PeerInfo(config["full_node_peer"]["host"],
                              config["full_node_peer"]["port"])
    server = ChiaServer(config["port"], farmer, NodeType.FARMER)
    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, server.close_all)
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, server.close_all)

    async def on_connect():
        # Sends a handshake to the harvester
        pool_sks: List[PrivateKey] = [
            PrivateKey.from_bytes(bytes.fromhex(ce))
            for ce in key_config["pool_sks"]
        ]
        msg = HarvesterHandshake([sk.get_public_key() for sk in pool_sks])
        # Broadcast the pool public keys so connected harvesters can farm for them.
        yield OutboundMessage(NodeType.HARVESTER, Message("harvester_handshake", msg), Delivery.BROADCAST)

    _ = await server.start_server(config["host"], on_connect)
    await asyncio.sleep(1)  # Prevents TCP simultaneous connect with harvester
    _ = await server.start_client(harvester_peer, None)
    _ = await server.start_client(full_node_peer, None)
    await server.await_closed()
    log.info("Farmer fully closed.")
async def main():
    """Start the farmer, connect to harvester and full node peers, and run until closed."""
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "farmer")
    try:
        key_config = load_config(root_path, "keys.yaml")
    except FileNotFoundError:
        # Keys are a hard requirement; tell the user how to create them.
        raise RuntimeError("Keys not generated. Run chia-generate-keys")
    initialize_logging("Farmer %(name)-25s", config["logging"])
    log = logging.getLogger(__name__)
    setproctitle("chia_farmer")
    farmer = Farmer(config, key_config)
    harvester_peer = PeerInfo(config["harvester_peer"]["host"],
                              config["harvester_peer"]["port"])
    full_node_peer = PeerInfo(config["full_node_peer"]["host"],
                              config["full_node_peer"]["port"])
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(config["port"], farmer, NodeType.FARMER, ping_interval, network_id)
    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, server.close_all)
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, server.close_all)
    _ = await server.start_server(farmer._on_connect, config)
    await asyncio.sleep(2)  # Prevents TCP simultaneous connect with harvester
    _ = await server.start_client(harvester_peer, None, config)
    _ = await server.start_client(full_node_peer, None, config)
    farmer.set_server(server)
    farmer._start_bg_tasks()
    await server.await_closed()
    farmer._shut_down = True
    log.info("Farmer fully closed.")
def main():
    """Run the timelord launcher until its VDF subprocesses finish, then close the event loop."""
    chia_root = DEFAULT_ROOT_PATH
    setproctitle("chia_timelord_launcher")
    net_config = load_config(chia_root, "config.yaml")
    launcher_config = net_config["timelord_launcher"]
    initialize_logging("TLauncher", launcher_config["logging"], chia_root)

    def handle_shutdown_signal():
        # Tear down the spawned VDF subprocesses asynchronously.
        asyncio.create_task(kill_processes())

    event_loop = asyncio.get_event_loop()
    try:
        for sig in (signal.SIGINT, signal.SIGTERM):
            event_loop.add_signal_handler(sig, handle_shutdown_signal)
    except NotImplementedError:
        # Some platforms (e.g. Windows event loops) lack signal-handler support.
        log.info("signal handlers unsupported")
    try:
        event_loop.run_until_complete(spawn_all_processes(launcher_config, net_config))
    finally:
        log.info("Launcher fully closed.")
        event_loop.close()
header: Header = Header(header_data, header_hash_sig) full_block: FullBlock = FullBlock(proof_of_space, proof_of_time, header, transactions, encoded) return full_block # This code generates a genesis block, call as main to output genesis block to terminal # This might take a while, using the python VDF implementation. # Run by doing python -m tests.block_tools if __name__ == "__main__": from src.util.default_root import DEFAULT_ROOT_PATH from src.consensus.constants import constants as consensus_constants initialize_logging("block_tools", {"log_stdout": True}, DEFAULT_ROOT_PATH) bt = BlockTools(root_path=DEFAULT_ROOT_PATH, real_plots=True) print( bytes( bt.create_genesis_block( consensus_constants, bytes([0] * 32), b"0", bytes32( bytes.fromhex( # "txch102gkhhzs60grx7cfnpng5n6rjecr89r86l5s8xux2za8k820cxsq64ssdg "7a916bdc50d3d0337b0998668a4f439670339467d7e9039b8650ba7b1d4fc1a0" )), )))
async def async_main():
    """Start the full node: optional UPnP port mapping, server, RPC, and orderly shutdown."""
    root_path = DEFAULT_ROOT_PATH
    config = load_config_cli(root_path, "config.yaml", "full_node")
    net_config = load_config(root_path, "config.yaml")
    setproctitle("chia_full_node")
    initialize_logging("FullNode %(name)-23s", config["logging"], root_path)
    log = logging.getLogger(__name__)
    server_closed = False
    full_node = await FullNode.create(config, root_path=root_path)
    if config["enable_upnp"]:
        log.info(f"Attempting to enable UPnP (open up port {config['port']})")
        try:
            upnp = miniupnpc.UPnP()
            upnp.discoverdelay = 5
            upnp.discover()
            upnp.selectigd()
            upnp.addportmapping(config["port"], "TCP", upnp.lanaddr, config["port"], "chia", "")
            log.info(f"Port {config['port']} opened with UPnP.")
        except Exception:
            # Best-effort only: UPnP failure just means no inbound connections.
            log.warning(
                "UPnP failed. This is not required to run chia, but it allows incoming connections from other peers."
            )
    # Starts the full node server (which full nodes can connect to)
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        full_node,
        NodeType.FULL_NODE,
        ping_interval,
        network_id,
        DEFAULT_ROOT_PATH,
        config,
    )
    full_node._set_server(server)
    _ = await server.start_server(full_node._on_connect)
    rpc_cleanup = None

    def master_close_cb():
        # Idempotent: guard so repeated signals / UI closes only shut down once.
        nonlocal server_closed
        if not server_closed:
            # Called by the UI, when node is closed, or when a signal is sent
            log.info("Closing all connections, and server...")
            server.close_all()
            full_node._close()
            server_closed = True

    if config["start_rpc_server"]:
        # Starts the RPC server
        rpc_cleanup = await start_full_node_rpc_server(full_node, master_close_cb, config["rpc_port"])
    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT, master_close_cb)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, master_close_cb)
    except NotImplementedError:
        # Signal handlers are unavailable on some platforms (e.g. Windows event loops).
        log.info("signal handlers unsupported")
    full_node._start_bg_tasks()
    # Awaits for server and all connections to close
    await server.await_closed()
    log.info("Closed all node servers.")
    # Stops the full node and closes DBs
    await full_node._await_closed()
    # Waits for the rpc server to close
    if rpc_cleanup is not None:
        await rpc_cleanup()
    log.info("Closed RPC server.")
    await asyncio.get_running_loop().shutdown_asyncgens()
    log.info("Node fully closed.")
from src.blockchain import Blockchain from src.consensus.constants import constants from src.store import FullNodeStore from src.full_node import FullNode from src.rpc.rpc_server import start_rpc_server from src.server.outbound_message import NodeType from src.server.server import ChiaServer from src.types.full_block import FullBlock from src.types.header_block import HeaderBlock from src.types.peer_info import PeerInfo from src.util.network import parse_host_port from src.util.logging import initialize_logging from setproctitle import setproctitle setproctitle("chia_full_node") initialize_logging("FullNode %(name)-23s") log = logging.getLogger(__name__) server_closed = False async def load_header_blocks_from_store( store: FullNodeStore, ) -> Dict[str, HeaderBlock]: seen_blocks: Dict[str, HeaderBlock] = {} tips: List[HeaderBlock] = [] async for full_block in store.get_blocks(): if not tips or full_block.weight > tips[0].weight: tips = [full_block.header_block] seen_blocks[full_block.header_hash] = full_block.header_block header_blocks = {}
async def main():
    """Start the full node simulator backed by a fresh sqlite database and test constants."""
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "full_node")
    setproctitle("chia_full_node")
    initialize_logging("FullNode %(name)-23s", config["logging"], root_path)
    log = logging.getLogger(__name__)
    server_closed = False
    db_path = path_from_root(DEFAULT_ROOT_PATH, config["simulator_database_path"])
    mkdir(db_path.parent)
    connection = await aiosqlite.connect(db_path)
    # Create the store (DB) and full node instance
    store = await FullNodeStore.create(connection)
    # Simulator always starts from a clean slate: wipe the DB, re-add genesis.
    await store._clear_database()
    genesis: FullBlock = FullBlock.from_bytes(test_constants["GENESIS_BLOCK"])
    await store.add_block(genesis)
    unspent_store = await CoinStore.create(connection)
    log.info("Initializing blockchain from disk")
    blockchain = await Blockchain.create(unspent_store, store, test_constants)
    mempool_manager = MempoolManager(unspent_store, test_constants)
    await mempool_manager.new_tips(await blockchain.get_full_tips())
    full_node = FullNodeSimulator(
        store,
        blockchain,
        config,
        mempool_manager,
        unspent_store,
        override_constants=test_constants,
    )
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    # Starts the full node server (which full nodes can connect to)
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        full_node,
        NodeType.FULL_NODE,
        ping_interval,
        network_id,
        DEFAULT_ROOT_PATH,
        config,
    )
    full_node._set_server(server)
    _ = await server.start_server(full_node._on_connect)
    rpc_cleanup = None

    def master_close_cb():
        # Idempotent: guard so repeated signals / UI closes only shut down once.
        nonlocal server_closed
        if not server_closed:
            # Called by the UI, when node is closed, or when a signal is sent
            log.info("Closing all connections, and server...")
            full_node._shutdown()
            server.close_all()
            server_closed = True

    if config["start_rpc_server"]:
        # Starts the RPC server
        rpc_cleanup = await start_rpc_server(full_node, master_close_cb, config["rpc_port"])
    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT, master_close_cb)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, master_close_cb)
    except NotImplementedError:
        # Signal handlers are unavailable on some platforms (e.g. Windows event loops).
        log.info("signal handlers unsupported")
    log.info("Waiting to connect to some peers...")
    await asyncio.sleep(3)
    log.info(
        f"Connected to {len(server.global_connections.get_connections())} peers."
    )
    # Awaits for server and all connections to close
    await server.await_closed()
    log.info("Closed all node servers.")
    # Waits for the rpc server to close
    if rpc_cleanup is not None:
        await rpc_cleanup()
    log.info("Closed RPC server.")
    await store.close()
    log.info("Closed store.")
    await unspent_store.close()
    log.info("Closed unspent store.")
    await asyncio.get_running_loop().shutdown_asyncgens()
    log.info("Node fully closed.")
async def main():
    """Entry point for the (non-simulator) full node.

    Loads configuration, restores the blockchain from the on-disk store,
    optionally opens the node's port via UPnP, starts the server and optional
    RPC server, connects to the configured farmer/timelord peers, and runs
    until shutdown.
    """
    config = load_config_cli("config.yaml", "full_node")
    setproctitle("chia_full_node")
    initialize_logging("FullNode %(name)-23s", config["logging"])
    log = logging.getLogger(__name__)
    server_closed = False

    # Create the store (DB) and full node instance
    # Database filename is keyed by database_id so multiple nodes can coexist.
    store = await FullNodeStore.create(f"blockchain_{config['database_id']}.db")
    genesis: FullBlock = FullBlock.from_bytes(constants["GENESIS_BLOCK"])
    await store.add_block(genesis)

    log.info("Initializing blockchain from disk")
    # Rebuild the in-memory header index from the persisted blocks.
    small_header_blocks: Dict[
        str, SmallHeaderBlock
    ] = await load_header_blocks_from_store(store)
    blockchain = await Blockchain.create(small_header_blocks)

    full_node = FullNode(store, blockchain, config)

    if config["enable_upnp"]:
        log.info(f"Attempting to enable UPnP (open up port {config['port']})")
        try:
            upnp = miniupnpc.UPnP()
            upnp.discoverdelay = 5
            upnp.discover()
            upnp.selectigd()
            upnp.addportmapping(
                config["port"], "TCP", upnp.lanaddr, config["port"], "chia", ""
            )
            log.info(f"Port {config['port']} opened with UPnP.")
        except Exception as e:
            # UPnP is best-effort: failure only means the port may need to be
            # forwarded manually, so log and continue.
            log.warning(f"UPnP failed: {e}")

    # Starts the full node server (which full nodes can connect to)
    server = ChiaServer(config["port"], full_node, NodeType.FULL_NODE)
    full_node._set_server(server)
    _ = await server.start_server(config["host"], full_node._on_connect)
    rpc_cleanup = None

    def master_close_cb():
        # Idempotent shutdown trigger shared by the RPC server and OS signals.
        nonlocal server_closed
        if not server_closed:
            # Called by the UI, when node is closed, or when a signal is sent
            log.info("Closing all connections, and server...")
            full_node._shutdown()
            server.close_all()
            server_closed = True

    if config["start_rpc_server"]:
        # Starts the RPC server if -r is provided
        rpc_cleanup = await start_rpc_server(full_node, master_close_cb, config["rpc_port"])

    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, master_close_cb)
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, master_close_cb)

    full_node._start_bg_tasks()

    log.info("Waiting to connect to some peers...")
    await asyncio.sleep(3)
    log.info(
        f"Connected to {len(server.global_connections.get_connections())} peers."
    )

    # Outbound connections are skipped if a shutdown already started during
    # the sleep above.
    if config["connect_to_farmer"] and not server_closed:
        peer_info = PeerInfo(
            full_node.config["farmer_peer"]["host"],
            full_node.config["farmer_peer"]["port"],
        )
        _ = await server.start_client(peer_info, None)

    if config["connect_to_timelord"] and not server_closed:
        peer_info = PeerInfo(
            full_node.config["timelord_peer"]["host"],
            full_node.config["timelord_peer"]["port"],
        )
        _ = await server.start_client(peer_info, None)

    # Awaits for server and all connections to close
    await server.await_closed()
    log.info("Closed all node servers.")

    # Waits for the rpc server to close
    if rpc_cleanup is not None:
        await rpc_cleanup()
    log.info("Closed RPC server.")

    await store.close()
    log.info("Closed store.")
    await asyncio.get_running_loop().shutdown_asyncgens()
    log.info("Node fully closed.")
def __init__(
    self,
    root_path,
    node: Any,
    peer_api: Any,
    node_type: NodeType,
    advertised_port: int,
    service_name: str,
    network_id=bytes32,
    upnp_ports: Optional[List[int]] = None,
    server_listen_ports: Optional[List[int]] = None,
    connect_peers: Optional[List[PeerInfo]] = None,
    auth_connect_peers: bool = True,
    on_connect_callback: Optional[Callable] = None,
    rpc_info: Optional[Tuple[type, int]] = None,
    parse_cli_args=True,
    connect_to_daemon=True,
):
    """Construct a Chia service wrapper around a node and its peer API.

    Loads configuration from ``root_path``, sets up logging and the process
    title, creates the underlying ``ChiaServer`` with the node's SSL CAs, and
    records the peer/port bookkeeping used by start/stop.

    BUG FIX: the list parameters previously used mutable defaults (``[]``),
    which are shared across every instance of this class; they now default to
    ``None`` and are replaced with fresh lists below.  Callers that passed
    explicit lists are unaffected.
    """
    if upnp_ports is None:
        upnp_ports = []
    if server_listen_ports is None:
        server_listen_ports = []
    if connect_peers is None:
        connect_peers = []

    self.root_path = root_path
    self.config = load_config(root_path, "config.yaml")
    ping_interval = self.config.get("ping_interval")
    self.self_hostname = self.config.get("self_hostname")
    self.daemon_port = self.config.get("daemon_port")
    assert ping_interval is not None
    self._connect_to_daemon = connect_to_daemon
    self._node_type = node_type
    self._service_name = service_name
    self._rpc_task = None
    # NOTE(review): the default for ``network_id`` is the bytes32 *type*
    # itself, not a value.  Preserved for compatibility, but confirm all
    # callers pass an explicit network_id.
    self._network_id: bytes32 = network_id

    proctitle_name = f"chia_{service_name}"
    setproctitle(proctitle_name)
    self._log = logging.getLogger(service_name)
    # load_config_cli additionally applies command-line overrides.
    if parse_cli_args:
        service_config = load_config_cli(root_path, "config.yaml", service_name)
    else:
        service_config = load_config(root_path, "config.yaml", service_name)
    initialize_logging(service_name, service_config["logging"], root_path)

    self._rpc_info = rpc_info

    # Two certificate authorities: the node's private CA and the shared
    # chia network CA.
    private_ca_crt, private_ca_key = private_ssl_ca_paths(root_path, self.config)
    chia_ca_crt, chia_ca_key = chia_ssl_ca_paths(root_path, self.config)

    self._server = ChiaServer(
        advertised_port,
        node,
        peer_api,
        node_type,
        ping_interval,
        network_id,
        root_path,
        service_config,
        (private_ca_crt, private_ca_key),
        (chia_ca_crt, chia_ca_key),
        name=f"{service_name}_server",
    )
    # Give the node a back-reference to its server, if it supports one.
    f = getattr(node, "set_server", None)
    if f:
        f(self._server)
    else:
        self._log.warning(f"No set_server method for {service_name}")

    self._connect_peers = connect_peers
    self._auth_connect_peers = auth_connect_peers
    self._upnp_ports = upnp_ports
    self._server_listen_ports = server_listen_ports

    self._api = peer_api
    self._node = node
    self._did_start = False
    self._is_stopping = asyncio.Event()
    self._stopped_by_rpc = False

    self._on_connect_callback = on_connect_callback
    self._advertised_port = advertised_port
    self._reconnect_tasks: List[asyncio.Task] = []
def __init__( self, root_path, api: Any, node_type: NodeType, advertised_port: int, service_name: str, server_listen_ports: List[int] = [], connect_peers: List[PeerInfo] = [], auth_connect_peers: bool = True, on_connect_callback: Optional[OnConnectFunc] = None, rpc_info: Optional[Tuple[type, int]] = None, start_callback: Optional[Callable] = None, stop_callback: Optional[Callable] = None, await_closed_callback: Optional[Callable] = None, periodic_introducer_poll: Optional[Tuple[PeerInfo, int, int]] = None, parse_cli_args=True, ): net_config = load_config(root_path, "config.yaml") ping_interval = net_config.get("ping_interval") network_id = net_config.get("network_id") self.self_hostname = net_config.get("self_hostname") self.daemon_port = net_config.get("daemon_port") assert ping_interval is not None assert network_id is not None self._node_type = node_type proctitle_name = f"chia_{service_name}" setproctitle(proctitle_name) self._log = logging.getLogger(service_name) if parse_cli_args: config = load_config_cli(root_path, "config.yaml", service_name) else: config = load_config(root_path, "config.yaml", service_name) initialize_logging(service_name, config["logging"], root_path) self._rpc_info = rpc_info self._server = ChiaServer( advertised_port, api, node_type, ping_interval, network_id, root_path, config, name=f"{service_name}_server", ) for _ in ["set_server", "_set_server"]: f = getattr(api, _, None) if f: f(self._server) self._connect_peers = connect_peers self._auth_connect_peers = auth_connect_peers self._server_listen_ports = server_listen_ports self._api = api self._task = None self._is_stopping = False self._periodic_introducer_poll = periodic_introducer_poll self._on_connect_callback = on_connect_callback self._start_callback = start_callback self._stop_callback = stop_callback self._await_closed_callback = await_closed_callback self._advertised_port = advertised_port self._server_sockets: List = []
async def main(): root_path = DEFAULT_ROOT_PATH net_config = load_config(root_path, "config.yaml") config = load_config_cli(root_path, "config.yaml", "full_node") setproctitle("chia_full_node") initialize_logging("FullNode %(name)-23s", config["logging"], root_path) log = logging.getLogger(__name__) server_closed = False db_path = path_from_root(root_path, config["simulator_database_path"]) mkdir(db_path.parent) db_path.unlink() full_node = await FullNodeSimulator.create( config, root_path=root_path, override_constants=test_constants, ) ping_interval = net_config.get("ping_interval") network_id = net_config.get("network_id") # Starts the full node server (which full nodes can connect to) assert ping_interval is not None assert network_id is not None server = ChiaServer( config["port"], full_node, NodeType.FULL_NODE, ping_interval, network_id, DEFAULT_ROOT_PATH, config, ) full_node._set_server(server) _ = await server.start_server(full_node._on_connect) rpc_cleanup = None def master_close_cb(): nonlocal server_closed if not server_closed: # Called by the UI, when node is closed, or when a signal is sent log.info("Closing all connections, and server...") server.close_all() server_closed = True if config["start_rpc_server"]: # Starts the RPC server rpc_cleanup = await start_rpc_server(full_node, master_close_cb, config["rpc_port"]) try: asyncio.get_running_loop().add_signal_handler(signal.SIGINT, master_close_cb) asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, master_close_cb) except NotImplementedError: log.info("signal handlers unsupported") # Awaits for server and all connections to close await server.await_closed() log.info("Closed all node servers.") # Stops the full node and closes DBs await full_node._shutdown() # Waits for the rpc server to close if rpc_cleanup is not None: await rpc_cleanup() log.info("Closed RPC server.") await asyncio.get_running_loop().shutdown_asyncgens() log.info("Node fully closed.")