Ejemplo n.º 1
0
def service_kwargs_for_full_node_simulator(root_path: Path, config: Dict,
                                           bt: BlockTools) -> Dict:
    """Assemble the keyword arguments needed to launch a simulated full node.

    Creates the node's database directory, constructs the FullNode and its
    simulator API wrapper, and returns the service kwargs dict.
    """
    # Make sure the database directory exists before the node starts.
    mkdir(path_from_root(root_path, config["database_path"]).parent)

    full_node = FullNode(
        config,
        root_path=root_path,
        consensus_constants=bt.constants,
        name=SERVICE_NAME,
    )
    api = FullNodeSimulator(full_node, bt)

    listen_port = config["port"]
    return dict(
        root_path=root_path,
        node=full_node,
        peer_api=api,
        node_type=NodeType.FULL_NODE,
        advertised_port=listen_port,
        service_name=SERVICE_NAME,
        server_listen_ports=[listen_port],
        on_connect_callback=full_node.on_connect,
        rpc_info=(FullNodeRpcApi, config["rpc_port"]),
        network_id=config["selected_network"],
    )
Ejemplo n.º 2
0
def chia_init(
    root_path: Path,
    *,
    should_check_keys: bool = True,
    fix_ssl_permissions: bool = False,
    testnet: bool = False,
    experimental_v2_db: bool = False,
):
    """
    Standard first run initialization or migration steps. Handles config creation,
    generation of SSL certs, and setting target addresses (via check_keys).

    should_check_keys can be set to False to avoid blocking when accessing a passphrase
    protected Keychain. When launching the daemon from the GUI, we want the GUI to
    handle unlocking the keychain.

    Returns -1 when the root already contained a config (no migration action),
    0 on fresh initialization.
    """
    if os.environ.get("CHIA_ROOT", None) is not None:
        print(
            f"warning, your CHIA_ROOT is set to {os.environ['CHIA_ROOT']}. "
            f"Please unset the environment variable and run chia init again\n"
            f"or manually migrate config.yaml"
        )

    print(f"Chia directory {root_path}")
    if root_path.is_dir() and Path(root_path / "config" / "config.yaml").exists():
        # This is reached if CHIA_ROOT is set, or if user has run chia init twice
        # before a new update.
        if testnet:
            configure(root_path, "", "", "", "", "", "", "", "", testnet="true", peer_connect_timeout="")
        if fix_ssl_permissions:
            fix_ssl(root_path)
        if should_check_keys:
            check_keys(root_path)
        print(f"{root_path} already exists, no migration action taken")
        return -1

    create_default_chia_config(root_path)
    if testnet:
        configure(root_path, "", "", "", "", "", "", "", "", testnet="true", peer_connect_timeout="")
    create_all_ssl(root_path)
    if fix_ssl_permissions:
        fix_ssl(root_path)
    if should_check_keys:
        check_keys(root_path)
    if experimental_v2_db:
        # Pre-create a v2 database file stamped with its schema version.
        config: Dict = load_config(root_path, "config.yaml")["full_node"]
        db_path_replaced: str = config["database_path"].replace("CHALLENGE", config["selected_network"])
        db_path = path_from_root(root_path, db_path_replaced)
        mkdir(db_path.parent)
        import sqlite3
        from contextlib import closing

        # BUGFIX: sqlite3's connection context manager only manages the
        # transaction — it never closes the connection, so the original code
        # leaked the handle. closing() releases it; the inner `with` commits
        # on success, making an explicit commit() unnecessary.
        with closing(sqlite3.connect(db_path)) as connection:
            with connection:
                connection.execute("CREATE TABLE database_version(version int)")
                connection.execute("INSERT INTO database_version VALUES (2)")

    print("")
    print("To see your keys, run 'chia keys show --show-mnemonic-seed'")

    return 0
Ejemplo n.º 3
0
 async def delete_all_keys(self, request: Dict):
     """Stop the wallet, remove every stored key, and delete the wallet db file."""
     await self._stop_wallet()
     self.service.keychain.delete_all_keys()
     db_path = path_from_root(self.service.root_path, self.service.config["database_path"])
     if db_path.exists():
         db_path.unlink()
     return {}
Ejemplo n.º 4
0
 def __init__(
     self,
     server: ChiaServer,
     root_path: Path,
     target_outbound_count: int,
     peer_db_path: str,
     introducer_info: Optional[Dict],
     peer_connect_interval: int,
     log,
 ):
     """Set up peer-management state; no network activity happens here."""
     self.server: ChiaServer = server
     self.message_queue: asyncio.Queue = asyncio.Queue()
     self.is_closed = False
     self.target_outbound_count = target_outbound_count
     self.peer_db_path = path_from_root(root_path, peer_db_path)
     # Resolve the introducer endpoint up front when one was configured.
     if introducer_info is None:
         self.introducer_info: Optional[PeerInfo] = None
     else:
         self.introducer_info = PeerInfo(introducer_info["host"], introducer_info["port"])
     self.peer_connect_interval = peer_connect_interval
     self.log = log
     self.relay_queue = None
     self.address_manager: Optional[AddressManager] = None
     self.connection_time_pretest: Dict = {}
     self.received_count_from_peers: Dict = {}
     self.lock = asyncio.Lock()
     # Background task handles stay None until they are scheduled elsewhere.
     self.connect_peers_task: Optional[asyncio.Task] = None
     self.serialize_task: Optional[asyncio.Task] = None
     self.cleanup_task: Optional[asyncio.Task] = None
     self.initial_wait: int = 0
Ejemplo n.º 5
0
 def __init__(
     self,
     config: Dict,
     root_path: Path,
     consensus_constants: ConsensusConstants,
     name: str = None,
 ):
     """Initialize crawler state and resolve the crawler database location."""
     self.initialized = False
     self.root_path = root_path
     self.config = config
     self.server = None
     self._shut_down = False  # Set to true to close all infinite loops
     self.constants = consensus_constants
     self.state_changed_callback: Optional[Callable] = None
     self.crawl_store = None
     self.log = log
     self.peer_count = 0
     self.with_peak = set()
     self.peers_retrieved: List[Any] = []
     self.host_to_version: Dict[str, str] = {}
     self.version_cache: List[Tuple[str, str]] = []
     self.handshake_time: Dict[str, int] = {}
     self.best_timestamp_per_peer: Dict[str, int] = {}
     # A non-empty crawler_db_path in the config wins; otherwise fall back
     # to "crawler.db" under the root path.
     if config.get("crawler_db_path", "") != "":
         self.db_path = Path(config["crawler_db_path"]).resolve()
     else:
         self.db_path = path_from_root(root_path, "crawler.db")
     mkdir(self.db_path.parent)
     self.bootstrap_peers = config["bootstrap_peers"]
     self.minimum_height = config["minimum_height"]
     self.other_peers_port = config["other_peers_port"]
Ejemplo n.º 6
0
    async def log_in(self, request):
        """
        Logs in the wallet with a specific key.
        """
        fingerprint = request["fingerprint"]
        # Already logged in with this key: nothing to do.
        if self.service.logged_in_fingerprint == fingerprint:
            return {"fingerprint": fingerprint}

        await self._stop_wallet()
        log_in_type = request["type"]
        recovery_host = request["host"]
        # Equivalent to: "testing" in config and config["testing"] is True.
        testing = self.service.config.get("testing") is True

        if log_in_type == "skip":
            started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True)
        elif log_in_type == "restore_backup":
            started = await self.service._start(fingerprint=fingerprint, backup_file=Path(request["file_path"]))
        else:
            started = await self.service._start(fingerprint)

        if started is True:
            return {"fingerprint": fingerprint}

        if testing is True and self.service.backup_initialized is False:
            return {"success": False, "error": "not_initialized"}

        if self.service.backup_initialized is False:
            # Best effort: try to download and parse the last backup so the
            # caller can offer it to the user; failures only affect the
            # optional backup_info fields.
            backup_info = None
            backup_path = None
            try:
                private_key = self.service.get_key_for_fingerprint(fingerprint)
                last_recovery = await download_backup(recovery_host, private_key)
                backup_path = path_from_root(self.service.root_path, "last_recovery")
                if backup_path.exists():
                    backup_path.unlink()
                backup_path.write_text(last_recovery)
                backup_info = get_backup_info(backup_path, private_key)
                backup_info["backup_host"] = recovery_host
                backup_info["downloaded"] = True
            except Exception as e:
                log.error(f"error {e}")
            response = {"success": False, "error": "not_initialized"}
            if backup_info is not None:
                response["backup_info"] = backup_info
                response["backup_path"] = f"{backup_path}"
            return response

        return {"success": False, "error": "Unknown Error"}
Ejemplo n.º 7
0
 def __init__(
     self,
     server: ChiaServer,
     root_path: Path,
     target_outbound_count: int,
     peer_db_path: str,
     introducer_info: Optional[Dict],
     dns_servers: List[str],
     peer_connect_interval: int,
     selected_network: str,
     default_port: Optional[int],
     log,
 ):
     """Initialize peer-discovery state; network activity starts later."""
     self.server: ChiaServer = server
     self.message_queue: asyncio.Queue = asyncio.Queue()
     self.is_closed = False
     self.target_outbound_count = target_outbound_count
     # This is a double check to make sure testnet and mainnet peer databases never mix up.
     # If the network is not 'mainnet', it names the peer db differently, including the selected_network.
     if selected_network != "mainnet":
         if not peer_db_path.endswith(".sqlite"):
             raise ValueError(
                 f"Invalid path for peer table db: {peer_db_path}. Make the path end with .sqlite"
             )
         # Insert the network name just before the ".sqlite" suffix.
         peer_db_path = f"{peer_db_path[:-7]}_{selected_network}.sqlite"
     self.peer_db_path = path_from_root(root_path, peer_db_path)
     self.dns_servers = dns_servers
     if introducer_info is None:
         self.introducer_info: Optional[PeerInfo] = None
     else:
         self.introducer_info = PeerInfo(introducer_info["host"], introducer_info["port"])
     self.peer_connect_interval = peer_connect_interval
     self.log = log
     self.relay_queue = None
     self.address_manager: Optional[AddressManager] = None
     self.connection_time_pretest: Dict = {}
     self.received_count_from_peers: Dict = {}
     self.lock = asyncio.Lock()
     self.connect_peers_task: Optional[asyncio.Task] = None
     self.serialize_task: Optional[asyncio.Task] = None
     self.cleanup_task: Optional[asyncio.Task] = None
     self.initial_wait: int = 0
     try:
         self.resolver: Optional[dns.asyncresolver.Resolver] = dns.asyncresolver.Resolver()
     except Exception:
         # DNS-based discovery is optional; log the failure and carry on.
         self.resolver = None
         self.log.exception("Error initializing asyncresolver")
     self.pending_outbound_connections: Set[str] = set()
     self.pending_tasks: Set[asyncio.Task] = set()
     # Fall back to the network's well-known default port when none is given.
     self.default_port: Optional[int] = default_port
     if default_port is None and selected_network in NETWORK_ID_DEFAULT_PORTS:
         self.default_port = NETWORK_ID_DEFAULT_PORTS[selected_network]
Ejemplo n.º 8
0
 def __init__(self):
     """Initialize in-memory peer lists and locate the crawler database."""
     self.reliable_peers_v4 = []
     self.reliable_peers_v6 = []
     self.lock = asyncio.Lock()
     self.pointer_v4 = 0
     self.pointer_v6 = 0
     # The crawler database always lives under the default root path.
     self.db_path = path_from_root(DEFAULT_ROOT_PATH, "crawler.db")
     mkdir(self.db_path.parent)
Ejemplo n.º 9
0
def initialize_logging(service_name: str, logging_config: Dict,
                       root_path: Path):
    """Configure the root logger for a service.

    Installs either a colorized stdout handler or a concurrent rotating
    file handler (chosen by ``logging_config["log_stdout"]``), then applies
    the configured log level. A missing or unrecognized "log_level" falls
    back to INFO.
    """
    log_path = path_from_root(
        root_path, logging_config.get("log_filename", "log/debug.log"))
    log_date_format = "%Y-%m-%dT%H:%M:%S"

    mkdir(str(log_path.parent))
    # Pad the logger-name field so log columns line up across services.
    file_name_length = 33 - len(service_name)
    if logging_config["log_stdout"]:
        handler = colorlog.StreamHandler()
        handler.setFormatter(
            colorlog.ColoredFormatter(
                f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: "
                f"%(log_color)s%(levelname)-8s%(reset)s %(message)s",
                datefmt=log_date_format,
                reset=True,
            ))
        logger = colorlog.getLogger()
        logger.addHandler(handler)
    else:
        logger = logging.getLogger()
        maxrotation = logging_config.get("log_maxfilesrotation", 7)
        handler = ConcurrentRotatingFileHandler(log_path,
                                                "a",
                                                maxBytes=20 * 1024 * 1024,
                                                backupCount=maxrotation)
        handler.setFormatter(
            logging.Formatter(
                fmt=
                f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: %(levelname)-8s %(message)s",
                datefmt=log_date_format,
            ))
        logger.addHandler(handler)

    # Map the configured level name to a logging constant instead of the
    # previous verbose if/elif chain. Missing or unknown names fall back to
    # INFO, exactly as before.
    level_by_name = {
        "CRITICAL": logging.CRITICAL,
        "ERROR": logging.ERROR,
        "WARNING": logging.WARNING,
        "INFO": logging.INFO,
        "DEBUG": logging.DEBUG,
    }
    level = level_by_name.get(logging_config.get("log_level"), logging.INFO)
    logger.setLevel(level)
    if level == logging.DEBUG:
        # These libraries log far too much at DEBUG level.
        logging.getLogger("aiosqlite").setLevel(logging.INFO)
        logging.getLogger("websockets").setLevel(logging.INFO)
Ejemplo n.º 10
0
 async def delete_key(self, request):
     """Delete the key with the requested fingerprint plus its wallet database."""
     await self._stop_wallet()
     fingerprint = request["fingerprint"]
     self.service.keychain.delete_key_by_fingerprint(fingerprint)
     # The per-key wallet db is suffixed with the fingerprint.
     db_path = path_from_root(
         self.service.root_path,
         f"{self.service.config['database_path']}-{fingerprint}",
     )
     if db_path.exists():
         db_path.unlink()
     return {}
Ejemplo n.º 11
0
def db_upgrade_func(
    root_path: Path,
    in_db_path: Optional[Path] = None,
    out_db_path: Optional[Path] = None,
    no_update_config: bool = False,
):
    """Convert a v1 full-node database to the v2 format.

    Paths not supplied explicitly are derived from config.yaml's
    "database_path" pattern. config.yaml is only rewritten when both paths
    were derived and no_update_config is False. The original v1 file is
    never deleted.
    """
    # Only rewrite config.yaml when both paths came from it.
    update_config: bool = in_db_path is None and out_db_path is None and not no_update_config

    config: Dict
    selected_network: str
    db_pattern: str
    if in_db_path is None or out_db_path is None:
        config = load_config(root_path, "config.yaml")["full_node"]
        selected_network = config["selected_network"]
        db_pattern = config["database_path"]

    resolved: str
    if in_db_path is None:
        resolved = db_pattern.replace("CHALLENGE", selected_network)
        in_db_path = path_from_root(root_path, resolved)

    if out_db_path is None:
        resolved = db_pattern.replace("CHALLENGE", selected_network).replace("_v1_", "_v2_")
        out_db_path = path_from_root(root_path, resolved)
        mkdir(out_db_path.parent)

    asyncio.run(convert_v1_to_v2(in_db_path, out_db_path))

    if update_config:
        print("updating config.yaml")
        config = load_config(root_path, "config.yaml")
        new_db_path = db_pattern.replace("_v1_", "_v2_")
        config["full_node"]["database_path"] = new_db_path
        print(f"database_path: {new_db_path}")
        save_config(root_path, "config.yaml", config)

    print(f"\n\nLEAVING PREVIOUS DB FILE UNTOUCHED {in_db_path}\n")
Ejemplo n.º 12
0
async def profile_task(root_path: pathlib.Path, log: logging.Logger) -> None:
    """Dump a cProfile snapshot every second until the task is cancelled.

    Profiles are written to <root_path>/profile/slot-NNNNN.profile, one file
    per one-second slot.
    """
    profile_dir = path_from_root(root_path, "profile")
    # Use lazy %-style logging arguments instead of eager `%` formatting so
    # the message is only rendered when the record is actually emitted.
    log.info("Starting profiler. saving to %s", profile_dir)
    mkdir(profile_dir)

    counter = 0

    while True:
        pr = cProfile.Profile()
        pr.enable()
        # this will throw CancelledError when we're exiting
        await asyncio.sleep(1)
        pr.create_stats()
        pr.dump_stats(profile_dir / ("slot-%05d.profile" % counter))
        log.debug("saving profile %05d", counter)
        counter += 1
Ejemplo n.º 13
0
 async def _create_backup_and_upload(self, host) -> None:
     """Create a timestamped wallet backup file and upload it to the backup host."""
     assert self.service.wallet_state_manager is not None
     try:
         # Backups are skipped entirely under the testing configuration.
         if self.service.config.get("testing") is True:
             return
         backup_path = path_from_root(self.service.root_path, f"backup_{time.time()}")
         await self.service.wallet_state_manager.create_wallet_backup(backup_path)
         response = await upload_backup(host, backup_path.read_text())
         success = response["success"]
         if success is False:
             log.error("Failed to upload backup to wallet backup service")
         elif success is True:
             log.info("Finished upload of the backup file")
     except Exception as e:
         log.error(f"Exception in upload backup. Error: {e}")
Ejemplo n.º 14
0
    async def _start(
        self,
        fingerprint: Optional[int] = None,
        new_wallet: bool = False,
        backup_file: Optional[Path] = None,
        skip_backup_import: bool = False,
    ) -> bool:
        """Start the wallet for the key identified by ``fingerprint``.

        Looks up the private key, opens the per-key/per-network wallet
        database, creates the WalletStateManager, handles first-time backup
        initialization (new wallet / skip / import from ``backup_file``),
        and starts the peer and sync background tasks.

        Returns True on success; False when no key matches or when backup
        initialization was required but could not be completed.
        """
        private_key = self.get_key_for_fingerprint(fingerprint)
        if private_key is None:
            self.logged_in = False
            return False

        if self.config.get("enable_profiler", False):
            # NOTE(review): profile_task is invoked with three arguments here;
            # confirm its signature accepts a service-name argument — other
            # definitions appear to take only (root_path, log).
            asyncio.create_task(profile_task(self.root_path, "wallet", self.log))

        # The db filename embeds both the selected network and the key's
        # fingerprint so separate keys/networks never share a database file.
        db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
        db_path_replaced: str = (
            self.config["database_path"]
            .replace("CHALLENGE", self.config["selected_network"])
            .replace("KEY", db_path_key_suffix)
        )
        path = path_from_root(self.root_path, db_path_replaced)
        mkdir(path.parent)

        assert self.server is not None
        self.wallet_state_manager = await WalletStateManager.create(
            private_key, self.config, path, self.constants, self.server
        )

        self.wsm_close_task = None

        assert self.wallet_state_manager is not None

        # First-run backup flow: decide how the user's backup gets initialized.
        backup_settings: BackupInitialized = self.wallet_state_manager.user_settings.get_backup_settings()
        if backup_settings.user_initialized is False:
            if new_wallet is True:
                await self.wallet_state_manager.user_settings.user_created_new_wallet()
                self.wallet_state_manager.new_wallet = True
            elif skip_backup_import is True:
                await self.wallet_state_manager.user_settings.user_skipped_backup_import()
            elif backup_file is not None:
                await self.wallet_state_manager.import_backup_info(backup_file)
            else:
                # No way to initialize the backup: roll back and report failure.
                self.backup_initialized = False
                await self.wallet_state_manager.close_all_stores()
                self.wallet_state_manager = None
                self.logged_in = False
                return False

        self.backup_initialized = True

        # Start peers here after the backup initialization has finished
        # We only want to do this once per instantiation
        # However, doing it earlier before backup initialization causes
        # the wallet to spam the introducer
        if self.wallet_peers_initialized is False:
            asyncio.create_task(self.wallet_peers.start())
            self.wallet_peers_initialized = True

        if backup_file is not None:
            # Restore the sync start height from the backup, keeping a
            # configured safety buffer below it (clamped at 0).
            json_dict = open_backup_file(backup_file, self.wallet_state_manager.private_key)
            if "start_height" in json_dict["data"]:
                start_height = json_dict["data"]["start_height"]
                self.config["starting_height"] = max(0, start_height - self.config["start_height_buffer"])
            else:
                self.config["starting_height"] = 0
        else:
            self.config["starting_height"] = 0

        if self.state_changed_callback is not None:
            self.wallet_state_manager.set_callback(self.state_changed_callback)

        self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
        self._shut_down = False

        # Kick off background full-node polling and the sync job.
        self.peer_task = asyncio.create_task(self._periodically_check_full_node())
        self.sync_event = asyncio.Event()
        self.sync_task = asyncio.create_task(self.sync_job())
        self.logged_in_fingerprint = fingerprint
        self.logged_in = True
        return True