Example #1
def chia_init(
    root_path: Path,
    *,
    should_check_keys: bool = True,
    fix_ssl_permissions: bool = False,
    testnet: bool = False,
    experimental_v2_db: bool = False,
):
    """
    Standard first run initialization or migration steps. Handles config creation,
    generation of SSL certs, and setting target addresses (via check_keys).

    should_check_keys can be set to False to avoid blocking when accessing a
    passphrase-protected Keychain. When launching the daemon from the GUI, we want
    the GUI to handle unlocking the keychain.
    """
    if os.environ.get("CHIA_ROOT", None) is not None:
        print(
            f"Warning: your CHIA_ROOT is set to {os.environ['CHIA_ROOT']}. "
            f"Please unset the environment variable and run chia init again,\n"
            f"or manually migrate config.yaml"
        )

    print(f"Chia directory {root_path}")
    if root_path.is_dir() and Path(root_path / "config" / "config.yaml").exists():
        # This is reached if CHIA_ROOT is set, or if user has run chia init twice
        # before a new update.
        if testnet:
            configure(root_path, "", "", "", "", "", "", "", "", testnet="true", peer_connect_timeout="")
        if fix_ssl_permissions:
            fix_ssl(root_path)
        if should_check_keys:
            check_keys(root_path)
        print(f"{root_path} already exists, no migration action taken")
        return -1

    create_default_chia_config(root_path)
    if testnet:
        configure(root_path, "", "", "", "", "", "", "", "", testnet="true", peer_connect_timeout="")
    create_all_ssl(root_path)
    if fix_ssl_permissions:
        fix_ssl(root_path)
    if should_check_keys:
        check_keys(root_path)
    if experimental_v2_db:
        config: Dict = load_config(root_path, "config.yaml")["full_node"]
        db_path_replaced: str = config["database_path"].replace("CHALLENGE", config["selected_network"])
        db_path = path_from_root(root_path, db_path_replaced)
        mkdir(db_path.parent)
        import sqlite3

        with sqlite3.connect(db_path) as connection:
            connection.execute("CREATE TABLE database_version(version int)")
            connection.execute("INSERT INTO database_version VALUES (2)")
            connection.commit()

    print("")
    print("To see your keys, run 'chia keys show --show-mnemonic-seed'")

    return 0
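
Note: every example on this page calls chia-blockchain's mkdir helper rather than Path.mkdir directly. As a point of reference, here is a minimal sketch of what that helper presumably does, assuming it simply wraps os.makedirs with exist_ok=True (the real chia.util.path implementation may differ):

import os

def mkdir(path) -> None:
    # Create the directory and any missing parents; succeed silently if it already exists.
    os.makedirs(path, exist_ok=True)

This is why the snippets can call mkdir on directories that may already exist without checking for them first.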
Example #2
def create_default_chia_config(root_path: Path) -> None:
    for filename in ["config.yaml"]:
        default_config_file_data = initial_config_file(filename)
        path = config_path_for_filename(root_path, filename)
        mkdir(path.parent)
        with open(path, "w") as f:
            f.write(default_config_file_data)
Example #3
def launch_plotter(root_path: Path, service_name: str, service_array: List[str], id: str):
    # we need to pass on the possibly altered CHIA_ROOT
    os.environ["CHIA_ROOT"] = str(root_path)
    service_executable = executable_for_service(service_array[0])

    # Swap service name with name of executable
    service_array[0] = service_executable
    startupinfo = None
    if os.name == "nt":
        startupinfo = subprocess.STARTUPINFO()  # type: ignore
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # type: ignore

    plotter_path = plotter_log_path(root_path, id)

    if plotter_path.parent.exists():
        if plotter_path.exists():
            plotter_path.unlink()
    else:
        mkdir(plotter_path.parent)
    outfile = open(plotter_path.resolve(), "w")
    log.info(f"Service array: {service_array}")
    process = subprocess.Popen(service_array, shell=False, stderr=outfile, stdout=outfile, startupinfo=startupinfo)

    pid_path = pid_path_for_service(root_path, service_name, id)
    try:
        mkdir(pid_path.parent)
        with open(pid_path, "w") as f:
            f.write(f"{process.pid}\n")
    except Exception:
        pass
    return process, pid_path
Example #4
    def test_create_config_overwrite(self, tmpdir):
        """
        Test create_default_chia_config() when overwriting an existing config.yaml
        """
        # When: using a clean directory
        root_path: Path = Path(tmpdir)
        config_file_path: Path = root_path / "config" / "config.yaml"
        mkdir(config_file_path.parent)
        # When: config.yaml already exists with content
        with open(config_file_path, "w") as f:
            f.write("Some config content")
        # Expect: config.yaml exists
        assert config_file_path.exists() is True
        # When: creating a new config
        create_default_chia_config(root_path)
        # Expect: config.yaml exists
        assert config_file_path.exists() is True

        expected_content: str = initial_config_file("config.yaml")
        assert len(expected_content) > 0

        with open(config_file_path, "r") as f:
            actual_content: str = f.read()
            # Expect: config.yaml contents are overwritten with initial contents
            assert actual_content == expected_content
Example #5
    def __init__(
        self,
        config: Dict,
        root_path: Path,
        consensus_constants: ConsensusConstants,
        name: Optional[str] = None,
    ):
        self.initialized = False
        self.root_path = root_path
        self.config = config
        self.server = None
        self._shut_down = False  # Set to true to close all infinite loops
        self.constants = consensus_constants
        self.state_changed_callback: Optional[Callable] = None
        self.crawl_store = None
        self.log = log
        self.peer_count = 0
        self.with_peak = set()
        self.peers_retrieved: List[Any] = []
        self.host_to_version: Dict[str, str] = {}
        self.version_cache: List[Tuple[str, str]] = []
        self.handshake_time: Dict[str, int] = {}
        self.best_timestamp_per_peer: Dict[str, int] = {}
        if "crawler_db_path" in config and config["crawler_db_path"] != "":
            path = Path(config["crawler_db_path"])
            self.db_path = path.resolve()
        else:
            db_path_replaced: str = "crawler.db"
            self.db_path = path_from_root(root_path, db_path_replaced)
        mkdir(self.db_path.parent)
        self.bootstrap_peers = config["bootstrap_peers"]
        self.minimum_height = config["minimum_height"]
        self.other_peers_port = config["other_peers_port"]
Example #6
def service_kwargs_for_full_node_simulator(root_path: Path, config: Dict,
                                           bt: BlockTools) -> Dict:
    mkdir(path_from_root(root_path, config["database_path"]).parent)
    constants = bt.constants

    node = FullNode(
        config,
        root_path=root_path,
        consensus_constants=constants,
        name=SERVICE_NAME,
    )

    peer_api = FullNodeSimulator(node, bt)
    network_id = config["selected_network"]
    kwargs = dict(
        root_path=root_path,
        node=node,
        peer_api=peer_api,
        node_type=NodeType.FULL_NODE,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        on_connect_callback=node.on_connect,
        rpc_info=(FullNodeRpcApi, config["rpc_port"]),
        network_id=network_id,
    )
    return kwargs
Example #7
def copy_files_rec(old_path: Path, new_path: Path):
    if old_path.is_file():
        print(f"{new_path}")
        mkdir(new_path.parent)
        shutil.copy(old_path, new_path)
    elif old_path.is_dir():
        for old_path_child in old_path.iterdir():
            new_path_child = new_path / old_path_child.name
            copy_files_rec(old_path_child, new_path_child)
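
For illustration, a hypothetical call (source and destination paths assumed) that mirrors an entire tree, creating destination parent directories on demand:

from pathlib import Path

copy_files_rec(Path("/tmp/old_root"), Path("/tmp/new_root"))  # hypothetical paths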
Example #8
    async def initialize_address_manager(self) -> None:
        mkdir(self.peer_db_path.parent)
        self.connection = await aiosqlite.connect(self.peer_db_path)
        self.address_manager_store = await AddressManagerStore.create(self.connection)
        if not await self.address_manager_store.is_empty():
            self.address_manager = await self.address_manager_store.deserialize()
        else:
            await self.address_manager_store.clear()
            self.address_manager = AddressManager()
        self.server.set_received_message_callback(self.update_peer_timestamp_on_message)
Example #9
    def __init__(self):
        self.reliable_peers_v4 = []
        self.reliable_peers_v6 = []
        self.lock = asyncio.Lock()
        self.pointer_v4 = 0
        self.pointer_v6 = 0
        db_path_replaced: str = "crawler.db"
        root_path = DEFAULT_ROOT_PATH
        self.db_path = path_from_root(root_path, db_path_replaced)
        mkdir(self.db_path.parent)
Example #10
def initialize_logging(service_name: str, logging_config: Dict,
                       root_path: Path):
    log_path = path_from_root(
        root_path, logging_config.get("log_filename", "log/debug.log"))
    log_date_format = "%Y-%m-%dT%H:%M:%S"

    mkdir(str(log_path.parent))
    file_name_length = 33 - len(service_name)
    if logging_config["log_stdout"]:
        handler = colorlog.StreamHandler()
        handler.setFormatter(
            colorlog.ColoredFormatter(
                f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: "
                f"%(log_color)s%(levelname)-8s%(reset)s %(message)s",
                datefmt=log_date_format,
                reset=True,
            ))

        logger = colorlog.getLogger()
        logger.addHandler(handler)
    else:
        logger = logging.getLogger()
        maxrotation = logging_config.get("log_maxfilesrotation", 7)
        handler = ConcurrentRotatingFileHandler(log_path,
                                                "a",
                                                maxBytes=20 * 1024 * 1024,
                                                backupCount=maxrotation)
        handler.setFormatter(
            logging.Formatter(
                fmt=f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: %(levelname)-8s %(message)s",
                datefmt=log_date_format,
            ))
        logger.addHandler(handler)

    if "log_level" in logging_config:
        if logging_config["log_level"] == "CRITICAL":
            logger.setLevel(logging.CRITICAL)
        elif logging_config["log_level"] == "ERROR":
            logger.setLevel(logging.ERROR)
        elif logging_config["log_level"] == "WARNING":
            logger.setLevel(logging.WARNING)
        elif logging_config["log_level"] == "INFO":
            logger.setLevel(logging.INFO)
        elif logging_config["log_level"] == "DEBUG":
            logger.setLevel(logging.DEBUG)
            logging.getLogger("aiosqlite").setLevel(logging.INFO)  # Too much logging on debug level
            logging.getLogger("websockets").setLevel(logging.INFO)  # Too much logging on debug level
        else:
            logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.INFO)
Example #11
def create_default_chia_config(root_path: Path, filenames=["config.yaml"]) -> None:
    for filename in filenames:
        default_config_file_data: str = initial_config_file(filename)
        path: Path = config_path_for_filename(root_path, filename)
        tmp_path: Path = path.with_suffix("." + str(os.getpid()))
        mkdir(path.parent)
        with open(tmp_path, "w") as f:
            f.write(default_config_file_data)
        try:
            os.replace(str(tmp_path), str(path))
        except PermissionError:
            shutil.move(str(tmp_path), str(path))
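
Note the write-then-rename pattern above: the config is first written to a PID-suffixed temporary file and then swapped into place with os.replace, which is atomic on POSIX, so a concurrent reader never observes a half-written config.yaml. The shutil.move fallback covers the case where os.replace raises PermissionError, as can happen on Windows when the destination file is held open by another process.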
Example #12
    def __init__(self, path: Path, plots_origin: List[Path]):
        self.path = path
        mkdir(path)
        # Drop the existing files in the test directories
        for plot in path.iterdir():
            unlink(plot)
        # Copy over the original plots
        for plot in plots_origin:
            if not Path(path / plot.name).exists():
                copy(plot, path)
        # Adjust the paths to reflect the testing plots
        self.plots = [path / plot.name for plot in plots_origin]
Example #13
    def test_multiple_writers(self):
        num_workers = 20
        keyring_path = str(
            KeyringWrapper.get_shared_instance().keyring.keyring_path)
        passphrase_list = list(
            map(
                lambda x: ("test-service", f"test-user-{x}", f"passphrase {x}",
                           keyring_path, x, num_workers),
                range(num_workers),
            ))

        # Create a directory for each process to indicate readiness
        ready_dir: Path = Path(keyring_path).parent / "ready"
        mkdir(ready_dir)

        finished_dir: Path = Path(keyring_path).parent / "finished"
        mkdir(finished_dir)

        # When: spinning off children to each set a passphrase concurrently
        with Pool(processes=num_workers) as pool:
            res = pool.starmap_async(dummy_set_passphrase, passphrase_list)

            # Wait up to 30 seconds for all processes to indicate readiness
            assert poll_directory(ready_dir, num_workers, 30) is True

            log.warning(f"Test setup complete: {num_workers} workers ready")

            # Signal that testing should begin
            start_file_path: Path = ready_dir / "start"
            with open(start_file_path, "w") as f:
                f.write(f"{os.getpid()}\n")

            # Wait up to 30 seconds for all processes to indicate completion
            assert poll_directory(finished_dir, num_workers, 30) is True

            log.warning(f"Finished: {num_workers} workers finished")

            # Collect results
            res.get(timeout=10)  # 10-second timeout to prevent a bad test from spoiling the fun

        # Expect: parent process should be able to find all passphrases that were set by the child processes
        for item in passphrase_list:
            expected_passphrase = item[2]
            actual_passphrase = KeyringWrapper.get_shared_instance().get_passphrase(service=item[0], user=item[1])
            assert expected_passphrase == actual_passphrase
Example #14
    async def serialize(cls, address_manager: AddressManager,
                        peers_file_path: Path) -> None:
        """
        Serialize the address manager's peer data to a file.
        """
        metadata: List[Tuple[str, str]] = []
        nodes: List[Tuple[int, ExtendedPeerInfo]] = []
        new_table_entries: List[Tuple[int, int]] = []
        unique_ids: Dict[int, int] = {}
        count_ids: int = 0

        log.info("Serializing peer data")
        metadata.append(("key", str(address_manager.key)))

        for node_id, info in address_manager.map_info.items():
            unique_ids[node_id] = count_ids
            if info.ref_count > 0:
                assert count_ids != address_manager.new_count
                nodes.append((count_ids, info))
                count_ids += 1
        metadata.append(("new_count", str(count_ids)))

        tried_ids = 0
        for node_id, info in address_manager.map_info.items():
            if info.is_tried:
                assert info is not None
                assert tried_ids != address_manager.tried_count
                nodes.append((count_ids, info))
                count_ids += 1
                tried_ids += 1
        metadata.append(("tried_count", str(tried_ids)))

        for bucket in range(NEW_BUCKET_COUNT):
            for i in range(BUCKET_SIZE):
                if address_manager.new_matrix[bucket][i] != -1:
                    index = unique_ids[address_manager.new_matrix[bucket][i]]
                    new_table_entries.append((index, bucket))

        try:
            # Ensure the parent directory exists
            mkdir(peers_file_path.parent)
            start_time = timer()
            await cls._write_peers(peers_file_path, metadata, nodes,
                                   new_table_entries)
            log.debug(
                f"Serializing peer data took {timer() - start_time} seconds")
        except Exception:
            log.exception(f"Failed to write peer data to {peers_file_path}")
Example #15
    def test_load_config_exit_on_error(self, tmpdir):
        """
        Call load_config() with an invalid path. Behavior should depend on the exit_on_error flag.
        """
        root_path: Path = Path(tmpdir)
        config_file_path: Path = root_path / "config" / "config.yaml"
        # When: the config file path points to a directory
        mkdir(config_file_path)
        # When: exit_on_error is True
        # Expect: load_config will exit
        with pytest.raises(SystemExit):
            _ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=True)
        # When: exit_on_error is False
        # Expect: load_config will raise an exception
        with pytest.raises(ValueError):
            _ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=False)
Example #16
async def profile_task(root_path: pathlib.Path, log: logging.Logger) -> None:
    profile_dir = path_from_root(root_path, "profile")
    log.info("Starting profiler. Saving to %s" % profile_dir)
    mkdir(profile_dir)

    counter = 0

    while True:
        pr = cProfile.Profile()
        pr.enable()
        # this will throw CancelledError when we're exiting
        await asyncio.sleep(1)
        pr.create_stats()
        pr.dump_stats(profile_dir / ("slot-%05d.profile" % counter))
        log.debug("saving profile %05d" % counter)
        counter += 1
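
Each iteration profiles roughly one second of the event loop and dumps it to a numbered slot-NNNNN.profile file. A dump can later be inspected with the standard pstats module; a minimal sketch, with the file name assumed from the naming scheme above:

import pstats

stats = pstats.Stats("profile/slot-00000.profile")  # path assumed relative to the chia root
stats.sort_stats("cumulative").print_stats(10)  # show the top 10 entries by cumulative time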
Example #17
def launch_service(root_path: Path, service_command) -> Tuple[subprocess.Popen, Path]:
    """
    Launch a child process.
    """
    # set up CHIA_ROOT
    # invoke correct script
    # save away PID

    # we need to pass on the possibly altered CHIA_ROOT
    os.environ["CHIA_ROOT"] = str(root_path)

    log.debug(f"Launching service with CHIA_ROOT: {os.environ['CHIA_ROOT']}")

    # Insert the proper executable for the service
    service_array = service_command.split()
    service_executable = executable_for_service(service_array[0])
    service_array[0] = service_executable

    if service_command == "chia_full_node_simulator":
        # Set the -D/--connect_to_daemon flag to signify that the child should connect
        # to the daemon to access the keychain
        service_array.append("-D")

    startupinfo = None
    if os.name == "nt":
        startupinfo = subprocess.STARTUPINFO()  # type: ignore
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # type: ignore

    # CREATE_NEW_PROCESS_GROUP allows graceful shutdown on windows, by CTRL_BREAK_EVENT signal
    if sys.platform == "win32" or sys.platform == "cygwin":
        creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
    else:
        creationflags = 0
    environ_copy = os.environ.copy()
    process = subprocess.Popen(
        service_array, shell=False, startupinfo=startupinfo, creationflags=creationflags, env=environ_copy
    )
    pid_path = pid_path_for_service(root_path, service_command)
    try:
        mkdir(pid_path.parent)
        with open(pid_path, "w") as f:
            f.write(f"{process.pid}\n")
    except Exception:
        pass
    return process, pid_path
Example #18
def launch_plotter(root_path: Path, service_name: str, service_array: List[str], id: str):
    # we need to pass on the possibly altered CHIA_ROOT
    os.environ["CHIA_ROOT"] = str(root_path)
    service_executable = executable_for_service(service_array[0])

    # Swap service name with name of executable
    service_array[0] = service_executable
    startupinfo = None
    if os.name == "nt":
        startupinfo = subprocess.STARTUPINFO()  # type: ignore
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # type: ignore

    # Windows-specific.
    # If the current process group is used, CTRL_C_EVENT will kill the parent and everyone in the group!
    try:
        creationflags: int = subprocess.CREATE_NEW_PROCESS_GROUP  # type: ignore
    except AttributeError:  # Not on Windows.
        creationflags = 0

    plotter_path = plotter_log_path(root_path, id)

    if plotter_path.parent.exists():
        if plotter_path.exists():
            plotter_path.unlink()
    else:
        mkdir(plotter_path.parent)
    outfile = open(plotter_path.resolve(), "w")
    log.info(f"Service array: {service_array}")
    process = subprocess.Popen(
        service_array,
        shell=False,
        stderr=outfile,
        stdout=outfile,
        startupinfo=startupinfo,
        creationflags=creationflags,
    )

    pid_path = pid_path_for_service(root_path, service_name, id)
    try:
        mkdir(pid_path.parent)
        with open(pid_path, "w") as f:
            f.write(f"{process.pid}\n")
    except Exception:
        pass
    return process, pid_path
Example #19
def launch_service(root_path: Path, service_command) -> Tuple[subprocess.Popen, Path]:
    """
    Launch a child process.
    """
    # set up CHIA_ROOT
    # invoke correct script
    # save away PID

    # we need to pass on the possibly altered CHIA_ROOT
    os.environ["CHIA_ROOT"] = str(root_path)

    log.debug(f"Launching service with CHIA_ROOT: {os.environ['CHIA_ROOT']}")

    lockfile = singleton(service_launch_lock_path(root_path, service_command))
    if lockfile is None:
        logging.error(f"{service_command}: already running")
        raise subprocess.SubprocessError

    # Insert the proper executable for the service
    service_array = service_command.split()
    service_executable = executable_for_service(service_array[0])
    service_array[0] = service_executable
    startupinfo = None
    if os.name == "nt":
        startupinfo = subprocess.STARTUPINFO()  # type: ignore
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW  # type: ignore

    # CREATE_NEW_PROCESS_GROUP allows graceful shutdown on windows, by CTRL_BREAK_EVENT signal
    if sys.platform == "win32" or sys.platform == "cygwin":
        creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
    else:
        creationflags = 0
    environ_copy = os.environ.copy()
    process = subprocess.Popen(
        service_array, shell=False, startupinfo=startupinfo, creationflags=creationflags, env=environ_copy
    )
    pid_path = pid_path_for_service(root_path, service_command)
    try:
        mkdir(pid_path.parent)
        with open(pid_path, "w") as f:
            f.write(f"{process.pid}\n")
    except Exception:
        pass
    return process, pid_path
Example #20
def persistent_blocks(
    num_of_blocks: int,
    db_name: str,
    seed: bytes = b"",
    empty_sub_slots=0,
    normalized_to_identity_cc_eos: bool = False,
    normalized_to_identity_icc_eos: bool = False,
    normalized_to_identity_cc_sp: bool = False,
    normalized_to_identity_cc_ip: bool = False,
):
    # try loading from disk; if not present, create a new blocks db file
    # TODO hash fixtures.py and blocktool.py, add to path, delete if the files changed
    block_path_dir = Path("~/.chia/blocks").expanduser()
    file_path = Path(f"~/.chia/blocks/{db_name}").expanduser()
    if not block_path_dir.exists():
        mkdir(block_path_dir.parent)
        mkdir(block_path_dir)

    if file_path.exists():
        try:
            bytes_list = file_path.read_bytes()
            block_bytes_list: List[bytes] = pickle.loads(bytes_list)
            blocks: List[FullBlock] = []
            for block_bytes in block_bytes_list:
                blocks.append(FullBlock.from_bytes(block_bytes))
            if len(blocks) == num_of_blocks:
                print(f"\n loaded {file_path} with {len(blocks)} blocks")
                return blocks
        except EOFError:
            print("\n error reading db file")

    return new_test_db(
        file_path,
        num_of_blocks,
        seed,
        empty_sub_slots,
        normalized_to_identity_cc_eos,
        normalized_to_identity_icc_eos,
        normalized_to_identity_cc_sp,
        normalized_to_identity_cc_ip,
    )
Example #21
def singleton(lockfile: Path, text: str = "semaphore") -> Optional[TextIO]:
    """
    Open a lockfile exclusively.
    """

    if not lockfile.parent.exists():
        mkdir(lockfile.parent)

    try:
        if has_fcntl:
            f = open(lockfile, "w")
            fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
        else:
            if lockfile.exists():
                lockfile.unlink()
            fd = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            f = open(fd, "w")
        f.write(text)
    except IOError:
        return None
    return f
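
A hypothetical caller (lock path assumed for illustration) treats a None return as "another instance already holds the lock" and must keep the returned handle referenced for as long as the lock should be held, as Example #19 does with service_launch_lock_path:

import sys
from pathlib import Path

lock = singleton(Path("/tmp/example_service.lock"))  # hypothetical lock file
if lock is None:
    sys.exit("already running")
# ... do work; the lock is released when the handle is closed or the process exits ...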
Example #22
def db_upgrade_func(
    root_path: Path,
    in_db_path: Optional[Path] = None,
    out_db_path: Optional[Path] = None,
    no_update_config: bool = False,
):
    update_config: bool = in_db_path is None and out_db_path is None and not no_update_config

    config: Dict
    selected_network: str
    db_pattern: str
    if in_db_path is None or out_db_path is None:
        config = load_config(root_path, "config.yaml")["full_node"]
        selected_network = config["selected_network"]
        db_pattern = config["database_path"]

    db_path_replaced: str
    if in_db_path is None:
        db_path_replaced = db_pattern.replace("CHALLENGE", selected_network)
        in_db_path = path_from_root(root_path, db_path_replaced)

    if out_db_path is None:
        db_path_replaced = db_pattern.replace("CHALLENGE", selected_network).replace("_v1_", "_v2_")
        out_db_path = path_from_root(root_path, db_path_replaced)
        mkdir(out_db_path.parent)

    asyncio.run(convert_v1_to_v2(in_db_path, out_db_path))

    if update_config:
        print("updating config.yaml")
        config = load_config(root_path, "config.yaml")
        new_db_path = db_pattern.replace("_v1_", "_v2_")
        config["full_node"]["database_path"] = new_db_path
        print(f"database_path: {new_db_path}")
        save_config(root_path, "config.yaml", config)

    print(f"\n\nLEAVING PREVIOUS DB FILE UNTOUCHED {in_db_path}\n")
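
The CHALLENGE placeholder in database_path stands in for the network name: with a default-style pattern such as db/blockchain_v1_CHALLENGE.sqlite and selected_network set to mainnet, the input path resolves to db/blockchain_v1_mainnet.sqlite, and the _v1_/_v2_ substitution then derives the upgraded database's file name beside it.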
Example #23
    def __init__(self, path: Path):
        self._changed = False
        self._data = {}
        self._path = path
        if not path.parent.exists():
            mkdir(path.parent)
Example #24
    async def _start(
        self,
        fingerprint: Optional[int] = None,
        new_wallet: bool = False,
        backup_file: Optional[Path] = None,
        skip_backup_import: bool = False,
    ) -> bool:
        private_key = self.get_key_for_fingerprint(fingerprint)
        if private_key is None:
            self.logged_in = False
            return False

        if self.config.get("enable_profiler", False):
            asyncio.create_task(profile_task(self.root_path, "wallet", self.log))

        db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
        db_path_replaced: str = (
            self.config["database_path"]
            .replace("CHALLENGE", self.config["selected_network"])
            .replace("KEY", db_path_key_suffix)
        )
        path = path_from_root(self.root_path, db_path_replaced)
        mkdir(path.parent)

        assert self.server is not None
        self.wallet_state_manager = await WalletStateManager.create(
            private_key, self.config, path, self.constants, self.server
        )

        self.wsm_close_task = None

        assert self.wallet_state_manager is not None

        backup_settings: BackupInitialized = self.wallet_state_manager.user_settings.get_backup_settings()
        if backup_settings.user_initialized is False:
            if new_wallet is True:
                await self.wallet_state_manager.user_settings.user_created_new_wallet()
                self.wallet_state_manager.new_wallet = True
            elif skip_backup_import is True:
                await self.wallet_state_manager.user_settings.user_skipped_backup_import()
            elif backup_file is not None:
                await self.wallet_state_manager.import_backup_info(backup_file)
            else:
                self.backup_initialized = False
                await self.wallet_state_manager.close_all_stores()
                self.wallet_state_manager = None
                self.logged_in = False
                return False

        self.backup_initialized = True

        # Start peers here after the backup initialization has finished
        # We only want to do this once per instantiation
        # However, doing it earlier before backup initialization causes
        # the wallet to spam the introducer
        if self.wallet_peers_initialized is False:
            asyncio.create_task(self.wallet_peers.start())
            self.wallet_peers_initialized = True

        if backup_file is not None:
            json_dict = open_backup_file(backup_file, self.wallet_state_manager.private_key)
            if "start_height" in json_dict["data"]:
                start_height = json_dict["data"]["start_height"]
                self.config["starting_height"] = max(0, start_height - self.config["start_height_buffer"])
            else:
                self.config["starting_height"] = 0
        else:
            self.config["starting_height"] = 0

        if self.state_changed_callback is not None:
            self.wallet_state_manager.set_callback(self.state_changed_callback)

        self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
        self._shut_down = False

        self.peer_task = asyncio.create_task(self._periodically_check_full_node())
        self.sync_event = asyncio.Event()
        self.sync_task = asyncio.create_task(self.sync_job())
        self.logged_in_fingerprint = fingerprint
        self.logged_in = True
        return True
Example #25
    def ready_dir(self, tmp_path: Path):
        ready_dir: Path = tmp_path / "ready"
        mkdir(ready_dir)
        return ready_dir
Example #26
    def finished_dir(self, tmp_path: Path):
        finished_dir: Path = tmp_path / "finished"
        mkdir(finished_dir)
        return finished_dir
Example #27
def create_plots(args,
                 root_path,
                 use_datetime=True,
                 test_private_keys: Optional[List] = None):
    config_filename = config_path_for_filename(root_path, "config.yaml")
    config = load_config(root_path, config_filename)

    if args.tmp2_dir is None:
        args.tmp2_dir = args.tmp_dir

    farmer_public_key: G1Element
    if args.farmer_public_key is not None:
        farmer_public_key = G1Element.from_bytes(
            bytes.fromhex(args.farmer_public_key))
    else:
        farmer_public_key = get_farmer_public_key(args.alt_fingerprint)

    pool_public_key: Optional[G1Element] = None
    pool_contract_puzzle_hash: Optional[bytes32] = None
    if args.pool_public_key is not None:
        if args.pool_contract_address is not None:
            raise RuntimeError(
                "Choose one of pool_contract_address and pool_public_key")
        pool_public_key = G1Element.from_bytes(
            bytes.fromhex(args.pool_public_key))
    else:
        if args.pool_contract_address is None:
            # If nothing is set, farms to the provided key (or the first key)
            pool_public_key = get_pool_public_key(args.alt_fingerprint)
        else:
            # If the pool contract puzzle hash is set, use that
            pool_contract_puzzle_hash = decode_puzzle_hash(
                args.pool_contract_address)

    assert (pool_public_key is None) != (pool_contract_puzzle_hash is None)
    num = args.num

    if args.size < config["min_mainnet_k_size"] and test_private_keys is None:
        log.warning(
            f"Creating plots with size k={args.size}, which is less than the minimum required for mainnet"
        )
    if args.size < 22:
        log.warning("k under 22 is not supported. Increasing k to 22")
        args.size = 22

    if pool_public_key is not None:
        log.info(
            f"Creating {num} plots of size {args.size}, pool public key:  "
            f"{bytes(pool_public_key).hex()} farmer public key: {bytes(farmer_public_key).hex()}"
        )
    else:
        assert pool_contract_puzzle_hash is not None
        log.info(
            f"Creating {num} plots of size {args.size}, pool contract address:  "
            f"{args.pool_contract_address} farmer public key: {bytes(farmer_public_key).hex()}"
        )

    tmp_dir_created = False
    if not args.tmp_dir.exists():
        mkdir(args.tmp_dir)
        tmp_dir_created = True

    tmp2_dir_created = False
    if not args.tmp2_dir.exists():
        mkdir(args.tmp2_dir)
        tmp2_dir_created = True

    mkdir(args.final_dir)

    finished_filenames = []
    for i in range(num):
        # Generate a random master secret key
        if test_private_keys is not None:
            assert len(test_private_keys) == num
            sk: PrivateKey = test_private_keys[i]
        else:
            sk = AugSchemeMPL.key_gen(token_bytes(32))

        # The plot public key is the combination of the harvester and farmer keys
        # New plots will also include a taproot of the keys, for extensibility
        include_taproot: bool = pool_contract_puzzle_hash is not None
        plot_public_key = ProofOfSpace.generate_plot_public_key(
            master_sk_to_local_sk(sk).get_g1(), farmer_public_key,
            include_taproot)

        # The plot id is based on the harvester, farmer, and pool keys
        if pool_public_key is not None:
            plot_id: bytes32 = ProofOfSpace.calculate_plot_id_pk(
                pool_public_key, plot_public_key)
            plot_memo: bytes32 = stream_plot_info_pk(pool_public_key,
                                                     farmer_public_key, sk)
        else:
            assert pool_contract_puzzle_hash is not None
            plot_id = ProofOfSpace.calculate_plot_id_ph(
                pool_contract_puzzle_hash, plot_public_key)
            plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash,
                                            farmer_public_key, sk)

        if args.plotid is not None:
            log.info(f"Debug plot ID: {args.plotid}")
            plot_id = bytes32(bytes.fromhex(args.plotid))

        if args.memo is not None:
            log.info(f"Debug memo: {args.memo}")
            plot_memo = bytes.fromhex(args.memo)

        # Log the plot memo for dev debugging
        plot_memo_str: str = plot_memo.hex()
        log.info(f"Memo: {plot_memo_str}")

        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")

        if use_datetime:
            filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
        else:
            filename = f"plot-k{args.size}-{plot_id}.plot"
        full_path: Path = args.final_dir / filename

        resolved_final_dir: str = str(Path(args.final_dir).resolve())
        plot_directories_list: str = config["harvester"]["plot_directories"]

        if args.exclude_final_dir:
            log.info(
                f"NOT adding directory {resolved_final_dir} to harvester for farming"
            )
            if resolved_final_dir in plot_directories_list:
                log.warning(
                    f"Directory {resolved_final_dir} already exists for harvester, please remove it manually"
                )
        else:
            if resolved_final_dir not in plot_directories_list:
                # Adds the directory to the plot directories if it is not present
                log.info(
                    f"Adding directory {resolved_final_dir} to harvester for farming"
                )
                config = add_plot_directory(resolved_final_dir, root_path)

        if not full_path.exists():
            log.info(f"Starting plot {i + 1}/{num}")
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                plot_memo,
                plot_id,
                args.buffer,
                args.buckets,
                args.stripe_size,
                args.num_threads,
                args.nobitfield,
            )
            finished_filenames.append(filename)
        else:
            log.info(f"Plot {filename} already exists")

    log.info("Summary:")

    if tmp_dir_created:
        try:
            args.tmp_dir.rmdir()
        except Exception:
            log.info(
                f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty."
            )

    if tmp2_dir_created:
        try:
            args.tmp2_dir.rmdir()
        except Exception:
            log.info(
                f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty."
            )

    log.info(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log.info(filename)