Example #1
def load_plots(
    config_file: Dict,
    plot_config_file: Dict,
    pool_pubkeys: Optional[List[PublicKey]],
    root_path: Path,
) -> Tuple[Dict[str, DiskProver], List[str], List[str]]:
    provers: Dict[str, DiskProver] = {}
    failed_to_open_filenames: List[str] = []
    not_found_filenames: List[str] = []
    for partial_filename_str, plot_config in plot_config_file["plots"].items():
        plot_root = path_from_root(root_path,
                                   config_file.get("plot_root", "."))
        partial_filename = plot_root / partial_filename_str
        potential_filenames = [
            partial_filename,
            path_from_root(plot_root, partial_filename_str),
        ]
        pool_pubkey = PublicKey.from_bytes(
            bytes.fromhex(plot_config["pool_pk"]))

        # Only use plots that have the correct pool keys associated with them
        if pool_pubkeys is not None and pool_pubkey not in pool_pubkeys:
            log.warning(
                f"Plot {partial_filename} has a pool key that is not in the farmer's pool_pk list."
            )
            continue

        found = False
        failed_to_open = False

        for filename in potential_filenames:
            if filename.exists():
                try:
                    provers[partial_filename_str] = DiskProver(str(filename))
                except Exception as e:
                    log.error(f"Failed to open file {filename}. {e}")
                    failed_to_open = True
                    failed_to_open_filenames.append(partial_filename_str)
                    break
                log.info(
                    f"Loaded plot {filename} of size {provers[partial_filename_str].get_size()}"
                )
                found = True
                break
        if not found and not failed_to_open:
            log.warning(f"Plot at {potential_filenames} does not exist.")
            not_found_filenames.append(partial_filename_str)
    return (provers, failed_to_open_filenames, not_found_filenames)
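
Every example on this page resolves file locations through path_from_root. The helper itself is not shown here; the following is a minimal sketch of what it plausibly does, assuming it expands the user's home directory and resolves relative paths against the given root (treat the body as an assumption, not the project's actual implementation):

from pathlib import Path

def path_from_root(root_path: Path, path_str) -> Path:
    # Sketch only: resolve path_str relative to root_path unless it is
    # already absolute, expanding "~" along the way.
    path = Path(path_str).expanduser()
    if not path.is_absolute():
        path = root_path.expanduser() / path
    return path.resolve()
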
Example #2
 def __init__(
     self,
     server: ChiaServer,
     root_path: Path,
     target_outbound_count: int,
     peer_db_path: str,
     introducer_info: Optional[Dict],
     peer_connect_interval: int,
     log,
 ):
     self.server: ChiaServer = server
     self.message_queue: asyncio.Queue = asyncio.Queue()
     self.is_closed = False
     self.target_outbound_count = target_outbound_count
     self.peer_db_path = path_from_root(root_path, peer_db_path)
     if introducer_info is not None:
         self.introducer_info: Optional[PeerInfo] = PeerInfo(
             introducer_info["host"],
             introducer_info["port"],
         )
     else:
         self.introducer_info = None
     self.peer_connect_interval = peer_connect_interval
     self.log = log
     self.relay_queue = None
     self.address_manager = None
     self.connection_time_pretest: Dict = {}
Example #3
 def __init__(
     self,
     server,
     root_path,
     global_connections,
     target_outbound_count,
     peer_db_path,
     introducer_info,
     peer_connect_interval,
     log,
 ):
     self.server = server
     assert self.server is not None
     self.message_queue = asyncio.Queue()
     self.is_closed = False
     self.global_connections = global_connections
     self.target_outbound_count = target_outbound_count
     self.peer_db_path = path_from_root(root_path, peer_db_path)
     self.introducer_info = PeerInfo(
         introducer_info["host"],
         introducer_info["port"],
     )
     self.peer_connect_interval = peer_connect_interval
     self.log = log
     self.relay_queue = None
Example #4
 async def delete_all_keys(self, request: Dict):
     await self._stop_wallet()
     self.service.keychain.delete_all_keys()
     path = path_from_root(self.service.root_path, self.service.config["database_path"])
     if path.exists():
         path.unlink()
     return {}
Example #5
def service_kwargs_for_full_node_simulator(
    root_path: Path,
    config: Dict,
    consensus_constants: ConsensusConstants,
    bt: BlockTools,
) -> Dict:
    mkdir(path_from_root(root_path, config["database_path"]).parent)

    api = FullNodeSimulator(
        config,
        root_path=root_path,
        consensus_constants=consensus_constants,
        name=SERVICE_NAME,
        bt=bt,
    )

    kwargs = dict(
        root_path=root_path,
        api=api,
        node_type=NodeType.FULL_NODE,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        on_connect_callback=api._on_connect,
        rpc_info=(FullNodeRpcApi, config["rpc_port"]),
    )
    return kwargs
Example #6
def service_kwargs_for_full_node_simulator(
    root_path: Path,
    config: Dict,
    consensus_constants: ConsensusConstants,
    bt: BlockTools,
) -> Dict:
    mkdir(path_from_root(root_path, config["database_path"]).parent)
    genesis_challenge = bytes32(
        bytes.fromhex(
            config["network_genesis_challenges"][config["selected_network"]]))

    node = FullNode(
        config,
        root_path=root_path,
        consensus_constants=consensus_constants,
        name=SERVICE_NAME,
    )

    peer_api = FullNodeSimulator(node, bt)

    kwargs = dict(
        root_path=root_path,
        node=node,
        peer_api=peer_api,
        node_type=NodeType.FULL_NODE,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        on_connect_callback=node.on_connect,
        rpc_info=(FullNodeRpcApi, config["rpc_port"]),
        network_id=genesis_challenge,
    )
    return kwargs
Example #7
def initialize_logging(prefix: str, logging_config: Dict):
    log_path = path_from_root(
        DEFAULT_ROOT_PATH, logging_config.get("log_filename", "log/debug.log"))
    mkdir(str(log_path.parent))
    if logging_config["log_stdout"]:
        handler = colorlog.StreamHandler()
        handler.setFormatter(
            colorlog.ColoredFormatter(
                f"{prefix}: %(log_color)s%(levelname)-8s%(reset)s %(asctime)s.%(msecs)03d %(message)s",
                datefmt="%H:%M:%S",
                reset=True,
            ))

        logger = colorlog.getLogger()
        logger.addHandler(handler)
    else:
        print(
            f"Starting process and logging to {log_path}. Run with & to run in the background."
        )
        logging.basicConfig(
            filename=log_path,
            filemode="a",
            format=
            f"{prefix}: %(levelname)-8s %(asctime)s.%(msecs)03d %(message)s",
            datefmt="%H:%M:%S",
        )
        logger = logging.getLogger()
    logger.setLevel(logging.INFO)
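
A hedged usage sketch for the variant above, which always logs under DEFAULT_ROOT_PATH: the dict keys are the ones the function actually reads (log_stdout, and optionally log_filename), and the prefix doubles as a logging-format fragment; the concrete values are illustrative only.

initialize_logging(
    "FullNode %(name)-23s",
    {"log_stdout": True, "log_filename": "log/debug.log"},
)
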
Example #8
def service_kwargs_for_full_node_simulator(root_path: Path, config: Dict,
                                           bt: BlockTools) -> Dict:
    mkdir(path_from_root(root_path, config["database_path"]).parent)
    constants = bt.constants
    node = FullNode(
        config,
        root_path=root_path,
        consensus_constants=constants,
        name=SERVICE_NAME,
    )

    peer_api = FullNodeSimulator(node, bt)

    kwargs = dict(
        root_path=root_path,
        node=node,
        peer_api=peer_api,
        node_type=NodeType.FULL_NODE,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        on_connect_callback=node.on_connect,
        rpc_info=(FullNodeRpcApi, config["rpc_port"]),
        network_id=constants.GENESIS_CHALLENGE,
    )
    return kwargs
Example #9
def initialize_logging(prefix: str, logging_config: Dict, root_path: Path):
    log_path = path_from_root(
        root_path, logging_config.get("log_filename", "log/debug.log"))
    mkdir(str(log_path.parent))
    if logging_config["log_stdout"]:
        handler = colorlog.StreamHandler()
        handler.setFormatter(
            colorlog.ColoredFormatter(
                f"{prefix}: %(log_color)s%(levelname)-8s%(reset)s %(asctime)s.%(msecs)03d %(message)s",
                datefmt="%H:%M:%S",
                reset=True,
            ))

        logger = colorlog.getLogger()
        logger.addHandler(handler)
    else:
        logging.basicConfig(
            filename=log_path,
            filemode="a",
            format=
            f"{prefix}: %(levelname)-8s %(asctime)s.%(msecs)03d %(message)s",
            datefmt="%H:%M:%S",
        )

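        # Note: basicConfig() above already attaches a FileHandler for
        # log_path to the root logger, so the RotatingFileHandler added
        # below causes each record to be written to the file twice.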
        logger = logging.getLogger()
        handler = RotatingFileHandler(log_path,
                                      maxBytes=20000000,
                                      backupCount=7)
        logger.addHandler(handler)

    logger.setLevel(logging.INFO)
Example #10
    def _delete_plot(self, str_path: str):
        if str_path in self.provers:
            del self.provers[str_path]

        plot_root = path_from_root(self.root_path,
                                   self.config.get("plot_root", "."))

        # Remove absolute and relative paths
        if Path(str_path).exists():
            Path(str_path).unlink()

        if (plot_root / Path(str_path)).exists():
            (plot_root / Path(str_path)).unlink()

        try:
            # Removes the plot from config.yaml
            plot_config = load_config(self.root_path, "plots.yaml")
            if str_path in plot_config["plots"]:
                del plot_config["plots"][str_path]
                save_config(self.root_path, "plots.yaml", plot_config)
                self.plot_config = plot_config
        except (FileNotFoundError, KeyError) as e:
            log.warning(f"Could not remove {str_path} {e}")
            return False
        self._state_changed("plots")
        return True
Example #11
 async def clean_all_state(self):
     self.service.keychain.delete_all_keys()
     path = path_from_root(
         self.service.root_path, self.service.config["database_path"]
     )
     if path.exists():
         path.unlink()
Example #12
    async def _start(
        self,
        fingerprint: Optional[int] = None,
        new_wallet: bool = False,
        backup_file: Optional[Path] = None,
        skip_backup_import: bool = False,
    ) -> bool:
        private_key = self.get_key_for_fingerprint(fingerprint)
        if private_key is None:
            return False

        db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
        path = path_from_root(self.root_path, f"{self.config['database_path']}-{db_path_key_suffix}")
        mkdir(path.parent)

        self.wallet_state_manager = await WalletStateManager.create(private_key, self.config, path, self.constants)

        self.wsm_close_task = None

        assert self.wallet_state_manager is not None

        backup_settings: BackupInitialized = self.wallet_state_manager.user_settings.get_backup_settings()
        if backup_settings.user_initialized is False:
            if new_wallet is True:
                await self.wallet_state_manager.user_settings.user_created_new_wallet()
                self.wallet_state_manager.new_wallet = True
            elif skip_backup_import is True:
                await self.wallet_state_manager.user_settings.user_skipped_backup_import()
            elif backup_file is not None:
                await self.wallet_state_manager.import_backup_info(backup_file)
            else:
                self.backup_initialized = False
                await self.wallet_state_manager.close_all_stores()
                self.wallet_state_manager = None
                return False

        self.backup_initialized = True
        if backup_file is not None:
            json_dict = open_backup_file(backup_file, self.wallet_state_manager.private_key)
            if "start_height" in json_dict["data"]:
                start_height = json_dict["data"]["start_height"]
                self.config["starting_height"] = max(0, start_height - self.config["start_height_buffer"])
            else:
                self.config["starting_height"] = 0
        else:
            self.config["starting_height"] = 0

        if self.state_changed_callback is not None:
            self.wallet_state_manager.set_callback(self.state_changed_callback)

        self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
        self._shut_down = False

        self.peer_task = asyncio.create_task(self._periodically_check_full_node())
        self.sync_event = asyncio.Event()
        self.sync_task = asyncio.create_task(self.sync_job())
        self.log.info("self.sync_job")
        self.logged_in_fingerprint = fingerprint
        return True
Example #13
    async def harvester_handshake(
            self, harvester_handshake: harvester_protocol.HarvesterHandshake):
        """
        Handshake between the harvester and farmer. The harvester receives the pool public keys,
        which must be put into the plots before the plotting process begins. We cannot
        use any plots which don't have one of the pool keys.
        """
        for partial_filename_str, plot_config in self.plot_config[
                "plots"].items():
            plot_root = path_from_root(DEFAULT_ROOT_PATH,
                                       self.config.get("plot_root", "."))
            partial_filename = plot_root / partial_filename_str
            potential_filenames = [
                partial_filename,
                path_from_root(plot_root, partial_filename_str),
            ]
            pool_pubkey = PublicKey.from_bytes(
                bytes.fromhex(plot_config["pool_pk"]))

            # Only use plots that have the correct pool keys associated with them
            if pool_pubkey not in harvester_handshake.pool_pubkeys:
                log.warning(
                    f"Plot {partial_filename} has a pool key that is not in the farmer's pool_pk list."
                )
                continue

            found = False
            failed_to_open = False
            for filename in potential_filenames:
                if filename.exists():
                    try:
                        self.provers[partial_filename_str] = DiskProver(
                            str(filename))
                    except ValueError:
                        log.error(f"Failed to open file {filename}.")
                        failed_to_open = True
                        break
                    log.info(
                        f"Farming plot {filename} of size {self.provers[partial_filename_str].get_size()}"
                    )
                    found = True
                    break
            if not found and not failed_to_open:
                log.warning(f"Plot at {potential_filenames} does not exist.")
Example #14
    async def log_in(self, request):
        """
        Logs in the wallet with a specific key.
        """

        fingerprint = request["fingerprint"]
        if self.service.logged_in_fingerprint == fingerprint:
            return {"fingerprint": fingerprint}

        await self._stop_wallet()
        log_in_type = request["type"]
        recovery_host = request["host"]
        testing = False

        if "testing" in self.service.config and self.service.config[
                "testing"] is True:
            testing = True
        if log_in_type == "skip":
            started = await self.service._start(fingerprint=fingerprint,
                                                skip_backup_import=True)
        elif log_in_type == "restore_backup":
            file_path = Path(request["file_path"])
            started = await self.service._start(fingerprint=fingerprint,
                                                backup_file=file_path)
        else:
            started = await self.service._start(fingerprint)

        if started is True:
            return {"fingerprint": fingerprint}
        elif testing is True and self.service.backup_initialized is False:
            response = {"success": False, "error": "not_initialized"}
            return response
        elif self.service.backup_initialized is False:
            backup_info = None
            backup_path = None
            try:
                private_key = self.service.get_key_for_fingerprint(fingerprint)
                last_recovery = await download_backup(recovery_host,
                                                      private_key)
                backup_path = path_from_root(self.service.root_path,
                                             "last_recovery")
                if backup_path.exists():
                    backup_path.unlink()
                backup_path.write_text(last_recovery)
                backup_info = get_backup_info(backup_path, private_key)
                backup_info["backup_host"] = recovery_host
                backup_info["downloaded"] = True
            except Exception as e:
                log.error(f"error {e}")
            response = {"success": False, "error": "not_initialized"}
            if backup_info is not None:
                response["backup_info"] = backup_info
                response["backup_path"] = f"{backup_path}"
            return response

        return {"success": False, "error": "Unknown Error"}
Example #15
 async def delete_key(self, request):
     await self._stop_wallet()
     fingerprint = request["fingerprint"]
     self.service.keychain.delete_key_by_fingerprint(fingerprint)
     path = path_from_root(
         self.service.root_path,
         f"{self.service.config['database_path']}-{fingerprint}",
     )
     if path.exists():
         path.unlink()
     return {}
Example #16
def initialize_logging(service_name: str, logging_config: Dict,
                       root_path: Path):
    log_path = path_from_root(
        root_path, logging_config.get("log_filename", "log/debug.log"))
    log_date_format = "%Y-%m-%dT%H:%M:%S"

    mkdir(str(log_path.parent))
    file_name_length = 33 - len(service_name)
    if logging_config["log_stdout"]:
        handler = colorlog.StreamHandler()
        handler.setFormatter(
            colorlog.ColoredFormatter(
                f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: "
                f"%(log_color)s%(levelname)-8s%(reset)s %(message)s",
                datefmt=log_date_format,
                reset=True,
            ))

        logger = colorlog.getLogger()
        logger.addHandler(handler)
    else:
        logger = logging.getLogger()
        handler = ConcurrentRotatingFileHandler(log_path,
                                                "a",
                                                maxBytes=20 * 1024 * 1024,
                                                backupCount=7)
        handler.setFormatter(
            logging.Formatter(
                fmt=
                f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: %(levelname)-8s %(message)s",
                datefmt=log_date_format,
            ))
        logger.addHandler(handler)

    if "log_level" in logging_config:
        if logging_config["log_level"] == "CRITICAL":
            logger.setLevel(logging.CRITICAL)
        elif logging_config["log_level"] == "ERROR":
            logger.setLevel(logging.ERROR)
        elif logging_config["log_level"] == "WARNING":
            logger.setLevel(logging.WARNING)
        elif logging_config["log_level"] == "INFO":
            logger.setLevel(logging.INFO)
        elif logging_config["log_level"] == "DEBUG":
            logger.setLevel(logging.DEBUG)
            logging.getLogger("aiosqlite").setLevel(
                logging.INFO)  # Too much logging on debug level
            logging.getLogger("websockets").setLevel(
                logging.INFO)  # Too much logging on debug level
        else:
            logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.INFO)
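
The log_level chain above is a name-to-constant mapping; here is a self-contained equivalent expressed as a lookup table (note the original additionally quiets the aiosqlite and websockets loggers when DEBUG is selected, which this sketch would still have to do separately):

import logging

LEVELS = {
    "CRITICAL": logging.CRITICAL,
    "ERROR": logging.ERROR,
    "WARNING": logging.WARNING,
    "INFO": logging.INFO,
    "DEBUG": logging.DEBUG,
}

def level_from_config(logging_config: dict) -> int:
    # Same mapping as the if/elif chain above, with INFO as the fallback.
    return LEVELS.get(logging_config.get("log_level"), logging.INFO)
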
Example #17
def load_plots(config_file: Dict, plot_config_file: Dict,
               pool_pubkeys: List[PublicKey]) -> Dict[str, DiskProver]:
    provers: Dict[str, DiskProver] = {}
    for partial_filename_str, plot_config in plot_config_file["plots"].items():
        plot_root = path_from_root(DEFAULT_ROOT_PATH,
                                   config_file.get("plot_root", "."))
        partial_filename = plot_root / partial_filename_str
        potential_filenames = [
            partial_filename,
            path_from_root(plot_root, partial_filename_str),
        ]
        pool_pubkey = PublicKey.from_bytes(
            bytes.fromhex(plot_config["pool_pk"]))

        # Only use plots that have the correct pool keys associated with them
        if pool_pubkey not in pool_pubkeys:
            log.warning(
                f"Plot {partial_filename} has a pool key that is not in the farmer's pool_pk list."
            )
            continue

        found = False
        failed_to_open = False
        for filename in potential_filenames:
            if filename.exists():
                try:
                    provers[partial_filename_str] = DiskProver(str(filename))
                except ValueError as e:
                    log.error(f"Failed to open file {filename}. {e}")
                    failed_to_open = True
                    break
                log.info(
                    f"Loaded plot {filename} of size {provers[partial_filename_str].get_size()}"
                )
                found = True
                break
        if not found and not failed_to_open:
            log.warning(f"Plot at {potential_filenames} does not exist.")
    return provers
Example #18
    async def create(
        config: Dict,
        private_key: ExtendedPrivateKey,
        root_path: Path,
        name: str = None,
        override_constants: Dict = {},
        local_test: bool = False,
    ):
        self = WalletNode()
        self.config = config
        self.constants = consensus_constants.copy()
        self.root_path = root_path
        self.local_test = local_test
        for key, value in override_constants.items():
            self.constants[key] = value
        if name:
            self.log = logging.getLogger(name)
        else:
            self.log = logging.getLogger(__name__)

        db_path_key_suffix = str(
            private_key.get_public_key().get_fingerprint())
        path = path_from_root(
            self.root_path, f"{config['database_path']}-{db_path_key_suffix}")
        mkdir(path.parent)

        self.wallet_state_manager = await WalletStateManager.create(
            private_key, config, path, self.constants)
        self.wallet_state_manager.set_pending_callback(
            self._pending_tx_handler)

        # Normal operation data
        self.cached_blocks = {}
        self.future_block_hashes = {}

        # Sync data
        self._shut_down = False
        self.proof_hashes = []
        self.header_hashes = []
        self.header_hashes_error = False
        self.short_sync_threshold = 15
        self.potential_blocks_received = {}
        self.potential_header_hashes = {}

        self.server = None

        self.tasks = []

        return self
Example #19
 async def _create_backup_and_upload(self, host):
     assert self.service.wallet_state_manager is not None
     try:
         if "testing" in self.service.config and self.service.config["testing"] is True:
             return
         now = time.time()
         file_name = f"backup_{now}"
         path = path_from_root(self.service.root_path, file_name)
         await self.service.wallet_state_manager.create_wallet_backup(path)
         backup_text = path.read_text()
         response = await upload_backup(host, backup_text)
         success = response["success"]
         if success is False:
             log.error("Failed to upload backup to wallet backup service")
         elif success is True:
             log.info("Finished upload of the backup file")
     except Exception as e:
         log.error(f"Exception in upload backup. Error: {e}")
Example #20
    async def create(
        config: Dict,
        key_config: Dict,
        name: str = None,
        override_constants: Dict = {},
    ):
        self = WalletNode()
        self.config = config
        self.key_config = key_config
        self.constants = consensus_constants.copy()
        for key, value in override_constants.items():
            self.constants[key] = value
        if name:
            self.log = logging.getLogger(name)
        else:
            self.log = logging.getLogger(__name__)

        path = path_from_root(DEFAULT_ROOT_PATH, config["database_path"])
        mkdir(path.parent)

        self.wallet_state_manager = await WalletStateManager.create(
            key_config, config, path, self.constants)
        self.wallet_state_manager.set_pending_callback(
            self._pending_tx_handler)

        # Normal operation data
        self.cached_blocks = {}
        self.future_block_hashes = {}

        # Sync data
        self._shut_down = False
        self.proof_hashes = []
        self.header_hashes = []
        self.header_hashes_error = False
        self.short_sync_threshold = 15
        self.potential_blocks_received = {}
        self.potential_header_hashes = {}

        self.server = None

        return self
Example #21
def service_kwargs_for_full_node(root_path):
    service_name = "full_node"

    config = load_config_cli(root_path, "config.yaml", "full_node")
    db_path = path_from_root(root_path, config["simulator_database_path"])
    mkdir(db_path.parent)

    config["database_path"] = config["simulator_database_path"]

    api = FullNodeSimulator(
        config,
        root_path=root_path,
        consensus_constants=test_constants,
        name=service_name,
        bt=BlockTools(),
    )

    async def start_callback():
        await api._start()

    def stop_callback():
        api._close()

    async def await_closed_callback():
        await api._await_closed()

    kwargs = dict(
        root_path=root_path,
        api=api,
        node_type=NodeType.FULL_NODE,
        advertised_port=config["port"],
        service_name=service_name,
        server_listen_ports=[config["port"]],
        on_connect_callback=api._on_connect,
        start_callback=start_callback,
        stop_callback=stop_callback,
        await_closed_callback=await_closed_callback,
        rpc_info=(FullNodeRpcApi, config["rpc_port"]),
    )
    return kwargs
Example #22
    async def _start(self,
                     public_key_fingerprint: Optional[int] = None) -> bool:
        self._shut_down = False
        private_keys = self.keychain.get_all_private_keys()
        if len(private_keys) == 0:
            self.log.warning(
                "No keys present. Create keys with the UI, or with the 'chia keys' program."
            )
            return False

        private_key: Optional[PrivateKey] = None
        if public_key_fingerprint is not None:
            for sk, _ in private_keys:
                if sk.get_g1().get_fingerprint() == public_key_fingerprint:
                    private_key = sk
                    break
        else:
            private_key = private_keys[0][0]

        if private_key is None:
            raise RuntimeError("Invalid fingerprint {public_key_fingerprint}")

        db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
        path = path_from_root(
            self.root_path,
            f"{self.config['database_path']}-{db_path_key_suffix}")
        mkdir(path.parent)
        self.wallet_state_manager = await WalletStateManager.create(
            private_key, self.config, path, self.constants)
        assert self.wallet_state_manager is not None
        if self.state_changed_callback is not None:
            self.wallet_state_manager.set_callback(self.state_changed_callback)

        self.wallet_state_manager.set_pending_callback(
            self._pending_tx_handler)
        return True
Example #23
def main():
    """
    Script for creating plots and adding them to the plot config file.
    """
    root_path = DEFAULT_ROOT_PATH
    plot_config_filename = config_path_for_filename(root_path, "plots.yaml")

    parser = argparse.ArgumentParser(description="Chia plotting script.")
    parser.add_argument("-k", "--size", help="Plot size", type=int, default=26)
    parser.add_argument(
        "-n", "--num_plots", help="Number of plots", type=int, default=1
    )
    parser.add_argument(
        "-i", "--index", help="First plot index", type=int, default=None
    )
    parser.add_argument(
        "-p", "--pool_pub_key", help="Hex public key of pool", type=str, default=""
    )
    parser.add_argument(
        "-s", "--sk_seed", help="Secret key seed in hex", type=str, default=None
    )
    parser.add_argument(
        "-t",
        "--tmp_dir",
        help="Temporary directory for plotting files",
        type=Path,
        default=Path("."),
    )
    parser.add_argument(
        "-2",
        "--tmp2_dir",
        help="Second temporary directory for plotting files",
        type=Path,
        default=Path("."),
    )
    new_plots_root = path_from_root(
        root_path,
        load_config(root_path, "config.yaml")
        .get("harvester", {})
        .get("new_plot_root", "plots"),
    )
    parser.add_argument(
        "-d",
        "--final_dir",
        help="Final directory for plots (relative or absolute)",
        type=Path,
        default=new_plots_root,
    )

    args = parser.parse_args()

    if args.sk_seed is None and args.index is not None:
        log(
            f"You have specified the -i (index) argument without the -s (sk_seed) argument."
            f" The program has changed so that the sk_seed is now generated randomly, and -i is no longer necessary."
            f" Please run the program without -i."
        )
        quit()

    if args.index is None:
        args.index = 0

    # The seed is what will be used to generate a private key for each plot
    if args.sk_seed is not None:
        sk_seed: bytes = bytes.fromhex(args.sk_seed)
        log(f"Using the provided sk_seed {sk_seed.hex()}.")
    else:
        sk_seed = token_bytes(32)
        log(
            f"Using sk_seed {sk_seed.hex()}. Note that sk seed is now generated randomly, as opposed "
            f"to from keys.yaml. If you want to use a specific seed, use the -s argument."
        )

    pool_pk: PublicKey
    if len(args.pool_pub_key) > 0:
        # Use the provided pool public key, useful for using an external pool
        pool_pk = PublicKey.from_bytes(bytes.fromhex(args.pool_pub_key))
    else:
        # Use the pool public key from the config, useful for solo farming
        keychain = Keychain()
        all_public_keys = keychain.get_all_public_keys()
        if len(all_public_keys) == 0:
            raise RuntimeError(
                "There are no private keys in the keychain, so we cannot create a plot. "
                "Please generate keys using 'chia keys generate' or pass in a pool pk with -p"
            )
        pool_pk = all_public_keys[0].get_public_key()

    log(
        f"Creating {args.num_plots} plots, from index {args.index} to "
        f"{args.index + args.num_plots - 1}, of size {args.size}, sk_seed {sk_seed.hex()} ppk {pool_pk}"
    )

    mkdir(args.tmp_dir)
    mkdir(args.tmp2_dir)
    mkdir(args.final_dir)
    finished_filenames = []
    for i in range(args.index, args.index + args.num_plots):
        # Generate a sk based on the seed, plot size (k), and index
        sk: PrivateKey = PrivateKey.from_seed(
            sk_seed + args.size.to_bytes(1, "big") + i.to_bytes(4, "big")
        )

        # The plot seed is based on the pool and plot pks
        plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed(
            pool_pk, sk.get_public_key()
        )
        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")

        filename: str = f"plot-k{args.size}-{dt_string}-{plot_seed}.dat"
        full_path: Path = args.final_dir / filename

        plot_config = load_config(root_path, plot_config_filename)
        plot_config_plots_new = deepcopy(plot_config.get("plots", {}))
        filenames = [Path(k).name for k in plot_config_plots_new.keys()]
        already_in_config = any(plot_seed.hex() in fname for fname in filenames)
        if already_in_config:
            log(f"Plot {filename} already exists (in config)")
            continue

        if not full_path.exists():
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                bytes([]),
                plot_seed,
            )
            finished_filenames.append(filename)
        else:
            log(f"Plot {filename} already exists")

        # Updates the config if necessary.
        plot_config = load_config(root_path, plot_config_filename)
        plot_config_plots_new = deepcopy(plot_config.get("plots", {}))
        plot_config_plots_new[str(full_path)] = {
            "sk": bytes(sk).hex(),
            "pool_pk": bytes(pool_pk).hex(),
        }
        plot_config["plots"].update(plot_config_plots_new)

        # Dumps the new config to disk.
        save_config(root_path, plot_config_filename, plot_config)
    log("")
    log("Summary:")
    try:
        args.tmp_dir.rmdir()
    except Exception:
        log(
            f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty."
        )
    try:
        args.tmp2_dir.rmdir()
    except Exception:
        log(
            f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty."
        )
    log(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log(filename)
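
The per-plot secret key in the loop above is derived from sk_seed plus the plot size k (one byte) and the plot index (four bytes). A quick self-contained check of that entropy layout, with a stand-in seed:

sk_seed = bytes(32)  # stand-in; the script above generates token_bytes(32)
k, index = 26, 0
entropy = sk_seed + k.to_bytes(1, "big") + index.to_bytes(4, "big")
# One byte for k caps plot sizes at k = 255; four index bytes allow
# about 4.3 billion plots per seed.
assert len(entropy) == 37
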
Example #24
async def async_main():
    root_path = DEFAULT_ROOT_PATH
    config = load_config_cli(root_path, "config.yaml", "full_node")
    net_config = load_config(root_path, "config.yaml")
    setproctitle("chia_full_node")
    initialize_logging("FullNode %(name)-23s", config["logging"], root_path)

    log = logging.getLogger(__name__)
    server_closed = False

    db_path = path_from_root(root_path, config["database_path"])
    mkdir(db_path.parent)

    # Create the store (DB) and full node instance
    connection = await aiosqlite.connect(db_path)
    store = await FullNodeStore.create(connection)

    genesis: FullBlock = FullBlock.from_bytes(constants["GENESIS_BLOCK"])
    await store.add_block(genesis)
    unspent_store = await CoinStore.create(connection)

    log.info("Initializing blockchain from disk")
    blockchain = await Blockchain.create(unspent_store, store)
    log.info("Blockchain initialized")

    mempool_manager = MempoolManager(unspent_store)
    await mempool_manager.new_tips(await blockchain.get_full_tips())

    full_node = FullNode(store, blockchain, config, mempool_manager, unspent_store)

    if config["enable_upnp"]:
        log.info(f"Attempting to enable UPnP (open up port {config['port']})")
        try:
            upnp = miniupnpc.UPnP()
            upnp.discoverdelay = 5
            upnp.discover()
            upnp.selectigd()
            upnp.addportmapping(
                config["port"], "TCP", upnp.lanaddr, config["port"], "chia", ""
            )
            log.info(f"Port {config['port']} opened with UPnP.")
        except Exception:
            log.exception("UPnP failed")

    # Starts the full node server (which full nodes can connect to)
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        full_node,
        NodeType.FULL_NODE,
        ping_interval,
        network_id,
        DEFAULT_ROOT_PATH,
        config,
    )
    full_node._set_server(server)
    _ = await server.start_server(full_node._on_connect)
    rpc_cleanup = None

    def master_close_cb():
        nonlocal server_closed
        if not server_closed:
            # Called by the UI, when node is closed, or when a signal is sent
            log.info("Closing all connections, and server...")
            full_node._shutdown()
            server.close_all()
            server_closed = True

    if config["start_rpc_server"]:
        # Starts the RPC server
        rpc_cleanup = await start_rpc_server(
            full_node, master_close_cb, config["rpc_port"]
        )

    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT, master_close_cb)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, master_close_cb)
    except NotImplementedError:
        log.info("signal handlers unsupported")

    full_node._start_bg_tasks()

    # Awaits for server and all connections to close
    await server.await_closed()
    log.info("Closed all node servers.")

    # Waits for the rpc server to close
    if rpc_cleanup is not None:
        await rpc_cleanup()
    log.info("Closed RPC server.")

    await connection.close()
    log.info("Closed db connection.")

    await asyncio.get_running_loop().shutdown_asyncgens()
    log.info("Node fully closed.")
Example #25
async def main():
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "full_node")
    setproctitle("chia_full_node")
    initialize_logging("FullNode %(name)-23s", config["logging"], root_path)

    log = logging.getLogger(__name__)
    server_closed = False

    db_path = path_from_root(DEFAULT_ROOT_PATH,
                             config["simulator_database_path"])
    mkdir(db_path.parent)
    connection = await aiosqlite.connect(db_path)

    # Create the store (DB) and full node instance
    store = await FullNodeStore.create(connection)
    await store._clear_database()

    genesis: FullBlock = FullBlock.from_bytes(test_constants["GENESIS_BLOCK"])
    await store.add_block(genesis)
    unspent_store = await CoinStore.create(connection)

    log.info("Initializing blockchain from disk")
    blockchain = await Blockchain.create(unspent_store, store, test_constants)

    mempool_manager = MempoolManager(unspent_store, test_constants)
    await mempool_manager.new_tips(await blockchain.get_full_tips())

    full_node = FullNodeSimulator(
        store,
        blockchain,
        config,
        mempool_manager,
        unspent_store,
        override_constants=test_constants,
    )

    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")

    # Starts the full node server (which full nodes can connect to)
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        full_node,
        NodeType.FULL_NODE,
        ping_interval,
        network_id,
        DEFAULT_ROOT_PATH,
        config,
    )
    full_node._set_server(server)
    _ = await server.start_server(full_node._on_connect)
    rpc_cleanup = None

    def master_close_cb():
        nonlocal server_closed
        if not server_closed:
            # Called by the UI, when node is closed, or when a signal is sent
            log.info("Closing all connections, and server...")
            full_node._shutdown()
            server.close_all()
            server_closed = True

    if config["start_rpc_server"]:
        # Starts the RPC server
        rpc_cleanup = await start_rpc_server(full_node, master_close_cb,
                                             config["rpc_port"])

    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT,
                                                      master_close_cb)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM,
                                                      master_close_cb)
    except NotImplementedError:
        log.info("signal handlers unsupported")

    log.info("Waiting to connect to some peers...")
    await asyncio.sleep(3)
    log.info(
        f"Connected to {len(server.global_connections.get_connections())} peers."
    )

    # Awaits for server and all connections to close
    await server.await_closed()
    log.info("Closed all node servers.")

    # Waits for the rpc server to close
    if rpc_cleanup is not None:
        await rpc_cleanup()
    log.info("Closed RPC server.")

    await store.close()
    log.info("Closed store.")

    await unspent_store.close()
    log.info("Closed unspent store.")

    await asyncio.get_running_loop().shutdown_asyncgens()
    log.info("Node fully closed.")
Example #26
File: init.py Project: spring3th/Exodus
def migrate_from(
    old_root: Path,
    new_root: Path,
    manifest: List[str],
    do_not_migrate_settings: List[str],
):
    """
    Copy all the files in "manifest" to the new config directory.
    """
    if old_root == new_root:
        print("same as new path, exiting")
        return 1
    if not old_root.is_dir():
        print(
            f"{old_root} not found - this is ok if you did not install this version."
        )
        return 0
    print(f"\n{old_root} found")
    print(f"Copying files from {old_root} to {new_root}\n")
    not_found = []
    for f in manifest:
        old_path = old_root / f
        new_path = new_root / f
        if old_path.is_file():
            print(f"{new_path}")
            mkdir(new_path.parent)
            shutil.copy(old_path, new_path)
        else:
            not_found.append(f)
            print(f"{old_path} not found, skipping")
    # update config yaml with new keys
    config: Dict = load_config(new_root, "config.yaml")
    config_str: str = initial_config_file("config.yaml")
    default_config: Dict = yaml.safe_load(config_str)
    flattened_keys = unflatten_properties(
        {k: ""
         for k in do_not_migrate_settings})
    dict_add_new_default(config, default_config, flattened_keys)

    save_config(new_root, "config.yaml", config)

    # migrate plots
    # for now, we simply leave them where they are
    # and make what may have been relative paths absolute
    if "config/trusted.key" in not_found or "config/trusted.key" in not_found:
        initialize_ssl(new_root)

    plots_config: Dict = load_config(new_root, "plots.yaml")

    plot_root = (load_config(new_root,
                             "config.yaml").get("harvester",
                                                {}).get("plot_root", "."))

    old_plots_root: Path = path_from_root(old_root, plot_root)
    new_plots_root: Path = path_from_root(new_root, plot_root)

    old_plot_paths = plots_config.get("plots", {})
    if len(old_plot_paths) == 0:
        print("no plots found, no plots migrated")
        return 1

    print("\nmigrating plots.yaml")

    new_plot_paths: Dict = {}
    for path, values in old_plot_paths.items():
        old_path_full = path_from_root(old_plots_root, path)
        new_path_relative = make_path_relative(old_path_full, new_plots_root)
        print(f"rewriting {path}\n as {new_path_relative}")
        new_plot_paths[str(new_path_relative)] = values
    plots_config_new: Dict = {"plots": new_plot_paths}
    save_config(new_root, "plots.yaml", plots_config_new)
    print("\nUpdated plots.yaml to point to where your existing plots are.")
    print(
        "\nYour plots have not been moved so be careful deleting old preferences folders."
    )

    print("\nIf you want to move your plot files, you should also modify")
    print(f"{config_path_for_filename(new_root, 'plots.yaml')}")
    return 1
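
A hedged usage sketch for migrate_from as defined above; the directory names, manifest entries, and do-not-migrate keys are illustrative assumptions, not values taken from this project:

from pathlib import Path

# Hypothetical invocation; a real caller supplies the project's own manifest.
migrate_from(
    Path.home() / ".chia" / "beta-old",
    Path.home() / ".chia" / "beta-new",
    ["config/config.yaml", "config/plots.yaml", "config/trusted.key"],
    ["full_node.simulator_database_path"],
)
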
Example #27
def main():
    """
    Script for creating plots and adding them to the plot config file.
    """
    root_path = DEFAULT_ROOT_PATH
    plot_config_filename = config_path_for_filename(root_path, "plots.yaml")
    key_config_filename = config_path_for_filename(root_path, "keys.yaml")

    parser = argparse.ArgumentParser(description="Chia plotting script.")
    parser.add_argument("-k", "--size", help="Plot size", type=int, default=20)
    parser.add_argument("-n",
                        "--num_plots",
                        help="Number of plots",
                        type=int,
                        default=10)
    parser.add_argument("-i",
                        "--index",
                        help="First plot index",
                        type=int,
                        default=0)
    parser.add_argument("-p",
                        "--pool_pub_key",
                        help="Hex public key of pool",
                        type=str,
                        default="")
    parser.add_argument(
        "-t",
        "--tmp_dir",
        help=
        "Temporary directory for plotting files (relative to final directory)",
        type=Path,
        default=Path("./plots.tmp"),
    )

    new_plots_root = path_from_root(
        root_path,
        load_config(root_path,
                    "config.yaml").get("harvester",
                                       {}).get("new_plot_root", "plots"),
    )
    parser.add_argument(
        "-d",
        "--final_dir",
        help="Final directory for plots (relative or absolute)",
        type=Path,
        default=new_plots_root,
    )

    # We need the keys file to access the pool keys (if they exist) and the sk_seed.
    args = parser.parse_args()
    if not key_config_filename.exists():
        raise RuntimeError("Keys not generated. Run chia-generate-keys")

    # The seed is what will be used to generate a private key for each plot
    key_config = load_config(root_path, key_config_filename)
    sk_seed: bytes = bytes.fromhex(key_config["sk_seed"])

    for i in range(args.index, args.index + args.num_plots):
        # Generate a sk based on the seed, plot size (k), and index
        sk: PrivateKey = PrivateKey.from_seed(sk_seed +
                                              args.size.to_bytes(1, "big") +
                                              i.to_bytes(4, "big"))
        print(f"sk: {bytes(sk).hex()}")
Example #28
async def main():
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "full_node")
    setproctitle("chia_full_node")
    initialize_logging("FullNode %(name)-23s", config["logging"], root_path)

    log = logging.getLogger(__name__)
    server_closed = False

    db_path = path_from_root(root_path, config["simulator_database_path"])
    mkdir(db_path.parent)
    if db_path.exists():
        db_path.unlink()

    full_node = await FullNodeSimulator.create(
        config,
        root_path=root_path,
        override_constants=test_constants,
    )

    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")

    # Starts the full node server (which full nodes can connect to)
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        full_node,
        NodeType.FULL_NODE,
        ping_interval,
        network_id,
        DEFAULT_ROOT_PATH,
        config,
    )
    full_node._set_server(server)
    _ = await server.start_server(full_node._on_connect)
    rpc_cleanup = None

    def master_close_cb():
        nonlocal server_closed
        if not server_closed:
            # Called by the UI, when node is closed, or when a signal is sent
            log.info("Closing all connections, and server...")
            server.close_all()
            server_closed = True

    if config["start_rpc_server"]:
        # Starts the RPC server
        rpc_cleanup = await start_rpc_server(full_node, master_close_cb,
                                             config["rpc_port"])

    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT,
                                                      master_close_cb)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM,
                                                      master_close_cb)
    except NotImplementedError:
        log.info("signal handlers unsupported")

    # Awaits for server and all connections to close
    await server.await_closed()
    log.info("Closed all node servers.")

    # Stops the full node and closes DBs
    await full_node._shutdown()

    # Waits for the rpc server to close
    if rpc_cleanup is not None:
        await rpc_cleanup()
    log.info("Closed RPC server.")

    await asyncio.get_running_loop().shutdown_asyncgens()
    log.info("Node fully closed.")
Example #29
def main():
    """
    Script for creating plots and adding them to the plot config file.
    """
    root_path = DEFAULT_ROOT_PATH
    plot_config_filename = config_path_for_filename(root_path, "plots.yaml")
    key_config_filename = config_path_for_filename(root_path, "keys.yaml")

    parser = argparse.ArgumentParser(description="Chia plotting script.")
    parser.add_argument("-k", "--size", help="Plot size", type=int, default=20)
    parser.add_argument("-n",
                        "--num_plots",
                        help="Number of plots",
                        type=int,
                        default=10)
    parser.add_argument("-i",
                        "--index",
                        help="First plot index",
                        type=int,
                        default=0)
    parser.add_argument("-p",
                        "--pool_pub_key",
                        help="Hex public key of pool",
                        type=str,
                        default="")
    parser.add_argument(
        "-t",
        "--tmp_dir",
        help=
        "Temporary directory for plotting files (relative to final directory)",
        type=Path,
        default=Path("./plots.tmp"),
    )

    new_plots_root = path_from_root(
        root_path,
        load_config(root_path,
                    "config.yaml").get("harvester",
                                       {}).get("new_plot_root", "plots"),
    )
    parser.add_argument(
        "-d",
        "--final_dir",
        help="Final directory for plots (relative or absolute)",
        type=Path,
        default=new_plots_root,
    )

    # We need the keys file to access the pool keys (if they exist) and the sk_seed.
    args = parser.parse_args()
    if not key_config_filename.exists():
        raise RuntimeError("Keys not generated. Run chia-generate-keys")

    # The seed is what will be used to generate a private key for each plot
    key_config = load_config(root_path, key_config_filename)
    sk_seed: bytes = bytes.fromhex(key_config["sk_seed"])

    pool_pk: PublicKey
    if len(args.pool_pub_key) > 0:
        # Use the provided pool public key, useful for using an external pool
        pool_pk = PublicKey.from_bytes(bytes.fromhex(args.pool_pub_key))
    else:
        # Use the pool public key from the config, useful for solo farming
        pool_sk = PrivateKey.from_bytes(
            bytes.fromhex(key_config["pool_sks"][0]))
        pool_pk = pool_sk.get_public_key()

    print(
        f"Creating {args.num_plots} plots, from index {args.index} to "
        f"{args.index + args.num_plots - 1}, of size {args.size}, sk_seed {sk_seed.hex()} ppk {pool_pk}"
    )

    tmp_dir = args.final_dir / args.tmp_dir
    mkdir(tmp_dir)
    mkdir(args.final_dir)
    for i in range(args.index, args.index + args.num_plots):
        # Generate a sk based on the seed, plot size (k), and index
        sk: PrivateKey = PrivateKey.from_seed(sk_seed +
                                              args.size.to_bytes(1, "big") +
                                              i.to_bytes(4, "big"))

        # The plot seed is based on the pool and plot pks
        plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed(
            pool_pk, sk.get_public_key())
        filename: str = f"plot-{i}-{args.size}-{plot_seed}.dat"
        full_path: Path = args.final_dir / filename
        if not full_path.exists():
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(tmp_dir),
                str(args.final_dir),
                filename,
                args.size,
                bytes([]),
                plot_seed,
            )
        else:
            print(f"Plot {filename} already exists")

        # Updates the config if necessary.
        plot_config = load_config(root_path, plot_config_filename)
        plot_config_plots_new = deepcopy(plot_config.get("plots", {}))
        relative_path = make_path_relative(full_path, root_path)
        if (relative_path not in plot_config_plots_new
                and full_path not in plot_config_plots_new):
            plot_config_plots_new[str(full_path)] = {
                "sk": bytes(sk).hex(),
                "pool_pk": bytes(pool_pk).hex(),
            }
        plot_config["plots"].update(plot_config_plots_new)

        # Dumps the new config to disk.
        save_config(root_path, plot_config_filename, plot_config)
    try:
        tmp_dir.rmdir()
    except Exception:
        print(f"warning: couldn't delete {tmp_dir}")
Example #30
def main():
    """
    Script for creating plots.yaml from a directory (output file name ==> plots-generated.yaml).
    Copy script to ~/chia-blockchain/src/cmds
    Execute by running: python generate_plots_yaml_file.py 

    Without any parameters the default plots.yaml and keys.yaml locations will be used

    python generate_plots_yaml_file.py -d /mnt/bigdisk/plots             # will scan the specified drive and create plots-generated.yaml

    python generate_plots_yaml_file.py -a True -d /mnt/bigdisk/plots     # will append entries to plots-generated.yaml, for 2nd and 3rd drives
    """
    root_path = DEFAULT_ROOT_PATH
    plot_config_filename = config_path_for_filename(root_path, "plots.yaml")
    key_config_filename = config_path_for_filename(root_path, "keys.yaml")

    parser = argparse.ArgumentParser(description="Chia plots.yaml generator")

    parser.add_argument("-a",
                        "--append",
                        help="Append to an existing output file",
                        type=bool,
                        default=False)

    new_plots_root = path_from_root(
        root_path,
        load_config(root_path,
                    "config.yaml").get("harvester",
                                       {}).get("new_plot_root", "plots"),
    )

    parser.add_argument(
        "-d",
        "--final_dir",
        help="Directory of plots",
        type=Path,
        default=Path(new_plots_root),
    )

    # We need the keys file to access the pool keys (if they exist) and the sk_seed.
    args = parser.parse_args()
    if not key_config_filename.exists():
        raise RuntimeError("Can not find keys.yaml.")

    # The seed is what will be used to generate a private key for each plot
    key_config = load_config(root_path, key_config_filename)
    sk_seed: bytes = bytes.fromhex(key_config["sk_seed"])

    pool_pk: PublicKey
    # Use the pool public key from the config, useful for solo farming
    pool_sk = PrivateKey.from_bytes(bytes.fromhex(key_config["pool_sks"][0]))
    pool_pk = pool_sk.get_public_key()

    paths = Path(args.final_dir)
    if not paths.exists():
        raise RuntimeError("Path does not exist.")

    if args.append:
        outfile = open(
            str(plot_config_filename)[0:-5] + "-generated.yaml", "a+")
    else:
        outfile = open(
            str(plot_config_filename)[0:-5] + "-generated.yaml", "w+")
        outfile.write("plots:\n")

    pathlist = Path(args.final_dir).glob('*.dat')
    pathlist = sorted(pathlist)
    for path in pathlist:
        # get only the filename from the full path
        filename = path.name

        # split the filename into index, size, and plot_seed
        tmp = filename.split('-')
        index = int(tmp[1])
        size = int(tmp[2])
        plot_seed = tmp[3]

        # remove the file extension
        plot_seed = plot_seed[0:-4]
        sk: PrivateKey = PrivateKey.from_seed(sk_seed +
                                              size.to_bytes(1, "big") +
                                              index.to_bytes(4, "big"))
        outfile.write("  " + str(path) + ":\n")
        outfile.write("    pool_pk: " + bytes(pool_pk).hex() + "\n")
        outfile.write("    sk: " + bytes(sk).hex() + "\n")

    outfile.close()
    print("plots-generated.yaml created in the config directory")