def convert(instance, backend):
    """Convert the data backend of an instance.

    Parameters
    ----------
    instance : str
        Name of the instance whose storage backend should be converted.
    backend : str
        Identifier of the desired backend; resolved via ``get_target_backend``.

    Raises
    ------
    RuntimeError
        If the instance still uses the legacy MongoV1 backend, which must be
        upgraded manually (see the 3.2 release notes).
    """
    current_backend = get_current_backend(instance)
    target = get_target_backend(backend)
    data_manager.load_basic_configuration(instance)

    default_dirs = deepcopy(data_manager.basic_config_default)
    default_dirs["DATA_PATH"] = str(Path(instance_data[instance]["DATA_PATH"]))

    if current_backend == BackendType.MONGOV1:
        raise RuntimeError("Please see the 3.2 release notes for upgrading a bot using mongo.")

    new_storage_details = asyncio.run(do_migration(current_backend, target))

    if new_storage_details is not None:
        default_dirs["STORAGE_TYPE"] = target.value
        default_dirs["STORAGE_DETAILS"] = new_storage_details
        save_config(instance, default_dirs)
        # Use target.value (not the enum repr) for consistency with the
        # failure message below.
        conversion_log.info(f"Conversion to {target.value} complete.")
    else:
        conversion_log.info(
            f"Cannot convert {current_backend.value} to {target.value} at this time."
        )
def handle_edit(cli_flags: Namespace):
    """
    This one exists to not log all the things like it's a full run of the bot.

    Runs the interactive instance editor on a private event loop: the storage
    driver is initialized, ``edit_instance`` runs, and everything is torn
    down before the process exits.
    """
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    run = event_loop.run_until_complete
    data_manager.load_basic_configuration(cli_flags.instance_name)
    red = Red(cli_flags=cli_flags, description="Red V3", dm_help=None, fetch_offline_members=True)
    try:
        driver_cls = drivers.get_driver_class()
        run(driver_cls.initialize(**data_manager.storage_details()))
        run(edit_instance(red, cli_flags))
        run(driver_cls.teardown())
    except (KeyboardInterrupt, EOFError):
        print("Aborted!")
    finally:
        # Give pending transports a moment to flush before the loop is closed.
        run(asyncio.sleep(1))
        asyncio.set_event_loop(None)
        event_loop.stop()
        event_loop.close()
    sys.exit(0)
async def json_to_mongov2(instance):
    """Migrate an instance's JSON data files into the MongoV2 backend.

    Core settings are converted first, then every cog ``settings.json``
    found under the instance's data path. Returns the Mongo storage
    details so the caller can persist them in the instance config.
    """
    instance_vals = instance_data[instance]
    current_data_dir = Path(instance_vals["DATA_PATH"])
    load_basic_configuration(instance)

    from redbot.core.drivers import red_mongo

    # Collects the Mongo connection details (host, auth, ...).
    # NOTE(review): presumably prompts the user — confirm against red_mongo.
    storage_details = red_mongo.get_config_details()

    core_conf = Config.get_core_conf()
    new_driver = red_mongo.Mongo(cog_name="Core", identifier="0", **storage_details)

    core_conf.init_custom("CUSTOM_GROUPS", 2)
    custom_group_data = await core_conf.custom("CUSTOM_GROUPS").all()

    curr_custom_data = custom_group_data.get("Core", {}).get("0", {})
    exported_data = await core_conf.driver.export_data(curr_custom_data)
    conversion_log.info("Starting Core conversion...")
    await new_driver.import_data(exported_data, curr_custom_data)
    conversion_log.info("Core conversion complete.")

    for p in current_data_dir.glob("cogs/**/settings.json"):
        cog_name = p.parent.stem
        if "." in cog_name:
            # Garbage handler
            continue

        with p.open(mode="r") as f:
            cog_data = json.load(f)
        for identifier, all_data in cog_data.items():
            # Identifiers that aren't integers can't map to a Config group.
            try:
                conf = Config.get_conf(None, int(identifier), cog_name=cog_name)
            except ValueError:
                continue
            new_driver = red_mongo.Mongo(
                cog_name=cog_name,
                identifier=conf.driver.unique_cog_identifier,
                **storage_details
            )
            curr_custom_data = custom_group_data.get(cog_name, {}).get(identifier, {})
            exported_data = await conf.driver.export_data(curr_custom_data)
            conversion_log.info(f"Converting {cog_name} with identifier {identifier}...")
            await new_driver.import_data(exported_data, curr_custom_data)

    conversion_log.info("Cog conversion complete.")

    return storage_details
async def create_backup(instance: str, destination_folder: Path = Path.home()) -> None:
    """Create a backup archive of an instance's data.

    A non-JSON backend is first migrated to JSON so the on-disk files are
    complete before archiving. Progress and the result are printed to stdout.
    """
    data_manager.load_basic_configuration(instance)
    backend_type = get_current_backend(instance)
    # Make sure everything lives on disk as JSON before archiving.
    if backend_type != BackendType.JSON:
        await do_migration(backend_type, BackendType.JSON)
    print("Backing up the instance's data...")
    backup_fpath = await red_create_backup(destination_folder)
    if backup_fpath is None:
        print("Creating the backup failed.")
    else:
        print(f"A backup of {instance} has been made. It is at {backup_fpath}")
async def create_backup(instance):
    """Interactively create a ``.tar.gz`` backup of an instance's data.

    MongoDB-backed instances are dumped to JSON first so the archive
    captures everything on disk. Reinstallable artifacts (caches,
    Lavalink.jar, downloaded cogs/repos) are excluded; repo origins are
    written to ``repos.json`` so they can be re-cloned after a restore.
    """
    instance_vals = instance_data[instance]
    if confirm("Would you like to make a backup of the data for this instance? (y/n)"):
        load_basic_configuration(instance)
        if instance_vals["STORAGE_TYPE"] == "MongoDB":
            # Export Mongo-stored settings back to JSON files before archiving.
            await mongo_to_json(instance)
        print("Backing up the instance's data...")
        backup_filename = "redv3-{}-{}.tar.gz".format(
            instance, dt.utcnow().strftime("%Y-%m-%d %H-%M-%S")
        )
        pth = Path(instance_vals["DATA_PATH"])
        if pth.exists():
            # Path.home() ignores the instance `pth`; the archive always
            # lands in the user's home directory.
            backup_pth = pth.home()
            backup_file = backup_pth / backup_filename
            to_backup = []
            # Substring-matched against every candidate path below.
            exclusions = [
                "__pycache__",
                "Lavalink.jar",
                os.path.join("Downloader", "lib"),
                os.path.join("CogManager", "cogs"),
                os.path.join("RepoManager", "repos"),
            ]

            from redbot.cogs.downloader.repo_manager import RepoManager

            repo_mgr = RepoManager()
            await repo_mgr.initialize()
            repo_output = []
            # Record each repo's origin so excluded clones can be restored later.
            for repo in repo_mgr._repos.values():
                repo_output.append({"url": repo.url, "name": repo.name, "branch": repo.branch})
            repo_filename = pth / "cogs" / "RepoManager" / "repos.json"
            with open(str(repo_filename), "w") as f:
                f.write(json.dumps(repo_output, indent=4))
            # NOTE(review): `instance_name` and `basic_config` are not defined
            # in this function — presumably module-level globals populated by
            # load_basic_configuration; verify, otherwise this is a NameError.
            instance_vals = {instance_name: basic_config}
            instance_file = pth / "instance.json"
            with open(str(instance_file), "w") as instance_out:
                instance_out.write(json.dumps(instance_vals, indent=4))
            for f in pth.glob("**/*"):
                if not any(ex in str(f) for ex in exclusions):
                    to_backup.append(f)
            with tarfile.open(str(backup_file), "w:gz") as tar:
                # recursive=False: the glob above already yields every path.
                for f in to_backup:
                    tar.add(str(f), recursive=False)
            print("A backup of {} has been made. It is at {}".format(instance, backup_file))
async def create_backup(instance: str, destination_folder: Path = Path.home()) -> None:
    """Create a backup archive of an instance's data via its storage driver.

    Parameters
    ----------
    instance : str
        Name of the instance to back up.
    destination_folder : Path
        Directory where the backup archive should be placed.
    """
    data_manager.load_basic_configuration(instance)
    backend_type = get_current_backend(instance)
    # Ensure everything is on disk as JSON before archiving.
    if backend_type != BackendType.JSON:
        await do_migration(backend_type, BackendType.JSON)
    print("Backing up the instance's data...")
    driver_cls = drivers.get_driver_class()
    await driver_cls.initialize(**data_manager.storage_details())
    try:
        backup_fpath = await red_create_backup(destination_folder)
    finally:
        # Always release driver resources, even if the backup step raises.
        await driver_cls.teardown()
    if backup_fpath is not None:
        print(f"A backup of {instance} has been made. It is at {backup_fpath}")
    else:
        print("Creating the backup failed.")
async def remove_instance(
    instance: str,
    interactive: bool = False,
    delete_data: Optional[bool] = None,
    _create_backup: Optional[bool] = None,
    drop_db: Optional[bool] = None,
    remove_datapath: Optional[bool] = None,
) -> None:
    """Remove a Red instance, optionally backing up and deleting its data.

    When ``interactive`` is True, any decision left as ``None`` is asked
    interactively via click prompts.
    """
    data_manager.load_basic_configuration(instance)
    backend = get_current_backend(instance)
    needs_server = backend != BackendType.JSON

    if interactive is True and delete_data is None:
        prompt = "Would you like to delete this instance's data?"
        if needs_server:
            prompt += " The database server must be running for this to work."
        delete_data = click.confirm(prompt, default=False)

    if interactive is True and _create_backup is None:
        prompt = "Would you like to make a backup of the data for this instance?"
        if needs_server:
            prompt += " The database server must be running for this to work."
        _create_backup = click.confirm(prompt, default=False)

    if _create_backup is True:
        await create_backup(instance)

    driver_cls = drivers.get_driver_class(backend)
    if delete_data is True:
        # Initialize the driver, delete the data, and always tear down.
        await driver_cls.initialize(**data_manager.storage_details())
        try:
            await driver_cls.delete_all_data(interactive=interactive, drop_db=drop_db)
        finally:
            await driver_cls.teardown()

    if interactive is True and remove_datapath is None:
        remove_datapath = click.confirm(
            "Would you like to delete the instance's entire datapath?", default=False
        )

    if remove_datapath is True:
        safe_delete(data_manager.core_data_path().parent)

    save_config(instance, {}, remove=True)
    print("The instance {} has been removed.".format(instance))
def run_bot(log, autorestart: bool = False, shard_count: int = None, shard_ids: list = None):
    """Spin up a Red bot on a fresh event loop and run it to completion.

    NOTE(review): ``autorestart``, ``shard_count`` and ``shard_ids`` are
    accepted but never used here, and the CLI flags are hard-coded to the
    "qa" instance with a module-level TOKEN — this looks like a QA/test
    harness; confirm before reusing elsewhere.

    Any exception raised while running the bot is logged, not re-raised.
    """
    try:
        new_loop = asyncio.new_event_loop()
        # Hard-coded flags: instance "qa", non-interactive, token from TOKEN.
        cli_flags = parse_cli_flags(["qa", "--no-prompt", "--token", TOKEN])
        data_manager.load_basic_configuration(cli_flags.instance_name)
        red = Red(
            cli_flags=cli_flags,
            description="Red V3",
            dm_help=None,
            fetch_offline_members=True,
            loop=new_loop,
        )
        new_loop.run_until_complete(run_red_bot(log, red, cli_flags))
    except Exception as exc:
        log.exception(str(exc), exc_info=exc)
async def remove_instance(
    instance,
    interactive: bool = False,
    delete_data: Optional[bool] = None,
    _create_backup: Optional[bool] = None,
    drop_db: Optional[bool] = None,
    remove_datapath: Optional[bool] = None,
):
    """Remove a Red instance, optionally backing up and deleting its data.

    Parameters
    ----------
    instance :
        Name of the instance to remove.
    interactive : bool
        When True, prompt for any of the Optional decisions left as None.
    delete_data : Optional[bool]
        Whether to delete the instance's stored data.
    _create_backup : Optional[bool]
        Whether to create a backup before removal.
    drop_db : Optional[bool]
        Forwarded to the driver's ``delete_all_data``.
    remove_datapath : Optional[bool]
        Whether to delete the instance's entire datapath directory.
    """
    data_manager.load_basic_configuration(instance)
    if interactive is True and delete_data is None:
        delete_data = click.confirm(
            "Would you like to delete this instance's data?", default=False
        )

    if interactive is True and _create_backup is None:
        _create_backup = click.confirm(
            "Would you like to make a backup of the data for this instance?", default=False
        )

    if _create_backup is True:
        await create_backup(instance)

    backend = get_current_backend(instance)
    driver_cls = drivers.get_driver_class(backend)
    if delete_data is True:
        # NOTE(review): no driver initialize()/teardown() around this call,
        # unlike the other remove_instance variant in this file — confirm
        # this driver API supports deletion without prior initialization.
        await driver_cls.delete_all_data(interactive=interactive, drop_db=drop_db)

    if interactive is True and remove_datapath is None:
        remove_datapath = click.confirm(
            "Would you like to delete the instance's entire datapath?", default=False
        )

    if remove_datapath is True:
        data_path = data_manager.core_data_path().parent
        safe_delete(data_path)

    save_config(instance, {}, remove=True)

    print("The instance {} has been removed\n".format(instance))
def main():
    """Parse CLI flags, configure the event loop, and run the bot forever."""
    cli_flags = parse_cli_flags(sys.argv[1:])
    handle_early_exit_flags(cli_flags)
    try:
        loop = asyncio.get_event_loop()
        if cli_flags.no_instance:
            print(
                "\033[1m"
                "Warning: The data will be placed in a temporary folder and removed on next system "
                "reboot."
                "\033[0m"
            )
            cli_flags.instance_name = "temporary_red"
            data_manager.create_temp_config()
        data_manager.load_basic_configuration(cli_flags.instance_name)
        red = Red(
            cli_flags=cli_flags,
            description=description,
            dm_help=None,
            fetch_offline_members=True,
        )
        if os.name != "nt":
            # Signal handlers are POSIX-only; on Windows, KeyboardInterrupt is
            # caught in a global handler instead. At least it's not redundant...
            for sig in (signal.SIGHUP, signal.SIGTERM, signal.SIGINT):
                # Bind `sig` as a default so each lambda keeps its own signal.
                loop.add_signal_handler(
                    sig, lambda sig=sig: asyncio.create_task(shutdown_handler(red, sig))
                )
        loop.set_exception_handler(functools.partial(exception_handler, red))
        # Graceful cleanup on Windows rules out asyncio.run(); drive the loop
        # manually instead.
        loop.create_task(run_bot(red, cli_flags))
        loop.run_forever()
    finally:
        loop.close()
async def mongo_to_json(instance):
    """Export an instance's MongoDB data back into JSON driver files.

    Iterates every collection (one per cog) and every document (one per
    cog identifier), writing each category's data through a JSON driver
    into the matching data path. Returns an empty dict because the JSON
    backend requires no storage details.
    """
    load_basic_configuration(instance)
    from redbot.core.drivers.red_mongo import Mongo

    m = Mongo("Core", "0", **storage_details())
    db = m.db
    collection_names = await db.list_collection_names()
    for collection_name in collection_names:
        if "." in collection_name:
            # Fix for one of Zeph's problems
            continue
        elif collection_name == "Core":
            c_data_path = core_data_path()
        else:
            c_data_path = cog_data_path(raw_name=collection_name)
        c_data_path.mkdir(parents=True, exist_ok=True)
        # Every cog name has its own collection
        collection = db[collection_name]
        async for document in collection.find():
            # Every cog has its own document.
            # This means if two cogs have the same name but different identifiers, they will
            # be two separate documents in the same collection
            cog_id = document.pop("_id")
            if not isinstance(cog_id, str):
                # Another garbage data check
                continue
            elif not str(cog_id).isdigit():
                continue
            driver = JSON(collection_name, cog_id, data_path_override=c_data_path)
            for category, value in document.items():
                ident_data = IdentifierData(str(cog_id), category, (), (), {})
                await driver.set(ident_data, value=value)
    return {}
def main():
    """Legacy entry point: parse flags, wire up the bot, and run it.

    Handles instance selection, logger/loop setup, cog loading, token and
    prefix resolution (with interactive fallback), then runs the bot until
    shutdown and exits with the recorded shutdown mode.
    """
    description = "Bot Base - Version {}".format(__version__)
    cli_flags = parse_cli_flags(sys.argv[1:])
    # Early-exit flags: listing instances, printing the version, or
    # rejecting a run with no instance name at all.
    if cli_flags.list_instances:
        list_instances()
    elif cli_flags.version:
        print(description)
        sys.exit(0)
    elif not cli_flags.instance_name and not cli_flags.no_instance:
        print("Error: No instance name was provided!")
        sys.exit(1)
    if cli_flags.no_instance:
        print(
            "\033[1m"
            "Warning: The data will be placed in a temporary folder and removed on next system "
            "reboot."
            "\033[0m"
        )
        cli_flags.instance_name = "temporary_red"
        create_temp_config()
    load_basic_configuration(cli_flags.instance_name)
    log = init_loggers(cli_flags)
    loop = asyncio.get_event_loop()
    red = Red(cli_flags=cli_flags, description=description, pm_help=None)
    init_global_checks(red)
    init_events(red, cli_flags)
    loop.run_until_complete(red.cog_mgr.initialize())
    # Built-in cogs; Dev tools only when explicitly requested.
    red.add_cog(Core(red))
    red.add_cog(CogManagerUI())
    if cli_flags.dev:
        red.add_cog(Dev())
    # noinspection PyProtectedMember
    modlog._init()
    # noinspection PyProtectedMember
    bank._init()
    if os.name == "posix":
        loop.add_signal_handler(
            SIGTERM, lambda: asyncio.ensure_future(sigterm_handler(red, log))
        )
    tmp_data = {}
    loop.run_until_complete(_get_prefix_and_token(red, tmp_data))
    # Token precedence: CLI flag > RED_TOKEN env var > stored value.
    token = os.environ.get("RED_TOKEN", tmp_data["token"])
    if cli_flags.token:
        token = cli_flags.token
    prefix = cli_flags.prefix or tmp_data["prefix"]
    if not (token and prefix):
        if cli_flags.no_prompt is False:
            # Interactive setup can supply the missing token and/or prefix.
            new_token = interactive_config(red, token_set=bool(token), prefix_set=bool(prefix))
            if new_token:
                token = new_token
        else:
            log.critical("Token and prefix must be set in order to login.")
            sys.exit(1)
        loop.run_until_complete(_get_prefix_and_token(red, tmp_data))
    if cli_flags.dry_run:
        loop.run_until_complete(red.http.close())
        sys.exit(0)
    try:
        loop.run_until_complete(red.start(token, bot=True))
    except discord.LoginFailure:
        log.critical("This token doesn't seem to be valid.")
        db_token = loop.run_until_complete(red.db.token())
        if db_token and not cli_flags.no_prompt:
            # Offer to wipe a stored token that Discord rejected.
            print("\nDo you want to reset the token? (y/n)")
            if confirm("> "):
                loop.run_until_complete(red.db.token.set(""))
                print("Token has been reset.")
    except KeyboardInterrupt:
        log.info("Keyboard interrupt detected. Quitting...")
        loop.run_until_complete(red.logout())
        red._shutdown_mode = ExitCodes.SHUTDOWN
    except Exception as e:
        log.critical("Fatal exception", exc_info=e)
        loop.run_until_complete(red.logout())
    finally:
        # Cancel anything still pending, close RPC if present, and exit with
        # the recorded shutdown mode.
        pending = asyncio.Task.all_tasks(loop=red.loop)
        gathered = asyncio.gather(*pending, loop=red.loop, return_exceptions=True)
        gathered.cancel()
        try:
            loop.run_until_complete(red.rpc.close())
        except AttributeError:
            pass
        sys.exit(red._shutdown_mode.value)
async def mongov2_to_json(instance):
    """Migrate an instance's MongoV2 data back to the JSON backend.

    Core config is converted first, then every (cog, identifier) pair
    discovered in the Mongo collections. Returns an empty dict because
    the JSON backend requires no storage details.
    """
    load_basic_configuration(instance)
    core_path = core_data_path()

    from redbot.core.drivers import red_json

    core_conf = Config.get_core_conf()
    new_driver = red_json.JSON(cog_name="Core", identifier="0", data_path_override=core_path)
    core_conf.init_custom("CUSTOM_GROUPS", 2)
    custom_group_data = await core_conf.custom("CUSTOM_GROUPS").all()

    curr_custom_data = custom_group_data.get("Core", {}).get("0", {})
    exported_data = await core_conf.driver.export_data(curr_custom_data)
    conversion_log.info("Starting Core conversion...")
    await new_driver.import_data(exported_data, curr_custom_data)
    conversion_log.info("Core conversion complete.")

    # Collection names split on "." into (cog_name, category); skip Core
    # (already converted) and names without a category component.
    collection_names = await core_conf.driver.db.list_collection_names()
    splitted_names = list(
        filter(
            lambda elem: elem[1] != "" and elem[0] != "Core",
            [n.split(".") for n in collection_names],
        )
    )

    ident_map = {}  # Cogname: idents list
    for cog_name, category in splitted_names:
        if cog_name not in ident_map:
            ident_map[cog_name] = set()

        idents = await core_conf.driver.db[cog_name][category].distinct("_id.RED_uuid")
        ident_map[cog_name].update(set(idents))

    for cog_name, idents in ident_map.items():
        for identifier in idents:
            curr_custom_data = custom_group_data.get(cog_name, {}).get(identifier, {})
            # Identifiers that aren't integers can't map to a Config group.
            try:
                conf = Config.get_conf(None, int(identifier), cog_name=cog_name)
            except ValueError:
                continue
            exported_data = await conf.driver.export_data(curr_custom_data)
            new_path = cog_data_path(raw_name=cog_name)
            new_driver = red_json.JSON(cog_name, identifier, data_path_override=new_path)
            conversion_log.info(f"Converting {cog_name} with identifier {identifier}...")
            await new_driver.import_data(exported_data, curr_custom_data)
            # cog_data_path(raw_name=cog_name)

    conversion_log.info("Cog conversion complete.")

    return {}
def main():
    """Modern entry point: run the bot with full lifecycle handling.

    Sets up a fresh event loop, installs POSIX signal and exception
    handlers, runs the bot, and guarantees cleanup (async generators,
    loop shutdown) for every exit path before exiting the process.
    """
    red = None  # Error handling for users misusing the bot
    cli_flags = parse_cli_flags(sys.argv[1:])
    handle_early_exit_flags(cli_flags)
    if cli_flags.edit:
        # The instance editor has its own lifecycle; hand off entirely.
        handle_edit(cli_flags)
        return
    try:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        if cli_flags.no_instance:
            print(
                "\033[1m"
                "Warning: The data will be placed in a temporary folder and removed on next system "
                "reboot."
                "\033[0m"
            )
            cli_flags.instance_name = "temporary_red"
            data_manager.create_temp_config()
        data_manager.load_basic_configuration(cli_flags.instance_name)
        red = Red(cli_flags=cli_flags, description="Red V3", dm_help=None)
        if os.name != "nt":
            # None of this works on windows.
            # At least it's not a redundant handler...
            signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
            for s in signals:
                # Bind `s` as a default so each lambda keeps its own signal.
                loop.add_signal_handler(
                    s, lambda s=s: asyncio.create_task(shutdown_handler(red, s))
                )
        exc_handler = functools.partial(global_exception_handler, red)
        loop.set_exception_handler(exc_handler)
        # We actually can't (just) use asyncio.run here
        # We probably could if we didn't support windows, but we might run into
        # a scenario where this isn't true if anyone works on RPC more in the future
        fut = loop.create_task(run_bot(red, cli_flags))
        r_exc_handler = functools.partial(red_exception_handler, red)
        fut.add_done_callback(r_exc_handler)
        loop.run_forever()
    except KeyboardInterrupt:
        # We still have to catch this here too. (*joy*)
        log.warning(
            "Please do not use Ctrl+C to Shutdown Red! (attempting to die gracefully...)"
        )
        log.error("Received KeyboardInterrupt, treating as interrupt")
        if red is not None:
            loop.run_until_complete(shutdown_handler(red, signal.SIGINT))
    except SystemExit as exc:
        # We also have to catch this one here. Basically any exception which normally
        # kills the python interpreter (Base Exceptions minus asyncio.CancelledError)
        # needs handling prior to having the loop close.
        log.info("Shutting down with exit code: %s", exc.code)
        if red is not None:
            loop.run_until_complete(shutdown_handler(red, None, exc.code))
    except Exception as exc:  # Non standard case.
        log.exception("Unexpected exception (%s): ", type(exc), exc_info=exc)
        if red is not None:
            loop.run_until_complete(shutdown_handler(red, None, ExitCodes.CRITICAL))
    finally:
        # Allows transports to close properly, and prevent new ones from being opened.
        # Transports may still not be closed correctly on windows, see below
        loop.run_until_complete(loop.shutdown_asyncgens())
        # *we* aren't cleaning up more here, but it prevents
        # a runtime error at the event loop on windows
        # with resources which require longer to clean up.
        # With other event loops, a failure to cleanup prior to here
        # results in a resource warning instead
        log.info("Please wait, cleaning up a bit more")
        loop.run_until_complete(asyncio.sleep(2))
        asyncio.set_event_loop(None)
        loop.stop()
        loop.close()
        exit_code = red._shutdown_mode if red is not None else 1
        sys.exit(exit_code)