def test_all(capfd: Capture, faker: Faker) -> None:
    execute_outside(capfd, "backup rabbit")
    execute_outside(capfd, "restore rabbit")

    backup_folder = BACKUP_DIR.joinpath("rabbit")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
        services=["rabbit"],
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup rabbit",
        f"image, execute {colors.RED}rapydo pull rabbit",
    )
    exec_command(
        capfd,
        "restore rabbit",
        f"image, execute {colors.RED}rapydo pull rabbit",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "rabbitmq")

    # Just some extra delay, rabbit is a slow starter
    time.sleep(5)

    # NOTE: q = rabbitmq.__name__ is just to have a fixed name to be used to test
    # the queue without the need to introduce further nested " or '
    query_queue = (
        "shell backend \"/usr/bin/python3 -c "
        "'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; "
        "r = rabbitmq.get_instance();print(q, r.queue_exists(q));'\""
    )
    create_queue = (
        "shell backend \"/usr/bin/python3 -c "
        "'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; "
        "r = rabbitmq.get_instance(); r.create_queue(q);'\""
    )
    delete_queue = (
        "shell backend \"/usr/bin/python3 -c "
        "'from restapi.connectors import rabbitmq; q = rabbitmq.__name__; "
        "r = rabbitmq.get_instance(); r.delete_queue(q);'\""
    )

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq False")

    exec_command(
        capfd,
        create_queue,
    )

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq True")

    # Backup command
    exec_command(
        capfd,
        "backup rabbit",
        "RabbitMQ is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )
    exec_command(
        capfd,
        "backup rabbit --force --restart backend",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )
    # This is to verify that --force restarted rabbit
    exec_command(
        capfd,
        "backup rabbit",
        "RabbitMQ is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup rabbit",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )
    # Verify that, due to the dry run, no backup is executed
    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    exec_command(
        capfd,
        "backup rabbit --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )
    # Verify that, due to the dry run, no backup is executed
    exec_command(
        capfd,
        "backup rabbit --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Create an additional backup to test the deletion (backups are now 3)
    exec_command(
        capfd,
        "backup rabbit",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup rabbit --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup rabbit --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finds 2: all the files above are ignored because they do not
        # match the date pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on rabbit...",
        "Backup completed: data/backup/rabbit/",
    )

    exec_command(capfd, "start backend rabbit")

    # Just some extra delay, rabbit is a slow starter
    if Configuration.swarm_mode:
        time.sleep(20)
    else:
        time.sleep(10)

    exec_command(
        capfd,
        delete_queue,
    )

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq False")

    # Restore command
    exec_command(
        capfd,
        "restore rabbit",
        "Please specify one of the following backup:",
        ".tar.gz",
    )
    exec_command(
        capfd,
        "restore rabbit invalid",
        "Invalid backup file, data/backup/rabbit/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore rabbit",
            "No backup found, the following folder "
            "does not exist: data/backup/rabbit",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore rabbit",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

    os.mkdir("data/backup/rabbit")

    exec_command(
        capfd,
        "restore rabbit",
        "No backup found, data/backup/rabbit is empty",
    )

    open("data/backup/rabbit/test.gz", "a").close()
    exec_command(
        capfd,
        "restore rabbit",
        "No backup found, data/backup/rabbit is empty",
    )

    open("data/backup/rabbit/test.tar.gz", "a").close()
    exec_command(
        capfd,
        "restore rabbit",
        "Please specify one of the following backup:",
        "test.tar.gz",
    )

    os.remove("data/backup/rabbit/test.gz")
    os.remove("data/backup/rabbit/test.tar.gz")

    # Test restore on rabbit (requires rabbit to be down)
    files = os.listdir("data/backup/rabbit")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    rabbit_dump_file = files[-1]

    exec_command(capfd, "remove")
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file}",
        "Starting restore on rabbit...",
        f"Restore from data/backup/rabbit/{rabbit_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")

    # 4) verify that the data matches point 1 again (restore completed)
    # postponed because rabbit needs time to start...
    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file}",
        "RabbitMQ is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore rabbit {rabbit_dump_file} --force --restart backend",
        "Starting restore on rabbit...",
        f"Restore from data/backup/rabbit/{rabbit_dump_file} completed",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )

    # Wait for rabbit to completely start up
    service_verify(capfd, "rabbitmq")

    exec_command(capfd, query_queue, "restapi.connectors.rabbitmq True")
def test_all(capfd: Capture, faker: Faker) -> None:
    execute_outside(capfd, "backup postgres")
    execute_outside(capfd, "restore postgres")

    backup_folder = BACKUP_DIR.joinpath("postgres")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup postgres",
        f"image, execute {colors.RED}rapydo pull postgres",
    )
    exec_command(
        capfd,
        "restore postgres",
        f"image, execute {colors.RED}rapydo pull postgres",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "sqlalchemy")

    # This will initialize postgres
    exec_command(capfd, "shell backend 'restapi init'")

    # Verify the initialization
    psql = "shell postgres 'psql -U sqluser -d SQL_API -c"
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user | User",
    )

    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # A second backup is needed to test backup retention
    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Verify that, due to the dry run, no backup is executed
    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    exec_command(
        capfd,
        "backup postgres --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )
    # Verify that, due to the dry run, no backup is executed
    exec_command(
        capfd,
        "backup postgres --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Create an additional backup to test the deletion (backups are now 3)
    exec_command(
        capfd,
        "backup postgres",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup postgres --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.gz").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup postgres --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finds 2: all the files above are ignored because they do not
        # match the date pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on postgres...",
        "Backup completed: data/backup/postgres/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup postgres",
        "The backup procedure requires postgres running, please start your stack",
    )

    exec_command(
        capfd,
        "restore postgres",
        "Please specify one of the following backup:",
        ".sql.gz",
    )
    exec_command(
        capfd,
        "restore postgres invalid",
        "Invalid backup file, data/backup/postgres/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore postgres",
            "No backup found, the following folder "
            "does not exist: data/backup/postgres",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore postgres",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

    os.mkdir("data/backup/postgres")

    exec_command(
        capfd,
        "restore postgres",
        "No backup found, data/backup/postgres is empty",
    )

    open("data/backup/postgres/test.sql.gz", "a").close()
    exec_command(
        capfd,
        "restore postgres",
        "Please specify one of the following backup:",
        "test.sql.gz",
    )
    os.remove("data/backup/postgres/test.sql.gz")

    files = os.listdir("data/backup/postgres")
    files = [f for f in files if f.endswith(".sql.gz")]
    files.sort()
    postgres_dump_file = files[-1]

    # Postgres restore is not allowed if the container is not running
    exec_command(
        capfd,
        f"restore postgres {postgres_dump_file}",
        "The restore procedure requires postgres running, please start your stack",
    )

    exec_command(capfd, "start", "Stack started")

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user | User",
    )
    # 2) modify the data
    exec_command(
        capfd,
        f'{psql} "update role SET description=name"\'',
    )
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user | normal_user",
    )
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore postgres {postgres_dump_file}",
        "Starting restore on postgres...",
        "CREATE DATABASE",
        "ALTER DATABASE",
        f"Restore from data/backup/postgres/{postgres_dump_file} completed",
    )

    # 4) verify that the data matches point 1 again (restore completed)
    exec_command(
        capfd,
        f'{psql} "select name, description from role"\'',
        "normal_user | User",
    )
def backup(
    service: SupportedServices = typer.Argument(..., help="Service name"),
    force: bool = typer.Option(
        False,
        "--force",
        help="Force the backup procedure",
        show_default=False,
    ),
    max_backups: int = typer.Option(
        0,
        "--max",
        help="Maximum number of backups; older files exceeding this number will be removed",
        show_default=False,
    ),
    dry_run: bool = typer.Option(
        False,
        "--dry-run",
        help="Do not perform any backup or delete backup files",
        show_default=False,
    ),
    restart: List[str] = typer.Option(
        [],
        "--restart",
        help="Service to be restarted once the backup is completed (multiple allowed)",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:
    Application.print_command(
        Application.serialize_parameter("--force", force, IF=force),
        Application.serialize_parameter("--max", max_backups, IF=max_backups),
        Application.serialize_parameter("--dry-run", dry_run, IF=dry_run),
        Application.serialize_parameter("--restart", restart, IF=restart),
        Application.serialize_parameter("", service.value),
    )

    if dry_run:
        log.warning("Dry run mode is enabled")

    Application.get_controller().controller_init()

    service_name = service.value

    verify_available_images(
        [service_name],
        Application.data.compose_config,
        Application.data.base_services,
    )

    docker = Docker()
    container = docker.get_container(service_name)

    backup_dir = BACKUP_DIR.joinpath(service_name)
    backup_dir.mkdir(parents=True, exist_ok=True)

    if max_backups > 0:
        backups = list(backup_dir.glob(get_date_pattern()))

        if max_backups >= len(backups):
            log.debug("Found {} backup files, maximum not reached", len(backups))
        else:
            # Delete the oldest backups exceeding the maximum (skipped in dry run)
            for f in sorted(backups)[:-max_backups]:
                if not dry_run:
                    f.unlink()
                log.warning(
                    "{} deleted because exceeding the max number of backup files ({})",
                    f.name,
                    max_backups,
                )

    module = BACKUP_MODULES.get(service.value)

    if not module:  # pragma: no cover
        print_and_exit(f"{service.value} misconfiguration, module not found")

    now = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")

    module.backup(container=container, now=now, force=force, dry_run=dry_run)

    if restart and not dry_run:
        log.info("Restarting services in 20 seconds...")
        time.sleep(10)
        log.info("Restarting services in 10 seconds...")
        time.sleep(10)
        reload(docker, restart)
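
# Editorial note (assumption): get_date_pattern() is imported from elsewhere
# and is not shown in this section. Judging from the file names the tests
# expect to be ignored (e.g. month 99, minute 99, or a missing extension), it
# presumably returns a glob pattern matching the %Y_%m_%d-%H_%M_%S timestamp
# produced above. A rough sketch, not the actual implementation:
def _example_date_pattern() -> str:
    return (
        "20[0-9][0-9]_"  # year
        "[0-1][0-9]_"  # month
        "[0-3][0-9]-"  # day
        "[0-2][0-9]_"  # hour
        "[0-5][0-9]_"  # minute
        "[0-5][0-9]"  # second
        ".*"  # any extension, e.g. .tar.gz or .sql.gz
    )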
def restore(
    service: SupportedServices = typer.Argument(..., help="Service name"),
    backup_file: Optional[str] = typer.Argument(
        None,
        help="Specify the backup to be restored",
        show_default=False,
    ),
    force: bool = typer.Option(
        False,
        "--force",
        help="Force the restore procedure",
        show_default=False,
    ),
    restart: List[str] = typer.Option(
        [],
        "--restart",
        help="Service to be restarted once the restore is completed (multiple allowed)",
        shell_complete=Application.autocomplete_service,
    ),
) -> None:
    Application.print_command(
        Application.serialize_parameter("--force", force, IF=force),
        Application.serialize_parameter("--restart", restart, IF=restart),
        Application.serialize_parameter("", service.value),
        Application.serialize_parameter("", backup_file),
    )

    Application.get_controller().controller_init()

    service_name = service.value

    verify_available_images(
        [service_name],
        Application.data.compose_config,
        Application.data.base_services,
    )

    docker = Docker()
    container = docker.get_container(service_name)

    backup_dir = BACKUP_DIR.joinpath(service_name)
    if not backup_dir.exists():
        print_and_exit(
            "No backup found, the following folder does not exist: {}", backup_dir
        )

    module = RESTORE_MODULES.get(service.value)

    if not module:  # pragma: no cover
        print_and_exit(f"{service.value} misconfiguration, module not found")

    expected_ext = module.EXPECTED_EXT

    if backup_file is None:
        files = backup_dir.iterdir()

        filtered_files = [d.name for d in files if d.name.endswith(expected_ext)]
        filtered_files.sort()

        if not len(filtered_files):
            print_and_exit("No backup found, {} is empty", backup_dir)

        log.info("Please specify one of the following backup:")
        for f in filtered_files:
            print(f"- {f}")

        return

    backup_host_path = backup_dir.joinpath(backup_file)
    if not backup_host_path.exists():
        print_and_exit("Invalid backup file, {} does not exist", backup_host_path)

    module.restore(container=container, backup_file=backup_file, force=force)

    if restart:
        log.info("Restarting services in 20 seconds...")
        time.sleep(10)
        log.info("Restarting services in 10 seconds...")
        time.sleep(10)
        reload(docker, restart)
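
# Editorial note (assumption): the BACKUP_MODULES / RESTORE_MODULES registries
# are defined elsewhere. From the calls in backup() and restore() above, each
# registered module is expected to expose at least the interface below. This
# Protocol is an illustrative sketch, not the actual controller code.
from typing import Any, Protocol


class BackupRestoreModule(Protocol):
    # Backup file extension for the service, e.g. ".tar.gz" for rabbit/redis/
    # mariadb, ".sql.gz" for postgres, ".dump" for neo4j (as per the tests)
    EXPECTED_EXT: str

    def backup(
        self, container: Any, now: str, force: bool, dry_run: bool
    ) -> None: ...

    def restore(self, container: Any, backup_file: str, force: bool) -> None: ...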
def load_project_scaffold(
    self, project: str, auth: Optional[str], services: Optional[List[str]] = None
) -> bool:
    if services is None:
        services = []

    self.project = project
    self.expected_folders.extend(self.expected_main_folders)
    self.expected_folders.append(self.p_path("confs"))
    self.expected_folders.append(self.p_path("builds"))
    self.expected_folders.append(self.p_path("backend"))
    self.expected_folders.append(self.p_path("backend", "endpoints"))
    self.expected_folders.append(self.p_path("backend", "models"))
    self.expected_folders.append(self.p_path("backend", "models", "emails"))
    self.expected_folders.append(self.p_path("backend", "tasks"))
    self.expected_folders.append(self.p_path("backend", "tests"))
    self.expected_folders.append(self.p_path("backend", "cron"))

    self.suggested_gitkeep.append(SUBMODULES_DIR.joinpath(GITKEEP))
    self.suggested_gitkeep.append(DATA_DIR.joinpath(GITKEEP))
    self.suggested_gitkeep.append(LOGS_FOLDER.joinpath(GITKEEP))
    self.suggested_gitkeep.append(BACKUP_DIR.joinpath(GITKEEP))
    self.suggested_gitkeep.append(self.p_path("backend", "cron", GITKEEP))
    self.suggested_gitkeep.append(self.p_path("builds", GITKEEP))
    self.suggested_gitkeep.append(self.p_path("backend", "endpoints", GITKEEP))
    self.suggested_gitkeep.append(self.p_path("backend", "tasks", GITKEEP))
    self.suggested_gitkeep.append(self.p_path("backend", "tests", GITKEEP))
    self.suggested_gitkeep.append(
        self.p_path("backend", "models", "emails", GITKEEP)
    )

    self.expected_files.append(self.p_path("project_configuration.yaml"))
    self.expected_files.append(self.p_path("confs", "commons.yml"))
    self.expected_files.append(self.p_path("confs", "development.yml"))
    self.expected_files.append(self.p_path("confs", "production.yml"))
    # Needed to ensure that mypy correctly extracts typing from the project module
    self.expected_files.append(self.p_path("backend", "__init__.py"))
    self.expected_files.append(self.p_path("backend", "initialization.py"))
    self.expected_files.append(self.p_path("backend", "customization.py"))
    self.expected_files.append(self.p_path("backend", "endpoints", "__init__.py"))
    self.expected_files.append(self.p_path("backend", "models", "__init__.py"))
    self.expected_files.append(self.p_path("backend", "tasks", "__init__.py"))
    self.expected_files.append(self.p_path("backend", "tests", "__init__.py"))
    self.expected_files.append(Path(".gitignore"))
    self.expected_files.append(Path(".gitattributes"))
    self.expected_files.append(Path("pyproject.toml"))
    self.expected_files.append(Path(".flake8"))
    self.expected_files.append(Path(".prettierignore"))

    self.fixed_files.append(Path(".gitattributes"))
    self.fixed_files.append(Path("pyproject.toml"))

    if auth or services:
        models = self.p_path("backend", "models")
        if auth == "sqlalchemy" or "postgres" in services or "mysql" in services:
            self.expected_files.append(models.joinpath("sqlalchemy.py"))
        if auth == "neo4j" or "neo4j" in services:
            self.expected_files.append(models.joinpath("neo4j.py"))

    self.optionals_folders.append(self.p_path("backend", "models", "emails"))
    self.optionals_files.append(
        self.p_path("backend", "models", "emails", "activate_account.html")
    )
    self.optionals_files.append(
        self.p_path("backend", "models", "emails", "new_credentials.html")
    )
    self.optionals_files.append(
        self.p_path("backend", "models", "emails", "reset_password.html")
    )
    self.optionals_files.append(
        self.p_path("backend", "models", "emails", "update_credentials.html")
    )
    self.optionals_files.append(Path("codecov.yml"))

    self.data_folders.extend(
        [
            LOGS_FOLDER,
            BACKUP_DIR,
            DATA_DIR.joinpath("uploads"),
        ]
    )

    # Removed since 0.7.1
    self.obsolete_files.append(self.p_path("confs", "debug.yml"))
    # Removed since 0.7.4
    self.obsolete_files.append(SUBMODULES_DIR.joinpath("rapydo-confs"))
    # Removed since 0.7.5
    self.obsolete_files.append(SUBMODULES_DIR.joinpath("frontend"))
    # Removed since 0.7.6
    self.obsolete_files.append(self.p_path("backend", "apis"))
    # Removed since 0.8
    self.obsolete_files.append(self.p_path("backend", "models", "swagger.yaml"))
    self.obsolete_files.append(self.p_path("backend", "endpoints", "profile.py"))
    # Removed since 0.9
    self.obsolete_files.append(self.p_path("backend", "initialization"))
    self.obsolete_files.append(self.p_path("frontend", "assets", "favicon.ico"))
    # Removed since 1.2
    self.obsolete_files.append(Path(".pre-commit-config.yaml"))
    # Removed since 2.3
    self.obsolete_files.append(Path(".isort.cfg"))

    return True
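
# Editorial note (assumption): p_path() is not shown in this section. From its
# usage above it presumably resolves path segments below the current project
# folder; a hypothetical sketch of such a helper (names are illustrative):
#
#     def p_path(self, *paths: str) -> Path:
#         return Path("projects", self.project, *paths)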
def test_all(capfd: Capture, faker: Faker) -> None:
    execute_outside(capfd, "backup redis")
    execute_outside(capfd, "restore redis")

    backup_folder = BACKUP_DIR.joinpath("redis")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="no",
        frontend="no",
        services=["redis"],
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup redis",
        f"image, execute {colors.RED}rapydo pull redis",
    )
    exec_command(
        capfd,
        "restore redis",
        f"image, execute {colors.RED}rapydo pull redis",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "redis")

    key = faker.pystr()
    value1 = f"old-{faker.pystr()}"
    value2 = f"new-{faker.pystr()}"

    # NOTE: sh -c is used so that $REDIS_PASSWORD is expanded inside the
    # container, without the need to introduce further nested " or '
    get_key = f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" get {key}\'"'
    set_key1 = (
        f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" set {key} {value1}\'"'
    )
    set_key2 = (
        f'shell redis "sh -c \'redis-cli --pass "$REDIS_PASSWORD" set {key} {value2}\'"'
    )

    exec_command(
        capfd,
        set_key1,
    )

    exec_command(capfd, get_key, value1)

    # Backup command on a running Redis
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    # Backup command on a stopped Redis
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )
    # Verify that, due to the dry run, no backup is executed
    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(
        capfd,
        "backup redis --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )
    # Verify that, due to the dry run, no backup is executed
    exec_command(
        capfd,
        "backup redis --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Create an additional backup to test the deletion (backups are now 3)
    exec_command(
        capfd,
        "backup redis",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup redis --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup redis --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finds 2: all the files above are ignored because they do not
        # match the date pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on redis...",
        "Backup completed: data/backup/redis/",
    )

    exec_command(capfd, "start backend redis")

    exec_command(
        capfd,
        set_key2,
    )

    exec_command(capfd, get_key, value2)

    # Restore command
    exec_command(
        capfd,
        "restore redis",
        "Please specify one of the following backup:",
        ".tar.gz",
    )
    exec_command(
        capfd,
        "restore redis invalid",
        "Invalid backup file, data/backup/redis/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore redis",
            "No backup found, the following folder "
            "does not exist: data/backup/redis",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore redis",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

    os.mkdir("data/backup/redis")

    exec_command(
        capfd,
        "restore redis",
        "No backup found, data/backup/redis is empty",
    )

    open("data/backup/redis/test.gz", "a").close()
    exec_command(
        capfd,
        "restore redis",
        "No backup found, data/backup/redis is empty",
    )

    open("data/backup/redis/test.tar.gz", "a").close()
    exec_command(
        capfd,
        "restore redis",
        "Please specify one of the following backup:",
        "test.tar.gz",
    )

    os.remove("data/backup/redis/test.gz")
    os.remove("data/backup/redis/test.tar.gz")

    # Test restore on redis (requires redis to be down)
    files = os.listdir("data/backup/redis")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    redis_dump_file = files[-1]

    exec_command(capfd, "remove redis")
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore redis {redis_dump_file}",
        "Starting restore on redis...",
        f"Restore from data/backup/redis/{redis_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")

    # 4) verify that the data matches point 1 again (restore completed)
    # postponed because redis needs time to start...
    exec_command(
        capfd,
        f"restore redis {redis_dump_file}",
        "Redis is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore redis {redis_dump_file} --force --restart backend",
        "Starting restore on redis...",
        f"Restore from data/backup/redis/{redis_dump_file} completed",
    )

    # Wait for redis to completely start up
    service_verify(capfd, "redis")

    exec_command(capfd, get_key, value1)
def test_all(capfd: Capture, faker: Faker) -> None:
    execute_outside(capfd, "backup mariadb")
    execute_outside(capfd, "restore mariadb")

    backup_folder = BACKUP_DIR.joinpath("mariadb")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="mysql",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup mariadb",
        f"image, execute {colors.RED}rapydo pull mariadb",
    )
    exec_command(
        capfd,
        "restore mariadb",
        f"image, execute {colors.RED}rapydo pull mariadb",
    )

    pull_images(capfd)
    start_project(capfd)

    exec_command(capfd, "status")
    service_verify(capfd, "sqlalchemy")

    # This will initialize mariadb
    exec_command(capfd, "shell backend 'restapi init'")

    # The helper below builds a triply-nested quoted command; for a query Q it
    # expands to:
    # shell mariadb "sh -c 'mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -D"$MYSQL_DATABASE" -e \"Q;\"'"
    def exec_query(query: str) -> str:
        command = 'shell mariadb "'
        command += 'sh -c \'mysql -uroot -p"$MYSQL_ROOT_PASSWORD" -D"$MYSQL_DATABASE" '
        command += f'-e \\"{query};\\"'
        # This is to close the sh -c 'command'
        command += "'"
        # This is to close the shell "command"
        command += '"'

        return command

    # Verify the initialization
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )

    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # A second backup is needed to test backup retention
    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )
    # Verify that, due to the dry run, no backup is executed
    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    exec_command(
        capfd,
        "backup mariadb --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )
    # Verify that, due to the dry run, no backup is executed
    exec_command(
        capfd,
        "backup mariadb --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Create an additional backup to test the deletion (backups are now 3)
    exec_command(
        capfd,
        "backup mariadb",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup mariadb --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.tar").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.tar.gz").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup mariadb --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finds 2: all the files above are ignored because they do not
        # match the date pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on mariadb...",
        "Backup completed: data/backup/mariadb/",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup mariadb",
        "The backup procedure requires mariadb running, please start your stack",
    )

    exec_command(
        capfd,
        "restore mariadb",
        "Please specify one of the following backup:",
        ".tar.gz",
    )
    exec_command(
        capfd,
        "restore mariadb invalid",
        "Invalid backup file, data/backup/mariadb/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore mariadb",
            "No backup found, the following folder "
            "does not exist: data/backup/mariadb",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore mariadb",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

    os.mkdir("data/backup/mariadb")

    exec_command(
        capfd,
        "restore mariadb",
        "No backup found, data/backup/mariadb is empty",
    )

    open("data/backup/mariadb/test.tar.gz", "a").close()
    exec_command(
        capfd,
        "restore mariadb",
        "Please specify one of the following backup:",
        "test.tar.gz",
    )
    os.remove("data/backup/mariadb/test.tar.gz")

    files = os.listdir("data/backup/mariadb")
    files = [f for f in files if f.endswith(".tar.gz")]
    files.sort()
    mariadb_dump_file = files[-1]

    exec_command(capfd, "start", "Stack started")

    # MariaDB restore without --force is not allowed while the container is running
    exec_command(
        capfd,
        f"restore mariadb {mariadb_dump_file}",
        "MariaDB is running and the restore will temporary stop it. "
        "If you want to continue add --force flag",
    )

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )
    # 2) modify the data
    exec_command(
        capfd,
        exec_query("update role SET description=name"),
    )
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tnormal_user",
    )
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore mariadb {mariadb_dump_file} --force",
        "Starting restore on mariadb...",
        "Opening backup file",
        "Removing current datadir",
        "Restoring the backup",
        "...done",
        "completed OK!",
        "Removing the temporary uncompressed folder",
        f"Restore from data/backup/mariadb/{mariadb_dump_file} completed",
    )

    if Configuration.swarm_mode:
        time.sleep(5)

    # 4) verify that the data matches point 1 again (restore completed)
    exec_command(
        capfd,
        exec_query("select name, description from role"),
        "normal_user\tUser",
    )
def test_all(capfd: Capture, faker: Faker) -> None:
    execute_outside(capfd, "backup neo4j")
    execute_outside(capfd, "restore neo4j")

    backup_folder = BACKUP_DIR.joinpath("neo4j")

    create_project(
        capfd=capfd,
        name=random_project_name(faker),
        auth="neo4j",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)

    exec_command(
        capfd,
        "backup neo4j",
        f"image, execute {colors.RED}rapydo pull neo4j",
    )
    exec_command(
        capfd,
        "restore neo4j",
        f"image, execute {colors.RED}rapydo pull neo4j",
    )

    pull_images(capfd)
    start_project(capfd)

    service_verify(capfd, "neo4j")

    # This will initialize neo4j
    exec_command(capfd, "shell backend 'restapi init'")

    time.sleep(25)
    # Just some extra delay: restapi init alone is not always enough...
    if Configuration.swarm_mode:
        time.sleep(30)

    # Verify the initialization
    cypher = "shell neo4j 'bin/cypher-shell"
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )

    # Backup command
    exec_command(
        capfd,
        "backup neo4j",
        "Neo4j is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )
    exec_command(
        capfd,
        "backup neo4j --force --restart backend --restart rabbit",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )
    # This is to verify that --force restarted neo4j
    exec_command(
        capfd,
        "backup neo4j",
        "Neo4j is running and the backup will temporary stop it. "
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        "backup invalid",
        "Invalid value for",
        "'invalid' is not one of 'mariadb', 'neo4j', 'postgres', 'rabbit', 'redis'",
    )

    exec_command(capfd, "remove", "Stack removed")

    exec_command(
        capfd,
        "backup neo4j",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Test backup retention
    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )
    # Verify that, due to the dry run, no backup is executed
    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    exec_command(
        capfd,
        "backup neo4j --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )
    # Verify that, due to the dry run, no backup is executed
    exec_command(
        capfd,
        "backup neo4j --max 1 --dry-run",
        "Dry run mode is enabled",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Create an additional backup to test the deletion (backups are now 3)
    exec_command(
        capfd,
        "backup neo4j",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Save the current number of backup files
    number_of_backups = len(list(backup_folder.glob("*")))

    # Verify the deletion
    exec_command(
        capfd,
        "backup neo4j --max 1",
        "deleted because exceeding the max number of backup files (1)",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    # Now the number of backups should be reduced by 1 (i.e. +1 -2)
    assert len(list(backup_folder.glob("*"))) == number_of_backups - 1

    # Verify that --max ignores files without the date pattern
    backup_folder.joinpath("xyz").touch(exist_ok=True)
    backup_folder.joinpath("xyz.ext").touch(exist_ok=True)
    backup_folder.joinpath("2020_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_01").touch(exist_ok=True)
    backup_folder.joinpath("9999_01_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_99_01-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_99-01_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-99_01_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_99_01.bak").touch(exist_ok=True)
    backup_folder.joinpath("2020_01_01-01_01_99.bak").touch(exist_ok=True)

    exec_command(
        capfd,
        "backup neo4j --max 999 --dry-run",
        "Dry run mode is enabled",
        # Still finds 2: all the files above are ignored because they do not
        # match the date pattern
        "Found 2 backup files, maximum not reached",
        "Starting backup on neo4j...",
        "Backup completed: data/backup/neo4j/",
    )

    exec_command(capfd, "start", "Stack started")

    # Just some extra delay, neo4j is a slow starter
    time.sleep(25)

    # Restore command
    exec_command(
        capfd,
        "restore neo4j",
        "Please specify one of the following backup:",
        ".dump",
    )
    exec_command(
        capfd,
        "restore neo4j invalid",
        "Invalid backup file, data/backup/neo4j/invalid does not exist",
    )

    with TemporaryRemovePath(BACKUP_DIR):
        exec_command(
            capfd,
            "restore neo4j",
            "No backup found, the following folder "
            "does not exist: data/backup/neo4j",
        )

    with TemporaryRemovePath(backup_folder):
        exec_command(
            capfd,
            "restore neo4j",
            f"No backup found, the following folder does not exist: {backup_folder}",
        )

    os.mkdir("data/backup/neo4j")

    exec_command(
        capfd,
        "restore neo4j",
        "No backup found, data/backup/neo4j is empty",
    )

    open("data/backup/neo4j/test.gz", "a").close()
    exec_command(
        capfd,
        "restore neo4j",
        "No backup found, data/backup/neo4j is empty",
    )

    open("data/backup/neo4j/test.dump", "a").close()
    exec_command(
        capfd,
        "restore neo4j",
        "Please specify one of the following backup:",
        "test.dump",
    )

    os.remove("data/backup/neo4j/test.gz")
    os.remove("data/backup/neo4j/test.dump")

    # Test restore on neo4j (requires neo4j to be down)
    files = os.listdir("data/backup/neo4j")
    files = [f for f in files if f.endswith(".dump")]
    files.sort()
    neo4j_dump_file = files[-1]

    time.sleep(25)

    # Here we test the restore procedure:
    # 1) verify some data in the database
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )
    # 2) modify the data
    exec_command(capfd, f'{cypher} "match (r: Role) SET r.description = r.name"\'')
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "normal_user"',
    )

    exec_command(capfd, "remove")
    # 3) restore the dump
    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file}",
        "Starting restore on neo4j...",
        "Done: ",
        f"Restore from data/backup/neo4j/{neo4j_dump_file} completed",
    )

    exec_command(capfd, "start", "Stack started")

    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file}",
        "Neo4j is running and the restore will temporary stop it.",
        "If you want to continue add --force flag",
    )

    exec_command(
        capfd,
        f"restore neo4j {neo4j_dump_file} --force --restart backend",
        "Starting restore on neo4j...",
        "Done: ",
        f"Restore from data/backup/neo4j/{neo4j_dump_file} completed",
        "Restarting services in 20 seconds...",
        "Restarting services in 10 seconds...",
    )

    # Wait for neo4j to completely start up
    service_verify(capfd, "neo4j")

    # 4) verify that the data matches point 1 again (restore completed)
    exec_command(
        capfd,
        f'{cypher} "match (r: Role) return r.name, r.description"\'',
        '"normal_user", "User"',
    )