def should_run(self) -> bool:
    """
    Determine whether this backup is due to run today.

    The configured frequency is only the last three fields of a cron
    pattern; hour and minute are wild-carded so just the date portion
    is compared.

    Returns:
        True if the cron frequency matches today's (UTC) date, otherwise False.
    """
    # Expand the 3-field frequency into a full 5-field cron expression.
    cron_frequency = f"* * {self.frequency}"

    try:
        expression = cronex.CronExpression(cron_frequency)
    except ValueError as e:
        logger.error(
            f"Frequency for remote strategy [{self.name}] is not valid [{self.frequency}]. [{e}]"
        )
        return False

    # check_trigger takes a (year, month, day, hour, minute) tuple in UTC.
    if expression.check_trigger(time.gmtime(time.time())[:5]):
        return True

    logger.debug(
        f"Backup strategy [{self.name}] will not run due to frequency [{self.frequency}] not matching today."
    )
    return False
def start(self, cli_context: CliContext, service_name: str = None) -> CompletedProcess:
    """
    Deploy the full application stack with `docker stack deploy`.

    Args:
        cli_context (CliContext): The current CLI context.
        service_name (str): Optional - must be None. Docker Swarm cannot
            start a single service, so a non-None value is rejected.

    Returns:
        CompletedProcess: The completed process and its exit code.
    """
    # Swarm deploys the whole stack; starting one service is unsupported.
    if service_name is not None:
        logger.error(
            "Docker Swarm orchestrator cannot start individual services. Attempted to start [%s].",
            service_name,
        )
        return CompletedProcess(args=None, returncode=1)

    compose_files = decrypt_docker_compose_files(
        cli_context,
        self.docker_compose_file,
        self.docker_compose_override_directory,
    )
    if not compose_files:
        logger.error(
            "No valid docker compose files were found. Expected file [%s] or files in directory [%s]",
            self.docker_compose_file,
            self.docker_compose_override_directory,
        )
        return CompletedProcess(args=None, returncode=1)

    subcommand = ["deploy"]
    for compose_file in compose_files:
        subcommand.extend(("--compose-file", str(compose_file)))

    return self.__docker_stack(cli_context, subcommand)
def check_environment_variable_defined(env_variables: Iterable[str], error_message_template: str, exit_message: str) -> bool:
    """Check that each of the given environment variables is defined.

    Args:
        env_variables (Iterable[str]): the environment variables to check
        error_message_template (str): a logging template for the per-variable
            error message (the variable name is supplied for two placeholders)
        exit_message (str): the message logged once if any variable is missing

    Returns:
        [bool]: True if all environment variables are defined, otherwise False.
    """
    all_defined = True
    for env_variable in env_variables:
        value = os.environ.get(env_variable)
        if value is None:
            # The template expects the variable name twice.
            logger.error(error_message_template, env_variable, env_variable)
            all_defined = False
            continue
        logger.debug(
            f"Confirmed environment variable is set - '{env_variable}' = '{value}'"
        )

    if not all_defined:
        logger.error(exit_message)

    return all_defined
def patched_subprocess_run(docker_compose_command, capture_output=True, input=None):
    """Test double for subprocess.run that never executes anything.

    Logs the docker-compose command (at error level, so it always appears
    in test output) for assertion by the tests, then reports success.
    The `capture_output` and `input` parameters are accepted purely to
    mirror the real subprocess.run signature used by the code under test.
    """
    # Print out the docker-compose command to perform test validation
    logger.error(
        f"PYTEST_PATCHED_DOCKER_COMPOSE_COMMAND=[{docker_compose_command}]"
    )

    # Always succeed - we don't care if the command should have failed or not
    return subprocess.CompletedProcess(args=None, returncode=0)
def error_and_exit(message: str):
    """Log an error message and exit the program with a non-zero status.

    Args:
        message (str): the error message to log before exiting.

    Raises:
        SystemExit: always, with exit code 1; the chained cause exception
            carries the message so callers can capture it via __cause__.
    """
    logger.error(message)

    # Raise a SystemExit exception with another exception with the error message
    # as the code so we can capture it externally.
    raise SystemExit(1) from SystemExit(message)
def execute_compose(
    cli_context: CliContext,
    command: Iterable[str],
    docker_compose_file_relative_path: Path,
    docker_compose_override_directory_relative_path: Path,
    stdin_input: str = None,
) -> CompletedProcess:
    """Builds and executes a docker-compose command.

    Args:
        cli_context (CliContext): The current CLI context.
        command (Iterable[str]): The command to execute with docker-compose.
        docker_compose_file_relative_path (Path): The relative path to the docker-compose file.
            Path is relative to the generated configuration directory.
        docker_compose_override_directory_relative_path (Path): The relative path to a directory
            containing docker-compose override files. Path is relative to the generated
            configuration directory.
        stdin_input (str): Optional - defaults to None. String passed through to the
            subprocess via stdin.

    Returns:
        CompletedProcess: The completed process and its exit code.
    """
    docker_compose_command = [
        "docker-compose",
        "--project-name",
        cli_context.get_project_name(),
    ]

    compose_files = decrypt_docker_compose_files(
        cli_context,
        docker_compose_file_relative_path,
        docker_compose_override_directory_relative_path,
    )
    if len(compose_files) == 0:
        logger.error(
            "No valid docker compose files were found. Expected file [%s] or files in directory [%s]",
            docker_compose_file_relative_path,
            docker_compose_override_directory_relative_path,
        )
        return CompletedProcess(args=None, returncode=1)

    for compose_file in compose_files:
        docker_compose_command.extend(("--file", str(compose_file)))

    if command is not None:
        docker_compose_command.extend(command)

    # Fix: log the command once in formatted form. Previously the raw list was
    # also logged on its own debug line, duplicating this information.
    logger.debug("Running [%s]", " ".join(docker_compose_command))

    # subprocess.run requires bytes for stdin when not in text mode.
    encoded_input = stdin_input.encode("utf-8") if stdin_input is not None else None
    logger.debug(f"Encoded input: [{encoded_input}]")

    result = subprocess.run(
        docker_compose_command, capture_output=True, input=encoded_input
    )

    return result
def shutdown(self, cli_context: CliContext, service_name: str = None) -> CompletedProcess:
    """
    Tear down the deployed stack with `docker stack rm`.

    Args:
        cli_context (CliContext): The current CLI context.
        service_name (str): Optional - must be None. Docker Swarm cannot
            stop a single service, so a non-None value is rejected.

    Returns:
        CompletedProcess: The completed process and its exit code.
    """
    # Swarm removes the whole stack; stopping one service is unsupported.
    if service_name is None:
        return self.__docker_stack(cli_context, ("rm",))

    logger.error(
        "Docker Swarm orchestrator cannot stop individual services. Attempted to shutdown [%s].",
        service_name,
    )
    return CompletedProcess(args=None, returncode=1)
def execute_compose(
    cli_context: CliContext,
    command: Iterable[str],
    docker_compose_file_relative_path: Path,
    docker_compose_override_directory_relative_path: Path,
) -> CompletedProcess:
    """Assemble a docker-compose command line and run it.

    Args:
        cli_context (CliContext): The current CLI context.
        command (Iterable[str]): The command to execute with docker-compose.
        docker_compose_file_relative_path (Path): The relative path to the docker-compose file.
            Path is relative to the generated configuration directory.
        docker_compose_override_directory_relative_path (Path): The relative path to a directory
            containing docker-compose override files. Path is relative to the generated
            configuration directory.

    Returns:
        CompletedProcess: The completed process and its exit code.
    """
    base_command = [
        "docker-compose",
        "--project-name",
        cli_context.get_project_name(),
    ]

    compose_files = decrypt_docker_compose_files(
        cli_context,
        docker_compose_file_relative_path,
        docker_compose_override_directory_relative_path,
    )

    # Nothing to run against - bail out with a failed process result.
    if not compose_files:
        logger.error(
            "No valid docker compose files were found. Expected file [%s] or files in directory [%s]",
            docker_compose_file_relative_path,
            docker_compose_override_directory_relative_path,
        )
        return CompletedProcess(args=None, returncode=1)

    for compose_file in compose_files:
        base_command.extend(("--file", str(compose_file)))

    if command is not None:
        base_command.extend(command)

    logger.debug("Running [%s]", " ".join(base_command))
    return run(base_command)
def service_name_verifier(
    service_names: tuple[str, ...], valid_service_names: List[str]
) -> bool:
    """Verify that every requested service name is a known service.

    Logs an error for each unknown name.

    Args:
        service_names (tuple[str, ...]): The list of service names to check.
        valid_service_names [List[str]]: The list of valid service names.

    Returns:
        bool: True when no unknown service names were supplied.
    """
    unknown_names = set(service_names).difference(valid_service_names)
    for service_name in unknown_names:
        logger.error("Service [%s] does not exist", service_name)
    return not unknown_names
def restore(self, ctx, backup_filename: Path):
    """Restore application data and configuration from the provided local backup `.tgz` file.

    This will create a backup of the existing data and config, then extract the
    backup archive's `conf` and `data` members over the current `conf` and `data`
    directories. Anything not present in the backup (such as files matching the
    exclude glob patterns) is left alone.

    Args:
        ctx: The current Click Context; `ctx.obj` is the CliContext.
        backup_filename (string): The name of the file to use in restoring data. The path
            of the file will be pulled from `CliContext.obj.backup_dir`.
    """
    cli_context: CliContext = ctx.obj
    logger.info(
        f"Initiating system restore with backup [{backup_filename}]")

    # Check that the backup file exists.
    backup_dir: Path = cli_context.backup_dir
    backup_name: Path = Path(os.path.join(backup_dir, backup_filename))
    if not backup_name.is_file():
        # error_and_exit raises SystemExit, so nothing below runs on a miss.
        error_and_exit(f"Backup file [{backup_name}] not found.")

    # Perform a backup of the existing application config and data so the
    # pre-restore state is recoverable.
    logger.debug("Backup existing application data and configuration")
    restore_backup_name = self.backup(
        ctx, allow_rolling_deletion=False
    )  # False ensures we don't accidentally delete our backup
    logger.debug(
        f"Backup(s) complete. Generated backups: [{restore_backup_name}]")

    # Extract conf and data directories from the tar.
    # __members filters the archive to entries under the given top-level
    # directory name, so each extractall only touches its own tree.
    # NOTE(review): extractall is used without a `filter`/path sanitisation;
    # a crafted archive could traverse outside the target dirs - confirm
    # backups are trusted input or add member path validation.
    try:
        with tarfile.open(backup_name) as tar:
            conf_dir: Path = cli_context.configuration_dir
            tar.extractall(conf_dir, members=self.__members(
                tar, os.path.basename(conf_dir)))

            data_dir: Path = cli_context.data_dir
            tar.extractall(data_dir, members=self.__members(
                tar, os.path.basename(data_dir)))
    except Exception as e:
        # Best-effort: log the failure but still report completion below.
        logger.error(f"Failed to extract backup. Reason: {e}")

    logger.info("Restore complete.")
def verify_service_names(
    self, cli_context: CliContext, service_names: tuple[str, ...]
) -> bool:
    """Validate the given service names against the stack's configured services.

    Args:
        cli_context (CliContext): The current CLI context.
        service_names (tuple[str, ...]): The service names to validate.

    Returns:
        bool: True when every name exists (or none were supplied), else False.
    """
    # No names supplied means nothing to validate.
    if not service_names:
        return True

    result = self.__docker_stack(cli_context, ["config", "--services"])
    if result.returncode != 0:
        error_msg = result.stderr.decode()
        logger.error(
            f"An unexpected error occured while verifying services. Error: {error_msg}"
        )
        return False

    # Converts the byte type into list of names, and removes trailing empty string
    valid_service_names = result.stdout.decode().split("\n")[:-1]
    logger.debug("Valid Services: %s", ", ".join(valid_service_names))
    return service_name_verifier(service_names, valid_service_names)
def get_remote_backups(self) -> List[RemoteBackup]:
    """Build the remote strategy objects for this backup configuration.

    Misconfigured strategies are logged and skipped rather than aborting.

    Returns:
        List[RemoteBackup]: A list of configured remote backups.
    """
    strategies: List[RemoteBackup] = []
    for remote_configuration in self.remote_backups:
        try:
            strategies.append(RemoteBackup.from_dict(remote_configuration))
        except TypeError as e:
            # A bad config entry should not prevent the other strategies.
            logger.error(f"Failed to create remote strategy - {e}")
    return strategies
def backup(self, ctx: Context, backup_name: str = None, allow_rolling_deletion: bool = True) -> List[Path]:
    """
    Perform all backups present in the configuration file.

    For each configured backup (optionally filtered to a single named backup),
    runs the local backup if its frequency matches today, then pushes the
    resulting file to each configured remote strategy.

    Args:
        ctx: (Context). The current Click Context.
        backup_name: (str). Optional - if given, only the backup with this
            name is run; all others are skipped.
        allow_rolling_deletion: (bool). Enable rolling backups (default True). Set to False
            to disable rolling backups and keep all backup files.

    Returns:
        List[Path]: The list of backup files generated by running all backups.
        NOTE(review): the list actually contains (backup name, backup filename)
        tuples, not bare Paths - the annotation looks stale; confirm callers.
    """
    cli_context: CliContext = ctx.obj
    logger.info("Initiating system backup")

    # Get the key file for decrypting encrypted values used in a remote backup.
    key_file = cli_context.get_key_file()

    completed_backups = []
    for backup_config in self.backups:
        backup = BackupConfig.from_dict(backup_config)

        # Honour the single-backup filter if one was requested.
        if backup_name is not None and backup.name != backup_name:
            logger.debug(
                f"Skipping backup [{backup.name}] - only running backup [{backup_name}]"
            )
            continue

        # Check if the set frequency matches today, if it does not then do not continue with the current backup.
        if not backup.should_run():
            continue

        # create the backup
        logger.debug(f"Backup [{backup.name}] running...")
        backup_filename = backup.backup(ctx, allow_rolling_deletion)
        completed_backups.append((backup.name, backup_filename))
        logger.debug(
            f"Backup [{backup.name}] complete. Output file: [{backup_filename}]"
        )

        # Get any remote backup strategies.
        remote_backups = backup.get_remote_backups()

        # Execute each of the remote backup strategies with the local backup file.
        # A remote failure is logged but does not abort the remaining strategies
        # or backups.
        for remote_backup in remote_backups:
            try:
                logger.debug(
                    f"Backup [{backup.name}] remote backup [{remote_backup.name}] running..."
                )
                remote_backup.backup(backup_filename, key_file)
                logger.debug(
                    f"Backup [{backup.name}] remote backup [{remote_backup.name}] complete."
                )
            except Exception as e:
                logger.error(
                    f"Error while executing remote strategy [{remote_backup.name}] - {e}"
                )
                traceback.print_exc()

    logger.info("Backups complete.")
    if len(completed_backups) > 0:
        logger.debug(f"Completed backups [{completed_backups}].")
    else:
        logger.warning(
            "No backups successfully ran or completed. Use --debug flag for more detailed logs."
        )
    return completed_backups