def rotate_groups(self, max_backup_groups):
    """Removes the oldest backup groups, keeping at most max_backup_groups of them."""

    try:
        # Collect valid non-empty groups, newest first
        valid_groups = []

        for group in self.__groups(check = True, reverse = True):
            try:
                has_backups = bool(self.backups(group, check = True, orig_error = True))
            except EnvironmentError as error:
                if error.errno == errno.ENOENT:
                    # Just in case: ignore race conditions
                    pass
                else:
                    LOG.error(
                        "Error while rotating backup groups: "
                        "Unable to read backup group '%s': %s.", group, psys.e(error))
            else:
                if has_backups:
                    valid_groups.append(group)

        # Everything beyond the limit is stale and gets removed
        for group in valid_groups[max_backup_groups:]:
            LOG.info("Removing backup group '%s'...", group)

            shutil.rmtree(self.group_path(group),
                onerror = lambda func, path, excinfo:
                    LOG.error("Failed to remove '%s': %s.",
                        path, psys.e(excinfo[1])))

            # Notify only if the group directory is really gone
            if not os.path.exists(self.group_path(group)):
                self.__on_group_deleted(group)
    except Exception as e:
        LOG.error("Failed to rotate backup groups: %s", e)
def __wait_pid_thread(self, fork_lock, termination_fd):
    """Waits for the process termination.

    Runs in a dedicated thread: reaps the child with waitpid(), stores
    its exit status in self.__status and finally closes termination_fd
    to signal that the process has terminated.
    """

    try:
        # Wait for fork() process completion: acquiring the lock guarantees
        # that fork() has either completed or failed and self.__pid is valid
        with fork_lock:
            pass

        # Wait only if we've successfully forked
        if self.__pid is None:
            return

        try:
            status = eintr_retry(os.waitpid)(self.__pid, 0)[1]
        except Exception as e:
            LOG.error("Failed to waitpid() process %s: %s.", self.__pid, psys.e(e))
            # 127 is the shell convention for "command failed to run"
            self.__status = 127
        else:
            if os.WIFEXITED(status):
                self.__status = os.WEXITSTATUS(status)
                LOG.debug("Command %s terminated with %s status code.",
                    self, self.__status)
            elif os.WIFSIGNALED(status):
                signum = os.WTERMSIG(status)
                LOG.debug("Command %s terminated due to receipt of %s signal.",
                    self, signum)
                # 128 + signal number mimics the shell's exit-status convention
                self.__status = 128 + signum
            else:
                LOG.error("Command %s terminated due to unknown reason.", self)
                self.__status = 127
    except Exception:
        LOG.exception("PID waiting thread crashed.")
    finally:
        # Closing this fd presumably wakes up the communication poller —
        # TODO(review): confirm against the poll loop
        try:
            eintr_retry(os.close)(termination_fd)
        except Exception as e:
            LOG.error("Unable to close a pipe: %s.", psys.e(e))
def __restore_attributes(self, tar_info, path):
    """Applies ownership, permissions and timestamps of tar_info to the restored file."""

    def handle_error(error, message):
        # ENOENT means the file vanished or was never restored — the
        # failure has already been reported elsewhere
        if not psys.is_errno(error, errno.ENOENT):
            LOG.error(message, path, psys.e(error))
            self.__ok = False

    # Ownership can be changed only when running as root
    if os.geteuid() == 0:
        try:
            try:
                uid = utils.getpwnam(tar_info.uname)[2]
            except KeyError:
                # Unknown user name — fall back to the numeric ID
                uid = tar_info.uid

            try:
                gid = utils.getgrnam(tar_info.gname)[2]
            except KeyError:
                # Unknown group name — fall back to the numeric ID
                gid = tar_info.gid

            os.lchown(path, uid, gid)
        except Exception as error:
            handle_error(error, "Failed to set owner of '%s': %s.")

    # chmod() would follow a symlink, so skip permissions for symlinks
    if not tar_info.issym():
        try:
            os.chmod(path, tar_info.mode)
        except Exception as error:
            handle_error(error, "Failed to change permissions of '%s': %s.")

    try:
        os.utime(path, ( tar_info.mtime, tar_info.mtime ))
    except Exception as error:
        handle_error(error, "Failed to change access and modification time of '%s': %s.")
def __decompress(self, path, compressed_file):
    """Decompresses a compressed tar archive into a temporary file.

    On success sets self.__file to a tarfile object opened over the
    decompressed copy. On failure the temporary file is dropped and the
    error is logged; non-Exception BaseExceptions (KeyboardInterrupt,
    SystemExit, ...) are re-raised after cleanup.
    """

    LOG.debug("Decompressing '%s'...", path)

    try:
        # /var/tmp rather than /tmp: backup data may be large and /tmp is
        # often a size-limited tmpfs — TODO(review): confirm intent
        self.__temp_file = tempfile.NamedTemporaryFile(dir = "/var/tmp")
        shutil.copyfileobj(compressed_file, self.__temp_file)
        self.__temp_file.flush()
    except BaseException as e:
        if self.__temp_file is not None:
            try:
                self.__temp_file.close()
            except Exception as close_error:
                # BUG FIX: this handler previously rebound the outer 'e'
                # (which Python 3 then unbinds when the handler exits),
                # making the isinstance() check below raise NameError
                # whenever close() failed.
                LOG.error("Failed to delete a temporary file '%s': %s.",
                    self.__temp_file.name, psys.e(close_error))
            finally:
                self.__temp_file = None

        # Propagate KeyboardInterrupt/SystemExit and friends
        if not isinstance(e, Exception):
            raise

        LOG.error("Failed to decompress '%s': %s.", path, psys.e(e))
    else:
        LOG.debug("Decompressing finished.")
        self.__file = tarfile.open(self.__temp_file.name)
def __execute(self, stdout): """Executes the command.""" # Configure the standard I/O file descriptors self.__configure_stdio(stdout) # Fork the process --> fork_lock = threading.Lock() with fork_lock: # Allocate all resources before fork() to guarantee that we will # be able to control the process execution. # Execution thread --> poll = psys.poll.Poll() try: self.__communication_thread = threading.Thread( target=self.__communication_thread_func, args=(fork_lock, poll)) self.__communication_thread.daemon = True self.__communication_thread.start() except: poll.close() raise # Execution thread <-- # Wait thread --> try: self.__termination_fd, termination_fd = os.pipe() except Exception as e: raise Error("Unable to create a pipe: {0}.", psys.e(e)) try: self.__wait_thread = threading.Thread( target=self.__wait_pid_thread, args=[fork_lock, termination_fd]) self.__wait_thread.daemon = True self.__wait_thread.start() except BaseException as error: try: eintr_retry(os.close)(termination_fd) except Exception as e: LOG.error("Unable to close a pipe: %s.", psys.e(e)) raise error # Wait thread <-- self.__pid = os.fork() if self.__pid: self.__state = _PROCESS_STATE_RUNNING else: self.__child()
def _load_metadata(backup_path, handle_metadata):
    """Reads a backup's metadata file, calling handle_metadata for every record.

    Returns True on success, False if the metadata couldn't be read.
    """

    metadata_path = os.path.join(backup_path, _METADATA_FILE_NAME)

    LOG.debug("Loading backup metadata '%s'...", metadata_path)

    try:
        with bz2.BZ2File(metadata_path, mode = "r") as metadata_file:
            for raw_line in metadata_file:
                record = raw_line.rstrip(b"\r\n")

                # Skip blank lines
                if record:
                    # Each record consists of up to four space-separated fields
                    handle_metadata(*record.decode(_ENCODING).split(" ", 3))
    except Exception as e:
        LOG.error("Failed to load backup metadata '%s': %s.", metadata_path, psys.e(e))
        return False

    LOG.debug("Backup metadata '%s' has been successfully loaded.", metadata_path)
    return True
def cancel_backup(self, group, name):
    """Drops all temporary data of the specified backup."""

    def log_failure(func, path, excinfo):
        # Removal is best-effort: log and keep going
        LOG.error("Failed to remove backup temporary data '%s': %s.",
            path, psys.e(excinfo[1]))

    shutil.rmtree(self.backup_path(group, name, temp = True), onerror = log_failure)
def close(self, read=True, write=True):
    """Closes the requested ends of the pipe.

    A descriptor is set to None only after it has been closed successfully.
    """

    for requested, attr in ((read, "read"), (write, "write")):
        fd = getattr(self, attr)

        if requested and fd is not None:
            try:
                eintr_retry(os.close)(fd)
            except Exception as e:
                LOG.error("Unable to close a pipe: %s.", psys.e(e))
            else:
                setattr(self, attr, None)
def __backup_path(self, path, filters, toplevel):
    """Backups the specified path.

    Recursively backs up `path` (a file, symlink or directory) applying
    the (allow, regex) `filters` relative to `toplevel`. Returns True if
    everything under `path` was backed up successfully.
    """

    ok = True

    LOG.info("Backing up '%s'...", path)

    try:
        # lstat(): symlinks themselves are backed up, not their targets
        stat_info = os.lstat(path)

        if stat.S_ISREG(stat_info.st_mode):
            self.__backup_file(path)
        else:
            if stat.S_ISLNK(stat_info.st_mode):
                try:
                    link_target = os.readlink(path)
                except EnvironmentError as e:
                    # EINVAL: the path is no longer a symlink — it changed
                    # type between lstat() and readlink()
                    if e.errno == errno.EINVAL:
                        raise FileTypeChangedError()
                    else:
                        raise
            else:
                link_target = None

            self.__backup.add_file(path, stat_info, link_target = link_target)

        if stat.S_ISDIR(stat_info.st_mode):
            prefix = toplevel + os.path.sep

            for filename in os.listdir(path):
                file_path = os.path.join(path, filename)

                # First filter whose regex matches (relative to toplevel)
                # decides; unmatched paths are backed up (for/else below).
                for allow, regex in filters:
                    if not file_path.startswith(prefix):
                        raise LogicalError()

                    if regex.search(file_path[len(prefix):]):
                        if allow:
                            self.__backup_path(file_path, filters, toplevel)
                        else:
                            LOG.info("Filtering out '%s'...", file_path)
                        break
                else:
                    self.__backup_path(file_path, filters, toplevel)
    except FileTypeChangedError as e:
        LOG.error("Failed to backup '%s': it has suddenly changed its type during the backup.", path)
        ok = False
    except Exception as e:
        # A file that vanished mid-backup is only a warning, unless it is
        # the toplevel item itself
        if psys.is_errno(e, (errno.ENOENT, errno.ENOTDIR)) and path != toplevel:
            LOG.warning("Failed to backup '%s': it has suddenly vanished.", path)
        else:
            LOG.error("Failed to backup '%s': %s.", path, psys.e(e))
            ok = False

    return ok
def close(self):
    """Releases the underlying epoll instance, if any."""

    if self.__epoll is None:
        return

    try:
        eintr_retry(self.__epoll.close)()
    except Exception as e:
        LOG.error("Unable to close an epoll instance: %s.", psys.e(e))
    else:
        # Marked closed only on success
        self.__epoll = None
def __close(self):
    """Closes all opened files.

    Raises Error if closing either file fails; the metadata file is
    closed even when closing the data file raised.
    """

    try:
        if self.__data is not None:
            try:
                self.__data.close()
            except Exception as e:
                raise Error("Unable to close backup data file: {}.", psys.e(e))
            finally:
                # Dropped even on failure so close() is not retried
                self.__data = None
    finally:
        # Always attempt to close the metadata file as well; if both
        # closes fail, the metadata error supersedes the data error
        if self.__metadata is not None:
            try:
                self.__metadata.close()
            except Exception as e:
                raise Error("Unable to close backup metadata file: {}.", psys.e(e))
            finally:
                self.__metadata = None
def __load_all_backup_metadata(self, trust_modify_time):
    """Loads all metadata from previous backups of this group."""

    try:
        # Newest backup first
        all_backups = self.__storage.backups(self.__group, reverse = True)

        for index, backup in enumerate(all_backups):
            # File fingerprints are loaded only from the most recent
            # backup, and only when modification times may be trusted
            newest = (index == 0)
            self.__load_backup_metadata(backup,
                with_prev_files_info = trust_modify_time and newest)
    except Exception as e:
        LOG.error("Failed to load metadata from previous backups: %s.", psys.e(e))
def __groups(self, check = False, reverse = False):
    """Returns a sorted list of all backup groups.

    With check = True only names matching the group-name pattern are
    returned; otherwise only hidden (dot-prefixed) names are excluded.
    """

    if check:
        accepted = lambda name: _GROUP_NAME_RE.search(name)
    else:
        accepted = lambda name: not name.startswith(".")

    try:
        names = os.listdir(self.__backup_root)
    except EnvironmentError as e:
        raise Error("Error while reading backup root directory '{}': {}.",
            self.__backup_root, psys.e(e))

    return sorted((name for name in names if accepted(name)), reverse = reverse)
def __wait_pid_thread(self, fork_lock, termination_fd):
    """Waits for the process termination.

    Thread body: reaps the child via waitpid(), translates its status
    into self.__status (shell conventions: exit code, 128 + signal, or
    127 on failure) and closes termination_fd when done.
    """

    try:
        # Wait for fork() process completion — the lock is held by
        # __execute() until fork() has returned
        with fork_lock:
            pass

        # Wait only if we've successfully forked
        if self.__pid is None:
            return

        try:
            status = eintr_retry(os.waitpid)(self.__pid, 0)[1]
        except Exception as e:
            LOG.error("Failed to waitpid() process %s: %s.", self.__pid, psys.e(e))
            # 127: conventional "failed to run" status
            self.__status = 127
        else:
            if os.WIFEXITED(status):
                self.__status = os.WEXITSTATUS(status)
                LOG.debug("Command %s terminated with %s status code.",
                    self, self.__status)
            elif os.WIFSIGNALED(status):
                signum = os.WTERMSIG(status)
                LOG.debug(
                    "Command %s terminated due to receipt of %s signal.",
                    self, signum)
                # 128 + N: conventional status for death by signal N
                self.__status = 128 + signum
            else:
                LOG.error("Command %s terminated due to unknown reason.", self)
                self.__status = 127
    except Exception:
        LOG.exception("PID waiting thread crashed.")
    finally:
        # Signal termination to whoever polls the other end of the pipe
        try:
            eintr_retry(os.close)(termination_fd)
        except Exception as e:
            LOG.error("Unable to close a pipe: %s.", psys.e(e))
def __init__(self, backup_path, restore_path = None, in_place = False):
    """Opens the backup at backup_path for restoring.

    restore_path defaults to a directory named after the backup.
    in_place = True reads the backup data without decompressing it to a
    temporary file first (saves disk space, presumably slower — TODO
    confirm against CompressedTarFile).
    """

    # Backup name
    self.__name = None

    # Backup group name
    self.__group = None

    # Backup data storage abstraction
    self.__storage = None

    # Restore path
    self.__restore_path = restore_path

    # Don't use extra disc space by decompressing backup files
    self.__in_place = in_place

    # Current object state
    self.__state = _STATE_OPENED

    # Data file
    self.__data = None

    # Extern files
    self.__extern_files = {}

    # All backups with extern files with cached metadata
    self.__backups = []

    # False if something went wrong during the restore
    self.__ok = True

    try:
        LOG.info("Restoring backup '%s'...", backup_path)

        self.__name, self.__group, self.__storage = Storage.create(backup_path)

        # Restore into a directory named after the backup by default
        if self.__restore_path is None:
            self.__restore_path = self.__name

        try:
            self.__data = utils.CompressedTarFile(
                os.path.join(backup_path, _DATA_FILE_NAME),
                decompress = not self.__in_place)
        except Exception as e:
            raise Error("Unable to open data of '{}' backup: {}.", backup_path, psys.e(e))

        self.__init_metadata_cache()
    except:
        # Don't leak opened files if initialization fails half-way
        self.close()
        raise
def commit_backup(self, group, name):
    """Commits written backup data by moving it out of its temporary location."""

    temp_path = self.backup_path(group, name, temp = True)
    final_path = self.backup_path(group, name)

    try:
        # rename() makes the commit atomic on the same filesystem
        os.rename(temp_path, final_path)
    except Exception as e:
        raise Error("Unable to rename backup data directory '{}' to '{}': {}.",
            temp_path, final_path, psys.e(e))

    self.__on_backup_created(group, name, final_path)
def redirect_fd(path, fd, write=True, append=False):
    """Redirects the file descriptor fd to (write) or from (read) the file at path."""

    try:
        if write:
            flags = os.O_WRONLY | os.O_CREAT
            if append:
                flags |= os.O_APPEND
            file_fd = eintr_retry(os.open)(path, flags, 0o666)
        else:
            file_fd = eintr_retry(os.open)(path, os.O_RDONLY)

        # Duplicate over fd, then drop the helper descriptor
        try:
            eintr_retry(os.dup2)(file_fd, fd)
        finally:
            eintr_retry(os.close)(file_fd)
    except Exception as e:
        source = fd_name[fd] if write else "'" + path + "'"
        target = "'" + path + "'" if write else fd_name[fd]
        raise Error("Unable to redirect {0} to {1}: {2}", source, target, psys.e(e))
def backups(self, group, check = False, reverse = False, orig_error = False):
    """Returns a sorted list of all backups from the specified group.

    With check = True only names matching the backup-name pattern are
    returned. With orig_error = True directory-read failures propagate
    as the original EnvironmentError instead of being wrapped in Error.
    """

    group_path = self.group_path(group)

    if check:
        accepted = lambda name: _BACKUP_NAME_RE.search(name)
    else:
        accepted = lambda name: not name.startswith(".")

    try:
        entries = os.listdir(group_path)
    except EnvironmentError as e:
        if orig_error:
            raise
        raise Error("Error while reading backup group directory '{}': {}.",
            group_path, psys.e(e))

    return sorted((entry for entry in entries if accepted(entry)), reverse = reverse)
def __close(self):
    """
    Frees all allocated resources unneeded after the process termination
    or its failed execution.
    """

    # Pipe.close() logs failures instead of raising, so every pipe gets
    # a close attempt
    for pipe in self.__pipes:
        pipe.close()

    self.__pipes.clear()

    if self.__termination_fd is not None:
        try:
            eintr_retry(os.close)(self.__termination_fd)
        except Exception as e:
            LOG.error("Unable to close a pipe: %s.", psys.e(e))
        else:
            self.__termination_fd = None
def __create_group(self):
    """Creates a new backup group (named after the current time) and returns its name."""

    group = time.strftime(_GROUP_NAME_FORMAT, time.localtime())

    LOG.info("Creating backup group '%s'.", group)

    group_path = self.group_path(group)

    try:
        os.mkdir(group_path)
    except EnvironmentError as e:
        # An already-existing directory is fine — another run may have
        # created it within the same time slot
        if e.errno != errno.EEXIST:
            raise Error("Unable to create a new backup group '{}': {}.",
                group_path, psys.e(e))

    self.__on_group_created(group)

    return group
def redirect_fd(path, fd, write=True, append=False):
    """Replaces descriptor fd with one opened on path (for reading or writing)."""

    try:
        if not write:
            file_fd = eintr_retry(os.open)(path, os.O_RDONLY)
        else:
            open_flags = os.O_WRONLY | os.O_CREAT | (os.O_APPEND if append else 0)
            file_fd = eintr_retry(os.open)(path, open_flags, 0o666)

        try:
            eintr_retry(os.dup2)(file_fd, fd)
        finally:
            # The temporary descriptor is no longer needed after dup2()
            eintr_retry(os.close)(file_fd)
    except Exception as e:
        raise Error("Unable to redirect {0} to {1}: {2}",
            fd_name[fd] if write else "'" + path + "'",
            "'" + path + "'" if write else fd_name[fd], psys.e(e))
def __backup_file(self, path):
    """Backups the specified file.

    Opens the file with self.__open_flags (retrying without O_NOATIME on
    EPERM), detects symlink races via ELOOP, and hands the open file
    object to the backup together with its fstat() info.
    """

    try:
        try:
            fd = eintr_retry(os.open)(path, self.__open_flags)
        except EnvironmentError as e:
            # If O_NOATIME flag was specified, but the effective user ID
            # of the caller did not match the owner of the file and the
            # caller was not privileged (CAP_FOWNER), the EPERM will be
            # returned.
            if (
                hasattr(os, "O_NOATIME") and
                e.errno == errno.EPERM and
                self.__open_flags & os.O_NOATIME
            ):
                # Just disable this flag on a first EPERM error
                LOG.debug("Got EPERM error. Disabling O_NOATIME for file opening operations...")
                self.__open_flags &= ~os.O_NOATIME

                fd = eintr_retry(os.open)(path, self.__open_flags)
            else:
                raise
    except EnvironmentError as e:
        # When O_NOFOLLOW is specified, indicates that this is a
        # symbolic link (the path changed type since it was stat'ed).
        if e.errno == errno.ELOOP:
            raise FileTypeChangedError()
        else:
            raise

    try:
        file_obj = os.fdopen(fd, "rb")
    except:
        # fdopen() failed, so nothing owns the raw descriptor — close it
        try:
            eintr_retry(os.close)(fd)
        except Exception as e:
            LOG.error("Unable to close a file: %s.", psys.e(e))

        raise

    with file_obj:
        # fstat() on the open descriptor avoids a stat/open race
        stat_info = os.fstat(file_obj.fileno())
        self.__backup.add_file(path, stat_info, file_obj = file_obj)
def __init__(self, source, output=True, pipe=None):
    """Creates a pipe that will replace the file descriptor `source`.

    When an existing `pipe` is given, the suitable end is adopted from
    it (taking over ownership); otherwise a fresh OS pipe is created.
    """

    # File descriptor that we are going to replace by this pipe
    self.source = source

    # True if this is output file descriptor
    self.output = output

    if pipe is not None:
        if output:
            self.read = None
            self.write, pipe.write = pipe.write, None
        else:
            self.read, pipe.read = pipe.read, None
            self.write = None
    else:
        try:
            self.read, self.write = os.pipe()
        except Exception as e:
            raise Error("Unable to create a pipe: {0}.", psys.e(e))
def close(self):
    """Closes the object, cancelling the backup unless it has been committed."""

    if self.__state == _STATE_CLOSED:
        return

    try:
        uncommitted = (
            self.__name is not None and
            self.__group is not None and
            self.__state != _STATE_COMMITTED
        )

        if uncommitted:
            try:
                self.__close()
            except Exception as e:
                LOG.error("Failed to close '%s' backup object: %s",
                    self.__name, psys.e(e))

            # Drop the temporary data of the unfinished backup
            self.__storage.cancel_backup(self.__group, self.__name)
    finally:
        self.__state = _STATE_CLOSED
def __load_backup_data(self, name, hashes, paths):
    """Loads the specified backup's data.

    Scans the backup's data archive for files whose path maps via
    `paths` to one of the wanted `hashes`. Returns a dict with the
    backup's name, the matched files and the open archive, or None if
    nothing matched (any archive opened here is closed again).
    """

    files = {}
    data = None

    backup_path = self.__storage.backup_path(self.__group, name)
    LOG.debug("Loading data of '%s' backup...", backup_path)

    try:
        if name == self.__name:
            # This backup's archive is already open
            data = self.__data
        else:
            data = utils.CompressedTarFile(
                os.path.join(backup_path, _DATA_FILE_NAME),
                decompress = not self.__in_place)

        for tar_info in data:
            # Renamed from `hash` to avoid shadowing the builtin
            file_hash = paths.get("/" + tar_info.name)

            if file_hash is not None and file_hash in hashes:
                files[file_hash] = tar_info
    except Exception as e:
        LOG.error("Failed to load data of '%s' backup: %s.", backup_path, psys.e(e))
    else:
        LOG.debug("Data of '%s' backup has been successfully loaded.", backup_path)

    if files:
        return {
            "name":  name,
            "files": files,
            "data":  data,
        }
    else:
        # Close only archives opened by this call, never the shared one
        if data is not None and data is not self.__data:
            try:
                data.close()
            except Exception as e:
                # Consistency fix: format the error with psys.e() like
                # every other handler in this module (was raw `e`)
                LOG.error("Failed to close data file of '%s' backup: %s.",
                    backup_path, psys.e(e))

        return None
def create_backup(self, max_backups):
    """Creates a new backup and returns (group, name, temporary path)."""

    name = time.strftime(_BACKUP_NAME_FORMAT, time.localtime())

    LOG.info("Creating a new backup '%s'.", name)

    groups = self.__groups()
    group = None

    # Reuse the newest group while it still has room for another backup
    if groups:
        last_group = groups[-1]

        if len(self.backups(last_group, check = True)) < max_backups:
            group = last_group
            LOG.info("Using backup group %s.", group)

    if group is None:
        group = self.__create_group()

    backup_path = self.backup_path(group, name, temp = True)

    try:
        os.mkdir(backup_path)
    except Exception as e:
        raise Error("Unable to create a backup directory '{}': {}.",
            backup_path, psys.e(e))

    return group, name, backup_path
def backup(self):
    """Runs the backup over all configured backup items.

    Returns True only if every item (including its before/after scripts)
    succeeded.
    """

    try:
        for path, params in self.__config["backup_items"].items():
            # A failed "before" script skips the item entirely
            if not self.__run_script(params.get("before")):
                self.__ok = False
                continue

            try:
                self.__add_toplevel_dirs(path)
            except Exception as e:
                LOG.error("Failed to backup '%s': %s.", path, psys.e(e))
                self.__ok = False
            else:
                self.__ok &= self.__backup_path(path, params.get("filter", []), path)

            # The "after" script runs even if the item itself failed
            self.__ok &= self.__run_script(params.get("after"))

        self.__backup.commit()
    finally:
        self.__backup.close()

    return self.__ok
def restore(self, paths_to_restore = None):
    """Restores the backup.

    Returns True if all files has been successfully restored.
    paths_to_restore, when given, limits the restore to those paths and
    everything beneath them.
    """

    if self.__state != _STATE_OPENED:
        raise Error("The backup file is closed.")

    try:
        os.mkdir(self.__restore_path, 0o700)
    except Exception as e:
        raise Error("Unable to create restore directory '{}': {}.",
            self.__restore_path, psys.e(e))

    files = []

    # Read the whole member list up front so a corrupted archive only
    # truncates the list instead of aborting the restore
    LOG.debug("Loading the backup's data...")
    try:
        for tar_info in self.__data:
            files.append(tar_info)
    except Exception as e:
        LOG.error("Failed to load the backup's data: %s.", psys.e(e))
        self.__ok = False
    else:
        LOG.debug("The backup's data has been successfully loaded")

    directories = []

    for tar_info in files:
        path = "/" + tar_info.name

        # Apply the optional path filter: keep the entry if it equals a
        # requested path or lies beneath one
        if paths_to_restore is not None:
            for path_to_restore in paths_to_restore:
                if path == path_to_restore or path.startswith(path_to_restore + os.path.sep):
                    break
            else:
                continue

        restore_path = os.path.join(self.__restore_path, tar_info.name)

        LOG.info("Restoring '%s'...", path)
        try:
            if tar_info.isdir():
                os.makedirs(restore_path, mode = 0o700)
                # Directory attributes are applied last (see below)
                directories.append(tar_info)
            elif tar_info.islnk():
                target_path = os.path.join(self.__restore_path, tar_info.linkname)

                try:
                    os.link(target_path, restore_path)
                except Exception as e:
                    raise Error("Unable to create a hard link to '{}': {}.",
                        target_path, psys.e(e))
            else:
                # Regular files may be stored externally (deduplicated by
                # hash); anything else is extracted straight from the data
                extern_hash = self.__extern_files.get(path) if tar_info.isreg() else None

                try:
                    if extern_hash is None:
                        try:
                            self.__data.extract(tar_info,
                                path = self.__restore_path, set_attrs = False)
                        except Exception as e:
                            raise Error("Unable to extract the file from backup: {}.", psys.e(e))
                    else:
                        self.__restore_extern_file(tar_info, extern_hash)
                finally:
                    self.__restore_attributes(tar_info, restore_path)
        except Exception as e:
            LOG.error("Failed to restore '%s': %s", path, psys.e(e))
            self.__ok = False

    # Restore directory attributes deepest-first, so changing a parent's
    # permissions can't block access to its children
    directories.sort(key = lambda tar_info: tar_info.name, reverse = True)
    for tar_info in directories:
        self.__restore_attributes(tar_info, os.path.join(self.__restore_path, tar_info.name))

    return self.__ok
def __child(self):
    """Handles child process execution.

    Runs between fork() and exec(): wires up the pipes and stdio
    redirections, then exec()s the command. Never returns — on any
    failure it reports to stderr and _exit()s with 127 (126 when exec
    itself failed with EACCES, per shell convention).
    """

    exit_code = 127

    try:
        exec_error = False

        try:
            fd_name = {
                0: "stdin",
                1: "stdout",
                2: "stderr",
            }

            def redirect_fd(path, fd, write=True, append=False):
                """Redirects fd to/from the file at path."""
                try:
                    if write:
                        file_fd = eintr_retry(os.open)(
                            path, os.O_WRONLY | os.O_CREAT | (os.O_APPEND if append else 0), 0o666)
                    else:
                        file_fd = eintr_retry(os.open)(path, os.O_RDONLY)

                    try:
                        eintr_retry(os.dup2)(file_fd, fd)
                    finally:
                        eintr_retry(os.close)(file_fd)
                except Exception as e:
                    raise Error("Unable to redirect {0} to {1}: {2}",
                        fd_name[fd] if write else "'" + path + "'",
                        "'" + path + "'" if write else fd_name[fd], psys.e(e))

            # Connect all pipes
            for pipe in self.__pipes:
                try:
                    eintr_retry(os.dup2)(pipe.write if pipe.output else pipe.read, pipe.source)
                except Exception as e:
                    raise Error("Unable to connect a pipe to {0}: {1}",
                        fd_name[pipe.source], psys.e(e))

                pipe.close()

            # Close all file descriptors
            psys.close_all_fds()

            # Configure stdin
            if isinstance(self.__stdin_source, File):
                redirect_fd(self.__stdin_source.path, psys.STDIN_FILENO, write=False)

            # Configure stdout
            if self.__stdout_target is STDERR:
                try:
                    eintr_retry(os.dup2)(psys.STDERR_FILENO, psys.STDOUT_FILENO)
                except Exception as e:
                    # BUG FIX: the message previously said "stderr to
                    # stdout" — this branch redirects stdout to stderr
                    raise Error("Unable to redirect stdout to stderr: {0}", psys.e(e))
            elif isinstance(self.__stdout_target, File):
                redirect_fd(self.__stdout_target.path, psys.STDOUT_FILENO,
                    append=self.__stdout_target.append)

            # Configure stderr
            if self.__stderr_target is STDOUT:
                try:
                    eintr_retry(os.dup2)(psys.STDOUT_FILENO, psys.STDERR_FILENO)
                except Exception as e:
                    raise Error("Unable to redirect stderr to stdout: {0}", psys.e(e))
            elif isinstance(self.__stderr_target, File):
                redirect_fd(self.__stderr_target.path, psys.STDERR_FILENO,
                    append=self.__stderr_target.append)

            # Required when we have C locale
            command = [psys.b(arg) for arg in self.__command]

            # From here on, any EnvironmentError comes from exec itself
            exec_error = True

            if self.__env is None:
                os.execvp(self.__program, command)
            else:
                os.execvpe(self.__program, command, self.__env)
        except Exception as e:
            if exec_error and isinstance(e, EnvironmentError) and e.errno == errno.EACCES:
                # 126: command found but not executable (shell convention)
                exit_code = 126

            print("Failed to execute '{program}': {error}.".format(
                program=self.__program, error=psys.e(e)), file=sys.stderr)
    finally:
        # The child must never return into the parent's code
        os._exit(exit_code)
def __child(self):
    """Handles child process execution.

    Executed in the forked child between fork() and exec(): connects
    the pipes, closes inherited descriptors, applies the stdio
    redirections and exec()s the command. On failure it prints the
    error to stderr and _exit()s with 127, or 126 when exec failed
    with EACCES (shell conventions).
    """

    exit_code = 127

    try:
        exec_error = False

        try:
            fd_name = {
                0: "stdin",
                1: "stdout",
                2: "stderr",
            }

            def redirect_fd(path, fd, write=True, append=False):
                # Opens path and duplicates it over fd, closing the
                # temporary descriptor afterwards
                try:
                    if write:
                        file_fd = eintr_retry(
                            os.open)(path, os.O_WRONLY | os.O_CREAT | (os.O_APPEND if append else 0), 0o666)
                    else:
                        file_fd = eintr_retry(os.open)(path, os.O_RDONLY)

                    try:
                        eintr_retry(os.dup2)(file_fd, fd)
                    finally:
                        eintr_retry(os.close)(file_fd)
                except Exception as e:
                    raise Error("Unable to redirect {0} to {1}: {2}",
                        fd_name[fd] if write else "'" + path + "'",
                        "'" + path + "'" if write else fd_name[fd], psys.e(e))

            # Connect all pipes
            for pipe in self.__pipes:
                try:
                    eintr_retry(
                        os.dup2)(pipe.write if pipe.output else pipe.read, pipe.source)
                except Exception as e:
                    raise Error("Unable to connect a pipe to {0}: {1}",
                        fd_name[pipe.source], psys.e(e))

                pipe.close()

            # Close all file descriptors
            psys.close_all_fds()

            # Configure stdin
            if isinstance(self.__stdin_source, File):
                redirect_fd(self.__stdin_source.path, psys.STDIN_FILENO, write=False)

            # Configure stdout
            if self.__stdout_target is STDERR:
                try:
                    eintr_retry(os.dup2)(psys.STDERR_FILENO, psys.STDOUT_FILENO)
                except Exception as e:
                    raise Error("Unable to redirect stdout to stderr: {0}", psys.e(e))
            elif isinstance(self.__stdout_target, File):
                redirect_fd(self.__stdout_target.path, psys.STDOUT_FILENO,
                    append=self.__stdout_target.append)

            # Configure stderr
            if self.__stderr_target is STDOUT:
                try:
                    eintr_retry(os.dup2)(psys.STDOUT_FILENO, psys.STDERR_FILENO)
                except Exception as e:
                    raise Error("Unable to redirect stderr to stdout: {0}", psys.e(e))
            elif isinstance(self.__stderr_target, File):
                redirect_fd(self.__stderr_target.path, psys.STDERR_FILENO,
                    append=self.__stderr_target.append)

            # Required when we have C locale
            command = [psys.b(arg) for arg in self.__command]

            # Any EnvironmentError raised past this point comes from exec
            exec_error = True

            if self.__env is None:
                os.execvp(self.__program, command)
            else:
                os.execvpe(self.__program, command, self.__env)
        except Exception as e:
            if exec_error and isinstance(
                e, EnvironmentError) and e.errno == errno.EACCES:
                # 126: command found but not executable
                exit_code = 126

            print("Failed to execute '{program}': {error}.".format(
                program=self.__program, error=psys.e(e)), file=sys.stderr)
    finally:
        # The child must never return into the parent's code
        os._exit(exit_code)
def __init__(self, config, storage):
    """Creates a new backup in `storage` according to `config`.

    Allocates the backup directory, loads metadata of previous backups
    (for deduplication) and opens the data and metadata files. On any
    failure the half-created backup is cancelled via close().
    """

    # Backup config
    self.__config = config

    # Backup storage abstraction
    self.__storage = storage

    # Backup name
    self.__name = None

    # Backup group
    self.__group = None

    # Current object state
    self.__state = _STATE_OPENED

    # Backup data file
    self.__data = None

    # Backup metadata file
    self.__metadata = None

    # A set of hashes of all available files in this backup group
    self.__hashes = set()

    # A map of files from the previous backup to their hashes and
    # fingerprints.
    self.__prev_files = {}

    # A set of all files added to the backup
    self.__files = set()

    # Inodes of hard links added to the backup (to track hard-linked files)
    self.__hardlink_inodes = {}

    try:
        self.__group, self.__name, path = self.__storage.create_backup(
            self.__config["max_backups"])

        # Metadata of previous backups enables file deduplication
        self.__load_all_backup_metadata(self.__config["trust_modify_time"])

        LOG.debug("Creating backup %s in group %s...", self.__name, self.__group)

        try:
            self.__data = utils.CompressedTarFile(
                os.path.join(path, _DATA_FILE_NAME),
                write = self.__config["compression"])
        except Exception as e:
            raise Error("Unable to create a backup data tar archive in '{}': {}.",
                path, psys.e(e))

        metadata_path = os.path.join(path, _METADATA_FILE_NAME)

        try:
            self.__metadata = bz2.BZ2File(metadata_path, mode = "w")
        except Exception as e:
            raise Error("Unable to create a backup metadata file '{}': {}.",
                metadata_path, psys.e(e))
    except:
        # Release any half-created resources (cancels the backup)
        self.close()
        raise