def test_backup_file(self):
    """
    Exercise xlog.is_backup_file against valid and invalid names.

    Only names of the form <XLOG>.<offset>.backup (optionally with a
    leading directory component) are backup label files.
    """
    assert not xlog.is_backup_file('000000000000000200000001')
    assert xlog.is_backup_file(
        '00000001000000000000000A.00000020.backup')
    assert xlog.is_backup_file(
        'test/00000001000000000000000A.00000020.backup')
    assert not xlog.is_backup_file('00000002.history')
    assert not xlog.is_backup_file('00000000000000000000000')
    assert not xlog.is_backup_file('0000000000000000000000000')
    assert not xlog.is_backup_file('000000000000X00000000000')
    assert not xlog.is_backup_file('00000001000000000000000A.backup')
    # A dotted prefix disqualifies the name as any kind of xlog file
    assert not xlog.is_any_xlog_file(
        'test.00000001000000000000000A.00000020.backup')
    assert not xlog.is_backup_file('00000001000000000000000A.history')
    # Consistency with the sibling version of this test: .partial WAL
    # names must never be classified as backup files either
    assert not xlog.is_backup_file('00000001000000000000000A.partial')
    assert not xlog.is_backup_file(
        '00000001000000000000000A.00000020.partial')
def test_backup_file(self):
    """
    Exercise xlog.is_backup_file against valid and invalid names.
    """
    # Accepted: <XLOG>.<offset>.backup, with or without a leading
    # directory component.
    accepted = (
        '00000001000000000000000A.00000020.backup',
        'test/00000001000000000000000A.00000020.backup',
    )
    for candidate in accepted:
        assert xlog.is_backup_file(candidate)
    # Rejected: plain segments, history files, malformed names and
    # .partial files.
    rejected = (
        '000000000000000200000001',
        '00000002.history',
        '00000000000000000000000',
        '0000000000000000000000000',
        '000000000000X00000000000',
        '00000001000000000000000A.backup',
        '00000001000000000000000A.history',
        '00000001000000000000000A.partial',
        '00000001000000000000000A.00000020.partial',
    )
    for candidate in rejected:
        assert not xlog.is_backup_file(candidate)
    # A dotted prefix disqualifies the name as any kind of xlog file
    assert not xlog.is_any_xlog_file(
        'test.00000001000000000000000A.00000020.backup')
def rebuild_xlogdb(self):
    """
    Rebuild the whole xlog database guessing it from the archive content.

    Scans every entry under the server's wals_directory and writes one
    xlogdb line per recognised WAL segment, backup label or history file.
    Unrecognised entries are logged and skipped.  The new database is
    staged in a sibling ".new" file and swapped in only once complete.
    """
    from os.path import isdir, join

    output.info("Rebuilding xlogdb for server %s", self.config.name)
    root = self.config.wals_directory
    default_compression = self.config.compression
    # Counters reported in the final summary message
    wal_count = label_count = history_count = 0
    # lock the xlogdb as we are about replacing it completely
    with self.server.xlogdb('w') as fxlogdb:
        # Stage the replacement in a ".new" file next to the real one
        xlogdb_new = fxlogdb.name + ".new"
        with open(xlogdb_new, 'w') as fxlogdb_new:
            for name in sorted(os.listdir(root)):
                # ignore the xlogdb and its lockfile
                if name.startswith(self.server.XLOG_DB):
                    continue
                fullname = join(root, name)
                if isdir(fullname):
                    # all relevant files are in subdirectories
                    hash_dir = fullname
                    for wal_name in sorted(os.listdir(hash_dir)):
                        fullname = join(hash_dir, wal_name)
                        if isdir(fullname):
                            _logger.warning(
                                'unexpected directory '
                                'rebuilding the wal database: %s',
                                fullname)
                        else:
                            if xlog.is_wal_file(fullname):
                                wal_count += 1
                            elif xlog.is_backup_file(fullname):
                                label_count += 1
                            else:
                                # Neither a WAL segment nor a backup
                                # label: warn and skip without writing
                                # an xlogdb entry
                                _logger.warning(
                                    'unexpected file '
                                    'rebuilding the wal database: %s',
                                    fullname)
                                continue
                            wal_info = WalFileInfo.from_file(
                                fullname,
                                default_compression=default_compression)
                            fxlogdb_new.write(wal_info.to_xlogdb_line())
                else:
                    # only history files are here
                    if xlog.is_history_file(fullname):
                        history_count += 1
                        wal_info = WalFileInfo.from_file(
                            fullname,
                            default_compression=default_compression)
                        fxlogdb_new.write(wal_info.to_xlogdb_line())
                    else:
                        _logger.warning(
                            'unexpected file '
                            'rebuilding the wal database: %s',
                            fullname)
            # Ensure the staged file reaches disk before the swap
            os.fsync(fxlogdb_new.fileno())
        # Atomically replace the old database with the rebuilt one
        shutil.move(xlogdb_new, fxlogdb.name)
        # Make the rename itself durable by syncing the directory
        fsync_dir(os.path.dirname(fxlogdb.name))
    output.info('Done rebuilding xlogdb for server %s '
                '(history: %s, backup_labels: %s, wal_file: %s)',
                self.config.name, history_count, label_count, wal_count)
def _remove_wals_for_backup(
    cloud_interface,
    catalog,
    deleted_backup,
    dry_run,
    skip_wal_cleanup_if_standalone=True,
):
    """
    Delete from cloud storage the WALs made unnecessary by deleting
    ``deleted_backup``.

    :param cloud_interface: used to delete the WAL objects
        (presumably a CloudInterface — verify against callers)
    :param catalog: backup catalog; also used as keep manager and as the
        local WAL-path cache
    :param deleted_backup: the backup that has just been deleted
    :param bool dry_run: when True, only print what would be deleted
    :param bool skip_wal_cleanup_if_standalone: forwarded to
        BackupManager.should_remove_wals
    """
    # An implementation of BackupManager.remove_wal_before_backup which does not
    # use xlogdb, since xlogdb is not available to barman-cloud
    should_remove_wals, wal_ranges_to_protect = BackupManager.should_remove_wals(
        deleted_backup,
        catalog.get_backup_list(),
        keep_manager=catalog,
        skip_wal_cleanup_if_standalone=skip_wal_cleanup_if_standalone,
    )
    next_backup = BackupManager.find_next_backup_in(
        catalog.get_backup_list(), deleted_backup.backup_id
    )
    # Map of wal_name -> wal path selected for deletion; stays empty when
    # WAL removal is not allowed
    wals_to_delete = {}
    if should_remove_wals:
        # There is no previous backup or all previous backups are archival
        # standalone backups, so we can remove unused WALs (those WALs not
        # required by standalone archival backups).
        # If there is a next backup then all unused WALs up to the begin_wal
        # of the next backup can be removed.
        # If there is no next backup then there are no remaining backups,
        # because we must assume non-exclusive backups are taken, we can only
        # safely delete unused WALs up to begin_wal of the deleted backup.
        # See comments in barman.backup.BackupManager.delete_backup.
        if next_backup:
            remove_until = next_backup
        else:
            remove_until = deleted_backup
        # A WAL is only a candidate for deletion if it is on the same timeline so we
        # use BackupManager to get a set of all other timelines with backups so that
        # we can preserve all WALs on other timelines.
        timelines_to_protect = BackupManager.get_timelines_to_protect(
            remove_until=remove_until,
            deleted_backup=deleted_backup,
            available_backups=catalog.get_backup_list(),
        )
        try:
            wal_paths = catalog.get_wal_paths()
        except Exception as exc:
            # Without a WAL listing nothing can be deleted safely: give up
            logging.error(
                "Cannot clean up WALs for backup %s because an error occurred listing WALs: %s",
                deleted_backup.backup_id,
                force_str(exc),
            )
            return
        for wal_name, wal in wal_paths.items():
            # History files are never candidates for deletion
            if xlog.is_history_file(wal_name):
                continue
            # Preserve WALs belonging to other (protected) timelines
            if timelines_to_protect:
                tli, _, _ = xlog.decode_segment_name(wal_name)
                if tli in timelines_to_protect:
                    continue
            # Check if the WAL is in a protected range, required by an archival
            # standalone backup - so do not delete it
            if xlog.is_backup_file(wal_name):
                # If we have a backup file, truncate the name for the range check
                range_check_wal_name = wal_name[:24]
            else:
                range_check_wal_name = wal_name
            if any(
                range_check_wal_name >= begin_wal and range_check_wal_name <= end_wal
                for begin_wal, end_wal in wal_ranges_to_protect
            ):
                continue
            # Only WALs strictly before the cut-off backup's begin_wal go
            if wal_name < remove_until.begin_wal:
                wals_to_delete[wal_name] = wal
    # Explicitly sort because dicts are not ordered in python < 3.6
    wal_paths_to_delete = sorted(wals_to_delete.values())
    if len(wal_paths_to_delete) > 0:
        if not dry_run:
            try:
                cloud_interface.delete_objects(wal_paths_to_delete)
            except Exception as exc:
                logging.error(
                    "Could not delete the following WALs for backup %s: %s, Reason: %s",
                    deleted_backup.backup_id,
                    wal_paths_to_delete,
                    force_str(exc),
                )
                # Return early so that we leave the WALs in the local cache so they
                # can be cleaned up should there be a subsequent backup deletion.
                return
        else:
            print(
                "Skipping deletion of objects %s due to --dry-run option"
                % wal_paths_to_delete
            )
        # Deletion succeeded (or was a dry run): drop the WALs from the
        # local catalog cache as well
        for wal_name in wals_to_delete.keys():
            catalog.remove_wal_from_cache(wal_name)
def rebuild_xlogdb(self):
    """
    Rebuild the whole xlog database guessing it from the archive content.

    Scans every entry under the server's wals_directory and writes one
    xlogdb line per recognised WAL segment, backup label or history file.
    Unexpected files (including ".tmp" leftovers) are logged and skipped.
    The new database is staged in a ".new" file and swapped in when done.
    """
    from os.path import isdir, join

    output.info("Rebuilding xlogdb for server %s", self.config.name)
    root = self.config.wals_directory
    comp_manager = self.compression_manager
    # Counters reported in the final summary message
    wal_count = label_count = history_count = 0
    # lock the xlogdb as we are about replacing it completely
    with self.server.xlogdb('w') as fxlogdb:
        # Stage the replacement in a ".new" file next to the real one
        xlogdb_new = fxlogdb.name + ".new"
        with open(xlogdb_new, 'w') as fxlogdb_new:
            for name in sorted(os.listdir(root)):
                # ignore the xlogdb and its lockfile
                if name.startswith(self.server.XLOG_DB):
                    continue
                fullname = join(root, name)
                if isdir(fullname):
                    # all relevant files are in subdirectories
                    hash_dir = fullname
                    for wal_name in sorted(os.listdir(hash_dir)):
                        fullname = join(hash_dir, wal_name)
                        if isdir(fullname):
                            _logger.warning(
                                'unexpected directory '
                                'rebuilding the wal database: %s',
                                fullname)
                        else:
                            if xlog.is_wal_file(fullname):
                                wal_count += 1
                            elif xlog.is_backup_file(fullname):
                                label_count += 1
                            elif fullname.endswith('.tmp'):
                                # presumably an in-flight archive file;
                                # skipped without an xlogdb entry
                                _logger.warning(
                                    'temporary file found '
                                    'rebuilding the wal database: %s',
                                    fullname)
                                continue
                            else:
                                # Unknown file: warn and skip without
                                # writing an xlogdb entry
                                _logger.warning(
                                    'unexpected file '
                                    'rebuilding the wal database: %s',
                                    fullname)
                                continue
                            wal_info = comp_manager.get_wal_file_info(
                                fullname)
                            fxlogdb_new.write(wal_info.to_xlogdb_line())
                else:
                    # only history files are here
                    if xlog.is_history_file(fullname):
                        history_count += 1
                        wal_info = comp_manager.get_wal_file_info(fullname)
                        fxlogdb_new.write(wal_info.to_xlogdb_line())
                    else:
                        _logger.warning(
                            'unexpected file '
                            'rebuilding the wal database: %s',
                            fullname)
            # Ensure the staged file reaches disk before the swap
            os.fsync(fxlogdb_new.fileno())
        # Atomically replace the old database with the rebuilt one
        shutil.move(xlogdb_new, fxlogdb.name)
        # Make the rename itself durable by syncing the directory
        fsync_dir(os.path.dirname(fxlogdb.name))
    output.info(
        'Done rebuilding xlogdb for server %s '
        '(history: %s, backup_labels: %s, wal_file: %s)',
        self.config.name, history_count, label_count, wal_count)
def test_backup_file(self):
    """
    Exercise xlog.is_backup_file against valid and invalid names.
    """
    # Accepted: <XLOG>.<offset>.backup, with or without a leading
    # directory component.
    accepted = (
        "00000001000000000000000A.00000020.backup",
        "test/00000001000000000000000A.00000020.backup",
    )
    for candidate in accepted:
        assert xlog.is_backup_file(candidate)
    # Rejected: plain segments, history files, malformed names and
    # .partial files.
    rejected = (
        "000000000000000200000001",
        "00000002.history",
        "00000000000000000000000",
        "0000000000000000000000000",
        "000000000000X00000000000",
        "00000001000000000000000A.backup",
        "00000001000000000000000A.history",
        "00000001000000000000000A.partial",
        "00000001000000000000000A.00000020.partial",
    )
    for candidate in rejected:
        assert not xlog.is_backup_file(candidate)
    # A dotted prefix disqualifies the name as any kind of xlog file
    assert not xlog.is_any_xlog_file(
        "test.00000001000000000000000A.00000020.backup"
    )
def download_wal(self, wal_name, wal_dest): """ Download a WAL file from cloud storage :param str wal_name: Name of the WAL file :param str wal_dest: Full path of the destination WAL file """ # Correctly format the source path on s3 source_dir = os.path.join(self.cloud_interface.path, self.server_name, "wals", hash_dir(wal_name)) # Add a path separator if needed if not source_dir.endswith(os.path.sep): source_dir += os.path.sep wal_path = os.path.join(source_dir, wal_name) remote_name = None # Automatically detect compression based on the file extension compression = None for item in self.cloud_interface.list_bucket(source_dir): # perfect match (uncompressed file) if item == wal_path: remote_name = item # look for compressed files or .partial files elif item.startswith(wal_path): # Detect compression basename = item for e, c in ALLOWED_COMPRESSIONS.items(): if item[-len(e):] == e: # Strip extension basename = basename[:-len(e)] compression = c break # Check basename is a known xlog file (.partial?) if not is_any_xlog_file(basename): logging.warning("Unknown WAL file: %s", item) continue # Exclude backup informative files (not needed in recovery) elif is_backup_file(basename): logging.info("Skipping backup file: %s", item) continue # Found candidate remote_name = item logging.info( "Found WAL %s for server %s as %s", wal_name, self.server_name, remote_name, ) break if not remote_name: logging.info("WAL file %s for server %s does not exists", wal_name, self.server_name) raise OperationErrorExit() if compression and sys.version_info < (3, 0, 0): raise BarmanException( "Compressed WALs cannot be restored with Python 2.x - " "please upgrade to a supported version of Python 3") # Download the file logging.debug( "Downloading %s to %s (%s)", remote_name, wal_dest, "decompressing " + compression if compression else "no compression", ) self.cloud_interface.download_file(remote_name, wal_dest, compression)
def rebuild_xlogdb(self):
    """
    Rebuild the whole xlog database guessing it from the archive content.

    Scans every entry under the server's wals_directory and writes one
    xlogdb line per recognised WAL segment, backup label or history file.
    The new content is staged in an anonymous temporary file in the same
    directory and then copied back over the locked xlogdb.
    """
    from os.path import isdir, join

    output.info("Rebuilding xlogdb for server %s", self.config.name)
    root = self.config.wals_directory
    comp_manager = self.compression_manager
    # Counters reported in the final summary message
    wal_count = label_count = history_count = 0
    # lock the xlogdb as we are about replacing it completely
    with self.server.xlogdb("w") as fxlogdb:
        xlogdb_dir = os.path.dirname(fxlogdb.name)
        # Stage the rebuilt content in a temp file in the same directory
        with tempfile.TemporaryFile(mode="w+", dir=xlogdb_dir) as fxlogdb_new:
            for name in sorted(os.listdir(root)):
                # ignore the xlogdb and its lockfile
                if name.startswith(self.server.XLOG_DB):
                    continue
                fullname = join(root, name)
                if isdir(fullname):
                    # all relevant files are in subdirectories
                    hash_dir = fullname
                    for wal_name in sorted(os.listdir(hash_dir)):
                        fullname = join(hash_dir, wal_name)
                        if isdir(fullname):
                            _logger.warning(
                                "unexpected directory "
                                "rebuilding the wal database: %s",
                                fullname,
                            )
                        else:
                            if xlog.is_wal_file(fullname):
                                wal_count += 1
                            elif xlog.is_backup_file(fullname):
                                label_count += 1
                            elif fullname.endswith(".tmp"):
                                # presumably an in-flight archive file;
                                # skipped without an xlogdb entry
                                _logger.warning(
                                    "temporary file found "
                                    "rebuilding the wal database: %s",
                                    fullname,
                                )
                                continue
                            else:
                                # Unknown file: warn and skip without
                                # writing an xlogdb entry
                                _logger.warning(
                                    "unexpected file "
                                    "rebuilding the wal database: %s",
                                    fullname,
                                )
                                continue
                            wal_info = comp_manager.get_wal_file_info(
                                fullname)
                            fxlogdb_new.write(wal_info.to_xlogdb_line())
                else:
                    # only history files are here
                    if xlog.is_history_file(fullname):
                        history_count += 1
                        wal_info = comp_manager.get_wal_file_info(fullname)
                        fxlogdb_new.write(wal_info.to_xlogdb_line())
                    else:
                        _logger.warning(
                            "unexpected file rebuilding the wal database: %s",
                            fullname,
                        )
            # Rewind both files and copy the staged content over the
            # still-locked xlogdb, then drop any leftover old bytes
            fxlogdb_new.flush()
            fxlogdb_new.seek(0)
            fxlogdb.seek(0)
            shutil.copyfileobj(fxlogdb_new, fxlogdb)
            fxlogdb.truncate()
    output.info(
        "Done rebuilding xlogdb for server %s "
        "(history: %s, backup_labels: %s, wal_file: %s)",
        self.config.name,
        history_count,
        label_count,
        wal_count,
    )