def get_wal_until_next_backup(self, backup):
    """
    Get the xlog files between backup and the next

    :param backup: a backup object, the starting point to retrieve wals
    :return: iterator of (name, size) tuples for every WAL segment on the
        backup's timeline between its begin_wal and the next backup's
        end_wal (if a next backup exists)
    """
    begin = backup.begin_wal
    # Look up the next backup only once: the original queried the
    # backup catalogue twice for the same backup_id.
    next_backup = self.get_next_backup(backup.backup_id)
    next_end = next_backup.end_wal if next_backup else None
    backup_tli, _, _ = xlog.decode_segment_name(begin)
    with self.xlogdb() as fxlogdb:
        for line in fxlogdb:
            name, size, _, _ = self.xlogdb_parse_line(line)
            # Skip segments archived before the backup started
            if name < begin:
                continue
            # Skip segments belonging to a later timeline
            tli, _, _ = xlog.decode_segment_name(name)
            if tli > backup_tli:
                continue
            # Skip anything that is not a plain WAL segment
            # (e.g. .history or .backup files)
            if not xlog.is_wal_file(name):
                continue
            # Stop once past the end of the next backup, if any
            if next_end and name > next_end:
                break
            yield (name, size)
def get_wal_until_next_backup(self, backup, include_history=False):
    """
    Get the xlog files between backup and the next

    :param BackupInfo backup: a backup object, the starting point
        to retrieve WALs
    :param bool include_history: option for the inclusion of
        history files into the output
    """
    begin = backup.begin_wal
    # Query the catalogue once (the original called get_next_backup()
    # twice for the same backup_id)
    next_backup = self.get_next_backup(backup.backup_id)
    next_end = next_backup.end_wal if next_backup else None
    backup_tli, _, _ = xlog.decode_segment_name(begin)

    with self.xlogdb() as fxlogdb:
        for line in fxlogdb:
            wal_info = WalFileInfo.from_xlogdb_line(line)
            # Handle .history files: add all of them to the output,
            # regardless of their age, if requested (the 'include_history'
            # parameter is True)
            if xlog.is_history_file(wal_info.name):
                if include_history:
                    yield wal_info
                continue
            # Skip segments archived before the backup started
            if wal_info.name < begin:
                continue
            # Skip segments belonging to a later timeline
            tli, _, _ = xlog.decode_segment_name(wal_info.name)
            if tli > backup_tli:
                continue
            # Skip anything that is not a plain WAL segment
            if not xlog.is_wal_file(wal_info.name):
                continue
            # Stop once past the end of the next backup, if any
            if next_end and wal_info.name > next_end:
                break
            yield wal_info
def get_required_xlog_files(self, backup, target_tli=None, target_time=None,
                            target_xid=None):
    """
    Get the xlog files required for a backup

    :param backup: the backup whose WAL files are requested
    :param target_tli: recovery target timeline (defaults to the
        timeline of the backup itself)
    :param target_time: recovery target timestamp used to stop the scan
    :param target_xid: recovery target transaction id (currently unused)
    """
    begin = backup.begin_wal
    end = backup.end_wal
    # Without an explicit target, recover along the backup's own timeline
    if not target_tli:
        target_tli, _, _ = xlog.decode_segment_name(end)
    with self.xlogdb() as fxlogdb:
        for line in fxlogdb:
            name, _, stamp, _ = self.xlogdb_parse_line(line)
            if name >= begin:
                tli, _, _ = xlog.decode_segment_name(name)
                if tli <= target_tli:
                    yield name
                    # Track the greatest segment name seen so far
                    end = max(end, name)
                    # Stop the main scan past the requested recovery time
                    if target_time and target_time < stamp:
                        break
        # Drain the rest of the file, emitting only history files
        for line in fxlogdb:
            name, _, stamp, _ = self.xlogdb_parse_line(line)
            if xlog.is_history_file(name):
                yield name
def get_required_xlog_files(self, backup, target_tli=None, target_time=None,
                            target_xid=None):
    """
    Get the xlog files required for a recovery
    """
    begin = backup.begin_wal
    end = backup.end_wal
    # When no target timeline is given, recover along the
    # timeline of the backup itself
    if not target_tli:
        target_tli, _, _ = xlog.decode_segment_name(end)
    with self.xlogdb() as fxlogdb:
        for line in fxlogdb:
            wal_info = WalFileInfo.from_xlogdb_line(line)
            name = wal_info.name
            # Every .history file is relevant, whatever its age
            if xlog.is_history_file(name):
                yield wal_info
                continue
            # Ignore segments archived before the backup started
            if name < begin:
                continue
            # Ignore segments belonging to a timeline past the target
            tli, _, _ = xlog.decode_segment_name(name)
            if tli > target_tli:
                continue
            yield wal_info
            if name > end:
                end = name
            # Past the requested recovery time: stop the main scan
            if target_time and target_time < wal_info.time:
                break
        # Drain the remainder of the file, keeping history files only
        for line in fxlogdb:
            wal_info = WalFileInfo.from_xlogdb_line(line)
            if xlog.is_history_file(wal_info.name):
                yield wal_info
def get_required_xlog_files(self, backup, target_tli=None, target_time=None,
                            target_xid=None):
    """Get the xlog files required for a backup."""
    first_wal = backup.begin_wal
    last_wal = backup.end_wal
    # Default the target timeline to the timeline of the backup
    if not target_tli:
        target_tli, _, _ = xlog.decode_segment_name(last_wal)
    with self.xlogdb() as fxlogdb:
        for line in fxlogdb:
            segment, _, timestamp, _ = self.xlogdb_parse_line(line)
            # Segments older than the backup start are irrelevant
            if segment < first_wal:
                continue
            # Segments from a timeline past the target are irrelevant
            segment_tli, _, _ = xlog.decode_segment_name(segment)
            if segment_tli > target_tli:
                continue
            yield segment
            last_wal = max(last_wal, segment)
            # Stop once the archive timestamp exceeds the target time
            if target_time and target_time < timestamp:
                break
        # After the stop point, only history files are still needed
        for line in fxlogdb:
            segment, _, timestamp, _ = self.xlogdb_parse_line(line)
            if xlog.is_history_file(segment):
                yield segment
def testDecodeSegmentName(self):
    """Verify decoding of WAL segment, .backup and .history names."""
    # Mapping of well-formed names to their [timeline, log, segment]
    # decodings
    cases = {
        '000000000000000000000000': [0, 0, 0],
        '000000010000000100000001': [1, 1, 1],
        '0000000A0000000A0000000A': [10, 10, 10],
        '000000110000001100000011': [17, 17, 17],
        '000000000000000200000001': [0, 2, 1],
        '000000010000000000000002': [1, 0, 2],
        '000000020000000100000000': [2, 1, 0],
        '00000001000000000000000A.00000020.backup': [1, 0, 10],
        '00000001.history': [1, None, None],
    }
    for name, expected in cases.items():
        self.assertEqual(xlog.decode_segment_name(name), expected)
    # Truncated, overlong or non-hex names must be rejected
    for bad_name in ('00000000000000000000000',
                     '0000000000000000000000000',
                     '000000000000X00000000000'):
        self.assertRaises(xlog.BadXlogSegmentName,
                          xlog.decode_segment_name, bad_name)
def stop_backup(self, backup_info):
    """
    Stop backup wrapper

    :param barman.infofile.BackupInfo backup_info: backup information
    """
    postgres = self.executor.server.postgres
    # Close the concurrent backup through the pgespresso extension
    stop_row = postgres.pgespresso_stop_backup(backup_info.backup_label)
    if stop_row:
        # stop_row carries the closing WAL file name and the stop time
        end_wal, stop_time = stop_row
        # decoded_segment is [timeline, log, segment]
        decoded_segment = xlog.decode_segment_name(end_wal)
        backup_info.set_attribute('end_time', stop_time)
        # pgespresso only reports the final WAL file name, so rebuild an
        # end_xlog location from it: log part, then the byte offset of
        # the segment following end_wal (assumes 1 << 24 bytes per
        # segment, the default WAL segment size — TODO confirm)
        backup_info.set_attribute('end_xlog',
                                  "%X/%X" % (decoded_segment[1],
                                             (decoded_segment[
                                                 2] + 1) << 24))
        backup_info.set_attribute('end_wal', end_wal)
        # The reconstructed location sits at a segment boundary
        backup_info.set_attribute('end_offset', 0)
    else:
        raise Exception('Cannot terminate exclusive backup. You might '
                        'have to manually execute '
                        'pgespresso_abort_backup() on your PostgreSQL '
                        'server')
    self.current_action = "writing backup label"
    self._write_backup_label(backup_info)
def is_wal_relevant(self, wal_info, first_backup):
    """
    Check the relevance of a WAL file according to a provided BackupInfo
    (usually the oldest on the server) to ensure that the WAL is newer than
    the start_wal of the backup.

    :param WalFileInfo wal_info: the WAL file we are checking
    :param BackupInfo first_backup: the backup used for the checks
        (usually the oldest available on the server)
    """
    name = wal_info.name
    # History files are always relevant
    if xlog.is_history_file(name):
        return True
    # A WAL on a timeline older than the oldest backup's timeline
    # can never be used: report it and refuse it.
    wal_timeline = xlog.decode_segment_name(name)[0]
    if wal_timeline < first_backup.timeline:
        output.info("\tThe timeline of the WAL file %s (%s), is lower "
                    "than the one of the oldest backup of "
                    "server %s (%s). Moving the WAL in "
                    "the error directory",
                    name, wal_timeline, self.config.name,
                    first_backup.timeline)
        return False
    # Refuse segments archived before the first backup began
    if name < first_backup.begin_wal:
        output.info("\tOlder than first backup of server %s. "
                    "Moving the WAL file %s in the error directory",
                    self.config.name, name)
        return False
    return True
def testDecodeSegmentName(self):
    """Check WAL name decoding and rejection of malformed names."""
    # (input, expected [timeline, log, segment]) pairs covering plain
    # segments, hex digits, .backup labels and .history files
    valid = [
        ('000000000000000000000000', [0, 0, 0]),
        ('000000010000000100000001', [1, 1, 1]),
        ('0000000A0000000A0000000A', [10, 10, 10]),
        ('000000110000001100000011', [17, 17, 17]),
        ('000000000000000200000001', [0, 2, 1]),
        ('000000010000000000000002', [1, 0, 2]),
        ('000000020000000100000000', [2, 1, 0]),
        ('00000001000000000000000A.00000020.backup', [1, 0, 10]),
        ('00000001.history', [1, None, None]),
    ]
    for segment_name, decoded in valid:
        self.assertEqual(xlog.decode_segment_name(segment_name), decoded)
    # Wrong length or non-hex characters raise BadXlogSegmentName
    malformed = ['00000000000000000000000',
                 '0000000000000000000000000',
                 '000000000000X00000000000']
    for segment_name in malformed:
        self.assertRaises(xlog.BadXlogSegmentName,
                          xlog.decode_segment_name, segment_name)
def remove_wal_before_backup(self, backup_info, timelines_to_protect=None):
    """
    Remove WAL files which have been archived before the start of
    the provided backup.

    If no backup_info is provided delete all available WAL files

    If timelines_to_protect list is passed, never remove a wal in one of
    these timelines.

    :param BackupInfo|None backup_info: the backup information structure
    :param set timelines_to_protect: optional list of timelines
        to protect
    :return list: a list of removed WAL files
    """
    removed = []
    with self.server.xlogdb("r+") as fxlogdb:
        xlogdb_dir = os.path.dirname(fxlogdb.name)
        # Rewrite the xlogdb into a temporary file in the same directory,
        # then copy it back over the original once filtering is complete
        with tempfile.TemporaryFile(mode="w+", dir=xlogdb_dir) as fxlogdb_new:
            for line in fxlogdb:
                wal_info = WalFileInfo.from_xlogdb_line(line)
                # A malformed entry is reported but kept out of the
                # rewritten file
                if not xlog.is_any_xlog_file(wal_info.name):
                    output.error(
                        "invalid WAL segment name %r\n"
                        'HINT: Please run "barman rebuild-xlogdb %s" '
                        "to solve this issue",
                        wal_info.name,
                        self.config.name,
                    )
                    continue
                # Keeps the WAL segment if it is a history file
                keep = xlog.is_history_file(wal_info.name)
                # Keeps the WAL segment if its timeline is in
                # `timelines_to_protect`
                if timelines_to_protect:
                    tli, _, _ = xlog.decode_segment_name(wal_info.name)
                    keep |= tli in timelines_to_protect
                # Keeps the WAL segment if it is a newer
                # than the given backup (the first available)
                if backup_info and backup_info.begin_wal is not None:
                    keep |= wal_info.name >= backup_info.begin_wal
                # If the file has to be kept write it in the new xlogdb
                # otherwise delete it and record it in the removed list
                if keep:
                    fxlogdb_new.write(wal_info.to_xlogdb_line())
                else:
                    self.delete_wal(wal_info)
                    removed.append(wal_info.name)
            fxlogdb_new.flush()
            fxlogdb_new.seek(0)
            # Overwrite the original xlogdb in place with the filtered
            # content, truncating any leftover tail
            fxlogdb.seek(0)
            shutil.copyfileobj(fxlogdb_new, fxlogdb)
            fxlogdb.truncate()
    return removed
def remove_wal_before_backup(self, backup_info, timelines_to_protect=None):
    """
    Remove WAL files which have been archived before the start of
    the provided backup.

    If no backup_info is provided delete all available WAL files

    If timelines_to_protect list is passed, never remove a wal in one of
    these timelines.

    :param BackupInfo|None backup_info: the backup information structure
    :param set timelines_to_protect: optional list of timelines
        to protect
    :return list: a list of removed WAL files
    """
    removed = []
    with self.server.xlogdb() as fxlogdb:
        xlogdb_new = fxlogdb.name + ".new"
        with open(xlogdb_new, 'w') as fxlogdb_new:
            for line in fxlogdb:
                wal_info = WalFileInfo.from_xlogdb_line(line)
                if not xlog.is_any_xlog_file(wal_info.name):
                    output.error(
                        "invalid xlog segment name %r\n"
                        "HINT: Please run \"barman rebuild-xlogdb %s\" "
                        "to solve this issue",
                        wal_info.name, self.config.name)
                    continue
                # Keeps the WAL segment if it is a history file
                keep = xlog.is_history_file(wal_info.name)
                # Keeps the WAL segment if its timeline is in
                # `timelines_to_protect`
                if timelines_to_protect:
                    tli, _, _ = xlog.decode_segment_name(wal_info.name)
                    keep |= tli in timelines_to_protect
                # Keeps the WAL segment if it is a newer
                # than the given backup (the first available).
                # Fix: guard against begin_wal being None, since
                # comparing a str with None raises TypeError on
                # Python 3 (the original tested only `if backup_info`)
                if backup_info and backup_info.begin_wal is not None:
                    keep |= wal_info.name >= backup_info.begin_wal
                # If the file has to be kept write it in the new xlogdb
                # otherwise delete it and record it in the removed list
                if keep:
                    fxlogdb_new.write(wal_info.to_xlogdb_line())
                else:
                    self.delete_wal(wal_info)
                    removed.append(wal_info.name)
            # Make sure the new xlogdb hits the disk before the rename
            fxlogdb_new.flush()
            os.fsync(fxlogdb_new.fileno())
        # Atomically replace the old xlogdb and persist the rename
        shutil.move(xlogdb_new, fxlogdb.name)
        fsync_dir(os.path.dirname(fxlogdb.name))
    return removed
def get_wal_until_next_backup(self, backup):
    '''
    Get the xlog files between backup and the next

    :param backup: a backup object, the starting point to retrieve wals
    :return: iterator of (name, size) tuples for every WAL segment on
        the backup's timeline up to the next backup's end_wal (if any)
    '''
    begin = backup.begin_wal
    # Resolve the next backup once instead of querying the catalogue
    # twice for the same backup_id as the original code did
    next_backup = self.get_next_backup(backup.backup_id)
    next_end = next_backup.end_wal if next_backup else None
    backup_tli, _, _ = xlog.decode_segment_name(begin)
    with self.xlogdb() as fxlogdb:
        for line in fxlogdb:
            name, size, _, _ = self.xlogdb_parse_line(line)
            # Too old: archived before the backup started
            if name < begin:
                continue
            # On a later timeline than the backup
            tli, _, _ = xlog.decode_segment_name(name)
            if tli > backup_tli:
                continue
            # Not a plain WAL segment (.history / .backup files)
            if not xlog.is_wal_file(name):
                continue
            # Past the end of the next backup: nothing more to report
            if next_end and name > next_end:
                break
            yield (name, size)
def stop_backup(self, backup_info):
    """
    Stop backup wrapper

    :param barman.infofile.BackupInfo backup_info: backup_info object
    """
    if BackupOptions.CONCURRENT_BACKUP not in self.config.backup_options:
        # Exclusive backup: close it through native pg_stop_backup()
        stop_row = self.pg_stop_backup()
        if stop_row:
            stop_xlog, stop_file_name, stop_file_offset, stop_time = \
                stop_row
            backup_info.set_attribute('end_time', stop_time)
            backup_info.set_attribute('end_xlog', stop_xlog)
            backup_info.set_attribute('end_wal', stop_file_name)
            backup_info.set_attribute('end_offset', stop_file_offset)
        else:
            raise Exception('Cannot terminate exclusive backup. You might '
                            'have to manually execute pg_stop_backup() on '
                            'your PostgreSQL server')
    else:
        # Concurrent backup: close it through the pgespresso extension
        stop_row = self.pgespresso_stop_backup(backup_info.backup_label)
        if stop_row:
            end_wal, stop_time = stop_row
            # decoded_segment is [timeline, log, segment]
            decoded_segment = xlog.decode_segment_name(end_wal)
            backup_info.set_attribute('end_time', stop_time)
            # pgespresso only reports the final WAL file name, so
            # rebuild an end_xlog location pointing at the start of
            # the following segment (1 << 24 bytes per segment)
            backup_info.set_attribute('end_xlog',
                                      "%X/%X" % (decoded_segment[1],
                                                 (decoded_segment[
                                                     2] + 1) << 24))
            backup_info.set_attribute('end_wal', end_wal)
            backup_info.set_attribute('end_offset', 0)
        else:
            # Fix: the extension's abort function is named
            # pgespresso_abort_backup(), not pg_espresso_abort_backup()
            # as the original message claimed
            raise Exception('Cannot terminate exclusive backup. You might '
                            'have to manually execute '
                            'pgespresso_abort_backup() on your PostgreSQL '
                            'server')
def test_decode_segment_name(self):
    """Exercise xlog.decode_segment_name on valid and invalid names."""
    # Valid names decode to a [timeline, log, segment] triple
    expectations = [
        ('000000000000000000000000', [0, 0, 0]),
        ('000000010000000100000001', [1, 1, 1]),
        ('0000000A0000000A0000000A', [10, 10, 10]),
        ('000000110000001100000011', [17, 17, 17]),
        ('000000000000000200000001', [0, 2, 1]),
        ('000000010000000000000002', [1, 0, 2]),
        ('000000020000000100000000', [2, 1, 0]),
        ('00000001000000000000000A.00000020.backup', [1, 0, 10]),
        ('00000001.history', [1, None, None]),
    ]
    for segment, decoded in expectations:
        assert xlog.decode_segment_name(segment) == decoded
    # Truncated, overlong or non-hex names raise BadXlogSegmentName
    for invalid in ('00000000000000000000000',
                    '0000000000000000000000000',
                    '000000000000X00000000000'):
        with pytest.raises(barman.exceptions.BadXlogSegmentName):
            xlog.decode_segment_name(invalid)
def _remove_wals_for_backup(
    cloud_interface,
    catalog,
    deleted_backup,
    dry_run,
    skip_wal_cleanup_if_standalone=True,
):
    """
    Delete from cloud storage the WAL files made unnecessary by the
    deletion of a backup.

    :param cloud_interface: client used to delete objects from the cloud
        (must expose delete_objects)
    :param catalog: catalog of backups and WALs; also acts as the
        keep-manager for should_remove_wals
    :param deleted_backup: the backup that has just been deleted
    :param bool dry_run: if True only print what would be deleted
    :param bool skip_wal_cleanup_if_standalone: forwarded to
        BackupManager.should_remove_wals
    """
    # An implementation of BackupManager.remove_wal_before_backup which does not
    # use xlogdb, since xlogdb is not available to barman-cloud
    should_remove_wals, wal_ranges_to_protect = BackupManager.should_remove_wals(
        deleted_backup,
        catalog.get_backup_list(),
        keep_manager=catalog,
        skip_wal_cleanup_if_standalone=skip_wal_cleanup_if_standalone,
    )
    next_backup = BackupManager.find_next_backup_in(
        catalog.get_backup_list(), deleted_backup.backup_id
    )
    # Maps WAL name -> cloud path for every WAL selected for deletion
    wals_to_delete = {}
    if should_remove_wals:
        # There is no previous backup or all previous backups are archival
        # standalone backups, so we can remove unused WALs (those WALs not
        # required by standalone archival backups).
        # If there is a next backup then all unused WALs up to the begin_wal
        # of the next backup can be removed.
        # If there is no next backup then there are no remaining backups,
        # because we must assume non-exclusive backups are taken, we can only
        # safely delete unused WALs up to begin_wal of the deleted backup.
        # See comments in barman.backup.BackupManager.delete_backup.
        if next_backup:
            remove_until = next_backup
        else:
            remove_until = deleted_backup
        # A WAL is only a candidate for deletion if it is on the same timeline so we
        # use BackupManager to get a set of all other timelines with backups so that
        # we can preserve all WALs on other timelines.
        timelines_to_protect = BackupManager.get_timelines_to_protect(
            remove_until=remove_until,
            deleted_backup=deleted_backup,
            available_backups=catalog.get_backup_list(),
        )
        try:
            wal_paths = catalog.get_wal_paths()
        except Exception as exc:
            # Listing failed: give up cleanup rather than risk deleting
            # the wrong objects
            logging.error(
                "Cannot clean up WALs for backup %s because an error occurred listing WALs: %s",
                deleted_backup.backup_id,
                force_str(exc),
            )
            return
        for wal_name, wal in wal_paths.items():
            # History files are never deleted
            if xlog.is_history_file(wal_name):
                continue
            # Preserve WALs belonging to other backed-up timelines
            if timelines_to_protect:
                tli, _, _ = xlog.decode_segment_name(wal_name)
                if tli in timelines_to_protect:
                    continue
            # Check if the WAL is in a protected range, required by an archival
            # standalone backup - so do not delete it
            if xlog.is_backup_file(wal_name):
                # If we have a backup file, truncate the name for the range check
                range_check_wal_name = wal_name[:24]
            else:
                range_check_wal_name = wal_name
            if any(
                range_check_wal_name >= begin_wal and range_check_wal_name <= end_wal
                for begin_wal, end_wal in wal_ranges_to_protect
            ):
                continue
            # Only WALs strictly before the chosen boundary are removable
            if wal_name < remove_until.begin_wal:
                wals_to_delete[wal_name] = wal
    # Explicitly sort because dicts are not ordered in python < 3.6
    wal_paths_to_delete = sorted(wals_to_delete.values())
    if len(wal_paths_to_delete) > 0:
        if not dry_run:
            try:
                cloud_interface.delete_objects(wal_paths_to_delete)
            except Exception as exc:
                logging.error(
                    "Could not delete the following WALs for backup %s: %s, Reason: %s",
                    deleted_backup.backup_id,
                    wal_paths_to_delete,
                    force_str(exc),
                )
                # Return early so that we leave the WALs in the local cache so they
                # can be cleaned up should there be a subsequent backup deletion.
                return
        else:
            print(
                "Skipping deletion of objects %s due to --dry-run option"
                % wal_paths_to_delete
            )
        # Deletion succeeded (or was skipped by --dry-run): drop the
        # entries from the local cache
        for wal_name in wals_to_delete.keys():
            catalog.remove_wal_from_cache(wal_name)
def test_decode_segment_name(self):
    """Check decoding of WAL names and rejection of malformed ones."""
    # Well-formed segment, .backup and .history names with their
    # expected [timeline, log, segment] decodings
    cases = {
        '000000000000000000000000': [0, 0, 0],
        '000000010000000100000001': [1, 1, 1],
        '0000000A0000000A0000000A': [10, 10, 10],
        '000000110000001100000011': [17, 17, 17],
        '000000000000000200000001': [0, 2, 1],
        '000000010000000000000002': [1, 0, 2],
        '000000020000000100000000': [2, 1, 0],
        '00000001000000000000000A.00000020.backup': [1, 0, 10],
        '00000001.history': [1, None, None],
    }
    for wal_name in cases:
        assert xlog.decode_segment_name(wal_name) == cases[wal_name]
    # Malformed names (bad length or non-hex digit) must be rejected
    bad_names = [
        '00000000000000000000000',
        '0000000000000000000000000',
        '000000000000X00000000000',
    ]
    for bad in bad_names:
        with pytest.raises(xlog.BadXlogSegmentName):
            xlog.decode_segment_name(bad)