def get_remote_status(self):
    """
    Get remote information on PostgreSQL using Ssh,
    such as last archived WAL file

    :rtype: dict(str,str|None)
    """
    remote_status = {}
    # Retrieve the last archived WAL using a Ssh connection on
    # the remote server and executing an 'ls' command. Only
    # for pre-9.4 versions of PostgreSQL.
    if self.server.postgres.server_version < 90400:
        # Default to None so "checked but nothing archived yet" is
        # distinguishable from "not applicable" (key absent)
        remote_status['last_archived_wal'] = None
        if self.server.postgres.get_setting('data_directory') and \
                self.server.postgres.get_setting('archive_command'):
            # TODO: replace with RemoteUnixCommand
            cmd = Command(self.ssh_command, self.ssh_options,
                          path=self.server.path)
            archive_dir = os.path.join(
                self.server.postgres.get_setting('data_directory'),
                'pg_xlog', 'archive_status')
            # BUGFIX: use 'ls -t' (newest first) instead of 'ls -tr'
            # (oldest first): the loop below stops at the FIRST '.done'
            # entry, so with '-tr' this reported the OLDEST archived
            # WAL rather than the last one
            out = str(cmd.getoutput('ls', '-t', archive_dir)[0])
            for line in out.splitlines():
                if line.endswith('.done'):
                    # Strip the '.done' suffix to get the WAL name
                    name = line[:-5]
                    if xlog.is_any_xlog_file(name):
                        remote_status['last_archived_wal'] = name
                        break
    return remote_status
def get_remote_status(self):
    """
    Retrieve the last archived WAL using a ssh connection on
    the remote server and executing an ls command.

    :rtype: dict
    """
    remote_status = {}
    with self.server.pg_connect():
        # Only pre-9.4 versions of PostgreSQL need the remote 'ls';
        # newer versions expose this information via SQL
        if self.server.server_version < 90400:
            remote_status['last_archived_wal'] = None
            if self.server.get_pg_setting('data_directory') and \
                    self.server.get_pg_setting('archive_command'):
                # TODO: replace with RemoteUnixCommand
                cmd = Command(self.ssh_command, self.ssh_options)
                archive_dir = os.path.join(
                    self.server.get_pg_setting('data_directory'),
                    'pg_xlog', 'archive_status')
                # BUGFIX: use 'ls -t' (newest first) instead of
                # 'ls -tr' (oldest first): the loop below stops at the
                # FIRST '.done' entry, so with '-tr' this reported the
                # OLDEST archived WAL rather than the last one
                out = str(cmd.getoutput('ls', '-t', archive_dir)[0])
                for line in out.splitlines():
                    if line.endswith('.done'):
                        # Strip the '.done' suffix to get the WAL name
                        name = line[:-5]
                        if xlog.is_any_xlog_file(name):
                            remote_status['last_archived_wal'] = name
                            break
    return remote_status
def get_next_batch(self):
    """
    Return the next batch of WAL files archived through PostgreSQL's
    'archive_command' (collected from the 'incoming' directory).

    :return: WalArchiverBatch: list of WAL files
    """
    # Sorted listing of every entry in the incoming directory
    file_names = sorted(
        glob(os.path.join(self.config.incoming_wals_directory, '*')))

    # Partition the entries: valid WAL files on one side, anything
    # else is treated as an error/anomaly
    files = []
    errors = []
    for candidate in file_names:
        is_wal = (xlog.is_any_xlog_file(candidate) and
                  os.path.isfile(candidate))
        (files if is_wal else errors).append(candidate)

    # Wrap every valid file in a WalFileInfo object
    wal_files = [WalFileInfo.from_file(candidate) for candidate in files]
    return WalArchiverBatch(wal_files, errors=errors)
def get_next_batch(self):
    """
    Return the next batch of WAL files archived through PostgreSQL's
    'archive_command' (collected from the 'incoming' directory).

    :return: WalArchiverQueue: list of WAL files
    """
    # Batch size comes from configuration (0 means unlimited)
    batch_size = self.config.archiver_batch_size

    # Sorted listing of every entry in the incoming directory
    pattern = os.path.join(self.config.incoming_wals_directory, '*')
    file_names = sorted(glob(pattern))

    # Valid WAL files go to `files`; anything else is an anomaly
    files = []
    errors = []
    for candidate in file_names:
        if xlog.is_any_xlog_file(candidate) and os.path.isfile(candidate):
            files.append(candidate)
        else:
            errors.append(candidate)

    # Wrap every valid file in a WalFileInfo object
    wal_files = [WalFileInfo.from_file(candidate) for candidate in files]
    return WalArchiverQueue(wal_files,
                            batch_size=batch_size,
                            errors=errors)
def get_next_batch(self): """ Returns the next batch of WAL files that have been archived via streaming replication (in the 'streaming' directory) This method always leaves one file in the "streaming" directory, because the 'pg_receivexlog' process needs at least one file to detect the current streaming position after a restart. :return: WalArchiverQueue: list of WAL files """ # Get the batch size from configuration (0 = unlimited) batch_size = self.config.streaming_archiver_batch_size # List and sort all files in the incoming directory file_names = glob( os.path.join(self.config.streaming_wals_directory, '*')) file_names.sort() # Process anything that looks like a valid WAL file, # including partial ones and history files. # Anything else is treated like an error/anomaly files = [] skip = [] errors = [] for file_name in file_names: # Ignore temporary files if file_name.endswith('.tmp'): continue # If the file doesn't exist, it has been renamed/removed while # we were reading the directory. Ignore it. if not os.path.exists(file_name): continue if not os.path.isfile(file_name): errors.append(file_name) elif xlog.is_partial_file(file_name): skip.append(file_name) elif xlog.is_any_xlog_file(file_name): files.append(file_name) else: errors.append(file_name) # In case of more than a partial file, keep the last # and treat the rest as normal files if len(skip) > 1: partials = skip[:-1] _logger.info('Archiving partial files for server %s: %s' % (self.config.name, ", ".join( [os.path.basename(f) for f in partials]))) files.extend(partials) skip = skip[-1:] # Keep the last full WAL file in case no partial file is present elif len(skip) == 0 and files: skip.append(files.pop()) # Build the list of WalFileInfo wal_files = [WalFileInfo.from_file(f, compression=None) for f in files] return WalArchiverQueue(wal_files, batch_size=batch_size, errors=errors, skip=skip)
def get_next_batch(self):
    """
    Return the next batch of WAL files archived through PostgreSQL's
    'archive_command' (collected from the 'incoming' directory).

    :return: WalArchiverQueue: list of WAL files
    """
    # Batch size comes from configuration (0 means unlimited)
    batch_size = self.config.archiver_batch_size

    # Sorted listing of the incoming directory.
    # IMPORTANT: sorting guarantees the WAL stream we process is
    # monotonically increasing, which lets us verify that a backup
    # has every WAL required for a restore.
    file_names = sorted(glob(
        os.path.join(self.config.incoming_wals_directory, '*')))

    # Partition entries into valid WAL files and anomalies,
    # silently skipping temporary files
    files = []
    errors = []
    for candidate in file_names:
        # Ignore temporary files
        if candidate.endswith('.tmp'):
            continue
        if xlog.is_any_xlog_file(candidate) and os.path.isfile(candidate):
            files.append(candidate)
        else:
            errors.append(candidate)

    # Wrap every valid file in a WalFileInfo object
    wal_files = [WalFileInfo.from_file(candidate) for candidate in files]
    return WalArchiverQueue(wal_files,
                            batch_size=batch_size,
                            errors=errors)
def remove_wal_before_backup(self, backup_info, timelines_to_protect=None):
    """
    Remove WAL files which have been archived before the start of
    the provided backup. If no backup_info is provided delete all
    available WAL files

    If timelines_to_protect list is passed, never remove a wal in one of
    these timelines.

    :param BackupInfo|None backup_info: the backup information structure
    :param set timelines_to_protect: optional list of timelines
        to protect
    :return list: a list of removed WAL files
    """
    removed = []
    with self.server.xlogdb("r+") as fxlogdb:
        xlogdb_dir = os.path.dirname(fxlogdb.name)
        # Build the filtered xlogdb in a temporary file created in the
        # same directory, then copy it back over the original below
        with tempfile.TemporaryFile(mode="w+", dir=xlogdb_dir) as fxlogdb_new:
            for line in fxlogdb:
                wal_info = WalFileInfo.from_xlogdb_line(line)
                if not xlog.is_any_xlog_file(wal_info.name):
                    # Corrupt entry: report it and drop it from the
                    # rebuilt xlogdb without deleting anything on disk
                    output.error(
                        "invalid WAL segment name %r\n"
                        'HINT: Please run "barman rebuild-xlogdb %s" '
                        "to solve this issue",
                        wal_info.name,
                        self.config.name,
                    )
                    continue

                # Keeps the WAL segment if it is a history file
                keep = xlog.is_history_file(wal_info.name)

                # Keeps the WAL segment if its timeline is in
                # `timelines_to_protect`
                if timelines_to_protect:
                    tli, _, _ = xlog.decode_segment_name(wal_info.name)
                    keep |= tli in timelines_to_protect

                # Keeps the WAL segment if it is a newer
                # than the given backup (the first available).
                # The `is not None` guard avoids comparing str with None.
                if backup_info and backup_info.begin_wal is not None:
                    keep |= wal_info.name >= backup_info.begin_wal

                # If the file has to be kept write it in the new xlogdb
                # otherwise delete it and record it in the removed list
                if keep:
                    fxlogdb_new.write(wal_info.to_xlogdb_line())
                else:
                    self.delete_wal(wal_info)
                    removed.append(wal_info.name)
            # Rewind both files and overwrite the original xlogdb with
            # the filtered content, truncating any leftover tail
            fxlogdb_new.flush()
            fxlogdb_new.seek(0)
            fxlogdb.seek(0)
            shutil.copyfileobj(fxlogdb_new, fxlogdb)
            fxlogdb.truncate()
    return removed
def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)

    # Read wal_path from environment if we're a hook script
    if __is_hook_script():
        if "BARMAN_FILE" not in os.environ:
            raise BarmanException(
                "Expected environment variable BARMAN_FILE not set")
        config.wal_path = os.getenv("BARMAN_FILE")
    else:
        # Not a hook script: wal_path must come from the command line
        if config.wal_path is None:
            raise BarmanException(
                "the following arguments are required: wal_path")

    # Validate the WAL file name before uploading it
    if not is_any_xlog_file(config.wal_path):
        logging.error("%s is an invalid name for a WAL file" % config.wal_path)
        raise CLIErrorExit()

    try:
        cloud_interface = get_cloud_interface(config)

        # closing() guarantees the cloud interface is released even on
        # error paths inside the block
        with closing(cloud_interface):
            uploader = CloudWalUploader(
                cloud_interface=cloud_interface,
                server_name=config.server_name,
                compression=config.compression,
            )

            if not cloud_interface.test_connectivity():
                raise NetworkErrorExit()
            # If test is requested, just exit after connectivity test
            elif config.test:
                raise SystemExit(0)

            # TODO: Should the setup be optional?
            cloud_interface.setup_bucket()
            upload_kwargs = {}
            # History files may carry dedicated tags (see config)
            if is_history_file(config.wal_path):
                upload_kwargs["override_tags"] = config.history_tags
            uploader.upload_wal(config.wal_path, **upload_kwargs)
    except Exception as exc:
        # Top-level boundary: log and convert to a generic error exit.
        # Note: SystemExit raised above derives from BaseException and
        # is NOT caught here.
        logging.error("Barman cloud WAL archiver exception: %s",
                      force_str(exc))
        logging.debug("Exception details:", exc_info=exc)
        raise GeneralErrorExit()
def remove_wal_before_backup(self, backup_info, timelines_to_protect=None):
    """
    Remove WAL files which have been archived before the start of
    the provided backup. If no backup_info is provided delete all
    available WAL files

    If timelines_to_protect list is passed, never remove a wal in one of
    these timelines.

    :param BackupInfo|None backup_info: the backup information structure
    :param set timelines_to_protect: optional list of timelines
        to protect
    :return list: a list of removed WAL files
    """
    removed = []
    with self.server.xlogdb() as fxlogdb:
        # Rebuild the xlogdb in a sibling file and atomically move it
        # over the original once it has been fsynced
        xlogdb_new = fxlogdb.name + ".new"
        with open(xlogdb_new, 'w') as fxlogdb_new:
            for line in fxlogdb:
                wal_info = WalFileInfo.from_xlogdb_line(line)
                if not xlog.is_any_xlog_file(wal_info.name):
                    output.error(
                        "invalid xlog segment name %r\n"
                        "HINT: Please run \"barman rebuild-xlogdb %s\" "
                        "to solve this issue",
                        wal_info.name, self.config.name)
                    continue

                # Keeps the WAL segment if it is a history file
                keep = xlog.is_history_file(wal_info.name)

                # Keeps the WAL segment if its timeline is in
                # `timelines_to_protect`
                if timelines_to_protect:
                    tli, _, _ = xlog.decode_segment_name(wal_info.name)
                    keep |= tli in timelines_to_protect

                # Keeps the WAL segment if it is a newer
                # than the given backup (the first available).
                # BUGFIX: also require begin_wal to be set; comparing
                # a str with None raises TypeError on Python 3
                if backup_info and backup_info.begin_wal is not None:
                    keep |= wal_info.name >= backup_info.begin_wal

                # If the file has to be kept write it in the new xlogdb
                # otherwise delete it and record it in the removed list
                if keep:
                    fxlogdb_new.write(wal_info.to_xlogdb_line())
                else:
                    self.delete_wal(wal_info)
                    removed.append(wal_info.name)
            fxlogdb_new.flush()
            os.fsync(fxlogdb_new.fileno())
        shutil.move(xlogdb_new, fxlogdb.name)
        fsync_dir(os.path.dirname(fxlogdb.name))
    return removed
def test_partial_file(self):
    # Names that must be recognised as partial WAL files,
    # with or without a directory prefix
    for name in ("00000001000000000000000A.partial",
                 "test/00000001000000000000000A.partial"):
        assert xlog.is_partial_file(name)
    # Names that must NOT be recognised as partial WAL files
    for name in ("000000000000000200000001",
                 "00000002.history",
                 "00000000000000000000000.partial",
                 "0000000000000000000000000.partial",
                 "000000000000X00000000000.partial",
                 "00000001000000000000000A.00000020.partial",
                 "00000001.partial"):
        assert not xlog.is_partial_file(name)
    # A dotted prefix invalidates the name for every xlog category
    assert not xlog.is_any_xlog_file("test.00000001000000000000000A.partial")
def test_is_wal_file(self):
    # Valid WAL segment names, optionally with a directory prefix
    for name in ('000000000000000200000001',
                 'test/000000000000000200000001'):
        assert xlog.is_wal_file(name)
    # Names that are not plain WAL segments
    for name in ('00000001000000000000000A.00000020.backup',
                 '00000002.history',
                 '00000000000000000000000',
                 '0000000000000000000000000',
                 '000000000000X00000000000',
                 '00000001000000000000000A.backup',
                 '00000001000000000000000A.history'):
        assert not xlog.is_wal_file(name)
    # A dotted prefix invalidates the name for every xlog category
    assert not xlog.is_any_xlog_file(
        'test.00000001000000000000000A.00000020.backup')
def fetch_remote_status(self):
    """
    Returns the status of the FileWalArchiver.

    This method does not raise any exception in case of errors,
    but set the missing values to None in the resulting dictionary.

    :rtype: dict[str, None|str]
    """
    result = dict.fromkeys(
        ['archive_mode', 'archive_command'], None)
    postgres = self.server.postgres
    # If Postgres is not available we cannot detect anything
    if not postgres:
        return result
    # Query the database for 'archive_mode' and 'archive_command'
    result['archive_mode'] = postgres.get_setting('archive_mode')
    result['archive_command'] = postgres.get_setting('archive_command')
    # Retrieve the last archived WAL using a Ssh connection on
    # the remote server and executing an 'ls' command. Only
    # for pre-9.4 versions of PostgreSQL.
    try:
        # `postgres` is already known to be set (early return above),
        # so use the local consistently instead of re-checking
        # self.server.postgres
        if postgres.server_version < 90400:
            result['last_archived_wal'] = None
            if postgres.get_setting('data_directory') and \
                    postgres.get_setting('archive_command') and \
                    self.ssh_command:
                cmd = UnixRemoteCommand(self.ssh_command,
                                        self.ssh_options,
                                        path=self.server.path)
                archive_dir = os.path.join(
                    postgres.get_setting('data_directory'),
                    'pg_xlog', 'archive_status')
                # 'ls -t' lists newest first, so the first '.done'
                # entry is the most recently archived WAL
                out = str(cmd.list_dir_content(archive_dir, ['-t']))
                for line in out.splitlines():
                    if line.endswith('.done'):
                        # Strip the '.done' suffix to get the WAL name
                        name = line[:-5]
                        if xlog.is_any_xlog_file(name):
                            result['last_archived_wal'] = name
                            break
    except (PostgresConnectionError, FsOperationFailed) as e:
        # Logger.warn is deprecated: use warning()
        _logger.warning("Error retrieving PostgreSQL status: %s", e)
    # Add pg_stat_archiver statistics if the view is supported
    pg_stat_archiver = postgres.get_archiver_stats()
    if pg_stat_archiver is not None:
        result.update(pg_stat_archiver)
    return result
def test_partial_file(self):
    # Recognised partial WAL files (with or without directory prefix)
    assert xlog.is_partial_file('00000001000000000000000A.partial')
    assert xlog.is_partial_file('test/00000001000000000000000A.partial')
    # Everything else must be rejected
    rejected = ('000000000000000200000001',
                '00000002.history',
                '00000000000000000000000.partial',
                '0000000000000000000000000.partial',
                '000000000000X00000000000.partial',
                '00000001000000000000000A.00000020.partial',
                '00000001.partial')
    for name in rejected:
        assert not xlog.is_partial_file(name)
    # Dotted prefixes invalidate the name for every xlog category
    assert not xlog.is_any_xlog_file(
        'test.00000001000000000000000A.partial')
def test_is_wal_file(self):
    # Names that must be recognised as plain WAL segments
    accepted = ["000000000000000200000001",
                "test/000000000000000200000001"]
    for name in accepted:
        assert xlog.is_wal_file(name)
    # Names that must NOT be recognised as plain WAL segments
    rejected = ["00000001000000000000000A.00000020.backup",
                "00000002.history",
                "00000000000000000000000",
                "0000000000000000000000000",
                "000000000000X00000000000",
                "00000001000000000000000A.backup",
                "00000001000000000000000A.history",
                "00000001000000000000000A.partial"]
    for name in rejected:
        assert not xlog.is_wal_file(name)
    # A dotted prefix invalidates the name for every xlog category
    assert not xlog.is_any_xlog_file(
        "test.00000001000000000000000A.00000020.backup")
def test_is_wal_file(self):
    # Plain WAL segments are accepted, with or without a path prefix
    assert xlog.is_wal_file('000000000000000200000001')
    assert xlog.is_wal_file('test/000000000000000200000001')
    # Backup labels, history files, malformed names, and partial
    # segments are all rejected
    for name in ('00000001000000000000000A.00000020.backup',
                 '00000002.history',
                 '00000000000000000000000',
                 '0000000000000000000000000',
                 '000000000000X00000000000',
                 '00000001000000000000000A.backup',
                 '00000001000000000000000A.history',
                 '00000001000000000000000A.partial'):
        assert not xlog.is_wal_file(name)
    # A dotted prefix invalidates the name for every xlog category
    assert not xlog.is_any_xlog_file(
        'test.00000001000000000000000A.00000020.backup')
def get_next_batch(self):
    """
    Returns the next batch of WAL files that have been archived via
    streaming replication (in the 'streaming' directory)

    This method always leaves one file in the "streaming" directory,
    because the 'pg_receivexlog' process needs at least one file to
    detect the current streaming position after a restart.

    :return: WalArchiverQueue: list of WAL files
    """
    # Get the batch size from configuration (0 = unlimited)
    batch_size = self.config.streaming_archiver_batch_size
    # List and sort all files in the incoming directory
    file_names = glob(os.path.join(
        self.config.streaming_wals_directory, '*'))
    file_names.sort()

    # Process anything that looks like a valid WAL file,
    # including partial ones and history files.
    # Anything else is treated like an error/anomaly
    files = []
    skip = []
    errors = []
    for file_name in file_names:
        if not os.path.isfile(file_name):
            errors.append(file_name)
        elif xlog.is_partial_file(file_name):
            skip.append(file_name)
        elif xlog.is_any_xlog_file(file_name):
            files.append(file_name)
        else:
            errors.append(file_name)

    # In case of more than a partial file, keep the last
    # and treat the rest as errors
    if len(skip) > 1:
        # BUGFIX: log only the surplus partial files; the previous
        # code joined the whole `errors` list, which already contained
        # unrelated anomalies collected above
        partials = skip[:-1]
        _logger.warning('Multiple partial files found for server %s: %s'
                        % (self.config.name,
                           ", ".join([os.path.basename(f)
                                      for f in partials])))
        errors.extend(partials)
        skip = skip[-1:]
    # Keep the last full WAL file in case no partial file is present
    elif len(skip) == 0 and files:
        skip.append(files.pop())

    # Build the list of WalFileInfo
    wal_files = [WalFileInfo.from_file(f, compression=None)
                 for f in files]
    return WalArchiverQueue(wal_files,
                            batch_size=batch_size,
                            errors=errors,
                            skip=skip)
def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging(config)

    # Validate the WAL file name before downloading it
    if not is_any_xlog_file(config.wal_name):
        logging.error('%s is an invalid name for a WAL file'
                      % config.wal_name)
        raise SystemExit(1)

    try:
        cloud_interface = CloudInterface(
            url=config.source_url,
            encryption=config.encryption,
            profile_name=config.profile,
            endpoint_url=config.endpoint_url)

        # closing() guarantees the cloud interface is released even on
        # error paths inside the block
        with closing(cloud_interface):
            downloader = S3WalDownloader(
                cloud_interface=cloud_interface,
                server_name=config.server_name)

            if not cloud_interface.test_connectivity():
                raise SystemExit(1)
            # If test is requested, just exit after connectivity test
            elif config.test:
                raise SystemExit(0)

            # A missing bucket means there is nothing to download
            if not cloud_interface.bucket_exists:
                logging.error("Bucket %s does not exist",
                              cloud_interface.bucket_name)
                raise SystemExit(1)

            downloader.download_wal(config.wal_name, config.wal_dest)
    except Exception as exc:
        # Top-level boundary: log and exit non-zero so the caller
        # (e.g. a restore_command) sees the failure.
        # Note: SystemExit raised above derives from BaseException and
        # is NOT caught here.
        logging.error("Barman cloud WAL restore exception: %s",
                      force_str(exc))
        logging.debug('Exception details:', exc_info=exc)
        raise SystemExit(1)
def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging()

    # Validate the WAL file name before uploading it
    if not is_any_xlog_file(config.wal_path):
        logging.error('%s is an invalid name for a WAL file'
                      % config.wal_path)
        raise SystemExit(1)

    try:
        cloud_interface = CloudInterface(
            destination_url=config.destination_url,
            encryption=config.encryption,
            profile_name=config.profile)

        # closing() guarantees the cloud interface is released even on
        # error paths inside the block
        with closing(cloud_interface):
            uploader = S3WalUploader(
                cloud_interface=cloud_interface,
                server_name=config.server_name,
                compression=config.compression)

            if not cloud_interface.test_connectivity():
                raise SystemExit(1)
            # If test is requested, just exit after connectivity test
            elif config.test:
                raise SystemExit(0)

            # TODO: Should the setup be optional?
            cloud_interface.setup_bucket()
            uploader.upload_wal(config.wal_path)
    except Exception as exc:
        # Top-level boundary: log and exit non-zero so the caller
        # (e.g. an archive_command) sees the failure.
        # Note: SystemExit raised above derives from BaseException and
        # is NOT caught here.
        logging.error("Barman cloud WAL archiver exception: %s",
                      force_str(exc))
        logging.debug('Exception details:', exc_info=exc)
        raise SystemExit(1)
def fetch_remote_status(self):
    """
    Get remote information on PostgreSQL using Ssh, such as
    last archived WAL file

    This method does not raise any exception in case of errors,
    but set the missing values to None in the resulting dictionary.

    :rtype: dict[str, None|str]
    """
    remote_status = {}
    # Retrieve the last archived WAL using a Ssh connection on
    # the remote server and executing an 'ls' command. Only
    # for pre-9.4 versions of PostgreSQL.
    try:
        if self.server.postgres and \
                self.server.postgres.server_version < 90400:
            remote_status['last_archived_wal'] = None
            if self.server.postgres.get_setting('data_directory') and \
                    self.server.postgres.get_setting('archive_command'):
                # TODO: replace with RemoteUnixCommand
                # The Command can raise OSError
                # if self.ssh_command does not exist.
                cmd = Command(self.ssh_command, self.ssh_options,
                              path=self.server.path)
                archive_dir = os.path.join(
                    self.server.postgres.get_setting('data_directory'),
                    'pg_xlog', 'archive_status')
                # 'ls -t' lists newest first, so the first '.done'
                # entry is the most recently archived WAL
                out = str(cmd.getoutput('ls', '-t', archive_dir)[0])
                for line in out.splitlines():
                    if line.endswith('.done'):
                        # Strip the '.done' suffix to get the WAL name
                        name = line[:-5]
                        if xlog.is_any_xlog_file(name):
                            remote_status['last_archived_wal'] = name
                            break
    except (PostgresConnectionError, OSError) as e:
        # Logger.warn is deprecated: use warning()
        _logger.warning("Error retrieving PostgreSQL status: %s", e)
    return remote_status
def remove_wal_before_backup(self, backup_info):
    """
    Remove WAL files which have been archived before the start of
    the provided backup. If no backup_info is provided delete all
    available WAL files

    :param BackupInfo|None backup_info: the backup information structure
    :return list: a list of removed WAL files
    """
    removed = []
    with self.server.xlogdb() as fxlogdb:
        # Rebuild the xlogdb in a sibling file and atomically move it
        # over the original once it has been fsynced
        xlogdb_new = fxlogdb.name + ".new"
        with open(xlogdb_new, 'w') as fxlogdb_new:
            for line in fxlogdb:
                wal_info = WalFileInfo.from_xlogdb_line(line)
                if not xlog.is_any_xlog_file(wal_info.name):
                    output.error(
                        "invalid xlog segment name %r\n"
                        "HINT: Please run \"barman rebuild-xlogdb %s\" "
                        "to solve this issue",
                        wal_info.name, self.config.name)
                    continue
                # Keeps the WAL segment if it is a history file or later
                # than the given backup (the first available).
                # BUGFIX: require begin_wal to be set; comparing a str
                # with None raises TypeError on Python 3
                if (xlog.is_history_file(wal_info.name) or
                        (backup_info and
                         backup_info.begin_wal is not None and
                         wal_info.name >= backup_info.begin_wal)):
                    fxlogdb_new.write(wal_info.to_xlogdb_line())
                else:
                    self.delete_wal(wal_info)
                    removed.append(wal_info.name)
            fxlogdb_new.flush()
            os.fsync(fxlogdb_new.fileno())
        shutil.move(xlogdb_new, fxlogdb.name)
        fsync_dir(os.path.dirname(fxlogdb.name))
    return removed
def main(args=None):
    """
    The main script entry point

    :param list[str] args: the raw arguments list. When not provided
        it defaults to sys.args[1:]
    """
    config = parse_arguments(args)
    configure_logging()

    # Validate the WAL file name before uploading it.
    # splitext drops a single trailing extension first — presumably to
    # tolerate a compression suffix on the file name; TODO confirm
    # against the callers.
    file_name = os.path.basename(config.wal_path)
    if not is_any_xlog_file(os.path.splitext(file_name)[0]):
        logging.error('%s is an invalid name for a WAL file'
                      % config.wal_path)
        raise SystemExit(1)

    try:
        cloud_interface = CloudInterface(
            destination_url=config.destination_url,
            encryption=config.encryption,
            profile_name=config.profile)
        uploader = S3WalUploader(cloud_interface=cloud_interface,
                                 server_name=config.server_name,
                                 compression=config.compression)
        # If test is requested just test connectivity and exit
        if config.test:
            if cloud_interface.test_connectivity():
                raise SystemExit(0)
            raise SystemExit(1)
        cloud_interface.setup_bucket()
        uploader.upload_wal(config.wal_path)
    except Exception as ex:
        # Top-level boundary: log and exit non-zero so the caller
        # (e.g. an archive_command) sees the failure.
        # Note: SystemExit raised above derives from BaseException and
        # is NOT caught here.
        logging.error("Barman cloud WAL archiver exception: %s", ex)
        raise SystemExit(1)
def download_wal(self, wal_name, wal_dest):
    """
    Download a WAL file from cloud storage

    :param str wal_name: Name of the WAL file
    :param str wal_dest: Full path of the destination WAL file
    """
    # Correctly format the source path on s3
    source_dir = os.path.join(self.cloud_interface.path,
                              self.server_name,
                              "wals",
                              hash_dir(wal_name))
    # Add a path separator if needed
    if not source_dir.endswith(os.path.sep):
        source_dir += os.path.sep

    wal_path = os.path.join(source_dir, wal_name)

    remote_name = None
    # Automatically detect compression based on the file extension
    compression = None
    for item in self.cloud_interface.list_bucket(source_dir):
        # perfect match (uncompressed file)
        if item == wal_path:
            remote_name = item
            # BUGFIX: an exact match carries no compression extension;
            # clear any value left over from a previously examined
            # (and rejected) compressed candidate
            compression = None
        # look for compressed files or .partial files
        elif item.startswith(wal_path):
            # BUGFIX: reset compression for each candidate so a stale
            # value from a rejected item is never applied to this one
            compression = None
            # Detect compression
            basename = item
            for e, c in ALLOWED_COMPRESSIONS.items():
                if item[-len(e):] == e:
                    # Strip extension
                    basename = basename[:-len(e)]
                    compression = c
                    break

            # Check basename is a known xlog file (.partial?)
            if not is_any_xlog_file(basename):
                logging.warning("Unknown WAL file: %s", item)
                continue
            # Exclude backup informative files (not needed in recovery)
            elif is_backup_file(basename):
                logging.info("Skipping backup file: %s", item)
                continue

            # Found candidate
            remote_name = item
            logging.info(
                "Found WAL %s for server %s as %s",
                wal_name,
                self.server_name,
                remote_name,
            )
            break

    if not remote_name:
        logging.info("WAL file %s for server %s does not exists",
                     wal_name, self.server_name)
        raise OperationErrorExit()

    if compression and sys.version_info < (3, 0, 0):
        raise BarmanException(
            "Compressed WALs cannot be restored with Python 2.x - "
            "please upgrade to a supported version of Python 3")

    # Download the file
    logging.debug(
        "Downloading %s to %s (%s)",
        remote_name,
        wal_dest,
        "decompressing " + compression if compression else "no compression",
    )
    self.cloud_interface.download_file(remote_name, wal_dest, compression)
def test_is_any_xlog_file(self):
    # Accepted: WAL segments, .backup labels, .partial segments and
    # history files — each with or without a directory prefix
    accepted = ['000000000000000200000001',
                'test1/000000000000000200000001',
                '00000001000000000000000A.00000020.backup',
                'test2/00000001000000000000000A.00000020.backup',
                '00000001000000000000000A.partial',
                'test2/00000001000000000000000A.partial',
                '00000002.history',
                'test3/00000002.history']
    for name in accepted:
        assert xlog.is_any_xlog_file(name)
    # Rejected: malformed segment names, bare/misplaced suffixes,
    # and dotted prefixes
    rejected = ['00000000000000000000000',
                '0000000000000000000000000',
                '000000000000X00000000000',
                '00000001000000000000000A.backup',
                'test.00000001000000000000000A.00000020.backup',
                'test.00000001000000000000000A.00000020.partial',
                '00000001000000000000000A.history']
    for name in rejected:
        assert not xlog.is_any_xlog_file(name)