def show(self):
    """
    Shows the server configuration
    """
    # Populate result map with all the required keys
    result = dict([
        (key, getattr(self.config, key))
        for key in self.config.KEYS
    ])
    remote_status = self.get_remote_status()
    result.update(remote_status)
    # Backup maximum age section
    if self.config.last_backup_maximum_age is not None:
        age = self.backup_manager.validate_last_backup_maximum_age(
            self.config.last_backup_maximum_age)
        # If the latest backup is within the limits of the
        # last_backup_maximum_age configuration, display how old
        # the latest backup is.
        if age[0]:
            msg = "%s (latest backup: %s)" % \
                (human_readable_timedelta(
                    self.config.last_backup_maximum_age),
                 age[1])
        else:
            # If the latest backup is outside the limits of the
            # last_backup_maximum_age configuration (or no backup
            # is available), warn the user.
            msg = "%s (WARNING! latest backup is %s old)" % \
                (human_readable_timedelta(
                    self.config.last_backup_maximum_age),
                 age[1])
        result['last_backup_maximum_age'] = msg
    else:
        result['last_backup_maximum_age'] = "None"
    output.result('show_server', self.config.name, result)
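
# Every snippet in this section renders durations through
# human_readable_timedelta(). A minimal, self-contained sketch of what
# such a helper could look like is shown below; the function name and
# rendering rules are assumptions for illustration only, not the actual
# barman.utils implementation.
import datetime

def human_readable_timedelta_sketch(delta):
    # Render a timedelta as e.g. '1 day, 2 hours' (hypothetical helper)
    seconds = int(delta.total_seconds())
    parts = []
    for name, length in (("day", 86400), ("hour", 3600),
                         ("minute", 60), ("second", 1)):
        value, seconds = divmod(seconds, length)
        if value:
            parts.append("%d %s%s" % (value, name,
                                      "s" if value != 1 else ""))
    return ", ".join(parts) or "less than one second"

print(human_readable_timedelta_sketch(datetime.timedelta(days=1, hours=2)))
# -> '1 day, 2 hours'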
def validate_last_backup_maximum_age(self, last_backup_maximum_age):
    """
    Evaluate the age of the last available backup in a catalogue.
    If the last backup is older than the specified time interval (age),
    the function returns False. If within the requested age interval,
    the function returns True.

    :param datetime.timedelta last_backup_maximum_age: time interval
        representing the maximum allowed age for the last backup
        in a server catalogue
    :return tuple: a tuple containing the boolean result of the check
        and auxiliary information about the current age of the last
        backup
    """
    # Get the ID of the last available backup
    backup_id = self.get_last_backup()
    if backup_id:
        # Get the backup object
        backup = BackupInfo(self.server, backup_id=backup_id)
        now = datetime.datetime.now(dateutil.tz.tzlocal())
        # Evaluate the point of validity
        validity_time = now - last_backup_maximum_age
        # Pretty print of a time interval (age)
        msg = human_readable_timedelta(now - backup.end_time)
        # If the backup end time is older than the point of validity,
        # return False, otherwise return True
        if backup.end_time < validity_time:
            return False, msg
        else:
            return True, msg
    else:
        # If no backup is available return False
        return False, "No available backups"
def validate_last_backup_maximum_age(self, last_backup_maximum_age):
    """
    Evaluate the age of the last available backup in a catalogue.
    If the last backup is older than the specified time interval (age),
    the function returns False. If within the requested age interval,
    the function returns True.

    :param datetime.timedelta last_backup_maximum_age: time interval
        representing the maximum allowed age for the last backup
        in a server catalogue
    :return tuple: a tuple containing the boolean result of the check
        and auxiliary information about the current age of the last
        backup
    """
    # Get the ID of the last available backup
    backup_id = self.get_last_backup_id()
    if backup_id:
        # Get the backup object
        backup = LocalBackupInfo(self.server, backup_id=backup_id)
        now = datetime.datetime.now(dateutil.tz.tzlocal())
        # Evaluate the point of validity
        validity_time = now - last_backup_maximum_age
        # Pretty print of a time interval (age)
        msg = human_readable_timedelta(now - backup.end_time)
        # If the backup end time is older than the point of validity,
        # return False, otherwise return True
        if backup.end_time < validity_time:
            return False, msg
        else:
            return True, msg
    else:
        # If no backup is available return False
        return False, "No available backups"
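
# Stand-alone illustration of the validity comparison performed above:
# a backup is considered valid when its end time falls after
# `now - last_backup_maximum_age`. All values here are hypothetical.
import datetime

now = datetime.datetime.now()
max_age = datetime.timedelta(hours=24)
backup_end_time = now - datetime.timedelta(hours=30)  # assumed backup age

validity_time = now - max_age
is_valid = backup_end_time >= validity_time
print(is_valid)  # False: a 30-hour-old backup exceeds the 24-hour limit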
def check_backup_validity(self, check_strategy):
    """
    Check if backup validity requirements are satisfied

    :param CheckStrategy check_strategy: the strategy for the management
        of the results of the various checks
    """
    # first check: check backup maximum age
    if self.config.last_backup_maximum_age is not None:
        # get maximum age information
        backup_age = self.backup_manager.validate_last_backup_maximum_age(
            self.config.last_backup_maximum_age)
        # format the output
        check_strategy.result(
            self.config.name, 'backup maximum age',
            backup_age[0],
            "interval provided: %s, latest backup age: %s" % (
                human_readable_timedelta(
                    self.config.last_backup_maximum_age),
                backup_age[1]))
    else:
        # no last_backup_maximum_age provided by the user
        check_strategy.result(
            self.config.name, 'backup maximum age', True,
            "no last_backup_maximum_age provided")
def check_backup_validity(self):
    """
    Check if backup validity requirements are satisfied
    """
    # first check: check backup maximum age
    if self.config.last_backup_maximum_age is not None:
        # get maximum age information
        backup_age = self.backup_manager.validate_last_backup_maximum_age(
            self.config.last_backup_maximum_age)
        # format the output
        output.result('check', self.config.name,
                      'backup maximum age',
                      backup_age[0],
                      "interval provided: %s, latest backup age: %s" %
                      (human_readable_timedelta(
                          self.config.last_backup_maximum_age),
                       backup_age[1]))
    else:
        # no last_backup_maximum_age provided by the user
        output.result('check', self.config.name,
                      'backup maximum age', True,
                      "no last_backup_maximum_age provided")
def _execute_job(self, job):
    """
    Execute a `_RsyncJob` in a worker process

    :type job: _RsyncJob
    """
    item = self.item_list[job.item_idx]
    if job.id is not None:
        bucket = 'bucket %s' % job.id
    else:
        bucket = 'global'
    # Build the rsync object required for the copy
    rsync = self._rsync_factory(item)
    # Store the start time
    job.copy_start_time = datetime.datetime.now()
    # Write in the log that the job is starting
    with _logger_lock:
        _logger.info(job.description, bucket, 'starting')
    if item.is_directory:
        # A directory item must always have checksum and file_list set
        assert job.file_list is not None, \
            'A directory item must not have a None `file_list` attribute'
        assert job.checksum is not None, \
            'A directory item must not have a None `checksum` attribute'
        # Generate a unique name for the file containing the list of files
        file_list_path = os.path.join(
            self.temp_dir, '%s_%s_%s.list' % (
                item.label,
                'check' if job.checksum else 'safe',
                os.getpid()))
        # Write the list, one path per line
        with open(file_list_path, 'w') as file_list:
            for entry in job.file_list:
                assert isinstance(entry, _FileItem), \
                    "expect %r to be a _FileItem" % entry
                file_list.write(entry.path + "\n")
        self._copy(rsync, item.src, item.dst,
                   file_list=file_list_path,
                   checksum=job.checksum)
    else:
        # A file item must never have checksum and file_list set
        assert job.file_list is None, \
            'A file item must have a None `file_list` attribute'
        assert job.checksum is None, \
            'A file item must have a None `checksum` attribute'
        rsync(item.src, item.dst, allowed_retval=(0, 23, 24))
        if rsync.ret == 23:
            if item.optional:
                _logger.warning("Ignoring error reading %s", item)
            else:
                raise CommandFailedException(
                    dict(ret=rsync.ret, out=rsync.out, err=rsync.err))
    # Store the stop time
    job.copy_end_time = datetime.datetime.now()
    # Write in the log that the job is finished
    with _logger_lock:
        _logger.info(
            job.description, bucket,
            'finished (duration: %s)' % human_readable_timedelta(
                job.copy_end_time - job.copy_start_time))
    # Return the job to the caller, for statistics purposes
    return job
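
# The directory branch above writes the paths to copy, one per line, and
# hands that list file to rsync. A minimal sketch of the same pattern
# using a plain subprocess call; the paths and hosts below are made up.
# rsync's --files-from option reads relative paths against the given
# source root.
import subprocess
import tempfile

paths = ["base/16384/PG_VERSION", "global/pg_control"]  # hypothetical
with tempfile.NamedTemporaryFile("w", suffix=".list",
                                 delete=False) as file_list:
    for path in paths:
        file_list.write(path + "\n")

subprocess.check_call([
    "rsync", "-a",
    "--files-from=%s" % file_list.name,
    "/var/lib/postgresql/data/",        # assumed source root
    "backup-host:/srv/barman/base/",    # assumed destination
])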
def backup(self):
    """
    Upload a Backup to S3
    """
    backup_info = BackupInfo(
        backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))
    backup_info.set_attribute("systemid", self.postgres.get_systemid())
    key_prefix = os.path.join(
        self.cloud_interface.path,
        self.server_name,
        'base',
        backup_info.backup_id
    )
    controller = S3UploadController(
        self.cloud_interface, key_prefix, self.compression)
    strategy = ConcurrentBackupStrategy(self.postgres)
    logging.info("Starting backup %s", backup_info.backup_id)
    strategy.start_backup(backup_info)
    try:
        self.backup_copy(controller, backup_info)
        logging.info("Stopping backup %s", backup_info.backup_id)
        strategy.stop_backup(backup_info)

        # Create a restore point after a backup
        target_name = 'barman_%s' % backup_info.backup_id
        self.postgres.create_restore_point(target_name)

        # Free the Postgres connection
        self.postgres.close()

        pgdata_stat = os.stat(backup_info.pgdata)
        controller.add_fileobj(
            label='backup_label',
            fileobj=BytesIO(backup_info.backup_label.encode('UTF-8')),
            dst='data',
            path='backup_label',
            uid=pgdata_stat.st_uid,
            gid=pgdata_stat.st_gid,
        )
        # Closing the controller will finalize all the running uploads
        controller.close()

        # Store the end time
        self.copy_end_time = datetime.datetime.now()

        # Store statistics about the copy
        backup_info.set_attribute("copy_stats", controller.statistics())

    # Use BaseException instead of Exception to catch events like
    # KeyboardInterrupt (e.g.: CTRL-C)
    except BaseException as exc:
        # Mark the backup as failed and exit
        self.handle_backup_errors("uploading data", backup_info, exc)
        raise SystemExit(1)
    finally:
        try:
            with BytesIO() as backup_info_file:
                backup_info.save(file_object=backup_info_file)
                backup_info_file.seek(0, os.SEEK_SET)
                key = os.path.join(controller.key_prefix, 'backup.info')
                logging.info("Uploading %s", key)
                self.cloud_interface.upload_fileobj(backup_info_file, key)
        except BaseException as exc:
            # Mark the backup as failed and exit
            self.handle_backup_errors("uploading backup.info file",
                                      backup_info, exc)
            raise SystemExit(1)
    logging.info("Backup end at LSN: %s (%s, %08X)",
                 backup_info.end_xlog,
                 backup_info.end_wal,
                 backup_info.end_offset)
    logging.info(
        "Backup completed (start time: %s, elapsed time: %s)",
        self.copy_start_time,
        human_readable_timedelta(
            datetime.datetime.now() - self.copy_start_time))
def backup(self, wait=False, wait_timeout=None):
    """
    Performs a backup for the server

    :param bool wait: wait for all the required WAL files to be archived
    :param int|None wait_timeout: the time, in seconds, to wait for the
        required WAL files to be archived before timing out
    :return BackupInfo: the generated BackupInfo
    """
    _logger.debug("initialising backup information")
    self.executor.init()
    backup_info = None
    try:
        # Create the BackupInfo object representing the backup
        backup_info = LocalBackupInfo(
            self.server,
            backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))
        backup_info.set_attribute('systemid', self.server.systemid)
        backup_info.save()
        self.backup_cache_add(backup_info)
        output.info("Starting backup using %s method for server %s in %s",
                    self.mode,
                    self.config.name,
                    backup_info.get_basebackup_directory())

        # Run the pre-backup-script if present.
        script = HookScriptRunner(self, 'backup_script', 'pre')
        script.env_from_backup_info(backup_info)
        script.run()

        # Run the pre-backup-retry-script if present.
        retry_script = RetryHookScriptRunner(self, 'backup_retry_script',
                                             'pre')
        retry_script.env_from_backup_info(backup_info)
        retry_script.run()

        # Do the backup using the BackupExecutor
        self.executor.backup(backup_info)

        # Create a restore point after a backup
        target_name = 'barman_%s' % backup_info.backup_id
        self.server.postgres.create_restore_point(target_name)

        # Free the Postgres connection
        self.server.postgres.close()

        # Compute backup size and fsync it on disk
        self.backup_fsync_and_set_sizes(backup_info)

        # Mark the backup as WAITING_FOR_WALS
        backup_info.set_attribute("status", BackupInfo.WAITING_FOR_WALS)
    # Use BaseException instead of Exception to catch events like
    # KeyboardInterrupt (e.g.: CTRL-C)
    except BaseException as e:
        msg_lines = force_str(e).strip().splitlines()
        # If the exception has no attached message use the raw
        # type name
        if len(msg_lines) == 0:
            msg_lines = [type(e).__name__]
        if backup_info:
            # Use only the first line of exception message
            # in backup_info error field
            backup_info.set_attribute("status", BackupInfo.FAILED)
            backup_info.set_attribute(
                "error",
                "failure %s (%s)" % (
                    self.executor.current_action, msg_lines[0]))
        output.error("Backup failed %s.\nDETAILS: %s",
                     self.executor.current_action,
                     '\n'.join(msg_lines))
    else:
        output.info("Backup end at LSN: %s (%s, %08X)",
                    backup_info.end_xlog,
                    backup_info.end_wal,
                    backup_info.end_offset)
        executor = self.executor
        output.info(
            "Backup completed (start time: %s, elapsed time: %s)",
            self.executor.copy_start_time,
            human_readable_timedelta(datetime.datetime.now() -
                                     executor.copy_start_time))
        # If requested, wait for end_wal to be archived
        if wait:
            try:
                self.server.wait_for_wal(backup_info.end_wal,
                                         wait_timeout)
                self.check_backup(backup_info)
            except KeyboardInterrupt:
                # Ignore CTRL-C pressed while waiting for WAL files
                output.info(
                    "Got CTRL-C. Continuing without waiting for '%s' "
                    "to be archived", backup_info.end_wal)
    finally:
        if backup_info:
            backup_info.save()

            # Make sure we are not holding any PostgreSQL connection
            # during the post-backup scripts
            self.server.close()

            # Run the post-backup-retry-script if present.
            try:
                retry_script = RetryHookScriptRunner(
                    self, 'backup_retry_script', 'post')
                retry_script.env_from_backup_info(backup_info)
                retry_script.run()
            except AbortedRetryHookScript as e:
                # Ignore the ABORT_STOP as it is a post-hook operation
                _logger.warning(
                    "Ignoring stop request after receiving "
                    "abort (exit code %d) from post-backup "
                    "retry hook script: %s",
                    e.hook.exit_status, e.hook.script)

            # Run the post-backup-script if present.
            script = HookScriptRunner(self, 'backup_script', 'post')
            script.env_from_backup_info(backup_info)
            script.run()

    output.result('backup', backup_info)
    return backup_info
def delete_backup(self, backup):
    """
    Delete a backup

    :param backup: the backup to delete
    :return bool: True if deleted, False if could not delete the backup
    """
    available_backups = self.get_available_backups(
        status_filter=(BackupInfo.DONE,))
    minimum_redundancy = self.server.config.minimum_redundancy
    # Honour minimum required redundancy
    if backup.status == BackupInfo.DONE and \
            minimum_redundancy >= len(available_backups):
        output.warning(
            "Skipping delete of backup %s for server %s "
            "due to minimum redundancy requirements "
            "(minimum redundancy = %s, "
            "current redundancy = %s)",
            backup.backup_id,
            self.config.name,
            minimum_redundancy,
            len(available_backups))
        return False
    # Keep track of when the delete operation started.
    delete_start_time = datetime.datetime.now()

    # Run the pre_delete_script if present.
    script = HookScriptRunner(self, 'delete_script', 'pre')
    script.env_from_backup_info(backup)
    script.run()

    # Run the pre_delete_retry_script if present.
    retry_script = RetryHookScriptRunner(self, 'delete_retry_script',
                                         'pre')
    retry_script.env_from_backup_info(backup)
    retry_script.run()

    output.info("Deleting backup %s for server %s",
                backup.backup_id, self.config.name)
    previous_backup = self.get_previous_backup(backup.backup_id)
    next_backup = self.get_next_backup(backup.backup_id)

    # Delete all the data contained in the backup
    try:
        self.delete_backup_data(backup)
    except OSError as e:
        output.error("Failure deleting backup %s for server %s.\n%s",
                     backup.backup_id, self.config.name, e)
        return False

    # Check if we are deleting the first available backup
    if not previous_backup:
        # In the case of exclusive backup (default), removes any WAL
        # files associated to the backup being deleted.
        # In the case of concurrent backup, removes only WAL files
        # prior to the start of the backup being deleted, as they
        # might be useful to any concurrent backup started immediately
        # after.
        remove_until = None  # means to remove all WAL files
        if next_backup:
            remove_until = next_backup
        elif BackupOptions.CONCURRENT_BACKUP in self.config.backup_options:
            remove_until = backup

        timelines_to_protect = set()
        # If remove_until is not set there are no backups left
        if remove_until:
            # Retrieve the list of extra timelines that contain at
            # least one backup. On such timelines we don't want to
            # delete any WAL
            for value in self.get_available_backups(
                    BackupInfo.STATUS_ARCHIVING).values():
                # Ignore the backup that is being deleted
                if value == backup:
                    continue
                timelines_to_protect.add(value.timeline)
            # Remove the timeline of `remove_until` from the list.
            # We have enough information to safely delete unused WAL
            # files on it.
            timelines_to_protect -= set([remove_until.timeline])

        output.info("Delete associated WAL segments:")
        for name in self.remove_wal_before_backup(remove_until,
                                                  timelines_to_protect):
            output.info("\t%s", name)

    # As last action, remove the backup directory,
    # ending the delete operation
    try:
        self.delete_basebackup(backup)
    except OSError as e:
        output.error(
            "Failure deleting backup %s for server %s.\n%s\n"
            "Please manually remove the '%s' directory",
            backup.backup_id, self.config.name, e,
            backup.get_basebackup_directory())
        return False
    self.backup_cache_remove(backup)
    # Save the time of the complete removal of the backup
    delete_end_time = datetime.datetime.now()
    output.info(
        "Deleted backup %s (start time: %s, elapsed time: %s)",
        backup.backup_id,
        delete_start_time.ctime(),
        human_readable_timedelta(delete_end_time - delete_start_time))

    # Remove the sync lockfile if it exists
    sync_lock = ServerBackupSyncLock(self.config.barman_lock_directory,
                                     self.config.name, backup.backup_id)
    if os.path.exists(sync_lock.filename):
        _logger.debug("Deleting backup sync lockfile: %s" %
                      sync_lock.filename)
        os.unlink(sync_lock.filename)

    # Run the post_delete_retry_script if present.
    try:
        retry_script = RetryHookScriptRunner(self, 'delete_retry_script',
                                             'post')
        retry_script.env_from_backup_info(backup)
        retry_script.run()
    except AbortedRetryHookScript as e:
        # Ignore the ABORT_STOP as it is a post-hook operation
        _logger.warning(
            "Ignoring stop request after receiving "
            "abort (exit code %d) from post-delete "
            "retry hook script: %s", e.hook.exit_status, e.hook.script)

    # Run the post_delete_script if present.
    script = HookScriptRunner(self, 'delete_script', 'post')
    script.env_from_backup_info(backup)
    script.run()

    return True
def backup(self):
    """
    Performs a backup for the server
    """
    _logger.debug("initialising backup information")
    self.executor.init()
    backup_info = None
    try:
        # Create the BackupInfo object representing the backup
        backup_info = BackupInfo(
            self.server,
            backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))
        backup_info.save()
        self.backup_cache_add(backup_info)
        output.info("Starting backup using %s method for server %s in %s",
                    self.mode,
                    self.config.name,
                    backup_info.get_basebackup_directory())

        # Run the pre-backup-script if present.
        script = HookScriptRunner(self, 'backup_script', 'pre')
        script.env_from_backup_info(backup_info)
        script.run()

        # Run the pre-backup-retry-script if present.
        retry_script = RetryHookScriptRunner(self, 'backup_retry_script',
                                             'pre')
        retry_script.env_from_backup_info(backup_info)
        retry_script.run()

        # Do the backup using the BackupExecutor
        self.executor.backup(backup_info)

        # Compute backup size and fsync it on disk
        self.backup_fsync_and_set_sizes(backup_info)

        # Mark the backup as DONE
        backup_info.set_attribute("status", "DONE")
    # Use BaseException instead of Exception to catch events like
    # KeyboardInterrupt (e.g.: CTRL-C)
    except BaseException as e:
        msg_lines = str(e).strip().splitlines()
        # If the exception has no attached message use the raw
        # type name
        if len(msg_lines) == 0:
            msg_lines = [type(e).__name__]
        if backup_info:
            # Use only the first line of exception message
            # in backup_info error field
            backup_info.set_attribute("status", "FAILED")
            backup_info.set_attribute(
                "error",
                "failure %s (%s)" % (
                    self.executor.current_action, msg_lines[0]))
        output.error("Backup failed %s.\nDETAILS: %s\n%s",
                     self.executor.current_action,
                     msg_lines[0],
                     '\n'.join(msg_lines[1:]))
    else:
        output.info("Backup end at LSN: %s (%s, %08X)",
                    backup_info.end_xlog,
                    backup_info.end_wal,
                    backup_info.end_offset)
        output.info(
            "Backup completed (start time: %s, elapsed time: %s)",
            self.executor.copy_start_time,
            human_readable_timedelta(self.executor.copy_end_time -
                                     self.executor.copy_start_time))
        # Create a restore point after a backup
        target_name = 'barman_%s' % backup_info.backup_id
        self.server.postgres.create_restore_point(target_name)
    finally:
        if backup_info:
            backup_info.save()

            # Make sure we are not holding any PostgreSQL connection
            # during the post-backup scripts
            self.server.close()

            # Run the post-backup-retry-script if present.
            try:
                retry_script = RetryHookScriptRunner(
                    self, 'backup_retry_script', 'post')
                retry_script.env_from_backup_info(backup_info)
                retry_script.run()
            except AbortedRetryHookScript as e:
                # Ignore the ABORT_STOP as it is a post-hook operation
                _logger.warning(
                    "Ignoring stop request after receiving "
                    "abort (exit code %d) from post-backup "
                    "retry hook script: %s",
                    e.hook.exit_status, e.hook.script)

            # Run the post-backup-script if present.
            script = HookScriptRunner(self, 'backup_script', 'post')
            script.env_from_backup_info(backup_info)
            script.run()

    output.result('backup', backup_info)
def delete_backup(self, backup):
    """
    Delete a backup

    :param backup: the backup to delete
    """
    available_backups = self.get_available_backups()
    minimum_redundancy = self.server.config.minimum_redundancy
    # Honour minimum required redundancy
    if backup.status == BackupInfo.DONE and \
            minimum_redundancy >= len(available_backups):
        output.warning(
            "Skipping delete of backup %s for server %s "
            "due to minimum redundancy requirements "
            "(minimum redundancy = %s, "
            "current redundancy = %s)",
            backup.backup_id,
            self.config.name,
            minimum_redundancy,
            len(available_backups))
        return
    # Keep track of when the delete operation started.
    delete_start_time = datetime.datetime.now()
    output.info("Deleting backup %s for server %s",
                backup.backup_id, self.config.name)
    previous_backup = self.get_previous_backup(backup.backup_id)
    next_backup = self.get_next_backup(backup.backup_id)

    # Delete all the data contained in the backup
    try:
        self.delete_backup_data(backup)
    except OSError as e:
        output.error("Failure deleting backup %s for server %s.\n%s",
                     backup.backup_id, self.config.name, e)
        return

    # Check if we are deleting the first available backup
    if not previous_backup:
        # In the case of exclusive backup (default), removes any WAL
        # files associated to the backup being deleted.
        # In the case of concurrent backup, removes only WAL files
        # prior to the start of the backup being deleted, as they
        # might be useful to any concurrent backup started immediately
        # after.
        remove_until = None  # means to remove all WAL files
        if next_backup:
            remove_until = next_backup
        elif BackupOptions.CONCURRENT_BACKUP in self.config.backup_options:
            remove_until = backup

        timelines_to_protect = set()
        # If remove_until is not set there are no backups left
        if remove_until:
            # Retrieve the list of extra timelines that contain at
            # least one backup. On such timelines we don't want to
            # delete any WAL
            for value in self.get_available_backups(
                    BackupInfo.STATUS_ARCHIVING).values():
                # Ignore the backup that is being deleted
                if value == backup:
                    continue
                timelines_to_protect.add(value.timeline)
            # Remove the timeline of `remove_until` from the list.
            # We have enough information to safely delete unused WAL
            # files on it.
            timelines_to_protect -= set([remove_until.timeline])

        output.info("Delete associated WAL segments:")
        for name in self.remove_wal_before_backup(remove_until,
                                                  timelines_to_protect):
            output.info("\t%s", name)

    # As last action, remove the backup directory,
    # ending the delete operation
    try:
        self.delete_basebackup(backup)
    except OSError as e:
        output.error(
            "Failure deleting backup %s for server %s.\n%s\n"
            "Please manually remove the '%s' directory",
            backup.backup_id, self.config.name, e,
            backup.get_basebackup_directory())
        return
    self.backup_cache_remove(backup)
    # Save the time of the complete removal of the backup
    delete_end_time = datetime.datetime.now()
    output.info(
        "Deleted backup %s (start time: %s, elapsed time: %s)",
        backup.backup_id,
        delete_start_time.ctime(),
        human_readable_timedelta(delete_end_time - delete_start_time))
def result_show_backup(self, backup_ext_info):
    """
    Output all available information about a backup in show-backup command

    The argument has to be the result of a Server.get_backup_ext_info()
    call

    :param dict backup_ext_info: a dictionary containing the info to
        display
    """
    data = dict(backup_ext_info)
    self.info("Backup %s:", data['backup_id'])
    self.info("  Server Name            : %s", data['server_name'])
    self.info("  Status                 : %s", data['status'])
    if data['status'] == BackupInfo.DONE:
        self.info("  PostgreSQL Version     : %s", data['version'])
        self.info("  PGDATA directory       : %s", data['pgdata'])
        if data['tablespaces']:
            self.info("  Tablespaces:")
            for item in data['tablespaces']:
                self.info("    %s: %s (oid: %s)",
                          item.name, item.location, item.oid)
        self.info("")
        self.info("  Base backup information:")
        self.info("    Disk usage           : %s (%s with WALs)",
                  pretty_size(data['size']),
                  pretty_size(data['size'] + data['wal_size']))
        if data['deduplicated_size'] is not None and data['size'] > 0:
            deduplication_ratio = 1 - (
                float(data['deduplicated_size']) / data['size'])
            self.info("    Incremental size     : %s (-%s)",
                      pretty_size(data['deduplicated_size']),
                      '{percent:.2%}'.format(percent=deduplication_ratio))
        self.info("    Timeline             : %s", data['timeline'])
        self.info("    Begin WAL            : %s", data['begin_wal'])
        self.info("    End WAL              : %s", data['end_wal'])
        self.info("    WAL number           : %s", data['wal_num'])
        # Output WAL compression ratio for basebackup WAL files
        if data['wal_compression_ratio'] > 0:
            self.info("    WAL compression ratio: %s",
                      '{percent:.2%}'.format(
                          percent=data['wal_compression_ratio']))
        self.info("    Begin time           : %s", data['begin_time'])
        self.info("    End time             : %s", data['end_time'])
        # If copy statistics are available print a summary
        copy_stats = data.get('copy_stats')
        if copy_stats:
            copy_time = copy_stats.get('copy_time')
            if copy_time:
                value = human_readable_timedelta(
                    datetime.timedelta(seconds=copy_time))
                # Show analysis time if it is more than a second
                analysis_time = copy_stats.get('analysis_time')
                if analysis_time is not None and analysis_time >= 1:
                    value += " + %s startup" % human_readable_timedelta(
                        datetime.timedelta(seconds=analysis_time))
                self.info("    Copy time            : %s", value)
                size = data['deduplicated_size'] or data['size']
                value = "%s/s" % pretty_size(size / copy_time)
                number_of_workers = copy_stats.get('number_of_workers', 1)
                if number_of_workers > 1:
                    value += " (%s jobs)" % number_of_workers
                self.info("    Estimated throughput : %s", value)
        self.info("    Begin Offset         : %s", data['begin_offset'])
        self.info("    End Offset           : %s", data['end_offset'])
        self.info("    Begin LSN            : %s", data['begin_xlog'])
        self.info("    End LSN              : %s", data['end_xlog'])
        self.info("")
        self.info("  WAL information:")
        self.info("    No of files          : %s",
                  data['wal_until_next_num'])
        self.info("    Disk usage           : %s",
                  pretty_size(data['wal_until_next_size']))
        # Output WAL rate
        if data['wals_per_second'] > 0:
            self.info("    WAL rate             : %0.2f/hour",
                      data['wals_per_second'] * 3600)
        # Output WAL compression ratio for archived WAL files
        if data['wal_until_next_compression_ratio'] > 0:
            self.info(
                "    Compression ratio    : %s",
                '{percent:.2%}'.format(
                    percent=data['wal_until_next_compression_ratio']))
        self.info("    Last available       : %s", data['wal_last'])
        if data['children_timelines']:
            timelines = data['children_timelines']
            self.info("    Reachable timelines  : %s",
                      ", ".join([str(history.tli)
                                 for history in timelines]))
        self.info("")
        self.info("  Catalog information:")
        self.info("    Retention Policy     : %s",
                  data['retention_policy_status'] or 'not enforced')
        self.info("    Previous Backup      : %s",
                  data.setdefault('previous_backup_id', 'not available')
                  or '- (this is the oldest base backup)')
        self.info("    Next Backup          : %s",
                  data.setdefault('next_backup_id', 'not available')
                  or '- (this is the latest base backup)')
        if data['children_timelines']:
            self.info("")
            self.info("WARNING: WAL information is inaccurate due to "
                      "multiple timelines interacting with this backup")
    else:
        if data['error']:
            self.info("  Error                : %s", data['error'])
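
# Worked example of the two derived figures printed above (the
# incremental/deduplication ratio and the estimated throughput), using
# hypothetical sizes and copy time; pretty_size is approximated inline.
size = 10 * 1024 ** 3               # 10 GiB logical backup size (assumed)
deduplicated_size = 4 * 1024 ** 3   # 4 GiB actually copied (assumed)
copy_time = 512.0                   # seconds (assumed)

deduplication_ratio = 1 - float(deduplicated_size) / size
print('{percent:.2%}'.format(percent=deduplication_ratio))  # 60.00%

throughput = deduplicated_size / copy_time  # bytes per second
print("%.1f MiB/s" % (throughput / 1024.0 ** 2))  # 8.0 MiB/s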
def result_recovery(self, results):
    """
    Render the result of a recovery.
    """
    if len(results['changes']) > 0:
        self.info("")
        self.info("IMPORTANT")
        self.info("These settings have been modified to prevent "
                  "data losses")
        self.info("")
        for assertion in results['changes']:
            self.info("%s line %s: %s = %s",
                      assertion.filename,
                      assertion.line,
                      assertion.key,
                      assertion.value)
    if len(results['warnings']) > 0:
        self.info("")
        self.info("WARNING")
        self.info("You are required to review the following options"
                  " as potentially dangerous")
        self.info("")
        for assertion in results['warnings']:
            self.info("%s line %s: %s = %s",
                      assertion.filename,
                      assertion.line,
                      assertion.key,
                      assertion.value)
    if results['missing_files']:
        # At least one file is missing, warn the user
        self.info("")
        self.info("WARNING")
        self.info("The following configuration files have not been "
                  "saved during backup, hence they have not been "
                  "restored.")
        self.info("You need to manually restore them "
                  "in order to start the recovered PostgreSQL instance:")
        self.info("")
        for file_name in results['missing_files']:
            self.info(" %s" % file_name)
    if results['delete_barman_xlog']:
        self.info("")
        self.info("After the recovery, please remember to remove the "
                  "\"barman_xlog\" directory")
        self.info("inside the PostgreSQL data directory.")
    if results['get_wal']:
        self.info("")
        self.info("WARNING: 'get-wal' is in the specified "
                  "'recovery_options'.")
        self.info("Before you start up the PostgreSQL server, please "
                  "review the recovery.conf file")
        self.info("inside the target directory. Make sure that "
                  "'restore_command' can be executed by "
                  "the PostgreSQL user.")
    self.info("")
    self.info(
        "Recovery completed (start time: %s, elapsed time: %s)",
        results['recovery_start_time'],
        human_readable_timedelta(datetime.datetime.now() -
                                 results['recovery_start_time']))
    self.info("")
    self.info("Your PostgreSQL server has been successfully "
              "prepared for recovery!")
def delete_backup(self, backup):
    """
    Delete a backup

    :param backup: the backup to delete
    """
    available_backups = self.get_available_backups()
    minimum_redundancy = self.server.config.minimum_redundancy
    # Honour minimum required redundancy
    if backup.status == BackupInfo.DONE and \
            minimum_redundancy >= len(available_backups):
        output.warning("Skipping delete of backup %s for server %s "
                       "due to minimum redundancy requirements "
                       "(minimum redundancy = %s, "
                       "current redundancy = %s)",
                       backup.backup_id,
                       self.config.name,
                       minimum_redundancy,
                       len(available_backups))
        return
    # Keep track of when the delete operation started.
    delete_start_time = datetime.datetime.now()
    output.info("Deleting backup %s for server %s",
                backup.backup_id, self.config.name)
    previous_backup = self.get_previous_backup(backup.backup_id)
    next_backup = self.get_next_backup(backup.backup_id)

    # Delete all the data contained in the backup
    try:
        self.delete_backup_data(backup)
    except OSError as e:
        output.error("Failure deleting backup %s for server %s.\n%s",
                     backup.backup_id, self.config.name, e)
        return

    # Check if we are deleting the first available backup
    if not previous_backup:
        # In the case of exclusive backup (default), removes any WAL
        # files associated to the backup being deleted.
        # In the case of concurrent backup, removes only WAL files
        # prior to the start of the backup being deleted, as they
        # might be useful to any concurrent backup started immediately
        # after.
        remove_until = None  # means to remove all WAL files
        if next_backup:
            remove_until = next_backup
        elif BackupOptions.CONCURRENT_BACKUP in self.config.backup_options:
            remove_until = backup

        timelines_to_protect = set()
        # If remove_until is not set there are no backups left
        if remove_until:
            # Retrieve the list of extra timelines that contain at
            # least one backup. On such timelines we don't want to
            # delete any WAL
            for value in self.get_available_backups(
                    BackupInfo.STATUS_ARCHIVING).values():
                # Ignore the backup that is being deleted
                if value == backup:
                    continue
                timelines_to_protect.add(value.timeline)
            # Remove the timeline of `remove_until` from the list.
            # We have enough information to safely delete unused WAL
            # files on it.
            timelines_to_protect -= set([remove_until.timeline])

        output.info("Delete associated WAL segments:")
        for name in self.remove_wal_before_backup(remove_until,
                                                  timelines_to_protect):
            output.info("\t%s", name)

    # As last action, remove the backup directory,
    # ending the delete operation
    try:
        self.delete_basebackup(backup)
    except OSError as e:
        output.error("Failure deleting backup %s for server %s.\n%s\n"
                     "Please manually remove the '%s' directory",
                     backup.backup_id, self.config.name, e,
                     backup.get_basebackup_directory())
        return
    self.backup_cache_remove(backup)
    # Save the time of the complete removal of the backup
    delete_end_time = datetime.datetime.now()
    output.info("Deleted backup %s (start time: %s, elapsed time: %s)",
                backup.backup_id,
                delete_start_time.ctime(),
                utils.human_readable_timedelta(
                    delete_end_time - delete_start_time))