Example No. 1
    def __init__(self, config):
        ''' The Constructor.

        :param config: the server configuration
        '''
        self.config = config
        self.conn = None
        self.server_txt_version = None
        self.server_version = None
        self.ssh_options = config.ssh_command.split()
        self.ssh_command = self.ssh_options.pop(0)
        self.ssh_options.extend(
            "-o BatchMode=yes -o StrictHostKeyChecking=no".split())
        self.backup_manager = BackupManager(self)
        self.configuration_files = None
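The constructor above splits the configured ssh_command into the executable and its options, then appends flags that force non-interactive SSH. A minimal sketch of that parsing, using a hypothetical configuration value (the command string below is not taken from the examples):

# Hedged sketch of the ssh_command splitting done in the constructor above.
# The example command string is hypothetical.
ssh_command_setting = "ssh -p 2222 postgres@db.example.com"

ssh_options = ssh_command_setting.split()
ssh_command = ssh_options.pop(0)  # -> "ssh"
ssh_options.extend(
    "-o BatchMode=yes -o StrictHostKeyChecking=no".split())
# ssh_options -> ["-p", "2222", "postgres@db.example.com",
#                 "-o", "BatchMode=yes", "-o", "StrictHostKeyChecking=no"]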
Example No. 2
def build_backup_manager(server=None, name=None, config=None,
                         global_conf=None, main_conf=None):
    """
    Instantiate a BackupManager object using mocked parameters

    The compression_manager member is mocked

    :param barman.server.Server|None server: Optional Server object
    :rtype: barman.backup.BackupManager
    """
    if server is None:
        server = build_mocked_server(name, config, global_conf, main_conf)
    with mock.patch("barman.backup.CompressionManager"):
        manager = BackupManager(server=server)
    server.backup_manager = manager
    return manager
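A hedged usage sketch of this helper in a test. The test name and the server name "main" are illustrative, and build_mocked_server is assumed to accept the same four arguments it receives above:

# Illustrative test using the helper above (names are hypothetical).
def test_helper_wires_manager_to_server():
    # Build a mocked server first, then hand it to the helper; the helper
    # attaches the resulting BackupManager back onto that server.
    server = build_mocked_server("main", None, None, None)
    manager = build_backup_manager(server=server)
    assert server.backup_manager is manager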
Example No. 3
def build_backup_manager(
    server=None, name=None, config=None, global_conf=None, main_conf=None
):
    """
    Instantiate a BackupManager object using mocked parameters

    The compression_manager member is mocked

    :param barman.server.Server|None server: Optional Server object
    :rtype: barman.backup.BackupManager
    """
    if server is None:
        server = build_mocked_server(name, config, global_conf, main_conf)
    with mock.patch("barman.backup.CompressionManager"):
        manager = BackupManager(server=server)
    manager.compression_manager.unidentified_compression = None
    manager.compression_manager.get_wal_file_info.side_effect = (
        lambda filename: WalFileInfo.from_file(filename, manager.compression_manager)
    )
    server.backup_manager = manager
    return manager
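This variant additionally routes get_wal_file_info on the mocked compression manager through the real WalFileInfo.from_file by assigning a side_effect. A minimal, self-contained sketch of that mock pattern (the names below are illustrative, not barman's API):

from unittest import mock

# A mocked method can delegate to a real callable through side_effect.
calls = []

def real_lookup(filename):
    calls.append(filename)
    return "info for %s" % filename

mocked = mock.MagicMock()
mocked.get_wal_file_info.side_effect = real_lookup

assert mocked.get_wal_file_info("000000010000000000000001") == (
    "info for 000000010000000000000001")
assert calls == ["000000010000000000000001"]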
Example No. 4
def _remove_wals_for_backup(
    cloud_interface,
    catalog,
    deleted_backup,
    dry_run,
    skip_wal_cleanup_if_standalone=True,
):
    # An implementation of BackupManager.remove_wal_before_backup which does not
    # use xlogdb, since xlogdb is not available to barman-cloud
    should_remove_wals, wal_ranges_to_protect = BackupManager.should_remove_wals(
        deleted_backup,
        catalog.get_backup_list(),
        keep_manager=catalog,
        skip_wal_cleanup_if_standalone=skip_wal_cleanup_if_standalone,
    )
    next_backup = BackupManager.find_next_backup_in(
        catalog.get_backup_list(), deleted_backup.backup_id
    )
    wals_to_delete = {}
    if should_remove_wals:
        # There is no previous backup or all previous backups are archival
        # standalone backups, so we can remove unused WALs (those WALs not
        # required by standalone archival backups).
        # If there is a next backup then all unused WALs up to the begin_wal
        # of the next backup can be removed.
        # If there is no next backup then there are no remaining backups,
        # because we must assume non-exclusive backups are taken, we can only
        # safely delete unused WALs up to begin_wal of the deleted backup.
        # See comments in barman.backup.BackupManager.delete_backup.
        if next_backup:
            remove_until = next_backup
        else:
            remove_until = deleted_backup
        # A WAL is only a candidate for deletion if it is on the same timeline so we
        # use BackupManager to get a set of all other timelines with backups so that
        # we can preserve all WALs on other timelines.
        timelines_to_protect = BackupManager.get_timelines_to_protect(
            remove_until=remove_until,
            deleted_backup=deleted_backup,
            available_backups=catalog.get_backup_list(),
        )
        try:
            wal_paths = catalog.get_wal_paths()
        except Exception as exc:
            logging.error(
                "Cannot clean up WALs for backup %s because an error occurred listing WALs: %s",
                deleted_backup.backup_id,
                force_str(exc),
            )
            return
        for wal_name, wal in wal_paths.items():
            if xlog.is_history_file(wal_name):
                continue
            if timelines_to_protect:
                tli, _, _ = xlog.decode_segment_name(wal_name)
                if tli in timelines_to_protect:
                    continue

            # Check if the WAL is in a protected range, required by an archival
            # standalone backup - so do not delete it
            if xlog.is_backup_file(wal_name):
                # If we have a backup file, truncate the name for the range check
                range_check_wal_name = wal_name[:24]
            else:
                range_check_wal_name = wal_name
            if any(
                range_check_wal_name >= begin_wal and range_check_wal_name <= end_wal
                for begin_wal, end_wal in wal_ranges_to_protect
            ):
                continue

            if wal_name < remove_until.begin_wal:
                wals_to_delete[wal_name] = wal
    # Explicitly sort because dicts are not ordered in python < 3.6
    wal_paths_to_delete = sorted(wals_to_delete.values())
    if len(wal_paths_to_delete) > 0:
        if not dry_run:
            try:
                cloud_interface.delete_objects(wal_paths_to_delete)
            except Exception as exc:
                logging.error(
                    "Could not delete the following WALs for backup %s: %s, Reason: %s",
                    deleted_backup.backup_id,
                    wal_paths_to_delete,
                    force_str(exc),
                )
                # Return early so that we leave the WALs in the local cache so they
                # can be cleaned up should there be a subsequent backup deletion.
                return
        else:
            print(
                "Skipping deletion of objects %s due to --dry-run option"
                % wal_paths_to_delete
            )
        for wal_name in wals_to_delete.keys():
            catalog.remove_wal_from_cache(wal_name)
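The protected-range check above compares WAL names lexically; .backup label files carry an extra suffix, so the name is first truncated to the 24-character segment name. A small self-contained sketch of that check with hypothetical names (the real code uses xlog.is_backup_file rather than a suffix test):

# Hedged sketch of the protected-range check above (all names hypothetical).
wal_ranges_to_protect = [
    ("000000010000000000000010", "000000010000000000000020"),
]

def is_protected(wal_name):
    # Backup label files look like <segment>.<offset>.backup; keep only the
    # 24-character segment name for the lexical range comparison.
    if wal_name.endswith(".backup"):
        wal_name = wal_name[:24]
    return any(
        begin_wal <= wal_name <= end_wal
        for begin_wal, end_wal in wal_ranges_to_protect
    )

assert is_protected("000000010000000000000015")
assert is_protected("000000010000000000000015.00000028.backup")
assert not is_protected("000000010000000000000001")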
Example No. 5
    def __init__(self, config):
        """
        Server constructor.

        :param barman.config.ServerConfig config: the server configuration
        """
        self.config = config
        self._conn = None
        self.server_txt_version = None
        self.server_version = None
        if self.config.conninfo is None:
            raise ConninfoException(
                'Missing conninfo parameter in barman configuration '
                'for server %s' % config.name)
        self.backup_manager = BackupManager(self)
        self.configuration_files = None
        self.enforce_retention_policies = False

        # Set bandwidth_limit
        if self.config.bandwidth_limit:
            try:
                self.config.bandwidth_limit = int(self.config.bandwidth_limit)
            except ValueError:
                _logger.warning('Invalid bandwidth_limit "%s" for server "%s" '
                                '(fallback to "0")' % (
                                    self.config.bandwidth_limit,
                                    self.config.name))
                self.config.bandwidth_limit = None

        # set tablespace_bandwidth_limit
        if self.config.tablespace_bandwidth_limit:
            rules = {}
            for rule in self.config.tablespace_bandwidth_limit.split():
                try:
                    key, value = rule.split(':', 1)
                    value = int(value)
                    if value != self.config.bandwidth_limit:
                        rules[key] = value
                except ValueError:
                    _logger.warning(
                        "Invalid tablespace_bandwidth_limit rule '%s'" % rule)
            if len(rules) > 0:
                self.config.tablespace_bandwidth_limit = rules
            else:
                self.config.tablespace_bandwidth_limit = None

        # Set minimum redundancy (default 0)
        if self.config.minimum_redundancy.isdigit():
            self.config.minimum_redundancy = int(self.config.minimum_redundancy)
            if self.config.minimum_redundancy < 0:
                _logger.warning('Negative value of minimum_redundancy "%s" '
                                'for server "%s" (fallback to "0")' % (
                                    self.config.minimum_redundancy,
                                    self.config.name))
                self.config.minimum_redundancy = 0
        else:
            _logger.warning('Invalid minimum_redundancy "%s" for server "%s" '
                            '(fallback to "0")' % (
                                self.config.minimum_redundancy,
                                self.config.name))
            self.config.minimum_redundancy = 0

        # Initialise retention policies
        self._init_retention_policies()
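The tablespace_bandwidth_limit block above parses a space-separated list of name:limit rules into a dict, silently dropping rules that match the global limit and warning about malformed ones. A minimal sketch of that parsing with hypothetical configuration values:

# Hedged sketch of the tablespace_bandwidth_limit parsing above
# (configuration values are hypothetical).
bandwidth_limit = 4000
tablespace_bandwidth_limit = "tbs_fast:8000 tbs_slow:1000 tbs_same:4000 broken"

rules = {}
for rule in tablespace_bandwidth_limit.split():
    try:
        key, value = rule.split(':', 1)
        value = int(value)
        if value != bandwidth_limit:
            rules[key] = value
    except ValueError:
        pass  # "broken" has no ':<int>' part and is skipped (with a warning above)
# rules -> {'tbs_fast': 8000, 'tbs_slow': 1000}; 'tbs_same' equals the global
# limit and is dropped, so only per-tablespace overrides remain.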
Example No. 6
class Server(object):
    """
    This class represents the PostgreSQL server to backup.
    """

    XLOG_DB = "xlog.db"

    # the strategy for the management of the results of the various checks
    __default_check_strategy = CheckOutputStrategy()

    def __init__(self, config):
        """
        Server constructor.

        :param barman.config.ServerConfig config: the server configuration
        """
        self.config = config
        self._conn = None
        self.server_txt_version = None
        self.server_version = None
        if self.config.conninfo is None:
            raise ConninfoException(
                'Missing conninfo parameter in barman configuration '
                'for server %s' % config.name)
        self.backup_manager = BackupManager(self)
        self.configuration_files = None
        self.enforce_retention_policies = False

        # Set bandwidth_limit
        if self.config.bandwidth_limit:
            try:
                self.config.bandwidth_limit = int(self.config.bandwidth_limit)
            except ValueError:
                _logger.warning('Invalid bandwidth_limit "%s" for server "%s" '
                                '(fallback to "0")' % (
                                    self.config.bandwidth_limit,
                                    self.config.name))
                self.config.bandwidth_limit = None

        # set tablespace_bandwidth_limit
        if self.config.tablespace_bandwidth_limit:
            rules = {}
            for rule in self.config.tablespace_bandwidth_limit.split():
                try:
                    key, value = rule.split(':', 1)
                    value = int(value)
                    if value != self.config.bandwidth_limit:
                        rules[key] = value
                except ValueError:
                    _logger.warning(
                        "Invalid tablespace_bandwidth_limit rule '%s'" % rule)
            if len(rules) > 0:
                self.config.tablespace_bandwidth_limit = rules
            else:
                self.config.tablespace_bandwidth_limit = None

        # Set minimum redundancy (default 0)
        if self.config.minimum_redundancy.isdigit():
            self.config.minimum_redundancy = int(self.config.minimum_redundancy)
            if self.config.minimum_redundancy < 0:
                _logger.warning('Negative value of minimum_redundancy "%s" '
                                'for server "%s" (fallback to "0")' % (
                                    self.config.minimum_redundancy,
                                    self.config.name))
                self.config.minimum_redundancy = 0
        else:
            _logger.warning('Invalid minimum_redundancy "%s" for server "%s" '
                            '(fallback to "0")' % (
                                self.config.minimum_redundancy,
                                self.config.name))
            self.config.minimum_redundancy = 0

        # Initialise retention policies
        self._init_retention_policies()

    def _init_retention_policies(self):

        # Set retention policy mode
        if self.config.retention_policy_mode != 'auto':
            _logger.warning(
                'Unsupported retention_policy_mode "%s" for server "%s" '
                '(fallback to "auto")' % (
                    self.config.retention_policy_mode, self.config.name))
            self.config.retention_policy_mode = 'auto'

        # If retention_policy is present, enforce them
        if self.config.retention_policy:
            # Check wal_retention_policy
            if self.config.wal_retention_policy != 'main':
                _logger.warning(
                    'Unsupported wal_retention_policy value "%s" '
                    'for server "%s" (fallback to "main")' % (
                        self.config.wal_retention_policy, self.config.name))
                self.config.wal_retention_policy = 'main'
            # Create retention policy objects
            try:
                rp = RetentionPolicyFactory.create(
                    self, 'retention_policy', self.config.retention_policy)
                # Reassign the configuration value (we keep it in one place)
                self.config.retention_policy = rp
                _logger.debug('Retention policy for server %s: %s' % (
                    self.config.name, self.config.retention_policy))
                try:
                    rp = RetentionPolicyFactory.create(
                        self, 'wal_retention_policy',
                        self.config.wal_retention_policy)
                    # Reassign the configuration value (we keep it in one place)
                    self.config.wal_retention_policy = rp
                    _logger.debug(
                        'WAL retention policy for server %s: %s' % (
                            self.config.name, self.config.wal_retention_policy))
                except ValueError:
                    _logger.exception(
                        'Invalid wal_retention_policy setting "%s" '
                        'for server "%s" (fallback to "main")' % (
                            self.config.wal_retention_policy, self.config.name))
                    rp = RetentionPolicyFactory.create(
                        self, 'wal_retention_policy', 'main')
                    self.config.wal_retention_policy = rp

                self.enforce_retention_policies = True

            except ValueError:
                _logger.exception(
                    'Invalid retention_policy setting "%s" for server "%s"' % (
                        self.config.retention_policy, self.config.name))

    def check(self, check_strategy=__default_check_strategy):
        """
        Implements the 'server check' command and makes sure SSH and PostgreSQL
        connections work properly. It checks also that backup directories exist
        (and if not, it creates them).

        :param CheckStrategy check_strategy: the strategy for the management
             of the results of the various checks
        """
        # Check postgres configuration
        self.check_postgres(check_strategy)
        # Check barman directories from barman configuration
        self.check_directories(check_strategy)
        # Check retention policies
        self.check_retention_policy_settings(check_strategy)
        # Check for backup validity
        self.check_backup_validity(check_strategy)
        # Executes the backup manager set of checks
        self.backup_manager.check(check_strategy)

    def check_postgres(self, check_strategy):
        """
        Checks PostgreSQL connection

        :param CheckStrategy check_strategy: the strategy for the management
             of the results of the various checks
        """
        # Take the status of the remote server
        try:
            remote_status = self.get_remote_status()
        except PostgresConnectionError:
            remote_status = None
        if remote_status is not None and remote_status['server_txt_version']:
            check_strategy.result(self.config.name, 'PostgreSQL', True)
        else:
            check_strategy.result(self.config.name, 'PostgreSQL', False)
            return
        # Check archive_mode parameter: must be on
        if remote_status['archive_mode'] == 'on':
            check_strategy.result(self.config.name, 'archive_mode', True)
        else:
            check_strategy.result(self.config.name, 'archive_mode', False,
                                  "please set it to 'on'")
        # Check wal_level parameter: must be different to 'minimal'
        if remote_status['wal_level'] != 'minimal':
            check_strategy.result(
                self.config.name, 'wal_level', True)
        else:
            check_strategy.result(
                self.config.name, 'wal_level', False,
                "please set it to a higher level than 'minimal'")

        if remote_status['archive_command'] and \
                remote_status['archive_command'] != '(disabled)':
            check_strategy.result(self.config.name, 'archive_command', True)

            # Report if the archiving process works without issues.
            # Skip if the archive_command check fails
            # It can be None if PostgreSQL is older than 9.4
            if remote_status.get('is_archiving') is not None:
                check_strategy.result(self.config.name, 'continuous archiving',
                                      remote_status['is_archiving'])
        else:
            check_strategy.result(self.config.name, 'archive_command', False,
                                  'please set it accordingly to documentation')

    def _make_directories(self):
        """
        Make backup directories in case they do not exist
        """
        for key in self.config.KEYS:
            if key.endswith('_directory') and hasattr(self.config, key):
                val = getattr(self.config, key)
                if val is not None and not os.path.isdir(val):
                    # noinspection PyTypeChecker
                    os.makedirs(val)

    def check_directories(self, check_strategy):
        """
        Checks backup directories and creates them if they do not exist

        :param CheckStrategy check_strategy: the strategy for the management
             of the results of the various checks
        """

        if self.config.disabled:
            check_strategy.result(self.config.name, 'directories', False)
            for conflict_paths in self.config.msg_list:
                output.info("\t%s" % conflict_paths)
        else:
            try:
                self._make_directories()
            except OSError as e:
                check_strategy.result(self.config.name, 'directories', False,
                                      "%s: %s" % (e.filename, e.strerror))
            else:
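check() and check_postgres() above report their outcomes through a check strategy object; only the result(server_name, check_name, status[, hint]) call is visible here. A minimal sketch of a custom strategy that simply collects results (the class name is hypothetical and not part of barman):

# Hypothetical check strategy that records results instead of printing them;
# it only assumes the result(...) signature used in the methods above.
class RecordingCheckStrategy(object):
    def __init__(self):
        self.results = []

    def result(self, server_name, check, status, hint=None):
        self.results.append((server_name, check, status, hint))

# Illustrative use: server.check(check_strategy=RecordingCheckStrategy())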
Example No. 7
    def __init__(self, config):
        ''' The Constructor.

        :param config: the server configuration
        '''
        self.config = config
        self.conn = None
        self.server_txt_version = None
        self.server_version = None
        self.ssh_options = config.ssh_command.split()
        self.ssh_command = self.ssh_options.pop(0)
        self.ssh_options.extend(
            "-o BatchMode=yes -o StrictHostKeyChecking=no".split())
        self.backup_manager = BackupManager(self)
        self.configuration_files = None
        self.enforce_retention_policies = False

        # Set bandwidth_limit
        if self.config.bandwidth_limit:
            try:
                self.config.bandwidth_limit = int(self.config.bandwidth_limit)
            except ValueError:
                _logger.warning(
                    'Invalid bandwidth_limit "%s" for server "%s" (fallback to "0")'
                    % (self.config.bandwidth_limit, self.config.name))
                self.config.bandwidth_limit = None

        # set tablespace_bandwidth_limit
        if self.config.tablespace_bandwidth_limit:
            rules = {}
            for rule in self.config.tablespace_bandwidth_limit.split():
                try:
                    key, value = rule.split(':', 1)
                    value = int(value)
                    if value != self.config.bandwidth_limit:
                        rules[key] = value
                except ValueError:
                    _logger.warning(
                        "Invalid tablespace_bandwidth_limit rule '%s'" %
                        (rule, ))
            if len(rules) > 0:
                self.config.tablespace_bandwidth_limit = rules
            else:
                self.config.tablespace_bandwidth_limit = None

        # Set minimum redundancy (default 0)
        if self.config.minimum_redundancy.isdigit():
            self.config.minimum_redundancy = int(
                self.config.minimum_redundancy)
            if self.config.minimum_redundancy < 0:
                _logger.warning(
                    'Negative value of minimum_redundancy "%s" for server "%s" (fallback to "0")'
                    % (self.config.minimum_redundancy, self.config.name))
                self.config.minimum_redundancy = 0
        else:
            _logger.warning(
                'Invalid minimum_redundancy "%s" for server "%s" (fallback to "0")'
                % (self.config.minimum_redundancy, self.config.name))
            self.config.minimum_redundancy = 0

        # Initialise retention policies
        self._init_retention_policies()