Example #1
    def generate_archive_status(self, recovery_info, remote_command,
                                required_xlog_files):
        """
        Populate the archive_status directory

        :param dict recovery_info: Dictionary containing all the recovery
            parameters
        :param str remote_command: ssh command for remote connection
        :param tuple required_xlog_files: list of required WAL segments
        """
        if remote_command:
            status_dir = recovery_info['tempdir']
        else:
            status_dir = os.path.join(recovery_info['wal_dest'],
                                      'archive_status')
            mkpath(status_dir)
        for wal_info in required_xlog_files:
            with open(os.path.join(status_dir, "%s.done" % wal_info.name),
                      'a') as f:
                f.write('')
        if remote_command:
            try:
                recovery_info['rsync']('%s/' % status_dir,
                                       ':%s' % os.path.join(
                                           recovery_info['wal_dest'],
                                           'archive_status'))
            except CommandFailedException as e:
                output.exception(
                    "unable to populate pg_xlog/archive_status"
                    "directory: %s", e)
                output.close_and_exit()
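A minimal standalone sketch of the marker-file technique used above: PostgreSQL treats an empty <segment>.done file under archive_status as "segment already archived", so a restored server will not try to re-archive the WALs just put back in place. The helper name mark_segments_done is illustrative, not part of Barman.

import os

def mark_segments_done(status_dir, segment_names):
    # Idempotent directory creation (the code above uses mkpath())
    os.makedirs(status_dir, exist_ok=True)
    for name in segment_names:
        # Mode 'a' creates the empty marker without truncating an
        # existing one, matching the open(..., 'a') pattern above
        with open(os.path.join(status_dir, "%s.done" % name), 'a'):
            pass
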
Example #2
    def _generate_archive_status(self, recovery_info, remote_command,
                                 required_xlog_files):
        """
        Populate the archive_status directory

        :param dict recovery_info: Dictionary containing all the recovery
            parameters
        :param str remote_command: ssh command for remote connection
        :param tuple required_xlog_files: list of required WAL segments
        """
        if remote_command:
            status_dir = recovery_info['tempdir']
        else:
            status_dir = os.path.join(recovery_info['wal_dest'],
                                      'archive_status')
            mkpath(status_dir)
        for wal_info in required_xlog_files:
            with open(os.path.join(status_dir, "%s.done" % wal_info.name),
                      'a') as f:
                f.write('')
        if remote_command:
            try:
                recovery_info['rsync'](
                    '%s/' % status_dir, ':%s' %
                    os.path.join(recovery_info['wal_dest'], 'archive_status'))
            except CommandFailedException as e:
                output.error(
                    "unable to populate archive_status "
                    "directory: %s", e)
                output.close_and_exit()
Example #3
    def receive_wal(self, reset=False):
        """
        Creates a PgReceiveXlog object and issues the pg_receivexlog command
        for a specific server

        :param bool reset: When set, reset the status of receive-wal
        :raise ArchiverFailure: when something goes wrong
        """
        # Ensure the presence of the destination directory
        mkpath(self.config.streaming_wals_directory)

        # Check if this is a reset request
        if reset:
            self._reset_streaming_status()
            return

        # Execute basic sanity checks on PostgreSQL connection
        postgres_status = self.server.streaming.get_remote_status()
        if postgres_status["streaming_supported"] is None:
            raise ArchiverFailure(
                'failed opening the PostgreSQL streaming connection')
        elif not postgres_status["streaming_supported"]:
            raise ArchiverFailure(
                'PostgreSQL version too old (%s < 9.2)' %
                self.server.streaming.server_txt_version)
        # Execute basic sanity checks on pg_receivexlog
        remote_status = self.get_remote_status()
        if not remote_status["pg_receivexlog_installed"]:
            raise ArchiverFailure(
                'pg_receivexlog not present in $PATH')
        if not remote_status['pg_receivexlog_compatible']:
            raise ArchiverFailure(
                'pg_receivexlog version not compatible with '
                'PostgreSQL server version')

        # Make sure we are not wasting precious PostgreSQL resources
        self.server.postgres.close()
        self.server.streaming.close()

        _logger.info('Activating WAL archiving through streaming protocol')
        try:
            output_handler = PgReceiveXlog.make_output_handler(
                self.config.name + ': ')
            receive = PgReceiveXlog(remote_status['pg_receivexlog_path'],
                                    self.config.streaming_conninfo,
                                    self.config.streaming_wals_directory,
                                    out_handler=output_handler,
                                    err_handler=output_handler)
            receive.execute()
        except CommandFailedException as e:
            _logger.error(e)
            raise ArchiverFailure("pg_receivexlog exited with an error. "
                                  "Check the logs for more information.")
        except KeyboardInterrupt:
            # This is a normal termination, so there is nothing to do besides
            # informing the user.
            output.info('SIGINT received. Terminate gracefully.')
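For orientation, the PgReceiveXlog wrapper used here ultimately drives the pg_receivexlog client (renamed pg_receivewal in PostgreSQL 10). A rough standalone equivalent, assuming the tool is available on $PATH, would be:

import subprocess

def stream_wals(conninfo, dest_dir, command="pg_receivewal"):
    # -D is the destination directory, -d the libpq connection string;
    # the process keeps running until interrupted or the link drops
    subprocess.check_call([command, "-D", dest_dir, "-d", conninfo])
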
Example #4
def build_backup_directories(backup_info):
    """
    Create on disk directory structure for a given BackupInfo

    :param BackupInfo backup_info:
    """
    rmtree(backup_info.get_basebackup_directory(), ignore_errors=True)
    mkpath(backup_info.get_data_directory())
    for tbs in backup_info.tablespaces:
        mkpath(backup_info.get_data_directory(tbs.oid))
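The mkpath used throughout these examples is presumably distutils.dir_util.mkpath (rmtree is shutil.rmtree). On modern Python the same idempotent directory creation can be written as:

import os

def mkpath(path):
    # Create the directory and any missing parents; do nothing if it
    # already exists (the behaviour distutils.dir_util.mkpath provides)
    os.makedirs(path, exist_ok=True)
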
Example #5
    def cron_wal_archival(self, compressor, wal_info):
        """
        Archive a WAL segment from the incoming directory.
        The given WalFileInfo object is updated in place.

        :param compressor: the compressor for the file (if any)
        :param wal_info: WalFileInfo of the WAL file being processed
        """
        destfile = wal_info.fullpath(self.server)
        destdir = os.path.dirname(destfile)
        srcfile = os.path.join(self.config.incoming_wals_directory,
                               wal_info.name)

        # Run the pre_archive_script if present.
        script = HookScriptRunner(self, 'archive_script', 'pre')
        script.env_from_wal_info(wal_info, srcfile)
        script.run()

        mkpath(destdir)
        if compressor:
            compressor.compress(srcfile, destfile)
            shutil.copystat(srcfile, destfile)
            os.unlink(srcfile)
        else:
            shutil.move(srcfile, destfile)

        # execute fsync() on the archived WAL containing directory
        fsync_dir(destdir)
        # execute fsync() also on the incoming directory
        fsync_dir(self.config.incoming_wals_directory)
        # execute fsync() on the archived WAL file
        file_fd = os.open(destfile, os.O_RDONLY)
        os.fsync(file_fd)
        os.close(file_fd)

        stat = os.stat(destfile)
        wal_info.size = stat.st_size
        wal_info.compression = compressor and compressor.compression

        # Run the post_archive_script if present.
        script = HookScriptRunner(self, 'archive_script', 'post')
        script.env_from_wal_info(wal_info)
        script.run()
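The sequence of fsync() calls is what makes the archiving durable: the file's contents, the directory entry created by the move, and the removal from the incoming directory must each reach stable storage. A POSIX-only sketch of the primitive behind both fsync_dir() and the file sync above:

import os

def fsync_path(path):
    # On POSIX this works for regular files and directories alike;
    # syncing a directory makes a rename or unlink inside it durable
    fd = os.open(path, os.O_RDONLY)
    try:
        os.fsync(fd)
    finally:
        os.close(fd)
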
Example #6
    def cron_wal_archival(self, compressor, wal_info):
        """
        Archive a WAL segment from the incoming directory.
        The given WalFileInfo object is updated in place.

        :param compressor: the compressor for the file (if any)
        :param wal_info: WalFileInfo of the WAL file being processed
        """

        dest_file = wal_info.fullpath(self.server)
        dest_dir = os.path.dirname(dest_file)
        srcfile = os.path.join(self.config.incoming_wals_directory,
                               wal_info.name)

        error = None
        try:
            # Run the pre_archive_script if present.
            script = HookScriptRunner(self, 'archive_script', 'pre')
            script.env_from_wal_info(wal_info, srcfile)
            script.run()

            # Run the pre_archive_retry_script if present.
            retry_script = RetryHookScriptRunner(self,
                                                 'archive_retry_script',
                                                 'pre')
            retry_script.env_from_wal_info(wal_info, srcfile)
            retry_script.run()

            mkpath(dest_dir)
            if compressor:
                compressor.compress(srcfile, dest_file)
                shutil.copystat(srcfile, dest_file)
                os.unlink(srcfile)
            else:
                shutil.move(srcfile, dest_file)

            # Execute fsync() on the archived WAL containing directory
            fsync_dir(dest_dir)
            # Execute fsync() also on the incoming directory
            fsync_dir(self.config.incoming_wals_directory)
            # Execute fsync() on the archived WAL file
            file_fd = os.open(dest_file, os.O_RDONLY)
            os.fsync(file_fd)
            os.close(file_fd)

            stat = os.stat(dest_file)
            wal_info.size = stat.st_size
            wal_info.compression = compressor and compressor.compression

        except Exception as e:
            # In case of failure save the exception for the post scripts
            error = e
            raise

        # Ensure the execution of the post_archive_retry_script and
        # the post_archive_script
        finally:
            # Run the post_archive_retry_script if present.
            try:
                retry_script = RetryHookScriptRunner(self,
                                                     'archive_retry_script',
                                                     'post')
                retry_script.env_from_wal_info(wal_info, dest_file, error)
                retry_script.run()
            except AbortedRetryHookScript as e:
                # Ignore the ABORT_STOP as it is a post-hook operation
                _logger.warning("Ignoring stop request after receiving "
                                "abort (exit code %d) from post-archive "
                                "retry hook script: %s",
                                e.hook.exit_status, e.hook.script)

            # Run the post_archive_script if present.
            script = HookScriptRunner(self, 'archive_script', 'post', error)
            script.env_from_wal_info(wal_info, dest_file)
            script.run()
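The error-handling shape of this variant is worth isolating: the exception is stored and re-raised so the failure still propagates, while the finally block guarantees the post hooks always run and can see what went wrong. A generic sketch (run_with_post_hook is a hypothetical name):

def run_with_post_hook(action, post_hook):
    error = None
    try:
        action()
    except Exception as e:
        error = e  # remember the failure for the post hook...
        raise      # ...but still propagate it to the caller
    finally:
        post_hook(error)  # always runs, receiving the error or None
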
Example #7
    def receive_wal(self, reset=False):
        """
        Creates a PgReceiveXlog object and issues the pg_receivexlog command
        for a specific server

        :param bool reset: When set, reset the status of receive-wal
        :raise ArchiverFailure: when something goes wrong
        """
        # Ensure the presence of the destination directory
        mkpath(self.config.streaming_wals_directory)

        # Check if this is a reset request
        if reset:
            self._reset_streaming_status()
            return

        # Execute basic sanity checks on PostgreSQL connection
        streaming_status = self.server.streaming.get_remote_status()
        if streaming_status["streaming_supported"] is None:
            raise ArchiverFailure(
                'failed opening the PostgreSQL streaming connection '
                'for server %s' % (self.config.name))
        elif not streaming_status["streaming_supported"]:
            raise ArchiverFailure('PostgreSQL version too old (%s < 9.2)' %
                                  self.server.streaming.server_txt_version)
        # Execute basic sanity checks on pg_receivexlog
        remote_status = self.get_remote_status()
        if not remote_status["pg_receivexlog_installed"]:
            raise ArchiverFailure('pg_receivexlog not present in $PATH')
        if not remote_status['pg_receivexlog_compatible']:
            raise ArchiverFailure('pg_receivexlog version not compatible with '
                                  'PostgreSQL server version')

        # Execute sanity check on replication slot usage
        if self.config.slot_name:
            # Check if slots are supported
            if not remote_status['pg_receivexlog_supports_slots']:
                raise ArchiverFailure(
                    'Physical replication slot not supported by %s '
                    '(9.4 or higher is required)' %
                    self.server.streaming.server_txt_version)
            # Check if the required slot exists
            postgres_status = self.server.postgres.get_remote_status()
            if postgres_status['replication_slot'] is None:
                raise ArchiverFailure(
                    "replication slot '%s' doesn't exist. "
                    "Please execute "
                    "'barman receive-wal --create-slot %s'" %
                    (self.config.slot_name, self.config.name))
            # Check if the required slot is available
            if postgres_status['replication_slot'].active:
                raise ArchiverFailure(
                    "replication slot '%s' is already in use" %
                    (self.config.slot_name, ))

        # Make sure we are not wasting precious PostgreSQL resources
        self.server.close()

        _logger.info('Activating WAL archiving through streaming protocol')
        try:
            output_handler = PgReceiveXlog.make_output_handler(
                self.config.name + ': ')
            receive = PgReceiveXlog(
                connection=self.server.streaming,
                destination=self.config.streaming_wals_directory,
                command=remote_status['pg_receivexlog_path'],
                version=remote_status['pg_receivexlog_version'],
                app_name=self.config.streaming_archiver_name,
                path=self.server.path,
                slot_name=self.config.slot_name,
                synchronous=remote_status['pg_receivexlog_synchronous'],
                out_handler=output_handler,
                err_handler=output_handler)
            # Finally execute the pg_receivexlog process
            receive.execute()
        except CommandFailedException as e:
            # Retrieve the return code from the exception
            ret_code = e.args[0]['ret']
            if ret_code < 0:
                # If the return code is negative, then pg_receivexlog
                # was terminated by a signal
                msg = "pg_receivexlog terminated by signal: %s" \
                      % abs(ret_code)
            else:
                # Otherwise terminated with an error
                msg = "pg_receivexlog terminated with error code: %s"\
                      % ret_code

            raise ArchiverFailure(msg)
        except KeyboardInterrupt:
            # This is a normal termination, so there is nothing to do besides
            # informing the user.
            output.info('SIGINT received. Terminate gracefully.')
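The replication-slot sanity checks above correspond to information PostgreSQL exposes in pg_replication_slots. A hedged sketch of the same check using psycopg2 (not the code Barman itself runs):

import psycopg2

def slot_is_free(conninfo, slot_name):
    # The slot must exist and have no walreceiver attached
    # (active = false) before a streaming client can use it
    conn = psycopg2.connect(conninfo)
    try:
        cur = conn.cursor()
        cur.execute("SELECT active FROM pg_replication_slots"
                    " WHERE slot_name = %s", (slot_name,))
        row = cur.fetchone()
        if row is None:
            raise LookupError("replication slot %r doesn't exist"
                              % slot_name)
        return not row[0]
    finally:
        conn.close()
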
Example #8
    def archive_wal(self, compressor, wal_info):
        """
        Archive a WAL segment and update the wal_info object

        :param compressor: the compressor for the file (if any)
        :param WalFileInfo wal_info: the WAL file being processed
        """

        src_file = wal_info.orig_filename
        src_dir = os.path.dirname(src_file)
        dst_file = wal_info.fullpath(self.server)
        tmp_file = dst_file + '.tmp'
        dst_dir = os.path.dirname(dst_file)

        error = None
        try:
            # Run the pre_archive_script if present.
            script = HookScriptRunner(self.backup_manager, 'archive_script',
                                      'pre')
            script.env_from_wal_info(wal_info, src_file)
            script.run()

            # Run the pre_archive_retry_script if present.
            retry_script = RetryHookScriptRunner(self.backup_manager,
                                                 'archive_retry_script', 'pre')
            retry_script.env_from_wal_info(wal_info, src_file)
            retry_script.run()

            # Check if destination already exists
            if os.path.exists(dst_file):
                src_uncompressed = src_file
                dst_uncompressed = dst_file
                dst_info = WalFileInfo.from_file(dst_file)
                try:
                    comp_manager = self.backup_manager.compression_manager
                    if dst_info.compression is not None:
                        dst_uncompressed = dst_file + '.uncompressed'
                        comp_manager.get_compressor(
                            compression=dst_info.compression).decompress(
                                dst_file, dst_uncompressed)
                    if wal_info.compression:
                        src_uncompressed = src_file + '.uncompressed'
                        comp_manager.get_compressor(
                            compression=wal_info.compression).decompress(
                                src_file, src_uncompressed)
                    # Directly compare files.
                    # When the files are identical
                    # raise a MatchingDuplicateWalFile exception,
                    # otherwise raise a DuplicateWalFile exception.
                    if filecmp.cmp(dst_uncompressed, src_uncompressed):
                        raise MatchingDuplicateWalFile(wal_info)
                    else:
                        raise DuplicateWalFile(wal_info)
                finally:
                    if src_uncompressed != src_file:
                        os.unlink(src_uncompressed)
                    if dst_uncompressed != dst_file:
                        os.unlink(dst_uncompressed)

            mkpath(dst_dir)
            # Compress the file only if not already compressed
            if compressor and not wal_info.compression:
                compressor.compress(src_file, tmp_file)
                shutil.copystat(src_file, tmp_file)
                os.rename(tmp_file, dst_file)
                os.unlink(src_file)
                # Update wal_info
                stat = os.stat(dst_file)
                wal_info.size = stat.st_size
                wal_info.compression = compressor.compression
            else:
                # Try to atomically rename the file. If successful,
                # the renaming will be an atomic operation
                # (this is a POSIX requirement).
                try:
                    os.rename(src_file, dst_file)
                except OSError:
                    # Source and destination are probably on different
                    # filesystems
                    shutil.copy2(src_file, tmp_file)
                    os.rename(tmp_file, dst_file)
                    os.unlink(src_file)
            # At this point the original file has been removed
            wal_info.orig_filename = None

            # Execute fsync() on the archived WAL file
            file_fd = os.open(dst_file, os.O_RDONLY)
            os.fsync(file_fd)
            os.close(file_fd)
            # Execute fsync() on the archived WAL containing directory
            fsync_dir(dst_dir)
            # Execute fsync() also on the incoming directory
            fsync_dir(src_dir)
        except Exception as e:
            # In case of failure save the exception for the post scripts
            error = e
            raise

        # Ensure the execution of the post_archive_retry_script and
        # the post_archive_script
        finally:
            # Run the post_archive_retry_script if present.
            try:
                retry_script = RetryHookScriptRunner(self,
                                                     'archive_retry_script',
                                                     'post')
                retry_script.env_from_wal_info(wal_info, dst_file, error)
                retry_script.run()
            except AbortedRetryHookScript as e:
                # Ignore the ABORT_STOP as it is a post-hook operation
                _logger.warning(
                    "Ignoring stop request after receiving "
                    "abort (exit code %d) from post-archive "
                    "retry hook script: %s", e.hook.exit_status, e.hook.script)

            # Run the post_archive_script if present.
            script = HookScriptRunner(self, 'archive_script', 'post', error)
            script.env_from_wal_info(wal_info, dst_file)
            script.run()
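The duplicate detection decompresses both sides as needed and then compares them with filecmp.cmp. Note that the call above uses the default shallow mode, which may trust matching os.stat() signatures; a stricter byte-by-byte variant would be:

import filecmp

def same_wal_contents(path_a, path_b):
    # shallow=False always compares file contents, never just stat()
    return filecmp.cmp(path_a, path_b, shallow=False)
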
Example #9
    def receive_wal(self, reset=False):
        """
        Creates a PgReceiveXlog object and issues the pg_receivexlog command
        for a specific server

        :param bool reset: When set, reset the status of receive-wal
        :raise ArchiverFailure: when something goes wrong
        """
        # Ensure the presence of the destination directory
        mkpath(self.config.streaming_wals_directory)

        # Check if this is a reset request
        if reset:
            self._reset_streaming_status()
            return

        # Execute basic sanity checks on PostgreSQL connection
        postgres_status = self.server.streaming.get_remote_status()
        if postgres_status["streaming_supported"] is None:
            raise ArchiverFailure(
                'failed opening the PostgreSQL streaming connection')
        elif not postgres_status["streaming_supported"]:
            raise ArchiverFailure(
                'PostgreSQL version too old (%s < 9.2)' %
                self.server.streaming.server_txt_version)
        # Execute basic sanity checks on pg_receivexlog
        remote_status = self.get_remote_status()
        if not remote_status["pg_receivexlog_installed"]:
            raise ArchiverFailure(
                'pg_receivexlog not present in $PATH')
        if not remote_status['pg_receivexlog_compatible']:
            raise ArchiverFailure(
                'pg_receivexlog version not compatible with '
                'PostgreSQL server version')

        # Make sure we are not wasting precious PostgreSQL resources
        self.server.close()

        _logger.info('Activating WAL archiving through streaming protocol')
        try:
            output_handler = PgReceiveXlog.make_output_handler(
                self.config.name + ': ')
            if remote_status['pg_receivexlog_version'] >= "9.3":
                conn_string = self.config.streaming_conninfo
                # Set a default application_name unless defined by user
                if ('application_name' not in
                        self.server.streaming.conn_parameters):
                    conn_string += ' application_name=%s' % (
                        self.config.streaming_archiver_name
                    )
                # If pg_receivexlog version is >= 9.3 we use the connection
                # string because it allows the user to use all the parameters
                # supported by the libpq library to create a connection
                receive = PgReceiveXlog(
                    remote_status['pg_receivexlog_path'],
                    conn_string=conn_string,
                    dest=self.config.streaming_wals_directory,
                    out_handler=output_handler,
                    err_handler=output_handler)
            else:
                # The 9.2 version of pg_receivexlog doesn't support
                # connection strings so the 'split' version of the conninfo
                # option is used instead.
                conn_params = self.server.streaming.conn_parameters
                receive = PgReceiveXlog(
                    remote_status['pg_receivexlog_path'],
                    host=conn_params.get('host', None),
                    port=conn_params.get('port', None),
                    user=conn_params.get('user', None),
                    dest=self.config.streaming_wals_directory,
                    out_handler=output_handler,
                    err_handler=output_handler)
            # Finally execute the pg_receivexlog process
            receive.execute()
        except CommandFailedException as e:
            _logger.error(e)
            raise ArchiverFailure("pg_receivexlog exited with an error. "
                                  "Check the logs for more information.")
        except KeyboardInterrupt:
            # This is a normal termination, so there is nothing to do besides
            # informing the user.
            output.info('SIGINT received. Terminate gracefully.')
Example #10
    def archive_wal(self, compressor, wal_info):
        """
        Archive a WAL segment and update the wal_info object

        :param compressor: the compressor for the file (if any)
        :param WalFileInfo wal_info: the WAL file being processed
        """

        src_file = wal_info.orig_filename
        src_dir = os.path.dirname(src_file)
        dst_file = wal_info.fullpath(self.server)
        tmp_file = dst_file + '.tmp'
        dst_dir = os.path.dirname(dst_file)

        error = None
        try:
            # Run the pre_archive_script if present.
            script = HookScriptRunner(self.backup_manager,
                                      'archive_script', 'pre')
            script.env_from_wal_info(wal_info, src_file)
            script.run()

            # Run the pre_archive_retry_script if present.
            retry_script = RetryHookScriptRunner(self.backup_manager,
                                                 'archive_retry_script',
                                                 'pre')
            retry_script.env_from_wal_info(wal_info, src_file)
            retry_script.run()

            # Check if destination already exists
            if os.path.exists(dst_file):
                src_uncompressed = src_file
                dst_uncompressed = dst_file
                dst_info = WalFileInfo.from_file(dst_file)
                try:
                    comp_manager = self.backup_manager.compression_manager
                    if dst_info.compression is not None:
                        dst_uncompressed = dst_file + '.uncompressed'
                        comp_manager.get_compressor(
                            compression=dst_info.compression).decompress(
                                dst_file, dst_uncompressed)
                    if wal_info.compression:
                        src_uncompressed = src_file + '.uncompressed'
                        comp_manager.get_compressor(
                            compression=wal_info.compression).decompress(
                                src_file, src_uncompressed)
                    # Directly compare files.
                    # When the files are identical
                    # raise a MatchingDuplicateWalFile exception,
                    # otherwise raise a DuplicateWalFile exception.
                    if filecmp.cmp(dst_uncompressed, src_uncompressed):
                        raise MatchingDuplicateWalFile(wal_info)
                    else:
                        raise DuplicateWalFile(wal_info)
                finally:
                    if src_uncompressed != src_file:
                        os.unlink(src_uncompressed)
                    if dst_uncompressed != dst_file:
                        os.unlink(dst_uncompressed)

            mkpath(dst_dir)
            # Compress the file only if not already compressed
            if compressor and not wal_info.compression:
                compressor.compress(src_file, tmp_file)
                shutil.copystat(src_file, tmp_file)
                os.rename(tmp_file, dst_file)
                os.unlink(src_file)
                # Update wal_info
                stat = os.stat(dst_file)
                wal_info.size = stat.st_size
                wal_info.compression = compressor.compression
            else:
                # Try to atomically rename the file. If successful,
                # the renaming will be an atomic operation
                # (this is a POSIX requirement).
                try:
                    os.rename(src_file, dst_file)
                except OSError:
                    # Source and destination are probably on different
                    # filesystems
                    shutil.copy2(src_file, tmp_file)
                    os.rename(tmp_file, dst_file)
                    os.unlink(src_file)
            # At this point the original file has been removed
            wal_info.orig_filename = None

            # Execute fsync() on the archived WAL file
            file_fd = os.open(dst_file, os.O_RDONLY)
            os.fsync(file_fd)
            os.close(file_fd)
            # Execute fsync() on the archived WAL containing directory
            fsync_dir(dst_dir)
            # Execute fsync() also on the incoming directory
            fsync_dir(src_dir)
        except Exception as e:
            # In case of failure save the exception for the post scripts
            error = e
            raise

        # Ensure the execution of the post_archive_retry_script and
        # the post_archive_script
        finally:
            # Run the post_archive_retry_script if present.
            try:
                retry_script = RetryHookScriptRunner(self,
                                                     'archive_retry_script',
                                                     'post')
                retry_script.env_from_wal_info(wal_info, dst_file, error)
                retry_script.run()
            except AbortedRetryHookScript as e:
                # Ignore the ABORT_STOP as it is a post-hook operation
                _logger.warning("Ignoring stop request after receiving "
                                "abort (exit code %d) from post-archive "
                                "retry hook script: %s",
                                e.hook.exit_status, e.hook.script)

            # Run the post_archive_script if present.
            script = HookScriptRunner(self, 'archive_script', 'post', error)
            script.env_from_wal_info(wal_info, dst_file)
            script.run()
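Both archive_wal variants lean on the same atomicity rule: os.rename() is atomic within a single filesystem (a POSIX guarantee) and raises OSError across filesystems, where a copy through a temporary file followed by a rename takes over. Condensed:

import os
import shutil

def atomic_move(src, dst):
    try:
        # Atomic when src and dst live on the same filesystem
        os.rename(src, dst)
    except OSError:
        # Cross-filesystem fallback: copy next to the destination,
        # then rename so dst never appears half-written
        tmp = dst + '.tmp'
        shutil.copy2(src, tmp)
        os.rename(tmp, dst)
        os.unlink(src)
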
Example #11
    def _xlog_copy(self, required_xlog_files, wal_dest, remote_command):
        """
        Restore WAL segments

        :param required_xlog_files: list of all required WAL files
        :param wal_dest: the destination directory for xlog recover
        :param remote_command: the remote command used to recover the xlog,
               in case of a remote backup (default None).
        """
        # List of required WAL files partitioned by containing directory
        xlogs = collections.defaultdict(list)
        # add '/' suffix to ensure it is a directory
        wal_dest = '%s/' % wal_dest
        # Map of every compressor used with any WAL file in the archive,
        # to be used during this recovery
        compressors = {}
        compression_manager = self.backup_manager.compression_manager
        # Fill xlogs and compressors maps from required_xlog_files
        for wal_info in required_xlog_files:
            hashdir = xlog.hash_dir(wal_info.name)
            xlogs[hashdir].append(wal_info)
            # If a compressor is required, make sure it exists in the cache
            if wal_info.compression is not None and \
                    wal_info.compression not in compressors:
                compressors[wal_info.compression] = \
                    compression_manager.get_compressor(
                        compression=wal_info.compression)

        rsync = RsyncPgData(
            path=self.server.path,
            ssh=remote_command,
            bwlimit=self.config.bandwidth_limit,
            network_compression=self.config.network_compression)
        # If compression is used and this is a remote recovery, we need a
        # temporary directory in which to spool uncompressed files,
        # otherwise we either decompress every WAL file in the local
        # destination, or we ship the uncompressed file remotely
        if compressors:
            if remote_command:
                # Decompress to a temporary spool directory
                wal_decompression_dest = tempfile.mkdtemp(
                    prefix='barman_xlog-')
            else:
                # Decompress directly to the destination directory
                wal_decompression_dest = wal_dest
            # Make sure wal_decompression_dest exists
            mkpath(wal_decompression_dest)
        else:
            # If no compression
            wal_decompression_dest = None
        if remote_command:
            # If this is a remote recovery, tell rsync to copy them remotely
            # add ':' prefix to mark it as remote
            wal_dest = ':%s' % wal_dest
        total_wals = sum(map(len, xlogs.values()))
        partial_count = 0
        for prefix in sorted(xlogs):
            batch_len = len(xlogs[prefix])
            partial_count += batch_len
            source_dir = os.path.join(self.config.wals_directory, prefix)
            _logger.info("Starting copy of %s WAL files %s/%s from %s to %s",
                         batch_len, partial_count, total_wals,
                         xlogs[prefix][0], xlogs[prefix][-1])
            # If at least one compressed file has been found, activate
            # compression check and decompression for each WAL file
            if compressors:
                for segment in xlogs[prefix]:
                    dst_file = os.path.join(wal_decompression_dest,
                                            segment.name)
                    if segment.compression is not None:
                        compressors[segment.compression].decompress(
                            os.path.join(source_dir, segment.name), dst_file)
                    else:
                        shutil.copy2(os.path.join(source_dir, segment.name),
                                     dst_file)
                if remote_command:
                    try:
                        # Transfer the WAL files
                        rsync.from_file_list(
                            list(segment.name for segment in xlogs[prefix]),
                            wal_decompression_dest, wal_dest)
                    except CommandFailedException as e:
                        msg = ("data transfer failure while copying WAL files "
                               "to directory '%s'") % (wal_dest[1:], )
                        raise DataTransferFailure.from_command_error(
                            'rsync', e, msg)

                    # Cleanup files after the transfer
                    for segment in xlogs[prefix]:
                        file_name = os.path.join(wal_decompression_dest,
                                                 segment.name)
                        try:
                            os.unlink(file_name)
                        except OSError as e:
                            output.warning(
                                "Error removing temporary file '%s': %s",
                                file_name, e)
            else:
                try:
                    rsync.from_file_list(
                        list(segment.name
                             for segment in xlogs[prefix]), "%s/" %
                        os.path.join(self.config.wals_directory, prefix),
                        wal_dest)
                except CommandFailedException as e:
                    msg = "data transfer failure while copying WAL files " \
                          "to directory '%s'" % (wal_dest[1:],)
                    raise DataTransferFailure.from_command_error(
                        'rsync', e, msg)

        _logger.info("Finished copying %s WAL files.", total_wals)

        # Remove local decompression target directory if different from the
        # destination directory (it happens when compression is in use during a
        # remote recovery)
        if wal_decompression_dest and wal_decompression_dest != wal_dest:
            shutil.rmtree(wal_decompression_dest)
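The batching hinges on xlog.hash_dir(), which for ordinary segments appears to be the first 16 hex characters of the WAL name (timeline plus log id); Barman stores segments under wals/<hash>/<name>. A sketch of the grouping step under that assumption:

import collections

def group_by_hash_dir(wal_names):
    # e.g. '000000010000000200000003' groups under '0000000100000002'
    xlogs = collections.defaultdict(list)
    for name in wal_names:
        xlogs[name[:16]].append(name)
    return xlogs
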
Example #12
    def receive_wal(self, reset=False):
        """
        Creates a PgReceiveXlog object and issues the pg_receivexlog command
        for a specific server

        :param bool reset: When set, reset the status of receive-wal
        :raise ArchiverFailure: when something goes wrong
        """
        # Ensure the presence of the destination directory
        mkpath(self.config.streaming_wals_directory)

        # Execute basic sanity checks on PostgreSQL connection
        streaming_status = self.server.streaming.get_remote_status()
        if streaming_status["streaming_supported"] is None:
            raise ArchiverFailure(
                "failed opening the PostgreSQL streaming connection "
                "for server %s" % (self.config.name))
        elif not streaming_status["streaming_supported"]:
            raise ArchiverFailure("PostgreSQL version too old (%s < 9.2)" %
                                  self.server.streaming.server_txt_version)
        # Execute basic sanity checks on pg_receivexlog
        command = "pg_receivewal"
        if self.server.streaming.server_version < 100000:
            command = "pg_receivexlog"
        remote_status = self.get_remote_status()
        if not remote_status["pg_receivexlog_installed"]:
            raise ArchiverFailure("%s not present in $PATH" % command)
        if not remote_status["pg_receivexlog_compatible"]:
            raise ArchiverFailure(
                "%s version not compatible with PostgreSQL server version" %
                command)

        # Execute sanity check on replication slot usage
        postgres_status = self.server.postgres.get_remote_status()
        if self.config.slot_name:
            # Check if slots are supported
            if not remote_status["pg_receivexlog_supports_slots"]:
                raise ArchiverFailure(
                    "Physical replication slot not supported by %s "
                    "(9.4 or higher is required)" %
                    self.server.streaming.server_txt_version)
            # Check if the required slot exists
            if postgres_status["replication_slot"] is None:
                if self.config.create_slot == "auto":
                    if not reset:
                        output.info("Creating replication slot '%s'",
                                    self.config.slot_name)
                        self.server.create_physical_repslot()
                else:
                    raise ArchiverFailure(
                        "replication slot '%s' doesn't exist. "
                        "Please execute "
                        "'barman receive-wal --create-slot %s'" %
                        (self.config.slot_name, self.config.name))
            # Check if the required slot is available
            elif postgres_status["replication_slot"].active:
                raise ArchiverFailure(
                    "replication slot '%s' is already in use" %
                    (self.config.slot_name, ))

        # Check if this is a reset request
        if reset:
            self._reset_streaming_status(postgres_status, streaming_status)
            return

        # Check the size of the .partial WAL file and truncate it if needed
        self._truncate_partial_file_if_needed(
            postgres_status["xlog_segment_size"])

        # Make sure we are not wasting precious PostgreSQL resources
        self.server.close()

        _logger.info("Activating WAL archiving through streaming protocol")
        try:
            output_handler = PgReceiveXlog.make_output_handler(
                self.config.name + ": ")
            receive = PgReceiveXlog(
                connection=self.server.streaming,
                destination=self.config.streaming_wals_directory,
                command=remote_status["pg_receivexlog_path"],
                version=remote_status["pg_receivexlog_version"],
                app_name=self.config.streaming_archiver_name,
                path=self.server.path,
                slot_name=self.config.slot_name,
                synchronous=remote_status["pg_receivexlog_synchronous"],
                out_handler=output_handler,
                err_handler=output_handler,
            )
            # Finally execute the pg_receivexlog process
            receive.execute()
        except CommandFailedException as e:
            # Retrieve the return code from the exception
            ret_code = e.args[0]["ret"]
            if ret_code < 0:
                # If the return code is negative, then pg_receivexlog
                # was terminated by a signal
                msg = "%s terminated by signal: %s" % (command, abs(ret_code))
            else:
                # Otherwise terminated with an error
                msg = "%s terminated with error code: %s" % (command, ret_code)

            raise ArchiverFailure(msg)
        except KeyboardInterrupt:
            # This is a normal termination, so there is nothing to do besides
            # informing the user.
            output.info("SIGINT received. Terminate gracefully.")
Example #13
    def receive_wal(self, reset=False):
        """
        Creates a PgReceiveXlog object and issues the pg_receivexlog command
        for a specific server

        :param bool reset: When set, reset the status of receive-wal
        :raise ArchiverFailure: when something goes wrong
        """
        # Ensure the presence of the destination directory
        mkpath(self.config.streaming_wals_directory)

        # Check if this is a reset request
        if reset:
            self._reset_streaming_status()
            return

        # Execute basic sanity checks on PostgreSQL connection
        postgres_status = self.server.streaming.get_remote_status()
        if postgres_status["streaming_supported"] is None:
            raise ArchiverFailure(
                'failed opening the PostgreSQL streaming connection')
        elif not postgres_status["streaming_supported"]:
            raise ArchiverFailure('PostgreSQL version too old (%s < 9.2)' %
                                  self.server.streaming.server_txt_version)
        # Execute basic sanity checks on pg_receivexlog
        remote_status = self.get_remote_status()
        if not remote_status["pg_receivexlog_installed"]:
            raise ArchiverFailure('pg_receivexlog not present in $PATH')
        if not remote_status['pg_receivexlog_compatible']:
            raise ArchiverFailure('pg_receivexlog version not compatible with '
                                  'PostgreSQL server version')

        # Make sure we are not wasting precious PostgreSQL resources
        self.server.close()

        _logger.info('Activating WAL archiving through streaming protocol')
        try:
            output_handler = PgReceiveXlog.make_output_handler(
                self.config.name + ': ')
            if remote_status['pg_receivexlog_version'] >= "9.3":
                conn_string = self.config.streaming_conninfo
                # Set a default application_name unless defined by user
                if ('application_name'
                        not in self.server.streaming.conn_parameters):
                    conn_string += ' application_name=%s' % (
                        self.config.streaming_archiver_name)
                # If pg_receivexlog version is >= 9.3 we use the connection
                # string because it allows the user to use all the parameters
                # supported by the libpq library to create a connection
                receive = PgReceiveXlog(
                    remote_status['pg_receivexlog_path'],
                    conn_string=conn_string,
                    dest=self.config.streaming_wals_directory,
                    out_handler=output_handler,
                    err_handler=output_handler)
            else:
                # The 9.2 version of pg_receivexlog doesn't support
                # connection strings so the 'split' version of the conninfo
                # option is used instead.
                conn_params = self.server.streaming.conn_parameters
                receive = PgReceiveXlog(
                    remote_status['pg_receivexlog_path'],
                    host=conn_params.get('host', None),
                    port=conn_params.get('port', None),
                    user=conn_params.get('user', None),
                    dest=self.config.streaming_wals_directory,
                    out_handler=output_handler,
                    err_handler=output_handler)
            # Finally execute the pg_receivexlog process
            receive.execute()
        except CommandFailedException as e:
            _logger.error(e)
            raise ArchiverFailure("pg_receivexlog exited with an error. "
                                  "Check the logs for more information.")
        except KeyboardInterrupt:
            # This is a normal termination, so there is nothing to do besides
            # informing the user.
            output.info('SIGINT received. Terminate gracefully.')
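The application_name handling can be isolated into a small helper: append a default only when the parsed connection parameters do not already carry one. with_application_name below is a hypothetical name:

def with_application_name(conninfo, conn_parameters, app_name):
    # conn_parameters is assumed to be the parsed form of conninfo,
    # e.g. the result of psycopg2.extensions.parse_dsn(conninfo)
    if 'application_name' not in conn_parameters:
        conninfo += ' application_name=%s' % app_name
    return conninfo
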
Example #14
    def xlog_copy(self, required_xlog_files, wal_dest, remote_command):
        """
        Restore WAL segments

        :param required_xlog_files: list of all required WAL files
        :param wal_dest: the destination directory for xlog recover
        :param remote_command: the remote command used to recover the xlog,
               in case of a remote backup (default None).
        """
        # Retrieve the list of required WAL segments
        # according to recovery options
        xlogs = {}
        for wal_info in required_xlog_files:
            hashdir = xlog.hash_dir(wal_info.name)
            if hashdir not in xlogs:
                xlogs[hashdir] = []
            xlogs[hashdir].append(wal_info.name)
        # Check decompression options
        compressor = self.backup_manager.compression_manager.get_compressor()

        rsync = RsyncPgData(
            ssh=remote_command,
            bwlimit=self.config.bandwidth_limit,
            network_compression=self.config.network_compression)
        if remote_command:
            # If this is a remote recovery, tell rsync to copy them remotely
            # add ':' prefix to mark it as remote
            # add '/' suffix to ensure it is a directory
            wal_dest = ':%s/' % wal_dest
        else:
            # we will not use rsync: destdir must exist
            mkpath(wal_dest)
        if compressor and remote_command:
            xlog_spool = tempfile.mkdtemp(prefix='barman_xlog-')
        total_wals = sum(map(len, xlogs.values()))
        partial_count = 0
        for prefix in sorted(xlogs):
            batch_len = len(xlogs[prefix])
            partial_count += batch_len
            source_dir = os.path.join(self.config.wals_directory, prefix)
            _logger.info(
                "Starting copy of %s WAL files %s/%s from %s to %s",
                batch_len,
                partial_count,
                total_wals,
                xlogs[prefix][0],
                xlogs[prefix][-1])
            if compressor:
                if remote_command:
                    for segment in xlogs[prefix]:
                        compressor.decompress(os.path.join(source_dir, segment),
                                              os.path.join(xlog_spool, segment))
                    try:
                        rsync.from_file_list(xlogs[prefix],
                                             xlog_spool, wal_dest)
                    except CommandFailedException as e:
                        msg = "data transfer failure while copying WAL files " \
                              "to directory '%s'" % (wal_dest[1:],)
                        raise DataTransferFailure.from_rsync_error(e, msg)

                    # Cleanup files after the transfer
                    for segment in xlogs[prefix]:
                        file_name = os.path.join(xlog_spool, segment)
                        try:
                            os.unlink(file_name)
                        except OSError as e:
                            output.warning(
                                "Error removing temporary file '%s': %s",
                                file_name, e)
                else:
                    # decompress directly to the right place
                    for segment in xlogs[prefix]:
                        compressor.decompress(os.path.join(source_dir, segment),
                                              os.path.join(wal_dest, segment))
            else:
                try:
                    rsync.from_file_list(
                        xlogs[prefix],
                        "%s/" % os.path.join(
                            self.config.wals_directory, prefix),
                        wal_dest)
                except CommandFailedException as e:
                    msg = "data transfer failure while copying WAL files " \
                          "to directory '%s'" % (wal_dest[1:],)
                    raise DataTransferFailure.from_rsync_error(e, msg)
Example #15
                    check=True)
                try:
                    tb_rsync.smart_copy(
                        ':%s/' % tablespace.location,
                        tablespace_dest,
                        safe_horizon,
                        ref_dir)
                except CommandFailedException as e:
                    msg = "data transfer failure on directory '%s'" % \
                          backup_info.get_data_directory(tablespace.oid)
                    raise DataTransferFailure.from_rsync_error(e, msg)

        # Make sure the destination directory exists in order for smart copy
        # to detect that no file is present there
        backup_dest = backup_info.get_data_directory()
        mkpath(backup_dest)

        # Copy the pgdata, trying to reuse the data dir
        # of the previous backup if incremental is active
        ref_dir = self.reuse_dir(previous_backup)
        rsync = RsyncPgData(
            ssh=self.ssh_command,
            ssh_options=self.ssh_options,
            args=self.reuse_args(ref_dir),
            bwlimit=self.config.bandwidth_limit,
            exclude_and_protect=exclude_and_protect,
            network_compression=self.config.network_compression)
        try:
            rsync.smart_copy(':%s/' % backup_info.pgdata, backup_dest,
                             safe_horizon,
                             ref_dir)
Example #16
    def xlog_copy(self, required_xlog_files, wal_dest, remote_command):
        """
        Restore WAL segments

        :param required_xlog_files: list of all required WAL files
        :param wal_dest: the destination directory for xlog recover
        :param remote_command: the remote command used to recover the xlog,
               in case of a remote backup (default None).
        """
        # List of required WAL files partitioned by containing directory
        xlogs = collections.defaultdict(list)
        # add '/' suffix to ensure it is a directory
        wal_dest = '%s/' % wal_dest
        # Map of every compressor used with any WAL file in the archive,
        # to be used during this recovery
        compressors = {}
        compression_manager = self.backup_manager.compression_manager
        # Fill xlogs and compressors maps from required_xlog_files
        for wal_info in required_xlog_files:
            hashdir = xlog.hash_dir(wal_info.name)
            xlogs[hashdir].append(wal_info)
            # If a compressor is required, make sure it exists in the cache
            if wal_info.compression is not None and \
                    wal_info.compression not in compressors:
                compressors[wal_info.compression] = \
                    compression_manager.get_compressor(
                        compression=wal_info.compression)

        rsync = RsyncPgData(
            path=self.server.path,
            ssh=remote_command,
            bwlimit=self.config.bandwidth_limit,
            network_compression=self.config.network_compression)
        # If compression is used and this is a remote recovery, we need a
        # temporary directory in which to spool uncompressed files,
        # otherwise we either decompress every WAL file in the local
        # destination, or we ship the uncompressed file remotely
        if compressors:
            if remote_command:
                # Decompress to a temporary spool directory
                wal_decompression_dest = tempfile.mkdtemp(
                    prefix='barman_xlog-')
            else:
                # Decompress directly to the destination directory
                wal_decompression_dest = wal_dest
            # Make sure wal_decompression_dest exists
            mkpath(wal_decompression_dest)
        else:
            # If no compression
            wal_decompression_dest = None
        if remote_command:
            # If this is a remote recovery, tell rsync to copy them remotely
            # add ':' prefix to mark it as remote
            wal_dest = ':%s' % wal_dest
        total_wals = sum(map(len, xlogs.values()))
        partial_count = 0
        for prefix in sorted(xlogs):
            batch_len = len(xlogs[prefix])
            partial_count += batch_len
            source_dir = os.path.join(self.config.wals_directory, prefix)
            _logger.info(
                "Starting copy of %s WAL files %s/%s from %s to %s",
                batch_len,
                partial_count,
                total_wals,
                xlogs[prefix][0],
                xlogs[prefix][-1])
            # If at least one compressed file has been found, activate
            # compression check and decompression for each WAL file
            if compressors:
                for segment in xlogs[prefix]:
                    dst_file = os.path.join(wal_decompression_dest,
                                            segment.name)
                    if segment.compression is not None:
                        compressors[segment.compression].decompress(
                            os.path.join(source_dir, segment.name),
                            dst_file)
                    else:
                        shutil.copy2(os.path.join(source_dir, segment.name),
                                     dst_file)
                if remote_command:
                    try:
                        # Transfer the WAL files
                        rsync.from_file_list(
                            list(segment.name for segment in xlogs[prefix]),
                            wal_decompression_dest, wal_dest)
                    except CommandFailedException as e:
                        msg = ("data transfer failure while copying WAL files "
                               "to directory '%s'") % (wal_dest[1:],)
                        raise DataTransferFailure.from_rsync_error(e, msg)

                    # Cleanup files after the transfer
                    for segment in xlogs[prefix]:
                        file_name = os.path.join(wal_decompression_dest,
                                                 segment.name)
                        try:
                            os.unlink(file_name)
                        except OSError as e:
                            output.warning(
                                "Error removing temporary file '%s': %s",
                                file_name, e)
            else:
                try:
                    rsync.from_file_list(
                        list(segment.name for segment in xlogs[prefix]),
                        "%s/" % os.path.join(self.config.wals_directory,
                                             prefix),
                        wal_dest)
                except CommandFailedException as e:
                    msg = "data transfer failure while copying WAL files " \
                          "to directory '%s'" % (wal_dest[1:],)
                    raise DataTransferFailure.from_rsync_error(e, msg)

        _logger.info("Finished copying %s WAL files.", total_wals)

        # Remove local decompression target directory if different from the
        # destination directory (it happens when compression is in use during a
        # remote recovery)
        if wal_decompression_dest and wal_decompression_dest != wal_dest:
            shutil.rmtree(wal_decompression_dest)
Example #17
    def backup_copy(self, backup_info):
        """
        Perform the actual copy of the backup using Rsync.
        First, it copies one tablespace at a time, then the PGDATA directory,
        and finally configuration files (if outside PGDATA).
        Bandwidth limitation, according to configuration, is applied in
        the process.
        This method is the core of base backup copy using Rsync+Ssh.

        :param barman.infofile.BackupInfo backup_info: backup information
        """

        # List of paths to be ignored by Rsync
        exclude_and_protect = []

        # Retrieve the previous backup metadata, then set safe_horizon
        previous_backup = self.backup_manager.get_previous_backup(
            backup_info.backup_id)
        if previous_backup:
            # safe_horizon is a tz-aware timestamp because BackupInfo class
            # ensures it
            safe_horizon = previous_backup.begin_time
        else:
            # If no previous backup is present, safe_horizon is set to None
            safe_horizon = None

        # Copy tablespaces applying bwlimit when necessary
        if backup_info.tablespaces:
            tablespaces_bw_limit = self.config.tablespace_bandwidth_limit
            # Copy a tablespace at a time
            for tablespace in backup_info.tablespaces:
                self.current_action = "copying tablespace '%s'" % \
                                      tablespace.name
                # Apply bandwidth limit if requested
                bwlimit = self.config.bandwidth_limit
                if tablespaces_bw_limit and \
                        tablespace.name in tablespaces_bw_limit:
                    bwlimit = tablespaces_bw_limit[tablespace.name]
                if bwlimit:
                    self.current_action += (" with bwlimit '%d'" % bwlimit)
                _logger.debug(self.current_action)
                # If the tablespace location is inside the data directory,
                # exclude and protect it from being copied twice during
                # the data directory copy
                if tablespace.location.startswith(backup_info.pgdata):
                    exclude_and_protect.append(
                        tablespace.location[len(backup_info.pgdata):])
                # Make sure the destination directory exists in order for
                # smart copy to detect that no file is present there
                tablespace_dest = backup_info.get_data_directory(tablespace.oid)
                mkpath(tablespace_dest)
                # Exclude and protect the tablespace from being copied again
                # during the data directory copy
                exclude_and_protect.append("/pg_tblspc/%s" % tablespace.oid)
                # Copy the backup using smart_copy trying to reuse the
                # tablespace of the previous backup if incremental is active
                ref_dir = self._reuse_dir(previous_backup, tablespace.oid)
                tb_rsync = RsyncPgData(
                    path=self.server.path,
                    ssh=self.ssh_command,
                    ssh_options=self.ssh_options,
                    args=self._reuse_args(ref_dir),
                    bwlimit=bwlimit,
                    network_compression=self.config.network_compression,
                    check=True)
                try:
                    tb_rsync.smart_copy(
                        ':%s/' % tablespace.location,
                        tablespace_dest,
                        safe_horizon,
                        ref_dir)
                except CommandFailedException as e:
                    msg = "data transfer failure on directory '%s'" % \
                          backup_info.get_data_directory(tablespace.oid)
                    raise DataTransferFailure.from_rsync_error(e, msg)
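
Note the role of safe_horizon in the smart_copy call above: it is the begin time of the previous backup, and files whose modification time predates it can be reused through a cheap time-and-size comparison, while newer files must be re-checked. A hypothetical illustration of that rule (not Barman's real helper), using epoch seconds for simplicity:

import os


def mtime_is_trustworthy(path, safe_horizon_epoch):
    """
    Return True if path is old enough to be compared by time and size alone.

    safe_horizon_epoch is the previous backup's begin time as epoch seconds,
    or None when there is no previous backup (hypothetical sketch).
    """
    if safe_horizon_epoch is None:
        # No previous backup: nothing can be reused safely
        return False
    # Anything modified after the horizon needs a checksum comparison
    return os.path.getmtime(path) < safe_horizon_epoch
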
Example #19
0
    def backup_copy(self, backup_info):
        """
        Perform the actual copy of the backup using Rsync.
        First, it copies one tablespace at a time, then the PGDATA directory,
        and finally configuration files (if outside PGDATA).
        Bandwidth limitation, according to configuration, is applied in
        the process.
        This method is the core of base backup copy using Rsync+Ssh.

        :param barman.infofile.BackupInfo backup_info: backup information
        """

        # List of paths to be ignored by Rsync
        exclude_and_protect = []

        # Retrieve the previous backup metadata, then set safe_horizon
        previous_backup = self.backup_manager.get_previous_backup(
            backup_info.backup_id)
        if previous_backup:
            # safe_horizon is a tz-aware timestamp because BackupInfo class
            # ensures it
            safe_horizon = previous_backup.begin_time
        else:
            # If no previous backup is present, safe_horizon is set to None
            safe_horizon = None

        # Copy tablespaces applying bwlimit when necessary
        if backup_info.tablespaces:
            tablespaces_bw_limit = self.config.tablespace_bandwidth_limit
            # Copy a tablespace at a time
            for tablespace in backup_info.tablespaces:
                self.current_action = "copying tablespace '%s'" % \
                                      tablespace.name
                # Apply bandwidth limit if requested
                bwlimit = self.config.bandwidth_limit
                if tablespaces_bw_limit and \
                        tablespace.name in tablespaces_bw_limit:
                    bwlimit = tablespaces_bw_limit[tablespace.name]
                if bwlimit:
                    self.current_action += (" with bwlimit '%d'" % bwlimit)
                _logger.debug(self.current_action)
                # If the tablespace location is inside the data directory,
                # exclude and protect it from being copied twice during
                # the data directory copy
                if tablespace.location.startswith(backup_info.pgdata):
                    exclude_and_protect.append(
                        tablespace.location[len(backup_info.pgdata):])
                # Make sure the destination directory exists in order for
                # smart copy to detect that no file is present there
                tablespace_dest = backup_info.get_data_directory(
                    tablespace.oid)
                mkpath(tablespace_dest)
                # Exclude and protect the tablespace from being copied again
                # during the data directory copy
                exclude_and_protect.append("/pg_tblspc/%s" % tablespace.oid)
                # Copy the backup using smart_copy trying to reuse the
                # tablespace of the previous backup if incremental is active
                ref_dir = self._reuse_dir(previous_backup, tablespace.oid)
                tb_rsync = RsyncPgData(
                    path=self.server.path,
                    ssh=self.ssh_command,
                    ssh_options=self.ssh_options,
                    args=self._reuse_args(ref_dir),
                    bwlimit=bwlimit,
                    network_compression=self.config.network_compression,
                    check=True)
                try:
                    tb_rsync.smart_copy(
                        ':%s/' % tablespace.location,
                        tablespace_dest,
                        safe_horizon,
                        ref_dir)
                except CommandFailedException as e:
                    msg = "data transfer failure on directory '%s'" % \
                          backup_info.get_data_directory(tablespace.oid)
                    raise DataTransferFailure.from_rsync_error(e, msg)

        # Make sure the destination directory exists in order for smart copy
        # to detect that no file is present there
        backup_dest = backup_info.get_data_directory()
        mkpath(backup_dest)

        # Copy the PGDATA, trying to reuse the data dir
        # of the previous backup if incremental is active
        ref_dir = self._reuse_dir(previous_backup)
        rsync = RsyncPgData(
            path=self.server.path,
            ssh=self.ssh_command,
            ssh_options=self.ssh_options,
            args=self._reuse_args(ref_dir),
            bwlimit=self.config.bandwidth_limit,
            exclude_and_protect=exclude_and_protect,
            network_compression=self.config.network_compression)
        try:
            rsync.smart_copy(':%s/' % backup_info.pgdata, backup_dest,
                             safe_horizon,
                             ref_dir)
        except CommandFailedException as e:
            msg = "data transfer failure on directory '%s'" % \
                  backup_info.pgdata
            raise DataTransferFailure.from_rsync_error(e, msg)

        # At last, copy pg_control
        try:
            rsync(':%s/global/pg_control' % (backup_info.pgdata,),
                  '%s/global/pg_control' % (backup_dest,))
        except CommandFailedException as e:
            msg = "data transfer failure on file '%s/global/pg_control'" % \
                  backup_info.pgdata
            raise DataTransferFailure.from_rsync_error(e, msg)

        # Copy configuration files (if not inside PGDATA)
        self.current_action = "copying configuration files"
        _logger.debug(self.current_action)
        for key in ('config_file', 'hba_file', 'ident_file'):
            cf = getattr(backup_info, key, None)
            if cf:
                assert isinstance(cf, str)
                # Consider only those that reside outside of the original
                # PGDATA directory
                if cf.startswith(backup_info.pgdata):
                    self.current_action = \
                        "skipping %s as it is contained in the %s " \
                        "directory" % (key, backup_info.pgdata)
                    _logger.debug(self.current_action)
                    continue
                self.current_action = "copying %s as outside %s directory" % (
                    key, backup_info.pgdata)
                _logger.info(self.current_action)
                try:
                    rsync(':%s' % cf, backup_dest)
                except CommandFailedException as e:
                    ret_code = e.args[0]['ret']
                    msg = "data transfer failure on file '%s'" % cf
                    if key == 'ident_file' and ret_code == 23:
                        # If the ident file is missing,
                        # it isn't an error condition for PostgreSQL.
                        # Barman is consistent with this behavior.
                        output.warning(msg, log=True)
                        continue
                    else:
                        raise DataTransferFailure.from_rsync_error(e, msg)
        # Check for any include directives in PostgreSQL configuration
        # Currently, include directives are not supported for files that
        # reside outside PGDATA. These files must be manually backed up.
        # Barman will emit a warning and list those files
        if backup_info.included_files:
            filtered_files = [
                included_file
                for included_file in backup_info.included_files
                if not included_file.startswith(backup_info.pgdata)
            ]
            if len(filtered_files) > 0:
                output.warning(
                    "The usage of include directives is not supported "
                    "for files that reside outside PGDATA.\n"
                    "Please manually backup the following files:\n"
                    "\t%s\n",
                    "\n\t".join(filtered_files)
                )
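
The exclude_and_protect list built above serves a double purpose: a tablespace living inside PGDATA must not be copied a second time during the data directory pass, and it must not be deleted from the destination by rsync either. One plausible translation into rsync options, assuming the RsyncPgData wrapper does something along these lines (the real implementation may differ):

def exclude_and_protect_args(paths):
    """
    Build rsync arguments for paths relative to the transfer root.

    Each path is excluded from the copy and protected from deletion on the
    receiving side via rsync's 'P' filter rule (hypothetical sketch).
    """
    args = []
    for path in paths:
        args.append('--exclude=%s' % path)   # do not transfer it again
        args.append('--filter=P %s' % path)  # shield it from --delete
    return args

# For instance:
# exclude_and_protect_args(['/pg_tblspc/16384'])
# -> ['--exclude=/pg_tblspc/16384', '--filter=P /pg_tblspc/16384']
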
Example #20
0
    def receive_wal(self, reset=False):
        """
        Creates a PgReceiveXlog object and issues the pg_receivexlog command
        for a specific server

        :param bool reset: When set, reset the status of receive-wal
        :raise ArchiverFailure: when something goes wrong
        """
        # Ensure the presence of the destination directory
        mkpath(self.config.streaming_wals_directory)

        # Check if this is a reset request
        if reset:
            self._reset_streaming_status()
            return

        # Execute basic sanity checks on PostgreSQL connection
        streaming_status = self.server.streaming.get_remote_status()
        if streaming_status["streaming_supported"] is None:
            raise ArchiverFailure(
                'failed opening the PostgreSQL streaming connection')
        elif not streaming_status["streaming_supported"]:
            raise ArchiverFailure(
                'PostgreSQL version too old (%s < 9.2)' %
                self.server.streaming.server_txt_version)
        # Execute basic sanity checks on pg_receivexlog
        remote_status = self.get_remote_status()
        if not remote_status["pg_receivexlog_installed"]:
            raise ArchiverFailure(
                'pg_receivexlog not present in $PATH')
        if not remote_status['pg_receivexlog_compatible']:
            raise ArchiverFailure(
                'pg_receivexlog version not compatible with '
                'PostgreSQL server version')

        # Execute sanity check on replication slot usage
        if self.config.slot_name:
            # Check if slots are supported
            if not remote_status['pg_receivexlog_supports_slots']:
                raise ArchiverFailure(
                    'Physical replication slot not supported by %s '
                    '(9.4 or higher is required)' %
                    self.server.streaming.server_txt_version)
            # Check if the required slot exists
            postgres_status = self.server.postgres.get_remote_status()
            if postgres_status['replication_slot'] is None:
                raise ArchiverFailure(
                    "replication slot '%s' doesn't exist. "
                    "Please execute "
                    "'barman receive-wal --create-slot %s'" %
                    (self.config.slot_name, self.config.name))
            # Check if the required slot is available
            if postgres_status['replication_slot'].active:
                raise ArchiverFailure(
                    "replication slot '%s' is already in use" %
                    (self.config.slot_name,))

        # Make sure we are not wasting precious PostgreSQL resources
        self.server.close()

        _logger.info('Activating WAL archiving through streaming protocol')
        try:
            output_handler = PgReceiveXlog.make_output_handler(
                self.config.name + ': ')
            receive = PgReceiveXlog(
                connection=self.server.streaming,
                destination=self.config.streaming_wals_directory,
                command=remote_status['pg_receivexlog_path'],
                version=remote_status['pg_receivexlog_version'],
                app_name=self.config.streaming_archiver_name,
                path=self.server.path,
                slot_name=self.config.slot_name,
                synchronous=remote_status['pg_receivexlog_synchronous'],
                out_handler=output_handler,
                err_handler=output_handler
            )
            # Finally execute the pg_receivexlog process
            receive.execute()
        except CommandFailedException as e:
            # Retrieve the return code from the exception
            ret_code = e.args[0]['ret']
            if ret_code < 0:
                # If the return code is negative, then pg_receivexlog
                # was terminated by a signal
                msg = "pg_receivexlog terminated by signal: %s" \
                      % abs(ret_code)
            else:
                # Otherwise terminated with an error
                msg = "pg_receivexlog terminated with error code: %s"\
                      % ret_code

            raise ArchiverFailure(msg)
        except KeyboardInterrupt:
            # This is a normal termination, so there is nothing to do besides
            # informing the user.
            output.info('SIGINT received. Terminating gracefully.')
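
The negative-return-code branch above relies on the convention, surfaced by Python's subprocess module, that a child process killed by signal N reports a return code of -N. A minimal standalone demonstration of the same check:

import subprocess

proc = subprocess.Popen(['sleep', '60'])
proc.terminate()  # deliver SIGTERM (signal 15)
proc.wait()
if proc.returncode < 0:
    # Killed by a signal: returncode is the negated signal number
    print("terminated by signal: %s" % abs(proc.returncode))
else:
    print("terminated with error code: %s" % proc.returncode)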