Code example #1
File: test_executor.py Project: zombig/barman
    def test_backup(self, gpb_mock, pbc_mock, capsys, tmpdir):
        """
        Test backup

        :param gpb_mock: mock for the get_previous_backup method
        :param pbc_mock: mock for the backup_copy method
        :param capsys: stdout capture module
        :param tmpdir: pytest temp directory
        """
        tmp_home = tmpdir.mkdir('home')
        backup_manager = build_backup_manager(global_conf={
            'barman_home': tmp_home.strpath,
            'backup_method': 'postgres'
        })
        backup_info = build_test_backup_info(
            backup_id='fake_backup_id',
            server=backup_manager.server,
            pgdata="/pg/data",
            config_file="/pg/data/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/pg_ident.conf",
            begin_offset=28,
            copy_stats=dict(copy_time=100, total_time=105))
        timestamp = datetime.datetime(2015, 10, 26, 14, 38)
        backup_manager.server.postgres.current_xlog_info = dict(
            location='0/12000090',
            file_name='000000010000000000000012',
            file_offset=144,
            timestamp=timestamp,
        )
        backup_manager.server.postgres.get_setting.return_value = '/pg/data'
        tmp_backup_label = tmp_home.mkdir('main')\
            .mkdir('base').mkdir('fake_backup_id')\
            .mkdir('data').join('backup_label')
        start_time = datetime.datetime.now(tz.tzlocal()).replace(microsecond=0)
        tmp_backup_label.write(
            'START WAL LOCATION: 0/40000028 (file 000000010000000000000040)\n'
            'CHECKPOINT LOCATION: 0/40000028\n'
            'BACKUP METHOD: streamed\n'
            'BACKUP FROM: master\n'
            'START TIME: %s\n'
            'LABEL: pg_basebackup base backup' %
            start_time.strftime('%Y-%m-%d %H:%M:%S %Z')
        )
        backup_manager.executor.backup(backup_info)
        out, err = capsys.readouterr()
        gpb_mock.assert_called_once_with(backup_info.backup_id)
        assert err == ''
        assert 'Starting backup copy via pg_basebackup' in out
        assert 'Copy done' in out
        assert 'Finalising the backup.' in out
        assert backup_info.end_xlog == '0/12000090'
        assert backup_info.end_offset == 144
        assert backup_info.begin_time == start_time
        assert backup_info.begin_wal == '000000010000000000000040'

        # Check that the CommandFailedException is re-raised
        pbc_mock.side_effect = CommandFailedException('test')
        with pytest.raises(CommandFailedException):
            backup_manager.executor.backup(backup_info)
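As a side note, the `begin_wal` assertion above relies on the WAL segment name embedded in the START WAL LOCATION line of `backup_label`. A rough sketch of that extraction (illustrative only; this is not barman's actual parser):

    import re

    label_line = ('START WAL LOCATION: 0/40000028 '
                  '(file 000000010000000000000040)')
    # The segment name is the 24-hex-digit token inside "(file ...)"
    match = re.search(r'\(file ([0-9A-F]{24})\)', label_line)
    assert match.group(1) == '000000010000000000000040'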
Code example #2
    def __init__(self, command=sys.argv[0], subcommand=None,
                 config=None, args=None):
        """
        Build a specific wrapper for all the barman sub-commands,
        providing a unified interface.

        :param str command: path to barman
        :param str subcommand: the barman sub-command
        :param str config: path to the barman configuration file.
        :param list[str] args: a list containing the sub-command args
            like the target server name
        """
        # The config argument is needed when the user explicitly
        # passes a configuration file, as the child process
        # must know the configuration file to use.
        #
        # The configuration file must always be propagated,
        # even in case of the default one.
        if not config:
            raise CommandFailedException(
                "No configuration file passed to barman subprocess")
        # Build the sub-command:
        # * be sure to run it with the right python interpreter
        # * pass the current configuration file with -c
        # * set it quiet with -q
        self.command = [sys.executable, command,
                        '-c', config, '-q', subcommand]
        # Handle args for the sub-command (like the server name)
        if args:
            self.command += args
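For context, a hedged usage sketch of this wrapper (the class name `BarmanSubProcess` and the paths below are assumptions; only the layout of the command list follows the code above):

    # Hypothetical instantiation: run "barman cron" quietly for server "main"
    process = BarmanSubProcess(
        command='/usr/bin/barman',
        subcommand='cron',
        config='/etc/barman.conf',
        args=['main'])
    # process.command now resembles:
    # [sys.executable, '/usr/bin/barman',
    #  '-c', '/etc/barman.conf', '-q', 'cron', 'main']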
Code example #3
 def __init__(self, rsync='rsync', args=None, ssh=None, ssh_options=None,
              bwlimit=None, exclude_and_protect=None,
              network_compression=None, check=True, allowed_retval=(0, 24),
              path=None, **kwargs):
     options = []
     # Try to find rsync in system PATH using the which method.
     # If not found, rsync is not installed and this class cannot
     # work properly.
     # Raise a CommandFailedException to warn the user.
     rsync_path = barman.utils.which(rsync, path)
     if not rsync_path:
         raise CommandFailedException('rsync not in system PATH: '
                                      'is rsync installed?')
     if ssh:
         options += ['-e', self._cmd_quote(ssh, ssh_options)]
     if network_compression:
         options += ['-z']
     if exclude_and_protect:
         for exclude_path in exclude_and_protect:
             options += ["--exclude=%s" % (exclude_path,),
                         "--filter=P_%s" % (exclude_path,)]
     if args:
         options += self._args_for_suse(args)
     if bwlimit is not None and bwlimit > 0:
         options += ["--bwlimit=%s" % bwlimit]
     Command.__init__(self, rsync, args=options, check=check,
                      allowed_retval=allowed_retval, path=path, **kwargs)
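To illustrate, constructing the wrapper with typical arguments translates into an options list along these lines (the host and paths are made up; the exact ssh string depends on `_cmd_quote`):

    rsync = Rsync(ssh='ssh -q barman@pg',
                  network_compression=True,
                  bwlimit=8000,
                  exclude_and_protect=['/pg_wal'])
    # The options built above look roughly like:
    #   ['-e', 'ssh -q barman@pg', '-z',
    #    '--exclude=/pg_wal', '--filter=P_/pg_wal', '--bwlimit=8000']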
Code example #4
    def test_retry(self, get_output_no_retry_mock, sleep_mock, popen,
                   pipe_processor_loop):
        """
        Test the retry method

        :param mock.Mock get_output_no_retry_mock: simulate a
            Command._get_output_once() call
        :param mock.Mock sleep_mock: mimic the sleep timer
        :param mock.Mock popen: unused, mocked from the whole test class
        :param mock.Mock pipe_processor_loop: unused, mocked from the whole
            test class
        """

        command = 'test string'
        cmd = command_wrappers.Command(command,
                                       check=True,
                                       retry_times=5,
                                       retry_sleep=10)

        # check for correct return value
        r = cmd.get_output('test string')
        get_output_no_retry_mock.assert_called_with('test string')
        assert get_output_no_retry_mock.return_value == r

        # check for correct number of calls and invocations of sleep method
        get_output_no_retry_mock.reset_mock()
        sleep_mock.reset_mock()
        expected = mock.Mock()
        get_output_no_retry_mock.side_effect = [
            CommandFailedException('testException'), expected
        ]
        r = cmd.get_output('test string')
        assert get_output_no_retry_mock.call_count == 2
        assert sleep_mock.call_count == 1
        assert r == expected

        # check for correct number of tries and invocations of sleep method
        get_output_no_retry_mock.reset_mock()
        sleep_mock.reset_mock()
        e = CommandFailedException('testException')
        get_output_no_retry_mock.side_effect = [e, e, e, e, e, e]
        with pytest.raises(CommandMaxRetryExceeded) as exc_info:
            cmd.get_output('test string')
        assert exc_info.value.exc == e
        assert sleep_mock.call_count == 5
        assert get_output_no_retry_mock.call_count == 6
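The final counts follow from the retry contract: `retry_times=5` allows one initial attempt plus five retries, with `retry_sleep` seconds of sleep between consecutive attempts. In comment form (the arithmetic is inferred from the assertions above):

    # retry_times=5 -> at most 1 initial call + 5 retries = 6 attempts
    # sleep() runs between attempts -> 5 calls for 6 attempts
    # all 6 attempts fail -> CommandMaxRetryExceeded wrapping the last error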
Code example #5
    def check_return_value(self):
        """
        Check the current return code and raise CommandFailedException when
        it's not in the allowed_retval list

        :raises: CommandFailedException
        """
        if self.ret not in self.allowed_retval:
            raise CommandFailedException(dict(
                ret=self.ret, out=self.out, err=self.err))
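A minimal standalone sketch of the same behaviour (the `FakeCommand` class is an illustration, and the import path is an assumption; older barman releases keep the exception in `barman.command_wrappers`):

    from barman.exceptions import CommandFailedException

    class FakeCommand(object):
        def __init__(self, ret, allowed_retval=(0, 24)):
            self.ret, self.out, self.err = ret, '', ''
            self.allowed_retval = allowed_retval

        def check_return_value(self):
            if self.ret not in self.allowed_retval:
                raise CommandFailedException(dict(
                    ret=self.ret, out=self.out, err=self.err))

    FakeCommand(24).check_return_value()   # rsync "vanished files": allowed
    try:
        FakeCommand(23).check_return_value()
    except CommandFailedException as exc:
        print(exc.args[0]['ret'])          # 23 is not allowed, so it raises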
Code example #6
    def check_return_value(self, allowed_retval):
        """
        Check the current return code and raise CommandFailedException when
        it's not in the allowed_retval list

        :param list[int] allowed_retval: list of return values considered
            success
        :raises: CommandFailedException
        """
        if self.ret not in allowed_retval:
            raise CommandFailedException(dict(
                ret=self.ret, out=self.out, err=self.err))
Code example #7
File: copy_controller.py Project: f4nt/barman
    def _execute_job(self, job):
        """
        Execute a `_RsyncJob` in a worker process

        :type job: _RsyncJob
        """
        item = job.item
        # Build the rsync object required for the copy
        rsync = self.rsync_factory(item)
        # Write in the log that the job is starting
        with _logger_lock:
            _logger.info(job.description, 'starting')
        if item.is_directory:
            # A directory item must always have checksum and file_list set
            assert job.file_list is not None, \
                'A directory item must not have a None `file_list` attribute'
            assert job.checksum is not None, \
                'A directory item must not have a None `checksum` attribute'

            # Generate a unique name for the file containing the list of files
            file_list_path = os.path.join(
                self.temp_dir, '%s_%s_%s.list' %
                (item.label, 'check' if job.checksum else 'safe', os.getpid()))

            # Write the list, one path per line
            with open(file_list_path, 'w') as file_list:
                for entry in job.file_list:
                    assert isinstance(entry, _FileItem), \
                        "expect %r to be a _FileItem" % entry
                    file_list.write(entry.path + "\n")

            self._copy(rsync,
                       item.src,
                       item.dst,
                       file_list=file_list_path,
                       checksum=job.checksum)
        else:
            # A file must never have checksum and file_list set
            assert job.file_list is None, \
                'A file item must have a None `file_list` attribute'
            assert job.checksum is None, \
                'A file item must have a None `checksum` attribute'
            rsync(item.src, item.dst, allowed_retval=(0, 23, 24))
            if rsync.ret == 23:
                if item.optional:
                    _logger.warning("Ignoring error reading %s", item)
                else:
                    raise CommandFailedException(
                        dict(ret=rsync.ret, out=rsync.out, err=rsync.err))
        # Write in the log that the job is finished
        with _logger_lock:
            _logger.info(job.description, 'finished')
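The special-cased values in `allowed_retval=(0, 23, 24)` map to rsync's documented exit codes; as a quick reference (meanings taken from the rsync man page):

    # rsync exit codes handled above:
    #   0  - success
    #   23 - partial transfer due to error (fatal here unless item.optional)
    #   24 - partial transfer because source files vanished (tolerated)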
Code example #8
    def copy(self):
        """
        Execute the actual copy
        """
        for item in self.item_list:
            # Prepare the command arguments
            args = self._reuse_args(item.reuse)

            # Merge the global exclude with the one into the item object
            if self.exclude and item.exclude:
                exclude = self.exclude + item.exclude
            else:
                exclude = self.exclude or item.exclude

            # TODO: remove debug output or use it for progress tracking
            # By adding a double '--itemize-changes' option, the rsync
            # output will contain the full list of files that have been
            # touched, even those that have not changed
            args.append('--itemize-changes')
            args.append('--itemize-changes')

            # Build the rsync object that will execute the copy
            rsync = RsyncPgData(path=self.path,
                                ssh=self.ssh_command,
                                ssh_options=self.ssh_options,
                                args=args,
                                bwlimit=item.bwlimit,
                                network_compression=self.network_compression,
                                exclude=exclude,
                                exclude_and_protect=item.exclude_and_protect,
                                retry_times=self.retry_times,
                                retry_sleep=self.retry_sleep,
                                retry_handler=partial(self._retry_handler,
                                                      item))

            # Log the operation that is being executed
            _logger.info("Copying %s", item)

            # If the item is a directory use the smart copy algorithm,
            # otherwise run a plain rsync
            if item.is_directory:
                self._smart_copy(rsync, item.src, item.dst, self.safe_horizon,
                                 item.reuse)
            else:
                rsync(item.src, item.dst, allowed_retval=(0, 23, 24))
                if rsync.ret == 23:
                    if item.optional:
                        _logger.warning("Ignoring error reading %s", item)
                    else:
                        raise CommandFailedException(
                            dict(ret=rsync.ret, out=rsync.out, err=rsync.err))
Code example #9
    def decompress(self, src, dst):
        """
        Decompress using the object defined in the subclass

        :param src: source file to decompress
        :param dst: destination of the decompression
        """
        try:
            with closing(self._decompressor(src)) as istream:
                with open(dst, 'wb') as ostream:
                    shutil.copyfileobj(istream, ostream)
        except Exception as e:
            # you won't get more information from the compressors anyway
            raise CommandFailedException(dict(ret=None, err=str(e), out=None))
        return 0
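For a concrete idea of what `_decompressor` must return, any readable stream accepted by `shutil.copyfileobj` will do; a gzip-based subclass could be as simple as this sketch (not barman's actual compressor hierarchy):

    import gzip

    class GZipDecompressingMixin(object):
        # gzip.open() returns a file object usable as the input stream
        # required by decompress() above
        def _decompressor(self, src):
            return gzip.open(src, 'rb')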
Code example #10
 def test_rsync_build_failure(self, popen, pipe_processor_loop, which):
     """
     Simple test that checks that a CommandFailedException is raised
     when an Rsync object is built with an invalid path or rsync
     is not in the system PATH
     """
     which.side_effect = CommandFailedException()
     # Pass an invalid path to Rsync class constructor.
     # Expect a CommandFailedException
     with pytest.raises(CommandFailedException):
         command_wrappers.Rsync('/invalid/path/rsync')
     # Force the which method to return false, simulating rsync command not
     # present in the system PATH. Expect a CommandFailedException
     with mock.patch("barman.utils.which") as mock_which:
         mock_which.return_value = False
         with pytest.raises(CommandFailedException):
             command_wrappers.Rsync(ssh_options=['-c', 'arcfour'])
Code example #11
File: copy_controller.py Project: secwall/barman
    def _rsync_ignore_vanished_files(self, rsync, *args, **kwargs):
        """
        Wrap an Rsync.get_output() call and ignore missing args

        TODO: when rsync 3.1 is widespread, replace this
            with --ignore-missing-args argument

        :param Rsync rsync: the Rsync object used to execute the copy
        """
        kwargs['allowed_retval'] = (0, 23, 24)
        rsync.get_output(*args, **kwargs)
        # If the return code is 23, raise an error for any stderr line
        # that doesn't match the VANISHED_RE regexp
        if rsync.ret == 23 and rsync.err is not None:
            for line in rsync.err.splitlines():
                match = self.VANISHED_RE.match(line.rstrip())
                if match:
                    continue
                else:
                    _logger.error("First rsync error line: %s", line)
                    raise CommandFailedException(
                        dict(ret=rsync.ret, out=rsync.out, err=rsync.err))
        return rsync.out, rsync.err
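rsync reports vanished sources on stderr with messages like the ones quoted below, so a matching pattern could look like this (an illustration, not necessarily barman's exact `VANISHED_RE`):

    import re

    # Typical rsync stderr lines for vanished files:
    #   file has vanished: "/pg/data/base/16384/12345"
    #   rsync warning: some files vanished before they could be transferred
    VANISHED_RE = re.compile(
        r'^(file has vanished: |'
        r'rsync warning: some files vanished before they could be '
        r'transferred)')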
Code example #12
File: test_sync.py Project: zombig/barman
    def test_sync_wals(self, rsync_mock, tmpdir, capsys):
        """
        Test the WAL synchronisation method, testing all
        the possible error conditions.

        :param MagicMock rsync_mock: MagicMock replacing Rsync class
        :param py.local.path tmpdir: py.test temporary directory
        :param capsys: fixture that allows access to stdout/stderr output
        """
        server_name = 'main'

        # Prepare paths
        barman_home = tmpdir.mkdir("barman_home")
        backup_dir = barman_home.mkdir(server_name)
        wals_dir = backup_dir.mkdir("wals")
        primary_info_file = backup_dir.join(barman.server.PRIMARY_INFO_FILE)

        # prepare the primary_info file
        remote_basebackup_dir = tmpdir.mkdir("primary")
        primary_info_content = dict(EXPECTED_MINIMAL)
        primary_info_content['config'].update(
            compression=None,
            basebackups_directory=str(remote_basebackup_dir),
            wals_directory=str(wals_dir))
        primary_info_file.write(json.dumps(primary_info_content))

        # Test 1: Not a passive node.
        # Expect SyncError
        server = build_real_server(global_conf=dict(
            barman_home=str(barman_home)))
        with pytest.raises(SyncError):
            server.sync_wals()

        # Test 2: different compression between master and passive node.
        # Expect an error message on stderr
        server = build_real_server(
            global_conf=dict(barman_home=str(barman_home)),
            main_conf=dict(compression='gzip',
                           primary_ssh_command='ssh fakeuser@fakehost'))

        server.sync_wals()
        (out, err) = capsys.readouterr()
        assert "Compression method on server %s " % server_name in err

        # Test 3: No base backup for server, exit with warning
        server = build_real_server(
            global_conf=dict(barman_home=str(barman_home)),
            main_conf=dict(compression=None,
                           wals_directory=str(wals_dir),
                           primary_ssh_command='ssh fakeuser@fakehost'))

        server.sync_wals()
        (out, err) = capsys.readouterr()

        assert 'WARNING: No base backup for ' \
               'server %s' % server.config.name in err

        # Test 4: No wal synchronisation required, expect a warning

        # set return for get_first_backup and get_backup methods
        server.get_first_backup_id = lambda: "too_new"
        server.get_backup = lambda x: build_test_backup_info(
            server=server,
            begin_wal='000000010000000000000005',
            begin_time=dateutil.parser.parse('Wed Jul 23 11:00:43 2014'),
            end_time=dateutil.parser.parse('Wed Jul 23 12:00:43 2014'))
        server.sync_wals()
        (out, err) = capsys.readouterr()

        assert 'WARNING: Skipping WAL synchronisation for ' \
               'server %s: no available local backup for %s' \
               % (server.config.name,
                  primary_info_content['wals'][0]['name']) in err

        # Test 5: simulate rsync failure.
        # Expect a custom error message

        server.get_backup = lambda x: build_test_backup_info(
            server=server,
            begin_wal='000000010000000000000002',
            begin_time=dateutil.parser.parse('Wed Jul 23 11:00:43 2014'),
            end_time=dateutil.parser.parse('Wed Jul 23 12:00:43 2014'))
        rsync_mock.side_effect = CommandFailedException("TestFailure")
        server.sync_wals()

        (out, err) = capsys.readouterr()
        # check stderr for the custom error message
        assert 'TestFailure' in err

        # Test 6: simulate keyboard interruption
        rsync_mock.side_effect = KeyboardInterrupt()
        server.sync_wals()
        # check the error message for KeyboardInterrupt
        (out, err) = capsys.readouterr()
        assert 'KeyboardInterrupt' in err

        # Test 7: normal execution, expect no output. xlog.db
        # must contain information about the primary info wals

        # reset the rsync_mock and replace the failing side_effect
        rsync_mock.reset_mock()
        rsync_mock.side_effect = mock.Mock(name='rsync')

        server.sync_wals()
        # check for no output on stdout and stderr
        (out, err) = capsys.readouterr()
        assert out == ''
        assert err == ''
        # check the xlog content for primary.info wals
        exp_xlog = [
            '000000010000000000000002\t16777216\t1406019026.0\tNone\n',
            '000000010000000000000003\t16777216\t1406019026.0\tNone\n',
            '000000010000000000000004\t16777216\t1406019329.93\tNone\n',
            '000000010000000000000005\t16777216\t1406019330.84\tNone\n'
        ]
        with server.xlogdb() as fxlogdb:
            xlog = fxlogdb.readlines()
            assert xlog == exp_xlog
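Each line in `exp_xlog` follows the tab-separated record layout of barman's xlog.db file; the column meanings below are inferred from the values (a 16 MB segment size and Unix timestamps):

    # 000000010000000000000002 \t 16777216 \t 1406019026.0 \t None
    # WAL segment name, size in bytes, modification time, compression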
Code example #13
File: test_sync.py Project: zombig/barman
    def test_sync_backup(self, logger_mock, rsync_mock, tmpdir, capsys):
        """
        Test the synchronisation method, testing all
        the possible error conditions.

        :param MagicMock logger_mock: MagicMock obj mimicking the logger
        :param MagicMock rsync_mock: MagicMock replacing Rsync class
        :param py.local.path tmpdir: py.test temporary directory
        :param capsys: fixture that allows access to stdout/stderr output
        """
        backup_name = '1234567890'
        server_name = 'main'

        # Prepare paths
        backup_dir = tmpdir.mkdir(server_name)
        basebackup_dir = backup_dir.mkdir("base")
        full_backup_path = basebackup_dir.mkdir(backup_name)
        primary_info_file = backup_dir.join(barman.server.PRIMARY_INFO_FILE)

        # prepare the primary_info file
        remote_basebackup_dir = tmpdir.mkdir("primary")
        primary_info_content = dict(EXPECTED_MINIMAL)
        primary_info_content['config'].update(
            basebackups_directory=str(remote_basebackup_dir))
        primary_info_file.write(json.dumps(primary_info_content))

        # Test 1: Not a passive node.
        # Expect SyncError
        server = build_real_server(
            global_conf={'barman_lock_directory': tmpdir.strpath},
            main_conf={'backup_directory': backup_dir.strpath})
        with pytest.raises(SyncError):
            server.sync_backup(backup_name)

        # Test 2: normal sync execution, no error expected.
        # test for all the steps on the logger
        logger_mock.reset_mock()
        server = build_real_server(
            global_conf={'barman_lock_directory': tmpdir.strpath},
            main_conf={
                'backup_directory': backup_dir.strpath,
                'primary_ssh_command': 'ssh fakeuser@fakehost'
            })
        server.sync_backup(backup_name)
        logger_mock.info.assert_any_call(
            "Synchronising with server %s backup %s: step 1/3: "
            "parse server information", server_name, backup_name)
        logger_mock.info.assert_any_call(
            "Synchronising with server %s backup %s: step 2/3: "
            "file copy", server_name, backup_name)
        logger_mock.info.assert_any_call(
            "Synchronising with server %s backup %s: step 3/3: "
            "finalise sync", server_name, backup_name)

        # Test 3: test Rsync Failure
        # Expect a BackupInfo object with status "FAILED"
        # and an error message in the "error" field of the object
        rsync_mock.reset_mock()
        server.backup_manager._backup_cache = {}
        rsync_mock.side_effect = CommandFailedException("TestFailure")
        full_backup_path.remove(rec=1)
        server.sync_backup(backup_name)
        backup_info = server.get_backup(backup_name)
        assert backup_info.status == BackupInfo.FAILED
        assert backup_info.error == 'failure syncing server main ' \
                                    'backup 1234567890: TestFailure'

        # Test 4: test KeyboardInterrupt management
        # Check the error message for the KeyboardInterrupt event
        rsync_mock.reset_mock()
        rsync_mock.side_effect = CommandFailedException("TestFailure")
        full_backup_path.remove(rec=1)
        rsync_mock.side_effect = KeyboardInterrupt()
        server.sync_backup(backup_name)
        backup_info = server.get_backup(backup_name)
        assert backup_info.status == BackupInfo.FAILED
        assert backup_info.error == 'failure syncing server main ' \
                                    'backup 1234567890: KeyboardInterrupt'

        # Test 5: test backup name not present on Master server
        # Expect an error message on stderr
        rsync_mock.reset_mock()
        rsync_mock.side_effect = CommandFailedException("TestFailure")
        full_backup_path.remove(rec=1)
        server.sync_backup('wrong_backup_name')

        (out, err) = capsys.readouterr()
        # Check the stderr using capsys. We only need the first line
        # from stderr
        e = err.split('\n')
        assert 'ERROR: failure syncing server main ' \
               'backup 1234567890: TestFailure' in e

        # Test 6: Backup already synced
        # Check for the warning message on stdout using capsys
        rsync_mock.reset_mock()
        rsync_mock.side_effect = None
        # do it the first time and check it succeeded
        server.sync_backup(backup_name)
        backup_info = server.get_backup(backup_name)
        assert backup_info.status == BackupInfo.DONE
        # do it again and test it does not call rsync
        rsync_mock.reset_mock()
        server.sync_backup(backup_name)
        assert not rsync_mock.called
        (out, err) = capsys.readouterr()
        assert out.strip() == 'Backup 1234567890 is already' \
                              ' synced with main server'
Code example #14
 def test_recovery(self, remote_cmd_mock, rsync_pg_mock,
                   copy_controller_mock, tmpdir):
     """
     Test the execution of a recovery
     """
     # Prepare basic directory/files structure
     dest = tmpdir.mkdir('destination')
     base = tmpdir.mkdir('base')
     wals = tmpdir.mkdir('wals')
     backup_info = testing_helpers.build_test_backup_info(tablespaces=[])
     backup_info.config.basebackups_directory = base.strpath
     backup_info.config.wals_directory = wals.strpath
     backup_info.version = 90400
     datadir = base.mkdir(backup_info.backup_id).mkdir('data')
     backup_info.pgdata = datadir.strpath
     postgresql_conf_local = datadir.join('postgresql.conf')
     postgresql_auto_local = datadir.join('postgresql.auto.conf')
     postgresql_conf_local.write('archive_command = something\n'
                                 'data_directory = something')
     postgresql_auto_local.write('archive_command = something\n'
                                 'data_directory = something')
     shutil.copy2(postgresql_conf_local.strpath, dest.strpath)
     shutil.copy2(postgresql_auto_local.strpath, dest.strpath)
     # Avoid triggering warning for missing config files
     datadir.ensure('pg_hba.conf')
     datadir.ensure('pg_ident.conf')
     # Build an executor
     server = testing_helpers.build_real_server(
         global_conf={
             "barman_lock_directory": tmpdir.mkdir('lock').strpath
         },
         main_conf={"wals_directory": wals.strpath})
     executor = RecoveryExecutor(server.backup_manager)
     # test local recovery
     rec_info = executor.recover(backup_info, dest.strpath, None, None,
                                 None, None, None, True, None)
     # remove keys that are not useful from the result
     del rec_info['cmd']
     sys_tempdir = rec_info['tempdir']
     assert rec_info == {
         'rsync':
         None,
         'tempdir':
         sys_tempdir,
         'wal_dest':
         dest.join('pg_xlog').strpath,
         'recovery_dest':
         'local',
         'destination_path':
         dest.strpath,
         'temporary_configuration_files': [
             dest.join('postgresql.conf').strpath,
             dest.join('postgresql.auto.conf').strpath
         ],
         'results': {
             'delete_barman_xlog':
             False,
             'get_wal':
             False,
             'changes': [
                 Assertion._make(
                     ['postgresql.conf', 0, 'archive_command', 'false']),
                 Assertion._make([
                     'postgresql.auto.conf', 0, 'archive_command', 'false'
                 ])
             ],
             'missing_files': [],
             'warnings': [
                 Assertion._make(
                     ['postgresql.conf', 2, 'data_directory', 'something']),
                 Assertion._make([
                     'postgresql.auto.conf', 2, 'data_directory',
                     'something'
                 ])
             ]
         },
         'target_epoch':
         None,
         'configuration_files': ['postgresql.conf', 'postgresql.auto.conf'],
         'target_datetime':
         None,
         'safe_horizon':
         None,
         'is_pitr':
         False,
         'get_wal':
         False,
     }
     # test remote recovery
     rec_info = executor.recover(backup_info, dest.strpath, {}, None, None,
                                 None, None, True, "remote@command")
     # remove keys that are not useful from the result
     del rec_info['cmd']
     del rec_info['rsync']
     sys_tempdir = rec_info['tempdir']
     assert rec_info == {
         'tempdir':
         sys_tempdir,
         'wal_dest':
         dest.join('pg_xlog').strpath,
         'recovery_dest':
         'remote',
         'destination_path':
         dest.strpath,
         'temporary_configuration_files': [
             os.path.join(sys_tempdir, 'postgresql.conf'),
             os.path.join(sys_tempdir, 'postgresql.auto.conf')
         ],
         'results': {
             'delete_barman_xlog':
             False,
             'get_wal':
             False,
             'changes': [
                 Assertion._make(
                     ['postgresql.conf', 0, 'archive_command', 'false']),
                 Assertion._make([
                     'postgresql.auto.conf', 0, 'archive_command', 'false'
                 ])
             ],
             'missing_files': [],
             'warnings': [
                 Assertion._make(
                     ['postgresql.conf', 2, 'data_directory', 'something']),
                 Assertion._make([
                     'postgresql.auto.conf', 2, 'data_directory',
                     'something'
                 ])
             ]
         },
         'target_epoch':
         None,
         'configuration_files': ['postgresql.conf', 'postgresql.auto.conf'],
         'target_datetime':
         None,
         'safe_horizon':
         None,
         'is_pitr':
         False,
         'get_wal':
         False,
     }
     # test failed rsync
     rsync_pg_mock.side_effect = CommandFailedException()
     with pytest.raises(CommandFailedException):
         executor.recover(backup_info, dest.strpath, {}, None, None, None,
                          None, True, "remote@command")
Code example #15
 def test_recovery(
     self, remote_cmd_mock, rsync_pg_mock, copy_controller_mock, tmpdir
 ):
     """
     Test the execution of a recovery
     """
     # Prepare basic directory/files structure
     dest = tmpdir.mkdir("destination")
     base = tmpdir.mkdir("base")
     wals = tmpdir.mkdir("wals")
     backup_info = testing_helpers.build_test_backup_info(tablespaces=[])
     backup_info.config.basebackups_directory = base.strpath
     backup_info.config.wals_directory = wals.strpath
     backup_info.version = 90400
     datadir = base.mkdir(backup_info.backup_id).mkdir("data")
     backup_info.pgdata = datadir.strpath
     postgresql_conf_local = datadir.join("postgresql.conf")
     postgresql_auto_local = datadir.join("postgresql.auto.conf")
     postgresql_conf_local.write(
         "archive_command = something\n" "data_directory = something"
     )
     postgresql_auto_local.write(
         "archive_command = something\n" "data_directory = something"
     )
     shutil.copy2(postgresql_conf_local.strpath, dest.strpath)
     shutil.copy2(postgresql_auto_local.strpath, dest.strpath)
     # Avoid triggering warning for missing config files
     datadir.ensure("pg_hba.conf")
     datadir.ensure("pg_ident.conf")
     # Build an executor
     server = testing_helpers.build_real_server(
         global_conf={"barman_lock_directory": tmpdir.mkdir("lock").strpath},
         main_conf={"wals_directory": wals.strpath},
     )
     executor = RecoveryExecutor(server.backup_manager)
     # test local recovery
     with closing(executor):
         rec_info = executor.recover(backup_info, dest.strpath, exclusive=True)
     # remove keys that are not useful from the result
     del rec_info["cmd"]
     sys_tempdir = rec_info["tempdir"]
     assert rec_info == {
         "rsync": None,
         "tempdir": sys_tempdir,
         "wal_dest": dest.join("pg_xlog").strpath,
         "recovery_dest": "local",
         "destination_path": dest.strpath,
         "temporary_configuration_files": [
             dest.join("postgresql.conf").strpath,
             dest.join("postgresql.auto.conf").strpath,
         ],
         "results": {
             "delete_barman_wal": False,
             "recovery_start_time": rec_info["results"]["recovery_start_time"],
             "get_wal": False,
             "changes": [
                 Assertion._make(["postgresql.conf", 0, "archive_command", "false"]),
                 Assertion._make(
                     ["postgresql.auto.conf", 0, "archive_command", "false"]
                 ),
             ],
             "missing_files": [],
             "recovery_configuration_file": "recovery.conf",
             "warnings": [
                 Assertion._make(
                     ["postgresql.conf", 2, "data_directory", "something"]
                 ),
                 Assertion._make(
                     ["postgresql.auto.conf", 2, "data_directory", "something"]
                 ),
             ],
         },
         "target_epoch": None,
         "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
         "target_datetime": None,
         "safe_horizon": None,
         "is_pitr": False,
         "get_wal": False,
     }
     # test remote recovery
     with closing(executor):
         rec_info = executor.recover(
             backup_info,
             dest.strpath,
             remote_command="remote@command",
             exclusive=True,
         )
     # remove keys that are not useful from the result
     del rec_info["cmd"]
     del rec_info["rsync"]
     sys_tempdir = rec_info["tempdir"]
     assert rec_info == {
         "tempdir": sys_tempdir,
         "wal_dest": dest.join("pg_xlog").strpath,
         "recovery_dest": "remote",
         "destination_path": dest.strpath,
         "temporary_configuration_files": [
             os.path.join(sys_tempdir, "postgresql.conf"),
             os.path.join(sys_tempdir, "postgresql.auto.conf"),
         ],
         "results": {
             "delete_barman_wal": False,
             "get_wal": False,
             "recovery_start_time": rec_info["results"]["recovery_start_time"],
             "changes": [
                 Assertion._make(["postgresql.conf", 0, "archive_command", "false"]),
                 Assertion._make(
                     ["postgresql.auto.conf", 0, "archive_command", "false"]
                 ),
             ],
             "missing_files": [],
             "recovery_configuration_file": "recovery.conf",
             "warnings": [
                 Assertion._make(
                     ["postgresql.conf", 2, "data_directory", "something"]
                 ),
                 Assertion._make(
                     ["postgresql.auto.conf", 2, "data_directory", "something"]
                 ),
             ],
         },
         "target_epoch": None,
         "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
         "target_datetime": None,
         "safe_horizon": None,
         "is_pitr": False,
         "get_wal": False,
     }
     # test failed rsync
     rsync_pg_mock.side_effect = CommandFailedException()
     with pytest.raises(CommandFailedException):
         with closing(executor):
             executor.recover(
                 backup_info,
                 dest.strpath,
                 exclusive=True,
                 remote_command="remote@command",
             )
Code example #16
File: copy_controller.py Project: secwall/barman
    def copy(self):
        """
        Execute the actual copy
        """
        # Create a temporary directory to hold the file lists.
        temp_dir = tempfile.mkdtemp(suffix='', prefix='barman-')
        # The following try block is to make sure the temporary directory
        # will be removed on exit.
        try:
            # Initialize the counters used by progress reporting
            self._progress_init()
            _logger.info("Copy started (safe before %r)", self.safe_horizon)

            # Execute some preliminary steps for each item to be copied
            for item in self.item_list:

                # The initial preparation is necessary only for directories
                if not item.is_directory:
                    continue

                # Analyze the source and destination directory content
                self._progress_report("analyze %s" % item)
                self._analyze_directory(item, temp_dir)

                # Prepare the target directories, removing any unneeded file
                self._progress_report(
                    "create destination directories and delete unknown files "
                    "for %s" % item)
                self._create_dir_and_purge(item)

            # Do the actual copy
            for item in self.item_list:

                # Build the rsync object required for the copy
                rsync = self.rsync_factory(item)

                # If the item is a directory use the copy method,
                # otherwise run a plain rsync
                if item.is_directory:

                    # Log the operation that is being executed
                    self._progress_report("copy safe files from %s" % item)

                    # Copy the safe files
                    self._copy(rsync,
                               item.src,
                               item.dst,
                               file_list=item.safe_file,
                               checksum=False)

                    # Log the operation that is being executed
                    self._progress_report("copy files with checksum from %s" %
                                          item)

                    # Copy the files with rsync
                    self._copy(rsync,
                               item.src,
                               item.dst,
                               file_list=item.check_file,
                               checksum=True)
                else:
                    # Log the operation that is being executed
                    self._progress_report("copy %s" % item)
                    rsync(item.src, item.dst, allowed_retval=(0, 23, 24))
                    if rsync.ret == 23:
                        if item.optional:
                            _logger.warning("Ignoring error reading %s", item)
                        else:
                            raise CommandFailedException(
                                dict(ret=rsync.ret,
                                     out=rsync.out,
                                     err=rsync.err))
        finally:
            # Clean up the tmp dir and log; exception management is
            # delegated to the executor class
            shutil.rmtree(temp_dir)
            _logger.info("Copy finished (safe before %s)", self.safe_horizon)
Code example #17
File: test_executor.py Project: secwall/barman
    def test_backup_copy(self, remote_mock, pg_basebackup_mock, tmpdir):
        """
        Test backup folder structure

        :param remote_mock: mock for the fetch_remote_status method
        :param pg_basebackup_mock: mock for the PgBaseBackup object
        :param tmpdir: pytest temp directory
        """
        backup_manager = build_backup_manager(
            global_conf={
                'barman_home': tmpdir.mkdir('home').strpath,
                'backup_method': 'postgres'
            })
        # simulate an old version of pg_basebackup
        # that does not support bandwidth_limit
        remote_mock.return_value = {
            'pg_basebackup_version': '9.2',
            'pg_basebackup_path': '/fake/path',
            'pg_basebackup_bwlimit': False,
        }
        server_mock = backup_manager.server
        streaming_mock = server_mock.streaming
        server_mock.config.bandwidth_limit = 1
        streaming_mock.get_connection_string.return_value = 'fake=connstring'
        streaming_mock.conn_parameters = {
            'host': 'fakeHost',
            'port': 'fakePort',
            'user': '******'
        }
        backup_info = build_test_backup_info(server=backup_manager.server,
                                             backup_id='fake_backup_id')
        backup_manager.executor.backup_copy(backup_info)
        # check that the bwlimit option has been ignored
        assert pg_basebackup_mock.mock_calls == [
            mock.call(connection=mock.ANY,
                      version='9.2',
                      app_name='barman_streaming_backup',
                      destination=mock.ANY,
                      command='/fake/path',
                      tbs_mapping=mock.ANY,
                      bwlimit=None,
                      immediate=False,
                      retry_times=0,
                      retry_sleep=30,
                      retry_handler=mock.ANY,
                      path=mock.ANY),
            mock.call()(),
        ]

        # Check with newer version
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        backup_manager.executor._remote_status = None
        remote_mock.return_value = {
            'pg_basebackup_version': '9.5',
            'pg_basebackup_path': '/fake/path',
            'pg_basebackup_bwlimit': True,
        }
        backup_manager.executor.config.immediate_checkpoint = True
        backup_manager.executor.config.streaming_conninfo = 'fake=connstring'
        backup_manager.executor.backup_copy(backup_info)
        # check that the bwlimit option has been passed to the test call
        assert pg_basebackup_mock.mock_calls == [
            mock.call(connection=mock.ANY,
                      version='9.5',
                      app_name='barman_streaming_backup',
                      destination=mock.ANY,
                      command='/fake/path',
                      tbs_mapping=mock.ANY,
                      bwlimit=1,
                      immediate=True,
                      retry_times=0,
                      retry_sleep=30,
                      retry_handler=mock.ANY,
                      path=mock.ANY),
            mock.call()(),
        ]

        # Raise a test CommandFailedException and expect it to be wrapped
        # inside a DataTransferFailure exception
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        pg_basebackup_mock.return_value.side_effect = \
            CommandFailedException(dict(ret='ret', out='out', err='err'))
        with pytest.raises(DataTransferFailure):
            backup_manager.executor.backup_copy(backup_info)
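The trailing `mock.call()()` entries in the expected call lists assert that the object returned by the `PgBaseBackup` constructor was itself invoked. A tiny self-contained demonstration of that notation:

    from unittest import mock

    m = mock.Mock()
    m(version='9.2')   # recorded as call(version='9.2')
    m.return_value()   # recorded as call()(): "the result was called"
    assert m.mock_calls == [mock.call(version='9.2'), mock.call()()]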
Code example #18
File: copy_controller.py Project: qiuwenhuifx/barman
    def _execute_job(self, job):
        """
        Execute a `_RsyncJob` in a worker process

        :type job: _RsyncJob
        """
        item = self.item_list[job.item_idx]
        if job.id is not None:
            bucket = "bucket %s" % job.id
        else:
            bucket = "global"
        # Build the rsync object required for the copy
        rsync = self._rsync_factory(item)
        # Store the start time
        job.copy_start_time = datetime.datetime.now()
        # Write in the log that the job is starting
        with _logger_lock:
            _logger.info(job.description, bucket, "starting")
        if item.is_directory:
            # A directory item must always have checksum and file_list set
            assert (
                job.file_list is not None
            ), "A directory item must not have a None `file_list` attribute"
            assert (
                job.checksum is not None
            ), "A directory item must not have a None `checksum` attribute"

            # Generate a unique name for the file containing the list of files
            file_list_path = os.path.join(
                self.temp_dir,
                "%s_%s_%s.list"
                % (item.label, "check" if job.checksum else "safe", os.getpid()),
            )

            # Write the list, one path per line
            with open(file_list_path, "w") as file_list:
                for entry in job.file_list:
                    assert isinstance(entry, _FileItem), (
                        "expect %r to be a _FileItem" % entry
                    )
                    file_list.write(entry.path + "\n")

            self._copy(
                rsync,
                item.src,
                item.dst,
                file_list=file_list_path,
                checksum=job.checksum,
            )
        else:
            # A file must never have checksum and file_list set
            assert (
                job.file_list is None
            ), "A file item must have a None `file_list` attribute"
            assert (
                job.checksum is None
            ), "A file item must have a None `checksum` attribute"
            rsync(item.src, item.dst, allowed_retval=(0, 23, 24))
            if rsync.ret == 23:
                if item.optional:
                    _logger.warning("Ignoring error reading %s", item)
                else:
                    raise CommandFailedException(
                        dict(ret=rsync.ret, out=rsync.out, err=rsync.err)
                    )
        # Store the stop time
        job.copy_end_time = datetime.datetime.now()
        # Write in the log that the job is finished
        with _logger_lock:
            _logger.info(
                job.description,
                bucket,
                "finished (duration: %s)"
                % human_readable_timedelta(job.copy_end_time - job.copy_start_time),
            )
        # Return the job to the caller, for statistics purposes
        return job
Code example #19
File: test_sync.py Project: EnterpriseDB/barman
    def test_sync_backup(self, logger_mock, rsync_mock, tmpdir, capsys):
        """
        Test the synchronisation method, testing all
        the possible error conditions.

        :param MagicMock logger_mock: MagicMock obj mimicking the logger
        :param MagicMock rsync_mock: MagicMock replacing Rsync class
        :param py.local.path tmpdir: py.test temporary directory
        :param capsys: fixture that allows access to stdout/stderr output
        """
        backup_name = "1234567890"
        server_name = "main"

        # Prepare paths
        backup_dir = tmpdir.mkdir(server_name)
        basebackup_dir = backup_dir.mkdir("base")
        full_backup_path = basebackup_dir.mkdir(backup_name)

        self._create_primary_info_file(tmpdir, backup_dir)

        # Test 1: Not a passive node.
        # Expect SyncError
        server = build_real_server(
            global_conf={"barman_lock_directory": tmpdir.strpath},
            main_conf={"backup_directory": backup_dir.strpath},
        )
        with pytest.raises(SyncError):
            server.sync_backup(backup_name)

        # Test 2: normal sync execution, no error expected.
        # test for all the steps on the logger
        logger_mock.reset_mock()
        server = build_real_server(
            global_conf={"barman_lock_directory": tmpdir.strpath},
            main_conf={
                "backup_directory": backup_dir.strpath,
                "primary_ssh_command": "ssh fakeuser@fakehost",
            },
        )
        server.sync_backup(backup_name)
        logger_mock.info.assert_any_call(
            "Synchronising with server %s backup %s: step 1/3: "
            "parse server information",
            server_name,
            backup_name,
        )
        logger_mock.info.assert_any_call(
            "Synchronising with server %s backup %s: step 2/3: file copy",
            server_name,
            backup_name,
        )
        logger_mock.info.assert_any_call(
            "Synchronising with server %s backup %s: step 3/3: finalise sync",
            server_name,
            backup_name,
        )

        # Test 3: test Rsync Failure
        # Expect a BackupInfo object with status "FAILED"
        # and an error message in the "error" field of the object
        rsync_mock.reset_mock()
        server.backup_manager._backup_cache = {}
        rsync_mock.side_effect = CommandFailedException("TestFailure")
        full_backup_path.remove(rec=1)
        server.sync_backup(backup_name)
        backup_info = server.get_backup(backup_name)
        assert backup_info.status == BackupInfo.FAILED
        assert (
            backup_info.error == "failure syncing server main "
            "backup 1234567890: TestFailure"
        )

        # Test 4: test KeyboardInterrupt management
        # Check the error message for the KeyboardInterrupt event
        rsync_mock.reset_mock()
        rsync_mock.side_effect = CommandFailedException("TestFailure")
        full_backup_path.remove(rec=1)
        rsync_mock.side_effect = KeyboardInterrupt()
        server.sync_backup(backup_name)
        backup_info = server.get_backup(backup_name)
        assert backup_info.status == BackupInfo.FAILED
        assert (
            backup_info.error == "failure syncing server main "
            "backup 1234567890: KeyboardInterrupt"
        )

        # Test 5: test backup name not present on Master server
        # Expect an error message on stderr
        rsync_mock.reset_mock()
        rsync_mock.side_effect = CommandFailedException("TestFailure")
        full_backup_path.remove(rec=1)
        server.sync_backup("wrong_backup_name")

        (out, err) = capsys.readouterr()
        # Check the stderr using capsys. We only need the first line
        # from stderr
        e = err.split("\n")
        assert "ERROR: failure syncing server main backup 1234567890: TestFailure" in e

        # Test 6: Backup already synced
        # Check for the warning message on stdout using capsys
        rsync_mock.reset_mock()
        rsync_mock.side_effect = None
        # do it the first time and check it succeeded
        server.sync_backup(backup_name)
        backup_info = server.get_backup(backup_name)
        assert backup_info.status == BackupInfo.DONE
        # do it again and test it does not call rsync
        rsync_mock.reset_mock()
        server.sync_backup(backup_name)
        assert not rsync_mock.called
        (out, err) = capsys.readouterr()
        assert out.strip() == "Backup 1234567890 is already synced with main server"
Code example #20
File: command_wrappers.py Project: kmoppel/barman
    def find_command(cls, path=None):
        """
        Find the active command, given all the alternatives as set in the
        property named `COMMAND_ALTERNATIVES` in this class.

        :param str path: The path to use while searching for the command
        :rtype: Command
        """

        # TODO: Unit tests of this one

        # To search for an available command, testing if the command
        # exists in PATH is not sufficient. Debian will install wrappers for
        # all commands, even if the real command doesn't work.
        #
        # I.e. we may have a wrapper for `pg_receivewal` even if
        # PostgreSQL 10 isn't installed.
        #
        # This is an example of what can happen in this case:
        #
        # ```
        # $ pg_receivewal --version; echo $?
        # Error: pg_wrapper: pg_receivewal was not found in
        #   /usr/lib/postgresql/9.6/bin
        # 1
        # $ pg_receivexlog --version; echo $?
        # pg_receivexlog (PostgreSQL) 9.6.3
        # 0
        # ```
        #
        # That means we should not only ensure the existence of the command,
        # but we also need to invoke the command to see if it is a shim
        # or not.

        # Get the system path if needed
        if path is None:
            path = os.getenv('PATH')
        # If the path is None at this point we have nothing to search
        if path is None:
            path = ''

        # Search for the requested executable in every directory present
        # in path and return a Command object for the first occurrence
        # that exists, is executable and runs without errors.
        for path_entry in path.split(os.path.pathsep):
            for cmd in cls.COMMAND_ALTERNATIVES:
                full_path = barman.utils.which(cmd, path_entry)

                # It doesn't exist; try another
                if not full_path:
                    continue

                # It exists, let's try invoking it with `--version` to check if
                # it's real or not.
                try:
                    command = Command(full_path, path=path, check=True)
                    command("--version")
                    return command
                except CommandFailedException:
                    # It's only an inactive shim
                    continue

        # We don't have such a command
        raise CommandFailedException('command not in PATH, tried: %s' %
                                     ' '.join(cls.COMMAND_ALTERNATIVES))
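A hedged usage sketch: a subclass declares its `COMMAND_ALTERNATIVES` and callers invoke `find_command()` on the class. barman ships such a wrapper for pg_receivewal/pg_receivexlog; the minimal subclass below is illustrative:

    class PgReceiveXlog(Command):
        # Prefer the PostgreSQL 10+ name, fall back to the legacy one
        COMMAND_ALTERNATIVES = ['pg_receivewal', 'pg_receivexlog']

    try:
        command = PgReceiveXlog.find_command()
    except CommandFailedException:
        command = None  # no working binary found in PATH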
Code example #21
File: test_executor.py Project: zombig/barman
    def test_backup_copy(self, remote_mock, pg_basebackup_mock,
                         tmpdir, capsys):
        """
        Test backup folder structure

        :param remote_mock: mock for the fetch_remote_status method
        :param pg_basebackup_mock: mock for the PgBaseBackup object
        :param tmpdir: pytest temp directory
        :param capsys: fixture that allows access to stdout/stderr output
        """
        backup_manager = build_backup_manager(global_conf={
            'barman_home': tmpdir.mkdir('home').strpath,
            'backup_method': 'postgres'
        })
        # simulate an old version of pg_basebackup
        # that does not support bandwidth_limit
        remote_mock.return_value = {
            'pg_basebackup_version': '9.2',
            'pg_basebackup_path': '/fake/path',
            'pg_basebackup_bwlimit': False,
        }
        server_mock = backup_manager.server
        streaming_mock = server_mock.streaming
        server_mock.config.bandwidth_limit = 1
        streaming_mock.get_connection_string.return_value = 'fake=connstring'
        streaming_mock.conn_parameters = {
            'host': 'fakeHost',
            'port': 'fakePort',
            'user': '******'
        }
        backup_info = build_test_backup_info(server=backup_manager.server,
                                             backup_id='fake_backup_id')
        backup_manager.executor.backup_copy(backup_info)
        out, err = capsys.readouterr()
        assert out == ''
        assert err == ''
        # check that the bwlimit option has been ignored
        assert pg_basebackup_mock.mock_calls == [
            mock.call(
                connection=mock.ANY,
                version='9.2',
                app_name='barman_streaming_backup',
                destination=mock.ANY,
                command='/fake/path',
                tbs_mapping=mock.ANY,
                bwlimit=None,
                immediate=False,
                retry_times=0,
                retry_sleep=30,
                retry_handler=mock.ANY,
                path=mock.ANY),
            mock.call()(),
        ]

        # Check with newer version
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        backup_manager.executor._remote_status = None
        remote_mock.return_value = {
            'pg_basebackup_version': '9.5',
            'pg_basebackup_path': '/fake/path',
            'pg_basebackup_bwlimit': True,
        }
        backup_manager.executor.config.immediate_checkpoint = True
        backup_manager.executor.config.streaming_conninfo = 'fake=connstring'
        backup_manager.executor.backup_copy(backup_info)
        out, err = capsys.readouterr()
        assert out == ''
        assert err == ''
        # check that the bwlimit option has been passed to the test call
        assert pg_basebackup_mock.mock_calls == [
            mock.call(
                connection=mock.ANY,
                version='9.5',
                app_name='barman_streaming_backup',
                destination=mock.ANY,
                command='/fake/path',
                tbs_mapping=mock.ANY,
                bwlimit=1,
                immediate=True,
                retry_times=0,
                retry_sleep=30,
                retry_handler=mock.ANY,
                path=mock.ANY),
            mock.call()(),
        ]

        # Check with a config file outside the data directory
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        backup_info.ident_file = '/pg/pg_ident.conf'
        backup_manager.executor.backup_copy(backup_info)
        out, err = capsys.readouterr()
        assert out == ''
        assert err.strip() == 'WARNING: pg_basebackup does not copy ' \
                              'the PostgreSQL configuration files that '\
                              'reside outside PGDATA. ' \
                              'Please manually backup the following files:' \
                              '\n\t/pg/pg_ident.conf'
        # check that the pg_basebackup call is unchanged despite the warning
        assert pg_basebackup_mock.mock_calls == [
            mock.call(
                connection=mock.ANY,
                version='9.5',
                app_name='barman_streaming_backup',
                destination=mock.ANY,
                command='/fake/path',
                tbs_mapping=mock.ANY,
                bwlimit=1,
                immediate=True,
                retry_times=0,
                retry_sleep=30,
                retry_handler=mock.ANY,
                path=mock.ANY),
            mock.call()(),
        ]

        # Check with a config file outside the data directory and
        # external_configurations backup option
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        backup_manager.config.backup_options.add(
            BackupOptions.EXTERNAL_CONFIGURATION)
        backup_manager.executor.backup_copy(backup_info)
        out, err = capsys.readouterr()
        assert out == ''
        assert err == ''
        # check that the pg_basebackup call is still performed with the same options
        assert pg_basebackup_mock.mock_calls == [
            mock.call(
                connection=mock.ANY,
                version='9.5',
                app_name='barman_streaming_backup',
                destination=mock.ANY,
                command='/fake/path',
                tbs_mapping=mock.ANY,
                bwlimit=1,
                immediate=True,
                retry_times=0,
                retry_sleep=30,
                retry_handler=mock.ANY,
                path=mock.ANY),
            mock.call()(),
        ]

        # Raise a test CommandFailedException and expect it to be wrapped
        # inside a DataTransferFailure exception
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        pg_basebackup_mock.return_value.side_effect = \
            CommandFailedException(dict(ret='ret', out='out', err='err'))
        with pytest.raises(DataTransferFailure):
            backup_manager.executor.backup_copy(backup_info)
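
The first two phases of the test above hinge on how the executor gates the
bandwidth limit on the capabilities reported by fetch_remote_status. A
minimal sketch of that gate, assuming a hypothetical helper name (the real
executor wires this logic up differently):

    def resolve_bwlimit(remote_status, configured_limit):
        # pg_basebackup gained the --max-rate option only in 9.4, so on
        # older binaries a configured bandwidth limit must be dropped.
        if not remote_status.get('pg_basebackup_bwlimit'):
            return None
        return configured_limit

    # With the 9.2 status dict above this returns None (matching
    # bwlimit=None in the first expected call); with the 9.5 status
    # dict it returns the configured value 1.
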
Code example #22
File: command_wrappers.py Project: kmoppel/barman
    def __init__(self,
                 cmd,
                 args=None,
                 env_append=None,
                 path=None,
                 shell=False,
                 check=False,
                 allowed_retval=(0, ),
                 close_fds=True,
                 out_handler=None,
                 err_handler=None,
                 retry_times=0,
                 retry_sleep=0,
                 retry_handler=None):
        """
        If the `args` argument is specified, those arguments will always be
        added to the ones passed, if any, with the actual invocation.

        If the `env_append` argument is present, its content will be added
        to the environment of every invocation.

        The subprocess output and error streams will be processed through
        the output and error handlers, defined through the `out_handler` and
        `err_handler` arguments respectively. If not provided, every line
        will be sent to the log, at INFO level for stdout and at WARNING
        level for stderr.

        The `out_handler` and `err_handler` functions will be invoked with a
        single argument: a string containing the line being processed.

        If the `close_fds` argument is True, all file descriptors
        except 0, 1 and 2 will be closed before the child process is executed.

        If the `check` argument is True, the exit code will be checked
        against the `allowed_retval` list, raising a CommandFailedException if
        not in the list.

        If `retry_times` is greater than 0, when the execution of a command
        terminates with an error, it will be retried for
        a maximum of `retry_times` times, waiting for `retry_sleep` seconds
        between every attempt.

        Every time a command is retried, the `retry_handler` is executed
        before running the command again. The retry_handler must be a callable
        that accepts the following arguments:

         * the Command object
         * the arguments list
         * the keyword arguments dictionary
         * the number of the failed attempt
         * the exception containing the error

        An example of such a function is:

            > def retry_handler(command, args, kwargs, attempt, exc):
            >     print("Failed command!")

        Some of the keyword arguments can be specified both in the class
        constructor and during the method call. If specified in both places,
        the method arguments take precedence over the constructor arguments.

        :param str cmd: The command to execute
        :param list[str]|None args: List of additional arguments to append
        :param dict[str,str]|None env_append: additional environment variables
        :param str path: PATH to be used while searching for `cmd`
        :param bool shell: If true, use the shell instead of an "execve" call
        :param bool check: Raise a CommandFailedException if the exit code
            is not present in `allowed_retval`
        :param list[int] allowed_retval: List of exit codes considered a
            successful termination.
        :param bool close_fds: If set, close all the extra file descriptors
        :param callable out_handler: handler for lines sent on stdout
        :param callable err_handler: handler for lines sent on stderr
        :param int retry_times: number of allowed retry attempts
        :param int retry_sleep: wait seconds between every retry
        :param callable retry_handler: handler invoked during a command retry
        """
        self.pipe = None
        self.cmd = cmd
        self.args = args if args is not None else []
        self.shell = shell
        self.close_fds = close_fds
        self.check = check
        self.allowed_retval = allowed_retval
        self.retry_times = retry_times
        self.retry_sleep = retry_sleep
        self.retry_handler = retry_handler
        self.path = path
        self.ret = None
        self.out = None
        self.err = None
        # If env_append has not been provided, fall back to an empty dict
        env_append = env_append or {}
        # If path has been provided, propagate it to the child environment
        if path:
            env_append['PATH'] = path
        # Find the absolute path to the command to execute
        if not self.shell:
            full_path = barman.utils.which(self.cmd, self.path)
            if not full_path:
                raise CommandFailedException('%s not in PATH' % self.cmd)
            self.cmd = full_path
        # If env_append contains anything, build an env dict to be used during
        # subprocess call, otherwise set it to None and let the subprocesses
        # inherit the parent environment
        if env_append:
            self.env = os.environ.copy()
            self.env.update(env_append)
        else:
            self.env = None
        # If an output handler has been provided use it, otherwise log the
        # stdout as INFO
        if out_handler:
            self.out_handler = out_handler
        else:
            self.out_handler = self.make_logging_handler(logging.INFO)
        # If an error handler has been provided use it, otherwise log the
        # stderr as WARNING
        if err_handler:
            self.err_handler = err_handler
        else:
            self.err_handler = self.make_logging_handler(logging.WARNING)
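
As a usage illustration for the constructor documented above, a minimal
sketch follows; the wrapped command, the handler body and the retry values
are placeholders, and the no-argument instance call mirrors the
mock.call()() pattern asserted in the test above:

    from barman.command_wrappers import Command

    def on_retry(command, args, kwargs, attempt, exc):
        # Invoked before each new attempt, as described in the docstring
        print('attempt %d failed: %s' % (attempt, exc))

    cmd = Command(
        'ls',
        args=['-l'],
        check=True,             # raise CommandFailedException on failure
        allowed_retval=(0,),
        retry_times=2,          # retry a failing run up to two times
        retry_sleep=1,          # wait one second between attempts
        retry_handler=on_retry,
    )
    cmd()                       # run 'ls -l'
    print(cmd.ret)              # exit code of the last invocation
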
Code example #23
    def __init__(self, destination,
                 pg_basebackup='pg_basebackup',
                 conn_string=None,
                 host=None,
                 port=None,
                 user=None,
                 bwlimit=None,
                 tbs_mapping=None,
                 args=None,
                 path=None,
                 immediate=False,
                 **kwargs):
        """
        Constructor

        :param str conn_string: connection string
        :param str host: the host to connect to
        :param str port: the port used for the connection to PostgreSQL
        :param str user: the user to use to connect to PostgreSQL
        :param str pg_basebackup: command to run
        :param str bwlimit: bandwidth limit for pg_basebackup
        :param bool immediate: fast checkpoint identifier for pg_basebackup
        :param str path: additional path for executable retrieval
        :param List[str] args: additional arguments
        :param Dict[str, str] tbs_mapping: used for tablespace
          relocation
        :param str destination: destination directory
        """
        # Check if pg_basebackup is actually available
        pg_basebackup_path = barman.utils.which(pg_basebackup, path)
        if not pg_basebackup_path:
            raise CommandFailedException('pg_basebackup not in system PATH: '
                                         'is pg_basebackup installed?')

        # Set the backup destination
        options = ['-v', '--pgdata=%s' % destination]

        # The tablespace mapping option is repeated once for each tablespace
        if tbs_mapping:
            for (tbs_source, tbs_destination) in tbs_mapping.items():
                options.append('--tablespace-mapping=%s=%s' %
                               (tbs_source, tbs_destination))

        # Pass the connections parameters
        if conn_string:
            options.append("--dbname=%s" % conn_string)
        if host:
            options.append("--host=%s" % host)
        if port:
            options.append("--port=%s" % port)
        if user:
            options.append("--username=%s" % user)

        # Only global bandwidth limit is supported
        if bwlimit is not None and bwlimit > 0:
            options.append("--max-rate=%s" % bwlimit)

        # Immediate checkpoint
        if immediate:
            options.append("--checkpoint=fast")

        # Add other arguments
        if args:
            options += args

        Command.__init__(self, pg_basebackup, args=options, check=True,
                         path=path, **kwargs)
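
A minimal instantiation sketch for the wrapper above; the destination, the
connection string and the tablespace paths are placeholders, and the
constructor raises CommandFailedException unless a pg_basebackup executable
is actually found on the PATH:

    from barman.command_wrappers import PgBaseBackup

    pg_bb = PgBaseBackup(
        destination='/srv/barman/main/base/some_backup_id/data',
        conn_string='host=pg.example.com user=streaming_barman',
        tbs_mapping={'/tbs/old': '/tbs/new'},
        bwlimit=1000,           # emitted as --max-rate=1000
        immediate=True,         # emitted as --checkpoint=fast
    )
    # The option list built above (passed to Command as its arguments)
    # contains, among others:
    #   --pgdata=/srv/barman/main/base/some_backup_id/data
    #   --tablespace-mapping=/tbs/old=/tbs/new
    #   --dbname=host=pg.example.com user=streaming_barman
    #   --max-rate=1000
    #   --checkpoint=fast
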