Example #1
0
    def test_delete_running_backup(self, delete_mock, get_first_backup_mock, tmpdir, capsys):
        """
        Simple test for the deletion of a running backup.

        We want to test the behaviour of the server.delete_backup method
        when invoked on a running backup: while the ServerBackupLock is
        held, deletion must be refused for STARTED and EMPTY backups and
        allowed for DONE backups.
        """
        # Test the removal of a running backup. status STARTED
        server = build_real_server({"barman_home": tmpdir.strpath})
        backup_info_started = build_test_backup_info(status=BackupInfo.STARTED, server_name=server.config.name)
        get_first_backup_mock.return_value = backup_info_started.backup_id
        with ServerBackupLock(tmpdir.strpath, server.config.name):
            server.delete_backup(backup_info_started)
            out, err = capsys.readouterr()
            assert "Cannot delete a running backup (%s %s)" % (server.config.name, backup_info_started.backup_id) in err

        # Test the removal of a running backup. status EMPTY
        backup_info_empty = build_test_backup_info(status=BackupInfo.EMPTY, server_name=server.config.name)
        get_first_backup_mock.return_value = backup_info_empty.backup_id
        with ServerBackupLock(tmpdir.strpath, server.config.name):
            server.delete_backup(backup_info_empty)
            out, err = capsys.readouterr()
            # BUGFIX: the message must be checked against the EMPTY backup
            # under test, not the STARTED one from the previous scenario.
            assert "Cannot delete a running backup (%s %s)" % (server.config.name, backup_info_empty.backup_id) in err

        # Test the removal of a running backup. status DONE
        backup_info_done = build_test_backup_info(status=BackupInfo.DONE, server_name=server.config.name)
        with ServerBackupLock(tmpdir.strpath, server.config.name):
            server.delete_backup(backup_info_done)
            delete_mock.assert_called_with(backup_info_done)

        # Test the removal of a backup not running. status STARTED
        server.delete_backup(backup_info_started)
        delete_mock.assert_called_with(backup_info_started)
Example #2
0
    def test_get_backup(self, tmpdir):
        """
        Check the get_backup method that uses the backups cache to retrieve
        a backup using the id
        """
        # Backup manager rooted in a temporary barman home
        manager = build_backup_manager(
            name='TestServer',
            global_conf={'barman_home': tmpdir.strpath})

        # Persist a test backup on disk so the cache loader can find it
        backup = build_test_backup_info(
            backup_id='fake_backup_id',
            server=manager.server,
        )
        backup.save()

        # The cache starts uninitialized
        assert manager._backup_cache is None

        # Retrieving by id must return an equivalent BackupInfo
        retrieved = manager.get_backup(backup.backup_id)
        assert retrieved.to_dict() == backup.to_dict()

        # With an explicitly emptied cache the same lookup must miss
        manager._backup_cache = {}
        assert manager.get_backup(backup.backup_id) is None
 def test_recover_basebackup_copy(self, rsync_pg_mock, tmpdir):
     """
     Test the copy of the content of a backup during a recovery
     :param rsync_pg_mock: Mock rsync object for the purpose of this test
     """
     # Destination directory and a fake backup with one tablespace
     recovery_dest = tmpdir.mkdir('destination')
     server = testing_helpers.build_real_server()
     backup_info = testing_helpers.build_test_backup_info(
         server=server,
         tablespaces=[('tbs1', 16387, '/fake/location')])
     # Executor configured with per-tablespace and global bandwidth limits
     executor = RecoveryExecutor(server.backup_manager)
     executor.config.tablespace_bandwidth_limit = {'tbs1': ''}
     executor.config.bandwidth_limit = 10
     executor.basebackup_copy(
         backup_info, recovery_dest.strpath, tablespaces=None)
     # Last rsync built: PGDATA copy with the global bwlimit, protecting
     # the tablespace mount point inside the destination
     rsync_pg_mock.assert_called_with(
         network_compression=False, bwlimit=10, ssh=None, path=None,
         exclude_and_protect=['/pg_tblspc/16387'])
     # A dedicated rsync was built for the tablespace bandwidth limit
     rsync_pg_mock.assert_any_call(
         network_compression=False, bwlimit='', ssh=None, path=None,
         check=True)
     # Tablespace content copied to its configured location
     rsync_pg_mock.return_value.smart_copy.assert_any_call(
         '/some/barman/home/main/base/1234567890/16387/',
         '/fake/location', None)
     # PGDATA content copied into the recovery destination (last copy)
     rsync_pg_mock.return_value.smart_copy.assert_called_with(
         '/some/barman/home/main/base/1234567890/data/',
         recovery_dest.strpath, None)
 def test_prepare_tablespaces(self, tmpdir):
     """
     Test tablespaces preparation for recovery
     """
     # Basic directory layout for the recovery
     dest = tmpdir.mkdir('destination')
     wals = tmpdir.mkdir('wals')
     backup_info = testing_helpers.build_test_backup_info(
         tablespaces=[('tbs1', 16387, '/fake/location')])
     server = testing_helpers.build_real_server(
         main_conf={'wals_directory': wals.strpath})
     executor = RecoveryExecutor(server.backup_manager)
     # A plain Mock stands in for the (remote) command object
     cmd_mock = Mock()
     executor.prepare_tablespaces(backup_info, cmd_mock, dest.strpath, {})
     # Both pg_tblspc and the tablespace location must be created
     tblspc_dir = dest.join('pg_tblspc')
     cmd_mock.create_dir_if_not_exists.assert_any_call(tblspc_dir.strpath)
     cmd_mock.create_dir_if_not_exists.assert_any_call('/fake/location')
     # Any stale link is removed, then the symlink is recreated exactly once
     link_path = tblspc_dir.join('16387').strpath
     cmd_mock.delete_if_exists.assert_called_once_with(link_path)
     cmd_mock.create_symbolic_link.assert_called_once_with(
         '/fake/location', link_path)
Example #5
0
    def test_backup_cache_remove(self, tmpdir):
        """
        Check the method responsible for the removal of a BackupInfo object from
        the backups cache
        """
        # Backup manager with a minimal configuration
        manager = build_backup_manager(
            name='TestServer',
            global_conf={'barman_home': tmpdir.strpath})

        assert manager._backup_cache is None

        # Test backup; not saved to disk because the cache is driven directly
        b_info = build_test_backup_info(
            backup_id='fake_backup_id',
            server=manager.server,
        )

        # Removing from an uninitialized cache is a no-op and must not
        # trigger a cache initialization as a side effect
        manager.backup_cache_remove(b_info)
        assert manager._backup_cache is None

        # Once the cache holds the backup, removal must drop its entry
        manager._backup_cache = {b_info.backup_id: b_info}
        manager.backup_cache_remove(b_info)
        assert b_info.backup_id not in manager._backup_cache
    def test_set_pitr_targets(self, tmpdir):
        """
        Evaluate targets for point in time recovery
        """
        # Directory scaffolding used by the recovery_info dictionary
        tempdir = tmpdir.mkdir("temp_dir")
        dest = tmpdir.mkdir("dest")
        wal_dest = tmpdir.mkdir("wal_dest")
        recovery_info = {
            "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
            "tempdir": tempdir.strpath,
            "results": {"changes": [], "warnings": []},
            "is_pitr": False,
            "wal_dest": wal_dest.strpath,
            "get_wal": False,
        }
        backup_info = testing_helpers.build_test_backup_info()
        backup_manager = testing_helpers.build_backup_manager()
        executor = RecoveryExecutor(backup_manager)

        # With empty targets (no PITR) nothing must be set and the WAL
        # destination must stay untouched
        executor._set_pitr_targets(recovery_info, backup_info, dest.strpath, "", "", "", "")
        assert recovery_info["target_epoch"] is None
        assert recovery_info["target_datetime"] is None
        assert recovery_info["wal_dest"] == wal_dest.strpath

        # With real PITR targets the time must be parsed and the WAL
        # destination relocated inside the recovery destination
        executor._set_pitr_targets(
            recovery_info, backup_info, dest.strpath, "target_name", "2015-06-03 16:11:03.71038+02", "2", None
        )
        expected_datetime = dateutil.parser.parse("2015-06-03 16:11:03.710380+02:00")
        expected_epoch = time.mktime(expected_datetime.timetuple()) + (expected_datetime.microsecond / 1000000.0)
        assert recovery_info["target_datetime"] == expected_datetime
        assert recovery_info["target_epoch"] == expected_epoch
        assert recovery_info["wal_dest"] == dest.join("barman_xlog").strpath
Example #7
0
 def test_get_wal_info(self, get_wal_mock, tmpdir):
     """
     Basic test for get_wal_info method
     Test the wals per second and total time in seconds values.
     :return:
     """
     # Build a test server with a test path
     server = build_real_server(global_conf={
         'barman_home': tmpdir.strpath
     })
     # Three fake WALs returned by get_wal_until_next_backup; the first
     # one is both the begin and end WAL of the backup under test
     xlogdb_lines = [
         "000000010000000000000002\t16777216\t1434450086.53\tNone\n",
         "000000010000000000000003\t16777216\t1434450087.54\tNone\n",
         "000000010000000000000004\t16777216\t1434450088.55\tNone\n",
     ]
     wal_list = [WalFileInfo.from_xlogdb_line(line) for line in xlogdb_lines]
     get_wal_mock.return_value = wal_list
     backup_info = build_test_backup_info(
         server=server,
         begin_wal=wal_list[0].name,
         end_wal=wal_list[0].name)
     backup_info.save()
     # Expected total time: last_wal_timestamp - first_wal_timestamp
     wal_total_seconds = wal_list[-1].time - wal_list[0].time
     # Expected rate: number of WALs / total time in seconds
     wals_per_second = len(wal_list) / wal_total_seconds
     wal_info = server.get_wal_info(backup_info)
     assert wal_info
     assert wal_info['wal_total_seconds'] == wal_total_seconds
     assert wal_info['wals_per_second'] == wals_per_second
Example #8
0
    def test_backup_copy(self, rsync_mock, tmpdir):
        """
        Test the execution of a rsync copy

        :param rsync_mock: mock for the rsync command
        :param tmpdir: temporary dir
        """
        backup_manager = build_backup_manager(global_conf={
            'barman_home': tmpdir.mkdir('home').strpath
        })
        # Backup with two config files outside PGDATA (config_file is
        # external, hba/ident are inside /pg/data)
        backup_info = build_test_backup_info(
            server=backup_manager.server,
            pgdata="/pg/data",
            config_file="/etc/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/data/pg_ident.conf",
            begin_xlog="0/2000028",
            begin_wal="000000010000000000000002",
            begin_offset=28)
        backup_info.save()
        # This is to check that all the preparation is done correctly
        assert os.path.exists(backup_info.filename)

        backup_manager.executor.backup_copy(backup_info)

        # Expected rsync sequence: one rsync + smart_copy per tablespace
        # (OIDs 16387 and 16405 come from build_test_backup_info defaults),
        # then one rsync + smart_copy for PGDATA excluding the tablespace
        # mount points, then direct copies of pg_control and of the
        # external configuration file.
        assert rsync_mock.mock_calls == [
            mock.call(check=True, network_compression=False, args=[],
                      bwlimit=None, ssh='ssh',
                      ssh_options=['-c', '"arcfour"', '-p', '22',
                                   '*****@*****.**', '-o',
                                   'BatchMode=yes', '-o',
                                   'StrictHostKeyChecking=no']),
            mock.call().smart_copy(':/fake/location/',
                                   backup_info.get_data_directory(16387),
                                   None, None),
            mock.call(check=True, network_compression=False, args=[],
                      bwlimit=None, ssh='ssh',
                      ssh_options=['-c', '"arcfour"', '-p', '22',
                                   '*****@*****.**', '-o',
                                   'BatchMode=yes', '-o',
                                   'StrictHostKeyChecking=no']),
            mock.call().smart_copy(':/another/location/',
                                   backup_info.get_data_directory(16405),
                                   None, None),
            mock.call(network_compression=False,
                      exclude_and_protect=['/pg_tblspc/16387',
                                           '/pg_tblspc/16405'],
                      args=[], bwlimit=None, ssh='ssh',
                      ssh_options=['-c', '"arcfour"', '-p', '22',
                                   '*****@*****.**',
                                   '-o', 'BatchMode=yes',
                                   '-o', 'StrictHostKeyChecking=no']),
            mock.call().smart_copy(':/pg/data/',
                                   backup_info.get_data_directory(),
                                   None, None),
            mock.call()(
                ':/pg/data/global/pg_control',
                '%s/global/pg_control' % backup_info.get_data_directory()),
            mock.call()(':/etc/postgresql.conf',
                        backup_info.get_data_directory())]
    def test_map_temporary_config_files(self, tmpdir):
        """
        Test the method that prepares configuration files
        for the final steps of a recovery
        """
        # Temporary directory the executor copies configuration files into
        tempdir = tmpdir.mkdir("tempdir")
        recovery_info = {
            "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
            "tempdir": tempdir.strpath,
            "temporary_configuration_files": [],
            "results": {"changes": [], "warnings": [], "missing_files": []},
        }

        # Fake backup data directory holding the two configuration files
        backup_info = testing_helpers.build_test_backup_info()
        backup_info.config.basebackups_directory = tmpdir.strpath
        datadir = tmpdir.mkdir(backup_info.backup_id).mkdir("data")
        postgresql_conf_local = datadir.join("postgresql.conf")
        postgresql_auto_local = datadir.join("postgresql.auto.conf")
        config_content = "archive_command = something\n" "data_directory = something"
        postgresql_conf_local.write(config_content)
        postgresql_auto_local.write(config_content)

        # Run the mapping step through a RecoveryExecutor (server and
        # backup manager are mocks)
        backup_manager = testing_helpers.build_backup_manager()
        executor = RecoveryExecutor(backup_manager)
        executor._map_temporary_config_files(recovery_info, backup_info, "ssh@something")

        # Both files must now exist in tempdir with unchanged content
        for copied, source in (
                (tempdir.join("postgresql.conf"), postgresql_conf_local),
                (tempdir.join("postgresql.auto.conf"), postgresql_auto_local)):
            assert copied.check()
            assert copied.computehash() == source.computehash()
        # hba/ident files were never created, so they are reported missing
        assert recovery_info["results"]["missing_files"] == ["pg_hba.conf", "pg_ident.conf"]
Example #10
0
    def test_pgespresso_stop_backup(self, tbs_map_mock, label_mock):
        """
        Basic test for the pgespresso_stop_backup method
        """
        # Server configured for concurrent (pgespresso) backups
        server = build_mocked_server(main_conf={
            'backup_options':
            BackupOptions.CONCURRENT_BACKUP
        })
        manager = build_backup_manager(server=server)

        # Simulate the pgespresso_stop_backup() reply on PostgreSQL 9.5
        server.postgres.server_version = 90500
        stop_time = datetime.datetime.now()
        espresso_reply = {
            'end_wal': "000000060000A25700000044",
            'timestamp': stop_time
        }
        server.postgres.pgespresso_stop_backup.return_value = espresso_reply

        backup_info = build_test_backup_info(timeline=6)
        manager.executor.strategy.stop_backup(backup_info)

        # The end position must be derived from the returned end WAL name
        assert backup_info.end_xlog == 'A257/44FFFFFF'
        assert backup_info.end_wal == '000000060000A25700000044'
        assert backup_info.end_offset == 0xFFFFFF
        assert backup_info.end_time == stop_time
Example #11
0
    def test_exclusive_stop_backup(self, stop_mock):
        """
        Basic test for the stop_backup method on an exclusive backup

        :param stop_mock: mimic the response of _pg_stop_backup
        """
        # Build a backup info and configure the mocks
        server = build_mocked_server(main_conf={
            'backup_options':
            BackupOptions.EXCLUSIVE_BACKUP
        })
        backup_manager = build_backup_manager(server=server)
        # Mock executor._pg_stop_backup(backup_info) call
        stop_time = datetime.datetime.now()
        # Tuple: (end xlog location, end WAL name, end offset, stop timestamp)
        stop_mock.return_value = ("266/4A9C1EF8",
                                  "00000010000002660000004A",
                                  10231544,
                                  stop_time)

        backup_info = build_test_backup_info()
        backup_manager.executor.strategy.stop_backup(backup_info)

        # check that the submitted values are stored inside the BackupInfo obj
        assert backup_info.end_xlog == '266/4A9C1EF8'
        assert backup_info.end_wal == '00000010000002660000004A'
        assert backup_info.end_offset == 10231544
        assert backup_info.end_time == stop_time
Example #12
0
 def test_backup_copy_with_included_files(self, rsync_mock, tmpdir, capsys):
     """
     Check that a WARNING is emitted when the backed-up PostgreSQL
     configuration uses include directives.

     :param rsync_mock: mock for the rsync command (was misspelled
         "rsync_moc"; the mock is injected positionally by the patch
         decorator, so the rename is safe)
     :param tmpdir: temporary directory
     :param capsys: fixture capturing stdout/stderr
     """
     backup_manager = build_backup_manager(global_conf={
         'barman_home': tmpdir.mkdir('home').strpath
     })
     # Create a backup info with additional configuration files
     backup_info = build_test_backup_info(
         server=backup_manager.server,
         pgdata="/pg/data",
         config_file="/etc/postgresql.conf",
         hba_file="/pg/data/pg_hba.conf",
         ident_file="/pg/data/pg_ident.conf",
         begin_xlog="0/2000028",
         begin_wal="000000010000000000000002",
         included_files=["/tmp/config/file.conf"],
         begin_offset=28)
     backup_info.save()
     # This is to check that all the preparation is done correctly
     assert os.path.exists(backup_info.filename)
     # Execute a backup
     backup_manager.executor.backup_copy(backup_info)
     out, err = capsys.readouterr()
     # check for the presence of the warning in the stderr
     assert "WARNING: The usage of include directives is not supported" in err
     # check that the additional configuration file is present in the output
     assert backup_info.included_files[0] in err
Example #13
0
    def test_concurrent_stop_backup(self, label_mock, stop_mock,):
        """
        Basic test for the stop_backup method on a concurrent backup

        :param label_mock: mimic the response of _write_backup_label
        :param stop_mock: mimic the response of _pgespresso_stop_backup
        """
        # Build a backup info and configure the mocks
        server = build_mocked_server(main_conf={
            'backup_options':
            BackupOptions.CONCURRENT_BACKUP
        })
        backup_manager = build_backup_manager(server=server)

        # Mock executor._pgespresso_stop_backup(backup_info) call
        stop_time = datetime.datetime.now()
        # Tuple: (end WAL name, stop timestamp)
        stop_mock.return_value = ("000000060000A25700000044", stop_time)

        backup_info = build_test_backup_info()
        backup_manager.executor.strategy.stop_backup(backup_info)

        # With pgespresso the end position is the start of the segment
        # following the returned end WAL (offset 0)
        assert backup_info.end_xlog == 'A257/45000000'
        assert backup_info.end_wal == '000000060000A25700000044'
        assert backup_info.end_offset == 0
        assert backup_info.end_time == stop_time
Example #14
0
    def test_load_backup_cache(self, tmpdir):
        """
        Check the loading of backups inside the backup_cache
        """
        # Backup manager rooted in a temporary barman home
        manager = build_backup_manager(
            name='TestServer',
            global_conf={'barman_home': tmpdir.strpath})

        # Make sure the cache is uninitialized
        assert manager._backup_cache is None

        # Persist one backup on disk so the loader can find it
        b_info = build_test_backup_info(
            backup_id='fake_backup_id',
            server=manager.server,
        )
        b_info.save()

        # Loading must populate the cache with an equivalent entry,
        # keyed by backup id
        manager._load_backup_cache()
        cached = manager._backup_cache[b_info.backup_id]
        assert cached.to_dict() == b_info.to_dict()
    def test_setup(self, rsync_mock):
        """
        Test the method that set up a recovery
        """
        backup_info = testing_helpers.build_test_backup_info()
        backup_manager = testing_helpers.build_backup_manager()
        executor = RecoveryExecutor(backup_manager)

        # _setup must create a temporary directory; _teardown removes it
        backup_info.version = 90300
        recovery_info = executor._setup(backup_info, None, "/tmp")
        assert os.path.exists(recovery_info["tempdir"])
        executor._teardown(recovery_info)
        assert not os.path.exists(recovery_info["tempdir"])

        # On version 9.3 postgresql.auto.conf must not be listed
        recovery_info = executor._setup(backup_info, None, "/tmp")
        executor._teardown(recovery_info)
        assert "postgresql.auto.conf" not in recovery_info["configuration_files"]

        # On version 9.4 postgresql.auto.conf must be listed
        backup_info.version = 90400
        recovery_info = executor._setup(backup_info, None, "/tmp")
        executor._teardown(recovery_info)
        assert "postgresql.auto.conf" in recovery_info["configuration_files"]

        # An invalid remote command must abort with SystemExit
        with pytest.raises(SystemExit):
            executor.server.path = None
            executor._setup(backup_info, "invalid", "/tmp")
Example #16
0
    def test_backup_cache_add(self, tmpdir):
        """
        Check the method responsible for the registration of a BackupInfo obj
        into the backups cache
        """
        # Backup manager with a minimal configuration
        manager = build_backup_manager(
            name='TestServer',
            global_conf={'barman_home': tmpdir.strpath})

        # Persist a test backup so it is also loadable from disk
        b_info = build_test_backup_info(
            backup_id='fake_backup_id',
            server=manager.server,
        )
        b_info.save()

        assert manager._backup_cache is None

        # Registering into an uninitialized cache forces a load from disk;
        # afterwards the very object we registered must be retrievable
        manager.backup_cache_add(b_info)
        assert manager.get_backup(b_info.backup_id) is b_info

        # Registering into an (empty) initialized cache must also work
        manager._backup_cache = {}
        manager.backup_cache_add(b_info)
        assert manager.get_backup(b_info.backup_id) is b_info
Example #17
0
    def test_get_wal_until_next_backup(self, get_backup_mock, tmpdir):
        """
        Simple test for the management of .history files
        """
        # A regular WAL file entry
        wfile_info = WalFileInfo()
        wfile_info.name = '000000010000000000000003'
        wfile_info.size = 42
        wfile_info.time = 43
        wfile_info.compression = None

        # A timeline history file entry
        history_info = WalFileInfo()
        history_info.name = '00000001.history'
        history_info.size = 42
        history_info.time = 43
        history_info.compression = None

        # Write both entries into a fresh xlog.db
        wals_dir = tmpdir.mkdir("wals")
        wals_dir.join("xlog.db").write(
            wfile_info.to_xlogdb_line() + history_info.to_xlogdb_line())

        # Fake backup whose WAL range covers the regular entry
        backup = build_test_backup_info(
            begin_wal='000000010000000000000001',
            end_wal='000000010000000000000004')

        # Real server reading the fake wals directory; the next backup is
        # mocked to start after the current one
        server = build_real_server(
            global_conf={
                "barman_lock_directory": tmpdir.mkdir('lock').strpath
            },
            main_conf={
                "wals_directory": wals_dir.strpath
            })
        get_backup_mock.return_value = build_test_backup_info(
            backup_id="1234567899",
            begin_wal='000000010000000000000005',
            end_wal='000000010000000000000009')

        # Collect the names read back from the xlogdb
        wal_names = [
            wal_file.name
            for wal_file in server.get_wal_until_next_backup(
                backup, include_history=True)
        ]
        # The .history file must be returned when include_history is set
        assert history_info.name in wal_names
    def test_first_backup(self):
        """
        first_backup must return the id of the first available backup for
        both the recovery-window and the redundancy retention policies.
        """
        server = build_mocked_server()

        # The same scenario is exercised for both policy flavours
        scenarios = (
            ('RECOVERY WINDOW OF 4 WEEKS', RecoveryWindowRetentionPolicy),
            ('REDUNDANCY 2', RedundancyRetentionPolicy),
        )
        for policy_text, policy_class in scenarios:
            rp = RetentionPolicyFactory.create(
                server,
                'retention_policy',
                policy_text)
            assert isinstance(rp, policy_class)

            # Build a BackupInfo object with status to DONE
            backup_info = build_test_backup_info(
                server=rp.server,
                backup_id='test1',
                end_time=datetime.now(tzlocal()))

            # instruct the get_available_backups method to return a map with
            # our mock as result and minimum_redundancy = 1
            rp.server.get_available_backups.return_value = {
                "test_backup": backup_info
            }
            rp.server.config.minimum_redundancy = 1
            # execute retention policy report
            assert rp.first_backup() == 'test_backup'
    def test_recovery_window_report(self, caplog):
        """
        Basic unit test of RecoveryWindowRetentionPolicy

        Given a mock simulating a Backup with status DONE and
        the end_date not over the point of recoverability,
        the report method of the RecoveryWindowRetentionPolicy class must mark
        it as valid
        """
        server = build_mocked_server()
        rp = RetentionPolicyFactory.create(
            server,
            'retention_policy',
            'RECOVERY WINDOW OF 4 WEEKS')
        assert isinstance(rp, RecoveryWindowRetentionPolicy)

        # Build a BackupInfo object with status to DONE
        backup_info = build_test_backup_info(
            server=rp.server,
            backup_id='test1',
            end_time=datetime.now(tzlocal()))

        # NOTE(review): the three map entries share the SAME BackupInfo
        # object, so every mutation of end_time below affects all of them.
        # The report relies on the backup ids for ordering — confirm this
        # aliasing is intentional.
        backup_source = {'test_backup3': backup_info}
        # Add a obsolete backup
        backup_info.end_time = datetime.now(tzlocal()) - timedelta(weeks=5)
        backup_source['test_backup2'] = backup_info
        # Add a second obsolete backup
        backup_info.end_time = datetime.now(tzlocal()) - timedelta(weeks=6)
        backup_source['test_backup'] = backup_info
        rp.server.get_available_backups.return_value = backup_source
        # instruct the get_available_backups method to return a map with
        # our mock as result and minimum_redundancy = 1
        rp.server.config.minimum_redundancy = 1
        rp.server.config.name = "test"
        # execute retention policy report
        report = rp.report()
        # check that our mock is valid for the retention policy
        assert report == {'test_backup3': 'VALID',
                          'test_backup2': 'OBSOLETE',
                          'test_backup': 'OBSOLETE'}

        # Expect a ValueError if passed context is invalid
        with pytest.raises(ValueError):
            rp.report(context='invalid')
        # Set a new minimum_redundancy parameter, enforcing the usage of the
        # configuration parameter instead of the retention policy default
        rp.server.config.minimum_redundancy = 4
        # Make sure WARNING records are captured before producing the report
        caplog.set_level(logging.WARNING)
        # execute retention policy report
        rp.report()
        # Check for the warning inside the log
        log = caplog.text
        warn = "WARNING  Keeping obsolete backup test_backup2 for " \
               "server test (older than %s) due to minimum redundancy " \
               "requirements (4)\n" % rp._point_of_recoverability()
        # BUGFIX: str.find() returns -1 (truthy) when the text is missing,
        # so "assert log.find(warn)" could never fail. Check membership.
        assert warn in log
Example #20
0
    def test_stop_backup(self, espressostop_mock, stop_mock):
        """
        Basic test for the stop_backup method

        :param espressostop_mock: mimic the response of pg_espresso_stop_backup
        :param stop_mock: mimic the response of pg_stop_backup
        """
        backup_manager = build_backup_manager()

        # --- Scenario 1: stop an exclusive backup -------------------------
        exclusive_stop_time = datetime.datetime.now()
        stop_mock.return_value = ("266/4A9C1EF8",
                                  "00000010000002660000004A",
                                  10231544,
                                  exclusive_stop_time)

        backup_info = build_test_backup_info()
        backup_manager.executor.stop_backup(backup_info)

        # The values returned by pg_stop_backup end up in the BackupInfo
        assert backup_info.end_xlog == '266/4A9C1EF8'
        assert backup_info.end_wal == '00000010000002660000004A'
        assert backup_info.end_offset == 10231544
        assert backup_info.end_time == exclusive_stop_time

        # --- Scenario 2: stop a concurrent backup -------------------------
        backup_manager.executor.config.backup_options = [
            BackupOptions.CONCURRENT_BACKUP]

        concurrent_stop_time = datetime.datetime.now()
        espressostop_mock.return_value = ("000000060000A25700000044",
                                          concurrent_stop_time)

        backup_info = build_test_backup_info()
        backup_manager.executor.stop_backup(backup_info)

        # With pgespresso the end position is the start of the segment
        # following the returned end WAL (offset 0)
        assert backup_info.end_xlog == 'A257/45000000'
        assert backup_info.end_wal == '000000060000A25700000044'
        assert backup_info.end_offset == 0
        assert backup_info.end_time == concurrent_stop_time
Example #21
0
    def test_result_list_backup(self, capsys):
        """
        Test ConsoleOutputWriter.result_list_backup in minimal mode and in
        verbose mode, for both DONE and FAILED backups.
        """
        # mock the backup info
        bi = build_test_backup_info()
        backup_size = 12345
        wal_size = 54321
        retention_status = 'test status'

        writer = output.ConsoleOutputWriter()

        # test minimal
        writer.init_list_backup(bi.server_name, True)
        writer.result_list_backup(bi, backup_size, wal_size, retention_status)
        writer.close()
        (out, err) = capsys.readouterr()
        assert writer.minimal
        assert bi.backup_id in out
        assert err == ''

        # test status=DONE output
        writer.init_list_backup(bi.server_name, False)
        writer.result_list_backup(bi, backup_size, wal_size, retention_status)
        writer.close()
        (out, err) = capsys.readouterr()
        assert not writer.minimal
        assert bi.server_name in out
        assert bi.backup_id in out
        assert str(bi.end_time.ctime()) in out
        for name, _, location in bi.tablespaces:
            # BUGFIX: the original asserted the non-empty formatted string
            # itself (always truthy); it must be checked against the output.
            assert '%s:%s' % (name, location) in out
        assert 'Size: ' + pretty_size(backup_size) in out
        assert 'WAL Size: ' + pretty_size(wal_size) in out
        assert err == ''

        # test status = FAILED output
        bi = build_test_backup_info(status=BackupInfo.FAILED)
        writer.init_list_backup(bi.server_name, False)
        writer.result_list_backup(bi, backup_size, wal_size, retention_status)
        writer.close()
        (out, err) = capsys.readouterr()
        assert not writer.minimal
        assert bi.server_name in out
        assert bi.backup_id in out
        assert bi.status in out
Beispiel #22
0
    def test_archive_wal_timeline_lower_than_backup(self, tmpdir, capsys):
        """
        Test archive-wal command behaviour when the WAL files are older than
        the first backup of a server.

        Expect it to archive the files anyway
        """
        # Set up a real backup manager plus a fake oldest backup that
        # starts on timeline 2
        manager = build_backup_manager(
            name='TestServer',
            global_conf={'barman_home': tmpdir.strpath})
        backup = build_test_backup_info(
            backup_id='fake_backup_id',
            server=manager.server,
            begin_wal='000000020000000000000002',
            timeline=2)
        backup.save()
        # No compression; the fake backup is the server's first backup
        manager.compression_manager.get_compressor.return_value = None
        manager.server.get_backup.return_value = backup
        # Lay out the on-disk structure: an incoming WAL on timeline 1,
        # an errors directory, an empty archive dir and xlog catalog
        base = tmpdir.join('main')
        wal_name = '000000010000000000000001'
        incoming_wal = base.join('incoming').join(wal_name)
        incoming_wal.ensure()
        base.mkdir('errors')
        wals_dir = base.join('wals')
        wals_dir.ensure(dir=True)
        catalog = wals_dir.join('xlog.db')
        catalog.ensure()
        # Route the server's xlogdb context manager to the test catalog
        manager.server.xlogdb.return_value.__enter__.return_value = \
            catalog.open(mode='a')
        manager.server.archivers = [FileWalArchiver(manager)]

        manager.archive_wal()

        # The WAL must now be recorded in the wal catalog...
        with catalog.open() as f:
            assert wal_name in str(f.readline())
        # ...and physically archived inside its hashed directory
        expected_path = os.path.join(wals_dir.strpath,
                                     barman.xlog.hash_dir(wal_name),
                                     wal_name)
        assert os.path.exists(expected_path)
        # The command output must report the archived file
        out, err = capsys.readouterr()
        assert ("\t%s\n" % wal_name) in out
Beispiel #23
0
    def test_available_backups(self, tmpdir):
        """
        Test the get_available_backups that retrieves all the
        backups from the backups_cache using a set of backup status as filter
        """
        # Backup manager with a minimal configuration
        manager = build_backup_manager(
            name='TestServer',
            global_conf={'barman_home': tmpdir.strpath})

        # Persist one backup per relevant status: DONE and FAILED
        done_backup = build_test_backup_info(
            backup_id='fake_backup_id',
            server=manager.server,
            status=BackupInfo.DONE)
        done_backup.save()
        failed_backup = build_test_backup_info(
            backup_id='failed_backup_id',
            server=manager.server,
            status=BackupInfo.FAILED)
        failed_backup.save()

        # The backup cache must still be cold at this point
        assert manager._backup_cache is None

        # Request only the backups in DONE state
        result = manager.get_available_backups((BackupInfo.DONE,))

        # The DONE backup is returned while the FAILED one is filtered out
        assert result[done_backup.backup_id].to_dict() == (
            done_backup.to_dict())
        assert failed_backup.backup_id not in result
        assert len(result) == 1
    def test_redundancy_report(self, caplog):
        """
        Test of the management of the minimum_redundancy parameter
        into the backup_report method of the RedundancyRetentionPolicy class

        """
        server = build_mocked_server()
        rp = RetentionPolicyFactory.create(
            server,
            'retention_policy',
            'REDUNDANCY 2')
        assert isinstance(rp, RedundancyRetentionPolicy)

        # Build a BackupInfo object with status to DONE
        backup_info = build_test_backup_info(
            server=rp.server,
            backup_id='test1',
            end_time=datetime.now(tzlocal()))

        # instruct the get_available_backups method to return a map with
        # our mock as result and minimum_redundancy = 1
        rp.server.get_available_backups.return_value = {
            "test_backup": backup_info,
            "test_backup2": backup_info,
            "test_backup3": backup_info,
        }
        rp.server.config.minimum_redundancy = 1
        # execute retention policy report
        report = rp.report()
        # check that our mock is valid for the retention policy because
        # the total number of valid backups is lower than the retention policy
        # redundancy.
        assert report == {'test_backup': BackupInfo.OBSOLETE,
                          'test_backup2': BackupInfo.VALID,
                          'test_backup3': BackupInfo.VALID}
        # Expect a ValueError if passed context is invalid
        with pytest.raises(ValueError):
            rp.report(context='invalid')
        # Set a new minimum_redundancy parameter, enforcing the usage of the
        # configuration parameter instead of the retention policy default
        rp.server.config.minimum_redundancy = 3
        # BUG FIX: the capture level must be set *before* the call whose
        # log output we want to inspect
        caplog.set_level(logging.WARNING)
        # execute retention policy report
        rp.report()
        # Check for the warning inside the log.
        # BUG FIX: the original used a bare `log.find(...)` as the
        # assertion; str.find returns -1 when the text is missing, which is
        # truthy, so the assertion could never fail. Check the message with
        # a substring test instead (the "WARNING" level prefix is omitted
        # because caplog.text interleaves the logger name and location
        # between the level and the message).
        log = caplog.text
        assert ("Retention policy redundancy (2) "
                "is lower than the required minimum redundancy (3). "
                "Enforce 3.") in log
Beispiel #25
0
    def test_archive_wal_timeline_lower_than_backup(self, tmpdir, capsys):
        """
        Test archive-wal command behaviour when the WAL files are older than
        the first backup of a server.

        Expect it to trash WAL files
        """
        # Build a real backup manager and a fake backup
        backup_manager = build_backup_manager(name="TestServer", global_conf={"barman_home": tmpdir.strpath})
        # The oldest backup of the server starts on timeline 2...
        b_info = build_test_backup_info(
            backup_id="fake_backup_id", server=backup_manager.server, begin_wal="000000020000000000000002", timeline=2
        )
        b_info.save()
        # Build the basic folder structure and files
        backup_manager.compression_manager.get_compressor.return_value = None
        backup_manager.server.get_backup.return_value = b_info
        basedir = tmpdir.join("main")
        incoming_dir = basedir.join("incoming")
        basedir.mkdir("errors")
        archive_dir = basedir.join("wals")
        xlog_db = archive_dir.join("xlog.db")
        # ...while the incoming WAL file belongs to timeline 1 (first 8 hex
        # digits of the WAL name encode the timeline)
        wal_name = "000000010000000000000001"
        wal_timeline = int(wal_name[0:8], 16)
        wal_file = incoming_dir.join(wal_name)
        wal_file.ensure()
        archive_dir.ensure(dir=True)
        xlog_db.ensure()
        # Route the server's xlogdb context manager to the test catalog file
        backup_manager.server.xlogdb.return_value.__enter__.return_value = xlog_db.open(mode="a")
        backup_manager.server.archivers = [FileWalArchiver(backup_manager)]

        backup_manager.archive_wal()

        with xlog_db.open() as f:
            line = str(f.readline())
            assert wal_name not in line
        # Check that the WAL file is not present inside the wal catalog
        wal_path = os.path.join(archive_dir.strpath, barman.xlog.hash_dir(wal_name), wal_name)
        # Check that the wal file have not been archived
        assert not os.path.exists(wal_path)
        # Check the output for the removal of the wal file
        out, err = capsys.readouterr()
        assert (
            "The timeline of the WAL file %s (%s), is lower "
            "than the one of the oldest backup of "
            "server %s (%s). Moving the WAL in "
            "the error directory" % (wal_name, wal_timeline, backup_manager.config.name, b_info.timeline)
        ) in out
Beispiel #26
0
    def test_base_archive_wal(self, tmpdir):
        """
        Basic archiving test

        Provide a WAL file and check for the correct location of the file at
        the end of the process
        """
        # Real backup manager plus a fake backup starting at our WAL
        manager = build_backup_manager(
            name='TestServer',
            global_conf={'barman_home': tmpdir.strpath})
        wal_name = '000000010000000000000001'
        backup = build_test_backup_info(
            backup_id='fake_backup_id',
            server=manager.server,
            begin_wal='000000010000000000000001')
        backup.save()
        manager.server.get_backup.return_value = backup
        manager.compression_manager.get_compressor.return_value = None
        # On-disk layout: an incoming WAL file, the archive directory
        # and an empty xlog catalog
        base = tmpdir.join('main')
        incoming_wal = base.join('incoming').join(wal_name)
        incoming_wal.ensure()
        wals_dir = base.join('wals')
        wals_dir.ensure(dir=True)
        catalog = wals_dir.join('xlog.db')
        catalog.ensure()
        # Route the server's xlogdb context manager to the test catalog
        manager.server.xlogdb.return_value.__enter__.return_value = \
            catalog.open(mode='a')
        manager.server.archivers = [FileWalArchiver(manager)]

        manager.archive_wal()

        # Check for the presence of the wal file in the wal catalog
        with catalog.open() as f:
            assert wal_name in str(f.readline())
        # The file must be gone from the incoming directory...
        assert not os.path.exists(incoming_wal.strpath)
        # ...and archived to its hashed destination directory
        archived_path = os.path.join(wals_dir.strpath,
                                     barman.xlog.hash_dir(wal_name),
                                     wal_name)
        assert os.path.exists(archived_path)
 def test_generate_recovery_conf(self, rsync_pg_mock, tmpdir):
     """
     Test the generation of recovery.conf file
     """
     # Build basic folder/files structure
     recovery_info = {
         "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
         "tempdir": tmpdir.strpath,
         "results": {"changes": [], "warnings": []},
         "get_wal": False,
     }
     backup_info = testing_helpers.build_test_backup_info()
     dest = tmpdir.mkdir("destination")
     # Build a recovery executor using a real server
     server = testing_helpers.build_real_server()
     executor = RecoveryExecutor(server.backup_manager)
     # Remote recovery with name, time and timeline targets but no xid
     executor._generate_recovery_conf(
         recovery_info,
         backup_info,
         dest.strpath,
         True,
         "remote@command",
         "target_name",
         "2015-06-03 16:11:03.71038+02",
         "2",
         "",
     )
     # Check that the recovery.conf file exists
     recovery_conf_file = tmpdir.join("recovery.conf")
     assert recovery_conf_file.check()
     # Parse the generated recovery.conf
     recovery_conf = {}
     for line in recovery_conf_file.readlines():
         # Each line is "key = value"; split on the first '=' only
         key, value = (s.strip() for s in line.strip().split("=", 1))
         recovery_conf[key] = value
     # check for contents: only the requested targets must be present,
     # string values are single-quoted by the generator
     assert "recovery_end_command" in recovery_conf
     assert "recovery_target_time" in recovery_conf
     assert "recovery_target_timeline" in recovery_conf
     assert "recovery_target_xid" not in recovery_conf
     assert "recovery_target_name" in recovery_conf
     assert recovery_conf["recovery_end_command"] == "'rm -fr barman_xlog'"
     assert recovery_conf["recovery_target_time"] == "'2015-06-03 16:11:03.71038+02'"
     assert recovery_conf["recovery_target_timeline"] == "2"
     assert recovery_conf["recovery_target_name"] == "'target_name'"
    def test_set_pitr_targets(self, tmpdir):
        """
        Evaluate targets for point in time recovery
        """
        # Build basic folder/files structure
        tempdir = tmpdir.mkdir('temp_dir')
        dest = tmpdir.mkdir('dest')
        wal_dest = tmpdir.mkdir('wal_dest')
        recovery_info = {
            'configuration_files': ['postgresql.conf', 'postgresql.auto.conf'],
            'tempdir': tempdir.strpath,
            'results': {'changes': [], 'warnings': []},
            'is_pitr': False,
            'wal_dest': wal_dest.strpath,
            'get_wal': False,
        }
        backup_info = testing_helpers.build_test_backup_info()
        server = testing_helpers.build_mocked_server()
        backup_manager = Mock(server=server, config=server.config)
        # Build a recovery executor
        executor = RecoveryExecutor(backup_manager)
        # All recovery targets empty: PITR must stay disabled
        executor.set_pitr_targets(recovery_info, backup_info,
                                  dest.strpath,
                                  '', '', '', '')
        # Test with empty values (no PITR)
        assert recovery_info['target_epoch'] is None
        assert recovery_info['target_datetime'] is None
        assert recovery_info['wal_dest'] == wal_dest.strpath
        # Test for PITR targets
        executor.set_pitr_targets(recovery_info, backup_info,
                                  dest.strpath,
                                  'target_name',
                                  '2015-06-03 16:11:03.71038+02',
                                  '2',
                                  None,)
        target_datetime = dateutil.parser.parse(
            '2015-06-03 16:11:03.710380+02:00')
        # target_epoch is the target time as seconds since the epoch,
        # including the fractional (microsecond) part
        target_epoch = (time.mktime(target_datetime.timetuple()) +
                        (target_datetime.microsecond / 1000000.))

        assert recovery_info['target_datetime'] == target_datetime
        assert recovery_info['target_epoch'] == target_epoch
        # With PITR enabled the WALs are fetched under the destination dir
        assert recovery_info['wal_dest'] == dest.join('barman_xlog').strpath
Beispiel #29
0
 def test_setup(self):
     """
     Test the method that set up a recovery
     """
     backup_info = testing_helpers.build_test_backup_info()
     server = testing_helpers.build_mocked_server()
     # A bare Mock is enough: setup only reads server and config
     backup_manager = Mock(server=server, config=server.config)
     executor = RecoveryExecutor(backup_manager)
     backup_info.version = 90300
     # no postgresql.auto.conf on version 9.3
     ret = executor.setup(backup_info, None, "/tmp")
     assert "postgresql.auto.conf" not in ret['configuration_files']
     # Check the present for postgresql.auto.conf on version 9.4
     backup_info.version = 90400
     ret = executor.setup(backup_info, None, "/tmp")
     assert "postgresql.auto.conf" in ret['configuration_files']
     # Receive a error if the remote command is invalid
     with pytest.raises(SystemExit):
         executor.setup(backup_info, "invalid", "/tmp")
Beispiel #30
0
    def test_concurrent_stop_backup(self, tbs_map_mock, label_mock,):
        """
        Basic test for the stop_backup method for 9.6 concurrent api

        :param label_mock: mimic the response of _write_backup_label
        """
        # Build a backup info and configure the mocks
        server = build_mocked_server(main_conf={
            'backup_options':
            BackupOptions.CONCURRENT_BACKUP
        })
        backup_manager = build_backup_manager(server=server)

        stop_time = datetime.datetime.now()
        # This is a pg 9.6
        server.postgres.server_version = 90600
        # Mock stop backup call for the new api method
        start_time = datetime.datetime.now(tz.tzlocal()).replace(microsecond=0)
        server.postgres.stop_concurrent_backup.return_value = {
            'location': "A266/4A9C1EF8",
            'timeline': 6,
            'timestamp': stop_time,
            'backup_label':
                'START WAL LOCATION: A257/44B4C0D8 '
                # Timeline 0 simulates a bug in PostgreSQL 9.6 beta2
                '(file 000000000000A25700000044)\n'
                'START TIME: %s\n' %
                start_time.strftime('%Y-%m-%d %H:%M:%S %Z')
        }

        backup_info = build_test_backup_info()
        backup_manager.executor.strategy.stop_backup(backup_info)

        # The end WAL name must be derived from the stop location and the
        # timeline returned by pg_stop_backup, not from the (buggy) label
        assert backup_info.end_xlog == 'A266/4A9C1EF8'
        assert backup_info.end_wal == '000000060000A2660000004A'
        assert backup_info.end_offset == 0x9C1EF8
        assert backup_info.end_time == stop_time
        # The backup label is stored verbatim as returned by the server
        assert backup_info.backup_label == (
            'START WAL LOCATION: A257/44B4C0D8 '
            '(file 000000000000A25700000044)\n'
            'START TIME: %s\n' %
            start_time.strftime('%Y-%m-%d %H:%M:%S %Z')
        )
    def test_set_pitr_targets(self, tmpdir):
        """
        Evaluate targets for point in time recovery.

        Covers the no-PITR case, valid time/name/timeline targets, a target
        time earlier than the backup end time, and the per-version
        validation of target actions, recovery_target_lsn and
        recovery_target_immediate.
        """
        # Build basic folder/files structure
        tempdir = tmpdir.mkdir("temp_dir")
        dest = tmpdir.mkdir("dest")
        wal_dest = tmpdir.mkdir("wal_dest")
        recovery_info = {
            "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
            "tempdir": tempdir.strpath,
            "results": {"changes": [], "warnings": []},
            "is_pitr": False,
            "wal_dest": wal_dest.strpath,
            "get_wal": False,
        }
        backup_info = testing_helpers.build_test_backup_info(
            end_time=dateutil.parser.parse("2015-06-03 16:11:01.71038+02")
        )
        backup_manager = testing_helpers.build_backup_manager()
        # Build a recovery executor
        executor = RecoveryExecutor(backup_manager)

        def set_pitr(target_name, target_time, target_tli, target_xid,
                     target_lsn, target_immediate, target_action):
            # Local helper removing the repetition of the three fixed
            # arguments (recovery_info, backup_info, destination) from the
            # thirteen _set_pitr_targets invocations below
            executor._set_pitr_targets(
                recovery_info,
                backup_info,
                dest.strpath,
                target_name,
                target_time,
                target_tli,
                target_xid,
                target_lsn,
                target_immediate,
                target_action,
            )

        # Test with empty values (no PITR)
        set_pitr("", "", "", "", "", False, None)
        assert recovery_info["target_epoch"] is None
        assert recovery_info["target_datetime"] is None
        assert recovery_info["wal_dest"] == wal_dest.strpath

        # Test for PITR targets
        set_pitr("target_name", "2015-06-03 16:11:03.71038+02", "2",
                 None, "", False, None)
        target_datetime = dateutil.parser.parse("2015-06-03 16:11:03.710380+02:00")
        # target_epoch is the target time as seconds since the epoch,
        # including the fractional (microsecond) part
        target_epoch = time.mktime(target_datetime.timetuple()) + (
            target_datetime.microsecond / 1000000.0
        )

        assert recovery_info["target_datetime"] == target_datetime
        assert recovery_info["target_epoch"] == target_epoch
        assert recovery_info["wal_dest"] == dest.join("barman_wal").strpath

        # Test for too early PITR target
        with pytest.raises(RecoveryInvalidTargetException) as exc_info:
            set_pitr(None, "2015-06-03 16:11:00.71038+02", None,
                     None, None, False, None)
        assert (
            str(exc_info.value) == "The requested target time "
            "2015-06-03 16:11:00.710380+02:00 "
            "is before the backup end time "
            "2015-06-03 16:11:01.710380+02:00"
        )

        # Tests for PostgreSQL < 9.1: no target action is allowed
        backup_info.version = 90000
        with pytest.raises(RecoveryTargetActionException) as exc_info:
            set_pitr("target_name", "2015-06-03 16:11:03.71038+02", "2",
                     None, None, False, "pause")
        assert (
            str(exc_info.value) == "Illegal target action 'pause' "
            "for this version of PostgreSQL"
        )

        # Tests for PostgreSQL between 9.1 and 9.4 included:
        # only 'pause' is supported, via pause_at_recovery_target
        backup_info.version = 90100
        set_pitr("target_name", "2015-06-03 16:11:03.71038+02", "2",
                 None, None, False, None)
        assert "pause_at_recovery_target" not in recovery_info

        set_pitr("target_name", "2015-06-03 16:11:03.71038+02", "2",
                 None, None, False, "pause")
        assert recovery_info["pause_at_recovery_target"] == "on"
        del recovery_info["pause_at_recovery_target"]

        with pytest.raises(RecoveryTargetActionException) as exc_info:
            set_pitr("target_name", "2015-06-03 16:11:03.71038+02", "2",
                     None, None, False, "promote")
        assert (
            str(exc_info.value) == "Illegal target action 'promote' "
            "for this version of PostgreSQL"
        )

        # Tests for PostgreSQL >= 9.5: recovery_target_action supports
        # 'pause' and 'promote', anything else is rejected
        backup_info.version = 90500
        set_pitr("target_name", "2015-06-03 16:11:03.71038+02", "2",
                 None, None, False, "pause")
        assert recovery_info["recovery_target_action"] == "pause"

        set_pitr("target_name", "2015-06-03 16:11:03.71038+02", "2",
                 None, None, False, "promote")
        assert recovery_info["recovery_target_action"] == "promote"

        with pytest.raises(RecoveryTargetActionException) as exc_info:
            set_pitr("target_name", "2015-06-03 16:11:03.71038+02", "2",
                     None, None, False, "unavailable")
        assert (
            str(exc_info.value) == "Illegal target action 'unavailable' "
            "for this version of PostgreSQL"
        )

        # Recovery target action should not be available is PITR is not
        # enabled
        backup_info.version = 90500
        with pytest.raises(RecoveryTargetActionException) as exc_info:
            set_pitr(None, None, None, None, None, False, "pause")
        assert (
            str(exc_info.value) == "Can't enable recovery target action "
            "when PITR is not required"
        )

        # Test that we are not using target_lsn with a version < 10
        backup_info.version = 90500
        with pytest.raises(RecoveryInvalidTargetException) as exc_info:
            set_pitr(None, None, None, None, 10000, False, "pause")
        assert (
            str(exc_info.value) == "Illegal use of recovery_target_lsn "
            "'10000' for this version "
            "of PostgreSQL "
            "(version 10 minimum required)"
        )

        # Test that we are not using target_immediate with a version < 9.4
        backup_info.version = 90300
        with pytest.raises(RecoveryInvalidTargetException) as exc_info:
            set_pitr(None, None, None, None, None, True, "pause")
        assert (
            str(exc_info.value) == "Illegal use of "
            "recovery_target_immediate "
            "for this version "
            "of PostgreSQL "
            "(version 9.4 minimum required)"
        )
    def test_generate_recovery_conf_pre12(self, rsync_pg_mock, tmpdir):
        """
        Test the generation of recovery.conf file (PostgreSQL < 12).

        Checks the recovery targets written to recovery.conf as well as the
        handling of the 'pause_at_recovery_target', 'recovery_target_action'
        and 'standby_mode' settings.
        """
        # Build basic folder/files structure
        recovery_info = {
            "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
            "tempdir": tmpdir.strpath,
            "results": {"changes": [], "warnings": []},
            "get_wal": False,
        }
        backup_info = testing_helpers.build_test_backup_info()
        dest = tmpdir.mkdir("destination")

        # Build a recovery executor using a real server
        server = testing_helpers.build_real_server()
        executor = RecoveryExecutor(server.backup_manager)

        def generate_conf(standby_mode):
            # Local helper: every step of this test calls
            # _generate_recovery_conf with the same remote command and
            # recovery targets; only standby_mode varies
            executor._generate_recovery_conf(
                recovery_info,
                backup_info,
                dest.strpath,
                True,
                True,
                "remote@command",
                "target_name",
                "2015-06-03 16:11:03.71038+02",
                "2",
                "",
                "",
                standby_mode,
            )

        def parse_conf():
            # The recovery.conf file must exist; parse and return it
            recovery_conf_file = tmpdir.join("recovery.conf")
            assert recovery_conf_file.check()
            return testing_helpers.parse_recovery_conf(recovery_conf_file)

        generate_conf(None)
        recovery_conf = parse_conf()
        # check for contents: only the requested targets must be present,
        # string values are single-quoted by the generator
        assert "recovery_end_command" in recovery_conf
        assert "recovery_target_time" in recovery_conf
        assert "recovery_target_timeline" in recovery_conf
        assert "recovery_target_xid" not in recovery_conf
        assert "recovery_target_lsn" not in recovery_conf
        assert "recovery_target_name" in recovery_conf
        assert "recovery_target" not in recovery_conf
        assert recovery_conf["recovery_end_command"] == "'rm -fr barman_wal'"
        assert recovery_conf["recovery_target_time"] == "'2015-06-03 16:11:03.71038+02'"
        assert recovery_conf["recovery_target_timeline"] == "2"
        assert recovery_conf["recovery_target_name"] == "'target_name'"

        # Test 'pause_at_recovery_target' recovery_info entry
        recovery_info["pause_at_recovery_target"] = "on"
        generate_conf(None)
        recovery_conf = parse_conf()
        assert recovery_conf["pause_at_recovery_target"] == "'on'"

        # Test 'recovery_target_action'
        del recovery_info["pause_at_recovery_target"]
        recovery_info["recovery_target_action"] = "pause"
        generate_conf(None)
        recovery_conf = parse_conf()
        assert recovery_conf["recovery_target_action"] == "'pause'"

        # Test 'standby_mode': True emits standby_mode = 'on',
        # while both False and None leave the setting out
        generate_conf(True)
        recovery_conf = parse_conf()
        assert recovery_conf["standby_mode"] == "'on'"

        generate_conf(False)
        recovery_conf = parse_conf()
        assert "standby_mode" not in recovery_conf

        generate_conf(None)
        recovery_conf = parse_conf()
        assert "standby_mode" not in recovery_conf
    def test_generate_recovery_conf(self, rsync_pg_mock, tmpdir):
        """
        Test the generation of recovery configuration
        :type tmpdir: py.path.local
        """
        # Build basic folder/files structure
        recovery_info = {
            "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
            "tempdir": tmpdir.strpath,
            "results": {"changes": [], "warnings": []},
            "get_wal": False,
        }
        backup_info = testing_helpers.build_test_backup_info(
            version=120000,
        )
        dest = tmpdir.mkdir("destination")

        # Build a recovery executor using a real server
        server = testing_helpers.build_real_server()
        executor = RecoveryExecutor(server.backup_manager)
        executor._generate_recovery_conf(
            recovery_info,
            backup_info,
            dest.strpath,
            True,
            True,
            "remote@command",
            "target_name",
            "2015-06-03 16:11:03.71038+02",
            "2",
            "",
            "",
            None,
        )

        # Check that the recovery.conf file doesn't exist
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert not recovery_conf_file.check()
        # Check that the recovery.signal file exists
        signal_file = tmpdir.join("recovery.signal")
        assert signal_file.check()
        # Parse the generated recovery configuration
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        # check for contents
        assert "recovery_end_command" in pg_auto_conf
        assert "recovery_target_time" in pg_auto_conf
        assert "recovery_target_timeline" in pg_auto_conf
        assert "recovery_target_xid" not in pg_auto_conf
        assert "recovery_target_lsn" not in pg_auto_conf
        assert "recovery_target_name" in pg_auto_conf
        assert "recovery_target" in pg_auto_conf
        assert pg_auto_conf["recovery_end_command"] == "'rm -fr barman_wal'"
        assert pg_auto_conf["recovery_target_time"] == "'2015-06-03 16:11:03.71038+02'"
        assert pg_auto_conf["recovery_target_timeline"] == "2"
        assert pg_auto_conf["recovery_target_name"] == "'target_name'"

        # Test 'pause_at_recovery_target' recovery_info entry
        signal_file.remove()
        recovery_info["pause_at_recovery_target"] = "on"
        executor._generate_recovery_conf(
            recovery_info,
            backup_info,
            dest.strpath,
            True,
            True,
            "remote@command",
            "target_name",
            "2015-06-03 16:11:03.71038+02",
            "2",
            "",
            "",
            None,
        )
        # Check that the recovery.conf file doesn't exist
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert not recovery_conf_file.check()
        # Check that the recovery.signal file exists
        signal_file = tmpdir.join("recovery.signal")
        assert signal_file.check()
        # Parse the generated recovery configuration
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        # Finally check pause_at_recovery_target value
        assert pg_auto_conf["pause_at_recovery_target"] == "'on'"

        # Test 'recovery_target_action'
        signal_file.remove()
        del recovery_info["pause_at_recovery_target"]
        recovery_info["recovery_target_action"] = "pause"
        executor._generate_recovery_conf(
            recovery_info,
            backup_info,
            dest.strpath,
            True,
            True,
            "remote@command",
            "target_name",
            "2015-06-03 16:11:03.71038+02",
            "2",
            "",
            "",
            None,
        )
        # Check that the recovery.conf file doesn't exist
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert not recovery_conf_file.check()
        # Check that the recovery.signal file exists
        signal_file = tmpdir.join("recovery.signal")
        assert signal_file.check()
        # Parse the generated recovery configuration
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        # Finally check recovery_target_action value
        assert pg_auto_conf["recovery_target_action"] == "'pause'"

        # Test 'standby_mode'
        signal_file.remove()
        executor._generate_recovery_conf(
            recovery_info,
            backup_info,
            dest.strpath,
            True,
            True,
            "remote@command",
            "target_name",
            "2015-06-03 16:11:03.71038+02",
            "2",
            "",
            "",
            True,
        )
        # Check that the recovery.conf file doesn't exist
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert not recovery_conf_file.check()
        # Check that the recovery.signal file doesn't exist
        wrong_signal_file = tmpdir.join("recovery.signal")
        assert not wrong_signal_file.check()
        # Check that the standby.signal file exists
        signal_file = tmpdir.join("standby.signal")
        assert signal_file.check()
        # Parse the generated recovery configuration
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        # standby_mode is not a valid configuration in PostgreSQL 12
        assert "standby_mode" not in pg_auto_conf

        signal_file.remove()
        executor._generate_recovery_conf(
            recovery_info,
            backup_info,
            dest.strpath,
            True,
            True,
            "remote@command",
            "target_name",
            "2015-06-03 16:11:03.71038+02",
            "2",
            "",
            "",
            False,
        )
        # Check that the recovery.conf file doesn't exist
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert not recovery_conf_file.check()
        # Check that the standby.signal file doesn't exist
        wrong_signal_file = tmpdir.join("standby.signal")
        assert not wrong_signal_file.check()
        # Check that the recovery.signal file exists
        signal_file = tmpdir.join("recovery.signal")
        assert signal_file.check()
        # Parse the generated recovery configuration
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        # standby_mode is not a valid configuration in PostgreSQL 12
        assert "standby_mode" not in pg_auto_conf

        signal_file.remove()
        executor._generate_recovery_conf(
            recovery_info,
            backup_info,
            dest.strpath,
            True,
            True,
            "remote@command",
            "target_name",
            "2015-06-03 16:11:03.71038+02",
            "2",
            "",
            "",
            None,
        )
        # Check that the recovery.conf file doesn't exist
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert not recovery_conf_file.check()
        # Check that the standby.signal file doesn't exist
        wrong_signal_file = tmpdir.join("standby.signal")
        assert not wrong_signal_file.check()
        # Check that the recovery.signal file exists
        signal_file = tmpdir.join("recovery.signal")
        assert signal_file.check()
        # Parse the generated recovery configuration
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        # standby_mode is not a valid configuration in PostgreSQL 12
        assert "standby_mode" not in pg_auto_conf
    def test_copy(self, rsync_ignore_mock, tmpdir):
        """
        Unit test for RsyncCopyController._copy's code
        """
        # Prepare a real server with its configuration and backup executor
        server = build_real_server(
            global_conf={'barman_home': tmpdir.mkdir('home').strpath})
        executor = server.backup_manager.executor

        # Build the copy controller, placing the safe_horizon between
        # the tmp/safe and tmp2/check timestamps
        horizon = datetime(2015, 2, 20, 19, 0, 0,
                           tzinfo=dateutil.tz.tzlocal())
        controller = RsyncCopyController(
            path=server.path,
            ssh_command=executor.ssh_command,
            ssh_options=executor.ssh_options,
            network_compression=server.config.network_compression,
            reuse_backup=None,
            safe_horizon=horizon)

        backup_info = build_test_backup_info(
            server=server,
            pgdata="/pg/data",
            config_file="/etc/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/data/pg_ident.conf",
            begin_xlog="0/2000028",
            begin_wal="000000010000000000000002",
            begin_offset=28)
        backup_info.save()
        # Sanity check: the preparation must have written backup.info
        assert os.path.exists(backup_info.filename)

        # An rsync mock to be passed straight through to _copy
        fake_rsync = mock.Mock(name='Rsync()')
        destination = backup_info.get_data_directory()

        # First run: checksum enabled
        controller._copy(fake_rsync,
                         ':/pg/data/',
                         destination,
                         '/path/to/file.list',
                         checksum=True)

        # _rsync_ignore_vanished_files must have received the
        # --checksum option together with the file list
        assert rsync_ignore_mock.mock_calls == [
            mock.call(fake_rsync,
                      ':/pg/data/',
                      destination,
                      '--files-from=/path/to/file.list',
                      '--checksum',
                      check=True),
        ]

        # Second run: checksum disabled
        rsync_ignore_mock.reset_mock()
        controller._copy(fake_rsync,
                         ':/pg/data/',
                         destination,
                         '/path/to/file.list',
                         checksum=False)

        # This time no --checksum option must be present
        assert rsync_ignore_mock.mock_calls == [
            mock.call(fake_rsync,
                      ':/pg/data/',
                      destination,
                      '--files-from=/path/to/file.list',
                      check=True),
        ]
Beispiel #35
0
    def test_check_sync_required(self):
        """
        Test the behaviour of the check_sync_required method,
        testing all the possible error conditions.
        """
        backup_name = "test_backup_name"
        backups = {"backups": {"test_backup_name": {}}}
        server = build_real_server()
        # Test 1 pass no exception: backup present on the primary and
        # not yet synced locally, so a sync is required
        server.check_sync_required(backup_name, backups, None)

        # Test 2 backup_name not in backups and no local backup. SyncError
        backup_name = "wrong_test_backup_name"
        with pytest.raises(SyncError):
            server.check_sync_required(backup_name, backups, None)

        # Test 3 backup_name not in backups, and incomplete local
        # copy. Remove partial sync and raise SyncToBeDeleted
        backup_name = "wrong_test_backup_name"
        local_backup_info_mock = build_test_backup_info(
            server=server, status=BackupInfo.FAILED)
        with pytest.raises(SyncToBeDeleted):
            server.check_sync_required(backup_name, backups,
                                       local_backup_info_mock)

        # Test 4 Local only copy, nothing to do.
        backup_name = "wrong_test_backup_name"
        local_backup_info_mock = build_test_backup_info(server=server,
                                                        status=BackupInfo.DONE)
        with pytest.raises(SyncNothingToDo):
            server.check_sync_required(backup_name, backups,
                                       local_backup_info_mock)

        # Test 5 already synced backup. Nothing to do.
        backup_name = "test_backup_name"
        local_backup_info_mock = build_test_backup_info(server=server,
                                                        status=BackupInfo.DONE)
        with pytest.raises(SyncNothingToDo):
            server.check_sync_required(backup_name, backups,
                                       local_backup_info_mock)
        # Test 6 check backup with local retention policies.
        # Case one: Redundancy retention 1
        # Expect "nothing to do"
        backup_name = "test_backup6"
        # build a new server with new configuration that uses retention
        # policies
        server = build_real_server(global_conf={
            "retention_policy": "redundancy 1",
            "wal_retention_policy": "main",
        })
        backups = {
            "backups": {
                "test_backup6":
                build_test_backup_info(server=server,
                                       backup_id="test_backup6").to_json()
            },
            "config": {
                "name": "test_server"
            },
        }
        with mock.patch("barman.server.Server.get_available_backups") as bk:
            local_backup_info_mock = None
            # Two local backups already satisfy "redundancy 1", so the
            # remote test_backup6 is obsolete by local retention policy
            bk.return_value = {
                "test_backup5":
                build_test_backup_info(server=server,
                                       backup_id="test_backup5"),
                "test_backup7":
                build_test_backup_info(server=server,
                                       backup_id="test_backup7"),
            }
            with pytest.raises(SyncNothingToDo):
                server.check_sync_required(backup_name, backups,
                                           local_backup_info_mock)

        # Test 7 check backup with local retention policies.
        # Case two: Recovery window of 1 day
        # Expect "nothing to do"
        backup_name = "test_backup6"
        # build a new server with new configuration that uses retention
        # policies
        server = build_real_server(
            global_conf={
                "retention_policy": "RECOVERY WINDOW OF 1 day",
                "wal_retention_policy": "main",
            })
        backups = {
            "backups": {
                "test_backup6":
                build_test_backup_info(
                    server=server,
                    backup_id="test_backup6",
                    # end_time 3 days in the past puts the backup outside
                    # the 1-day recovery window
                    begin_time=(datetime.now(tz.tzlocal()) +
                                timedelta(days=4)),
                    end_time=(datetime.now(tz.tzlocal()) - timedelta(days=3)),
                ).to_json()
            },
            "config": {
                "name": "test_server"
            },
        }
        with mock.patch("barman.server.Server.get_available_backups") as bk:
            local_backup_info_mock = None
            bk.return_value = {
                "test_backup7":
                build_test_backup_info(
                    server=server,
                    backup_id="test_backup7",
                    begin_time=(datetime.now(tz.tzlocal()) +
                                timedelta(days=4)),
                    end_time=(datetime.now(tz.tzlocal()) - timedelta(days=3)),
                )
            }
            with pytest.raises(SyncNothingToDo):
                server.check_sync_required(backup_name, backups,
                                           local_backup_info_mock)
Beispiel #36
0
    def test_backup_status(self):
        """
        Basic unit test of method backup_status

        A mocked backup with status DONE must be reported as 'VALID' by
        the backup_status method, and an invalid retention policy context
        must make the same method return BackupInfo.NONE.

        The same scenario is checked against both
        RedundancyRetentionPolicy and RecoveryWindowRetentionPolicy.
        """

        server = build_mocked_server()

        def check_policy(policy):
            # Build a BackupInfo object with status DONE and expose it
            # through the mocked get_available_backups, with
            # minimum_redundancy = 1
            backup_info = build_test_backup_info(
                server=policy.server,
                backup_id='test1',
                end_time=datetime.now(tzlocal()))
            policy.server.get_available_backups.return_value = {
                "test_backup": backup_info
            }
            policy.server.config.minimum_redundancy = 1
            # The retention policy report must mark the backup as valid
            assert policy.backup_status('test_backup') == 'VALID'
            # Force an invalid context for testing purposes: the method
            # must fall back to BackupInfo.NONE
            policy.context = 'invalid'
            assert policy.backup_status('test_backup') == BackupInfo.NONE

        # Redundancy-based retention policy
        rp = RetentionPolicyFactory.create(server, 'retention_policy',
                                           'REDUNDANCY 2')
        assert isinstance(rp, RedundancyRetentionPolicy)
        check_policy(rp)

        # Recovery-window-based retention policy
        rp = RetentionPolicyFactory.create(server, 'retention_policy',
                                           'RECOVERY WINDOW OF 4 WEEKS')
        assert isinstance(rp, RecoveryWindowRetentionPolicy)
        check_policy(rp)
Beispiel #37
0
    def test_backup(self, gpb_mock, pbc_mock, capsys, tmpdir):
        """
        Test backup

        :param gpb_mock: mock for the get_previous_backup method
        :param pbc_mock: mock for the backup_copy method
        :param capsys: stdout capture module
        :param tmpdir: pytest temp directory
        """
        tmp_home = tmpdir.mkdir('home')
        backup_manager = build_backup_manager(global_conf={
            'barman_home': tmp_home.strpath,
            'backup_method': 'postgres'
        })
        # ident_file is deliberately outside PGDATA: the executor must
        # warn that pg_basebackup will not copy it
        backup_info = build_test_backup_info(
            backup_id='fake_backup_id',
            server=backup_manager.server,
            pgdata="/pg/data",
            config_file="/pg/data/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/pg_ident.conf",
            begin_offset=28)
        timestamp = datetime.datetime(2015, 10, 26, 14, 38)
        backup_manager.server.postgres.current_xlog_info = dict(
            location='0/12000090',
            file_name='000000010000000000000012',
            file_offset=144,
            timestamp=timestamp,
        )
        backup_manager.server.postgres.get_setting.return_value = '/pg/data'
        # Write a fake backup_label into the backup data directory: the
        # executor reads it back to fill in begin_wal/begin_time
        tmp_backup_label = tmp_home.mkdir('main')\
            .mkdir('base').mkdir('fake_backup_id')\
            .mkdir('data').join('backup_label')
        start_time = datetime.datetime.now(tz.tzlocal()).replace(microsecond=0)
        tmp_backup_label.write(
            'START WAL LOCATION: 0/40000028 (file 000000010000000000000040)\n'
            'CHECKPOINT LOCATION: 0/40000028\n'
            'BACKUP METHOD: streamed\n'
            'BACKUP FROM: master\n'
            'START TIME: %s\n'
            'LABEL: pg_basebackup base backup' %
            start_time.strftime('%Y-%m-%d %H:%M:%S %Z'))
        backup_manager.executor.backup(backup_info)
        out, err = capsys.readouterr()
        gpb_mock.assert_called_once_with(backup_info.backup_id)
        # The external ident_file must be reported on stderr
        assert err.strip() == 'WARNING: pg_basebackup does not copy ' \
                              'the PostgreSQL configuration files that '\
                              'reside outside PGDATA. ' \
                              'Please manually backup the following files:' \
                              '\n\t/pg/pg_ident.conf'
        assert 'Copying files.' in out
        assert 'Copy done.' in out
        assert 'Finalising the backup.' in out
        # End position comes from current_xlog_info, begin position from
        # the backup_label written above
        assert backup_info.end_xlog == '0/12000090'
        assert backup_info.end_offset == 144
        assert backup_info.begin_time == start_time
        assert backup_info.begin_wal == '000000010000000000000040'

        # Check the CommandFailedException re raising.
        # Configure the mock *before* entering pytest.raises, so that only
        # the code under test can raise inside the block (an error in the
        # test setup itself must not satisfy the raises check).
        pbc_mock.side_effect = CommandFailedException('test')
        with pytest.raises(CommandFailedException):
            backup_manager.executor.backup(backup_info)
    def test_statistics(
        self,
        signal_mock,
        tempfile_mock,
        copy_mock,
        create_and_purge_mock,
        analyse_mock,
        rsync_mock,
        tmpdir,
        workers,
    ):
        """
        Unit test for RsyncCopyController.statistics's code

        NOTE(review): the *_mock parameters are presumably injected by a
        stack of mock.patch decorators defined above this method --
        confirm against the decorator stack.

        :param tmpdir: pytest temporary directory fixture
        :param workers: number of parallel copy workers (parametrized)
        """

        # Do a fake copy run to populate the start/stop timestamps.
        # The steps are the same of the full run test
        tempdir = tmpdir.mkdir("tmp")
        tempfile_mock.return_value = tempdir.strpath
        server = build_real_server(
            global_conf={"barman_home": tmpdir.mkdir("home").strpath})
        config = server.config
        executor = server.backup_manager.executor

        rcc = RsyncCopyController(
            path=server.path,
            ssh_command=executor.ssh_command,
            ssh_options=executor.ssh_options,
            network_compression=config.network_compression,
            reuse_backup=None,
            safe_horizon=None,
            workers=workers,
        )

        backup_info = build_test_backup_info(
            server=server,
            pgdata="/pg/data",
            config_file="/etc/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/data/pg_ident.conf",
            begin_xlog="0/2000028",
            begin_wal="000000010000000000000002",
            begin_offset=28,
        )
        backup_info.save()
        # This is to check that all the preparation is done correctly
        assert os.path.exists(backup_info.filename)

        # Silence the access to result properties
        rsync_mock.return_value.out = ""
        rsync_mock.return_value.err = ""
        rsync_mock.return_value.ret = 0

        # Mock analyze directory: fill each copy item with fake analysis
        # results so the copy run has something to process
        def analyse_func(item):
            label = item.label
            item.dir_file = label + "_dir_file"
            item.exclude_and_protect_file = label + "_exclude_and_protect_file"
            item.safe_list = [_FileItem("mode", 1, "date", "path")]
            item.check_list = [_FileItem("mode", 1, "date", "path")]

        analyse_mock.side_effect = analyse_func

        # Register two tablespaces, the PGDATA directory and two single
        # files, mirroring a realistic backup copy job
        rcc.add_directory(
            label="tbs1",
            src=":/fake/location/",
            dst=backup_info.get_data_directory(16387),
            reuse=None,
            bwlimit=None,
            item_class=rcc.TABLESPACE_CLASS,
        )
        rcc.add_directory(
            label="tbs2",
            src=":/another/location/",
            dst=backup_info.get_data_directory(16405),
            reuse=None,
            bwlimit=None,
            item_class=rcc.TABLESPACE_CLASS,
        )
        rcc.add_directory(
            label="pgdata",
            src=":/pg/data/",
            dst=backup_info.get_data_directory(),
            reuse=None,
            bwlimit=None,
            item_class=rcc.PGDATA_CLASS,
            exclude=[
                "/pg_xlog/*",
                "/pg_log/*",
                "/log/*",
                "/recovery.conf",
                "/postmaster.pid",
            ],
            exclude_and_protect=["pg_tblspc/16387", "pg_tblspc/16405"],
        )
        rcc.add_file(
            label="pg_control",
            src=":/pg/data/global/pg_control",
            dst="%s/global/pg_control" % backup_info.get_data_directory(),
            item_class=rcc.PGCONTROL_CLASS,
        )
        rcc.add_file(
            label="config_file",
            src=":/etc/postgresql.conf",
            dst=backup_info.get_data_directory(),
            item_class=rcc.CONFIG_CLASS,
            optional=False,
        )
        # Do the fake run
        rcc.copy()

        # Calculate statistics
        result = rcc.statistics()

        # We cannot check the actual result because it is not predictable,
        # so we check that every value is present and is a number and it is
        # greater than 0
        assert result.get("analysis_time") > 0
        assert "analysis_time_per_item" in result
        for tbs in ("pgdata", "tbs1", "tbs2"):
            assert result["analysis_time_per_item"][tbs] > 0

        assert result.get("copy_time") > 0
        assert "copy_time_per_item" in result
        assert "serialized_copy_time_per_item" in result
        for tbs in ("pgdata", "tbs1", "tbs2", "config_file", "pg_control"):
            assert result["copy_time_per_item"][tbs] > 0
            assert result["serialized_copy_time_per_item"][tbs] > 0

        assert result.get("number_of_workers") == rcc.workers
        assert result.get("total_time") > 0
Beispiel #39
0
    def test_backup_copy(self, remote_mock, pg_basebackup_mock,
                         tmpdir, capsys):
        """
        Test backup folder structure

        :param remote_mock: mock for the fetch_remote_status method
        :param pg_basebackup_mock: mock for the PgBaseBackup object
        :param tmpdir: pytest temp directory
        :param capsys: stdout/stderr capture fixture
        """
        backup_manager = build_backup_manager(global_conf={
            'barman_home': tmpdir.mkdir('home').strpath,
            'backup_method': 'postgres'
        })
        # simulate a old version of pg_basebackup
        # not supporting bandwidth_limit
        remote_mock.return_value = {
            'pg_basebackup_version': '9.2',
            'pg_basebackup_path': '/fake/path',
            'pg_basebackup_bwlimit': False,
        }
        server_mock = backup_manager.server
        streaming_mock = server_mock.streaming
        # bandwidth_limit is configured, but must be ignored on 9.2
        server_mock.config.bandwidth_limit = 1
        streaming_mock.get_connection_string.return_value = 'fake=connstring'
        streaming_mock.conn_parameters = {
            'host': 'fakeHost',
            'port': 'fakePort',
            'user': '******'
        }
        backup_info = build_test_backup_info(server=backup_manager.server,
                                             backup_id='fake_backup_id')
        backup_manager.executor.backup_copy(backup_info)
        out, err = capsys.readouterr()
        assert out == ''
        assert err == ''
        # check that the bwlimit option have been ignored
        assert pg_basebackup_mock.mock_calls == [
            mock.call(
                connection=mock.ANY,
                version='9.2',
                app_name='barman_streaming_backup',
                destination=mock.ANY,
                command='/fake/path',
                tbs_mapping=mock.ANY,
                bwlimit=None,
                immediate=False,
                retry_times=0,
                retry_sleep=30,
                retry_handler=mock.ANY,
                path=mock.ANY),
            mock.call()(),
        ]

        # Check with newer version
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        # Invalidate the cached remote status so it is fetched again
        backup_manager.executor._remote_status = None
        remote_mock.return_value = {
            'pg_basebackup_version': '9.5',
            'pg_basebackup_path': '/fake/path',
            'pg_basebackup_bwlimit': True,
        }
        backup_manager.executor.config.immediate_checkpoint = True
        backup_manager.executor.config.streaming_conninfo = 'fake=connstring'
        backup_manager.executor.backup_copy(backup_info)
        out, err = capsys.readouterr()
        assert out == ''
        assert err == ''
        # check that the bwlimit option have been passed to the test call
        assert pg_basebackup_mock.mock_calls == [
            mock.call(
                connection=mock.ANY,
                version='9.5',
                app_name='barman_streaming_backup',
                destination=mock.ANY,
                command='/fake/path',
                tbs_mapping=mock.ANY,
                bwlimit=1,
                immediate=True,
                retry_times=0,
                retry_sleep=30,
                retry_handler=mock.ANY,
                path=mock.ANY),
            mock.call()(),
        ]

        # Check with a config file outside the data directory:
        # a warning on stderr is expected
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        backup_info.ident_file = '/pg/pg_ident.conf'
        backup_manager.executor.backup_copy(backup_info)
        out, err = capsys.readouterr()
        assert out == ''
        assert err.strip() == 'WARNING: pg_basebackup does not copy ' \
                              'the PostgreSQL configuration files that '\
                              'reside outside PGDATA. ' \
                              'Please manually backup the following files:' \
                              '\n\t/pg/pg_ident.conf'
        # check that the bwlimit option have been passed to the test call
        assert pg_basebackup_mock.mock_calls == [
            mock.call(
                connection=mock.ANY,
                version='9.5',
                app_name='barman_streaming_backup',
                destination=mock.ANY,
                command='/fake/path',
                tbs_mapping=mock.ANY,
                bwlimit=1,
                immediate=True,
                retry_times=0,
                retry_sleep=30,
                retry_handler=mock.ANY,
                path=mock.ANY),
            mock.call()(),
        ]

        # Check with a config file outside the data directory and
        # external_configurations backup option: no warning expected
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        backup_manager.config.backup_options.add(
            BackupOptions.EXTERNAL_CONFIGURATION)
        backup_manager.executor.backup_copy(backup_info)
        out, err = capsys.readouterr()
        assert out == ''
        assert err == ''
        # check that the bwlimit option have been passed to the test call
        assert pg_basebackup_mock.mock_calls == [
            mock.call(
                connection=mock.ANY,
                version='9.5',
                app_name='barman_streaming_backup',
                destination=mock.ANY,
                command='/fake/path',
                tbs_mapping=mock.ANY,
                bwlimit=1,
                immediate=True,
                retry_times=0,
                retry_sleep=30,
                retry_handler=mock.ANY,
                path=mock.ANY),
            mock.call()(),
        ]

        # Raise a test CommandFailedException and expect it to be wrapped
        # inside a DataTransferFailure exception
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        pg_basebackup_mock.return_value.side_effect = \
            CommandFailedException(dict(ret='ret', out='out', err='err'))
        with pytest.raises(DataTransferFailure):
            backup_manager.executor.backup_copy(backup_info)
Beispiel #40
0
    def test_full_copy(self, tempfile_mock, copy_mock, create_and_purge_mock,
                       analyse_mock, rsync_mock, tmpdir):
        """
        Test the execution of a full copy
        """

        # Build the prerequisites
        tempdir = tmpdir.mkdir('tmp')
        tempfile_mock.return_value = tempdir.strpath
        server = build_real_server(global_conf={
            'barman_home': tmpdir.mkdir('home').strpath
        })
        config = server.config
        executor = server.backup_manager.executor

        rcc = RsyncCopyController(
            path=server.path,
            ssh_command=executor.ssh_command,
            ssh_options=executor.ssh_options,
            network_compression=config.network_compression,
            reuse_backup=None,
            safe_horizon=None)

        backup_info = build_test_backup_info(
            server=server,
            pgdata="/pg/data",
            config_file="/etc/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/data/pg_ident.conf",
            begin_xlog="0/2000028",
            begin_wal="000000010000000000000002",
            begin_offset=28)
        backup_info.save()
        # This is to check that all the preparation is done correctly
        assert os.path.exists(backup_info.filename)

        # Silence the access to result properties
        rsync_mock.return_value.out = ''
        rsync_mock.return_value.err = ''
        rsync_mock.return_value.ret = 0

        rcc.add_directory(
            label='tbs1',
            src=':/fake/location/',
            dst=backup_info.get_data_directory(16387),
            reuse=None,
            bwlimit=None,
            item_class=rcc.TABLESPACE_CLASS),
        rcc.add_directory(
            label='tbs2',
            src=':/another/location/',
            dst=backup_info.get_data_directory(16405),
            reuse=None,
            bwlimit=None,
            item_class=rcc.TABLESPACE_CLASS),
        rcc.add_directory(
            label='pgdata',
            src=':/pg/data/',
            dst=backup_info.get_data_directory(),
            reuse=None,
            bwlimit=None,
            item_class=rcc.PGDATA_CLASS,
            exclude=['/pg_xlog/*',
                     '/pg_log/*',
                     '/recovery.conf',
                     '/postmaster.pid'],
            exclude_and_protect=['pg_tblspc/16387', 'pg_tblspc/16405']),
        rcc.add_file(
            label='pg_control',
            src=':/pg/data/global/pg_control',
            dst='%s/global/pg_control' % backup_info.get_data_directory(),
            item_class=rcc.PGCONTROL_CLASS),
        rcc.add_file(
            label='config_file',
            src=':/etc/postgresql.conf',
            dst=backup_info.get_data_directory(),
            item_class=rcc.CONFIG_CLASS,
            optional=False),
        rcc.copy(),

        # Check the order of calls to the Rsync mock
        assert rsync_mock.mock_calls == [
            mock.call(network_compression=False,
                      args=['--itemize-changes',
                            '--itemize-changes'],
                      bwlimit=None, ssh='ssh', path=None,
                      ssh_options=['-c', '"arcfour"', '-p', '22',
                                   '*****@*****.**', '-o',
                                   'BatchMode=yes', '-o',
                                   'StrictHostKeyChecking=no'],
                      exclude=None, exclude_and_protect=None,
                      retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
            mock.call(network_compression=False,
                      args=['--itemize-changes',
                            '--itemize-changes'],
                      bwlimit=None, ssh='ssh', path=None,
                      ssh_options=['-c', '"arcfour"', '-p', '22',
                                   '*****@*****.**', '-o',
                                   'BatchMode=yes', '-o',
                                   'StrictHostKeyChecking=no'],
                      exclude=None, exclude_and_protect=None,
                      retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
            mock.call(network_compression=False,
                      args=['--itemize-changes',
                            '--itemize-changes'],
                      bwlimit=None, ssh='ssh', path=None,
                      ssh_options=['-c', '"arcfour"', '-p', '22',
                                   '*****@*****.**', '-o',
                                   'BatchMode=yes', '-o',
                                   'StrictHostKeyChecking=no'],
                      exclude=[
                          '/pg_xlog/*',
                          '/pg_log/*',
                          '/recovery.conf',
                          '/postmaster.pid'],
                      exclude_and_protect=[
                          'pg_tblspc/16387',
                          'pg_tblspc/16405'],
                      retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
            mock.call(network_compression=False,
                      args=['--itemize-changes',
                            '--itemize-changes'],
                      bwlimit=None, ssh='ssh', path=None,
                      ssh_options=['-c', '"arcfour"', '-p', '22',
                                   '*****@*****.**', '-o',
                                   'BatchMode=yes', '-o',
                                   'StrictHostKeyChecking=no'],
                      exclude=None, exclude_and_protect=None,
                      retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
            mock.call()(
                ':/pg/data/global/pg_control',
                '%s/global/pg_control' % backup_info.get_data_directory(),
                allowed_retval=(0, 23, 24)),
            mock.call(network_compression=False,
                      args=['--itemize-changes',
                            '--itemize-changes'],
                      bwlimit=None, ssh='ssh', path=None,
                      ssh_options=['-c', '"arcfour"', '-p', '22',
                                   '*****@*****.**', '-o',
                                   'BatchMode=yes', '-o',
                                   'StrictHostKeyChecking=no'],
                      exclude=None, exclude_and_protect=None,
                      retry_sleep=0, retry_times=0, retry_handler=mock.ANY),
            mock.call()(
                ':/etc/postgresql.conf',
                backup_info.get_data_directory(),
                allowed_retval=(0, 23, 24)),
        ]

        # Check calls to _analyse_directory method
        assert analyse_mock.mock_calls == [
            mock.call(item, tempdir.strpath) for item in rcc.item_list
            if item.is_directory
        ]

        # Check calls to _create_dir_and_purge method
        assert create_and_purge_mock.mock_calls == [
            mock.call(item) for item in rcc.item_list
            if item.is_directory
        ]

        # Check the order of calls to the copy method
        # All the file_list arguments are None because the analyze part
        # has not really been executed
        assert copy_mock.mock_calls == [
            mock.call(
                mock.ANY, ':/fake/location/',
                backup_info.get_data_directory(16387), checksum=False,
                file_list=None),
            mock.call(
                mock.ANY, ':/fake/location/',
                backup_info.get_data_directory(16387), checksum=True,
                file_list=None),
            mock.call(
                mock.ANY, ':/another/location/',
                backup_info.get_data_directory(16405), checksum=False,
                file_list=None),
            mock.call(
                mock.ANY, ':/another/location/',
                backup_info.get_data_directory(16405), checksum=True,
                file_list=None),
            mock.call(mock.ANY, ':/pg/data/',
                      backup_info.get_data_directory(), checksum=False,
                      file_list=None),
            mock.call(mock.ANY, ':/pg/data/',
                      backup_info.get_data_directory(), checksum=True,
                      file_list=None),
        ]
Example #41
0
    def test_check_redundancy(self, tmpdir):
        """
        Test the check method of the backup manager.

        Exercises, in order:
        * the minimum_redundancy check (unsatisfied, then satisfied),
        * the failed-backups check (empty catalog, then one FAILED backup),
        * the compression-settings check (unknown compression, valid
          compression, and a compressor raising CompressionIncompatibility).
        """
        # Setup temp dir and server
        # build a backup_manager and setup a basic configuration
        backup_manager = build_backup_manager(
            name='TestServer',
            global_conf={
                'barman_home': tmpdir.strpath,
                'minimum_redundancy': "1"
            })
        # The executor is not under test here: replace it with a mock
        backup_manager.executor = mock.MagicMock()

        # Test the unsatisfied minimum_redundancy option
        strategy_mock = mock.MagicMock()
        backup_manager.check(strategy_mock)
        # Expect a failure from the method (no backups yet)
        strategy_mock.result.assert_called_with(
            'TestServer',
            'minimum redundancy requirements',
            False,
            'have 0 backups, expected at least 1'
        )
        # Test the satisfied minimum_redundancy option
        b_info = build_test_backup_info(
            backup_id='fake_backup_id',
            server=backup_manager.server,
        )
        b_info.save()

        strategy_mock.reset_mock()
        # Reload the backup cache so check() sees the new backup
        backup_manager._load_backup_cache()
        backup_manager.check(strategy_mock)
        # Expect a success from the method
        strategy_mock.result.assert_called_with(
            'TestServer',
            'minimum redundancy requirements',
            True,
            'have 1 backups, expected at least 1'
        )

        # Test for no failed backups
        strategy_mock.reset_mock()
        backup_manager._load_backup_cache()
        backup_manager.check(strategy_mock)
        # Expect a success from the method: no failed backups in catalog
        strategy_mock.result.assert_any_call(
            'TestServer',
            'failed backups',
            True,
            'there are 0 failed backups'
        )

        # Test for failed backups in catalog
        b_info = build_test_backup_info(
            backup_id='failed_backup_id',
            server=backup_manager.server,
            status=BackupInfo.FAILED,
        )
        b_info.save()
        strategy_mock.reset_mock()
        backup_manager._load_backup_cache()
        backup_manager.check(strategy_mock)
        # Expect a failure from the method
        strategy_mock.result.assert_any_call(
            'TestServer',
            'failed backups',
            False,
            'there are 1 failed backups'
        )

        # Test unknown compression
        backup_manager.config.compression = 'test_compression'
        backup_manager.compression_manager.check.return_value = False
        strategy_mock.reset_mock()
        backup_manager.check(strategy_mock)
        # Expect a failure from the method
        strategy_mock.result.assert_any_call(
            'TestServer',
            'compression settings',
            False
        )

        # Test valid compression
        backup_manager.config.compression = 'test_compression'
        backup_manager.compression_manager.check.return_value = True
        strategy_mock.reset_mock()
        backup_manager.check(strategy_mock)
        # Expect a success from the method
        strategy_mock.result.assert_any_call(
            'TestServer',
            'compression settings',
            True
        )
        # Test failure retrieving a compressor
        backup_manager.config.compression = 'test_compression'
        backup_manager.compression_manager.check.return_value = True
        backup_manager.compression_manager.get_compressor.side_effect = \
            CompressionIncompatibility()
        strategy_mock.reset_mock()
        backup_manager.check(strategy_mock)
        # Expect a failure from the method
        strategy_mock.result.assert_any_call(
            'TestServer',
            'compression settings',
            False
        )
Example #42
0
    def test_recovery_window_report(self, server, caplog):
        """
        Unit test of RecoveryWindowRetentionPolicy.report().

        With a 4-week recovery window, a backup whose end_time lies inside
        the window must be reported as VALID; older backups become OBSOLETE,
        except those kept alive by minimum redundancy requirements.
        """
        policy = RetentionPolicyFactory.create(
            "retention_policy", "RECOVERY WINDOW OF 4 WEEKS", server=server
        )
        assert isinstance(policy, RecoveryWindowRetentionPolicy)

        now = datetime.now(tzlocal())
        # Catalog with one recent DONE backup and two older ones
        available_backups = {
            "test_backup3": build_test_backup_info(
                server=server,
                backup_id="test_backup3",
                end_time=now,
            ),
            "test_backup2": build_test_backup_info(
                server=server,
                backup_id="test_backup2",
                end_time=now - timedelta(weeks=5),
            ),
            "test_backup": build_test_backup_info(
                server=server,
                backup_id="test_backup",
                end_time=now - timedelta(weeks=6),
            ),
        }
        # Instruct the mocked server to return our catalog and use a
        # minimum_redundancy of 1
        server.get_available_backups.return_value = available_backups
        server.config.minimum_redundancy = 1
        server.config.name = "test"

        # The recent backup is VALID; test_backup2 is still required to
        # recover to the edge of the window, so only the oldest backup
        # is marked OBSOLETE
        assert policy.report() == {
            "test_backup3": "VALID",
            "test_backup2": "VALID",
            "test_backup": "OBSOLETE",
        }

        # An invalid context must raise a ValueError
        with pytest.raises(ValueError):
            policy.report(context="invalid")

        # Raise minimum_redundancy above the number of available backups,
        # enforcing the configuration parameter over the policy default
        server.config.minimum_redundancy = 4
        policy.report()
        # The obsolete backup must have been kept, with a warning logged
        caplog.set_level(logging.WARNING)
        expected_warning = (
            r"WARNING  .*Keeping obsolete backup test_backup for "
            r"server test \(older than .*\) due to minimum redundancy "
            r"requirements \(4\)\n"
        )
        assert re.search(expected_warning, caplog.text)
    def test_create_dir_and_purge(self, rsync_ignore_mock, rsync_factory_mock,
                                  tmpdir):
        """
        Unit test for RsyncCopyController._create_dir_and_purge's code
        """
        # Prerequisites: a real server rooted in a temporary barman home
        server = build_real_server(
            global_conf={'barman_home': tmpdir.mkdir('home').strpath})
        config = server.config
        executor = server.backup_manager.executor

        # Build the RsyncCopyController with the safe_horizon placed
        # between the tmp/safe and tmp2/check timestamps
        horizon = datetime(2015, 2, 20, 19, 0, 0,
                           tzinfo=dateutil.tz.tzlocal())
        controller = RsyncCopyController(
            path=server.path,
            ssh_command=executor.ssh_command,
            ssh_options=executor.ssh_options,
            network_compression=config.network_compression,
            reuse_backup=None,
            safe_horizon=horizon)

        backup_info = build_test_backup_info(
            server=server,
            pgdata="/pg/data",
            config_file="/etc/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/data/pg_ident.conf",
            begin_xlog="0/2000028",
            begin_wal="000000010000000000000002",
            begin_offset=28)
        backup_info.save()
        # Sanity check: the preparation wrote the backup metadata file
        assert os.path.exists(backup_info.filename)

        # The directory item that _create_dir_and_purge will process
        pgdata_item = _RsyncCopyItem(label='pgdata',
                                     src=':/pg/data/',
                                     dst=backup_info.get_data_directory(),
                                     is_directory=True,
                                     item_class=controller.PGDATA_CLASS,
                                     optional=False)

        # Run the method under test
        controller._create_dir_and_purge(pgdata_item)

        # _rsync_factory must have been invoked exactly once, with our item
        assert rsync_factory_mock.mock_calls == [
            mock.call(pgdata_item),
        ]

        # The produced rsync must have been run through
        # _rsync_ignore_vanished_files with the recursive purge options
        assert rsync_ignore_mock.mock_calls == [
            mock.call(rsync_factory_mock.return_value,
                      '--recursive',
                      '--delete',
                      '--files-from=None',
                      '--filter',
                      'merge None',
                      ':/pg/data/',
                      backup_info.get_data_directory(),
                      check=True),
        ]
Example #44
0
    def test_sync_wals(self, rsync_mock, tmpdir, capsys):
        """
        Test the WAL synchronisation method, testing all
        the possible error conditions.

        Covers: non-passive node (SyncError), compression mismatch,
        missing base backup, no WAL to synchronise, rsync failure,
        keyboard interruption and, finally, a successful run that must
        populate xlog.db with the primary's WAL entries.

        :param MagicMock rsync_mock: MagicMock replacing Rsync class
        :param py.local.path tmpdir: py.test temporary directory
        :param capsys: fixture that allow to access stdout/stderr output
        """
        server_name = "main"

        # Prepare paths
        barman_home = tmpdir.mkdir("barman_home")
        backup_dir = barman_home.mkdir(server_name)
        wals_dir = backup_dir.mkdir("wals")
        primary_info_file = backup_dir.join(barman.server.PRIMARY_INFO_FILE)

        # prepare the primary_info file, describing the (fake) primary node
        remote_basebackup_dir = tmpdir.mkdir("primary")
        primary_info_content = dict(EXPECTED_MINIMAL)
        primary_info_content["config"].update(
            compression=None,
            basebackups_directory=str(remote_basebackup_dir),
            wals_directory=str(wals_dir),
        )
        primary_info_file.write(json.dumps(primary_info_content))

        # Test 1: Not a passive node.
        # Expect SyncError
        server = build_real_server(global_conf=dict(
            barman_home=str(barman_home)))
        with pytest.raises(SyncError):
            server.sync_wals()

        # Test 2: different compression between Master and Passive node.
        # Expect a SyncError
        server = build_real_server(
            global_conf=dict(barman_home=str(barman_home)),
            main_conf=dict(compression="gzip",
                           primary_ssh_command="ssh fakeuser@fakehost"),
        )

        server.sync_wals()
        (out, err) = capsys.readouterr()
        assert "Compression method on server %s " % server_name in err

        # Test 3: No base backup for server, exit with warning
        server = build_real_server(
            global_conf=dict(barman_home=str(barman_home)),
            main_conf=dict(
                compression=None,
                wals_directory=str(wals_dir),
                primary_ssh_command="ssh fakeuser@fakehost",
            ),
        )

        server.sync_wals()
        (out, err) = capsys.readouterr()

        assert "WARNING: No base backup for server %s" % server.config.name in err

        # Test 4: No wal synchronisation required, expect a warning
        # NOTE(review): the numbering jumps from Test 4 to Test 6 below —
        # a "Test 5" case appears to have been removed at some point.

        # set return for get_first_backup and get_backup methods:
        # the local backup begins after every WAL offered by the primary
        server.get_first_backup_id = lambda: "too_new"
        server.get_backup = lambda x: build_test_backup_info(
            server=server,
            begin_wal="000000010000000000000005",
            begin_time=dateutil.parser.parse("Wed Jul 23 11:00:43 2014"),
            end_time=dateutil.parser.parse("Wed Jul 23 12:00:43 2014"),
        )
        server.sync_wals()
        (out, err) = capsys.readouterr()

        assert ("WARNING: Skipping WAL synchronisation for "
                "server %s: no available local backup for %s" %
                (server.config.name, primary_info_content["wals"][0]["name"])
                in err)

        # Test 6: simulate rsync failure.
        # Expect a custom error message

        # Make the local backup old enough to require synchronisation
        server.get_backup = lambda x: build_test_backup_info(
            server=server,
            begin_wal="000000010000000000000002",
            begin_time=dateutil.parser.parse("Wed Jul 23 11:00:43 2014"),
            end_time=dateutil.parser.parse("Wed Jul 23 12:00:43 2014"),
        )
        rsync_mock.side_effect = CommandFailedException("TestFailure")
        server.sync_wals()

        (out, err) = capsys.readouterr()
        # check stdout for the Custom error message
        assert "TestFailure" in err

        # Test 7: simulate keyboard interruption
        rsync_mock.side_effect = KeyboardInterrupt()
        server.sync_wals()
        # control the error message for KeyboardInterrupt
        (out, err) = capsys.readouterr()
        assert "KeyboardInterrupt" in err

        # Test 8: normal execution, expect no output. xlog.db
        # must contain information about the primary info wals

        # reset the rsync_moc, and remove the side_effect
        rsync_mock.reset_mock()
        rsync_mock.side_effect = mock.Mock(name="rsync")

        server.sync_wals()
        # check for no output on stdout and sterr
        (out, err) = capsys.readouterr()
        assert out == ""
        assert err == ""
        # check the xlog content for primary.info wals
        exp_xlog = [
            "000000010000000000000002\t16777216\t1406019026.0\tNone\n",
            "000000010000000000000003\t16777216\t1406019026.0\tNone\n",
            "000000010000000000000004\t16777216\t1406019329.93\tNone\n",
            "000000010000000000000005\t16777216\t1406019330.84\tNone\n",
        ]
        with server.xlogdb() as fxlogdb:
            xlog = fxlogdb.readlines()
            assert xlog == exp_xlog
    def test_full_copy(
        self,
        signal_mock,
        tempfile_mock,
        copy_mock,
        create_and_purge_mock,
        analyse_mock,
        rsync_mock,
        tmpdir,
    ):
        """
        Test the execution of a full copy.

        Drives RsyncCopyController.copy() over two tablespaces, the PGDATA
        directory, pg_control and the configuration file, then verifies the
        exact sequence of Rsync constructions/invocations, the calls to the
        analysis and purge helpers, and the smart-copy calls with their
        per-label safe/check file lists.
        """

        # Build the prerequisites
        tempdir = tmpdir.mkdir("tmp")
        tempfile_mock.return_value = tempdir.strpath
        server = build_real_server(
            global_conf={"barman_home": tmpdir.mkdir("home").strpath})
        config = server.config
        executor = server.backup_manager.executor

        rcc = RsyncCopyController(
            path=server.path,
            ssh_command=executor.ssh_command,
            ssh_options=executor.ssh_options,
            network_compression=config.network_compression,
            reuse_backup=None,
            safe_horizon=None,
        )

        backup_info = build_test_backup_info(
            server=server,
            pgdata="/pg/data",
            config_file="/etc/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/data/pg_ident.conf",
            begin_xlog="0/2000028",
            begin_wal="000000010000000000000002",
            begin_offset=28,
        )
        backup_info.save()
        # This is to check that all the preparation is done correctly
        assert os.path.exists(backup_info.filename)

        # Silence the access to result properties
        rsync_mock.return_value.out = ""
        rsync_mock.return_value.err = ""
        rsync_mock.return_value.ret = 0

        # Mock analyze directory: fill in the attributes that the real
        # _analyze_directory would compute for each directory item
        def analyse_func(item):
            label = item.label
            item.dir_file = label + "_dir_file"
            item.exclude_and_protect_file = label + "_exclude_and_protect_file"
            item.safe_list = [_FileItem("mode", 1, "date", "path")]
            item.check_list = [_FileItem("mode", 1, "date", "path")]

        analyse_mock.side_effect = analyse_func

        rcc.add_directory(
            label="tbs1",
            src=":/fake/location/",
            dst=backup_info.get_data_directory(16387),
            reuse=None,
            bwlimit=None,
            item_class=rcc.TABLESPACE_CLASS,
        )
        rcc.add_directory(
            label="tbs2",
            src=":/another/location/",
            dst=backup_info.get_data_directory(16405),
            reuse=None,
            bwlimit=None,
            item_class=rcc.TABLESPACE_CLASS,
        )
        rcc.add_directory(
            label="pgdata",
            src=":/pg/data/",
            dst=backup_info.get_data_directory(),
            reuse=None,
            bwlimit=None,
            item_class=rcc.PGDATA_CLASS,
            exclude=[
                "/pg_xlog/*",
                "/pg_log/*",
                "/log/*",
                "/recovery.conf",
                "/postmaster.pid",
            ],
            exclude_and_protect=["pg_tblspc/16387", "pg_tblspc/16405"],
        )
        rcc.add_file(
            label="pg_control",
            src=":/pg/data/global/pg_control",
            dst="%s/global/pg_control" % backup_info.get_data_directory(),
            item_class=rcc.PGCONTROL_CLASS,
        )
        rcc.add_file(
            label="config_file",
            src=":/etc/postgresql.conf",
            dst=backup_info.get_data_directory(),
            item_class=rcc.CONFIG_CLASS,
            optional=False,
        )
        rcc.copy()

        # Check the order of calls to the Rsync mock
        assert rsync_mock.mock_calls == [
            mock.call(
                network_compression=False,
                args=[
                    "--ignore-missing-args",
                    "--itemize-changes",
                    "--itemize-changes",
                ],
                bwlimit=None,
                ssh="ssh",
                path=None,
                ssh_options=[
                    "-c",
                    '"arcfour"',
                    "-p",
                    "22",
                    "*****@*****.**",
                    "-o",
                    "BatchMode=yes",
                    "-o",
                    "StrictHostKeyChecking=no",
                ],
                exclude=None,
                exclude_and_protect=None,
                include=None,
                retry_sleep=0,
                retry_times=0,
                retry_handler=mock.ANY,
            ),
            mock.call(
                network_compression=False,
                args=[
                    "--ignore-missing-args",
                    "--itemize-changes",
                    "--itemize-changes",
                ],
                bwlimit=None,
                ssh="ssh",
                path=None,
                ssh_options=[
                    "-c",
                    '"arcfour"',
                    "-p",
                    "22",
                    "*****@*****.**",
                    "-o",
                    "BatchMode=yes",
                    "-o",
                    "StrictHostKeyChecking=no",
                ],
                exclude=None,
                exclude_and_protect=None,
                include=None,
                retry_sleep=0,
                retry_times=0,
                retry_handler=mock.ANY,
            ),
            mock.call(
                network_compression=False,
                args=[
                    "--ignore-missing-args",
                    "--itemize-changes",
                    "--itemize-changes",
                ],
                bwlimit=None,
                ssh="ssh",
                path=None,
                ssh_options=[
                    "-c",
                    '"arcfour"',
                    "-p",
                    "22",
                    "*****@*****.**",
                    "-o",
                    "BatchMode=yes",
                    "-o",
                    "StrictHostKeyChecking=no",
                ],
                exclude=[
                    "/pg_xlog/*",
                    "/pg_log/*",
                    "/log/*",
                    "/recovery.conf",
                    "/postmaster.pid",
                ],
                exclude_and_protect=["pg_tblspc/16387", "pg_tblspc/16405"],
                include=None,
                retry_sleep=0,
                retry_times=0,
                retry_handler=mock.ANY,
            ),
            mock.call(
                network_compression=False,
                args=[
                    "--ignore-missing-args",
                    "--itemize-changes",
                    "--itemize-changes",
                ],
                bwlimit=None,
                ssh="ssh",
                path=None,
                ssh_options=[
                    "-c",
                    '"arcfour"',
                    "-p",
                    "22",
                    "*****@*****.**",
                    "-o",
                    "BatchMode=yes",
                    "-o",
                    "StrictHostKeyChecking=no",
                ],
                exclude=None,
                exclude_and_protect=None,
                include=None,
                retry_sleep=0,
                retry_times=0,
                retry_handler=mock.ANY,
            ),
            # mock.call() is the Rsync instance: this is its invocation
            # copying the configuration file
            mock.call()(
                ":/etc/postgresql.conf",
                backup_info.get_data_directory(),
                allowed_retval=(0, 23, 24),
            ),
            mock.call(
                network_compression=False,
                args=[
                    "--ignore-missing-args",
                    "--itemize-changes",
                    "--itemize-changes",
                ],
                bwlimit=None,
                ssh="ssh",
                path=None,
                ssh_options=[
                    "-c",
                    '"arcfour"',
                    "-p",
                    "22",
                    "*****@*****.**",
                    "-o",
                    "BatchMode=yes",
                    "-o",
                    "StrictHostKeyChecking=no",
                ],
                exclude=None,
                exclude_and_protect=None,
                include=None,
                retry_sleep=0,
                retry_times=0,
                retry_handler=mock.ANY,
            ),
            # Invocation of the Rsync instance copying pg_control
            mock.call()(
                ":/pg/data/global/pg_control",
                "%s/global/pg_control" % backup_info.get_data_directory(),
                allowed_retval=(0, 23, 24),
            ),
        ]

        # Check calls to _analyse_directory method
        assert analyse_mock.mock_calls == [
            mock.call(item) for item in rcc.item_list if item.is_directory
        ]

        # Check calls to _create_dir_and_purge method
        assert create_and_purge_mock.mock_calls == [
            mock.call(item) for item in rcc.item_list if item.is_directory
        ]

        # Utility function to build the file_list name
        def file_list_name(label, kind):
            return "%s/%s_%s_%s.list" % (tempdir.strpath, label, kind,
                                         os.getpid())

        # Check the order of calls to the copy method.
        # Each directory is copied twice (checksum=False for the safe list,
        # checksum=True for the check list); the file_list arguments point
        # at the per-label safe/check list files that the controller builds
        # inside the temporary directory (see file_list_name above).
        assert copy_mock.mock_calls == [
            mock.call(
                mock.ANY,
                ":/fake/location/",
                backup_info.get_data_directory(16387),
                checksum=False,
                file_list=file_list_name("tbs1", "safe"),
            ),
            mock.call(
                mock.ANY,
                ":/fake/location/",
                backup_info.get_data_directory(16387),
                checksum=True,
                file_list=file_list_name("tbs1", "check"),
            ),
            mock.call(
                mock.ANY,
                ":/another/location/",
                backup_info.get_data_directory(16405),
                checksum=False,
                file_list=file_list_name("tbs2", "safe"),
            ),
            mock.call(
                mock.ANY,
                ":/another/location/",
                backup_info.get_data_directory(16405),
                checksum=True,
                file_list=file_list_name("tbs2", "check"),
            ),
            mock.call(
                mock.ANY,
                ":/pg/data/",
                backup_info.get_data_directory(),
                checksum=False,
                file_list=file_list_name("pgdata", "safe"),
            ),
            mock.call(
                mock.ANY,
                ":/pg/data/",
                backup_info.get_data_directory(),
                checksum=True,
                file_list=file_list_name("pgdata", "check"),
            ),
        ]
    def test_recover_waiting_for_wals(
        self,
        backup_info_mock,
        rsync_copy_controller_mock,
        output_mock,
        rsync_pgdata_mock,
        unix_remote_command_mock,
        tmpdir,
    ):
        """
        Test recovery of a backup in WAITING_FOR_WALS status.

        When the backup is still WAITING_FOR_WALS after the data files
        have been copied, the recovery must complete but emit warnings
        stating that the recovered backup is not yet consistent. If the
        backup reaches DONE during the copy, only the initial warning is
        expected.
        """
        # This backup is waiting for WALs and it remains in that status
        # even after having copied the data files
        backup_info_mock.WAITING_FOR_WALS = "WAITING_FOR_WALS"
        backup_info_mock.return_value.status = BackupInfo.WAITING_FOR_WALS
        backup_info = testing_helpers.build_test_backup_info()
        backup_manager = testing_helpers.build_backup_manager()
        executor = RecoveryExecutor(backup_manager)
        backup_info.status = BackupInfo.WAITING_FOR_WALS
        destination = tmpdir.mkdir("destination").strpath
        with closing(executor):
            executor.recover(backup_info, destination, standby_mode=None)

        # The backup info has been read again
        backup_info_mock.assert_called()

        # The following two warning messages have been emitted
        output_mock.warning.assert_has_calls(
            [
                mock.call(
                    "IMPORTANT: You have requested a recovery operation for "
                    "a backup that does not have yet all the WAL files that "
                    "are required for consistency."
                ),
                mock.call(
                    "IMPORTANT: The backup we have recovered IS NOT "
                    "VALID. Required WAL files for consistency are "
                    "missing. Please verify that WAL archiving is "
                    "working correctly or evaluate using the 'get-wal' "
                    "option for recovery"
                ),
            ]
        )

        # In the following test case, the backup will be validated during
        # the copy of the data files, so there is no need for the warning
        # message at the end of the recovery process to be emitted again
        output_mock.warning.reset_mock()
        backup_info_mock.return_value.status = BackupInfo.DONE
        with closing(executor):
            executor.recover(backup_info, destination, standby_mode=None)

        # The backup info has been read again
        backup_info_mock.assert_called()

        # Only the initial warning message has been emitted this time
        output_mock.warning.assert_has_calls(
            [
                mock.call(
                    "IMPORTANT: You have requested a recovery operation for "
                    "a backup that does not have yet all the WAL files that "
                    "are required for consistency."
                )
            ]
        )
Beispiel #47
0
    def test_status(self, capsys, tmpdir):
        """
        Test the server.sync_status method.

        Given a test xlog.db, expect the method to produce a json document
        on stdout and compare it with the EXPECTED_MINIMAL map. Also check
        that a SyncError is raised when last_wal falls outside the range
        covered by xlog.db, and that an empty xlog.db yields an empty
        last_name and a zero last_position.

        :param path tmpdir: py.test temporary directory unique to the test
        :param capsys: fixture that allow to access stdout/stderr output
        """
        # Create a test xlog.db with five WAL entries
        # (name, size, timestamp, compression)
        tmp_path = tmpdir.join("xlog.db")
        tmp_path.write(
            "000000010000000000000001\t16777216\t1406019022.4\tNone\n"
            "000000010000000000000002\t16777216\t1406019026.0\tNone\n"
            "000000010000000000000003\t16777216\t1406019026.0\tNone\n"
            "000000010000000000000004\t16777216\t1406019329.93\tNone\n"
            "000000010000000000000005\t16777216\t1406019330.84\tNone\n"
        )

        # Build a server, replacing some function to use the tmpdir objects
        server = build_real_server()
        server.xlogdb = lambda: tmp_path.open()
        server.get_available_backups = lambda: {
            "1234567890": build_test_backup_info(
                server=server,
                begin_time=dateutil.parser.parse("Wed Jul 23 11:00:43 2014"),
                end_time=dateutil.parser.parse("Wed Jul 23 12:00:43 2014"),
            )
        }

        # Call the sync_status method (no filters) capturing the output
        # using capsys
        server.sync_status(None, None)
        (out, err) = capsys.readouterr()
        # prepare the expected results
        # (complex values have to be converted to json)
        expected = dict(EXPECTED_MINIMAL)
        expected["config"] = dict(
            [
                (k, v.to_json() if hasattr(v, "to_json") else v)
                for k, v in server.config.to_json().items()
            ]
        )
        assert json.loads(out) == expected

        # Test that status method raises a SyncError
        # if last_wal is older than the first entry of the xlog.db
        # NOTE(review): sync_status is called with a single argument here
        # while it received two above — assumes the second parameter has a
        # default value; confirm against Server.sync_status's signature
        with pytest.raises(SyncError):
            server.sync_status("000000010000000000000000")

        # Test that status method raises a SyncError
        # if last_wal is newer than the last entry of the xlog.db
        with pytest.raises(SyncError):
            server.sync_status("000000010000000000000007")

        # test with an empty file
        tmp_path.write("")
        server.sync_status("000000010000000000000001")
        (out, err) = capsys.readouterr()
        result = json.loads(out)
        assert result["last_position"] == 0
        assert result["last_name"] == ""
    def _run_analyze_directory(self, list_files_mock, tmpdir, ref_list,
                               src_list):
        """
        Helper: build a real server and a RsyncCopyController with a
        safe_horizon of 2015-02-20 19:00:00 (local tz), feed the given
        reference and source file lists to the mocked _list_files, then
        run _analyze_directory on a pgdata copy item.

        :param list_files_mock: mock for RsyncCopyController._list_files
        :param path tmpdir: py.test temporary directory unique to the test
        :param ref_list: file list returned for the reference (destination)
        :param src_list: file list returned for the source
        :return: tuple (inspected _RsyncCopyItem, backup_info used)
        """
        # Apply it to _list_files calls
        # (first call returns the reference list, second the source list)
        list_files_mock.side_effect = [ref_list, src_list]

        # Build the prerequisites
        server = build_real_server(
            global_conf={"barman_home": tmpdir.mkdir("home").strpath})
        config = server.config
        executor = server.backup_manager.executor

        # Create the RsyncCopyController putting the safe_horizon between
        # the tmp/safe and tmp2/check timestamps
        rcc = RsyncCopyController(
            path=server.path,
            ssh_command=executor.ssh_command,
            ssh_options=executor.ssh_options,
            network_compression=config.network_compression,
            reuse_backup=None,
            safe_horizon=datetime(
                year=2015,
                month=2,
                day=20,
                hour=19,
                minute=0,
                second=0,
                tzinfo=dateutil.tz.tzlocal(),
            ),
        )

        backup_info = build_test_backup_info(
            server=server,
            pgdata="/pg/data",
            config_file="/etc/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/data/pg_ident.conf",
            begin_xlog="0/2000028",
            begin_wal="000000010000000000000002",
            begin_offset=28,
        )
        backup_info.save()
        # This is to check that all the preparation is done correctly
        assert os.path.exists(backup_info.filename)

        # Add a temp dir (usually created by the copy method)
        rcc.temp_dir = tmpdir.mkdir("tmp").strpath

        # Create an item to inspect
        item = _RsyncCopyItem(
            label="pgdata",
            src=":/pg/data/",
            dst=backup_info.get_data_directory(),
            is_directory=True,
            item_class=rcc.PGDATA_CLASS,
            optional=False,
        )

        # Then run the _analyze_directory method
        rcc._analyze_directory(item)

        return item, backup_info
    def test_analyze_directory(self, list_files_mock, rsync_factory_mock,
                               tmpdir):
        """
        Unit test for RsyncCopyController._analyze_directory's code

        Builds a reference and a source file list that differ by one new
        file, one changed timestamp and one changed size, then verifies
        that _analyze_directory partitions the entries into the expected
        dir/safe/check/exclude-and-protect sets around safe_horizon.

        NOTE(review): the setup below duplicates _run_analyze_directory;
        consider reusing that helper if both methods live in the same
        test class — confirm before refactoring.
        """

        # Build file list for ref
        ref_list = [
            _FileItem(
                'drwxrwxrwt', 69632,
                datetime(year=2015,
                         month=2,
                         day=9,
                         hour=15,
                         minute=1,
                         second=0,
                         tzinfo=dateutil.tz.tzlocal()), '.'),
            _FileItem(
                'drwxrwxrwt', 69612,
                datetime(year=2015,
                         month=2,
                         day=19,
                         hour=15,
                         minute=1,
                         second=22,
                         tzinfo=dateutil.tz.tzlocal()), 'tmp'),
            _FileItem(
                '-rw-r--r--', 69632,
                datetime(year=2015,
                         month=2,
                         day=20,
                         hour=18,
                         minute=15,
                         second=33,
                         tzinfo=dateutil.tz.tzlocal()), 'tmp/safe'),
            _FileItem(
                '-rw-r--r--', 69612,
                datetime(year=2015,
                         month=2,
                         day=20,
                         hour=19,
                         minute=15,
                         second=33,
                         tzinfo=dateutil.tz.tzlocal()), 'tmp/check'),
            _FileItem(
                '-rw-r--r--', 69612,
                datetime(year=2015,
                         month=2,
                         day=20,
                         hour=19,
                         minute=15,
                         second=33,
                         tzinfo=dateutil.tz.tzlocal()), 'tmp/diff_time'),
            _FileItem(
                '-rw-r--r--', 69612,
                datetime(year=2015,
                         month=2,
                         day=20,
                         hour=19,
                         minute=15,
                         second=33,
                         tzinfo=dateutil.tz.tzlocal()), 'tmp/diff_size'),
        ]

        # Build the list for source adding a new file, ...
        src_list = ref_list + [
            _FileItem(
                '-rw-r--r--', 69612,
                datetime(year=2015,
                         month=2,
                         day=20,
                         hour=22,
                         minute=15,
                         second=33,
                         tzinfo=dateutil.tz.tzlocal()), 'tmp/new'),
        ]
        # ... changing the timestamp one old file ...
        src_list[4] = _FileItem(
            '-rw-r--r--', 69612,
            datetime(year=2015,
                     month=2,
                     day=20,
                     hour=20,
                     minute=15,
                     second=33,
                     tzinfo=dateutil.tz.tzlocal()), 'tmp/diff_time')
        # ... and changing the size of another
        src_list[5] = _FileItem(
            '-rw-r--r--', 77777,
            datetime(year=2015,
                     month=2,
                     day=20,
                     hour=19,
                     minute=15,
                     second=33,
                     tzinfo=dateutil.tz.tzlocal()), 'tmp/diff_size')

        # Apply it to _list_files calls
        # (first call returns the reference list, second the source list)
        list_files_mock.side_effect = [ref_list, src_list]

        # Build the prerequisites
        server = build_real_server(
            global_conf={'barman_home': tmpdir.mkdir('home').strpath})
        config = server.config
        executor = server.backup_manager.executor

        # Create the RsyncCopyController putting the safe_horizon between
        # the tmp/safe and tmp2/check timestamps
        rcc = RsyncCopyController(
            path=server.path,
            ssh_command=executor.ssh_command,
            ssh_options=executor.ssh_options,
            network_compression=config.network_compression,
            reuse_backup=None,
            safe_horizon=datetime(year=2015,
                                  month=2,
                                  day=20,
                                  hour=19,
                                  minute=0,
                                  second=0,
                                  tzinfo=dateutil.tz.tzlocal()))

        backup_info = build_test_backup_info(
            server=server,
            pgdata="/pg/data",
            config_file="/etc/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/data/pg_ident.conf",
            begin_xlog="0/2000028",
            begin_wal="000000010000000000000002",
            begin_offset=28)
        backup_info.save()
        # This is to check that all the preparation is done correctly
        assert os.path.exists(backup_info.filename)

        # Add a temp dir (usually created by the copy method)
        rcc.temp_dir = tmpdir.mkdir('tmp').strpath

        # Create an item to inspect
        item = _RsyncCopyItem(label='pgdata',
                              src=':/pg/data/',
                              dst=backup_info.get_data_directory(),
                              is_directory=True,
                              item_class=rcc.PGDATA_CLASS,
                              optional=False)

        # Then run the _analyze_directory method
        rcc._analyze_directory(item)

        # Verify that _rsync_factory has been called correctly
        assert rsync_factory_mock.mock_calls == [
            mock.call(item),
        ]

        # Verify that _list_files has been called correctly
        assert list_files_mock.mock_calls == [
            mock.call(rsync_factory_mock.return_value,
                      backup_info.get_data_directory() + '/'),
            mock.call(rsync_factory_mock.return_value, ':/pg/data/')
        ]

        # Check the result
        # 1) The list of directories should be there and should contain all
        # the directories
        assert item.dir_file
        assert open(item.dir_file).read() == ('.\n' 'tmp\n')
        # The exclude_and_protect file should be populated correctly with all
        # the files in the source
        assert item.exclude_and_protect_file
        assert open(
            item.exclude_and_protect_file).read() == ('P tmp/safe\n'
                                                      '- tmp/safe\n'
                                                      'P tmp/check\n'
                                                      '- tmp/check\n'
                                                      'P tmp/diff_time\n'
                                                      '- tmp/diff_time\n'
                                                      'P tmp/diff_size\n'
                                                      '- tmp/diff_size\n'
                                                      'P tmp/new\n'
                                                      '- tmp/new\n')
        # The check list must contain identical files after the safe_horizon
        assert len(item.check_list) == 1
        assert item.check_list[0].path == 'tmp/check'
        # The safe list must contain every file that is not in check and is
        # present in the source
        assert len(item.safe_list) == 4
        assert item.safe_list[0].path == 'tmp/safe'
        assert item.safe_list[1].path == 'tmp/diff_time'
        assert item.safe_list[2].path == 'tmp/diff_size'
        assert item.safe_list[3].path == 'tmp/new'
Beispiel #50
0
    def test_backup_copy(self, rsync_mock, tmpdir):
        """
        Test the execution of a rsync copy

        Verifies that RsyncBackupExecutor.backup_copy builds the
        RsyncCopyController with the expected connection settings and
        registers the copy items (tablespaces, pgdata, pg_control,
        config file) in the expected order before invoking copy().

        :param rsync_mock: mock for the RsyncCopyController object
        :param tmpdir: temporary dir
        """
        backup_manager = build_backup_manager(
            global_conf={'barman_home': tmpdir.mkdir('home').strpath})
        # No explicit PATH override for the remote commands
        backup_manager.server.path = None
        backup_manager.server.postgres.server_major_version = '9.6'
        backup_info = build_test_backup_info(
            server=backup_manager.server,
            pgdata="/pg/data",
            config_file="/etc/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/data/pg_ident.conf",
            begin_xlog="0/2000028",
            begin_wal="000000010000000000000002",
            begin_offset=28)
        backup_info.save()
        # This is to check that all the preparation is done correctly
        assert os.path.exists(backup_info.filename)

        backup_manager.executor.backup_copy(backup_info)

        # Expected order: controller construction, the two tablespaces,
        # pgdata (excluding and protecting the tablespace mount points),
        # pg_control, the config file outside pgdata, and finally copy()
        assert rsync_mock.mock_calls == [
            mock.call(reuse_backup=None,
                      safe_horizon=None,
                      network_compression=False,
                      ssh_command='ssh',
                      path=None,
                      ssh_options=[
                          '-c', '"arcfour"', '-p', '22',
                          '*****@*****.**', '-o', 'BatchMode=yes', '-o',
                          'StrictHostKeyChecking=no'
                      ],
                      retry_sleep=30,
                      retry_times=0),
            mock.call().add_directory(
                label='tbs1',
                src=':/fake/location/',
                dst=backup_info.get_data_directory(16387),
                reuse=None,
                bwlimit=None,
                item_class=rsync_mock.return_value.TABLESPACE_CLASS,
                exclude=["/*"] + RsyncBackupExecutor.EXCLUDE_LIST,
                include=["/PG_9.6_*"]),
            mock.call().add_directory(
                label='tbs2',
                src=':/another/location/',
                dst=backup_info.get_data_directory(16405),
                reuse=None,
                bwlimit=None,
                item_class=rsync_mock.return_value.TABLESPACE_CLASS,
                exclude=["/*"] + RsyncBackupExecutor.EXCLUDE_LIST,
                include=["/PG_9.6_*"]),
            mock.call().add_directory(
                label='pgdata',
                src=':/pg/data/',
                dst=backup_info.get_data_directory(),
                reuse=None,
                bwlimit=None,
                item_class=rsync_mock.return_value.PGDATA_CLASS,
                exclude=RsyncBackupExecutor.PGDATA_EXCLUDE_LIST +
                RsyncBackupExecutor.EXCLUDE_LIST,
                exclude_and_protect=['pg_tblspc/16387', 'pg_tblspc/16405']),
            mock.call().add_file(
                label='pg_control',
                src=':/pg/data/global/pg_control',
                dst='%s/global/pg_control' % backup_info.get_data_directory(),
                item_class=rsync_mock.return_value.PGCONTROL_CLASS),
            mock.call().add_file(
                label='config_file',
                src=':/etc/postgresql.conf',
                dst=backup_info.get_data_directory(),
                item_class=rsync_mock.return_value.CONFIG_CLASS,
                optional=False),
            mock.call().copy(),
        ]
    def test_set_pitr_targets(self, tmpdir):
        """
        Evaluate targets for point in time recovery

        Exercises RecoveryExecutor._set_pitr_targets with no targets,
        valid targets, an out-of-range target time, and target actions
        across several PostgreSQL version thresholds.

        :param tmpdir: py.test temporary directory unique to the test
        """
        # Build basic folder/files structure
        tempdir = tmpdir.mkdir('temp_dir')
        dest = tmpdir.mkdir('dest')
        wal_dest = tmpdir.mkdir('wal_dest')
        recovery_info = {
            'configuration_files': ['postgresql.conf', 'postgresql.auto.conf'],
            'tempdir': tempdir.strpath,
            'results': {'changes': [], 'warnings': []},
            'is_pitr': False,
            'wal_dest': wal_dest.strpath,
            'get_wal': False,
        }
        backup_info = testing_helpers.build_test_backup_info(
            end_time=dateutil.parser.parse('2015-06-03 16:11:01.71038+02'))
        backup_manager = testing_helpers.build_backup_manager()
        # Build a recovery executor
        executor = RecoveryExecutor(backup_manager)
        executor._set_pitr_targets(recovery_info, backup_info,
                                   dest.strpath,
                                   '', '', '', '', '', False, None)
        # Test with empty values (no PITR)
        assert recovery_info['target_epoch'] is None
        assert recovery_info['target_datetime'] is None
        assert recovery_info['wal_dest'] == wal_dest.strpath

        # Test for PITR targets
        # (target_name, target_time and target_tli are provided)
        executor._set_pitr_targets(recovery_info, backup_info,
                                   dest.strpath,
                                   'target_name',
                                   '2015-06-03 16:11:03.71038+02',
                                   '2',
                                   None, '', False, None)
        target_datetime = dateutil.parser.parse(
            '2015-06-03 16:11:03.710380+02:00')
        target_epoch = (
            time.mktime(target_datetime.timetuple()) + (
                target_datetime.microsecond / 1000000.))

        assert recovery_info['target_datetime'] == target_datetime
        assert recovery_info['target_epoch'] == target_epoch
        # With PITR enabled the WALs are fetched into the destination
        assert recovery_info['wal_dest'] == dest.join('barman_wal').strpath

        # Test for too early PITR target
        # (requested target time precedes the backup end time)
        with pytest.raises(RecoveryInvalidTargetException) as exc_info:
            executor._set_pitr_targets(recovery_info, backup_info,
                                       dest.strpath,
                                       None,
                                       '2015-06-03 16:11:00.71038+02',
                                       None, None, None, False, None)
        assert str(exc_info.value) == \
            "The requested target time " \
            "2015-06-03 16:11:00.710380+02:00 " \
            "is before the backup end time " \
            "2015-06-03 16:11:01.710380+02:00"

        # Tests for PostgreSQL < 9.1 (no target action support at all)
        backup_info.version = 90000
        with pytest.raises(RecoveryTargetActionException) as exc_info:
            executor._set_pitr_targets(recovery_info, backup_info,
                                       dest.strpath,
                                       'target_name',
                                       '2015-06-03 16:11:03.71038+02',
                                       '2',
                                       None, None, False, 'pause')
        assert str(exc_info.value) == "Illegal target action 'pause' " \
                                      "for this version of PostgreSQL"

        # Tests for PostgreSQL between 9.1 and 9.4 included
        # ('pause' maps to pause_at_recovery_target, 'promote' is illegal)
        backup_info.version = 90100
        executor._set_pitr_targets(recovery_info, backup_info,
                                   dest.strpath,
                                   'target_name',
                                   '2015-06-03 16:11:03.71038+02',
                                   '2',
                                   None, None, False, None)
        assert 'pause_at_recovery_target' not in recovery_info

        executor._set_pitr_targets(recovery_info, backup_info,
                                   dest.strpath,
                                   'target_name',
                                   '2015-06-03 16:11:03.71038+02',
                                   '2',
                                   None, None, False, 'pause')
        assert recovery_info['pause_at_recovery_target'] == "on"
        del recovery_info['pause_at_recovery_target']

        with pytest.raises(RecoveryTargetActionException) as exc_info:
            executor._set_pitr_targets(recovery_info, backup_info,
                                       dest.strpath,
                                       'target_name',
                                       '2015-06-03 16:11:03.71038+02',
                                       '2',
                                       None, None, False, 'promote')
        assert str(exc_info.value) == "Illegal target action 'promote' " \
                                      "for this version of PostgreSQL"

        # Tests for PostgreSQL >= 9.5
        # (recovery_target_action supports 'pause' and 'promote')
        backup_info.version = 90500
        executor._set_pitr_targets(recovery_info, backup_info,
                                   dest.strpath,
                                   'target_name',
                                   '2015-06-03 16:11:03.71038+02',
                                   '2',
                                   None, None, False, 'pause')
        assert recovery_info['recovery_target_action'] == "pause"

        executor._set_pitr_targets(recovery_info, backup_info,
                                   dest.strpath,
                                   'target_name',
                                   '2015-06-03 16:11:03.71038+02',
                                   '2',
                                   None, None, False, 'promote')
        assert recovery_info['recovery_target_action'] == "promote"

        with pytest.raises(RecoveryTargetActionException) as exc_info:
            executor._set_pitr_targets(recovery_info, backup_info,
                                       dest.strpath,
                                       'target_name',
                                       '2015-06-03 16:11:03.71038+02',
                                       '2',
                                       None, None, False, 'unavailable')
        assert str(exc_info.value) == "Illegal target action 'unavailable' " \
                                      "for this version of PostgreSQL"

        # Recovery target action should not be available if PITR is not
        # enabled
        backup_info.version = 90500
        with pytest.raises(RecoveryTargetActionException) as exc_info:
            executor._set_pitr_targets(recovery_info, backup_info,
                                       dest.strpath,
                                       None,
                                       None,
                                       None,
                                       None,
                                       None, False, 'pause')
        assert str(exc_info.value) == "Can't enable recovery target action " \
                                      "when PITR is not required"

        # Test that we are not using target_lsn with a version < 10
        backup_info.version = 90500
        with pytest.raises(RecoveryInvalidTargetException) as exc_info:
            executor._set_pitr_targets(recovery_info, backup_info,
                                       dest.strpath,
                                       None,
                                       None,
                                       None,
                                       None,
                                       10000, False, 'pause')
        assert str(exc_info.value) == "Illegal use of recovery_target_lsn " \
                                      "'10000' for this version " \
                                      "of PostgreSQL " \
                                      "(version 10 minimum required)"

        # Test that we are not using target_immediate with a version < 9.4
        backup_info.version = 90300
        with pytest.raises(RecoveryInvalidTargetException) as exc_info:
            executor._set_pitr_targets(recovery_info, backup_info,
                                       dest.strpath,
                                       None,
                                       None,
                                       None,
                                       None,
                                       None, True, 'pause')
        assert str(exc_info.value) == "Illegal use of " \
                                      "recovery_target_immediate " \
                                      "for this version " \
                                      "of PostgreSQL " \
                                      "(version 9.4 minimum required)"
Beispiel #52
0
    def test_backup_copy(self, remote_mock, pg_basebackup_mock, tmpdir):
        """
        Test backup folder structure

        Verifies that the postgres backup executor invokes PgBaseBackup
        with the expected arguments: bwlimit is ignored for pg_basebackup
        versions lacking bandwidth_limit support and passed through for
        newer versions; a CommandFailedException from pg_basebackup is
        re-raised as a DataTransferFailure.

        :param remote_mock: mock for the fetch_remote_status method
        :param pg_basebackup_mock: mock for the PgBaseBackup object
        :param tmpdir: pytest temp directory
        """
        backup_manager = build_backup_manager(
            global_conf={
                'barman_home': tmpdir.mkdir('home').strpath,
                'backup_method': 'postgres'
            })
        # simulate a old version of pg_basebackup
        # not supporting bandwidth_limit
        remote_mock.return_value = {
            'pg_basebackup_version': '9.2',
            'pg_basebackup_path': '/fake/path',
            'pg_basebackup_bwlimit': False,
        }
        server_mock = backup_manager.server
        streaming_mock = server_mock.streaming
        server_mock.config.bandwidth_limit = 1
        streaming_mock.get_connection_string.return_value = 'fake=connstring'
        streaming_mock.conn_parameters = {
            'host': 'fakeHost',
            'port': 'fakePort',
            'user': '******'
        }
        backup_info = build_test_backup_info(server=backup_manager.server,
                                             backup_id='fake_backup_id')
        backup_manager.executor.backup_copy(backup_info)
        # check that the bwlimit option have been ignored
        # (bwlimit=None despite config.bandwidth_limit being set)
        assert pg_basebackup_mock.mock_calls == [
            mock.call(connection=mock.ANY,
                      version='9.2',
                      app_name='barman_streaming_backup',
                      destination=mock.ANY,
                      command='/fake/path',
                      tbs_mapping=mock.ANY,
                      bwlimit=None,
                      immediate=False,
                      retry_times=0,
                      retry_sleep=30,
                      retry_handler=mock.ANY,
                      path=mock.ANY),
            mock.call()(),
        ]

        # Check with newer version
        # (reset the cached remote status so it is fetched again)
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        backup_manager.executor._remote_status = None
        remote_mock.return_value = {
            'pg_basebackup_version': '9.5',
            'pg_basebackup_path': '/fake/path',
            'pg_basebackup_bwlimit': True,
        }
        backup_manager.executor.config.immediate_checkpoint = True
        backup_manager.executor.config.streaming_conninfo = 'fake=connstring'
        backup_manager.executor.backup_copy(backup_info)
        # check that the bwlimit option have been passed to the test call
        assert pg_basebackup_mock.mock_calls == [
            mock.call(connection=mock.ANY,
                      version='9.5',
                      app_name='barman_streaming_backup',
                      destination=mock.ANY,
                      command='/fake/path',
                      tbs_mapping=mock.ANY,
                      bwlimit=1,
                      immediate=True,
                      retry_times=0,
                      retry_sleep=30,
                      retry_handler=mock.ANY,
                      path=mock.ANY),
            mock.call()(),
        ]

        # Raise a test CommandFailedException and expect it to be wrapped
        # inside a DataTransferFailure exception
        remote_mock.reset_mock()
        pg_basebackup_mock.reset_mock()
        pg_basebackup_mock.return_value.side_effect = \
            CommandFailedException(dict(ret='ret', out='out', err='err'))
        with pytest.raises(DataTransferFailure):
            backup_manager.executor.backup_copy(backup_info)
    def test_generate_recovery_conf_pre12(self, rsync_pg_mock, tmpdir):
        """
        Test the generation of recovery.conf file
        """
        # Build basic folder/files structure
        recovery_info = {
            'configuration_files': ['postgresql.conf', 'postgresql.auto.conf'],
            'tempdir': tmpdir.strpath,
            'results': {'changes': [], 'warnings': []},
            'get_wal': False,
        }
        backup_info = testing_helpers.build_test_backup_info()
        dest = tmpdir.mkdir('destination')

        # Build a recovery executor using a real server
        server = testing_helpers.build_real_server()
        executor = RecoveryExecutor(server.backup_manager)
        executor._generate_recovery_conf(recovery_info, backup_info,
                                         dest.strpath,
                                         True, True, 'remote@command',
                                         'target_name',
                                         '2015-06-03 16:11:03.71038+02', '2',
                                         '', '', None)

        # Check that the recovery.conf file exists
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert recovery_conf_file.check()
        # Parse the generated recovery.conf
        recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
        # check for contents
        assert 'recovery_end_command' in recovery_conf
        assert 'recovery_target_time' in recovery_conf
        assert 'recovery_target_timeline' in recovery_conf
        assert 'recovery_target_xid' not in recovery_conf
        assert 'recovery_target_lsn' not in recovery_conf
        assert 'recovery_target_name' in recovery_conf
        assert 'recovery_target' not in recovery_conf
        assert recovery_conf['recovery_end_command'] == "'rm -fr barman_wal'"
        assert recovery_conf['recovery_target_time'] == \
            "'2015-06-03 16:11:03.71038+02'"
        assert recovery_conf['recovery_target_timeline'] == '2'
        assert recovery_conf['recovery_target_name'] == "'target_name'"

        # Test 'pause_at_recovery_target' recovery_info entry
        recovery_info['pause_at_recovery_target'] = 'on'
        executor._generate_recovery_conf(recovery_info, backup_info,
                                         dest.strpath,
                                         True, True, 'remote@command',
                                         'target_name',
                                         '2015-06-03 16:11:03.71038+02', '2',
                                         '', '', None)
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert recovery_conf_file.check()
        recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
        assert recovery_conf['pause_at_recovery_target'] == "'on'"

        # Test 'recovery_target_action'
        del recovery_info['pause_at_recovery_target']
        recovery_info['recovery_target_action'] = 'pause'
        executor._generate_recovery_conf(recovery_info, backup_info,
                                         dest.strpath,
                                         True, True, 'remote@command',
                                         'target_name',
                                         '2015-06-03 16:11:03.71038+02', '2',
                                         '', '', None)
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert recovery_conf_file.check()
        recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
        assert recovery_conf['recovery_target_action'] == "'pause'"

        # Test 'standby_mode'
        executor._generate_recovery_conf(recovery_info, backup_info,
                                         dest.strpath,
                                         True, True, 'remote@command',
                                         'target_name',
                                         '2015-06-03 16:11:03.71038+02', '2',
                                         '', '', True)
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert recovery_conf_file.check()
        recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
        assert recovery_conf['standby_mode'] == "'on'"

        executor._generate_recovery_conf(recovery_info, backup_info,
                                         dest.strpath,
                                         True, True, 'remote@command',
                                         'target_name',
                                         '2015-06-03 16:11:03.71038+02', '2',
                                         '', '', False)
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert recovery_conf_file.check()
        recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
        assert 'standby_mode' not in recovery_conf

        executor._generate_recovery_conf(recovery_info, backup_info,
                                         dest.strpath,
                                         True, True, 'remote@command',
                                         'target_name',
                                         '2015-06-03 16:11:03.71038+02', '2',
                                         '', '', None)
        recovery_conf_file = tmpdir.join("recovery.conf")
        assert recovery_conf_file.check()
        recovery_conf = testing_helpers.parse_recovery_conf(recovery_conf_file)
        assert 'standby_mode' not in recovery_conf
    def test_generate_recovery_conf(self, rsync_pg_mock, tmpdir):
        """
        Test the generation of recovery configuration on PostgreSQL 12.

        For version >= 12 the recovery settings must be appended to
        postgresql.auto.conf and signalled through recovery.signal or
        standby.signal; a recovery.conf file must never be written.

        :type tmpdir: py.path.local
        """
        # Build basic folder/files structure
        recovery_info = {
            'configuration_files': ['postgresql.conf', 'postgresql.auto.conf'],
            'tempdir': tmpdir.strpath,
            'results': {'changes': [], 'warnings': []},
            'get_wal': False,
        }
        backup_info = testing_helpers.build_test_backup_info(version=120000)
        dest = tmpdir.mkdir('destination')

        # Build a recovery executor using a real server
        server = testing_helpers.build_real_server()
        executor = RecoveryExecutor(server.backup_manager)

        # Paths checked after every invocation
        recovery_conf_file = tmpdir.join("recovery.conf")
        recovery_signal = tmpdir.join("recovery.signal")
        standby_signal = tmpdir.join("standby.signal")

        def run_generate(standby_mode):
            # Invoke the method under test with fixed recovery targets,
            # varying only the standby_mode flag
            executor._generate_recovery_conf(
                recovery_info, backup_info, dest.strpath, True, True,
                'remote@command', 'target_name',
                '2015-06-03 16:11:03.71038+02', '2', '', '', standby_mode)

        run_generate(None)
        # No recovery.conf on PostgreSQL 12 ...
        assert not recovery_conf_file.check()
        # ... a recovery.signal file is created instead
        assert recovery_signal.check()
        # Parse the generated recovery configuration
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        # check for contents
        assert 'recovery_end_command' in pg_auto_conf
        assert 'recovery_target_time' in pg_auto_conf
        assert 'recovery_target_timeline' in pg_auto_conf
        assert 'recovery_target_xid' not in pg_auto_conf
        assert 'recovery_target_lsn' not in pg_auto_conf
        assert 'recovery_target_name' in pg_auto_conf
        assert 'recovery_target' in pg_auto_conf
        assert pg_auto_conf['recovery_end_command'] == "'rm -fr barman_wal'"
        assert (pg_auto_conf['recovery_target_time'] ==
                "'2015-06-03 16:11:03.71038+02'")
        assert pg_auto_conf['recovery_target_timeline'] == '2'
        assert pg_auto_conf['recovery_target_name'] == "'target_name'"

        # Test 'pause_at_recovery_target' recovery_info entry
        recovery_signal.remove()
        recovery_info['pause_at_recovery_target'] = 'on'
        run_generate(None)
        assert not recovery_conf_file.check()
        assert recovery_signal.check()
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        assert pg_auto_conf['pause_at_recovery_target'] == "'on'"

        # Test 'recovery_target_action'
        recovery_signal.remove()
        del recovery_info['pause_at_recovery_target']
        recovery_info['recovery_target_action'] = 'pause'
        run_generate(None)
        assert not recovery_conf_file.check()
        assert recovery_signal.check()
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        assert pg_auto_conf['recovery_target_action'] == "'pause'"

        # Test 'standby_mode': True must produce standby.signal
        recovery_signal.remove()
        run_generate(True)
        assert not recovery_conf_file.check()
        assert not recovery_signal.check()
        assert standby_signal.check()
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        # standby_mode is not a valid configuration in PostgreSQL 12
        assert 'standby_mode' not in pg_auto_conf

        # standby_mode False must produce recovery.signal
        standby_signal.remove()
        run_generate(False)
        assert not recovery_conf_file.check()
        assert not standby_signal.check()
        assert recovery_signal.check()
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        # standby_mode is not a valid configuration in PostgreSQL 12
        assert 'standby_mode' not in pg_auto_conf

        # standby_mode None behaves like False
        recovery_signal.remove()
        run_generate(None)
        assert not recovery_conf_file.check()
        assert not standby_signal.check()
        assert recovery_signal.check()
        pg_auto_conf = self.parse_auto_conf_lines(recovery_info)
        # standby_mode is not a valid configuration in PostgreSQL 12
        assert 'standby_mode' not in pg_auto_conf
 def test_recovery(self, remote_cmd_mock, rsync_pg_mock,
                   copy_controller_mock, tmpdir):
     """
     Test the execution of a recovery.

     Runs a local recovery and then a remote recovery against a fake
     PostgreSQL 9.4 backup, checking the recovery_info dictionary
     returned by RecoveryExecutor.recover in both cases.  Finally it
     verifies that a failing rsync propagates CommandFailedException.
     """
     # Prepare basic directory/files structure
     dest = tmpdir.mkdir('destination')
     base = tmpdir.mkdir('base')
     wals = tmpdir.mkdir('wals')
     backup_info = testing_helpers.build_test_backup_info(tablespaces=[])
     backup_info.config.basebackups_directory = base.strpath
     backup_info.config.wals_directory = wals.strpath
     backup_info.version = 90400
     datadir = base.mkdir(backup_info.backup_id).mkdir('data')
     backup_info.pgdata = datadir.strpath
     postgresql_conf_local = datadir.join('postgresql.conf')
     postgresql_auto_local = datadir.join('postgresql.auto.conf')
     # Both config files carry one setting the recovery must rewrite
     # (archive_command) and one it must only warn about (data_directory)
     postgresql_conf_local.write('archive_command = something\n'
                                 'data_directory = something')
     postgresql_auto_local.write('archive_command = something\n'
                                 'data_directory = something')
     shutil.copy2(postgresql_conf_local.strpath, dest.strpath)
     shutil.copy2(postgresql_auto_local.strpath, dest.strpath)
     # Avoid triggering warning for missing config files
     datadir.ensure('pg_hba.conf')
     datadir.ensure('pg_ident.conf')
     # Build an executor
     server = testing_helpers.build_real_server(
         global_conf={
             "barman_lock_directory": tmpdir.mkdir('lock').strpath
         },
         main_conf={
             "wals_directory": wals.strpath
         })
     executor = RecoveryExecutor(server.backup_manager)
     # test local recovery
     with closing(executor):
         rec_info = executor.recover(backup_info, dest.strpath,
                                     exclusive=True)
     # remove not useful keys from the result
     del rec_info['cmd']
     sys_tempdir = rec_info['tempdir']
     # Local case: configuration files are edited in place inside the
     # destination directory
     assert rec_info == {
         'rsync': None,
         'tempdir': sys_tempdir,
         'wal_dest': dest.join('pg_xlog').strpath,
         'recovery_dest': 'local',
         'destination_path': dest.strpath,
         'temporary_configuration_files': [
             dest.join('postgresql.conf').strpath,
             dest.join('postgresql.auto.conf').strpath],
         'results': {
             'delete_barman_wal': False,
             'recovery_start_time': rec_info['results'][
                 'recovery_start_time'
             ],
             'get_wal': False,
             'changes': [
                 Assertion._make([
                     'postgresql.conf',
                     0,
                     'archive_command',
                     'false']),
                 Assertion._make([
                     'postgresql.auto.conf',
                     0,
                     'archive_command',
                     'false'])],
             'missing_files': [],
             'recovery_configuration_file': 'recovery.conf',
             'warnings': [
                 Assertion._make([
                     'postgresql.conf',
                     2,
                     'data_directory',
                     'something']),
                 Assertion._make([
                     'postgresql.auto.conf',
                     2,
                     'data_directory',
                     'something'])]},
         'target_epoch': None,
         'configuration_files': [
             'postgresql.conf',
             'postgresql.auto.conf'],
         'target_datetime': None,
         'safe_horizon': None,
         'is_pitr': False,
         'get_wal': False,
     }
     # test remote recovery
     with closing(executor):
         rec_info = executor.recover(backup_info, dest.strpath,
                                     remote_command="remote@command",
                                     exclusive=True)
     # remove not useful keys from the result
     del rec_info['cmd']
     del rec_info['rsync']
     sys_tempdir = rec_info['tempdir']
     # Remote case: configuration files are staged in a local temporary
     # directory before being shipped to the destination
     assert rec_info == {
         'tempdir': sys_tempdir,
         'wal_dest': dest.join('pg_xlog').strpath,
         'recovery_dest': 'remote',
         'destination_path': dest.strpath,
         'temporary_configuration_files': [
             os.path.join(sys_tempdir, 'postgresql.conf'),
             os.path.join(sys_tempdir, 'postgresql.auto.conf')],
         'results': {
             'delete_barman_wal': False,
             'get_wal': False,
             'recovery_start_time': rec_info['results'][
                 'recovery_start_time'
             ],
             'changes': [
                 Assertion._make([
                     'postgresql.conf',
                     0,
                     'archive_command',
                     'false']),
                 Assertion._make([
                     'postgresql.auto.conf',
                     0,
                     'archive_command',
                     'false'])],
             'missing_files': [],
             'recovery_configuration_file': 'recovery.conf',
             'warnings': [
                 Assertion._make([
                     'postgresql.conf',
                     2,
                     'data_directory',
                     'something']),
                 Assertion._make([
                     'postgresql.auto.conf',
                     2,
                     'data_directory',
                     'something'])]},
         'target_epoch': None,
         'configuration_files': [
             'postgresql.conf',
             'postgresql.auto.conf'],
         'target_datetime': None,
         'safe_horizon': None,
         'is_pitr': False,
         'get_wal': False,
     }
     # test failed rsync
     rsync_pg_mock.side_effect = CommandFailedException()
     with pytest.raises(CommandFailedException):
         with closing(executor):
             executor.recover(backup_info, dest.strpath, exclusive=True,
                              remote_command="remote@command")
Example #56
0
    def test_check_redundancy(self, tmpdir):
        """
        Test the check method.

        Exercises the minimum_redundancy check, the failed-backups
        check and the compression checks of BackupManager.check.
        """
        # Build a backup_manager with minimum_redundancy = 1 and a
        # mocked executor so no real backup machinery is involved
        manager = build_backup_manager(
            name="TestServer",
            global_conf={"barman_home": tmpdir.strpath, "minimum_redundancy": "1"},
        )
        manager.executor = mock.MagicMock()

        # The strategy mock records the outcome of each check
        check_strategy = mock.MagicMock()

        # No backups yet: minimum_redundancy is not satisfied
        manager.check(check_strategy)
        check_strategy.result.assert_called_with(
            "TestServer", False, hint="have 0 backups, expected at least 1"
        )

        # One DONE backup in the catalog: minimum_redundancy is satisfied
        done_backup = build_test_backup_info(
            backup_id="fake_backup_id",
            server=manager.server,
        )
        done_backup.save()
        check_strategy.reset_mock()
        manager._load_backup_cache()
        manager.check(check_strategy)
        check_strategy.result.assert_called_with(
            "TestServer", True, hint="have 1 backups, expected at least 1"
        )

        # No failed backups in the catalog: the failed-backups check passes
        check_strategy.reset_mock()
        manager._load_backup_cache()
        manager.check(check_strategy)
        check_strategy.result.assert_any_call(
            "TestServer", True, hint="there are 0 failed backups"
        )

        # A FAILED backup in the catalog makes the failed-backups check fail
        failed_backup = build_test_backup_info(
            backup_id="failed_backup_id",
            server=manager.server,
            status=BackupInfo.FAILED,
        )
        failed_backup.save()
        check_strategy.reset_mock()
        manager._load_backup_cache()
        manager.check(check_strategy)
        check_strategy.result.assert_any_call(
            "TestServer", False, hint="there are 1 failed backups"
        )

        # An unknown compression makes the compression check fail
        manager.config.compression = "test_compression"
        manager.compression_manager.check.return_value = False
        check_strategy.reset_mock()
        manager.check(check_strategy)
        check_strategy.result.assert_any_call("TestServer", False)

        # A valid compression makes the compression check succeed
        manager.config.compression = "test_compression"
        manager.compression_manager.check.return_value = True
        check_strategy.reset_mock()
        manager.check(check_strategy)
        check_strategy.result.assert_any_call("TestServer", True)

        # Failure retrieving a compressor makes the check fail even when
        # the compression itself is valid
        manager.config.compression = "test_compression"
        manager.compression_manager.check.return_value = True
        manager.compression_manager.get_default_compressor.side_effect = (
            CompressionIncompatibility()
        )
        check_strategy.reset_mock()
        manager.check(check_strategy)
        check_strategy.result.assert_any_call("TestServer", False)
 def test_recovery(
     self, remote_cmd_mock, rsync_pg_mock, copy_controller_mock, tmpdir
 ):
     """
     Test the execution of a recovery.

     Runs a local recovery and then a remote recovery against a fake
     PostgreSQL 9.4 backup, checking the recovery_info dictionary
     returned by RecoveryExecutor.recover in both cases.  Finally it
     verifies that a failing rsync propagates CommandFailedException.
     """
     # Prepare basic directory/files structure
     dest = tmpdir.mkdir("destination")
     base = tmpdir.mkdir("base")
     wals = tmpdir.mkdir("wals")
     backup_info = testing_helpers.build_test_backup_info(tablespaces=[])
     backup_info.config.basebackups_directory = base.strpath
     backup_info.config.wals_directory = wals.strpath
     backup_info.version = 90400
     datadir = base.mkdir(backup_info.backup_id).mkdir("data")
     backup_info.pgdata = datadir.strpath
     postgresql_conf_local = datadir.join("postgresql.conf")
     postgresql_auto_local = datadir.join("postgresql.auto.conf")
     # Both config files carry one setting the recovery must rewrite
     # (archive_command) and one it must only warn about (data_directory)
     postgresql_conf_local.write(
         "archive_command = something\n" "data_directory = something"
     )
     postgresql_auto_local.write(
         "archive_command = something\n" "data_directory = something"
     )
     shutil.copy2(postgresql_conf_local.strpath, dest.strpath)
     shutil.copy2(postgresql_auto_local.strpath, dest.strpath)
     # Avoid triggering warning for missing config files
     datadir.ensure("pg_hba.conf")
     datadir.ensure("pg_ident.conf")
     # Build an executor
     server = testing_helpers.build_real_server(
         global_conf={"barman_lock_directory": tmpdir.mkdir("lock").strpath},
         main_conf={"wals_directory": wals.strpath},
     )
     executor = RecoveryExecutor(server.backup_manager)
     # test local recovery
     with closing(executor):
         rec_info = executor.recover(backup_info, dest.strpath, exclusive=True)
     # remove not useful keys from the result
     del rec_info["cmd"]
     sys_tempdir = rec_info["tempdir"]
     # Local case: configuration files are edited in place inside the
     # destination directory
     assert rec_info == {
         "rsync": None,
         "tempdir": sys_tempdir,
         "wal_dest": dest.join("pg_xlog").strpath,
         "recovery_dest": "local",
         "destination_path": dest.strpath,
         "temporary_configuration_files": [
             dest.join("postgresql.conf").strpath,
             dest.join("postgresql.auto.conf").strpath,
         ],
         "results": {
             "delete_barman_wal": False,
             "recovery_start_time": rec_info["results"]["recovery_start_time"],
             "get_wal": False,
             "changes": [
                 Assertion._make(["postgresql.conf", 0, "archive_command", "false"]),
                 Assertion._make(
                     ["postgresql.auto.conf", 0, "archive_command", "false"]
                 ),
             ],
             "missing_files": [],
             "recovery_configuration_file": "recovery.conf",
             "warnings": [
                 Assertion._make(
                     ["postgresql.conf", 2, "data_directory", "something"]
                 ),
                 Assertion._make(
                     ["postgresql.auto.conf", 2, "data_directory", "something"]
                 ),
             ],
         },
         "target_epoch": None,
         "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
         "target_datetime": None,
         "safe_horizon": None,
         "is_pitr": False,
         "get_wal": False,
     }
     # test remote recovery
     with closing(executor):
         rec_info = executor.recover(
             backup_info,
             dest.strpath,
             remote_command="remote@command",
             exclusive=True,
         )
     # remove not useful keys from the result
     del rec_info["cmd"]
     del rec_info["rsync"]
     sys_tempdir = rec_info["tempdir"]
     # Remote case: configuration files are staged in a local temporary
     # directory before being shipped to the destination
     assert rec_info == {
         "tempdir": sys_tempdir,
         "wal_dest": dest.join("pg_xlog").strpath,
         "recovery_dest": "remote",
         "destination_path": dest.strpath,
         "temporary_configuration_files": [
             os.path.join(sys_tempdir, "postgresql.conf"),
             os.path.join(sys_tempdir, "postgresql.auto.conf"),
         ],
         "results": {
             "delete_barman_wal": False,
             "get_wal": False,
             "recovery_start_time": rec_info["results"]["recovery_start_time"],
             "changes": [
                 Assertion._make(["postgresql.conf", 0, "archive_command", "false"]),
                 Assertion._make(
                     ["postgresql.auto.conf", 0, "archive_command", "false"]
                 ),
             ],
             "missing_files": [],
             "recovery_configuration_file": "recovery.conf",
             "warnings": [
                 Assertion._make(
                     ["postgresql.conf", 2, "data_directory", "something"]
                 ),
                 Assertion._make(
                     ["postgresql.auto.conf", 2, "data_directory", "something"]
                 ),
             ],
         },
         "target_epoch": None,
         "configuration_files": ["postgresql.conf", "postgresql.auto.conf"],
         "target_datetime": None,
         "safe_horizon": None,
         "is_pitr": False,
         "get_wal": False,
     }
     # test failed rsync
     rsync_pg_mock.side_effect = CommandFailedException()
     with pytest.raises(CommandFailedException):
         with closing(executor):
             executor.recover(
                 backup_info,
                 dest.strpath,
                 exclusive=True,
                 remote_command="remote@command",
             )
Example #58
0
    def test_delete_backup(self, mock_available_backups, tmpdir, caplog):
        """
        Simple test for the deletion of a backup.
        We want to test the behaviour of the delete_backup method
        """
        # Setup of the test backup_manager
        backup_manager = build_backup_manager()
        backup_manager.server.config.name = 'TestServer'
        backup_manager.server.config.barman_lock_directory = tmpdir.strpath
        backup_manager.server.config.backup_options = []

        # Create a fake backup directory inside tmpdir (old format)

        base_dir = tmpdir.mkdir('base')
        backup_dir = base_dir.mkdir('fake_backup_id')
        pg_data = backup_dir.mkdir('pgdata')
        pg_data_v2 = backup_dir.mkdir('data')
        wal_dir = tmpdir.mkdir('wals')
        wal_history_file02 = wal_dir.join('00000002.history')
        wal_history_file03 = wal_dir.join('00000003.history')
        wal_history_file04 = wal_dir.join('00000004.history')
        wal_history_file02.write('1\t0/2000028\tat restore point "myrp"\n')
        wal_history_file03.write('1\t0/2000028\tat restore point "myrp"\n')
        wal_history_file04.write('1\t0/2000028\tat restore point "myrp"\n')
        wal_history_file04.write('2\t0/3000028\tunknown\n')
        wal_file = wal_dir.join('0000000100000000/000000010000000000000001')
        wal_file.ensure()
        xlog_db = wal_dir.join('xlog.db')
        xlog_db.write(
            '000000010000000000000001\t42\t43\tNone\n'
            '00000002.history\t42\t43\tNone\n'
            '00000003.history\t42\t43\tNone\n'
            '00000004.history\t42\t43\tNone\n')
        backup_manager.server.xlogdb.return_value.__enter__.return_value = (
            xlog_db.open())
        backup_manager.server.config.basebackups_directory = base_dir.strpath
        backup_manager.server.config.wals_directory = wal_dir.strpath
        # The following tablespaces are defined in the default backup info
        # generated by build_test_backup_info
        b_info = build_test_backup_info(
            backup_id='fake_backup_id',
            server=backup_manager.server,
        )

        # Make sure we are not trying to delete any WAL file,
        # just by having a previous backup
        b_pre_info = build_test_backup_info(
            backup_id='fake_backup',
            server=backup_manager.server,
        )
        mock_available_backups.return_value = {
            "fake_backup": b_pre_info,
            "fake_backup_id": b_info,
        }

        # Test 1: minimum redundancy not satisfied
        caplog_reset(caplog)
        backup_manager.server.config.minimum_redundancy = 2
        b_info.set_attribute('backup_version', 1)
        build_backup_directories(b_info)
        backup_manager.delete_backup(b_info)
        assert 'WARNING  Skipping delete of backup ' in caplog.text
        assert 'ERROR' not in caplog.text
        assert os.path.exists(pg_data.strpath)
        assert not os.path.exists(pg_data_v2.strpath)
        assert os.path.exists(wal_file.strpath)
        assert os.path.exists(wal_history_file02.strpath)
        assert os.path.exists(wal_history_file03.strpath)
        assert os.path.exists(wal_history_file04.strpath)

        # Test 2: normal delete expecting no errors (old format)
        caplog_reset(caplog)
        backup_manager.server.config.minimum_redundancy = 1
        b_info.set_attribute('backup_version', 1)
        build_backup_directories(b_info)
        backup_manager.delete_backup(b_info)
        # the backup must not exists on disk anymore
        assert 'WARNING' not in caplog.text
        assert 'ERROR' not in caplog.text
        assert not os.path.exists(pg_data.strpath)
        assert not os.path.exists(pg_data_v2.strpath)
        assert os.path.exists(wal_file.strpath)
        assert os.path.exists(wal_history_file02.strpath)
        assert os.path.exists(wal_history_file03.strpath)
        assert os.path.exists(wal_history_file04.strpath)

        # Test 3: delete the backup again, expect a failure in log
        caplog_reset(caplog)
        backup_manager.delete_backup(b_info)
        assert 'ERROR    Failure deleting backup fake_backup_id' in caplog.text
        assert not os.path.exists(pg_data.strpath)
        assert not os.path.exists(pg_data_v2.strpath)
        assert os.path.exists(wal_file.strpath)
        assert os.path.exists(wal_history_file02.strpath)
        assert os.path.exists(wal_history_file03.strpath)
        assert os.path.exists(wal_history_file04.strpath)

        # Test 4: normal delete expecting no errors (new format)
        caplog_reset(caplog)
        b_info.set_attribute('backup_version', 2)
        build_backup_directories(b_info)
        backup_manager.delete_backup(b_info)
        assert 'WARNING' not in caplog.text
        assert 'ERROR' not in caplog.text
        assert not os.path.exists(pg_data.strpath)
        assert not os.path.exists(pg_data_v2.strpath)
        assert os.path.exists(wal_file.strpath)
        assert os.path.exists(wal_history_file02.strpath)
        assert os.path.exists(wal_history_file03.strpath)
        assert os.path.exists(wal_history_file04.strpath)

        # Test 5: normal delete of first backup no errors and no skip
        # removing one of the two backups present (new format)
        # and all the previous wal
        caplog_reset(caplog)
        b_pre_info.set_attribute('backup_version', 2)
        build_backup_directories(b_pre_info)
        backup_manager.delete_backup(b_pre_info)
        assert 'WARNING' not in caplog.text
        assert 'ERROR' not in caplog.text
        assert not os.path.exists(pg_data.strpath)
        assert not os.path.exists(pg_data_v2.strpath)
        assert not os.path.exists(wal_file.strpath)
        assert os.path.exists(wal_history_file02.strpath)
        assert os.path.exists(wal_history_file03.strpath)
        assert os.path.exists(wal_history_file04.strpath)

        # Test 6: normal delete of first backup no errors and no skip
        # removing one of the two backups present (new format)
        # the previous wal is retained as on a different timeline
        caplog_reset(caplog)
        wal_file.ensure()
        b_pre_info.set_attribute('timeline', 2)
        b_pre_info.set_attribute('backup_version', 2)
        build_backup_directories(b_pre_info)
        backup_manager.delete_backup(b_pre_info)
        assert 'WARNING' not in caplog.text
        assert 'ERROR' not in caplog.text
        assert not os.path.exists(pg_data.strpath)
        assert not os.path.exists(pg_data_v2.strpath)
        assert os.path.exists(wal_file.strpath)
        assert os.path.exists(wal_history_file02.strpath)
        assert os.path.exists(wal_history_file03.strpath)
        assert os.path.exists(wal_history_file04.strpath)

        # Test 7: simulate an error deleting the backup.
        with patch('barman.backup.BackupManager.delete_backup_data')\
                as mock_delete_data:
            caplog_reset(caplog)
            # We force delete_pgdata method to raise an exception.
            mock_delete_data.side_effect = OSError('TestError')
            wal_file.ensure()
            b_pre_info.set_attribute('backup_version', 2)
            build_backup_directories(b_pre_info)
            backup_manager.delete_backup(b_info)
            assert 'TestError' in caplog.text
            assert os.path.exists(wal_file.strpath)
            assert os.path.exists(wal_history_file02.strpath)
            assert os.path.exists(wal_history_file03.strpath)
            assert os.path.exists(wal_history_file04.strpath)
    def test_statistics(self, signal_mock, tempfile_mock, copy_mock,
                        create_and_purge_mock, analyse_mock, rsync_mock,
                        tmpdir, workers):
        """
        Unit test for RsyncCopyController.statistics's code.

        Performs a fake (fully mocked) copy run so that the controller
        records per-item timing data, then verifies that statistics()
        returns every expected timing key with a positive value.
        The mock parameters are injected by @patch decorators declared
        above this method (outside this view); `workers` is presumably
        parametrized so several pool sizes are exercised — TODO confirm.
        """

        # Do a fake copy run to populate the start/stop timestamps.
        # The steps are the same as in the full run test
        tempdir = tmpdir.mkdir('tmp')
        tempfile_mock.return_value = tempdir.strpath
        server = build_real_server(
            global_conf={'barman_home': tmpdir.mkdir('home').strpath})
        config = server.config
        executor = server.backup_manager.executor

        # Controller under test, configured from the fake server/executor.
        rcc = RsyncCopyController(
            path=server.path,
            ssh_command=executor.ssh_command,
            ssh_options=executor.ssh_options,
            network_compression=config.network_compression,
            reuse_backup=None,
            safe_horizon=None,
            workers=workers)

        # Minimal backup metadata the copy items are attached to.
        backup_info = build_test_backup_info(
            server=server,
            pgdata="/pg/data",
            config_file="/etc/postgresql.conf",
            hba_file="/pg/data/pg_hba.conf",
            ident_file="/pg/data/pg_ident.conf",
            begin_xlog="0/2000028",
            begin_wal="000000010000000000000002",
            begin_offset=28)
        backup_info.save()
        # This is to check that all the preparation is done correctly
        assert os.path.exists(backup_info.filename)

        # Silence the access to result properties
        rsync_mock.return_value.out = ''
        rsync_mock.return_value.err = ''
        rsync_mock.return_value.ret = 0

        # Mock analyze directory: fake the analysis phase by attaching the
        # attributes the real analysis step would have produced for an item.
        def analyse_func(item):
            l = item.label
            item.dir_file = l + '_dir_file'
            item.exclude_and_protect_file = l + '_exclude_and_protect_file'
            item.safe_list = [_FileItem('mode', 1, 'date', 'path')]
            item.check_list = [_FileItem('mode', 1, 'date', 'path')]

        analyse_mock.side_effect = analyse_func

        # Register the items to copy: two tablespaces, the PGDATA directory
        # (with tablespace locations excluded and protected), the pg_control
        # file, and the configuration file.
        rcc.add_directory(label='tbs1',
                          src=':/fake/location/',
                          dst=backup_info.get_data_directory(16387),
                          reuse=None,
                          bwlimit=None,
                          item_class=rcc.TABLESPACE_CLASS)
        rcc.add_directory(label='tbs2',
                          src=':/another/location/',
                          dst=backup_info.get_data_directory(16405),
                          reuse=None,
                          bwlimit=None,
                          item_class=rcc.TABLESPACE_CLASS)
        rcc.add_directory(
            label='pgdata',
            src=':/pg/data/',
            dst=backup_info.get_data_directory(),
            reuse=None,
            bwlimit=None,
            item_class=rcc.PGDATA_CLASS,
            exclude=[
                '/pg_xlog/*', '/pg_log/*', '/recovery.conf', '/postmaster.pid'
            ],
            exclude_and_protect=['pg_tblspc/16387', 'pg_tblspc/16405'])
        rcc.add_file(label='pg_control',
                     src=':/pg/data/global/pg_control',
                     dst='%s/global/pg_control' %
                     backup_info.get_data_directory(),
                     item_class=rcc.PGCONTROL_CLASS)
        rcc.add_file(label='config_file',
                     src=':/etc/postgresql.conf',
                     dst=backup_info.get_data_directory(),
                     item_class=rcc.CONFIG_CLASS,
                     optional=False)
        # Do the fake run
        rcc.copy()

        # Calculate statistics
        result = rcc.statistics()

        # We cannot check the actual result because it is not predictable,
        # so we check that every value is present and is a number and it is
        # greater than 0.
        # Directories (but not single files) get an analysis timing entry.
        assert result.get('analysis_time') > 0
        assert 'analysis_time_per_item' in result
        for tbs in ('pgdata', 'tbs1', 'tbs2'):
            assert result['analysis_time_per_item'][tbs] > 0

        # Every registered item must have both wall-clock ("copy") and
        # serialized copy timings.
        assert result.get('copy_time') > 0
        assert 'copy_time_per_item' in result
        assert 'serialized_copy_time_per_item' in result
        for tbs in ('pgdata', 'tbs1', 'tbs2', 'config_file', 'pg_control'):
            assert result['copy_time_per_item'][tbs] > 0
            assert result['serialized_copy_time_per_item'][tbs] > 0

        # Worker count must be reported as configured.
        assert result.get('number_of_workers') == rcc.workers
        assert result.get('total_time') > 0