Example #1
    def test_to_json(self, tmpdir):
        server = build_mocked_server(
            main_conf={'basebackups_directory': tmpdir.strpath}, )

        # Build a fake backup
        backup_dir = tmpdir.mkdir('fake_backup_id')
        info_file = backup_dir.join('backup.info')
        info_file.write(BASE_BACKUP_INFO)
        b_info = LocalBackupInfo(server, backup_id="fake_backup_id")

        # This call should not raise
        assert json.dumps(b_info.to_json())
Example #2
    def test_from_json(self, tmpdir):
        server = build_mocked_server(
            main_conf={'basebackups_directory': tmpdir.strpath}, )

        # Build a fake backup
        backup_dir = tmpdir.mkdir('fake_backup_id')
        info_file = backup_dir.join('backup.info')
        info_file.write(BASE_BACKUP_INFO)
        b_info = LocalBackupInfo(server, backup_id="fake_backup_id")

        # Build another BackupInfo from the json dump
        new_binfo = LocalBackupInfo.from_json(server, b_info.to_json())

        assert b_info.to_dict() == new_binfo.to_dict()
Example #3
 def test_backup_info_from_backup_id(self, tmpdir):
     """
     Test the initialization of a BackupInfo object
     using a backup_id as argument
     """
     # We want to test the loading system using a backup_id.
     # So we create a backup.info file into the tmpdir then
     # we instruct the configuration on the position of the
     # testing backup.info file
     server = build_mocked_server(
         main_conf={
             'basebackups_directory': tmpdir.strpath
         },
     )
     infofile = tmpdir.mkdir('fake_name').join('backup.info')
     infofile.write(BASE_BACKUP_INFO)
     # Load the backup.info file using the backup_id
     b_info = LocalBackupInfo(server, backup_id="fake_name")
     assert b_info
     assert b_info.begin_offset == 40
     assert b_info.begin_wal == '000000010000000000000004'
     assert b_info.timeline == 1
     assert isinstance(b_info.tablespaces, list)
     assert b_info.tablespaces[0].name == 'fake_tbs'
     assert b_info.tablespaces[0].oid == 16384
     assert b_info.tablespaces[0].location == '/fake_tmp/tbs'
Example #4
    def get_previous_backup(self,
                            backup_id,
                            status_filter=DEFAULT_STATUS_FILTER):
        """
        Get the previous backup (if any) in the catalog

        :param str backup_id: the ID of the backup from which to start searching
        :param status_filter: default DEFAULT_STATUS_FILTER. The status of
            the backup returned
        """
        if not isinstance(status_filter, tuple):
            status_filter = tuple(status_filter)
        backup = LocalBackupInfo(self.server, backup_id=backup_id)
        available_backups = self.get_available_backups(status_filter +
                                                       (backup.status, ))
        ids = sorted(available_backups.keys())
        try:
            current = ids.index(backup_id)
            while current > 0:
                res = available_backups[ids[current - 1]]
                if res.status in status_filter:
                    return res
                current -= 1
            return None
        except ValueError:
            raise UnknownBackupIdException('Could not find backup_id %s' %
                                           backup_id)
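
A minimal usage sketch of the method above, assuming `backup_manager` is an already initialised instance of the class that defines it; the backup ID shown is illustrative:

    # Hypothetical: walk back from a known backup to the previous usable one.
    previous = backup_manager.get_previous_backup('20230102T000000')
    if previous is not None:
        print(previous.backup_id, previous.status)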
Example #5
    def validate_last_backup_maximum_age(self, last_backup_maximum_age):
        """
        Evaluate the age of the last available backup in a catalogue.
        If the last backup is older than the specified time interval (age),
        the function returns False. If within the requested age interval,
        the function returns True.

        :param datetime.timedelta last_backup_maximum_age: time interval
            representing the maximum allowed age for the last backup
            in a server catalogue
        :return tuple: a tuple containing the boolean result of the check and
            auxiliary information about the last backup current age
        """
        # Get the ID of the last available backup
        backup_id = self.get_last_backup_id()
        if backup_id:
            # Get the backup object
            backup = LocalBackupInfo(self.server, backup_id=backup_id)
            now = datetime.datetime.now(dateutil.tz.tzlocal())
            # Evaluate the point of validity
            validity_time = now - last_backup_maximum_age
            # Pretty print of a time interval (age)
            msg = human_readable_timedelta(now - backup.end_time)
            # If the backup end time is older than the point of validity,
            # return False, otherwise return true
            if backup.end_time < validity_time:
                return False, msg
            else:
                return True, msg
        else:
            # If no backup is available return false
            return False, "No available backups"
Example #6
 def test_backup_info_save(self, tmpdir):
     """
     Test the save method of a BackupInfo object
     """
     # Check the saving method:
     # create an empty BackupInfo object, modify it, then save it
     # and verify the content of the resulting backup.info file.
     server = build_mocked_server(
         main_conf={'basebackups_directory': tmpdir.strpath}, )
     backup_dir = tmpdir.mkdir('fake_name')
     infofile = backup_dir.join('backup.info')
     b_info = LocalBackupInfo(server, backup_id="fake_name")
     b_info.status = BackupInfo.FAILED
     b_info.save()
     # read the file looking for the modified line
     for line in infofile.readlines():
         if line.startswith("status"):
             assert line.strip() == "status=FAILED"
Example #7
 def _load_backup_cache(self):
     """
     Populate the cache of the available backups, reading information
     from disk.
     """
     self._backup_cache = {}
     # Load all the backups from disk reading the backup.info files
     for filename in glob("%s/*/backup.info" %
                          self.config.basebackups_directory):
         backup = LocalBackupInfo(self.server, filename)
         self._backup_cache[backup.backup_id] = backup
Example #8
    def test_backup_info_version(self, tmpdir):
        """
        Simple test for backup_version management.
        """
        server = build_mocked_server(
            main_conf={'basebackups_directory': tmpdir.strpath}, )

        # new version
        backup_dir = tmpdir.mkdir('fake_backup_id')
        backup_dir.mkdir('data')
        backup_dir.join('backup.info')
        b_info = LocalBackupInfo(server, backup_id="fake_backup_id")
        assert b_info.backup_version == 2

        # old version
        backup_dir = tmpdir.mkdir('another_fake_backup_id')
        backup_dir.mkdir('pgdata')
        backup_dir.join('backup.info')
        b_info = LocalBackupInfo(server, backup_id="another_fake_backup_id")
        assert b_info.backup_version == 1
Example #9
    def test_pgespresso_start_backup(self):
        """
        Test concurrent backup using pgespresso
        """
        # Test: start concurrent backup
        # Build a backup_manager using a mocked server
        server = build_mocked_server(main_conf={
            'backup_options':
            BackupOptions.CONCURRENT_BACKUP
        })
        backup_manager = build_backup_manager(server=server)
        # Mock server.get_pg_setting('data_directory') call
        backup_manager.server.postgres.get_setting.return_value = '/pg/data'
        # Mock server.get_pg_configuration_files() call
        server.postgres.get_configuration_files.return_value = dict(
            config_file="/etc/postgresql.conf",
            hba_file="/pg/pg_hba.conf",
            ident_file="/pg/pg_ident.conf",
        )
        # Mock server.get_pg_tablespaces() call
        tablespaces = [Tablespace._make(('test_tbs', 1234, '/tbs/test'))]
        server.postgres.get_tablespaces.return_value = tablespaces
        server.postgres.server_version = 90500

        # Mock executor._pgespresso_start_backup(label) call
        start_time = datetime.datetime.now(tz.tzlocal()).replace(microsecond=0)
        server.postgres.pgespresso_start_backup.return_value = {
            'backup_label':
                "START WAL LOCATION: 266/4A9C1EF8 "
                "(file 00000010000002660000004A)\n"
                "START TIME: %s" % start_time.strftime('%Y-%m-%d %H:%M:%S %Z'),
        }
        # Build a test empty backup info
        backup_info = LocalBackupInfo(server=backup_manager.server,
                                      backup_id='fake_id2')

        backup_manager.executor.strategy.start_backup(backup_info)

        # Check that all the values are correctly saved inside the BackupInfo
        assert backup_info.pgdata == '/pg/data'
        assert backup_info.config_file == "/etc/postgresql.conf"
        assert backup_info.hba_file == "/pg/pg_hba.conf"
        assert backup_info.ident_file == "/pg/pg_ident.conf"
        assert backup_info.tablespaces == tablespaces
        assert backup_info.status == 'STARTED'
        assert backup_info.timeline == 16
        assert backup_info.begin_xlog == '266/4A9C1EF8'
        assert backup_info.begin_wal == '00000010000002660000004A'
        assert backup_info.begin_offset == 10231544
        assert backup_info.begin_time == start_time
        # Check that the correct call to pgespresso_start_backup has been made
        server.postgres.pgespresso_start_backup.assert_called_with(
            'Barman backup main fake_id2')
Example #10
    def test_exclusive_start_backup(self):
        """
        Basic test for the exclusive start_backup method
        """
        # Build a backup_manager using a mocked server
        server = build_mocked_server(main_conf={
            'backup_options':
            BackupOptions.EXCLUSIVE_BACKUP
        })
        backup_manager = build_backup_manager(server=server)

        # Mock server.get_pg_setting('data_directory') call
        backup_manager.server.postgres.get_setting.return_value = '/pg/data'
        # Mock server.get_pg_configuration_files() call
        server.postgres.get_configuration_files.return_value = dict(
            config_file="/etc/postgresql.conf",
            hba_file="/pg/pg_hba.conf",
            ident_file="/pg/pg_ident.conf",
        )
        # Mock server.get_pg_tablespaces() call
        tablespaces = [Tablespace._make(('test_tbs', 1234, '/tbs/test'))]
        server.postgres.get_tablespaces.return_value = tablespaces

        # Test 1: start exclusive backup
        # Mock server.start_exclusive_backup(label) call
        start_time = datetime.datetime.now()
        server.postgres.start_exclusive_backup.return_value = {
            'location': "A257/44B4C0D8",
            'file_name': "000000060000A25700000044",
            'file_offset': 11845848,
            'timestamp': start_time}

        # Build a test empty backup info
        backup_info = LocalBackupInfo(server=backup_manager.server,
                                      backup_id='fake_id')

        backup_manager.executor.strategy.start_backup(backup_info)

        # Check that all the values are correctly saved inside the BackupInfo
        assert backup_info.pgdata == '/pg/data'
        assert backup_info.config_file == "/etc/postgresql.conf"
        assert backup_info.hba_file == "/pg/pg_hba.conf"
        assert backup_info.ident_file == "/pg/pg_ident.conf"
        assert backup_info.tablespaces == tablespaces
        assert backup_info.status == 'STARTED'
        assert backup_info.timeline == 6
        assert backup_info.begin_xlog == 'A257/44B4C0D8'
        assert backup_info.begin_wal == '000000060000A25700000044'
        assert backup_info.begin_offset == 11845848
        assert backup_info.begin_time == start_time
        # Check that the correct call to start_exclusive_backup has been made
        server.postgres.start_exclusive_backup.assert_called_with(
            'Barman backup main fake_id')
Example #11
    def test_concurrent_start_backup(self):
        """
        Test concurrent backup using the 9.6 API
        """
        # Test: start concurrent backup
        # Build a backup_manager using a mocked server
        server = build_mocked_server(main_conf={
            'backup_options':
            BackupOptions.CONCURRENT_BACKUP
        })
        backup_manager = build_backup_manager(server=server)
        # Mock server.get_pg_setting('data_directory') call
        backup_manager.server.postgres.get_setting.return_value = '/pg/data'
        # Mock server.get_pg_configuration_files() call
        server.postgres.get_configuration_files.return_value = dict(
            config_file="/etc/postgresql.conf",
            hba_file="/pg/pg_hba.conf",
            ident_file="/pg/pg_ident.conf",
        )
        # Mock server.get_pg_tablespaces() call
        tablespaces = [Tablespace._make(('test_tbs', 1234, '/tbs/test'))]
        server.postgres.get_tablespaces.return_value = tablespaces
        # this is a postgres 9.6
        server.postgres.server_version = 90600

        # Mock call to new api method
        start_time = datetime.datetime.now()
        server.postgres.start_concurrent_backup.return_value = {
            'location': "A257/44B4C0D8",
            'timeline': 6,
            'timestamp': start_time,
        }
        # Build a test empty backup info
        backup_info = LocalBackupInfo(server=backup_manager.server,
                                      backup_id='fake_id2')

        backup_manager.executor.strategy.start_backup(backup_info)

        # Check that all the values are correctly saved inside the BackupInfo
        assert backup_info.pgdata == '/pg/data'
        assert backup_info.config_file == "/etc/postgresql.conf"
        assert backup_info.hba_file == "/pg/pg_hba.conf"
        assert backup_info.ident_file == "/pg/pg_ident.conf"
        assert backup_info.tablespaces == tablespaces
        assert backup_info.status == 'STARTED'
        assert backup_info.timeline == 6
        assert backup_info.begin_xlog == 'A257/44B4C0D8'
        assert backup_info.begin_wal == '000000060000A25700000044'
        assert backup_info.begin_offset == 11845848
        assert backup_info.begin_time == start_time
Example #12
    def test_postgres_start_backup(self):
        """
        Test concurrent backup using pg_basebackup
        """
        # Test: start concurrent backup
        backup_manager = build_backup_manager(global_conf={
            'backup_method': 'postgres'
        })
        # Mock server.get_pg_setting('data_directory') call
        postgres_mock = backup_manager.server.postgres
        postgres_mock.get_setting.side_effect = [
            '/test/fake_data_dir',
        ]
        # Mock server.get_pg_configuration_files() call
        postgres_mock.get_configuration_files.return_value = dict(
            config_file="/etc/postgresql.conf",
            hba_file="/pg/pg_hba.conf",
            ident_file="/pg/pg_ident.conf",
        )
        # Mock server.get_pg_tablespaces() call
        tablespaces = [Tablespace._make(('test_tbs', 1234, '/tbs/test'))]
        postgres_mock.get_tablespaces.return_value = tablespaces
        # this is a postgres 9.5
        postgres_mock.server_version = 90500

        # Mock call to new api method
        start_time = datetime.datetime.now()
        postgres_mock.current_xlog_info = {
            'location': "A257/44B4C0D8",
            'timestamp': start_time,
        }
        # Build a test empty backup info
        backup_info = LocalBackupInfo(server=backup_manager.server,
                                      backup_id='fake_id2')

        backup_manager.executor.strategy.start_backup(backup_info)

        # Check that all the values are correctly saved inside the BackupInfo
        assert backup_info.pgdata == '/test/fake_data_dir'
        assert backup_info.config_file == "/etc/postgresql.conf"
        assert backup_info.hba_file == "/pg/pg_hba.conf"
        assert backup_info.ident_file == "/pg/pg_ident.conf"
        assert backup_info.tablespaces == tablespaces
        assert backup_info.status == 'STARTED'
        assert backup_info.timeline is None
        assert backup_info.begin_xlog == 'A257/44B4C0D8'
        assert backup_info.begin_wal is None
        assert backup_info.begin_offset is None
        assert backup_info.begin_time == start_time
Example #13
 def test_pg_version(self, tmpdir):
     """
     Test handling of postgres version in BackupInfo object
     """
     infofile = tmpdir.join("backup.info")
     infofile.write(BASE_BACKUP_INFO)
     server = build_mocked_server()
     b_info = LocalBackupInfo(server, info_file=infofile.strpath)
     # BASE_BACKUP_INFO has version 90400 so expect 9.4
     assert b_info.pg_major_version() == "9.4"
     assert b_info.wal_directory() == "pg_xlog"
     # Set backup_info.version to 100600 so expect 10
     b_info.version = 100600
     assert b_info.pg_major_version() == "10"
     assert b_info.wal_directory() == "pg_wal"
Example #14
    def test_backupinfo_load(self, connect_mock, tmpdir):
        server = build_real_server(
            main_conf={"basebackups_directory": tmpdir.strpath}, )

        # Build a fake backup info and try to load it, to ensure that we don't
        # need a PostgreSQL connection to do that
        backup_dir = tmpdir.mkdir("fake_backup_id")
        info_file = backup_dir.join("backup.info")
        info_file.write(BASE_BACKUP_INFO)

        # Monkey patch the PostgreSQL connection function to raise a
        # RuntimeError
        connect_mock.side_effect = RuntimeError

        # The following constructor will raise a RuntimeError if a
        # PostgreSQL connection is needed
        LocalBackupInfo(server, backup_id="fake_backup_id")
Example #15
 def test_backup_info_from_empty_file(self, tmpdir):
     """
     Test the initialization of a BackupInfo object
     loading data from an empty backup.info file
     """
     # We want to test the loading of BackupInfo data from a local file,
     # so we create an empty backup.info file in the tmpdir
     infofile = tmpdir.join("backup.info")
     infofile.write('')
     # Mock the server, we don't need it at the moment
     server = build_mocked_server(name='test_server')
     server.backup_manager.mode = 'test-mode'
     # load the data from the backup.info file
     b_info = LocalBackupInfo(server, info_file=infofile.strpath)
     assert b_info
     assert b_info.server_name == 'test_server'
     assert b_info.mode == 'test-mode'
Example #16
    def test_stop_backup_sets_backup_info(
        self,
        _mock_backup_info_from_stop_location,
        _mock_backup_info_from_backup_label,
        _mock_read_backup_label,
        compression,
        format,
        should_set_backup_info,
    ):
        """
        Verifies that the compression is set appropriately in backup_info when
        stopping the backup for the given compression and format.
        """
        # GIVEN a server configured for pg_basebackup compression
        server = build_mocked_server(
            global_conf={
                "backup_method": "postgres",
                "backup_compression": compression,
                "backup_compression_format": format,
            })
        # AND a PgBaseBackupCompression for the configured compression
        if compression is not None:
            backup_compression = GZipPgBaseBackupCompression(server.config)
        else:
            backup_compression = None
        # AND a BackupInfo representing an ongoing backup
        backup_info = LocalBackupInfo(server=server, backup_id="fake_id")
        # AND a PostgresBackupStrategy with the configured compression
        mock_postgres = mock.Mock()
        strategy = PostgresBackupStrategy(mock_postgres, "test-server",
                                          backup_compression)

        # WHEN stop_backup is called with the BackupInfo
        strategy.stop_backup(backup_info)

        # THEN the compression field of the BackupInfo is set to the
        # expected compression *if* the compression/format combination
        # is gzip/tar
        if should_set_backup_info:
            assert backup_info.compression == compression
        # OR compression/format is any other combination, compression
        # field should not be set
        else:
            assert backup_info.compression is None
Example #17
    def test_xlog_segment_size(self, tmpdir):
        """
        Test the `xlog_segment_size` field of BackupInfo
        """

        # Create an empty backup info file, to test the
        # default value of xlog_segment_size. It's relevant
        # also for backward compatibility with backup.info files
        # which don't contain the xlog_segment_size field.

        infofile = tmpdir.join("backup.info")
        infofile.write('')

        # Mock the server, we don't need it at the moment
        server = build_mocked_server(name='test_server')
        server.backup_manager.mode = 'test-mode'

        # load the data from the backup.info file
        b_info = LocalBackupInfo(server, info_file=infofile.strpath)
        assert b_info.xlog_segment_size == 1 << 24
Example #18
 def test_backup_info_from_file(self, tmpdir):
     """
     Test the initialization of a BackupInfo object
     loading data from a backup.info file
     """
     # we want to test the loading of BackupInfo data from local file.
     # So we create a file into the tmpdir containing a
     # valid BackupInfo dump
     infofile = tmpdir.join("backup.info")
     infofile.write(BASE_BACKUP_INFO)
     # Mock the server, we don't need it at the moment
     server = build_mocked_server()
     # load the data from the backup.info file
     b_info = LocalBackupInfo(server, info_file=infofile.strpath)
     assert b_info
     assert b_info.begin_offset == 40
     assert b_info.begin_wal == '000000010000000000000004'
     assert b_info.timeline == 1
     assert isinstance(b_info.tablespaces, list)
     assert b_info.tablespaces[0].name == 'fake_tbs'
     assert b_info.tablespaces[0].oid == 16384
     assert b_info.tablespaces[0].location == '/fake_tmp/tbs'
Example #19
    def test_passive_node_cron(self, subprocess_mock, command_mock,
                               monkeypatch, tmpdir, capsys):
        """
        check the passive node version of cron command

        :param MagicMock subprocess_mock: Mock of
            barman.command_wrappers.BarmanSubProcess
        :param MagicMock command_mock: Mock of
            barman.command_wrappers.Command
        :param monkeypatch monkeypatch: pytest patcher
        :param py.local.path tmpdir: pytest temporary directory
        :param capsys: fixture for capturing stdout
        """
        # We need to set up a server object
        barman_home = tmpdir.mkdir("barman_home")
        backup_dir = barman_home.mkdir("main")
        wals_dir = backup_dir.mkdir("wals")
        # Build the configuration for the server using
        # a fake configuration object filled with test values
        config = build_config_from_dicts(
            global_conf=dict(barman_home=str(barman_home)),
            main_conf=dict(compression=None,
                           wals_directory=str(wals_dir),
                           primary_ssh_command='ssh fakeuser@fakehost'))
        server = barman.server.Server(config.get_server('main'))
        # Make the configuration available through the global namespace
        # (required to invoke a subprocess to retrieve the config file name)
        monkeypatch.setattr(barman, '__config__', config)
        # We need to build a test response from the remote server.
        # We use the out property of the command_mock for
        # returning the test response
        command_mock.return_value.out = json.dumps(EXPECTED_MINIMAL)
        server.cron()
        (out, err) = capsys.readouterr()
        # Assertion block 1: the execution of the cron command for passive
        # node should be successful
        assert "Starting copy of backup" in out
        assert "Started copy of WAL files for server" in out

        # Modify the response of the fake remote call
        primary_info = dict(EXPECTED_MINIMAL)
        primary_info['backups'] = []
        primary_info['wals'] = []
        command_mock.return_value.out = json.dumps(primary_info)
        server.cron()
        (out, err) = capsys.readouterr()
        # Assertion block 2: No backup or wal synchronisation required
        assert "No backup synchronisation required" in out
        assert "No WAL synchronisation required for server" in out

        # Add a backup to the remote response
        primary_info = dict(EXPECTED_MINIMAL)
        backup_info_dict = LocalBackupInfo(server,
                                           backup_id='1234567891').to_json()
        primary_info['backups']['1234567891'] = backup_info_dict
        command_mock.return_value.out = json.dumps(primary_info)
        server.cron()
        (out, err) = capsys.readouterr()
        # Assertion block 3: start the copy of the first backup in the
        # list (1234567890), and not the second one (1234567891)
        assert "Starting copy of backup 1234567890" in out
        assert "Started copy of WAL files for server main" in out
        assert "1234567891" not in out

        # Patch on the fly the Lockfile object, testing the locking
        # management of the method.
        with mock.patch.multiple('barman.server',
                                 ServerBackupSyncLock=mock.DEFAULT,
                                 ServerWalSyncLock=mock.DEFAULT) as lock_mocks:
            for item in lock_mocks:
                lock_mocks[item].side_effect = LockFileBusy()
            primary_info = dict(EXPECTED_MINIMAL)
            primary_info['backups']['1234567891'] = backup_info_dict
            command_mock.return_value.out = json.dumps(primary_info)
            server.sync_cron(keep_descriptors=False)
            (out, err) = capsys.readouterr()
            assert "A synchronisation process for backup 1234567890" in out
            assert "WAL synchronisation already running" in out
Example #20
def build_test_backup_info(
        backup_id='1234567890',
        begin_offset=40,
        begin_time=None,
        begin_wal='000000010000000000000002',
        begin_xlog='0/2000028',
        config_file='/pgdata/location/postgresql.conf',
        end_offset=184,
        end_time=None,
        end_wal='000000010000000000000002',
        end_xlog='0/20000B8',
        error=None,
        hba_file='/pgdata/location/pg_hba.conf',
        ident_file='/pgdata/location/pg_ident.conf',
        mode='default',
        pgdata='/pgdata/location',
        server_name='test_server',
        size=12345,
        status=BackupInfo.DONE,
        included_files=None,
        tablespaces=(
            ('tbs1', 16387, '/fake/location'),
            ('tbs2', 16405, '/another/location'),
        ),
        timeline=1,
        version=90302,
        server=None,
        copy_stats=None):
    """
    Create an 'Ad Hoc' BackupInfo object for testing purposes.

    A BackupInfo object is the barman representation of a physical backup.
    For testing purposes it is necessary to build a BackupInfo while avoiding
    the use of Mock/MagicMock classes as much as possible.

    :param str backup_id: the id of the backup
    :param int begin_offset: begin_offset of the backup
    :param datetime.datetime|None begin_time: begin_time of the backup
    :param str begin_wal: begin_wal of the backup
    :param str begin_xlog: begin_xlog of the backup
    :param str config_file: config file of the backup
    :param int end_offset: end_offset of the backup
    :param datetime.datetime|None end_time: end_time of the backup
    :param str end_wal: end_wal of the backup
    :param str end_xlog: end_xlog of the backup
    :param str|None error: error message for the backup
    :param str hba_file: hba_file for the backup
    :param str ident_file: ident_file for the backup
    :param str mode: mode of execution of the backup
    :param str pgdata: pg_data dir of the backup
    :param str server_name: server name for the backup
    :param int size: size of the backup
    :param str status: status of the execution of the backup
    :param list|None included_files: a list of extra configuration files
    :param list|tuple|None tablespaces: a list of tablespaces for the backup
    :param int timeline: timeline of the backup
    :param int version: postgres version of the backup
    :param barman.server.Server|None server: Server object for the backup
    :param dict|None copy_stats: copy stats dictionary
    :rtype: barman.infofile.LocalBackupInfo
    """
    if begin_time is None:
        begin_time = datetime.now(tz.tzlocal()) - timedelta(minutes=10)
    if end_time is None:
        end_time = datetime.now(tz.tzlocal())

    # Generate a list of tablespace objects (don't use a list comprehension
    # or in python 2.x the 'item' variable will leak to the main context)
    if tablespaces is not None:
        tablespaces = list(Tablespace._make(item) for item in tablespaces)

    # Manage the server for the Backup info: if no server is provided
    # by the caller use a Mock with a basic configuration
    if server is None:
        server = mock.Mock(name=server_name)
        server.config = build_config_from_dicts().get_server('main')
        server.passive_node = False
        server.backup_manager.name = 'default'

    backup_info = LocalBackupInfo(**locals())
    return backup_info
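
A short sketch of how this helper might be used in a test, overriding only the fields of interest (the backup ID and field values below are illustrative):

    # Build a LocalBackupInfo for a failed backup on timeline 2; every other
    # field falls back to the defaults declared above.
    b_info = build_test_backup_info(
        backup_id='20230101T000000',
        status=BackupInfo.FAILED,
        timeline=2,
    )
    assert b_info.status == BackupInfo.FAILED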
Example #21
    def backup(self, wait=False, wait_timeout=None):
        """
        Performs a backup for the server

        :param bool wait: wait for all the required WAL files to be archived
        :param int|None wait_timeout: maximum time, in seconds, to wait for
            the required WAL files to be archived
        :return BackupInfo: the generated BackupInfo
        """
        _logger.debug("initialising backup information")
        self.executor.init()
        backup_info = None
        try:
            # Create the BackupInfo object representing the backup
            backup_info = LocalBackupInfo(
                self.server,
                backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))
            backup_info.set_attribute('systemid', self.server.systemid)
            backup_info.save()
            self.backup_cache_add(backup_info)
            output.info("Starting backup using %s method for server %s in %s",
                        self.mode, self.config.name,
                        backup_info.get_basebackup_directory())

            # Run the pre-backup-script if present.
            script = HookScriptRunner(self, 'backup_script', 'pre')
            script.env_from_backup_info(backup_info)
            script.run()

            # Run the pre-backup-retry-script if present.
            retry_script = RetryHookScriptRunner(self, 'backup_retry_script',
                                                 'pre')
            retry_script.env_from_backup_info(backup_info)
            retry_script.run()

            # Do the backup using the BackupExecutor
            self.executor.backup(backup_info)

            # Create a restore point after a backup
            target_name = 'barman_%s' % backup_info.backup_id
            self.server.postgres.create_restore_point(target_name)

            # Free the Postgres connection
            self.server.postgres.close()

            # Compute backup size and fsync it on disk
            self.backup_fsync_and_set_sizes(backup_info)

            # Mark the backup as WAITING_FOR_WALS
            backup_info.set_attribute("status", BackupInfo.WAITING_FOR_WALS)
        # Use BaseException instead of Exception to catch events like
        # KeyboardInterrupt (e.g.: CTRL-C)
        except BaseException as e:
            msg_lines = force_str(e).strip().splitlines()
            # If the exception has no attached message use the raw
            # type name
            if len(msg_lines) == 0:
                msg_lines = [type(e).__name__]
            if backup_info:
                # Use only the first line of exception message
                # in backup_info error field
                backup_info.set_attribute("status", BackupInfo.FAILED)
                backup_info.set_attribute(
                    "error", "failure %s (%s)" %
                    (self.executor.current_action, msg_lines[0]))

            output.error("Backup failed %s.\nDETAILS: %s",
                         self.executor.current_action, '\n'.join(msg_lines))

        else:
            output.info("Backup end at LSN: %s (%s, %08X)",
                        backup_info.end_xlog, backup_info.end_wal,
                        backup_info.end_offset)

            executor = self.executor
            output.info(
                "Backup completed (start time: %s, elapsed time: %s)",
                self.executor.copy_start_time,
                human_readable_timedelta(datetime.datetime.now() -
                                         executor.copy_start_time))

            # If requested, wait for end_wal to be archived
            if wait:
                try:
                    self.server.wait_for_wal(backup_info.end_wal, wait_timeout)
                    self.check_backup(backup_info)
                except KeyboardInterrupt:
                    # Ignore CTRL-C pressed while waiting for WAL files
                    output.info(
                        "Got CTRL-C. Continuing without waiting for '%s' "
                        "to be archived", backup_info.end_wal)

        finally:
            if backup_info:
                backup_info.save()

                # Make sure we are not holding any PostgreSQL connection
                # during the post-backup scripts
                self.server.close()

                # Run the post-backup-retry-script if present.
                try:
                    retry_script = RetryHookScriptRunner(
                        self, 'backup_retry_script', 'post')
                    retry_script.env_from_backup_info(backup_info)
                    retry_script.run()
                except AbortedRetryHookScript as e:
                    # Ignore the ABORT_STOP as it is a post-hook operation
                    _logger.warning(
                        "Ignoring stop request after receiving "
                        "abort (exit code %d) from post-backup "
                        "retry hook script: %s", e.hook.exit_status,
                        e.hook.script)

                # Run the post-backup-script if present.
                script = HookScriptRunner(self, 'backup_script', 'post')
                script.env_from_backup_info(backup_info)
                script.run()

        output.result('backup', backup_info)
        return backup_info
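
A hedged usage sketch (assuming `backup_manager` is a fully configured instance of the class defining this method): the call returns the resulting BackupInfo, whose status reflects the outcome.

    # Hypothetical invocation: take a backup and wait up to 60 seconds for the
    # required WAL files to be archived.
    backup_info = backup_manager.backup(wait=True, wait_timeout=60)
    print(backup_info.backup_id, backup_info.status)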
Example #22
    def test_data_dir(self, tmpdir):
        """
        Simple test for the method responsible for building the paths to the
        data directory and the tablespace directories according to
        backup_version
        """
        server = build_mocked_server(
            main_conf={'basebackups_directory': tmpdir.strpath}, )

        # Build a fake v2 backup
        backup_dir = tmpdir.mkdir('fake_backup_id')
        data_dir = backup_dir.mkdir('data')
        info_file = backup_dir.join('backup.info')
        info_file.write(BASE_BACKUP_INFO)
        b_info = LocalBackupInfo(server, backup_id="fake_backup_id")

        # Check that the paths are built according to the version
        assert b_info.backup_version == 2
        assert b_info.get_data_directory() == data_dir.strpath
        assert b_info.get_data_directory(16384) == (backup_dir.strpath +
                                                    '/16384')

        # Build a fake v1 backup
        backup_dir = tmpdir.mkdir('another_fake_backup_id')
        pgdata_dir = backup_dir.mkdir('pgdata')
        info_file = backup_dir.join('backup.info')
        info_file.write(BASE_BACKUP_INFO)
        b_info = LocalBackupInfo(server, backup_id="another_fake_backup_id")

        # Check that the paths are built according to the version
        assert b_info.backup_version == 1
        assert b_info.get_data_directory(16384) == \
            backup_dir.strpath + '/pgdata/pg_tblspc/16384'
        assert b_info.get_data_directory() == pgdata_dir.strpath

        # Check that an exception is raised if an invalid oid
        # is provided to the method
        with pytest.raises(ValueError):
            b_info.get_data_directory(12345)

        # Check that a ValueError exception is raised with an
        # invalid oid when the tablespaces list is None
        b_info.tablespaces = None
        # and expect a value error
        with pytest.raises(ValueError):
            b_info.get_data_directory(16384)
Example #23
    def test_backup(self, rwbb_mock, gpb_mock, backup_copy_mock,
                    capsys, tmpdir):
        """
        Test the execution of a backup

        :param rwbb_mock: mock for the remove_wal_before_backup method
        :param gpb_mock: mock for the get_previous_backup method
        :param backup_copy_mock: mock for the executor's backup_copy method
        :param capsys: stdout capture module
        :param tmpdir: pytest temp directory
        """
        backup_manager = build_backup_manager(global_conf={
            'barman_home': tmpdir.mkdir('home').strpath,
            # Silence the warning for default backup strategy
            'backup_options': 'exclusive_backup',
        })
        backup_info = LocalBackupInfo(backup_manager.server,
                                      backup_id='fake_backup_id')
        backup_info.begin_xlog = "0/2000028"
        backup_info.begin_wal = "000000010000000000000002"
        backup_info.begin_offset = 40
        backup_info.status = BackupInfo.EMPTY
        backup_info.copy_stats = dict(copy_time=100)

        gpb_mock.return_value = None

        rwbb_mock.return_value = ['000000010000000000000001']

        # Test 1: exclusive backup
        backup_manager.executor.strategy = Mock()
        backup_manager.executor.backup(backup_info)
        out, err = capsys.readouterr()
        assert err == ''
        assert (
            "Backup start at LSN: "
            "0/2000028 (000000010000000000000002, 00000028)\n"
            "This is the first backup for server main\n"
            "WAL segments preceding the current backup have been found:\n"
            "\t000000010000000000000001 from server main has been removed\n"
            "Starting backup copy via rsync/SSH for fake_backup_id\n"
            "Copy done (time: 1 minute, 40 seconds)") in out

        gpb_mock.assert_called_with(backup_info.backup_id)
        rwbb_mock.assert_called_with(backup_info)
        backup_manager.executor.strategy.start_backup.assert_called_once_with(
            backup_info)
        backup_copy_mock.assert_called_once_with(backup_info)
        backup_manager.executor.strategy.stop_backup.assert_called_once_with(
            backup_info)

        # Test 2: concurrent backup
        # change the configuration to concurrent backup
        backup_manager.executor.config.backup_options = [
            BackupOptions.CONCURRENT_BACKUP]

        # reset mocks
        gpb_mock.reset_mock()
        rwbb_mock.reset_mock()
        backup_manager.executor.strategy.reset_mock()
        backup_copy_mock.reset_mock()

        # prepare data directory for backup_label generation
        backup_info.backup_label = 'test\nlabel\n'

        backup_manager.executor.backup(backup_info)
        out, err = capsys.readouterr()
        assert err == ''
        assert (
            "Backup start at LSN: "
            "0/2000028 (000000010000000000000002, 00000028)\n"
            "This is the first backup for server main\n"
            "WAL segments preceding the current backup have been found:\n"
            "\t000000010000000000000001 from server main has been removed\n"
            "Starting backup copy via rsync/SSH for fake_backup_id\n"
            "Copy done (time: 1 minute, 40 seconds)") in out

        gpb_mock.assert_called_with(backup_info.backup_id)
        rwbb_mock.assert_called_with(backup_info)
        backup_manager.executor.strategy.start_backup.assert_called_once_with(
            backup_info)
        backup_copy_mock.assert_called_once_with(backup_info)
        backup_manager.executor.strategy.stop_backup.assert_called_once_with(
            backup_info)