Example #1
 def backup(self):
     """
     Upload a backup to S3
     """
     backup_info = BackupInfo(
         backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))
     key_prefix = os.path.join(self.cloud_interface.path, self.server_name,
                               'base', backup_info.backup_id)
     controller = S3UploadController(self.cloud_interface, key_prefix,
                                     self.compression)
     strategy = ConcurrentBackupStrategy(self.postgres)
     logging.info("Starting backup %s", backup_info.backup_id)
     strategy.start_backup(backup_info)
     try:
         self.backup_copy(controller, backup_info)
         logging.info("Stopping backup %s", backup_info.backup_id)
         strategy.stop_backup(backup_info)
         pgdata_stat = os.stat(backup_info.pgdata)
         controller.add_fileobj(
             label='backup_label',
             fileobj=BytesIO(backup_info.backup_label.encode('UTF-8')),
             dst='data',
             path='backup_label',
             uid=pgdata_stat.st_uid,
             gid=pgdata_stat.st_gid,
         )
     finally:
         with BytesIO() as backup_info_file:
             backup_info.save(file_object=backup_info_file)
             backup_info_file.seek(0, os.SEEK_SET)
             controller.upload_fileobj(label='backup_info',
                                       fileobj=backup_info_file,
                                       dst='backup.info')
         controller.close()
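
A pattern worth noting in the finally block above: the backup.info metadata is serialized into an in-memory BytesIO buffer, rewound, and only then handed to the upload controller, so no temporary file ever touches the disk. A minimal stand-alone sketch of the same pattern (upload_metadata and the upload callable are hypothetical, not part of the snippet's API):

 import os
 from io import BytesIO

 def upload_metadata(upload, metadata_text):
     # Serialize to an in-memory buffer, rewind, then hand the file
     # object to the uploader.
     with BytesIO() as buf:
         buf.write(metadata_text.encode('utf-8'))
         buf.seek(0, os.SEEK_SET)  # rewind before the uploader reads
         upload(buf, 'backup.info')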
Example #2
    def backup(self):
        """
        Performs a backup for the server
        """
        _logger.debug("initialising backup information")
        self.executor.init()
        backup_info = None
        try:
            # Create the BackupInfo object representing the backup
            backup_info = BackupInfo(
                self.server,
                backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))
            backup_info.save()
            self.backup_cache_add(backup_info)
            output.info(
                "Starting backup for server %s in %s",
                self.config.name,
                backup_info.get_basebackup_directory())

            # Run the pre-backup-script if present.
            script = HookScriptRunner(self, 'backup_script', 'pre')
            script.env_from_backup_info(backup_info)
            script.run()

            # Run the pre-backup-retry-script if present.
            retry_script = RetryHookScriptRunner(
                self, 'backup_retry_script', 'pre')
            retry_script.env_from_backup_info(backup_info)
            retry_script.run()

            # Do the backup using the BackupExecutor
            self.executor.backup(backup_info)

            # Compute backup size and fsync it on disk
            self.backup_fsync_and_set_sizes(backup_info)

            # Mark the backup as DONE
            backup_info.set_attribute("status", "DONE")
        # Use BaseException instead of Exception to catch events like
        # KeyboardInterrupt (e.g.: CTRL-C)
        except BaseException as e:
            msg_lines = str(e).strip().splitlines()
            if backup_info:
                # Use only the first line of exception message
                # in backup_info error field
                backup_info.set_attribute("status", "FAILED")
                # If the exception has no attached message use the raw type name
                if len(msg_lines) == 0:
                    msg_lines = [type(e).__name__]
                backup_info.set_attribute(
                    "error",
                    "failure %s (%s)" % (
                        self.executor.current_action, msg_lines[0]))

            output.error("Backup failed %s.\nDETAILS: %s\n%s",
                         self.executor.current_action, msg_lines[0],
                         '\n'.join(msg_lines[1:]))
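
The except block above normalises any exception, including KeyboardInterrupt, down to a one-line summary for the backup_info error field. A stand-alone restatement of that normalisation (the helper name is illustrative):

    def first_error_line(exc):
        # Reduce an exception to a single summary line, falling back
        # to the exception class name when the message is empty.
        lines = str(exc).strip().splitlines()
        return lines[0] if lines else type(exc).__name__

    assert first_error_line(ValueError('boom\ndetails')) == 'boom'
    assert first_error_line(KeyboardInterrupt()) == 'KeyboardInterrupt'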
Example #3
    def test_to_json(self, tmpdir):
        server = build_mocked_server(main_conf={"basebackups_directory": tmpdir.strpath})

        # Build a fake backup
        backup_dir = tmpdir.mkdir("fake_backup_id")
        info_file = backup_dir.join("backup.info")
        info_file.write(BASE_BACKUP_INFO)
        b_info = BackupInfo(server, backup_id="fake_backup_id")

        # This call should not raise
        assert json.dumps(b_info.to_json())
Example #4
    def test_to_json(self, tmpdir):
        server = build_mocked_server(
            main_conf={'basebackups_directory': tmpdir.strpath}, )

        # Build a fake backup
        backup_dir = tmpdir.mkdir('fake_backup_id')
        info_file = backup_dir.join('backup.info')
        info_file.write(BASE_BACKUP_INFO)
        b_info = BackupInfo(server, backup_id="fake_backup_id")

        # This call should not raise
        assert json.dumps(b_info.to_json())
Example #5
    def test_from_json(self, tmpdir):
        server = build_mocked_server(main_conf={"basebackups_directory": tmpdir.strpath})

        # Build a fake backup
        backup_dir = tmpdir.mkdir("fake_backup_id")
        info_file = backup_dir.join("backup.info")
        info_file.write(BASE_BACKUP_INFO)
        b_info = BackupInfo(server, backup_id="fake_backup_id")

        # Build another BackupInfo from the json dump
        new_binfo = BackupInfo.from_json(server, b_info.to_json())

        assert b_info.to_dict() == new_binfo.to_dict()
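
The round-trip works because to_json() emits only JSON-serialisable types, which from_json() can consume again. Reduced to plain data, the property the test relies on is simply:

    import json

    info = {'backup_id': 'fake_backup_id', 'status': 'DONE', 'timeline': 1}
    assert json.loads(json.dumps(info)) == info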
Example #6
    def get_backup_info(self, backup_id):
        """
        Load a BackupInfo from S3

        :param str backup_id: The backup id to load
        :rtype: BackupInfo
        """
        backup_info_path = os.path.join(self.prefix, backup_id, 'backup.info')
        backup_info_file = self.cloud_interface.remote_open(backup_info_path)
        if backup_info_file is None:
            return None
        backup_info = BackupInfo(backup_id)
        backup_info.load(file_object=backup_info_file)
        return backup_info
Example #7
    def test_from_json(self, tmpdir):
        server = build_mocked_server(
            main_conf={'basebackups_directory': tmpdir.strpath}, )

        # Build a fake backup
        backup_dir = tmpdir.mkdir('fake_backup_id')
        info_file = backup_dir.join('backup.info')
        info_file.write(BASE_BACKUP_INFO)
        b_info = BackupInfo(server, backup_id="fake_backup_id")

        # Build another BackupInfo from the json dump
        new_binfo = BackupInfo.from_json(server, b_info.to_json())

        assert b_info.to_dict() == new_binfo.to_dict()
Example #8
    def get_restore_path(self, backup_id, status_filter=DEFAULT_STATUS_FILTER):
        """
        Get the list of backup ids required to restore backup_id

        :param status_filter: default DEFAULT_STATUS_FILTER. The status of
            the backups returned
        """
        if not isinstance(status_filter, tuple):
            status_filter = tuple(status_filter)
        backup = BackupInfo(self.server, backup_id=backup_id)
        if backup.incr_lsn is None:
            return [backup_id]
        else:
            available_backups = self.get_available_backups(status_filter +
                                                           (backup.status, ))
            ids = list(reversed(sorted(available_backups.keys())))
            ret = []
            expected_lsn = backup.incr_lsn
            try:
                current = ids.index(backup_id)
                while current < len(ids) - 1:
                    res = available_backups[ids[current + 1]]
                    if res.status in status_filter:
                        if res.begin_xlog == expected_lsn:
                            ret.append(res.backup_id)
                            if res.incr_lsn is None:
                                return list(reversed(ret)) + [backup_id]
                            else:
                                expected_lsn = res.incr_lsn
                    current += 1
                return []
            except ValueError:
                raise RuntimeError('Could not build path for %s' % backup_id)
Example #9
    def get_previous_backup(self, backup_id,
                            status_filter=DEFAULT_STATUS_FILTER):
        """
        Get the previous backup (if any) in the catalog

        :param status_filter: default DEFAULT_STATUS_FILTER. The status of
            the backup returned
        """
        if not isinstance(status_filter, tuple):
            status_filter = tuple(status_filter)
        backup = BackupInfo(self.server, backup_id=backup_id)
        available_backups = self.get_available_backups(
            status_filter + (backup.status,))
        ids = sorted(available_backups.keys())
        try:
            current = ids.index(backup_id)
            while current > 0:
                res = available_backups[ids[current - 1]]
                if res.status in status_filter:
                    return res
                current -= 1
            return None
        except ValueError:
            raise UnknownBackupIdException('Could not find backup_id %s' %
                                           backup_id)
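
get_previous_backup (like get_restore_path above) can sort the raw id strings because backup ids are '%Y%m%dT%H%M%S' timestamps, so lexicographic order coincides with chronological order. A quick demonstration of that property:

    import datetime

    def new_backup_id(now):
        # Same format used when the backup() methods above create ids.
        return now.strftime('%Y%m%dT%H%M%S')

    earlier = new_backup_id(datetime.datetime(2024, 1, 2, 3, 4, 5))
    later = new_backup_id(datetime.datetime(2024, 1, 2, 3, 4, 6))
    assert earlier == '20240102T030405' and earlier < later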
Example #10
    def validate_last_backup_maximum_age(self, last_backup_maximum_age):
        """
        Evaluate the age of the last available backup in a catalogue.
        If the last backup is older than the specified time interval (age),
        the function returns False. If within the requested age interval,
        the function returns True.

        :param datetime.timedelta last_backup_maximum_age: time interval
            representing the maximum allowed age for the last backup
            in a server catalogue
        :return tuple: a tuple containing the boolean result of the check and
            auxiliary information about the last backup's current age
        """
        # Get the ID of the last available backup
        backup_id = self.get_last_backup_id()
        if backup_id:
            # Get the backup object
            backup = BackupInfo(self.server, backup_id=backup_id)
            now = datetime.datetime.now(dateutil.tz.tzlocal())
            # Evaluate the point of validity
            validity_time = now - last_backup_maximum_age
            # Pretty print of a time interval (age)
            msg = human_readable_timedelta(now - backup.end_time)
            # If the backup end time is older than the point of validity,
            # return False, otherwise return True
            if backup.end_time < validity_time:
                return False, msg
            else:
                return True, msg
        else:
            # If no backup is available return false
            return False, "No available backups"
Example #11
    def get_increments(self, backup_id, status_filter=DEFAULT_STATUS_FILTER):
        """
        Get the list of backup ids that require this backup id for restore

        :param status_filter: default DEFAULT_STATUS_FILTER. The status of
            the backups returned
        """
        if not isinstance(status_filter, tuple):
            status_filter = tuple(status_filter)
        backup = BackupInfo(self.server, backup_id=backup_id)
        available_backups = self.get_available_backups(status_filter +
                                                       (backup.status, ))
        ids = list(sorted(available_backups.keys()))
        ret = []
        expected_lsns = [backup.begin_xlog]
        try:
            current = ids.index(backup_id)
            while current < len(ids) - 1:
                res = available_backups[ids[current + 1]]
                if res.status in status_filter:
                    if res.incr_lsn is not None and \
                            res.incr_lsn in expected_lsns:
                        ret.append(ids[current + 1])
                        expected_lsns.append(res.begin_xlog)
                current += 1
            return ret
        except ValueError:
            raise RuntimeError('Could not find increments for %s' % backup_id)
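
The chain walk above reduces to: an increment belongs to the result when its incr_lsn matches the begin LSN of the root backup or of an increment already collected. A dictionary-based illustration (not the real catalog API; field names follow the snippet):

    def find_increments(backups, backup_id):
        # backups maps id -> {'begin_xlog': ..., 'incr_lsn': ...}
        expected = [backups[backup_id]['begin_xlog']]
        ids = sorted(backups)
        result = []
        for bid in ids[ids.index(backup_id) + 1:]:
            incr = backups[bid].get('incr_lsn')
            if incr is not None and incr in expected:
                result.append(bid)
                expected.append(backups[bid]['begin_xlog'])
        return result

    chain = {
        '20240101T000000': {'begin_xlog': '0/A', 'incr_lsn': None},
        '20240102T000000': {'begin_xlog': '0/B', 'incr_lsn': '0/A'},
        '20240103T000000': {'begin_xlog': '0/C', 'incr_lsn': '0/B'},
    }
    assert find_increments(chain, '20240101T000000') == \
        ['20240102T000000', '20240103T000000']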
Example #12
 def test_backup_info_save(self, tmpdir):
     """
     Test the save method of a BackupInfo object
     """
     # Check the saving method.
     # Load a backup.info file, modify the BackupInfo object
     # then save it.
     server = build_mocked_server(main_conf={"basebackups_directory": tmpdir.strpath})
     backup_dir = tmpdir.mkdir("fake_name")
     infofile = backup_dir.join("backup.info")
     b_info = BackupInfo(server, backup_id="fake_name")
     b_info.status = BackupInfo.FAILED
     b_info.save()
     # read the file looking for the modified line
     for line in infofile.readlines():
         if line.startswith("status"):
             assert line.strip() == "status=FAILED"
Example #13
 def test_backup_info_save(self, tmpdir):
     """
     Test the save method of a BackupInfo object
     """
     # Check the saving method.
     # Load a backup.info file, modify the BackupInfo object
     # then save it.
     server = mock.MagicMock()
     server.config.basebackups_directory = tmpdir.strpath
     backup_dir = tmpdir.mkdir('fake_name')
     infofile = backup_dir.join('backup.info')
     b_info = BackupInfo(server, backup_id="fake_name")
     b_info.status = BackupInfo.FAILED
     b_info.save()
     # read the file looking for the modified line
     for line in infofile.readlines():
         if line.startswith("status"):
             assert line.strip() == "status=FAILED"
Example #14
 def test_backup_info_save(self, tmpdir):
     """
     Test the save method of a BackupInfo object
     """
     # Check the saving method.
     # Load a backup.info file, modify the BackupInfo object
     # then save it.
     server = build_mocked_server(
         main_conf={'basebackups_directory': tmpdir.strpath}, )
     backup_dir = tmpdir.mkdir('fake_name')
     infofile = backup_dir.join('backup.info')
     b_info = BackupInfo(server, backup_id="fake_name")
     b_info.status = BackupInfo.FAILED
     b_info.save()
     # read the file looking for the modified line
     for line in infofile.readlines():
         if line.startswith("status"):
             assert line.strip() == "status=FAILED"
Example #15
    def test_backup_info_version(self, tmpdir):
        """
        Simple test for backup_version management.
        """
        server = build_mocked_server(
            main_conf={'basebackups_directory': tmpdir.strpath}, )

        # new version
        backup_dir = tmpdir.mkdir('fake_backup_id')
        backup_dir.mkdir('data')
        backup_dir.join('backup.info')
        b_info = BackupInfo(server, backup_id="fake_backup_id")
        assert b_info.backup_version == 2

        # old version
        backup_dir = tmpdir.mkdir('another_fake_backup_id')
        backup_dir.mkdir('pgdata')
        backup_dir.join('backup.info')
        b_info = BackupInfo(server, backup_id="another_fake_backup_id")
        assert b_info.backup_version == 1
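
The version detection exercised by this test comes down to a directory-layout check. A simplified restatement of the heuristic the test implies (treat it as an approximation of the real backup_version property):

    import os

    def detect_backup_version(backup_dir):
        # Version 1 layouts keep the cluster in 'pgdata'; version 2
        # layouts use 'data', with tablespaces stored as sibling
        # directories named after their OIDs.
        return 1 if os.path.isdir(os.path.join(backup_dir, 'pgdata')) else 2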
Example #16
 def _load_backup_cache(self):
     """
     Populate the cache of the available backups, reading information
     from disk.
     """
     self._backup_cache = {}
     # Load all the backups from disk reading the backup.info files
     for filename in glob("%s/*/backup.info" %
                          self.config.basebackups_directory):
         backup = BackupInfo(self.server, filename)
         self._backup_cache[backup.backup_id] = backup
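
The loader derives everything from the on-disk layout alone. A stand-alone sketch of the same discovery step (the helper name is illustrative):

 import os
 from glob import glob

 def list_backup_ids(basebackups_directory):
     # Each backup lives in <basebackups_directory>/<backup_id>/backup.info;
     # the backup id is recovered from the directory name.
     pattern = os.path.join(basebackups_directory, '*', 'backup.info')
     return sorted(os.path.basename(os.path.dirname(p)) for p in glob(pattern))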
Example #17
    def test_exclusive_start_backup(self):
        """
        Basic test for the exclusive start_backup method
        """
        # Build a backup_manager using a mocked server
        server = build_mocked_server(main_conf={
            'backup_options':
            BackupOptions.EXCLUSIVE_BACKUP
        })
        backup_manager = build_backup_manager(server=server)

        # Mock server.get_pg_setting('data_directory') call
        backup_manager.server.postgres.get_setting.return_value = '/pg/data'
        # Mock server.get_pg_configuration_files() call
        server.postgres.get_configuration_files.return_value = dict(
            config_file="/etc/postgresql.conf",
            hba_file="/pg/pg_hba.conf",
            ident_file="/pg/pg_ident.conf",
        )
        # Mock server.get_pg_tablespaces() call
        tablespaces = [Tablespace._make(('test_tbs', 1234, '/tbs/test'))]
        server.postgres.get_tablespaces.return_value = tablespaces

        # Test 1: start exclusive backup
        # Mock server.start_exclusive_backup(label) call
        start_time = datetime.datetime.now()
        server.postgres.start_exclusive_backup.return_value = {
            'location': "A257/44B4C0D8",
            'file_name': "000000060000A25700000044",
            'file_offset': 11845848,
            'timestamp': start_time}

        # Build a test empty backup info
        backup_info = BackupInfo(server=backup_manager.server,
                                 backup_id='fake_id')

        backup_manager.executor.strategy.start_backup(backup_info)

        # Check that all the values are correctly saved inside the BackupInfo
        assert backup_info.pgdata == '/pg/data'
        assert backup_info.config_file == "/etc/postgresql.conf"
        assert backup_info.hba_file == "/pg/pg_hba.conf"
        assert backup_info.ident_file == "/pg/pg_ident.conf"
        assert backup_info.tablespaces == tablespaces
        assert backup_info.status == 'STARTED'
        assert backup_info.timeline == 6
        assert backup_info.begin_xlog == 'A257/44B4C0D8'
        assert backup_info.begin_wal == '000000060000A25700000044'
        assert backup_info.begin_offset == 11845848
        assert backup_info.begin_time == start_time
        # Check that the correct call to start_exclusive_backup has been made
        server.postgres.start_exclusive_backup.assert_called_with(
            'Barman backup main fake_id')
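
The asserted timeline and begin_offset are both derived from the start_exclusive_backup result: the first eight hex digits of the WAL file name encode the timeline, and with the default 16 MB segment size the in-segment offset equals the low 24 bits of the LSN. A sketch of that decoding (hypothetical helper; values taken from the test above):

    def decode_wal_position(wal_name, lsn):
        # WAL segment names are TTTTTTTTXXXXXXXXSSSSSSSS in hex:
        # timeline, high 32 LSN bits, segment number. Assuming the
        # default 16 MB segments, the offset is the low 24 LSN bits.
        timeline = int(wal_name[0:8], 16)
        offset = int(lsn.split('/')[1], 16) & 0xFFFFFF
        return timeline, offset

    assert decode_wal_position('000000060000A25700000044',
                               'A257/44B4C0D8') == (6, 11845848)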
Example #18
    def test_pgespresso_start_backup(self):
        """
        Test concurrent backup using pgespresso
        """
        # Test: start concurrent backup
        # Build a backup_manager using a mocked server
        server = build_mocked_server(main_conf={
            'backup_options':
            BackupOptions.CONCURRENT_BACKUP
        })
        backup_manager = build_backup_manager(server=server)
        # Mock server.get_pg_setting('data_directory') call
        backup_manager.server.postgres.get_setting.return_value = '/pg/data'
        # Mock server.get_pg_configuration_files() call
        server.postgres.get_configuration_files.return_value = dict(
            config_file="/etc/postgresql.conf",
            hba_file="/pg/pg_hba.conf",
            ident_file="/pg/pg_ident.conf",
        )
        # Mock server.get_pg_tablespaces() call
        tablespaces = [Tablespace._make(('test_tbs', 1234, '/tbs/test'))]
        server.postgres.get_tablespaces.return_value = tablespaces
        server.postgres.server_version = 90500

        # Mock executor._pgespresso_start_backup(label) call
        start_time = datetime.datetime.now(tz.tzlocal()).replace(microsecond=0)
        server.postgres.pgespresso_start_backup.return_value = {
            'backup_label':
                "START WAL LOCATION: 266/4A9C1EF8 "
                "(file 00000010000002660000004A)\n"
                "START TIME: %s" % start_time.strftime('%Y-%m-%d %H:%M:%S %Z'),
        }
        # Build a test empty backup info
        backup_info = BackupInfo(server=backup_manager.server,
                                 backup_id='fake_id2')

        backup_manager.executor.strategy.start_backup(backup_info)

        # Check that all the values are correctly saved inside the BackupInfo
        assert backup_info.pgdata == '/pg/data'
        assert backup_info.config_file == "/etc/postgresql.conf"
        assert backup_info.hba_file == "/pg/pg_hba.conf"
        assert backup_info.ident_file == "/pg/pg_ident.conf"
        assert backup_info.tablespaces == tablespaces
        assert backup_info.status == 'STARTED'
        assert backup_info.timeline == 16
        assert backup_info.begin_xlog == '266/4A9C1EF8'
        assert backup_info.begin_wal == '00000010000002660000004A'
        assert backup_info.begin_offset == 10231544
        assert backup_info.begin_time == start_time
        # Check that the correct call to pg_start_backup has been made
        server.postgres.pgespresso_start_backup.assert_called_with(
            'Barman backup main fake_id2')
Example #19
    def test_concurrent_start_backup(self):
        """
        Test concurrent backup using 9.6 api
        """
        # Test: start concurrent backup
        # Build a backup_manager using a mocked server
        server = build_mocked_server(main_conf={
            'backup_options':
            BackupOptions.CONCURRENT_BACKUP
        })
        backup_manager = build_backup_manager(server=server)
        # Mock server.get_pg_setting('data_directory') call
        backup_manager.server.postgres.get_setting.return_value = '/pg/data'
        # Mock server.get_pg_configuration_files() call
        server.postgres.get_configuration_files.return_value = dict(
            config_file="/etc/postgresql.conf",
            hba_file="/pg/pg_hba.conf",
            ident_file="/pg/pg_ident.conf",
        )
        # Mock server.get_pg_tablespaces() call
        tablespaces = [Tablespace._make(('test_tbs', 1234, '/tbs/test'))]
        server.postgres.get_tablespaces.return_value = tablespaces
        # this is a postgres 9.6
        server.postgres.server_version = 90600

        # Mock call to new api method
        start_time = datetime.datetime.now()
        server.postgres.start_concurrent_backup.return_value = {
            'location': "A257/44B4C0D8",
            'timeline': 6,
            'timestamp': start_time,
        }
        # Build a test empty backup info
        backup_info = BackupInfo(server=backup_manager.server,
                                 backup_id='fake_id2')

        backup_manager.executor.strategy.start_backup(backup_info)

        # Check that all the values are correctly saved inside the BackupInfo
        assert backup_info.pgdata == '/pg/data'
        assert backup_info.config_file == "/etc/postgresql.conf"
        assert backup_info.hba_file == "/pg/pg_hba.conf"
        assert backup_info.ident_file == "/pg/pg_ident.conf"
        assert backup_info.tablespaces == tablespaces
        assert backup_info.status == 'STARTED'
        assert backup_info.timeline == 6
        assert backup_info.begin_xlog == 'A257/44B4C0D8'
        assert backup_info.begin_wal == '000000060000A25700000044'
        assert backup_info.begin_offset == 11845848
        assert backup_info.begin_time == start_time
Example #20
    def test_postgres_start_backup(self):
        """
        Test concurrent backup using pg_basebackup
        """
        # Test: start concurrent backup
        backup_manager = build_backup_manager(global_conf={
            'backup_method': 'postgres'
        })
        # Mock server.get_pg_setting('data_directory') call
        postgres_mock = backup_manager.server.postgres
        postgres_mock.get_setting.side_effect = [
            '/test/fake_data_dir',
        ]
        # Mock server.get_pg_configuration_files() call
        postgres_mock.get_configuration_files.return_value = dict(
            config_file="/etc/postgresql.conf",
            hba_file="/pg/pg_hba.conf",
            ident_file="/pg/pg_ident.conf",
        )
        # Mock server.get_pg_tablespaces() call
        tablespaces = [Tablespace._make(('test_tbs', 1234, '/tbs/test'))]
        postgres_mock.get_tablespaces.return_value = tablespaces
        # this is a postgres 9.5
        postgres_mock.server_version = 90500

        # Mock call to new api method
        start_time = datetime.datetime.now()
        postgres_mock.current_xlog_info = {
            'location': "A257/44B4C0D8",
            'timestamp': start_time,
        }
        # Build a test empty backup info
        backup_info = BackupInfo(server=backup_manager.server,
                                 backup_id='fake_id2')

        backup_manager.executor.strategy.start_backup(backup_info)

        # Check that all the values are correctly saved inside the BackupInfo
        assert backup_info.pgdata == '/test/fake_data_dir'
        assert backup_info.config_file == "/etc/postgresql.conf"
        assert backup_info.hba_file == "/pg/pg_hba.conf"
        assert backup_info.ident_file == "/pg/pg_ident.conf"
        assert backup_info.tablespaces == tablespaces
        assert backup_info.status == 'STARTED'
        assert backup_info.timeline is None
        assert backup_info.begin_xlog == 'A257/44B4C0D8'
        assert backup_info.begin_wal is None
        assert backup_info.begin_offset is None
        assert backup_info.begin_time == start_time
Example #21
    def _retrieve_safe_horizon(self, recovery_info, backup_info, dest):
        """
        Retrieve the safe_horizon for smart copy

        If the target directory contains a previous recovery, it is safe to
        pick the least of the two backup "begin times" (the one we are
        recovering now and the one previously recovered in the target
        directory). Set the value in the given recovery_info dictionary.

        :param dict recovery_info: Dictionary containing all the recovery
            parameters
        :param barman.infofile.BackupInfo backup_info: a backup representation
        :param str dest: recovery destination directory
        """
        # noinspection PyBroadException
        try:
            backup_begin_time = backup_info.begin_time
            # Retrieve previously recovered backup metadata (if available)
            dest_info_txt = recovery_info['cmd'].get_file_content(
                os.path.join(dest, '.barman-recover.info'))
            dest_info = BackupInfo(self.server,
                                   info_file=StringIO(dest_info_txt))
            dest_begin_time = dest_info.begin_time
            # Pick the earlier begin time. Both are tz-aware timestamps
            # because the BackupInfo class ensures it
            safe_horizon = min(backup_begin_time, dest_begin_time)
            output.info("Using safe horizon time for smart rsync copy: %s",
                        safe_horizon)
        except FsOperationFailed as e:
            # Setting safe_horizon to None will effectively disable
            # the time-based part of smart_copy method. However it is still
            # faster than running all the transfers with checksum enabled.
            #
            # FsOperationFailed means the .barman-recover.info is not available
            # on destination directory
            safe_horizon = None
            _logger.warning(
                'Unable to retrieve safe horizon time '
                'for smart rsync copy: %s', e)
        except Exception as e:
            # Same as above, but something failed decoding .barman-recover.info
            # or comparing times, so log the full traceback
            safe_horizon = None
            _logger.exception(
                'Error retrieving safe horizon time '
                'for smart rsync copy: %s', e)

        recovery_info['safe_horizon'] = safe_horizon
Example #22
 def test_backup_info_from_empty_file(self, tmpdir):
     """
     Test the initialization of a BackupInfo object
     loading data from a backup.info file
     """
     # We want to test the loading of BackupInfo data from a local file,
     # so we create an empty backup.info file in the tmpdir
     infofile = tmpdir.join("backup.info")
     infofile.write('')
     # Mock the server, we don't need it at the moment
     server = build_mocked_server(name='test_server')
     server.backup_manager.mode = 'test-mode'
     # load the data from the backup.info file
     b_info = BackupInfo(server, info_file=infofile.strpath)
     assert b_info
     assert b_info.server_name == 'test_server'
     assert b_info.mode == 'test-mode'
Example #23
    def test_xlog_segment_size(self, tmpdir):
        """
        Test the `xlog_segment_size` field of BackupInfo
        """

        # Create an empty backup info file to test the
        # default value of xlog_segment_size. It's relevant
        # also for backward compatibility with backup.info files
        # that don't contain the xlog_segment_size field.

        infofile = tmpdir.join("backup.info")
        infofile.write('')

        # Mock the server, we don't need it at the moment
        server = build_mocked_server(name='test_server')
        server.backup_manager.mode = 'test-mode'

        # load the data from the backup.info file
        b_info = BackupInfo(server, info_file=infofile.strpath)
        assert b_info.xlog_segment_size == 1 << 24
Example #24
 def test_backup_info_from_file(self, tmpdir):
     """
     Test the initialization of a BackupInfo object
     loading data from a backup.info file
     """
     # We want to test the loading of BackupInfo data from a local file,
     # so we create a file in the tmpdir containing a
     # valid BackupInfo dump
     infofile = tmpdir.join("backup.info")
     infofile.write(BASE_BACKUP_INFO)
     # Mock the server, we don't need it at the moment
     server = build_mocked_server()
     # load the data from the backup.info file
     b_info = BackupInfo(server, info_file=infofile.strpath)
     assert b_info
     assert b_info.begin_offset == 40
     assert b_info.begin_wal == '000000010000000000000004'
     assert b_info.timeline == 1
     assert isinstance(b_info.tablespaces, list)
     assert b_info.tablespaces[0].name == 'fake_tbs'
     assert b_info.tablespaces[0].oid == 16384
     assert b_info.tablespaces[0].location == '/fake_tmp/tbs'
Example #25
 def test_backup_info_from_backup_id(self, tmpdir):
     """
     Test the initialization of a BackupInfo object
     using a backup_id as argument
     """
     # We want to test the loading system using a backup_id,
     # so we create a backup.info file in the tmpdir and then
     # point the configuration at the location of the
     # test backup.info file
     server = build_mocked_server(
         main_conf={'basebackups_directory': tmpdir.strpath}, )
     infofile = tmpdir.mkdir('fake_name').join('backup.info')
     infofile.write(BASE_BACKUP_INFO)
     # Load the backup.info file using the backup_id
     b_info = BackupInfo(server, backup_id="fake_name")
     assert b_info
     assert b_info.begin_offset == 40
     assert b_info.begin_wal == '000000010000000000000004'
     assert b_info.timeline == 1
     assert isinstance(b_info.tablespaces, list)
     assert b_info.tablespaces[0].name == 'fake_tbs'
     assert b_info.tablespaces[0].oid == 16384
     assert b_info.tablespaces[0].location == '/fake_tmp/tbs'
Example #26
    def backup(self):
        """
        Performs a backup for the server

        :return BackupInfo: the generated BackupInfo
        """
        _logger.debug("initialising backup information")
        self.executor.init()
        backup_info = None
        try:
            # Create the BackupInfo object representing the backup
            backup_info = BackupInfo(
                self.server,
                backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))
            backup_info.save()
            self.backup_cache_add(backup_info)
            output.info(
                "Starting backup using %s method for server %s in %s",
                self.mode,
                self.config.name,
                backup_info.get_basebackup_directory())

            # Run the pre-backup-script if present.
            script = HookScriptRunner(self, 'backup_script', 'pre')
            script.env_from_backup_info(backup_info)
            script.run()

            # Run the pre-backup-retry-script if present.
            retry_script = RetryHookScriptRunner(
                self, 'backup_retry_script', 'pre')
            retry_script.env_from_backup_info(backup_info)
            retry_script.run()

            # Do the backup using the BackupExecutor
            self.executor.backup(backup_info)

            # Compute backup size and fsync it on disk
            self.backup_fsync_and_set_sizes(backup_info)

            # Mark the backup as WAITING_FOR_WALS
            backup_info.set_attribute("status", BackupInfo.WAITING_FOR_WALS)
        # Use BaseException instead of Exception to catch events like
        # KeyboardInterrupt (e.g.: CTRL-C)
        except BaseException as e:
            msg_lines = force_str(e).strip().splitlines()
            if backup_info:
                # Use only the first line of exception message
                # in backup_info error field
                backup_info.set_attribute("status", "FAILED")
                # If the exception has no attached message use the raw
                # type name
                if len(msg_lines) == 0:
                    msg_lines = [type(e).__name__]
                backup_info.set_attribute(
                    "error",
                    "failure %s (%s)" % (
                        self.executor.current_action, msg_lines[0]))

            output.error("Backup failed %s.\nDETAILS: %s\n%s",
                         self.executor.current_action, msg_lines[0],
                         '\n'.join(msg_lines[1:]))

        else:
            output.info("Backup end at LSN: %s (%s, %08X)",
                        backup_info.end_xlog,
                        backup_info.end_wal,
                        backup_info.end_offset)

            executor = self.executor
            output.info(
                "Backup completed (start time: %s, elapsed time: %s)",
                self.executor.copy_start_time,
                human_readable_timedelta(
                    datetime.datetime.now() - executor.copy_start_time))
            # Create a restore point after a backup
            target_name = 'barman_%s' % backup_info.backup_id
            self.server.postgres.create_restore_point(target_name)
        finally:
            if backup_info:
                backup_info.save()

                # Make sure we are not holding any PostgreSQL connection
                # during the post-backup scripts
                self.server.close()

                # Run the post-backup-retry-script if present.
                try:
                    retry_script = RetryHookScriptRunner(
                        self, 'backup_retry_script', 'post')
                    retry_script.env_from_backup_info(backup_info)
                    retry_script.run()
                except AbortedRetryHookScript as e:
                    # Ignore the ABORT_STOP as it is a post-hook operation
                    _logger.warning("Ignoring stop request after receiving "
                                    "abort (exit code %d) from post-backup "
                                    "retry hook script: %s",
                                    e.hook.exit_status, e.hook.script)

                # Run the post-backup-script if present.
                script = HookScriptRunner(self, 'backup_script', 'post')
                script.env_from_backup_info(backup_info)
                script.run()

        output.result('backup', backup_info)
        return backup_info
Example #27
    def test_passive_node_cron(self, subprocess_mock, command_mock,
                               monkeypatch, tmpdir, capsys):
        """
        Check the passive node version of the cron command

        :param MagicMock subprocess_mock: Mock of
            barman.command_wrappers.BarmanSubProcess
        :param MagicMock command_mock: Mock of
            barman.command_wrappers.Command
        :param monkeypatch monkeypatch: pytest patcher
        :param py.local.path tmpdir: pytest temporary directory
        :param capsys: fixture for reading sysout
        """
        # We need to setup a server object
        barman_home = tmpdir.mkdir("barman_home")
        backup_dir = barman_home.mkdir("main")
        wals_dir = backup_dir.mkdir("wals")
        # Build the configuration for the server using
        # a fake configuration object filled with test values
        config = build_config_from_dicts(
            global_conf=dict(barman_home=str(barman_home)),
            main_conf=dict(compression=None,
                           wals_directory=str(wals_dir),
                           primary_ssh_command='ssh fakeuser@fakehost'))
        server = barman.server.Server(config.get_server('main'))
        # Make the configuration available through the global namespace
        # (required to invoke a subprocess to retrieve the config file name)
        monkeypatch.setattr(barman, '__config__', config)
        # We need to build a test response from the remote server.
        # We use the out property of the command_mock for
        # returning the test response
        command_mock.return_value.out = json.dumps(EXPECTED_MINIMAL)
        server.cron()
        (out, err) = capsys.readouterr()
        # Assertion block 1: the execution of the cron command for passive
        # node should be successful
        assert "Starting copy of backup" in out
        assert "Started copy of WAL files for server" in out

        # Modify the response of the fake remote call
        primary_info = dict(EXPECTED_MINIMAL)
        primary_info['backups'] = []
        primary_info['wals'] = []
        command_mock.return_value.out = json.dumps(primary_info)
        server.cron()
        (out, err) = capsys.readouterr()
        # Assertion block 2: No backup or wal synchronisation required
        assert "No backup synchronisation required" in out
        assert "No WAL synchronisation required for server" in out

        # Add a backup to the remote response
        primary_info = dict(EXPECTED_MINIMAL)
        backup_info_dict = BackupInfo(server, backup_id='1234567891').to_json()
        primary_info['backups']['1234567891'] = backup_info_dict
        command_mock.return_value.out = json.dumps(primary_info)
        server.cron()
        (out, err) = capsys.readouterr()
        # Assertion block 3: start the copy of the first backup
        # in the list (1234567890), and not the second one (1234567891)
        assert "Starting copy of backup 1234567890" in out
        assert "Started copy of WAL files for server main" in out
        assert "1234567891" not in out

        # Patch on the fly the Lockfile object, testing the locking
        # management of the method.
        with mock.patch.multiple('barman.server',
                                 ServerBackupSyncLock=mock.DEFAULT,
                                 ServerWalSyncLock=mock.DEFAULT) as lock_mocks:
            for item in lock_mocks:
                lock_mocks[item].side_effect = LockFileBusy()
            primary_info = dict(EXPECTED_MINIMAL)
            primary_info['backups']['1234567891'] = backup_info_dict
            command_mock.return_value.out = json.dumps(primary_info)
            server.sync_cron()
            (out, err) = capsys.readouterr()
            assert "A synchronisation process for backup 1234567890" in out
            assert "WAL synchronisation already running" in out
Example #28
    def recover(self,
                backup_info,
                dest,
                tablespaces=None,
                remote_command=None,
                target_tli=None,
                target_time=None,
                target_xid=None,
                target_name=None,
                target_immediate=False,
                exclusive=False,
                target_action=None,
                standby_mode=None):
        """
        Performs a recovery of a backup

        This method should be called in a closing context

        :param barman.infofile.BackupInfo backup_info: the backup to recover
        :param str dest: the destination directory
        :param dict[str,str]|None tablespaces: a tablespace
            name -> location map (for relocation)
        :param str|None remote_command: The remote command to recover
                               the base backup, in case of remote backup.
        :param str|None target_tli: the target timeline
        :param str|None target_time: the target time
        :param str|None target_xid: the target xid
        :param str|None target_name: the target name created previously with
                            pg_create_restore_point() function call
        :param bool|None target_immediate: end recovery as soon as consistency
            is reached
        :param bool exclusive: whether the recovery is exclusive or not
        :param str|None target_action: The recovery target action
        :param bool|None standby_mode: standby mode
        """

        # Run the cron to be sure the wal catalog is up to date
        # Prepare a map that contains all the objects required for a recovery
        recovery_info = self._setup(backup_info, remote_command, dest)
        output.info("Starting %s restore for server %s using backup %s",
                    recovery_info['recovery_dest'], self.server.config.name,
                    backup_info.backup_id)
        output.info("Destination directory: %s", dest)
        if remote_command:
            output.info("Remote command: %s", remote_command)

        # If the backup we are recovering is still not validated and we
        # haven't requested the get-wal feature, display a warning message
        if not recovery_info['get_wal']:
            if backup_info.status == BackupInfo.WAITING_FOR_WALS:
                output.warning(
                    "IMPORTANT: You have requested a recovery operation for "
                    "a backup that does not have yet all the WAL files that "
                    "are required for consistency.")

        # Set targets for PITR
        self._set_pitr_targets(recovery_info, backup_info, dest, target_name,
                               target_time, target_tli, target_xid,
                               target_immediate, target_action)

        # Retrieve the safe_horizon for smart copy
        self._retrieve_safe_horizon(recovery_info, backup_info, dest)

        # Check the destination directory; create it if it doesn't exist
        try:
            recovery_info['cmd'].create_dir_if_not_exists(dest)
        except FsOperationFailed as e:
            output.error(
                "unable to initialise destination directory "
                "'%s': %s", dest, e)
            output.close_and_exit()

        # Initialize tablespace directories
        if backup_info.tablespaces:
            self._prepare_tablespaces(backup_info, recovery_info['cmd'], dest,
                                      tablespaces)
        # Copy the base backup
        output.info("Copying the base backup.")
        try:
            self._backup_copy(backup_info, dest, tablespaces, remote_command,
                              recovery_info['safe_horizon'])
        except DataTransferFailure as e:
            output.error("Failure copying base backup: %s", e)
            output.close_and_exit()

        # Copy the backup.info file in the destination as
        # ".barman-recover.info"
        if remote_command:
            try:
                recovery_info['rsync'](backup_info.filename,
                                       ':%s/.barman-recover.info' % dest)
            except CommandFailedException as e:
                output.error('copy of recovery metadata file failed: %s', e)
                output.close_and_exit()
        else:
            backup_info.save(os.path.join(dest, '.barman-recover.info'))

        # Standby mode is not available for PostgreSQL older than 9.0
        if backup_info.version < 90000 and standby_mode:
            raise RecoveryStandbyModeException(
                'standby_mode is available only from PostgreSQL 9.0')

        # Restore the WAL segments. If the get_wal option is set, skip this
        # phase as they will be retrieved using the get-wal command.
        if not recovery_info['get_wal']:
            # If the backup we restored is still waiting for WALS, read the
            # backup info again and check whether it has been validated.
            # Notify the user if it is still not DONE.
            if backup_info.status == BackupInfo.WAITING_FOR_WALS:
                data = BackupInfo(self.server, backup_info.filename)
                if data.status == BackupInfo.WAITING_FOR_WALS:
                    output.warning(
                        "IMPORTANT: The backup we have recovered IS NOT "
                        "VALID. Required WAL files for consistency are "
                        "missing. Please verify that WAL archiving is "
                        "working correctly or evaluate using the 'get-wal' "
                        "option for recovery")

            output.info("Copying required WAL segments.")

            try:
                # Retrieve a list of required log files
                required_xlog_files = tuple(
                    self.server.get_required_xlog_files(
                        backup_info, target_tli,
                        recovery_info['target_epoch']))

                # Restore WAL segments into the wal_dest directory
                self._xlog_copy(required_xlog_files, recovery_info['wal_dest'],
                                remote_command)
            except DataTransferFailure as e:
                output.error("Failure copying WAL files: %s", e)
                output.close_and_exit()
            except BadXlogSegmentName as e:
                output.error(
                    "invalid xlog segment name %r\n"
                    "HINT: Please run \"barman rebuild-xlogdb %s\" "
                    "to solve this issue", force_str(e), self.config.name)
                output.close_and_exit()
            # If WAL files are put directly in the pg_xlog directory,
            # avoid shipping of just recovered files
            # by creating the corresponding archive status file
            if not recovery_info['is_pitr']:
                output.info("Generating archive status files")
                self._generate_archive_status(recovery_info, remote_command,
                                              required_xlog_files)

        # Generate recovery.conf file (only if needed by PITR or get_wal)
        is_pitr = recovery_info['is_pitr']
        get_wal = recovery_info['get_wal']
        if is_pitr or get_wal or standby_mode:
            output.info("Generating recovery.conf")
            self._generate_recovery_conf(recovery_info, backup_info, dest,
                                         target_immediate, exclusive,
                                         remote_command, target_name,
                                         target_time, target_tli, target_xid,
                                         standby_mode)

        # Create archive_status directory if necessary
        archive_status_dir = os.path.join(recovery_info['wal_dest'],
                                          'archive_status')
        try:
            recovery_info['cmd'].create_dir_if_not_exists(archive_status_dir)
        except FsOperationFailed as e:
            output.error(
                "unable to create the archive_status directory "
                "'%s': %s", archive_status_dir, e)
            output.close_and_exit()

        # As a last step, analyse the configuration files in order to spot
        # harmful options. Barman performs automatic conversion of some
        # options, as well as notifying users of their existence.
        #
        # This operation is performed in three steps:
        # 1) mapping
        # 2) analysis
        # 3) copy
        output.info("Identify dangerous settings in destination directory.")

        self._map_temporary_config_files(recovery_info, backup_info,
                                         remote_command)
        self._analyse_temporary_config_files(recovery_info)
        self._copy_temporary_config_files(dest, remote_command, recovery_info)

        return recovery_info
Example #29
    def test_data_dir(self, tmpdir):
        """
        Simple test for the method responsible for building the paths
        to the data directory and to the tablespace directories,
        according to backup_version
        """
        server = build_mocked_server(
            main_conf={'basebackups_directory': tmpdir.strpath}, )

        # Build a fake v2 backup
        backup_dir = tmpdir.mkdir('fake_backup_id')
        data_dir = backup_dir.mkdir('data')
        info_file = backup_dir.join('backup.info')
        info_file.write(BASE_BACKUP_INFO)
        b_info = BackupInfo(server, backup_id="fake_backup_id")

        # Check that the paths are built according with version
        assert b_info.backup_version == 2
        assert b_info.get_data_directory() == data_dir.strpath
        assert b_info.get_data_directory(16384) == (backup_dir.strpath +
                                                    '/16384')

        # Build a fake v1 backup
        backup_dir = tmpdir.mkdir('another_fake_backup_id')
        pgdata_dir = backup_dir.mkdir('pgdata')
        info_file = backup_dir.join('backup.info')
        info_file.write(BASE_BACKUP_INFO)
        b_info = BackupInfo(server, backup_id="another_fake_backup_id")

        # Check that the paths are built according with version
        assert b_info.backup_version == 1
        assert b_info.get_data_directory(16384) == \
            backup_dir.strpath + '/pgdata/pg_tblspc/16384'
        assert b_info.get_data_directory() == pgdata_dir.strpath

        # Check that an exception is raised if an invalid oid
        # is provided to the method
        with pytest.raises(ValueError):
            b_info.get_data_directory(12345)

        # Check that a ValueError exception is raised with an
        # invalid oid when the tablespaces list is None
        b_info.tablespaces = None
        # and expect a value error
        with pytest.raises(ValueError):
            b_info.get_data_directory(16384)
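
The path rules asserted above fit in a few lines (an illustrative helper, not the real get_data_directory, which also validates the oid against the tablespace list):

    import os

    def tablespace_path(backup_dir, backup_version, oid):
        # v2 stores each tablespace beside 'data' in a directory named
        # after its OID; v1 keeps the original pg_tblspc tree inside
        # 'pgdata'.
        if backup_version == 2:
            return os.path.join(backup_dir, str(oid))
        return os.path.join(backup_dir, 'pgdata', 'pg_tblspc', str(oid))

    assert tablespace_path('/backups/b1', 2, 16384) == '/backups/b1/16384'
    assert tablespace_path('/backups/b1', 1, 16384) == \
        '/backups/b1/pgdata/pg_tblspc/16384'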
Example #30
    def test_backup(self, rwbb_mock, gpb_mock, retry_mock, capsys, tmpdir):
        """
        Test the execution of a backup

        :param rwbb_mock: mock for the remove_wal_before_backup method
        :param gpb_mock: mock for the get_previous_backup method
        :param retry_mock: mock for the retry_backup_copy method
        :param capsys: stdout capture module
        :param tmpdir: pytest temp directory
        """
        backup_manager = build_backup_manager(global_conf={
            'barman_home': tmpdir.mkdir('home').strpath
        })
        backup_info = BackupInfo(backup_manager.server,
                                 backup_id='fake_backup_id')
        backup_info.begin_xlog = "0/2000028"
        backup_info.begin_wal = "000000010000000000000002"
        backup_info.begin_offset = 40
        backup_info.status = BackupInfo.EMPTY

        gpb_mock.return_value = None

        # Test 1: exclusive backup
        backup_manager.executor.strategy = Mock()
        backup_manager.executor.backup(backup_info)
        out, err = capsys.readouterr()
        assert err == ''
        assert "Backup start at xlog location: " \
               "0/2000028 (000000010000000000000002, 00000028)\n" \
               "Copying files.\n" \
               "Copy done." in out

        gpb_mock.assert_called_once_with(backup_info.backup_id)
        rwbb_mock.assert_called_once_with(backup_info)
        backup_manager.executor.strategy.start_backup.assert_called_once_with(
            backup_info)
        retry_mock.assert_called_once_with(
            backup_manager.executor.backup_copy, backup_info)
        backup_manager.executor.strategy.stop_backup.assert_called_once_with(
            backup_info)

        # Test 2: concurrent backup
        # change the configuration to concurrent backup
        backup_manager.executor.config.backup_options = [
            BackupOptions.CONCURRENT_BACKUP]

        # reset mocks
        gpb_mock.reset_mock()
        rwbb_mock.reset_mock()
        backup_manager.executor.strategy.reset_mock()
        retry_mock.reset_mock()

        # prepare data directory for backup_label generation
        backup_info.backup_label = 'test\nlabel\n'

        backup_manager.executor.backup(backup_info)
        out, err = capsys.readouterr()
        assert err == ''
        assert "Backup start at xlog location: " \
               "0/2000028 (000000010000000000000002, 00000028)\n" \
               "Copying files.\n" \
               "Copy done." in out

        gpb_mock.assert_called_once_with(backup_info.backup_id)
        rwbb_mock.assert_called_once_with(backup_info)
        backup_manager.executor.strategy.start_backup.assert_called_once_with(
            backup_info)
        retry_mock.assert_called_once_with(
            backup_manager.executor.backup_copy, backup_info)
        backup_manager.executor.strategy.start_backup.assert_called_once_with(
            backup_info)
Example #31
    def backup(self):
        """
        Performs a backup for the server
        """
        _logger.debug("initialising backup information")
        self.executor.init()
        backup_info = None
        try:
            # Create the BackupInfo object representing the backup
            backup_info = BackupInfo(
                self.server,
                backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))
            backup_info.save()
            self.backup_cache_add(backup_info)
            output.info(
                "Starting backup using %s method for server %s in %s",
                self.mode,
                self.config.name,
                backup_info.get_basebackup_directory())

            # Run the pre-backup-script if present.
            script = HookScriptRunner(self, 'backup_script', 'pre')
            script.env_from_backup_info(backup_info)
            script.run()

            # Run the pre-backup-retry-script if present.
            retry_script = RetryHookScriptRunner(
                self, 'backup_retry_script', 'pre')
            retry_script.env_from_backup_info(backup_info)
            retry_script.run()

            # Do the backup using the BackupExecutor
            self.executor.backup(backup_info)

            # Compute backup size and fsync it on disk
            self.backup_fsync_and_set_sizes(backup_info)

            # Mark the backup as DONE
            backup_info.set_attribute("status", "DONE")
        # Use BaseException instead of Exception to catch events like
        # KeyboardInterrupt (e.g.: CTRL-C)
        except BaseException as e:
            msg_lines = str(e).strip().splitlines()
            if backup_info:
                # Use only the first line of exception message
                # in backup_info error field
                backup_info.set_attribute("status", "FAILED")
                # If the exception has no attached message use the raw
                # type name
                if len(msg_lines) == 0:
                    msg_lines = [type(e).__name__]
                backup_info.set_attribute(
                    "error",
                    "failure %s (%s)" % (
                        self.executor.current_action, msg_lines[0]))

            output.error("Backup failed %s.\nDETAILS: %s\n%s",
                         self.executor.current_action, msg_lines[0],
                         '\n'.join(msg_lines[1:]))

        else:
            output.info("Backup end at xlog location: %s (%s, %08X)",
                        backup_info.end_xlog,
                        backup_info.end_wal,
                        backup_info.end_offset)
            output.info("Backup completed")
            # Create a restore point after a backup
            target_name = 'barman_%s' % backup_info.backup_id
            self.server.postgres.create_restore_point(target_name)
        finally:
            if backup_info:
                backup_info.save()

                # Make sure we are not holding any PostgreSQL connection
                # during the post-backup scripts
                self.server.close()

                # Run the post-backup-retry-script if present.
                try:
                    retry_script = RetryHookScriptRunner(
                        self, 'backup_retry_script', 'post')
                    retry_script.env_from_backup_info(backup_info)
                    retry_script.run()
                except AbortedRetryHookScript as e:
                    # Ignore the ABORT_STOP as it is a post-hook operation
                    _logger.warning("Ignoring stop request after receiving "
                                    "abort (exit code %d) from post-backup "
                                    "retry hook script: %s",
                                    e.hook.exit_status, e.hook.script)

                # Run the post-backup-script if present.
                script = HookScriptRunner(self, 'backup_script', 'post')
                script.env_from_backup_info(backup_info)
                script.run()

        output.result('backup', backup_info)
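
The try/except/else/finally layout above guarantees that the backup status is persisted and that post-backup hooks run no matter how the copy ends. A minimal, self-contained sketch of that control flow, with a hypothetical FakeRecord standing in for BackupInfo and plain callables standing in for the hook runners:

class FakeRecord:
    """Hypothetical stand-in for BackupInfo (illustration only)."""
    status = "STARTED"
    error = None

    def save(self):
        print("saved with status:", self.status)


def run_with_status(record, do_work, post_hook):
    """Sketch of the status-handling pattern used by backup() above."""
    try:
        do_work(record)
        record.status = "DONE"
    # BaseException also catches events like KeyboardInterrupt (CTRL-C)
    except BaseException as exc:
        record.status = "FAILED"
        msg_lines = str(exc).strip().splitlines()
        if not msg_lines:
            # No attached message: fall back to the raw type name
            msg_lines = [type(exc).__name__]
        record.error = msg_lines[0]
    else:
        print("backup completed")
    finally:
        record.save()        # the status is persisted in every case
        post_hook(record)    # post hooks run even after a failure


run_with_status(FakeRecord(), lambda r: None, lambda r: print("post hook"))

Catching BaseException instead of Exception is deliberate: it ensures a CTRL-C during the copy still leaves a FAILED record on disk.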
Example #32
    def backup(self):
        """
        Upload a backup to S3
        """
        backup_info = BackupInfo(
            backup_id=datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))
        backup_info.set_attribute("systemid", self.postgres.get_systemid())
        key_prefix = os.path.join(
            self.cloud_interface.path,
            self.server_name,
            'base',
            backup_info.backup_id
        )
        controller = S3UploadController(
            self.cloud_interface, key_prefix, self.compression)
        strategy = ConcurrentBackupStrategy(self.postgres)
        logging.info("Starting backup %s", backup_info.backup_id)
        strategy.start_backup(backup_info)
        try:
            self.backup_copy(controller, backup_info)
            logging.info("Stopping backup %s", backup_info.backup_id)
            strategy.stop_backup(backup_info)

            # Create a restore point after a backup
            target_name = 'barman_%s' % backup_info.backup_id
            self.postgres.create_restore_point(target_name)

            # Free the Postgres connection
            self.postgres.close()

            pgdata_stat = os.stat(backup_info.pgdata)
            controller.add_fileobj(
                label='backup_label',
                fileobj=BytesIO(backup_info.backup_label.encode('UTF-8')),
                dst='data',
                path='backup_label',
                uid=pgdata_stat.st_uid,
                gid=pgdata_stat.st_gid,
            )
            # Closing the controller will finalize all the running uploads
            controller.close()

            # Store the end time
            self.copy_end_time = datetime.datetime.now()

            # Store statistics about the copy
            backup_info.set_attribute("copy_stats", controller.statistics())

        # Use BaseException instead of Exception to catch events like
        # KeyboardInterrupt (e.g.: CTRL-C)
        except BaseException as exc:
            # Mark the backup as failed and exit
            self.handle_backup_errors("uploading data", backup_info, exc)
            raise SystemExit(1)
        finally:
            try:
                with BytesIO() as backup_info_file:
                    backup_info.save(file_object=backup_info_file)
                    backup_info_file.seek(0, os.SEEK_SET)
                    key = os.path.join(controller.key_prefix, 'backup.info')
                    logging.info("Uploading %s", key)
                    self.cloud_interface.upload_fileobj(backup_info_file, key)
            except BaseException as exc:
                # Mark the backup as failed and exit
                self.handle_backup_errors("uploading backup.info file",
                                          backup_info, exc)
                raise SystemExit(1)

        logging.info("Backup end at LSN: %s (%s, %08X)",
                     backup_info.end_xlog,
                     backup_info.end_wal,
                     backup_info.end_offset)
        logging.info(
            "Backup completed (start time: %s, elapsed time: %s)",
            self.copy_start_time,
            human_readable_timedelta(
                datetime.datetime.now() - self.copy_start_time))
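
The finally block serializes backup.info into an in-memory BytesIO so the metadata reaches S3 even when the data copy fails. Assuming the cloud interface is ultimately backed by boto3 (which the excerpt wraps but does not show), the upload step might reduce to a sketch like the following; the bucket and key names are made up for illustration:

import io

import boto3  # assumed dependency, hidden behind cloud_interface above

s3 = boto3.client("s3")

# Build an in-memory file object and stream it straight to S3,
# without ever touching the local filesystem
payload = io.BytesIO(b"status=DONE\n")
s3.upload_fileobj(payload, "my-backup-bucket",
                  "test_server/base/20230101T000000/backup.info")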
Example #33
    def test_data_dir(self, tmpdir):
        """
        Simple test for the method responsible for building the paths
        to the data directory and to the tablespace directories
        according to backup_version
        """
        server = build_mocked_server(
            main_conf={
                'basebackups_directory': tmpdir.strpath
            },
        )

        # Build a fake v2 backup
        backup_dir = tmpdir.mkdir('fake_backup_id')
        data_dir = backup_dir.mkdir('data')
        info_file = backup_dir.join('backup.info')
        info_file.write(BASE_BACKUP_INFO)
        b_info = BackupInfo(server, backup_id="fake_backup_id")

        # Check that the paths are built according to the backup version
        assert b_info.backup_version == 2
        assert b_info.get_data_directory() == data_dir.strpath
        assert b_info.get_data_directory(16384) == backup_dir.strpath + '/16384'

        # Build a fake v1 backup
        backup_dir = tmpdir.mkdir('another_fake_backup_id')
        pgdata_dir = backup_dir.mkdir('pgdata')
        info_file = backup_dir.join('backup.info')
        info_file.write(BASE_BACKUP_INFO)
        b_info = BackupInfo(server, backup_id="another_fake_backup_id")

        # Check that the paths are built according to the backup version
        assert b_info.backup_version == 1
        assert b_info.get_data_directory(16384) == \
            backup_dir.strpath + '/pgdata/pg_tblspc/16384'
        assert b_info.get_data_directory() == pgdata_dir.strpath

        # Check that a ValueError is raised if an invalid oid
        # is provided to the method
        with pytest.raises(ValueError):
            b_info.get_data_directory(12345)

        # Check that a ValueError is raised for any oid
        # when the tablespaces list is None
        b_info.tablespaces = None
        with pytest.raises(ValueError):
            b_info.get_data_directory(16384)
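
The assertions above pin down the two on-disk layouts: a v2 backup keeps its files under data/ with each tablespace in a sibling <oid>/ directory, while a v1 backup uses pgdata/ with tablespaces under pgdata/pg_tblspc/<oid>. A hypothetical helper mirroring that rule (not the real get_data_directory implementation) makes the difference explicit:

def data_directory(base_dir, version, oid=None, tablespace_oids=()):
    """Illustrative path builder matching the layouts asserted above."""
    if oid is not None and oid not in tablespace_oids:
        raise ValueError("invalid tablespace oid: %s" % oid)
    if version == 2:
        return "%s/%s" % (base_dir, oid) if oid else base_dir + "/data"
    return ("%s/pgdata/pg_tblspc/%s" % (base_dir, oid)
            if oid else base_dir + "/pgdata")


assert data_directory("/b", 2) == "/b/data"
assert data_directory("/b", 2, 16384, (16384,)) == "/b/16384"
assert data_directory("/b", 1, 16384, (16384,)) == "/b/pgdata/pg_tblspc/16384"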
def build_test_backup_info(backup_id='1234567890',
                           begin_offset=40,
                           begin_time=None,
                           begin_wal='000000010000000000000002',
                           begin_xlog='0/2000028',
                           config_file='/pgdata/location/postgresql.conf',
                           end_offset=184,
                           end_time=None,
                           end_wal='000000010000000000000002',
                           end_xlog='0/20000B8',
                           error=None,
                           hba_file='/pgdata/location/pg_hba.conf',
                           ident_file='/pgdata/location/pg_ident.conf',
                           mode='default',
                           pgdata='/pgdata/location',
                           server_name='test_server',
                           size=12345,
                           status=BackupInfo.DONE,
                           included_files=None,
                           tablespaces=(
                               ('tbs1', 16387, '/fake/location'),
                               ('tbs2', 16405, '/another/location'),
                           ),
                           timeline=1,
                           version=90302,
                           server=None,
                           copy_stats=None):
    """
    Create an 'Ad Hoc' BackupInfo object for testing purposes.

    A BackupInfo object is the barman representation of a physical backup.
    For testing purposes it is necessary to build a BackupInfo while
    avoiding the use of Mock/MagicMock classes as much as possible.

    :param str backup_id: the id of the backup
    :param int begin_offset: begin_offset of the backup
    :param datetime.datetime|None begin_time: begin_time of the backup
    :param str begin_wal: begin_wal of the backup
    :param str begin_xlog: begin_xlog of the backup
    :param str config_file: config file of the backup
    :param int end_offset: end_offset of the backup
    :param datetime.datetime|None end_time: end_time of the backup
    :param str end_wal: end_wal of the backup
    :param str end_xlog: end_xlog of the backup
    :param str|None error: error message for the backup
    :param str hba_file: hba_file for the backup
    :param str ident_file: ident_file for the backup
    :param str mode: mode of execution of the backup
    :param str pgdata: pg_data dir of the backup
    :param str server_name: server name for the backup
    :param int size: size of the backup
    :param str status: status of the execution of the backup
    :param list|None included_files: a list of extra configuration files
    :param list|tuple|None tablespaces: a list of tablespaces for the backup
    :param int timeline: timeline of the backup
    :param int version: postgres version of the backup
    :param barman.server.Server|None server: Server object for the backup
    :param dict|None copy_stats: copy stats dictionary
    :rtype: barman.infofile.BackupInfo
    """
    if begin_time is None:
        begin_time = datetime.now(tz.tzlocal()) - timedelta(minutes=10)
    if end_time is None:
        end_time = datetime.now(tz.tzlocal())

    # Generate a list of tablespace objects (don't use a list comprehension
    # or, in Python 2.x, the 'item' variable will leak into the enclosing
    # scope)
    if tablespaces is not None:
        tablespaces = list(Tablespace._make(item) for item in tablespaces)

    # Manage the server for the Backup info: if no server is provided
    # by the caller use a Mock with a basic configuration
    if server is None:
        server = mock.Mock(name=server_name)
        server.config = build_config_from_dicts().get_server('main')
        server.passive_node = False
        server.backup_manager.name = 'default'

    backup_info = BackupInfo(**locals())
    return backup_info
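
Because every field has a default, a test can override only the attributes it asserts on. A hypothetical usage sketch (the values are arbitrary, and BackupInfo.FAILED is assumed to exist alongside the DONE constant used in the defaults):

# Defaults cover everything except what this hypothetical test cares about
failed_info = build_test_backup_info(
    backup_id='20230101T000000',
    status=BackupInfo.FAILED,   # assumed constant, mirrors the 'FAILED' string
    error='failure uploading data (connection reset)',
    tablespaces=None,           # skip building Tablespace objects entirely
)
assert failed_info.status == BackupInfo.FAILED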
Example #35
    def test_backup(self, rwbb_mock, gpb_mock, backup_copy_mock, capsys,
                    tmpdir):
        """
        Test the execution of a backup

        :param rwbb_mock: mock for the remove_wal_before_backup method
        :param gpb_mock: mock for the get_previous_backup method
        :param backup_copy_mock: mock for the executor's backup_copy method
        :param capsys: stdout capture module
        :param tmpdir: pytest temp directory
        """
        backup_manager = build_backup_manager(
            global_conf={'barman_home': tmpdir.mkdir('home').strpath})
        backup_info = BackupInfo(backup_manager.server,
                                 backup_id='fake_backup_id')
        backup_info.begin_xlog = "0/2000028"
        backup_info.begin_wal = "000000010000000000000002"
        backup_info.begin_offset = 40
        backup_info.status = BackupInfo.EMPTY

        gpb_mock.return_value = None

        rwbb_mock.return_value = ['000000010000000000000001']

        # Test 1: exclusive backup
        backup_manager.executor.strategy = Mock()
        backup_manager.executor.backup(backup_info)
        out, err = capsys.readouterr()
        assert err == ''
        assert (
            "Backup start at xlog location: "
            "0/2000028 (000000010000000000000002, 00000028)\n"
            "This is the first backup for server main\n"
            "WAL segments preceding the current backup have been found:\n"
            "\t000000010000000000000001 from server main has been removed\n"
            "Copying files.\n"
            "Copy done.") in out

        gpb_mock.assert_called_with(backup_info.backup_id)
        rwbb_mock.assert_called_with(backup_info)
        backup_manager.executor.strategy.start_backup.assert_called_once_with(
            backup_info)
        backup_copy_mock.assert_called_once_with(backup_info)
        backup_manager.executor.strategy.stop_backup.assert_called_once_with(
            backup_info)

        # Test 2: concurrent backup
        # change the configuration to concurrent backup
        backup_manager.executor.config.backup_options = [
            BackupOptions.CONCURRENT_BACKUP
        ]

        # reset mocks
        gpb_mock.reset_mock()
        rwbb_mock.reset_mock()
        backup_manager.executor.strategy.reset_mock()
        backup_copy_mock.reset_mock()

        # prepare data directory for backup_label generation
        backup_info.backup_label = 'test\nlabel\n'

        backup_manager.executor.backup(backup_info)
        out, err = capsys.readouterr()
        assert err == ''
        assert (
            "Backup start at xlog location: "
            "0/2000028 (000000010000000000000002, 00000028)\n"
            "This is the first backup for server main\n"
            "WAL segments preceding the current backup have been found:\n"
            "\t000000010000000000000001 from server main has been removed\n"
            "Copying files.\n"
            "Copy done.") in out

        gpb_mock.assert_called_with(backup_info.backup_id)
        rwbb_mock.assert_called_with(backup_info)
        backup_manager.executor.strategy.start_backup.assert_called_once_with(
            backup_info)
        backup_copy_mock.assert_called_once_with(backup_info)
        backup_manager.executor.strategy.stop_backup.assert_called_once_with(
            backup_info)
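
The three mock parameters of test_backup imply stacked mock.patch decorators that are not visible in the excerpt; patches are applied bottom-up, so the decorator closest to the function supplies the first mock argument (rwbb_mock here). A self-contained illustration of that ordering, using made-up classes rather than barman's real patch targets:

from unittest import mock


class Demo:
    def a(self):
        return "a"

    def b(self):
        return "b"


# The bottom decorator (patching "a") is applied first, so its mock is
# the first injected argument, just like rwbb_mock in the test above.
@mock.patch.object(Demo, "b")
@mock.patch.object(Demo, "a")
def check(a_mock, b_mock):
    a_mock.return_value = "patched-a"
    assert Demo().a() == "patched-a"
    assert Demo().b() is b_mock.return_value


check()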