Example #1
 def get_list_of_files(self, target):
     '''Get the list of files for the current backup'''
     # Walk down the base backup directory
     if target in ('data', 'standalone', 'full'):
         for root, _, files in os.walk(self.get_basebackup_directory()):
             for f in files:
                 yield os.path.join(root, f)
     if target in ('standalone',):  # tuple, not a bare string
         # List all the WAL files for this backup
         for x in self.get_required_wal_segments():
             hashdir = os.path.join(self.config.wals_directory, xlog.hash_dir(x))
             yield os.path.join(hashdir, x)
     if target in ('wal', 'full'):
         for x, _ in self.server.get_wal_until_next_backup(self):
             hashdir = os.path.join(self.config.wals_directory, xlog.hash_dir(x))
             yield os.path.join(hashdir, x)
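Since get_list_of_files() is a generator, callers can stream the paths without materializing the whole list. A minimal usage sketch, assuming backup_info is an instance of the class that defines the method above:

    import os

    # Hypothetical: sum the on-disk size of a 'full' backup
    # (base backup files plus the WALs up to the next backup)
    total_size = 0
    for file_path in backup_info.get_list_of_files('full'):
        total_size += os.path.getsize(file_path)
    print("Backup size: %d bytes" % total_size)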
Example #2
 def test_recover_xlog(self, rsync_pg_mock, tmpdir):
     """
     Test the recovery of the xlogs of a backup
     :param rsync_pg_mock: Mock rsync object for the purpose of this test
     """
     # Build basic folders/files structure
     dest = tmpdir.mkdir('destination')
     wals = tmpdir.mkdir('wals')
     xlog_dir = wals.mkdir(xlog.hash_dir('000000000000000000000002'))
     xlog_file = xlog_dir.join('000000000000000000000002')
     xlog_file.write('dummy content')
     server = testing_helpers.build_real_server(
         main_conf={'wals_directory': wals.strpath})
     # build executor
     executor = RecoveryExecutor(server.backup_manager)
     required_wals = (WalFileInfo.from_xlogdb_line(
         '000000000000000000000002\t42\t43\tNone\n'),)
     executor.xlog_copy(required_wals, dest.strpath, None)
     # check for a correct invocation of rsync using local paths
     rsync_pg_mock.from_file_list.assert_called_once_with(
         ['000000000000000000000002'],
         xlog_dir.strpath,
         dest.strpath)
     # reset mock calls
     rsync_pg_mock.reset_mock()
     required_wals = (WalFileInfo.from_xlogdb_line(
         '000000000000000000000002\t42\t43\tNone\n'),)
     executor.backup_manager.compression_manager = Mock()
     executor.xlog_copy(required_wals, dest.strpath, 'remote_command')
     # check for the invocation of rsync on a remote call
     rsync_pg_mock.assert_called_once_with(network_compression=False,
                                           bwlimit=None,
                                           ssh='remote_command')
Example #3
    def upload_wal(self, wal_path):
        """
        Upload a WAL file from postgres to S3

        :param str wal_path: Full path of the WAL file
        """
        # Extract the WAL file
        wal_name = self.retrieve_wal_name(wal_path)
        # Use the correct file object for the upload (simple|gzip|bz2)
        file_object = self.retrieve_file_obj(wal_path)
        # Correctly format the destination path on s3
        destination = os.path.join(
            self.cloud_interface.path,
            self.server_name,
            'wals',
            hash_dir(wal_path),
            wal_name
        )

        # Remove initial "/", otherwise we will create a folder with an empty
        # name.
        if destination[0] == '/':
            destination = destination[1:]

        # Put the file in the correct bucket.
        # The put method will handle automatically multipart upload
        self.cloud_interface.upload_fileobj(
            fileobj=file_object,
            key=destination)
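For illustration, the key layout produced above with concrete values (matching the test in the next example). hash_dir() accepts a full path and hashes on the file name, so the WAL ends up under a directory named after its timeline and log id:

    import os

    # Illustrative values only
    path = "path/to/dir"                    # self.cloud_interface.path
    server_name = "test-server"             # self.server_name
    wal_path = "/wal_dir/000000080000ABFF000000C1"

    destination = os.path.join(
        path, server_name, "wals",
        "000000080000ABFF",                 # hash_dir(wal_path)
        os.path.basename(wal_path),
    )
    assert destination == ("path/to/dir/test-server/wals/"
                           "000000080000ABFF/000000080000ABFF000000C1")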
Example #4
 def test_encrypted_upload_wal(self, rfo_mock, boto_mock):
     """
     Test the upload of a WAL with server-side encryption enabled
     """
     # Create a simple CloudWalUploader obj
     cloud_interface = S3CloudInterface("s3://bucket/path/to/dir",
                                        encryption="AES256")
     uploader = CloudWalUploader(cloud_interface, "test-server")
     source = "/wal_dir/000000080000ABFF000000C1"
     # Simulate the file object returned by the retrieve_file_obj method
     rfo_mock.return_value.name = source
     uploader.upload_wal(source)
     session_mock = boto_mock.Session.return_value
     s3_client_mock = session_mock.resource.return_value.meta.client
     # Check the call for the creation of the destination key
     s3_client_mock.upload_fileobj.assert_called_once_with(
         Fileobj=rfo_mock.return_value,
         Bucket=cloud_interface.bucket_name,
         Key=os.path.join(
             cloud_interface.path,
             uploader.server_name,
             "wals",
             hash_dir(source),
             os.path.basename(source),
         ),
         ExtraArgs={"ServerSideEncryption": "AES256"},
     )
Example #5
    def test_upload_wal(self, rfo_mock, boto_mock):
        """
        Test the upload of a WAL
        """
        # Create a simple S3WalUploader obj
        cloud_interface = CloudInterface(
            's3://bucket/path/to/dir',
            encryption=None)
        uploader = S3WalUploader(
            cloud_interface, 'test-server'
        )
        source = '/wal_dir/000000080000ABFF000000C1'
        # Simulate the file object returned by the retrieve_file_obj method
        rfo_mock.return_value.name = source
        uploader.upload_wal(source)

        session_mock = boto_mock.Session.return_value
        s3_client_mock = session_mock.resource.return_value.meta.client
        # Check the call for the creation of the destination key
        s3_client_mock.upload_fileobj.assert_called_once_with(
            Fileobj=rfo_mock.return_value,
            Bucket=cloud_interface.bucket_name,
            Key=os.path.join(
                cloud_interface.path,
                uploader.server_name,
                'wals',
                hash_dir(source),
                os.path.basename(source))[1:],
            ExtraArgs={}
        )
Example #6
 def get_list_of_files(self, target):
     '''Get the list of files for the current backup'''
     # Walk down the base backup directory
     if target in ('data', 'standalone', 'full'):
         for root, _, files in os.walk(self.get_basebackup_directory()):
             for f in files:
                 yield os.path.join(root, f)
     if target in ('standalone',):  # tuple, not a bare string
         # List all the WAL files for this backup
         for x in self.get_required_wal_segments():
             hashdir = os.path.join(self.config.wals_directory, xlog.hash_dir(x))
             yield os.path.join(hashdir, x)
     if target in ('wal', 'full'):
         for x, _ in self.server.get_wal_until_next_backup(self):
             hashdir = os.path.join(self.config.wals_directory, xlog.hash_dir(x))
             yield os.path.join(hashdir, x)
Example #7
    def test_upload_wal(self, rfo_mock, ContainerClientMock):
        """
        Test the upload of a WAL
        """
        # Create a simple CloudWalUploader obj
        container_name = "container"
        cloud_interface = AzureCloudInterface(
            url="https://account.blob.core.windows.net/container/path/to/dir")
        uploader = CloudWalUploader(cloud_interface, "test-server")
        source = "/wal_dir/000000080000ABFF000000C1"
        # Simulate the file object returned by the retrieve_file_obj method
        rfo_mock.return_value.name = source
        uploader.upload_wal(source)

        ContainerClientMock.from_connection_string.assert_called_once_with(
            conn_str=os.environ["AZURE_STORAGE_CONNECTION_STRING"],
            container_name=container_name,
        )
        container_client = ContainerClientMock.from_connection_string.return_value

        # Check the call for the creation of the destination key
        container_client.upload_blob.assert_called_once_with(
            data=rfo_mock.return_value,
            name=os.path.join(
                cloud_interface.path,
                uploader.server_name,
                "wals",
                hash_dir(source),
                os.path.basename(source),
            ),
            overwrite=True,
        )
Example #8
    def upload_wal(self, wal_path, override_tags=None):
        """
        Upload a WAL file from postgres to cloud storage

        :param str wal_path: Full path of the WAL file
        :param List[tuple] override_tags: List of k,v tuples which should override any
          tags already defined in the cloud interface
        """
        # Extract the WAL file
        wal_name = self.retrieve_wal_name(wal_path)
        # Use the correct file object for the upload (simple|gzip|bz2)
        file_object = self.retrieve_file_obj(wal_path)
        # Correctly format the destination path
        destination = os.path.join(
            self.cloud_interface.path,
            self.server_name,
            "wals",
            hash_dir(wal_path),
            wal_name,
        )

        # Put the file in the correct bucket.
        # The put method will handle automatically multipart upload
        self.cloud_interface.upload_fileobj(fileobj=file_object,
                                            key=destination,
                                            override_tags=override_tags)
Example #9
    def test_recover_xlog(self, rsync_pg_mock, cm_mock, tmpdir):
        """
        Test the recovery of the xlogs of a backup
        :param rsync_pg_mock: Mock rsync object for the purpose of this test
        """
        # Build basic folders/files structure
        dest = tmpdir.mkdir("destination")
        wals = tmpdir.mkdir("wals")
        # Create 3 WAL files with different compressions
        xlog_dir = wals.mkdir(xlog.hash_dir("000000000000000000000002"))
        xlog_plain = xlog_dir.join("000000000000000000000001")
        xlog_gz = xlog_dir.join("000000000000000000000002")
        xlog_bz2 = xlog_dir.join("000000000000000000000003")
        xlog_plain.write("dummy content")
        xlog_gz.write("dummy content gz")
        xlog_bz2.write("dummy content bz2")
        server = testing_helpers.build_real_server(main_conf={"wals_directory": wals.strpath})
        # Prepare compressors mock
        c = {"gzip": mock.Mock(name="gzip"), "bzip2": mock.Mock(name="bzip2")}
        cm_mock.return_value.get_compressor = lambda compression=None, path=None: c[compression]
        # touch destination files to avoid errors on cleanup
        c["gzip"].decompress.side_effect = lambda src, dst: open(dst, "w")
        c["bzip2"].decompress.side_effect = lambda src, dst: open(dst, "w")
        # Build executor
        executor = RecoveryExecutor(server.backup_manager)

        # Test: local copy
        required_wals = (
            WalFileInfo.from_xlogdb_line("000000000000000000000001\t42\t43\tNone\n"),
            WalFileInfo.from_xlogdb_line("000000000000000000000002\t42\t43\tgzip\n"),
            WalFileInfo.from_xlogdb_line("000000000000000000000003\t42\t43\tbzip2\n"),
        )
        executor._xlog_copy(required_wals, dest.strpath, None)
        # Check for a correct invocation of rsync using local paths
        rsync_pg_mock.assert_called_once_with(network_compression=False, bwlimit=None, path=None, ssh=None)
        assert not rsync_pg_mock.return_value.from_file_list.called
        c["gzip"].decompress.assert_called_once_with(xlog_gz.strpath, mock.ANY)
        c["bzip2"].decompress.assert_called_once_with(xlog_bz2.strpath, mock.ANY)

        # Reset mock calls
        rsync_pg_mock.reset_mock()
        c["gzip"].reset_mock()
        c["bzip2"].reset_mock()

        # Test: remote copy
        executor._xlog_copy(required_wals, dest.strpath, "remote_command")
        # Check for the invocation of rsync on a remote call
        rsync_pg_mock.assert_called_once_with(
            network_compression=False, bwlimit=None, path=mock.ANY, ssh="remote_command"
        )
        rsync_pg_mock.return_value.from_file_list.assert_called_once_with(
            ["000000000000000000000001", "000000000000000000000002", "000000000000000000000003"], mock.ANY, mock.ANY
        )
        c["gzip"].decompress.assert_called_once_with(xlog_gz.strpath, mock.ANY)
        c["bzip2"].decompress.assert_called_once_with(xlog_bz2.strpath, mock.ANY)
Example #10
    def delete_wal(self, name):
        '''
        Delete the WAL segment with the given name

        :param name: the name of the WAL to delete
        '''
        hashdir = os.path.join(self.config.wals_directory, xlog.hash_dir(name))
        os.unlink(os.path.join(hashdir, name))
        try:
            os.removedirs(hashdir)
        except OSError:
            # The containing hash directory may not be empty yet
            pass
Example #11
    def get_wal_full_path(self, wal_name):
        """
        Build the full path of a WAL for a server given the name

        :param wal_name: WAL file name
        """
        # Build the path which contains the file
        hash_dir = os.path.join(self.config.wals_directory,
                                xlog.hash_dir(wal_name))
        # Build the WAL file full path
        full_path = os.path.join(hash_dir, wal_name)
        return full_path
Example #12
    def delete_wal(self, name):
        '''
        Delete the WAL segment with the given name

        :param name: the name of the WAL to delete
        '''
        hashdir = os.path.join(self.config.wals_directory, xlog.hash_dir(name))
        os.unlink(os.path.join(hashdir, name))
        try:
            os.removedirs(hashdir)
        except OSError:
            # The containing hash directory may not be empty yet
            pass
Example #13
 def testHashDir(self):
     self.assertEqual(xlog.hash_dir('000000000000000200000001'), '0000000000000002')
     self.assertEqual(xlog.hash_dir('000000010000000000000002'), '0000000100000000')
     self.assertEqual(xlog.hash_dir('000000020000000100000000'), '0000000200000001')
     self.assertEqual(xlog.hash_dir('00000001.history'), '')
     self.assertEqual(xlog.hash_dir('00000002.history'), '')
     self.assertEqual(xlog.hash_dir('00000001000000000000000A.00000020.backup'), '0000000100000000')
     self.assertEqual(xlog.hash_dir('00000002000000050000000A.00000020.backup'), '0000000200000005')
     self.assertRaises(xlog.BadXlogSegmentName, xlog.hash_dir, '00000000000000000000000')
     self.assertRaises(xlog.BadXlogSegmentName, xlog.hash_dir, '0000000000000000000000000')
     self.assertRaises(xlog.BadXlogSegmentName, xlog.hash_dir, '000000000000X00000000000')
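The assertions above pin down the contract of xlog.hash_dir(): a 24-hex-digit segment name maps to its first 16 digits (timeline + log id), history files live in the root of the WAL directory, .backup labels hash like their parent segment, and anything else raises BadXlogSegmentName. A minimal sketch of a function satisfying these tests; barman's real implementation in barman/xlog.py is also regex-based but not identical:

    import os
    import re

    class BadXlogSegmentName(Exception):
        # In barman this lives in barman.xlog (older versions) or
        # barman.exceptions (newer versions)
        pass

    _XLOG_RE = re.compile(r'^([0-9A-Fa-f]{16})[0-9A-Fa-f]{8}'
                          r'(\.[0-9A-Fa-f]{8}\.backup)?$')
    _HISTORY_RE = re.compile(r'^[0-9A-Fa-f]{8}\.history$')

    def hash_dir(file_name):
        """Return the name of the directory containing the given WAL file."""
        base = os.path.basename(file_name)
        if _HISTORY_RE.match(base):
            # History files are stored in the root of wals_directory
            return ''
        match = _XLOG_RE.match(base)
        if not match:
            raise BadXlogSegmentName(file_name)
        # First 16 hex digits: timeline (8) + log id (8)
        return match.group(1)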
Example #14
    def delete_wal(self, name):
        '''
        Delete the WAL segment with the given name

        :param name: the name of the WAL to delete
        '''
        hashdir = os.path.join(self.config.wals_directory, xlog.hash_dir(name))
        try:
            os.unlink(os.path.join(hashdir, name))
            try:
                os.removedirs(hashdir)
            except OSError:
                # The containing hash directory may not be empty yet
                pass
        except OSError:
            _logger.warning('Expected WAL file %s not found during delete',
                name)
Example #15
    def delete_wal(self, name):
        '''
        Delete the WAL segment with the given name

        :param name: the name of the WAL to delete
        '''
        hashdir = os.path.join(self.config.wals_directory, xlog.hash_dir(name))
        try:
            os.unlink(os.path.join(hashdir, name))
            try:
                os.removedirs(hashdir)
            except OSError:
                # The containing hash directory may not be empty yet
                pass
        except OSError:
            _logger.warning('Expected WAL file %s not found during delete',
                name)
Example #16
    def test_upload_wal(self, rfo_mock, cloud_interface_mock):
        """
        Test upload_wal calls CloudInterface with expected parameters
        """
        bucket_path = "gs://bucket/path/to/dir"
        server_name = "test_server"
        type(cloud_interface_mock).path = mock.PropertyMock(
            return_value=bucket_path)
        uploader = CloudWalUploader(cloud_interface_mock, server_name)
        source = "/wal_dir/000000080000ABFF000000C1"
        # Simulate the file object returned by the retrieve_file_obj method
        rfo_mock.return_value.name = source
        mock_fileobj_length = 42
        rfo_mock.return_value.tell.return_value = mock_fileobj_length
        uploader.upload_wal(source)

        expected_key = os.path.join(bucket_path, server_name, "wals",
                                    hash_dir(source), os.path.basename(source))
        cloud_interface_mock.upload_fileobj.assert_called_once_with(
            fileobj=rfo_mock(), key=expected_key, override_tags=None)
Example #17
    def cron_wal_archival(self, compressor, filename):
        '''
        Archive a WAL segment from the incoming directory.
        This function returns the name, the size and the time of the WAL file.

        :param compressor: the compressor for the file (if any)
        :param filename: the name of the WAL file
        '''
        basename = os.path.basename(filename)
        destdir = os.path.join(self.config.wals_directory, xlog.hash_dir(basename))
        destfile = os.path.join(destdir, basename)
        time = os.stat(filename).st_mtime
        if not os.path.isdir(destdir):
            os.makedirs(destdir)
        if compressor:
            compressor(filename, destfile)
            shutil.copystat(filename, destfile)
            os.unlink(filename)
        else:
            os.rename(filename, destfile)
        return basename, os.stat(destfile).st_size, time
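A hypothetical invocation of cron_wal_archival(): compressor is any callable taking (source, destination) paths, or None to move the file uncompressed. The gzip-based compressor below is illustrative, not barman's own, and backup_manager is assumed to be an instance of the class defining the method above:

    import gzip
    import shutil

    def gzip_compressor(src, dst):
        # Compress src into dst with gzip
        with open(src, 'rb') as f_in, gzip.open(dst, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)

    name, size, mtime = backup_manager.cron_wal_archival(
        gzip_compressor,
        '/var/lib/barman/main/incoming/000000010000000000000002')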
Example #18
    def cron_wal_archival(self, compressor, filename):
        '''
        Archive a WAL segment from the incoming directory.
        This function returns the name, the size and the time of the WAL file.

        :param compressor: the compressor for the file (if any)
        :param filename: the name of the WAL file
        '''
        basename = os.path.basename(filename)
        destdir = os.path.join(self.config.wals_directory, xlog.hash_dir(basename))
        destfile = os.path.join(destdir, basename)
        time = os.stat(filename).st_mtime
        if not os.path.isdir(destdir):
            os.makedirs(destdir)
        if compressor:
            compressor(filename, destfile)
            shutil.copystat(filename, destfile)
            os.unlink(filename)
        else:
            os.rename(filename, destfile)
        return basename, os.stat(destfile).st_size, time
Example #19
 def testHashDir(self):
     self.assertEqual(xlog.hash_dir('000000000000000200000001'),
                      '0000000000000002')
     self.assertEqual(xlog.hash_dir('000000010000000000000002'),
                      '0000000100000000')
     self.assertEqual(xlog.hash_dir('000000020000000100000000'),
                      '0000000200000001')
     self.assertEqual(xlog.hash_dir('00000001.history'), '')
     self.assertEqual(xlog.hash_dir('00000002.history'), '')
     self.assertEqual(
         xlog.hash_dir('00000001000000000000000A.00000020.backup'),
         '0000000100000000')
     self.assertEqual(
         xlog.hash_dir('00000002000000050000000A.00000020.backup'),
         '0000000200000005')
     self.assertRaises(xlog.BadXlogSegmentName, xlog.hash_dir,
                       '00000000000000000000000')
     self.assertRaises(xlog.BadXlogSegmentName, xlog.hash_dir,
                       '0000000000000000000000000')
     self.assertRaises(xlog.BadXlogSegmentName, xlog.hash_dir,
                       '000000000000X00000000000')
Example #20
    def test_recover_xlog(self, rsync_pg_mock, cm_mock, tmpdir):
        """
        Test the recovery of the xlogs of a backup
        :param rsync_pg_mock: Mock rsync object for the purpose of this test
        """
        # Build basic folders/files structure
        dest = tmpdir.mkdir('destination')
        wals = tmpdir.mkdir('wals')
        # Create 3 WAL files with different compressions
        xlog_dir = wals.mkdir(xlog.hash_dir('000000000000000000000002'))
        xlog_plain = xlog_dir.join('000000000000000000000001')
        xlog_gz = xlog_dir.join('000000000000000000000002')
        xlog_bz2 = xlog_dir.join('000000000000000000000003')
        xlog_plain.write('dummy content')
        xlog_gz.write('dummy content gz')
        xlog_bz2.write('dummy content bz2')
        server = testing_helpers.build_real_server(
            main_conf={'wals_directory': wals.strpath})
        # Prepare compressors mock
        c = {
            'gzip': mock.Mock(name='gzip'),
            'bzip2': mock.Mock(name='bzip2'),
        }
        cm_mock.return_value.get_compressor = \
            lambda compression=None, path=None: c[compression]
        # touch destination files to avoid errors on cleanup
        c['gzip'].decompress.side_effect = lambda src, dst: open(dst, 'w')
        c['bzip2'].decompress.side_effect = lambda src, dst: open(dst, 'w')
        # Build executor
        executor = RecoveryExecutor(server.backup_manager)

        # Test: local copy
        required_wals = (
            WalFileInfo.from_xlogdb_line(
                '000000000000000000000001\t42\t43\tNone\n'),
            WalFileInfo.from_xlogdb_line(
                '000000000000000000000002\t42\t43\tgzip\n'),
            WalFileInfo.from_xlogdb_line(
                '000000000000000000000003\t42\t43\tbzip2\n'),
        )
        executor._xlog_copy(required_wals, dest.strpath, None)
        # Check for a correct invocation of rsync using local paths
        rsync_pg_mock.assert_called_once_with(network_compression=False,
                                              bwlimit=None,
                                              path=None,
                                              ssh=None)
        assert not rsync_pg_mock.return_value.from_file_list.called
        c['gzip'].decompress.assert_called_once_with(xlog_gz.strpath, mock.ANY)
        c['bzip2'].decompress.assert_called_once_with(xlog_bz2.strpath,
                                                      mock.ANY)

        # Reset mock calls
        rsync_pg_mock.reset_mock()
        c['gzip'].reset_mock()
        c['bzip2'].reset_mock()

        # Test: remote copy
        executor._xlog_copy(required_wals, dest.strpath, 'remote_command')
        # Check for the invocation of rsync on a remote call
        rsync_pg_mock.assert_called_once_with(network_compression=False,
                                              bwlimit=None,
                                              path=mock.ANY,
                                              ssh='remote_command')
        rsync_pg_mock.return_value.from_file_list.assert_called_once_with([
            '000000000000000000000001', '000000000000000000000002',
            '000000000000000000000003'
        ], mock.ANY, mock.ANY)
        c['gzip'].decompress.assert_called_once_with(xlog_gz.strpath, mock.ANY)
        c['bzip2'].decompress.assert_called_once_with(xlog_bz2.strpath,
                                                      mock.ANY)
Example #21
 def relpath(self):
     """
     Returns the WAL file path relative to the server's wals_directory
     """
     return os.path.join(xlog.hash_dir(self.name), self.name)
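Joining relpath() onto the server's wals_directory reproduces the full path built by get_wal_full_path() in Example #11. A short sketch, assuming wal_info and server are the usual barman objects:

    import os

    full_path = os.path.join(server.config.wals_directory, wal_info.relpath())
    # e.g. <wals_directory>/0000000100000000/000000010000000000000002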
Example #22
 def test_hash_dir(self):
     assert xlog.hash_dir('000000000000000200000001') == '0000000000000002'
     assert xlog.hash_dir('000000010000000000000002') == '0000000100000000'
     assert xlog.hash_dir(
         'test/000000020000000100000000') == '0000000200000001'
     assert xlog.hash_dir('00000001.history') == ''
     assert xlog.hash_dir('00000002.history') == ''
     assert xlog.hash_dir(
         '00000001000000000000000A.00000020.backup') == '0000000100000000'
     assert xlog.hash_dir(
         '00000002000000050000000A.00000020.backup') == '0000000200000005'
     with pytest.raises(barman.exceptions.BadXlogSegmentName):
         xlog.hash_dir('00000000000000000000000')
     with pytest.raises(barman.exceptions.BadXlogSegmentName):
         xlog.hash_dir('0000000000000000000000000')
     with pytest.raises(barman.exceptions.BadXlogSegmentName):
         xlog.hash_dir('000000000000X00000000000')
Example #23
 def relpath(self):
     """
     Returns the WAL file path relative to the server's wals_directory
     """
     return os.path.join(xlog.hash_dir(self.name), self.name)
Example #24
 def test_hash_dir(self):
     assert xlog.hash_dir("000000000000000200000001") == "0000000000000002"
     assert xlog.hash_dir("000000010000000000000002") == "0000000100000000"
     assert xlog.hash_dir("test/000000020000000100000000") == "0000000200000001"
     assert xlog.hash_dir("00000001.history") == ""
     assert xlog.hash_dir("00000002.history") == ""
     assert (
         xlog.hash_dir("00000001000000000000000A.00000020.backup")
         == "0000000100000000"
     )
     assert (
         xlog.hash_dir("00000002000000050000000A.00000020.backup")
         == "0000000200000005"
     )
     with pytest.raises(barman.exceptions.BadXlogSegmentName):
         xlog.hash_dir("00000000000000000000000")
     with pytest.raises(barman.exceptions.BadXlogSegmentName):
         xlog.hash_dir("0000000000000000000000000")
     with pytest.raises(barman.exceptions.BadXlogSegmentName):
         xlog.hash_dir("000000000000X00000000000")
Example #25
 def test_hash_dir(self):
     assert xlog.hash_dir(
         '000000000000000200000001') == '0000000000000002'
     assert xlog.hash_dir(
         '000000010000000000000002') == '0000000100000000'
     assert xlog.hash_dir(
         'test/000000020000000100000000') == '0000000200000001'
     assert xlog.hash_dir(
         '00000001.history') == ''
     assert xlog.hash_dir(
         '00000002.history') == ''
     assert xlog.hash_dir(
         '00000001000000000000000A.00000020.backup') == '0000000100000000'
     assert xlog.hash_dir(
         '00000002000000050000000A.00000020.backup') == '0000000200000005'
     with pytest.raises(xlog.BadXlogSegmentName):
         xlog.hash_dir('00000000000000000000000')
     with pytest.raises(xlog.BadXlogSegmentName):
         xlog.hash_dir('0000000000000000000000000')
     with pytest.raises(xlog.BadXlogSegmentName):
         xlog.hash_dir('000000000000X00000000000')
Example #26
    def _xlog_copy(self, required_xlog_files, wal_dest, remote_command):
        """
        Restore WAL segments

        :param required_xlog_files: list of all required WAL files
        :param wal_dest: the destination directory for xlog recover
        :param remote_command: default None. The remote command to recover
               the xlog, in case of remote backup.
        """
        # List of required WAL files partitioned by containing directory
        xlogs = collections.defaultdict(list)
        # add '/' suffix to ensure it is a directory
        wal_dest = '%s/' % wal_dest
        # Map of every compressor used with any WAL file in the archive,
        # to be used during this recovery
        compressors = {}
        compression_manager = self.backup_manager.compression_manager
        # Fill xlogs and compressors maps from required_xlog_files
        for wal_info in required_xlog_files:
            hashdir = xlog.hash_dir(wal_info.name)
            xlogs[hashdir].append(wal_info)
            # If a compressor is required, make sure it exists in the cache
            if wal_info.compression is not None and \
                    wal_info.compression not in compressors:
                compressors[wal_info.compression] = \
                    compression_manager.get_compressor(
                        compression=wal_info.compression)

        rsync = RsyncPgData(
            path=self.server.path,
            ssh=remote_command,
            bwlimit=self.config.bandwidth_limit,
            network_compression=self.config.network_compression)
        # If compression is used and this is a remote recovery, we need a
        # temporary directory in which to spool the uncompressed files;
        # otherwise we either decompress every WAL file into the local
        # destination, or we ship the uncompressed file remotely
        if compressors:
            if remote_command:
                # Decompress to a temporary spool directory
                wal_decompression_dest = tempfile.mkdtemp(
                    prefix='barman_xlog-')
            else:
                # Decompress directly to the destination directory
                wal_decompression_dest = wal_dest
            # Make sure wal_decompression_dest exists
            mkpath(wal_decompression_dest)
        else:
            # If no compression
            wal_decompression_dest = None
        if remote_command:
            # If remote recovery tell rsync to copy them remotely
            # add ':' prefix to mark it as remote
            wal_dest = ':%s' % wal_dest
        total_wals = sum(map(len, xlogs.values()))
        partial_count = 0
        for prefix in sorted(xlogs):
            batch_len = len(xlogs[prefix])
            partial_count += batch_len
            source_dir = os.path.join(self.config.wals_directory, prefix)
            _logger.info("Starting copy of %s WAL files %s/%s from %s to %s",
                         batch_len, partial_count, total_wals,
                         xlogs[prefix][0], xlogs[prefix][-1])
            # If at least one compressed file has been found, activate
            # compression check and decompression for each WAL file
            if compressors:
                for segment in xlogs[prefix]:
                    dst_file = os.path.join(wal_decompression_dest,
                                            segment.name)
                    if segment.compression is not None:
                        compressors[segment.compression].decompress(
                            os.path.join(source_dir, segment.name), dst_file)
                    else:
                        shutil.copy2(os.path.join(source_dir, segment.name),
                                     dst_file)
                if remote_command:
                    try:
                        # Transfer the WAL files
                        rsync.from_file_list(
                            list(segment.name for segment in xlogs[prefix]),
                            wal_decompression_dest, wal_dest)
                    except CommandFailedException as e:
                        msg = ("data transfer failure while copying WAL files "
                               "to directory '%s'") % (wal_dest[1:], )
                        raise DataTransferFailure.from_command_error(
                            'rsync', e, msg)

                    # Cleanup files after the transfer
                    for segment in xlogs[prefix]:
                        file_name = os.path.join(wal_decompression_dest,
                                                 segment.name)
                        try:
                            os.unlink(file_name)
                        except OSError as e:
                            output.warning(
                                "Error removing temporary file '%s': %s",
                                file_name, e)
            else:
                try:
                    rsync.from_file_list(
                        list(segment.name
                             for segment in xlogs[prefix]), "%s/" %
                        os.path.join(self.config.wals_directory, prefix),
                        wal_dest)
                except CommandFailedException as e:
                    msg = "data transfer failure while copying WAL files " \
                          "to directory '%s'" % (wal_dest[1:],)
                    raise DataTransferFailure.from_command_error(
                        'rsync', e, msg)

        _logger.info("Finished copying %s WAL files.", total_wals)

        # Remove the local decompression target directory if it differs from
        # the destination directory (this happens when compression is in use
        # during a remote recovery)
        if wal_decompression_dest and wal_decompression_dest != wal_dest:
            shutil.rmtree(wal_decompression_dest)
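The grouping step at the top of _xlog_copy(), in isolation: WAL files are partitioned by their hash directory so each rsync batch copies from a single source directory, and the directories are then processed in sorted (chronological) order. Names below mirror the method above:

    import collections

    xlogs = collections.defaultdict(list)
    for wal_info in required_xlog_files:
        xlogs[xlog.hash_dir(wal_info.name)].append(wal_info)
    # e.g. {'0000000100000000': [wal_info, ...],
    #       '0000000100000001': [wal_info, ...]}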
Example #27
    def xlog_copy(self, required_xlog_files, wal_dest, remote_command):
        """
        Restore WAL segments

        :param required_xlog_files: list of all required WAL files
        :param wal_dest: the destination directory for xlog recover
        :param remote_command: default None. The remote command to recover
               the xlog, in case of remote backup.
        """
        # Retrieve the list of required WAL segments
        # according to recovery options
        xlogs = {}
        for wal_info in required_xlog_files:
            hashdir = xlog.hash_dir(wal_info.name)
            if hashdir not in xlogs:
                xlogs[hashdir] = []
            xlogs[hashdir].append(wal_info.name)
        # Check decompression options
        compressor = self.backup_manager.compression_manager.get_compressor()

        rsync = RsyncPgData(
            ssh=remote_command,
            bwlimit=self.config.bandwidth_limit,
            network_compression=self.config.network_compression)
        if remote_command:
            # If remote recovery tell rsync to copy them remotely
            # add ':' prefix to mark it as remote
            # add '/' suffix to ensure it is a directory
            wal_dest = ':%s/' % wal_dest
        else:
            # we will not use rsync: destdir must exist
            mkpath(wal_dest)
        if compressor and remote_command:
            xlog_spool = tempfile.mkdtemp(prefix='barman_xlog-')
        total_wals = sum(map(len, xlogs.values()))
        partial_count = 0
        for prefix in sorted(xlogs):
            batch_len = len(xlogs[prefix])
            partial_count += batch_len
            source_dir = os.path.join(self.config.wals_directory, prefix)
            _logger.info(
                "Starting copy of %s WAL files %s/%s from %s to %s",
                batch_len,
                partial_count,
                total_wals,
                xlogs[prefix][0],
                xlogs[prefix][-1])
            if compressor:
                if remote_command:
                    for segment in xlogs[prefix]:
                        compressor.decompress(os.path.join(source_dir, segment),
                                              os.path.join(xlog_spool, segment))
                    try:
                        rsync.from_file_list(xlogs[prefix],
                                             xlog_spool, wal_dest)
                    except CommandFailedException as e:
                        msg = "data transfer failure while copying WAL files " \
                              "to directory '%s'" % (wal_dest[1:],)
                        raise DataTransferFailure.from_rsync_error(e, msg)

                    # Cleanup files after the transfer
                    for segment in xlogs[prefix]:
                        file_name = os.path.join(xlog_spool, segment)
                        try:
                            os.unlink(file_name)
                        except OSError as e:
                            output.warning(
                                "Error removing temporary file '%s': %s",
                                file_name, e)
                else:
                    # decompress directly to the right place
                    for segment in xlogs[prefix]:
                        compressor.decompress(os.path.join(source_dir, segment),
                                              os.path.join(wal_dest, segment))
            else:
                try:
                    rsync.from_file_list(
                        xlogs[prefix],
                        "%s/" % os.path.join(
                            self.config.wals_directory, prefix),
                        wal_dest)
                except CommandFailedException as e:
                    msg = "data transfer failure while copying WAL files " \
                          "to directory '%s'" % (wal_dest[1:],)
                    raise DataTransferFailure.from_rsync_error(e, msg)
Example #28
    def recover(self, backup, dest, tablespaces, target_tli, target_time, target_xid, exclusive, remote_command):
        '''
        Performs a recovery of a backup

        :param backup: the backup to recover
        :param dest: the destination directory
        :param tablespaces: a dictionary of tablespaces
        :param target_tli: the target timeline
        :param target_time: the target time
        :param target_xid: the target xid
        :param exclusive: whether the recovery is exclusive or not
        :param remote_command: default None. The remote command to recover the base backup,
                               in case of remote backup.
        '''
        for line in self.cron(False):
            yield line

        recovery_dest = 'local'
        if remote_command:
            recovery_dest = 'remote'
            rsync = RsyncPgData(ssh=remote_command)
        msg = "Starting %s restore for server %s using backup %s " % (recovery_dest, self.config.name, backup.backup_id)
        yield msg
        _logger.info(msg)

        msg = "Destination directory: %s" % dest
        yield msg
        _logger.info(msg)
        if backup.tablespaces:
            if remote_command:
                # TODO: remote dir preparation
                msg = "Skipping remote directory preparation, you must have done it by yourself."
                yield msg
                _logger.warning(msg)
            else:
                tblspc_dir = os.path.join(dest, 'pg_tblspc')
                if not os.path.exists(tblspc_dir):
                    os.makedirs(tblspc_dir)
                for name, oid, location in backup.tablespaces:
                    try:
                        if name in tablespaces:
                            location = tablespaces[name]
                        tblspc_file = os.path.join(tblspc_dir, str(oid))
                        if os.path.exists(tblspc_file):
                            os.unlink(tblspc_file)
                        if os.path.exists(location) and not os.path.isdir(location):
                            os.unlink(location)
                        if not os.path.exists(location):
                            os.makedirs(location)
                        # test permissions
                        barman_write_check_file = os.path.join(location, '.barman_write_check')
                        open(barman_write_check_file, 'a').close()
                        os.unlink(barman_write_check_file)
                        os.symlink(location, tblspc_file)
                    except Exception:
                        msg = "ERROR: unable to prepare '%s' tablespace (destination '%s')" % (name, location)
                        _logger.critical(msg)
                        raise SystemExit(msg)
                    yield "\t%s, %s, %s" % (oid, name, location)
        target_epoch = None
        if target_time:
            try:
                target_datetime = dateutil.parser.parse(target_time)
            except Exception:
                msg = "ERROR: unable to parse the target time parameter %r" % target_time
                _logger.critical(msg)
                raise SystemExit(msg)
            target_epoch = time.mktime(target_datetime.timetuple()) + (target_datetime.microsecond / 1000000.)
        if target_time or target_xid or (target_tli and target_tli != backup.timeline):
            targets = {}
            if target_time:
                targets['time'] = str(target_datetime)
            if target_xid:
                targets['xid'] = str(target_xid)
            if target_tli and target_tli != backup.timeline:
                targets['timeline'] = str(target_tli)
            yield "Doing PITR. Recovery target %s" % \
                (", ".join(["%s: %r" % (k, v) for k, v in targets.items()]))

        # Copy the base backup
        msg = "Copying the base backup."
        yield msg
        _logger.info(msg)
        self.recover_basebackup_copy(backup, dest, remote_command)
        _logger.info("Base backup copied.")

        # Prepare WAL segments local directory
        msg = "Copying required wal segments."
        _logger.info(msg)
        yield msg
        if target_time or target_xid or (target_tli and target_tli != backup.timeline):
            wal_dest = os.path.join(dest, 'barman_xlog')
        else:
            wal_dest = os.path.join(dest, 'pg_xlog')
        # Retrieve the list of required WAL segments according to recovery options
        xlogs = {}
        required_xlog_files = tuple(self.server.get_required_xlog_files(backup, target_tli, target_epoch, target_xid))
        for filename in required_xlog_files:
            hashdir = xlog.hash_dir(filename)
            if hashdir not in xlogs:
                xlogs[hashdir] = []
            xlogs[hashdir].append(filename)
        # Check decompression options
        decompressor = self.compression_manager.get_decompressor()

        # Restore WAL segments
        self.recover_xlog_copy(decompressor, xlogs, wal_dest, remote_command)
        _logger.info("Wal segmets copied.")

        # Generate recovery.conf file (only if needed by PITR)
        if target_time or target_xid or (target_tli and target_tli != backup.timeline):
            msg = "Generating recovery.conf"
            yield msg
            _logger.info(msg)
            if remote_command:
                tempdir = tempfile.mkdtemp(prefix='barman_recovery-')
                recovery = open(os.path.join(tempdir, 'recovery.conf'), 'w')
            else:
                recovery = open(os.path.join(dest, 'recovery.conf'), 'w')
            print >> recovery, "restore_command = 'cp barman_xlog/%f %p'"
            print >> recovery, "recovery_end_command = 'rm -fr barman_xlog'"
            if target_time:
                print >> recovery, "recovery_target_time = '%s'" % target_time
            if target_tli:
                print >> recovery, "recovery_target_timeline = %s" % target_tli
            if target_xid:
                print >> recovery, "recovery_target_xid = '%s'" % target_xid
                if exclusive:
                    print >> recovery, "recovery_target_inclusive = '%s'" % (not exclusive)
            recovery.close()
            if remote_command:
                recovery = rsync.from_file_list(['recovery.conf'], tempdir, ':%s' % dest)
                shutil.rmtree(tempdir)
            _logger.info('recovery.conf generated')
        else:
            # avoid shipping of just recovered pg_xlog files
            if remote_command:
                status_dir = tempfile.mkdtemp(prefix='barman_xlog_status-')
            else:
                status_dir = os.path.join(wal_dest, 'archive_status')
                os.makedirs(status_dir) # no need to check, it must not exist
            for filename in required_xlog_files:
                with open(os.path.join(status_dir, "%s.done" % filename), 'a') as f:
                    f.write('')
            if remote_command:
                retval = rsync('%s/' % status_dir, ':%s' % os.path.join(wal_dest, 'archive_status'))
                if retval != 0:
                    msg = "WARNING: unable to populate pg_xlog/archive_status dorectory"
                    yield msg
                    _logger.warning(msg)
                shutil.rmtree(status_dir)


        # Disable dangerous setting in the target data dir
        if remote_command:
            tempdir = tempfile.mkdtemp(prefix='barman_recovery-')
            pg_config = os.path.join(tempdir, 'postgresql.conf')
            shutil.copy2(os.path.join(backup.get_basebackup_directory(), 'pgdata', 'postgresql.conf'), pg_config)
        else:
            pg_config = os.path.join(dest, 'postgresql.conf')
        if self.pg_config_mangle(pg_config,
                              {'archive_command': 'false'},
                              "%s.origin" % pg_config):
            msg = "The archive_command was set to 'false' to prevent data losses."
            yield msg
            _logger.info(msg)

        # Find dangerous options in the configuration file (locations)
        clashes = self.pg_config_detect_possible_issues(pg_config)

        if remote_command:
            recovery = rsync.from_file_list(['postgresql.conf', 'postgresql.conf.origin'], tempdir, ':%s' % dest)
            shutil.rmtree(tempdir)


        yield ""
        yield "Your PostgreSQL server has been successfully prepared for recovery!"
        yield ""
        yield "Please review network and archive related settings in the PostgreSQL"
        yield "configuration file before starting the just recovered instance."
        yield ""
        if clashes:
            yield "WARNING: Before starting up the recovered PostgreSQL server,"
            yield "please review also the settings of the following configuration"
            yield "options as they might interfere with your current recovery attempt:"
            yield ""

            for name, value in sorted(clashes.items()):
                yield "    %s = %s" % (name, value)

            yield ""
        _logger.info("Recovery completed successful.")
Example #29
    def download_wal(self, wal_name, wal_dest):
        """
        Download a WAL file from cloud storage

        :param str wal_name: Name of the WAL file
        :param str wal_dest: Full path of the destination WAL file
        """

        # Correctly format the source path on s3
        source_dir = os.path.join(self.cloud_interface.path, self.server_name,
                                  "wals", hash_dir(wal_name))
        # Add a path separator if needed
        if not source_dir.endswith(os.path.sep):
            source_dir += os.path.sep

        wal_path = os.path.join(source_dir, wal_name)

        remote_name = None
        # Automatically detect compression based on the file extension
        compression = None
        for item in self.cloud_interface.list_bucket(source_dir):
            # perfect match (uncompressed file)
            if item == wal_path:
                remote_name = item
            # look for compressed files or .partial files
            elif item.startswith(wal_path):
                # Detect compression
                basename = item
                for e, c in ALLOWED_COMPRESSIONS.items():
                    if item[-len(e):] == e:
                        # Strip extension
                        basename = basename[:-len(e)]
                        compression = c
                        break

                # Check basename is a known xlog file (.partial?)
                if not is_any_xlog_file(basename):
                    logging.warning("Unknown WAL file: %s", item)
                    continue
                # Exclude backup informative files (not needed in recovery)
                elif is_backup_file(basename):
                    logging.info("Skipping backup file: %s", item)
                    continue

                # Found candidate
                remote_name = item
                logging.info(
                    "Found WAL %s for server %s as %s",
                    wal_name,
                    self.server_name,
                    remote_name,
                )
                break

        if not remote_name:
            logging.info("WAL file %s for server %s does not exists", wal_name,
                         self.server_name)
            raise OperationErrorExit()

        if compression and sys.version_info < (3, 0, 0):
            raise BarmanException(
                "Compressed WALs cannot be restored with Python 2.x - "
                "please upgrade to a supported version of Python 3")

        # Download the file
        logging.debug(
            "Downloading %s to %s (%s)",
            remote_name,
            wal_dest,
            "decompressing " +
            compression if compression else "no compression",
        )
        self.cloud_interface.download_file(remote_name, wal_dest, compression)
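The extension matching in download_wal() above, in isolation. ALLOWED_COMPRESSIONS maps file extensions to compression names; the mapping shown here is an assumption based on the compressions used elsewhere in these examples (gzip and bzip2), not a verbatim copy of barman's table:

    ALLOWED_COMPRESSIONS = {'.gz': 'gzip', '.bz2': 'bzip2'}  # assumed mapping

    item = 'test-server/wals/000000080000ABFF/000000080000ABFF000000C1.gz'
    basename, compression = item, None
    for e, c in ALLOWED_COMPRESSIONS.items():
        if item[-len(e):] == e:  # i.e. item.endswith(e)
            basename = basename[:-len(e)]  # strip the extension
            compression = c
            break
    assert compression == 'gzip'
    assert basename.endswith('000000080000ABFF000000C1')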
Example #30
    def xlog_copy(self, required_xlog_files, wal_dest, remote_command):
        """
        Restore WAL segments

        :param required_xlog_files: list of all required WAL files
        :param wal_dest: the destination directory for xlog recover
        :param remote_command: default None. The remote command to recover
               the xlog, in case of remote backup.
        """
        # List of required WAL files partitioned by containing directory
        xlogs = collections.defaultdict(list)
        # add '/' suffix to ensure it is a directory
        wal_dest = '%s/' % wal_dest
        # Map of every compressor used with any WAL file in the archive,
        # to be used during this recovery
        compressors = {}
        compression_manager = self.backup_manager.compression_manager
        # Fill xlogs and compressors maps from required_xlog_files
        for wal_info in required_xlog_files:
            hashdir = xlog.hash_dir(wal_info.name)
            xlogs[hashdir].append(wal_info)
            # If a compressor is required, make sure it exists in the cache
            if wal_info.compression is not None and \
                    wal_info.compression not in compressors:
                compressors[wal_info.compression] = \
                    compression_manager.get_compressor(
                        compression=wal_info.compression)

        rsync = RsyncPgData(
            path=self.server.path,
            ssh=remote_command,
            bwlimit=self.config.bandwidth_limit,
            network_compression=self.config.network_compression)
        # If compression is used and this is a remote recovery, we need a
        # temporary directory in which to spool the uncompressed files;
        # otherwise we either decompress every WAL file into the local
        # destination, or we ship the uncompressed file remotely
        if compressors:
            if remote_command:
                # Decompress to a temporary spool directory
                wal_decompression_dest = tempfile.mkdtemp(
                    prefix='barman_xlog-')
            else:
                # Decompress directly to the destination directory
                wal_decompression_dest = wal_dest
            # Make sure wal_decompression_dest exists
            mkpath(wal_decompression_dest)
        else:
            # If no compression
            wal_decompression_dest = None
        if remote_command:
            # If remote recovery tell rsync to copy them remotely
            # add ':' prefix to mark it as remote
            wal_dest = ':%s' % wal_dest
        total_wals = sum(map(len, xlogs.values()))
        partial_count = 0
        for prefix in sorted(xlogs):
            batch_len = len(xlogs[prefix])
            partial_count += batch_len
            source_dir = os.path.join(self.config.wals_directory, prefix)
            _logger.info(
                "Starting copy of %s WAL files %s/%s from %s to %s",
                batch_len,
                partial_count,
                total_wals,
                xlogs[prefix][0],
                xlogs[prefix][-1])
            # If at least one compressed file has been found, activate
            # compression check and decompression for each WAL file
            if compressors:
                for segment in xlogs[prefix]:
                    dst_file = os.path.join(wal_decompression_dest,
                                            segment.name)
                    if segment.compression is not None:
                        compressors[segment.compression].decompress(
                            os.path.join(source_dir, segment.name),
                            dst_file)
                    else:
                        shutil.copy2(os.path.join(source_dir, segment.name),
                                     dst_file)
                if remote_command:
                    try:
                        # Transfer the WAL files
                        rsync.from_file_list(
                            list(segment.name for segment in xlogs[prefix]),
                            wal_decompression_dest, wal_dest)
                    except CommandFailedException as e:
                        msg = ("data transfer failure while copying WAL files "
                               "to directory '%s'") % (wal_dest[1:],)
                        raise DataTransferFailure.from_rsync_error(e, msg)

                    # Cleanup files after the transfer
                    for segment in xlogs[prefix]:
                        file_name = os.path.join(wal_decompression_dest,
                                                 segment.name)
                        try:
                            os.unlink(file_name)
                        except OSError as e:
                            output.warning(
                                "Error removing temporary file '%s': %s",
                                file_name, e)
            else:
                try:
                    rsync.from_file_list(
                        list(segment.name for segment in xlogs[prefix]),
                        "%s/" % os.path.join(self.config.wals_directory,
                                             prefix),
                        wal_dest)
                except CommandFailedException as e:
                    msg = "data transfer failure while copying WAL files " \
                          "to directory '%s'" % (wal_dest[1:],)
                    raise DataTransferFailure.from_rsync_error(e, msg)

        _logger.info("Finished copying %s WAL files.", total_wals)

        # Remove the local decompression target directory if it differs from
        # the destination directory (this happens when compression is in use
        # during a remote recovery)
        if wal_decompression_dest and wal_decompression_dest != wal_dest:
            shutil.rmtree(wal_decompression_dest)
Example #31
    def test_recover_xlog(self, rsync_pg_mock, cm_mock, tmpdir):
        """
        Test the recovery of the xlogs of a backup
        :param rsync_pg_mock: Mock rsync object for the purpose of this test
        """
        # Build basic folders/files structure
        dest = tmpdir.mkdir('destination')
        wals = tmpdir.mkdir('wals')
        # Create 3 WAL files with different compressions
        xlog_dir = wals.mkdir(xlog.hash_dir('000000000000000000000002'))
        xlog_plain = xlog_dir.join('000000000000000000000001')
        xlog_gz = xlog_dir.join('000000000000000000000002')
        xlog_bz2 = xlog_dir.join('000000000000000000000003')
        xlog_plain.write('dummy content')
        xlog_gz.write('dummy content gz')
        xlog_bz2.write('dummy content bz2')
        server = testing_helpers.build_real_server(
            main_conf={'wals_directory': wals.strpath})
        # Prepare compressors mock
        c = {
            'gzip': Mock(name='gzip'),
            'bzip2': Mock(name='bzip2'),
        }
        cm_mock.return_value.get_compressor = \
            lambda compression=None, path=None: c[compression]
        # touch destination files to avoid errors on cleanup
        c['gzip'].decompress.side_effect = lambda src, dst: open(dst, 'w')
        c['bzip2'].decompress.side_effect = lambda src, dst: open(dst, 'w')
        # Build executor
        executor = RecoveryExecutor(server.backup_manager)

        # Test: local copy
        required_wals = (
            WalFileInfo.from_xlogdb_line(
                '000000000000000000000001\t42\t43\tNone\n'),
            WalFileInfo.from_xlogdb_line(
                '000000000000000000000002\t42\t43\tgzip\n'),
            WalFileInfo.from_xlogdb_line(
                '000000000000000000000003\t42\t43\tbzip2\n'),
        )
        executor.xlog_copy(required_wals, dest.strpath, None)
        # Check for a correct invocation of rsync using local paths
        rsync_pg_mock.assert_called_once_with(
            network_compression=False,
            bwlimit=None, path=None,
            ssh=None)
        assert not rsync_pg_mock.return_value.from_file_list.called
        c['gzip'].decompress.assert_called_once_with(xlog_gz.strpath, ANY)
        c['bzip2'].decompress.assert_called_once_with(xlog_bz2.strpath, ANY)

        # Reset mock calls
        rsync_pg_mock.reset_mock()
        c['gzip'].reset_mock()
        c['bzip2'].reset_mock()

        # Test: remote copy
        executor.xlog_copy(required_wals, dest.strpath, 'remote_command')
        # Check for the invocation of rsync on a remote call
        rsync_pg_mock.assert_called_once_with(
            network_compression=False,
            bwlimit=None, path=ANY,
            ssh='remote_command')
        rsync_pg_mock.return_value.from_file_list.assert_called_once_with(
            [
                '000000000000000000000001',
                '000000000000000000000002',
                '000000000000000000000003'],
            ANY,
            ANY)
        c['gzip'].decompress.assert_called_once_with(xlog_gz.strpath, ANY)
        c['bzip2'].decompress.assert_called_once_with(xlog_bz2.strpath, ANY)
Example #32
    def recover(self, backup, dest, tablespaces, target_tli, target_time,
                target_xid, exclusive, remote_command):
        '''
        Performs a recovery of a backup

        :param backup: the backup to recover
        :param dest: the destination directory
        :param tablespaces: a dictionary of tablespaces
        :param target_tli: the target timeline
        :param target_time: the target time
        :param target_xid: the target xid
        :param exclusive: whether the recovery is exclusive or not
        :param remote_command: the remote command used to recover the base
                               backup in case of remote recovery (default None)
        '''
        for line in self.cron(False):
            yield line

        recovery_dest = 'local'
        if remote_command:
            recovery_dest = 'remote'
            rsync = RsyncPgData(ssh=remote_command)
        msg = "Starting %s restore for server %s using backup %s " % (
            recovery_dest, self.config.name, backup.backup_id)
        yield msg
        _logger.info(msg)

        msg = "Destination directory: %s" % dest
        yield msg
        _logger.info(msg)
        if backup.tablespaces:
            if remote_command:
                # TODO: remote dir preparation
                msg = "Skipping remote directory preparation, you must have done it by yourself."
                yield msg
                _logger.warning(msg)
            else:
                tblspc_dir = os.path.join(dest, 'pg_tblspc')
                if not os.path.exists(tblspc_dir):
                    os.makedirs(tblspc_dir)
                for name, oid, location in backup.tablespaces:
                    try:
                        if name in tablespaces:
                            location = tablespaces[name]
                        tblspc_file = os.path.join(tblspc_dir, str(oid))
                        if os.path.exists(tblspc_file):
                            os.unlink(tblspc_file)
                        if os.path.exists(
                                location) and not os.path.isdir(location):
                            os.unlink(location)
                        if not os.path.exists(location):
                            os.makedirs(location)
                        # test write permissions
                        barman_write_check_file = os.path.join(
                            location, '.barman_write_check')
                        open(barman_write_check_file, 'a').close()
                        os.unlink(barman_write_check_file)
                        os.symlink(location, tblspc_file)
                    except Exception:
                        msg = "ERROR: unable to prepare '%s' tablespace (destination '%s')" % (
                            name, location)
                        _logger.critical(msg)
                        raise SystemExit(msg)
                    yield "\t%s, %s, %s" % (oid, name, location)
        target_epoch = None
        if target_time:
            try:
                target_datetime = dateutil.parser.parse(target_time)
            except Exception:
                msg = "ERROR: unable to parse the target time parameter %r" % target_time
                _logger.critical(msg)
                raise SystemExit(msg)
            target_epoch = time.mktime(target_datetime.timetuple()) + (
                target_datetime.microsecond / 1000000.)
        if target_time or target_xid or (target_tli
                                         and target_tli != backup.timeline):
            targets = {}
            if target_time:
                targets['time'] = str(target_datetime)
            if target_xid:
                targets['xid'] = str(target_xid)
            if target_tli and target_tli != backup.timeline:
                targets['timeline'] = str(target_tli)
            yield "Doing PITR. Recovery target %s" % \
                (", ".join(["%s: %r" % (k, v) for k, v in targets.items()]))

        # Copy the base backup
        msg = "Copying the base backup."
        yield msg
        _logger.info(msg)
        self.recover_basebackup_copy(backup, dest, remote_command)
        _logger.info("Base backup copied.")

        # Prepare WAL segments local directory
        msg = "Copying required wal segments."
        _logger.info(msg)
        yield msg
        if target_time or target_xid or (target_tli
                                         and target_tli != backup.timeline):
            wal_dest = os.path.join(dest, 'barman_xlog')
        else:
            wal_dest = os.path.join(dest, 'pg_xlog')
        # Retrieve the list of required WAL segments according to recovery options
        xlogs = {}
        required_xlog_files = tuple(
            self.server.get_required_xlog_files(backup, target_tli,
                                                target_epoch, target_xid))
        for filename in required_xlog_files:
            hashdir = xlog.hash_dir(filename)
            if hashdir not in xlogs:
                xlogs[hashdir] = []
            xlogs[hashdir].append(filename)
        # Check decompression options
        decompressor = self.compression_manager.get_decompressor()

        # Restore WAL segments
        self.recover_xlog_copy(decompressor, xlogs, wal_dest, remote_command)
        _logger.info("Wal segmets copied.")

        # Generate recovery.conf file (only if needed by PITR)
        if target_time or target_xid or (target_tli
                                         and target_tli != backup.timeline):
            msg = "Generating recovery.conf"
            yield msg
            _logger.info(msg)
            if remote_command:
                tempdir = tempfile.mkdtemp(prefix='barman_recovery-')
                recovery = open(os.path.join(tempdir, 'recovery.conf'), 'w')
            else:
                recovery = open(os.path.join(dest, 'recovery.conf'), 'w')
            recovery.write("restore_command = 'cp barman_xlog/%f %p'\n")
            recovery.write("recovery_end_command = 'rm -fr barman_xlog'\n")
            if target_time:
                recovery.write(
                    "recovery_target_time = '%s'\n" % target_time)
            if target_tli:
                recovery.write(
                    "recovery_target_timeline = %s\n" % target_tli)
            if target_xid:
                recovery.write(
                    "recovery_target_xid = '%s'\n" % target_xid)
                if exclusive:
                    recovery.write(
                        "recovery_target_inclusive = '%s'\n" % (not exclusive))
            recovery.close()
            if remote_command:
                recovery = rsync.from_file_list(['recovery.conf'], tempdir,
                                                ':%s' % dest)
                shutil.rmtree(tempdir)
            _logger.info('recovery.conf generated')
        else:
            # avoid shipping the just-recovered pg_xlog files
            if remote_command:
                status_dir = tempfile.mkdtemp(prefix='barman_xlog_status-')
            else:
                status_dir = os.path.join(wal_dest, 'archive_status')
                os.makedirs(status_dir)  # no need to check, it must not exist
            for filename in required_xlog_files:
                with open(os.path.join(status_dir, "%s.done" % filename),
                          'a') as f:
                    f.write('')
            if remote_command:
                retval = rsync(
                    '%s/' % status_dir,
                    ':%s' % os.path.join(wal_dest, 'archive_status'))
                if retval != 0:
                    msg = "WARNING: unable to populate pg_xlog/archive_status dorectory"
                    yield msg
                    _logger.warning(msg)
                shutil.rmtree(status_dir)

        # Disable dangerous settings in the target data directory
        if remote_command:
            tempdir = tempfile.mkdtemp(prefix='barman_recovery-')
            pg_config = os.path.join(tempdir, 'postgresql.conf')
            shutil.copy2(
                os.path.join(backup.get_basebackup_directory(), 'pgdata',
                             'postgresql.conf'), pg_config)
        else:
            pg_config = os.path.join(dest, 'postgresql.conf')
        if self.pg_config_mangle(pg_config, {'archive_command': 'false'},
                                 "%s.origin" % pg_config):
            msg = "The archive_command was set to 'false' to prevent data losses."
            yield msg
            _logger.info(msg)
        if remote_command:
            recovery = rsync.from_file_list(
                ['postgresql.conf', 'postgresql.conf.origin'], tempdir,
                ':%s' % dest)
            shutil.rmtree(tempdir)

        # Detect dangerous options in the configuration file (locations)
        clashes = self.pg_config_detect_possible_issues(pg_config)

        yield ""
        yield "Your PostgreSQL server has been successfully prepared for recovery!"
        yield ""
        yield "Please review network and archive related settings in the PostgreSQL"
        yield "configuration file before starting the just recovered instance."
        yield ""
        if clashes:
            yield "WARNING: Before starting up the recovered PostgreSQL server,"
            yield "please review the also settings of the following configuration"
            yield "options as they might interfere with your current recovery attempt:"
            yield ""

            for name, value in sorted(clashes.items()):
                yield "    %s = %s" % (name, value)

            yield ""
        _logger.info("Recovery completed successful.")