Example #1
def backup_everything(run_type, twindb_config, binlogs_only=False):
    """
    Run backup job

    :param run_type: hourly, daily, etc
    :type run_type: str
    :param twindb_config: ConfigParser instance
    :type twindb_config: TwinDBBackupConfig
    :param binlogs_only: If True copy only MySQL binary logs.
    :type binlogs_only: bool
    """
    set_open_files_limit()

    try:
        if not binlogs_only:
            backup_start = time.time()
            backup_files(run_type, twindb_config)
            backup_mysql(run_type, twindb_config)
            backup_binlogs(run_type, twindb_config)
            end = time.time()
            save_measures(backup_start, end)
        else:
            backup_binlogs(run_type, twindb_config)
    except ConfigParser.NoSectionError as err:
        LOG.debug(traceback.format_exc())
        LOG.error(err)
        exit(1)
Example #2
    def get_connection(self):
        """
        Connect to MySQL host and yield a connection.

        :return: MySQL connection
        :raise MySQLSourceError: if can't connect to server
        """
        connection = None
        try:
            connection = pymysql.connect(
                host=self.hostname,
                read_default_file=self.defaults_file,
                connect_timeout=self.connect_timeout,
                cursorclass=pymysql.cursors.DictCursor
            )

            yield connection
        except OperationalError:
            LOG.error(
                "Can't connect to MySQL server on %s",
                self.hostname)
            raise MySQLSourceError(
                "Can't connect to MySQL server on %s"
                % self.hostname)
        finally:
            if connection:
                connection.close()
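
Note: get_connection() yields inside try/finally instead of returning, which is the contextlib.contextmanager pattern (the decorator itself sits outside this excerpt). A usage sketch, mirroring how Example #3 below calls it (`source` is an illustrative instance):

    with source.get_connection() as connection:
        with connection.cursor() as cursor:
            cursor.execute("SELECT 1")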
Example #3
    def disable_wsrep_desync(self):
        """
        Wait until wsrep_local_recv_queue drains to zero,
        then disable wsrep_desync
        """
        max_time = time.time() + 900
        try:
            with self.get_connection() as connection:
                with connection.cursor() as cursor:
                    while time.time() < max_time:
                        cursor.execute("SHOW GLOBAL STATUS LIKE "
                                       "'wsrep_local_recv_queue'")

                        res = {r['Variable_name'].lower(): r['Value'].lower()
                               for r in cursor.fetchall()}

                        if not res.get('wsrep_local_recv_queue'):
                            raise Exception('Unknown status variable '
                                            '"wsrep_local_recv_queue"')

                        if int(res['wsrep_local_recv_queue']) == 0:
                            break

                        time.sleep(1)

                    LOG.debug('Disabling wsrep_desync')
                    cursor.execute("SET GLOBAL wsrep_desync=OFF")
        except pymysql.Error as err:
            LOG.error(err)
Example #4
    def revert_stream(self):
        """
        Un-apply the modifier and return the output stream.
        The Base modifier does nothing, so it will return the input stream
        without modifications

        :return: output stream handle
        """
        with self._input as input_stream:
            LOG.debug('Running %s', ' '.join(self._unmodifier_cmd))
            proc = Popen(
                self._unmodifier_cmd,
                stdin=input_stream,
                stdout=PIPE,
                stderr=PIPE
            )
            yield proc.stdout
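            # Control returns here once the caller has consumed the stream; reap the process.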

            _, cerr = proc.communicate()
            if proc.returncode:
                msg = '%s exited with non-zero code.' \
                      % ' '.join(self._unmodifier_cmd)
                LOG.error(msg)
                LOG.error(cerr)
                raise ModifierException(msg)
Example #5
 def _handle_failure_exec(self, err, stderr_file):
     """Cleanup on failure exec"""
     LOG.error(err)
     LOG.error('Failed to run xtrabackup. '
               'Check error output in %s', stderr_file.name)
     self.dst.delete(self.get_name())
     exit(1)
Example #6
    def get_stream(self):
        """
        Get a PIPE handler with content of the source
        :return: stream with content of the source
        """
        cmd = [
            self._xtrabackup,
            "--defaults-file=%s" % self._connect_info.defaults_file,
            "--stream=xbstream",
            "--host=127.0.0.1",
            "--backup"
        ]
        cmd += ["--target-dir", "."]
        if self.is_galera():
            cmd.append("--galera-info")
            cmd.append("--no-backup-locks")
        if self.incremental:
            cmd += [
                "--incremental-basedir",
                ".",
                "--incremental-lsn=%d" % self._parent_lsn
            ]
        # If this is a Galera node then additional step needs to be taken to
        # prevent the backups from locking up the cluster.
        wsrep_desynced = False
        LOG.debug('Running %s', ' '.join(cmd))
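        # delete=False keeps xtrabackup's stderr file on disk for inspection if the backup fails.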
        stderr_file = tempfile.NamedTemporaryFile(delete=False)
        try:
            if self.is_galera():
                wsrep_desynced = self.enable_wsrep_desync()

            LOG.debug('Running %s', ' '.join(cmd))
            proc_xtrabackup = Popen(cmd,
                                    stderr=stderr_file,
                                    stdout=PIPE)

            yield proc_xtrabackup.stdout

            proc_xtrabackup.communicate()
            if proc_xtrabackup.returncode:
                LOG.error('Failed to run xtrabackup. '
                          'Check error output in %s', stderr_file.name)
                try:
                    if LOG.debug_enabled:
                        with open(stderr_file.name) as xb_out:
                            for line in xb_out:
                                print(line, end='', file=sys.stderr)
                except AttributeError:
                    pass
                self.dst.delete(self.get_name())
                exit(1)
            else:
                LOG.debug('Successfully streamed xtrabackup output')
            self._update_backup_info(stderr_file)
        except OSError as err:
            LOG.error('Failed to run %s: %s', cmd, err)
            exit(1)
        finally:
            if wsrep_desynced:
                self.disable_wsrep_desync()
Example #7
 def backup_mysql(self):
     """FLag to backup MySQL or not"""
     try:
         return self.__cfg.getboolean('source', 'backup_mysql')
     except NoOptionError:
         return False
     except NoSectionError as err:
         LOG.error("Section 'source' is mandatory")
         raise ConfigurationError(err)
Example #8
def restore_mysql(ctx, dst, backup_copy, cache):
    """Restore from mysql backup"""
    LOG.debug('mysql: %r', ctx.obj['twindb_config'])

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(ctx.obj['twindb_config'])
        exit(1)

    try:
        ensure_empty(dst)

        incomplete_copy = MySQLCopy(
            path=backup_copy
        )
        dst_storage = ctx.obj['twindb_config'].destination(
            backup_source=incomplete_copy.host
        )
        mysql_status = MySQLStatus(dst=dst_storage)

        copies = [
            cp for cp in mysql_status if backup_copy.endswith(cp.name)
        ]
        try:
            copy = copies.pop(0)
        except IndexError:
            raise TwinDBBackupError(
                'Cannot find copy %s in MySQL status. '
                'Inspect the output of `twindb-backup status` and verify '
                'that the correct copy is specified.'
                % backup_copy
            )
        if copies:
            raise TwinDBBackupError(
                'Multiple copies match pattern %s. Make sure you give a '
                'unique copy name for restore.'
                % backup_copy
            )

        if cache:
            restore_from_mysql(
                ctx.obj['twindb_config'],
                copy,
                dst,
                cache=Cache(cache)
            )
        else:
            restore_from_mysql(ctx.obj['twindb_config'], copy, dst)

    except (TwinDBBackupError, CacheException) as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
    except (OSError, IOError) as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
Example #9
 def backup_dirs(self):
     """Directories to backup"""
     try:
         dirs = self.__cfg.get('source', 'backup_dirs')
         return split(dirs)
     except NoOptionError:
         return []
     except NoSectionError as err:
         LOG.error("Section 'source' is mandatory")
         raise ConfigurationError(err)
Example #10
def verify_mysql_backup(twindb_config, dst_path, backup_file, hostname=None):
    """
    Restore mysql backup and measure time

    :param hostname: Hostname the backup copy belongs to.
    :param backup_file: Name of the backup copy, or ``latest``.
    :param dst_path: Path to the restore destination.
    :param twindb_config: tool configuration
    :type twindb_config: TwinDBBackupConfig

    """
    dst = twindb_config.destination(backup_source=hostname)
    status = MySQLStatus(dst=dst)
    copy = None

    if backup_file == "latest":
        copy = status.latest_backup
    else:
        for copy in status:
            if backup_file.endswith(copy.key):
                break
    if copy is None:
        return json.dumps({
            'backup_copy': backup_file,
            'restore_time': 0,
            'success': False
        }, indent=4, sort_keys=True)
    start_restore_time = time.time()
    success = True
    tmp_dir = tempfile.mkdtemp()

    try:

        LOG.debug('Verifying backup copy in %s', tmp_dir)
        restore_from_mysql(twindb_config, copy, dst_path, tmp_dir)
        edit_backup_my_cnf(dst_path)

    except (TwinDBBackupError, OSError, IOError) as err:

        LOG.error(err)
        LOG.debug(traceback.format_exc())
        success = False

    finally:

        shutil.rmtree(tmp_dir, ignore_errors=True)

    end_restore_time = time.time()
    restore_time = end_restore_time - start_restore_time
    return json.dumps({
        'backup_copy': copy.key,
        'restore_time': restore_time,
        'success': success
    }, indent=4, sort_keys=True)
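
For reference, the returned JSON has this shape (values are illustrative, not from a real run):

    {
        "backup_copy": "master1/daily/mysql/mysql-2019-04-04_00_00_00.xbstream.gz",
        "restore_time": 12.3,
        "success": true
    }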
Example #11
def share_backup(ctx, s3_url):
    """Share backup copy for download"""
    if not s3_url:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(ctx.obj['twindb_config'])
        exit(1)
    try:
        share(ctx.obj['twindb_config'], s3_url)
    except TwinDBBackupError as err:
        LOG.error(err)
        exit(1)
Example #12
def run_command(command, ok_non_zero=False):
    """
    Run shell command locally

    :param command: Command to run
    :type command: list
    :param ok_non_zero: Don't consider non-zero exit code as an error.
    :type ok_non_zero: bool
    :return: file object with the command's stdout, as a generator to use with ``with``
    """
    try:
        LOG.debug('Running %s', " ".join(command))
        proc = Popen(command, stderr=PIPE, stdout=PIPE)

        yield proc.stdout

        _, cerr = proc.communicate()

        if proc.returncode and not ok_non_zero:
            LOG.error('Command %s exited with error code %d',
                      ' '.join(command),
                      proc.returncode)
            LOG.error(cerr)
            exit(1)
        else:
            LOG.debug('Exited with zero code')

    except OSError as err:
        LOG.error('Failed to run %s',
                  ' '.join(command))
        LOG.error(err)
        exit(1)
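
A usage sketch, assuming run_command() is wrapped with contextlib.contextmanager as its docstring implies (the command shown is illustrative):

    with run_command(['ls', '-l']) as stdout:
        for line in stdout:
            print(line.decode(), end='')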
Example #13
    def _load(self, status_as_json):
        status = []
        try:
            status_as_obj = json.loads(status_as_json)
        except ValueError:
            raise CorruptedStatus(
                'Could not load status from a bad JSON string %s'
                % (status_as_json, )
            )

        for run_type in INTERVALS:
            for key, value in status_as_obj[run_type].iteritems():

                try:
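                    # Keys presumably look like "<host>/<run_type>/<subdir>/<file_name>";
                    # a shorter key raises IndexError, reported below as CorruptedStatus.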
                    host = key.split('/')[0]
                    file_name = key.split('/')[3]
                    kwargs = {
                        'type': value['type'],
                        'config': self.__serialize_config(value)
                    }
                    keys = [
                        'backup_started',
                        'backup_finished',
                        'binlog',
                        'parent',
                        'lsn',
                        'position',
                        'wsrep_provider_version',
                    ]
                    for copy_key in keys:
                        if copy_key in value:
                            kwargs[copy_key] = value[copy_key]

                    copy = MySQLCopy(
                        host,
                        run_type,
                        file_name,
                        **kwargs
                    )
                    status.append(copy)
                except IndexError as err:
                    LOG.error(err)
                    raise CorruptedStatus('Unexpected key %s' % key)

        return status
Example #14
    def _get_config(self, cfg_path):
        """
        Return parsed config

        :param cfg_path: Path to config
        :type cfg_path: str
        :return: Path and config
        :rtype: ConfigParser.ConfigParser
        """
        cfg = ConfigParser.ConfigParser(allow_no_value=True)
        try:
            cmd = "cat %s" % cfg_path
            with self._ssh_client.get_remote_handlers(cmd) as (_, cout, _):
                cfg.readfp(cout)
        except ConfigParser.ParsingError as err:
            LOG.error(err)
            raise
        return cfg
Example #15
    def find_files(self, prefix, run_type):
        """
        Find files with common prefix and given run type.

        :param prefix: Common prefix.
        :type prefix: str
        :param run_type: daily, hourly, etc
        :type run_type: str
        :return: list of file names
        :rtype: list(str)
        :raise S3DestinationError: if failed to find files.
        """
        s3client = boto3.resource('s3')
        bucket = s3client.Bucket(self.bucket)
        LOG.debug('Listing %s in bucket %s', prefix, bucket)

        # Try to list the bucket several times
        # because of intermittent error NoSuchBucket:
        # https://travis-ci.org/twindb/backup/jobs/204066704
        retry_timeout = time.time() + S3_READ_TIMEOUT
        retry_interval = 2
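        # Exponential backoff: 2, 4, 8, ... seconds between attempts until the deadline.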
        while time.time() < retry_timeout:
            try:
                files = []
                all_objects = bucket.objects.filter(Prefix='')
                for file_object in all_objects:
                    if "/" + run_type + "/" in file_object.key:
                        files.append("s3://%s/%s" %
                                     (self.bucket, file_object.key))

                return sorted(files)
            except ClientError as err:
                LOG.warning('%s. Will retry in %d seconds.', err,
                            retry_interval)
                time.sleep(retry_interval)
                retry_interval *= 2

            except Exception as err:
                LOG.error('Failed to list objects in bucket %s: %s',
                          self.bucket, err)
                raise

        raise S3DestinationError('Failed to find files.')
Example #16
def backup(ctx, run_type, lock_file, binlogs_only):
    """Run backup job"""
    try:

        run_backup_job(
            ctx.obj['twindb_config'],
            run_type,
            lock_file=lock_file,
            binlogs_only=binlogs_only
        )
    except TwinDBBackupError as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)

    except KeyboardInterrupt:
        LOG.info('Exiting...')
        kill_children()
        exit(1)
Example #17
    def get_remote_handlers(self, cmd):
        """
        Get remote stdin, stdout and stderr handler

        :param cmd: Command for execution
        :type cmd: str
        :return: Remote stdin, stdout and stderr handler
        :rtype: tuple(generator, generator, generator)
        :raise SshDestinationError: if any error
        """
        try:
            with self._shell() as shell:
                LOG.debug("Try to get remote handlers: %s", cmd)
                stdin_, stdout_, stderr_ = shell.exec_command(cmd)
                yield stdin_, stdout_, stderr_

        except SSHException as err:
            LOG.error('Failed to execute %s', cmd)
            raise SshClientException(err)
Example #18
def _run_remote_netcat(
    compress,
    datadir,  # pylint: disable=too-many-arguments
    destination,
    dst,
    netcat_port,
    src,
    xbstream_path,
):
    netcat_cmd = "{xbstream_binary} -x -C {datadir}".format(
        xbstream_binary=xbstream_path, datadir=datadir)
    if compress:
        netcat_cmd = "gunzip -c - | %s" % netcat_cmd

    # find unused port
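    # ensure_tcp_port_listening() returning True means something is already
    # listening on the port, so probe upwards until a free one is found.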
    while netcat_port < 64000:
        if dst.ensure_tcp_port_listening(netcat_port, wait_timeout=1):
            netcat_port += 1
        else:
            LOG.debug("Will use port %d for streaming", netcat_port)
            break
    proc_netcat = Process(target=dst.netcat,
                          args=(netcat_cmd, ),
                          kwargs={"port": netcat_port})
    LOG.debug("Starting netcat on the destination")
    proc_netcat.start()
    nc_wait_timeout = 10
    if not dst.ensure_tcp_port_listening(netcat_port,
                                         wait_timeout=nc_wait_timeout):
        LOG.error(
            "netcat on the destination "
            "is not ready after %d seconds",
            nc_wait_timeout,
        )
        proc_netcat.terminate()
        exit(1)
    src.clone(
        dest_host=split_host_port(destination)[0],
        port=netcat_port,
        compress=compress,
    )
    proc_netcat.join()
Example #19
def restore_file(ctx, dst, backup_copy):
    """Restore from file backup"""
    LOG.debug('file: %r', ctx.obj['twindb_config'])

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(ctx.obj['twindb_config'])
        exit(1)

    try:
        ensure_empty(dst)
        copy = FileCopy(path=backup_copy)
        restore_from_file(ctx.obj['twindb_config'], copy, dst)
    except TwinDBBackupError as err:
        LOG.error(err)
        exit(1)
    except KeyboardInterrupt:
        LOG.info('Exiting...')
        kill_children()
        exit(1)
Example #20
def backup_everything(run_type, config):
    """
    Run backup job

    :param run_type: hourly, daily, etc
    :type run_type: str
    :param config: ConfigParser instance
    :type config: ConfigParser.ConfigParser
    """
    set_open_files_limit()

    try:
        backup_start = time.time()
        backup_files(run_type, config)
        backup_mysql(run_type, config)
        end = time.time()
        save_measures(backup_start, end)
    except ConfigParser.NoSectionError as err:
        LOG.error(err)
        exit(1)
Example #21
    def get_stream(self):
        """
        Encrypt the input stream and return it as the output stream

        :return: output stream handle
        :raise: OSError if failed to call the gpg command
        """
        with self.input as input_stream:
            proc = Popen([
                'gpg', '--no-default-keyring', '--trust-model', 'always',
                '--keyring', self.keyring, '--recipient', self.recipient,
                '--encrypt', '--yes', '--batch'
            ],
                         stdin=input_stream,
                         stdout=PIPE,
                         stderr=PIPE)
            yield proc.stdout
            _, cerr = proc.communicate()
            if proc.returncode:
                LOG.error('gpg exited with non-zero code.')
                LOG.error(cerr)
Example #22
def _extract_xbstream(input_stream,
                      working_dir,
                      xbstream_binary=XBSTREAM_BINARY):
    """
    Extract xbstream stream in directory

    :param input_stream: The stream in xbstream format
    :param working_dir: directory
    :param xbstream_binary: Path to xbstream
    :return: True if extracted successfully
    """
    try:
        cmd = [xbstream_binary, "-x"]
        LOG.debug("Running %s", " ".join(cmd))
        LOG.debug("Working directory: %s", working_dir)
        LOG.debug("Xbstream binary: %s", xbstream_binary)
        proc = Popen(cmd,
                     stdin=input_stream,
                     stdout=PIPE,
                     stderr=PIPE,
                     cwd=working_dir)
        cout, cerr = proc.communicate()
        ret = proc.returncode
        if ret:
            LOG.error("%s exited with code %d", " ".join(cmd), ret)
            if cout:
                LOG.error("STDOUT: %s", cout)
            if cerr:
                LOG.error("STDERR: %s", cerr)
        return ret == 0

    except OSError as err:
        raise TwinDBBackupError("Failed to extract xbstream: %s" % err)
Example #23
    def _load(self, status_as_json):
        status = []
        try:
            status_as_obj = json.loads(status_as_json)
        except ValueError:
            raise CorruptedStatus(
                "Could not load status from a bad JSON string %s"
                % (status_as_json,)
            )

        for run_type in INTERVALS:
            for key, value in status_as_obj[run_type].items():

                try:
                    host = key.split("/")[0]
                    file_name = key.split("/")[3]
                    kwargs = {
                        "type": value["type"],
                        "config": self.__serialize_config(value),
                    }
                    keys = [
                        "backup_started",
                        "backup_finished",
                        "binlog",
                        "parent",
                        "lsn",
                        "position",
                        "wsrep_provider_version",
                    ]
                    for copy_key in keys:
                        if copy_key in value:
                            kwargs[copy_key] = value[copy_key]

                    copy = MySQLCopy(host, run_type, file_name, **kwargs)
                    status.append(copy)
                except IndexError as err:
                    LOG.error(err)
                    raise CorruptedStatus("Unexpected key %s" % key)

        return status
Example #24
    def execute(self, cmd, quiet=False):
        """Execute a command on a remote SSH server.

        :param cmd: Command for execution.
        :type cmd: str
        :param quiet: if quiet is True don't print error messages
        :return: Handlers of stdin, stdout and stderr
        :rtype: tuple
        :raise SshDestinationError: if any error

        """
        try:
            with self._shell() as shell:
                LOG.debug('Executing %s', cmd)
                stdin_, stdout_, stderr_ = shell.exec_command(cmd)
                # while not stdout_.channel.exit_status_ready():
                #     LOG.debug('%s: waiting', cmd)
                #     time.sleep(1)
                exit_code = stdout_.channel.recv_exit_status()
                if exit_code != 0:
                    if not quiet:
                        LOG.error("Failed while execute command %s", cmd)
                        LOG.error(stderr_.read())
                    raise SshClientException('%s exited with code %d'
                                             % (cmd, exit_code))
                return stdin_, stdout_, stderr_

        except (SSHException, IOError) as err:
            if not quiet:
                LOG.error('Failed to execute %s: %s', cmd, err)
            raise SshClientException('Failed to execute %s: %s'
                                     % (cmd, err))
Example #25
    def _load(self, status_as_json):
        status = []
        try:
            status_as_obj = json.loads(status_as_json)
        except ValueError:
            raise CorruptedStatus(
                'Could not load status from a bad JSON string %s' %
                (status_as_json, ))

        for run_type in INTERVALS:
            for key, value in status_as_obj[run_type].iteritems():

                try:
                    host = key.split('/')[0]
                    file_name = key.split('/')[3]
                    kwargs = {
                        'type': value['type'],
                        'config': self.__serialize_config(value)
                    }
                    keys = [
                        'backup_started',
                        'backup_finished',
                        'binlog',
                        'parent',
                        'lsn',
                        'position',
                        'wsrep_provider_version',
                    ]
                    for copy_key in keys:
                        if copy_key in value:
                            kwargs[copy_key] = value[copy_key]

                    copy = MySQLCopy(host, run_type, file_name, **kwargs)
                    status.append(copy)
                except IndexError as err:
                    LOG.error(err)
                    raise CorruptedStatus('Unexpected key %s' % key)

        return status
Example #26
def restore_mysql(cfg, dst, backup_copy, cache):
    """Restore from mysql backup"""
    LOG.debug('mysql: %r', cfg)

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(cfg)
        exit(1)

    try:
        ensure_empty(dst)
        if cache:
            restore_from_mysql(cfg, backup_copy, dst, cache=Cache(cache))
        else:
            restore_from_mysql(cfg, backup_copy, dst)

    except (TwinDBBackupError, CacheException) as err:
        LOG.error(err)
        exit(1)
    except (OSError, IOError) as err:
        LOG.error(err)
        exit(1)
Example #27
def restore_from_file(config, backup_copy, dst_dir):
    """
    Restore a directory from a backup copy in the directory

    :param config: Tool configuration.
    :type config: ConfigParser.ConfigParser
    :param backup_copy: Backup name.
    :type backup_copy: str
    :param dst_dir: Path to destination directory. Must exist and be empty.
    :type dst_dir: str
    """
    LOG.info('Restoring %s in %s', backup_copy, dst_dir)
    mkdir_p(dst_dir)

    if os.path.exists(backup_copy):
        dst = Local(backup_copy)
        stream = dst.get_stream(backup_copy)
    else:
        dst = get_destination(config)
        stream = dst.get_stream(backup_copy)
        # GPG modifier
        try:
            gpg = Gpg(stream,
                      config.get('gpg', 'recipient'),
                      config.get('gpg', 'keyring'),
                      secret_keyring=config.get('gpg', 'secret_keyring'))
            LOG.debug('Decrypting stream')
            stream = gpg.revert_stream()
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            LOG.debug('Not decrypting the stream')

    with stream as handler:
        try:
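            # "tar zvxf -" unpacks the gzip-compressed tar stream from stdin into dst_dir.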
            cmd = ["tar", "zvxf", "-"]
            LOG.debug('Running %s', ' '.join(cmd))
            proc = Popen(cmd, stdin=handler, cwd=dst_dir)
            cout, cerr = proc.communicate()
            ret = proc.returncode
            if ret:
                LOG.error('%s exited with code %d', cmd, ret)
                if cout:
                    LOG.error('STDOUT: %s', cout)
                if cerr:
                    LOG.error('STDERR: %s', cerr)
                return
            LOG.info('Successfully restored %s in %s', backup_copy, dst_dir)
        except OSError as err:
            LOG.error('Failed to decompress %s: %s', backup_copy, err)
            exit(1)
Example #28
def _run_remote_netcat(compress, datadir,  # pylint: disable=too-many-arguments
                       destination, dst, netcat_port, src, xbstream_path):
    netcat_cmd = "{xbstream_binary} -x -C {datadir}".format(
        xbstream_binary=xbstream_path,
        datadir=datadir
    )
    if compress:
        netcat_cmd = "gunzip -c - | %s" % netcat_cmd

    # find unused port
    while netcat_port < 64000:
        if dst.ensure_tcp_port_listening(netcat_port, wait_timeout=1):
            netcat_port += 1
        else:
            LOG.debug('Will use port %d for streaming', netcat_port)
            break
    proc_netcat = Process(
        target=dst.netcat,
        args=(netcat_cmd,),
        kwargs={
            'port': netcat_port
        }
    )
    LOG.debug('Starting netcat on the destination')
    proc_netcat.start()
    nc_wait_timeout = 10
    if not dst.ensure_tcp_port_listening(netcat_port,
                                         wait_timeout=nc_wait_timeout):
        LOG.error('netcat on the destination '
                  'is not ready after %d seconds', nc_wait_timeout)
        proc_netcat.terminate()
        exit(1)
    src.clone(
        dest_host=split_host_port(destination)[0],
        port=netcat_port,
        compress=compress
    )
    proc_netcat.join()
Example #29
def run_backup_job(twindb_config,
                   run_type,
                   lock_file=LOCK_FILE,
                   binlogs_only=False):
    """
    Grab a lock, waiting up to the allowed timeout, and start backup jobs

    :param twindb_config: Tool configuration
    :type twindb_config: TwinDBBackupConfig
    :param run_type: Run type
    :type run_type: str
    :param lock_file: File used as a lock
    :type lock_file: str
    :param binlogs_only: If True copy only binlogs.
    :type binlogs_only: bool
    """
    with timeout(get_timeout(run_type)):
        try:
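            # flock() blocks until the lock is free; the surrounding timeout() interrupts
            # the wait with a signal, which surfaces as the EINTR IOError handled below.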
            file_descriptor = open(lock_file, 'w')
            fcntl.flock(file_descriptor, fcntl.LOCK_EX)
            LOG.debug(run_type)
            if getattr(twindb_config.run_intervals, run_type):
                backup_everything(
                    run_type,
                    twindb_config,
                    binlogs_only=binlogs_only
                )
            else:
                LOG.debug('Not running because run_%s is no', run_type)
        except IOError as err:
            if err.errno != errno.EINTR:
                LOG.debug(traceback.format_exc())
                raise LockWaitTimeoutError(err)
            msg = 'Another instance of twindb-backup is running?'
            if run_type == 'hourly':
                LOG.debug(msg)
            else:
                LOG.error(msg)
Example #30
    def is_galera(self):
        """Check if local MySQL instance is a Galera cluster

        :return: True if it's a Galera.
        :rtype: bool
        """
        try:
            with self._cursor() as cursor:
                cursor.execute("SELECT @@wsrep_on as wsrep_on")
                row = cursor.fetchone()

                return (str(row['wsrep_on']).lower() == "1" or
                        str(row['wsrep_on']).lower() == 'on')
        except pymysql.InternalError as err:
            error_code = err.args[0]
            error_message = err.args[1]

            if error_code == 1193:
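                # 1193 == ER_UNKNOWN_SYSTEM_VARIABLE: @@wsrep_on is unknown, so not Galera.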
                LOG.debug('Galera is not supported or not enabled')
                return False
            else:
                LOG.error(error_message)
                raise
Example #31
    def _save(cmd, handler):

        with handler as input_handler:
            LOG.debug('Running %s', ' '.join(cmd))
            try:
                proc = Popen(cmd,
                             stdin=input_handler,
                             stdout=PIPE,
                             stderr=PIPE)
                cout_ssh, cerr_ssh = proc.communicate()

                ret = proc.returncode
                if ret:
                    if cout_ssh:
                        LOG.info(cout_ssh)
                    if cerr_ssh:
                        LOG.error(cerr_ssh)
                    raise DestinationError('%s exited with error code %d' %
                                           (' '.join(cmd), ret))
                LOG.debug('Exited with code %d', ret)
            except OSError as err:
                raise DestinationError('Failed to run %s: %s' %
                                       (' '.join(cmd), err))
Example #32
        def _download_object(s3_client, bucket_name, key, read_fd, write_fd):
            # The read end of the pipe must be closed in the child process
            # before we start writing to it.
            os.close(read_fd)

            with os.fdopen(write_fd, 'wb') as w_pipe:
                try:
                    retry_interval = 2
                    for _ in xrange(10):
                        try:
                            s3_client.download_fileobj(bucket_name, key,
                                                       w_pipe)
                            return
                        except ClientError as err:
                            LOG.warning(err)
                            LOG.warning('Will retry in %d seconds',
                                        retry_interval)
                            time.sleep(retry_interval)
                            retry_interval *= 2

                except IOError as err:
                    LOG.error(err)
                    exit(1)
Example #33
    def is_galera(self):
        """Check if local MySQL instance is a Galera cluster

        :return: True if it's a Galera.
        :rtype: bool
        """
        try:
            with self._cursor() as cursor:
                cursor.execute("SELECT @@wsrep_on as wsrep_on")
                row = cursor.fetchone()

                return (str(row['wsrep_on']).lower() == "1" or
                        str(row['wsrep_on']).lower() == 'on')
        except pymysql.InternalError as err:
            error_code = err.args[0]
            error_message = err.args[1]

            if error_code == 1193:
                LOG.debug('Galera is not supported or not enabled')
                return False
            else:
                LOG.error(error_message)
                raise
Example #34
    def get_stream(self):
        """
        Get a PIPE handler with content of the source

        :return: stream with content of the source
        """
        cmd = "tar cf - %s" % self.path
        try:
            LOG.debug('Running %s', cmd)
            proc = Popen(shlex.split(cmd), stderr=PIPE, stdout=PIPE)

            yield proc.stdout

            _, cerr = proc.communicate()
            if proc.returncode:
                LOG.error('Failed to read from %s: %s', self.path, cerr)
                exit(1)
            else:
                LOG.debug('Successfully streamed %s', self.path)

        except OSError as err:
            LOG.error('Failed to run %s: %s', cmd, err)
            exit(1)
Example #35
def _mysql_service(dst, action):
    """Start or stop MySQL service

    :param dst: Destination server
    :type dst: Ssh
    :param action: "start" or "stop"
    :type action: str
    """
    for service in ['mysqld', 'mysql']:
        try:
            return dst.execute_command(
                "PATH=$PATH:/sbin sudo service %s %s" % (service, action),
                quiet=True
            )
        except SshClientException as err:
            LOG.debug(err)

    try:
        LOG.warning('Failed to %s MySQL with an init script. '
                    'Will try to %s mysqld.', action, action)
        if action == "start":
            ret = dst.execute_command(
                "PATH=$PATH:/sbin sudo bash -c 'nohup mysqld &'",
                background=True
            )
            time.sleep(10)
            return ret
        elif action == "stop":
            return dst.execute_command(
                "PATH=$PATH:/sbin sudo kill $(pidof mysqld)"
            )
    except SshClientException as err:
        LOG.error(err)
        raise OperationError(
            'Failed to %s MySQL on %r'
            % (action, dst)
        )
Example #36
def _mysql_service(dst, action):
    """Start or stop MySQL service

    :param dst: Destination server
    :type dst: Ssh
    :param action: "start" or "stop"
    :type action: str
    """
    for service in ["mysqld", "mysql"]:
        try:
            return dst.execute_command(
                "PATH=$PATH:/sbin sudo service %s %s" % (service, action),
                quiet=True,
            )
        except SshClientException as err:
            LOG.debug(err)

    try:
        LOG.warning(
            "Failed to %s MySQL with an init script. "
            "Will try to %s mysqld.",
            action,
            action,
        )
        if action == "start":
            ret = dst.execute_command(
                "PATH=$PATH:/sbin sudo bash -c 'nohup mysqld &'",
                background=True,
            )
            time.sleep(10)
            return ret
        elif action == "stop":
            return dst.execute_command(
                "PATH=$PATH:/sbin sudo kill $(pidof mysqld)")
    except SshClientException as err:
        LOG.error(err)
        raise OperationError("Failed to %s MySQL on %r" % (action, dst))
Example #37
    def get_full_copy_name(self, file_path):
        """
        For a given backup copy find its parent. If the copy is a full one,
        return the copy itself

        :param file_path:
        :return:
        """
        try:
            for run_type in INTERVALS:
                for key in self.status()[run_type].keys():
                    if file_path.endswith(key):
                        if self.status()[run_type][key]['type'] == "full":
                            return file_path
                        else:
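                            # Incremental copy: swap the copy's key for its
                            # parent's to build the full copy's path.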
                            remote_part = file_path.replace(key, '')
                            parent = self.status()[run_type][key]['parent']
                            result = "%s%s" % (remote_part, parent)
                            return result
        except (TypeError, KeyError) as err:
            LOG.error('Failed to find parent of %s', file_path)
            raise DestinationError(err)

        raise DestinationError('Failed to find parent of %s' % file_path)
Example #38
def run_backup_job(twindb_config,
                   run_type,
                   lock_file=LOCK_FILE,
                   binlogs_only=False):
    """
    Grab a lock, waiting up to the allowed timeout, and start backup jobs

    :param twindb_config: Tool configuration
    :type twindb_config: TwinDBBackupConfig
    :param run_type: Run type
    :type run_type: str
    :param lock_file: File used as a lock
    :type lock_file: str
    :param binlogs_only: If True copy only binlogs.
    :type binlogs_only: bool
    """
    with timeout(get_timeout(run_type)):
        try:
            file_desriptor = open(lock_file, "w")
            fcntl.flock(file_desriptor, fcntl.LOCK_EX)
            LOG.debug(run_type)
            if getattr(twindb_config.run_intervals, run_type):
                backup_everything(run_type,
                                  twindb_config,
                                  binlogs_only=binlogs_only)
            else:
                LOG.debug("Not running because run_%s is no", run_type)
        except IOError as err:
            if err.errno != errno.EINTR:
                LOG.debug(traceback.format_exc())
                raise LockWaitTimeoutError(err)
            msg = "Another instance of twindb-backup is running?"
            if run_type == "hourly":
                LOG.debug(msg)
            else:
                LOG.error(msg)
Example #39
    def get_connection(self):
        """
        Connect to MySQL host and yield a connection.

        :return: MySQL connection
        :raise MySQLSourceError: if can't connect to server
        """
        connection = None
        try:
            connection = pymysql.connect(
                host=self.hostname,
                read_default_file=self.defaults_file,
                connect_timeout=self.connect_timeout,
                cursorclass=pymysql.cursors.DictCursor,
            )

            yield connection
        except OperationalError:
            LOG.error("Can't connect to MySQL server on %s", self.hostname)
            raise MySQLSourceError("Can't connect to MySQL server on %s" %
                                   self.hostname)
        finally:
            if connection:
                connection.close()
Example #40
def restore_file(cfg, dst, backup_copy):
    """Restore from file backup"""
    LOG.debug('file: %r', cfg)

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(cfg)
        exit(1)

    try:
        ensure_empty(dst)
        copy = FileCopy(
            get_hostname_from_backup_copy(backup_copy),
            basename(backup_copy),
            get_run_type_from_backup_copy(backup_copy)
        )
        restore_from_file(cfg, copy, dst)
    except TwinDBBackupError as err:
        LOG.error(err)
        exit(1)
    except KeyboardInterrupt:
        LOG.info('Exiting...')
        kill_children()
        exit(1)
Example #41
        def _download_object(s3_client, bucket_name, key, read_fd, write_fd):
            # The read end of the pipe must be closed in the child process
            # before we start writing to it.
            os.close(read_fd)

            with os.fdopen(write_fd, 'wb') as w_pipe:
                try:
                    retry_interval = 2
                    for _ in xrange(10):
                        try:
                            s3_client.download_fileobj(bucket_name,
                                                       key,
                                                       w_pipe)
                            return
                        except ClientError as err:
                            LOG.warning(err)
                            LOG.warning('Will retry in %d seconds',
                                        retry_interval)
                            time.sleep(retry_interval)
                            retry_interval *= 2

                except IOError as err:
                    LOG.error(err)
                    exit(1)
Example #42
    def execute(self, cmd, quiet=False, background=False):
        """Execute a command on a remote SSH server.

        :param cmd: Command for execution.
        :type cmd: str
        :param quiet: if quiet is True don't print error messages
        :param background: Don't wait until the command exits.
        :type background: bool
        :return: Strings with stdout and stderr. If command is executed
            in background the method will return None.
        :rtype: tuple
        :raise SshClientException: if any error or non-zero exit code

        """
        max_chunk_size = 1024 * 1024
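        # Drain stdout and stderr in 1 MiB chunks as they arrive so a full
        # channel buffer cannot stall the remote command.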
        try:
            with self._shell() as shell:
                if not background:
                    LOG.debug('Executing command: %s', cmd)
                    stdin_, stdout_, _ = shell.exec_command(cmd)
                    channel = stdout_.channel
                    stdin_.close()
                    channel.shutdown_write()
                    stdout_chunks = []
                    stderr_chunks = []
                    while not channel.closed \
                            or channel.recv_ready() \
                            or channel.recv_stderr_ready():
                        if channel.recv_ready():
                            stdout_chunks.append(
                                channel.recv(max_chunk_size).decode("utf-8"))
                        if channel.recv_stderr_ready():
                            stderr_chunks.append(
                                channel.recv_stderr(max_chunk_size).decode(
                                    "utf-8"))

                    exit_code = channel.recv_exit_status()
                    if exit_code != 0:
                        if not quiet:
                            LOG.error("Failed to execute command %s", cmd)
                            LOG.error(''.join(stderr_chunks))
                        raise SshClientException('%s exited with code %d' %
                                                 (cmd, exit_code))
                    return ''.join(stdout_chunks), ''.join(stderr_chunks)
                else:
                    LOG.debug('Executing in background: %s', cmd)
                    transport = shell.get_transport()
                    channel = transport.open_session()
                    channel.exec_command(cmd)
                    LOG.debug('Ran %s in background', cmd)

        except (SSHException, IOError) as err:
            if not quiet:
                LOG.error('Failed to execute %s: %s', cmd, err)
            raise SshClientException('Failed to execute %s: %s' % (cmd, err))
Example #43
def _extract_xbstream(
        input_stream,
        working_dir,
        xbstream_binary=XBSTREAM_BINARY):
    """
    Extract xbstream stream in directory

    :param input_stream: The stream in xbstream format
    :param working_dir: directory
    :param xbstream_binary: Path to xbstream
    :return: True if extracted successfully
    """
    try:
        cmd = [xbstream_binary, '-x']
        LOG.debug('Running %s', ' '.join(cmd))
        LOG.debug('Working directory: %s', working_dir)
        LOG.debug('Xbstream binary: %s', xbstream_binary)
        proc = Popen(
            cmd,
            stdin=input_stream,
            stdout=PIPE,
            stderr=PIPE,
            cwd=working_dir
        )
        cout, cerr = proc.communicate()
        ret = proc.returncode
        if ret:
            LOG.error('%s exited with code %d', ' '.join(cmd), ret)
            if cout:
                LOG.error('STDOUT: %s', cout)
            if cerr:
                LOG.error('STDERR: %s', cerr)
        return ret == 0

    except OSError as err:
        LOG.error('Failed to extract xbstream: %s', err)
        return False
Example #44
def test___find_all_cnf(mock_get_text_content, mock_list, tmpdir, mycnfs,
                        expected_result_template):
    mycnf_root = Path(tmpdir)

    # Prepare steps (writing config files with content)

    for key in mycnfs.keys():
        mycnf_root.joinpath(key).parent.mkdir(exist_ok=True)
        with open(str(mycnf_root.joinpath(key)), "w") as fp:
            fp.write(mycnfs[key])

    # mock helper functions
    def get_text_content(full_path):
        LOG.debug("Getting content of %s", full_path)
        # Cut the mycnf_root prefix from the full path and look up the
        # content in the mycnfs dictionary.
        return mycnfs["/".join(
            PurePath(full_path).parts[len(mycnf_root.parts):])]

    def get_list(path, recursive=False, files_only=True):
        return os.listdir(path)

    mock_get_text_content.side_effect = get_text_content
    mock_list.side_effect = get_list
    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "backup_type": 'full',
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None
    })
    expected_result = sorted(
        [osp.join(str(mycnf_root), item) for item in expected_result_template])
    actual_result = sorted(rmt_sql._find_all_cnf(
        mycnf_root.joinpath("my.cnf")))
    assert (actual_result == expected_result), LOG.error(
        "Expected: %s\nActual: %s" %
        (pformat(expected_result), pformat(actual_result)))
Example #45
def backup(cfg, run_type, lock_file):
    """Run backup job"""
    try:

        run_backup_job(cfg, run_type, lock_file=lock_file)

    except IOError as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)

    except ModifierException as err:
        LOG.error('Error in modifier class')
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)
    except KeyboardInterrupt:
        LOG.info('Exiting...')
        kill_children()
        exit(1)
Example #46
def restore_from_mysql_incremental(stream, dst_dir, config, tmp_dir=None,
                                   xtrabackup_binary=XTRABACKUP_BINARY,
                                   xbstream_binary=XBSTREAM_BINARY):
    """
    Restore MySQL datadir from an incremental copy.

    :param stream: Generator that provides backup copy
    :param dst_dir: Path to destination directory. Must exist and be empty.
    :type dst_dir: str
    :param config: Tool configuration.
    :type config: TwinDBBackupConfig
    :param tmp_dir: Path to temp dir
    :type tmp_dir: str
    :param xtrabackup_binary: Path to xtrabackup binary.
    :param xbstream_binary: Path to xbstream binary
    :return: If success, return True
    :rtype: bool
    """
    if tmp_dir is None:
        try:
            inc_dir = tempfile.mkdtemp()
        except (IOError, OSError):
            try:
                empty_dir(dst_dir)
            except (IOError, OSError):
                raise
            raise
    else:
        inc_dir = tmp_dir
    # GPG modifier
    if config.gpg:
        gpg = Gpg(
            stream,
            config.gpg.recipient,
            config.gpg.keyring,
            secret_keyring=config.gpg.secret_keyring
        )
        LOG.debug('Decrypting stream')
        stream = gpg.revert_stream()
    else:
        LOG.debug('Not decrypting the stream')

    stream = config.compression.get_modifier(stream).revert_stream()

    with stream as handler:
        if not _extract_xbstream(handler, inc_dir, xbstream_binary):
            return False

    try:
        mem_usage = psutil.virtual_memory()
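        # Two-phase prepare: first replay the base backup's redo log without the
        # final rollback (--apply-log-only), then merge the incremental on top.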
        try:
            xtrabackup_cmd = [
                xtrabackup_binary,
                '--use-memory=%d' % (mem_usage.available / 2),
                '--prepare',
                '--apply-log-only',
                '--target-dir=%s' % dst_dir
            ]
            LOG.debug('Running %s', ' '.join(xtrabackup_cmd))
            xtrabackup_proc = Popen(
                xtrabackup_cmd,
                stdout=None,
                stderr=None
            )
            xtrabackup_proc.communicate()
            ret = xtrabackup_proc.returncode
            if ret:
                LOG.error(
                    '%s exited with code %d',
                    " ".join(xtrabackup_cmd),
                    ret)
                return False

            xtrabackup_cmd = [
                xtrabackup_binary,
                '--use-memory=%d' % (mem_usage.available / 2),
                '--prepare',
                "--target-dir=%s" % dst_dir,
                "--incremental-dir=%s" % inc_dir
            ]
            LOG.debug('Running %s', ' '.join(xtrabackup_cmd))
            xtrabackup_proc = Popen(
                xtrabackup_cmd,
                stdout=None,
                stderr=None
            )
            xtrabackup_proc.communicate()
            ret = xtrabackup_proc.returncode
            if ret:
                LOG.error('%s exited with code %d',
                          " ".join(xtrabackup_cmd),
                          ret)
            return ret == 0
        except OSError as err:
            LOG.error('Failed to prepare backup in %s: %s', dst_dir, err)
            return False
    finally:
        try:
            pass
        except OSError as exc:
            if exc.errno != errno.ENOENT:  # ENOENT - no such file or directory
                raise  # re-raise exception
Example #47
    def get_stream(self, copy):
        """
        Get a PIPE handler with content of the backup copy streamed from
        the destination.

        :param copy: Backup copy
        :type copy: BaseCopy
        :return: Stream with backup copy
        :rtype: generator
        :raise S3DestinationError: if failed to stream a backup copy.
        """

        path = "%s/%s" % (self.remote_path, copy.key)
        object_key = urlparse(path).path.lstrip('/')

        def _download_object(s3_client, bucket_name, key, read_fd, write_fd):
            # The read end of the pipe must be closed in the child process
            # before we start writing to it.
            os.close(read_fd)

            with os.fdopen(write_fd, 'wb') as w_pipe:
                try:
                    retry_interval = 2
                    for _ in xrange(10):
                        try:
                            s3_client.download_fileobj(bucket_name,
                                                       key,
                                                       w_pipe)
                            return
                        except ClientError as err:
                            LOG.warning(err)
                            LOG.warning('Will retry in %d seconds',
                                        retry_interval)
                            time.sleep(retry_interval)
                            retry_interval *= 2

                except IOError as err:
                    LOG.error(err)
                    exit(1)

        download_proc = None
        try:
            LOG.debug('Fetching object %s from bucket %s',
                      object_key,
                      self._bucket)

            read_pipe, write_pipe = os.pipe()
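            # os.pipe() returns raw file descriptors: the child writes the S3 object
            # into write_pipe while this process hands read_pipe to the caller.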

            download_proc = Process(target=_download_object,
                                    args=(self.s3_client, self._bucket,
                                          object_key, read_pipe, write_pipe),
                                    name='_download_object')
            download_proc.start()

            # The write end of the pipe must be closed in this process before
            # we start reading from it.
            os.close(write_pipe)
            LOG.debug('read_pipe type: %s', type(read_pipe))
            yield read_pipe

            os.close(read_pipe)
            download_proc.join()

            if download_proc.exitcode:
                LOG.error('Failed to download %s', path)
                # exit(1)

            LOG.debug('Successfully streamed %s', path)

        finally:
            if download_proc:
                download_proc.join()
Example #48
    def get_stream(self, copy):
        """
        Get a PIPE handler with content of the backup copy streamed from
        the destination.

        :param copy: Backup copy
        :type copy: BaseCopy
        :return: Stream with backup copy
        :rtype: generator
        :raise S3DestinationError: if failed to stream a backup copy.
        """

        path = "%s/%s" % (self.remote_path, copy.key)
        object_key = urlparse(path).path.lstrip('/')

        def _download_object(s3_client, bucket_name, key, read_fd, write_fd):
            # The read end of the pipe must be closed in the child process
            # before we start writing to it.
            os.close(read_fd)

            with os.fdopen(write_fd, 'wb') as w_pipe:
                try:
                    retry_interval = 2
                    for _ in xrange(10):
                        try:
                            s3_client.download_fileobj(bucket_name, key,
                                                       w_pipe)
                            return
                        except ClientError as err:
                            LOG.warning(err)
                            LOG.warning('Will retry in %d seconds',
                                        retry_interval)
                            time.sleep(retry_interval)
                            retry_interval *= 2

                except IOError as err:
                    LOG.error(err)
                    exit(1)

        download_proc = None
        try:
            LOG.debug('Fetching object %s from bucket %s', object_key,
                      self.bucket)

            read_pipe, write_pipe = os.pipe()

            download_proc = Process(target=_download_object,
                                    args=(self.s3_client, self.bucket,
                                          object_key, read_pipe, write_pipe),
                                    name='_download_object')
            download_proc.start()

            # The write end of the pipe must be closed in this process before
            # we start reading from it.
            os.close(write_pipe)
            LOG.debug('read_pipe type: %s', type(read_pipe))
            yield read_pipe

            os.close(read_pipe)
            download_proc.join()

            if download_proc.exitcode:
                LOG.error('Failed to download %s', path)
                # exit(1)

            LOG.debug('Successfully streamed %s', path)

        finally:
            if download_proc:
                download_proc.join()
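One subtlety in the retry loop above: after the tenth ClientError it sleeps one final time and then falls through, so the writer exits without signalling failure. A reusable variant of the same exponential backoff (a hypothetical helper, not part of the project) would re-raise instead:

import time


def retry_with_backoff(func, attempts=10, first_delay=2,
                       exceptions=(Exception,)):
    """Call func() until it succeeds, doubling the delay between attempts."""
    delay = first_delay
    for attempt in range(attempts):
        try:
            return func()
        except exceptions:
            if attempt == attempts - 1:
                raise  # give the caller a real error instead of silence
            time.sleep(delay)
            delay *= 2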
Example #51
def test_clone(
        runner,
        master1,
        slave,
        docker_client,
        config_content_clone,
        client_my_cnf,
        rsa_private_key):

    twindb_config_dir = get_twindb_config_dir(docker_client, runner['Id'])
    twindb_config_host = "%s/twindb-backup-1.cfg" % twindb_config_dir
    twindb_config_guest = '/etc/twindb/twindb-backup-1.cfg'
    my_cnf_path = "%s/my.cnf" % twindb_config_dir

    private_key_host = "%s/private_key" % twindb_config_dir
    private_key_guest = "/etc/twindb/private_key"

    with open(my_cnf_path, "w") as my_cnf:
        my_cnf.write(client_my_cnf)

    with open(private_key_host, "w") as key_fd:
        key_fd.write(rsa_private_key)

    with open(twindb_config_host, 'w') as fp:
        content = config_content_clone.format(
            PRIVATE_KEY=private_key_guest,
            MY_CNF='/etc/twindb/my.cnf'
        )
        fp.write(content)

    cmd = '/usr/sbin/sshd'
    LOG.info('Run SSH daemon on master1_1')
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    print(cout)

    cmd = [
        'twindb-backup',
        '--debug',
        '--config', twindb_config_guest,
        'clone',
        'mysql',
        '%s:3306' % master1['ip'],
        '%s:3306' % slave['ip']
    ]
    pause_test(' '.join(cmd))
    ret, cout = docker_execute(docker_client, runner['Id'], cmd)
    print(cout)

    assert ret == 0
    sql_master_2 = RemoteMySQLSource({
        "ssh_host": slave['ip'],
        "ssh_user": '******',
        "ssh_key": private_key_guest,
        "mysql_connect_info": MySQLConnectInfo(
            my_cnf_path,
            hostname=slave['ip']
        ),
        "run_type": INTERVALS[0],
        "backup_type": 'full'
    })

    timeout = time.time() + 30
    while time.time() < timeout:
        with sql_master_2.get_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute('SHOW SLAVE STATUS')
                row = cursor.fetchone()
                if row['Slave_IO_Running'] == 'Yes' \
                        and row['Slave_SQL_Running'] == 'Yes':

                    LOG.info('Replication is up and running')
                    return

    LOG.error('Replication is not running after 30 seconds timeout')
    assert False
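The wait loop above re-opens a connection and re-runs SHOW SLAVE STATUS as fast as it can, and it would raise a TypeError if the query returned no row. A gentler variant (a sketch, assuming the same get_connection context manager) sleeps between polls and guards the empty case:

import time


def wait_for_replication(source, timeout=30, poll_interval=1):
    # Poll SHOW SLAVE STATUS until both threads report 'Yes' or time runs out.
    deadline = time.time() + timeout
    while time.time() < deadline:
        with source.get_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute('SHOW SLAVE STATUS')
                row = cursor.fetchone()
                if row and row['Slave_IO_Running'] == 'Yes' \
                        and row['Slave_SQL_Running'] == 'Yes':
                    return True
        time.sleep(poll_interval)
    return False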
Example #52
def test_clone(runner, master1, slave, docker_client, config_content_clone):

    twindb_config_dir = get_twindb_config_dir(docker_client, runner['Id'])
    twindb_config_host = "%s/twindb-backup-1.cfg" % twindb_config_dir
    twindb_config_guest = '/etc/twindb/twindb-backup-1.cfg'
    my_cnf_path = "%s/my.cnf" % twindb_config_dir

    private_key_host = "%s/private_key" % twindb_config_dir
    private_key_guest = "/etc/twindb/private_key"
    contents = """
[client]
user=dba
password=qwerty
"""
    with open(my_cnf_path, "w") as my_cnf:
        my_cnf.write(contents)

    private_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEoAIBAAKCAQEAyXxAjPShNGAedbaEtltFI6A7RlsyI+4evxTq6uQrgbJ6Hm+p
HBXshXQYXDyVjvytaM+6GKF+r+6+C+6Wc5Xz4lLO/ZiSCdPbyEgqw1JoHrgPNpc6
wmCtjJExxjzvpwSVgbZg3xOdqW1y+TyqeUkXEg/Lm4VZhN1Q/KyGCgBlWuAXoOYR
GhaNWqcnr/Wn5YzVHAx2yJNrurtKLVYVMIkGcN/6OUaPpWqKZLaXiK/28PSZ5GdT
DmxRg4W0pdyGEYQndpPlpLF4w5gNUEhVZM8hWVE29+DIW3XXVYGYchxmkhU7wrGx
xZR+k5AT+7g8VspVS8zNMXM9Z27w55EQuluNMQIBIwKCAQAzz35QIaXLo7APo/Y9
hS8JKTPQQ1YJPTsbMUO4vlRUjPrUoF6vc1oTsCOFbqoddCyXS1u9MNdvEYFThn51
flSn6WhtGJqU0BPxrChA2q0PNqTThfkqqyVQCBQdCFrhzfqPEaPhl1RtZUlzSh01
IWxVGgEn/bfu9xTTQk5aV9+MZQ2XKe4BGzpOZMI/B7ivRCcthEwMTx92opr52bre
4t7DahVLN/2Wu4lxajDzCaKXpjMuL76lFov0mZZN7S8whH5xSx1tpapHqsCAwfLL
k49lDdR8aN6oqoeK0e9w//McIaKxN2FVxD4bcuXiQTjihx+QwQOLmlHSRDKhTsYg
4Q5bAoGBAOgVZM2eqC8hNl5UH//uuxOeBKqwz7L/FtGemNr9m0XG8N9yE/K7A5iX
6EDvDyVI51IlIXdxfK8re5yxfbJ4YevenwdEZZ2O8YRrVByJ53PV9CcVeWL4p6f/
I56sYyDfXcnDTEOVYY0mCfYUfUcSb1ExpuIU4RvuQJg6tvbdxD9FAoGBAN4/pVCT
krRd6PJmt6Dbc2IF6N09OrAnLB3fivGztF5cp+RpyqZK4ve+akLoe1laTg7vNtnF
l/PZtM9v/VT45hb70MFEHO+sKvGa5Yimxkb6YCriJOcLxTysSgFHKz7v+8BqqoHi
qY4fORGwPVDv28I8jKRvcuNHendV/Rdcuk79AoGAd1t1q5NscAJzu3u4r4IXEWc1
mZzClpHROJq1AujTgviZInUu1JqxZGthgHrx2KkmggR3nIOB86/2bdefut7TRhq4
L5+Et24VzxKgSTD6sJnrR0zfV3iQvMxbdizFRBsaSoGyMWLEdHn2fo4xzMem9o6Q
VwNsdMOsMQhA1rsxuiMCgYBr8wcnIxte68jqxC1OIXKOsmnKi3RG7nSDidXF2vE1
JbCiJMGD+Hzeu5KyyLDw4rgzI7uOWKjkJ+obnMuBCy3t6AZPPlcylXPxsaKwFn2Q
MHfaUJWUyzPqRQ4AnukekdINAJv18cAR1Kaw0fHle9Ej1ERP3lxfw6HiMRSHsLJD
nwKBgCIXVhXCDaXOOn8M4ky6k27bnGJrTkrRjHaq4qWiQhzizOBTb+7MjCrJIV28
8knW8+YtEOfl5R053SKQgVsmRjjDfvCirGgqC4kSAN4A6MD+GNVXZVUUjAUBVUbU
8Wt4BxW6kFA7+Su7n8o4DxCqhZYmK9ZUhNjE+uUhxJCJaGr4
-----END RSA PRIVATE KEY-----
"""
    with open(private_key_host, "w") as key_fd:
        key_fd.write(private_key)

    with open(twindb_config_host, 'w') as fp:
        content = config_content_clone.format(
            PRIVATE_KEY=private_key_guest,
            MY_CNF='/etc/twindb/my.cnf'
        )
        fp.write(content)

    cmd = '/usr/sbin/sshd'
    # Run SSH daemon on master1_1
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    print(cout)

    cmd = ['twindb-backup', '--debug',
           '--config', twindb_config_guest,
           'clone', 'mysql',
           "%s:3306" % master1['ip'], "%s:3306" % slave['ip']
           ]
    ret, cout = docker_execute(docker_client, runner['Id'], cmd)
    print(cout)

    assert ret == 0
    sql_master_2 = RemoteMySQLSource({
        "ssh_host": slave['ip'],
        "ssh_user": '******',
        "ssh_key": private_key_guest,
        "mysql_connect_info": MySQLConnectInfo(
            my_cnf_path,
            hostname=slave['ip']
        ),
        "run_type": INTERVALS[0],
        "backup_type": 'full'
    })

    timeout = time.time() + 30
    while time.time() < timeout:
        with sql_master_2.get_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute('SHOW SLAVE STATUS')
                row = cursor.fetchone()
                if row['Slave_IO_Running'] == 'Yes' and row['Slave_SQL_Running'] == 'Yes':
                    LOG.info('Replication is up and running')
                    return
    LOG.error('Replication is not running after 30 seconds timeout')
    assert False
Example #53
def get_container(name,
                  client,
                  network,
                  datadir=None,
                  bootstrap_script=None,
                  last_n=1,
                  twindb_config_dir=None,
                  image=NODE_IMAGE):
    api = client.api

    api.pull(image)
    cwd = os.getcwd()
    LOG.debug('Current directory: %s', cwd)

    binds = {
        cwd: {
            'bind': '/twindb-backup',
            'mode': 'rw',
        }
    }
    if twindb_config_dir:
        LOG.debug('TwinDB config directory: %s', twindb_config_dir)
        mkdir_p(twindb_config_dir, mode=0o755)
        binds[twindb_config_dir] = {
            'bind': '/etc/twindb',
            'mode': 'rw',
        }
    if datadir:
        binds[datadir] = {
            'bind': '/var/lib/mysql',
            'mode': 'rw',
        }
    host_config = api.create_host_config(
        binds=binds, dns=['8.8.8.8', '208.67.222.222', '208.67.220.220'])

    ip = '172.%d.3.%d' % (network['second_octet'], last_n)
    networking_config = api.create_networking_config(
        {network['NAME']: api.create_endpoint_config(ipv4_address=ip)})

    LOG.debug(networking_config)

    container_hostname = '%s_%d' % (name, last_n)
    kwargs = {
        'image': image,
        'name': container_hostname,
        'ports': [22, 3306],
        'hostname': container_hostname,
        'host_config': host_config,
        'networking_config': networking_config,
        'volumes': ['/twindb-backup'],
        'environment': {}
    }
    try:
        kwargs['environment'] = {'DEV': os.environ['DEV']}
    except KeyError:
        pass
    if bootstrap_script:
        kwargs['command'] = 'bash %s' % bootstrap_script
    container = api.create_container(**kwargs)
    container['ip'] = ip
    LOG.info('Created container %r', container)
    try:
        api.start(container['Id'])
        LOG.info('Started %r', container)

        return container
    except APIError as err:
        LOG.error(err)
        client.api.remove_container(container=container['Id'], force=True)
Example #54
def clone_mysql(
        cfg,
        source,
        destination,  # pylint: disable=too-many-arguments
        replication_user,
        replication_password,
        netcat_port=9990,
        compress=False):
    """Clone mysql backup of remote machine and stream it to slave"""
    try:
        LOG.debug('Remote MySQL Source: %s', split_host_port(source)[0])
        LOG.debug('MySQL defaults: %s', cfg.get('mysql',
                                                'mysql_defaults_file'))
        LOG.debug('SSH username: %s', cfg.get('ssh', 'ssh_user'))
        LOG.debug('SSH key: %s', cfg.get('ssh', 'ssh_key'))
        src = RemoteMySQLSource({
            "ssh_host": split_host_port(source)[0],
            "ssh_user": cfg.get('ssh', 'ssh_user'),
            "ssh_key": cfg.get('ssh', 'ssh_key'),
            "mysql_connect_info": MySQLConnectInfo(
                cfg.get('mysql', 'mysql_defaults_file'),
                hostname=split_host_port(source)[0]
            ),
            "run_type": INTERVALS[0],
            "backup_type": 'full'
        })
        xbstream_binary = cfg.get('mysql', 'xbstream_binary')
        LOG.debug('SSH destination: %s', split_host_port(destination)[0])
        LOG.debug('SSH username: %s', cfg.get('ssh', 'ssh_user'))
        LOG.debug('SSH key: %s', cfg.get('ssh', 'ssh_key'))
        dst = Ssh('/tmp',
                  ssh_host=split_host_port(destination)[0],
                  ssh_user=cfg.get('ssh', 'ssh_user'),
                  ssh_key=cfg.get('ssh', 'ssh_key'))
        datadir = src.datadir
        LOG.debug('datadir: %s', datadir)

        if dst.list_files(datadir):
            LOG.error("Destination datadir is not empty: %s", datadir)
            exit(1)

        _run_remote_netcat(compress, datadir, destination, dst, netcat_port,
                           src, xbstream_binary)
        LOG.debug('Copying MySQL config to the destination')
        src.clone_config(dst)

        LOG.debug('Remote MySQL destination: %s',
                  split_host_port(destination)[0])
        LOG.debug('MySQL defaults: %s', cfg.get('mysql',
                                                'mysql_defaults_file'))
        LOG.debug('SSH username: %s', cfg.get('ssh', 'ssh_user'))
        LOG.debug('SSH key: %s', cfg.get('ssh', 'ssh_key'))

        dst_mysql = RemoteMySQLSource({
            "ssh_host": split_host_port(destination)[0],
            "ssh_user": cfg.get('ssh', 'ssh_user'),
            "ssh_key": cfg.get('ssh', 'ssh_key'),
            "mysql_connect_info": MySQLConnectInfo(
                cfg.get('mysql', 'mysql_defaults_file'),
                hostname=split_host_port(destination)[0]
            ),
            "run_type": INTERVALS[0],
            "backup_type": 'full'
        })

        binlog, position = dst_mysql.apply_backup(datadir)

        LOG.debug('Binlog coordinates: (%s, %d)', binlog, position)

        try:
            LOG.debug('Starting MySQL on the destination')
            _mysql_service(dst, action='start')
            LOG.debug('MySQL started')
        except TwinDBBackupError as err:
            LOG.error(err)
            exit(1)

        LOG.debug('Setting up replication.')
        LOG.debug('Master host: %s', source)
        LOG.debug('Replication user: %s', replication_user)
        LOG.debug('Replication password: %s', replication_password)
        dst_mysql.setup_slave(
            MySQLMasterInfo(host=split_host_port(source)[0],
                            port=split_host_port(source)[1],
                            user=replication_user,
                            password=replication_password,
                            binlog=binlog,
                            binlog_pos=position))
    except (ConfigParser.NoOptionError, OperationalError) as err:
        LOG.error(err)
        exit(1)
Example #55
def clone_mysql(cfg, source, destination,  # pylint: disable=too-many-arguments
                replication_user, replication_password,
                netcat_port=9990,
                compress=False):
    """Clone mysql backup of remote machine and stream it to slave

    :param cfg: TwinDB Backup tool config
    :type cfg: TwinDBBackupConfig
    """
    LOG.debug('Remote MySQL Source: %s', split_host_port(source)[0])
    LOG.debug('MySQL defaults: %s', cfg.mysql.defaults_file)
    LOG.debug('SSH username: %s', cfg.ssh.user)
    LOG.debug('SSH key: %s', cfg.ssh.key)
    src = RemoteMySQLSource(
        {
            "ssh_host": split_host_port(source)[0],
            "ssh_user": cfg.ssh.user,
            "ssh_key": cfg.ssh.key,
            "mysql_connect_info": MySQLConnectInfo(
                cfg.mysql.defaults_file,
                hostname=split_host_port(source)[0]),
            "run_type": INTERVALS[0],
            "backup_type": 'full'
        }
    )
    xbstream_binary = cfg.mysql.xbstream_binary
    LOG.debug('SSH destination: %s', split_host_port(destination)[0])
    LOG.debug('SSH username: %s', cfg.ssh.user)
    LOG.debug('SSH key: %s', cfg.ssh.key)
    dst = Ssh(
        '/tmp',
        ssh_host=split_host_port(destination)[0],
        ssh_user=cfg.ssh.user,
        ssh_key=cfg.ssh.key
    )
    datadir = src.datadir
    LOG.debug('datadir: %s', datadir)

    if dst.list_files(datadir):
        LOG.error("Destination datadir is not empty: %s", datadir)
        exit(1)

    _run_remote_netcat(
        compress,
        datadir,
        destination,
        dst,
        netcat_port,
        src,
        xbstream_binary
    )
    LOG.debug('Copying MySQL config to the destination')
    src.clone_config(dst)

    LOG.debug('Remote MySQL destination: %s',
              split_host_port(destination)[0])
    LOG.debug('MySQL defaults: %s', cfg.mysql.defaults_file)
    LOG.debug('SSH username: %s', cfg.ssh.user)
    LOG.debug('SSH key: %s', cfg.ssh.key)

    dst_mysql = RemoteMySQLSource({
        "ssh_host": split_host_port(destination)[0],
        "ssh_user": cfg.ssh.user,
        "ssh_key": cfg.ssh.key,
        "mysql_connect_info": MySQLConnectInfo(
            cfg.mysql.defaults_file,
            hostname=split_host_port(destination)[0]
        ),
        "run_type": INTERVALS[0],
        "backup_type": 'full'
    })

    binlog, position = dst_mysql.apply_backup(datadir)

    LOG.debug('Binlog coordinates: (%s, %d)', binlog, position)

    LOG.debug('Starting MySQL on the destination')
    _mysql_service(dst, action='start')
    LOG.debug('MySQL started')

    LOG.debug('Setting up replication.')
    LOG.debug('Master host: %s', source)
    LOG.debug('Replication user: %s', replication_user)
    LOG.debug('Replication password: %s', replication_password)
    dst_mysql.setup_slave(
        MySQLMasterInfo(
            host=split_host_port(source)[0],
            port=split_host_port(source)[1],
            user=replication_user,
            password=replication_password,
            binlog=binlog,
            binlog_pos=position
        )
    )
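For orientation, here is a hypothetical invocation of this function, assuming cfg is a TwinDBBackupConfig loaded elsewhere; the host names and credentials are purely illustrative:

clone_mysql(
    cfg,
    source='master1:3306',
    destination='slave1:3306',
    replication_user='repl',
    replication_password='secret',  # illustrative; never hard-code real ones
    netcat_port=9990,
    compress=True,
)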
Example #56
def get_container(
    name,
    client,
    network,
    datadir=None,
    bootstrap_script=None,
    last_n=1,
    twindb_config_dir=None,
    image=NODE_IMAGE,
):
    api = client.api

    api.pull(image)
    cwd = os.getcwd()
    LOG.debug("Current directory: %s", cwd)

    binds = {
        cwd: {
            "bind": "/twindb-backup",
            "mode": "rw",
        }
    }
    if twindb_config_dir:
        LOG.debug("TwinDB config directory: %s", twindb_config_dir)
        mkdir_p(twindb_config_dir, mode=0o755)
        binds[twindb_config_dir] = {
            "bind": "/etc/twindb",
            "mode": "rw",
        }
    if datadir:
        binds[datadir] = {
            "bind": "/var/lib/mysql",
            "mode": "rw",
        }
    host_config = api.create_host_config(
        binds=binds, dns=["8.8.8.8", "208.67.222.222", "208.67.220.220"])

    ip = "172.%d.3.%d" % (network["second_octet"], last_n)
    networking_config = api.create_networking_config(
        {network["NAME"]: api.create_endpoint_config(ipv4_address=ip)})

    LOG.debug(networking_config)

    container_hostname = "%s_%d" % (name, last_n)
    kwargs = {
        "image": image,
        "name": container_hostname,
        "ports": [22, 3306],
        "hostname": container_hostname,
        "host_config": host_config,
        "networking_config": networking_config,
        "volumes": ["/twindb-backup"],
        "environment": {},
    }
    try:
        kwargs["environment"] = {"DEV": os.environ["DEV"]}
    except KeyError:
        pass
    if bootstrap_script:
        kwargs["command"] = "bash %s" % bootstrap_script
    container = api.create_container(**kwargs)
    container["ip"] = ip
    LOG.info("Created container %r", container)
    try:
        api.start(container["Id"])
        LOG.info("Started %r", container)

        return container
    except APIError as err:
        LOG.error(err)
        client.api.remove_container(container=container["Id"], force=True)
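A hedged usage sketch follows, assuming a docker-py client and the two network keys ("NAME" and "second_octet") the function reads above; all values are illustrative:

import docker

client = docker.from_env()
network = {"NAME": "test_net", "second_octet": 25}  # illustrative values
master = get_container(
    "master1",
    client,
    network,
    twindb_config_dir="/tmp/twindb-test/etc",  # mounted as /etc/twindb
    last_n=1,
)
if master:  # get_container returns None if the container failed to start
    print(master["ip"])  # e.g. 172.25.3.1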
Example #57
def verify_mysql_backup(twindb_config, dst_path, backup_file, hostname=None):
    """
    Restore mysql backup and measure time

    :param hostname: hostname whose backups should be searched
    :param backup_file: key of the backup copy to verify, or "latest"
    :param dst_path: destination path used when restoring the copy
    :param twindb_config: tool configuration
    :type twindb_config: TwinDBBackupConfig

    """
    dst = twindb_config.destination(backup_source=hostname)
    status = MySQLStatus(dst=dst)
    copy = None

    if backup_file == "latest":
        copy = status.latest_backup
    else:
        for copy in status:
            if backup_file.endswith(copy.key):
                break
    if copy is None:
        return json.dumps(
            {
                "backup_copy": backup_file,
                "restore_time": 0,
                "success": False
            },
            indent=4,
            sort_keys=True,
        )
    start_restore_time = time.time()
    success = True
    tmp_dir = tempfile.mkdtemp()

    try:
        LOG.debug("Verifying backup copy in %s", tmp_dir)
        restore_from_mysql(twindb_config, copy, dst_path, tmp_dir)
        edit_backup_my_cnf(dst_path)
    except (TwinDBBackupError, OSError, IOError) as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        success = False
    finally:
        shutil.rmtree(tmp_dir, ignore_errors=True)

    end_restore_time = time.time()
    restore_time = end_restore_time - start_restore_time
    return json.dumps(
        {
            "backup_copy": copy.key,
            "restore_time": restore_time,
            "success": success,
        },
        indent=4,
        sort_keys=True,
    )
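The function reports through its JSON return value rather than by raising; a caller might consume it like this (a sketch with illustrative paths):

import json

report = json.loads(
    verify_mysql_backup(
        twindb_config,           # a loaded TwinDBBackupConfig
        dst_path='/tmp/verify',  # illustrative restore destination
        backup_file='latest',
    )
)
if not report['success']:
    LOG.error('Verification of %s failed', report['backup_copy'])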
Example #58
def restore_from_file(twindb_config, copy, dst_dir):
    """
    Restore a directory from a backup copy in the directory

    :param twindb_config: tool configuration
    :type twindb_config: TwinDBBackupConfig
    :param copy: Instance of BaseCopy or an inheriting class.
    :type copy: BaseCopy
    :param dst_dir: Path to destination directory. Must exist and be empty.
    :type dst_dir: str
    """
    LOG.info('Restoring %s in %s', copy.key, dst_dir)
    mkdir_p(dst_dir)
    restore_start = time.time()
    keep_local_path = twindb_config.keep_local_path

    if keep_local_path and os.path.exists(osp.join(keep_local_path, copy.key)):
        dst = Local(osp.join(keep_local_path, copy.key))
        stream = dst.get_stream(copy)
    else:
        dst = twindb_config.destination()
        stream = dst.get_stream(copy)

        # GPG modifier
        if twindb_config.gpg:
            gpg = Gpg(
                stream,
                twindb_config.gpg.recipient,
                twindb_config.gpg.keyring,
                secret_keyring=twindb_config.gpg.secret_keyring
            )
            LOG.debug('Decrypting stream')
            stream = gpg.revert_stream()
        else:
            LOG.debug('Not decrypting the stream')

    with stream as handler:
        try:
            LOG.debug('handler type: %s', type(handler))
            LOG.debug('stream type: %s', type(stream))
            cmd = ["tar", "zvxf", "-"]
            LOG.debug('Running %s', ' '.join(cmd))
            proc = Popen(cmd, stdin=handler, cwd=dst_dir)
            cout, cerr = proc.communicate()
            ret = proc.returncode
            if ret:
                LOG.error('%s exited with code %d', cmd, ret)
                if cout:
                    LOG.error('STDOUT: %s', cout)
                if cerr:
                    LOG.error('STDERR: %s', cerr)
                return
            LOG.info('Successfully restored %s in %s', copy.key, dst_dir)
        except (OSError, DestinationError) as err:
            LOG.error('Failed to decompress %s: %s', copy.key, err)
            exit(1)

    export_info(
        twindb_config,
        data=time.time() - restore_start,
        category=ExportCategory.files,
        measure_type=ExportMeasureType.restore
    )
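A hedged invocation sketch; the copy object would be located in the destination's status first, along the lines shown in verify_mysql_backup above, and the destination directory is illustrative:

# copy is a BaseCopy found in the backup status (see verify_mysql_backup
# above for the lookup pattern).
restore_from_file(
    twindb_config,
    copy,
    dst_dir='/var/tmp/restored-files',
)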
Example #59
def restore_from_mysql_full(stream, dst_dir, config, redo_only=False,
                            xtrabackup_binary=XTRABACKUP_BINARY,
                            xbstream_binary=XBSTREAM_BINARY):
    """
    Restore MySQL datadir from a backup copy

    :param stream: Generator that provides backup copy
    :param dst_dir: Path to destination directory. Must exist and be empty.
    :type dst_dir: str
    :param config: Tool configuration.
    :type config: TwinDBBackupConfig
    :param redo_only: If True, prepare the copy with --apply-log-only, i.e.
        apply the redo log but skip the final rollback phase. Use False when
        restoring a standalone full copy; use True when preparing a base
        full copy that incremental copies will be applied on top of.
    :type redo_only: bool
    :param xtrabackup_binary: Path to xtrabackup binary.
    :param xbstream_binary: Path to xbstream binary.
    :return: If success, return True
    :rtype: bool
    """
    # GPG modifier
    if config.gpg:
        gpg = Gpg(
            stream,
            config.gpg.recipient,
            config.gpg.keyring,
            secret_keyring=config.gpg.secret_keyring
        )
        LOG.debug('Decrypting stream')
        stream = gpg.revert_stream()
    else:
        LOG.debug('Not decrypting the stream')

    stream = config.compression.get_modifier(stream).revert_stream()

    with stream as handler:
        if not _extract_xbstream(handler, dst_dir, xbstream_binary):
            return False

    mem_usage = psutil.virtual_memory()
    try:
        xtrabackup_cmd = [xtrabackup_binary,
                          '--use-memory=%d' % (mem_usage.available/2),
                          '--prepare']
        if redo_only:
            xtrabackup_cmd += ['--apply-log-only']

        xtrabackup_cmd += ["--target-dir", dst_dir]

        LOG.debug('Running %s', ' '.join(xtrabackup_cmd))
        xtrabackup_proc = Popen(xtrabackup_cmd,
                                stdout=None,
                                stderr=None)
        xtrabackup_proc.communicate()
        ret = xtrabackup_proc.returncode
        if ret:
            LOG.error('%s exited with code %d', " ".join(xtrabackup_cmd), ret)
        return ret == 0
    except OSError as err:
        LOG.error('Failed to prepare backup in %s: %s', dst_dir, err)
        return False
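To make the redo_only semantics concrete, here is a hedged call sketch for preparing a base full copy before incrementals are applied; the stream and paths are illustrative, and applying the incrementals themselves is handled by a companion routine not shown here:

# Prepare the base full copy with --apply-log-only so that incremental
# copies can still be applied on top of it afterwards.
prepared = restore_from_mysql_full(
    base_stream,               # stream of the full copy (illustrative)
    '/var/lib/mysql-restore',
    config,
    redo_only=True,
)
if not prepared:
    LOG.error('Failed to prepare the base copy')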