Example #1
def _run_remote_netcat(
        compress,
        datadir,  # pylint: disable=too-many-arguments
        destination,
        dst,
        netcat_port,
        src):
    netcat_cmd = "xbstream -x -C {datadir}".format(datadir=datadir)
    if compress:
        netcat_cmd = "gunzip -c - | %s" % netcat_cmd

    # find unused port
    while netcat_port < 64000:
        if dst.ensure_tcp_port_listening(netcat_port, wait_timeout=1):
            netcat_port += 1
        else:
            LOG.debug('Will use port %d for streaming', netcat_port)
            break
    proc_netcat = Process(target=dst.netcat,
                          args=(netcat_cmd, ),
                          kwargs={'port': netcat_port})
    LOG.debug('Starting netcat on the destination')
    proc_netcat.start()
    nc_wait_timeout = 10
    if not dst.ensure_tcp_port_listening(netcat_port,
                                         wait_timeout=nc_wait_timeout):
        LOG.error('netcat on the destination '
                  'is not ready after %d seconds', nc_wait_timeout)
        proc_netcat.terminate()
        exit(1)
    src.clone(dest_host=split_host_port(destination)[0],
              port=netcat_port,
              compress=compress)
    proc_netcat.join()
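The port probe above relies on ensure_tcp_port_listening() checking the destination host. A minimal standalone sketch of the same idea, probing localhost with only the standard library (all names here are illustrative, not part of twindb-backup):

import socket

def find_free_port(start=55555, upper=64000):
    """Return the first port in [start, upper) nothing listens on."""
    for port in range(start, upper):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(1)
            # connect_ex() returns 0 when something accepts the
            # connection, i.e. the port is already taken.
            if sock.connect_ex(('127.0.0.1', port)) != 0:
                return port
    raise RuntimeError('no free port in range')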
Example #2
def backup_everything(run_type, twindb_config, binlogs_only=False):
    """
    Run backup job

    :param run_type: hourly, daily, etc
    :type run_type: str
    :param twindb_config: Tool configuration.
    :type twindb_config: TwinDBBackupConfig
    :param binlogs_only: If True copy only MySQL binary logs.
    :type binlogs_only: bool
    """
    set_open_files_limit()

    try:
        if not binlogs_only:
            backup_start = time.time()
            backup_files(run_type, twindb_config)
            backup_mysql(run_type, twindb_config)
            backup_binlogs(run_type, twindb_config)
            end = time.time()
            save_measures(backup_start, end)
        else:
            backup_binlogs(run_type, twindb_config)
    except configparser.NoSectionError as err:
        LOG.debug(traceback.format_exc())
        LOG.error(err)
        exit(1)
Example #3
    def setup_slave(self, master_info):  # noqa # pylint: disable=too-many-arguments
        """
        Change master

        :param master_info: Master details.
        :type master_info: MySQLMasterInfo

        """
        try:
            with self._cursor() as cursor:
                query = "CHANGE MASTER TO " \
                        "MASTER_HOST = '{master}', " \
                        "MASTER_USER = '******', " \
                        "MASTER_PORT = {port}, " \
                        "MASTER_PASSWORD = '******', " \
                        "MASTER_LOG_FILE = '{binlog}', " \
                        "MASTER_LOG_POS = {binlog_pos}"\
                    .format(
                        master=master_info.host,
                        user=master_info.user,
                        password=master_info.password,
                        binlog=master_info.binlog,
                        binlog_pos=master_info.binlog_position,
                        port=master_info.port
                    )
                cursor.execute(query)
                cursor.execute("START SLAVE")
            return True
        except pymysql.Error as err:
            LOG.debug(err)
            return False
Example #4
    def revert_stream(self):
        """
        Un-apply the modifier and return the output stream.
        The base modifier does nothing, so it returns the input stream
        without modifications.

        :return: output stream handle
        """
        with self._input as input_stream:
            LOG.debug('Running %s', ' '.join(self._unmodifier_cmd))
            proc = Popen(
                self._unmodifier_cmd,
                stdin=input_stream,
                stdout=PIPE,
                stderr=PIPE
            )
            yield proc.stdout

            _, cerr = proc.communicate()
            if proc.returncode:
                msg = '%s exited with non-zero code.' \
                      % ' '.join(self._unmodifier_cmd)
                LOG.error(msg)
                LOG.error(cerr)
                raise ModifierException(msg)
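revert_stream() yields, so it is presumably wrapped with contextlib.contextmanager elsewhere in the class hierarchy. A minimal self-contained sketch of the same yield-the-child's-stdout pattern, assuming gunzip as the un-modifier command:

from contextlib import contextmanager
from subprocess import PIPE, Popen

@contextmanager
def revert_gzip(input_stream):
    # Pipe the input through a decompressor and hand the caller its
    # stdout; check the exit code once the caller is done reading.
    proc = Popen(['gunzip', '-c'], stdin=input_stream,
                 stdout=PIPE, stderr=PIPE)
    try:
        yield proc.stdout
    finally:
        _, cerr = proc.communicate()
        if proc.returncode:
            raise RuntimeError(cerr)

A caller would then do ``with open('copy.gz', 'rb') as gz, revert_gzip(gz) as plain: plain.read()`` to stream the decompressed bytes.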
Example #5
def run_command(command, ok_non_zero=False):
    """
    Run shell command locally

    :param command: Command to run
    :type command: list
    :param ok_non_zero: Don't consider non-zero exit code as an error.
    :type ok_non_zero: bool
    :return: file object with stdout as generator to use with ``with``
    """
    try:
        LOG.debug('Running %s', " ".join(command))
        proc = Popen(command, stderr=PIPE, stdout=PIPE)

        yield proc.stdout

        _, cerr = proc.communicate()

        if proc.returncode and not ok_non_zero:
            LOG.error('Command %s exited with error code %d',
                      ' '.join(command),
                      proc.returncode)
            LOG.error(cerr)
            exit(1)
        else:
            LOG.debug('Exited with zero code')

    except OSError as err:
        LOG.error('Failed to run %s',
                  ' '.join(command))
        LOG.error(err)
        exit(1)
Example #6
def verify_mysql_backup(twindb_config, dst_path, backup_file, hostname=None):
    """
    Restore mysql backup and measure time

    :param hostname: Hostname the backup was taken from.
    :param backup_file: Backup copy name or ``latest``.
    :param dst_path: Path where the backup will be restored.
    :param twindb_config: tool configuration
    :type twindb_config: TwinDBBackupConfig

    """
    dst = twindb_config.destination(backup_source=hostname)
    status = MySQLStatus(dst=dst)
    copy = None

    if backup_file == "latest":
        copy = status.latest_backup
    else:
        for copy in status:
            if backup_file.endswith(copy.key):
                break
    if copy is None:
        return json.dumps(
            {
                'backup_copy': backup_file,
                'restore_time': 0,
                'success': False
            },
            indent=4,
            sort_keys=True)
    start_restore_time = time.time()
    success = True
    tmp_dir = tempfile.mkdtemp()

    try:

        LOG.debug('Verifying backup copy in %s', tmp_dir)
        restore_from_mysql(twindb_config, copy, dst_path, tmp_dir)
        edit_backup_my_cnf(dst_path)

    except (TwinDBBackupError, OSError, IOError) as err:

        LOG.error(err)
        LOG.debug(traceback.format_exc())
        success = False

    finally:

        shutil.rmtree(tmp_dir, ignore_errors=True)

    end_restore_time = time.time()
    restore_time = end_restore_time - start_restore_time
    return json.dumps(
        {
            'backup_copy': copy.key,
            'restore_time': restore_time,
            'success': success
        },
        indent=4,
        sort_keys=True)
Example #7
    def _find_all_cnf(self, root_path):
        """ Return a list of included cnf files

        :param root_path: Path to the originating my.cnf config.
            (/etc/my.cnf, or /etc/mysql/my.cnf)
        :type root_path: Path
        :return: List of all included my.cnf files
        :rtype: list
        """
        files = [str(root_path)]
        cfg_content = self._ssh_client.get_text_content(str(root_path))
        for line in cfg_content.splitlines():
            if "!includedir" in line:
                rel_path = line.split()[1]
                file_list = self._ssh_client.list_files(
                    root_path.parent.joinpath(rel_path),
                    recursive=False,
                    files_only=True,
                )
                LOG.debug(file_list)
                for sub_file in file_list:
                    files.extend(
                        self._find_all_cnf(
                            root_path.parent.joinpath(rel_path).joinpath(
                                sub_file
                            )
                        )
                    )
            elif "!include" in line:
                rel_path = line.split()[1]
                files.extend(
                    self._find_all_cnf(root_path.parent.joinpath(rel_path))
                )
        return files
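For comparison, the same traversal against a local filesystem (no SSH) could look like the sketch below. It assumes the usual my.cnf semantics where !includedir pulls in every *.cnf file of a directory; find_all_cnf_local is a made-up name.

from pathlib import Path

def find_all_cnf_local(root_path):
    # Recursively collect a my.cnf and everything it includes.
    root_path = Path(root_path)
    files = [str(root_path)]
    for line in root_path.read_text().splitlines():
        if line.startswith('!includedir'):
            inc_dir = root_path.parent / line.split()[1]
            for sub_file in sorted(inc_dir.glob('*.cnf')):
                files.extend(find_all_cnf_local(sub_file))
        elif line.startswith('!include'):
            files.extend(
                find_all_cnf_local(root_path.parent / line.split()[1])
            )
    return files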
Example #8
    def disable_wsrep_desync(self):
        """
        Wait till wsrep_local_recv_queue is zero,
        then disable wsrep_desync.
        """
        max_time = time.time() + 900
        try:
            with self.get_connection() as connection:
                with connection.cursor() as cursor:
                    while time.time() < max_time:
                        cursor.execute("SHOW GLOBAL STATUS LIKE "
                                       "'wsrep_local_recv_queue'")

                        res = {r['Variable_name'].lower(): r['Value'].lower()
                               for r in cursor.fetchall()}

                        if not res.get('wsrep_local_recv_queue'):
                            raise Exception('Unknown status variable '
                                            '"wsrep_local_recv_queue"')

                        if int(res['wsrep_local_recv_queue']) == 0:
                            break

                        time.sleep(1)

                    LOG.debug('Disabling wsrep_desync')
                    cursor.execute("SET GLOBAL wsrep_desync=OFF")
        except pymysql.Error as err:
            LOG.error(err)
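Stripped of the MySQL specifics, the body is a classic deadline/poll loop. A generic sketch of the same control flow (names illustrative):

import time

def wait_until(predicate, timeout=900, interval=1):
    # Poll ``predicate`` until it returns True or ``timeout`` seconds
    # have passed; mirrors the wsrep_local_recv_queue wait above.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False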
Example #9
    def setup_slave(self, master_info):  # noqa # pylint: disable=too-many-arguments
        """
        Change master

        :param master_info: Master details.
        :type master_info: MySQLMasterInfo

        """
        try:
            with self._cursor() as cursor:
                query = "CHANGE MASTER TO " \
                        "MASTER_HOST = '{master}', " \
                        "MASTER_USER = '******', " \
                        "MASTER_PORT = {port}, " \
                        "MASTER_PASSWORD = '******', " \
                        "MASTER_LOG_FILE = '{binlog}', " \
                        "MASTER_LOG_POS = {binlog_pos}"\
                    .format(
                        master=master_info.host,
                        user=master_info.user,
                        password=master_info.password,
                        binlog=master_info.binlog,
                        binlog_pos=master_info.binlog_position,
                        port=master_info.port
                    )
                cursor.execute(query)
                cursor.execute("START SLAVE")
            return True
        except pymysql.Error as err:
            LOG.debug(err)
            return False
Example #10
    def _shell(self):
        """
        Create SSHClient instance and connect to the destination host.

        :return: Connected to the remote destination host shell.
        :rtype: generator(SSHClient)
        :raise SshDestinationError: if the ssh client fails to connect.
        """
        shell = SSHClient()
        shell.set_missing_host_key_policy(AutoAddPolicy())
        try:
            LOG.debug("Connecting to %s:%d as %s with key %s", self._host,
                      self._port, self._user, self._key)
            shell.connect(hostname=self._host,
                          key_filename=self._key,
                          port=self._port,
                          username=self._user)
            yield shell
        except FileNotFoundError:
            raise
        except (AuthenticationException, SSHException, socket.error) as err:
            # print(type(err))
            raise SshClientException(err)
        finally:
            shell.close()
Example #11
def _mysql_service(dst, action):
    """Start or stop MySQL service

    :param dst: Destination server
    :type dst: Ssh
    :param action: string start or stop
    :type action: str
    """
    for service in ['mysqld', 'mysql']:
        try:
            return dst.execute_command("PATH=$PATH:/sbin sudo service %s %s" %
                                       (service, action),
                                       quiet=True)
        except SshClientException as err:
            LOG.debug(err)

    try:
        LOG.warning(
            'Failed to %s MySQL with an init script. '
            'Will try to %s mysqld.', action, action)
        if action == "start":
            ret = dst.execute_command(
                "PATH=$PATH:/sbin sudo bash -c 'nohup mysqld &'",
                background=True)
            time.sleep(10)
            return ret
        elif action == "stop":
            return dst.execute_command(
                "PATH=$PATH:/sbin sudo kill $(pidof mysqld)")
    except SshClientException as err:
        LOG.error(err)
        raise TwinDBBackupError('Failed to %s MySQL on %r' % (action, dst))
Example #12
def backup_everything(run_type, twindb_config, binlogs_only=False):
    """
    Run backup job

    :param run_type: hourly, daily, etc
    :type run_type: str
    :param twindb_config: Tool configuration.
    :type twindb_config: TwinDBBackupConfig
    :param binlogs_only: If True copy only MySQL binary logs.
    :type binlogs_only: bool
    """
    set_open_files_limit()

    try:
        if not binlogs_only:
            backup_start = time.time()
            backup_files(run_type, twindb_config)
            backup_mysql(run_type, twindb_config)
            backup_binlogs(run_type, twindb_config)
            end = time.time()
            save_measures(backup_start, end)
        else:
            backup_binlogs(run_type, twindb_config)
    except ConfigParser.NoSectionError as err:
        LOG.debug(traceback.format_exc())
        LOG.error(err)
        exit(1)
Example #13
    def execute(self, cmd, quiet=False):
        """Execute a command on a remote SSH server.

        :param cmd: Command for execution.
        :type cmd: str
        :param quiet: if quiet is True don't print error messages
        :return: Handlers of stdin, stdout and stderr
        :rtype: tuple
        :raise SshDestinationError: if any error

        """
        try:
            with self._shell() as shell:
                LOG.debug('Executing %s', cmd)
                stdin_, stdout_, stderr_ = shell.exec_command(cmd)
                # while not stdout_.channel.exit_status_ready():
                #     LOG.debug('%s: waiting', cmd)
                #     time.sleep(1)
                exit_code = stdout_.channel.recv_exit_status()
                if exit_code != 0:
                    if not quiet:
                        LOG.error("Failed while execute command %s", cmd)
                        LOG.error(stderr_.read())
                    raise SshClientException('%s exited with code %d'
                                             % (cmd, exit_code))
                return stdin_, stdout_, stderr_

        except (SSHException, IOError) as err:
            if not quiet:
                LOG.error('Failed to execute %s: %s', cmd, err)
            raise SshClientException('Failed to execute %s: %s'
                                     % (cmd, err))
Example #14
def test__take_mysql_backup(s3_client, config_content_mysql_only, tmpdir):
    config = tmpdir.join('twindb-backup.cfg')
    content = config_content_mysql_only.format(
        AWS_ACCESS_KEY_ID=os.environ['AWS_ACCESS_KEY_ID'],
        AWS_SECRET_ACCESS_KEY=os.environ['AWS_SECRET_ACCESS_KEY'],
        BUCKET=s3_client.bucket,
        daily_copies=1,
        hourly_copies=2)
    config.write(content)
    cmd = [
        'twindb-backup', '--debug', '--config',
        str(config), 'backup', 'hourly'
    ]
    assert call(cmd) == 0

    cmd = ['twindb-backup', '--config', str(config), 'status']
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    cout, cerr = proc.communicate()

    LOG.debug('STDOUT: %s', cout)
    LOG.debug('STDERR: %s', cerr)

    key = list(json.loads(cout)['hourly'].keys())[0]

    assert key.endswith('.xbstream.gz')
Example #15
def backup_files(run_type, config):
    """Backup local directories

    :param run_type: Run type
    :type run_type: str
    :param config: Configuration
    :type config: TwinDBBackupConfig
    """
    backup_start = time.time()
    try:
        for directory in config.backup_dirs:
            LOG.debug('copying %s', directory)
            src = FileSource(directory, run_type)
            dst = config.destination()
            _backup_stream(config, src, dst)
            src.apply_retention_policy(dst, config, run_type)
    except (
            DestinationError,
            SourceError,
            SshClientException
    ) as err:
        raise OperationError(err)
    export_info(config, data=time.time() - backup_start,
                category=ExportCategory.files,
                measure_type=ExportMeasureType.backup)
Example #16
    def apply_backup(self, datadir):
        """
        Apply the backup on the destination server

        :param datadir: Path to datadir
        :return: Binlog file name and position
        :rtype: tuple
        :raise RemoteMySQLSourceError: if any error.
        """
        try:
            use_memory = "--use-memory %d" % int(self._mem_available() / 2)
        except OSError:
            use_memory = ""
        logfile_path = "/tmp/xtrabackup-apply-log.log"
        cmd = "sudo {xtrabackup} --prepare --apply-log-only " \
              "--target-dir {target_dir} {use_memory} " \
              "> {logfile} 2>&1" \
              "".format(
                  xtrabackup=self._xtrabackup,
                  target_dir=datadir,
                  use_memory=use_memory,
                  logfile=logfile_path
              )

        try:
            self._ssh_client.execute(cmd)
            self._ssh_client.execute("sudo chown -R mysql %s" % datadir)
            return self._get_binlog_info(datadir)
        except SshClientException as err:
            LOG.debug("Logfile is:")
            LOG.debug(self._ssh_client.get_text_content(logfile_path))
            raise RemoteMySQLSourceError(err)
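_mem_available() is not part of this listing. On Linux it could plausibly read /proc/meminfo, which would also explain the OSError handling above; a hypothetical sketch:

def mem_available():
    # Return MemAvailable in bytes; raises OSError if /proc/meminfo
    # is missing (e.g. on non-Linux hosts), matching the except above.
    with open('/proc/meminfo') as meminfo:
        for line in meminfo:
            if line.startswith('MemAvailable:'):
                return int(line.split()[1]) * 1024  # value is in kB
    raise OSError('MemAvailable not found in /proc/meminfo')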
Example #17
def restore_mysql(cfg, dst, backup_copy, cache):
    """Restore from mysql backup"""
    LOG.debug('mysql: %r', cfg)

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(cfg)
        exit(1)

    try:
        ensure_empty(dst)
        dst_storage = get_destination(
            cfg,
            get_hostname_from_backup_copy(backup_copy)
        )
        key = dst_storage.basename(backup_copy)
        copy = dst_storage.status()[key]
        if cache:
            restore_from_mysql(cfg, copy, dst, cache=Cache(cache))
        else:
            restore_from_mysql(cfg, copy, dst)

    except (TwinDBBackupError, CacheException) as err:
        LOG.error(err)
        exit(1)
    except (OSError, IOError) as err:
        LOG.error(err)
        exit(1)
Example #18
    def setup_slave(self, host, user, password, binlog, binlog_position):  # noqa # pylint: disable=too-many-arguments
        """
        Change master

        :param host: Master host name.
        :type host: str
        :param user: Replication user.
        :param password: Replication password
        :param binlog: Binlog file on the master
        :param binlog_position: Binlog position

        """
        try:
            with self._cursor() as cursor:
                query = "CHANGE MASTER TO " \
                        "MASTER_HOST = '{master}', " \
                        "MASTER_USER = '******', " \
                        "MASTER_PASSWORD = '******', " \
                        "MASTER_LOG_FILE = '{binlog}', " \
                        "MASTER_LOG_POS = {binlog_pos}"\
                    .format(
                        master=host,
                        user=user,
                        password=password,
                        binlog=binlog,
                        binlog_pos=binlog_position)
                cursor.execute(query)
                cursor.execute("START SLAVE")
            return True
        except pymysql.Error as err:
            LOG.debug(err)
            return False
Example #19
    def apply_retention_policy(self, dst, config, run_type, status):
        """
        Delete old backup copies.

        :param dst: Destination where the backups are stored.
        :type dst: BaseDestination
        :param config: Tool configuration
        :type config: ConfigParser.ConfigParser
        :param run_type: Run type.
        :type run_type: str
        :param status: Backups status.
        :type status: dict
        :return: Updated status.
        :rtype: dict
        """

        prefix = "{remote_path}/{prefix}/mysql/mysql-".format(
            remote_path=dst.remote_path, prefix=self.get_prefix())
        keep_copies = config.getint('retention', '%s_copies' % run_type)

        objects = dst.list_files(prefix)

        for backup_copy in get_files_to_delete(objects, keep_copies):
            LOG.debug('Deleting remote file %s', backup_copy)
            dst.delete(backup_copy)
            status = self._delete_from_status(status, dst.remote_path,
                                              backup_copy)

        self._delete_local_files('mysql', config)

        return status
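get_files_to_delete() itself is not shown in this listing. A hypothetical stand-in that keeps the newest keep_copies entries, assuming the timestamped backup names sort chronologically:

def files_to_delete(backup_copies, keep_copies):
    # Everything except the ``keep_copies`` newest entries is
    # eligible for deletion; keep_copies == 0 deletes them all.
    ordered = sorted(backup_copies)
    if keep_copies <= 0:
        return ordered
    return ordered[:-keep_copies]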
Example #20
def _extract_xbstream(input_stream, working_dir):
    """
    Extract an xbstream stream into a directory

    :param input_stream: The stream in xbstream format
    :param working_dir: directory
    :return: True if extracted successfully
    """
    try:
        cmd = ['xbstream', '-x']
        LOG.debug('Running %s', ' '.join(cmd))
        LOG.debug('Working directory: %s', working_dir)
        proc = Popen(cmd,
                     stdin=input_stream,
                     stdout=PIPE,
                     stderr=PIPE,
                     cwd=working_dir)
        cout, cerr = proc.communicate()
        ret = proc.returncode
        if ret:
            LOG.error('%s exited with code %d', ' '.join(cmd), ret)
            if cout:
                LOG.error('STDOUT: %s', cout)
            if cerr:
                LOG.error('STDERR: %s', cerr)
        return ret == 0

    except OSError as err:
        LOG.error('Failed to extract xbstream: %s', err)
        return False
Example #21
def run_command(command, ok_non_zero=False):
    """
    Run shell command locally

    :param command: Command to run
    :type command: list
    :param ok_non_zero: Don't consider non-zero exit code as an error.
    :type ok_non_zero: bool
    :return: file object with stdout as generator to use with ``with``
    """
    try:
        LOG.debug("Running %s", " ".join(command))
        proc = Popen(command, stderr=PIPE, stdout=PIPE)

        yield proc.stdout

        _, cerr = proc.communicate()

        if proc.returncode and not ok_non_zero:
            LOG.error(
                "Command %s exited with error code %d",
                " ".join(command),
                proc.returncode,
            )
            LOG.error(cerr)
            sys.exit(1)
        else:
            LOG.debug("Exited with zero code")

    except OSError as err:
        LOG.error("Failed to run %s", " ".join(command))
        LOG.error(err)
        sys.exit(1)
Example #22
    def get_stream(self):
        """
        Stream the content of one binary log file.

        :return: stream of bytes with the binlog content.
        """
        with self._mysql_client.cursor() as cursor:
            cursor.execute("SELECT @@log_bin_basename AS log_bin_basename")
            row = cursor.fetchone()
            log_bin_basename = row['log_bin_basename']

        log_bin_dirname = osp.dirname(log_bin_basename)
        log_bin_file = osp.join(log_bin_dirname, self._binlog_file)

        cmd = [
            "cat",
            log_bin_file,
        ]
        try:
            LOG.debug('Running %s', ' '.join(cmd))

            proc = Popen(cmd, stderr=PIPE, stdout=PIPE)

            yield proc.stdout

            _, cerr = proc.communicate()
            if proc.returncode:
                LOG.error('Failed to read from %s: %s', log_bin_file, cerr)
                exit(1)
            else:
                LOG.debug('Successfully streamed %s', log_bin_file)

        except OSError as err:
            LOG.error('Failed to run %s: %s', cmd, err)
            exit(1)
Example #23
    def list_files(self, path, recursive=False, files_only=False):
        """
        Get a list of files under a path

        :param path: Path.
        :type path: str
        :param recursive: If True, list files recursively.
        :type recursive: bool
        :param files_only: Don't list directories if True. Default is False.
        :type files_only: bool
        :return: List of files
        :rtype: list
        """
        rec_cond = "" if recursive else " -maxdepth 1"
        fil_cond = " -type f" if files_only else ""

        cmd = "bash -c 'if test -d {path} ; " \
              "then find {path}{recursive}{files_only}; fi'"
        cmd = cmd.format(path=path, recursive=rec_cond, files_only=fil_cond)
        cout, cerr = self.execute(cmd)
        LOG.debug("COUT:\n%s", cout)
        LOG.debug("CERR:\n%s", cerr)

        if files_only:
            return cout.split()
        else:
            return cout.split()[1:]
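A local analogue of the same listing trick, assuming GNU find: when directories are included, find prints the starting directory itself first, which is why the first entry is dropped. list_files_local is a made-up name.

import subprocess

def list_files_local(path, recursive=False, files_only=False):
    cmd = ['find', path]
    if not recursive:
        cmd += ['-maxdepth', '1']
    if files_only:
        cmd += ['-type', 'f']
    result = subprocess.run(cmd, capture_output=True, text=True,
                            check=True)
    entries = result.stdout.split()
    # Drop the starting directory unless -type f already
    # filtered directories out.
    return entries if files_only else entries[1:]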
Example #24
    def revert_stream(self):
        """
        Un-apply the modifier and return the output stream.
        The base modifier does nothing, so it returns the input stream
        without modifications.

        :return: output stream handle
        """
        with self._input as input_stream:
            LOG.debug("Running %s", " ".join(self._unmodifier_cmd))
            proc = Popen(
                self._unmodifier_cmd,
                stdin=input_stream,
                stdout=PIPE,
                stderr=PIPE,
            )
            yield proc.stdout

            _, cerr = proc.communicate()
            if proc.returncode:
                msg = "%s exited with non-zero code." % " ".join(
                    self._unmodifier_cmd)
                LOG.error(msg)
                LOG.error(cerr)
                raise ModifierException(msg)
Example #25
def get_container(name, bootstrap_script, client, network, last_n=1):
    api = client.api

    api.pull(NODE_IMAGE)
    cwd = os.getcwd()
    host_config = api.create_host_config(
        binds={cwd: {
            'bind': '/twindb-backup',
            'mode': 'rw',
        }},
        dns=['8.8.8.8'])

    ip = '172.%d.3.%d' % (network['second_octet'], last_n)
    networking_config = api.create_networking_config(
        {network['NAME']: api.create_endpoint_config(ipv4_address=ip)})

    LOG.debug(networking_config)

    container = api.create_container(image=NODE_IMAGE,
                                     name='%s_%d' % (name, last_n),
                                     ports=[22, 3306],
                                     host_config=host_config,
                                     networking_config=networking_config,
                                     volumes=['/twindb-backup'],
                                     command='bash %s' % bootstrap_script)
    container['ip'] = ip
    LOG.info('Created container %r', container)
    try:
        api.start(container['Id'])
        LOG.info('Started %r', container)

        return container
    except APIError as err:
        LOG.error(err)
        client.api.remove_container(container=container['Id'], force=True)
Example #26
def container_network(docker_client):
    api = docker_client.api
    network = None
    network_params = {
        'NAME': NETWORK_NAME,
        'subnet': None,
        'second_octet': None
    }
    ipam_config = _ipam_config()

    subnet = ipam_config['Config'][0]['Subnet']
    network_params['subnet'] = subnet
    network_params['second_octet'] = int(subnet.split('.')[1])

    try:
        network = api.create_network(name=NETWORK_NAME,
                                     driver="bridge",
                                     ipam=ipam_config,
                                     check_duplicate=True)
        LOG.info('Created subnet %s', network_params['subnet'])
        LOG.debug(network)
    except APIError as err:
        if err.status_code == 500:
            LOG.info('Network %r already exists', network)
        else:
            raise

    yield network_params
    if network:
        api.remove_network(net_id=network['Id'])
Example #27
def container_network(docker_client):
    api = docker_client.api
    network = None
    network_params = {
        "NAME": NETWORK_NAME,
        "subnet": None,
        "second_octet": None
    }
    ipam_config = _ipam_config()

    subnet = ipam_config["Config"][0]["Subnet"]
    network_params["subnet"] = subnet
    network_params["second_octet"] = int(subnet.split(".")[1])

    try:
        network = api.create_network(name=NETWORK_NAME,
                                     driver="bridge",
                                     ipam=ipam_config,
                                     check_duplicate=True)
        LOG.info("Created subnet %s", network_params["subnet"])
        LOG.debug(network)
    except APIError as err:
        if err.status_code == 500:
            LOG.info("Network %r already exists", network)
        else:
            raise

    yield network_params
    if network:
        api.remove_network(net_id=network["Id"])
Example #28
    def apply_backup(self, datadir):
        """
        Apply the backup on the destination server

        :param datadir: Path to datadir
        :return: Binlog file name and position
        :rtype: tuple
        :raise RemoteMySQLSourceError: if any error.
        """
        try:
            use_memory = "--use-memory %d" % int(self._mem_available() / 2)
        except OSError:
            use_memory = ""
        logfile_path = "/tmp/xtrabackup-apply-log.log"
        cmd = "sudo {xtrabackup} --prepare --apply-log-only " \
              "--target-dir {target_dir} {use_memory} " \
              "> {logfile} 2>&1" \
              "".format(
                  xtrabackup=self._xtrabackup,
                  target_dir=datadir,
                  use_memory=use_memory,
                  logfile=logfile_path
              )

        try:
            self._ssh_client.execute(cmd)
            self._ssh_client.execute("sudo chown -R mysql %s" % datadir)
            return self._get_binlog_info(datadir)
        except SshClientException as err:
            LOG.debug("Logfile is:")
            LOG.debug(self._ssh_client.get_text_content(logfile_path))
            raise RemoteMySQLSourceError(err)
Example #29
    def disable_wsrep_desync(self):
        """
        Wait till wsrep_local_recv_queue is zero,
        then disable wsrep_desync.
        """
        max_time = time.time() + 900
        try:
            with self.get_connection() as connection:
                with connection.cursor() as cursor:
                    while time.time() < max_time:
                        cursor.execute("SHOW GLOBAL STATUS LIKE "
                                       "'wsrep_local_recv_queue'")

                        res = {r['Variable_name'].lower(): r['Value'].lower()
                               for r in cursor.fetchall()}

                        if not res.get('wsrep_local_recv_queue'):
                            raise Exception('Unknown status variable '
                                            '"wsrep_local_recv_queue"')

                        if int(res['wsrep_local_recv_queue']) == 0:
                            break

                        time.sleep(1)

                    LOG.debug('Disabling wsrep_desync')
                    cursor.execute("SET GLOBAL wsrep_desync=OFF")
        except pymysql.Error as err:
            LOG.error(err)
Example #30
def _backup_stream(config, src, dst, callbacks=None):
    stream = src.get_stream()
    # Gzip modifier
    stream = Gzip(stream).get_stream()
    src.suffix += '.gz'
    # KeepLocal modifier
    try:
        keep_local_path = config.get('destination', 'keep_local_path')
        kl_modifier = KeepLocal(stream,
                                os.path.join(keep_local_path, src.get_name()))
        stream = kl_modifier.get_stream()
        if callbacks is not None:
            callbacks.append((kl_modifier, {
                'keep_local_path': keep_local_path,
                'dst': dst
            }))
    except ConfigParser.NoOptionError:
        LOG.debug('keep_local_path is not present in the config file')
    # GPG modifier
    try:
        stream = Gpg(stream, config.get('gpg', 'recipient'),
                     config.get('gpg', 'keyring')).get_stream()
        src.suffix += '.gpg'
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        pass
    except ModifierException as err:
        LOG.warning(err)
        LOG.warning('Will skip encryption')
    if not dst.save(stream, src.get_name()):
        LOG.error('Failed to save backup copy %s', src.get_name())
        exit(1)
Example #31
    def get_stream(self, copy):
        """
        Get a PIPE handler with content of the backup copy streamed from
        the destination.

        :param copy: Backup copy.
        :type copy: BaseCopy
        :return: Standard output.
        :rtype: file
        """

        path = "%s/%s" % (self.remote_path, copy.key)
        cmd = "cat %s" % path

        def _read_write_chunk(channel, write_fd, size=1024):
            while channel.recv_ready():
                chunk = channel.recv(size)
                LOG.debug('read %d bytes', len(chunk))
                if chunk:
                    os.write(write_fd, chunk)

        def _write_to_pipe(read_fd, write_fd):
            try:
                os.close(read_fd)

                with self._ssh_client.session() as channel:
                    LOG.debug('Executing %s', cmd)
                    channel.exec_command(cmd)

                    while not channel.exit_status_ready():
                        _read_write_chunk(channel, write_fd)

                    LOG.debug('closing channel')
                    _read_write_chunk(channel, write_fd)
                    channel.recv_exit_status()

            except KeyboardInterrupt:
                return

        read_process = None

        try:
            read_pipe, write_pipe = os.pipe()
            read_process = Process(target=_write_to_pipe,
                                   args=(read_pipe, write_pipe),
                                   name='_write_to_pipe')
            read_process.start()
            os.close(write_pipe)
            yield read_pipe

            os.close(read_pipe)
            read_process.join()

            if read_process.exitcode:
                raise SshDestinationError('Failed to download %s' % path)
            LOG.debug('Successfully streamed %s', path)
        finally:
            if read_process:
                read_process.join()
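The pipe-plus-process plumbing above generalizes to any producer. A minimal sketch of the pattern, assuming the fork start method (the Linux default) so the child inherits the pipe descriptors:

import os
from multiprocessing import Process

def _producer(read_fd, write_fd):
    # Child: close the read end it inherited, write, close.
    os.close(read_fd)
    os.write(write_fd, b'streamed payload\n')
    os.close(write_fd)

read_fd, write_fd = os.pipe()
proc = Process(target=_producer, args=(read_fd, write_fd))
proc.start()
os.close(write_fd)  # parent keeps only the read end
with os.fdopen(read_fd, 'rb') as reader:
    print(reader.read())
proc.join()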
Example #32
def restore_from_file(twindb_config, copy, dst_dir):
    """
    Restore a directory from a backup copy into the destination directory

    :param twindb_config: tool configuration
    :type twindb_config: TwinDBBackupConfig
    :param copy: Instance of BaseCopy or an inheriting class.
    :type copy: BaseCopy
    :param dst_dir: Path to destination directory. Must exist and be empty.
    :type dst_dir: str
    """
    LOG.info('Restoring %s in %s', copy.key, dst_dir)
    mkdir_p(dst_dir)
    restore_start = time.time()
    keep_local_path = twindb_config.keep_local_path

    if keep_local_path and os.path.exists(osp.join(keep_local_path, copy.key)):
        dst = Local(osp.join(keep_local_path, copy.key))
        stream = dst.get_stream(copy)
    else:
        dst = twindb_config.destination()
        stream = dst.get_stream(copy)

        # GPG modifier
        if twindb_config.gpg:
            gpg = Gpg(stream,
                      twindb_config.gpg.recipient,
                      twindb_config.gpg.keyring,
                      secret_keyring=twindb_config.gpg.secret_keyring)
            LOG.debug('Decrypting stream')
            stream = gpg.revert_stream()
        else:
            LOG.debug('Not decrypting the stream')

    with stream as handler:
        try:
            LOG.debug('handler type: %s', type(handler))
            LOG.debug('stream type: %s', type(stream))
            cmd = ["tar", "zvxf", "-"]
            LOG.debug('Running %s', ' '.join(cmd))
            proc = Popen(cmd, stdin=handler, cwd=dst_dir)
            cout, cerr = proc.communicate()
            ret = proc.returncode
            if ret:
                LOG.error('%s exited with code %d', cmd, ret)
                if cout:
                    LOG.error('STDOUT: %s', cout)
                if cerr:
                    LOG.error('STDERR: %s', cerr)
                return
            LOG.info('Successfully restored %s in %s', copy.key, dst_dir)
        except (OSError, DestinationError) as err:
            LOG.error('Failed to decompress %s: %s', copy.key, err)
            exit(1)

    export_info(twindb_config,
                data=time.time() - restore_start,
                category=ExportCategory.files,
                measure_type=ExportMeasureType.restore)
Example #33
def get_destination(config, hostname=socket.gethostname()):
    """
    Read config and return instance of Destination class.

    :param config: Tool configuration.
    :type config: ConfigParser.ConfigParser
    :param hostname: Local hostname.
    :type hostname: str
    :return: Instance of destination class.
    :rtype: BaseDestination
    """
    destination = None
    try:
        destination = config.get('destination', 'backup_destination')
        LOG.debug('Destination in the config %s', destination)
        destination = destination.strip('"\'')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        LOG.critical("Backup destination must be specified "
                     "in the config file")
        exit(-1)

    if destination == "ssh":
        host = config.get('ssh', 'backup_host')
        try:
            port = int(config.get('ssh', 'port'))
        except ConfigParser.NoOptionError:
            port = 22
        try:
            ssh_key = config.get('ssh', 'ssh_key')
        except ConfigParser.NoOptionError:
            ssh_key = '/root/.ssh/id_rsa'
            LOG.debug('ssh_key is not defined in config. '
                      'Will use default %s', ssh_key)
        user = config.get('ssh', 'ssh_user')
        remote_path = config.get('ssh', 'backup_dir')
        return Ssh(
            remote_path,
            SshConnectInfo(
                host=host,
                port=port,
                user=user,
                key=ssh_key),
            hostname=hostname)

    elif destination == "s3":
        bucket = config.get('s3', 'BUCKET').strip('"\'')
        access_key_id = config.get('s3', 'AWS_ACCESS_KEY_ID').strip('"\'')
        secret_access_key = config.get('s3',
                                       'AWS_SECRET_ACCESS_KEY').strip('"\'')
        default_region = config.get('s3', 'AWS_DEFAULT_REGION').strip('"\'')

        return S3(bucket, AWSAuthOptions(access_key_id,
                                         secret_access_key,
                                         default_region=default_region),
                  hostname=hostname)

    else:
        LOG.critical('Destination %s is not supported', destination)
        exit(-1)
Example #34
    def _update_backup_info(self, stderr_file):
        """Update backup_info from stderr"""

        LOG.debug("xtrabackup error log file %s", stderr_file.name)
        self._backup_info.lsn = self._get_lsn(stderr_file.name)
        self._backup_info.binlog_coordinate = self.get_binlog_coordinates(
            stderr_file.name)
        os.unlink(stderr_file.name)
Example #35
    def _get_name(self, filename_prefix):

        LOG.debug('Suffix = %s', self.suffix)
        self._name = osp.join(
            self.get_prefix(), self._media_type,
            "{file}-{time}.{suffix}".format(file=filename_prefix,
                                            time=self._created_at,
                                            suffix=self._suffix))
        return self._name
Example #36
def verify_mysql(cfg, hostname, dst, backup_copy):
    """Verify backup"""
    LOG.debug('mysql: %r', cfg)

    if not backup_copy:
        list_available_backups(cfg)
        exit(1)

    print(verify_mysql_backup(cfg, dst, backup_copy, hostname))
Example #37
def restore_from_file(config, backup_copy, dst_dir):
    """
    Restore a directory from a backup copy into the destination directory

    :param config: Tool configuration.
    :type config: ConfigParser.ConfigParser
    :param backup_copy: Backup name.
    :type backup_copy: str
    :param dst_dir: Path to destination directory. Must exist and be empty.
    :type dst_dir: str
    """
    LOG.info('Restoring %s in %s', backup_copy, dst_dir)
    mkdir_p(dst_dir)
    restore_start = time.time()
    if os.path.exists(backup_copy):
        dst = Local(backup_copy)
        stream = dst.get_stream(backup_copy)
    else:
        dst = get_destination(config)
        stream = dst.get_stream(backup_copy)
        # GPG modifier
        try:
            gpg = Gpg(stream,
                      config.get('gpg', 'recipient'),
                      config.get('gpg', 'keyring'),
                      secret_keyring=config.get('gpg', 'secret_keyring'))
            LOG.debug('Decrypting stream')
            stream = gpg.revert_stream()
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            LOG.debug('Not decrypting the stream')

    with stream as handler:
        try:
            LOG.debug('handler type: %s', type(handler))
            LOG.debug('stream type: %s', type(stream))
            cmd = ["tar", "zvxf", "-"]
            LOG.debug('Running %s', ' '.join(cmd))
            proc = Popen(cmd, stdin=handler, cwd=dst_dir)
            cout, cerr = proc.communicate()
            ret = proc.returncode
            if ret:
                LOG.error('%s exited with code %d', cmd, ret)
                if cout:
                    LOG.error('STDOUT: %s', cout)
                if cerr:
                    LOG.error('STDERR: %s', cerr)
                return
            LOG.info('Successfully restored %s in %s', backup_copy, dst_dir)
        except (OSError, DestinationError) as err:
            LOG.error('Failed to decompress %s: %s', backup_copy, err)
            exit(1)

    export_info(config,
                data=time.time() - restore_start,
                category=ExportCategory.files,
                measure_type=ExportMeasureType.restore)
Example #38
def set_open_files_limit():
    """Detect maximum supported number of open file and set it"""
    max_files = getrlimit(RLIMIT_NOFILE)[0]
    while True:
        try:
            setrlimit(RLIMIT_NOFILE, (max_files, max_files))
            max_files += 1
        except ValueError:
            break
    LOG.debug('Setting max files limit to %d', max_files)
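The loop probes one descriptor at a time until setrlimit() refuses. A simpler variant raises the soft limit straight to the hard limit in one call, which needs no extra privileges (sketch; note hard may be resource.RLIM_INFINITY, in which case it is skipped here):

from resource import RLIMIT_NOFILE, getrlimit, setrlimit

def set_open_files_limit_simple():
    soft, hard = getrlimit(RLIMIT_NOFILE)
    if hard > soft:
        # An unprivileged process may raise its soft limit up to
        # the hard limit.
        setrlimit(RLIMIT_NOFILE, (hard, hard))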
Example #39
    def _update_backup_info(self, stderr_file):
        """Update backup_info from stderr"""

        LOG.debug('xtrabackup error log file %s',
                  stderr_file.name)
        self._backup_info.lsn = self._get_lsn(stderr_file.name)
        self._backup_info.binlog_coordinate = self.get_binlog_coordinates(
            stderr_file.name
        )
        os.unlink(stderr_file.name)
Example #40
    def save(self, handler, filepath):
        """
        Read from handler and save it to Amazon S3

        :param filepath: save backup copy in a file with this name
        :param handler: stdout handler from backup source
        """
        with handler as file_obj:
            ret = self._upload_object(file_obj, filepath)
            LOG.debug('Returning code %d', ret)
Example #41
    def clone_config(self, dst):
        """
        Clone config to destination server

        :param dst: Destination server
        :type dst: Ssh
        """
        cfg_path = self._get_root_my_cnf()
        LOG.debug("Root my.cnf is: %s", cfg_path)
        self._save_cfg(dst, cfg_path)
Example #42
def test_get_stream(gs):
    status = MySQLStatus(dst=gs)
    copy = status['master1/daily/mysql/mysql-2019-04-04_05_29_05.xbstream.gz']

    with gs.get_stream(copy) as stream:
        LOG.debug('starting reading from pipe')
        content = stream.read()
        LOG.debug('finished reading from pipe')
    assert len(content), 'Failed to read from GS'
    LOG.info('Read %d bytes', len(content))
Example #43
def verify_mysql_backup(twindb_config, dst_path, backup_file, hostname=None):
    """
    Restore mysql backup and measure time

    :param hostname: Hostname the backup was taken from.
    :param backup_file: Backup copy name or ``latest``.
    :param dst_path: Path where the backup will be restored.
    :param twindb_config: tool configuration
    :type twindb_config: TwinDBBackupConfig

    """
    dst = twindb_config.destination(backup_source=hostname)
    status = MySQLStatus(dst=dst)
    copy = None

    if backup_file == "latest":
        copy = status.latest_backup
    else:
        for copy in status:
            if backup_file.endswith(copy.key):
                break
    if copy is None:
        return json.dumps({
            'backup_copy': backup_file,
            'restore_time': 0,
            'success': False
        }, indent=4, sort_keys=True)
    start_restore_time = time.time()
    success = True
    tmp_dir = tempfile.mkdtemp()

    try:

        LOG.debug('Verifying backup copy in %s', tmp_dir)
        restore_from_mysql(twindb_config, copy, dst_path, tmp_dir)
        edit_backup_my_cnf(dst_path)

    except (TwinDBBackupError, OSError, IOError) as err:

        LOG.error(err)
        LOG.debug(traceback.format_exc())
        success = False

    finally:

        shutil.rmtree(tmp_dir, ignore_errors=True)

    end_restore_time = time.time()
    restore_time = end_restore_time - start_restore_time
    return json.dumps({
        'backup_copy': copy.key,
        'restore_time': restore_time,
        'success': success
    }, indent=4, sort_keys=True)
Example #44
def bucket_name():
    travis_job_number = os.environ.get('TRAVIS_JOB_NUMBER')
    LOG.debug('TRAVIS_JOB_NUMBER=%s' % travis_job_number)

    number = random.randint(0, 1000000)
    LOG.debug('Default job number %d' % number)

    if travis_job_number:
        bucket = 'twindb-backup-test-travis-%s' % travis_job_number
    else:
        bucket = 'twindb-backup-test-travis-%d' % number

    return '%s-%s' % (bucket, time.time())
Example #45
    def candidate_parent(self, run_type):
        """
        Find a backup copy that can be a parent

        :param run_type: See :func:`~get_backup_type`.
        :return: Backup copy or None
        :rtype: MySQLCopy
        """
        full_backup_index = INTERVALS.index(run_type)
        LOG.debug('Looking for a parent candidate for %s run', run_type)
        for i in range(full_backup_index, len(INTERVALS)):
            period_copies = getattr(self, INTERVALS[i])
            LOG.debug(
                'Checking %d %s copies',
                len(period_copies),
                INTERVALS[i]
            )
            for _, value in period_copies.items():
                try:
                    if value.type == 'full':
                        LOG.debug('Found parent %r', value)
                        return value
                except KeyError:
                    return None
        LOG.debug('No eligible parents')
        return None
Example #46
    def enable_wsrep_desync(self):
        """
        Try to enable wsrep_desync

        :return: True if wsrep_desync was enabled. False if not supported
        """
        try:
            with self.get_connection() as connection:
                with connection.cursor() as cursor:
                    cursor.execute('SET GLOBAL wsrep_desync=ON')
            return True
        except pymysql.Error as err:
            LOG.debug(err)
            return False
Example #47
    def _get_name(self, filename_prefix):

        LOG.debug('Suffix = %s', self.suffix)
        self._name = osp.join(
            self.get_prefix(),
            self._media_type,
            "{file}-{time}.{suffix}".format(
                file=filename_prefix,
                time=self._created_at,
                suffix=self._suffix
            )

        )
        return self._name
Example #48
def s3_client(bucket_name):
    LOG.debug('Bucket: %s' % bucket_name)
    client = S3(
        bucket=bucket_name,
        aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
        aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY']
    )
    try:
        assert client.create_bucket()

        yield client

    finally:

        client.delete_bucket(force=True)
Example #49
    def get_stream(self):
        """
        Compress the input stream and return it as the output stream

        :return: output stream handle
        """
        with self._input as input_stream:
            LOG.debug('Running %s', ' '.join(self._modifier_cmd))
            proc = Popen(
                self._modifier_cmd,
                stdin=input_stream,
                stdout=PIPE,
                stderr=PIPE
            )
            yield proc.stdout
            proc.communicate()
Example #50
def _backup_stream(config, src, dst, callbacks=None):
    """

    :param config: Tool config
    :type config: TwinDBBackupConfig
    :param src: Backup source.
    :param dst: Backup destination.
    :param callbacks: List to collect modifier callbacks.
    :return:
    """
    stream = src.get_stream()

    # Compression modifier
    cmp_modifier = config.compression.get_modifier(stream)
    stream = cmp_modifier.get_stream()
    src.suffix += cmp_modifier.suffix

    # KeepLocal modifier
    if config.keep_local_path:
        keep_local_path = config.keep_local_path
        kl_modifier = KeepLocal(
            stream,
            osp.join(
                keep_local_path,
                src.get_name()
            )
        )
        stream = kl_modifier.get_stream()
        if callbacks is not None:
            callbacks.append((kl_modifier, {
                'keep_local_path': keep_local_path,
                'dst': dst
            }))
    else:
        LOG.debug('keep_local_path is not present in the config file')
    # GPG modifier
    if config.gpg:
        gpg_modifier = Gpg(
            stream,
            config.gpg.recipient,
            config.gpg.keyring
        )

        stream = gpg_modifier.get_stream()
        src.suffix += '.gpg'
    dst.save(stream, src.get_name())
Example #51
    def apply_retention_policy(self, dst, config, run_type):
        """Apply retention policy
        """
        prefix = "{remote_path}/{prefix}/files/{file}".format(
            remote_path=dst.remote_path,
            prefix=self.get_prefix(),
            file=self._sanitize_filename()
        )
        keep_copies = getattr(config.retention, run_type)

        backups_list = dst.list_files(prefix)

        LOG.debug('Remote copies: %r', backups_list)
        for backup_copy in get_files_to_delete(backups_list, keep_copies):
            LOG.debug('Deleting remote file %s', backup_copy)
            dst.delete(backup_copy)

        self._delete_local_files(self._sanitize_filename(), config)
Example #52
def backup(ctx, run_type, lock_file, binlogs_only):
    """Run backup job"""
    try:

        run_backup_job(
            ctx.obj['twindb_config'],
            run_type,
            lock_file=lock_file,
            binlogs_only=binlogs_only
        )
    except TwinDBBackupError as err:
        LOG.error(err)
        LOG.debug(traceback.format_exc())
        exit(1)

    except KeyboardInterrupt:
        LOG.info('Exiting...')
        kill_children()
        exit(1)
Example #53
    def run_intervals(self):
        """
        Run-intervals config: decides whether to run the backup now.

        :return: Configuration with data on whether to run the backup tool now.
        :rtype: RunIntervals
        """
        kwargs = {}
        try:
            kwargs = {
                i: self.__cfg.getboolean('intervals', 'run_%s' % i)
                for i in INTERVALS
            }

        except (NoOptionError, NoSectionError) as err:
            LOG.debug(err)
            LOG.debug('Will use default retention policy')

        return RunIntervals(**kwargs)
Example #54
    def delete(self, path):
        """Deletes an S3 object.

        :param path: Key of S3 object.
        :type path: str
        :raise S3DestinationError: if failed to delete object.
        """
        key = path.replace(
            's3://%s/' % self._bucket,
            ''
        ) if path.startswith('s3://') else path

        s3client = boto3.resource('s3')
        bucket = s3client.Bucket(self._bucket)

        s3obj = s3client.Object(bucket.name, key)
        LOG.debug('deleting s3://%s/%s', bucket.name, key)

        return s3obj.delete()
Example #55
def restore_file(ctx, dst, backup_copy):
    """Restore from file backup"""
    LOG.debug('file: %r', ctx.obj['twindb_config'])

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(ctx.obj['twindb_config'])
        exit(1)

    try:
        ensure_empty(dst)
        copy = FileCopy(path=backup_copy)
        restore_from_file(ctx.obj['twindb_config'], copy, dst)
    except TwinDBBackupError as err:
        LOG.error(err)
        exit(1)
    except KeyboardInterrupt:
        LOG.info('Exiting...')
        kill_children()
        exit(1)
Example #56
def verify_mysql(ctx, hostname, dst, backup_copy):
    """Verify backup"""
    LOG.debug('mysql: %r', ctx.obj['twindb_config'])

    try:
        if not backup_copy:
            list_available_backups(ctx.obj['twindb_config'])
            exit(1)

        print(
            verify_mysql_backup(
                ctx.obj['twindb_config'],
                dst,
                backup_copy,
                hostname
            )
        )

    finally:

        shutil.rmtree(dst, ignore_errors=True)
Example #57
def test_init_creates_instance_from_new(status_raw_content):
    status = MySQLStatus(status_raw_content)
    assert status.version == STATUS_FORMAT_VERSION
    key = 'master1/hourly/mysql/mysql-2018-03-28_04_11_16.xbstream.gz'
    copy = MySQLCopy(
        'master1',
        'hourly',
        'mysql-2018-03-28_04_11_16.xbstream.gz',
        backup_started=1522210276,
        backup_finished=1522210295,
        binlog='mysql-bin.000001',
        parent='master1/daily/mysql/mysql-2018-03-28_04_09_53.xbstream.gz',
        lsn=19903207,
        config={
            '/etc/my.cnf': """[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0

server_id=100
gtid_mode=ON
log-bin=mysql-bin
log-slave-updates
enforce-gtid-consistency

[mysqld_safe]
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid
"""
        },
        position=46855,
        type='incremental'

    )
    assert key in status.hourly
    LOG.debug("Copy %s: %r", copy.key, copy)
    LOG.debug("Copy from status %s: %r", key, status[key])
    assert status[key] == copy
Example #58
    def get_stream(self):
        """
        Get a PIPE handler with the content of the source.

        :return: stream with the xtrabackup output.
        """
        cmd = [
            self._xtrabackup,
            "--defaults-file=%s" % self._connect_info.defaults_file,
            "--stream=xbstream",
            "--host=127.0.0.1",
            "--backup"
        ]
        cmd += ["--target-dir", "."]
        if self.is_galera():
            cmd.append("--galera-info")
            cmd.append("--no-backup-locks")
        if self.incremental:
            cmd += [
                "--incremental-basedir",
                ".",
                "--incremental-lsn=%d" % self._parent_lsn
            ]
        # If this is a Galera node then additional step needs to be taken to
        # prevent the backups from locking up the cluster.
        wsrep_desynced = False
        LOG.debug('Running %s', ' '.join(cmd))
        stderr_file = tempfile.NamedTemporaryFile(delete=False)
        try:
            if self.is_galera():
                wsrep_desynced = self.enable_wsrep_desync()

            LOG.debug('Running %s', ' '.join(cmd))
            proc_xtrabackup = Popen(cmd,
                                    stderr=stderr_file,
                                    stdout=PIPE)

            yield proc_xtrabackup.stdout

            proc_xtrabackup.communicate()
            if proc_xtrabackup.returncode:
                LOG.error('Failed to run xtrabackup. '
                          'Check error output in %s', stderr_file.name)
                try:
                    if LOG.debug_enabled:
                        with open(stderr_file.name) as xb_out:
                            for line in xb_out:
                                print(line, end='', file=sys.stderr)
                except AttributeError:
                    pass
                self.dst.delete(self.get_name())
                exit(1)
            else:
                LOG.debug('Successfully streamed xtrabackup output')
            self._update_backup_info(stderr_file)
        except OSError as err:
            LOG.error('Failed to run %s: %s', cmd, err)
            exit(1)
        finally:
            if wsrep_desynced:
                self.disable_wsrep_desync()
Example #59
    def _upload_object(self, file_obj, object_key):
        """Upload objects to S3 in streaming fashion.

        :param file file_obj: A file-like object to upload. At a minimum, it
            must implement the read method, and must return bytes.
        :param str object_key: The destination key where to upload the object.
        :raise S3DestinationError: if failed to upload object.
        """
        remote_name = "s3://{bucket}/{name}".format(
            bucket=self._bucket,
            name=object_key
        )

        LOG.debug("Generating S3 transfer config")
        s3_transfer_config = self.get_transfer_config()

        LOG.debug("Starting to stream to %s", remote_name)
        try:
            self.s3_client.upload_fileobj(file_obj,
                                          self._bucket,
                                          object_key,
                                          Config=s3_transfer_config)
            LOG.debug("Successfully streamed to %s", remote_name)
        except ClientError as err:
            raise S3DestinationError(err)

        return self._validate_upload(object_key)
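get_transfer_config() is not included in this listing; boto3's TransferConfig is the standard knob for tuning such streamed multipart uploads. A sketch with made-up bucket and key names:

import boto3
from boto3.s3.transfer import TransferConfig

transfer_config = TransferConfig(multipart_chunksize=8 * 1024 * 1024)
s3 = boto3.client('s3')
with open('mysql-backup.xbstream.gz', 'rb') as body:
    # upload_fileobj() streams the file-like object without
    # buffering the whole backup in memory.
    s3.upload_fileobj(
        body,
        'example-bucket',
        'master1/hourly/mysql/mysql-backup.xbstream.gz',
        Config=transfer_config,
    )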
Example #60
    def _validate_upload(self, object_key):
        """
        Validates that upload of an object was successful.
        Raises an exception if the response code is not 200.

        :raise S3DestinationError: if object is not available on
            the destination.
        """
        remote_name = "s3://{bucket}/{name}".format(
            bucket=self._bucket,
            name=object_key
        )

        LOG.debug("Validating upload to %s", remote_name)

        response = self.s3_client.get_object(Bucket=self._bucket,
                                             Key=object_key)
        self.validate_client_response(response)

        LOG.debug("Upload successfully validated")

        return 0