Example #1
    def apply_retention_policy(self, dst, config, run_type, status):
        """
        Delete old backup copies.

        :param dst: Destination where the backups are stored.
        :type dst: BaseDestination
        :param config: Tool configuration
        :type config: TwinDBBackupConfig
        :param run_type: Run type.
        :type run_type: str
        :param status: Backups status.
        :type status: Status
        :return: Updated status.
        :rtype: Status
        """

        prefix = osp.join(dst.remote_path, self.get_prefix(), "mysql")
        keep_copies = getattr(config.retention, run_type)

        backups_list = dst.list_files(prefix, files_only=True)
        LOG.debug("Remote copies: %r", backups_list)
        for backup_file in get_files_to_delete(backups_list, keep_copies):
            LOG.debug("Deleting remote file %s", backup_file)
            dst.delete(backup_file)
            try:
                status.remove(backup_file)

            except StatusKeyNotFound as err:
                LOG.warning(err)
                LOG.debug("Status: %r", status)

        self._delete_local_files("mysql", config)

        return status
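Note: apply_retention_policy() delegates the choice of which copies to drop to get_files_to_delete(). The helper below is only a rough sketch of what that function presumably does (keep the newest keep_copies names and return the rest); the name carries a _sketch suffix because the logic is assumed here, not copied from the project.

def get_files_to_delete_sketch(backups_list, keep_copies):
    # File names are assumed to embed timestamps, so a plain sort
    # orders them oldest-first.
    ordered = sorted(backups_list)
    if keep_copies <= 0:
        return ordered               # retain nothing, delete everything
    return ordered[:-keep_copies]    # delete all but the newest keep_copies

print(get_files_to_delete_sketch(['b1.gz', 'b2.gz', 'b3.gz'], 2))  # ['b1.gz']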
Example #2
    def apply_retention_policy(self, dst, config, run_type, status):
        """
        Delete old backup copies.

        :param dst: Destination where the backups are stored.
        :type dst: BaseDestination
        :param config: Tool configuration
        :type config: ConfigParser.ConfigParser
        :param run_type: Run type.
        :type run_type: str
        :param status: Backups status.
        :type status: Status
        :return: Updated status.
        :rtype: Status
        """

        prefix = "{remote_path}/{prefix}/mysql/mysql-".format(
            remote_path=dst.remote_path, prefix=self.get_prefix())
        keep_copies = config.getint('retention', '%s_copies' % run_type)

        backups_list = dst.list_files(prefix)
        LOG.debug('Remote copies: %r', backups_list)
        for backup_copy in get_files_to_delete(backups_list, keep_copies):
            LOG.debug('Deleting remote file %s', backup_copy)
            dst.delete(backup_copy)
            try:
                status.remove(run_type, dst.basename(backup_copy))
            except StatusKeyNotFound as err:
                LOG.warning(err)
                LOG.debug('Status: %r', status)

        self._delete_local_files('mysql', config)

        return status
Example #3
def _mysql_service(dst, action):
    """Start or stop MySQL service

    :param dst: Destination server
    :type dst: Ssh
    :param action: string start or stop
    :type action: str
    """
    for service in ['mysqld', 'mysql']:
        try:
            return dst.execute_command("PATH=$PATH:/sbin sudo service %s %s" %
                                       (service, action),
                                       quiet=True)
        except SshClientException as err:
            LOG.debug(err)

    try:
        LOG.warning(
            'Failed to %s MySQL with an init script. '
            'Will try to %s mysqld.', action, action)
        if action == "start":
            ret = dst.execute_command(
                "PATH=$PATH:/sbin sudo bash -c 'nohup mysqld &'",
                background=True)
            time.sleep(10)
            return ret
        elif action == "stop":
            return dst.execute_command(
                "PATH=$PATH:/sbin sudo kill $(pidof mysqld)")
    except SshClientException as err:
        LOG.error(err)
        raise TwinDBBackupError('Failed to %s MySQL on %r' % (action, dst))
Example #4
def main(ctx, cfg, debug, config, version):
    """
    Main entry point

    :param ctx: context (See Click docs (http://click.pocoo.org/6/)
    for explanation)
    :param cfg: instance of ConfigParser
    :type cfg: ConfigParser.ConfigParser
    :param debug: if True, enable debug logging
    :type debug: bool
    :param config: path to configuration file
    :type config: str
    :param version: if True, print version string
    :type version: bool
    """
    if not ctx.invoked_subcommand:
        if version:
            print(__version__)
            exit(0)
        else:
            print(ctx.get_help())
            exit(-1)

    setup_logging(LOG, debug=debug)

    if os.path.exists(config):
        cfg.read(config)
    else:
        LOG.warning("Config file %s doesn't exist", config)
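A main() like this is normally wired up as a Click group. The decorator stack below is a hypothetical reconstruction for context only: the option names, defaults and config path are assumptions, and the cfg argument of the original (presumably injected by another decorator) is omitted.

import click

@click.group(invoke_without_command=True)
@click.option('--debug', is_flag=True, default=False,
              help='Print debug messages')
@click.option('--config', default='/etc/twindb/twindb-backup.cfg',
              help='Path to the configuration file')
@click.option('--version', is_flag=True, default=False,
              help='Print version and exit')
@click.pass_context
def main(ctx, debug, config, version):
    """Main entry point (sketch only)."""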
Example #5
def _backup_stream(config, src, dst, callbacks=None):
    stream = src.get_stream()
    # Gzip modifier
    stream = Gzip(stream).get_stream()
    src.suffix += '.gz'
    # KeepLocal modifier
    try:
        keep_local_path = config.get('destination', 'keep_local_path')
        kl_modifier = KeepLocal(stream,
                                os.path.join(keep_local_path, src.get_name()))
        stream = kl_modifier.get_stream()
        if callbacks is not None:
            callbacks.append((kl_modifier, {
                'keep_local_path': keep_local_path,
                'dst': dst
            }))
    except ConfigParser.NoOptionError:
        LOG.debug('keep_local_path is not present in the config file')
    # GPG modifier
    try:
        stream = Gpg(stream, config.get('gpg', 'recipient'),
                     config.get('gpg', 'keyring')).get_stream()
        src.suffix += '.gpg'
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        pass
    except ModifierException as err:
        LOG.warning(err)
        LOG.warning('Will skip encryption')
    if not dst.save(stream, src.get_name()):
        LOG.error('Failed to save backup copy %s', src.get_name())
        exit(1)
Example #6
    def list_files(self,
                   prefix,
                   recursive=False,
                   pattern=None,
                   files_only=False):
        """
        List files in the destination that have common prefix.

        :param prefix: Common prefix. May include the bucket name.
            (e.g. ``s3://my_bucket/foo/``) or simply a prefix in the bucket
            (e.g. ``foo/``).
        :type prefix: str
        :param recursive: Does nothing for this class.
        :param pattern: Files must match this regexp if specified.
        :type pattern: str
        :param files_only: Does nothing for this class.
        :return: Sorted list of full S3 URLs in the form
            ``s3://bucket/path/to/file``.
        :rtype: list(str)
        :raise S3DestinationError: if failed to list files.
        """
        s3client = boto3.resource('s3')
        bucket = s3client.Bucket(self.bucket)

        LOG.debug('Listing bucket %s', self.bucket)
        LOG.debug('prefix = %s', prefix)

        norm_prefix = prefix.replace('s3://%s' % bucket.name, '')
        norm_prefix = norm_prefix.lstrip('/')
        LOG.debug('normal prefix = %s', norm_prefix)

        # Try to list the bucket several times
        # because of intermittent error NoSuchBucket:
        # https://travis-ci.org/twindb/backup/jobs/204053690
        expire = time.time() + S3_READ_TIMEOUT
        retry_interval = 2
        while time.time() < expire:
            try:
                files = []
                all_objects = bucket.objects.filter(Prefix=norm_prefix)
                for file_object in all_objects:
                    if pattern:
                        if re.search(pattern, file_object.key):
                            files.append('s3://{bucket}/{key}'.format(
                                bucket=self.bucket, key=file_object.key))
                    else:
                        files.append('s3://{bucket}/{key}'.format(
                            bucket=self.bucket, key=file_object.key))

                return sorted(files)
            except ClientError as err:
                LOG.warning('%s. Will retry in %d seconds.', err,
                            retry_interval)
                time.sleep(retry_interval)
                retry_interval *= 2

        raise S3DestinationError('Failed to list files.')
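The method above inlines an exponential-backoff loop to ride out the intermittent NoSuchBucket error. Extracted as a standalone helper purely for illustration (this function does not exist in the project), the same pattern looks like:

import time

def call_with_backoff(operation, timeout=600, first_interval=2):
    # Retry `operation` with doubling sleep intervals until `timeout` expires.
    deadline = time.time() + timeout
    interval = first_interval
    while time.time() < deadline:
        try:
            return operation()
        except Exception as err:  # the code above narrows this to ClientError
            print('%s. Will retry in %d seconds.' % (err, interval))
            time.sleep(interval)
            interval *= 2
    raise RuntimeError('Gave up after %d seconds' % timeout)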
Example #7
    def _retention(self, section):
        kwargs = {}
        for i in INTERVALS:
            option = "%s_copies" % i
            try:
                kwargs[i] = self.__cfg.getint(section, option)
            except (NoOptionError, NoSectionError):
                LOG.warning("Option %s is not defined in section %s", option,
                            section)
        return RetentionPolicy(**kwargs)
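INTERVALS and RetentionPolicy are defined elsewhere in the project. The shapes below are assumptions, shown only to illustrate how the keyword arguments built by _retention() fit together; the default copy counts are made up.

from collections import namedtuple

INTERVALS = ['hourly', 'daily', 'weekly', 'monthly', 'yearly']
RetentionPolicy = namedtuple(
    'RetentionPolicy',
    INTERVALS,
    defaults=(24, 7, 4, 12, 3),     # hypothetical default copy counts
)

policy = RetentionPolicy(daily=14)  # omitted intervals fall back to defaults
print(policy.daily, policy.weekly)  # 14 4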
Example #8
def _get_status_key(status, key, variable):
    LOG.debug('status = %s', json.dumps(status, indent=4, sort_keys=True))
    LOG.debug('key = %s', key)
    try:
        for run_type in INTERVALS:
            if key in status[run_type]:
                return status[run_type][key][variable]
    except KeyError:
        pass
    LOG.warning('key %s is not found', key)
    return None
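For context, the status object walked by _get_status_key() is a nested dict keyed first by run type and then by backup name. The structure below is invented and only shows the shape the lookup expects; the field names are not taken from the project.

status = {
    'hourly': {},
    'daily': {
        'master1/daily/mysql/mysql-2016-11-23.xbstream.gz': {
            'backup_started': 1479938854,
            'backup_finished': 1479939020,
            'type': 'full',
        }
    },
}

# _get_status_key(status, 'master1/daily/mysql/mysql-2016-11-23.xbstream.gz',
#                 'type') would return 'full'.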
Example #9
    def _retention(self, section):
        kwargs = {}
        for i in INTERVALS:
            option = '%s_copies' % i
            try:
                kwargs[i] = self.__cfg.getint(section, option)
            except (NoOptionError, NoSectionError):
                LOG.warning(
                    'Option %s is not defined in section %s',
                    option,
                    section
                )
        return RetentionPolicy(**kwargs)
Example #10
def backup_files(run_type, config):
    """Backup local directories

    :param run_type: Run type
    :type run_type: str
    :param config: Configuration
    :type config: ConfigParser.ConfigParser
    """
    for directory in get_directories_to_backup(config):
        LOG.debug('copying %s', directory)
        src = FileSource(directory, run_type)
        dst = get_destination(config)

        stream = src.get_stream()

        # Gzip modifier
        stream = Gzip(stream).get_stream()
        src.suffix += '.gz'

        # KeepLocal modifier
        try:
            keep_local_path = config.get('destination', 'keep_local_path')
            # src.suffix = 'tar.gz.aaa'
            dst_name = src.get_name()
            kl_modifier = KeepLocal(stream,
                                    os.path.join(keep_local_path, dst_name))
            stream = kl_modifier.get_stream()
        except ConfigParser.NoOptionError:
            pass

        # GPG modifier
        try:
            keyring = config.get('gpg', 'keyring')
            recipient = config.get('gpg', 'recipient')
            gpg = Gpg(stream, recipient, keyring)
            stream = gpg.get_stream()
            src.suffix += '.gpg'
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            pass
        except ModifierException as err:
            LOG.warning(err)
            LOG.warning('Will skip encryption')

        dst.save(stream, src.get_name())

        src.apply_retention_policy(dst, config, run_type)
Example #11
def main(
        ctx,
        debug,  # pylint: disable=too-many-arguments
        config,
        version,
        xtrabackup_binary,
        xbstream_binary):
    """
    Main entry point

    :param ctx: context (See Click docs (http://click.pocoo.org/6/)
    for explanation)
    :param debug: if True, enable debug logging
    :type debug: bool
    :param config: path to configuration file
    :type config: str
    :param version: if True, print version string
    :type version: bool
    :param xtrabackup_binary: Path to xtrabackup binary.
    :type xtrabackup_binary: str
    :param xbstream_binary: Path to xbstream binary.
    :type xbstream_binary: str
    """
    if not ctx.invoked_subcommand:
        if version:
            print(__version__)
            exit(0)
        else:
            print(ctx.get_help())
            exit(-1)

    setup_logging(LOG, debug=debug)

    if os.path.exists(config):
        ctx.obj = {'twindb_config': TwinDBBackupConfig(config_file=config)}
        if xtrabackup_binary is not None:
            ctx.obj['twindb_config'].mysql.xtrabackup_binary = \
                xtrabackup_binary
        if xbstream_binary is not None:
            ctx.obj['twindb_config'].mysql.xbstream_binary = \
                xbstream_binary
    else:
        LOG.warning("Config file %s doesn't exist", config)
        exit(os.EX_CONFIG)
Example #12
    def clone(self, dest_host, port, compress=False):
        """
        Send backup to destination host

        :param dest_host: Destination host
        :type dest_host: str
        :param port: Port to send the backup to
        :type port: int
        :param compress: If True compress stream
        :type compress: bool
        :raise RemoteMySQLSourceError: if any error
        """
        retry = 1
        retry_time = 2
        error_log = "/tmp/{src}_{src_port}-{dst}_{dst_port}.log".format(
            src=self._ssh_client.host,
            src_port=self._ssh_client.port,
            dst=dest_host,
            dst_port=port,
        )
        if compress:
            compress_cmd = "| gzip -c - "
        else:
            compress_cmd = ""

        cmd = (
            'bash -c "sudo %s '
            "--stream=xbstream "
            "--host=127.0.0.1 "
            "--backup "
            "--target-dir ./ 2> %s"
            ' %s | ncat %s %d --send-only"'
            % (self._xtrabackup, error_log, compress_cmd, dest_host, port)
        )
        while retry < 3:
            try:
                return self._ssh_client.execute(cmd)
            except SshClientException as err:
                LOG.warning(err)
                LOG.info("Will try again in %d seconds", retry_time)
                time.sleep(retry_time)
                retry_time *= 2
                retry += 1
Example #13
    def find_files(self, prefix, run_type):
        """
        Find files with common prefix and given run type.

        :param prefix: Common prefix.
        :type prefix: str
        :param run_type: daily, hourly, etc
        :type run_type: str
        :return: list of file names
        :rtype: list(str)
        :raise S3DestinationError: if failed to find files.
        """
        s3client = boto3.resource('s3')
        bucket = s3client.Bucket(self.bucket)
        LOG.debug('Listing %s in bucket %s', prefix, bucket)

        # Try to list the bucket several times
        # because of intermittent error NoSuchBucket:
        # https://travis-ci.org/twindb/backup/jobs/204066704
        retry_timeout = time.time() + S3_READ_TIMEOUT
        retry_interval = 2
        while time.time() < retry_timeout:
            try:
                files = []
                all_objects = bucket.objects.filter(Prefix='')
                for file_object in all_objects:
                    if "/" + run_type + "/" in file_object.key:
                        files.append("s3://%s/%s" %
                                     (self.bucket, file_object.key))

                return sorted(files)
            except ClientError as err:
                LOG.warning('%s. Will retry in %d seconds.', err,
                            retry_interval)
                time.sleep(retry_interval)
                retry_interval *= 2

            except Exception as err:
                LOG.error('Failed to list objects in bucket %s: %s',
                          self.bucket, err)
                raise

        raise S3DestinationError('Failed to find files.')
Example #14
def main(ctx, cfg, debug,  # pylint: disable=too-many-arguments
         config, version,
         xtrabackup_binary=XTRABACKUP_BINARY,
         xbstream_binary=XBSTREAM_BINARY):
    """
    Main entry point

    :param ctx: context (See Click docs (http://click.pocoo.org/6/)
    for explanation)
    :param cfg: instance of ConfigParser
    :type cfg: ConfigParser.ConfigParser
    :param debug: if True, enable debug logging
    :type debug: bool
    :param config: path to configuration file
    :type config: str
    :param version: if True, print version string
    :type version: bool
    :param xtrabackup_binary: Path to xtrabackup binary.
    :type xtrabackup_binary: str
    :param xbstream_binary: Path to xbstream binary.
    :type xbstream_binary: str
    """
    if not ctx.invoked_subcommand:
        if version:
            print(__version__)
            exit(0)
        else:
            print(ctx.get_help())
            exit(-1)

    setup_logging(LOG, debug=debug)

    if os.path.exists(config):
        cfg.read(config)
        try:
            cfg.set('mysql', 'xtrabackup_binary', xtrabackup_binary)
            cfg.set('mysql', 'xbstream_binary', xbstream_binary)
        except NoSectionError:
            # if there is no mysql section, we will not backup mysql
            pass
    else:
        LOG.warning("Config file %s doesn't exist", config)
Example #15
    def _get_file_content(self, path):
        attempts = 10  # up to 1024 seconds
        sleep_time = 2
        while sleep_time <= 2**attempts:
            try:
                response = self.s3_client.get_object(Bucket=self.bucket,
                                                     Key=path)
                self.validate_client_response(response)

                content = response['Body'].read()
                return content
            except ClientError as err:
                LOG.warning('Failed to read s3://%s/%s', self.bucket, path)
                LOG.warning(err)
                LOG.info('Will try again in %d seconds', sleep_time)
                time.sleep(sleep_time)
                sleep_time *= 2
        msg = 'Failed to read s3://%s/%s after %d attempts' \
              % (self.bucket, path, attempts)
        raise TwinDBBackupError(msg)
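The loop condition sleep_time <= 2**attempts allows ten iterations, sleeping 2, 4, ..., 1024 seconds, which is what the "up to 1024 seconds" comment refers to. The arithmetic:

waits = [2 ** n for n in range(1, 11)]  # sleep times used by the loop above
print(waits)                            # [2, 4, 8, ..., 1024]
print(sum(waits))                       # 2046 seconds slept in the worst case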
Example #16
    def apply_retention_policy(self, dst, config, run_type, status):
        """
        Delete old backup copies.

        :param dst: Destination where the backups are stored.
        :type dst: BaseDestination
        :param config: Tool configuration
        :type config: TwinDBBackupConfig
        :param run_type: Run type.
        :type run_type: str
        :param status: Backups status.
        :type status: Status
        :return: Updated status.
        :rtype: Status
        """

        prefix = osp.join(
            dst.remote_path,
            self.get_prefix(),
            'mysql'
        )
        keep_copies = getattr(config.retention, run_type)

        backups_list = dst.list_files(
            prefix,
            files_only=True
        )
        LOG.debug('Remote copies: %r', backups_list)
        for backup_file in get_files_to_delete(backups_list, keep_copies):
            LOG.debug('Deleting remote file %s', backup_file)
            dst.delete(backup_file)
            try:
                status.remove(backup_file)

            except StatusKeyNotFound as err:
                LOG.warning(err)
                LOG.debug('Status: %r', status)

        self._delete_local_files('mysql', config)

        return status
Example #17
File: cli.py Project: twindb/backup
def main(ctx, debug,  # pylint: disable=too-many-arguments
         config, version,
         xtrabackup_binary, xbstream_binary):
    """
    Main entry point

    :param ctx: context (See Click docs (http://click.pocoo.org/6/)
    for explanation)
    :param debug: if True, enable debug logging
    :type debug: bool
    :param config: path to configuration file
    :type config: str
    :param version: if True, print version string
    :type version: bool
    :param xtrabackup_binary: Path to xtrabackup binary.
    :type xtrabackup_binary: str
    :param xbstream_binary: Path to xbstream binary.
    :type xbstream_binary: str
    """
    if not ctx.invoked_subcommand:
        if version:
            print(__version__)
            exit(0)
        else:
            print(ctx.get_help())
            exit(-1)

    setup_logging(LOG, debug=debug)

    if os.path.exists(config):
        ctx.obj = {
            'twindb_config': TwinDBBackupConfig(config_file=config)
        }
        if xtrabackup_binary is not None:
            ctx.obj['twindb_config'].mysql.xtrabackup_binary = \
                xtrabackup_binary
        if xbstream_binary is not None:
            ctx.obj['twindb_config'].mysql.xbstream_binary = \
                xbstream_binary
    else:
        LOG.warning("Config file %s doesn't exist", config)
Example #18
    def clone(self, dest_host, port, compress=False):
        """
        Send backup to destination host

        :param dest_host: Destination host
        :type dest_host: str
        :param port: Port to send the backup to
        :type port: int
        :param compress: If True compress stream
        :type compress: bool
        :raise RemoteMySQLSourceError: if any error
        """
        retry = 1
        retry_time = 2
        error_log = "/tmp/{src}_{src_port}-{dst}_{dst_port}.log".format(
            src=self._ssh_client.host,
            src_port=self._ssh_client.port,
            dst=dest_host,
            dst_port=port
        )
        if compress:
            compress_cmd = "| gzip -c - "
        else:
            compress_cmd = ""

        cmd = "bash -c \"sudo %s " \
              "--stream=xbstream " \
              "--host=127.0.0.1 " \
              "--backup " \
              "--target-dir ./ 2> %s" \
              " %s | ncat %s %d --send-only\"" \
              % (self._xtrabackup, error_log, compress_cmd, dest_host, port)
        while retry < 3:
            try:
                return self._ssh_client.execute(cmd)
            except SshClientException as err:
                LOG.warning(err)
                LOG.info('Will try again in %d seconds', retry_time)
                time.sleep(retry_time)
                retry_time *= 2
                retry += 1
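clone() pushes the xbstream output through ncat, so a receiver must already be listening on dest_host. The command below is a hypothetical counterpart in the same style as these snippets; the port value, the xbstream options and the datadir path are assumptions, not taken from the project.

port = 9999   # must match the port passed to clone()
recv_cmd = (
    'bash -c "sudo ncat -l %d --recv-only'
    ' | sudo xbstream -x -C /var/lib/mysql"' % port
)
# run on dest_host (for example over SSH) before clone() starts streaming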
Example #19
File: s3.py Project: twindb/backup
    def _get_file_content(self, path):
        attempts = 10  # up to 1024 seconds
        sleep_time = 2
        while sleep_time <= 2**attempts:
            try:
                response = self.s3_client.get_object(
                    Bucket=self._bucket,
                    Key=path
                )
                self.validate_client_response(response)

                content = response['Body'].read()
                return content
            except ClientError as err:
                LOG.warning('Failed to read s3://%s/%s', self._bucket, path)
                LOG.warning(err)
                LOG.info('Will try again in %d seconds', sleep_time)
                time.sleep(sleep_time)
                sleep_time *= 2
        msg = 'Failed to read s3://%s/%s after %d attempts' \
              % (self._bucket, path, attempts)
        raise OperationError(msg)
Example #20
    def list_files(self, prefix, recursive=False):
        """
        List files in the destination that have common prefix.

        :param prefix: Common prefix.
        :type prefix: str
        :param recursive: Does nothing for this class.
        :return: sorted list of file names
        :rtype: list(str)
        :raise S3DestinationError: if failed to list files.
        """
        s3client = boto3.resource('s3')
        bucket = s3client.Bucket(self.bucket)

        LOG.debug('Listing %s in bucket %s', prefix, self.bucket)

        norm_prefix = prefix.replace('s3://%s/' % bucket.name, '')
        LOG.debug('norm_prefix = %s', norm_prefix)

        # Try to list the bucket several times
        # because of intermittent error NoSuchBucket:
        # https://travis-ci.org/twindb/backup/jobs/204053690
        retry_timeout = time.time() + S3_READ_TIMEOUT
        retry_interval = 2
        while time.time() < retry_timeout:
            try:
                files = []
                all_objects = bucket.objects.filter(Prefix=norm_prefix)
                for file_object in all_objects:
                    files.append(file_object.key)
                return sorted(files)
            except ClientError as err:
                LOG.warning('%s. Will retry in %d seconds.', err,
                            retry_interval)
                time.sleep(retry_interval)
                retry_interval *= 2

        raise S3DestinationError('Failed to list files.')
Example #21
        def _download_object(s3_client, bucket_name, key, read_fd, write_fd):
            # The read end of the pipe must be closed in the child process
            # before we start writing to it.
            os.close(read_fd)

            with os.fdopen(write_fd, 'wb') as w_pipe:
                try:
                    retry_interval = 2
                    for _ in xrange(10):
                        try:
                            s3_client.download_fileobj(bucket_name, key,
                                                       w_pipe)
                            return
                        except ClientError as err:
                            LOG.warning(err)
                            LOG.warning('Will retry in %d seconds',
                                        retry_interval)
                            time.sleep(retry_interval)
                            retry_interval *= 2

                except IOError as err:
                    LOG.error(err)
                    exit(1)
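_download_object() is the child half of a fork-and-pipe download: the parent keeps the read end and consumes the object as it arrives. A minimal, self-contained sketch of that split is shown below; the function name and the write_object_to callable are placeholders, not the project's API.

import os

def stream_via_pipe(write_object_to):
    # write_object_to(fileobj) is any callable that writes the payload,
    # e.g. a wrapper around s3_client.download_fileobj(bucket, key, fileobj).
    read_fd, write_fd = os.pipe()
    pid = os.fork()
    if pid == 0:
        # Child: close the read end, then write the object into the pipe.
        os.close(read_fd)
        with os.fdopen(write_fd, 'wb') as w_pipe:
            write_object_to(w_pipe)
        os._exit(0)
    # Parent: close the write end and hand back a readable stream.
    os.close(write_fd)
    return os.fdopen(read_fd, 'rb'), pid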
Example #22
    def _get_file_content(self, path):
        attempts = 10  # up to 1024 seconds
        sleep_time = 2
        while sleep_time <= 2**attempts:
            try:
                response = self.s3_client.get_object(Bucket=self._bucket,
                                                     Key=path)
                self.validate_client_response(response)

                content = response["Body"].read()
                return content
            except ClientError as err:
                LOG.warning("Failed to read s3://%s/%s", self._bucket, path)
                LOG.warning(err)
                LOG.info("Will try again in %d seconds", sleep_time)
                time.sleep(sleep_time)
                sleep_time *= 2
        msg = "Failed to read s3://%s/%s after %d attempts" % (
            self._bucket,
            path,
            attempts,
        )
        raise OperationError(msg)
Example #23
def _mysql_service(dst, action):
    """Start or stop MySQL service

    :param dst: Destination server
    :type dst: Ssh
    :param action: string start or stop
    :type action: str
    """
    for service in ['mysqld', 'mysql']:
        try:
            return dst.execute_command(
                "PATH=$PATH:/sbin sudo service %s %s" % (service, action),
                quiet=True
            )
        except SshClientException as err:
            LOG.debug(err)

    try:
        LOG.warning('Failed to %s MySQL with an init script. '
                    'Will try to %s mysqld.', action, action)
        if action == "start":
            ret = dst.execute_command(
                "PATH=$PATH:/sbin sudo bash -c 'nohup mysqld &'",
                background=True
            )
            time.sleep(10)
            return ret
        elif action == "stop":
            return dst.execute_command(
                "PATH=$PATH:/sbin sudo kill $(pidof mysqld)"
            )
    except SshClientException as err:
        LOG.error(err)
        raise OperationError(
            'Failed to %s MySQL on %r'
            % (action, dst)
        )
Example #24
    def clone(self, dest_host, port, compress=False):
        """
        Send backup to destination host

        :param dest_host: Destination host
        :type dest_host: str
        :param port: Port to send the backup to
        :type port: int
        :param compress: If True compress stream
        :type compress: bool
        :raise RemoteMySQLSourceError: if any error
        """
        retry = 1
        retry_time = 2
        error_log = "/tmp/{src}_{src_port}-{dst}_{dst_port}.log".format(
            src=self._ssh_client.ssh_connect_info.host,
            src_port=self._ssh_client.ssh_connect_info.port,
            dst=dest_host,
            dst_port=port
        )
        if compress:
            compress_cmd = "| gzip -c - "
        else:
            compress_cmd = ""

        cmd = "bash -c \"sudo innobackupex --stream=xbstream ./ 2> %s" \
              " %s | nc %s %d\"" \
              % (error_log, compress_cmd, dest_host, port)
        while retry < 3:
            try:
                return self._ssh_client.execute(cmd)
            except SshClientException as err:
                LOG.warning(err)
                LOG.info('Will try again in %d seconds', retry_time)
                time.sleep(retry_time)
                retry_time *= 2
                retry += 1
Example #25
File: s3.py Project: twindb/backup
        def _download_object(s3_client, bucket_name, key, read_fd, write_fd):
            # The read end of the pipe must be closed in the child process
            # before we start writing to it.
            os.close(read_fd)

            with os.fdopen(write_fd, 'wb') as w_pipe:
                try:
                    retry_interval = 2
                    for _ in xrange(10):
                        try:
                            s3_client.download_fileobj(bucket_name,
                                                       key,
                                                       w_pipe)
                            return
                        except ClientError as err:
                            LOG.warning(err)
                            LOG.warning('Will retry in %d seconds',
                                        retry_interval)
                            time.sleep(retry_interval)
                            retry_interval *= 2

                except IOError as err:
                    LOG.error(err)
                    exit(1)
Example #26
def backup_mysql(run_type, config):
    """Take backup of local MySQL instance

    :param run_type: Run type
    :type run_type: str
    :param config: Tool configuration
    :type config: ConfigParser.ConfigParser
    :return: None
    """
    try:
        if not config.getboolean('source', 'backup_mysql'):
            raise TwinDBBackupError('MySQL backups are not enabled in config')

    except (ConfigParser.NoOptionError, TwinDBBackupError) as err:
        LOG.debug(err)
        LOG.debug('Not backing up MySQL')
        return

    dst = get_destination(config)

    try:
        full_backup = config.get('mysql', 'full_backup')
    except ConfigParser.NoOptionError:
        full_backup = 'daily'
    backup_start = time.time()
    src = MySQLSource(
        MySQLConnectInfo(config.get('mysql', 'mysql_defaults_file')), run_type,
        full_backup, dst)

    callbacks = []
    stream = src.get_stream()
    src_name = src.get_name()

    # Gzip modifier
    stream = Gzip(stream).get_stream()
    src_name += '.gz'

    # KeepLocal modifier
    try:
        keep_local_path = config.get('destination', 'keep_local_path')
        kl_modifier = KeepLocal(stream, os.path.join(keep_local_path,
                                                     src_name))
        stream = kl_modifier.get_stream()

        callbacks.append((kl_modifier, {
            'keep_local_path': keep_local_path,
            'dst': dst
        }))

    except ConfigParser.NoOptionError:
        LOG.debug('keep_local_path is not present in the config file')

    # GPG modifier
    try:
        stream = Gpg(stream, config.get('gpg', 'recipient'),
                     config.get('gpg', 'keyring')).get_stream()
        src_name += '.gpg'
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        pass
    except ModifierException as err:
        LOG.warning(err)
        LOG.warning('Will skip encryption')

    if not dst.save(stream, src_name):
        LOG.error('Failed to save backup copy %s', src_name)
        exit(1)
    status = prepare_status(dst, src, run_type, src_name, backup_start)

    src.apply_retention_policy(dst, config, run_type, status)

    dst.status(status)

    LOG.debug('Callbacks are %r', callbacks)
    for callback in callbacks:
        callback[0].callback(**callback[1])
Example #27
File: s3.py Project: twindb/backup
    def list_files(self, prefix=None, recursive=False, pattern=None,
                   files_only=False):
        """
        List files in the destination that have common prefix.

        :param prefix: Common prefix. May include the bucket name.
            (e.g. ``s3://my_bucket/foo/``) or simply a prefix in the bucket
            (e.g. ``foo/``).
        :type prefix: str
        :param recursive: Does nothing for this class.
        :param pattern: Files must match this regexp if specified.
        :type pattern: str
        :param files_only: Does nothing for this class.
        :return: Sorted list of full S3 URLs in the form
            ``s3://bucket/path/to/file``.
        :rtype: list(str)
        :raise S3DestinationError: if failed to list files.
        """
        s3client = boto3.resource('s3')
        bucket = s3client.Bucket(self._bucket)

        LOG.debug('Listing bucket %s', self._bucket)
        LOG.debug('prefix = %s', prefix)

        norm_prefix = prefix.replace('s3://%s' % bucket.name, '')
        norm_prefix = norm_prefix.lstrip('/')
        LOG.debug('normal prefix = %s', norm_prefix)

        # Try to list the bucket several times
        # because of intermittent error NoSuchBucket:
        # https://travis-ci.org/twindb/backup/jobs/204053690
        expire = time.time() + S3_READ_TIMEOUT
        retry_interval = 2
        while time.time() < expire:
            try:
                files = []
                all_objects = bucket.objects.filter(Prefix=norm_prefix)
                for file_object in all_objects:
                    if pattern:
                        if re.search(pattern, file_object.key):
                            files.append(
                                's3://{bucket}/{key}'.format(
                                    bucket=self._bucket,
                                    key=file_object.key
                                )
                            )
                    else:
                        files.append(
                            's3://{bucket}/{key}'.format(
                                bucket=self._bucket,
                                key=file_object.key
                            )
                        )

                return sorted(files)
            except ClientError as err:
                LOG.warning(
                    '%s. Will retry in %d seconds.',
                    err,
                    retry_interval
                )
                time.sleep(retry_interval)
                retry_interval *= 2

        raise S3DestinationError('Failed to list files.')