Example #1
def backup_files(run_type, config):
    """Backup local directories

    :param run_type: Run type
    :type run_type: str
    :param config: Configuration
    :type config: TwinDBBackupConfig
    """
    backup_start = time.time()
    try:
        for directory in config.backup_dirs:
            LOG.debug('copying %s', directory)
            src = FileSource(directory, run_type)
            dst = config.destination()
            _backup_stream(config, src, dst)
            src.apply_retention_policy(dst, config, run_type)
    except (
            DestinationError,
            SourceError,
            SshClientException
    ) as err:
        raise OperationError(err)
    export_info(config, data=time.time() - backup_start,
                category=ExportCategory.files,
                measure_type=ExportMeasureType.backup)
Example #2
def backup_everything(run_type, twindb_config, binlogs_only=False):
    """
    Run backup job

    :param run_type: hourly, daily, etc
    :type run_type: str
    :param twindb_config: ConfigParser instance
    :type twindb_config: TwinDBBackupConfig
    :param binlogs_only: If True copy only MySQL binary logs.
    :type binlogs_only: bool
    """
    set_open_files_limit()

    try:
        if not binlogs_only:
            backup_start = time.time()
            backup_files(run_type, twindb_config)
            backup_mysql(run_type, twindb_config)
            backup_binlogs(run_type, twindb_config)
            end = time.time()
            save_measures(backup_start, end)
        else:
            backup_binlogs(run_type, twindb_config)
    except ConfigParser.NoSectionError as err:
        LOG.debug(traceback.format_exc())
        LOG.error(err)
        exit(1)
Example #3
    def get_connection(self):
        """
        Connect to MySQL host and yield a connection.

        :return: MySQL connection
        :raise MySQLSourceError: if can't connect to server
        """
        connection = None
        try:
            connection = pymysql.connect(
                host=self.hostname,
                read_default_file=self.defaults_file,
                connect_timeout=self.connect_timeout,
                cursorclass=pymysql.cursors.DictCursor
            )

            yield connection
        except OperationalError:
            LOG.error(
                "Can't connect to MySQL server on %s",
                self.hostname)
            raise MySQLSourceError(
                "Can't connect to MySQL server on %s"
                % self.hostname)
        finally:
            if connection:
                connection.close()
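get_connection() above yields a connection, and Example #5 consumes it with a "with" statement, so in the class definition it is evidently wrapped with contextlib.contextmanager. A minimal sketch of that pattern (illustrative names, not the real twindb_backup classes):

from contextlib import contextmanager

import pymysql


class MySQLSourceSketch(object):
    hostname = 'localhost'

    @contextmanager
    def get_connection(self):
        # Connect, hand the connection to the caller, always close it.
        connection = pymysql.connect(host=self.hostname)
        try:
            yield connection
        finally:
            connection.close()

# usage, as in Example #5:
# with MySQLSourceSketch().get_connection() as connection:
#     ...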
Example #4
    def _handle_failure_exec(self, err, stderr_file):
        """Clean up after a failed xtrabackup run"""
        LOG.error(err)
        LOG.error('Failed to run xtrabackup. '
                  'Check error output in %s', stderr_file.name)
        self.dst.delete(self.get_name())
        exit(1)
Example #5
    def disable_wsrep_desync(self):
        """
        Wait until wsrep_local_recv_queue drains to zero,
        then disable wsrep_desync
        """
        max_time = time.time() + 900
        try:
            with self.get_connection() as connection:
                with connection.cursor() as cursor:
                    while time.time() < max_time:
                        cursor.execute("SHOW GLOBAL STATUS LIKE "
                                       "'wsrep_local_recv_queue'")

                        res = {r['Variable_name'].lower(): r['Value'].lower()
                               for r in cursor.fetchall()}

                        if not res.get('wsrep_local_recv_queue'):
                            raise Exception('Unknown status variable '
                                            '"wsrep_local_recv_queue"')

                        if int(res['wsrep_local_recv_queue']) == 0:
                            break

                        time.sleep(1)

                    LOG.debug('Disabling wsrep_desync')
                    cursor.execute("SET GLOBAL wsrep_desync=OFF")
        except pymysql.Error as err:
            LOG.error(err)
Example #6
def storage_server(docker_client, container_network):

    bootstrap_script = '/twindb-backup/support/bootstrap/storage_server.sh'
    container = get_container(
        'storage_server',
        docker_client,
        container_network,
        bootstrap_script=bootstrap_script,
        image="centos:centos7",
        last_n=2
    )

    timeout = time.time() + 30 * 60

    while time.time() < timeout:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if sock.connect_ex((container['ip'], 22)) == 0:
            break
        time.sleep(1)

    yield container

    if container:
        LOG.info('Removing container %s', container['Id'])
        docker_client.api.remove_container(container=container['Id'],
                                           force=True)
Example #7
    def get_stream(self):
        """
        Get a PIPE handler with content of the source
        :return:
        """
        cmd = [
            self._xtrabackup,
            "--defaults-file=%s" % self._connect_info.defaults_file,
            "--stream=xbstream",
            "--host=127.0.0.1",
            "--backup"
        ]
        cmd += ["--target-dir", "."]
        if self.is_galera():
            cmd.append("--galera-info")
            cmd.append("--no-backup-locks")
        if self.incremental:
            cmd += [
                "--incremental-basedir",
                ".",
                "--incremental-lsn=%d" % self._parent_lsn
            ]
        # If this is a Galera node, an additional step needs to be taken to
        # prevent the backups from locking up the cluster.
        wsrep_desynced = False
        stderr_file = tempfile.NamedTemporaryFile(delete=False)
        try:
            if self.is_galera():
                wsrep_desynced = self.enable_wsrep_desync()

            LOG.debug('Running %s', ' '.join(cmd))
            proc_xtrabackup = Popen(cmd,
                                    stderr=stderr_file,
                                    stdout=PIPE)

            yield proc_xtrabackup.stdout

            proc_xtrabackup.communicate()
            if proc_xtrabackup.returncode:
                LOG.error('Failed to run xtrabackup. '
                          'Check error output in %s', stderr_file.name)
                try:
                    if LOG.debug_enabled:
                        with open(stderr_file.name) as xb_out:
                            for line in xb_out:
                                print(line, end='', file=sys.stderr)
                except AttributeError:
                    pass
                self.dst.delete(self.get_name())
                exit(1)
            else:
                LOG.debug('Successfully streamed xtrabackup output')
            self._update_backup_info(stderr_file)
        except OSError as err:
            LOG.error('Failed to run %s: %s', cmd, err)
            exit(1)
        finally:
            if wsrep_desynced:
                self.disable_wsrep_desync()
Example #8
    def setup_slave(self, master_info):  # noqa # pylint: disable=too-many-arguments
        """
        Change master

        :param master_info: Master details.
        :type master_info: MySQLMasterInfo

        """
        try:
            with self._cursor() as cursor:
                query = "CHANGE MASTER TO " \
                        "MASTER_HOST = '{master}', " \
                        "MASTER_USER = '******', " \
                        "MASTER_PORT = {port}, " \
                        "MASTER_PASSWORD = '******', " \
                        "MASTER_LOG_FILE = '{binlog}', " \
                        "MASTER_LOG_POS = {binlog_pos}"\
                    .format(
                        master=master_info.host,
                        user=master_info.user,
                        password=master_info.password,
                        binlog=master_info.binlog,
                        binlog_pos=master_info.binlog_position,
                        port=master_info.port
                    )
                cursor.execute(query)
                cursor.execute("START SLAVE")
            return True
        except pymysql.Error as err:
            LOG.debug(err)
            return False
Example #9
    def apply_backup(self, datadir):
        """
        Apply backup of destination server

        :param datadir: Path to datadir
        :return: Binlog file name and position
        :rtype: tuple
        :raise RemoteMySQLSourceError: if any error.
        """
        try:
            use_memory = "--use-memory %d" % int(self._mem_available() / 2)
        except OSError:
            use_memory = ""
        logfile_path = "/tmp/xtrabackup-apply-log.log"
        cmd = "sudo {xtrabackup} --prepare --apply-log-only " \
              "--target-dir {target_dir} {use_memory} " \
              "> {logfile} 2>&1" \
              "".format(
                  xtrabackup=self._xtrabackup,
                  target_dir=datadir,
                  use_memory=use_memory,
                  logfile=logfile_path
              )

        try:
            self._ssh_client.execute(cmd)
            self._ssh_client.execute("sudo chown -R mysql %s" % datadir)
            return self._get_binlog_info(datadir)
        except SshClientException as err:
            LOG.debug("Logfile is:")
            LOG.debug(self._ssh_client.get_text_content(logfile_path))
            raise RemoteMySQLSourceError(err)
Example #10
    def backup_mysql(self):
        """Flag to backup MySQL or not"""
        try:
            return self.__cfg.getboolean('source', 'backup_mysql')
        except NoOptionError:
            return False
        except NoSectionError as err:
            LOG.error("Section 'source' is mandatory")
            raise ConfigurationError(err)
Example #11
    def backup_dirs(self):
        """Directories to backup"""
        try:
            dirs = self.__cfg.get('source', 'backup_dirs')
            return split(dirs)
        except NoOptionError:
            return []
        except NoSectionError as err:
            LOG.error("Section 'source' is mandatory")
            raise ConfigurationError(err)
Example #12
    def _update_backup_info(self, stderr_file):
        """Update backup_info from stderr"""

        LOG.debug('xtrabackup error log file %s',
                  stderr_file.name)
        self._backup_info.lsn = self._get_lsn(stderr_file.name)
        self._backup_info.binlog_coordinate = self.get_binlog_coordinates(
            stderr_file.name
        )
        os.unlink(stderr_file.name)
Example #13
    def clone_config(self, dst):
        """
        Clone config to destination server

        :param dst: Destination server
        :type dst: Ssh
        """
        cfg_path = self._get_root_my_cnf()
        LOG.debug("Root my.cnf is: %s", cfg_path)
        self._save_cfg(dst, cfg_path)
Example #14
def set_open_files_limit():
    """Detect maximum supported number of open file and set it"""
    max_files = getrlimit(RLIMIT_NOFILE)[0]
    while True:
        try:
            setrlimit(RLIMIT_NOFILE, (max_files, max_files))
            max_files += 1
        except ValueError:
            break
    # The loop exits after the first rejected value, so the last accepted
    # limit is one below the counter.
    LOG.debug('Setting max files limit to %d', max_files - 1)
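The probing loop above raises the soft limit one step at a time until the kernel refuses. A single-step sketch of the same idea, assuming the soft limit may be raised up to the reported hard limit:

from resource import getrlimit, setrlimit, RLIMIT_NOFILE, RLIM_INFINITY

soft, hard = getrlimit(RLIMIT_NOFILE)
if hard != RLIM_INFINITY:
    # An unprivileged process may raise its soft limit up to the hard limit.
    setrlimit(RLIMIT_NOFILE, (hard, hard))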
Example #15
    def save(self, handler, filepath):
        """
        Read from handler and save it to Amazon S3

        :param filepath: save backup copy in a file with this name
        :param handler: stdout handler from backup source
        """
        with handler as file_obj:
            ret = self._upload_object(file_obj, filepath)
            LOG.debug('Returning code %d', ret)
Example #16
def kill_children():
    """
    Kill child processes
    """
    for proc in multiprocessing.active_children():
        LOG.info('Terminating %r [%d] ...', proc, proc.pid)
        proc.terminate()
    parent = psutil.Process(os.getpid())
    for child in parent.children(recursive=True):
        LOG.info('Terminating process %r', child)
        child.kill()
Example #17
def share_backup(ctx, s3_url):
    """Share backup copy for download"""
    if not s3_url:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(ctx.obj['twindb_config'])
        exit(1)
    try:
        share(ctx.obj['twindb_config'], s3_url)
    except TwinDBBackupError as err:
        LOG.error(err)
        exit(1)
Example #18
def _print_binlog(dst):
    dst_files = dst.list_files(
        dst.remote_path,
        pattern='/binlog/',
        recursive=True,
        files_only=True
    )
    if dst_files:
        LOG.info('Binary logs:')
        for copy in dst_files:
            print(copy)
Example #19
def _print_media_type(dst, media_type):
    for run_type in INTERVALS:
        pattern = "/%s/%s/" % (run_type, media_type)
        dst_files = dst.list_files(
            dst.remote_path,
            pattern=pattern,
            recursive=True,
            files_only=True
        )
        if dst_files:
            LOG.info('%s %s copies:', media_type, run_type)
            for copy in dst_files:
                print(copy)
Example #20
    def create_bucket(self):
        """Creates the bucket in gcs that will store the backups.

        :raises GCSDestinationError: if the bucket could not be created
            or authentication fails.
        """
        try:
            self._gcs_client.create_bucket(bucket_name=self.bucket)

        except (GoogleAPIError, GoogleAuthError) as err:
            raise GCSDestinationError(err)

        LOG.info('Created bucket %s', self.bucket)
Example #21
    def _retention(self, section):
        kwargs = {}
        for i in INTERVALS:
            option = '%s_copies' % i
            try:
                kwargs[i] = self.__cfg.getint(section, option)
            except (NoOptionError, NoSectionError):
                LOG.warning(
                    'Option %s is not defined in section %s',
                    option,
                    section
                )
        return RetentionPolicy(**kwargs)
Example #22
def bucket_name():
    travis_job_number = os.environ.get('TRAVIS_JOB_NUMBER')
    LOG.debug('TRAVIS_JOB_NUMBER=%s' % travis_job_number)

    number = random.randint(0, 1000000)
    LOG.debug('Default job number %d' % number)

    if travis_job_number:
        bucket = 'twindb-backup-test-travis-%s' % travis_job_number
    else:
        bucket = 'twindb-backup-test-travis-%d' % number

    return '%s-%s' % (bucket, time.time())
Example #23
    def _get_name(self, filename_prefix):

        LOG.debug('Suffix = %s', self.suffix)
        self._name = osp.join(
            self.get_prefix(),
            self._media_type,
            "{file}-{time}.{suffix}".format(
                file=filename_prefix,
                time=self._created_at,
                suffix=self._suffix
            )
        )
        return self._name
Example #24
    def enable_wsrep_desync(self):
        """
        Try to enable wsrep_desync

        :return: True if wsrep_desync was enabled. False if not supported
        """
        try:
            with self.get_connection() as connection:
                with connection.cursor() as cursor:
                    cursor.execute('SET GLOBAL wsrep_desync=ON')
            return True
        except pymysql.Error as err:
            LOG.debug(err)
            return False
Example #25
    def candidate_parent(self, run_type):
        """
        Find a backup copy that can be a parent

        :param run_type: See :func:`~get_backup_type`.
        :return: Backup copy or None
        :rtype: MySQLCopy
        """
        full_backup_index = INTERVALS.index(run_type)
        LOG.debug('Looking for a parent candidate for %s run', run_type)
        for i in xrange(full_backup_index, len(INTERVALS)):
            period_copies = getattr(self, INTERVALS[i])
            LOG.debug(
                'Checking %d %s copies',
                len(period_copies),
                INTERVALS[i]
            )
            for _, value in period_copies.iteritems():
                try:
                    if value.type == 'full':
                        LOG.debug('Found parent %r', value)
                        return value
                except KeyError:
                    return None
        LOG.debug('No eligible parents')
        return None
Example #26
def s3_client(bucket_name):
    LOG.debug('Bucket: %s' % bucket_name)
    client = S3(
        bucket=bucket_name,
        aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
        aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY']
    )
    try:
        assert client.create_bucket()

        yield client

    finally:

        client.delete_bucket(force=True)
Example #27
    def delete_bucket(self, force=False):
        """Delete the bucket in gcs that was storing the backups.

        :param force: If the bucket is non-empty then delete the objects
            before deleting the bucket.
        :type force: bool
        :raise GCSDestinationError: if failed to delete the bucket.
        """
        try:
            self._bucket_obj.delete(force=force)

        except (GoogleAPIError, GoogleAuthError) as err:
            raise GCSDestinationError(err)

        LOG.info('Deleted bucket %s', self.bucket)
Example #28
    def get_stream(self):
        """
        Compress the input stream and return it as the output stream

        :return: output stream handle
        """
        with self._input as input_stream:
            LOG.debug('Running %s', ' '.join(self._modifier_cmd))
            proc = Popen(
                self._modifier_cmd,
                stdin=input_stream,
                stdout=PIPE,
                stderr=PIPE
            )
            yield proc.stdout
            proc.communicate()
Example #29
def _backup_stream(config, src, dst, callbacks=None):
    """

    :param config: Tool config
    :type config: TwinDBBackupConfig
    :param src:
    :param dst:
    :param callbacks:
    :return:
    """
    stream = src.get_stream()

    # Compression modifier
    cmp_modifier = config.compression.get_modifier(stream)
    stream = cmp_modifier.get_stream()
    src.suffix += cmp_modifier.suffix

    # KeepLocal modifier
    if config.keep_local_path:
        keep_local_path = config.keep_local_path
        kl_modifier = KeepLocal(
            stream,
            osp.join(
                keep_local_path,
                src.get_name()
            )
        )
        stream = kl_modifier.get_stream()
        if callbacks is not None:
            callbacks.append((kl_modifier, {
                'keep_local_path': keep_local_path,
                'dst': dst
            }))
    else:
        LOG.debug('keep_local_path is not present in the config file')
    # GPG modifier
    if config.gpg:
        gpg_modifier = Gpg(
            stream,
            config.gpg.recipient,
            config.gpg.keyring
        )

        stream = gpg_modifier.get_stream()
        src.suffix += '.gpg'
    dst.save(stream, src.get_name())
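The function chains stream modifiers in decorator style: each modifier wraps the previous stream and exposes its own get_stream(). A self-contained sketch of that chaining, with io.BytesIO standing in for the real subprocess pipes:

import io
from contextlib import contextmanager


@contextmanager
def source_stream():
    """Stand-in for src.get_stream()."""
    yield io.BytesIO(b'backup payload')


class UpperModifier(object):
    """Stand-in for a modifier such as the compression one."""

    def __init__(self, input_stream):
        self._input = input_stream

    @contextmanager
    def get_stream(self):
        # Consume the wrapped stream and expose a transformed one.
        with self._input as stream:
            yield io.BytesIO(stream.read().upper())


stream = source_stream()
stream = UpperModifier(stream).get_stream()
with stream as final_stream:
    print(final_stream.read())  # b'BACKUP PAYLOAD'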
Example #30
    def _load(self, status_as_json):
        status = []
        try:
            status_as_obj = json.loads(status_as_json)
        except ValueError:
            raise CorruptedStatus(
                'Could not load status from a bad JSON string %s'
                % (status_as_json, )
            )

        for run_type in INTERVALS:
            for key, value in status_as_obj[run_type].iteritems():

                try:
                    host = key.split('/')[0]
                    file_name = key.split('/')[3]
                    kwargs = {
                        'type': value['type'],
                        'config': self.__serialize_config(value)
                    }
                    keys = [
                        'backup_started',
                        'backup_finished',
                        'binlog',
                        'parent',
                        'lsn',
                        'position',
                        'wsrep_provider_version',
                    ]
                    for copy_key in keys:
                        if copy_key in value:
                            kwargs[copy_key] = value[copy_key]

                    copy = MySQLCopy(
                        host,
                        run_type,
                        file_name,
                        **kwargs
                    )
                    status.append(copy)
                except IndexError as err:
                    LOG.error(err)
                    raise CorruptedStatus('Unexpected key %s' % key)

        return status
Example #31
    def execute(self, cmd, quiet=False):
        """Execute a command on a remote SSH server.

        :param cmd: Command for execution.
        :type cmd: str
        :param quiet: if quiet is True don't print error messages
        :return: Handlers of stdin, stdout and stderr
        :rtype: tuple
        :raise SshClientException: if any error

        """
        try:
            with self._shell() as shell:
                stdin_, stdout_, stderr_ = shell.exec_command(cmd)
                exit_code = stdout_.channel.recv_exit_status()
                if exit_code != 0:
                    if not quiet:
                        LOG.error("Failed while execute command %s", cmd)
                        LOG.error(stderr_.read())
                    raise SshClientException('%s exited with code %d' %
                                             (cmd, exit_code))
                return stdin_, stdout_, stderr_

        except SSHException as err:
            if not quiet:
                LOG.error('Failed to execute %s', cmd)
            raise SshClientException(err)
Example #32
    def ensure_tcp_port_listening(self, port, wait_timeout=10):
        """
        Check that tcp port is open and ready to accept connections.
        Keep checking up to wait_timeout seconds.

        :param port: TCP port that is supposed to be listening.
        :type port: int
        :param wait_timeout: wait this many seconds until the port is ready.
        :type wait_timeout: int
        :return: True if the TCP port is listening.
        :rtype: bool
        """
        stop_waiting_at = time.time() + wait_timeout
        while time.time() < stop_waiting_at:
            try:
                cmd = 'netstat -an | grep -w ^tcp | grep -w LISTEN ' \
                      '| grep -w 0.0.0.0:%d' % port
                _, cout, cerr = self.execute_command(cmd)
                LOG.debug('stdout: %s', cout.read())
                LOG.debug('stderr: %s', cerr.read())
                return True
            except SshClientException as err:
                LOG.debug(err)
                time.sleep(1)

        return False
Example #33
    def clone(self, dest_host, port, compress=False):
        """
        Send backup to destination host

        :param dest_host: Destination host
        :type dest_host: str
        :param port: Port to sending backup
        :type port: int
        :param compress: If True compress stream
        :type compress: bool
        :raise RemoteMySQLSourceError: if any error
        """
        retry = 1
        retry_time = 2
        error_log = "/tmp/{src}_{src_port}-{dst}_{dst_port}.log".format(
            src=self._ssh_client.host,
            src_port=self._ssh_client.port,
            dst=dest_host,
            dst_port=port)
        if compress:
            compress_cmd = "| gzip -c - "
        else:
            compress_cmd = ""

        cmd = "bash -c \"sudo %s " \
              "--stream=xbstream " \
              "--host=127.0.0.1 " \
              "--backup " \
              "--target-dir ./ 2> %s" \
              " %s | ncat %s %d --send-only\"" \
              % (self._xtrabackup, error_log, compress_cmd, dest_host, port)
        while retry < 3:
            try:
                return self._ssh_client.execute(cmd)
            except SshClientException as err:
                LOG.warning(err)
                LOG.info('Will try again in %d seconds', retry_time)
                time.sleep(retry_time)
                retry_time *= 2
                retry += 1
        raise RemoteMySQLSourceError(
            'Failed to stream backup to %s:%d' % (dest_host, port)
        )
Example #34
def verify_mysql_backup(twindb_config, dst_path, backup_file, hostname=None):
    """
    Restore mysql backup and measure time

    :param hostname: If given, verify a backup copy of this host.
    :param backup_file: Backup copy name, or "latest".
    :param dst_path: Path where the backup is restored for verification.
    :param twindb_config: tool configuration
    :type twindb_config: TwinDBBackupConfig

    """
    dst = twindb_config.destination(backup_source=hostname)
    status = MySQLStatus(dst=dst)
    copy = None

    if backup_file == "latest":
        copy = status.latest_backup
    else:
        for candidate in status:
            if backup_file.endswith(candidate.key):
                copy = candidate
                break
    if copy is None:
        return json.dumps(
            {
                'backup_copy': backup_file,
                'restore_time': 0,
                'success': False
            },
            indent=4,
            sort_keys=True)
    start_restore_time = time.time()
    success = True
    tmp_dir = tempfile.mkdtemp()

    try:

        LOG.debug('Verifying backup copy in %s', tmp_dir)
        restore_from_mysql(twindb_config, copy, dst_path, tmp_dir)
        edit_backup_my_cnf(dst_path)

    except (TwinDBBackupError, OSError, IOError) as err:

        LOG.error(err)
        LOG.debug(traceback.format_exc())
        success = False

    finally:

        shutil.rmtree(tmp_dir, ignore_errors=True)

    end_restore_time = time.time()
    restore_time = end_restore_time - start_restore_time
    return json.dumps(
        {
            'backup_copy': copy.key,
            'restore_time': restore_time,
            'success': success
        },
        indent=4,
        sort_keys=True)
Example #35
    def _load(self, status_as_json):
        status = []
        try:
            status_as_obj = json.loads(status_as_json)
        except ValueError:
            raise CorruptedStatus(
                "Could not load status from a bad JSON string %s"
                % (status_as_json,)
            )

        for run_type in INTERVALS:
            for key, value in status_as_obj[run_type].items():

                try:
                    host = key.split("/")[0]
                    file_name = key.split("/")[3]
                    kwargs = {
                        "type": value["type"],
                        "config": self.__serialize_config(value),
                    }
                    keys = [
                        "backup_started",
                        "backup_finished",
                        "binlog",
                        "parent",
                        "lsn",
                        "position",
                        "wsrep_provider_version",
                    ]
                    for copy_key in keys:
                        if copy_key in value:
                            kwargs[copy_key] = value[copy_key]

                    copy = MySQLCopy(host, run_type, file_name, **kwargs)
                    status.append(copy)
                except IndexError as err:
                    LOG.error(err)
                    raise CorruptedStatus("Unexpected key %s" % key)

        return status
Example #36
def test_init_creates_instance_from_new(status_raw_content):
    status = MySQLStatus(status_raw_content)
    assert status.version == STATUS_FORMAT_VERSION
    key = 'master1/hourly/mysql/mysql-2018-03-28_04_11_16.xbstream.gz'
    copy = MySQLCopy(
        'master1',
        'hourly',
        'mysql-2018-03-28_04_11_16.xbstream.gz',
        backup_started=1522210276,
        backup_finished=1522210295,
        binlog='mysql-bin.000001',
        parent='master1/daily/mysql/mysql-2018-03-28_04_09_53.xbstream.gz',
        lsn=19903207,
        config={
            '/etc/my.cnf':
            """[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0

server_id=100
gtid_mode=ON
log-bin=mysql-bin
log-slave-updates
enforce-gtid-consistency

[mysqld_safe]
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid
"""
        },
        position=46855,
        type='incremental')
    assert key in status.hourly
    LOG.debug("Copy %s: %r", copy.key, copy)
    LOG.debug("Copy from status %s: %r", key, status[key])
    assert status[key] == copy
Example #37
    def create_bucket(self):
        """Creates the bucket in s3 that will store the backups.

        :raise S3DestinationError: if failed to create the bucket.
        """
        bucket_exists = True

        try:
            self.s3_client.head_bucket(Bucket=self.bucket)
        except ClientError as err:
            # We come here meaning we did not find the bucket
            if err.response['ResponseMetadata']['HTTPStatusCode'] == 404:
                bucket_exists = False
            else:
                raise

        if not bucket_exists:
            LOG.info('Created bucket %s', self.bucket)
            response = self.s3_client.create_bucket(Bucket=self.bucket)
            self.validate_client_response(response)

        return True
Example #38
def docker_execute(client, container_id, cmd, tty=False):
    """Execute command in container

    :param client: Docker client class instance
    :type client: APIClient
    :param container_id: Container Id from a dictionary that get_container
        returns.
    :type container_id: str
    :param cmd: Command to execute
    :type cmd: str or list
    :param tty: Using pseudo-TTY
    :type tty: bool
    :return: A tuple with exit code and output.
    :rtype: tuple(int, str)
    """
    LOG.debug("Running %s", " ".join(cmd))
    api = client.api
    executor = api.exec_create(container_id, cmd, tty=tty)
    exec_id = executor["Id"]
    cout = api.exec_start(exec_id)
    ret = api.exec_inspect(exec_id)["ExitCode"]
    return ret, cout.decode("utf-8")
Example #39
def test__take_mysql_backup(master1, docker_client, s3_client,
                            config_content_mysql_only, client_my_cnf):
    twindb_config_dir = get_twindb_config_dir(docker_client, master1["Id"])

    twindb_config_host = "%s/twindb-backup-1.cfg" % twindb_config_dir
    twindb_config_guest = "/etc/twindb/twindb-backup-1.cfg"
    my_cnf_path = "%s/my.cnf" % twindb_config_dir

    with open(my_cnf_path, "w") as my_cnf:
        my_cnf.write(client_my_cnf)

    with open(twindb_config_host, "w") as fp:
        content = config_content_mysql_only.format(
            AWS_ACCESS_KEY_ID=os.environ["AWS_ACCESS_KEY_ID"],
            AWS_SECRET_ACCESS_KEY=os.environ["AWS_SECRET_ACCESS_KEY"],
            BUCKET=s3_client.bucket,
            daily_copies=1,
            hourly_copies=2,
            MY_CNF="/etc/twindb/my.cnf",
        )
        fp.write(content)

    cmd = [
        "twindb-backup",
        "--debug",
        "--config",
        twindb_config_guest,
        "backup",
        "hourly",
    ]
    ret, cout = docker_execute(docker_client, master1["Id"], cmd)
    assert_and_pause((ret == 0, ), cout)

    cmd = ["twindb-backup", "--config", twindb_config_guest, "status"]
    ret, cout = docker_execute(docker_client, master1["Id"], cmd)
    LOG.debug("STDOUT: %s", cout)
    key = list(json.loads(cout)["hourly"].keys())[0]

    assert_and_pause((key.endswith(".xbstream.gz"), ), key)
Example #40
def slave(docker_client, container_network, tmpdir_factory):
    try:
        platform = os.environ['PLATFORM']
    except KeyError:
        raise EnvironmentError("""The environment variable PLATFORM 
        must be defined. Allowed values are:
        * centos
        * debian
        * ubuntu
        """)
    bootstrap_script = '/twindb-backup/support/bootstrap/master/' \
                       '%s/slave.sh' % platform
    separator_pos = NODE_IMAGE.find(':')
    image_name = NODE_IMAGE[:separator_pos +
                            1] + 'slave_' + NODE_IMAGE[separator_pos + 1:]
    datadir = tmpdir_factory.mktemp('mysql')
    twindb_config_dir = tmpdir_factory.mktemp('twindb')
    container = get_container('slave',
                              docker_client,
                              container_network,
                              str(datadir),
                              twindb_config_dir=str(twindb_config_dir),
                              last_n=2,
                              image=image_name)
    try:
        timeout = time.time() + 30 * 60
        LOG.info('Waiting until port TCP/22 becomes available')
        while time.time() < timeout:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if sock.connect_ex((container['ip'], 22)) == 0:
                break
            time.sleep(1)
            LOG.info('Still waiting')
        LOG.info('Port TCP/22 is ready')
        ret, _ = docker_execute(docker_client, container['Id'], ['ls'])
        assert ret == 0

        ret, cout = docker_execute(docker_client, container['Id'],
                                   ['bash', bootstrap_script])
        print(cout)
        assert ret == 0

        yield container

    finally:

        LOG.info('Removing container %s', container['Id'])
        docker_client.api.remove_container(container=container['Id'],
                                           force=True)
Example #41
def test_take_mysql_backup_aenc_restores_full(s3_client,
                                              config_content_mysql_aenc,
                                              tmpdir):
    config = tmpdir.join('twindb-backup.cfg')
    content = config_content_mysql_aenc.format(
        AWS_ACCESS_KEY_ID=os.environ['AWS_ACCESS_KEY_ID'],
        AWS_SECRET_ACCESS_KEY=os.environ['AWS_SECRET_ACCESS_KEY'],
        BUCKET=s3_client.bucket,
        daily_copies=1,
        hourly_copies=2)
    config.write(content)
    cmd = ['twindb-backup', '--config', str(config), 'backup', 'daily']
    assert call(cmd) == 0

    cmd = ['twindb-backup', '--config', str(config), 'status']
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    cout, cerr = proc.communicate()

    LOG.debug('STDOUT: %s', cout)
    LOG.debug('STDERR: %s', cerr)

    key = json.loads(cout)['daily'].keys()[0]

    backup_copy = 's3://' + s3_client.bucket + '/' + key
    dst_dir = str(tmpdir.mkdir('dst'))
    cmd = [
        'twindb-backup', '--debug', '--config',
        str(config), 'restore', 'mysql', backup_copy, '--dst', dst_dir
    ]
    assert call(cmd) == 0
    call(['find', dst_dir])
    assert os.path.exists(dst_dir + '/ibdata1')
    assert os.path.exists(dst_dir + '/ib_logfile0')
    assert os.path.exists(dst_dir + '/ib_logfile1')
    assert os.path.exists(dst_dir + '/mysql/user.MYD')
    assert os.path.exists(dst_dir + '/backup-my.cnf')
    assert os.path.exists(dst_dir + '/xtrabackup_logfile')
    assert os.path.exists(dst_dir + '/_config/etc/my.cnf') or \
           os.path.exists(dst_dir + '/_config/etc/mysql/my.cnf')
Example #42
def pause_test(msg):
    """Pause """
    try:
        if os.environ["PAUSE_TEST"]:
            LOG.debug("Test paused")
            LOG.debug(msg)
            import time

            time.sleep(36000)
    except KeyError:
        LOG.debug(
            "Define the PAUSE_TEST environment variable if you'd like to pause the test"
        )
        LOG.debug("export PAUSE_TEST=1")
Example #43
def runner(docker_client, container_network, tmpdir_factory):
    try:
        platform = os.environ['PLATFORM']
    except KeyError:
        raise EnvironmentError("""The environment variable PLATFORM
            must be defined. Allowed values are:
            * centos
            * debian
            * ubuntu
            """)
    bootstrap_script = '/twindb-backup/support/bootstrap/master/' \
                       '%s/master1.sh' % platform

    datadir = tmpdir_factory.mktemp('mysql')
    twindb_config_dir = tmpdir_factory.mktemp('twindb')
    container = get_container(name="runner",
                              client=docker_client,
                              network=container_network,
                              bootstrap_script=bootstrap_script,
                              last_n=3,
                              twindb_config_dir=str(twindb_config_dir),
                              datadir=str(datadir))
    try:
        ret, _ = docker_execute(docker_client, container['Id'], ['ls'])
        assert ret == 0

        ret, cout = docker_execute(docker_client, container['Id'],
                                   ['bash', bootstrap_script])
        print(cout)
        assert ret == 0

        yield container

    finally:

        LOG.info('Removing container %s', container['Id'])
        docker_client.api.remove_container(container=container['Id'],
                                           force=True)
Example #44
    def add(self, path, key=None):
        # pylint: disable=line-too-long
        """Add directory to cache.
        The directory may be a full or relative path with backup copy.
        The directory name must match with a file name of the backup copy.
        If the backup copy is
        ``/path/to/backups/master1/daily/mysql/mysql-2017-05-13_22_04_06.xbstream.gz``,
        then the directory can be something like
        ``/var/tmp/mysql-2017-05-13_22_04_06.xbstream.gz/``.

        Let's say we want to add
        ``/var/tmp/mysql-2017-05-13_22_04_06.xbstream.gz/`` to the cache in
        ``/var/tmp/cache``.
        Then this method will create directory
        ``/var/tmp/cache/mysql-2017-05-13_22_04_06.xbstream.gz/``.

        If you want to save directory ``/var/tmp/foo`` in cache under
        a key name ``mysql-2017-05-13_22_04_06.xbstream.gz``
        you need to specify the key e.g.
        ``add('/var/tmp/cache', 'mysql-2017-05-13_22_04_06.xbstream.gz')``

        :param path: full or relative path
        :type path: str
        :param key: if specified the directory will be added as this key name
            in the cache
        :raise: CacheException if errors
        """
        if key:
            LOG.debug('Cache key %s', key)
            dst = os.path.join(self.path, key)
        else:
            dst = os.path.join(self.path, os.path.basename(path))

        LOG.debug('Saving content of %s in %s', path, dst)
        try:
            shutil.copytree(path, dst)
        except OSError as err:
            raise CacheException(err)
Example #45
def master1(docker_client, container_network):

    container = _get_master(1, docker_client, container_network)

    timeout = time.time() + 30 * 60

    while time.time() < timeout:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if sock.connect_ex((container['ip'], 3306)) == 0:
            break
        time.sleep(1)

    raw_container = docker_client.containers.get('master1')
    privileges_file = "/twindb-backup/vagrant/environment/puppet/" \
                      "modules/profile/files/mysql_grants.sql"
    raw_container.exec_run('bash -c "mysql mysql < %s"'
                           % privileges_file)

    yield container
    if container:
        LOG.info('Removing container %s', container['Id'])
        docker_client.api.remove_container(container=container['Id'],
                                           force=True)
Example #46
def test__take_mysql_backup_aenc_suffix_gpg(s3_client,
                                            config_content_mysql_aenc, tmpdir):
    config = tmpdir.join('twindb-backup.cfg')
    content = config_content_mysql_aenc.format(
        AWS_ACCESS_KEY_ID=os.environ['AWS_ACCESS_KEY_ID'],
        AWS_SECRET_ACCESS_KEY=os.environ['AWS_SECRET_ACCESS_KEY'],
        BUCKET=s3_client.bucket,
        daily_copies=1,
        hourly_copies=2)
    config.write(content)
    cmd = ['twindb-backup', '--config', str(config), 'backup', 'daily']
    assert call(cmd) == 0

    cmd = ['twindb-backup', '--config', str(config), 'status']
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    cout, cerr = proc.communicate()

    LOG.debug('STDOUT: %s', cout)
    LOG.debug('STDERR: %s', cerr)

    key = json.loads(cout)['daily'].keys()[0]

    assert key.endswith('xbstream.gz.gpg')
Example #47
    def is_galera(self):
        """Check if local MySQL instance is a Galera cluster

        :return: True if it's a Galera.
        :rtype: bool
        """
        try:
            with self._cursor() as cursor:
                cursor.execute("SELECT @@wsrep_on as wsrep_on")
                row = cursor.fetchone()

                return (str(row['wsrep_on']).lower() == "1" or
                        str(row['wsrep_on']).lower() == 'on')
        except pymysql.InternalError as err:
            error_code = err.args[0]
            error_message = err.args[1]

            if error_code == 1193:
                LOG.debug('Galera is not supported or not enabled')
                return False
            else:
                LOG.error(error_message)
                raise
Example #48
def _get_master(n, client, network):
    """

    :param n: 1 or 2
    :return: Container
    """
    api = client.api

    api.pull(NODE_IMAGE)
    cwd = os.getcwd()
    host_config = api.create_host_config(
        binds={
            cwd: {
                'bind': '/twindb-backup',
                'mode': 'rw',
            }
        },
        dns=['8.8.8.8']
    )

    ip = '172.%d.3.%d' % (network['second_octet'], n)
    networking_config = api.create_networking_config({
        network['NAME']: api.create_endpoint_config(ipv4_address=ip)
    })

    LOG.debug(networking_config)

    container = api.create_container(
        image=NODE_IMAGE,
        name='master%d' % n,
        ports=[22, 3306],
        host_config=host_config,
        networking_config=networking_config,
        volumes=['/twindb-backup'],
        command='bash /twindb-backup/support/clone/master%d.sh' % n
        # command='/bin/sleep 36000'
    )
    container['ip'] = ip
    LOG.info('Created container %r', container)
    try:
        api.start(container['Id'])
        LOG.info('Started %r', container)

        return container
    except APIError as err:
        LOG.error(err)
        client.api.remove_container(container=container['Id'], force=True)
Example #49
def run_backup_job(twindb_config,
                   run_type,
                   lock_file=LOCK_FILE,
                   binlogs_only=False):
    """
    Grab a lock waiting up to allowed timeout and start backup jobs

    :param twindb_config: Tool configuration
    :type twindb_config: TwinDBBackupConfig
    :param run_type: Run type
    :type run_type: str
    :param lock_file: File used as a lock
    :type lock_file: str
    :param binlogs_only: If True copy only binlogs.
    :type binlogs_only: bool
    """
    with timeout(get_timeout(run_type)):
        try:
            file_descriptor = open(lock_file, "w")
            fcntl.flock(file_descriptor, fcntl.LOCK_EX)
            LOG.debug(run_type)
            if getattr(twindb_config.run_intervals, run_type):
                backup_everything(run_type,
                                  twindb_config,
                                  binlogs_only=binlogs_only)
            else:
                LOG.debug("Not running because run_%s is no", run_type)
        except IOError as err:
            if err.errno != errno.EINTR:
                LOG.debug(traceback.format_exc())
                raise LockWaitTimeoutError(err)
            msg = "Another instance of twindb-backup is running?"
            if run_type == "hourly":
                LOG.debug(msg)
            else:
                LOG.error(msg)
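The locking above combines an alarm-based timeout() context manager with an exclusive flock(). A minimal standalone sketch of the flock part (the lock is released when the file object is closed; the lock-file path is illustrative):

import fcntl

with open('/tmp/twindb-backup-example.lock', 'w') as lock_fd:
    fcntl.flock(lock_fd, fcntl.LOCK_EX)  # blocks until the lock is free
    print('lock acquired, running backup job')
# closing the file releases the lock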
Example #50
def backup_server(docker_client, container_network):

    bootstrap_script = '/twindb-backup/support/bootstrap/backup_server.sh'
    container = get_container(
        'backup_server',
        docker_client,
        container_network,
        bootstrap_script=bootstrap_script
    )

    timeout = time.time() + 30 * 60

    while time.time() < timeout:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if sock.connect_ex((container['ip'], 22)) == 0:
            break
        time.sleep(1)

    yield container

    if container:
        LOG.info('Removing container %s', container['Id'])
        docker_client.api.remove_container(container=container['Id'],
                                           force=True)
Example #51
    def get_full_copy_name(self, file_path):
        """
        For a given backup copy find a parent. If it's a full copy
        then return itself

        :param file_path:
        :return:
        """
        try:
            for run_type in INTERVALS:
                for key in self.status()[run_type].keys():
                    if file_path.endswith(key):
                        if self.status()[run_type][key]['type'] == "full":
                            return file_path
                        else:
                            remote_part = file_path.replace(key, '')
                            parent = self.status()[run_type][key]['parent']
                            result = "%s%s" % (remote_part, parent)
                            return result
        except (TypeError, KeyError) as err:
            LOG.error('Failed to find parent of %s', file_path)
            raise DestinationError(err)

        raise DestinationError('Failed to find parent of %s' % file_path)
Example #52
def test_get_stream(gs):
    status = MySQLStatus(dst=gs)
    copy = status['master1/daily/mysql/mysql-2019-04-04_05_29_05.xbstream.gz']

    with gs.get_stream(copy) as stream:
        LOG.debug('starting reading from pipe')
        content = stream.read()
        LOG.debug('finished reading from pipe')
    assert len(content), 'Failed to read from GS'
    LOG.info('Read %d bytes', len(content))
Example #53
def get_destination(config, hostname=socket.gethostname()):
    """
    Read config and return instance of Destination class.

    :param config: Tool configuration.
    :type config: ConfigParser.ConfigParser
    :param hostname: Local hostname.
    :type hostname: str
    :return: Instance of destination class.
    :rtype: BaseDestination
    """
    destination = None
    try:
        destination = config.get('destination', 'backup_destination')
        LOG.debug('Destination in the config %s', destination)
        destination = destination.strip('"\'')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        LOG.critical("Backup destination must be specified "
                     "in the config file")
        exit(-1)

    if destination == "ssh":
        host = config.get('ssh', 'backup_host')
        try:
            port = config.get('ssh', 'port')
        except ConfigParser.NoOptionError:
            port = 22
        try:
            ssh_key = config.get('ssh', 'ssh_key')
        except ConfigParser.NoOptionError:
            ssh_key = '/root/.ssh/id_rsa'
        user = config.get('ssh', 'ssh_user')
        remote_path = config.get('ssh', 'backup_dir')
        return Ssh(SshConnectInfo(host=host, port=port, user=user,
                                  key=ssh_key),
                   remote_path=remote_path,
                   hostname=hostname)

    elif destination == "s3":
        bucket = config.get('s3', 'BUCKET').strip('"\'')
        access_key_id = config.get('s3', 'AWS_ACCESS_KEY_ID').strip('"\'')
        secret_access_key = config.get('s3',
                                       'AWS_SECRET_ACCESS_KEY').strip('"\'')
        default_region = config.get('s3', 'AWS_DEFAULT_REGION').strip('"\'')

        return S3(bucket,
                  AWSAuthOptions(access_key_id,
                                 secret_access_key,
                                 default_region=default_region),
                  hostname=hostname)

    else:
        LOG.critical('Destination %s is not supported', destination)
        exit(-1)
Example #54
def _get_status_key(status, key, variable):
    LOG.debug('status = %s', json.dumps(status, indent=4, sort_keys=True))
    LOG.debug('key = %s', key)
    try:
        for run_type in INTERVALS:
            if key in status[run_type]:
                return status[run_type][key][variable]
    except KeyError:
        pass
    LOG.warning('key %s is not found', key)
    return None
Example #55
def _match_files(files, pattern=None):
    LOG.debug('Pattern: %s', pattern)
    LOG.debug('Unfiltered files: %r', files)
    result = []
    for fil in files:
        if pattern:
            if re.search(pattern, fil):
                result.append(fil)
        else:
            result.append(fil)
    LOG.debug('Filtered files: %r', result)
    return result
Example #56
def restore_file(cfg, dst, backup_copy):
    """Restore from file backup"""
    LOG.debug('file: %r', cfg)

    if not backup_copy:
        LOG.info('No backup copy specified. Choose one from below:')
        list_available_backups(cfg)
        exit(1)

    try:
        ensure_empty(dst)
        restore_from_file(cfg, backup_copy, dst)
    except TwinDBBackupError as err:
        LOG.error(err)
        exit(1)
    except KeyboardInterrupt:
        LOG.info('Exiting...')
        kill_children()
        exit(1)