Example #1
0
    def _run(self):
        """Restore a (possibly incremental) xtrabackup from cloud storage.

        Reads the backup manifest at ``self.cloudfs_source``, walks the
        ``prev_cloudfs_source`` chain to collect the ordered list of
        incremental backups, restores the base backup into the MySQL data
        directory, applies each incremental in order, performs the final
        prepare, fixes ownership and starts MySQL.  On a replication master
        the binlog coordinates stored in the manifest are refreshed, since
        the restored master resets its binary logs.
        """
        # Apply resource's meta
        mnf = cloudfs.Manifest(cloudfs_path=self.cloudfs_source)
        bak = backup.restore(**mnf.meta)

        # Follow prev_cloudfs_source links back toward the full backup,
        # collecting incrementals in oldest-first order.
        incrementals = []
        if bak.backup_type == 'incremental':
            incrementals = [bak]
            while bak.prev_cloudfs_source:
                tmpmnf = cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source)
                bak = backup.restore(**tmpmnf.meta)
                if bak.backup_type == 'incremental':
                    incrementals.insert(0, bak)
        self.incrementals = incrementals
        # Binlog coordinates are taken from the newest backup in the chain.
        if self.incrementals:
            self.log_file = self.incrementals[-1].log_file
            self.log_pos = self.incrementals[-1].log_pos
        else:
            self.log_file = bak.log_file
            self.log_pos = bak.log_pos

        coreutils.clean_dir(__mysql__['data_dir'])

        LOG.info('Downloading the base backup (LSN: 0..%s)', bak.to_lsn)

        trn = largetransfer.Download(bak.cloudfs_source)
        trn.apply_async()

        # Pipe the download stream straight into xbstream extraction.
        streamer = xbstream.args(extract=True, directory=__mysql__['data_dir'])
        streamer.popen(stdin=trn.output)

        trn.join()

        LOG.info('Preparing the base backup')
        # redo_only keeps the prepared backup able to accept further
        # incremental deltas; the final prepare below drops it.
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     redo_only=True,
                     ibbackup='xtrabackup',
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])

        if incrementals:
            inc_dir = os.path.join(__mysql__['tmp_dir'],
                                   'xtrabackup-restore-inc')
            # enumerate replaces the original manual `i` counter; an
            # exception in the body ends the loop either way, so the
            # numbering printed in log messages is unchanged.
            for i, inc in enumerate(incrementals):
                try:
                    os.makedirs(inc_dir)
                    inc = backup.restore(inc)
                    LOG.info(
                        'Downloading incremental backup #%d (LSN: %s..%s)', i,
                        inc.from_lsn, inc.to_lsn)

                    trn = largetransfer.Download(inc.cloudfs_source)
                    trn.apply_async()

                    streamer = xbstream.args(extract=True, directory=inc_dir)
                    streamer.popen(stdin=trn.output)

                    trn.join()

                    LOG.info('Preparing incremental backup #%d', i)
                    innobackupex(__mysql__['data_dir'],
                                 apply_log=True,
                                 redo_only=True,
                                 incremental_dir=inc_dir,
                                 ibbackup='xtrabackup',
                                 user=__mysql__['root_user'],
                                 password=__mysql__['root_password'])
                finally:
                    # Always clear the staging dir so the next incremental
                    # (or a retry) starts from a fresh os.makedirs().
                    coreutils.remove(inc_dir)

        LOG.info('Preparing the full backup')
        # Final prepare without redo_only completes the apply-log phase.
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])
        coreutils.chown_r(__mysql__['data_dir'], 'mysql', 'mysql')

        self._mysql_init.start()
        if int(__mysql__['replication_master']):
            LOG.info("Master will reset it's binary logs, "
                     "so updating binary log position in backup manifest")
            log_file, log_pos = self._client().master_status()
            meta = mnf.meta
            meta.update({'log_file': log_file, 'log_pos': log_pos})
            mnf.meta = meta
            mnf.save()
Example #2
0
    def _run(self):
        """Stream a full or incremental xtrabackup to cloud storage.

        Spawns innobackupex with xbstream output, pipes its stdout through
        a largetransfer.Upload to ``self.cloudfs_target``, then parses the
        tool's stderr for the final LSN and binlog coordinates and records
        them in the transfer manifest.

        Returns a ``backup.restore`` configuration describing the created
        backup.  Raises ``Error`` when the operation was canceled
        (``self._killed``) or innobackupex exited non-zero.
        """
        self._check_backup_type()

        kwds = {
            'stream': 'xbstream',
            # Compression is broken
            #'compress': True,
            #'compress_threads': os.sysconf('SC_NPROCESSORS_ONLN'),
            'ibbackup': 'xtrabackup',
            'user': __mysql__['root_user'],
            'password': __mysql__['root_password']
        }
        if self.no_lock:
            kwds['no_lock'] = True
        if not int(__mysql__['replication_master']):
            # Backing up a slave: ask innobackupex for consistent slave
            # coordinates (safe_slave_backup pauses the SQL thread).
            kwds['safe_slave_backup'] = True
            kwds['slave_info'] = True

        # In 'auto' mode, read the current LSN out of SHOW ENGINE INNODB
        # STATUS; it is compared against from_lsn below to decide whether
        # an incremental backup would actually contain any changes.
        current_lsn = None
        if self.backup_type == 'auto':
            client = self._client()
            innodb_stat = client.fetchone('SHOW ENGINE INNODB STATUS')[0]
            #innodb_stat = client.fetchone('SHOW INNODB STATUS')[0]
            for line in innodb_stat.splitlines():
                m = self._re_lsn_innodb_stat.search(line)
                if m:
                    current_lsn = int(m.group(1))
                    break

        if self.backup_type in ('incremental', 'auto'):
            if self.prev_cloudfs_source:
                # Download manifest and get it's to_lsn
                mnf = cloudfs.Manifest(cloudfs_path=self.prev_cloudfs_source)
                self.from_lsn = mnf.meta['to_lsn']
            else:
                self._check_attr('from_lsn')
            # Go incremental when explicitly requested, or in auto mode when
            # the server has advanced past the previous backup's end LSN.
            if self.backup_type == 'incremental' or \
                (self.backup_type == 'auto' and current_lsn and current_lsn >= self.from_lsn):
                kwds.update({
                    'incremental': True,
                    'incremental_lsn': self.from_lsn
                })
        LOG.debug('self._config: %s', self._config)
        LOG.debug('kwds: %s', kwds)

        if self.backup_type == 'incremental':
            LOG.info('Creating incremental xtrabackup (from LSN: %s)',
                     self.from_lsn)
        else:
            LOG.info('Creating full xtrabackup')

        # The lock guards _xbak/_transfer creation against a concurrent
        # kill(): a cancel either observes them and stops them, or we see
        # _killed here and never start.
        with self._xbak_init_lock:
            if self._killed:
                raise Error("Canceled")
            self._xbak = innobackupex.args(__mysql__['tmp_dir'],
                                           **kwds).popen()
            gzip = self.compressor == 'gzip'
            transfer_id = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
            self._transfer = largetransfer.Upload(self._xbak.stdout,
                                                  self.cloudfs_target,
                                                  gzip=gzip,
                                                  transfer_id=transfer_id)

        # Drain stderr on a separate thread so the subprocess cannot block
        # on a full pipe buffer while we wait on the upload.
        stderr_thread, stderr = cloudfs.readfp_thread(self._xbak.stderr)

        self._transfer.apply_async()
        self._transfer.join()
        manifesto = self._transfer.manifest

        if self._killed:
            raise Error("Canceled")
        stderr_thread.join()
        self._xbak.wait()
        # readfp_thread delivers output via a one-element list container.
        stderr = stderr[0] if stderr else ''
        if self._xbak.returncode:
            raise Error(stderr)

        with self._xbak_init_lock:
            self._xbak = None
            self._transfer = None

        # Parse innobackupex's stderr for the backup's end LSN and the
        # binlog coordinates (master vs. slave use different patterns).
        log_file = log_pos = to_lsn = None
        re_binlog = self._re_binlog \
                    if int(__mysql__['replication_master']) else \
                    self._re_slave_binlog
        for line in stderr.splitlines():
            m = self._re_lsn.search(line) or self._re_lsn_51.search(line)
            if m:
                to_lsn = m.group(1)
                continue
            m = re_binlog.search(line)
            if m:
                log_file = m.group(1)
                log_pos = int(m.group(2))
                continue
            if log_file and log_pos and to_lsn:
                break

        rst = backup.restore(type='xtrabackup',
                             backup_type=self.backup_type,
                             from_lsn=self.from_lsn,
                             to_lsn=to_lsn,
                             cloudfs_source=manifesto.cloudfs_path,
                             prev_cloudfs_source=self.prev_cloudfs_source,
                             log_file=log_file,
                             log_pos=log_pos)

        # Update manifest
        LOG.debug('rst: %s', dict(rst))
        manifesto.meta = dict(rst)
        manifesto.save()

        LOG.info(
            'Created %s xtrabackup. (LSN: %s..%s, log_file: %s, log_pos: %s)',
            rst.backup_type, rst.from_lsn, rst.to_lsn, rst.log_file,
            rst.log_pos)

        return rst