Code Example #1
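A `_run` method from what appears to be a `MySQLDumpBackup` task: it lists the databases to dump, streams the dump output to cloud storage through `cloudfs.LargeTransfer`, drains stderr from each `mysqldump` subprocess for logging, and converts the transfer result into a backup result.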
    def _run(self):
        LOG.debug("Running MySQLDumpBackup")
        client = mysql_svc.MySQLClient(__mysql__['root_user'],
                                       __mysql__['root_password'])
        self._databases = client.list_databases()

        with self._run_lock:
            if self._killed:
                raise Error("Canceled")
            self.transfer = cloudfs.LargeTransfer(self._gen_src,
                                                  self._dst,
                                                  streamer=None,
                                                  chunk_size=self.chunk_size)
        result = self.transfer.run()
        if not result:
            raise Error("Error while transfering to cloud storage")

        def log_stderr(popen):
            LOG.debug("mysqldump log_stderr communicate")
            out, err = popen.communicate()
            LOG.debug("mysqldump log_stderr communicate done")
            if err:
                LOG.debug("mysqldump stderr: %s", err)

        # drain stderr of every mysqldump process; an explicit loop is
        # clearer than map() here (which would also be lazy on Python 3)
        for popen in self._popens:
            log_stderr(popen)

        if self._killed:
            raise Error("Canceled")

        result = transfer_result_to_backup_result(result)
        return result
Code Example #2
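A small helper, apparently a test step (it receives a `context` object carrying `manifest` and `tmp_dir`), exercising the old two-argument `LargeTransfer(src, dst)` form: it downloads the data behind a manifest into a fresh directory and records the resulting file.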
def download_with_old_transfer(context):
    src = context.manifest.cloudfs_path
    dst = os.path.join(context.tmp_dir, 'output')
    os.mkdir(dst)
    tr = cloudfs.LargeTransfer(src, dst)
    tr.run()
    # the download is expected to produce exactly one file in dst
    context.downloaded_files = [os.path.join(dst, os.listdir(dst)[0])]
Code Example #3
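A fuller variant of Example #1: besides streaming the dump through `LargeTransfer`, it collects each `mysqldump` process's database name, exit code, and stderr, raises if any process exited non-zero, and wraps the transfer result into a `backup.restore(type='mysqldump', ...)` object.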
    def _run(self):
        LOG.debug("Running MySQLDumpBackup")
        client = mysql_svc.MySQLClient(__mysql__['root_user'],
                                       __mysql__['root_password'])
        self._databases = client.list_databases()

        with self._run_lock:
            if self._killed:
                raise Error("Canceled")
            self.transfer = cloudfs.LargeTransfer(self._gen_src,
                                                  self._dst,
                                                  streamer=None,
                                                  chunk_size=self.chunk_size)
        result = self.transfer.run()
        if not result:
            raise Error("Error while transfering to cloud storage")

        def log_stderr(popen):
            LOG.debug("mysqldump log_stderr communicate")
            out, err = popen.communicate()
            LOG.debug("mysqldump log_stderr communicate done")
            if err:
                LOG.debug("mysqldump (code %s) stderr for %s: %s",
                          popen.returncode, popen.db_name, err)
            return popen.db_name, popen.returncode, err

        # collect the results eagerly (map() would be lazy on Python 3)
        mysqldump_results = [log_stderr(popen) for popen in self._popens]

        if self._killed:
            raise Error("Canceled")

        mysqldump_errors = []
        for db_name, retcode, err in mysqldump_results:
            if retcode:
                mysqldump_errors.append('%s: "%s"' % (db_name, err))
        if mysqldump_errors:
            raise Error("Mysqldump has returned a non-zero code.\n" +
                        '\n'.join(mysqldump_errors))

        parts = transfer_result_to_backup_result(result)
        return backup.restore(type='mysqldump',
                              cloudfs_source=result.cloudfs_path,
                              parts=parts,
                              description=self.description,
                              tags=self.tags)
Code Example #4
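Uploading an LVM snapshot: the snapshot device is mounted read-only (with `nouuid` for XFS), its used size is recorded, and the directory tree is shipped to cloud storage as a tarred, gzipped stream via `LargeTransfer(tar_it=True, gzip_it=True)`. `_snap_status` tracks the lifecycle, and the mount point and snapshot are cleaned up in `finally` blocks.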
    def upload_lvm_snapshot(self, lvm_snap, tags, path):
        """
        Method which uploads data from lvm snapshot to cloud storage and
        updates snapshot status.

        EphVolume runs this method in separate thread
        """

        try:
            self._snap_status = self.QUEUED
            mpoint = tempfile.mkdtemp()
            opts = []
            if coreutils.blkid(lvm_snap.device).get('type') == 'xfs':
                opts += ['-o', 'nouuid,ro']
            mount.mount(lvm_snap.device, mpoint, *opts)

            self.data_size = coreutils.statvfs(mpoint)['used']

            try:
                transfer = cloudfs.LargeTransfer(src=mpoint + '/',
                                                 dst=path,
                                                 tar_it=True,
                                                 gzip_it=True,
                                                 tags=tags,
                                                 transfer_id=self.id)
                self._snap_status = self.IN_PROGRESS
                manifesto = transfer.run()
                self.path = manifesto.cloudfs_path
                self._snap_status = self.COMPLETED

            finally:
                mount.umount(mpoint)
                os.rmdir(mpoint)

        except Exception:
            self._snap_status = self.FAILED
            LOG.exception('Caught error while uploading LVM snapshot')
        finally:
            lvm_snap.destroy()
Code Example #5
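The `_ensure` method of an ephemeral LVM-backed volume: missing attributes are filled in from the snapshot config, the LVM volume is built and its filesystem re-created, and, if a snapshot is set, its data is restored by downloading with `LargeTransfer` into a (possibly temporary) mount point after a free-space check.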
    def _ensure(self):
        # The snapshot is applied after layout: download and extract data.
        # This can be done on an already ensured volume
        # (for example, to resync slave data).

        if not self._lvm_volume:
            # First of all, merge self config and snapshot config
            self.snap = storage2.snapshot(self.snap) if self.snap else None

            for attr in ('disk', 'fstype', 'size', 'vg', 'mpoint'):
                if not getattr(self, attr, None):
                    if not self.snap or not getattr(self.snap, attr, None):
                        raise storage2.StorageError(
                            'Missing ephemeral volume attribute "%s"' % attr)
                    setattr(self, attr, getattr(self.snap, attr))

            self.disk = storage2.volume(self.disk)
            # Compatibility with storage v1
            if self.disk.device and self.disk.type == 'base':
                if self.disk.device.startswith('/dev/sd'):
                    self.disk = storage2.volume(type='ec2_ephemeral',
                                                name='ephemeral0')
                elif 'google' in self.disk.device:
                    self.disk = storage2.volume(type='gce_ephemeral',
                                                name='ephemeral-disk-0')

            self._lvm_volume = storage2.volume(type='lvm',
                                               pvs=[self.disk],
                                               size=self.size + 'VG',
                                               vg=self.vg,
                                               name='data')

        self._lvm_volume.ensure()
        self.device = self._lvm_volume.device
        # To allow ensure(mkfs=True, mount=True) after volume passed
        # scalarizr 1st initialization
        self.fscreated = self.is_fs_created()

        if self.snap:
            self.snap = storage2.snapshot(self.snap)
            # umount device to allow filesystem re-creation
            if self.mounted_to():
                self.umount()
            self.mkfs(force=True)

            # if no mount point is configured, mount to a temporary one;
            # tmp_mpoint doubles as a flag and as the temporary path
            tmp_mpoint = not self.mpoint
            if tmp_mpoint:
                tmp_mpoint = tempfile.mkdtemp()
                self.mpoint = tmp_mpoint

            try:
                transfer = cloudfs.LargeTransfer(self.snap.path,
                                                 self.mpoint + '/')
                self.mount()
                if hasattr(self.snap, 'data_size'):
                    fs_free = coreutils.statvfs(self.mpoint)['avail']
                    if fs_free < int(self.snap.data_size):
                        raise storage2.StorageError(
                            'Not enough free space'
                            ' on device %s to restore snapshot.' % self.device)

                result = transfer.run()
                if result.get('failed'):
                    err = result['failed'][0]['exc_info'][1]
                    raise storage2.StorageError('Failed to download snapshot '
                                                'data. %s' % err)
            except Exception as e:
                raise storage2.StorageError("Snapshot restore error: %s" % e)
            finally:
                try:
                    self.umount()
                finally:
                    if tmp_mpoint:
                        self.mpoint = None
                        os.rmdir(tmp_mpoint)

            self.snap = None
Code Example #6
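An xtrabackup restore routine: it walks the incremental-backup chain through cloudfs manifests, downloads the base backup and each incremental with `LargeTransfer` (extracting via `xbstream`), applies them with `innobackupex`, starts MySQL, and, on a replication master, writes the fresh binlog coordinates back into the manifest.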
    def _run(self):
        # Apply resource's meta
        mnf = cloudfs.Manifest(cloudfs_path=self.cloudfs_source)
        bak = backup.restore(**mnf.meta)

        incrementals = []
        if bak.backup_type == 'incremental':
            incrementals = [bak]
            while bak.prev_cloudfs_source:
                tmpmnf = cloudfs.Manifest(cloudfs_path=bak.prev_cloudfs_source)
                bak = backup.restore(**tmpmnf.meta)
                if bak.backup_type == 'incremental':
                    incrementals.insert(0, bak)
        self.incrementals = incrementals
        if self.incrementals:
            self.log_file = self.incrementals[-1].log_file
            self.log_pos = self.incrementals[-1].log_pos
        else:
            self.log_file = bak.log_file
            self.log_pos = bak.log_pos

        coreutils.clean_dir(__mysql__['data_dir'])

        LOG.info('Downloading the base backup (LSN: 0..%s)', bak.to_lsn)
        trn = cloudfs.LargeTransfer(bak.cloudfs_source,
                                    __mysql__['data_dir'],
                                    streamer=xbstream.args(
                                        extract=True,
                                        directory=__mysql__['data_dir']))
        trn.run()

        LOG.info('Preparing the base backup')
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     redo_only=True,
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])

        if incrementals:
            inc_dir = os.path.join(__mysql__['tmp_dir'],
                                   'xtrabackup-restore-inc')
            for i, inc in enumerate(incrementals):
                try:
                    os.makedirs(inc_dir)
                    inc = backup.restore(inc)
                    LOG.info(
                        'Downloading incremental backup #%d (LSN: %s..%s)', i,
                        inc.from_lsn, inc.to_lsn)
                    trn = cloudfs.LargeTransfer(inc.cloudfs_source,
                                                inc_dir,
                                                streamer=xbstream.args(
                                                    extract=True,
                                                    directory=inc_dir))

                    # TODO: LargeTransfer should support a custom decompressor proc
                    trn.run()
                    LOG.info('Preparing incremental backup #%d', i)
                    innobackupex(__mysql__['data_dir'],
                                 apply_log=True,
                                 redo_only=True,
                                 incremental_dir=inc_dir,
                                 user=__mysql__['root_user'],
                                 password=__mysql__['root_password'])
                finally:
                    coreutils.remove(inc_dir)

        LOG.info('Preparing the full backup')
        innobackupex(__mysql__['data_dir'],
                     apply_log=True,
                     user=__mysql__['root_user'],
                     password=__mysql__['root_password'])
        coreutils.chown_r(__mysql__['data_dir'], 'mysql', 'mysql')

        self._mysql_init.start()
        if int(__mysql__['replication_master']):
            LOG.info("Master will reset it's binary logs, "
                     "so updating binary log position in backup manifest")
            log_file, log_pos = self._client().master_status()
            meta = mnf.meta
            meta.update({'log_file': log_file, 'log_pos': log_pos})
            mnf.meta = meta
            mnf.save()
Code Example #7
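The corresponding xtrabackup backup routine: it pipes the `innobackupex` xbstream output directly into `LargeTransfer` toward `cloudfs_target`, parses the LSN range and binlog coordinates out of the tool's stderr, and stores the restore metadata in the transfer manifest.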
    def _run(self):
        self._check_backup_type()

        kwds = {
            'stream': 'xbstream',
            # Compression is broken
            #'compress': True,
            #'compress_threads': os.sysconf('SC_NPROCESSORS_ONLN'),
            'user': __mysql__['root_user'],
            'password': __mysql__['root_password']
        }
        if self.no_lock:
            kwds['no_lock'] = True
        if not int(__mysql__['replication_master']):
            kwds['safe_slave_backup'] = True
            kwds['slave_info'] = True

        current_lsn = None
        if self.backup_type == 'auto':
            client = self._client()
            innodb_stat = client.fetchone('SHOW INNODB STATUS')[0]
            for line in innodb_stat.splitlines():
                m = self._re_lsn_innodb_stat.search(line)
                if m:
                    current_lsn = int(m.group(1))
                    break

        if self.backup_type in ('incremental', 'auto'):
            if self.prev_cloudfs_source:
                # Download the manifest and get its to_lsn
                mnf = cloudfs.Manifest(cloudfs_path=self.prev_cloudfs_source)
                self.from_lsn = mnf.meta['to_lsn']
            else:
                self._check_attr('from_lsn')
            if (self.backup_type == 'incremental'
                    or (self.backup_type == 'auto'
                        and current_lsn and current_lsn >= self.from_lsn)):
                kwds.update({
                    'incremental': True,
                    'incremental_lsn': self.from_lsn
                })
        LOG.debug('self._config: %s', self._config)
        LOG.debug('kwds: %s', kwds)

        if self.backup_type == 'incremental':
            LOG.info('Creating incremental xtrabackup (from LSN: %s)',
                     self.from_lsn)
        else:
            LOG.info('Creating full xtrabackup')

        with self._xbak_init_lock:
            if self._killed:
                raise Error("Canceled")
            self._xbak = innobackupex.args(__mysql__['tmp_dir'],
                                           **kwds).popen()
            LOG.debug('Creating LargeTransfer, src=%s dst=%s',
                      self._xbak.stdout, self.cloudfs_target)
            self._transfer = cloudfs.LargeTransfer([self._xbak.stdout],
                                                   self.cloudfs_target,
                                                   compressor=self.compressor)
        manifesto = self._transfer.run()
        if self._killed:
            raise Error("Canceled")
        stderr = self._xbak.communicate()[1]
        if self._xbak.returncode:
            raise Error(stderr)

        with self._xbak_init_lock:
            self._xbak = None
            self._transfer = None

        log_file = log_pos = to_lsn = None
        re_binlog = (self._re_binlog
                     if int(__mysql__['replication_master'])
                     else self._re_slave_binlog)
        for line in stderr.splitlines():
            m = self._re_lsn.search(line) or self._re_lsn_51.search(line)
            if m:
                to_lsn = m.group(1)
                continue
            m = re_binlog.search(line)
            if m:
                log_file = m.group(1)
                log_pos = int(m.group(2))
                continue
            if log_file and log_pos and to_lsn:
                break

        rst = backup.restore(type='xtrabackup',
                             backup_type=self.backup_type,
                             from_lsn=self.from_lsn,
                             to_lsn=to_lsn,
                             cloudfs_source=manifesto.cloudfs_path,
                             prev_cloudfs_source=self.prev_cloudfs_source,
                             log_file=log_file,
                             log_pos=log_pos)

        # Update manifest
        LOG.debug('rst: %s', dict(rst))
        manifesto.meta = dict(rst)
        manifesto.save()

        LOG.info(
            'Created %s xtrabackup. (LSN: %s..%s, log_file: %s, log_pos: %s)',
            rst.backup_type, rst.from_lsn, rst.to_lsn, rst.log_file,
            rst.log_pos)

        return rst