Example #1
    def test__init__custom(self):
        def progress_cb(progress):
            return

        upload = largetransfer.Upload(['src'],
                                      'dst',
                                      transfer_id='xxx',
                                      description='desc',
                                      tags='tags',
                                      gzip=False,
                                      use_pigz=False,
                                      pool_size=1,
                                      chunk_size=10,
                                      progress_cb=progress_cb,
                                      cb_interval=10)
        assert upload.gzip is False
        assert upload.use_pigz is False
        assert upload._pool_size == 1
        assert upload._chunk_size == 10
        assert upload._simple is False
        assert upload._cb_interval == 10
        assert upload._progress_cb == progress_cb
        assert hasattr(upload.src, '__iter__')
        assert upload._manifest is not None
        assert upload._manifest['description'] == 'desc'
        assert upload._manifest['tags'] == 'tags'
        assert upload.dst == os.path.join('dst', 'xxx')
        assert upload._callback_thread is None
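The constructor options above map onto a small runtime surface that the remaining examples rely on: apply_async() starts the transfer, join() blocks until it completes (or raises), manifest exposes the result, and terminate() aborts it. The following is a minimal usage sketch under stated assumptions; the import path, source path, and destination URL are illustrative, not taken from the examples.

    # Minimal lifecycle sketch. Assumptions: largetransfer is importable
    # (scalarizr.storage2.cloudfs.largetransfer in the scalarizr tree) and
    # both the source path and the 'gcs://...' destination are hypothetical.
    from scalarizr.storage2.cloudfs import largetransfer

    def progress_cb(progress):
        # progress is reported as the number of bytes transferred so far
        print('uploaded %s bytes' % progress)

    upload = largetransfer.Upload(['/tmp/backup.sql'],
                                  'gcs://my-bucket/backups',
                                  progress_cb=progress_cb)
    upload.apply_async()                # start the transfer in the background
    try:
        upload.join()                   # wait; raises largetransfer.TransferError on failure
        manifest = upload.manifest      # manifest describing the uploaded chunks
    except:
        upload.terminate()              # abort and clean up on any error
        raise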
Example #2
        def do_backup(op):
            try:
                self.redis_instances.save_all()
                dbs = [r.db_path for r in self.redis_instances if r.db_path]

                cloud_storage_path = bus.platform.scalrfs.backups(
                    BEHAVIOUR)  #? __node__.platform
                LOG.info("Uploading backup to cloud storage (%s)",
                         cloud_storage_path)

                def progress_cb(progress):
                    LOG.debug('Uploading %s bytes' % progress)

                uploader = largetransfer.Upload(dbs,
                                                cloud_storage_path,
                                                progress_cb=progress_cb)
                uploader.apply_async()
                uploader.join()
                manifest = uploader.manifest

                result = transfer_result_to_backup_result(manifest)

                node.__node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='ok', backup_parts=result))

                return result  #?

            except (Exception, BaseException), e:
                LOG.exception(e)

                # Notify Scalr about error
                node.__node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='error', last_error=str(e)))
Example #3
    def test_apply_async3(self):
        """
        Test apply_async method with progress callback function
        """
        def put(*args, **kwds):
            time.sleep(2)

        cloudfs_types['file'].put = mock.MagicMock(side_effect=put)
        file_path, size, md5_sum = make_file()
        stream = open(file_path, 'rb')

        def progress_cb(progress):
            return

        upload = largetransfer.Upload([stream],
                                      tmp_dir,
                                      chunk_size=2,
                                      progress_cb=progress_cb)
        upload.apply_async()

        assert upload.running
        assert upload._callback_thread
        assert upload._callback_thread.is_alive()

        upload.join()
Example #4
    def test_terminate2(self):
        """
        Test terminate method when upload is blocked
        """
        def put(*args, **kwds):
            # call blocking method wait()
            threading.Event().wait()

        cloudfs_types['file'].put = mock.MagicMock(side_effect=put)
        file_path, size, md5_sum = make_file()
        stream = open(file_path, 'rb')

        def progress_cb(progress):
            return

        upload = largetransfer.Upload([stream],
                                      tmp_dir,
                                      chunk_size=2,
                                      progress_cb=progress_cb)
        upload.apply_async()

        # wait start
        time.sleep(5)

        assert upload.error is None, upload.error

        assert upload.running
        assert upload._callback_thread
        assert upload._callback_thread.is_alive()

        upload.terminate()

        assert upload.process is None
        assert not upload.running
        assert upload._callback_thread is None
Example #5
    def test_apply_async1(self, cloudfs_mock):
        file_path, size, md5_sum = make_file()
        stream = open(file_path, 'rb')
        upload = largetransfer.Upload([stream], tmp_dir, chunk_size=2)
        upload.apply_async()
        time.sleep(1)
        assert not upload.running
Example #6
    def test_progress(self):
        assert_flag = multiprocessing.Value('i', 0)
        progress_list = []

        def put(*args, **kwds):
            for i in range(100):
                kwds['report_to'](i, 0)
                time.sleep(0.01)
            for i in range(100):
                if i not in progress_list:
                    break
            else:
                assert_flag.value = 1

        cloudfs_types['file'].put = mock.MagicMock(side_effect=put)
        file_path, size, md5_sum = make_file()

        def _on_progress(bytes_completed, size):
            progress_list.append(bytes_completed)

        upload = largetransfer.Upload([file_path], tmp_dir, simple=True)
        upload._on_progress = _on_progress
        upload.apply_async()
        upload.join()

        assert assert_flag.value == 1
Example #7
    def test_stop1(self):
        """
        Test stop method when upload is not running
        """
        upload = largetransfer.Upload('src', 'dst')
        upload._kill = mock.MagicMock()
        upload.stop()
        assert upload._kill.call_count == 0
Example #8
def i_upload_file(context, simple):
    simple = simple == 'True'  # convert the step argument from string to bool
    sources = [source.src for source in context.sources]
    context.tr = largetransfer.Upload(sources, context.remote_dir, simple=simple)
    context.tr.apply_async()
    try:
        context.tr.join()
    except:
        context.error = sys.exc_info()
Example #9
    def do_backup(self):
        tmpdir = None
        dumps = []
        tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
        try:
            # Get databases list
            psql = PSQL(user=self.postgresql.root_user.name)
            databases = psql.list_pg_databases()
            if 'template0' in databases:
                databases.remove('template0')

            if not os.path.exists(tmp_path):
                os.makedirs(tmp_path)

            # Dump all databases
            LOG.info("Dumping all databases")
            tmpdir = tempfile.mkdtemp(dir=tmp_path)
            chown_r(tmpdir, self.postgresql.root_user.name)

            def _single_backup(db_name):
                dump_path = tmpdir + os.sep + db_name + '.sql'
                pg_args = '%s %s --no-privileges -f %s' % (PG_DUMP, db_name, dump_path)
                su_args = [SU_EXEC, '-', self.postgresql.root_user.name, '-c', pg_args]
                err = system2(su_args)[1]
                if err:
                    raise HandlerError('Error while dumping database %s: %s' % (db_name, err))  # ?
                dumps.append(dump_path)

            for db_name in databases:
                _single_backup(db_name)

            cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)

            suffix = 'master' if int(__postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
            backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

            LOG.info("Uploading backup to %s with tags %s" % (cloud_storage_path, backup_tags))

            def progress_cb(progress):
                LOG.debug('Uploading %s bytes' % progress)

            uploader = largetransfer.Upload(dumps, cloud_storage_path, progress_cb=progress_cb)
            try:
                uploader.apply_async()
                uploader.join()
                manifest = uploader.manifest

                LOG.info("Postgresql backup uploaded to cloud storage under %s", cloud_storage_path)
                LOG.debug(manifest.data)

                return transfer_result_to_backup_result(manifest)
            except:
                uploader.terminate()
                raise
        finally:
            if tmpdir:
                shutil.rmtree(tmpdir, ignore_errors=True)
Example #10
    def test_terminate1(self):
        """
        Test terminate method when upload is not running
        """
        upload = largetransfer.Upload('src', 'dst')
        upload._kill = mock.MagicMock()
        upload.terminate()
        msg = 'Wrong call count for upload._kill method, expected %s, gotten %s' %\
            (0, upload._kill.call_count)
        assert upload._kill.call_count == 0, msg
Example #11
    def test__init__default(self):
        upload = largetransfer.Upload(['src'], 'dst')
        assert upload.gzip is True
        assert upload.use_pigz is True
        assert upload._pool_size == largetransfer.DEFAULT_POOL_SIZE
        assert upload._chunk_size == largetransfer.DEFAULT_CHUNK_SIZE
        assert upload._simple is False
        assert upload._cb_interval == largetransfer.DEFAULT_CALLBACK_INTERVAL
        assert hasattr(upload.src, '__iter__')
        assert upload._manifest is not None
        assert upload._callback_thread is None
Example #12
    def test_error1(self, cloudfs_mock, queue_mock):
        upload = largetransfer.Upload(['src'], tmp_dir)
        upload._error_queue = mock.MagicMock()
        upload.apply_async()
        assert upload._error is None
        upload.error
        upload._error_queue.get.assert_called_once()
        upload.error
        upload.error
        msg = 'Wrong call count for upload._error_queue.get method, expected %s, gotten %s' %\
            (3, upload._error_queue.get.call_count)
        assert upload._error_queue.get.call_count == 3, msg
Example #13
def i_upload_stream_with_gzipping(context, gzip):
    gzip = gzip == 'True'  # convert the step argument from string to bool
    src = [cloudfs.NamedStream(stream.src, os.path.basename(stream.src.name)) for stream in context.sources]
    for s in src:
        LOG.debug(s.name)
    time.sleep(1)
    if len(src) == 1:
        src = src[0]
    context.tr = largetransfer.Upload(src, context.remote_dir, gzip=gzip, chunk_size=5)
    context.tr.apply_async()
    try:
        context.tr.join()
    except:
        context.error = sys.exc_info()
    context.manifest = context.tr.manifest
    context.gzip = True
Example #14
    def _run(self):
        LOG.debug("Running MySQLDumpBackup")
        client = mysql_svc.MySQLClient(__mysql__['root_user'],
                                       __mysql__['root_password'])
        self._databases = client.list_databases()

        with self._run_lock:
            if self._killed:
                raise Error("Canceled")
            src_gen = self._gen_src()
            transfer_id = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
            self.transfer = largetransfer.Upload(src_gen,
                                                 self._dst,
                                                 chunk_size=self.chunk_size,
                                                 transfer_id=transfer_id)
        self.transfer.apply_async()
        self.transfer.join()
        result = self.transfer.manifest

        def log_stderr(popen):
            LOG.debug("mysqldump log_stderr communicate")
            out, err = popen.communicate()
            LOG.debug("mysqldump log_stderr communicate done")
            if err:
                LOG.debug("mysqldump (code %s) stderr for %s: %s",
                          popen.returncode, popen.db_name, err)
            return popen.db_name, popen.returncode, err

        mysqldump_results = map(log_stderr, self._popens)

        if self._killed:
            raise Error("Canceled")

        mysqldump_errors = []
        for db_name, retcode, err in mysqldump_results:
            if retcode:
                mysqldump_errors.append('%s: "%s"' % (db_name, err))
        if mysqldump_errors:
            raise Error("Mysqldump has returned a non-zero code.\n" +
                        '\n'.join(mysqldump_errors))

        parts = transfer_result_to_backup_result(result)
        return backup.restore(type='mysqldump',
                              cloudfs_source=result.cloudfs_path,
                              parts=parts,
                              description=self.description,
                              tags=self.tags)
Example #15
    def test_error2(self, cloudfs_mock, queue_mock):
        def side_effect(*args, **kwds):
            raise Queue.Empty()

        upload = largetransfer.Upload(['src'], tmp_dir)
        upload._error_queue = mock.MagicMock()
        upload._error_queue.get.side_effect = side_effect
        upload.apply_async()
        assert upload._error is None

        upload.error
        upload._error_queue.get.assert_called_once()
        upload.error
        upload.error
        msg = 'Wrong call count for upload._error_queue.get method, expected %s, gotten %s' %\
            (3, upload._error_queue.get.call_count)
        assert upload._error_queue.get.call_count == 3, msg
Example #16
    def test_join2(self):
        """
        Test join method when upload is started
        """
        def put(*args, **kwds):
            time.sleep(2)

        cloudfs_types['file'].put = mock.MagicMock(side_effect=put)
        file_path, size, md5_sum = make_file()
        stream = open(file_path, 'rb')

        upload = largetransfer.Upload([stream], tmp_dir, chunk_size=2)
        upload.apply_async()
        upload.join()

        assert upload.error is None
        assert upload.process is None
Example #17
    def test_final_progress(self):
        def put(*args, **kwds):
            time.sleep(5)

        file_path, size, md5_sum = make_file(size=10)
        stream = open(file_path, 'rb')
        cloudfs_types['file'].put = mock.MagicMock(side_effect=put)
        progress_cb = mock.MagicMock()
        upload = largetransfer.Upload([stream],
                                      tmp_dir,
                                      gzip=False,
                                      chunk_size=2,
                                      progress_cb=progress_cb)
        upload.apply_async()
        upload.join()

        assert upload.progress == 10 * 1024 * 1024, upload.progress
        assert progress_cb.call_count > 2
Example #18
    def upload_lvm_snapshot(self, lvm_snap, tags, path):
        """
        Method which uploads data from lvm snapshot to cloud storage and
        updates snapshot status.

        EphVolume runs this method in a separate thread
        """

        try:
            self._snap_status = self.QUEUED
            mpoint = tempfile.mkdtemp()
            opts = []
            if coreutils.blkid(lvm_snap.device).get('type') == 'xfs':
                opts += ['-o', 'nouuid,ro']
            mount.mount(lvm_snap.device, mpoint, *opts)

            self.data_size = coreutils.statvfs(mpoint)['used']

            try:
                cmd = ['/bin/tar', 'cp', mpoint]
                stream = subprocess.Popen(cmd, stdout=subprocess.PIPE, close_fds=True).stdout
                src = cloudfs.NamedStream(stream, 'lvm_snapshot', streamer='tar', extension='tar')
                dst = path
                transfer = largetransfer.Upload(src, dst, tags=tags, transfer_id=self.id)
                self._snap_status = self.IN_PROGRESS
                transfer.apply_async()
                transfer.join()
                manifesto = transfer.manifest
                self.path = manifesto.cloudfs_path
                self._snap_status = self.COMPLETED

            finally:
                mount.umount(mpoint)
                os.rmdir(mpoint)

        except:
            self._snap_status = self.FAILED
            LOG.exception('Caught error while uploading LVM snapshot')
        finally:
            lvm_snap.destroy()
Example #19
    def test_join3(self):
        """
        Test join method raises exception
        """
        def put(*args, **kwds):
            raise Exception('Error message')

        cloudfs_types['file'].put = mock.MagicMock(side_effect=put)
        file_path, size, md5_sum = make_file()
        stream = open(file_path, 'rb')

        upload = largetransfer.Upload([stream], tmp_dir, chunk_size=2)
        upload.apply_async()
        try:
            upload.join()
        except largetransfer.TransferError as e:
            assert len(e.args) == 3
            assert e.args[0] == 'Exception'
            assert e.args[1] == 'Error message'
        else:
            assert False
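Based on the assertions above, join() appears to re-raise worker failures as largetransfer.TransferError, with args carrying the original exception class name, its message, and a third element that is presumably the formatted traceback. A hedged handling sketch, reusing the upload object from the surrounding examples:

    # Hedged sketch: inspecting a failed upload. e.args[0] is the original
    # exception class name, e.args[1] its message; the third element is
    # assumed (not confirmed by the test) to be a traceback string.
    try:
        upload.join()
    except largetransfer.TransferError as e:
        exc_class, message = e.args[0], e.args[1]
        print('upload failed with %s: %s' % (exc_class, message))
        upload.terminate()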
Example #20
    def do_backup(self):
        self.redis_instances.save_all()
        dbs = [r.db_path for r in self.redis_instances if r.db_path]

        cloud_storage_path = __node__.platform.scalrfs.backups(BEHAVIOUR)
        LOG.info("Uploading backup to cloud storage (%s)", cloud_storage_path)

        def progress_cb(progress):
            LOG.debug('Uploading %s bytes' % progress)

        uploader = largetransfer.Upload(dbs,
                                        cloud_storage_path,
                                        progress_cb=progress_cb)
        try:
            uploader.apply_async()
            uploader.join()
            manifest = uploader.manifest

            return transfer_result_to_backup_result(manifest)
        except:
            uploader.terminate()
            raise
Example #21
def i_start_upload(context, simple):
    simple = simple == 'True'  # convert the step argument from string to bool
    sources = [source.src for source in context.sources]
    context.tr = largetransfer.Upload(sources, context.remote_dir, simple=simple)
    context.tr.apply_async()
Example #22
    def rebundle(self):
        rebundle_dir = tempfile.mkdtemp()

        try:
            pl = bus.platform
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.get_storage_conn()

            # Determine the root filesystem size
            devices = coreutils.df()
            root_disk = firstmatched(lambda x: x.mpoint == '/', devices)
            if not root_disk:
                raise HandlerError("Can't find root device")
            # in bytes adjusted to 512 block device size
            fssize = (root_disk.size * 1000 / 512) * 512

            # Old code. Should be reworked
            if os.path.exists('/dev/root'):
                root_part_path = os.path.realpath('/dev/root')
            else:
                rootfs_stat = os.stat('/')
                root_device_minor = os.minor(rootfs_stat.st_dev)
                root_device_major = os.major(rootfs_stat.st_dev)
                root_part_path = os.path.realpath('/dev/block/{0}:{1}'.format(
                    root_device_major, root_device_minor))

            root_part_sysblock_path = glob.glob(
                '/sys/block/*/%s' % os.path.basename(root_part_path))[0]
            root_device = '/dev/%s' % os.path.basename(
                os.path.dirname(root_part_sysblock_path))

            arch_name = '%s.tar.gz' % self._role_name.lower()
            arch_path = os.path.join(rebundle_dir, arch_name)

            # update gcimagebundle
            try:
                pkgmgr.latest(self.gcimagebundle_pkg_name)
            except:
                e = sys.exc_info()[1]
                LOG.warn('Gcimagebundle update failed: %s' % e)

            if os_dist.redhat_family:
                semanage = software.which('semanage')
                if not semanage:
                    pkgmgr.installed('policycoreutils-python')
                    semanage = software.which('semanage')

                util.system2((semanage, 'permissive', '-a', 'rsync_t'))

            gc_img_bundle_bin = software.which('gcimagebundle')

            o, e, p = util.system2(
                (gc_img_bundle_bin, '-d', root_device, '-e', ','.join(
                    self.exclude_dirs), '--fssize', str(fssize), '-o',
                 rebundle_dir, '--output_file_name', arch_name),
                raise_exc=False)
            if p:
                raise HandlerError(
                    'Gcimagebundle util returned non-zero code %s. Stderr: %s'
                    % (p, e))

            try:
                LOG.info('Uploading compressed image to cloud storage')
                tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(
                    1, 1000000), int(time.time()))
                remote_dir = 'gcs://%s' % tmp_bucket_name

                def progress_cb(progress):
                    LOG.debug('Uploading {perc}%'.format(
                        perc=progress / os.path.getsize(arch_path)))

                uploader = largetransfer.Upload(arch_path,
                                                remote_dir,
                                                simple=True,
                                                progress_cb=progress_cb)
                uploader.apply_async()
                try:
                    try:
                        uploader.join()
                    except:
                        if uploader.error:
                            error = uploader.error[1]
                        else:
                            error = sys.exc_info()[1]
                        msg = 'Image upload failed. Error:\n{error}'
                        msg = msg.format(error=error)
                        raise HandlerError(msg)
                except:
                    with util.capture_exception(LOG):
                        objs = cloudstorage.objects()
                        objs.delete(bucket=tmp_bucket_name,
                                    object=arch_name).execute()
                    cloudstorage.buckets().delete(
                        bucket=tmp_bucket_name).execute()
            finally:
                os.unlink(arch_path)

        finally:
            shutil.rmtree(rebundle_dir)

        goog_image_name = self._role_name.lower().replace(
            '_', '-') + '-' + str(int(time.time()))
        try:
            LOG.info('Registering new image %s' % goog_image_name)
            compute = pl.get_compute_conn()

            image_url = 'http://storage.googleapis.com/%s/%s' % (
                tmp_bucket_name, arch_name)

            req_body = dict(name=goog_image_name,
                            sourceType='RAW',
                            rawDisk=dict(source=image_url))

            req = compute.images().insert(project=proj_id, body=req_body)
            operation = req.execute()['name']

            LOG.info('Waiting for image to register')

            def image_is_ready():
                req = compute.globalOperations().get(project=proj_id,
                                                     operation=operation)
                res = req.execute()
                if res['status'] == 'DONE':
                    if res.get('error'):
                        errors = []
                        for e in res['error']['errors']:
                            err_text = '%s: %s' % (e['code'], e['message'])
                            errors.append(err_text)
                        raise Exception('\n'.join(errors))
                    return True
                return False

            util.wait_until(image_is_ready, logger=LOG, timeout=600)

        finally:
            try:
                objs = cloudstorage.objects()
                objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
                cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
            except:
                e = sys.exc_info()[1]
                LOG.error('Failed to remove image compressed source: %s' % e)

        return '%s/images/%s' % (proj_name, goog_image_name)
Example #23
    def test__init__list_src(self):
        upload = largetransfer.Upload(['src'], 'dst')
        assert hasattr(upload.src, '__iter__')
        assert upload._manifest is not None
        assert upload._simple is False
        assert upload._callback_thread is None
Example #24
    def test__init__simple(self):
        upload = largetransfer.Upload(['src'], 'dst', simple=True)
        assert hasattr(upload.src, '__iter__')
        assert upload._manifest is None
        assert upload._simple is True
        assert upload._callback_thread is None
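Together with test__init__list_src above, this shows the two modes used throughout these examples: the default chunked mode creates a manifest (upload.manifest becomes available after join()), while simple=True uploads the source as-is and leaves _manifest as None, as in the image-upload examples. A brief hedged sketch contrasting the two; the source path is hypothetical:

    # Hedged sketch contrasting the two construction modes (path is hypothetical).
    chunked = largetransfer.Upload(['/tmp/archive.tar.gz'], 'dst')            # default: chunked, manifest-based
    simple = largetransfer.Upload('/tmp/archive.tar.gz', 'dst', simple=True)  # simple: single object, no manifest
    assert chunked._manifest is not None and chunked._simple is False
    assert simple._manifest is None and simple._simple is True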
Example #25
    def _run(self):
        self._check_backup_type()

        kwds = {
            'stream': 'xbstream',
            # Compression is broken
            #'compress': True,
            #'compress_threads': os.sysconf('SC_NPROCESSORS_ONLN'),
            'ibbackup': 'xtrabackup',
            'user': __mysql__['root_user'],
            'password': __mysql__['root_password']
        }
        if self.no_lock:
            kwds['no_lock'] = True
        if not int(__mysql__['replication_master']):
            kwds['safe_slave_backup'] = True
            kwds['slave_info'] = True

        current_lsn = None
        if self.backup_type == 'auto':
            client = self._client()
            innodb_stat = client.fetchone('SHOW ENGINE INNODB STATUS')[0]
            #innodb_stat = client.fetchone('SHOW INNODB STATUS')[0]
            for line in innodb_stat.splitlines():
                m = self._re_lsn_innodb_stat.search(line)
                if m:
                    current_lsn = int(m.group(1))
                    break

        if self.backup_type in ('incremental', 'auto'):
            if self.prev_cloudfs_source:
                # Download manifest and get its to_lsn
                mnf = cloudfs.Manifest(cloudfs_path=self.prev_cloudfs_source)
                self.from_lsn = mnf.meta['to_lsn']
            else:
                self._check_attr('from_lsn')
            if self.backup_type == 'incremental' or \
                (self.backup_type == 'auto' and current_lsn and current_lsn >= self.from_lsn):
                kwds.update({
                    'incremental': True,
                    'incremental_lsn': self.from_lsn
                })
        LOG.debug('self._config: %s', self._config)
        LOG.debug('kwds: %s', kwds)

        if self.backup_type == 'incremental':
            LOG.info('Creating incremental xtrabackup (from LSN: %s)',
                     self.from_lsn)
        else:
            LOG.info('Creating full xtrabackup')

        with self._xbak_init_lock:
            if self._killed:
                raise Error("Canceled")
            self._xbak = innobackupex.args(__mysql__['tmp_dir'],
                                           **kwds).popen()
            gzip = self.compressor == 'gzip'
            transfer_id = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
            self._transfer = largetransfer.Upload(self._xbak.stdout,
                                                  self.cloudfs_target,
                                                  gzip=gzip,
                                                  transfer_id=transfer_id)

        stderr_thread, stderr = cloudfs.readfp_thread(self._xbak.stderr)

        self._transfer.apply_async()
        self._transfer.join()
        manifesto = self._transfer.manifest

        if self._killed:
            raise Error("Canceled")
        stderr_thread.join()
        self._xbak.wait()
        stderr = stderr[0] if stderr else ''
        if self._xbak.returncode:
            raise Error(stderr)

        with self._xbak_init_lock:
            self._xbak = None
            self._transfer = None

        log_file = log_pos = to_lsn = None
        re_binlog = self._re_binlog \
                    if int(__mysql__['replication_master']) else \
                    self._re_slave_binlog
        for line in stderr.splitlines():
            m = self._re_lsn.search(line) or self._re_lsn_51.search(line)
            if m:
                to_lsn = m.group(1)
                continue
            m = re_binlog.search(line)
            if m:
                log_file = m.group(1)
                log_pos = int(m.group(2))
                continue
            if log_file and log_pos and to_lsn:
                break

        rst = backup.restore(type='xtrabackup',
                             backup_type=self.backup_type,
                             from_lsn=self.from_lsn,
                             to_lsn=to_lsn,
                             cloudfs_source=manifesto.cloudfs_path,
                             prev_cloudfs_source=self.prev_cloudfs_source,
                             log_file=log_file,
                             log_pos=log_pos)

        # Update manifest
        LOG.debug('rst: %s', dict(rst))
        manifesto.meta = dict(rst)
        manifesto.save()

        LOG.info(
            'Created %s xtrabackup. (LSN: %s..%s, log_file: %s, log_pos: %s)',
            rst.backup_type, rst.from_lsn, rst.to_lsn, rst.log_file,
            rst.log_pos)

        return rst
Example #26
    def test_not_running(self):
        upload = largetransfer.Upload(['src'], 'dst')
        assert not upload.running
Example #27
    def snapshot(self, op, name):
        rebundle_dir = tempfile.mkdtemp()
        archive_path = ''
        try:
            pl = __node__['platform']
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.get_storage_conn()

            root_part_path = None
            for d in coreutils.df():
                if d.mpoint == '/':
                    root_part_path = d.device
                    break
            else:
                raise ImageAPIError('Failed to find root volume')

            root_part_sysblock_path = glob.glob(
                '/sys/block/*/%s' % os.path.basename(root_part_path))[0]
            root_device = '/dev/%s' % os.path.basename(
                os.path.dirname(root_part_sysblock_path))

            archive_name = '%s.tar.gz' % name.lower()
            archive_path = os.path.join(rebundle_dir, archive_name)

            self._prepare_software()

            gcimagebundle_bin = software.which('gcimagebundle')

            out, err, code = util.system2(
                (gcimagebundle_bin, '-d', root_device, '-e', ','.join(
                    self.exclude_dirs), '-o', rebundle_dir,
                 '--output_file_name', archive_name),
                raise_exc=False)
            if code:
                raise ImageAPIError(
                    'Gcimagebundle util returned non-zero code %s. Stderr: %s'
                    % (code, err))

            LOG.info('Uploading compressed image to cloud storage')
            tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(
                1, 1000000), int(time.time()))
            remote_dir = 'gcs://%s' % tmp_bucket_name

            def progress_cb(progress):
                LOG.debug('Uploading {perc}%'.format(
                    perc=progress / os.path.getsize(archive_path)))

            uploader = largetransfer.Upload(archive_path,
                                            remote_dir,
                                            simple=True,
                                            progress_cb=progress_cb)
            uploader.apply_async()
            try:
                uploader.join()
            except:
                if uploader.error:
                    error = uploader.error[1]
                else:
                    error = sys.exc_info()[1]
                msg = 'Image upload failed. Error:\n{error}'
                msg = msg.format(error=error)
                self._remove_bucket(tmp_bucket_name, archive_name,
                                    cloudstorage)
                raise ImageAPIError(msg)
        finally:
            shutil.rmtree(rebundle_dir)
            if os.path.exists(archive_path):
                os.remove(archive_path)

        image_name = name.lower().replace('_', '-') + '-' + str(
            int(time.time()))
        self._register_image(image_name, tmp_bucket_name, archive_name,
                             cloudstorage)

        return '%s/images/%s' % (proj_name, image_name)
Example #28
        def do_backup(op):
            tmpdir = None
            dumps = []
            tmp_path = os.path.join(__postgresql__['storage_dir'], 'tmp')
            try:
                # Get databases list
                psql = PSQL(user=self.postgresql.root_user.name)
                databases = psql.list_pg_databases()
                if 'template0' in databases:
                    databases.remove('template0')

                if not os.path.exists(tmp_path):
                    os.makedirs(tmp_path)

                # Dump all databases
                LOG.info("Dumping all databases")
                tmpdir = tempfile.mkdtemp(dir=tmp_path)
                chown_r(tmpdir, self.postgresql.root_user.name)

                def _single_backup(db_name):
                    dump_path = tmpdir + os.sep + db_name + '.sql'
                    pg_args = '%s %s --no-privileges -f %s' % (
                        PG_DUMP, db_name, dump_path)
                    su_args = [
                        SU_EXEC, '-', self.postgresql.root_user.name, '-c',
                        pg_args
                    ]
                    err = system2(su_args)[1]
                    if err:
                        raise HandlerError(
                            'Error while dumping database %s: %s' %
                            (db_name, err))  #?
                    dumps.append(dump_path)

                for db_name in databases:
                    _single_backup(db_name)

                cloud_storage_path = __node__.platform.scalrfs.backups(
                    BEHAVIOUR)

                suffix = 'master' if int(
                    __postgresql__[OPT_REPLICATION_MASTER]) else 'slave'
                backup_tags = {'scalr-purpose': 'postgresql-%s' % suffix}

                LOG.info("Uploading backup to %s with tags %s" %
                         (cloud_storage_path, backup_tags))

                def progress_cb(progress):
                    LOG.debug('Uploading %s bytes' % progress)

                uploader = largetransfer.Upload(dumps,
                                                cloud_storage_path,
                                                progress_cb=progress_cb)
                uploader.apply_async()
                uploader.join()
                manifest = uploader.manifest

                LOG.info(
                    "Postgresql backup uploaded to cloud storage under %s",
                    cloud_storage_path)
                LOG.debug(manifest.data)

                # Notify Scalr
                result = transfer_result_to_backup_result(manifest)
                __node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='ok', backup_parts=result))

                return result

            except (Exception, BaseException), e:
                LOG.exception(e)

                # Notify Scalr about error
                __node__.messaging.send(
                    DbMsrMessages.DBMSR_CREATE_BACKUP_RESULT,
                    dict(db_type=BEHAVIOUR, status='error', last_error=str(e)))
Example #29
    def test_join1(self):
        """
        Test join method when upload is not started
        """
        upload = largetransfer.Upload('src', 'dst')
        upload.join()
Example #30
    def test_initial_progress(self):
        upload = largetransfer.Upload('src', 'dst')
        assert upload.progress == 0