Exemple #1
0
    def restore(self, queue, volume, download_finished):
        """
        Restore snapshot data onto `volume`.

        Mounts the volume on a temporary directory, then pipes downloaded
        chunks through a gzip/pigz decompressor into `tar` extracting at
        the mount point.

        :param queue: queue of downloaded chunk files consumed by
            `concat_chunks`
        :param volume: volume object providing `mount(mpoint)`
        :param download_finished: flag/event telling `concat_chunks` that
            no more chunks will arrive
        :raises Exception: if the decompressor or tar exits non-zero
        """
        tmp_mpoint = mkdtemp()
        volume.mount(tmp_mpoint)
        try:
            # Prefer pigz (parallel gzip); project `which` raises
            # LookupError when the binary is missing.
            try:
                cmd1 = (which('pigz'), '-d')
            except LookupError:
                cmd1 = ('gzip', '-d')
            cmd2 = ('tar', 'px', '-C', tmp_mpoint)

            compressor = subprocess.Popen(cmd1,
                                          stdin=subprocess.PIPE,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE,
                                          close_fds=True)
            tar = subprocess.Popen(cmd2,
                                   stdin=compressor.stdout,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   close_fds=True)
            # Close the parent's copy of the pipe so the decompressor gets
            # a SIGPIPE if tar exits early (same idiom as the snapshot
            # creation path).
            compressor.stdout.close()
            self.concat_chunks(queue, download_finished, compressor.stdin)

            compressor.stdin.close()
            r_code = compressor.wait()
            if r_code:
                raise Exception('Archiver finished with return code %s' %
                                r_code)

            # NOTE(review): stderr pipes are never drained; a very chatty
            # child could block on a full pipe — confirm acceptable here.
            r_code = tar.wait()
            if r_code:
                raise Exception('Tar finished with return code %s' % r_code)
        finally:
            mount.umount(tmp_mpoint)
Exemple #2
0
    def umount(self):
        """
        Unmount the underlying device.

        If the precondition check fails (e.g. nothing is mounted), the
        call is a silent no-op.
        """
        try:
            self._check(fstype=False, device=True)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed; a failed check simply
            # means there is nothing to unmount.
            return

        mod_mount.umount(self.device)
Exemple #3
0
    def umount(self):
        """
        Unmount the underlying device.

        If the precondition check fails (e.g. nothing is mounted), the
        call is a silent no-op.
        """
        try:
            self._check(fstype=False, device=True)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed; a failed check simply
            # means there is nothing to unmount.
            return

        mod_mount.umount(self.device)
Exemple #4
0
    def _create(self, volume, snapshot, snap_lv, tranzit_path,  complete_cb):
        """
        Build snapshot data from LVM snapshot `snap_lv` and upload it.

        Mounts the snapshot, streams a tar of its contents through
        pigz/gzip, splits the compressed stream into chunks named
        '<snapshot.id>.data*' in `tranzit_path` (in a 'split' thread) and
        uploads chunks with two background uploader threads.  Records the
        final state in self._state_map[snapshot.id].
        """
        try:
            chunk_prefix = '%s.data' % snapshot.id
            snapshot.path = None
            snap_mpoint = mkdtemp()
            try:
                opts = []
                # XFS: mount read-only with 'nouuid' — the snapshot carries
                # the origin volume's UUID, which the kernel would reject.
                if volume.fstype == 'xfs':
                    opts += ['-o', 'nouuid,ro']
                mount.mount(snap_lv, snap_mpoint, *opts)
                tar_cmd = ['tar', 'cp', '-C', snap_mpoint, '.']

                # Prefer the parallel pigz; fall back to plain gzip.
                pigz_bins = whereis('pigz')
                compress_cmd = [pigz_bins[0] if pigz_bins else 'gzip', '-5']

                self._logger.debug("Creating and compressing snapshot data.")
                # Pipeline: tar -> compressor -> split thread -> uploaders.
                tar = subprocess.Popen(tar_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
                compress = subprocess.Popen(compress_cmd, stdin=tar.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
                tar.stdout.close() # Allow tar to receive a SIGPIPE if compress exits.
                split = threading.Thread(target=self._split, name='split',
                                  args=(compress.stdout, tranzit_path, chunk_prefix, snapshot))
                split.start()

                uploaders = []
                for i in range(2):
                    uploader = threading.Thread(name="Uploader-%s" % i, target=self._uploader,
                                                                      args=(volume.snap_backend['path'], snapshot))
                    self._logger.debug("Starting uploader '%s'", uploader.getName())

                    uploader.start()
                    uploaders.append(uploader)
                self._logger.debug('uploaders started. waiting compress')

                compress.wait()
                self._logger.debug('compress completed (code: %s). waiting split', compress.returncode)
                if compress.returncode:
                    raise StorageError('Compress process terminated with exit code %s. <err>: %s' % (compress.returncode, compress.stderr.read()))

                split.join()
                self._logger.debug('split completed. waiting uploaders')

                for uploader in uploaders:
                    uploader.join()
                self._logger.debug('uploaders completed')

                # Re-raise any exception captured inside a worker thread
                # (Python 2 three-expression raise preserves the traceback).
                if self._inner_exc_info:
                    t, e, s = self._inner_exc_info
                    raise t, e, s

            finally:
                self._return_ev.set()
                mount.umount(snap_mpoint)
                os.rmdir(snap_mpoint)
                self._lvm.remove_lv(snap_lv)
                self._inner_exc_info = None
            self._state_map[snapshot.id] = Snapshot.COMPLETED
        except (Exception, BaseException), e:
            self._state_map[snapshot.id] = Snapshot.FAILED
            self._logger.exception('Snapshot creation failed. %s' % e)
Exemple #5
0
    def handle_request(self, req_message, resp_message):
        """Stop Cassandra, unmount its storage device and snapshot the volume.

        Fills `resp_message.body` with status, the new snapshot id and a
        timestamp.  `req_message` is part of the handler interface but is
        not consulted here.
        """
        self.umounted = False
        self.device_name = cassandra.ini.get(CNF_SECTION, OPT_STORAGE_DEVICE_NAME)

        # Quiesce the service and flush pending writes before unmounting.
        cassandra.stop_service()
        system2('sync', shell=True)

        mount.umount(self.device_name)
        self.umounted = True

        storage_volume = cassandra.ini.get(CNF_SECTION, OPT_STORAGE_VOLUME_ID)
        new_snapshot = cassandra.create_snapshot(storage_volume)
        resp_message.body.update({
            'status': 'ok',
            'snapshot_id': new_snapshot,
            'timestamp': time.strftime('%Y-%m-%d %H-%M'),
        })
Exemple #6
0
    def upload_lvm_snapshot(self, lvm_snap, tags, path):
        """
        Upload data from an LVM snapshot to cloud storage and update the
        snapshot status.

        EphVolume runs this method in a separate thread.

        :param lvm_snap: LVM snapshot object exposing `device` and `destroy()`
        :param tags: tags to attach to the transfer
        :param path: cloud storage destination path
        """
        try:
            self._snap_status = self.QUEUED
            mpoint = tempfile.mkdtemp()
            opts = []
            # XFS snapshots share the origin's UUID; mount read-only with
            # 'nouuid' so the kernel accepts the duplicate.
            if coreutils.blkid(lvm_snap.device).get('type') == 'xfs':
                opts += ['-o', 'nouuid,ro']
            mount.mount(lvm_snap.device, mpoint, *opts)

            self.data_size = coreutils.statvfs(mpoint)['used']

            try:
                # Tar+gzip the mounted tree and stream it to `path`.
                transfer = cloudfs.LargeTransfer(
                        src=mpoint + '/',
                        dst=path,
                        tar_it=True,
                        gzip_it=True,
                        tags=tags,
                        transfer_id=self.id)
                self._snap_status = self.IN_PROGRESS
                manifesto = transfer.run()
                self.path = manifesto.cloudfs_path
                self._snap_status = self.COMPLETED

            finally:
                mount.umount(mpoint)
                os.rmdir(mpoint)

        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed; any failure marks the
            # snapshot FAILED and is logged with its traceback.
            self._snap_status = self.FAILED
            LOG.exception('Caught error while uploading LVM snapshot')
        finally:
            lvm_snap.destroy()
Exemple #7
0
    def upload_lvm_snapshot(self, lvm_snap, tags, path):
        """
        Upload data from an LVM snapshot to cloud storage and update the
        snapshot status.

        EphVolume runs this method in a separate thread.

        :param lvm_snap: LVM snapshot object exposing `device` and `destroy()`
        :param tags: tags to attach to the transfer
        :param path: cloud storage destination path
        """
        try:
            self._snap_status = self.QUEUED
            mpoint = tempfile.mkdtemp()
            opts = []
            # XFS snapshots share the origin's UUID; mount read-only with
            # 'nouuid' so the kernel accepts the duplicate.
            if coreutils.blkid(lvm_snap.device).get('type') == 'xfs':
                opts += ['-o', 'nouuid,ro']
            mount.mount(lvm_snap.device, mpoint, *opts)

            self.data_size = coreutils.statvfs(mpoint)['used']

            try:
                cmd = ['/bin/tar', 'cp', mpoint]
                # Keep the Popen handle (was discarded) so tar can be
                # reaped after the upload instead of lingering as a zombie.
                tar = subprocess.Popen(cmd, stdout=subprocess.PIPE, close_fds=True)
                src = cloudfs.NamedStream(tar.stdout, 'lvm_snapshot', streamer='tar', extension='tar')
                dst = path
                transfer = largetransfer.Upload(src, dst, tags=tags, transfer_id=self.id)
                self._snap_status = self.IN_PROGRESS
                transfer.apply_async()
                transfer.join()
                tar.wait()  # reap the tar child process
                manifesto = transfer.manifest
                self.path = manifesto.cloudfs_path
                self._snap_status = self.COMPLETED

            finally:
                mount.umount(mpoint)
                os.rmdir(mpoint)

        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed; any failure marks the
            # snapshot FAILED and is logged with its traceback.
            self._snap_status = self.FAILED
            LOG.exception('Caught error while uploading LVM snapshot')
        finally:
            lvm_snap.destroy()
Exemple #8
0
    def restore(self, queue, volume, download_finished):
        """
        Restore snapshot data onto `volume` by piping downloaded chunks
        through gzip/pigz -d into tar extracting at a temporary mount point.

        :raises Exception: if the decompressor or tar exits non-zero
        """
        tmp_mpoint = mkdtemp()
        volume.mount(tmp_mpoint)
        try:
            # Prefer the parallel pigz when installed.
            pigz_bins = whereis('pigz')
            cmd1 = ('pigz' if pigz_bins else 'gzip', '-d')
            cmd2 = ('tar', 'px', '-C', tmp_mpoint)

            compressor = subprocess.Popen(cmd1, stdin=subprocess.PIPE,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE,
                                          close_fds=True)
            tar = subprocess.Popen(cmd2, stdin=compressor.stdout,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   close_fds=True)
            # Close the parent's copy of the pipe so the decompressor gets
            # a SIGPIPE if tar exits early (same idiom as the creation path).
            compressor.stdout.close()
            self.concat_chunks(queue, download_finished, compressor.stdin)

            compressor.stdin.close()
            r_code = compressor.wait()
            if r_code:
                raise Exception('Archiver finished with return code %s' % r_code)

            r_code = tar.wait()
            if r_code:
                raise Exception('Tar finished with return code %s' % r_code)
        finally:
            mount.umount(tmp_mpoint)
Exemple #9
0
    def upload_lvm_snapshot(self, lvm_snap, tags, path):
        """
        Upload data from an LVM snapshot to cloud storage and update the
        snapshot status.

        EphVolume runs this method in a separate thread.

        :param lvm_snap: LVM snapshot object exposing `device` and `destroy()`
        :param tags: tags to attach to the transfer
        :param path: cloud storage destination path
        """
        try:
            self._snap_status = self.QUEUED
            mpoint = tempfile.mkdtemp()
            opts = []
            # XFS snapshots share the origin's UUID; mount read-only with
            # 'nouuid' so the kernel accepts the duplicate.
            if coreutils.blkid(lvm_snap.device).get('type') == 'xfs':
                opts += ['-o', 'nouuid,ro']
            mount.mount(lvm_snap.device, mpoint, *opts)

            self.data_size = coreutils.statvfs(mpoint)['used']

            try:
                transfer = cloudfs.LargeTransfer(src=mpoint + '/',
                                                 dst=path,
                                                 tar_it=True,
                                                 gzip_it=True,
                                                 tags=tags,
                                                 transfer_id=self.id)
                self._snap_status = self.IN_PROGRESS
                manifesto = transfer.run()
                self.path = manifesto.cloudfs_path
                self._snap_status = self.COMPLETED

            finally:
                mount.umount(mpoint)
                os.rmdir(mpoint)

        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed; any failure marks the
            # snapshot FAILED and is logged with its traceback.
            self._snap_status = self.FAILED
            LOG.exception('Caught error while uploading LVM snapshot')
        finally:
            lvm_snap.destroy()
Exemple #10
0
def test_umount_not_mounted():
    # mount.umount() must swallow "not mounted" errors from the system call.
    err = linux.LinuxError('', '', 'umount: /mnt: not mounted\n', 1, ())
    system_mock = mock.Mock(side_effect=err)
    with mock.patch('scalarizr.linux.system', system_mock):
        mount.umount('/mnt')
        assert system_mock.called
Exemple #11
0
    def rebundle(self):
        """
        Build a raw disk image of this server's root filesystem, upload it
        to Google Cloud Storage and register it as a new GCE image.

        High-level steps:
          1. create a sparse raw file sized after '/'
          2. partition it, map it with kpartx, mkfs ext4, rsync '/' into it
          3. tar+gzip the image and upload it to a temporary GCS bucket
          4. register the image via the GCE API and wait for completion

        :returns: '<project_name>/images/<image_name>' of the new image
        """
        rebundle_dir = tempfile.mkdtemp()

        try:
            pl = bus.platform
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.new_storage_client()

            tmp_mount_dir = os.path.join(rebundle_dir, 'root')
            os.makedirs(tmp_mount_dir)

            image_name = 'disk.raw'
            image_path = os.path.join(rebundle_dir, image_name)

            # Sparse file as large as the root filesystem (plus 1 KB slack).
            root_size = coreutils.statvfs('/')['size']
            LOG.debug('Creating image file %s' % image_path)
            with open(image_path, 'w') as f:
                f.truncate(root_size + 1 * 1024)

            try:

                LOG.debug('Creating partition table on image')
                system(('parted', image_path, 'mklabel', 'msdos'))
                system(('parted', image_path, 'mkpart', 'primary', 'ext2', 1,
                        str(root_size / (1024 * 1024))))

                # Map disk image
                out = system(('kpartx', '-av', image_path))[0]
                try:
                    loop = re.search('(/dev/loop\d+)', out).group(1)
                    root_dev_name = '/dev/mapper/%sp1' % loop.split('/')[-1]

                    LOG.info('Creating filesystem')
                    storage2.filesystem('ext4').mkfs(root_dev_name)
                    # Assign a fresh UUID; /etc/fstab is patched to match below.
                    dev_uuid = uuid.uuid4()
                    system(('tune2fs', '-U', str(dev_uuid), root_dev_name))

                    mount.mount(root_dev_name, tmp_mount_dir)
                    try:
                        # Exclude every currently mounted point except '/'
                        # from the copy.
                        lines = system(('/bin/mount', '-l'))[0].splitlines()
                        exclude_dirs = set()
                        for line in lines:
                            mpoint = line.split()[2]
                            if mpoint != '/':
                                exclude_dirs.add(mpoint)

                        exclude_dirs.update(self.exclude_dirs)

                        excludes = [
                            os.path.join(ex, '**') for ex in exclude_dirs
                        ]
                        excludes.extend(self.exclude_files)
                        excludes.extend(self._excludes)

                        LOG.info('Copying root filesystem to image')
                        rsync('/',
                              tmp_mount_dir,
                              archive=True,
                              hard_links=True,
                              times=True,
                              sparse=True,
                              exclude=excludes)

                        LOG.info('Cleanup image')
                        self._create_spec_devices(tmp_mount_dir)

                        # Remove the temporary build user inside a chroot.
                        LOG.debug('Removing roles-builder user')
                        sh = pexpect.spawn('/bin/sh')
                        try:
                            sh.sendline('chroot %s' % tmp_mount_dir)
                            sh.expect('#')
                            sh.sendline('userdel -rf %s' % ROLEBUILDER_USER)
                            sh.expect('#')
                        finally:
                            sh.close()
                        """ Patch fstab"""
                        # Point the root entry at the new filesystem UUID.
                        fstab_path = os.path.join(tmp_mount_dir, 'etc/fstab')
                        if os.path.exists(fstab_path):
                            with open(fstab_path) as f:
                                fstab = f.read()

                            new_fstab = re.sub('UUID=\S+\s+/\s+(.*)',
                                               'UUID=%s / \\1' % dev_uuid,
                                               fstab)

                            with open(fstab_path, 'w') as f:
                                f.write(new_fstab)

                    finally:
                        mount.umount(root_dev_name)
                finally:
                    system(('kpartx', '-d', image_path))

                LOG.info('Compressing image.')
                arch_name = '%s.tar.gz' % self._role_name.lower()
                arch_path = os.path.join(rebundle_dir, arch_name)

                tar = Tar()
                tar.create().gzip().sparse()
                tar.archive(arch_path)
                tar.add(image_name, rebundle_dir)
                system(str(tar), shell=True)

            finally:
                os.unlink(image_path)

            try:
                LOG.info('Uploading compressed image to cloud storage')
                uploader = transfer.Transfer(logger=LOG)
                tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(
                    1, 1000000), int(time.time()))

                try:
                    remote_path = 'gcs://%s/' % tmp_bucket_name
                    uploader.upload((arch_path, ), remote_path)
                except:
                    # Best-effort cleanup of the partially created bucket
                    # before re-raising the upload error.
                    try:
                        objs = cloudstorage.objects()
                        objs.delete(bucket=tmp_bucket_name,
                                    object=arch_name).execute()
                    except:
                        pass

                    cloudstorage.buckets().delete(
                        bucket=tmp_bucket_name).execute()
                    raise

            finally:
                os.unlink(arch_path)

        finally:
            shutil.rmtree(rebundle_dir)

        try:
            goog_image_name = self._role_name.lower().replace('_', '-')
            LOG.info('Registering new image %s' % goog_image_name)
            # TODO: check duplicate names
            compute = pl.new_compute_client()

            # Inherit the preferred kernel from the currently running image.
            current_image_fq = pl.get_image().split('/')
            current_img_project = current_image_fq[1]
            current_img_name = current_image_fq[3]
            current_img_obj = compute.images().get(
                project=current_img_project, image=current_img_name).execute()
            kernel = current_img_obj['preferredKernel']

            image_url = 'http://storage.googleapis.com/%s/%s' % (
                tmp_bucket_name, arch_name)

            req_body = dict(name=goog_image_name,
                            sourceType='RAW',
                            preferredKernel=kernel,
                            rawDisk=dict(containerType='TAR',
                                         source=image_url))

            req = compute.images().insert(project=proj_id, body=req_body)
            operation = req.execute()['name']

            LOG.info('Waiting for image to register')

            def image_is_ready():
                # Poll the global operation until DONE; surface API errors.
                req = compute.globalOperations().get(project=proj_id,
                                                     operation=operation)
                res = req.execute()
                if res['status'] == 'DONE':
                    if res.get('error'):
                        errors = []
                        for e in res['error']['errors']:
                            err_text = '%s: %s' % (e['code'], e['message'])
                            errors.append(err_text)
                        raise Exception('\n'.join(errors))
                    return True
                return False

            wait_until(image_is_ready, logger=LOG, timeout=600)

        finally:
            # NOTE(review): this cleanup also runs on success — confirm GCE
            # has copied the image data before the bucket is removed.
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()

        return '%s/images/%s' % (proj_name, goog_image_name)
Exemple #12
0
    def rebundle(self):
        """
        Build a raw disk image of this server's root filesystem, upload it
        to Google Cloud Storage and register it as a new GCE image.

        :returns: '<project_name>/images/<image_name>' of the new image
        """
        rebundle_dir = tempfile.mkdtemp()

        try:
            pl = bus.platform
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.new_storage_client()

            tmp_mount_dir = os.path.join(rebundle_dir, 'root')
            os.makedirs(tmp_mount_dir)

            image_name      = 'disk.raw'
            image_path      = os.path.join(rebundle_dir, image_name)

            # Sparse file as large as the root filesystem (plus 1 KB slack).
            root_size = coreutils.statvfs('/')['size']
            LOG.debug('Creating image file %s' % image_path)
            with open(image_path, 'w') as f:
                f.truncate(root_size + 1*1024)

            try:

                LOG.debug('Creating partition table on image')
                system(('parted', image_path, 'mklabel', 'msdos'))
                system(('parted', image_path, 'mkpart', 'primary', 'ext2', 1, str(root_size/(1024*1024))))

                # Map disk image
                out = system(('kpartx', '-av', image_path))[0]
                try:
                    loop = re.search('(/dev/loop\d+)', out).group(1)
                    root_dev_name = '/dev/mapper/%sp1' % loop.split('/')[-1]

                    LOG.info('Creating filesystem')
                    storage2.filesystem('ext4').mkfs(root_dev_name)
                    # Assign a fresh UUID; /etc/fstab is patched to match below.
                    dev_uuid = uuid.uuid4()
                    system(('tune2fs', '-U', str(dev_uuid), root_dev_name))

                    mount.mount(root_dev_name, tmp_mount_dir)
                    try:
                        # Exclude every currently mounted point except '/'
                        # from the copy.
                        lines = system(('/bin/mount', '-l'))[0].splitlines()
                        exclude_dirs = set()
                        for line in lines:
                            mpoint = line.split()[2]
                            if mpoint != '/':
                                exclude_dirs.add(mpoint)

                        exclude_dirs.update(self.exclude_dirs)

                        excludes = [os.path.join(ex, '**') for ex in exclude_dirs]
                        excludes.extend(self.exclude_files)
                        excludes.extend(self._excludes)

                        LOG.info('Copying root filesystem to image')
                        rsync('/', tmp_mount_dir, archive=True,
                                                                          hard_links=True,
                                                                          times=True,
                                                                          sparse=True,
                                                                          exclude=excludes)

                        LOG.info('Cleanup image')
                        self._create_spec_devices(tmp_mount_dir)

                        # Remove the temporary build user inside a chroot.
                        LOG.debug('Removing roles-builder user')
                        sh = pexpect.spawn('/bin/sh')
                        try:
                            sh.sendline('chroot %s' % tmp_mount_dir)
                            sh.expect('#')
                            sh.sendline('userdel -rf %s' % ROLEBUILDER_USER)
                            sh.expect('#')
                        finally:
                            sh.close()

                        """ Patch fstab"""
                        # Point the root entry at the new filesystem UUID.
                        fstab_path = os.path.join(tmp_mount_dir, 'etc/fstab')
                        if os.path.exists(fstab_path):
                            with open(fstab_path) as f:
                                fstab = f.read()

                            new_fstab = re.sub('UUID=\S+\s+/\s+(.*)', 'UUID=%s / \\1' % dev_uuid, fstab)

                            with open(fstab_path, 'w') as f:
                                f.write(new_fstab)

                    finally:
                        mount.umount(root_dev_name)
                finally:
                    system(('kpartx', '-d', image_path))

                LOG.info('Compressing image.')
                arch_name = '%s.tar.gz' % self._role_name.lower()
                arch_path = os.path.join(rebundle_dir, arch_name)

                tar = Tar()
                tar.create().gzip().sparse()
                tar.archive(arch_path)
                tar.add(image_name, rebundle_dir)
                system(str(tar), shell=True)

            finally:
                os.unlink(image_path)

            try:
                LOG.info('Uploading compressed image to cloud storage')
                uploader = transfer.Transfer(logger=LOG)
                tmp_bucket_name = 'scalr-images-%s-%s' % (
                                                        random.randint(1,1000000), int(time.time()))

                try:
                    remote_path = 'gcs://%s/' % tmp_bucket_name
                    uploader.upload((arch_path,), remote_path)
                except:
                    # Best-effort cleanup of the partially created bucket
                    # before re-raising the upload error.
                    try:
                        objs = cloudstorage.objects()
                        objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
                    except:
                        pass

                    cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
                    raise

            finally:
                os.unlink(arch_path)

        finally:
            shutil.rmtree(rebundle_dir)

        try:
            goog_image_name = self._role_name.lower().replace('_', '-')
            LOG.info('Registering new image %s' % goog_image_name)
            # TODO: check duplicate names
            compute = pl.new_compute_client()

            # Inherit the preferred kernel from the currently running image.
            current_image_fq = pl.get_image().split('/')
            current_img_project = current_image_fq[1]
            current_img_name = current_image_fq[3]
            current_img_obj = compute.images().get(project=current_img_project,
                                                            image=current_img_name).execute()
            kernel = current_img_obj['preferredKernel']

            image_url = 'http://storage.googleapis.com/%s/%s' % (
                                                                            tmp_bucket_name, arch_name)

            req_body = dict(
                    name=goog_image_name,
                    sourceType='RAW',
                    preferredKernel=kernel,
                    rawDisk=dict(
                            containerType='TAR',
                            source=image_url
                    )
            )

            req = compute.images().insert(project=proj_id, body=req_body)
            operation = req.execute()['name']

            LOG.info('Waiting for image to register')
            def image_is_ready():
                # Poll the global operation until DONE; surface API errors.
                req = compute.globalOperations().get(project=proj_id, operation=operation)
                res = req.execute()
                if res['status'] == 'DONE':
                    if res.get('error'):
                        errors = []
                        for e in res['error']['errors']:
                            err_text = '%s: %s' % (e['code'], e['message'])
                            errors.append(err_text)
                        raise Exception('\n'.join(errors))
                    return True
                return False
            wait_until(image_is_ready, logger=LOG, timeout=600)

        finally:
            # NOTE(review): this cleanup also runs on success — confirm GCE
            # has copied the image data before the bucket is removed.
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()

        return '%s/images/%s' % (proj_name, goog_image_name)
Exemple #13
0
 def _tearDown(self):
     # Test teardown: unmount the scratch device and drop its fstab entry.
     mount.umount("/dev/sdb1", clean_fstab=True)
Exemple #14
0
def test_umount_raise_error():
    # Drive mount.umount('/') while the underlying system call fails with
    # "device is busy".
    busy_err = linux.LinuxError(
        '', '', 'umount: /: device is busy.\n', 1, ())
    busy_system = mock.Mock(side_effect=busy_err)
    with mock.patch('scalarizr.linux.system', busy_system):
        mount.umount('/')
Exemple #15
0
def test_umount_not_mounted():
    # mount.umount() must swallow "not mounted" errors from the system call.
    not_mounted = linux.LinuxError(
        '', '', 'umount: /mnt: not mounted\n', 1, ())
    system_stub = mock.Mock(side_effect=not_mounted)
    with mock.patch('scalarizr.linux.system', system_stub):
        mount.umount('/mnt')
        assert system_stub.called
Exemple #16
0
def test_umount():
    # A plain unmount must delegate to scalarizr.linux.system.
    with mock.patch('scalarizr.linux.system') as system_stub:
        mount.umount('/mnt/mpoint')
        assert system_stub.called
Exemple #17
0
	def umount(self):
		"""Validate preconditions via _check(), then unmount the device."""
		self._check()
		mod_mount.umount(self.device)
Exemple #18
0
def test_umount():
    # A plain unmount must delegate to scalarizr.linux.system.
    with mock.patch('scalarizr.linux.system') as sys_call:
        mount.umount('/mnt/mpoint')
        assert sys_call.called
Exemple #19
0
 def _tearDown(self):
     # Test teardown: unmount the scratch device and drop its fstab entry.
     mount.umount("/dev/sdb1", clean_fstab=True)
Exemple #20
0
 def _umount_detach_delete_volume(self, devname, volume):
     # Full volume teardown: unmount the device, detach the EBS volume
     # from the instance, then delete it.
     mount.umount(devname)
     ebstool.detach_volume(None, volume, logger=self._logger)
     volume.delete()
Exemple #21
0
    def _create(self, volume, snapshot, snap_lv, tranzit_path, complete_cb):
        try:
            chunk_prefix = '%s.data' % snapshot.id
            snapshot.path = None
            snap_mpoint = mkdtemp()
            try:
                opts = []
                if volume.fstype == 'xfs':
                    opts += ['-o', 'nouuid,ro']
                mount.mount(snap_lv, snap_mpoint, *opts)
                tar_cmd = ['tar', 'cp', '-C', snap_mpoint, '.']

                if which('pigz'):
                    compress_cmd = [which('pigz'), '-5']
                else:
                    compress_cmd = ['gzip', '-5']

                self._logger.debug("Creating and compressing snapshot data.")
                tar = subprocess.Popen(tar_cmd,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       close_fds=True)
                compress = subprocess.Popen(compress_cmd,
                                            stdin=tar.stdout,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            close_fds=True)
                tar.stdout.close(
                )  # Allow tar to receive a SIGPIPE if compress exits.
                split = threading.Thread(target=self._split,
                                         name='split',
                                         args=(compress.stdout, tranzit_path,
                                               chunk_prefix, snapshot))
                split.start()

                uploaders = []
                for i in range(2):
                    uploader = threading.Thread(
                        name="Uploader-%s" % i,
                        target=self._uploader,
                        args=(volume.snap_backend['path'], snapshot))
                    self._logger.debug("Starting uploader '%s'",
                                       uploader.getName())

                    uploader.start()
                    uploaders.append(uploader)
                self._logger.debug('uploaders started. waiting compress')

                compress.wait()
                self._logger.debug(
                    'compress completed (code: %s). waiting split',
                    compress.returncode)
                if compress.returncode:
                    raise StorageError(
                        'Compress process terminated with exit code %s. <err>: %s'
                        % (compress.returncode, compress.stderr.read()))

                split.join()
                self._logger.debug('split completed. waiting uploaders')

                for uploader in uploaders:
                    uploader.join()
                self._logger.debug('uploaders completed')

                if self._inner_exc_info:
                    t, e, s = self._inner_exc_info
                    raise t, e, s

            finally:
                self._return_ev.set()
                mount.umount(snap_mpoint)
                os.rmdir(snap_mpoint)
                self._lvm.remove_lv(snap_lv)
                self._inner_exc_info = None
            self._state_map[snapshot.id] = Snapshot.COMPLETED
        except (Exception, BaseException), e:
            self._state_map[snapshot.id] = Snapshot.FAILED
            self._logger.exception('Snapshot creation failed. %s' % e)
Exemple #22
0
def test_umount_raise_error():
    # Drive mount.umount('/') while the underlying system call fails with
    # "device is busy".
    busy = linux.LinuxError('', '', 'umount: /: device is busy.\n', 1, ())
    failing_system = mock.Mock(side_effect=busy)
    with mock.patch('scalarizr.linux.system', failing_system):
        mount.umount('/')
Exemple #23
0
 def _destroy(self, force, **kwds):
     # `force` and extra kwargs are part of the destroy interface but are
     # not needed here: tearing down this storage only requires unmounting.
     mount.umount(self.mpoint)