Code example #1
File: ec2.py  Project: worldline/scalarizr
    def _assure_space(self):
        """
        Assures that there is enough free space on the destination device
        for the image.
        """
        avail_space = coreutils.statvfs(self.destination)['avail'] / 1024 / 1024
        if avail_space <= self.image_size:
            os.mkdir('/mnt/temp-vol')
            LOG.debug('Making temp volume')
            self.temp_vol = self.make_volume(
                {'size': self.image_size, 'tags': {'scalr-status': 'temporary'}},
                '/mnt/temp-vol',
                mount_vol=True)
            self.destination = '/mnt/temp-vol'
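
For reference, the guard above boils down to comparing the destination's available space, converted to MiB, against the image size. A minimal stdlib-only sketch of that calculation, assuming coreutils.statvfs reports byte counts the way os.statvfs does (the '/mnt' path and 10 GiB threshold are made up for illustration):

import os

def free_space_mb(path):
    # Blocks available to unprivileged users times the fragment size,
    # converted to MiB: roughly the 'avail' figure used above.
    st = os.statvfs(path)
    return st.f_bavail * st.f_frsize / 1024 / 1024

if free_space_mb('/mnt') <= 10 * 1024:
    print('not enough space; a temporary volume would be provisioned here')
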
Code example #2
File: eph.py  Project: AnyBucket/scalarizr
    def upload_lvm_snapshot(self, lvm_snap, tags, path):
        """
        Method which uploads data from lvm snapshot to cloud storage and
        updates snapshot status.

        EphVolume runs this method in separate thread
        """


        try:
            self._snap_status = self.QUEUED
            mpoint = tempfile.mkdtemp()
            opts = []
            if coreutils.blkid(lvm_snap.device).get('type') == 'xfs':
                opts += ['-o', 'nouuid,ro']
            mount.mount(lvm_snap.device, mpoint, *opts)

            self.data_size = coreutils.statvfs(mpoint)['used']

            try:
                transfer = cloudfs.LargeTransfer(src=mpoint + '/',
                                                 dst=path,
                                                 tar_it=True,
                                                 gzip_it=True,
                                                 tags=tags,
                                                 transfer_id=self.id)
                self._snap_status = self.IN_PROGRESS
                manifesto = transfer.run()
                self.path = manifesto.cloudfs_path
                self._snap_status = self.COMPLETED

            finally:
                mount.umount(mpoint)
                os.rmdir(mpoint)

        except:
            self._snap_status = self.FAILED
            LOG.exception('Caught error while uploading LVM snapshot')
        finally:
            lvm_snap.destroy()
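
Two details are easy to miss here: an LVM snapshot of an XFS filesystem keeps the origin's UUID, so it has to be mounted with nouuid (and read-only) to coexist with the mounted origin, and data_size is measured on the mounted snapshot so the restore side can later verify there is room for it. A stdlib sketch of that 'used' figure, assuming the same byte units as os.statvfs:

import os

def used_bytes(mpoint):
    # Total blocks minus free blocks, scaled by the fragment size:
    # roughly what coreutils.statvfs(mpoint)['used'] appears to report.
    st = os.statvfs(mpoint)
    return (st.f_blocks - st.f_bfree) * st.f_frsize
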
Code example #3
File: eph.py  Project: kenorb-contrib/scalarizr
    def upload_lvm_snapshot(self, lvm_snap, tags, path):
        """
        Method which uploads data from lvm snapshot to cloud storage and
        updates snapshot status.

        EphVolume runs this method in separate thread
        """


        try:
            self._snap_status = self.QUEUED
            mpoint = tempfile.mkdtemp()
            opts = []
            if coreutils.blkid(lvm_snap.device).get('type') == 'xfs':
                opts += ['-o', 'nouuid,ro']
            mount.mount(lvm_snap.device, mpoint, *opts)

            self.data_size = coreutils.statvfs(mpoint)['used']

            try:
                cmd = ['/bin/tar', 'cp', mpoint]
                stream = subprocess.Popen(cmd, stdout=subprocess.PIPE, close_fds=True).stdout
                src = cloudfs.NamedStream(stream, 'lvm_snapshot', streamer='tar', extension='tar')
                dst = path
                transfer = largetransfer.Upload(src, dst, tags=tags, transfer_id=self.id)
                self._snap_status = self.IN_PROGRESS
                transfer.apply_async()
                transfer.join()
                manifesto = transfer.manifest
                self.path = manifesto.cloudfs_path
                self._snap_status = self.COMPLETED

            finally:
                mount.umount(mpoint)
                os.rmdir(mpoint)

        except:
            self._snap_status = self.FAILED
            LOG.exception('Caught error while uploading LVM snapshot')
        finally:
            lvm_snap.destroy()
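
This fork drops cloudfs.LargeTransfer in favour of an explicit pipeline: tar archives the mounted snapshot to stdout and largetransfer.Upload consumes that stream, so nothing is staged on local disk. A reduced sketch of the same pipe pattern, with a byte counter standing in for the uploader (the helper below is made up for illustration, not the project's API):

import subprocess

def stream_tar_size(path):
    # Archive `path` to stdout and count the streamed bytes.
    proc = subprocess.Popen(['tar', 'c', '-f', '-', path],
                            stdout=subprocess.PIPE, close_fds=True)
    total = 0
    for chunk in iter(lambda: proc.stdout.read(64 * 1024), b''):
        total += len(chunk)
    proc.stdout.close()
    return total, proc.wait()
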
Code example #4
    def upload_lvm_snapshot(self, lvm_snap, tags, path):
        """
        Method which uploads data from lvm snapshot to cloud storage and
        updates snapshot status.

        EphVolume runs this method in separate thread
        """

        try:
            self._snap_status = self.QUEUED
            mpoint = tempfile.mkdtemp()
            opts = []
            if coreutils.blkid(lvm_snap.device).get('type') == 'xfs':
                opts += ['-o', 'nouuid,ro']
            mount.mount(lvm_snap.device, mpoint, *opts)

            self.data_size = coreutils.statvfs(mpoint)['used']

            try:
                transfer = cloudfs.LargeTransfer(src=mpoint + '/',
                                                 dst=path,
                                                 tar_it=True,
                                                 gzip_it=True,
                                                 tags=tags,
                                                 transfer_id=self.id)
                self._snap_status = self.IN_PROGRESS
                manifesto = transfer.run()
                self.path = manifesto.cloudfs_path
                self._snap_status = self.COMPLETED

            finally:
                mount.umount(mpoint)
                os.rmdir(mpoint)

        except:
            self._snap_status = self.FAILED
            LOG.exception('Caught error while uploading LVM snapshot')
        finally:
            lvm_snap.destroy()
Code example #5
File: eph.py  Project: AnyBucket/scalarizr
    def _ensure(self):
        # snap should be applied after layout: download and extract data.
        # this could be done on already ensured volume.
        # Example: resync slave data

        if not self._lvm_volume:
            # First of all, merge self config and snapshot config
            self.snap = storage2.snapshot(self.snap) if self.snap else None

            for attr in ('fstype', 'size', 'vg', 'mpoint'):
                if not getattr(self, attr, None):
                    if not self.snap or not getattr(self.snap, attr, None):
                        raise storage2.StorageError('Missing ephemeral volume attribute "%s"' % attr)
                    setattr(self, attr, getattr(self.snap, attr))
            if not (self.disk or self.disks):
                raise storage2.StorageError('Missing "disk" or "disks" attribute')

            if self.disk:
                self.disk = storage2.volume(self.disk)
                # Compatibility with storage v1
                if self.disk.device and self.disk.type == 'base':
                    if self.disk.device.startswith('/dev/sd'):
                        self.disk = storage2.volume(type='ec2_ephemeral', name='ephemeral0')
                    elif 'google' in self.disk.device:
                        self.disk = storage2.volume(type='gce_ephemeral', name='ephemeral-disk-0')

            self._lvm_volume = storage2.volume(
                            type='lvm',
                            pvs=[self.disk] if self.disk else self.disks,
                            size=self.size + 'VG',
                            vg=self.vg,
                            name='data')

        self._lvm_volume.ensure()
        self.device = self._lvm_volume.device
        # To allow ensure(mkfs=True, mount=True) after volume passed
        # scalarizr 1st initialization
        self.fscreated = self.is_fs_created()

        if self.snap:
            self.snap = storage2.snapshot(self.snap)
            # umount device to allow filesystem re-creation
            if self.mounted_to():
                self.umount()
            self.mkfs(force=True)

            tmp_mpoint = not self.mpoint
            if tmp_mpoint:
                tmp_mpoint = tempfile.mkdtemp()
                self.mpoint = tmp_mpoint

            try:
                transfer = cloudfs.LargeTransfer(self.snap.path, self.mpoint + '/')
                self.mount()
                if hasattr(self.snap, 'data_size'):
                    fs_free = coreutils.statvfs(self.mpoint)['avail']
                    if fs_free < int(self.snap.data_size):
                        raise storage2.StorageError('Not enough free space'
                                        ' on device %s to restore snapshot.' %
                                        self.device)

                result = transfer.run()
                if result.get('failed'):
                    err = result['failed'][0]['exc_info'][1]
                    raise storage2.StorageError('Failed to download snapshot '
                                                'data. %s' % err)
            except:
                e = sys.exc_info()[1]
                raise storage2.StorageError("Snapshot restore error: %s" % e)
            finally:
                try:
                    self.umount()
                finally:
                    if tmp_mpoint:
                        self.mpoint = None
                        os.rmdir(tmp_mpoint)

            self.snap = None
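
The restore path closes the loop: the data_size recorded at snapshot time (the 'used' figure above) is checked against the free space on the freshly created filesystem before the download starts. A compact sketch of that guard, again stdlib-only and assuming byte units:

import os

def can_restore(mpoint, snapshot_data_size):
    # Enough free bytes at `mpoint` to hold the data recorded at snapshot time?
    st = os.statvfs(mpoint)
    return st.f_bavail * st.f_frsize >= int(snapshot_data_size)
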
Code example #6
File: rebundle.py  Project: notbrain/scalarizr
    def rebundle(self):
        rebundle_dir = tempfile.mkdtemp()

        try:
            pl = bus.platform
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.new_storage_client()

            tmp_mount_dir = os.path.join(rebundle_dir, 'root')
            os.makedirs(tmp_mount_dir)

            image_name      = 'disk.raw'
            image_path      = os.path.join(rebundle_dir, image_name)

            root_size = coreutils.statvfs('/')['size']
            LOG.debug('Creating image file %s' % image_path)
            with open(image_path, 'w') as f:
                f.truncate(root_size + 1*1024)

            try:

                LOG.debug('Creating partition table on image')
                system(('parted', image_path, 'mklabel', 'msdos'))
                system(('parted', image_path, 'mkpart', 'primary', 'ext2', 1, str(root_size/(1024*1024))))

                # Map disk image
                out = system(('kpartx', '-av', image_path))[0]
                try:
                    loop = re.search('(/dev/loop\d+)', out).group(1)
                    root_dev_name = '/dev/mapper/%sp1' % loop.split('/')[-1]

                    LOG.info('Creating filesystem')
                    storage2.filesystem('ext4').mkfs(root_dev_name)
                    dev_uuid = uuid.uuid4()
                    system(('tune2fs', '-U', str(dev_uuid), root_dev_name))

                    mount.mount(root_dev_name, tmp_mount_dir)
                    try:
                        lines = system(('/bin/mount', '-l'))[0].splitlines()
                        exclude_dirs = set()
                        for line in lines:
                            mpoint = line.split()[2]
                            if mpoint != '/':
                                exclude_dirs.add(mpoint)

                        exclude_dirs.update(self.exclude_dirs)

                        excludes = [os.path.join(ex, '**') for ex in exclude_dirs]
                        excludes.extend(self.exclude_files)
                        excludes.extend(self._excludes)

                        LOG.info('Copying root filesystem to image')
                        rsync('/', tmp_mount_dir, archive=True,
                              hard_links=True,
                              times=True,
                              sparse=True,
                              exclude=excludes)

                        LOG.info('Cleanup image')
                        self._create_spec_devices(tmp_mount_dir)

                        LOG.debug('Removing roles-builder user')
                        sh = pexpect.spawn('/bin/sh')
                        try:
                            sh.sendline('chroot %s' % tmp_mount_dir)
                            sh.expect('#')
                            sh.sendline('userdel -rf %s' % ROLEBUILDER_USER)
                            sh.expect('#')
                        finally:
                            sh.close()

                        """ Patch fstab"""
                        fstab_path = os.path.join(tmp_mount_dir, 'etc/fstab')
                        if os.path.exists(fstab_path):
                            with open(fstab_path) as f:
                                fstab = f.read()

                            new_fstab = re.sub('UUID=\S+\s+/\s+(.*)', 'UUID=%s / \\1' % dev_uuid, fstab)

                            with open(fstab_path, 'w') as f:
                                f.write(new_fstab)

                    finally:
                        mount.umount(root_dev_name)
                finally:
                    system(('kpartx', '-d', image_path))

                LOG.info('Compressing image.')
                arch_name = '%s.tar.gz' % self._role_name.lower()
                arch_path = os.path.join(rebundle_dir, arch_name)

                tar = Tar()
                tar.create().gzip().sparse()
                tar.archive(arch_path)
                tar.add(image_name, rebundle_dir)
                system(str(tar), shell=True)

            finally:
                os.unlink(image_path)

            try:
                LOG.info('Uploading compressed image to cloud storage')
                uploader = transfer.Transfer(logger=LOG)
                tmp_bucket_name = 'scalr-images-%s-%s' % (
                    random.randint(1, 1000000), int(time.time()))

                try:
                    remote_path = 'gcs://%s/' % tmp_bucket_name
                    uploader.upload((arch_path,), remote_path)
                except:
                    try:
                        objs = cloudstorage.objects()
                        objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
                    except:
                        pass

                    cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
                    raise

            finally:
                os.unlink(arch_path)

        finally:
            shutil.rmtree(rebundle_dir)

        try:
            goog_image_name = self._role_name.lower().replace('_', '-')
            LOG.info('Registering new image %s' % goog_image_name)
            # TODO: check duplicate names
            compute = pl.new_compute_client()

            current_image_fq = pl.get_image().split('/')
            current_img_project = current_image_fq[1]
            current_img_name = current_image_fq[3]
            current_img_obj = compute.images().get(project=current_img_project,
                                                            image=current_img_name).execute()
            kernel = current_img_obj['preferredKernel']

            image_url = 'http://storage.googleapis.com/%s/%s' % (
                tmp_bucket_name, arch_name)

            req_body = dict(
                    name=goog_image_name,
                    sourceType='RAW',
                    preferredKernel=kernel,
                    rawDisk=dict(
                            containerType='TAR',
                            source=image_url
                    )
            )

            req = compute.images().insert(project=proj_id, body=req_body)
            operation = req.execute()['name']

            LOG.info('Waiting for image to register')
            def image_is_ready():
                req = compute.globalOperations().get(project=proj_id, operation=operation)
                res = req.execute()
                if res['status'] == 'DONE':
                    if res.get('error'):
                        errors = []
                        for e in res['error']['errors']:
                            err_text = '%s: %s' % (e['code'], e['message'])
                            errors.append(err_text)
                        raise Exception('\n'.join(errors))
                    return True
                return False
            wait_until(image_is_ready, logger=LOG, timeout=600)

        finally:
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()

        return '%s/images/%s' % (proj_name, goog_image_name)
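
Note that disk.raw has an apparent size equal to the whole root filesystem yet is created in a temp directory on that same filesystem. This works because f.truncate() produces a sparse file, and the rsync(..., sparse=True) copy together with the sparse tar archive keep unwritten regions unallocated. A small illustration of the sparse-file behaviour (the 1 GiB size is arbitrary):

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'disk.raw')
with open(path, 'w') as f:
    f.truncate(1 * 1024 ** 3)   # 1 GiB apparent size, no data written

st = os.stat(path)
print('apparent size: %d' % st.st_size)          # 1073741824
print('allocated: %d' % (st.st_blocks * 512))    # near 0; st_blocks is in 512-byte units
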
Code example #7
    def _ensure(self):
        # snap should be applied after layout: download and extract data.
        # this could be done on already ensured volume.
        # Example: resync slave data

        if not self._lvm_volume:
            # First of all, merge self config and snapshot config
            self.snap = storage2.snapshot(self.snap) if self.snap else None

            for attr in ('disk', 'fstype', 'size', 'vg', 'mpoint'):
                if not getattr(self, attr, None):
                    if not self.snap or not getattr(self.snap, attr, None):
                        raise storage2.StorageError(
                            'Missing ephemeral volume attribute "%s"' % attr)
                    setattr(self, attr, getattr(self.snap, attr))

            self.disk = storage2.volume(self.disk)
            # Compatibility with storage v1
            if self.disk.device and self.disk.type == 'base':
                if self.disk.device.startswith('/dev/sd'):
                    self.disk = storage2.volume(type='ec2_ephemeral',
                                                name='ephemeral0')
                elif 'google' in self.disk.device:
                    self.disk = storage2.volume(type='gce_ephemeral',
                                                name='ephemeral-disk-0')

            self._lvm_volume = storage2.volume(type='lvm',
                                               pvs=[self.disk],
                                               size=self.size + 'VG',
                                               vg=self.vg,
                                               name='data')

        self._lvm_volume.ensure()
        self.device = self._lvm_volume.device
        # To allow ensure(mkfs=True, mount=True) after volume passed
        # scalarizr 1st initialization
        self.fscreated = self.is_fs_created()

        if self.snap:
            self.snap = storage2.snapshot(self.snap)
            # umount device to allow filesystem re-creation
            if self.mounted_to():
                self.umount()
            self.mkfs(force=True)

            tmp_mpoint = not self.mpoint
            if tmp_mpoint:
                tmp_mpoint = tempfile.mkdtemp()
                self.mpoint = tmp_mpoint

            try:
                transfer = cloudfs.LargeTransfer(self.snap.path,
                                                 self.mpoint + '/')
                self.mount()
                if hasattr(self.snap, 'data_size'):
                    fs_free = coreutils.statvfs(self.mpoint)['avail']
                    if fs_free < int(self.snap.data_size):
                        raise storage2.StorageError(
                            'Not enough free space'
                            ' on device %s to restore snapshot.' % self.device)

                result = transfer.run()
                if result.get('failed'):
                    err = result['failed'][0]['exc_info'][1]
                    raise storage2.StorageError('Failed to download snapshot '
                                                'data. %s' % err)
            except:
                e = sys.exc_info()[1]
                raise storage2.StorageError("Snapshot restore error: %s" % e)
            finally:
                try:
                    self.umount()
                finally:
                    if tmp_mpoint:
                        self.mpoint = None
                        os.rmdir(tmp_mpoint)

            self.snap = None
Code example #8
    def rebundle(self):
        rebundle_dir = tempfile.mkdtemp()

        try:
            pl = bus.platform
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.new_storage_client()

            tmp_mount_dir = os.path.join(rebundle_dir, 'root')
            os.makedirs(tmp_mount_dir)

            image_name = 'disk.raw'
            image_path = os.path.join(rebundle_dir, image_name)

            root_size = coreutils.statvfs('/')['size']
            LOG.debug('Creating image file %s' % image_path)
            with open(image_path, 'w') as f:
                f.truncate(root_size + 1 * 1024)

            try:

                LOG.debug('Creating partition table on image')
                system(('parted', image_path, 'mklabel', 'msdos'))
                system(('parted', image_path, 'mkpart', 'primary', 'ext2', 1,
                        str(root_size / (1024 * 1024))))

                # Map disk image
                out = system(('kpartx', '-av', image_path))[0]
                try:
                    loop = re.search('(/dev/loop\d+)', out).group(1)
                    root_dev_name = '/dev/mapper/%sp1' % loop.split('/')[-1]

                    LOG.info('Creating filesystem')
                    storage2.filesystem('ext4').mkfs(root_dev_name)
                    dev_uuid = uuid.uuid4()
                    system(('tune2fs', '-U', str(dev_uuid), root_dev_name))

                    mount.mount(root_dev_name, tmp_mount_dir)
                    try:
                        lines = system(('/bin/mount', '-l'))[0].splitlines()
                        exclude_dirs = set()
                        for line in lines:
                            mpoint = line.split()[2]
                            if mpoint != '/':
                                exclude_dirs.add(mpoint)

                        exclude_dirs.update(self.exclude_dirs)

                        excludes = [
                            os.path.join(ex, '**') for ex in exclude_dirs
                        ]
                        excludes.extend(self.exclude_files)
                        excludes.extend(self._excludes)

                        LOG.info('Copying root filesystem to image')
                        rsync('/',
                              tmp_mount_dir,
                              archive=True,
                              hard_links=True,
                              times=True,
                              sparse=True,
                              exclude=excludes)

                        LOG.info('Cleanup image')
                        self._create_spec_devices(tmp_mount_dir)

                        LOG.debug('Removing roles-builder user')
                        sh = pexpect.spawn('/bin/sh')
                        try:
                            sh.sendline('chroot %s' % tmp_mount_dir)
                            sh.expect('#')
                            sh.sendline('userdel -rf %s' % ROLEBUILDER_USER)
                            sh.expect('#')
                        finally:
                            sh.close()
                        """ Patch fstab"""
                        fstab_path = os.path.join(tmp_mount_dir, 'etc/fstab')
                        if os.path.exists(fstab_path):
                            with open(fstab_path) as f:
                                fstab = f.read()

                            new_fstab = re.sub('UUID=\S+\s+/\s+(.*)',
                                               'UUID=%s / \\1' % dev_uuid,
                                               fstab)

                            with open(fstab_path, 'w') as f:
                                f.write(new_fstab)

                    finally:
                        mount.umount(root_dev_name)
                finally:
                    system(('kpartx', '-d', image_path))

                LOG.info('Compressing image.')
                arch_name = '%s.tar.gz' % self._role_name.lower()
                arch_path = os.path.join(rebundle_dir, arch_name)

                tar = Tar()
                tar.create().gzip().sparse()
                tar.archive(arch_path)
                tar.add(image_name, rebundle_dir)
                system(str(tar), shell=True)

            finally:
                os.unlink(image_path)

            try:
                LOG.info('Uploading compressed image to cloud storage')
                uploader = transfer.Transfer(logger=LOG)
                tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(
                    1, 1000000), int(time.time()))

                try:
                    remote_path = 'gcs://%s/' % tmp_bucket_name
                    uploader.upload((arch_path, ), remote_path)
                except:
                    try:
                        objs = cloudstorage.objects()
                        objs.delete(bucket=tmp_bucket_name,
                                    object=arch_name).execute()
                    except:
                        pass

                    cloudstorage.buckets().delete(
                        bucket=tmp_bucket_name).execute()
                    raise

            finally:
                os.unlink(arch_path)

        finally:
            shutil.rmtree(rebundle_dir)

        try:
            goog_image_name = self._role_name.lower().replace('_', '-')
            LOG.info('Registering new image %s' % goog_image_name)
            # TODO: check duplicate names
            compute = pl.new_compute_client()

            current_image_fq = pl.get_image().split('/')
            current_img_project = current_image_fq[1]
            current_img_name = current_image_fq[3]
            current_img_obj = compute.images().get(
                project=current_img_project, image=current_img_name).execute()
            kernel = current_img_obj['preferredKernel']

            image_url = 'http://storage.googleapis.com/%s/%s' % (
                tmp_bucket_name, arch_name)

            req_body = dict(name=goog_image_name,
                            sourceType='RAW',
                            preferredKernel=kernel,
                            rawDisk=dict(containerType='TAR',
                                         source=image_url))

            req = compute.images().insert(project=proj_id, body=req_body)
            operation = req.execute()['name']

            LOG.info('Waiting for image to register')

            def image_is_ready():
                req = compute.globalOperations().get(project=proj_id,
                                                     operation=operation)
                res = req.execute()
                if res['status'] == 'DONE':
                    if res.get('error'):
                        errors = []
                        for e in res['error']['errors']:
                            err_text = '%s: %s' % (e['code'], e['message'])
                            errors.append(err_text)
                        raise Exception('\n'.join(errors))
                    return True
                return False

            wait_until(image_is_ready, logger=LOG, timeout=600)

        finally:
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()

        return '%s/images/%s' % (proj_name, goog_image_name)