Example #1
    def move_mysqldir_to(self, storage_path):
        LOG.info('Moving mysql dir to %s' % storage_path)
        for directive, dirname in (
                ('mysqld/log_bin', os.path.join(storage_path, STORAGE_BINLOG)),
                ('mysqld/datadir', os.path.join(storage_path, STORAGE_DATA_DIR) + '/')):

            dest = os.path.dirname(dirname)
            if os.path.isdir(dest):
                LOG.info('No need to move %s to %s: already in place.' % (directive, dest))
            else:
                os.makedirs(dest)

                raw_value = self.my_cnf.get(directive)
                LOG.debug('directive %s:%s' % (directive, raw_value))
                if raw_value:
                    src_dir = os.path.dirname(raw_value + "/") + "/"
                    LOG.debug('source path: %s' % src_dir)
                    if os.path.isdir(src_dir) and src_dir != dest:
                        selinuxenabled = software.which('selinuxenabled')
                        if selinuxenabled:
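                            # system2() returns (out, err, returncode); selinuxenabled exits 0 when SELinux is active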
                            if not system2((selinuxenabled, ), raise_exc=False)[2]:
                                if not system2((software.which('getsebool'), 'mysqld_disable_trans'), raise_exc=False)[2]:
                                    LOG.debug('Make SELinux rule for rsync')
                                    system2((software.which('setsebool'), '-P', 'mysqld_disable_trans', '1'))

                        LOG.info('Copying mysql directory \'%s\' to \'%s\'', src_dir, dest)
                        rsync(src_dir, dest, archive=True, exclude=['ib_logfile*', '*.sock'])

            self.my_cnf.set(directive, dirname)
            chown_r(dest, "mysql", "mysql")
            # Adding rules to apparmor config
            if disttool.is_debian_based():
                _add_apparmor_rules(dest)
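
A minimal sketch of how the rsync() helper used throughout these examples appears to behave, assuming it is scalarizr's wrapper that maps keyword arguments onto rsync long options and returns an (out, err, returncode) tuple, as Example #9 relies on. The paths and the exact signature here are illustrative assumptions:

    # Hypothetical usage of the rsync() wrapper; not taken verbatim from the examples.
    out, err, returncode = rsync(
        '/var/lib/mysql/',                      # trailing slash: copy contents, not the directory
        '/mnt/dbstorage/mysql-data/',
        archive=True,                           # --archive
        exclude=['ib_logfile*', '*.sock'])      # repeated --exclude options
    if returncode:
        raise Exception('rsync failed: %s' % err)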
Example #2
    def move_mysqldir_to(self, storage_path):
        LOG.info('Moving mysql dir to %s' % storage_path)
        for directive, dirname in (
            ('mysqld/log_bin', os.path.join(storage_path, STORAGE_BINLOG)),
            ('mysqld/datadir',
             os.path.join(storage_path, STORAGE_DATA_DIR) + '/')):

            dest = os.path.dirname(dirname)
            if os.path.isdir(dest):
                LOG.info('No need to move %s to %s: already in place.' %
                         (directive, dest))
            else:
                os.makedirs(dest)

                raw_value = self.my_cnf.get(directive)
                LOG.debug('directive %s:%s' % (directive, raw_value))
                if raw_value and node.__node__['platform'] != 'openstack':
                    src_dir = os.path.dirname(raw_value + "/") + "/"
                    LOG.debug('source path: %s' % src_dir)
                    if os.path.isdir(src_dir) and src_dir != dest:
                        selinuxenabled = software.which('selinuxenabled')
                        if selinuxenabled:
                            if not system2(
                                (selinuxenabled, ), raise_exc=False)[2]:
                                if not system2((software.which('getsebool'),
                                                'mysqld_disable_trans'),
                                               raise_exc=False)[2]:
                                    LOG.debug('Make SELinux rule for rsync')
                                    system2((software.which('setsebool'), '-P',
                                             'mysqld_disable_trans', '1'))
                                else:
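                                    # mysqld_disable_trans boolean absent from the policy: label the rsync binary bin_t instead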
                                    semanage = get_semanage()
                                    system2((semanage, 'fcontext', '-a', '-t',
                                             'bin_t', '/usr/bin/rsync'))
                                    system2((software.which('restorecon'),
                                             '-v', '/usr/bin/rsync'))

                        LOG.info('Copying mysql directory \'%s\' to \'%s\'',
                                 src_dir, dest)
                        rsync(src_dir,
                              dest,
                              archive=True,
                              exclude=['ib_logfile*', '*.sock'])

            self.my_cnf.set(directive, dirname)
            chown_r(dest, "mysql", "mysql")
            # Adding rules to apparmor config
            if disttool.is_debian_based():
                _add_apparmor_rules(dest)
Example #3
    def on_host_init_response(self, msg):
        '''
        Store volume configuration from HIR message
        '''
        self.LOG.debug('Called on_host_init_response')
        ini = msg.body.get(_bhs.cloud_controller, {})
        self.volume_config = ini.pop(
            'volume_config',
            dict(type='loop', file='/mnt/cfdata.loop', size=500))
        '''
        Plug storage, initialize database.
        Why here? Because before_host_up routines could be executed after MysqlHandler
        and that would lead to failure.
        '''
        with bus.initialization_op as op:
            with op.phase(self._phase_cloudfoundry):
                with op.step(self._step_create_storage):
                    # Initialize storage
                    LOG.info('Initializing vcap data storage')
                    tmp_mpoint = '/mnt/tmp.vcap'
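                    # Plug the volume at a temporary mountpoint, seed it from the current datadir if needed, then remount at the real location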
                    try:
                        self.volume = self._plug_storage(mpoint=tmp_mpoint)
                        if not _cf.valid_datadir(tmp_mpoint):
                            LOG.info('Copying data from %s to storage',
                                     _datadir)
                            rsync(_datadir + '/',
                                  tmp_mpoint,
                                  archive=True,
                                  delete=True)

                        LOG.debug('Mounting storage to %s', _datadir)
                        self.volume.umount()
                        self.volume.mount(_datadir)
                    except:
                        LOG.exception('Failed to initialize storage')
                    finally:
                        if os.path.exists(tmp_mpoint):
                            os.removedirs(tmp_mpoint)
                    self.volume_config = self.volume.config()

                with op.step(self._step_locate_nginx):
                    _cf.components[
                        'cloud_controller'].allow_external_app_uris = True
                    self._locate_nginx()

                with op.step(self._step_create_database):
                    _cf.init_db()
Example #4
    def on_host_init_response(self, msg):
        '''
        Store volume configuration from HIR message
        '''
        log = bus.init_op.logger if bus.init_op else self.LOG
        self.LOG.debug('Called on_host_init_response')
        ini = msg.body.get(_bhs.cloud_controller, {})
        self.volume_config = ini.pop('volume_config', dict(
            type='loop', 
            file='/mnt/cfdata.loop',
            size=500
        ))

        '''
        Plug storage, initialize database.
        Why here? Because before_host_up routines could be executed after MysqlHandler
        and that would lead to failure.
        '''

        log.info('Creating VCAP data storage')
        # Initialize storage
        tmp_mpoint = '/mnt/tmp.vcap'
        try:
            self.volume = self._plug_storage(mpoint=tmp_mpoint)
            if not _cf.valid_datadir(tmp_mpoint):
                LOG.info('Copying data from %s to storage', _datadir)
                rsync(_datadir + '/', tmp_mpoint, archive=True, delete=True)

            LOG.debug('Mounting storage to %s', _datadir)
            self.volume.umount()
            self.volume.mount(_datadir)
        except:
            LOG.exception('Failed to initialize storage')
        finally:
            if os.path.exists(tmp_mpoint):
                os.removedirs(tmp_mpoint)
        self.volume_config = self.volume.config()

        log.info('Locating Nginx frontend')
        _cf.components['cloud_controller'].allow_external_app_uris = True
        self._locate_nginx()

        log.info('Creating CloudController database')
        _cf.init_db()
Example #5
    def _copy_rec(self, source, dest, xattr=True):
        LOG.info("Copying %s into the image %s", source, dest)
        rsync_longs = dict(archive=True, sparse=True, times=True)
        if self.excludes:
            rsync_longs['exclude'] = list(self.excludes)
        #rsync = filetool.Rsync()
        #rsync.archive().times().sparse().links().quietly()
        #rsync.archive().sparse().xattributes()
        #rsync.archive().sparse().times()

        if xattr:
            rsync_longs['xattrs'] = True
        try:
            rsync.rsync(source, dest, **rsync_longs)
        except linux.LinuxError as e:
            if e.returncode == 24:
                LOG.warn(
                    "rsync exited with error code 24. This means a partial transfer due to vanished "
                    "source files. In most cases files are copied normally")
            elif e.returncode == 23:
                LOG.warn(
                    "rsync seemed successful but exited with error code 23. This probably means "
                    "that your version of rsync was built against a kernel with HAVE_LUTIMES defined, "
                    "although the current kernel was not built with this option enabled. The bundling "
                    "process will thus ignore the error and continue bundling. If bundling completes "
                    "successfully, your image should be perfectly usable. We, however, recommend that "
                    "you install a version of rsync that handles this situation more elegantly.")
            elif e.returncode == 1 and xattr:
                LOG.warn(
                    "rsync with preservation of extended file attributes failed. Retrying rsync "
                    "without attempting to preserve extended file attributes...")
                self._copy_rec(source, dest, xattr=False)
            else:
                raise
Example #6
    def on_host_init_response(self, msg):
        """
        Store volume configuration from HIR message
        """
        self.LOG.debug("Called on_host_init_response")
        ini = msg.body.get(_bhs.cloud_controller, {})
        self.volume_config = ini.pop("volume_config", dict(type="loop", file="/mnt/cfdata.loop", size=500))

        """
        Plug storage, initialize database.
        Why here? Because before_host_up routines could be executed after MysqlHandler
        and that would lead to failure.
        """
        with bus.initialization_op as op:
            with op.phase(self._phase_cloudfoundry):
                with op.step(self._step_create_storage):
                    # Initialize storage
                    LOG.info("Initializing vcap data storage")
                    tmp_mpoint = "/mnt/tmp.vcap"
                    try:
                        self.volume = self._plug_storage(mpoint=tmp_mpoint)
                        if not _cf.valid_datadir(tmp_mpoint):
                            LOG.info("Copying data from %s to storage", _datadir)
                            rsync(_datadir + "/", tmp_mpoint, archive=True, delete=True)

                        LOG.debug("Mounting storage to %s", _datadir)
                        self.volume.umount()
                        self.volume.mount(_datadir)
                    except:
                        LOG.exception("Failed to initialize storage")
                    finally:
                        if os.path.exists(tmp_mpoint):
                            os.removedirs(tmp_mpoint)
                    self.volume_config = self.volume.config()

                with op.step(self._step_locate_nginx):
                    _cf.components["cloud_controller"].allow_external_app_uris = True
                    self._locate_nginx()

                with op.step(self._step_create_database):
                    _cf.init_db()
Example #7
    def _copy_rec(self, source, dest, xattr=True):
        LOG.info("Copying %s into the image %s", source, dest)
        rsync_longs = dict(archive=True, sparse=True, times=True)
        if self.excludes:
            rsync_longs['exclude'] = list(self.excludes)
        #rsync = filetool.Rsync()
        #rsync.archive().times().sparse().links().quietly()
        #rsync.archive().sparse().xattributes()
        #rsync.archive().sparse().times()

        if xattr:
            rsync_longs['xattrs'] = True
        try:
            rsync.rsync(source, dest, **rsync_longs)
        except linux.LinuxError as e:
            if e.returncode == 24:
                LOG.warn(
                    "rsync exited with error code 24. This means a partial transfer due to vanished "
                    "source files. In most cases files are copied normally")
            elif e.returncode == 23:
                LOG.warn(
                    "rsync seemed successful but exited with error code 23. This probably means "
                    "that your version of rsync was built against a kernel with HAVE_LUTIMES defined, "
                    "although the current kernel was not built with this option enabled. The bundling "
                    "process will thus ignore the error and continue bundling. If bundling completes "
                    "successfully, your image should be perfectly usable. We, however, recommend that "
                    "you install a version of rsync that handles this situation more elegantly.")
            elif e.returncode == 1 and xattr:
                LOG.warn(
                    "rsync with preservation of extended file attributes failed. Retrying rsync "
                    "without attempting to preserve extended file attributes...")
                self._copy_rec(source, dest, xattr=False)
            else:
                raise
Example #8
    def rebundle(self):
        rebundle_dir = tempfile.mkdtemp()

        try:
            pl = bus.platform
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.new_storage_client()

            tmp_mount_dir = os.path.join(rebundle_dir, 'root')
            os.makedirs(tmp_mount_dir)

            image_name      = 'disk.raw'
            image_path      = os.path.join(rebundle_dir, image_name)

            root_size = coreutils.statvfs('/')['size']
            LOG.debug('Creating image file %s' % image_path)
            with open(image_path, 'w') as f:
                f.truncate(root_size + 1*1024)

            try:

                LOG.debug('Creating partition table on image')
                system(('parted', image_path, 'mklabel', 'msdos'))
                system(('parted', image_path, 'mkpart', 'primary', 'ext2', 1, str(root_size/(1024*1024))))

                # Map disk image
                out = system(('kpartx', '-av', image_path))[0]
                try:
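                    # kpartx -av prints lines like 'add map loop0p1 ...'; recover the loop device to address the first partition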
                    loop = re.search(r'(/dev/loop\d+)', out).group(1)
                    root_dev_name = '/dev/mapper/%sp1' % loop.split('/')[-1]

                    LOG.info('Creating filesystem')
                    storage2.filesystem('ext4').mkfs(root_dev_name)
                    dev_uuid = uuid.uuid4()
                    system(('tune2fs', '-U', str(dev_uuid), root_dev_name))

                    mount.mount(root_dev_name, tmp_mount_dir)
                    try:
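                        # exclude every mountpoint other than '/' so foreign filesystems don't leak into the image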
                        lines = system(('/bin/mount', '-l'))[0].splitlines()
                        exclude_dirs = set()
                        for line in lines:
                            mpoint = line.split()[2]
                            if mpoint != '/':
                                exclude_dirs.add(mpoint)

                        exclude_dirs.update(self.exclude_dirs)

                        excludes = [os.path.join(ex, '**') for ex in exclude_dirs]
                        excludes.extend(self.exclude_files)
                        excludes.extend(self._excludes)

                        LOG.info('Copying root filesystem to image')
                        rsync('/', tmp_mount_dir,
                              archive=True,
                              hard_links=True,
                              times=True,
                              sparse=True,
                              exclude=excludes)

                        LOG.info('Cleanup image')
                        self._create_spec_devices(tmp_mount_dir)

                        LOG.debug('Removing roles-builder user')
                        sh = pexpect.spawn('/bin/sh')
                        try:
                            sh.sendline('chroot %s' % tmp_mount_dir)
                            sh.expect('#')
                            sh.sendline('userdel -rf %s' % ROLEBUILDER_USER)
                            sh.expect('#')
                        finally:
                            sh.close()

                        """ Patch fstab"""
                        fstab_path = os.path.join(tmp_mount_dir, 'etc/fstab')
                        if os.path.exists(fstab_path):
                            with open(fstab_path) as f:
                                fstab = f.read()

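                            # point the root entry at the UUID assigned with tune2fs above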
                            new_fstab = re.sub(r'UUID=\S+\s+/\s+(.*)', 'UUID=%s / \\1' % dev_uuid, fstab)

                            with open(fstab_path, 'w') as f:
                                f.write(new_fstab)

                    finally:
                        mount.umount(root_dev_name)
                finally:
                    system(('kpartx', '-d', image_path))

                LOG.info('Compressing image.')
                arch_name = '%s.tar.gz' % self._role_name.lower()
                arch_path = os.path.join(rebundle_dir, arch_name)

                tar = Tar()
                tar.create().gzip().sparse()
                tar.archive(arch_path)
                tar.add(image_name, rebundle_dir)
                system(str(tar), shell=True)

            finally:
                os.unlink(image_path)

            try:
                LOG.info('Uploading compressed image to cloud storage')
                uploader = transfer.Transfer(logger=LOG)
                tmp_bucket_name = 'scalr-images-%s-%s' % (
                    random.randint(1, 1000000), int(time.time()))

                try:
                    remote_path = 'gcs://%s/' % tmp_bucket_name
                    uploader.upload((arch_path,), remote_path)
                except:
                    try:
                        objs = cloudstorage.objects()
                        objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
                    except:
                        pass

                    cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
                    raise

            finally:
                os.unlink(arch_path)

        finally:
            shutil.rmtree(rebundle_dir)

        try:
            goog_image_name = self._role_name.lower().replace('_', '-')
            LOG.info('Registering new image %s' % goog_image_name)
            # TODO: check duplicate names
            compute = pl.new_compute_client()

            current_image_fq = pl.get_image().split('/')
            current_img_project = current_image_fq[1]
            current_img_name = current_image_fq[3]
            current_img_obj = compute.images().get(
                project=current_img_project, image=current_img_name).execute()
            kernel = current_img_obj['preferredKernel']

            image_url = 'http://storage.googleapis.com/%s/%s' % (
                tmp_bucket_name, arch_name)

            req_body = dict(
                    name=goog_image_name,
                    sourceType='RAW',
                    preferredKernel=kernel,
                    rawDisk=dict(
                            containerType='TAR',
                            source=image_url
                    )
            )

            req = compute.images().insert(project=proj_id, body=req_body)
            operation = req.execute()['name']

            LOG.info('Waiting for image to register')
            def image_is_ready():
                req = compute.globalOperations().get(project=proj_id, operation=operation)
                res = req.execute()
                if res['status'] == 'DONE':
                    if res.get('error'):
                        errors = []
                        for e in res['error']['errors']:
                            err_text = '%s: %s' % (e['code'], e['message'])
                            errors.append(err_text)
                        raise Exception('\n'.join(errors))
                    return True
                return False
            wait_until(image_is_ready, logger=LOG, timeout=600)

        finally:
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()

        return '%s/images/%s' % (proj_name, goog_image_name)
Example #9
                finally:
                    fp.close()

            self._logger.debug('Trying to extract cassandra snapshot to temporary EBS storage')
            snap = tarfile.open(snap_path)
            snap.extractall(TMP_EBS_MNTPOINT)
            snap.close()
            self._logger.debug('Snapshot successfully extracted')
            os.remove(snap_path)

            ebs_devname, ebs_volume = self._create_attach_mount_volume(
                storage_size, auto_mount=True, mpoint=cassandra.storage_path)
            self._update_config({OPT_STORAGE_VOLUME_ID: ebs_volume.id,
                                 OPT_STORAGE_DEVICE_NAME: ebs_devname})
            self._create_dbstorage_fslayout()

            self._logger.debug('Copying snapshot')
            out = rsync(TMP_EBS_MNTPOINT + os.sep, cassandra.data_file_directory, archive=True)
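            # rsync() returns (out, err, returncode); a non-zero exit code means the copy failed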
            if out[2]:
                raise HandlerError('Error while copying snapshot content from temp ebs to permanent: %s' % out[1])

            self._logger.debug('Snapshot successfully copied from temporary ebs to permanent')

            cassandra.cassandra_conf.set('Storage/AutoBootstrap', 'False')

            self._change_dbstorage_location()
            snap_id = cassandra.create_snapshot(ebs_volume.id)
            cassandra.start_service()
            message.cassandra.update(dict(volume_id=ebs_volume.id, snapshot_id=snap_id))
        finally:
            try:
                self._umount_detach_delete_volume(tmp_ebs_devname, temp_ebs_dev)
                os.removedirs(TMP_EBS_MNTPOINT)
Example #10
    def rebundle(self):
        rebundle_dir = tempfile.mkdtemp()

        try:
            pl = bus.platform
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.new_storage_client()

            tmp_mount_dir = os.path.join(rebundle_dir, 'root')
            os.makedirs(tmp_mount_dir)

            image_name = 'disk.raw'
            image_path = os.path.join(rebundle_dir, image_name)

            root_size = coreutils.statvfs('/')['size']
            LOG.debug('Creating image file %s' % image_path)
            with open(image_path, 'w') as f:
                f.truncate(root_size + 1 * 1024)

            try:

                LOG.debug('Creating partition table on image')
                system(('parted', image_path, 'mklabel', 'msdos'))
                system(('parted', image_path, 'mkpart', 'primary', 'ext2', 1,
                        str(root_size / (1024 * 1024))))

                # Map disk image
                out = system(('kpartx', '-av', image_path))[0]
                try:
                    loop = re.search(r'(/dev/loop\d+)', out).group(1)
                    root_dev_name = '/dev/mapper/%sp1' % loop.split('/')[-1]

                    LOG.info('Creating filesystem')
                    storage2.filesystem('ext4').mkfs(root_dev_name)
                    dev_uuid = uuid.uuid4()
                    system(('tune2fs', '-U', str(dev_uuid), root_dev_name))

                    mount.mount(root_dev_name, tmp_mount_dir)
                    try:
                        lines = system(('/bin/mount', '-l'))[0].splitlines()
                        exclude_dirs = set()
                        for line in lines:
                            mpoint = line.split()[2]
                            if mpoint != '/':
                                exclude_dirs.add(mpoint)

                        exclude_dirs.update(self.exclude_dirs)

                        excludes = [
                            os.path.join(ex, '**') for ex in exclude_dirs
                        ]
                        excludes.extend(self.exclude_files)
                        excludes.extend(self._excludes)

                        LOG.info('Copying root filesystem to image')
                        rsync('/',
                              tmp_mount_dir,
                              archive=True,
                              hard_links=True,
                              times=True,
                              sparse=True,
                              exclude=excludes)

                        LOG.info('Cleanup image')
                        self._create_spec_devices(tmp_mount_dir)

                        LOG.debug('Removing roles-builder user')
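                        # userdel must run inside the image, so drive /bin/sh through chroot with pexpect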
                        sh = pexpect.spawn('/bin/sh')
                        try:
                            sh.sendline('chroot %s' % tmp_mount_dir)
                            sh.expect('#')
                            sh.sendline('userdel -rf %s' % ROLEBUILDER_USER)
                            sh.expect('#')
                        finally:
                            sh.close()
                        """ Patch fstab"""
                        fstab_path = os.path.join(tmp_mount_dir, 'etc/fstab')
                        if os.path.exists(fstab_path):
                            with open(fstab_path) as f:
                                fstab = f.read()

                            new_fstab = re.sub(r'UUID=\S+\s+/\s+(.*)',
                                               'UUID=%s / \\1' % dev_uuid,
                                               fstab)

                            with open(fstab_path, 'w') as f:
                                f.write(new_fstab)

                    finally:
                        mount.umount(root_dev_name)
                finally:
                    system(('kpartx', '-d', image_path))

                LOG.info('Compressing image.')
                arch_name = '%s.tar.gz' % self._role_name.lower()
                arch_path = os.path.join(rebundle_dir, arch_name)

                tar = Tar()
                tar.create().gzip().sparse()
                tar.archive(arch_path)
                tar.add(image_name, rebundle_dir)
                system(str(tar), shell=True)

            finally:
                os.unlink(image_path)

            try:
                LOG.info('Uploading compressed image to cloud storage')
                uploader = transfer.Transfer(logger=LOG)
                tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(
                    1, 1000000), int(time.time()))

                try:
                    remote_path = 'gcs://%s/' % tmp_bucket_name
                    uploader.upload((arch_path, ), remote_path)
                except:
                    try:
                        objs = cloudstorage.objects()
                        objs.delete(bucket=tmp_bucket_name,
                                    object=arch_name).execute()
                    except:
                        pass

                    cloudstorage.buckets().delete(
                        bucket=tmp_bucket_name).execute()
                    raise

            finally:
                os.unlink(arch_path)

        finally:
            shutil.rmtree(rebundle_dir)

        try:
            goog_image_name = self._role_name.lower().replace('_', '-')
            LOG.info('Registering new image %s' % goog_image_name)
            # TODO: check duplicate names
            compute = pl.new_compute_client()

            current_image_fq = pl.get_image().split('/')
            current_img_project = current_image_fq[1]
            current_img_name = current_image_fq[3]
            current_img_obj = compute.images().get(
                project=current_img_project, image=current_img_name).execute()
            kernel = current_img_obj['preferredKernel']

            image_url = 'http://storage.googleapis.com/%s/%s' % (
                tmp_bucket_name, arch_name)

            req_body = dict(name=goog_image_name,
                            sourceType='RAW',
                            preferredKernel=kernel,
                            rawDisk=dict(containerType='TAR',
                                         source=image_url))

            req = compute.images().insert(project=proj_id, body=req_body)
            operation = req.execute()['name']

            LOG.info('Waiting for image to register')

            def image_is_ready():
                req = compute.globalOperations().get(project=proj_id,
                                                     operation=operation)
                res = req.execute()
                if res['status'] == 'DONE':
                    if res.get('error'):
                        errors = []
                        for e in res['error']['errors']:
                            err_text = '%s: %s' % (e['code'], e['message'])
                            errors.append(err_text)
                        raise Exception('\n'.join(errors))
                    return True
                return False

            wait_until(image_is_ready, logger=LOG, timeout=600)

        finally:
            objs = cloudstorage.objects()
            objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
            cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()

        return '%s/images/%s' % (proj_name, goog_image_name)