def _prepare_params(self, ctxt, params, allowed):
        if not allowed.issuperset(params):
            invalid_keys = set(params).difference(allowed)
            msg = _('Invalid filter keys: %s') % ', '.join(invalid_keys)
            raise exception.InvalidInput(reason=msg)

        if params.get('binary') not in (None, 'cinder-volume',
                                        'cinder-scheduler'):
            msg = _('binary must be empty or set to cinder-volume or '
                    'cinder-scheduler')
            raise exception.InvalidInput(reason=msg)

        for boolean in ('disabled', 'is_up'):
            if params.get(boolean) is not None:
                params[boolean] = utils.get_bool_param(boolean, params)

        resource_type = params.get('resource_type')

        if resource_type:
            resource_type = resource_type.title()
            types = cleanable.CinderCleanableObject.cleanable_resource_types
            if resource_type not in types:
                msg = (_('Resource type %s not valid, must be ') %
                       resource_type)
                msg = utils.build_or_str(types, msg + '%s.')
                raise exception.InvalidInput(reason=msg)
            params['resource_type'] = resource_type

        resource_id = params.get('resource_id')
        if resource_id:
            if not uuidutils.is_uuid_like(resource_id):
                msg = (_('Resource ID must be a UUID, and %s is not.') %
                       resource_id)
                raise exception.InvalidInput(reason=msg)

            # If we have the resource type but we don't have where it is
            # located, we get it from the DB to limit the distribution of the
            # request by the scheduler, otherwise it will be distributed to all
            # the services.
            location_keys = {'service_id', 'cluster_name', 'host'}
            if not location_keys.intersection(params):
                workers = db.worker_get_all(ctxt, resource_id=resource_id,
                                            binary=params.get('binary'),
                                            resource_type=resource_type)

                if len(workers) == 0:
                    msg = (_('There is no resource with UUID %s pending '
                             'cleanup.') % resource_id)
                    raise exception.InvalidInput(reason=msg)
                if len(workers) > 1:
                    msg = (_('There are multiple resources with UUID %s '
                             'pending cleanup.  Please be more specific.') %
                           resource_id)
                    raise exception.InvalidInput(reason=msg)

                worker = workers[0]
                params.update(service_id=worker.service_id,
                              resource_type=worker.resource_type)

        return params
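
A minimal, self-contained sketch of the same key-validation and
boolean-coercion pattern used at the top of _prepare_params above.
ALLOWED_KEYS and prepare() are illustrative names, and the accepted-strings
table only approximates utils.get_bool_param(), which is stricter about the
values it accepts.

ALLOWED_KEYS = {'binary', 'disabled', 'is_up', 'resource_type', 'resource_id'}

def prepare(params):
    # Reject any filter key outside the allowed set.
    if not ALLOWED_KEYS.issuperset(params):
        invalid = ', '.join(sorted(set(params) - ALLOWED_KEYS))
        raise ValueError('Invalid filter keys: %s' % invalid)

    # Coerce boolean-like filters in place, leaving absent keys untouched.
    for key in ('disabled', 'is_up'):
        if params.get(key) is not None:
            value = str(params[key]).strip().lower()
            params[key] = value in ('1', 't', 'true', 'on', 'y', 'yes')
    return params

print(prepare({'disabled': 'true', 'is_up': '0'}))
# -> {'disabled': True, 'is_up': False}
# prepare({'bogus': 1}) raises ValueError: Invalid filter keys: bogus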
Example #2
    def cleanup(self, req, body=None):
        """Do the cleanup on resources from a specific service/host/node."""
        # Let the wsgi middleware convert NotAuthorized exceptions
        ctxt = req.environ['cinder.context']
        ctxt.authorize(policy.CLEAN_POLICY)
        body = body or {}

        for boolean in ('disabled', 'is_up'):
            if body.get(boolean) is not None:
                body[boolean] = strutils.bool_from_string(body[boolean])

        resource_type = body.get('resource_type')

        if resource_type:
            resource_type = resource_type.title()
            types = cleanable.CinderCleanableObject.cleanable_resource_types
            if resource_type not in types:
                valid_types = utils.build_or_str(types)
                msg = _('Resource type %(resource_type)s not valid,'
                        ' must be %(valid_types)s')
                msg = msg % {
                    "resource_type": resource_type,
                    "valid_types": valid_types
                }
                raise exception.InvalidInput(reason=msg)
            body['resource_type'] = resource_type

        resource_id = body.get('resource_id')
        if resource_id:

            # If we have the resource type but we don't have where it is
            # located, we get it from the DB to limit the distribution of the
            # request by the scheduler, otherwise it will be distributed to all
            # the services.
            location_keys = {'service_id', 'cluster_name', 'host'}
            if not location_keys.intersection(body):
                workers = db.worker_get_all(ctxt,
                                            resource_id=resource_id,
                                            binary=body.get('binary'),
                                            resource_type=resource_type)

                if len(workers) == 0:
                    msg = (_('There is no resource with UUID %s pending '
                             'cleanup.') % resource_id)
                    raise exception.InvalidInput(reason=msg)
                if len(workers) > 1:
                    msg = (_('There are multiple resources with UUID %s '
                             'pending cleanup.  Please be more specific.') %
                           resource_id)
                    raise exception.InvalidInput(reason=msg)

                worker = workers[0]
                body.update(service_id=worker.service_id,
                            resource_type=worker.resource_type)

        body['until'] = timeutils.utcnow()

        # NOTE(geguileo): If is_up is not specified in the request
        # CleanupRequest's default will be used (False)
        cleanup_request = objects.CleanupRequest(**body)
        cleaning, unavailable = self.sch_api.work_cleanup(
            ctxt, cleanup_request)
        return {
            'cleaning': workers_view.ViewBuilder.service_list(cleaning),
            'unavailable': workers_view.ViewBuilder.service_list(unavailable),
        }
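
The zero/one/many disambiguation on the workers table above is easy to test
in isolation. Below is a standalone sketch under the assumption that the DB
lookup returns simple rows; Worker and find_worker() are illustrative
stand-ins for db.worker_get_all()'s result rows, not Cinder APIs.

from collections import namedtuple

Worker = namedtuple('Worker', 'service_id resource_type')

def find_worker(workers, resource_id):
    # Exactly one pending-cleanup row must match before the request can be
    # pinned to a single service.
    if not workers:
        raise ValueError('There is no resource with UUID %s pending '
                         'cleanup.' % resource_id)
    if len(workers) > 1:
        raise ValueError('There are multiple resources with UUID %s pending '
                         'cleanup. Please be more specific.' % resource_id)
    return workers[0]

rows = [Worker(service_id=7, resource_type='Volume')]
sample_id = '6fdae9e0-73e2-45d1-a9cd-d60881f9c3f9'  # illustrative UUID
print(find_worker(rows, sample_id))
# -> Worker(service_id=7, resource_type='Volume')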
Example #3
    def do_setup(self, context):
        self.configuration.max_over_subscription_ratio = (
            vutils.get_max_over_subscription_ratio(
                self.configuration.max_over_subscription_ratio,
                supports_auto=False))

        if not self.configuration.max_over_subscription_ratio > 0:
            msg = _("Config 'max_over_subscription_ratio' invalid. Must be > "
                    "0: %s") % self.configuration.max_over_subscription_ratio
            LOG.error(msg)
            raise exception.NfsException(msg)

        packages = ('mount.nfs', '/usr/sbin/mount')
        for package in packages:
            try:
                self._execute(package, check_exit_code=False, run_as_root=True)
                break
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    raise
                LOG.error('%s is not installed.', package)
        else:
            msg = utils.build_or_str(packages, '%s needs to be installed.')
            raise exception.NfsException(msg)

        lcfg = self.configuration
        LOG.info('Connecting to host: %s.', lcfg.san_ip)

        host = lcfg.san_ip
        user = lcfg.san_login
        password = lcfg.san_password
        https_port = lcfg.zfssa_https_port

        credentials = ['san_ip', 'san_login', 'san_password', 'zfssa_data_ip']

        for cred in credentials:
            if not getattr(lcfg, cred, None):
                exception_msg = _('%s not set in cinder.conf') % cred
                LOG.error(exception_msg)
                raise exception.CinderException(exception_msg)

        self.zfssa = factory_zfssa()
        self.zfssa.set_host(host, timeout=lcfg.zfssa_rest_timeout)

        auth_str = base64.encode_as_text('%s:%s' % (user, password))
        self.zfssa.login(auth_str)

        self.zfssa.create_project(lcfg.zfssa_nfs_pool,
                                  lcfg.zfssa_nfs_project,
                                  compression=lcfg.zfssa_nfs_share_compression,
                                  logbias=lcfg.zfssa_nfs_share_logbias)

        share_args = {
            'sharedav': 'rw',
            'sharenfs': 'rw',
            'root_permissions': '777',
            'compression': lcfg.zfssa_nfs_share_compression,
            'logbias': lcfg.zfssa_nfs_share_logbias
        }

        self.zfssa.create_share(lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
                                lcfg.zfssa_nfs_share, share_args)

        share_details = self.zfssa.get_share(lcfg.zfssa_nfs_pool,
                                             lcfg.zfssa_nfs_project,
                                             lcfg.zfssa_nfs_share)

        mountpoint = share_details['mountpoint']

        self.mount_path = lcfg.zfssa_data_ip + ':' + mountpoint
        https_path = ('https://' + lcfg.zfssa_data_ip + ':' + https_port +
                      '/shares' + mountpoint)

        LOG.debug('NFS mount path: %s', self.mount_path)
        LOG.debug('WebDAV path to the share: %s', https_path)

        self.shares = {}
        mnt_opts = self.configuration.zfssa_nfs_mount_options
        self.shares[self.mount_path] = mnt_opts if len(mnt_opts) > 1 else None

        # Initialize the WebDAV client
        self.zfssa.set_webdav(https_path, auth_str)

        # Edit http service so that WebDAV requests are always authenticated
        args = {'https_port': https_port, 'require_login': True}

        self.zfssa.modify_service('http', args)
        self.zfssa.enable_service('http')

        if lcfg.zfssa_enable_local_cache:
            LOG.debug('Creating local cache directory %s.',
                      lcfg.zfssa_cache_directory)
            self.zfssa.create_directory(lcfg.zfssa_cache_directory)
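
The package probe near the top of do_setup() relies on Python's for/else: the
else branch runs only when the loop completes without hitting break, i.e. when
none of the candidate binaries could be executed. A sketch of the same idiom,
with shutil.which() as an illustrative stand-in for self._execute(), and
' or '.join() approximating what utils.build_or_str() produces:

import shutil

packages = ('mount.nfs', '/usr/sbin/mount')
for package in packages:
    if shutil.which(package):
        break  # found a usable binary; the else clause is skipped
else:
    # No candidate was found on PATH (or at its absolute path).
    raise RuntimeError('%s needs to be installed.' % ' or '.join(packages))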