Example 1
    def get_host_config(self, host):
        ceph_backend = StorageBackendConfig.get_backend_conf(
            self.dbapi, constants.CINDER_BACKEND_CEPH)
        if not ceph_backend:
            return {}  # ceph is not configured

        config = {}
        if host.personality in [constants.CONTROLLER, constants.STORAGE]:
            config.update(self._get_ceph_osd_config(host))
        config.update(self._get_ceph_mon_config(host))
        return config
Example 2
    def get_system_config(self):
        ceph_rook_backend = StorageBackendConfig.get_backend_conf(
            self.dbapi, constants.SB_TYPE_CEPH_ROOK)

        enable = False
        if ceph_rook_backend:
            enable = True

        config = {
            'platform::rook::params::service_enabled': enable,
        }
        return config
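
The enable flag above is just the truthiness of the backend lookup. Below is a minimal sketch of that mapping (not part of the original example); the fake values stand in for what StorageBackendConfig.get_backend_conf() returns.

# Sketch only: the loop values below stand in for the result of
# StorageBackendConfig.get_backend_conf(dbapi, SB_TYPE_CEPH_ROOK).
for ceph_rook_backend in (None, object()):
    enable = bool(ceph_rook_backend)
    config = {'platform::rook::params::service_enabled': enable}
    print(config)
# {'platform::rook::params::service_enabled': False}
# {'platform::rook::params::service_enabled': True}
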
Example 3
    def _get_conf_overrides(self):
        ceph_backend = self._get_primary_ceph_backend()
        if not ceph_backend and not self._rook_ceph:
            rbd_store_pool = ""
            rbd_store_user = ""
            replication = 1
        elif self._rook_ceph:
            rbd_store_pool = constants.CEPH_POOL_IMAGES_NAME
            rbd_store_user = RBD_STORE_USER

            replication = 2
            if utils.is_aio_simplex_system(self.dbapi):
                replication = 1
        else:
            rbd_store_pool = app_constants.CEPH_POOL_IMAGES_NAME
            rbd_store_user = RBD_STORE_USER
            replication, min_replication = \
                StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        if not self._rook_ceph:
            # Only the primary Ceph tier is used for the glance images pool
            rule_name = "{0}{1}{2}".format(
                constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
                constants.CEPH_CRUSH_TIER_SUFFIX,
                "-ruleset").replace('-', '_')
        else:
            rule_name = "storage_tier_ruleset"

        conf = {
            'glance': {
                'DEFAULT': {
                    'graceful_shutdown': True,
                    'show_image_direct_url': True,
                },
                'glance_store': {
                    'chunk_size': app_constants.CEPH_POOL_IMAGES_CHUNK_SIZE,
                    'filesystem_store_datadir': constants.GLANCE_IMAGE_PATH,
                    'rbd_store_pool': rbd_store_pool,
                    'rbd_store_user': rbd_store_user,
                    'rbd_store_replication': replication,
                    'rbd_store_crush_rule': rule_name,
                }
            }
        }

        if ceph_backend:
            conf['ceph'] = self._get_ceph_overrides()
        elif self._rook_ceph:
            conf['ceph'] = {
                'admin_keyring': self._get_rook_ceph_admin_keyring()
            }

        return conf
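
In the non-Rook branch the crush rule name is the default Ceph tier name plus the crush tier suffix plus "-ruleset", with dashes turned into underscores. A standalone sketch of that formatting follows; the literals 'storage' and '-tier' are assumptions standing in for constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH] and constants.CEPH_CRUSH_TIER_SUFFIX, chosen so the result matches the 'storage_tier_ruleset' string hard-coded in the Rook branch.

# Sketch only: both values are assumed stand-ins for sysinv constants.
SB_TIER_DEFAULT_NAME_CEPH = "storage"   # assumed default Ceph tier name
CEPH_CRUSH_TIER_SUFFIX = "-tier"        # assumed crush tier suffix

rule_name = "{0}{1}{2}".format(
    SB_TIER_DEFAULT_NAME_CEPH,
    CEPH_CRUSH_TIER_SUFFIX,
    "-ruleset").replace('-', '_')

print(rule_name)  # storage_tier_ruleset
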
Example 4
    def get_cinder_volume_types(self):
        """Obtain the current list of volume types."""
        volume_types_list = []

        if StorageBackendConfig.is_service_enabled(self.dbapi,
                                                   constants.SB_SVC_CINDER,
                                                   filter_shared=True):
            try:
                volume_types_list = self._get_cinderclient().volume_types.list()
            except Exception as e:
                LOG.error("get_cinder_volume_types: Failed to access Cinder client: %s" % e)

        return volume_types_list
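
The method degrades gracefully: it returns an empty list when the cinder service is not enabled or when the client call fails. Here is a hedged sketch of that pattern; the service flag and the FakeVolumeTypes class are illustrative stand-ins, not the real sysinv or cinderclient objects.

import logging

LOG = logging.getLogger(__name__)

class FakeVolumeTypes(object):
    """Illustrative stand-in for the cinderclient volume_types manager."""
    def list(self):
        raise RuntimeError("cinder endpoint unreachable")  # simulated failure

def get_volume_types(service_enabled, volume_types_client):
    volume_types_list = []
    if service_enabled:
        try:
            volume_types_list = volume_types_client.list()
        except Exception as e:
            LOG.error("Failed to access Cinder client: %s", e)
    return volume_types_list

print(get_volume_types(True, FakeVolumeTypes()))   # [] (error is logged)
print(get_volume_types(False, FakeVolumeTypes()))  # [] (client never called)
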
Example 5
    def _get_filtered_ceph_monitor_ips_using_function(self, name_filter):
        """ Extracts the ceph monitor ips to a list based on a filter

        :param name_filter: A filter function returning a boolean.

        :returns: List of filtered monitor ips.
        """
        monitors = []
        for name, addr in StorageBackendConfig.get_ceph_mon_ip_addresses(
                self.dbapi).items():
            if name_filter(name):
                monitors.append(addr)

        return monitors
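
The name_filter argument is any callable mapping a monitor name to a boolean. A small sketch of the same filtering over a plain dict standing in for StorageBackendConfig.get_ceph_mon_ip_addresses(); the monitor names and addresses below are illustrative only.

# Illustrative {name: address} mapping; real data comes from
# StorageBackendConfig.get_ceph_mon_ip_addresses(dbapi).
mon_ips = {
    "ceph-mon-0": "192.168.204.3",
    "ceph-mon-1": "192.168.204.4",
    "ceph-float-mon": "192.168.204.2",
}

def name_filter(name):
    # keep the per-host monitors, drop the floating one
    return name.startswith("ceph-mon-")

monitors = [addr for name, addr in mon_ips.items() if name_filter(name)]
print(monitors)  # ['192.168.204.3', '192.168.204.4']
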
Example 6
 def _get_ceph_monitor_ips(self, name_filter=None):
     system = self._get_system()
     if system.system_type == constants.TIS_AIO_BUILD:
         if system.system_mode == constants.SYSTEM_MODE_SIMPLEX:
             # ceph monitor for controller-0
             monitors = self._get_filtered_ceph_monitor_ips_by_name(
                 constants.CEPH_MON_0)
         else:
             # ceph monitor for controller-floating
             monitors = self._get_filtered_ceph_monitor_ips_by_name(
                 constants.CEPH_FLOATING_MON)
     elif name_filter:
         monitors = self._get_filtered_ceph_monitor_ips_using_function(
             name_filter)
     else:
         monitors = StorageBackendConfig.get_ceph_mon_ip_addresses(
             self.dbapi).values()
     return monitors
Example 7
    def _get_conf_ceph_overrides(self):
        ceph_backend = self._get_primary_ceph_backend()
        if not ceph_backend:
            return {}

        primary_tier_name =\
            constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH]

        replication, min_replication =\
            StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        pools = {}
        for backend in self.dbapi.storage_ceph_get_list():
            if backend.tier_name == primary_tier_name:
                pool_name = app_constants.CEPH_POOL_VOLUMES_NAME
            else:
                pool_name = "%s-%s" % (app_constants.CEPH_POOL_VOLUMES_NAME,
                                      backend.tier_name)
            rule_name = "{0}{1}{2}".format(
                backend.tier_name, constants.CEPH_CRUSH_TIER_SUFFIX,
                "-ruleset").replace('-', '_')
            pool = {
                'replication': replication,
                'crush_rule': rule_name.encode('utf8', 'strict'),
                'chunk_size': app_constants.CEPH_POOL_VOLUMES_CHUNK_SIZE,
                'app_name': app_constants.CEPH_POOL_VOLUMES_APP_NAME
            }
            pools[pool_name.encode('utf8', 'strict')] = pool
            if backend.name == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
                # Backup uses the same replication and crush rule as
                # the default storage backend
                pool_backup = {
                    'replication': replication,
                    'crush_rule': rule_name.encode('utf8', 'strict'),
                    'chunk_size': app_constants.CEPH_POOL_BACKUP_CHUNK_SIZE,
                    'app_name': app_constants.CEPH_POOL_BACKUP_APP_NAME
                }
                pools['backup'] = dict(pool_backup)

        return {
            'monitors': self._get_formatted_ceph_monitor_ips(),
            'admin_keyring': 'null',
            'pools': pools
        }
Example 8
    def _get_conf_overrides(self):
        ceph_backend = self._get_primary_ceph_backend()
        if not ceph_backend:
            rbd_store_pool = ""
            rbd_store_user = ""
            replication = 1
        else:
            rbd_store_pool = constants.CEPH_POOL_IMAGES_NAME
            rbd_store_user = RBD_STORE_USER
            replication, min_replication = \
                StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        # Only the primary Ceph tier is used for the glance images pool
        rule_name = "{0}{1}{2}".format(
            constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
            constants.CEPH_CRUSH_TIER_SUFFIX, "-ruleset").replace('-', '_')

        conf = {
            'glance': {
                'DEFAULT': {
                    'graceful_shutdown': True,
                    'show_image_direct_url': True,
                },
                'glance_store': {
                    'filesystem_store_datadir': constants.GLANCE_IMAGE_PATH,
                    'rbd_store_pool': rbd_store_pool,
                    'rbd_store_user': rbd_store_user,
                    'rbd_store_replication': replication,
                    'rbd_store_crush_rule': rule_name,
                }
            }
        }

        if ceph_backend:
            conf['ceph'] = self._get_ceph_overrides()

        return conf
Example 9
def _cinder_volumes_patch_semantic_checks(caps_dict):
    # make sure that only valid capabilities are provided
    valid_caps = set([constants.LVG_CINDER_PARAM_LVM_TYPE])
    invalid_caps = set(caps_dict.keys()) - valid_caps

    # Do we have something unexpected?
    if len(invalid_caps) > 0:
        raise wsme.exc.ClientSideError(
            _("Invalid parameter(s) for volume group %s: %s " %
              (constants.LVG_CINDER_VOLUMES, ", ".join(
                  str(i) for i in invalid_caps))))

    # make sure that we are modifying something
    elif len(caps_dict) == 0:
        msg = _('No parameter specified. No action taken')
        raise wsme.exc.ClientSideError(msg)

    # Reject modifications of cinder volume provisioning type if
    # lvm storage backend is enabled
    if (constants.LVG_CINDER_PARAM_LVM_TYPE in caps_dict
            and StorageBackendConfig.has_backend(
                pecan.request.dbapi, constants.CINDER_BACKEND_LVM)):
        msg = _('Cinder volumes LVM type modification denied. '
                'LVM Storage Backend is added.')
        raise wsme.exc.ClientSideError(msg)

    # Make sure that cinder volumes provisioning type is a valid value
    if constants.LVG_CINDER_PARAM_LVM_TYPE in caps_dict and \
       caps_dict[constants.LVG_CINDER_PARAM_LVM_TYPE] not in \
       [constants.LVG_CINDER_LVM_TYPE_THIN,
            constants.LVG_CINDER_LVM_TYPE_THICK]:
        msg = _('Invalid parameter: %s must be %s or %s' %
                (constants.LVG_CINDER_PARAM_LVM_TYPE,
                 constants.LVG_CINDER_LVM_TYPE_THIN,
                 constants.LVG_CINDER_LVM_TYPE_THICK))
        raise wsme.exc.ClientSideError(msg)
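
The checks reduce to: only the LVM type key is accepted, the dict must not be empty, the type cannot be changed once the LVM backend is configured, and the value must be thin or thick. Below is a simplified sketch of the value validation alone; the literals 'lvm_type', 'thin' and 'thick' are assumed stand-ins for the sysinv constants named above.

# Sketch only: the key and values are assumed stand-ins for
# constants.LVG_CINDER_PARAM_LVM_TYPE / LVG_CINDER_LVM_TYPE_THIN / _THICK.
LVM_TYPE_KEY = "lvm_type"
VALID_LVM_TYPES = ("thin", "thick")

def validate_caps(caps_dict):
    invalid = set(caps_dict) - {LVM_TYPE_KEY}
    if invalid:
        raise ValueError("Invalid parameter(s): %s" % ", ".join(sorted(invalid)))
    if not caps_dict:
        raise ValueError("No parameter specified. No action taken")
    if caps_dict[LVM_TYPE_KEY] not in VALID_LVM_TYPES:
        raise ValueError("%s must be one of %s" % (LVM_TYPE_KEY, VALID_LVM_TYPES))

validate_caps({"lvm_type": "thin"})    # passes
# validate_caps({"bogus": "x"})        # raises ValueError
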
Example 10
    def _get_conf_overrides(self):
        ceph_backend = self._get_primary_ceph_backend()
        if not ceph_backend:
            rbd_store_pool = ""
            rbd_store_user = ""
            replication = 1
        else:
            rbd_store_pool = constants.CEPH_POOL_IMAGES_NAME
            rbd_store_user = RBD_STORE_USER
            replication, min_replication = \
                StorageBackendConfig.get_ceph_pool_replication(self.dbapi)

        # Only the primary Ceph tier is used for the glance images pool, so
        # the crush ruleset is 0.
        ruleset = 0

        conf = {
            'glance': {
                'DEFAULT': {
                    'graceful_shutdown': True,
                    'show_image_direct_url': True,
                },
                'glance_store': {
                    'filesystem_store_datadir': constants.GLANCE_IMAGE_PATH,
                    'rbd_store_pool': rbd_store_pool,
                    'rbd_store_user': rbd_store_user,
                    'rbd_store_replication': replication,
                    'rbd_store_crush_rule': ruleset,
                }
            }
        }

        if ceph_backend:
            conf['ceph'] = self._get_ceph_overrides()

        return conf
Example 11
    def app_rbd_actions(self, app_obj, dbapi, app_name, operation):
        """ Perform rbd actions for an application based on operation

        :param app_obj: AppOperator object
        :param dbapi: dbapi
        :param app_name: application name
        :param operation: operation being performed
        """

        if app_name in [
                constants.HELM_APP_CERT_MANAGER, constants.HELM_APP_OIDC_AUTH,
                constants.HELM_APP_NGINX_IC
        ]:
            return

        LOG.info("app_rbd_actions app: %s operation: %s" %
                 (app_name, operation))

        # TODO(ksmith): Further decouple this by moving this logic to the
        # application derived class in openstack and just pass here.
        # Since RBD provisioner requires Ceph, return false when not enabled
        if not StorageBackendConfig.has_backend(dbapi, constants.SB_TYPE_CEPH):
            rbd_provisioner_required = False
        else:
            rbd_provisioner_required = True

        if operation == constants.APP_APPLY_OP:
            if rbd_provisioner_required:
                app_obj._create_rbd_provisioner_secrets(app_name)
        elif operation == constants.APP_REMOVE_OP:
            if rbd_provisioner_required:
                app_obj._delete_rbd_provisioner_secrets(app_name)
            if app_name == constants.HELM_APP_OPENSTACK:
                app_obj._delete_ceph_persistent_volume_claim(
                    common.HELM_NS_OPENSTACK)
                app_obj._delete_namespace(common.HELM_NS_OPENSTACK)
Example 12
    def update_many(self, isystem_uuid, patch):
        """Update the current controller_fs configuration."""

        if self._from_isystems and not isystem_uuid:
            raise exception.InvalidParameterValue(_(
                "System id not specified."))

        # Validate input filesystem names
        controller_fs_list = pecan.request.dbapi.controller_fs_get_list()
        valid_fs_list = []
        if controller_fs_list:
            valid_fs_list = {fs.name: fs.size for fs in controller_fs_list}

        reinstall_required = False
        reboot_required = False
        force_resize = False
        modified_fs = []

        for p_list in patch:
            p_obj_list = jsonpatch.JsonPatch(p_list)

            for p_obj in p_obj_list:
                if p_obj['path'] == '/action':
                    value = p_obj['value']
                    patch.remove(p_list)
                    if value == constants.FORCE_ACTION:
                        force_resize = True
                        LOG.info("Force action resize selected")
                        break

        for p_list in patch:
            p_obj_list = jsonpatch.JsonPatch(p_list)
            for p_obj in p_obj_list:
                if p_obj['path'] == '/name':
                    fs_display_name = p_obj['value']
                    if fs_display_name == constants.FILESYSTEM_DISPLAY_NAME_CGCS:
                        fs_name = constants.FILESYSTEM_NAME_CGCS
                    else:
                        fs_name = fs_display_name
                elif p_obj['path'] == '/size':
                    size = p_obj['value']

            if fs_name not in valid_fs_list.keys() or fs_display_name == constants.FILESYSTEM_NAME_CGCS:
                msg = _("ControllerFs update failed: invalid filesystem "
                        "'%s' " % fs_display_name)
                raise wsme.exc.ClientSideError(msg)
            elif not cutils.is_int_like(size):
                msg = _("ControllerFs update failed: filesystem '%s' "
                        "size must be an integer " % fs_display_name)
                raise wsme.exc.ClientSideError(msg)
            elif int(size) <= int(valid_fs_list[fs_name]):
                msg = _("ControllerFs update failed: size for filesystem '%s' "
                        "should be bigger than %s " % (
                            fs_display_name, valid_fs_list[fs_name]))
                raise wsme.exc.ClientSideError(msg)
            elif (fs_name == constants.FILESYSTEM_NAME_CGCS and
                  StorageBackendConfig.get_backend(pecan.request.dbapi,
                                                   constants.CINDER_BACKEND_CEPH)):
                if force_resize:
                    LOG.warn("Force resize ControllerFs: %s, though Ceph "
                             "storage backend is configured" % fs_display_name)
                else:
                    raise wsme.exc.ClientSideError(
                        _("ControllerFs %s size is not modifiable as Ceph is "
                          "configured. Update size via Ceph Storage Pools." %
                          fs_display_name))

            if fs_name in constants.SUPPORTED_REPLICATED_FILEYSTEM_LIST:
                if utils.is_drbd_fs_resizing():
                    raise wsme.exc.ClientSideError(
                        _("A drbd sync operation is currently in progress. "
                          "Retry again later.")
                    )

            modified_fs += [fs_name]

        controller_fs_list_new = []
        for fs in controller_fs_list:
            replaced = False
            for p_list in patch:
                p_obj_list = jsonpatch.JsonPatch(p_list)
                for p_obj in p_obj_list:
                    if p_obj['path'] == '/name':
                        if p_obj['value'] == constants.FILESYSTEM_DISPLAY_NAME_CGCS:
                            p_obj['value'] = constants.FILESYSTEM_NAME_CGCS

                    if p_obj['value'] == fs['name']:
                        try:
                            controller_fs_list_new += [ControllerFs(
                                      **jsonpatch.apply_patch(fs.as_dict(), p_obj_list))]
                            replaced = True
                            break
                        except utils.JSONPATCH_EXCEPTIONS as e:
                            raise exception.PatchError(patch=p_list, reason=e)
                if replaced:
                    break
            if not replaced:
                controller_fs_list_new += [fs]

        cgtsvg_growth_gib = _check_controller_multi_fs_data(
                               pecan.request.context,
                               controller_fs_list_new)

        if _check_controller_state():
            _check_controller_multi_fs(controller_fs_list_new,
                                       cgtsvg_growth_gib=cgtsvg_growth_gib)
            for fs in controller_fs_list_new:
                if fs.name in modified_fs:
                    value = {'size': fs.size}
                    if fs.replicated:
                        value.update({'state': constants.CONTROLLER_FS_RESIZING_IN_PROGRESS})
                    pecan.request.dbapi.controller_fs_update(fs.uuid, value)

        try:
            # perform rpc to conductor to perform config apply
            pecan.request.rpcapi.update_storage_config(
                    pecan.request.context,
                    update_storage=False,
                    reinstall_required=reinstall_required,
                    reboot_required=reboot_required,
                    filesystem_list=modified_fs
            )

        except Exception as e:
            msg = _("Failed to update filesystem size ")
            LOG.error("%s with patch %s with exception %s" % (msg, patch, e))
            raise wsme.exc.ClientSideError(msg)
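
Each element of patch is itself a JSON patch document: a list of operations whose paths are '/name', '/size' and optionally '/action'. A short sketch of applying one such document with the jsonpatch library to a plain dict, mirroring the apply_patch call near the end of the example; the filesystem name and sizes are illustrative.

import jsonpatch

# Illustrative controller_fs entry and patch document.
fs = {"name": "database", "size": 10}
p_obj_list = jsonpatch.JsonPatch([
    {"op": "replace", "path": "/name", "value": "database"},
    {"op": "replace", "path": "/size", "value": "20"},
])

updated = jsonpatch.apply_patch(fs, p_obj_list)
print(updated)  # {'name': 'database', 'size': '20'}
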
Example 13
    def get_system_health(self, context, force=False):
        """Returns the general health of the system

        Checks the following:
        - All hosts are provisioned
        - All hosts are patch current
        - All hosts are unlocked/enabled
        - All hosts having matching configs
        - No management affecting alarms
        - For ceph systems: The storage cluster is healthy
        - All kubernetes nodes are ready
        - All kubernetes control plane pods are ready
        """

        hosts = self._dbapi.ihost_get_list()
        output = _('System Health:\n')
        health_ok = True

        unprovisioned_hosts, provisioned_hosts = \
            self._check_hosts_provisioned(hosts)
        success = unprovisioned_hosts == 0
        output += (_('All hosts are provisioned: [%s]\n') %
                   (Health.SUCCESS_MSG if success else Health.FAIL_MSG))
        if not success:
            output += _('%s Unprovisioned hosts\n') % unprovisioned_hosts
            # Set the hosts to the provisioned_hosts. This will allow the other
            # checks to continue
            hosts = provisioned_hosts

        health_ok = health_ok and success

        success, error_hosts = self._check_hosts_enabled(hosts)
        output += _('All hosts are unlocked/enabled: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            output += _('Locked or disabled hosts: %s\n') \
                % ', '.join(error_hosts)

        health_ok = health_ok and success

        success, error_hosts = self._check_hosts_config(hosts)
        output += _('All hosts have current configurations: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            output += _('Hosts with out of date configurations: %s\n') \
                % ', '.join(error_hosts)

        health_ok = health_ok and success

        success, error_hosts, missing_hosts = self._check_patch_current(hosts)
        output += _('All hosts are patch current: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            if error_hosts:
                output += _('Hosts not patch current: %s\n') \
                    % ', '.join(error_hosts)
            if missing_hosts:
                output += _('Hosts without patch data: %s\n') \
                    % ', '.join(missing_hosts)

        health_ok = health_ok and success

        if StorageBackendConfig.has_backend(self._dbapi,
                                            constants.CINDER_BACKEND_CEPH):
            success = self._check_ceph()
            output += _('Ceph Storage Healthy: [%s]\n') \
                % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)

        health_ok = health_ok and success

        success, allowed, affecting = self._check_alarms(context, force)
        output += _('No alarms: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            output += _('[%s] alarms found, [%s] of which are management '
                        'affecting\n') % (allowed + affecting, affecting)

        health_ok = health_ok and success

        success, error_nodes = self._check_kube_nodes_ready()
        output += _('All kubernetes nodes are ready: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            output += _('Kubernetes nodes not ready: %s\n') \
                % ', '.join(error_nodes)

        health_ok = health_ok and success

        success, error_nodes = self._check_kube_control_plane_pods()
        output += _('All kubernetes control plane pods are ready: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            output += _('Kubernetes control plane pods not ready: %s\n') \
                % ', '.join(error_nodes)

        health_ok = health_ok and success

        return health_ok, output
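
Every check follows the same accumulate-and-report shape: append a pass/fail line, append details on failure, and AND the result into health_ok. A compressed sketch of that pattern with two illustrative stand-in checks; the labels, details and the 'OK'/'Fail' markers are assumptions in place of the real checks and Health.SUCCESS_MSG / Health.FAIL_MSG.

# Sketch only: illustrative check results, not the real sysinv checks.
checks = [
    ("All hosts are unlocked/enabled", True, []),
    ("No alarms", False, ["1 management-affecting alarm"]),
]

health_ok = True
output = "System Health:\n"
for label, success, details in checks:
    output += "%s: [%s]\n" % (label, "OK" if success else "Fail")
    if not success:
        output += "  %s\n" % "; ".join(details)
    health_ok = health_ok and success

print(output)
print("overall health_ok:", health_ok)
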
Example 14
def _create(ceph_mon):
    # validate host
    try:
        chost = pecan.request.dbapi.ihost_get(ceph_mon['ihost_uuid'])
    except exception.ServerNotFound:
        raise wsme.exc.ClientSideError(
            _("Host not found uuid: %s ." % ceph_mon['ihost_uuid']))

    ceph_mon['forihostid'] = chost['id']

    # check if ceph monitor is already configured
    if pecan.request.dbapi.ceph_mon_get_by_ihost(ceph_mon['forihostid']):
        raise wsme.exc.ClientSideError(
            _("Ceph monitor already configured for host '%s'." % chost['hostname']))

    # only one instance of the 3rd ceph monitor is allowed
    ceph_mons = pecan.request.dbapi.ceph_mon_get_list()
    for mon in ceph_mons:
        h = pecan.request.dbapi.ihost_get(mon['forihostid'])
        if h.personality in [constants.STORAGE, constants.WORKER]:
            raise wsme.exc.ClientSideError(
                _("Ceph monitor already configured for host '%s'." % h['hostname']))

    # Adding a ceph monitor to a worker selects Ceph's deployment model
    if chost['personality'] == constants.WORKER:
        # Only if replication model is CONTROLLER or not yet defined
        stor_model = ceph.get_ceph_storage_model()
        worker_stor_models = [constants.CEPH_CONTROLLER_MODEL, constants.CEPH_UNDEFINED_MODEL]
        if stor_model not in worker_stor_models:
            raise wsme.exc.ClientSideError(
                _("Can not add a storage monitor to a worker if "
                  "ceph's deployments model is already set to %s." % stor_model))

        replication, min_replication = \
            StorageBackendConfig.get_ceph_max_replication(pecan.request.dbapi)
        supported_replication = constants.CEPH_CONTROLLER_MODEL_REPLICATION_SUPPORTED
        if replication not in supported_replication:
            raise wsme.exc.ClientSideError(
                _("Ceph monitor can be added to a worker only if "
                  "replication is set to: %s'. Please update replication "
                  "before configuring a monitor on a worker node." % supported_replication))

    # host must be locked and online unless this is controller-0
    if (chost['hostname'] != constants.CONTROLLER_0_HOSTNAME and
            (chost['availability'] != constants.AVAILABILITY_ONLINE or
            chost['administrative'] != constants.ADMIN_LOCKED)):
        raise wsme.exc.ClientSideError(
            _("Host %s must be locked and online." % chost['hostname']))

    ceph_mon = _set_defaults(ceph_mon)

    _check_ceph_mon(ceph_mon)

    controller_fs_utils._check_controller_fs(
        ceph_mon_gib_new=ceph_mon['ceph_mon_gib'])

    pecan.request.rpcapi.reserve_ip_for_first_storage_node(
        pecan.request.context)

    # Size of ceph-mon logical volume must be the same for all
    # monitors so we get the size from any or use default.
    ceph_mons = pecan.request.dbapi.ceph_mon_get_list()
    if ceph_mons:
        ceph_mon['ceph_mon_gib'] = ceph_mons[0]['ceph_mon_gib']

    # In case we add the monitor on a worker node, the state
    # and task must be set properly.
    if chost.personality == constants.WORKER:
        ceph_mon['state'] = constants.SB_STATE_CONFIGURING
        ctrls = pecan.request.dbapi.ihost_get_by_personality(
             constants.CONTROLLER)
        valid_ctrls = [
                ctrl for ctrl in ctrls if
                (ctrl.administrative == constants.ADMIN_LOCKED and
                 ctrl.availability == constants.AVAILABILITY_ONLINE) or
                (ctrl.administrative == constants.ADMIN_UNLOCKED and
                 ctrl.operational == constants.OPERATIONAL_ENABLED)]

        tasks = {}
        for ctrl in valid_ctrls:
            tasks[ctrl.hostname] = constants.SB_STATE_CONFIGURING

        ceph_mon['task'] = str(tasks)

    LOG.info("Creating ceph-mon DB entry for host uuid %s: %s" %
             (ceph_mon['ihost_uuid'], str(ceph_mon)))
    new_ceph_mon = pecan.request.dbapi.ceph_mon_create(ceph_mon)

    # We update the base config when adding a dynamic monitor.
    # At this moment the only possibility to add a dynamic monitor
    # is on a worker node, so we check for that.
    if chost.personality == constants.WORKER:
        try:
            # Storage nodes are not supported on a controller based
            # storage model.
            personalities = [constants.CONTROLLER, constants.WORKER]
            pecan.request.rpcapi.update_ceph_base_config(
                pecan.request.context,
                personalities)
        except Exception:
            values = {'state': constants.SB_STATE_CONFIG_ERR, 'task': None}
            pecan.request.dbapi.ceph_mon_update(new_ceph_mon['uuid'], values)
            raise

    # The return value needs to be iterable, so make it a list.
    return [new_ceph_mon]
Example 15
 def ip_addresses(self):
     parent = pecan.request.path.split('/')[:-1][-1]
     if parent != "ceph_mon":
         raise exception.HTTPNotFound
     return StorageBackendConfig.get_ceph_mon_ip_addresses(
         pecan.request.dbapi)
Example 16
    def patch(self, cephmon_uuid, patch):
        """Update the current storage configuration."""

        if not StorageBackendConfig.has_backend_configured(
            pecan.request.dbapi,
            constants.CINDER_BACKEND_CEPH
        ):
            raise wsme.exc.ClientSideError(
                _("Ceph backend is not configured.")
            )

        rpc_cephmon = objects.ceph_mon.get_by_uuid(pecan.request.context,
                                                   cephmon_uuid)
        is_ceph_mon_gib_changed = False

        patch = [p for p in patch if '/controller' not in p['path']]

        # Check if either ceph mon size or disk has to change.
        for p in patch:
            if '/ceph_mon_gib' in p['path']:
                if rpc_cephmon.ceph_mon_gib != p['value']:
                    is_ceph_mon_gib_changed = True

        if not is_ceph_mon_gib_changed:
            LOG.info("ceph_mon parameters are not changed")
            raise wsme.exc.ClientSideError(
                _("Warning: ceph_mon parameters are not changed."))

        # replace isystem_uuid and ceph_mon_uuid with corresponding
        patch_obj = jsonpatch.JsonPatch(patch)
        state_rel_path = ['/uuid', '/id', '/forihostid',
                          '/device_node', '/device_path']
        if any(p['path'] in state_rel_path for p in patch_obj):
            raise wsme.exc.ClientSideError(_("The following fields can not be "
                                             "modified: %s" %
                                             state_rel_path))

        try:
            cephmon = CephMon(**jsonpatch.apply_patch(
                rpc_cephmon.as_dict(),
                patch_obj))
        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        if is_ceph_mon_gib_changed:
            _check_ceph_mon(cephmon.as_dict(), rpc_cephmon.as_dict())
            controller_fs_utils._check_controller_fs(
                ceph_mon_gib_new=cephmon.ceph_mon_gib)

        for field in objects.ceph_mon.fields:
            if rpc_cephmon[field] != cephmon.as_dict()[field]:
                rpc_cephmon[field] = cephmon.as_dict()[field]

        LOG.info("SYS_I  cephmon: %s " % cephmon.as_dict())

        try:
            rpc_cephmon.save()
        except exception.HTTPNotFound:
            msg = _("Ceph Mon update failed: uuid %s : "
                    " patch %s"
                    % (rpc_cephmon.uuid, patch))
            raise wsme.exc.ClientSideError(msg)

        if is_ceph_mon_gib_changed:
            # Update the task for ceph storage backend.
            StorageBackendConfig.update_backend_states(
                pecan.request.dbapi,
                constants.CINDER_BACKEND_CEPH,
                task=constants.SB_TASK_RESIZE_CEPH_MON_LV
            )

            # Mark controllers and storage node as Config out-of-date.
            pecan.request.rpcapi.update_storage_config(
                pecan.request.context,
                update_storage=is_ceph_mon_gib_changed,
                reinstall_required=False
            )

        return CephMon.convert_with_links(rpc_cephmon)
Example 17
def _check_profile(stor):
    # semantic check: whether system has a ceph backend
    if not StorageBackendConfig.has_backend_configured(pecan.request.dbapi,
                                                       constants.SB_TYPE_CEPH):
        raise wsme.exc.ClientSideError(
            _("System must have a %s backend" % constants.SB_TYPE_CEPH))
Example 18
def _check_controller_multi_fs_data(context, controller_fs_list_new,
                                    modified_fs):
    """ Check controller filesystem data and return growth
        returns: cgtsvg_growth_gib
    """

    cgtsvg_growth_gib = 0

    # Check if we need img_conversions
    img_conversion_required = False
    lvdisplay_keys = [
        constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_DATABASE],
        constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_CGCS],
        constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_BACKUP],
        constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_SCRATCH],
        constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_GNOCCHI]
    ]

    # On primary region, img-conversions always exists in controller_fs DB table.
    # On secondary region, if both glance and cinder are sharing from the primary
    # region, img-conversions won't exist in controller_fs DB table. We already
    # have a semantic check that disallows img-conversions resizing.
    if (StorageBackendConfig.has_backend(pecan.request.dbapi,
                                         constants.SB_TYPE_LVM)
            or StorageBackendConfig.has_backend(pecan.request.dbapi,
                                                constants.SB_TYPE_CEPH)):
        img_conversion_required = True
        lvdisplay_keys.append(constants.FILESYSTEM_LV_DICT[
            constants.FILESYSTEM_NAME_IMG_CONVERSIONS])

    if (constants.FILESYSTEM_NAME_IMG_CONVERSIONS in modified_fs
            and not img_conversion_required):
        raise wsme.exc.ClientSideError(
            _("%s is not modifiable: no cinder backend is "
              "currently configured.") %
            constants.FILESYSTEM_NAME_IMG_CONVERSIONS)

    lvdisplay_dict = pecan.request.rpcapi.get_controllerfs_lv_sizes(context)

    for key in lvdisplay_keys:
        if not lvdisplay_dict.get(key, None):
            raise wsme.exc.ClientSideError(
                _("Unable to determine the "
                  "current size of %s. "
                  "Rejecting modification "
                  "request." % key))

    for fs in controller_fs_list_new:
        lv = fs.logical_volume
        if lvdisplay_dict.get(lv, None):
            orig = int(float(lvdisplay_dict[lv]))
            new = int(fs.size)
            if fs.name == constants.FILESYSTEM_NAME_DATABASE:
                orig = orig / 2

            if orig > new:
                raise wsme.exc.ClientSideError(
                    _("'%s'  must be at least: "
                      "%s" % (fs.name, orig)))
            if fs.name == constants.FILESYSTEM_NAME_DATABASE:
                cgtsvg_growth_gib += 2 * (new - orig)
            else:
                cgtsvg_growth_gib += (new - orig)

    LOG.info("_check_controller_multi_fs_data cgtsvg_growth_gib=%s" %
             cgtsvg_growth_gib)

    return cgtsvg_growth_gib
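
The growth arithmetic treats the database filesystem specially: its reported LV size is halved before the comparison and its requested growth is counted twice, matching the halving and doubling in the loop above. A standalone sketch of that arithmetic with made-up LV names and sizes follows.

# Illustrative sketch of the cgtsvg growth arithmetic; LV names and sizes
# are made up, not taken from a real lvdisplay.
lvdisplay = {"pgsql-lv": 40.0, "backup-lv": 50.0}   # current LV sizes (GiB)
requests = [                                        # (fs name, LV, new size)
    ("database", "pgsql-lv", 25),
    ("backup", "backup-lv", 60),
]

cgtsvg_growth_gib = 0
for name, lv, new in requests:
    orig = int(float(lvdisplay[lv]))
    if name == "database":
        orig //= 2                # database LV is reported at twice the fs size
    if orig > new:
        raise ValueError("'%s' must be at least: %s" % (name, orig))
    if name == "database":
        cgtsvg_growth_gib += 2 * (new - orig)
    else:
        cgtsvg_growth_gib += new - orig

print(cgtsvg_growth_gib)  # 2 * (25 - 20) + (60 - 50) = 20
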
Example 19
    def get_system_config(self):
        ceph_backend = StorageBackendConfig.get_backend_conf(
            self.dbapi, constants.CINDER_BACKEND_CEPH)
        if not ceph_backend:
            return {}  # ceph is not configured

        ceph_mon_ips = StorageBackendConfig.get_ceph_mon_ip_addresses(
            self.dbapi)

        controller_hosts = [constants.CONTROLLER_0_HOSTNAME, constants.CONTROLLER_1_HOSTNAME]
        mon_2_host = [mon['hostname'] for mon in self.dbapi.ceph_mon_get_list() if
                      mon['hostname'] not in controller_hosts]
        if len(mon_2_host) > 1:
            raise exception.SysinvException(
                        'Too many ceph monitor hosts, expected 1, got: %s.' % mon_2_host)
        if mon_2_host:
            mon_2_host = mon_2_host[0]
        else:
            mon_2_host = None

        mon_0_ip = ceph_mon_ips['ceph-mon-0-ip']
        mon_1_ip = ceph_mon_ips['ceph-mon-1-ip']
        mon_2_ip = ceph_mon_ips.get('ceph-mon-2-ip', None)
        floating_mon_ip = ceph_mon_ips['ceph-floating-mon-ip']

        mon_0_addr = self._format_ceph_mon_address(mon_0_ip)
        mon_1_addr = self._format_ceph_mon_address(mon_1_ip)
        if mon_2_ip:
            mon_2_addr = self._format_ceph_mon_address(mon_2_ip)
        else:
            mon_2_addr = None
        floating_mon_addr = self._format_ceph_mon_address(floating_mon_ip)

        # ceph can not bind to multiple address families, so only enable IPv6
        # if the monitors are IPv6 addresses
        ms_bind_ipv6 = (netaddr.IPAddress(mon_0_ip).version ==
                        constants.IPV6_FAMILY)

        ksuser = self._get_service_user_name(self.SERVICE_NAME_RGW)

        return {
            'ceph::ms_bind_ipv6': ms_bind_ipv6,

            'platform::ceph::params::service_enabled': True,

            'platform::ceph::params::floating_mon_host':
                constants.CONTROLLER_HOSTNAME,
            'platform::ceph::params::mon_0_host':
                constants.CONTROLLER_0_HOSTNAME,
            'platform::ceph::params::mon_1_host':
                constants.CONTROLLER_1_HOSTNAME,
            'platform::ceph::params::mon_2_host': mon_2_host,

            'platform::ceph::params::floating_mon_ip': floating_mon_ip,
            'platform::ceph::params::mon_0_ip': mon_0_ip,
            'platform::ceph::params::mon_1_ip': mon_1_ip,
            'platform::ceph::params::mon_2_ip': mon_2_ip,

            'platform::ceph::params::floating_mon_addr': floating_mon_addr,
            'platform::ceph::params::mon_0_addr': mon_0_addr,
            'platform::ceph::params::mon_1_addr': mon_1_addr,
            'platform::ceph::params::mon_2_addr': mon_2_addr,

            'platform::ceph::params::rgw_admin_user':
                ksuser,
            'platform::ceph::params::rgw_admin_domain':
                self._get_service_user_domain_name(),
            'platform::ceph::params::rgw_admin_project':
                self._get_service_tenant_name(),

            'platform::ceph::rgw::keystone::auth::auth_name':
                ksuser,
            'platform::ceph::rgw::keystone::auth::public_url':
                self._get_rgw_public_url(),
            'platform::ceph::rgw::keystone::auth::internal_url':
                self._get_rgw_internal_url(),
            'platform::ceph::rgw::keystone::auth::admin_url':
                self._get_rgw_admin_url(),
            'platform::ceph::rgw::keystone::auth::region':
                self._get_rgw_region_name(),
            'platform::ceph::rgw::keystone::auth::tenant':
                self._get_service_tenant_name(),
        }
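
ms_bind_ipv6 depends only on the address family of the first monitor IP. A two-line check with netaddr is sketched below; the addresses are illustrative and the literal 6 stands in for constants.IPV6_FAMILY.

import netaddr

# Illustrative monitor addresses; only the address family matters here.
for mon_0_ip in ("192.168.204.3", "fd00:204::3"):
    ms_bind_ipv6 = netaddr.IPAddress(mon_0_ip).version == 6
    print(mon_0_ip, "->", ms_bind_ipv6)
# 192.168.204.3 -> False
# fd00:204::3 -> True
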
Example 20
def _check(op, lvg):
    # Semantic checks
    LOG.debug("Semantic check for %s operation" % op)

    # Check host and host state
    _check_host(lvg)

    # Check for required volume group name
    if lvg['lvm_vg_name'] not in constants.LVG_ALLOWED_VGS:
        grp = "'%s', '%s', or '%s'" % (constants.LVG_NOVA_LOCAL,
                                       constants.LVG_CINDER_VOLUMES,
                                       constants.LVG_CGTS_VG)
        raise wsme.exc.ClientSideError(
            _("Volume Group name (%s) must be \"%s\"") %
            (lvg['lvm_vg_name'], grp))
    lvg_caps = lvg['capabilities']
    if op == "add":
        if lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            # Cinder VG type must be the same on both controllers
            mate_lvg = _get_mate_ctrl_lvg(lvg)
            lvm_type = lvg_caps.get(constants.LVG_CINDER_PARAM_LVM_TYPE)
            if mate_lvg and lvm_type:
                # lvm_type may be None & we avoid setting defaults in a _check function
                mate_type = mate_lvg['capabilities'][
                    constants.LVG_CINDER_PARAM_LVM_TYPE]
                if lvm_type != mate_type:
                    raise wsme.exc.ClientSideError(
                        _(
                            "LVG %(lvm_type)s for %(vg_name)s must be %(type)s, the same on"
                            " both controllers." % {
                                'lvm_type':
                                constants.LVG_CINDER_PARAM_LVM_TYPE,
                                'vg_name': lvg['lvm_vg_name'],
                                'type': mate_type
                            }))
        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(
                _("%s volume group already exists") % constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            pass
        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            pass

    elif op == "modify":
        # Sanity check: parameters

        if lvg['lvm_vg_name'] in [
                constants.LVG_CGTS_VG, constants.LVG_NOVA_LOCAL
        ]:
            raise wsme.exc.ClientSideError(
                _("%s volume group does not have "
                  "any parameters to modify") % lvg['lvm_vg_name'])
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            if constants.LVG_CINDER_PARAM_LVM_TYPE not in lvg_caps:
                raise wsme.exc.ClientSideError(
                    _('Internal Error: %s parameter missing for volume '
                      'group.') % constants.LVG_CINDER_PARAM_LVM_TYPE)
            else:
                # Make sure that cinder volumes provisioning type is a valid value
                if constants.LVG_CINDER_PARAM_LVM_TYPE in lvg_caps and \
                   lvg_caps[constants.LVG_CINDER_PARAM_LVM_TYPE] not in \
                   [constants.LVG_CINDER_LVM_TYPE_THIN,
                        constants.LVG_CINDER_LVM_TYPE_THICK]:
                    msg = _('Invalid parameter: %s must be %s or %s' %
                            (constants.LVG_CINDER_PARAM_LVM_TYPE,
                             constants.LVG_CINDER_LVM_TYPE_THIN,
                             constants.LVG_CINDER_LVM_TYPE_THICK))
                    raise wsme.exc.ClientSideError(msg)

    elif op == "delete":
        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(
                _("%s volume group cannot be deleted") % constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            if ((lvg['vg_state'] in [constants.PROVISIONED, constants.LVG_ADD])
                    and StorageBackendConfig.has_backend(
                        pecan.request.dbapi, constants.CINDER_BACKEND_LVM)):
                raise wsme.exc.ClientSideError(
                    _("cinder-volumes LVG cannot be removed once it is "
                      "provisioned and LVM backend is added."))
        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            # We never have more than 1 LV in nova-local VG
            pass
    else:
        raise wsme.exc.ClientSideError(
            _("Internal Error: Invalid Volume Group operation: %s" % op))

    return lvg
Example 21
    def get_system_config(self):
        ceph_backend = StorageBackendConfig.get_backend_conf(
            self.dbapi, constants.CINDER_BACKEND_CEPH)
        if not ceph_backend:
            return {}  # ceph is not configured

        ceph_mon_ips = StorageBackendConfig.get_ceph_mon_ip_addresses(
            self.dbapi)

        if not ceph_mon_ips:
            return {}  # system configuration is not yet ready

        controller_hosts = [
            constants.CONTROLLER_0_HOSTNAME, constants.CONTROLLER_1_HOSTNAME
        ]
        mon_2_host = [
            mon['hostname'] for mon in self.dbapi.ceph_mon_get_list()
            if mon['hostname'] not in controller_hosts
        ]
        if len(mon_2_host) > 1:
            raise exception.SysinvException(
                'Too many ceph monitor hosts, expected 1, got: %s.' %
                mon_2_host)
        if mon_2_host:
            mon_2_host = mon_2_host[0]
        else:
            mon_2_host = None

        mon_0_ip = ceph_mon_ips[constants.CEPH_MON_0]
        mon_1_ip = ceph_mon_ips[constants.CEPH_MON_1]
        mon_2_ip = ceph_mon_ips.get(constants.CEPH_MON_2, None)
        floating_mon_ip = ceph_mon_ips[constants.CEPH_FLOATING_MON]

        mon_0_addr = self._format_ceph_mon_address(mon_0_ip)
        mon_1_addr = self._format_ceph_mon_address(mon_1_ip)
        if mon_2_ip:
            mon_2_addr = self._format_ceph_mon_address(mon_2_ip)
        else:
            mon_2_addr = None
        floating_mon_addr = self._format_ceph_mon_address(floating_mon_ip)

        # ceph can not bind to multiple address families, so only enable IPv6
        # if the monitors are IPv6 addresses
        ms_bind_ipv6 = (
            netaddr.IPAddress(mon_0_ip).version == constants.IPV6_FAMILY)

        skip_osds_during_restore = \
            (utils.is_std_system(self.dbapi) and
            ceph_backend.task == constants.SB_TASK_RESTORE)

        is_sx_to_dx_migration = self._get_system_capability(
            'simplex_to_duplex_migration')

        config = {
            'ceph::ms_bind_ipv6':
            ms_bind_ipv6,
            'platform::ceph::params::service_enabled':
            True,
            'platform::ceph::params::floating_mon_host':
            constants.CONTROLLER_HOSTNAME,
            'platform::ceph::params::mon_0_host':
            constants.CONTROLLER_0_HOSTNAME,
            'platform::ceph::params::mon_1_host':
            constants.CONTROLLER_1_HOSTNAME,
            'platform::ceph::params::mon_2_host':
            mon_2_host,
            'platform::ceph::params::floating_mon_ip':
            floating_mon_ip,
            'platform::ceph::params::mon_0_ip':
            mon_0_ip,
            'platform::ceph::params::mon_1_ip':
            mon_1_ip,
            'platform::ceph::params::mon_2_ip':
            mon_2_ip,
            'platform::ceph::params::floating_mon_addr':
            floating_mon_addr,
            'platform::ceph::params::mon_0_addr':
            mon_0_addr,
            'platform::ceph::params::mon_1_addr':
            mon_1_addr,
            'platform::ceph::params::mon_2_addr':
            mon_2_addr,
            'platform::ceph::params::rgw_enabled':
            self._is_radosgw_enabled(),
            'platform::ceph::rgw::keystone::swift_endpts_enabled':
            False,
            'platform::ceph::rgw::keystone::rgw_admin_user':
            self._get_service_user_name(self.SERVICE_NAME_RGW),
            'platform::ceph::rgw::keystone::rgw_admin_password':
            self._get_service_password(self.SERVICE_NAME_RGW),
            'platform::ceph::rgw::keystone::rgw_admin_domain':
            self._get_service_user_domain_name(),
            'platform::ceph::rgw::keystone::rgw_admin_project':
            self._get_service_tenant_name(),
            'platform::ceph::params::skip_osds_during_restore':
            skip_osds_during_restore,
            'platform::ceph::params::simplex_to_duplex_migration':
            bool(is_sx_to_dx_migration),
        }

        if is_sx_to_dx_migration:
            cephfs_filesystems = self._get_cephfs_filesystems()
            if cephfs_filesystems:
                config[
                    'platform::ceph::params::cephfs_filesystems'] = cephfs_filesystems

        if (utils.is_openstack_applied(self.dbapi) and utils.is_chart_enabled(
                self.dbapi, constants.HELM_APP_OPENSTACK,
                self.HELM_CHART_SWIFT, common.HELM_NS_OPENSTACK)):
            app = self.dbapi.kube_app_get(constants.HELM_APP_OPENSTACK)
            override = self.dbapi.helm_override_get(app.id,
                                                    self.SERVICE_NAME_RGW,
                                                    common.HELM_NS_OPENSTACK)
            password = override.system_overrides.get(self.SERVICE_NAME_RGW,
                                                     None)
            if password:
                swift_auth_password = password.encode('utf8', 'strict')
                config.update({
                    'platform::ceph::rgw::keystone::swift_endpts_enabled':
                    True
                })
                config.pop('platform::ceph::rgw::keystone::rgw_admin_user')
                config.update({
                    'platform::ceph::rgw::keystone::rgw_admin_password':
                    swift_auth_password
                })
                config.update({
                    'platform::ceph::rgw::keystone::rgw_admin_domain':
                    self.RADOSGW_SERVICE_DOMAIN_NAME
                })
                config.update({
                    'platform::ceph::rgw::keystone::rgw_admin_project':
                    self.RADOSGW_SERVICE_PROJECT_NAME
                })
            else:
                raise exception.SysinvException(
                    "Unable to retreive containerized swift auth password")

        return config
Example 22
    def post(self, body):
        """Create new Service Parameter."""

        resource = body.get('resource')
        personality = body.get('personality')

        if personality is not None or resource is not None:
            return self.post_custom_resource(body, personality, resource)

        service = self._get_service(body)

        section = body.get('section')
        if not section:
            raise wsme.exc.ClientSideError(_("Unspecified section name."))
        elif section not in service_parameter.SERVICE_PARAMETER_SCHEMA[
                service]:
            msg = _("Invalid service section %s." % section)
            raise wsme.exc.ClientSideError(msg)

        new_records = []
        parameters = body.get('parameters')
        if not parameters:
            raise wsme.exc.ClientSideError(_("Unspecified parameters."))

        if service == constants.SERVICE_TYPE_CEPH:
            if not StorageBackendConfig.has_backend_configured(
                    pecan.request.dbapi, constants.CINDER_BACKEND_CEPH):
                msg = _("Ceph backend is required.")
                raise wsme.exc.ClientSideError(msg)

        for name, value in parameters.items():
            new_record = {
                'service': service,
                'section': section,
                'name': name,
                'value': value,
            }
            self._check_parameter_syntax(new_record)

            existing = False
            try:
                pecan.request.dbapi.service_parameter_get_one(
                    service, section, name)
                existing = True
            except exception.NotFound:
                pass
            except exception.MultipleResults:
                # We'll check/handle this in the "finally" block
                existing = True
            finally:
                if existing:
                    msg = _("Service parameter add failed: "
                            "Parameter already exists: "
                            "service=%s section=%s name=%s" %
                            (service, section, name))
                    raise wsme.exc.ClientSideError(msg)

            new_records.append(new_record)

        svc_params = []
        for n in new_records:
            try:
                new_parm = pecan.request.dbapi.service_parameter_create(n)
            except exception.NotFound:
                msg = _("Service parameter add failed:  "
                        "service %s section %s name %s value %s" %
                        (service, section, n['name'], n['value']))
                raise wsme.exc.ClientSideError(msg)
            svc_params.append(new_parm)

        try:
            pecan.request.rpcapi.update_service_config(pecan.request.context,
                                                       service)
        except rpc_common.RemoteError as e:
            # rollback create service parameters
            for p in svc_params:
                try:
                    pecan.request.dbapi.service_parameter_destroy_uuid(p.uuid)
                    LOG.warn(
                        _("Rollback service parameter create: "
                          "destroy uuid {}".format(p.uuid)))
                except exception.SysinvException:
                    pass
            raise wsme.exc.ClientSideError(str(e.value))
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.exception(e)

        return ServiceParameterCollection.convert_with_links(svc_params,
                                                             limit=None,
                                                             url=None,
                                                             expand=False,
                                                             sort_key='id',
                                                             sort_dir='asc')
Example 23
def _check(op, lvg):
    # Semantic checks
    LOG.debug("Semantic check for %s operation" % op)

    # Check host and host state
    _check_host(lvg)

    # Check for required volume group name
    if lvg['lvm_vg_name'] not in constants.LVG_ALLOWED_VGS:
        grp = "'%s', '%s', or '%s'" % (constants.LVG_NOVA_LOCAL,
                                       constants.LVG_CINDER_VOLUMES,
                                       constants.LVG_CGTS_VG)
        raise wsme.exc.ClientSideError(
            _("Volume Group name (%s) must be \"%s\"") % (lvg['lvm_vg_name'],
                                                          grp))
    lvg_caps = lvg['capabilities']
    if op == "add":
        if lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            # Cinder VG type must be the same on both controllers
            mate_lvg = _get_mate_ctrl_lvg(lvg)
            lvm_type = lvg_caps.get(constants.LVG_CINDER_PARAM_LVM_TYPE)
            if mate_lvg and lvm_type:
                # lvm_type may be None & we avoid setting defaults in a _check function
                mate_type = mate_lvg['capabilities'][constants.LVG_CINDER_PARAM_LVM_TYPE]
                if lvm_type != mate_type:
                    raise wsme.exc.ClientSideError(
                        _("LVG %(lvm_type)s for %(vg_name)s must be %(type)s, the same on"
                          " both controllers." % {'lvm_type': constants.LVG_CINDER_PARAM_LVM_TYPE,
                                                  'vg_name': lvg['lvm_vg_name'],
                                                  'type': mate_type}))
        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(_("%s volume group already exists") %
                                           constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            pass
        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            pass

    elif op == "modify":
        # Sanity check: parameters

        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(_("%s volume group does not have "
                                             "any parameters to modify") %
                                           constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            if constants.LVG_CINDER_PARAM_LVM_TYPE not in lvg_caps:
                raise wsme.exc.ClientSideError(
                    _('Internal Error: %s parameter missing for volume '
                      'group.') % constants.LVG_CINDER_PARAM_LVM_TYPE)
            else:
                # Make sure that cinder volumes provisioning type is a valid value
                if constants.LVG_CINDER_PARAM_LVM_TYPE in lvg_caps and \
                   lvg_caps[constants.LVG_CINDER_PARAM_LVM_TYPE] not in \
                   [constants.LVG_CINDER_LVM_TYPE_THIN,
                        constants.LVG_CINDER_LVM_TYPE_THICK]:
                    msg = _('Invalid parameter: %s must be %s or %s' %
                            (constants.LVG_CINDER_PARAM_LVM_TYPE,
                             constants.LVG_CINDER_LVM_TYPE_THIN,
                             constants.LVG_CINDER_LVM_TYPE_THICK))
                    raise wsme.exc.ClientSideError(msg)

        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            # instance_backing: This is a required parameter
            if constants.LVG_NOVA_PARAM_BACKING not in lvg_caps:
                raise wsme.exc.ClientSideError(
                    _('Internal Error: %s parameter missing for volume '
                      'group.') % constants.LVG_NOVA_PARAM_BACKING)
            else:
                # Instances backed by remote ephemeral storage can only be
                # used on systems that have a Ceph (internal or external)
                # backend.
                if ((lvg_caps.get(constants.LVG_NOVA_PARAM_BACKING) ==
                     constants.LVG_NOVA_BACKING_REMOTE) and
                        not StorageBackendConfig.has_backend_configured(
                            pecan.request.dbapi,
                            constants.SB_TYPE_CEPH,
                            service=constants.SB_SVC_NOVA,
                            check_only_defaults=False,
                            rpcapi=pecan.request.rpcapi) and
                        not StorageBackendConfig.has_backend_configured(
                            pecan.request.dbapi,
                            constants.SB_TYPE_CEPH_EXTERNAL,
                            service=constants.SB_SVC_NOVA,
                            check_only_defaults=False,
                            rpcapi=pecan.request.rpcapi)):
                    raise wsme.exc.ClientSideError(
                        _('Invalid value for instance_backing. Instances '
                          'backed by remote ephemeral storage can only be '
                          'used on systems that have a Ceph (internal or '
                          'external) backend.'))

            if (lvg['lvm_cur_lv'] > 1):
                raise wsme.exc.ClientSideError(
                    _("Can't modify the volume group: %s. There are currently "
                      "%d instance volumes present in the volume group. "
                      "Terminate or migrate all instances from the worker to "
                      "allow volume group madifications." %
                        (lvg['lvm_vg_name'], lvg['lvm_cur_lv'] - 1)))

    elif op == "delete":
        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(_("%s volume group cannot be deleted") %
                                           constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            if ((lvg['vg_state'] in
                [constants.PROVISIONED, constants.LVG_ADD]) and
                StorageBackendConfig.has_backend(
                    pecan.request.dbapi, constants.CINDER_BACKEND_LVM)):
                raise wsme.exc.ClientSideError(
                    _("cinder-volumes LVG cannot be removed once it is "
                      "provisioned and LVM backend is added."))
        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            if (lvg['lvm_cur_lv'] and lvg['lvm_cur_lv'] > 1):
                raise wsme.exc.ClientSideError(
                    _("Can't delete volume group: %s. There are currently %d "
                      "instance volumes present in the volume group. Terminate"
                      " or migrate all instances from the worker to allow "
                      "volume group deletion." % (lvg['lvm_vg_name'],
                                                  lvg['lvm_cur_lv'] - 1)))
    else:
        raise wsme.exc.ClientSideError(
            _("Internal Error: Invalid Volume Group operation: %s" % op))

    return lvg
Example 24
def _check_host(stor):
    ihost_id = stor['forihostid']
    ihost = pecan.request.dbapi.ihost_get(ihost_id)
    stor_model = ceph.get_ceph_storage_model()

    # semantic check: whether OSD can be added to this host.
    if stor_model == constants.CEPH_STORAGE_MODEL:
        if ihost.personality != constants.STORAGE:
            msg = ("Storage model is '%s'. Storage devices can only be added "
                   "to storage nodes." % stor_model)
            raise wsme.exc.ClientSideError(_(msg))
    elif stor_model == constants.CEPH_CONTROLLER_MODEL:
        if ihost.personality != constants.CONTROLLER:
            msg = ("Storage model is '%s'. Storage devices can only be added "
                   "to controller nodes." % stor_model)
            raise wsme.exc.ClientSideError(_(msg))
    elif stor_model == constants.CEPH_UNDEFINED_MODEL:
        msg = ("Please install storage-0 or configure a Ceph monitor "
               "on a worker node before adding storage devices.")
        raise wsme.exc.ClientSideError(_(msg))

    # semantic check: whether host is operationally acceptable
    if (stor_model == constants.CEPH_CONTROLLER_MODEL or
            stor_model == constants.CEPH_AIO_SX_MODEL):
        if (ihost['administrative'] == constants.ADMIN_UNLOCKED and
                ihost['operational'] != constants.OPERATIONAL_ENABLED):
            msg = _("Host %s must be unlocked and operational state "
                    "enabled." % ihost['hostname'])
            raise wsme.exc.ClientSideError(msg)
    else:
        if ihost['administrative'] != constants.ADMIN_LOCKED:
            raise wsme.exc.ClientSideError(_("Host %s must be locked." %
                                             ihost['hostname']))

    # semantic check: only storage nodes are allowed without k8s
    if (not utils.is_kubernetes_config(pecan.request.dbapi) and
            ihost['personality'] != constants.STORAGE):
        msg = ("Host personality must be 'storage' or kubernetes enabled.")
        raise wsme.exc.ClientSideError(_(msg))

    # semantic check: whether system has a ceph backend
    if not StorageBackendConfig.has_backend_configured(
            pecan.request.dbapi,
            constants.SB_TYPE_CEPH
    ):
        raise wsme.exc.ClientSideError(_(
            "System must have a %s backend" % constants.SB_TYPE_CEPH))

    # semantic check: whether at least 2 unlocked hosts are monitors
    if not utils.is_aio_system(pecan.request.dbapi):
        ceph_helper = ceph.CephApiOperator()
        num_monitors, required_monitors, quorum_names = \
            ceph_helper.get_monitors_status(pecan.request.dbapi)
        # CGTS 503 for now update monitors requirement until controller-0 is
        # inventoried
        # CGTS 1448
        if num_monitors < required_monitors:
            raise wsme.exc.ClientSideError(_(
                "Only %d storage monitor available. "
                "At least %s unlocked and enabled hosts with monitors are "
                "required. Please ensure hosts with monitors are unlocked "
                "and enabled.") % (num_monitors, required_monitors))