Code Example #1
def _check_lvg(op, pv):
    # semantic check whether idisk is associated
    ilvgid = pv.get('forilvgid') or pv.get('ilvg_uuid')
    if ilvgid is None:
        LOG.warning("check_lvg: no lvg associated with pv, returning.")
        return

    # Get the associated volume group record
    ilvg = pecan.request.dbapi.ilvg_get(ilvgid)

    # In a combo node we also have cinder and drbd physical volumes.
    if ilvg.lvm_vg_name not in constants.LVG_ALLOWED_VGS:
        raise wsme.exc.ClientSideError(
            _("This operation can not be performed"
              " on Local Volume Group %s" % ilvg.lvm_vg_name))

    # Make sure that the volume group is in the adding/provisioned state
    if ilvg.vg_state == constants.LVG_DEL:
        raise wsme.exc.ClientSideError(
            _("Local volume Group. %s set to be deleted. Add it again to allow"
              " adding physical volumes. " % ilvg.lvm_vg_name))

    # Semantic Checks: Based on PV operations
    if op == "add":
        if ilvg.lvm_vg_name == constants.LVG_CGTS_VG:
            controller_fs_list = pecan.request.dbapi.controller_fs_get_list()
            for controller_fs in controller_fs_list:
                if controller_fs.state == constants.CONTROLLER_FS_RESIZING_IN_PROGRESS:
                    msg = _(
                        "Filesystem (%s) resize is in progress. Wait fot the resize "
                        "to finish before adding a physical volume to the cgts-vg "
                        "volume group." % controller_fs.name)
                    raise wsme.exc.ClientSideError(msg)

    elif op == "delete":
        if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG):
            raise wsme.exc.ClientSideError(
                _("Physical volumes cannot be removed from the cgts-vg volume "
                  "group."))
        if ilvg.lvm_vg_name == constants.LVG_CINDER_VOLUMES:
            if ((pv['pv_state'] in [constants.PROVISIONED, constants.PV_ADD])
                    and StorageBackendConfig.has_backend(
                        pecan.request.dbapi, constants.CINDER_BACKEND_LVM)):
                raise wsme.exc.ClientSideError(
                    _("Physical volume %s cannot be removed from cinder-volumes LVG once "
                      "it is provisioned and LVM backend is added." %
                      pv['lvm_pv_name']))

    elif op == "modify":
        pass
    else:
        raise wsme.exc.ClientSideError(
            _("Internal Error: Invalid Physical Volume operation: %s" % op))

    # LVG check passes
    pv['lvm_vg_name'] = ilvg.lvm_vg_name

    return
Code Example #2
File: lifecycle_utils.py  Project: starlingx/config
def _is_rbd_provisioner_required(app_op):
    """ Check if the RBD provisioner is required

    :param app_op: AppOperator object
    :returns: True if a Ceph storage backend is configured, False otherwise
    """
    return StorageBackendConfig.has_backend(app_op._dbapi, constants.SB_TYPE_CEPH)
Code Example #3
    def app_rbd_actions(self, app_obj, dbapi, app_name, operation):
        """ Perform rbd actions for an application based on operation

        :param app_obj: AppOperator object
        :param dbapi: dbapi
        :param app_name: application name
        :param operation: operation being performed
        """

        if app_name in [
                constants.HELM_APP_CERT_MANAGER, constants.HELM_APP_OIDC_AUTH,
                constants.HELM_APP_NGINX_IC
        ]:
            return

        LOG.info("app_rbd_actions app: %s operation: %s" %
                 (app_name, operation))

        # TODO(ksmith): Further decouple this by moving this logic to the
        # application derived class in openstack and just pass here.
        # The RBD provisioner requires Ceph, so it is only required when a
        # Ceph backend is enabled.
        rbd_provisioner_required = StorageBackendConfig.has_backend(
            dbapi, constants.SB_TYPE_CEPH)

        if operation == constants.APP_APPLY_OP:
            if rbd_provisioner_required:
                app_obj._create_rbd_provisioner_secrets(app_name)
        elif operation == constants.APP_REMOVE_OP:
            if rbd_provisioner_required:
                app_obj._delete_rbd_provisioner_secrets(app_name)
            if app_name == constants.HELM_APP_OPENSTACK:
                app_obj._delete_ceph_persistent_volume_claim(
                    common.HELM_NS_OPENSTACK)
                app_obj._delete_namespace(common.HELM_NS_OPENSTACK)
Code Example #4
def _cinder_volumes_patch_semantic_checks(caps_dict):
    # make sure that only valid capabilities are provided
    valid_caps = set([constants.LVG_CINDER_PARAM_LVM_TYPE])
    invalid_caps = set(caps_dict.keys()) - valid_caps

    # Do we have something unexpected?
    if len(invalid_caps) > 0:
        raise wsme.exc.ClientSideError(
            _("Invalid parameter(s) for volume group %s: %s " %
              (constants.LVG_CINDER_VOLUMES, ", ".join(
                  str(i) for i in invalid_caps))))

    # make sure that we are modifying something
    elif len(caps_dict) == 0:
        msg = _('No parameter specified. No action taken')
        raise wsme.exc.ClientSideError(msg)

    # Reject modifications of cinder volume provisioning type if
    # lvm storage backend is enabled
    if (constants.LVG_CINDER_PARAM_LVM_TYPE in caps_dict
            and StorageBackendConfig.has_backend(
                pecan.request.dbapi, constants.CINDER_BACKEND_LVM)):
        msg = _('Cinder volumes LVM type modification denied. '
                'LVM Storage Backend is added.')
        raise wsme.exc.ClientSideError(msg)

    # Make sure that cinder volumes provisioning type is a valid value
    if constants.LVG_CINDER_PARAM_LVM_TYPE in caps_dict and \
       caps_dict[constants.LVG_CINDER_PARAM_LVM_TYPE] not in \
       [constants.LVG_CINDER_LVM_TYPE_THIN,
            constants.LVG_CINDER_LVM_TYPE_THICK]:
        msg = _('Invalid parameter: %s must be %s or %s' %
                (constants.LVG_CINDER_PARAM_LVM_TYPE,
                 constants.LVG_CINDER_LVM_TYPE_THIN,
                 constants.LVG_CINDER_LVM_TYPE_THICK))
        raise wsme.exc.ClientSideError(msg)
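For illustration, a minimal sketch of capability dictionaries this check would accept or reject; the dictionary names and values below are hypothetical, and only the constants referenced in the example above are assumed.

# Hypothetical inputs for _cinder_volumes_patch_semantic_checks():
ok_caps = {constants.LVG_CINDER_PARAM_LVM_TYPE: constants.LVG_CINDER_LVM_TYPE_THIN}
bad_key = {'not_a_valid_cap': 'x'}        # rejected: invalid parameter for cinder-volumes
bad_value = {constants.LVG_CINDER_PARAM_LVM_TYPE: 'sparse'}  # rejected: must be thin or thick
# Note: even ok_caps is rejected once the LVM storage backend has been added,
# per the StorageBackendConfig.has_backend() check above.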
Code Example #5
    def get_system_health(self, context, force=False):
        """Returns the general health of the system

        Checks the following:
        - All hosts are provisioned
        - All hosts are patch current
        - All hosts are unlocked/enabled
        - All hosts having matching configs
        - No management affecting alarms
        - For ceph systems: The storage cluster is healthy
        - All kubernetes nodes are ready
        - All kubernetes control plane pods are ready
        """

        hosts = self._dbapi.ihost_get_list()
        output = _('System Health:\n')
        health_ok = True

        unprovisioned_hosts, provisioned_hosts = \
            self._check_hosts_provisioned(hosts)
        success = unprovisioned_hosts == 0
        output += (_('All hosts are provisioned: [%s]\n') %
                   (Health.SUCCESS_MSG if success else Health.FAIL_MSG))
        if not success:
            output += _('%s Unprovisioned hosts\n') % unprovisioned_hosts
            # Set the hosts to the provisioned_hosts. This will allow the other
            # checks to continue
            hosts = provisioned_hosts

        health_ok = health_ok and success

        success, error_hosts = self._check_hosts_enabled(hosts)
        output += _('All hosts are unlocked/enabled: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            output += _('Locked or disabled hosts: %s\n') \
                % ', '.join(error_hosts)

        health_ok = health_ok and success

        success, error_hosts = self._check_hosts_config(hosts)
        output += _('All hosts have current configurations: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            output += _('Hosts with out of date configurations: %s\n') \
                % ', '.join(error_hosts)

        health_ok = health_ok and success

        success, error_hosts, missing_hosts = self._check_patch_current(hosts)
        output += _('All hosts are patch current: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            if error_hosts:
                output += _('Hosts not patch current: %s\n') \
                    % ', '.join(error_hosts)
            if missing_hosts:
                output += _('Hosts without patch data: %s\n') \
                    % ', '.join(missing_hosts)

        health_ok = health_ok and success

        if StorageBackendConfig.has_backend(self._dbapi,
                                            constants.CINDER_BACKEND_CEPH):
            success = self._check_ceph()
            output += _('Ceph Storage Healthy: [%s]\n') \
                % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
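        # Note: when no Ceph backend is configured, `success` still holds the
        # result of the previous check, so the accumulation below does not
        # change health_ok.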

        health_ok = health_ok and success

        success, allowed, affecting = self._check_alarms(context, force)
        output += _('No alarms: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            output += _('[%s] alarms found, [%s] of which are management '
                        'affecting\n') % (allowed + affecting, affecting)

        health_ok = health_ok and success

        success, error_nodes = self._check_kube_nodes_ready()
        output += _('All kubernetes nodes are ready: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            output += _('Kubernetes nodes not ready: %s\n') \
                % ', '.join(error_nodes)

        health_ok = health_ok and success

        success, error_nodes = self._check_kube_control_plane_pods()
        output += _('All kubernetes control plane pods are ready: [%s]\n') \
            % (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
        if not success:
            output += _('Kubernetes control plane pods not ready: %s\n') \
                % ', '.join(error_nodes)

        health_ok = health_ok and success

        return health_ok, output
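A minimal usage sketch based only on the signature above; `conductor` is a hypothetical handle to whatever object exposes get_system_health().

# Hypothetical caller; only the (context, force) signature shown above is assumed.
healthy, report = conductor.get_system_health(context, force=False)
if not healthy:
    LOG.warning("System health check failed:\n%s", report)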
Code Example #6
def _check(op, lvg):
    # Semantic checks
    LOG.debug("Semantic check for %s operation" % op)

    # Check host and host state
    _check_host(lvg)

    # Check for required volume group name
    if lvg['lvm_vg_name'] not in constants.LVG_ALLOWED_VGS:
        grp = "'%s', '%s', or '%s'" % (constants.LVG_NOVA_LOCAL,
                                       constants.LVG_CINDER_VOLUMES,
                                       constants.LVG_CGTS_VG)
        raise wsme.exc.ClientSideError(
            _("Volume Group name (%s) must be \"%s\"") %
            (lvg['lvm_vg_name'], grp))
    lvg_caps = lvg['capabilities']
    if op == "add":
        if lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            # Cinder VG type must be the same on both controllers
            mate_lvg = _get_mate_ctrl_lvg(lvg)
            lvm_type = lvg_caps.get(constants.LVG_CINDER_PARAM_LVM_TYPE)
            if mate_lvg and lvm_type:
                # lvm_type may be None & we avoid setting defaults in a _check function
                mate_type = mate_lvg['capabilities'][
                    constants.LVG_CINDER_PARAM_LVM_TYPE]
                if lvm_type != mate_type:
                    raise wsme.exc.ClientSideError(
                        _(
                            "LVG %(lvm_type)s for %(vg_name)s must be %(type)s, the same on"
                            " both controllers." % {
                                'lvm_type':
                                constants.LVG_CINDER_PARAM_LVM_TYPE,
                                'vg_name': lvg['lvm_vg_name'],
                                'type': mate_type
                            }))
        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(
                _("%s volume group already exists") % constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            pass
        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            pass

    elif op == "modify":
        # Sanity check: parameters

        if lvg['lvm_vg_name'] in [
                constants.LVG_CGTS_VG, constants.LVG_NOVA_LOCAL
        ]:
            raise wsme.exc.ClientSideError(
                _("%s volume group does not have "
                  "any parameters to modify") % lvg['lvm_vg_name'])
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            if constants.LVG_CINDER_PARAM_LVM_TYPE not in lvg_caps:
                raise wsme.exc.ClientSideError(
                    _('Internal Error: %s parameter missing for volume '
                      'group.') % constants.LVG_CINDER_PARAM_LVM_TYPE)
            else:
                # Make sure that cinder volumes provisioning type is a valid value
                if constants.LVG_CINDER_PARAM_LVM_TYPE in lvg_caps and \
                   lvg_caps[constants.LVG_CINDER_PARAM_LVM_TYPE] not in \
                   [constants.LVG_CINDER_LVM_TYPE_THIN,
                        constants.LVG_CINDER_LVM_TYPE_THICK]:
                    msg = _('Invalid parameter: %s must be %s or %s' %
                            (constants.LVG_CINDER_PARAM_LVM_TYPE,
                             constants.LVG_CINDER_LVM_TYPE_THIN,
                             constants.LVG_CINDER_LVM_TYPE_THICK))
                    raise wsme.exc.ClientSideError(msg)

    elif op == "delete":
        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(
                _("%s volume group cannot be deleted") % constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            if ((lvg['vg_state'] in [constants.PROVISIONED, constants.LVG_ADD])
                    and StorageBackendConfig.has_backend(
                        pecan.request.dbapi, constants.CINDER_BACKEND_LVM)):
                raise wsme.exc.ClientSideError(
                    _("cinder-volumes LVG cannot be removed once it is "
                      "provisioned and LVM backend is added."))
        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            # We never have more than 1 LV in nova-local VG
            pass
    else:
        raise wsme.exc.ClientSideError(
            _("Internal Error: Invalid Volume Group operation: %s" % op))

    return lvg
Code Example #7
def _check_controller_multi_fs_data(context, controller_fs_list_new,
                                    modified_fs):
    """ Check controller filesystem data and return growth
        returns: cgtsvg_growth_gib
    """

    cgtsvg_growth_gib = 0

    # Check if we need img_conversions
    img_conversion_required = False
    lvdisplay_keys = [
        constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_DATABASE],
        constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_CGCS],
        constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_BACKUP],
        constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_SCRATCH],
        constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_GNOCCHI]
    ]

    # On the primary region, img-conversions always exists in the controller_fs
    # DB table. On a secondary region, if both glance and cinder are shared from
    # the primary region, img-conversions won't exist in the controller_fs DB
    # table. A semantic check already prevents resizing img-conversions there.
    if (StorageBackendConfig.has_backend(pecan.request.dbapi,
                                         constants.SB_TYPE_LVM)
            or StorageBackendConfig.has_backend(pecan.request.dbapi,
                                                constants.SB_TYPE_CEPH)):
        img_conversion_required = True
        lvdisplay_keys.append(constants.FILESYSTEM_LV_DICT[
            constants.FILESYSTEM_NAME_IMG_CONVERSIONS])

    if (constants.FILESYSTEM_NAME_IMG_CONVERSIONS in modified_fs
            and not img_conversion_required):
        raise wsme.exc.ClientSideError(
            _("%s is not modifiable: no cinder backend is "
              "currently configured.") %
            constants.FILESYSTEM_NAME_IMG_CONVERSIONS)

    lvdisplay_dict = pecan.request.rpcapi.get_controllerfs_lv_sizes(context)

    for key in lvdisplay_keys:
        if not lvdisplay_dict.get(key, None):
            raise wsme.exc.ClientSideError(
                _("Unable to determine the "
                  "current size of %s. "
                  "Rejecting modification "
                  "request." % key))

    for fs in controller_fs_list_new:
        lv = fs.logical_volume
        if lvdisplay_dict.get(lv, None):
            orig = int(float(lvdisplay_dict[lv]))
            new = int(fs.size)
            if fs.name == constants.FILESYSTEM_NAME_DATABASE:
                orig = orig / 2

            if orig > new:
                raise wsme.exc.ClientSideError(
                    _("'%s'  must be at least: "
                      "%s" % (fs.name, orig)))
            if fs.name == constants.FILESYSTEM_NAME_DATABASE:
                cgtsvg_growth_gib += 2 * (new - orig)
            else:
                cgtsvg_growth_gib += (new - orig)

    LOG.info("_check_controller_multi_fs_data cgtsvg_growth_gib=%s" %
             cgtsvg_growth_gib)

    return cgtsvg_growth_gib
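As a worked example of the growth calculation above (hypothetical sizes): if lvdisplay reports the database logical volume at 20 GiB, new sizes are compared against 20 / 2 = 10 GiB, so a resize to 12 GiB contributes 2 * (12 - 10) = 4 GiB to cgtsvg_growth_gib, while a non-database filesystem resized from 8 GiB to 10 GiB contributes 10 - 8 = 2 GiB.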
Code Example #8
def _check(op, lvg):
    # Semantic checks
    LOG.debug("Semantic check for %s operation" % op)

    # Check host and host state
    _check_host(lvg)

    # Check for required volume group name
    if lvg['lvm_vg_name'] not in constants.LVG_ALLOWED_VGS:
        grp = "'%s', '%s', or '%s'" % (constants.LVG_NOVA_LOCAL,
                                       constants.LVG_CINDER_VOLUMES,
                                       constants.LVG_CGTS_VG)
        raise wsme.exc.ClientSideError(
            _("Volume Group name (%s) must be \"%s\"") % (lvg['lvm_vg_name'],
                                                          grp))
    lvg_caps = lvg['capabilities']
    if op == "add":
        if lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            # Cinder VG type must be the same on both controllers
            mate_lvg = _get_mate_ctrl_lvg(lvg)
            lvm_type = lvg_caps.get(constants.LVG_CINDER_PARAM_LVM_TYPE)
            if mate_lvg and lvm_type:
                # lvm_type may be None & we avoid setting defaults in a _check function
                mate_type = mate_lvg['capabilities'][constants.LVG_CINDER_PARAM_LVM_TYPE]
                if lvm_type != mate_type:
                    raise wsme.exc.ClientSideError(
                        _("LVG %(lvm_type)s for %(vg_name)s must be %(type)s, the same on"
                          " both controllers." % {'lvm_type': constants.LVG_CINDER_PARAM_LVM_TYPE,
                                                  'vg_name': lvg['lvm_vg_name'],
                                                  'type': mate_type}))
        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(_("%s volume group already exists") %
                                           constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            pass
        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            pass

    elif op == "modify":
        # Sanity check: parameters

        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(_("%s volume group does not have "
                                             "any parameters to modify") %
                                           constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            if constants.LVG_CINDER_PARAM_LVM_TYPE not in lvg_caps:
                raise wsme.exc.ClientSideError(
                    _('Internal Error: %s parameter missing for volume '
                      'group.') % constants.LVG_CINDER_PARAM_LVM_TYPE)
            else:
                # Make sure that cinder volumes provisioning type is a valid value
                if constants.LVG_CINDER_PARAM_LVM_TYPE in lvg_caps and \
                   lvg_caps[constants.LVG_CINDER_PARAM_LVM_TYPE] not in \
                   [constants.LVG_CINDER_LVM_TYPE_THIN,
                        constants.LVG_CINDER_LVM_TYPE_THICK]:
                    msg = _('Invalid parameter: %s must be %s or %s' %
                            (constants.LVG_CINDER_PARAM_LVM_TYPE,
                             constants.LVG_CINDER_LVM_TYPE_THIN,
                             constants.LVG_CINDER_LVM_TYPE_THICK))
                    raise wsme.exc.ClientSideError(msg)

        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            # instance_backing: This is a required parameter
            if constants.LVG_NOVA_PARAM_BACKING not in lvg_caps:
                raise wsme.exc.ClientSideError(
                    _('Internal Error: %s parameter missing for volume '
                      'group.') % constants.LVG_NOVA_PARAM_BACKING)
            else:
                # Instances backed by remote ephemeral storage can only be
                # used on systems that have a Ceph (internal or external)
                # backend.
                if ((lvg_caps.get(constants.LVG_NOVA_PARAM_BACKING) ==
                     constants.LVG_NOVA_BACKING_REMOTE) and
                        not StorageBackendConfig.has_backend_configured(
                            pecan.request.dbapi,
                            constants.SB_TYPE_CEPH,
                            service=constants.SB_SVC_NOVA,
                            check_only_defaults=False,
                            rpcapi=pecan.request.rpcapi) and
                        not StorageBackendConfig.has_backend_configured(
                            pecan.request.dbapi,
                            constants.SB_TYPE_CEPH_EXTERNAL,
                            service=constants.SB_SVC_NOVA,
                            check_only_defaults=False,
                            rpcapi=pecan.request.rpcapi)):
                    raise wsme.exc.ClientSideError(
                        _('Invalid value for instance_backing. Instances '
                          'backed by remote ephemeral storage can only be '
                          'used on systems that have a Ceph (internal or '
                          'external) backend.'))

            if (lvg['lvm_cur_lv'] > 1):
                raise wsme.exc.ClientSideError(
                    _("Can't modify the volume group: %s. There are currently "
                      "%d instance volumes present in the volume group. "
                      "Terminate or migrate all instances from the worker to "
                      "allow volume group madifications." %
                        (lvg['lvm_vg_name'], lvg['lvm_cur_lv'] - 1)))

    elif op == "delete":
        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(_("%s volume group cannot be deleted") %
                                           constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            if ((lvg['vg_state'] in
                [constants.PROVISIONED, constants.LVG_ADD]) and
                StorageBackendConfig.has_backend(
                    pecan.request.dbapi, constants.CINDER_BACKEND_LVM)):
                raise wsme.exc.ClientSideError(
                    _("cinder-volumes LVG cannot be removed once it is "
                      "provisioned and LVM backend is added."))
        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            if (lvg['lvm_cur_lv'] and lvg['lvm_cur_lv'] > 1):
                raise wsme.exc.ClientSideError(
                    _("Can't delete volume group: %s. There are currently %d "
                      "instance volumes present in the volume group. Terminate"
                      " or migrate all instances from the worker to allow "
                      "volume group deletion." % (lvg['lvm_vg_name'],
                                                  lvg['lvm_cur_lv'] - 1)))
    else:
        raise wsme.exc.ClientSideError(
            _("Internal Error: Invalid Volume Group operation: %s" % op))

    return lvg
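Common to all eight examples is the same guard: StorageBackendConfig.has_backend(dbapi, backend_type) is consulted before allowing an operation that depends on a particular storage backend. A minimal sketch of that pattern follows; the import paths are assumptions based on the sysinv code above, and backend_required is a hypothetical helper name.

# Minimal sketch (hypothetical helper). The import paths are assumptions based
# on the sysinv code shown in the examples above.
from sysinv.common import constants
from sysinv.common.storage_backend_conf import StorageBackendConfig


def backend_required(dbapi, backend_type=constants.SB_TYPE_CEPH):
    """Return True only when the given storage backend type is configured."""
    return StorageBackendConfig.has_backend(dbapi, backend_type)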