コード例 #1
0
ファイル: openstack.py プロジェクト: MarioCarrilloA/config
    def get_cinder_services(self):
        """Return the list of Cinder services, or [] when Cinder is absent.

        Cinder is only expected when a Ceph or LVM cinder backend is
        configured; otherwise no client call is attempted and an empty
        list is returned.  Client errors are logged, not raised.
        """
        service_list = []

        # Check to see if cinder is present
        # TODO(rchurch): Need to refactor with storage backend
        if (StorageBackendConfig.has_backend_configured(
                self.dbapi, constants.CINDER_BACKEND_CEPH) or
                StorageBackendConfig.has_backend_configured(
                    self.dbapi, constants.CINDER_BACKEND_LVM)):
            try:
                service_list = self._get_cinderclient().services.list()
            except Exception as e:
                # Message fixed (missing space after ':') for consistency
                # with get_cinder_volumes(); lazy %-args avoid formatting
                # when the log level is disabled.
                LOG.error("get_cinder_services: Failed to access "
                          "Cinder client: %s", e)

        return service_list
コード例 #2
0
def _check_host(stor):
    """Run semantic checks before an OSD (stor) is added to its host.

    Raises wsme.exc.ClientSideError on the first failed check; returns
    None when all checks pass.

    :param stor: dictionary describing the storage device; must contain
                 'forihostid' identifying the owning host.
    """
    ihost_id = stor['forihostid']
    ihost = pecan.request.dbapi.ihost_get(ihost_id)
    stor_model = ceph.get_ceph_storage_model()

    # semantic check: whether OSD can be added to this host.
    # The acceptable host personality depends on the Ceph deployment
    # model (dedicated storage nodes vs. controller-hosted storage).
    if stor_model == constants.CEPH_STORAGE_MODEL:
        if ihost.personality != constants.STORAGE:
            msg = ("Storage model is '%s'. Storage devices can only be added "
                   "to storage nodes." % stor_model)
            raise wsme.exc.ClientSideError(_(msg))
    elif stor_model == constants.CEPH_CONTROLLER_MODEL:
        if ihost.personality != constants.CONTROLLER:
            msg = ("Storage model is '%s'. Storage devices can only be added "
                   "to controller nodes." % stor_model)
            raise wsme.exc.ClientSideError(_(msg))
    elif stor_model == constants.CEPH_UNDEFINED_MODEL:
        # Model not determined yet: neither storage-0 nor a worker
        # monitor exists, so OSD placement cannot be decided.
        msg = ("Please install storage-0 or configure a Ceph monitor "
               "on a worker node before adding storage devices.")
        raise wsme.exc.ClientSideError(_(msg))

    # semantic check: whether host is operationally acceptable
    if (stor_model == constants.CEPH_CONTROLLER_MODEL
            or stor_model == constants.CEPH_AIO_SX_MODEL):
        # For controller/AIO-SX models an unlocked host must also be
        # operationally enabled.
        if (ihost['administrative'] == constants.ADMIN_UNLOCKED
                and ihost['operational'] != constants.OPERATIONAL_ENABLED):
            msg = _("Host %s must be unlocked and operational state "
                    "enabled." % ihost['hostname'])
            raise wsme.exc.ClientSideError(msg)
    else:
        # All other models require the host to be administratively locked.
        if ihost['administrative'] != constants.ADMIN_LOCKED:
            raise wsme.exc.ClientSideError(
                _("Host %s must be locked." % ihost['hostname']))

    # semantic check: only storage nodes are allowed without k8s
    if (not utils.is_kubernetes_config(pecan.request.dbapi)
            and ihost['personality'] != constants.STORAGE):
        msg = ("Host personality must be 'storage' or kubernetes enabled.")
        raise wsme.exc.ClientSideError(_(msg))

    # semantic check: whether system has a ceph backend
    if not StorageBackendConfig.has_backend_configured(pecan.request.dbapi,
                                                       constants.SB_TYPE_CEPH):
        raise wsme.exc.ClientSideError(
            _("System must have a %s backend" % constants.SB_TYPE_CEPH))

    # semantic check: whether at least 2 unlocked hosts are monitors
    # (skipped on AIO systems).
    if not utils.is_aio_system(pecan.request.dbapi):
        ceph_helper = ceph.CephApiOperator()
        num_monitors, required_monitors, quorum_names = \
            ceph_helper.get_monitors_status(pecan.request.dbapi)
        # CGTS 503 for now update monitors requirement until controller-0 is
        # inventoried
        # CGTS 1448
        if num_monitors < required_monitors:
            raise wsme.exc.ClientSideError(
                _("Only %d storage monitor available. "
                  "At least %s unlocked and enabled hosts with monitors are "
                  "required. Please ensure hosts with monitors are unlocked "
                  "and enabled.") % (num_monitors, required_monitors))
コード例 #3
0
    def delete(self, uuid):
        """Delete a Service Parameter instance.

        Destroys the parameter, then notifies the conductor; if the RPC
        notification fails the parameter is re-created (best effort).
        """
        parameter = objects.service_parameter.get_by_uuid(
            pecan.request.context, uuid)

        # A Ceph-scoped parameter is only meaningful while a Ceph
        # backend is configured.
        if (parameter.service == constants.SERVICE_TYPE_CEPH and
                not StorageBackendConfig.has_backend_configured(
                    pecan.request.dbapi, constants.CINDER_BACKEND_CEPH)):
            raise wsme.exc.ClientSideError(_("Ceph backend is required."))

        # Platform maintenance parameters are mandatory; never delete them.
        if (parameter.section ==
                constants.SERVICE_PARAM_SECTION_PLATFORM_MAINTENANCE):
            raise wsme.exc.ClientSideError(
                _("Platform Maintenance Parameter '%s' is required." %
                  parameter.name))

        pecan.request.dbapi.service_parameter_destroy_uuid(uuid)
        try:
            pecan.request.rpcapi.update_service_config(pecan.request.context,
                                                       parameter.service)
        except rpc_common.RemoteError as e:
            # rollback destroy service parameter
            try:
                parameter = parameter.as_dict()
                pecan.request.dbapi.service_parameter_create(parameter)
                LOG.warn(
                    _("Rollback service parameter destroy: "
                      "create parameter with values={}".format(parameter)))
                # rollback parameter has a different uuid
            except exception.SysinvException:
                pass
            raise wsme.exc.ClientSideError(str(e.value))
コード例 #4
0
    def patch(self, uuid, patch):
        """Updates attributes of Service Parameter.

        Applies the patch, persists the update, notifies the conductor
        (rolling back the DB update if the RPC fails), and returns the
        updated parameter with protected values masked.
        """

        parameter = objects.service_parameter.get_by_uuid(
            pecan.request.context, uuid)
        # Ceph parameters can only be changed once a Ceph backend exists.
        if parameter.service == constants.SERVICE_TYPE_CEPH:
            if not StorageBackendConfig.has_backend_configured(
                    pecan.request.dbapi, constants.CINDER_BACKEND_CEPH):
                msg = _("Ceph backend is required.")
                raise wsme.exc.ClientSideError(msg)

        # Personality/resource scoped parameters are handled separately.
        if parameter.personality is not None or parameter.resource is not None:
            return self.patch_custom_resource(uuid, patch,
                                              parameter.personality,
                                              parameter.resource)

        parameter = parameter.as_dict()
        # Snapshot the pre-update values so the change can be rolled back.
        old_parameter = copy.deepcopy(parameter)

        updates = self._get_updates(patch)
        parameter.update(updates)

        self._check_parameter_syntax(parameter)

        updated_parameter = pecan.request.dbapi.service_parameter_update(
            uuid, updates)

        try:
            pecan.request.rpcapi.update_service_config(pecan.request.context,
                                                       parameter['service'])
        except rpc_common.RemoteError as e:
            # rollback service parameter update
            try:
                pecan.request.dbapi.service_parameter_update(
                    uuid, old_parameter)
                LOG.warn(
                    _("Rollback service parameter update: "
                      "uuid={}, old_values={}".format(uuid, old_parameter)))
            except exception.SysinvException:
                # best-effort rollback; the original RPC error is what is
                # reported to the caller
                pass
            raise wsme.exc.ClientSideError(str(e.value))

        # Before we can return the service parameter, we need
        # to ensure that this updated parameter is not "protected"
        # which may need to be obfuscated.
        service = updated_parameter['service']
        section = updated_parameter['section']
        name = updated_parameter['name']

        if service in service_parameter.SERVICE_PARAMETER_SCHEMA \
                and section in service_parameter.SERVICE_PARAMETER_SCHEMA[service]:
            schema = service_parameter.SERVICE_PARAMETER_SCHEMA[service][
                section]
            if service_parameter.SERVICE_PARAM_PROTECTED in schema:
                # parameter is to be protected
                if name in schema[service_parameter.SERVICE_PARAM_PROTECTED]:
                    updated_parameter[
                        'value'] = service_parameter.SERVICE_VALUE_PROTECTION_MASK

        return ServiceParameter.convert_with_links(updated_parameter)
コード例 #5
0
ファイル: storage.py プロジェクト: starlingx/config
def _check_profile(stor):
    """Reject the storage profile when no Ceph backend is configured."""
    # semantic check: whether system has a ceph backend
    ceph_present = StorageBackendConfig.has_backend_configured(
        pecan.request.dbapi, constants.SB_TYPE_CEPH)
    if not ceph_present:
        raise wsme.exc.ClientSideError(_(
            "System must have a %s backend" % constants.SB_TYPE_CEPH))
コード例 #6
0
ファイル: openstack.py プロジェクト: MarioCarrilloA/config
    def get_cinder_volumes(self):
        """Return all Cinder volumes across tenants, or [] without Cinder."""
        volumes = []

        # Cinder is only present when a Ceph or LVM backend is configured.
        # TODO(rchurch): Need to refactor with storage backend
        have_ceph = StorageBackendConfig.has_backend_configured(
            self.dbapi, constants.CINDER_BACKEND_CEPH)
        have_lvm = StorageBackendConfig.has_backend_configured(
            self.dbapi, constants.CINDER_BACKEND_LVM)
        if have_ceph or have_lvm:
            try:
                volumes = self._get_cinderclient().volumes.list(
                    search_opts={'all_tenants': 1})
            except Exception as e:
                LOG.error("get_cinder_volumes: Failed to access Cinder client: %s" % e)

        return volumes
コード例 #7
0
ファイル: ceph_mon.py プロジェクト: MarioCarrilloA/config
    def patch(self, cephmon_uuid, patch):
        """Update the current storage configuration.

        Applies a JSON patch to the ceph_mon object; only a change to
        ceph_mon_gib is actually accepted.  On success the ceph backend
        task is set to resize the monitor LV and affected nodes are
        marked config out-of-date.
        """

        # Monitor settings are meaningless without a configured Ceph backend.
        if not StorageBackendConfig.has_backend_configured(
            pecan.request.dbapi,
            constants.CINDER_BACKEND_CEPH
        ):
            raise wsme.exc.ClientSideError(
                _("Ceph backend is not configured.")
            )

        rpc_cephmon = objects.ceph_mon.get_by_uuid(pecan.request.context,
                                                   cephmon_uuid)
        is_ceph_mon_gib_changed = False

        # Drop patch operations that target a '/controller' path; only
        # the remaining operations are considered.
        patch = [p for p in patch if '/controller' not in p['path']]

        # Check if either ceph mon size or disk has to change.
        for p in patch:
            if '/ceph_mon_gib' in p['path']:
                if rpc_cephmon.ceph_mon_gib != p['value']:
                    is_ceph_mon_gib_changed = True

        if not is_ceph_mon_gib_changed:
            LOG.info("ceph_mon parameters are not changed")
            raise wsme.exc.ClientSideError(
                _("Warning: ceph_mon parameters are not changed."))

        # replace isystem_uuid and ceph_mon_uuid with corresponding
        patch_obj = jsonpatch.JsonPatch(patch)
        # Identity/placement fields must never be patched.
        state_rel_path = ['/uuid', '/id', '/forihostid',
                          '/device_node', '/device_path']
        if any(p['path'] in state_rel_path for p in patch_obj):
            raise wsme.exc.ClientSideError(_("The following fields can not be "
                                             "modified: %s" %
                                             state_rel_path))

        # Build the candidate configuration by applying the patch to a
        # dict copy of the current object.
        try:
            cephmon = CephMon(**jsonpatch.apply_patch(
                rpc_cephmon.as_dict(),
                patch_obj))
        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # NOTE(review): is_ceph_mon_gib_changed is always True here (the
        # earlier check raised otherwise); kept as defensive guard.
        if is_ceph_mon_gib_changed:
            _check_ceph_mon(cephmon.as_dict(), rpc_cephmon.as_dict())
            controller_fs_utils._check_controller_fs(
                ceph_mon_gib_new=cephmon.ceph_mon_gib)

        # Copy only the changed fields back onto the RPC object.
        for field in objects.ceph_mon.fields:
            if rpc_cephmon[field] != cephmon.as_dict()[field]:
                rpc_cephmon[field] = cephmon.as_dict()[field]

        LOG.info("SYS_I  cephmon: %s " % cephmon.as_dict())

        try:
            rpc_cephmon.save()
        except exception.HTTPNotFound:
            msg = _("Ceph Mon update failed: uuid %s : "
                    " patch %s"
                    % (rpc_cephmon.uuid, patch))
            raise wsme.exc.ClientSideError(msg)

        if is_ceph_mon_gib_changed:
            # Update the task for ceph storage backend.
            StorageBackendConfig.update_backend_states(
                pecan.request.dbapi,
                constants.CINDER_BACKEND_CEPH,
                task=constants.SB_TASK_RESIZE_CEPH_MON_LV
            )

            # Mark controllers and storage node as Config out-of-date.
            pecan.request.rpcapi.update_storage_config(
                pecan.request.context,
                update_storage=is_ceph_mon_gib_changed,
                reinstall_required=False
            )

        return CephMon.convert_with_links(rpc_cephmon)
コード例 #8
0
    def post(self, body):
        """Create new Service Parameter.

        Validates the request, creates one DB record per (name, value)
        pair in ``parameters``, then notifies the conductor.  If the RPC
        notification fails, every record created here is destroyed again
        (best-effort rollback).
        """

        resource = body.get('resource')
        personality = body.get('personality')

        # Personality/resource scoped parameters take a separate path.
        if personality is not None or resource is not None:
            return self.post_custom_resource(body, personality, resource)

        service = self._get_service(body)

        section = body.get('section')
        if not section:
            raise wsme.exc.ClientSideError(_("Unspecified section name."))
        elif section not in service_parameter.SERVICE_PARAMETER_SCHEMA[
                service]:
            msg = _("Invalid service section %s." % section)
            raise wsme.exc.ClientSideError(msg)

        new_records = []
        parameters = body.get('parameters')
        if not parameters:
            raise wsme.exc.ClientSideError(_("Unspecified parameters."))

        # Ceph parameters require a configured Ceph backend.
        if service == constants.SERVICE_TYPE_CEPH:
            if not StorageBackendConfig.has_backend_configured(
                    pecan.request.dbapi, constants.CINDER_BACKEND_CEPH):
                msg = _("Ceph backend is required.")
                raise wsme.exc.ClientSideError(msg)

        for name, value in parameters.items():
            new_record = {
                'service': service,
                'section': section,
                'name': name,
                'value': value,
            }
            self._check_parameter_syntax(new_record)

            # Reject duplicates: the (service, section, name) triple must
            # not already exist.
            existing = False
            try:
                pecan.request.dbapi.service_parameter_get_one(
                    service, section, name)
                existing = True
            except exception.NotFound:
                pass
            except exception.MultipleResults:
                # We'll check/handle this in the "finally" block
                existing = True
            finally:
                if existing:
                    msg = _("Service parameter add failed: "
                            "Parameter already exists: "
                            "service=%s section=%s name=%s" %
                            (service, section, name))
                    raise wsme.exc.ClientSideError(msg)

            new_records.append(new_record)

        svc_params = []
        for n in new_records:
            try:
                new_parm = pecan.request.dbapi.service_parameter_create(n)
            except exception.NotFound:
                # BUGFIX: n is a plain dict built above, so the original
                # attribute access (n.name / n.value) raised AttributeError
                # instead of producing this message; index the dict.
                msg = _("Service parameter add failed:  "
                        "service %s section %s name %s value %s" %
                        (service, section, n['name'], n['value']))
                raise wsme.exc.ClientSideError(msg)
            svc_params.append(new_parm)

        try:
            pecan.request.rpcapi.update_service_config(pecan.request.context,
                                                       service)
        except rpc_common.RemoteError as e:
            # rollback create service parameters
            for p in svc_params:
                try:
                    pecan.request.dbapi.service_parameter_destroy_uuid(p.uuid)
                    LOG.warn(
                        _("Rollback service parameter create: "
                          "destroy uuid {}".format(p.uuid)))
                except exception.SysinvException:
                    pass
            raise wsme.exc.ClientSideError(str(e.value))
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.exception(e)

        return ServiceParameterCollection.convert_with_links(svc_params,
                                                             limit=None,
                                                             url=None,
                                                             expand=False,
                                                             sort_key='id',
                                                             sort_dir='asc')
コード例 #9
0
def _check(op, lvg):
    """Semantic checks for an LVG "add", "modify" or "delete" operation.

    :param op: operation name, one of "add", "modify" or "delete"
    :param lvg: dictionary describing the local volume group
    :returns: lvg unchanged when all checks pass
    :raises wsme.exc.ClientSideError: on the first failed check
    """
    LOG.debug("Semantic check for %s operation" % op)

    # Check host and host state
    _check_host(lvg)

    # Check for required volume group name
    if lvg['lvm_vg_name'] not in constants.LVG_ALLOWED_VGS:
        grp = "'%s', '%s', or '%s'" % (constants.LVG_NOVA_LOCAL,
                                       constants.LVG_CINDER_VOLUMES,
                                       constants.LVG_CGTS_VG)
        raise wsme.exc.ClientSideError(
            _("Volume Group name (%s) must be \"%s\"") % (lvg['lvm_vg_name'],
                                                          grp))
    lvg_caps = lvg['capabilities']
    if op == "add":
        if lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            # Cinder VG type must be the same on both controllers
            mate_lvg = _get_mate_ctrl_lvg(lvg)
            lvm_type = lvg_caps.get(constants.LVG_CINDER_PARAM_LVM_TYPE)
            if mate_lvg and lvm_type:
                # lvm_type may be None & we avoid setting defaults in a _check function
                mate_type = mate_lvg['capabilities'][constants.LVG_CINDER_PARAM_LVM_TYPE]
                if lvm_type != mate_type:
                    raise wsme.exc.ClientSideError(
                        _("LVG %(lvm_type)s for %(vg_name)s must be %(type)s, the same on"
                          " both controllers." % {'lvm_type': constants.LVG_CINDER_PARAM_LVM_TYPE,
                                                  'vg_name': lvg['lvm_vg_name'],
                                                  'type': mate_type}))
        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(_("%s volume group already exists") %
                                           constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            pass
        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            pass

    elif op == "modify":
        # Sanity check: parameters

        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(_("%s volume group does not have "
                                             "any parameters to modify") %
                                           constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            if constants.LVG_CINDER_PARAM_LVM_TYPE not in lvg_caps:
                raise wsme.exc.ClientSideError(
                    _('Internal Error: %s parameter missing for volume '
                      'group.') % constants.LVG_CINDER_PARAM_LVM_TYPE)
            # Make sure that cinder volumes provisioning type is a valid
            # value.  The key is guaranteed present here, so the redundant
            # membership re-check was removed.
            elif lvg_caps[constants.LVG_CINDER_PARAM_LVM_TYPE] not in \
                    [constants.LVG_CINDER_LVM_TYPE_THIN,
                     constants.LVG_CINDER_LVM_TYPE_THICK]:
                msg = _('Invalid parameter: %s must be %s or %s' %
                        (constants.LVG_CINDER_PARAM_LVM_TYPE,
                         constants.LVG_CINDER_LVM_TYPE_THIN,
                         constants.LVG_CINDER_LVM_TYPE_THICK))
                raise wsme.exc.ClientSideError(msg)

        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            # instance_backing: This is a required parameter
            if constants.LVG_NOVA_PARAM_BACKING not in lvg_caps:
                raise wsme.exc.ClientSideError(
                    _('Internal Error: %s parameter missing for volume '
                      'group.') % constants.LVG_NOVA_PARAM_BACKING)
            else:
                # Instances backed by remote ephemeral storage can only be
                # used on systems that have a Ceph (internal or external)
                # backend.
                if ((lvg_caps.get(constants.LVG_NOVA_PARAM_BACKING) ==
                     constants.LVG_NOVA_BACKING_REMOTE) and
                        not StorageBackendConfig.has_backend_configured(
                            pecan.request.dbapi,
                            constants.SB_TYPE_CEPH,
                            service=constants.SB_SVC_NOVA,
                            check_only_defaults=False,
                            rpcapi=pecan.request.rpcapi) and
                        not StorageBackendConfig.has_backend_configured(
                            pecan.request.dbapi,
                            constants.SB_TYPE_CEPH_EXTERNAL,
                            service=constants.SB_SVC_NOVA,
                            check_only_defaults=False,
                            rpcapi=pecan.request.rpcapi)):
                    raise wsme.exc.ClientSideError(
                        _('Invalid value for instance_backing. Instances '
                          'backed by remote ephemeral storage can only be '
                          'used on systems that have a Ceph (internal or '
                          'external) backend.'))

            # Guard against a None lvm_cur_lv, consistent with the
            # "delete" branch below.  (Typo "madifications" also fixed.)
            if (lvg['lvm_cur_lv'] and lvg['lvm_cur_lv'] > 1):
                raise wsme.exc.ClientSideError(
                    _("Can't modify the volume group: %s. There are currently "
                      "%d instance volumes present in the volume group. "
                      "Terminate or migrate all instances from the worker to "
                      "allow volume group modifications." %
                        (lvg['lvm_vg_name'], lvg['lvm_cur_lv'] - 1)))

    elif op == "delete":
        if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
            raise wsme.exc.ClientSideError(_("%s volume group cannot be deleted") %
                                           constants.LVG_CGTS_VG)
        elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
            if ((lvg['vg_state'] in
                [constants.PROVISIONED, constants.LVG_ADD]) and
                StorageBackendConfig.has_backend(
                    pecan.request.dbapi, constants.CINDER_BACKEND_LVM)):
                raise wsme.exc.ClientSideError(
                    _("cinder-volumes LVG cannot be removed once it is "
                      "provisioned and LVM backend is added."))
        elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
            if (lvg['lvm_cur_lv'] and lvg['lvm_cur_lv'] > 1):
                raise wsme.exc.ClientSideError(
                    _("Can't delete volume group: %s. There are currently %d "
                      "instance volumes present in the volume group. Terminate"
                      " or migrate all instances from the worker to allow "
                      "volume group deletion." % (lvg['lvm_vg_name'],
                                                  lvg['lvm_cur_lv'] - 1)))
    else:
        raise wsme.exc.ClientSideError(
            _("Internal Error: Invalid Volume Group operation: %s" % op))

    return lvg