Example #1
0
class DockerRegistryCredentialNotFound(NotFound):
    """Raised when a user's local docker registry credentials are missing."""
    message = _("Credentials to access local docker registry "
                "for user %(name)s could not be found.")
Example #2
0
class DuplicateMessageError(RPCException):
    """Raised when an RPC message with an already-seen msg_id arrives."""
    message = _("Found duplicate message(%(msg_id)s). Skipping it.")
Example #3
0
class UnsupportedRpcVersion(RPCException):
    """Raised when a requested RPC version is not served by this endpoint."""
    message = _("Specified RPC version, %(version)s, not supported by "
                "this endpoint.")
Example #4
0
    def patch(self, stor_uuid, patch):
        """Update an existing stor.

        :param stor_uuid: UUID of the stor to update.
        :param patch: JSON patch document (list of RFC 6902 operations).
        :returns: the updated Storage resource with links.
        :raises exception.OperationNotPermitted: when reached through the
            ihosts or tier sub-resource routes.
        :raises wsme.exc.ClientSideError: on lookup or semantic failures.
        """
        # Updates are only permitted through the top-level stor route.
        if self._from_ihosts:
            raise exception.OperationNotPermitted

        if self._from_tier:
            raise exception.OperationNotPermitted

        try:
            rpc_stor = objects.storage.get_by_uuid(
                           pecan.request.context, stor_uuid)
        except exception.ServerNotFound:
            raise wsme.exc.ClientSideError(_("No stor with the provided"
                                             " uuid: %s" % stor_uuid))
        # replace ihost_uuid and istor_uuid with corresponding
        # database ids: API-level UUID paths are rewritten so the patch
        # applies cleanly to the DB-backed dict representation.
        patch_obj = jsonpatch.JsonPatch(patch)
        for p in patch_obj:
            if p['path'] == '/ihost_uuid':
                p['path'] = '/forihostid'
                ihost = objects.host.get_by_uuid(pecan.request.context,
                                                 p['value'])
                p['value'] = ihost.id
            elif p['path'] == '/tier_uuid':
                p['path'] = '/fortierid'
                tier = objects.tier.get_by_uuid(pecan.request.context,
                                                p['value'])
                p['value'] = tier.id

        # Apply the patch to a copy of the current record to obtain the
        # candidate new state.
        try:
            stor = Storage(**jsonpatch.apply_patch(
                                               rpc_stor.as_dict(),
                                               patch_obj))

        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Semantic Checks
        _check_host(stor.as_dict())
        _check_disk(stor.as_dict())

        if (hasattr(stor, 'journal_size_mib') or
                hasattr(stor, 'journal_location')):
            _check_journal(rpc_stor, stor.as_dict())

        # Journal partitions can be either collocated with the OSD or external.
        # Any location change requires that the device_nodes of the remaining
        # journals of the external journal disk to be updated, therefore we back
        # up the external journal stor before updating it with the new value
        journal_stor_uuid = None
        if rpc_stor['journal_location'] != getattr(stor, 'journal_location'):
            if rpc_stor['uuid'] == getattr(stor, 'journal_location'):
                # journal partition becomes collocated, backup the prev journal
                journal_stor_uuid = rpc_stor['journal_location']
                setattr(stor, 'journal_size_mib',
                        CONF.journal.journal_default_size)
            else:
                # journal partition moves to external journal disk
                journal_stor_uuid = getattr(stor, 'journal_location')
        else:
            # Location unchanged: a collocated journal's size is fixed, so
            # reject explicit size changes in that case.
            if (hasattr(stor, 'journal_size_mib') and
                    rpc_stor['uuid'] == rpc_stor['journal_location']):
                raise wsme.exc.ClientSideError(_(
                    "Invalid update: Size of collocated journal is fixed."))

        # Update only the fields that have changed
        updated = False
        for field in objects.storage.fields:
            if rpc_stor[field] != getattr(stor, field):
                rpc_stor[field] = getattr(stor, field)
                updated = True

        if not updated:
            # None of the data fields have been updated, return!
            return Storage.convert_with_links(rpc_stor)

        # Set status for newly created OSD.
        if rpc_stor['function'] == constants.STOR_FUNCTION_OSD:
            ihost_id = rpc_stor['forihostid']
            ihost = pecan.request.dbapi.ihost_get(ihost_id)
            if ihost['operational'] == constants.OPERATIONAL_ENABLED:
                # We are running live manifests
                rpc_stor['state'] = constants.SB_STATE_CONFIGURING
            else:
                rpc_stor['state'] = constants.SB_STATE_CONFIGURING_ON_UNLOCK

        # Save istor
        rpc_stor.save()

        # Update device nodes for the journal disk
        if journal_stor_uuid:
            try:
                pecan.request.dbapi.journal_update_dev_nodes(journal_stor_uuid)
                # Refresh device node for current stor, if changed by prev call
                st = pecan.request.dbapi.istor_get(rpc_stor['id'])
                rpc_stor['journal_path'] = st.journal_path
            except Exception as e:
                # Best-effort: the stor itself is already saved; only log.
                LOG.exception(e)

        # Run runtime manifests to update configuration
        runtime_manifests = False
        if (rpc_stor['state'] == constants.SB_STATE_CONFIGURING and
                rpc_stor['function'] == constants.STOR_FUNCTION_OSD):
            runtime_manifests = True

        # NOTE(review): 'ihost' is only bound in the OSD branch above or by
        # an '/ihost_uuid' patch entry; verify it cannot be unbound here.
        pecan.request.rpcapi.update_ceph_osd_config(pecan.request.context,
                                                    ihost, rpc_stor['uuid'],
                                                    runtime_manifests)

        return Storage.convert_with_links(rpc_stor)
Example #5
0
def _check_journal_location(journal_location, stor, action):
    """Choose a valid journal location or raise a corresponding error.

    :param journal_location: journal stor UUID requested by the user, or a
        falsy value to let this function auto-select one.
    :param stor: dict describing the stor being created/updated; must hold
        'forihostid' and, for auto-selection, 'uuid'.
    :param action: constants.ACTION_CREATE_JOURNAL or
        constants.ACTION_UPDATE_JOURNAL; selects which collocation rule
        applies.
    :returns: the UUID of the stor that will hold the journal.
    :raises exception.InvalidUUID: if journal_location is not UUID-like.
    :raises wsme.exc.ClientSideError: on any semantic-check failure.
    """
    # If a journal location is provided by the user.
    if journal_location:
        if not uuidutils.is_uuid_like(journal_location):
            raise exception.InvalidUUID(uuid=journal_location)

        # Check that the journal location is that of an existing stor object.
        try:
            requested_journal_onistor = pecan.request.dbapi.istor_get(
                journal_location)
        except exception.ServerNotFound:
            raise wsme.exc.ClientSideError(_(
                "No journal stor with the provided uuid: %s" %
                journal_location))

        # Check that the provided stor is assigned to the same host as the OSD.
        if (requested_journal_onistor.forihostid != stor['forihostid']):
            raise wsme.exc.ClientSideError(_(
                "The provided stor belongs to another "
                "host."))

        # If the action is journal create, don't let the journal be
        # collocated.
        if action == constants.ACTION_CREATE_JOURNAL:
            if (requested_journal_onistor.function !=
                    constants.STOR_FUNCTION_JOURNAL):
                raise wsme.exc.ClientSideError(_(
                    "The provided uuid belongs to a stor "
                    "that is not of journal type."))

        # If the action is journal update:
        # - if the new journal location is not collocated, check that the
        #   location is of journal type.
        # - if the new journal location is collocated, allow it.
        if action == constants.ACTION_UPDATE_JOURNAL:
            if requested_journal_onistor.uuid != stor['uuid']:
                if (requested_journal_onistor.function !=
                        constants.STOR_FUNCTION_JOURNAL):
                    raise wsme.exc.ClientSideError(_(
                        "The provided uuid belongs to a stor "
                        "that is not of journal type."))

    # If no journal location is provided by the user.
    else:
        # Check if there is a journal storage designated for the present host.
        existing_journal_stors = pecan.request.dbapi.istor_get_by_ihost_function(
            stor['forihostid'], constants.STOR_FUNCTION_JOURNAL)

        # If more than one journal stor is assigned to the host, the user
        # should choose only one journal location.
        #
        # If there is only one journal stor assigned to the host, then that's
        # where the journal will reside.
        #
        # If there are no journal stors assigned to the host, then the journal
        # is collocated.
        if 'uuid' in stor:
            if len(existing_journal_stors) > 1:
                # Build the list with join instead of quadratic string
                # concatenation in a loop.
                available_journals = "".join(
                    stor_obj.uuid + "\n" for stor_obj in existing_journal_stors)
                raise wsme.exc.ClientSideError(_(
                      "Multiple journal stors are available. Choose from:\n%s"
                      % available_journals))
            elif len(existing_journal_stors) == 1:
                journal_location = existing_journal_stors[0].uuid
            else:
                journal_location = stor['uuid']

    return journal_location
Example #6
0
    def _get_ports_collection(self, uuid, interface_uuid, node_uuid,
                              marker, limit, sort_key, sort_dir,
                              expand=False, resource_url=None):
        """Return an EthernetPortCollection filtered by the routing context.

        :param uuid: id of the parent resource (host, interface or numa
            node) when routed through a parent controller; otherwise
            treated as a host id for direct queries.
        :param interface_uuid: optional interface id for host+interface
            filtered queries.
        :param node_uuid: not referenced in this method's body.
        :param marker: pagination marker (ethernet port uuid) or None.
        :param limit: maximum number of results; validated below.
        :param sort_key: field name to sort by.
        :param sort_dir: sort direction; validated below.
        :param expand: whether the collection carries full resources.
        :param resource_url: base url used for pagination links.
        :raises exception.InvalidParameterValue: when routed through a
            parent controller but no parent id was supplied.
        """
        # A parent-scoped route requires the parent's id.
        if self._from_ihosts and not uuid:
            raise exception.InvalidParameterValue(_(
                  "Host id not specified."))

        if self._from_iinterface and not uuid:
            raise exception.InvalidParameterValue(_(
                  "Interface id not specified."))

        if self._from_inode and not uuid:
            raise exception.InvalidParameterValue(_(
                  "inode id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        # Resolve the pagination marker to a DB object, if given.
        marker_obj = None
        if marker:
            marker_obj = objects.ethernet_port.get_by_uuid(
                                        pecan.request.context,
                                        marker)

        # Dispatch on the routing context first, then on which ids were
        # supplied directly.
        if self._from_ihosts:
            ports = pecan.request.dbapi.ethernet_port_get_by_host(
                                                    uuid, limit,
                                                    marker_obj,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)
        elif self._from_inode:
            ports = pecan.request.dbapi.ethernet_port_get_by_numa_node(
                                                    uuid, limit,
                                                    marker_obj,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)
        elif self._from_iinterface:
            ports = pecan.request.dbapi.ethernet_port_get_by_interface(
                                                    uuid,
                                                    limit,
                                                    marker_obj,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)
        else:
            if uuid and not interface_uuid:
                ports = pecan.request.dbapi.ethernet_port_get_by_host(
                                                    uuid, limit,
                                                    marker_obj,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)
            elif uuid and interface_uuid:   # Need ihost_uuid ?
                ports = pecan.request.dbapi.ethernet_port_get_by_host_interface(
                                                    uuid,
                                                    interface_uuid,
                                                    limit,
                                                    marker_obj,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)

            elif interface_uuid:   # Need ihost_uuid ?
                ports = pecan.request.dbapi.ethernet_port_get_by_host_interface(
                                                    uuid,  # None
                                                    interface_uuid,
                                                    limit,
                                                    marker_obj,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)

            else:
                # No filters at all: list every ethernet port.
                ports = pecan.request.dbapi.ethernet_port_get_list(
                                                    limit, marker_obj,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)

        return EthernetPortCollection.convert_with_links(ports, limit,
                                                 url=resource_url,
                                                 expand=expand,
                                                 sort_key=sort_key,
                                                 sort_dir=sort_dir)
Example #7
0
 def get_one(self, uuid):
     """Return the SMService identified by *uuid*.

     :raises wsme.exc.ClientSideError: when no such service exists.
     """
     # The SM API returns None when the service is unknown.
     service_data = sm_api.service_show(uuid)
     if service_data is not None:
         return SMService(**service_data)
     raise wsme.exc.ClientSideError(
         _("Service %s could not be found") % uuid)
Example #8
0
class PowerStateFailure(SysinvException):
    """Raised when a node's power state could not be set."""
    message = _("Failed to set node power state to %(pstate)s.")
Example #9
0
class ExclusiveLockRequired(NotAuthorized):
    """Raised when an exclusive lock is needed but only a shared one is held."""
    message = _("An exclusive lock is required, "
                "but the current context has a shared lock.")
Example #10
0
class SDNControllerMismatchedAF(SysinvException):
    """Raised when the SDN controller IP's address family differs from OAM's."""
    message = _("The SDN controller IP %(ip_address)s does not match "
                "the address family of the OAM interface.")
Example #11
0
class SDNControllerRequiredParamsMissing(SysinvException):
    """Raised when required SDN controller parameters are absent."""
    message = _("One or more required SDN controller parameters are missing.")
Example #12
0
class SDNControllerCannotUnlockCompute(NotAuthorized):
    """Raised when unlocking a Compute on an SDN system with no controller."""
    message = _("Atleast one SDN controller needs to be added "
                "in order to unlock a Compute node on an SDN system.")
Example #13
0
class SDNControllerNotFound(NotFound):
    """Raised when the requested SDN controller does not exist."""
    message = _("SDN Controller %(uuid)s could not be found.")
Example #14
0
class SDNNotEnabled(SysinvException):
    """Raised when an SDN operation is attempted but SDN is not enabled."""
    message = _("SDN configuration is not enabled.")
Example #15
0
    def patch(self, extoam_uuid, patch):
        """Update the current OAM configuration.

        :param extoam_uuid: UUID of the OAM network record to patch.
        :param patch: JSON patch document (list of RFC 6902 operations).
        :returns: the updated OAMNetwork resource with links.
        :raises exception.OperationNotPermitted: when reached via the
            isystems sub-resource route.
        :raises wsme.exc.ClientSideError: on validation or update failure.
        """
        if self._from_isystems:
            raise exception.OperationNotPermitted

        rpc_extoam = objects.oam_network.get_by_uuid(pecan.request.context,
                                                     extoam_uuid)

        # this is required for cases where action is appended
        for p in patch:
            if '/action' in p['path']:
                patch.remove(p)
                break

        # replace isystem_uuid and iextoam_uuid with corresponding
        patch_obj = jsonpatch.JsonPatch(patch)

        # Read-only bookkeeping fields that must not be patched from here.
        state_rel_path = [
            '/uuid',
            '/id',
            '/created_at',
            '/updated_at',
            '/forisystemid',
            '/isystem_uuid',
        ]

        if any(p['path'] in state_rel_path for p in patch_obj):
            raise wsme.exc.ClientSideError(
                _("The following fields can not be "
                  "modified: %s from this level." % state_rel_path))

        # Keep an unmodified copy for before/after semantic validation.
        extoam_orig = copy.deepcopy(rpc_extoam)
        for p in patch_obj:
            if p['path'] == '/isystem_uuid':
                # Translate the API-level system UUID into the DB id.
                isystem = objects.system.get_by_uuid(pecan.request.context,
                                                     p['value'])
                p['path'] = '/forisystemid'
                p['value'] = isystem.id

        try:
            extoam = OAMNetwork(
                **jsonpatch.apply_patch(rpc_extoam.as_dict(), patch_obj))

        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        region_config = self._get_region_config()

        # extoam.region_config = region_config
        LOG.info("extoam %s, region_config=%s " %
                 (extoam.as_dict(), str(region_config)))

        # Semantic validation of the old-vs-new OAM data.
        extoam = _check_extoam_data(extoam_orig.as_dict(), extoam.as_dict(),
                                    region_config)

        try:
            # Update only the fields that have changed
            for field in objects.oam_network.fields:
                if rpc_extoam[field] != extoam[field]:
                    rpc_extoam[field] = extoam[field]

            rpc_extoam.save()

            # Trigger the conductor to apply the new OAM configuration.
            pecan.request.rpcapi.update_oam_config(pecan.request.context)

            return OAMNetwork.convert_with_links(rpc_extoam)

        except exception.HTTPNotFound:
            # NOTE(review): 'isystem' is only bound when the patch contains
            # '/isystem_uuid'; this handler may raise NameError otherwise —
            # confirm.
            msg = _("OAM IP update failed: system %s extoam %s: patch %s" %
                    (isystem['systemname'], extoam, patch))
            raise wsme.exc.ClientSideError(msg)
Example #16
0
class NodeInUse(SysinvException):
    """Raised when an action fails because the node is used by another process."""
    message = _("Unable to complete the requested action because node "
                "%(node)s is currently in use by another process.")
Example #17
0
    def update_many(self, ihost_uuid, patch):
        """Update existing filesystems for a host.

        :param ihost_uuid: UUID of the host whose filesystems are resized.
        :param patch: list of JSON patch lists, one per filesystem; each is
            expected to carry '/name' and '/size' operations.
        :raises wsme.exc.ClientSideError: on validation or update failure.
        """

        LOG.info("patch_data: %s" % patch)

        # Validate input filesystem names
        current_host_fs_list = pecan.request.dbapi.host_fs_get_by_ihost(ihost_uuid)
        host = pecan.request.dbapi.ihost_get(ihost_uuid)

        # Names of filesystems whose size is actually modified.
        modified_fs = []

        # Strip the '/action' entry from the patch before processing.
        # NOTE(review): patch.remove(p_list) mutates the list being iterated;
        # verify at most one action entry is ever present.
        for p_list in patch:
            p_obj_list = jsonpatch.JsonPatch(p_list)

            for p_obj in p_obj_list:
                if p_obj['path'] == '/action':
                    # The action value is captured but not used afterwards.
                    value = p_obj['value']
                    patch.remove(p_list)

        # Validate each per-filesystem patch list.
        # NOTE(review): fs_name/fs_display_name/size keep their values from a
        # previous iteration if a list lacks '/name' or '/size' — confirm the
        # input is always well-formed upstream.
        for p_list in patch:
            p_obj_list = jsonpatch.JsonPatch(p_list)
            for p_obj in p_obj_list:
                if p_obj['path'] == '/name':
                    fs_display_name = p_obj['value']
                    fs_name = fs_display_name
                elif p_obj['path'] == '/size':
                    size = p_obj['value']

            if fs_name not in [fs['name'] for fs in current_host_fs_list]:
                msg = _("HostFs update failed: invalid filesystem "
                        "'%s' " % fs_display_name)
                raise wsme.exc.ClientSideError(msg)

            elif not cutils.is_int_like(size):
                msg = _("HostFs update failed: filesystem '%s' "
                        "size must be an integer " % fs_display_name)
                raise wsme.exc.ClientSideError(msg)

            current_size = [fs['size'] for
                            fs in current_host_fs_list
                            if fs['name'] == fs_name][0]

            # Only growth is allowed; shrinking is rejected.
            if int(size) <= int(current_size):
                msg = _("HostFs update failed: size for filesystem '%s' "
                        "should be bigger than %s " % (
                            fs_display_name, current_size))
                raise wsme.exc.ClientSideError(msg)

            modified_fs += [fs_name]

        if not modified_fs:
            msg = _("HostFs update failed: no filesystems to update")
            raise wsme.exc.ClientSideError(msg)

        # Build the prospective filesystem list: patched entries replace the
        # current ones, untouched entries are carried over unchanged.
        host_fs_list_new = []
        for fs in current_host_fs_list:
            replaced = False
            for p_list in patch:
                p_obj_list = jsonpatch.JsonPatch(p_list)
                for p_obj in p_obj_list:
                    if p_obj['value'] == fs['name']:
                        try:
                            host_fs_list_new += [HostFs(
                                      **jsonpatch.apply_patch(fs.as_dict(), p_obj_list))]
                            replaced = True
                            break
                        except utils.JSONPATCH_EXCEPTIONS as e:
                            raise exception.PatchError(patch=p_list, reason=e)
                if replaced:
                    break
            if not replaced:
                host_fs_list_new += [fs]

        requested_growth_gib = \
            _calculate_requested_growth(current_host_fs_list, host_fs_list_new)

        LOG.info("Requested growth in GiB: %s" % requested_growth_gib)

        # Ensure the volume group has room for the total requested growth.
        cgtsvg_free_space_gib = utils.get_node_cgtsvg_limit(host)

        if requested_growth_gib > cgtsvg_free_space_gib:
            msg = _("HostFs update failed: Not enough free space on %s. "
                    "Current free space %s GiB, "
                    "requested total increase %s GiB" %
                    (constants.LVG_CGTS_VG, cgtsvg_free_space_gib, requested_growth_gib))
            LOG.warning(msg)
            raise wsme.exc.ClientSideError(msg)

        # Persist the new sizes for the modified filesystems.
        for fs in host_fs_list_new:
            if fs.name in modified_fs:
                value = {'size': fs.size}
                pecan.request.dbapi.host_fs_update(fs.uuid, value)

        try:
            if (host.invprovision in [constants.PROVISIONED,
                                      constants.PROVISIONING]):

                # perform rpc to conductor to perform config apply
                pecan.request.rpcapi.update_host_filesystem_config(
                        pecan.request.context,
                        host=host,
                        filesystem_list=modified_fs,)

        except Exception as e:
            msg = _("Failed to update filesystem size for %s" % host.name)
            LOG.error("%s with patch %s with exception %s" % (msg, patch, e))
            raise wsme.exc.ClientSideError(msg)
Example #18
0
class NodeInWrongPowerState(SysinvException):
    """Raised when instance association changes are blocked by power state."""
    message = _("Can not change instance association while node "
                "%(node)s is in power state %(pstate)s.")
Example #19
0
 def _check_host(host_uuid):
     """Reject the operation unless the host is administratively locked.

     :param host_uuid: UUID of the host to check.
     :raises wsme.exc.ClientSideError: if the host is not locked.
     """
     host = pecan.request.dbapi.ihost_get(host_uuid)
     if host.administrative != constants.ADMIN_LOCKED:
         # Bug fix: the original message misspelled "administrative".
         msg = _("Operation Rejected: Host '%s' is administrative '%s' " %
                 (host.hostname, host.administrative))
         raise wsme.exc.ClientSideError(msg)
Example #20
0
class NodeNotConfigured(SysinvException):
    """Raised when power state changes are blocked by incomplete config."""
    message = _("Can not change power state because node %(node)s "
                "is not fully configured.")
Example #21
0
class DBInvalidUnicodeParameter(Exception):
    """Raised when a unicode parameter is passed to a non-unicode database."""
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")
Example #22
0
class ChassisNotEmpty(SysinvException):
    """Raised when an action fails because the chassis still contains nodes."""
    message = _("Cannot complete the requested action because chassis "
                "%(chassis)s contains nodes.")
Example #23
0
def _check_host(stor):
    """Run the semantic checks that gate adding a storage device to a host.

    :param stor: dict describing the stor; must contain 'forihostid'.
    :raises wsme.exc.ClientSideError: if any check fails.
    """
    ihost_id = stor['forihostid']
    ihost = pecan.request.dbapi.ihost_get(ihost_id)
    stor_model = ceph.get_ceph_storage_model()

    # semantic check: whether OSD can be added to this host.
    if stor_model == constants.CEPH_STORAGE_MODEL:
        if ihost.personality != constants.STORAGE:
            msg = ("Storage model is '%s'. Storage devices can only be added "
                   "to storage nodes." % stor_model)
            raise wsme.exc.ClientSideError(_(msg))
    elif stor_model == constants.CEPH_CONTROLLER_MODEL:
        if ihost.personality != constants.CONTROLLER:
            msg = ("Storage model is '%s'. Storage devices can only be added "
                   "to controller nodes." % stor_model)
            raise wsme.exc.ClientSideError(_(msg))
    elif stor_model == constants.CEPH_UNDEFINED_MODEL:
        msg = ("Please install storage-0 or configure a Ceph monitor "
               "on a worker node before adding storage devices.")
        raise wsme.exc.ClientSideError(_(msg))

    # semantic check: whether host is operationally acceptable.
    # Controller/AIO-SX models require unlocked+enabled; all other models
    # require the host to be locked.
    if (stor_model == constants.CEPH_CONTROLLER_MODEL or
            stor_model == constants.CEPH_AIO_SX_MODEL):
        if (ihost['administrative'] == constants.ADMIN_UNLOCKED and
                ihost['operational'] != constants.OPERATIONAL_ENABLED):
            msg = _("Host %s must be unlocked and operational state "
                    "enabled." % ihost['hostname'])
            raise wsme.exc.ClientSideError(msg)
    else:
        if ihost['administrative'] != constants.ADMIN_LOCKED:
            raise wsme.exc.ClientSideError(_("Host %s must be locked." %
                                             ihost['hostname']))

    # semantic check: only storage nodes are allowed without k8s
    if (not utils.is_kubernetes_config(pecan.request.dbapi) and
            ihost['personality'] != constants.STORAGE):
        msg = ("Host personality must be 'storage' or kubernetes enabled.")
        raise wsme.exc.ClientSideError(_(msg))

    # semantic check: whether system has a ceph backend
    if not StorageBackendConfig.has_backend_configured(
            pecan.request.dbapi,
            constants.SB_TYPE_CEPH
    ):
        raise wsme.exc.ClientSideError(_(
            "System must have a %s backend" % constants.SB_TYPE_CEPH))

    # semantic check: whether at least 2 unlocked hosts are monitors
    # (skipped on AIO systems).
    if not utils.is_aio_system(pecan.request.dbapi):
        ceph_helper = ceph.CephApiOperator()
        num_monitors, required_monitors, quorum_names = \
            ceph_helper.get_monitors_status(pecan.request.dbapi)
        # CGTS 503 for now update monitors requirement until controller-0 is
        # inventoried
        # CGTS 1448
        if num_monitors < required_monitors:
            raise wsme.exc.ClientSideError(_(
                "Only %d storage monitor available. "
                "At least %s unlocked and enabled hosts with monitors are "
                "required. Please ensure hosts with monitors are unlocked "
                "and enabled.") % (num_monitors, required_monitors))
Example #24
0
class IPMIFailure(SysinvException):
    """Raised when an IPMI command fails."""
    message = _("IPMI call failed: %(cmd)s.")
Example #25
0
def _create(stor, iprofile=None):
    """Create a stor (OSD or journal) for a host and wire up its journal.

    :param stor: dict of stor attributes; must identify the host via
        'forihostid' or 'ihost_uuid'.
    :param iprofile: when set, profile-based creation: profile semantic
        checks are used and runtime OSD setup is skipped.
    :returns: the newly created istor DB record.
    :raises wsme.exc.ClientSideError: on any semantic-check failure.
    """

    LOG.debug("storage._create stor with params: %s" % stor)
    # Init
    osd_create = False

    # Get host
    ihostId = stor.get('forihostid') or stor.get('ihost_uuid')
    if not ihostId:
        raise wsme.exc.ClientSideError(_("No host provided for stor creation."))

    ihost = pecan.request.dbapi.ihost_get(ihostId)
    # Normalize to the numeric DB id regardless of which key was supplied.
    if uuidutils.is_uuid_like(ihostId):
        forihostid = ihost['id']
    else:
        forihostid = ihostId
    stor.update({'forihostid': forihostid})

    # SEMANTIC CHECKS
    if iprofile:
        _check_profile(stor)
    else:
        _check_host(stor)

    try:
        idisk_uuid = _check_disk(stor)
    except exception.ServerNotFound:
        raise wsme.exc.ClientSideError(_("No disk with the provided "
                                         "uuid: %s" % stor['idisk_uuid']))

    # Assign the function if necessary.  An unset function defaults to OSD.
    function = stor['function']
    if function:
        if function == constants.STOR_FUNCTION_OSD and not iprofile:
            osd_create = True
    else:
        function = stor['function'] = constants.STOR_FUNCTION_OSD
        if not iprofile:
            osd_create = True

    create_attrs = {}
    create_attrs.update(stor)

    # Set status for newly created OSD.
    if function == constants.STOR_FUNCTION_OSD:
        ihost_id = stor['forihostid']
        ihost = pecan.request.dbapi.ihost_get(ihost_id)
        if ihost['operational'] == constants.OPERATIONAL_ENABLED:
            # We are running live manifests
            create_attrs['state'] = constants.SB_STATE_CONFIGURING
        else:
            create_attrs['state'] = constants.SB_STATE_CONFIGURING_ON_UNLOCK
    else:
        create_attrs['state'] = constants.SB_STATE_CONFIGURED

    if function == constants.STOR_FUNCTION_OSD:
        # Get the tier the stor should be associated with
        tierId = stor.get('fortierid') or stor.get('tier_uuid')
        if not tierId:
            # Get the available tiers. If only one exists (the default tier)
            # then add it.
            default_ceph_tier_name = constants.SB_TIER_DEFAULT_NAMES[
                constants.SB_TIER_TYPE_CEPH]
            tier_list = pecan.request.dbapi.storage_tier_get_list()
            if (len(tier_list) == 1 and
                    tier_list[0].name == default_ceph_tier_name):
                tierId = tier_list[0].uuid
            else:
                raise wsme.exc.ClientSideError(
                    _("Multiple storage tiers are present. A tier is required "
                      "for stor creation."))

        try:
            tier = pecan.request.dbapi.storage_tier_get(tierId)
        except exception.StorageTierNotFound:
            raise wsme.exc.ClientSideError(_("No tier with id %s found.") % tierId)

        create_attrs['fortierid'] = tier.id

        if not iprofile:
            # Validate (or auto-select) the journal location for this OSD.
            try:
                journal_location = \
                    _check_journal_location(stor['journal_location'],
                                            stor,
                                            constants.ACTION_CREATE_JOURNAL)
            except exception.InvalidUUID as e:
                raise wsme.exc.ClientSideError(_(str(e)))

            # If the journal is collocated, make sure its size is set to the
            # default one.
            if 'uuid' in stor and journal_location == stor['uuid']:
                stor['journal_size_mib'] = CONF.journal.journal_default_size
            elif journal_location:
                if not stor['journal_size_mib']:
                    stor['journal_size_mib'] = \
                        CONF.journal.journal_default_size

                journal_istor = pecan.request.dbapi.istor_get(journal_location)
                journal_idisk_uuid = journal_istor.idisk_uuid

                # Find out if there is enough space to keep the journal on the
                # journal stor.
                _check_journal_space(journal_idisk_uuid,
                                     journal_location,
                                     stor['journal_size_mib'])

    elif function == constants.STOR_FUNCTION_JOURNAL:
        # Check that the journal stor resides on a device of SSD type.
        idisk = pecan.request.dbapi.idisk_get(idisk_uuid)
        if (idisk.device_type != constants.DEVICE_TYPE_SSD and
                idisk.device_type != constants.DEVICE_TYPE_NVME):
            raise wsme.exc.ClientSideError(_(
                "Invalid stor device type: only SSD and NVME devices are supported"
                " for journal functions."))

    if osd_create is True:
        # Get the next free OSD ID in the system: the smallest non-negative
        # integer not already used as an osdid.
        stors = pecan.request.dbapi.istor_get_list(sort_key='osdid', sort_dir='asc')
        stors_ids = [s['osdid'] for s in stors if s['osdid'] is not None]
        if stors_ids:
            candidate_ids = [i for i in range(0, stors_ids[-1] + 2) if i not in stors_ids]
            create_attrs['osdid'] = candidate_ids[0]
        else:
            create_attrs['osdid'] = 0
    else:
        create_attrs['osdid'] = None

    new_stor = pecan.request.dbapi.istor_create(forihostid,
                                                create_attrs)

    # Associate the disk to db record
    values = {'foristorid': new_stor.id}
    pecan.request.dbapi.idisk_update(idisk_uuid,
                                     values)

    # Journals are created only for OSDs
    if new_stor.get("function") == constants.STOR_FUNCTION_OSD:
        if iprofile or not journal_location:
            # iprofile either provides a valid location or assumes
            # collocation. For collocation: stor['journal_location'] =
            # stor['uuid'], since sometimes we get the UUID of the newly
            # created stor late, we can only set it late.
            journal_location = stor['journal_location'] if \
                                stor.get('journal_location') else new_stor['uuid']
        new_journal = _create_journal(journal_location,
                                      stor['journal_size_mib'],
                                      new_stor)

        # Update the attributes of the journal partition for the current stor.
        setattr(new_stor, "journal_path", new_journal.get("device_path"))
        setattr(new_stor, "journal_location", new_journal.get("onistor_uuid"))
        setattr(new_stor, "journal_size", new_journal.get("size_mib"))

        if not iprofile:
            # Update the state of the storage tier
            try:
                pecan.request.dbapi.storage_tier_update(
                    tier.id,
                    {'status': constants.SB_TIER_STATUS_IN_USE})
            except exception.StorageTierNotFound as e:
                # Shouldn't happen. Log exception. Stor is created but tier status
                # is not updated.
                LOG.exception(e)

        # Apply runtime manifests for OSDs on "available" nodes.
        runtime_manifests = False
        if ihost['operational'] == constants.OPERATIONAL_ENABLED:
            runtime_manifests = True

        pecan.request.rpcapi.update_ceph_osd_config(pecan.request.context,
                                                    ihost, new_stor['uuid'],
                                                    runtime_manifests)

    return new_stor
Example #26
0
class SSHConnectFailed(SysinvException):
    """Raised when an SSH connection to the given host cannot be opened."""

    message = _(
        "Failed to establish SSH connection to host %(host)s.")
Example #27
0
class InvalidRPCConnectionReuse(RPCException):
    """Raised when an RPC connection object is reused improperly."""

    message = _(
        "Invalid reuse of an RPC connection.")
Example #28
0
def _check_extoam_data(extoam_orig, extoam, region_config=False):
    """Semantically validate a proposed external OAM (extoam) configuration.

    Checks that the OAM subnet is well formed, that every OAM address is a
    valid IP inside that subnet, that controller/gateway addresses are
    mutually unique, and that the floating/controller addresses fall within
    the [oam_start_ip, oam_end_ip] range.

    :param extoam_orig: dict of the current (pre-update) OAM values.
    :param extoam: dict of the proposed OAM values.  May be modified in
        place: when not in region mode, an out-of-range oam_start_ip /
        oam_end_ip is re-derived from the subnet boundaries.
    :param region_config: when True (region mode), out-of-range start/end
        addresses are rejected instead of being re-derived.
    :returns: the (possibly updated) extoam dict.
    :raises wsme.exc.ClientSideError: on any validation failure.
    """

    subnetkey = 'oam_subnet'
    if subnetkey in extoam.keys():
        subnet = extoam[subnetkey]
        try:
            subnet = IPNetwork(subnet)
        except AddrFormatError:
            raise wsme.exc.ClientSideError(
                _("Invalid subnet %s %s. "
                  "Please configure a valid subnet") % (subnetkey, subnet))

        try:
            utils.is_valid_subnet(subnet)
        except Exception:
            raise wsme.exc.ClientSideError(
                _("Invalid subnet %s %s. "
                  "Please check and configure a valid OAM Subnet.") %
                (subnetkey, subnet))

    skip_oam_gateway_ip_check = False
    gateway_ipkey = 'oam_gateway_ip'
    gateway_ip = extoam.get(gateway_ipkey) or ""
    if gateway_ipkey in extoam.keys():
        ogateway_ip = extoam_orig.get(gateway_ipkey) or ""
        osubnet = extoam_orig.get(subnetkey) or ""
        if not ogateway_ip and osubnet:
            # No gateway was configured originally: either reject an attempt
            # to add one, or skip the per-address check for the gateway key.
            if gateway_ip:
                raise wsme.exc.ClientSideError(
                    _("OAM gateway IP is not allowed to be configured %s %s. "
                      "There is already a management gateway address configured."
                      ) % (ogateway_ip, gateway_ip))
            else:
                skip_oam_gateway_ip_check = True

    for k, v in extoam.items():
        if k in extoam_ip_address_keys:

            if skip_oam_gateway_ip_check:
                if k == "oam_gateway_ip":
                    continue
            if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX:
                # Simplex has no per-controller OAM addresses to validate.
                if k == "oam_c0_ip" or k == 'oam_c1_ip':
                    continue
            try:
                v = IPAddress(v)
            except (AddrFormatError, ValueError):
                # NOTE(review): 'subnet' is unbound here if oam_subnet was
                # absent from extoam — presumably callers always supply it;
                # confirm before relying on this path.
                raise wsme.exc.ClientSideError(
                    _("Invalid address %s in %s."
                      " Please configure a valid"
                      " IPv%s address") % (v, k, str(subnet.version)))

            utils.is_valid_address_within_subnet(v, subnet)

    oam_c0_ip = extoam.get('oam_c0_ip') or ""
    oam_c1_ip = extoam.get('oam_c1_ip') or ""

    # check for unique if not empty
    if oam_c0_ip and oam_c0_ip == oam_c1_ip:
        raise wsme.exc.ClientSideError(
            _("Invalid address: "
              "oam_c0_ip=%s and oam_c1_ip=%s must be unique. ") %
            (oam_c0_ip, oam_c1_ip))

    # BUGFIX: the original condition was
    #   gateway_ip and (gateway_ip == oam_c0_ip) or (gateway_ip == oam_c1_ip)
    # which, because 'and' binds tighter than 'or', raised a spurious error
    # whenever both gateway_ip and oam_c1_ip were empty ("" == "").
    # The gateway must only be checked for uniqueness when it is set.
    if gateway_ip and ((gateway_ip == oam_c0_ip) or (gateway_ip == oam_c1_ip)):
        raise wsme.exc.ClientSideError(
            _("Invalid address: "
              "oam_c0_ip=%s, oam_c1_ip=%s, oam_gateway_ip=%s must be unique.")
            % (oam_c0_ip, oam_c1_ip, gateway_ip))

    # Region Mode, check if addresses are within start and end range
    # Gateway address is not used in region mode
    subnet = IPNetwork(extoam.get('oam_subnet'))
    floating_address = IPAddress(extoam.get('oam_floating_ip'))
    start_address = IPAddress(extoam.get('oam_start_ip'))
    end_address = IPAddress(extoam.get('oam_end_ip'))
    # check whether start and end addresses are within the oam_subnet range
    if start_address not in subnet:
        if region_config:
            raise wsme.exc.ClientSideError(
                _("Invalid oam_start_ip=%s. Please configure a valid IP address"
                  ) % start_address)
        # Outside region mode, silently default to the first usable address.
        LOG.info("Updating oam_start_ip=%s to %s" % (start_address, subnet[1]))
        extoam['oam_start_ip'] = subnet[1]
        start_address = IPAddress(extoam.get('oam_start_ip'))

    if end_address not in subnet:
        if region_config:
            raise wsme.exc.ClientSideError(
                _("Invalid oam_end_ip=%s. Please configure a valid IP address")
                % end_address)
        # Outside region mode, silently default to the last usable address.
        LOG.info("Updating oam_end_ip=%s to %s" % (end_address, subnet[-2]))
        extoam['oam_end_ip'] = subnet[-2]
        end_address = IPAddress(extoam.get('oam_end_ip'))

    if floating_address not in IPRange(start_address, end_address):
        raise wsme.exc.ClientSideError(
            _("Invalid oam_floating_ip=%s. Please configure a valid IP address "
              "in range") % floating_address)

    if oam_c0_ip and IPAddress(oam_c0_ip) not in IPRange(
            start_address, end_address):
        raise wsme.exc.ClientSideError(
            _("Invalid oam_c0_ip=%s. Please configure a valid IP address "
              "in range") % oam_c0_ip)

    if oam_c1_ip and IPAddress(oam_c1_ip) not in IPRange(
            start_address, end_address):
        raise wsme.exc.ClientSideError(
            _("Invalid oam_c1_ip=%s. Please configure a valid IP address "
              "in range") % oam_c1_ip)

    return extoam
Example #29
0
class UnsupportedRpcEnvelopeVersion(RPCException):
    """Raised when an RPC envelope version is not handled by this endpoint."""

    message = _("Specified RPC envelope version, "
                "%(version)s, not supported by this endpoint.")
Example #30
0
class KubeAppReleasesNotFound(NotFound):
    """Raised when an application has no associated releases."""

    message = _(
        "No releases found for application %(app_id)s")