# Example #1
# 0
def _create(ceph_mon):
    """Validate and create a ceph monitor DB entry for a host.

    :param ceph_mon: dict describing the monitor to create; must contain
                     'ihost_uuid'. Additional fields ('forihostid', defaults,
                     'state', 'task') are filled in here before creation.
    :returns: a single-element list wrapping the new ceph_mon DB record
              (callers expect an iterable).
    :raises wsme.exc.ClientSideError: on any validation failure.
    """
    # validate host
    try:
        chost = pecan.request.dbapi.ihost_get(ceph_mon['ihost_uuid'])
    except exception.ServerNotFound:
        raise wsme.exc.ClientSideError(
            _("Host not found uuid: %s ." % ceph_mon['ihost_uuid']))

    ceph_mon['forihostid'] = chost['id']

    # check if ceph monitor is already configured on this host
    if pecan.request.dbapi.ceph_mon_get_by_ihost(ceph_mon['forihostid']):
        raise wsme.exc.ClientSideError(
            _("Ceph monitor already configured for host '%s'." % chost['hostname']))

    # only one instance of the 3rd ceph monitor is allowed: reject if any
    # existing monitor already lives on a storage or worker host
    ceph_mons = pecan.request.dbapi.ceph_mon_get_list()
    for mon in ceph_mons:
        h = pecan.request.dbapi.ihost_get(mon['forihostid'])
        if h.personality in [constants.STORAGE, constants.WORKER]:
            raise wsme.exc.ClientSideError(
                _("Ceph monitor already configured for host '%s'." % h['hostname']))

    # Adding a ceph monitor to a worker selects Ceph's deployment model
    if chost['personality'] == constants.WORKER:
        # Only if replication model is CONTROLLER or not yet defined
        stor_model = ceph.get_ceph_storage_model()
        worker_stor_models = [constants.CEPH_CONTROLLER_MODEL, constants.CEPH_UNDEFINED_MODEL]
        if stor_model not in worker_stor_models:
            raise wsme.exc.ClientSideError(
                _("Can not add a storage monitor to a worker if "
                  "ceph's deployments model is already set to %s." % stor_model))

        # Worker monitors are only supported for the replication factors
        # listed in CEPH_CONTROLLER_MODEL_REPLICATION_SUPPORTED.
        replication, min_replication = \
            StorageBackendConfig.get_ceph_max_replication(pecan.request.dbapi)
        supported_replication = constants.CEPH_CONTROLLER_MODEL_REPLICATION_SUPPORTED
        if replication not in supported_replication:
            raise wsme.exc.ClientSideError(
                _("Ceph monitor can be added to a worker only if "
                  "replication is set to: %s'. Please update replication "
                  "before configuring a monitor on a worker node." % supported_replication))

    # host must be locked and online unless this is controller-0
    if (chost['hostname'] != constants.CONTROLLER_0_HOSTNAME and
            (chost['availability'] != constants.AVAILABILITY_ONLINE or
            chost['administrative'] != constants.ADMIN_LOCKED)):
        raise wsme.exc.ClientSideError(
            _("Host %s must be locked and online." % chost['hostname']))

    ceph_mon = _set_defaults(ceph_mon)

    # Size of ceph-mon logical volume must be the same for all
    # monitors so we get the size from any or use default.
    ceph_mons = pecan.request.dbapi.ceph_mon_get_list()
    if ceph_mons:
        ceph_mon['ceph_mon_gib'] = ceph_mons[0]['ceph_mon_gib']

    _check_ceph_mon(ceph_mon)

    # Validate that the requested ceph-mon size can grow on the
    # controller filesystem and on all affected hosts.
    # NOTE(review): check_all_ceph_mon_growth is called with (gib, chost)
    # here but with a single argument elsewhere — confirm its signature.
    controller_fs_utils._check_ceph_mon_growth(ceph_mon['ceph_mon_gib'])
    utils.check_all_ceph_mon_growth(ceph_mon['ceph_mon_gib'], chost)

    pecan.request.rpcapi.reserve_ip_for_first_storage_node(
        pecan.request.context)

    # In case we add the monitor on a worker node, the state
    # and task must be set properly.
    if chost.personality == constants.WORKER:
        ceph_mon['state'] = constants.SB_STATE_CONFIGURING
        # Collect controllers that can take the config: either locked and
        # online, or unlocked and operationally enabled.
        ctrls = pecan.request.dbapi.ihost_get_by_personality(
             constants.CONTROLLER)
        valid_ctrls = [
                ctrl for ctrl in ctrls if
                (ctrl.administrative == constants.ADMIN_LOCKED and
                 ctrl.availability == constants.AVAILABILITY_ONLINE) or
                (ctrl.administrative == constants.ADMIN_UNLOCKED and
                 ctrl.operational == constants.OPERATIONAL_ENABLED)]

        # Per-controller configuring task, stored as a stringified dict.
        tasks = {}
        for ctrl in valid_ctrls:
            tasks[ctrl.hostname] = constants.SB_STATE_CONFIGURING

        ceph_mon['task'] = str(tasks)

    LOG.info("Creating ceph-mon DB entry for host uuid %s: %s" %
             (ceph_mon['ihost_uuid'], str(ceph_mon)))
    new_ceph_mon = pecan.request.dbapi.ceph_mon_create(ceph_mon)

    # We update the base config when adding a dynamic monitor.
    # At this moment the only possibility to add a dynamic monitor
    # is on a worker node, so we check for that.
    if chost.personality == constants.WORKER:
        try:
            # Storage nodes are not supported on a controller based
            # storage model.
            personalities = [constants.CONTROLLER, constants.WORKER]
            pecan.request.rpcapi.update_ceph_base_config(
                pecan.request.context,
                personalities)
        except Exception:
            # On failure, flag the freshly created monitor as config-error
            # and clear its task before re-raising.
            values = {'state': constants.SB_STATE_CONFIG_ERR, 'task': None}
            pecan.request.dbapi.ceph_mon_update(new_ceph_mon['uuid'], values)
            raise

    # The return value needs to be iterable, so make it a list.
    return [new_ceph_mon]
# Example #2
# 0
    def patch(self, cephmon_uuid, patch):
        """Update an existing ceph monitor (currently only ceph_mon_gib).

        :param cephmon_uuid: UUID of the ceph monitor to update.
        :param patch: list of JSON-patch operation dicts ('path', 'value', ...).
        :returns: the updated monitor converted via CephMon.convert_with_links.
        :raises wsme.exc.ClientSideError: if ceph backend is not configured,
                nothing changed, a protected field is patched, or save fails.
        :raises exception.PatchError: if applying the JSON patch fails.
        """

        # A ceph backend must exist before any monitor can be resized.
        if not StorageBackendConfig.has_backend_configured(
            pecan.request.dbapi,
            constants.CINDER_BACKEND_CEPH
        ):
            raise wsme.exc.ClientSideError(
                _("Ceph backend is not configured.")
            )

        rpc_cephmon = objects.ceph_mon.get_by_uuid(pecan.request.context,
                                                   cephmon_uuid)
        is_ceph_mon_gib_changed = False

        # Drop '/controller' paths from the patch; they are not handled here.
        patch = [p for p in patch if '/controller' not in p['path']]

        # Check if either ceph mon size or disk has to change.
        for p in patch:
            if '/ceph_mon_gib' in p['path']:
                if rpc_cephmon.ceph_mon_gib != p['value']:
                    is_ceph_mon_gib_changed = True

        if not is_ceph_mon_gib_changed:
            LOG.info("ceph_mon parameters are not changed")
            raise wsme.exc.ClientSideError(
                _("Warning: ceph_mon parameters are not changed."))

        # Reject any attempt to modify identity/placement fields.
        patch_obj = jsonpatch.JsonPatch(patch)
        state_rel_path = ['/uuid', '/id', '/forihostid',
                          '/device_node', '/device_path']
        if any(p['path'] in state_rel_path for p in patch_obj):
            raise wsme.exc.ClientSideError(_("The following fields can not be "
                                             "modified: %s" %
                                             state_rel_path))

        # Apply the JSON patch to a dict copy and rebuild a CephMon object.
        try:
            cephmon = CephMon(**jsonpatch.apply_patch(
                rpc_cephmon.as_dict(),
                patch_obj))
        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Validate the new size against monitor constraints and available
        # filesystem growth before persisting anything.
        if is_ceph_mon_gib_changed:
            _check_ceph_mon(cephmon.as_dict(), rpc_cephmon.as_dict())
            controller_fs_utils._check_ceph_mon_growth(
                cephmon.ceph_mon_gib)
            utils.check_all_ceph_mon_growth(cephmon.ceph_mon_gib)

        # Copy only the fields that actually changed onto the RPC object.
        for field in objects.ceph_mon.fields:
            if rpc_cephmon[field] != cephmon.as_dict()[field]:
                rpc_cephmon[field] = cephmon.as_dict()[field]

        LOG.info("SYS_I  cephmon: %s " % cephmon.as_dict())

        try:
            rpc_cephmon.save()
        except exception.HTTPNotFound:
            msg = _("Ceph Mon update failed: uuid %s : "
                    " patch %s"
                    % (rpc_cephmon.uuid, patch))
            raise wsme.exc.ClientSideError(msg)

        if is_ceph_mon_gib_changed:
            # Update the task for ceph storage backend.
            StorageBackendConfig.update_backend_states(
                pecan.request.dbapi,
                constants.CINDER_BACKEND_CEPH,
                task=constants.SB_TASK_RESIZE_CEPH_MON_LV
            )

            # Mark controllers and storage node as Config out-of-date.
            pecan.request.rpcapi.update_storage_config(
                pecan.request.context,
                update_storage=is_ceph_mon_gib_changed,
                reinstall_required=False
            )

        return CephMon.convert_with_links(rpc_cephmon)