Example no. 1
def _check_host(stor):
    ihost_id = stor['forihostid']
    ihost = pecan.request.dbapi.ihost_get(ihost_id)
    stor_model = ceph.get_ceph_storage_model()

    # semantic check: whether OSD can be added to this host.
    if stor_model == constants.CEPH_STORAGE_MODEL:
        if ihost.personality != constants.STORAGE:
            msg = ("Storage model is '%s'. Storage devices can only be added "
                   "to storage nodes." % stor_model)
            raise wsme.exc.ClientSideError(_(msg))
    elif stor_model == constants.CEPH_CONTROLLER_MODEL:
        if ihost.personality != constants.CONTROLLER:
            msg = ("Storage model is '%s'. Storage devices can only be added "
                   "to controller nodes." % stor_model)
            raise wsme.exc.ClientSideError(_(msg))
    elif stor_model == constants.CEPH_UNDEFINED_MODEL:
        msg = ("Please install storage-0 or configure a Ceph monitor "
               "on a worker node before adding storage devices.")
        raise wsme.exc.ClientSideError(_(msg))

    # semantic check: whether host is operationally acceptable
    if (stor_model == constants.CEPH_CONTROLLER_MODEL
            or stor_model == constants.CEPH_AIO_SX_MODEL):
        if (ihost['administrative'] == constants.ADMIN_UNLOCKED
                and ihost['operational'] != constants.OPERATIONAL_ENABLED):
            msg = _("Host %s must be unlocked and operationally "
                    "enabled." % ihost['hostname'])
            raise wsme.exc.ClientSideError(msg)
    else:
        if ihost['administrative'] != constants.ADMIN_LOCKED:
            raise wsme.exc.ClientSideError(
                _("Host %s must be locked." % ihost['hostname']))

    # semantic check: only storage nodes are allowed without k8s
    if (not utils.is_kubernetes_config(pecan.request.dbapi)
            and ihost['personality'] != constants.STORAGE):
        msg = ("Host personality must be 'storage' or kubernetes enabled.")
        raise wsme.exc.ClientSideError(_(msg))

    # semantic check: whether system has a ceph backend
    if not StorageBackendConfig.has_backend_configured(pecan.request.dbapi,
                                                       constants.SB_TYPE_CEPH):
        raise wsme.exc.ClientSideError(
            _("System must have a %s backend" % constants.SB_TYPE_CEPH))

    # semantic check: whether at least 2 unlocked hosts are monitors
    if not utils.is_aio_system(pecan.request.dbapi):
        ceph_helper = ceph.CephApiOperator()
        num_monitors, required_monitors, quorum_names = \
            ceph_helper.get_monitors_status(pecan.request.dbapi)
        # CGTS 503: for now, update the monitors requirement until
        # controller-0 is inventoried.
        # CGTS 1448
        if num_monitors < required_monitors:
            raise wsme.exc.ClientSideError(
                _("Only %d storage monitor available. "
                  "At least %s unlocked and enabled hosts with monitors are "
                  "required. Please ensure hosts with monitors are unlocked "
                  "and enabled.") % (num_monitors, required_monitors))
Example no. 2
    @classmethod
    def convert_with_links(cls, rpc_cluster, expand=True):
        cluster = Cluster(**rpc_cluster.as_dict())
        if not expand:
            cluster.unset_fields_except([
                'uuid', 'cluster_uuid', 'type', 'name', 'peers', 'tiers',
                'deployment_model'
            ])

        # All Ceph type clusters have the same storage model
        if cluster.type == constants.CLUSTER_TYPE_CEPH:
            try:
                # Storage model is defined dynamically, displayed by CLI
                # and used by Horizon.
                cluster.deployment_model = ceph.get_ceph_storage_model()
            except Exception:
                cluster.deployment_model = constants.CEPH_UNDEFINED_MODEL
        else:
            cluster.deployment_model = None

        cluster.links = [
            link.Link.make_link('self', pecan.request.host_url, 'clusters',
                                cluster.uuid),
            link.Link.make_link('bookmark',
                                pecan.request.host_url,
                                'clusters',
                                cluster.uuid,
                                bookmark=True)
        ]
        if expand:
            cluster.storage_tiers = [
                link.Link.make_link('self', pecan.request.host_url, 'clusters',
                                    cluster.uuid + "/storage_tiers"),
                link.Link.make_link('bookmark',
                                    pecan.request.host_url,
                                    'clusters',
                                    cluster.uuid + "/storage_tiers",
                                    bookmark=True)
            ]

        return cluster
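
convert_with_links() follows a common REST convention: collection listings build a compact representation (expand=False, keeping only the summary fields), while a single-resource GET returns the full object plus self/bookmark links. The sketch below shows that expand/collapse convention in isolation; the Resource class and field names are hypothetical stand-ins, not the WSME types used by sysinv.

# Illustrative sketch of the expand/collapse convention used above.
class Resource(object):
    def __init__(self, **fields):
        self.__dict__.update(fields)

    def unset_fields_except(self, keep):
        # Blank out every field that is not part of the compact view.
        for name in list(self.__dict__):
            if name not in keep:
                self.__dict__[name] = None

    @classmethod
    def convert(cls, record, expand=True):
        resource = cls(**record)
        if not expand:
            resource.unset_fields_except(['uuid', 'name', 'type'])
        return resource


record = {'uuid': 'c1', 'name': 'ceph_cluster', 'type': 'ceph',
          'deployment_model': 'controller-nodes'}
compact = Resource.convert(record, expand=False)  # collection listing view
full = Resource.convert(record, expand=True)      # single-resource view
assert compact.deployment_model is None
assert full.deployment_model == 'controller-nodes'
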
Example no. 3
def _create(ceph_mon):
    # validate host
    try:
        chost = pecan.request.dbapi.ihost_get(ceph_mon['ihost_uuid'])
    except exception.ServerNotFound:
        raise wsme.exc.ClientSideError(
            _("Host not found uuid: %s ." % ceph_mon['ihost_uuid']))

    ceph_mon['forihostid'] = chost['id']

    # check if ceph monitor is already configured
    if pecan.request.dbapi.ceph_mon_get_by_ihost(ceph_mon['forihostid']):
        raise wsme.exc.ClientSideError(
            _("Ceph monitor already configured for host '%s'." % chost['hostname']))

    # only one 3rd ceph monitor (on a storage or worker host) is allowed
    ceph_mons = pecan.request.dbapi.ceph_mon_get_list()
    for mon in ceph_mons:
        h = pecan.request.dbapi.ihost_get(mon['forihostid'])
        if h.personality in [constants.STORAGE, constants.WORKER]:
            raise wsme.exc.ClientSideError(
                _("Ceph monitor already configured for host '%s'." % h['hostname']))

    # Adding a ceph monitor to a worker selects Ceph's deployment model
    if chost['personality'] == constants.WORKER:
        # Only allowed if the storage model is CONTROLLER or not yet defined
        stor_model = ceph.get_ceph_storage_model()
        worker_stor_models = [constants.CEPH_CONTROLLER_MODEL, constants.CEPH_UNDEFINED_MODEL]
        if stor_model not in worker_stor_models:
            raise wsme.exc.ClientSideError(
                _("Can not add a storage monitor to a worker if "
                  "ceph's deployments model is already set to %s." % stor_model))

        replication, min_replication = \
            StorageBackendConfig.get_ceph_max_replication(pecan.request.dbapi)
        supported_replication = constants.CEPH_CONTROLLER_MODEL_REPLICATION_SUPPORTED
        if replication not in supported_replication:
            raise wsme.exc.ClientSideError(
                _("Ceph monitor can be added to a worker only if "
                  "replication is set to: %s'. Please update replication "
                  "before configuring a monitor on a worker node." % supported_replication))

    # host must be locked and online unless this is controller-0
    if (chost['hostname'] != constants.CONTROLLER_0_HOSTNAME and
            (chost['availability'] != constants.AVAILABILITY_ONLINE or
            chost['administrative'] != constants.ADMIN_LOCKED)):
        raise wsme.exc.ClientSideError(
            _("Host %s must be locked and online." % chost['hostname']))

    ceph_mon = _set_defaults(ceph_mon)

    _check_ceph_mon(ceph_mon)

    controller_fs_utils._check_controller_fs(
        ceph_mon_gib_new=ceph_mon['ceph_mon_gib'])

    pecan.request.rpcapi.reserve_ip_for_first_storage_node(
        pecan.request.context)

    # Size of ceph-mon logical volume must be the same for all
    # monitors so we get the size from any or use default.
    ceph_mons = pecan.request.dbapi.ceph_mon_get_list()
    if ceph_mons:
        ceph_mon['ceph_mon_gib'] = ceph_mons[0]['ceph_mon_gib']

    # In case we add the monitor on a worker node, the state
    # and task must be set properly.
    if chost.personality == constants.WORKER:
        ceph_mon['state'] = constants.SB_STATE_CONFIGURING
        ctrls = pecan.request.dbapi.ihost_get_by_personality(
            constants.CONTROLLER)
        valid_ctrls = [
            ctrl for ctrl in ctrls if
            (ctrl.administrative == constants.ADMIN_LOCKED and
             ctrl.availability == constants.AVAILABILITY_ONLINE) or
            (ctrl.administrative == constants.ADMIN_UNLOCKED and
             ctrl.operational == constants.OPERATIONAL_ENABLED)]

        tasks = {}
        for ctrl in valid_ctrls:
            tasks[ctrl.hostname] = constants.SB_STATE_CONFIGURING

        ceph_mon['task'] = str(tasks)

    LOG.info("Creating ceph-mon DB entry for host uuid %s: %s" %
             (ceph_mon['ihost_uuid'], str(ceph_mon)))
    new_ceph_mon = pecan.request.dbapi.ceph_mon_create(ceph_mon)

    # We update the base config when adding a dynamic monitor.
    # At the moment a dynamic monitor can only be added on a
    # worker node, so we check for that.
    if chost.personality == constants.WORKER:
        try:
            # Storage nodes are not supported in a controller-based
            # storage model.
            personalities = [constants.CONTROLLER, constants.WORKER]
            pecan.request.rpcapi.update_ceph_base_config(
                pecan.request.context,
                personalities)
        except Exception:
            values = {'state': constants.SB_STATE_CONFIG_ERR, 'task': None}
            pecan.request.dbapi.ceph_mon_update(new_ceph_mon['uuid'], values)
            raise

    # The return value needs to be iterable, so make it a list.
    return [new_ceph_mon]
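
When the monitor is added on a worker node, _create() records a per-controller "configuring" task so each affected controller can be tracked while the base Ceph configuration is re-applied. The self-contained sketch below reproduces that controller filter and task-map construction with hypothetical stand-in constants and host records, not the sysinv database objects.

# Sketch of the per-controller task map built for a dynamically added monitor.
ADMIN_LOCKED, ADMIN_UNLOCKED = 'locked', 'unlocked'
AVAILABILITY_ONLINE, OPERATIONAL_ENABLED = 'online', 'enabled'
SB_STATE_CONFIGURING = 'configuring'

controllers = [
    {'hostname': 'controller-0', 'administrative': ADMIN_UNLOCKED,
     'availability': 'available', 'operational': OPERATIONAL_ENABLED},
    {'hostname': 'controller-1', 'administrative': ADMIN_LOCKED,
     'availability': AVAILABILITY_ONLINE, 'operational': 'disabled'},
]

# A controller is a valid configuration target if it is either locked and
# online, or unlocked and operationally enabled (same filter as above).
valid_ctrls = [
    c for c in controllers if
    (c['administrative'] == ADMIN_LOCKED and
     c['availability'] == AVAILABILITY_ONLINE) or
    (c['administrative'] == ADMIN_UNLOCKED and
     c['operational'] == OPERATIONAL_ENABLED)]

tasks = {c['hostname']: SB_STATE_CONFIGURING for c in valid_ctrls}
print(tasks)  # {'controller-0': 'configuring', 'controller-1': 'configuring'}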