Example #1
    def get_monitors_status(self, db_api):
        num_inv_monitors = 0
        if cutils.is_aio_system(db_api):
            required_monitors = constants.MIN_STOR_MONITORS_AIO
        else:
            required_monitors = constants.MIN_STOR_MONITORS_MULTINODE
        quorum_names = []
        inventory_monitor_names = []

        # first check that the monitors are available in sysinv
        monitor_list = db_api.ceph_mon_get_list()
        for mon in monitor_list:
            ihost = db_api.ihost_get(mon['forihostid'])
            host_action = ihost['ihost_action'] or ""
            locking = (host_action.startswith(constants.LOCK_ACTION)
                       or host_action.startswith(constants.FORCE_LOCK_ACTION))
            if (ihost['administrative'] == constants.ADMIN_UNLOCKED
                    and ihost['operational'] == constants.OPERATIONAL_ENABLED
                    and not locking):
                num_inv_monitors += 1
                inventory_monitor_names.append(ihost['hostname'])

        LOG.info("Active ceph monitors in inventory = %s" %
                 str(inventory_monitor_names))

        # check that the cluster is actually operational.
        # if we can get the monitor quorum from ceph, then
        # the cluster is truly operational
        if num_inv_monitors >= required_monitors:
            try:
                quorum_names = self._osd_quorum_names()
            except Exception:
                # if the cluster is not responding to requests,
                # set quorum_names to an empty list to indicate a problem
                quorum_names = []
                LOG.error("Ceph cluster not responding to requests.")

        LOG.info("Active ceph monitors in ceph cluster = %s" %
                 str(quorum_names))

        # There may be cases where a host is in an unlocked-available state,
        # but the monitor is down due to crashes or manual removal.
        # For such cases, we determine the list of active ceph monitors to be
        # the intersection of the sysinv reported unlocked-available monitor
        # hosts and the monitors reported in the quorum via the ceph API.
        active_monitors = list(
            set(inventory_monitor_names) & set(quorum_names))
        num_active_monitors = len(active_monitors)
        if (num_inv_monitors and num_active_monitors == 0
                and cutils.is_initial_config_complete()
                and not cutils.is_aio_system(db_api)):
            # The active controller always has a monitor.
            # We are on standard or storage, initial configuration
            # was completed and Ceph is down so we can't check if
            # it is working. Assume it is.
            num_active_monitors = 1
        LOG.info("Active ceph monitors = %s" % str(active_monitors))

        return num_active_monitors, required_monitors, active_monitors
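
The core of get_monitors_status() is the set intersection between the monitor hosts sysinv reports as unlocked-available and the monitors ceph reports in quorum (Example #13 below consumes the resulting tuple). A minimal standalone sketch of that step; the function and host names here are illustrative, not part of the module above:

def count_active_monitors(inventory_monitor_names, quorum_names):
    # Active monitors = sysinv's unlocked-available monitor hosts that are
    # also present in the ceph quorum, exactly as computed above.
    active_monitors = sorted(set(inventory_monitor_names) & set(quorum_names))
    return len(active_monitors), active_monitors

# e.g. sysinv sees three monitor hosts but only two are in the ceph quorum
num_active, active = count_active_monitors(
    ['controller-0', 'controller-1', 'storage-0'],
    ['controller-0', 'controller-1'])
assert (num_active, active) == (2, ['controller-0', 'controller-1'])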
Example #2
 def readonly_attrs():
     """These attributes cannot be updated."""
     # Once the initial configuration is complete, pool resizing is
     # disallowed
     if cutils.is_initial_config_complete():
         return ['/network', '/prefix']
     else:
         return ['/network']
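
readonly_attrs() yields the JSON-patch paths that should be rejected once the initial configuration is complete. A hedged sketch of a consumer; the helper name and the exception used here are assumptions, not the actual API code:

def reject_readonly_updates(patch_ops, readonly_paths):
    # Hypothetical guard: refuse any patch operation that touches a path
    # reported as read-only (e.g. '/prefix' after initial config completes).
    for op in patch_ops:
        if op.get('path') in readonly_paths:
            raise ValueError("%s cannot be updated after the initial "
                             "configuration is complete" % op['path'])

# passes: '/order' is not read-only; a '/prefix' operation would raise
reject_readonly_updates(
    [{'op': 'replace', 'path': '/order', 'value': 'random'}],
    ['/network', '/prefix'])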
Example #3
 def _check_pool_readonly(self, address_pool_id):
     networks = pecan.request.dbapi.networks_get_by_pool(address_pool_id)
     # Pool is considered readonly after the initial configuration is
     # complete. During bootstrap it should be modifiable even though
     # it is allocated to a network.
     if networks and cutils.is_initial_config_complete():
         # network managed address pool, no changes permitted
         raise exception.AddressPoolReadonly()
Example #4
    def _get_host_join_command(self, host):
        config = {}
        if not utils.is_initial_config_complete():
            return config

        join_cmd = self._get_kubernetes_join_cmd(host)
        config.update({'platform::kubernetes::params::join_cmd': join_cmd})

        return config
Example #5
 def _check_pool_readonly(self, addrpool):
     # The system controller's network pools are expected to remain writable
     # so that a subcloud can be re-homed to new system controllers.
     if addrpool.name not in SYSTEM_CONTROLLER_ADDRPOOLS:
         networks = pecan.request.dbapi.networks_get_by_pool(addrpool.id)
         # Except for the system controller's pools, an address pool is
         # considered readonly after the initial configuration is complete.
         # During bootstrap it should be modifiable even though it is
         # allocated to a network.
         if networks and cutils.is_initial_config_complete():
             # network managed address pool, no changes permitted
             raise exception.AddressPoolReadonly()
Example #6
 def delete(self, network_uuid):
     """Delete a network."""
     network = pecan.request.dbapi.network_get(network_uuid)
     if cutils.is_initial_config_complete() and \
         network['type'] in [constants.NETWORK_TYPE_MGMT,
                             constants.NETWORK_TYPE_OAM,
                             constants.NETWORK_TYPE_CLUSTER_HOST,
                             constants.NETWORK_TYPE_CLUSTER_POD,
                             constants.NETWORK_TYPE_CLUSTER_SERVICE]:
         msg = _("Cannot delete type {} network {} after initial "
                 "configuration completion".format(network['type'],
                                                   network_uuid))
         raise wsme.exc.ClientSideError(msg)
     pecan.request.dbapi.network_destroy(network_uuid)
Example #7
 def delete(self, address_pool_uuid):
     """Delete an IP address pool."""
     addrpool = self._get_one(address_pool_uuid)
     self._check_pool_readonly(addrpool.id)
     addresses = pecan.request.dbapi.addresses_get_by_pool(addrpool.id)
     if addresses:
         if cutils.is_initial_config_complete():
             raise exception.AddressPoolInUseByAddresses()
         else:
             # Must be a request as a result of network reconfiguration
             # during bootstrap. Delete the addresses in the pool
             # before deleting the pool
             for addr in addresses:
                 pecan.request.dbapi.address_destroy(addr.uuid)
     pecan.request.dbapi.address_pool_destroy(address_pool_uuid)
Example #8
 def delete(self, address_pool_uuid):
     """Delete an IP address pool."""
     addrpool = self._get_one(address_pool_uuid)
     self._check_pool_readonly(addrpool)
     addresses = pecan.request.dbapi.addresses_get_by_pool(addrpool.id)
     if addresses:
         # None of the initially configured address pools are deletable,
         # except the system controller's network address pools, which
         # can be deleted and re-added while re-homing a subcloud to new
         # system controllers.
         if cutils.is_initial_config_complete() and \
            (addrpool.name not in SYSTEM_CONTROLLER_ADDRPOOLS):
             raise exception.AddressPoolInUseByAddresses()
         else:
             # Must be a request as a result of network reconfiguration
             # during bootstrap. Delete the addresses in the pool
             # before deleting the pool
             for addr in addresses:
                 pecan.request.dbapi.address_destroy(addr.uuid)
     pecan.request.dbapi.address_pool_destroy(address_pool_uuid)
Example #9
    def _create_network(self, network):
        # Perform syntactic validation
        network.validate_syntax()
        network = network.as_dict()
        network['uuid'] = str(uuid.uuid4())

        # Perform semantic validation
        self._check_network_type(network['type'])

        pool_uuid = network.pop('pool_uuid', None)
        if pool_uuid:
            pool = pecan.request.dbapi.address_pool_get(pool_uuid)
            network.update({'address_pool_id': pool.id})

        # Attempt to create the new network record
        result = pecan.request.dbapi.network_create(network)

        self._create_network_addresses(pool, network)

        # If the host has already been created, make an RPC request to
        # reconfigure the service endpoints. As the oam network is processed
        # after the management network, check only for NETWORK_TYPE_OAM to
        # avoid potentially making two reconfigure_service_endpoints
        # RPC requests in succession.
        chosts = pecan.request.dbapi.ihost_get_by_personality(
            constants.CONTROLLER)
        if (len(chosts) == 1
                and network['type'] == constants.NETWORK_TYPE_OAM):
            pecan.request.rpcapi.reconfigure_service_endpoints(
                pecan.request.context, chosts[0])

        # After the initial configuration is complete, we can still delete/add
        # the system controller networks on a subcloud's controller to
        # re-home the subcloud to a new central cloud. In this case, we want
        # to update the related service configurations at runtime.
        if cutils.is_initial_config_complete() and \
            network['type'] in [constants.NETWORK_TYPE_SYSTEM_CONTROLLER,
                                constants.NETWORK_TYPE_SYSTEM_CONTROLLER_OAM]:
            self._update_system_controller_network_config(network['type'])
        return Network.convert_with_links(result)
Example #10
    def _get_host_join_command(self, host):
        config = {}
        if not utils.is_initial_config_complete():
            return config

        # The token expires after 24 hours and is needed for a reinstall.
        # The puppet manifest handles the case where the node already exists.
        try:
            join_cmd_additions = ''
            if host.personality == constants.CONTROLLER:
                # Upload the certificates used during kubeadm join
                # The cert key will be printed in the last line of the output
                cmd = [
                    'kubeadm', 'init', 'phase', 'upload-certs',
                    '--upload-certs', '--config',
                    '/etc/kubernetes/kubeadm.yaml'
                ]
                cmd_output = subprocess.check_output(cmd)
                cert_key = cmd_output.strip().split('\n')[-1]
                join_cmd_additions = " --control-plane --certificate-key %s" % cert_key

            cmd = [
                'kubeadm', 'token', 'create', '--print-join-command',
                '--description',
                'Bootstrap token for %s' % host.hostname
            ]
            join_cmd = subprocess.check_output(cmd)
            join_cmd_additions += " --cri-socket /var/run/containerd/containerd.sock"
            join_cmd = join_cmd.strip() + join_cmd_additions
        except subprocess.CalledProcessError:
            raise exception.SysinvException(
                'Failed to generate bootstrap token')

        config.update({'platform::kubernetes::params::join_cmd': join_cmd})

        return config
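
On Python 3, subprocess.check_output() returns bytes, so the string handling above assumes text-mode output. A sketch of the same token-generation step with explicit text handling; this is an assumed variant (universal_newlines=True), not the module's actual code, and the hostname is illustrative:

import subprocess

# Same kubeadm call as above, but universal_newlines=True makes
# check_output return str rather than bytes under Python 3.
cmd = ['kubeadm', 'token', 'create', '--print-join-command',
       '--description', 'Bootstrap token for %s' % 'controller-1']
join_cmd = subprocess.check_output(cmd, universal_newlines=True).strip()
join_cmd += " --cri-socket /var/run/containerd/containerd.sock"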
Example #11
 def _check_from_pool(self, pool_uuid):
     # Disallow the removal of an allocated address after the initial
     # configuration is complete.
     if pool_uuid and cutils.is_initial_config_complete():
         raise exception.AddressAllocatedFromPool()
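
_check_from_pool() is intended to be called from an address delete handler, mirroring the pool delete handlers in Examples #7 and #8. A hedged sketch of that call site; the handler body and the address.pool_uuid attribute are assumptions following that pattern:

 def delete(self, address_uuid):
     """Delete an IP address."""
     address = self._get_one(address_uuid)
     # Disallowed after initial config if the address came from a pool
     self._check_from_pool(address.pool_uuid)
     pecan.request.dbapi.address_destroy(address_uuid)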
Example #12
    def patch(self, isystem_uuid, patch):
        """Update an existing isystem.

        :param isystem_uuid: UUID of a isystem.
        :param patch: a json PATCH document to apply to this isystem.
        """
        rpc_isystem = objects.system.get_by_uuid(pecan.request.context,
                                                 isystem_uuid)
        system_dict = rpc_isystem.as_dict()
        updates = self._get_updates(patch)
        change_https = False
        change_sdn = False
        change_dc_role = False
        vswitch_type = None

        # prevent read-only fields from being updated
        for p in jsonpatch.JsonPatch(patch):
            if p['path'] == '/software_version':
                raise wsme.exc.ClientSideError(
                    _("software_version field "
                      "cannot be modified."))

            if p['path'] == '/system_type':
                if rpc_isystem is not None:
                    if rpc_isystem.system_type is not None:
                        raise wsme.exc.ClientSideError(
                            _("system_type field "
                              "cannot be "
                              "modified."))

            if (p['path'] == '/system_mode'
                    and p.get('value') != rpc_isystem.system_mode):
                if rpc_isystem is not None and \
                   rpc_isystem.system_mode is not None:
                    if rpc_isystem.system_type != constants.TIS_AIO_BUILD:
                        raise wsme.exc.ClientSideError(
                            "system_mode can only be modified on an "
                            "AIO system")
                    system_mode_options = [
                        constants.SYSTEM_MODE_DUPLEX,
                        constants.SYSTEM_MODE_DUPLEX_DIRECT
                    ]
                    new_system_mode = p['value']
                    # Allow modification of system mode during bootstrap. Once the
                    # initial configuration is complete, this type of request is
                    # restricted by the conditions below.
                    if cutils.is_initial_config_complete():
                        if rpc_isystem.system_mode == \
                                constants.SYSTEM_MODE_SIMPLEX:
                            msg = _("Cannot modify system mode when it is "
                                    "already set to %s." %
                                    rpc_isystem.system_mode)
                            raise wsme.exc.ClientSideError(msg)
                        elif new_system_mode == constants.SYSTEM_MODE_SIMPLEX:
                            msg = _(
                                "Cannot modify system mode to simplex when "
                                "it is set to %s " % rpc_isystem.system_mode)
                            raise wsme.exc.ClientSideError(msg)
                    else:
                        system_mode_options.append(
                            constants.SYSTEM_MODE_SIMPLEX)

                    if new_system_mode not in system_mode_options:
                        raise wsme.exc.ClientSideError(
                            "Invalid value for system_mode, it can only"
                            " be modified to '%s' or '%s'" %
                            (constants.SYSTEM_MODE_DUPLEX,
                             constants.SYSTEM_MODE_DUPLEX_DIRECT))

            if p['path'] == '/timezone':
                timezone = p['value']
                if not os.path.isfile("/usr/share/zoneinfo/%s" % timezone):
                    raise wsme.exc.ClientSideError(
                        _("Timezone file %s "
                          "does not exist." % timezone))

            if p['path'] == '/sdn_enabled':
                sdn_enabled = p['value'].lower()
                patch.remove(p)

            if p['path'] == '/https_enabled':
                https_enabled = p['value'].lower()
                patch.remove(p)

            if p['path'] == '/distributed_cloud_role':
                distributed_cloud_role = p['value']
                patch.remove(p)

            if p['path'] == '/vswitch_type':
                vswitch_type = p['value']
                patch.remove(p)

            if p['path'] == '/security_feature':
                security_feature = p['value']
                patch.remove(p)

        try:
            patched_system = jsonpatch.apply_patch(system_dict,
                                                   jsonpatch.JsonPatch(patch))
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        if 'sdn_enabled' in updates:
            if sdn_enabled != rpc_isystem['capabilities']['sdn_enabled']:
                self._check_hosts()
                change_sdn = True
                if sdn_enabled == 'true':
                    self._verify_sdn_enabled()
                    patched_system['capabilities']['sdn_enabled'] = True
                else:
                    self._verify_sdn_disabled()
                    patched_system['capabilities']['sdn_enabled'] = False

        if 'https_enabled' in updates:
            # Pre-check: if the user is setting https_enabled to false
            # while the 'ssl' cert is managed by cert-manager, return an error
            # (otherwise, cert-mon will turn https back on during cert renewal)
            managed_by_cm = self._kube_op.kube_get_secret(
                constants.PLATFORM_CERT_SECRET_NAME,
                constants.CERT_NAMESPACE_PLATFORM_CERTS)
            if https_enabled == 'false' and managed_by_cm is not None:
                msg = "Certificate is currently being managed by cert-manager. " \
                    "Remove %s Certificate and Secret before disabling https." % \
                    constants.PLATFORM_CERT_SECRET_NAME
                raise wsme.exc.ClientSideError(_(msg))

            if https_enabled != rpc_isystem['capabilities']['https_enabled']:
                change_https = True
                if https_enabled == 'true':
                    patched_system['capabilities']['https_enabled'] = True
                else:
                    patched_system['capabilities']['https_enabled'] = False
            else:
                raise wsme.exc.ClientSideError(
                    _("https_enabled is already set"
                      " as %s" % https_enabled))

        if 'distributed_cloud_role' in updates:
            # At this point the dc role cannot be changed once the initial
            # configuration is complete
            if (rpc_isystem['distributed_cloud_role'] is not None
                    and cutils.is_initial_config_complete()):
                raise wsme.exc.ClientSideError(
                    _("distributed_cloud_role is already set "
                      " as %s" % rpc_isystem['distributed_cloud_role']))
            # allow setting the role to None before the initial config
            # is complete
            elif ((distributed_cloud_role in [
                    constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER,
                    constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD
            ] or distributed_cloud_role is None)
                  and not cutils.is_initial_config_complete()):
                change_dc_role = True
                patched_system[
                    'distributed_cloud_role'] = distributed_cloud_role
            else:
                raise wsme.exc.ClientSideError(
                    _("Unexpected value %s specified"
                      " for distributed_cloud_role" % distributed_cloud_role))

        if 'vswitch_type' in updates:
            if vswitch_type == rpc_isystem['capabilities']['vswitch_type']:
                raise wsme.exc.ClientSideError(
                    _("vswitch_type is already set"
                      " as %s" % vswitch_type))
            patched_system['capabilities']['vswitch_type'] = vswitch_type

        if 'security_feature' in updates:
            # Security feature string must be translated from user values to
            # kernel options
            if (security_feature in
                    constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS):
                security_feature_value = \
                    constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS[security_feature]
                patched_system['security_feature'] = security_feature_value
            else:
                raise wsme.exc.ClientSideError(
                    _("Unexpected value %s specified for "
                      "security_feature" % security_feature))

        # Update only the fields that have changed
        name = ""
        system_mode = ""
        timezone = ""
        capabilities = {}
        distributed_cloud_role = ""
        security_feature = ""

        for field in objects.system.fields:
            if rpc_isystem[field] != patched_system[field]:
                rpc_isystem[field] = patched_system[field]
                if field == 'name':
                    name = rpc_isystem[field]
                if field == 'system_mode':
                    system_mode = rpc_isystem[field]
                if field == 'timezone':
                    timezone = rpc_isystem[field]
                if field == 'capabilities':
                    capabilities = rpc_isystem[field]
                if field == 'distributed_cloud_role':
                    distributed_cloud_role = rpc_isystem[field]
                if field == 'security_feature':
                    security_feature = rpc_isystem[field]

        delta = rpc_isystem.obj_what_changed()
        delta_handle = list(delta)
        rpc_isystem.save()

        if name:
            LOG.info("update system name")
            pecan.request.rpcapi.configure_isystemname(pecan.request.context,
                                                       name)
        if 'system_mode' in delta_handle:
            LOG.info("update system mode %s" % system_mode)
            pecan.request.rpcapi.update_system_mode_config(
                pecan.request.context)
        if timezone:
            LOG.info("update system timezone to %s" % timezone)
            pecan.request.rpcapi.configure_system_timezone(
                pecan.request.context)
        if capabilities:
            if change_sdn:
                LOG.info("update sdn to %s" % capabilities)
                pecan.request.rpcapi.update_sdn_enabled(pecan.request.context)
            if change_https:
                LOG.info("update https to %s" % capabilities)
                pecan.request.rpcapi.configure_system_https(
                    pecan.request.context)
            if vswitch_type:
                LOG.info("update vswitch_type to %s" % capabilities)
                pecan.request.rpcapi.update_vswitch_type(pecan.request.context)

        if distributed_cloud_role and change_dc_role:
            LOG.info("update distributed cloud role to %s" %
                     distributed_cloud_role)
            pecan.request.rpcapi.update_distributed_cloud_role(
                pecan.request.context)

        # check if we need to configure the system controller database
        if (change_dc_role and distributed_cloud_role
                == constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER):
            hosts = pecan.request.dbapi.ihost_get_by_personality(
                constants.CONTROLLER)
            # this is a replay case after the first host has been created
            if len(hosts) == 1:
                pecan.request.rpcapi.configure_system_controller(
                    pecan.request.context, hosts[0])

        if 'security_feature' in delta_handle:
            LOG.info("update security_feature %s" % security_feature)
            pecan.request.rpcapi.update_security_feature_config(
                pecan.request.context)

        return System.convert_with_links(rpc_isystem)
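
For reference, a minimal sketch of the kind of JSON PATCH document this handler processes; the values are illustrative and map onto the paths checked above:

patch = [
    {'op': 'replace', 'path': '/system_mode', 'value': 'duplex'},
    {'op': 'replace', 'path': '/timezone', 'value': 'UTC'},
]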
Example #13
def _check(self, op, tier):
    # Semantic checks
    LOG.debug("storage_tier: Semantic check for %s operation" % op)

    # Check storage tier parameters
    _check_parameters(tier)

    if op == "add":
        # See if this storage tier already exists
        tiers = pecan.request.dbapi.storage_tier_get_all(name=tier['name'])
        if len(tiers) != 0:
            raise wsme.exc.ClientSideError(
                _("Storage tier (%s) "
                  "already present." % tier['name']))

        # Deny adding secondary tier if initial configuration is not done.
        if not cutils.is_initial_config_complete():
            msg = _(
                "Operation denied. Adding secondary tiers to a cluster requires "
                "initial configuration to be complete and controller node unlocked."
            )
            raise wsme.exc.ClientSideError(msg)

        if cutils.is_aio_system(pecan.request.dbapi):
            # Deny adding secondary tiers if the primary tier backend is not
            # configured for the cluster.
            clusterId = tier.get('forclusterid') or tier.get('cluster_uuid')
            cluster_tiers = pecan.request.dbapi.storage_tier_get_by_cluster(
                clusterId)
            configured = not cluster_tiers
            for t in cluster_tiers:
                if t.forbackendid:
                    bk = pecan.request.dbapi.storage_backend_get(
                        t.forbackendid)
                    if bk.state != constants.SB_STATE_CONFIGURED:
                        msg = _("Operation denied. Storage backend '%s' "
                                "of tier '%s' must be in '%s' state." %
                                (bk.name, t['name'],
                                 constants.SB_STATE_CONFIGURED))
                        raise wsme.exc.ClientSideError(msg)
                    configured = True
            if not configured:
                msg = _(
                    "Operation denied. Adding secondary tiers to a cluster requires "
                    "primary tier storage backend of this cluster to be configured."
                )
                raise wsme.exc.ClientSideError(msg)
        else:
            # Deny adding a secondary tier if Ceph is down on a standard system
            num_monitors, required_monitors, __ = \
                self._ceph.get_monitors_status(pecan.request.dbapi)
            if num_monitors < required_monitors:
                raise wsme.exc.ClientSideError(
                    _("Operation denied. Ceph is not operational. "
                      "Only %d storage monitor available. "
                      "At least %s unlocked and enabled hosts with "
                      "monitors are required. Please ensure hosts "
                      "with monitors are unlocked and enabled.") %
                    (num_monitors, required_monitors))

    elif op == "delete":
        if tier['name'] == constants.SB_TIER_DEFAULT_NAMES[
                constants.SB_TIER_TYPE_CEPH]:
            raise wsme.exc.ClientSideError(
                _("Storage Tier %s cannot be "
                  "deleted.") % tier['name'])

        if tier['status'] != constants.SB_TIER_STATUS_DEFINED:
            raise wsme.exc.ClientSideError(
                _("Storage Tier %s cannot be "
                  "deleted. It is %s") % (tier['name'], tier['status']))
    elif op == "modify":
        pass
    else:
        raise wsme.exc.ClientSideError(
            _("Internal Error: Invalid storage tier operation: %s" % op))

    return tier
Example #14
    def patch(self, isystem_uuid, patch):
        """Update an existing isystem.

        :param isystem_uuid: UUID of a isystem.
        :param patch: a json PATCH document to apply to this isystem.
        """
        rpc_isystem = objects.system.get_by_uuid(pecan.request.context,
                                                 isystem_uuid)
        system_dict = rpc_isystem.as_dict()
        updates = self._get_updates(patch)
        change_https = False
        change_sdn = False
        change_dc_role = False
        vswitch_type = None

        # prevent read-only fields from being updated
        for p in jsonpatch.JsonPatch(patch):
            if p['path'] == '/software_version':
                raise wsme.exc.ClientSideError(
                    _("software_version field "
                      "cannot be modified."))

            if p['path'] == '/system_type':
                if rpc_isystem is not None:
                    if rpc_isystem.system_type is not None:
                        raise wsme.exc.ClientSideError(
                            _("system_type field "
                              "cannot be "
                              "modified."))

            if (p['path'] == '/system_mode'
                    and p.get('value') != rpc_isystem.system_mode):
                if rpc_isystem is not None and \
                   rpc_isystem.system_mode is not None:
                    if rpc_isystem.system_type != constants.TIS_AIO_BUILD:
                        raise wsme.exc.ClientSideError(
                            "system_mode can only be modified on an "
                            "AIO system")
                    system_mode_options = [
                        constants.SYSTEM_MODE_DUPLEX,
                        constants.SYSTEM_MODE_DUPLEX_DIRECT
                    ]
                    new_system_mode = p['value']
                    # Allow modification of system mode during bootstrap. Once the
                    # initial configuration is complete, this type of request is
                    # restricted by the conditions below.
                    if cutils.is_initial_config_complete():
                        if rpc_isystem.system_mode == \
                                constants.SYSTEM_MODE_SIMPLEX:
                            msg = _("Cannot modify system mode when it is "
                                    "already set to %s." %
                                    rpc_isystem.system_mode)
                            raise wsme.exc.ClientSideError(msg)
                        elif new_system_mode == constants.SYSTEM_MODE_SIMPLEX:
                            msg = _(
                                "Cannot modify system mode to simplex when "
                                "it is set to %s " % rpc_isystem.system_mode)
                            raise wsme.exc.ClientSideError(msg)
                    else:
                        system_mode_options.append(
                            constants.SYSTEM_MODE_SIMPLEX)

                    if new_system_mode not in system_mode_options:
                        raise wsme.exc.ClientSideError(
                            "Invalid value for system_mode, it can only"
                            " be modified to '%s' or '%s'" %
                            (constants.SYSTEM_MODE_DUPLEX,
                             constants.SYSTEM_MODE_DUPLEX_DIRECT))

            if p['path'] == '/timezone':
                timezone = p['value']
                if not os.path.isfile("/usr/share/zoneinfo/%s" % timezone):
                    raise wsme.exc.ClientSideError(
                        _("Timezone file %s "
                          "does not exist." % timezone))

            if p['path'] == '/sdn_enabled':
                sdn_enabled = p['value'].lower()
                patch.remove(p)

            if p['path'] == '/https_enabled':
                https_enabled = p['value'].lower()
                patch.remove(p)

            if p['path'] == '/distributed_cloud_role':
                distributed_cloud_role = p['value']
                patch.remove(p)

            if p['path'] == '/vswitch_type':
                vswitch_type = p['value']
                patch.remove(p)

            if p['path'] == '/security_feature':
                security_feature = p['value']
                patch.remove(p)

        try:
            patched_system = jsonpatch.apply_patch(system_dict,
                                                   jsonpatch.JsonPatch(patch))
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        if 'sdn_enabled' in updates:
            if sdn_enabled != rpc_isystem['capabilities']['sdn_enabled']:
                self._check_hosts()
                change_sdn = True
                if sdn_enabled == 'true':
                    self._verify_sdn_enabled()
                    patched_system['capabilities']['sdn_enabled'] = True
                else:
                    self._verify_sdn_disabled()
                    patched_system['capabilities']['sdn_enabled'] = False

        if 'https_enabled' in updates:
            if https_enabled != rpc_isystem['capabilities']['https_enabled']:
                change_https = True
                if https_enabled == 'true':
                    patched_system['capabilities']['https_enabled'] = True
                else:
                    patched_system['capabilities']['https_enabled'] = False
            else:
                raise wsme.exc.ClientSideError(
                    _("https_enabled is already set"
                      " as %s" % https_enabled))

        if 'distributed_cloud_role' in updates:
            # At this point the dc role cannot be changed after config_controller
            # and config_subcloud
            if rpc_isystem['distributed_cloud_role'] is None and \
                            distributed_cloud_role in \
                            [constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER,
                             constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD]:

                change_dc_role = True
                patched_system[
                    'distributed_cloud_role'] = distributed_cloud_role
            else:
                raise wsme.exc.ClientSideError(
                    _("distributed_cloud_role is already set "
                      " as %s" % rpc_isystem['distributed_cloud_role']))

        if 'vswitch_type' in updates:
            if vswitch_type == rpc_isystem['capabilities']['vswitch_type']:
                raise wsme.exc.ClientSideError(
                    _("vswitch_type is already set"
                      " as %s" % vswitch_type))
            patched_system['capabilities']['vswitch_type'] = vswitch_type

        if 'security_feature' in updates:
            # Security feature string must be translated from user values to
            # kernel options
            if (security_feature in
                    constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS):
                security_feature_value = \
                    constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS[security_feature]
                patched_system['security_feature'] = security_feature_value
            else:
                raise wsme.exc.ClientSideError(
                    _("Unexpected value %s specified for "
                      "security_feature" % security_feature))

        # Update only the fields that have changed
        name = ""
        contact = ""
        location = ""
        system_mode = ""
        timezone = ""
        capabilities = {}
        distributed_cloud_role = ""
        security_feature = ""

        for field in objects.system.fields:
            if rpc_isystem[field] != patched_system[field]:
                rpc_isystem[field] = patched_system[field]
                if field == 'name':
                    name = rpc_isystem[field]
                if field == 'contact':
                    contact = rpc_isystem[field]
                if field == 'location':
                    location = rpc_isystem[field]
                if field == 'system_mode':
                    system_mode = rpc_isystem[field]
                if field == 'timezone':
                    timezone = rpc_isystem[field]
                if field == 'capabilities':
                    capabilities = rpc_isystem[field]
                if field == 'distributed_cloud_role':
                    distributed_cloud_role = rpc_isystem[field]
                if field == 'security_feature':
                    security_feature = rpc_isystem[field]

        delta = rpc_isystem.obj_what_changed()
        delta_handle = list(delta)
        rpc_isystem.save()

        if name:
            LOG.info("update system name")
            pecan.request.rpcapi.configure_isystemname(pecan.request.context,
                                                       name)
        if name or location or contact:
            LOG.info("update SNMP config")
            pecan.request.rpcapi.update_snmp_config(pecan.request.context)
        if 'system_mode' in delta_handle:
            LOG.info("update system mode %s" % system_mode)
            pecan.request.rpcapi.update_system_mode_config(
                pecan.request.context)
        if timezone:
            LOG.info("update system timezone to %s" % timezone)
            pecan.request.rpcapi.configure_system_timezone(
                pecan.request.context)
        if capabilities:
            if change_sdn:
                LOG.info("update sdn to %s" % capabilities)
                pecan.request.rpcapi.update_sdn_enabled(pecan.request.context)
            if change_https:
                LOG.info("update https to %s" % capabilities)
                pecan.request.rpcapi.configure_system_https(
                    pecan.request.context)
            if vswitch_type:
                LOG.info("update vswitch_type to %s" % capabilities)
                pecan.request.rpcapi.update_vswitch_type(pecan.request.context)

        if distributed_cloud_role and change_dc_role:
            LOG.info("update distributed cloud role to %s" %
                     distributed_cloud_role)
            pecan.request.rpcapi.update_distributed_cloud_role(
                pecan.request.context)

        if 'security_feature' in delta_handle:
            LOG.info("update security_feature %s" % security_feature)
            pecan.request.rpcapi.update_security_feature_config(
                pecan.request.context)

        return System.convert_with_links(rpc_isystem)