def patch(self, node_uuid, patch):
    """Update an existing node."""
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_node = objects.node.get_by_uuid(pecan.request.context, node_uuid)

    # Translate the external ihost UUID reference in the patch into the
    # internal host id used by the database layer.
    patch_obj = jsonpatch.JsonPatch(patch)
    for entry in patch_obj:
        if entry['path'] == '/ihost_uuid':
            entry['path'] = '/forihostid'
            owner = objects.host.get_by_uuid(pecan.request.context,
                                             entry['value'])
            entry['value'] = owner.id

    try:
        node = Node(**jsonpatch.apply_patch(rpc_node.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Copy across only the attributes the patch actually modified.
    for name in objects.node.fields:
        fresh = getattr(node, name)
        if rpc_node[name] != fresh:
            rpc_node[name] = fresh

    rpc_node.save()
    return Node.convert_with_links(rpc_node)
def patch(self, device_uuid, patch):
    """Update an existing device.

    :param device_uuid: UUID of the PCI device to modify.
    :param patch: a json PATCH document to apply to this device.
    """
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_device = objects.pci_device.get_by_uuid(pecan.request.context,
                                                device_uuid)

    # replace host_uuid with corresponding host_id
    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/host_uuid':
            p['path'] = '/host_id'
            host = objects.host.get_by_uuid(pecan.request.context,
                                            p['value'])
            p['value'] = host.id

    try:
        device = PCIDevice(
            **jsonpatch.apply_patch(rpc_device.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Semantic checks
    host = pecan.request.dbapi.ihost_get(device.host_id)
    _check_host(host)
    # sriov_update indicates whether an SR-IOV runtime config apply is
    # required after the save (see the rpcapi call below).
    sriov_update = _check_device_sriov(device.as_dict(), host)

    # Update fields that have changed
    for field in objects.pci_device.fields:
        value = getattr(device, field)
        if rpc_device[field] != value:
            _check_field(field)
            # The literal string 'none' is the API's way of clearing a
            # driver selection; store it as NULL in the database.
            if (field in ['sriov_vf_driver', 'driver'] and
                    value == 'none'):
                rpc_device[field] = None
            else:
                rpc_device[field] = getattr(device, field)
            if field == 'sriov_numvfs':
                # Save desired number of VFs in extra_info since
                # sriov_numvfs may get overwritten by concurrent inventory report
                expected_numvfs = {'expected_numvfs': rpc_device[field]}
                if not rpc_device['extra_info']:
                    rpc_device['extra_info'] = str(expected_numvfs)
                else:
                    # extra_info is stored as the repr() of a dict;
                    # literal_eval round-trips it safely.
                    extra_info = literal_eval(rpc_device['extra_info'])
                    extra_info.update(expected_numvfs)
                    rpc_device['extra_info'] = str(extra_info)

    rpc_device.save()
    if sriov_update:
        # Ask the conductor to re-apply the SR-IOV device configuration.
        pecan.request.rpcapi.update_sriov_config(pecan.request.context,
                                                 host['uuid'])
    return PCIDevice.convert_with_links(rpc_device)
def patch(self, load_id, patch):
    """Update an existing load."""
    # TODO (dsulliva)
    # This is a stub. We will need to place reasonable limits on what can
    # be patched as we add to the upgrade system. This portion of the API
    # likely will not be publicly accessible.
    rpc_load = objects.load.get_by_uuid(pecan.request.context, load_id)

    utils.validate_patch(patch)
    patch_obj = jsonpatch.JsonPatch(patch)

    try:
        load = Load(**jsonpatch.apply_patch(rpc_load.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Persist only the attributes the patch actually modified.
    for name in objects.load.fields:
        patched_value = getattr(load, name)
        if rpc_load[name] != patched_value:
            rpc_load[name] = patched_value

    rpc_load.save()
    return Load.convert_with_links(rpc_load)
def patch(self, itrapdest_uuid, patch):
    """Update an existing itrapdest.

    :param itrapdest_uuid: UUID of a itrapdest.
    :param patch: a json PATCH document to apply to this itrapdest.
    """
    rpc_itrapdest = objects.trapdest.get_by_uuid(pecan.request.context,
                                                 itrapdest_uuid)
    try:
        itrap = TrapDest(**jsonpatch.apply_patch(
            rpc_itrapdest.as_dict(),
            jsonpatch.JsonPatch(patch)))
    except api_utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update only the fields that have changed; remember the IP address
    # if it changed so it can be logged after a successful save.
    ip = ""
    for field in objects.trapdest.fields:
        if rpc_itrapdest[field] != getattr(itrap, field):
            rpc_itrapdest[field] = getattr(itrap, field)
            if field == 'ip_address':
                ip = rpc_itrapdest[field]

    rpc_itrapdest.save()

    if ip:
        # Bug fix: the format string was missing its closing parenthesis
        # ("ip (%s" -> "ip (%s)"), producing a malformed log message.
        LOG.debug("Modify destination IP: uuid (%s), ip (%s)",
                  itrapdest_uuid, ip)

    return TrapDest.convert_with_links(rpc_itrapdest)
def patch(self, icommunity_uuid, patch):
    """Update an existing icommunity.

    :param icommunity_uuid: UUID of a icommunity.
    :param patch: a json PATCH document to apply to this icommunity.
    """
    rpc_icommunity = objects.community.get_by_uuid(pecan.request.context,
                                                   icommunity_uuid)
    try:
        icomm = Community(**jsonpatch.apply_patch(
            rpc_icommunity.as_dict(), jsonpatch.JsonPatch(patch)))
    except api_utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Apply the delta; remember the community string if it changed so it
    # can be logged once the save succeeds.
    comm = ""
    for name in objects.community.fields:
        new_value = getattr(icomm, name)
        if rpc_icommunity[name] != new_value:
            rpc_icommunity[name] = new_value
            if name == 'community':
                comm = rpc_icommunity[name]

    rpc_icommunity.save()

    if comm:
        LOG.debug("Modify community: uuid (%s) community (%s) ",
                  icommunity_uuid, comm)

    return Community.convert_with_links(rpc_icommunity)
def patch(self, lvg_uuid, patch):
    """Update an existing lvg.

    :param lvg_uuid: UUID of the local volume group to modify.
    :param patch: a json PATCH document to apply.
    """
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    LOG.debug("patch_data: %s" % patch)

    rpc_lvg = objects.lvg.get_by_uuid(pecan.request.context, lvg_uuid)

    # replace ihost_uuid and ilvg_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)
    ihost = None
    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            ihost = objects.host.get_by_uuid(pecan.request.context,
                                             p['value'])
            p['value'] = ihost.id
        elif p['path'] == '/capabilities':
            # capabilities arrive as a JSON-encoded string
            p['value'] = jsonutils.loads(p['value'])

    # Bug fix: 'ihost' was only bound when the patch contained
    # '/ihost_uuid', so the HTTPNotFound handler below could raise a
    # NameError.  Always resolve the owning host up front.
    if ihost is None:
        ihost = pecan.request.dbapi.ihost_get(rpc_lvg.forihostid)

    # perform checks based on the current vs.requested modifications
    _lvg_pre_patch_checks(rpc_lvg, patch_obj)

    try:
        lvg = LVG(**jsonpatch.apply_patch(rpc_lvg.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Semantic Checks
    _check("modify", lvg.as_dict())
    try:
        # Update only the fields that have changed
        for field in objects.lvg.fields:
            if rpc_lvg[field] != getattr(lvg, field):
                rpc_lvg[field] = getattr(lvg, field)

        # Update mate controller LVG type for cinder-volumes so both
        # controllers agree on the lvm_type capability.
        if lvg.lvm_vg_name == constants.LVG_CINDER_VOLUMES:
            mate_lvg = _get_mate_ctrl_lvg(lvg.as_dict())
            lvm_type = lvg.capabilities.get(
                constants.LVG_CINDER_PARAM_LVM_TYPE)
            if mate_lvg and lvm_type:
                mate_lvg_caps = mate_lvg['capabilities']
                mate_type = mate_lvg_caps.get(
                    constants.LVG_CINDER_PARAM_LVM_TYPE)
                if lvm_type != mate_type:
                    mate_lvg_caps[
                        constants.LVG_CINDER_PARAM_LVM_TYPE] = lvm_type
                    pecan.request.dbapi.ilvg_update(
                        mate_lvg['uuid'],
                        {'capabilities': mate_lvg_caps})

        # Save
        rpc_lvg.save()
        return LVG.convert_with_links(rpc_lvg)
    except exception.HTTPNotFound:
        msg = _("LVG update failed: host %s vg %s : patch %s" %
                (ihost['hostname'], lvg.lvm_vg_name, patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, storage_backend_uuid, patch):
    """Update the current Storage Backend.

    :param storage_backend_uuid: UUID of the storage backend to modify.
    :param patch: a json PATCH document to apply.
    """
    if self._from_isystems:
        raise exception.OperationNotPermitted

    # This is the base class: delegate into the appropriate backend
    # class to perform the update.  Everything that previously followed
    # this return statement (a second, generic patch implementation,
    # partly commented out) was unreachable dead code and has been
    # removed.
    return _patch(storage_backend_uuid, patch)
def patch(self, partition_uuid, patch):
    """Update an existing partition.

    :param partition_uuid: UUID of the disk partition to modify.
    :param patch: a json PATCH document to apply to this partition.
    """
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    LOG.info("Partition patch_data: %s" % patch)

    rpc_partition = objects.partition.get_by_uuid(pecan.request.context,
                                                  partition_uuid)

    # replace ihost_uuid and partition_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)
    ihost = None
    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            ihost = objects.host.get_by_uuid(pecan.request.context,
                                             p['value'])
            p['value'] = ihost.id

    # Perform checks based on the current vs.requested modifications.
    # Fall back to the partition's owning host when the patch did not
    # carry an '/ihost_uuid' entry.
    if not ihost:
        ihost = pecan.request.dbapi.ihost_get(rpc_partition.forihostid)
        LOG.info("from partition get ihost=%s" % ihost.hostname)
    _partition_pre_patch_checks(rpc_partition, patch_obj, ihost)

    try:
        partition = Partition(
            **jsonpatch.apply_patch(rpc_partition.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Perform post patch semantic checks.
    _semantic_checks(constants.PARTITION_CMD_MODIFY, partition.as_dict())
    # Mark the partition as being modified; the status is persisted via
    # the field-copy loop below.
    partition.status = constants.PARTITION_MODIFYING_STATUS
    try:
        # Update only the fields that have changed
        for field in objects.partition.fields:
            if rpc_partition[field] != getattr(partition, field):
                rpc_partition[field] = getattr(partition, field)
        # Save.
        rpc_partition.save()

        # Instruct puppet to implement the change.
        pecan.request.rpcapi.update_partition_config(pecan.request.context,
                                                     rpc_partition)
        return Partition.convert_with_links(rpc_partition)
    except exception.HTTPNotFound:
        msg = _("Partition update failed: host %s partition %s : patch %s"
                % (ihost['hostname'], partition['device_path'], patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, remotelogging_uuid, patch):
    """Update the remotelogging configuration.

    :param remotelogging_uuid: UUID of the remotelogging entry.
    :param patch: a json PATCH document to apply.
    """
    rpc_remotelogging = objects.remotelogging.get_by_uuid(
        pecan.request.context, remotelogging_uuid)

    # Extract and strip an optional '/action' directive from the patch;
    # the 'break' keeps the in-loop list mutation safe (at most one
    # element is removed, then iteration stops).
    action = None
    for p in patch:
        if '/action' in p['path']:
            value = p['value']
            patch.remove(p)
            if value in (constants.APPLY_ACTION,
                         constants.INSTALL_ACTION):
                action = value
            break

    patch_obj = jsonpatch.JsonPatch(patch)

    # Reject attempts to modify immutable identifier fields.
    state_rel_path = ['/uuid', '/id']
    if any(p['path'] in state_rel_path for p in patch_obj):
        raise wsme.exc.ClientSideError(
            _("The following fields can not be "
              "modified: %s" % state_rel_path))

    try:
        remotelogging = RemoteLogging(**jsonpatch.apply_patch(
            rpc_remotelogging.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Semantic validation; returns the (possibly normalized) dict.
    remotelogging = _check_remotelogging_data("modify",
                                              remotelogging.as_dict())

    try:
        # Update only the fields that have changed
        for field in objects.remotelogging.fields:
            if rpc_remotelogging[field] != remotelogging[field]:
                rpc_remotelogging[field] = remotelogging[field]

        rpc_remotelogging.save()

        if action == constants.APPLY_ACTION:
            # perform rpc to conductor to perform config apply
            pecan.request.rpcapi.update_remotelogging_config(
                pecan.request.context, timeout=REMOTELOGGING_RPC_TIMEOUT)

        return RemoteLogging.convert_with_links(rpc_remotelogging)

    except exception.HTTPNotFound:
        msg = _("remotelogging update failed: %s : patch %s" %
                (remotelogging['ip_address'], patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, datanetwork_id, patch):
    """Update an existing datanetwork.

    Only the 'mtu' and 'description' attributes may be modified.

    :param datanetwork_id: UUID of the datanetwork to modify.
    :param patch: a json PATCH document to apply.
    """
    rpc_datanetwork = \
        objects.datanetwork.get_by_uuid(
            pecan.request.context, datanetwork_id)

    utils.validate_patch(patch)
    patch_obj = jsonpatch.JsonPatch(patch)
    LOG.info("datanetwork patch_obj=%s" % patch_obj)

    try:
        datanetwork = DataNetwork(
            **jsonpatch.apply_patch(rpc_datanetwork.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    LOG.info("rpc_datanetwork=%s datanetwork=%s" %
             (rpc_datanetwork.as_dict(), datanetwork))

    # Copy across only the attributes the patch modified.
    fields = objects.datanetwork.fields
    for field in fields:
        if (field in rpc_datanetwork and
                rpc_datanetwork[field] != getattr(datanetwork, field)):
            rpc_datanetwork[field] = getattr(datanetwork, field)

    delta = rpc_datanetwork.obj_what_changed()
    if not delta:
        # Nothing changed; return the unmodified resource.
        return DataNetwork.convert_with_links(rpc_datanetwork)

    delta_list = list(delta)
    allowed_updates = ['mtu', 'description']
    if not set(delta_list).issubset(allowed_updates):
        # Bug fix: the error previously reported
        # set(allowed_updates).difference(delta_list) -- i.e. the allowed
        # attributes that were NOT changed -- instead of the disallowed
        # attributes that WERE changed.
        extra = set(delta_list).difference(allowed_updates)
        raise wsme.exc.ClientSideError(
            _("DataNetwork '%s' attributes '%s' may not be modified ") %
            (rpc_datanetwork.uuid, extra))

    if 'mtu' in delta_list:
        # May raise if the new MTU conflicts with assigned interfaces.
        self._check_update_mtu(rpc_datanetwork)
    # (The previous implementation also built a 'values' dict here that
    # was never used; rpc_datanetwork.save() persists the delta.)

    rpc_datanetwork.save()
    return DataNetwork.convert_with_links(rpc_datanetwork)
def patch(self, ptp_uuid, patch):
    """Update the current PTP configuration.

    :param ptp_uuid: UUID of the PTP configuration entry.
    :param patch: a json PATCH document to apply.
    """
    rpc_ptp = objects.ptp.get_by_uuid(pecan.request.context, ptp_uuid)

    patch_obj = jsonpatch.JsonPatch(patch)

    # Reject attempts to modify immutable identifier fields.
    state_rel_path = ['/uuid', '/id']
    if any(p['path'] in state_rel_path for p in patch_obj):
        raise wsme.exc.ClientSideError(_("The following fields can not be "
                                         "modified: %s" % state_rel_path))

    try:
        ptp = PTP(**jsonpatch.apply_patch(rpc_ptp.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Work with a plain dict from here on; also used by the error
    # message in the HTTPNotFound handler below.
    ptp = ptp.as_dict()

    try:
        # Update only the fields that have changed
        for field in objects.ptp.fields:
            if rpc_ptp[field] != ptp[field]:
                rpc_ptp[field] = ptp[field]

        delta = rpc_ptp.obj_what_changed()
        if 'transport' in delta and rpc_ptp.transport == constants.PTP_TRANSPORT_UDP:
            # Extra validation when switching to UDP transport
            # (presumably checks interface addressing -- see
            # _validate_ptp_udp_transport).
            self._validate_ptp_udp_transport()

        if delta:
            rpc_ptp.save()
            # perform rpc to conductor to perform config apply
            pecan.request.rpcapi.update_ptp_config(pecan.request.context)
        else:
            LOG.info("No PTP config changes")

        return PTP.convert_with_links(rpc_ptp)

    except exception.HTTPNotFound:
        msg = _("PTP update failed: %s %s %s : patch %s" %
                (ptp['mode'], ptp['transport'], ptp['mechanism'], patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, id, patch):
    """Update an existing lldp tlv."""
    if self._from_lldp_agents:
        raise exception.OperationNotPermitted
    if self._from_lldp_neighbours:
        raise exception.OperationNotPermitted

    rpc_tlv = objects.lldp_tlv.get_by_id(pecan.request.context, id)

    # Rewrite external agent/neighbour UUID references into the internal
    # database ids.  An unresolvable neighbour clears the reference
    # instead of failing the request.
    patch_obj = jsonpatch.JsonPatch(patch)
    for entry in patch_obj:
        if entry['path'] == '/agent_uuid':
            entry['path'] = '/agent_id'
            agent = objects.lldp_agent.get_by_uuid(pecan.request.context,
                                                   entry['value'])
            entry['value'] = agent.id
        if entry['path'] == '/neighbour_uuid':
            entry['path'] = '/neighbour_id'
            try:
                neighbour = objects.lldp_neighbour.get_by_uuid(
                    pecan.request.context, entry['value'])
                entry['value'] = neighbour.id
            except exception.SysinvException as e:
                LOG.exception(e)
                entry['value'] = None

    try:
        tlv = LLDPTLV(
            **jsonpatch.apply_patch(rpc_tlv.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Persist only the attributes the patch actually changed.
    for name in objects.lldp_tlv.fields:
        fresh = getattr(tlv, name)
        if rpc_tlv[name] != fresh:
            rpc_tlv[name] = fresh

    rpc_tlv.save()
    return LLDPTLV.convert_with_links(rpc_tlv)
def patch(self, uuid, patch):
    """Update an existing lldp neighbour."""
    if self._from_ihosts:
        raise exception.OperationNotPermitted
    if self._from_ports:
        raise exception.OperationNotPermitted

    rpc_neighbour = objects.lldp_neighbour.get_by_uuid(
        pecan.request.context, uuid)

    # Rewrite external host/port UUID references into the internal
    # database ids.  An unresolvable port clears the reference instead
    # of failing the request.
    patch_obj = jsonpatch.JsonPatch(patch)
    for entry in patch_obj:
        if entry['path'] == '/host_uuid':
            entry['path'] = '/host_id'
            owner = objects.host.get_by_uuid(pecan.request.context,
                                             entry['value'])
            entry['value'] = owner.id
        if entry['path'] == '/port_uuid':
            entry['path'] = '/port_id'
            try:
                port = objects.port.get_by_uuid(pecan.request.context,
                                                entry['value'])
                entry['value'] = port.id
            except exception.SysinvException as e:
                LOG.exception(e)
                entry['value'] = None

    try:
        neighbour = LLDPNeighbour(
            **jsonpatch.apply_patch(rpc_neighbour.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Persist only the attributes the patch actually changed.
    for name in objects.lldp_neighbour.fields:
        fresh = getattr(neighbour, name)
        if rpc_neighbour[name] != fresh:
            rpc_neighbour[name] = fresh

    rpc_neighbour.save()
    return LLDPNeighbour.convert_with_links(rpc_neighbour)
def patch(self, service_name, patch):
    """Update the service configuration."""
    rpc_service = objects.service.\
        get_by_service_name(pecan.request.context, str(service_name))

    # The database id is immutable through this API.
    patch_obj = jsonpatch.JsonPatch(patch)
    state_rel_path = ['/id']
    if any(p['path'] in state_rel_path for p in patch_obj):
        raise wsme.exc.ClientSideError(_("The following fields can not be "
                                         "modified: %s" % state_rel_path))

    try:
        service = Service(
            **jsonpatch.apply_patch(rpc_service.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Semantic validation; returns the (possibly normalized) dict.
    service = _check_service_data("modify", service.as_dict())

    try:
        # Persist the delta, then ask the conductor to apply the new
        # service configuration.
        for name in objects.service.fields:
            if rpc_service[name] != service[name]:
                rpc_service[name] = service[name]

        rpc_service.save()

        pecan.request.rpcapi.update_service_config(
            pecan.request.context, service_name, do_apply=True)

        return Service.convert_with_links(rpc_service)
    except exception.HTTPNotFound:
        msg = _("service update failed: %s : patch %s" %
                (service_name, patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, pv_uuid, patch):
    """Update an existing pv.

    :param pv_uuid: UUID of the physical volume to modify.
    :param patch: a json PATCH document to apply.
    """
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    LOG.debug("patch_data: %s" % patch)

    rpc_pv = objects.pv.get_by_uuid(pecan.request.context, pv_uuid)

    # replace ihost_uuid and ipv_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)
    ihost = None
    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            ihost = objects.host.get_by_uuid(pecan.request.context,
                                             p['value'])
            p['value'] = ihost.id

    # Bug fix: 'ihost' was only bound when the patch contained
    # '/ihost_uuid', so the HTTPNotFound handler below could raise a
    # NameError.  Always resolve the owning host up front.
    if ihost is None:
        ihost = pecan.request.dbapi.ihost_get(rpc_pv.forihostid)

    try:
        pv = PV(**jsonpatch.apply_patch(rpc_pv.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Semantic Checks
    _check("modify", pv.as_dict())
    try:
        # Update only the fields that have changed
        for field in objects.pv.fields:
            if rpc_pv[field] != getattr(pv, field):
                rpc_pv[field] = getattr(pv, field)

        # Save and return
        rpc_pv.save()
        return PV.convert_with_links(rpc_pv)
    except exception.HTTPNotFound:
        msg = _("PV update failed: host %s pv %s : patch %s" %
                (ihost['hostname'], pv.lvm_pv_name, patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, port_uuid, patch):
    """Update an existing port."""
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_port = objects.ethernet_port.get_by_uuid(
        pecan.request.context, port_uuid)

    # Rewrite external host/interface UUID references into the internal
    # database ids.  An unresolvable interface clears the reference
    # instead of failing the request.
    patch_obj = jsonpatch.JsonPatch(patch)
    for entry in patch_obj:
        if entry['path'] == '/host_uuid':
            entry['path'] = '/host_id'
            owner = objects.host.get_by_uuid(pecan.request.context,
                                             entry['value'])
            entry['value'] = owner.id
        if entry['path'] == '/interface_uuid':
            entry['path'] = '/interface_id'
            try:
                iface = objects.interface.get_by_uuid(
                    pecan.request.context, entry['value'])
                entry['value'] = iface.id
            except exception.SysinvException:
                entry['value'] = None

    try:
        port = EthernetPort(
            **jsonpatch.apply_patch(rpc_port.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Persist only the attributes the patch actually changed.
    for name in objects.ethernet_port.fields:
        fresh = getattr(port, name)
        if rpc_port[name] != fresh:
            rpc_port[name] = fresh

    rpc_port.save()
    return EthernetPort.convert_with_links(rpc_port)
def patch(self, device_uuid, patch):
    """Update an existing device."""
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_device = objects.pci_device.get_by_uuid(pecan.request.context,
                                                device_uuid)

    # Rewrite the external host UUID reference into the internal host id.
    patch_obj = jsonpatch.JsonPatch(patch)
    for entry in patch_obj:
        if entry['path'] == '/host_uuid':
            entry['path'] = '/host_id'
            owner = objects.host.get_by_uuid(pecan.request.context,
                                             entry['value'])
            entry['value'] = owner.id

    try:
        device = PCIDevice(
            **jsonpatch.apply_patch(rpc_device.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Semantic checks on the owning host (see _check_host).
    host = pecan.request.dbapi.ihost_get(device.host_id)
    _check_host(host)

    # Persist only the attributes the patch actually changed, verifying
    # each one is permitted to be modified (see _check_field).
    for name in objects.pci_device.fields:
        fresh = getattr(device, name)
        if rpc_device[name] != fresh:
            _check_field(name)
            rpc_device[name] = fresh

    rpc_device.save()
    return PCIDevice.convert_with_links(rpc_device)
def update_many(self, isystem_uuid, patch):
    """Update the current controller_fs configuration.

    :param isystem_uuid: UUID of the owning system.
    :param patch: a list of json PATCH documents, one per filesystem.
    """
    if self._from_isystems and not isystem_uuid:
        raise exception.InvalidParameterValue(_(
            "System id not specified."))

    # Validate input filesystem names
    controller_fs_list = pecan.request.dbapi.controller_fs_get_list()
    valid_fs_list = []
    if controller_fs_list:
        valid_fs_list = {fs.name: fs.size for fs in controller_fs_list}

    # NOTE(review): reinstall_required/reboot_required are always False
    # in this method; they are only forwarded to the conductor below.
    reinstall_required = False
    reboot_required = False
    force_resize = False
    modified_fs = []

    # First pass: extract and strip an optional '/action' directive.
    # NOTE(review): patch.remove() mutates the list being iterated; the
    # inner 'break' limits but does not fully remove that hazard when
    # multiple '/action' entries are present -- confirm against callers.
    for p_list in patch:
        p_obj_list = jsonpatch.JsonPatch(p_list)
        for p_obj in p_obj_list:
            if p_obj['path'] == '/action':
                value = p_obj['value']
                patch.remove(p_list)
                if value == constants.FORCE_ACTION:
                    force_resize = True
                    LOG.info("Force action resize selected")
                    break

    # Second pass: validate each requested filesystem resize.
    for p_list in patch:
        p_obj_list = jsonpatch.JsonPatch(p_list)
        for p_obj in p_obj_list:
            if p_obj['path'] == '/name':
                fs_display_name = p_obj['value']
                # The CGCS filesystem is exposed under a display name;
                # map it back to the internal name.
                if fs_display_name == constants.FILESYSTEM_DISPLAY_NAME_CGCS:
                    fs_name = constants.FILESYSTEM_NAME_CGCS
                else:
                    fs_name = fs_display_name
            elif p_obj['path'] == '/size':
                size = p_obj['value']

        # NOTE(review): fs_name/size are unbound here if a patch entry
        # lacks '/name' or '/size' -- presumably guaranteed by the API
        # schema; confirm.
        if fs_name not in valid_fs_list.keys() or fs_display_name == constants.FILESYSTEM_NAME_CGCS:
            msg = _("ControllerFs update failed: invalid filesystem "
                    "'%s' " % fs_display_name)
            raise wsme.exc.ClientSideError(msg)
        elif not cutils.is_int_like(size):
            msg = _("ControllerFs update failed: filesystem '%s' "
                    "size must be an integer " % fs_display_name)
            raise wsme.exc.ClientSideError(msg)
        elif int(size) <= int(valid_fs_list[fs_name]):
            # Only grow operations are supported.
            msg = _("ControllerFs update failed: size for filesystem '%s' "
                    "should be bigger than %s " % (
                        fs_display_name, valid_fs_list[fs_name]))
            raise wsme.exc.ClientSideError(msg)
        elif (fs_name == constants.FILESYSTEM_NAME_CGCS and
              StorageBackendConfig.get_backend(pecan.request.dbapi,
                                               constants.CINDER_BACKEND_CEPH)):
            if force_resize:
                LOG.warn("Force resize ControllerFs: %s, though Ceph "
                         "storage backend is configured" % fs_display_name)
            else:
                raise wsme.exc.ClientSideError(
                    _("ControllerFs %s size is not modifiable as Ceph is "
                      "configured. Update size via Ceph Storage Pools." %
                      fs_display_name))

        if fs_name in constants.SUPPORTED_REPLICATED_FILEYSTEM_LIST:
            # DRBD-replicated filesystems cannot be resized while a
            # previous resize is still syncing.
            if utils.is_drbd_fs_resizing():
                raise wsme.exc.ClientSideError(
                    _("A drbd sync operation is currently in progress. "
                      "Retry again later.")
                )

        modified_fs += [fs_name]

    # Build the prospective filesystem list with the patches applied.
    controller_fs_list_new = []
    for fs in controller_fs_list:
        replaced = False
        for p_list in patch:
            p_obj_list = jsonpatch.JsonPatch(p_list)
            for p_obj in p_obj_list:
                if p_obj['path'] == '/name':
                    if p_obj['value'] == constants.FILESYSTEM_DISPLAY_NAME_CGCS:
                        p_obj['value'] = constants.FILESYSTEM_NAME_CGCS
                    if p_obj['value'] == fs['name']:
                        try:
                            controller_fs_list_new += [ControllerFs(
                                **jsonpatch.apply_patch(fs.as_dict(), p_obj_list))]
                            replaced = True
                            break
                        except utils.JSONPATCH_EXCEPTIONS as e:
                            raise exception.PatchError(patch=p_list, reason=e)
            if replaced:
                break
        if not replaced:
            controller_fs_list_new += [fs]

    # Verify cgts-vg has room for the aggregate growth.
    cgtsvg_growth_gib = _check_controller_multi_fs_data(
        pecan.request.context, controller_fs_list_new)

    if _check_controller_state():
        _check_controller_multi_fs(controller_fs_list_new,
                                   cgtsvg_growth_gib=cgtsvg_growth_gib)
        for fs in controller_fs_list_new:
            if fs.name in modified_fs:
                value = {'size': fs.size}
                if fs.replicated:
                    # Replicated filesystems track resize progress.
                    value.update({'state': constants.CONTROLLER_FS_RESIZING_IN_PROGRESS})
                pecan.request.dbapi.controller_fs_update(fs.uuid, value)

    try:
        # perform rpc to conductor to perform config apply
        pecan.request.rpcapi.update_storage_config(
            pecan.request.context,
            update_storage=False,
            reinstall_required=reinstall_required,
            reboot_required=reboot_required,
            filesystem_list=modified_fs
        )
    except Exception as e:
        msg = _("Failed to update filesystem size ")
        LOG.error("%s with patch %s with exception %s" % (msg, patch, e))
        raise wsme.exc.ClientSideError(msg)
def patch(self, cephmon_uuid, patch):
    """Update the current storage configuration.

    :param cephmon_uuid: UUID of the ceph monitor entry.
    :param patch: a json PATCH document to apply.
    """
    if not StorageBackendConfig.has_backend_configured(
            pecan.request.dbapi,
            constants.CINDER_BACKEND_CEPH
    ):
        raise wsme.exc.ClientSideError(
            _("Ceph backend is not configured.")
        )

    rpc_cephmon = objects.ceph_mon.get_by_uuid(pecan.request.context,
                                               cephmon_uuid)
    is_ceph_mon_gib_changed = False

    # Drop controller-targeting entries; only ceph_mon_gib is patchable.
    patch = [p for p in patch if '/controller' not in p['path']]

    # Check if either ceph mon size or disk has to change.
    for p in patch:
        if '/ceph_mon_gib' in p['path']:
            if rpc_cephmon.ceph_mon_gib != p['value']:
                is_ceph_mon_gib_changed = True

    if not is_ceph_mon_gib_changed:
        LOG.info("ceph_mon parameters are not changed")
        raise wsme.exc.ClientSideError(
            _("Warning: ceph_mon parameters are not changed."))

    # replace isystem_uuid and ceph_mon_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)
    state_rel_path = ['/uuid', '/id', '/forihostid',
                      '/device_node', '/device_path']
    if any(p['path'] in state_rel_path for p in patch_obj):
        raise wsme.exc.ClientSideError(_("The following fields can not be "
                                         "modified: %s" % state_rel_path))

    try:
        cephmon = CephMon(**jsonpatch.apply_patch(
            rpc_cephmon.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    if is_ceph_mon_gib_changed:
        # Validate the new monitor size against the old one and against
        # the controller filesystem layout.
        _check_ceph_mon(cephmon.as_dict(), rpc_cephmon.as_dict())
        controller_fs_utils._check_controller_fs(
            ceph_mon_gib_new=cephmon.ceph_mon_gib)

    # Copy across only the attributes the patch modified.
    for field in objects.ceph_mon.fields:
        if rpc_cephmon[field] != cephmon.as_dict()[field]:
            rpc_cephmon[field] = cephmon.as_dict()[field]

    LOG.info("SYS_I cephmon: %s " % cephmon.as_dict())

    try:
        rpc_cephmon.save()
    except exception.HTTPNotFound:
        msg = _("Ceph Mon update failed: uuid %s : "
                " patch %s"
                % (rpc_cephmon.uuid, patch))
        raise wsme.exc.ClientSideError(msg)

    if is_ceph_mon_gib_changed:
        # Update the task for ceph storage backend.
        StorageBackendConfig.update_backend_states(
            pecan.request.dbapi,
            constants.CINDER_BACKEND_CEPH,
            task=constants.SB_TASK_RESIZE_CEPH_MON_LV
        )

        # Mark controllers and storage node as Config out-of-date.
        pecan.request.rpcapi.update_storage_config(
            pecan.request.context,
            update_storage=is_ceph_mon_gib_changed,
            reinstall_required=False
        )

    return CephMon.convert_with_links(rpc_cephmon)
def patch(self, memory_uuid, patch):
    """Update an existing memory.

    :param memory_uuid: UUID of the per-NUMA-node memory entry.
    :param patch: a json PATCH document to apply.
    """
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_port = objects.memory.get_by_uuid(pecan.request.context,
                                          memory_uuid)
    # Resolve the owning host; older objects may only carry the UUID.
    if 'forihostid' in rpc_port:
        ihostId = rpc_port['forihostid']
    else:
        ihostId = rpc_port['ihost_uuid']

    host_id = pecan.request.dbapi.ihost_get(ihostId)
    if host_id['personality'] == constants.STORAGE:
        # Memory settings are not modifiable on storage nodes.
        raise exception.OperationNotPermitted

    # Collect the hugepage/reserved-memory values requested by the patch.
    vm_hugepages_nr_2M_pending = None
    vm_hugepages_nr_1G_pending = None
    vswitch_hugepages_reqd = None
    vswitch_hugepages_size_mib = None
    platform_reserved_mib = None
    vm_pending_as_percentage = None
    for p in patch:
        if p['path'] == '/platform_reserved_mib':
            platform_reserved_mib = p['value']
        if p['path'] == '/vm_hugepages_nr_2M_pending':
            vm_hugepages_nr_2M_pending = p['value']
        if p['path'] == '/vm_hugepages_nr_1G_pending':
            vm_hugepages_nr_1G_pending = p['value']
        if p['path'] == '/vswitch_hugepages_reqd':
            vswitch_hugepages_reqd = p['value']
        if p['path'] == '/vswitch_hugepages_size_mib':
            vswitch_hugepages_size_mib = p['value']
        if p['path'] == '/vm_pending_as_percentage':
            vm_pending_as_percentage = p['value']

    if vm_pending_as_percentage is None:
        # Not supplied: keep the currently stored setting.
        vm_pending_as_percentage = rpc_port["vm_pending_as_percentage"]
    elif vm_pending_as_percentage == "True":
        # When expressed as a percentage, mirror the pending values into
        # the percentage fields of the patch as well.
        if vm_hugepages_nr_2M_pending is not None:
            patch.append({
                'op': 'replace',
                'path': '/vm_hugepages_2M_percentage',
                'value': vm_hugepages_nr_2M_pending
            })
        if vm_hugepages_nr_1G_pending is not None:
            patch.append({
                'op': 'replace',
                'path': '/vm_hugepages_1G_percentage',
                'value': vm_hugepages_nr_1G_pending
            })

    # The host must be locked
    if host_id:
        _check_host(host_id)
    else:
        raise wsme.exc.ClientSideError(
            _("Hostname or uuid must be defined"))

    if cutils.host_has_function(host_id, constants.WORKER):
        try:
            # Semantics checks and update hugepage memory accounting
            patch = _check_huge_values(
                rpc_port, patch,
                vm_hugepages_nr_2M_pending,
                vm_hugepages_nr_1G_pending,
                vswitch_hugepages_reqd,
                vswitch_hugepages_size_mib,
                platform_reserved_mib,
                vm_pending_as_percentage)
        except wsme.exc.ClientSideError as e:
            # Prefix the error with the NUMA node for operator context.
            inode = pecan.request.dbapi.inode_get(
                inode_id=rpc_port.forinodeid)
            numa_node = inode.numa_node
            msg = _('Processor {0}:'.format(numa_node)) + e.message
            raise wsme.exc.ClientSideError(msg)
    else:
        # Standard/system controller or storage node
        if (vm_hugepages_nr_2M_pending is not None or
                vm_hugepages_nr_1G_pending is not None or
                vswitch_hugepages_reqd is not None or
                vswitch_hugepages_size_mib is not None):
            raise wsme.exc.ClientSideError(
                _("Hugepages memory configuration is not supported for this node."))

    # Semantics checks for platform memory
    _check_memory(pecan.request.dbapi, rpc_port, host_id,
                  platform_reserved_mib,
                  vm_hugepages_nr_2M_pending,
                  vm_hugepages_nr_1G_pending,
                  vswitch_hugepages_reqd,
                  vswitch_hugepages_size_mib,
                  vm_pending_as_percentage)

    # only allow patching allocated_function and capabilities
    # replace ihost_uuid and inode_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            ihost = objects.host.get_by_uuid(pecan.request.context,
                                             p['value'])
            p['value'] = ihost.id
        if p['path'] == '/inode_uuid':
            p['path'] = '/forinodeid'
            try:
                inode = objects.node.get_by_uuid(pecan.request.context,
                                                 p['value'])
                p['value'] = inode.id
            except exception.SysinvException:
                # Unknown node: clear the reference instead of failing.
                p['value'] = None

    try:
        memory = Memory(
            **jsonpatch.apply_patch(rpc_port.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update only the fields that have changed
    for field in objects.memory.fields:
        if rpc_port[field] != getattr(memory, field):
            rpc_port[field] = getattr(memory, field)

    rpc_port.save()

    # Hugepage changes require regenerating the kernel boot arguments.
    pecan.request.rpcapi.update_grub_config(pecan.request.context,
                                            host_id['uuid'],
                                            force=True)

    return Memory.convert_with_links(rpc_port)
def patch(self, sensor_uuid, patch):
    """Update an existing sensor.

    Applies a JSON patch to the sensor and, unless the request came from
    hwmon itself, forwards the change to the hwmon service before saving.
    """
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_sensor = objects.sensor.get_by_uuid(pecan.request.context,
                                            sensor_uuid)
    # Re-fetch the record as its datatype-specific object so the
    # datatype-specific fields participate in the patch/compare below.
    if rpc_sensor.datatype == 'discrete':
        rpc_sensor = objects.sensor_discrete.get_by_uuid(
            pecan.request.context, sensor_uuid)
    elif rpc_sensor.datatype == 'analog':
        rpc_sensor = objects.sensor_analog.get_by_uuid(
            pecan.request.context, sensor_uuid)
    else:
        raise wsme.exc.ClientSideError(_("Invalid datatype=%s" %
                                         rpc_sensor.datatype))

    # Keep a pristine copy for the 'force_action' suppress rollback below.
    rpc_sensor_orig = copy.deepcopy(rpc_sensor)

    # replace ihost_uuid and isensorgroup_uuid with corresponding
    utils.validate_patch(patch)
    patch_obj = jsonpatch.JsonPatch(patch)
    my_host_uuid = None
    for p in patch_obj:
        if p['path'] == '/host_uuid':
            p['path'] = '/host_id'
            host = objects.host.get_by_uuid(pecan.request.context,
                                            p['value'])
            p['value'] = host.id
            my_host_uuid = host.uuid

        if p['path'] == '/sensorgroup_uuid':
            p['path'] = '/sensorgroup_id'
            try:
                sensorgroup = objects.sensorgroup.get_by_uuid(
                    pecan.request.context, p['value'])
                p['value'] = sensorgroup.id
                # NOTE(review): p['value'] was just overwritten with the
                # id, so this logs the id twice rather than uuid/id.
                LOG.info("sensorgroup_uuid=%s id=%s" % (p['value'],
                                                        sensorgroup.id))
            except exception.SysinvException:
                p['value'] = None

    try:
        sensor = Sensor(**jsonpatch.apply_patch(rpc_sensor.as_dict(),
                                                patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update only the fields that have changed
    if rpc_sensor.datatype == 'discrete':
        fields = objects.sensor_discrete.fields
    else:
        fields = objects.sensor_analog.fields

    for field in fields:
        if rpc_sensor[field] != getattr(sensor, field):
            rpc_sensor[field] = getattr(sensor, field)

    delta = rpc_sensor.obj_what_changed()
    sensor_suppress_attrs = ['suppress']
    force_action = False
    if any(x in delta for x in sensor_suppress_attrs):
        valid_suppress = ['True', 'False', 'true', 'false', 'force_action']
        if rpc_sensor.suppress.lower() not in valid_suppress:
            raise wsme.exc.ClientSideError(_("Invalid suppress value, "
                                             "select 'True' or 'False'"))
        elif rpc_sensor.suppress.lower() == 'force_action':
            # 'force_action' is a pseudo-value: restore the original
            # suppress setting and only force the action through.
            LOG.info("suppress=%s" % rpc_sensor.suppress.lower())
            rpc_sensor.suppress = rpc_sensor_orig.suppress
            force_action = True

    self._semantic_modifiable_fields(patch_obj, force_action)

    if not pecan.request.user_agent.startswith('hwmon'):
        # Change did not originate from hwmon: propagate it to the hwmon
        # service before persisting locally.
        hwmon_sensor = cutils.removekeys_nonhwmon(
            rpc_sensor.as_dict())

        if not my_host_uuid:
            # host_uuid was not in the patch; look it up from the record.
            host = objects.host.get_by_uuid(pecan.request.context,
                                            rpc_sensor.host_id)
            my_host_uuid = host.uuid
            LOG.warn("Missing host_uuid updated=%s" % my_host_uuid)

        hwmon_sensor.update({'host_uuid': my_host_uuid})

        hwmon_response = hwmon_api.sensor_modify(
            self._api_token, self._hwmon_address, self._hwmon_port,
            hwmon_sensor,
            constants.HWMON_DEFAULT_TIMEOUT_IN_SECS)

        if not hwmon_response:
            hwmon_response = {'status': 'fail',
                              'reason': 'no response',
                              'action': 'retry'}

        if hwmon_response['status'] != 'pass':
            msg = _("HWMON has returned with "
                    "a status of %s, reason: %s, "
                    "recommended action: %s") % (
                hwmon_response.get('status'),
                hwmon_response.get('reason'),
                hwmon_response.get('action'))

            # With force_action a hwmon failure is logged, not fatal.
            if force_action:
                LOG.error(msg)
            else:
                raise wsme.exc.ClientSideError(msg)

    rpc_sensor.save()
    return Sensor.convert_with_links(rpc_sensor)
def patch(self, isystem_uuid, patch):
    """Update an existing isystem.

    :param isystem_uuid: UUID of a isystem.
    :param patch: a json PATCH document to apply to this isystem.
    """
    rpc_isystem = objects.system.get_by_uuid(pecan.request.context,
                                             isystem_uuid)
    system_dict = rpc_isystem.as_dict()
    updates = self._get_updates(patch)
    change_https = False
    change_sdn = False
    change_dc_role = False
    vswitch_type = None

    # Validate the individual patch entries; several paths are read-only
    # or handled specially (and stripped from the patch) below.
    # prevent description field from being updated
    for p in jsonpatch.JsonPatch(patch):
        if p['path'] == '/software_version':
            raise wsme.exc.ClientSideError(_("software_version field "
                                             "cannot be modified."))

        if p['path'] == '/system_type':
            if rpc_isystem is not None:
                if rpc_isystem.system_type is not None:
                    raise wsme.exc.ClientSideError(_("system_type field "
                                                     "cannot be "
                                                     "modified."))

        if (p['path'] == '/system_mode' and
                p.get('value') != rpc_isystem.system_mode):
            if rpc_isystem is not None and \
                    rpc_isystem.system_mode is not None:
                # system_mode changes are only valid on AIO builds, and
                # only between the duplex variants.
                if rpc_isystem.system_type != constants.TIS_AIO_BUILD:
                    raise wsme.exc.ClientSideError(
                        "system_mode can only be modified on an "
                        "AIO system")
                system_mode_options = [constants.SYSTEM_MODE_DUPLEX,
                                       constants.SYSTEM_MODE_DUPLEX_DIRECT]
                new_system_mode = p['value']
                if rpc_isystem.system_mode == \
                        constants.SYSTEM_MODE_SIMPLEX:
                    msg = _("Cannot modify system mode when it is "
                            "already set to %s." % rpc_isystem.system_mode)
                    raise wsme.exc.ClientSideError(msg)
                elif new_system_mode == constants.SYSTEM_MODE_SIMPLEX:
                    msg = _("Cannot modify system mode to simplex when "
                            "it is set to %s " % rpc_isystem.system_mode)
                    raise wsme.exc.ClientSideError(msg)
                if new_system_mode not in system_mode_options:
                    raise wsme.exc.ClientSideError(
                        "Invalid value for system_mode, it can only"
                        " be modified to '%s' or '%s'" %
                        (constants.SYSTEM_MODE_DUPLEX,
                         constants.SYSTEM_MODE_DUPLEX_DIRECT))

        if p['path'] == '/timezone':
            timezone = p['value']
            if not os.path.isfile("/usr/share/zoneinfo/%s" % timezone):
                raise wsme.exc.ClientSideError(_("Timezone file %s "
                                                 "does not exist." %
                                                 timezone))

        # The following paths are handled out-of-band below and removed
        # from the patch so apply_patch does not see them.
        if p['path'] == '/sdn_enabled':
            sdn_enabled = p['value'].lower()
            patch.remove(p)

        if p['path'] == '/https_enabled':
            https_enabled = p['value'].lower()
            patch.remove(p)

        if p['path'] == '/distributed_cloud_role':
            distributed_cloud_role = p['value']
            patch.remove(p)

        if p['path'] == '/vswitch_type':
            vswitch_type = p['value']
            patch.remove(p)

        if p['path'] == '/security_feature':
            security_feature = p['value']
            patch.remove(p)

    try:
        patched_system = jsonpatch.apply_patch(system_dict,
                                               jsonpatch.JsonPatch(patch))
    except api_utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    if 'sdn_enabled' in updates:
        if sdn_enabled != rpc_isystem['capabilities']['sdn_enabled']:
            self._check_hosts()
            change_sdn = True
            if sdn_enabled == 'true':
                self._verify_sdn_enabled()
                patched_system['capabilities']['sdn_enabled'] = True
            else:
                self._verify_sdn_disabled()
                patched_system['capabilities']['sdn_enabled'] = False

    if 'https_enabled' in updates:
        if https_enabled != rpc_isystem['capabilities']['https_enabled']:
            change_https = True
            if https_enabled == 'true':
                patched_system['capabilities']['https_enabled'] = True
            else:
                patched_system['capabilities']['https_enabled'] = False
        else:
            raise wsme.exc.ClientSideError(_("https_enabled is already set"
                                             " as %s" % https_enabled))

    if 'distributed_cloud_role' in updates:
        # At this point dc role cannot be changed after config_controller
        # and config_subcloud
        if rpc_isystem['distributed_cloud_role'] is None and \
                distributed_cloud_role in \
                [constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER,
                 constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD]:
            change_dc_role = True
            patched_system['distributed_cloud_role'] = distributed_cloud_role
        else:
            raise wsme.exc.ClientSideError(
                _("distributed_cloud_role is already set "
                  " as %s" % rpc_isystem['distributed_cloud_role']))

    if 'vswitch_type' in updates:
        if vswitch_type == rpc_isystem['capabilities']['vswitch_type']:
            raise wsme.exc.ClientSideError(_("vswitch_type is already set"
                                             " as %s" % vswitch_type))
        patched_system['capabilities']['vswitch_type'] = vswitch_type

    if 'security_feature' in updates:
        # Security feature string must be translated from user values to
        # kernel options
        if (security_feature in
                constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS):
            security_feature_value = \
                constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS[security_feature]
            patched_system['security_feature'] = security_feature_value
        else:
            raise wsme.exc.ClientSideError(
                _("Unexpected value %s specified for "
                  "security_feature" % security_feature))

    # Update only the fields that have changed; remember which notable
    # fields changed so the matching conductor RPCs fire below.
    name = ""
    contact = ""
    location = ""
    system_mode = ""
    timezone = ""
    capabilities = {}
    distributed_cloud_role = ""
    security_feature = ""

    for field in objects.system.fields:
        if rpc_isystem[field] != patched_system[field]:
            rpc_isystem[field] = patched_system[field]
            if field == 'name':
                name = rpc_isystem[field]
            if field == 'contact':
                contact = rpc_isystem[field]
            if field == 'location':
                location = rpc_isystem[field]
            if field == 'system_mode':
                system_mode = rpc_isystem[field]
            if field == 'timezone':
                timezone = rpc_isystem[field]
            if field == 'capabilities':
                capabilities = rpc_isystem[field]
            if field == 'distributed_cloud_role':
                distributed_cloud_role = rpc_isystem[field]
            if field == 'security_feature':
                security_feature = rpc_isystem[field]

    delta = rpc_isystem.obj_what_changed()
    delta_handle = list(delta)
    rpc_isystem.save()

    # Fan the applied changes out to the conductor.
    if name:
        LOG.info("update system name")
        pecan.request.rpcapi.configure_isystemname(pecan.request.context,
                                                   name)
    if name or location or contact:
        LOG.info("update SNMP config")
        pecan.request.rpcapi.update_snmp_config(pecan.request.context)
    if 'system_mode' in delta_handle:
        LOG.info("update system mode %s" % system_mode)
        pecan.request.rpcapi.update_system_mode_config(
            pecan.request.context)
    if timezone:
        LOG.info("update system timezone to %s" % timezone)
        pecan.request.rpcapi.configure_system_timezone(
            pecan.request.context)
    if capabilities:
        if change_sdn:
            LOG.info("update sdn to %s" % capabilities)
            pecan.request.rpcapi.update_sdn_enabled(pecan.request.context)
        if change_https:
            LOG.info("update https to %s" % capabilities)
            pecan.request.rpcapi.configure_system_https(
                pecan.request.context)
        if vswitch_type:
            LOG.info("update vswitch_type to %s" % capabilities)
            pecan.request.rpcapi.update_vswitch_type(pecan.request.context)

    if distributed_cloud_role and change_dc_role:
        LOG.info("update distributed cloud role to %s" %
                 distributed_cloud_role)
        pecan.request.rpcapi.update_distributed_cloud_role(
            pecan.request.context)

    if 'security_feature' in delta_handle:
        LOG.info("update security_feature %s" % security_feature)
        pecan.request.rpcapi.update_security_feature_config(
            pecan.request.context)

    return System.convert_with_links(rpc_isystem)
def update_many(self, isystem_uuid, patch):
    """Update the current controller_fs configuration.

    Accepts a list of JSON patch documents (one per filesystem), each
    identifying its target via '/name' and its new size via '/size'.
    """
    if self._from_isystems and not isystem_uuid:
        raise exception.InvalidParameterValue(_(
            "System id not specified."))

    # Validate input filesystem names
    controller_fs_list = pecan.request.dbapi.controller_fs_get_list()
    valid_fs_list = []
    if controller_fs_list:
        # Map of existing filesystem name -> current size.
        valid_fs_list = {fs.name: fs.size for fs in controller_fs_list}

    reinstall_required = False
    reboot_required = False
    modified_fs = []
    update_fs_list = []
    for p_list in patch:
        p_obj_list = jsonpatch.JsonPatch(p_list)
        for p_obj in p_obj_list:
            if p_obj['path'] == '/name':
                fs_name = p_obj['value']
                if fs_name in update_fs_list:
                    msg = _("Duplicate fs_name "
                            "'%s' in parameter list" % fs_name)
                    raise wsme.exc.ClientSideError(msg)
                else:
                    update_fs_list.append(fs_name)
            elif p_obj['path'] == '/size':
                size = p_obj['value']

        # Validate the name/size pair: known filesystem, integer size,
        # and sizes may only grow.
        if fs_name not in valid_fs_list.keys():
            msg = _("ControllerFs update failed: invalid filesystem "
                    "'%s' " % fs_name)
            raise wsme.exc.ClientSideError(msg)
        elif not cutils.is_int_like(size):
            msg = _("ControllerFs update failed: filesystem '%s' "
                    "size must be an integer " % fs_name)
            raise wsme.exc.ClientSideError(msg)
        elif int(size) <= int(valid_fs_list[fs_name]):
            msg = _("ControllerFs update failed: size for filesystem '%s' "
                    "should be bigger than %s " % (
                        fs_name, valid_fs_list[fs_name]))
            raise wsme.exc.ClientSideError(msg)

        if fs_name in constants.SUPPORTED_REPLICATED_FILEYSTEM_LIST:
            # Replicated filesystems cannot be resized while a drbd
            # resize is still in flight.
            if utils.is_drbd_fs_resizing():
                raise wsme.exc.ClientSideError(
                    _("A drbd sync operation is currently in progress. "
                      "Retry again later."))

        modified_fs += [fs_name]

    # Build the prospective filesystem list with each patch applied to
    # its matching record; untouched records carry over unchanged.
    controller_fs_list_new = []
    for fs in controller_fs_list:
        replaced = False
        for p_list in patch:
            p_obj_list = jsonpatch.JsonPatch(p_list)
            for p_obj in p_obj_list:
                if p_obj['value'] == fs['name']:
                    try:
                        controller_fs_list_new += [ControllerFs(
                            **jsonpatch.apply_patch(fs.as_dict(),
                                                    p_obj_list))]
                        replaced = True
                        break
                    except utils.JSONPATCH_EXCEPTIONS as e:
                        raise exception.PatchError(patch=p_list, reason=e)
            if replaced:
                break
        if not replaced:
            controller_fs_list_new += [fs]

    cgtsvg_growth_gib = _check_controller_multi_fs_data(
        pecan.request.context,
        controller_fs_list_new)

    if _check_controller_state():
        _check_controller_multi_fs(controller_fs_list_new,
                                   cgtsvg_growth_gib=cgtsvg_growth_gib)
        for fs in controller_fs_list_new:
            if fs.name in modified_fs:
                value = {'size': fs.size}
                if fs.replicated:
                    # Resizing a replicated fs is tracked via state.
                    value.update({
                        'state':
                            constants.CONTROLLER_FS_RESIZING_IN_PROGRESS})
                pecan.request.dbapi.controller_fs_update(fs.uuid, value)

    try:
        # perform rpc to conductor to perform config apply
        pecan.request.rpcapi.update_storage_config(
            pecan.request.context,
            update_storage=False,
            reinstall_required=reinstall_required,
            reboot_required=reboot_required,
            filesystem_list=modified_fs)
    except Exception as e:
        msg = _("Failed to update filesystem size ")
        LOG.error("%s with patch %s with exception %s" % (msg, patch, e))
        raise wsme.exc.ClientSideError(msg)
def patch(self, ntp_uuid, patch):
    """Update the current NTP configuration.

    :param ntp_uuid: UUID of the NTP record to modify.
    :param patch: a json PATCH document to apply to this record.
    """
    if self._from_isystems:
        raise exception.OperationNotPermitted

    rpc_ntp = objects.ntp.get_by_uuid(pecan.request.context, ntp_uuid)

    # Extract and strip the requested action from the patch so it is not
    # applied to the object itself.
    action = None
    for p in patch:
        if '/action' in p['path']:
            value = p['value']
            patch.remove(p)
            if value in (constants.APPLY_ACTION, constants.INSTALL_ACTION):
                action = value
            break

    # replace isystem_uuid and intp_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)

    # Reject attempts to modify read-only/state fields.
    # BUGFIX: 'forisystemid' and 'isystem_uuid' previously lacked the
    # leading '/', so the membership test below could never match them
    # (JSON patch paths always start with '/'; compare the equivalent
    # list in the infra network controller).
    state_rel_path = ['/uuid', '/id', '/forisystemid', '/isystem_uuid']
    if any(p['path'] in state_rel_path for p in patch_obj):
        raise wsme.exc.ClientSideError(_("The following fields can not be "
                                         "modified: %s" % state_rel_path))

    for p in patch_obj:
        if p['path'] == '/isystem_uuid':
            isystem = objects.system.get_by_uuid(pecan.request.context,
                                                 p['value'])
            p['path'] = '/forisystemid'
            p['value'] = isystem.id

    try:
        # Keep an original copy of the ntp data
        ntp_orig = rpc_ntp.as_dict()
        ntp = NTP(**jsonpatch.apply_patch(rpc_ntp.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    LOG.warn("ntp %s" % ntp.as_dict())
    ntp = _check_ntp_data("modify", ntp.as_dict())

    try:
        # Update only the fields that have changed
        for field in objects.ntp.fields:
            if rpc_ntp[field] != ntp[field]:
                rpc_ntp[field] = ntp[field]

        delta = rpc_ntp.obj_what_changed()
        if delta:
            rpc_ntp.save()
            if action == constants.APPLY_ACTION:
                # perform rpc to conductor to perform config apply
                pecan.request.rpcapi.update_ntp_config(
                    pecan.request.context)
        else:
            LOG.info("No NTP config changes")

        return NTP.convert_with_links(rpc_ntp)

    except Exception as e:
        # rollback database changes
        for field in ntp_orig:
            if rpc_ntp[field] != ntp_orig[field]:
                rpc_ntp[field] = ntp_orig[field]
        rpc_ntp.save()

        msg = _("Failed to update the NTP configuration")
        # BUGFIX: use isinstance() — comparing the exception instance to
        # the class with '==' was always False, so the detailed message
        # below was unreachable.
        if isinstance(e, exception.HTTPNotFound):
            # NOTE(review): 'isystem' is only bound when the patch
            # contained /isystem_uuid — confirm this path is only reached
            # in that case.
            msg = _("NTP update failed: system %s if %s : patch %s"
                    % (isystem['systemname'], ntp['ifname'], patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, infra_uuid, patch):
    """Update the current infrastructure network config.

    Applies a JSON patch to the infra network record and, on an 'apply'
    action, triggers the conductor to apply the config.
    """
    if self._from_isystems:
        raise exception.OperationNotPermitted

    rpc_infra = objects.infra_network.get_by_uuid(pecan.request.context,
                                                  infra_uuid)

    # Keep a pristine copy for the semantic comparison below.
    infra_orig = copy.deepcopy(rpc_infra)

    # Extract and strip the requested action from the patch so it is not
    # applied to the object itself.
    action = None
    for p in patch:
        if '/action' in p['path']:
            value = p['value']
            patch.remove(p)
            if value in (constants.APPLY_ACTION, constants.INSTALL_ACTION):
                action = value
            break

    # replace isystem_uuid and iinfra_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)

    # Read-only/state fields that may not be modified from this level.
    state_rel_path = ['/uuid', '/id', '/forisystemid', '/isystem_uuid',
                      '/created_at', '/updated_at',
                      ]
    if any(p['path'] in state_rel_path for p in patch_obj):
        raise wsme.exc.ClientSideError(_("The following fields can not be "
                                         "modified: %s from this level." %
                                         state_rel_path))

    self._check_host_states()
    if action == constants.APPLY_ACTION:
        self._check_host_interfaces()

    for p in patch_obj:
        if p['path'] == '/isystem_uuid':
            isystem = objects.system.get_by_uuid(pecan.request.context,
                                                 p['value'])
            p['path'] = '/forisystemid'
            p['value'] = isystem.id

    try:
        infra = InfraNetwork(**jsonpatch.apply_patch(
            rpc_infra.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    infra = self._check_infra_data(infra.as_dict(), infra_orig.as_dict())

    changed_fields = []
    try:
        # Update only the fields that have changed
        for field in objects.infra_network.fields:
            if rpc_infra[field] != infra[field]:
                rpc_infra[field] = infra[field]
                changed_fields.append(field)

        rpc_infra.save()

        if action == constants.APPLY_ACTION:
            # perform rpc to conductor to perform config apply
            pecan.request.rpcapi.update_infra_config(pecan.request.context)

        return InfraNetwork.convert_with_links(rpc_infra)
    except exception.HTTPNotFound:
        # NOTE(review): 'isystem' is only bound when the patch contained
        # /isystem_uuid — confirm this path is only reached in that case.
        msg = _(
            "Infrastructure IP update failed: system %s infra %s: patch %s"
            % (isystem['systemname'], infra, patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, memory_uuid, patch):
    """Update an existing memory.

    Applies a JSON patch to a memory record after semantic validation
    (host locked, hugepage accounting).
    """
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_port = objects.memory.get_by_uuid(pecan.request.context,
                                          memory_uuid)
    # The record may carry either the host id or the host uuid; resolve
    # whichever is present to fetch the host.
    if 'forihostid' in rpc_port:
        ihostId = rpc_port['forihostid']
    else:
        ihostId = rpc_port['ihost_uuid']

    host_id = pecan.request.dbapi.ihost_get(ihostId)

    # Collect the values of interest from the incoming patch document.
    vm_hugepages_nr_2M_pending = None
    vm_hugepages_nr_1G_pending = None
    vswitch_hugepages_reqd = None
    vswitch_hugepages_size_mib = None
    platform_reserved_mib = None
    for p in patch:
        if p['path'] == '/platform_reserved_mib':
            platform_reserved_mib = p['value']
        if p['path'] == '/vm_hugepages_nr_2M_pending':
            vm_hugepages_nr_2M_pending = p['value']
        if p['path'] == '/vm_hugepages_nr_1G_pending':
            vm_hugepages_nr_1G_pending = p['value']
        if p['path'] == '/vswitch_hugepages_reqd':
            vswitch_hugepages_reqd = p['value']
        if p['path'] == '/vswitch_hugepages_size_mib':
            vswitch_hugepages_size_mib = p['value']

    # The host must be locked
    if host_id:
        _check_host(host_id)
    else:
        raise wsme.exc.ClientSideError(_(
            "Hostname or uuid must be defined"))

    try:
        # Semantics checks and update hugepage memory accounting
        patch = _check_huge_values(rpc_port, patch,
                                   vm_hugepages_nr_2M_pending,
                                   vm_hugepages_nr_1G_pending,
                                   vswitch_hugepages_reqd,
                                   vswitch_hugepages_size_mib,
                                   platform_reserved_mib)
    except wsme.exc.ClientSideError as e:
        # Prefix the error with the NUMA processor it applies to.
        inode = pecan.request.dbapi.inode_get(inode_id=rpc_port.forinodeid)
        numa_node = inode.numa_node
        msg = _('Processor {0}:'.format(numa_node)) + e.message
        raise wsme.exc.ClientSideError(msg)

    # Semantics checks for platform memory
    _check_memory(rpc_port, host_id, platform_reserved_mib,
                  vm_hugepages_nr_2M_pending, vm_hugepages_nr_1G_pending,
                  vswitch_hugepages_reqd, vswitch_hugepages_size_mib)

    # only allow patching allocated_function and capabilities
    # replace ihost_uuid and inode_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)

    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            ihost = objects.host.get_by_uuid(pecan.request.context,
                                             p['value'])
            p['value'] = ihost.id

        if p['path'] == '/inode_uuid':
            p['path'] = '/forinodeid'
            try:
                inode = objects.node.get_by_uuid(pecan.request.context,
                                                 p['value'])
                p['value'] = inode.id
            except exception.SysinvException:
                p['value'] = None

    try:
        memory = Memory(**jsonpatch.apply_patch(rpc_port.as_dict(),
                                                patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update only the fields that have changed
    for field in objects.memory.fields:
        if rpc_port[field] != getattr(memory, field):
            rpc_port[field] = getattr(memory, field)

    rpc_port.save()
    return Memory.convert_with_links(rpc_port)
def _patch(storlvm_uuid, patch):
    """Modify the LVM storage backend identified by storlvm_uuid.

    Applies a JSON patch after pre-checks, restricts the delta to
    'services'/'capabilities'/'task', runs common and LVM-specific
    semantic checks, then persists and applies the backend changes.
    """
    # Obtain current storage object.
    rpc_storlvm = objects.storage_lvm.get_by_uuid(pecan.request.context,
                                                  storlvm_uuid)

    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/capabilities':
            # Capabilities arrive as a JSON string; decode to a dict.
            p['value'] = jsonutils.loads(p['value'])

    ostorlvm = copy.deepcopy(rpc_storlvm)

    # perform checks based on the current vs.requested modifications
    _pre_patch_checks(rpc_storlvm, patch_obj)

    # Obtain a storage object with the patch applied.
    try:
        storlvm_config = StorageLVM(**jsonpatch.apply_patch(
            rpc_storlvm.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update current storage object.
    for field in objects.storage_lvm.fields:
        if (field in storlvm_config.as_dict() and
                rpc_storlvm[field] != storlvm_config.as_dict()[field]):
            rpc_storlvm[field] = storlvm_config.as_dict()[field]

    # Obtain the fields that have changed.
    delta = rpc_storlvm.obj_what_changed()
    # A no-op patch is only tolerated when the backend is in config-error
    # state (retrying the same settings is then legitimate).
    if len(delta) == 0 and \
            rpc_storlvm['state'] != constants.SB_STATE_CONFIG_ERR:
        raise wsme.exc.ClientSideError(
            _("No changes to the existing backend settings were detected."))

    allowed_attributes = ['services', 'capabilities', 'task']
    for d in delta:
        if d not in allowed_attributes:
            raise wsme.exc.ClientSideError(
                _("Can not modify '%s' with this operation." % d))

    LOG.info("SYS_I orig storage_lvm: %s " % ostorlvm.as_dict())
    LOG.info("SYS_I new storage_lvm: %s " % storlvm_config.as_dict())

    # Execute the common semantic checks for all backends, if backend is not
    # present this will not return
    api_helper.common_checks(constants.SB_API_OP_MODIFY,
                             rpc_storlvm.as_dict())

    # Run the backend specific semantic checks
    _check_backend_lvm(constants.SB_API_OP_MODIFY,
                       rpc_storlvm.as_dict(),
                       True)

    try:
        rpc_storlvm.save()

        # Enable the backend changes:
        _apply_backend_changes(constants.SB_API_OP_MODIFY,
                               rpc_storlvm)

        return StorageLVM.convert_with_links(rpc_storlvm)
    except exception.HTTPNotFound:
        msg = _("Storlvm update failed: storlvm %s : "
                " patch %s" % (storlvm_config, patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, stor_uuid, patch):
    """Update an existing stor.

    Handles journal relocation bookkeeping, OSD state transitions and
    the runtime manifest trigger after the record is saved.
    """
    if self._from_ihosts:
        raise exception.OperationNotPermitted
    if self._from_tier:
        raise exception.OperationNotPermitted

    try:
        rpc_stor = objects.storage.get_by_uuid(pecan.request.context,
                                               stor_uuid)
    except exception.ServerNotFound:
        raise wsme.exc.ClientSideError(_("No stor with the provided"
                                         " uuid: %s" % stor_uuid))

    # replace ihost_uuid and istor_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            ihost = objects.host.get_by_uuid(pecan.request.context,
                                             p['value'])
            p['value'] = ihost.id
        elif p['path'] == '/tier_uuid':
            p['path'] = '/fortierid'
            tier = objects.tier.get_by_uuid(pecan.request.context,
                                            p['value'])
            p['value'] = tier.id

    try:
        stor = Storage(**jsonpatch.apply_patch(rpc_stor.as_dict(),
                                               patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Semantic Checks
    _check_host(stor.as_dict())
    _check_disk(stor.as_dict())

    if (hasattr(stor, 'journal_size_mib') or
            hasattr(stor, 'journal_location')):
        _check_journal(rpc_stor, stor.as_dict())

    # Journal partitions can be either collocated with the OSD or external.
    # Any location change requires that the device_nodes of the remaining
    # journals of the external journal disk to be updated, therefore we back
    # up the external journal stor before updating it with the new value
    journal_stor_uuid = None
    if rpc_stor['journal_location'] != getattr(stor, 'journal_location'):
        if rpc_stor['uuid'] == getattr(stor, 'journal_location'):
            # journal partition becomes collocated, backup the prev journal
            journal_stor_uuid = rpc_stor['journal_location']
            setattr(stor, 'journal_size_mib',
                    CONF.journal.journal_default_size)
        else:
            # journal partition moves to external journal disk
            journal_stor_uuid = getattr(stor, 'journal_location')
    else:
        if (hasattr(stor, 'journal_size_mib') and
                rpc_stor['uuid'] == rpc_stor['journal_location']):
            raise wsme.exc.ClientSideError(
                _("Invalid update: Size of collocated journal is fixed."))

    # Update only the fields that have changed
    updated = False
    for field in objects.storage.fields:
        if rpc_stor[field] != getattr(stor, field):
            rpc_stor[field] = getattr(stor, field)
            updated = True

    if not updated:
        # None of the data fields have been updated, return!
        return Storage.convert_with_links(rpc_stor)

    # Set status for newly created OSD.
    if rpc_stor['function'] == constants.STOR_FUNCTION_OSD:
        ihost_id = rpc_stor['forihostid']
        ihost = pecan.request.dbapi.ihost_get(ihost_id)
        if ihost['operational'] == constants.OPERATIONAL_ENABLED:
            # We are running live manifests
            rpc_stor['state'] = constants.SB_STATE_CONFIGURING
        else:
            rpc_stor['state'] = constants.SB_STATE_CONFIGURING_ON_UNLOCK

    # Save istor
    rpc_stor.save()

    # Update device nodes for the journal disk
    if journal_stor_uuid:
        try:
            pecan.request.dbapi.journal_update_dev_nodes(journal_stor_uuid)
            # Refresh device node for current stor, if changed by prev call
            st = pecan.request.dbapi.istor_get(rpc_stor['id'])
            rpc_stor['journal_path'] = st.journal_path
        except Exception as e:
            # Best-effort: device-node refresh failures are logged only.
            LOG.exception(e)

    # Run runtime manifests to update configuration
    runtime_manifests = False
    if (rpc_stor['state'] == constants.SB_STATE_CONFIGURING and
            rpc_stor['function'] == constants.STOR_FUNCTION_OSD):
        runtime_manifests = True

    # NOTE(review): 'ihost' is (re)bound above only inside the OSD branch
    # or the '/ihost_uuid' patch loop — confirm it is always bound when
    # this call is reached for non-OSD stors.
    pecan.request.rpcapi.update_ceph_osd_config(pecan.request.context,
                                                ihost,
                                                rpc_stor['uuid'],
                                                runtime_manifests)

    return Storage.convert_with_links(rpc_stor)
def patch(self, cpu_uuid, patch):
    """Update an existing cpu.

    :param cpu_uuid: UUID of the cpu record to modify.
    :param patch: a json PATCH document to apply to this record.
    """
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_port = objects.cpu.get_by_uuid(pecan.request.context, cpu_uuid)

    # only allow patching allocated_function and capabilities
    # replace ihost_uuid and inode_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)

    from_profile = False
    action = None
    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            ihost = objects.host.get_by_uuid(pecan.request.context,
                                             p['value'])
            p['value'] = ihost.id

        if p['path'] == '/inode_uuid':
            p['path'] = '/forinodeid'
            try:
                inode = objects.node.get_by_uuid(pecan.request.context,
                                                 p['value'])
                p['value'] = inode.id
            except exception.SysinvException:
                p['value'] = None

        if p['path'] == '/allocated_function':
            from_profile = True

        if p['path'] == '/action':
            value = p['value']
            patch.remove(p)
            if value in (constants.APPLY_ACTION, constants.INSTALL_ACTION):
                action = value

    # Clean up patch: pull the pseudo-fields (per-processor core counts
    # and function) out of the patch and keep them as extra args.
    extra_args = {}
    for p in patch[:]:
        path = p['path']
        if 'num_cores_on_processor' in path:
            extra_args[path.lstrip('/')] = p['value']
            patch.remove(p)
        if path == '/function':
            extra_args[path.lstrip('/')] = p['value']
            patch.remove(p)

    # Apply patch
    try:
        cpu = CPU(**jsonpatch.apply_patch(rpc_port.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Re-attach the extracted pseudo-fields for the semantic checks.
    for key, val in extra_args.items():
        setattr(cpu, key, val)

    # Semantic checks
    ihost = pecan.request.dbapi.ihost_get(cpu.forihostid)
    _check_host(ihost)
    if not from_profile:
        _check_cpu(cpu, ihost)

    # Update only the fields that have changed
    try:
        for field in objects.cpu.fields:
            if rpc_port[field] != getattr(cpu, field):
                rpc_port[field] = getattr(cpu, field)
        rpc_port.save()

        if action == constants.APPLY_ACTION:
            # perform rpc to conductor to perform config apply
            pecan.request.rpcapi.update_cpu_config(pecan.request.context)

        return CPU.convert_with_links(rpc_port)
    except exception.HTTPNotFound:
        # BUGFIX: report the patched instance's uuid (cpu.uuid); the
        # original interpolated the wsme class attribute CPU.uuid, which
        # is a type descriptor rather than this record's value.
        msg = _("Cpu update failed: host %s cpu %s : patch %s"
                % (ihost.hostname, cpu.uuid, patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, isystem_uuid, patch):
    """Update an existing isystem.

    :param isystem_uuid: UUID of a isystem.
    :param patch: a json PATCH document to apply to this isystem.
    :returns: the updated System API object.
    :raises ClientSideError: on any semantic-validation failure.
    :raises PatchError: when the JSON patch cannot be applied.
    """
    rpc_isystem = objects.system.get_by_uuid(pecan.request.context,
                                             isystem_uuid)
    system_dict = rpc_isystem.as_dict()
    updates = self._get_updates(patch)
    change_https = False
    change_sdn = False
    change_dc_role = False
    vswitch_type = None

    # prevent description field from being updated
    # NOTE: several branches below remove entries from 'patch', and a
    # JsonPatch iterates that same underlying list.  Iterate a snapshot
    # so a removal cannot cause the next operation to be skipped.
    for p in list(jsonpatch.JsonPatch(patch)):
        if p['path'] == '/software_version':
            raise wsme.exc.ClientSideError(_("software_version field "
                                             "cannot be modified."))

        if p['path'] == '/system_type':
            if rpc_isystem is not None:
                if rpc_isystem.system_type is not None:
                    # system_type is write-once; reject any later change
                    raise wsme.exc.ClientSideError(_("system_type field "
                                                     "cannot be "
                                                     "modified."))

        if (p['path'] == '/system_mode' and
                p.get('value') != rpc_isystem.system_mode):
            if rpc_isystem is not None and \
                    rpc_isystem.system_mode is not None:
                if rpc_isystem.system_type != constants.TIS_AIO_BUILD:
                    raise wsme.exc.ClientSideError(
                        "system_mode can only be modified on an "
                        "AIO system")
                system_mode_options = [constants.SYSTEM_MODE_DUPLEX,
                                       constants.SYSTEM_MODE_DUPLEX_DIRECT]
                new_system_mode = p['value']
                # Allow modification to system mode during bootstrap. Once
                # the initial configuration is complete, this type of
                # request will be bound to the conditions below.
                if cutils.is_initial_config_complete():
                    if rpc_isystem.system_mode == \
                            constants.SYSTEM_MODE_SIMPLEX:
                        msg = _("Cannot modify system mode when it is "
                                "already set to %s." %
                                rpc_isystem.system_mode)
                        raise wsme.exc.ClientSideError(msg)
                    elif new_system_mode == constants.SYSTEM_MODE_SIMPLEX:
                        msg = _("Cannot modify system mode to simplex when "
                                "it is set to %s " %
                                rpc_isystem.system_mode)
                        raise wsme.exc.ClientSideError(msg)
                else:
                    system_mode_options.append(
                        constants.SYSTEM_MODE_SIMPLEX)

                if new_system_mode not in system_mode_options:
                    raise wsme.exc.ClientSideError(
                        "Invalid value for system_mode, it can only"
                        " be modified to '%s' or '%s'" %
                        (constants.SYSTEM_MODE_DUPLEX,
                         constants.SYSTEM_MODE_DUPLEX_DIRECT))

        if p['path'] == '/timezone':
            timezone = p['value']
            if not os.path.isfile("/usr/share/zoneinfo/%s" % timezone):
                raise wsme.exc.ClientSideError(_("Timezone file %s "
                                                 "does not exist." %
                                                 timezone))

        # The following fields are validated/applied out-of-band below,
        # so they are removed from the patch before apply_patch() runs.
        if p['path'] == '/sdn_enabled':
            sdn_enabled = p['value'].lower()
            patch.remove(p)

        if p['path'] == '/https_enabled':
            https_enabled = p['value'].lower()
            patch.remove(p)

        if p['path'] == '/distributed_cloud_role':
            distributed_cloud_role = p['value']
            patch.remove(p)

        if p['path'] == '/vswitch_type':
            vswitch_type = p['value']
            patch.remove(p)

        if p['path'] == '/security_feature':
            security_feature = p['value']
            patch.remove(p)

    try:
        patched_system = jsonpatch.apply_patch(system_dict,
                                               jsonpatch.JsonPatch(patch))
    except api_utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    if 'sdn_enabled' in updates:
        if sdn_enabled != rpc_isystem['capabilities']['sdn_enabled']:
            self._check_hosts()
            change_sdn = True
            if sdn_enabled == 'true':
                self._verify_sdn_enabled()
                patched_system['capabilities']['sdn_enabled'] = True
            else:
                self._verify_sdn_disabled()
                patched_system['capabilities']['sdn_enabled'] = False

    if 'https_enabled' in updates:
        # Pre-check: if user is setting https_enabled to false
        # while 'ssl' cert is managed by cert-manager, return error
        # (Otherwise, cert-mon will turn https back on during
        # cert-renewal process)
        managed_by_cm = self._kube_op.kube_get_secret(
            constants.PLATFORM_CERT_SECRET_NAME,
            constants.CERT_NAMESPACE_PLATFORM_CERTS)
        if https_enabled == 'false' and managed_by_cm is not None:
            msg = "Certificate is currently being managed by cert-manager. " \
                  "Remove %s Certificate and Secret before disabling https." % \
                  constants.PLATFORM_CERT_SECRET_NAME
            raise wsme.exc.ClientSideError(_(msg))
        if https_enabled != rpc_isystem['capabilities']['https_enabled']:
            change_https = True
            if https_enabled == 'true':
                patched_system['capabilities']['https_enabled'] = True
            else:
                patched_system['capabilities']['https_enabled'] = False
        else:
            raise wsme.exc.ClientSideError(_("https_enabled is already set"
                                             " as %s" % https_enabled))

    if 'distributed_cloud_role' in updates:
        # At this point dc role cannot be changed after initial
        # configuration is complete
        if (rpc_isystem['distributed_cloud_role'] is not None and
                cutils.is_initial_config_complete()):
            raise wsme.exc.ClientSideError(
                _("distributed_cloud_role is already set "
                  " as %s" % rpc_isystem['distributed_cloud_role']))
        # allow set the role to None before the initial config
        # is complete
        elif ((distributed_cloud_role in
               [constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER,
                constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD] or
               distributed_cloud_role is None) and
              not cutils.is_initial_config_complete()):
            change_dc_role = True
            patched_system['distributed_cloud_role'] = \
                distributed_cloud_role
        else:
            raise wsme.exc.ClientSideError(
                _("Unexpected value %s specified"
                  " for distributed_cloud_role" % distributed_cloud_role))

    if 'vswitch_type' in updates:
        if vswitch_type == rpc_isystem['capabilities']['vswitch_type']:
            raise wsme.exc.ClientSideError(
                _("vswitch_type is already set"
                  " as %s" % vswitch_type))
        patched_system['capabilities']['vswitch_type'] = vswitch_type

    if 'security_feature' in updates:
        # Security feature string must be translated from user values to
        # kernel options
        if (security_feature in
                constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS):
            security_feature_value = \
                constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS[
                    security_feature]
            patched_system['security_feature'] = security_feature_value
        else:
            raise wsme.exc.ClientSideError(
                _("Unexpected value %s specified for "
                  "security_feature" % security_feature))

    # Update only the fields that have changed, remembering which of the
    # notification-worthy fields actually changed.
    name = ""
    system_mode = ""
    timezone = ""
    capabilities = {}
    distributed_cloud_role = ""
    security_feature = ""

    for field in objects.system.fields:
        if rpc_isystem[field] != patched_system[field]:
            rpc_isystem[field] = patched_system[field]
            if field == 'name':
                name = rpc_isystem[field]
            if field == 'system_mode':
                system_mode = rpc_isystem[field]
            if field == 'timezone':
                timezone = rpc_isystem[field]
            if field == 'capabilities':
                capabilities = rpc_isystem[field]
            if field == 'distributed_cloud_role':
                distributed_cloud_role = rpc_isystem[field]
            if field == 'security_feature':
                security_feature = rpc_isystem[field]

    # capture the delta before save() clears the changed-field tracking
    delta = rpc_isystem.obj_what_changed()
    delta_handle = list(delta)
    rpc_isystem.save()

    # Notify the conductor about each category of change.
    if name:
        LOG.info("update system name")
        pecan.request.rpcapi.configure_isystemname(pecan.request.context,
                                                   name)
    if 'system_mode' in delta_handle:
        LOG.info("update system mode %s" % system_mode)
        pecan.request.rpcapi.update_system_mode_config(
            pecan.request.context)
    if timezone:
        LOG.info("update system timezone to %s" % timezone)
        pecan.request.rpcapi.configure_system_timezone(
            pecan.request.context)
    if capabilities:
        if change_sdn:
            LOG.info("update sdn to %s" % capabilities)
            pecan.request.rpcapi.update_sdn_enabled(pecan.request.context)
        if change_https:
            LOG.info("update https to %s" % capabilities)
            pecan.request.rpcapi.configure_system_https(
                pecan.request.context)
        if vswitch_type:
            LOG.info("update vswitch_type to %s" % capabilities)
            pecan.request.rpcapi.update_vswitch_type(pecan.request.context)

    if distributed_cloud_role and change_dc_role:
        LOG.info("update distributed cloud role to %s" %
                 distributed_cloud_role)
        pecan.request.rpcapi.update_distributed_cloud_role(
            pecan.request.context)

    # check if we need to config the system controller database
    if (change_dc_role and distributed_cloud_role ==
            constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER):
        hosts = pecan.request.dbapi.ihost_get_by_personality(
            constants.CONTROLLER)
        # this is a replay case after the first host has been created
        if len(hosts) == 1:
            pecan.request.rpcapi.configure_system_controller(
                pecan.request.context, hosts[0])

    if 'security_feature' in delta_handle:
        LOG.info("update security_feature %s" % security_feature)
        pecan.request.rpcapi.update_security_feature_config(
            pecan.request.context)

    return System.convert_with_links(rpc_isystem)