class Controller(rest.RestController):
    """Version 1 API controller root.

    Each class attribute below is a pecan sub-controller instance; with
    pecan's object-dispatch routing the attribute name becomes the URL
    segment it serves (e.g. ``ihosts`` handles requests under
    ``/v1/ihosts``).
    """

    # NOTE: several assignments (e.g. helm_charts, controller_fs, ptp)
    # rebind a module name as a class attribute. This is safe: the RHS is
    # evaluated before the class-scope binding exists, so it resolves to
    # the imported module.
    isystems = system.SystemController()
    ihosts = host.HostController()
    helm_charts = helm_charts.HelmChartsController()
    inodes = node.NodeController()
    icpus = cpu.CPUController()
    imemorys = memory.MemoryController()
    iinterfaces = interface.InterfaceController()
    ports = port.PortController()
    ethernet_ports = ethernet_port.EthernetPortController()
    istors = storage.StorageController()
    ilvgs = lvg.LVGController()
    ipvs = pv.PVController()
    idisks = disk.DiskController()
    partitions = partition.PartitionController()
    iprofile = profile.ProfileController()
    itrapdest = trapdest.TrapDestController()
    icommunity = community.CommunityController()
    iuser = user.UserController()
    idns = dns.DNSController()
    intp = ntp.NTPController()
    ptp = ptp.PTPController()
    iextoam = network_oam.OAMNetworkController()
    controller_fs = controller_fs.ControllerFsController()
    storage_backend = storage_backend.StorageBackendController()
    storage_lvm = storage_lvm.StorageLVMController()
    storage_file = storage_file.StorageFileController()
    storage_external = storage_external.StorageExternalController()
    storage_ceph = storage_ceph.StorageCephController()
    storage_tiers = storage_tier.StorageTierController()
    storage_ceph_external = \
        storage_ceph_external.StorageCephExternalController()
    ceph_mon = ceph_mon.CephMonController()
    drbdconfig = drbdconfig.drbdconfigsController()
    addresses = address.AddressController()
    addrpools = address_pool.AddressPoolController()
    routes = route.RouteController()
    certificate = certificate.CertificateController()
    isensors = sensor.SensorController()
    isensorgroups = sensorgroup.SensorGroupController()
    loads = load.LoadController()
    pci_devices = pci_device.PCIDeviceController()
    upgrade = upgrade.UpgradeController()
    networks = network.NetworkController()
    interface_networks = interface_network.InterfaceNetworkController()
    service_parameter = service_parameter.ServiceParameterController()
    clusters = cluster.ClusterController()
    lldp_agents = lldp_agent.LLDPAgentController()
    lldp_neighbours = lldp_neighbour.LLDPNeighbourController()
    services = service.SMServiceController()
    servicenodes = servicenode.SMServiceNodeController()
    servicegroup = servicegroup.SMServiceGroupController()
    health = health.HealthController()
    registry_image = registry_image.RegistryImageController()
    remotelogging = remotelogging.RemoteLoggingController()
    sdn_controller = sdn_controller.SDNControllerController()
    license = license.LicenseController()
    labels = label.LabelController()
    fernet_repo = fernet_repo.FernetKeyController()
    apps = kube_app.KubeAppController()
    datanetworks = datanetwork.DataNetworkController()
    interface_datanetworks = interface_datanetwork.InterfaceDataNetworkController()
    host_fs = host_fs.HostFsController()

    @wsme_pecan.wsexpose(V1)
    def get(self):
        """Return the version-1 API description (media types and links)."""
        # NOTE: convert() is called on every request (rather than cached)
        # because it needs the host url from the current request object
        # to build the links.
        return V1.convert()
class StorageController(rest.RestController):
    """REST controller for istors (Ceph OSD / journal storage functions)."""

    # Sub-controller: disks scoped under an istor resource.
    idisks = disk.DiskController(from_ihosts=True, from_istor=True)
    "Expose idisks as a sub-element of istors"

    # Extra routed actions beyond the standard REST verbs.
    _custom_actions = {
        'detail': ['GET'],
    }

    def __init__(self, from_ihosts=False, from_tier=False):
        # When mounted under a host or a storage tier, the leading uuid in
        # collection requests identifies that parent, and direct item-level
        # operations (get_one/post/patch/delete) are not permitted.
        self._from_ihosts = from_ihosts
        self._from_tier = from_tier

    def _get_stors_collection(self, uuid, marker, limit, sort_key, sort_dir,
                              expand=False, resource_url=None):
        """Build a StorageCollection, scoped by host or tier if applicable.

        :param uuid: parent host or tier uuid (required when mounted under
                     a host or tier, otherwise ignored by the list query)
        :param marker: uuid of the last stor of the previous page
        :param limit: max number of items (validated/clamped by utils)
        :param expand: include full resources rather than summaries
        :param resource_url: url used for pagination links
        """
        if self._from_ihosts and not uuid:
            raise exception.InvalidParameterValue(_("Host id not specified."))

        if self._from_tier and not uuid:
            raise exception.InvalidParameterValue(
                _("Storage tier id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.storage.get_by_uuid(pecan.request.context,
                                                     marker)

        # Scope the DB query to the parent resource this controller is
        # mounted under (host, tier, or unscoped).
        if self._from_ihosts:
            stors = pecan.request.dbapi.istor_get_by_ihost(uuid, limit,
                                                           marker_obj,
                                                           sort_key=sort_key,
                                                           sort_dir=sort_dir)
        elif self._from_tier:
            stors = pecan.request.dbapi.istor_get_by_tier(uuid, limit,
                                                          marker_obj,
                                                          sort_key=sort_key,
                                                          sort_dir=sort_dir)
        else:
            stors = pecan.request.dbapi.istor_get_list(limit, marker_obj,
                                                       sort_key=sort_key,
                                                       sort_dir=sort_dir)

        return StorageCollection.convert_with_links(stors, limit,
                                                    url=resource_url,
                                                    expand=expand,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)

    @wsme_pecan.wsexpose(StorageCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def get_all(self, uuid=None, marker=None, limit=None,
                sort_key='id', sort_dir='asc'):
        """Retrieve a list of stors."""
        return self._get_stors_collection(uuid, marker, limit, sort_key,
                                          sort_dir)

    @wsme_pecan.wsexpose(StorageCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, ihost_uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of stors with detail (expanded resources)."""
        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "istors":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['stors', 'detail'])
        return self._get_stors_collection(ihost_uuid, marker, limit, sort_key,
                                          sort_dir, expand, resource_url)

    @wsme_pecan.wsexpose(Storage, types.uuid)
    def get_one(self, stor_uuid):
        """Retrieve information about the given stor."""
        # Item-level access is only allowed on the top-level controller.
        if self._from_ihosts:
            raise exception.OperationNotPermitted
        if self._from_tier:
            raise exception.OperationNotPermitted

        rpc_stor = objects.storage.get_by_uuid(pecan.request.context,
                                               stor_uuid)
        return Storage.convert_with_links(rpc_stor)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(Storage, body=Storage)
    def post(self, stor):
        """Create a new stor.

        Delegates to the module-level _create(); maps internal and
        subprocess failures to client-side errors.
        """
        if self._from_ihosts:
            raise exception.OperationNotPermitted
        if self._from_tier:
            raise exception.OperationNotPermitted

        try:
            stor = stor.as_dict()
            LOG.debug("stor post dict= %s" % stor)

            new_stor = _create(stor)
        except exception.SysinvException as e:
            LOG.exception(e)
            raise wsme.exc.ClientSideError(
                _("Invalid data: failed to create a storage object"))
        except subprocess.CalledProcessError as esub:
            LOG.exception(esub)
            raise wsme.exc.ClientSideError(
                _("Internal error: failed to create a storage object"))

        return Storage.convert_with_links(new_stor)

    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [StoragePatchType])
    @wsme_pecan.wsexpose(Storage, types.uuid, body=[StoragePatchType])
    def patch(self, stor_uuid, patch):
        """Update an existing stor via a JSON patch.

        Performs semantic checks, tracks journal relocation, persists the
        changes and triggers a conductor update for OSD configuration.
        """
        if self._from_ihosts:
            raise exception.OperationNotPermitted
        if self._from_tier:
            raise exception.OperationNotPermitted

        try:
            rpc_stor = objects.storage.get_by_uuid(pecan.request.context,
                                                   stor_uuid)
        except exception.ServerNotFound:
            raise wsme.exc.ClientSideError(_("No stor with the provided"
                                             " uuid: %s" % stor_uuid))
        # replace ihost_uuid and tier_uuid with the corresponding
        # internal DB foreign-key ids before applying the patch
        patch_obj = jsonpatch.JsonPatch(patch)
        for p in patch_obj:
            if p['path'] == '/ihost_uuid':
                p['path'] = '/forihostid'
                ihost = objects.host.get_by_uuid(pecan.request.context,
                                                 p['value'])
                p['value'] = ihost.id
            elif p['path'] == '/tier_uuid':
                p['path'] = '/fortierid'
                tier = objects.tier.get_by_uuid(pecan.request.context,
                                                p['value'])
                p['value'] = tier.id

        try:
            stor = Storage(**jsonpatch.apply_patch(rpc_stor.as_dict(),
                                                   patch_obj))
        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Semantic Checks
        _check_host(stor.as_dict())
        _check_disk(stor.as_dict())

        if (hasattr(stor, 'journal_size_mib') or
                hasattr(stor, 'journal_location')):
            _check_journal(rpc_stor, stor.as_dict())

        # Journal partitions can be either collocated with the OSD or external.
        # Any location change requires that the device_nodes of the remaining
        # journals of the external journal disk to be updated, therefore we back
        # up the external journal stor before updating it with the new value
        journal_stor_uuid = None
        if rpc_stor['journal_location'] != getattr(stor, 'journal_location'):
            if rpc_stor['uuid'] == getattr(stor, 'journal_location'):
                # journal partition becomes collocated, backup the prev journal
                journal_stor_uuid = rpc_stor['journal_location']
                setattr(stor, 'journal_size_mib',
                        CONF.journal.journal_default_size)
            else:
                # journal partition moves to external journal disk
                journal_stor_uuid = getattr(stor, 'journal_location')
        else:
            if (hasattr(stor, 'journal_size_mib') and
                    rpc_stor['uuid'] == rpc_stor['journal_location']):
                raise wsme.exc.ClientSideError(
                    _("Invalid update: Size of collocated journal is fixed."))

        # Update only the fields that have changed
        updated = False
        for field in objects.storage.fields:
            if rpc_stor[field] != getattr(stor, field):
                rpc_stor[field] = getattr(stor, field)
                updated = True

        if not updated:
            # None of the data fields have been updated, return!
            return Storage.convert_with_links(rpc_stor)

        # Set status for newly created OSD.
        if rpc_stor['function'] == constants.STOR_FUNCTION_OSD:
            ihost_id = rpc_stor['forihostid']
            ihost = pecan.request.dbapi.ihost_get(ihost_id)
            if ihost['operational'] == constants.OPERATIONAL_ENABLED:
                # We are running live manifests
                rpc_stor['state'] = constants.SB_STATE_CONFIGURING
            else:
                rpc_stor['state'] = constants.SB_STATE_CONFIGURING_ON_UNLOCK

        # Save istor
        rpc_stor.save()

        # Update device nodes for the journal disk
        if journal_stor_uuid:
            try:
                pecan.request.dbapi.journal_update_dev_nodes(journal_stor_uuid)
                # Refresh device node for current stor, if changed by prev call
                st = pecan.request.dbapi.istor_get(rpc_stor['id'])
                rpc_stor['journal_path'] = st.journal_path
            except Exception as e:
                # Best-effort refresh: failure is logged but not fatal.
                LOG.exception(e)

        # Run runtime manifests to update configuration
        runtime_manifests = False
        if (rpc_stor['state'] == constants.SB_STATE_CONFIGURING and
                rpc_stor['function'] == constants.STOR_FUNCTION_OSD):
            runtime_manifests = True

        # NOTE(review): 'ihost' is only bound above when the stor function
        # is OSD or when the patch contained '/ihost_uuid'; if neither
        # applies, this call would raise NameError. Presumably patches only
        # reach here for OSD stors — TODO confirm.
        pecan.request.rpcapi.update_ceph_osd_config(pecan.request.context,
                                                    ihost, rpc_stor['uuid'],
                                                    runtime_manifests)

        return Storage.convert_with_links(rpc_stor)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, stor_uuid):
        """Delete a stor.

        Only journal-function stors may be deleted, and only while the
        owning host is locked.
        """
        if self._from_ihosts:
            raise exception.OperationNotPermitted
        if self._from_tier:
            raise exception.OperationNotPermitted

        try:
            stor = pecan.request.dbapi.istor_get(stor_uuid)
        except Exception as e:
            LOG.exception(e)
            raise

        # Make sure that we are allowed to delete
        _check_host(stor)

        # Delete the stor if supported
        if stor.function == constants.STOR_FUNCTION_JOURNAL:
            # Host must be locked
            ihost_id = stor['forihostid']
            ihost = pecan.request.dbapi.ihost_get(ihost_id)
            if ihost['administrative'] != constants.ADMIN_LOCKED:
                raise wsme.exc.ClientSideError(_("Host %s must be locked." %
                                                 ihost['hostname']))
            self.delete_stor(stor_uuid)
        else:
            raise wsme.exc.ClientSideError(
                _("Deleting a Storage Function other than %s is not "
                  "supported on this setup") % constants.STOR_FUNCTION_JOURNAL)

    def delete_stor(self, stor_uuid):
        """Delete a stor: unconfigure it as needed, then remove it from DB."""
        stor = objects.storage.get_by_uuid(pecan.request.context, stor_uuid)

        try:
            # The conductor will handle removing the stor, not all functions
            # need special handling
            if stor.function == constants.STOR_FUNCTION_OSD:
                pecan.request.rpcapi.unconfigure_osd_istor(
                    pecan.request.context, stor)
            elif stor.function == constants.STOR_FUNCTION_JOURNAL:
                pecan.request.dbapi.istor_disable_journal(stor_uuid)
            # Now remove the stor from DB
            pecan.request.dbapi.istor_remove_disk_association(stor_uuid)
            pecan.request.dbapi.istor_destroy(stor_uuid)
        except Exception as e:
            LOG.exception(e)
            raise
class PVController(rest.RestController):
    """REST controller for ipvs (LVM physical volumes)."""

    # Sub-controller: disks scoped under a pv resource.
    idisks = disk_api.DiskController(from_ihosts=True, from_ipv=True)
    "Expose idisks as a sub-element of ipvs"

    # Extra routed actions beyond the standard REST verbs.
    _custom_actions = {
        'detail': ['GET'],
    }

    def __init__(self, from_ihosts=False, from_ilvg=False):
        # When mounted under a host (or lvg), item-level operations are
        # not permitted on this controller instance.
        self._from_ihosts = from_ihosts
        self._from_ilvg = from_ilvg

    def _get_pvs_collection(self, ihost_uuid, marker, limit, sort_key,
                            sort_dir, expand=False, resource_url=None):
        """Build a PVCollection, scoped to a host when ihost_uuid is given.

        :param ihost_uuid: parent host uuid (required when mounted under a
                           host)
        :param marker: uuid of the last pv of the previous page
        :param limit: max number of items (validated/clamped by utils)
        :param expand: include full resources rather than summaries
        :param resource_url: url used for pagination links
        """
        if self._from_ihosts and not ihost_uuid:
            raise exception.InvalidParameterValue(_("Host id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.pv.get_by_uuid(pecan.request.context, marker)

        # Scope by host if a host uuid was supplied, otherwise list all.
        if ihost_uuid:
            pvs = pecan.request.dbapi.ipv_get_by_ihost(ihost_uuid, limit,
                                                       marker_obj,
                                                       sort_key=sort_key,
                                                       sort_dir=sort_dir)
        else:
            pvs = pecan.request.dbapi.ipv_get_list(limit, marker_obj,
                                                   sort_key=sort_key,
                                                   sort_dir=sort_dir)

        return PVCollection.convert_with_links(pvs, limit,
                                               url=resource_url,
                                               expand=expand,
                                               sort_key=sort_key,
                                               sort_dir=sort_dir)

    @wsme_pecan.wsexpose(PVCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def get_all(self, ihost_uuid=None, marker=None, limit=None,
                sort_key='id', sort_dir='asc'):
        """Retrieve a list of pvs."""
        return self._get_pvs_collection(ihost_uuid, marker, limit,
                                        sort_key, sort_dir)

    @wsme_pecan.wsexpose(PVCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, ihost_uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of pvs with detail (expanded resources)."""
        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "ipvs":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['pvs', 'detail'])
        return self._get_pvs_collection(ihost_uuid, marker, limit, sort_key,
                                        sort_dir, expand, resource_url)

    @wsme_pecan.wsexpose(PV, types.uuid)
    def get_one(self, pv_uuid):
        """Retrieve information about the given pv."""
        # Item-level access is only allowed on the top-level controller.
        if self._from_ihosts:
            raise exception.OperationNotPermitted

        rpc_pv = objects.pv.get_by_uuid(pecan.request.context, pv_uuid)
        return PV.convert_with_links(rpc_pv)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(PV, body=PV)
    def post(self, pv):
        """Create a new pv.

        Delegates to the module-level _create(); maps internal failures
        to a client-side error.
        """
        if self._from_ihosts:
            raise exception.OperationNotPermitted

        try:
            pv = pv.as_dict()
            LOG.debug("pv post dict= %s" % pv)

            new_pv = _create(pv)
        except exception.SysinvException as e:
            LOG.exception(e)
            raise wsme.exc.ClientSideError(_("Invalid data: failed to create "
                                             "a physical volume object"))
        return PV.convert_with_links(new_pv)

    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [PVPatchType])
    @wsme_pecan.wsexpose(PV, types.uuid, body=[PVPatchType])
    def patch(self, pv_uuid, patch):
        """Update an existing pv via a JSON patch.

        Runs the "modify" semantic check, then persists only the fields
        that actually changed.
        """
        if self._from_ihosts:
            raise exception.OperationNotPermitted

        LOG.debug("patch_data: %s" % patch)

        rpc_pv = objects.pv.get_by_uuid(pecan.request.context, pv_uuid)

        # replace ihost_uuid with the corresponding internal DB
        # foreign-key id before applying the patch
        patch_obj = jsonpatch.JsonPatch(patch)
        for p in patch_obj:
            if p['path'] == '/ihost_uuid':
                p['path'] = '/forihostid'
                ihost = objects.host.get_by_uuid(pecan.request.context,
                                                 p['value'])
                p['value'] = ihost.id

        try:
            pv = PV(**jsonpatch.apply_patch(rpc_pv.as_dict(), patch_obj))
        except utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Semantic Checks
        _check("modify", pv.as_dict())
        try:
            # Update only the fields that have changed
            for field in objects.pv.fields:
                if rpc_pv[field] != getattr(pv, field):
                    rpc_pv[field] = getattr(pv, field)

            # Save and return
            rpc_pv.save()
            return PV.convert_with_links(rpc_pv)
        except exception.HTTPNotFound:
            # NOTE(review): 'ihost' is only bound when the patch contained
            # '/ihost_uuid'; otherwise this error path itself would raise
            # NameError — TODO confirm against callers.
            msg = _("PV update failed: host %s pv %s : patch %s"
                    % (ihost['hostname'], pv.lvm_pv_name, patch))
            raise wsme.exc.ClientSideError(msg)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, pv_uuid):
        """Delete a pv (delegates to the module-level delete_pv helper)."""
        if self._from_ihosts:
            raise exception.OperationNotPermitted

        delete_pv(pv_uuid)