def _create(storage_lvm):
    """Create an LVM storage backend and enable it on the system.

    :param storage_lvm: dict of parameters for the new LVM backend.
    :returns: the newly created StorageBackend object.
    """
    # Set the default for the storage backend
    storage_lvm = _set_default_values(storage_lvm)

    # Execute the common semantic checks for all backends, if a specific backend
    # is not specified this will not return
    api_helper.common_checks(constants.SB_API_OP_CREATE, storage_lvm)

    # Run the backend specific semantic checks to validate that we have all the
    # required parameters for manifest application
    _check_backend_lvm(constants.SB_API_OP_CREATE, storage_lvm,
                       storage_lvm.pop('confirmed', False))

    # NOTE(review): controller_fs_api is not defined in this function —
    # presumably a module-level import/global; confirm it is in scope.
    StorageBackendConfig.set_img_conversions_defaults(pecan.request.dbapi,
                                                      controller_fs_api)

    # We have a valid configuration. create it.
    system = pecan.request.dbapi.isystem_get_one()
    storage_lvm['forisystemid'] = system.id
    storage_lvm_obj = pecan.request.dbapi.storage_lvm_create(storage_lvm)

    # Retrieve the main StorageBackend object.
    storage_backend_obj = pecan.request.dbapi.storage_backend_get(
        storage_lvm_obj.id)

    # Enable the backend:
    _apply_backend_changes(constants.SB_API_OP_CREATE, storage_backend_obj)

    return storage_backend_obj
def _create(storage_file):
    """Create a file storage backend and enable it on the system.

    :param storage_file: dict of parameters for the new file backend.
    :returns: the newly created storage_file DB object.
    """
    # Apply backend defaults before any validation runs.
    storage_file = _set_default_values(storage_file)

    # Semantic validation shared by every backend type; this call raises
    # (does not return) when the backend is not present.
    api_helper.common_checks(constants.SB_API_OP_CREATE, storage_file)

    # Backend-specific validation for the file backend.
    _check_backend_file(constants.SB_API_OP_CREATE,
                        storage_file,
                        storage_file.pop('confirmed', False))

    # Validation passed: persist the new backend record.
    isystem = pecan.request.dbapi.isystem_get_one()
    storage_file['forisystemid'] = isystem.id
    new_backend = pecan.request.dbapi.storage_file_create(storage_file)

    # Fetch the generic StorageBackend row that fronts this backend.
    sb_obj = pecan.request.dbapi.storage_backend_get(new_backend.id)

    # Trigger the runtime changes that enable the backend.
    _apply_backend_changes(constants.SB_API_OP_CREATE, sb_obj)

    return new_backend
def _create(storage_ceph_ext):
    """Create an external Ceph storage backend.

    :param storage_ceph_ext: dict of parameters for the external Ceph backend.
    :returns: the StorageBackend object for the new backend.
    """
    storage_ceph_ext = _set_defaults_ceph_external(storage_ceph_ext)

    # Semantic validation shared by every backend type; this call raises
    # (does not return) when the backend type is not recognized.
    api_helper.common_checks(constants.SB_API_OP_CREATE, storage_ceph_ext)

    # External-Ceph specific validation and service reconciliation.
    _check_backend_ceph_external(storage_ceph_ext)
    _check_and_update_services(storage_ceph_ext)

    # Conditionally update the DB based on any previous create attempt;
    # this creates the StorageCeph object.
    isystem = pecan.request.dbapi.isystem_get_one()
    storage_ceph_ext['forisystemid'] = isystem.id
    created = pecan.request.dbapi.storage_ceph_external_create(
        storage_ceph_ext)

    # Fetch the generic StorageBackend row for the new backend.
    backend = pecan.request.dbapi.storage_backend_get(created.id)

    # NOTE: the input dict (not the created DB object) is handed to the
    # apply step — preserved exactly as in the original implementation.
    _apply_ceph_external_backend_changes(constants.SB_API_OP_CREATE,
                                         sb_obj=storage_ceph_ext)

    return backend
def _create(storage_ceph_rook):
    """Create a Rook Ceph storage backend.

    Runtime manifests are only applied when at least one controller host is
    unlocked and in an available/degraded state.

    :param storage_ceph_rook: dict of parameters for the Rook Ceph backend.
    :returns: the newly created storage_ceph_rook DB object.
    """
    # Apply backend defaults before any validation runs.
    storage_ceph_rook = _set_default_values(storage_ceph_rook)

    # Semantic validation shared by every backend type; this call raises
    # (does not return) when the backend is not present.
    api_helper.common_checks(constants.SB_API_OP_CREATE, storage_ceph_rook)

    # Rook-Ceph specific validation.
    _check_backend_ceph_rook(constants.SB_API_OP_CREATE,
                             storage_ceph_rook,
                             storage_ceph_rook.pop('confirmed', False))

    # Validation passed: persist the new backend record.
    isystem = pecan.request.dbapi.isystem_get_one()
    storage_ceph_rook['forisystemid'] = isystem.id
    created = pecan.request.dbapi.storage_ceph_rook_create(storage_ceph_rook)

    # Fetch the generic StorageBackend row for the new backend.
    backend = pecan.request.dbapi.storage_backend_get(created.id)

    # Collect controllers that are unlocked and available/degraded; only
    # apply the runtime manifests when at least one such controller exists.
    ready_states = [constants.AVAILABILITY_AVAILABLE,
                    constants.AVAILABILITY_DEGRADED]
    controllers = pecan.request.dbapi.ihost_get_by_personality(
        constants.CONTROLLER)
    eligible = []
    for host in controllers:
        if (host['administrative'] == constants.ADMIN_UNLOCKED and
                host['availability'] in ready_states):
            eligible.append(host)

    if eligible:
        _apply_backend_changes(constants.SB_API_OP_CREATE, backend)

    return created
def _delete(sb_uuid):
    """Delete the external storage backend identified by sb_uuid.

    For now the delete operation only removes the DB entry, after running
    the semantic checks and applying the backend removal changes.

    :param sb_uuid: UUID of the external storage backend to delete.
    :raises: wsme.exc.ClientSideError if the DB row cannot be destroyed.
    """
    storage_external_obj = pecan.request.dbapi.storage_external_get(sb_uuid)

    # Execute the common semantic checks for all backends, if backend is not
    # present this will not return
    api_helper.common_checks(constants.SB_API_OP_DELETE,
                             storage_external_obj.as_dict())

    # Run the backend specific semantic checks
    _check_backend_external(constants.SB_API_OP_DELETE,
                            storage_external_obj.as_dict(),
                            True)

    # Enable the backend changes:
    _apply_backend_changes(constants.SB_API_OP_DELETE, storage_external_obj)

    try:
        pecan.request.dbapi.storage_backend_destroy(storage_external_obj.id)
    except exception.HTTPNotFound:
        # Interpolate OUTSIDE _() so the untranslated format string is used
        # as the message-catalog key (oslo.i18n guideline); the original
        # interpolated inside _(), which defeats translation lookup.
        msg = _("Deletion of backend %s failed") % storage_external_obj.uuid
        raise wsme.exc.ClientSideError(msg)
def _patch(storlvm_uuid, patch):
    """Apply a JSON patch to an existing LVM storage backend.

    Only 'services', 'capabilities' and 'task' may be modified; a no-op
    patch is rejected unless the backend is in a config-error state.

    :param storlvm_uuid: UUID of the storage_lvm object to modify.
    :param patch: jsonpatch document describing the requested changes.
    :returns: the updated StorageLVM API object.
    :raises: wsme.exc.ClientSideError on semantic/validation failures,
             exception.PatchError when the patch cannot be applied.
    """
    # Obtain current storage object.
    rpc_storlvm = objects.storage_lvm.get_by_uuid(pecan.request.context,
                                                  storlvm_uuid)

    # Capabilities arrive as a JSON-encoded string; decode before applying.
    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/capabilities':
            p['value'] = jsonutils.loads(p['value'])

    # Keep a pristine copy for the before/after log below.
    ostorlvm = copy.deepcopy(rpc_storlvm)

    # perform checks based on the current vs. requested modifications
    _pre_patch_checks(rpc_storlvm, patch_obj)

    # Obtain a storage object with the patch applied.
    try:
        storlvm_config = StorageLVM(
            **jsonpatch.apply_patch(rpc_storlvm.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update current storage object.
    for field in objects.storage_lvm.fields:
        if (field in storlvm_config.as_dict() and
                rpc_storlvm[field] != storlvm_config.as_dict()[field]):
            rpc_storlvm[field] = storlvm_config.as_dict()[field]

    # Obtain the fields that have changed.
    delta = rpc_storlvm.obj_what_changed()
    if len(delta) == 0 and rpc_storlvm['state'] != constants.SB_STATE_CONFIG_ERR:
        raise wsme.exc.ClientSideError(
            _("No changes to the existing backend settings were detected."))

    # Reject any modification outside the allowed attribute set.
    allowed_attributes = ['services', 'capabilities', 'task']
    for d in delta:
        if d not in allowed_attributes:
            raise wsme.exc.ClientSideError(
                _("Can not modify '%s' with this operation." % d))

    LOG.info("SYS_I orig storage_lvm: %s " % ostorlvm.as_dict())
    LOG.info("SYS_I new storage_lvm: %s " % storlvm_config.as_dict())

    # Execute the common semantic checks for all backends, if backend is not
    # present this will not return
    api_helper.common_checks(constants.SB_API_OP_MODIFY,
                             rpc_storlvm.as_dict())

    # Run the backend specific semantic checks
    _check_backend_lvm(constants.SB_API_OP_MODIFY, rpc_storlvm.as_dict(),
                       True)

    try:
        rpc_storlvm.save()

        # Enable the backend changes:
        _apply_backend_changes(constants.SB_API_OP_MODIFY, rpc_storlvm)

        return StorageLVM.convert_with_links(rpc_storlvm)
    except exception.HTTPNotFound:
        msg = _("Storlvm update failed: storlvm %s : "
                " patch %s" % (storlvm_config, patch))
        raise wsme.exc.ClientSideError(msg)
def _patch(stor_ceph_ext_uuid, patch):
    """Apply a JSON patch to an existing external Ceph storage backend.

    Only 'services', 'ceph_conf', 'capabilities' and 'task' may be
    modified; a no-op patch is rejected unless the backend is in a
    config-error state.

    :param stor_ceph_ext_uuid: UUID of the storage_ceph_external object.
    :param patch: jsonpatch document describing the requested changes.
    :returns: the updated StorageCephExternal API object.
    :raises: wsme.exc.ClientSideError on semantic/validation failures,
             exception.PatchError when the patch cannot be applied.
    """
    # Obtain current storage object.
    rpc_stor_ceph_ext = objects.storage_ceph_external.get_by_uuid(
        pecan.request.context,
        stor_ceph_ext_uuid)

    # Keep a pristine copy for logging and the apply step below.
    ostor_ceph_ext = copy.deepcopy(rpc_stor_ceph_ext)

    # Capabilities arrive as a JSON-encoded string; decode before applying.
    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/capabilities':
            p['value'] = jsonutils.loads(p['value'])

    # perform checks based on the current vs. requested modifications
    _pre_patch_checks(rpc_stor_ceph_ext, patch_obj)

    # Obtain a storage object with the patch applied.
    try:
        stor_ceph_ext_config = StorageCephExternal(
            **jsonpatch.apply_patch(rpc_stor_ceph_ext.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update current storage object.
    for field in objects.storage_ceph_external.fields:
        if (field in stor_ceph_ext_config.as_dict() and
                rpc_stor_ceph_ext[field] !=
                stor_ceph_ext_config.as_dict()[field]):
            rpc_stor_ceph_ext[field] = stor_ceph_ext_config.as_dict()[field]

    # Obtain the fields that have changed.
    delta = rpc_stor_ceph_ext.obj_what_changed()
    if len(delta) == 0 and rpc_stor_ceph_ext[
            'state'] != constants.SB_STATE_CONFIG_ERR:
        raise wsme.exc.ClientSideError(
            _("No changes to the existing backend settings were detected."))

    # Reject any modification outside the allowed attribute set.
    allowed_attributes = ['services', 'ceph_conf', 'capabilities', 'task']
    for d in delta:
        if d not in allowed_attributes:
            raise wsme.exc.ClientSideError(
                _("Can not modify '%s' with this operation." % d))

    LOG.info("SYS_I orig storage_ceph_external: %s " % ostor_ceph_ext.as_dict())
    LOG.info("SYS_I new storage_ceph_external: %s " %
             stor_ceph_ext_config.as_dict())

    # Execute the common semantic checks for all backends, if backend is not
    # present this will not return
    # NOTE(review): unlike the LVM variant, the object itself (not
    # .as_dict()) is passed here — confirm common_checks accepts both forms.
    api_helper.common_checks(constants.SB_API_OP_MODIFY, rpc_stor_ceph_ext)

    _check_backend_ceph_external(rpc_stor_ceph_ext)

    _check_and_update_services(rpc_stor_ceph_ext)

    rpc_stor_ceph_ext.save()

    _apply_ceph_external_backend_changes(
        constants.SB_API_OP_MODIFY, sb_obj=rpc_stor_ceph_ext,
        orig_sb_obj=ostor_ceph_ext)

    return StorageCephExternal.convert_with_links(rpc_stor_ceph_ext)