def add_command_parsers(subparsers):
    """Register the cyborg-dbsync subcommands on the given subparsers.

    Each subcommand dispatches to the matching DBCommand method via the
    parser's ``func`` default.
    """
    cmd = DBCommand()

    upgrade = subparsers.add_parser(
        'upgrade',
        help=_("Upgrade the database schema to the latest version. "
               "Optionally, use --revision to specify an alembic revision "
               "string to upgrade to."))
    upgrade.set_defaults(func=cmd.upgrade)
    upgrade.add_argument('--revision', nargs='?')

    revision = subparsers.add_parser(
        'revision',
        help=_("Create a new alembic revision. "
               "Use --message to set the message string."))
    revision.set_defaults(func=cmd.revision)
    revision.add_argument('-m', '--message')
    revision.add_argument('--autogenerate', action='store_true')

    stamp = subparsers.add_parser('stamp')
    stamp.set_defaults(func=cmd.stamp)
    stamp.add_argument('--revision', nargs='?')

    version = subparsers.add_parser(
        'version',
        help=_("Print the current version information and exit."))
    version.set_defaults(func=cmd.version)

    create_schema = subparsers.add_parser(
        'create_schema', help=_("Create the database schema."))
    create_schema.set_defaults(func=cmd.create_schema)
def _send_events(self, events):
    """Send events to Nova external events API.

    :param events: List of events to send to Nova.
    :raises: exception.InvalidAPIResponse, on unexpected error
    """
    url = "/os-server-external-events"
    body = {"events": events}
    response = self.nova_client.post(url, json=body)
    # NOTE(Sundar): Response status should always be 200/207. See
    # https://review.opendev.org/#/c/698037/
    if response.status_code == 200:
        # Fixed log message typo: 'Sucessfully' -> 'Successfully'.
        LOG.info("Successfully sent events to Nova, events: %(events)s",
                 {"events": events})
    elif response.status_code == 207:
        # NOTE(Sundar): If Nova returns per-event code of 422, that
        # is due to a race condition where Nova has not associated
        # the instance with a host yet. See
        # https://bugs.launchpad.net/nova/+bug/1855752
        # Use a distinct name for the per-event responses instead of
        # shadowing the 'events' parameter (no redundant list copy).
        resp_events = response.json()['events']
        event_codes = {ev['code'] for ev in resp_events}
        if len(event_codes) == 1:  # all events have same event code
            if event_codes == {422}:
                LOG.info(
                    'Ignoring Nova notification error that the '
                    'instance %s is not yet associated with a host.',
                    resp_events[0]['server_uuid'])
            else:
                msg = _('Unexpected event code %(code)s '
                        'for instance %(inst)s')
                msg = msg % {
                    'code': event_codes.pop(),
                    'inst': resp_events[0]["server_uuid"]
                }
                raise exception.InvalidAPIResponse(service='Nova',
                                                   api=url[1:], msg=msg)
        else:
            msg = _('All event responses are expected to '
                    'have the same event code. Instance: %(inst)s')
            msg = msg % {'inst': resp_events[0]['server_uuid']}
            raise exception.InvalidAPIResponse(service='Nova',
                                               api=url[1:], msg=msg)
    else:
        # Unexpected return code from Nova
        msg = _('Failed to send events %(ev)s: HTTP %(code)s: %(txt)s')
        msg = msg % {
            'ev': events,
            'code': response.status_code,
            'txt': response.text
        }
        raise exception.InvalidAPIResponse(service='Nova',
                                           api=url[1:], msg=msg)
class AcceleratorException(Exception):
    """Base Accelerator Exception

    To correctly use this class, inherit from it and define
    a '_msg_fmt' property. That message will get printf'd
    with the keyword arguments provided to the constructor.

    If you need to access the message from an exception you should use
    six.text_type(exc)
    """
    # Default message when no kwargs are supplied.
    _msg_fmt = _("An unknown exception occurred.")
    # HTTP status code reported to API clients.
    code = http_client.INTERNAL_SERVER_ERROR
    headers = {}
    safe = False

    def __init__(self, message=None, **kwargs):
        # Drop any kwargs that cannot be serialized for RPC transport.
        self.kwargs = _ensure_exception_kwargs_serializable(
            self.__class__.__name__, kwargs)

        # Expose the HTTP code through kwargs unless the caller already
        # set one (or a subclass removed the attribute entirely).
        if 'code' not in self.kwargs and hasattr(self, 'code'):
            self.kwargs['code'] = self.code

        if not message:
            message = self._msg_fmt % kwargs if kwargs else self._msg_fmt

        super(AcceleratorException, self).__init__(message)
def get_one(self, dp_uuid_or_name):
    """Retrieve a single device profile by uuid or name.

    :param dp_uuid_or_name: UUID or name of the device profile.
    :raises: exception.NotAcceptable if looked up by name on an API
        version older than MINOR_2_DP_BY_NAME.
    :raises: exception.ResourceNotFound if no profile matches.
    """
    context = pecan.request.context
    if uuidutils.is_uuid_like(dp_uuid_or_name):
        LOG.info('[device_profiles] get_one. uuid=%s', dp_uuid_or_name)
        obj_devprof = objects.DeviceProfile.get_by_uuid(context,
                                                        dp_uuid_or_name)
    elif api.request.version.minor < versions.MINOR_2_DP_BY_NAME:
        # Name lookup is only supported from MINOR_2_DP_BY_NAME onwards.
        raise exception.NotAcceptable(
            _("Request not acceptable. The minimal required API "
              "version should be %(base)s.%(opr)s") % {
                  'base': versions.BASE_VERSION,
                  'opr': versions.MINOR_2_DP_BY_NAME
              })
    else:
        LOG.info('[device_profiles] get_one. name=%s', dp_uuid_or_name)
        obj_devprof = objects.DeviceProfile.get_by_name(context,
                                                        dp_uuid_or_name)

    if not obj_devprof:
        LOG.warning("Device profile with %s not found!", dp_uuid_or_name)
        raise exception.ResourceNotFound(resource='Device profile',
                                         msg='with %s' % dp_uuid_or_name)

    ret = {"device_profile": self.get_device_profile(obj_devprof)}
    LOG.info('[device_profiles] get_one returned: %s', ret)
    # TODO(Sundar) Replace this with convert_with_links()
    return wsme.api.Response(ret, status_code=HTTPStatus.OK,
                             return_type=wsme.types.DictType)
def new_child(self, name, parent, uuid=None, generation=None):
    """Create a new child provider beneath the given parent.

    :param name: The name of the new child provider
    :param parent: Either name or UUID of the parent provider
    :param uuid: The UUID of the new child provider
    :param generation: Generation to set for the new child provider
    :returns: the UUID of the new provider
    :raises ValueError if a provider with the specified uuid or name
        already exists; or if ``parent`` refers to a nonexistent
        provider.
    """
    with self.lock:
        key = uuid or name
        exists = True
        try:
            self._find_with_lock(key)
        except ValueError:
            exists = False
        if exists:
            err = _("Provider %s already exists.")
            raise ValueError(err % key)

        parent_node = self._find_with_lock(parent)
        child = _Provider(name, uuid, generation, parent_node.uuid)
        parent_node.add_child(child)
        return child.uuid
def new_root(self, name, uuid, generation=None):
    """Add a new root provider to the tree and return its UUID.

    :param name: The name of the new root provider
    :param uuid: The UUID of the new root provider
    :param generation: Generation to set for the new root provider
    :returns: the UUID of the new provider
    :raises: ValueError if a provider with the specified uuid already
        exists in the tree.
    """
    with self.lock:
        try:
            self._find_with_lock(uuid)
        except ValueError:
            # Not found: safe to add the new root.
            root = _Provider(name, uuid=uuid, generation=generation)
            self.roots.append(root)
            return root.uuid
        err = _("Provider %s already exists.")
        raise ValueError(err % uuid)
class Checks(upgradecheck.UpgradeCommands):
    """Pre-upgrade checks for Cyborg.

    Each check is a separate method in this class and must be registered
    in the _upgrade_checks tuple.
    """

    def _check_policy_json(self):
        """Fail the check when the policy file is JSON-formatted."""
        # NOTE(gmann): Check if policy file exist and is in
        # JSON format by actually loading the file not just
        # by checking the extension.
        policy_path = CONF.find_file(CONF.oslo_policy.policy_file)
        if not (policy_path and fileutils.is_json(policy_path)):
            return upgradecheck.Result(upgradecheck.Code.SUCCESS)
        msg = _("Your policy file is JSON-formatted which is "
                "deprecated since Victoria release (Cyborg 5.0.0). "
                "You need to switch to YAML-formatted file. You can use the "
                "``oslopolicy-convert-json-to-yaml`` tool to convert existing "
                "JSON-formatted files to YAML-formatted files in a "
                "backwards-compatible manner: "
                "https://docs.openstack.org/oslo.policy/"
                "latest/cli/oslopolicy-convert-json-to-yaml.html.")
        return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)

    # Each check returns an oslo_upgradecheck.upgradecheck.Result with
    # the appropriate Code and details set; warnings and failures are
    # carried in the Result's "details" attribute and rolled up at the
    # end of the check() method.
    _upgrade_checks = (
        # Added in Victoria
        (_('Policy File JSON to YAML Migration'), _check_policy_json),
    )
def get_sdk_adapter(service_type, check_service=False):
    """Construct an openstacksdk-brokered Adapter for a given service type.

    A conf group named after the service_type's project (per the
    service-types-authority) must exist and provide ksa auth, session,
    and adapter options.

    :param service_type: String name of the service type for which the
        Adapter is to be constructed.
    :param check_service: If True, we will query the endpoint to make
        sure the service is alive, raising ServiceUnavailable if it is
        not.
    :return: An openstack.proxy.Proxy object for the specified
        service_type.
    :raise: ConfGroupForServiceTypeNotFound If no conf group name could
        be found for the specified service_type.
    :raise: ServiceUnavailable if check_service is True and the service
        is down
    """
    confgrp = _get_conf_group(service_type)
    sess = _get_auth_and_session(confgrp)
    try:
        conn = connection.Connection(
            session=sess, oslo_conf=CONF, service_types={service_type},
            strict_proxies=check_service)
    except sdk_exc.ServiceDiscoveryException as exc:
        msg = _("The %(service_type)s service is unavailable: %(error)s")
        raise exception.ServiceUnavailable(
            msg % {'service_type': service_type,
                   'error': six.text_type(exc)})
    return getattr(conn, service_type)
def _validate_arq_patch(self, patch): """Validate a single patch for an ARQ. :param patch: a JSON PATCH document. The patch must be of the form [{..}], as specified in the value field of arq_uuid in patch() method below. :returns: dict of valid fields """ valid_fields = { 'hostname': None, 'device_rp_uuid': None, 'instance_uuid': None } if ((not all(p['op'] == 'add' for p in patch)) and (not all(p['op'] == 'remove' for p in patch))): raise exception.PatchError(reason='Every op must be add or remove') for p in patch: path = p['path'].lstrip('/') if path not in valid_fields.keys(): reason = 'Invalid path in patch {}'.format(p['path']) raise exception.PatchError(reason=reason) if p['op'] == 'add': valid_fields[path] = p['value'] not_found = [ field for field, value in valid_fields.items() if value is None ] if patch[0]['op'] == 'add' and len(not_found) > 0: msg = ','.join(not_found) reason = _('Fields absent in patch {}').format(msg) raise exception.PatchError(reason=reason) return valid_fields
def _ensure_exception_kwargs_serializable(exc_class_name, kwargs):
    """Ensure that kwargs are serializable

    Ensure that all kwargs passed to exception constructor can be passed
    over RPC, by trying to convert them to JSON, or, as a last resort, to
    string. If it is not possible, unserializable kwargs will be removed,
    letting the receiver to handle the exception string as it is configured
    to.

    :param exc_class_name: an AcceleratorException class name.
    :param kwargs: a dictionary of keyword arguments passed to the
        exception constructor.
    :returns: a dictionary of serializable keyword arguments.
    """
    # Serializers are tried in order: JSON first, then plain string.
    serializers = [(json.dumps, _('when converting to JSON')),
                   (six.text_type, _('when converting to string'))]
    # Maps a kwarg name to the list of error descriptions collected while
    # trying to serialize its value.
    exceptions = collections.defaultdict(list)
    serializable_kwargs = {}
    for k, v in kwargs.items():
        for serializer, msg in serializers:
            try:
                serializable_kwargs[k] = serializer(v)
                # A later serializer succeeded; forget the failures
                # recorded for this key by earlier serializers.
                exceptions.pop(k, None)
                break
            except Exception as e:
                # Record why this serializer failed and try the next one.
                exceptions[k].append(
                    '(%(serializer_type)s) %(e_type)s: %(e_contents)s' %
                    {'serializer_type': msg, 'e_contents': e,
                     'e_type': e.__class__.__name__})
    if exceptions:
        # Some kwargs could not be serialized by any serializer: log the
        # drop (with all collected reasons) rather than failing.
        LOG.error(
            "One or more arguments passed to the %(exc_class)s "
            "constructor as kwargs can not be serialized. The "
            "serialized arguments: %(serialized)s. These "
            "unserialized kwargs were dropped because of the "
            "exceptions encountered during their "
            "serialization:\n%(errors)s",
            dict(errors=';\n'.join("%s: %s" % (k, '; '.join(v))
                                   for k, v in exceptions.items()),
                 exc_class=exc_class_name,
                 serialized=serializable_kwargs))
        # We might be able to actually put the following keys' values into
        # format string, but there is no guarantee, drop it just in case.
        for k in exceptions:
            del kwargs[k]
    return serializable_kwargs
def apply_jsonpatch(doc, patch):
    """Apply a JSON patch to a document, rejecting new root attributes.

    :param doc: the document (dict) to patch.
    :param patch: list of JSON PATCH operations.
    :returns: the patched document.
    :raises: wsme.exc.ClientSideError if an 'add' op would create a new
        top-level attribute.
    """
    for p in patch:
        if p['op'] == 'add' and p['path'].count('/') == 1:
            if p['path'].lstrip('/') not in doc:
                # Fixed message: the previous string concatenation
                # ("...of ' ' the...") emitted a double space.
                msg = _('Adding a new attribute (%s) to the root of '
                        'the resource is not allowed')
                raise wsme.exc.ClientSideError(msg % p['path'])
    return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch))
def process_sort_params(self, sort_keys, sort_dirs,
                        default_keys=('created_at', 'id'),
                        default_dir='asc'):
    """Process and validate sort keys and directions.

    :param sort_keys: list of sort keys, or None.
    :param sort_dirs: list of 'asc'/'desc' directions, or None.
    :param default_keys: keys appended when not already requested.
        Fixed: this was a mutable (list) default argument; a tuple is
        used now, which is safe and backward-compatible since the value
        is only iterated.
    :param default_dir: direction used when none is supplied.
    :returns: tuple of (result_keys, result_dirs), equal length.
    :raises: exception.InvalidInput if a direction is not 'asc'/'desc'
        or more directions than keys are supplied.
    """
    # Determine direction to use for when adding default keys
    if sort_dirs and len(sort_dirs) != 0:
        default_dir_value = sort_dirs[0]
    else:
        default_dir_value = default_dir

    # Create list of keys (do not modify the input list)
    if sort_keys:
        result_keys = list(sort_keys)
    else:
        result_keys = []

    # If a list of directions is not provided,
    # use the default sort direction for all provided keys
    if sort_dirs:
        result_dirs = []
        # Verify sort direction
        for sort_dir in sort_dirs:
            if sort_dir not in ('asc', 'desc'):
                msg = _("Unknown sort direction, must be 'desc' or 'asc'")
                raise exception.InvalidInput(reason=msg)
            result_dirs.append(sort_dir)
    else:
        result_dirs = [default_dir_value for _sort_key in result_keys]

    # Ensure that the key and direction length match
    while len(result_dirs) < len(result_keys):
        result_dirs.append(default_dir_value)
    # Unless more direction are specified, which is an error
    if len(result_dirs) > len(result_keys):
        msg = _("Sort direction size exceeds sort key size")
        raise exception.InvalidInput(reason=msg)

    # Ensure defaults are included
    for key in default_keys:
        if key not in result_keys:
            result_keys.append(key)
            result_dirs.append(default_dir_value)

    return result_keys, result_dirs
class CyborgException(Exception):
    """Base Cyborg Exception

    To correctly use this class, inherit from it and define
    a '_msg_fmt' property. That message will get printf'd
    with the keyword arguments provided to the constructor.

    If you need to access the message from an exception you should use
    six.text_type(exc)
    """
    # Default message used when no kwargs/message are supplied.
    _msg_fmt = _("An unknown exception occurred.")
    # Default HTTP status code reported to API clients.
    code = http_client.INTERNAL_SERVER_ERROR
    # Extra HTTP headers to attach to API responses.
    headers = {}
    # Whether the message is safe to expose to end users.
    safe = False

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                # A subclass removed the 'code' attribute; leave it out.
                pass

        if not message:
            try:
                message = self._msg_fmt % kwargs
            except Exception:
                # kwargs doesn't match a variable in self._msg_fmt
                # log the issue and the kwargs
                LOG.exception('Exception in string format operation')
                for name, value in kwargs.items():
                    LOG.error("%(name)s: %(value)s", {
                        "name": name,
                        "value": value
                    })
                # Operators can opt in to failing hard on bad format
                # strings via CONF.fatal_exception_format_errors.
                if CONF.fatal_exception_format_errors:
                    raise
                else:
                    # at least get the core self._msg_fmt out if something
                    # happened
                    message = self._msg_fmt

        super(CyborgException, self).__init__(message)

    def __str__(self):
        """Encode to utf-8 then wsme api can consume it as well."""
        if not six.PY3:
            return six.text_type(self.args[0]).encode('utf-8')
        return self.args[0]

    def __unicode__(self):
        """Return a unicode representation of the exception message."""
        return six.text_type(self.args[0])
def port_update(self, context, uuid, values):
    """Update an existing Port.

    :param context: request context.
    :param uuid: UUID of the Port to update.
    :param values: dict of fields to update; must not contain 'uuid'.
    :returns: the updated Port.
    :raises: exception.InvalidParameterValue if 'uuid' is in values.
    :raises: exception.PortDuplicateName on a duplicate name.
    """
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for existing Port.")
        raise exception.InvalidParameterValue(err=msg)

    try:
        return self._do_update_port(context, uuid, values)
    except db_exc.DBDuplicateEntry as e:
        if 'name' in e.columns:
            raise exception.PortDuplicateName(name=values['name'])
        # Bug fix: a duplicate entry on any other column was previously
        # swallowed silently (returning None); re-raise it instead.
        raise
def deployable_update(self, context, uuid, values):
    """Update an existing Deployable.

    :param context: request context.
    :param uuid: UUID of the Deployable to update.
    :param values: dict of fields to update; must not contain 'uuid'.
    :returns: the updated Deployable.
    :raises: exception.InvalidParameterValue if 'uuid' is in values.
    :raises: exception.DuplicateDeployableName on a duplicate name.
    """
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing Deployable.")
        raise exception.InvalidParameterValue(err=msg)

    try:
        return self._do_update_deployable(context, uuid, values)
    except db_exc.DBDuplicateEntry as e:
        if 'name' in e.columns:
            raise exception.DuplicateDeployableName(name=values['name'])
        # Bug fix: a duplicate entry on any other column was previously
        # swallowed silently (returning None); re-raise it instead.
        raise
def main():
    """Entry point: parse the CLI subcommand and dispatch its handler."""
    CONF.register_cli_opt(
        cfg.SubCommandOpt('command',
                          title='Command',
                          help=_('Available commands'),
                          handler=add_command_parsers))
    service.prepare_service(sys.argv)
    # Invoke the handler selected by add_command_parsers.
    CONF.command.func()
def validate(patch):
    """Validate a WSME patch object and convert it to a plain dict.

    :param patch: object with ``path``, ``op`` and ``value`` attributes.
    :returns: dict with 'path', 'op' and, when set, 'value'.
    :raises: wsme.exc.ClientSideError for internal attributes, removal
        of mandatory attributes, or add/replace without a value.
    """
    # Only the first path segment decides whether the attr is internal.
    root = '/' + patch.path.split('/')[1]
    if root in patch.internal_attrs():
        msg = _("'%s' is an internal attribute and can not be updated")
        raise wsme.exc.ClientSideError(msg % patch.path)

    if patch.op == 'remove':
        if patch.path in patch.non_removable_attrs():
            msg = _("'%s' is a mandatory attribute and can not be removed")
            raise wsme.exc.ClientSideError(msg % patch.path)
    elif patch.value is wsme.Unset:
        msg = _("'add' and 'replace' operations need a value")
        raise wsme.exc.ClientSideError(msg)

    result = {'path': patch.path, 'op': patch.op}
    if patch.value is not wsme.Unset:
        result['value'] = patch.value
    return result
def accelerator_update(self, context, uuid, values):
    """Update an existing Accelerator.

    :param context: request context.
    :param uuid: UUID of the Accelerator to update.
    :param values: dict of fields to update; must not contain 'uuid'.
    :returns: the updated Accelerator.
    :raises: exception.InvalidParameterValue if 'uuid' is in values.
    :raises: exception.DuplicateAcceleratorName on a duplicate name.
    """
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing Accelerator.")
        raise exception.InvalidParameterValue(err=msg)

    try:
        return self._do_update_accelerator(context, uuid, values)
    except db_exc.DBDuplicateEntry as e:
        if 'name' in e.columns:
            raise exception.DuplicateAcceleratorName(name=values['name'])
        # Bug fix: a duplicate entry on any other column was previously
        # swallowed silently (returning None); re-raise it instead.
        raise
def _validate_arq_patch(self, patch):
    """Validate a single patch for an ARQ.

    :param patch: a JSON PATCH document. The patch must be of the form
        [{..}], as specified in the value field of arq_uuid in patch()
        method below.
    :returns: dict of valid fields
    """
    valid_fields = {'hostname': None,
                    'device_rp_uuid': None,
                    'instance_uuid': None}
    # project_id is patchable only from the corresponding microversion.
    if utils.allow_project_id():
        valid_fields['project_id'] = None

    # A patch must be homogeneous: all adds or all removes.
    all_add = all(p['op'] == 'add' for p in patch)
    all_remove = all(p['op'] == 'remove' for p in patch)
    if not (all_add or all_remove):
        raise exception.PatchError(reason='Every op must be add or remove')

    for p in patch:
        field = p['path'].lstrip('/')
        if field == 'project_id' and not utils.allow_project_id():
            raise exception.NotAcceptable(
                _("Request not acceptable. The minimal required API "
                  "version should be %(base)s.%(opr)s") % {
                      'base': versions.BASE_VERSION,
                      'opr': versions.MINOR_1_PROJECT_ID
                  })
        if field not in valid_fields:
            reason = 'Invalid path in patch {}'.format(p['path'])
            raise exception.PatchError(reason=reason)
        if p['op'] == 'add':
            valid_fields[field] = p['value']

    # An 'add' patch must supply every field.
    missing = [name for name, value in valid_fields.items()
               if value is None]
    if patch[0]['op'] == 'add' and missing:
        reason = _('Fields absent in patch {}').format(','.join(missing))
        raise exception.PatchError(reason=reason)

    return valid_fields
def install_driver(self, driver_id, driver_type):
    """Set up and attach the accelerator matching driver_id.

    :param driver_id: identifier used to look up the accelerator.
    :param driver_type: type name used in the error message.
    :raises: exception.InvalidAccelerator if no accelerator is found.
    """
    accelerator = self.dbconn.accelerator_query(None, driver_id)
    if not accelerator:
        msg = (_("Could not find %s accelerator") % driver_type)
        raise exception.InvalidAccelerator(msg)

    self.initialize_connection(accelerator, None)
    self.do_setup()
    nsid = self.get_allocated_nsid(self.get_controller())
    self.attach_instance(nsid)
def get_py_client(server):
    """Get the py_client instance for a known server.

    :param server: server name.
    :return: a PySPDK client for the server.
    :raise: InvalidAccelerator if the server is unknown.
    """
    if server not in SERVERS:
        msg = (_("Could not find %s accelerator") % server)
        raise exception.InvalidAccelerator(msg)
    return PySPDK(server)
def check_for_setup_error(py, server):
    """Verify that the given server is alive.

    :param py: py_client.
    :param server: server name (used in the error message).
    :return: True when the server is alive.
    :raise: AcceleratorException when the server is down.
    """
    if not py.is_alive():
        msg = (_("%s accelerator is down") % server)
        raise exception.AcceleratorException(msg)
    return True
def _check_if_already_bound(context, valid_fields):
    """Reject a bind request for an instance that already has ARQs.

    :param context: request context.
    :param valid_fields: dict whose single value holds the patch fields,
        including 'instance_uuid'.
    :raises: exception.PatchError if the instance already has
        accelerator requests.
    """
    patch_fields = list(valid_fields.values())[0]
    instance_uuid = patch_fields['instance_uuid']
    bound = [extarq for extarq in objects.ExtARQ.list(context)
             if extarq.arq['instance_uuid'] == instance_uuid]
    if not bound:
        return
    # duplicate binding request
    msg = _('Instance {} already has accelerator requests. '
            'Cannot bind additional ARQs.')
    raise exception.PatchError(reason=msg.format(instance_uuid))
def _paginate_query(context, model, limit, marker, sort_key, sort_dir,
                    query):
    """Apply sorting and pagination to a query and return all rows.

    :param sort_key: optional extra sort key, applied before 'id'.
    :raises: exception.InvalidParameterValue for an unsortable key.
    """
    # 'id' is always the tie-breaking sort key.
    sort_keys = ['id']
    if sort_key and sort_key not in sort_keys:
        sort_keys.insert(0, sort_key)
    try:
        paginated = sqlalchemyutils.paginate_query(
            query, model, limit, sort_keys, marker=marker,
            sort_dir=sort_dir)
    except db_exc.InvalidSortKey:
        raise exception.InvalidParameterValue(
            _('The sort_key value "%(key)s" is an invalid field for sorting')
            % {'key': sort_key})
    return paginated.all()
def __init__(self, app, conf, public_api_routes=None):
    """Initialize the middleware and compile public API route patterns.

    :param app: the wrapped WSGI application.
    :param conf: middleware configuration.
    :param public_api_routes: optional list of route templates that may
        be accessed without a token.
    :raises: exception.ConfigInvalid if a route fails to compile.
    """
    public_api_routes = public_api_routes or []
    self.app = app
    # Fixed: use a raw string -- '\.' in a normal string literal is an
    # invalid escape sequence (SyntaxWarning on modern Python).
    route_pattern_tpl = r'%s(\.json)?$'

    try:
        self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl)
                                  for route_tpl in public_api_routes]
    except re.error as e:
        msg = _('Cannot compile public API routes: %s') % e
        LOG.error(msg)
        raise exception.ConfigInvalid(error_msg=msg)

    super(AuthTokenMiddleware, self).__init__(app, conf)
def _check_policy_json(self):
    """Return FAILURE if the configured policy file is JSON-formatted."""
    # NOTE(gmann): Check if policy file exist and is in
    # JSON format by actually loading the file not just
    # by checking the extension.
    policy_path = CONF.find_file(CONF.oslo_policy.policy_file)
    if not (policy_path and fileutils.is_json(policy_path)):
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)
    msg = _("Your policy file is JSON-formatted which is "
            "deprecated since Victoria release (Cyborg 5.0.0). "
            "You need to switch to YAML-formatted file. You can use the "
            "``oslopolicy-convert-json-to-yaml`` tool to convert existing "
            "JSON-formatted files to YAML-formatted files in a "
            "backwards-compatible manner: "
            "https://docs.openstack.org/oslo.policy/"
            "latest/cli/oslopolicy-convert-json-to-yaml.html.")
    return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)
def get_accelerator_client(py, accelerator):
    """Get the specific client that communicates with server

    :param py: py_client.
    :param accelerator: accelerator type, 'vhost' or 'nvmf'.
    :return: acc_client.
    :raise: InvalidAccelerator for an unknown accelerator type.
    """
    if accelerator == 'vhost':
        return VhostTgt(py)
    if accelerator == 'nvmf':
        return NvmfTgt(py)
    # Bug fix: the original template '%(acc_client)' was missing the 's'
    # conversion type and was %-formatted against a bare value (which was
    # None in this branch), raising ValueError instead of producing the
    # intended message. Interpolate the accelerator name via a mapping.
    exc_msg = (_("accelerator_client %(acc_client)s is missing")
               % {'acc_client': accelerator})
    raise exception.InvalidAccelerator(exc_msg)
class Checks(upgradecheck.UpgradeCommands):
    """Pre-upgrade checks for this service.

    Each check is a separate method in this class and must be registered
    in the _upgrade_checks tuple.
    """

    def _check_placeholder(self):
        # TODO(whoami-rajat):This is just a placeholder for upgrade checks,
        # it should be removed when the actual checks are added
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)

    # Each check returns an oslo_upgradecheck.upgradecheck.Result with
    # the appropriate Code and details set; warnings and failures are
    # carried in the Result's "details" attribute and rolled up at the
    # end of the check() method.
    _upgrade_checks = (
        # In the future there should be some real checks added here
        (_('Placeholder'), _check_placeholder),
    )
def __init__(self, name, use_ssl=False):
    """Initialize, but do not start the WSGI server.

    :param name: The name of the WSGI server given to the loader.
    :param use_ssl: Wraps the socket in an SSL context if True.
    :returns: None
    :raises: exception.ConfigInvalid for a non-positive worker count.
    """
    self.name = name
    self.app = app.VersionSelectorApplication()
    # Explicit config wins; otherwise size from the host CPU count.
    workers = CONF.api.api_workers or processutils.get_worker_count()
    self.workers = workers
    if workers and workers < 1:
        raise exception.ConfigInvalid(
            _("api_workers value of %d is invalid, "
              "must be greater than 0.") % workers)
    self.server = wsgi.Server(CONF, self.name, self.app,
                              host=CONF.api.host_ip,
                              port=CONF.api.port,
                              use_ssl=use_ssl)
# License for the specific language governing permissions and limitations # under the License. """Cyborg Default Config Setting""" import os import socket from oslo_config import cfg from cyborg.common.i18n import _ exc_log_opts = [ cfg.BoolOpt('fatal_exception_format_errors', default=False, help=_('Used if there is a formatting error when generating ' 'an exception message (a programming error). If True, ' 'raise an exception; if False, use the unformatted ' 'message.')), ] service_opts = [ cfg.HostAddressOpt('host', default=socket.gethostname(), sample_default='localhost', help=_( 'Name of this node. This can be an opaque ' 'identifier. It is not necessarily a hostname, ' 'FQDN, or IP address. However, the node name ' 'must be valid within an AMQP key, and if using ' 'ZeroMQ, a valid hostname, FQDN, or IP address.')), cfg.IntOpt('periodic_interval', default=60,