Example #1
def load_class_by_alias_or_classname(namespace, name):
    """Load class using stevedore alias or the class name
    :param namespace: namespace where the alias is defined
    :param name: alias or class name of the class to be loaded
    :returns class if calls can be loaded
    :raises ImportError if class cannot be loaded
    """

    if not name:
        LOG.error(_LE("Alias or class name is not set"))
        raise ImportError(_("Class not found."))
    try:
        # Try to resolve class by alias
        mgr = driver.DriverManager(namespace, name)
        class_to_load = mgr.driver
    except RuntimeError:
        e1_info = sys.exc_info()
        # Fallback to class name
        try:
            class_to_load = importutils.import_class(name)
        except (ImportError, ValueError):
            LOG.error(_LE("Error loading class by alias"), exc_info=e1_info)
            LOG.error(_LE("Error loading class by class name"), exc_info=True)
            raise ImportError(_("Class not found."))
    return class_to_load
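A hedged usage sketch of the helper above; the stevedore namespace and the dotted class path below are placeholders for illustration, not values defined by the snippet:

# Either a registered alias or a fully qualified class path resolves through
# the same call; the alias lookup is tried first, the dotted path is the fallback.
cls = load_class_by_alias_or_classname(
    'neutron.core_plugins',                     # entry-point namespace (placeholder)
    'neutron.plugins.ml2.plugin.Ml2Plugin')     # or simply an alias such as 'ml2'
plugin = cls()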
    def _notify_loop(self, resource, event, trigger, **kwargs):
        """The notification loop."""
        errors = []
        callbacks = list(self._callbacks[resource].get(event, {}).items())
        LOG.debug("Notify callbacks %s for %s, %s",
                  callbacks, resource, event)
        # TODO(armax): consider using a GreenPile
        for callback_id, callback in callbacks:
            try:
                callback(resource, event, trigger, **kwargs)
            except Exception as e:
                abortable_event = (
                    event.startswith(events.BEFORE) or
                    event.startswith(events.PRECOMMIT)
                )
                if not abortable_event:
                    LOG.exception(_LE("Error during notification for "
                                      "%(callback)s %(resource)s, %(event)s"),
                                  {'callback': callback_id,
                                   'resource': resource, 'event': event})
                else:
                    LOG.error(_LE("Callback %(callback)s raised %(error)s"),
                              {'callback': callback_id, 'error': e})
                errors.append(exceptions.NotificationError(callback_id, e))
        return errors

    def _db_event_handler(self, mapper, _conn, target):
        try:
            tenant_id = target['tenant_id']
        except AttributeError:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Model class %s does not have a tenant_id "
                              "attribute"), target)
        self._dirty_tenants.add(tenant_id)
Example #4
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""

        LOG.info(_LI("%(method)s %(url)s"), {
            "method": request.method,
            "url": request.url
        })

        try:
            action, args, accept = self.deserializer.deserialize(request)
        except exception.InvalidContentType:
            msg = _("Unsupported Content-Type")
            LOG.exception(_LE("InvalidContentType: %s"), msg)
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
        except n_exc.MalformedRequestBody:
            msg = _("Malformed request body")
            LOG.exception(_LE("MalformedRequestBody: %s"), msg)
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))

        try:
            action_result = self.dispatch(request, action, args)
        except webob.exc.HTTPException as ex:
            LOG.info(_LI("HTTP exception thrown: %s"), ex)
            action_result = Fault(ex, self._fault_body_function)
        except Exception:
            LOG.exception(_LE("Internal error"))
            # Do not include the traceback to avoid returning it to clients.
            action_result = Fault(webob.exc.HTTPServerError(),
                                  self._fault_body_function)

        if isinstance(action_result, dict) or action_result is None:
            response = self.serializer.serialize(action_result,
                                                 accept,
                                                 action=action)
        else:
            response = action_result

        try:
            LOG.info(_LI("%(url)s returned with HTTP %(status)d"),
                     dict(url=request.url, status=response.status_int))
        except AttributeError as e:
            LOG.info(_LI("%(url)s returned a fault: %(exception)s"),
                     dict(url=request.url, exception=e))

        return response
Example #5
def extract_exc_details(e):
    for attr in ('_error_context_msg', '_error_context_args'):
        if not hasattr(e, attr):
            return _LE('No details.')
    details = e._error_context_msg
    args = e._error_context_args
    if args is _NO_ARGS_MARKER:
        return details
    return details % args
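For context, a hedged sketch of the attribute contract extract_exc_details() reads; the snippet does not show how Neutron attaches these attributes, so the manual assignment below is purely illustrative:

exc = ValueError("disk full")
exc._error_context_msg = "while writing %(path)s"
exc._error_context_args = {'path': '/var/lib/data'}
print(extract_exc_details(exc))            # -> "while writing /var/lib/data"
print(extract_exc_details(ValueError()))   # -> "No details." (attributes missing)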
    def _check_extension(self, extension):
        """Checks for required methods in extension objects."""
        try:
            LOG.debug('Ext name: %s', extension.get_name())
            LOG.debug('Ext alias: %s', extension.get_alias())
            LOG.debug('Ext description: %s', extension.get_description())
            LOG.debug('Ext updated: %s', extension.get_updated())
        except AttributeError:
            LOG.exception(_LE("Exception loading extension"))
            return False
        return isinstance(extension, ExtensionDescriptor)
Example #7
    def dispatch(self, request, action, action_args):
        """Find action-specific method on controller and call it."""

        controller_method = getattr(self.controller, action)
        try:
            # NOTE(salvatore-orlando): the controller method must have
            # an argument whose name is 'request'
            return controller_method(request=request, **action_args)
        except TypeError:
            LOG.exception(_LE('Invalid request'))
            return Fault(webob.exc.HTTPBadRequest())
    def extend_resources(self, version, attr_map):
        """Extend resources with additional resources or attributes.

        :param attr_map: the existing mapping from resource name to
        attrs definition.

        After this function returns, attr_map is extended with any additions
        that the loaded extensions make to it.
        """
        processed_exts = {}
        exts_to_process = self.extensions.copy()
        check_optionals = True
        # Iterate while there are unprocessed extensions, stopping early if
        # a whole iteration makes no progress
        while exts_to_process:
            processed_ext_count = len(processed_exts)
            for ext_name, ext in list(exts_to_process.items()):
                # Process extension only if all required extensions
                # have been processed already
                required_exts_set = set(ext.get_required_extensions())
                if required_exts_set - set(processed_exts):
                    continue
                optional_exts_set = set(ext.get_optional_extensions())
                if check_optionals and optional_exts_set - set(processed_exts):
                    continue
                extended_attrs = ext.get_extended_resources(version)
                for res, resource_attrs in six.iteritems(extended_attrs):
                    attr_map.setdefault(res, {}).update(resource_attrs)
                processed_exts[ext_name] = ext
                del exts_to_process[ext_name]
            if len(processed_exts) == processed_ext_count:
                # if we hit here, it means there are unsatisfied
                # dependencies. try again without optionals since optionals
                # are only necessary to set order if they are present.
                if check_optionals:
                    check_optionals = False
                    continue
                # Exit loop as no progress was made
                break
        if exts_to_process:
            unloadable_extensions = set(exts_to_process.keys())
            LOG.error(
                _LE("Unable to process extensions (%s) because "
                    "the configured plugins do not satisfy "
                    "their requirements. Some features will not "
                    "work as expected."), ', '.join(unloadable_extensions))
            self._check_faulty_extensions(unloadable_extensions)
        # Extending extensions' attributes map.
        for ext in processed_exts.values():
            ext.update_attributes_map(attr_map)
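A hedged sketch of the descriptor interface extend_resources() drives; the class, resource, and attribute names below are invented for illustration and do not correspond to a real Neutron extension:

class ExampleExtension(object):
    def get_required_extensions(self):
        return []                  # must all be processed before this one
    def get_optional_extensions(self):
        return ['router']          # only influences ordering when present
    def get_extended_resources(self, version):
        # merged into attr_map, per resource, for the matching API version
        if version != '2.0':
            return {}
        return {'ports': {'example_attr': {'allow_post': True,
                                           'allow_put': True}}}
    def update_attributes_map(self, attr_map):
        pass                       # called once all extensions are processed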
    def _load_all_extensions(self):
        """Load extensions from the configured path.

        The extension name is constructed from the module_name. If your
        extension module is named widgets.py, the extension class within that
        module should be 'Widgets'.

        See tests/unit/extensions/foxinsocks.py for an example extension
        implementation.
        """

        for path in self.path.split(':'):
            if os.path.exists(path):
                self._load_all_extensions_from_path(path)
            else:
                LOG.error(_LE("Extension path '%s' doesn't exist!"), path)
Example #10
    def _get_socket(self, host, port, backlog):
        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0], bind_addr[1],
                                      socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            LOG.exception(_LE("Unable to listen on %(host)s:%(port)s"), {
                'host': host,
                'port': port
            })
            sys.exit(1)

        sock = None
        retry_until = time.time() + CONF.retry_until_window
        while not sock and time.time() < retry_until:
            try:
                sock = eventlet.listen(bind_addr,
                                       backlog=backlog,
                                       family=family)
            except socket.error as err:
                with excutils.save_and_reraise_exception() as ctxt:
                    if err.errno == errno.EADDRINUSE:
                        ctxt.reraise = False
                        eventlet.sleep(0.1)
        if not sock:
            raise RuntimeError(
                _("Could not bind to %(host)s:%(port)s "
                  "after trying for %(time)d seconds") % {
                      'host': host,
                      'port': port,
                      'time': CONF.retry_until_window
                  })
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                            CONF.tcp_keepidle)

        return sock
Example #11
    def call(self, ctxt, method, **kwargs):
        # two methods with the same name in different namespaces should
        # be tracked independently
        if self._original_context.target.namespace:
            scoped_method = '%s.%s' % (self._original_context.target.namespace,
                                       method)
        else:
            scoped_method = method
        # set the timeout from the global method timeout tracker for this
        # method
        self._original_context.timeout = self._METHOD_TIMEOUTS[scoped_method]
        try:
            return self._original_context.call(ctxt, method, **kwargs)
        except oslo_messaging.MessagingTimeout:
            with excutils.save_and_reraise_exception():
                wait = random.uniform(0, TRANSPORT.conf.rpc_response_timeout)
                LOG.error(
                    _LE("Timeout in RPC method %(method)s. Waiting for "
                        "%(wait)s seconds before next attempt. If the "
                        "server is not down, consider increasing the "
                        "rpc_response_timeout option as Neutron "
                        "server(s) may be overloaded and unable to "
                        "respond quickly enough."), {
                            'wait': int(round(wait)),
                            'method': scoped_method
                        })
                ceiling = TRANSPORT.conf.rpc_response_timeout * 10
                new_timeout = min(self._original_context.timeout * 2, ceiling)
                if new_timeout > self._METHOD_TIMEOUTS[scoped_method]:
                    LOG.warning(
                        _LW("Increasing timeout for %(method)s calls "
                            "to %(new)s seconds. Restart the agent to "
                            "restore it to the default value."), {
                                'method': scoped_method,
                                'new': new_timeout
                            })
                    self._METHOD_TIMEOUTS[scoped_method] = new_timeout
                time.sleep(wait)
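The back-off above doubles the per-method timeout on every MessagingTimeout and caps it at ten times rpc_response_timeout. A small worked sketch, assuming the oslo.messaging default of 60 seconds:

# 60 -> 120 -> 240 -> 480 -> 600, then the ceiling of 60 * 10 holds.
timeout = 60
ceiling = 60 * 10
for _ in range(6):
    timeout = min(timeout * 2, ceiling)
    print(timeout)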
Example #12
    def decorator(f):
        try:
            # NOTE(kevinbenton): we use pecan's util function here because it
            # deals with the horrors of finding args of already decorated
            # functions
            ctx_arg_index = p_util.getargspec(f).args.index(context_var_name)
        except ValueError:
            raise RuntimeError(
                _LE("Could not find position of var %s") % context_var_name)
        f_with_retry = retry_db_errors(f)

        @six.wraps(f)
        def wrapped(*args, **kwargs):
            # only use retry wrapper if we aren't nested in an active
            # transaction
            if context_var_name in kwargs:
                context = kwargs[context_var_name]
            else:
                context = args[ctx_arg_index]
            method = f if context.session.is_active else f_with_retry
            return method(*args, **kwargs)

        return wrapped
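A hedged usage sketch, assuming this decorator body is returned by a factory along the lines of Neutron's retry_if_session_inactive(context_var_name='context'); the factory name and the decorated function are illustrative:

@retry_if_session_inactive()
def update_port_status(context, port_id, status):
    # Retried on transient DB errors only when context.session has no active
    # transaction; inside a transaction it runs without the retry wrapper so
    # the enclosing transaction can be retried at an outer level instead.
    ...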
Example #13
    def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
        objs = []
        try:
            for item in body[self._collection]:
                kwargs = {self._resource: item}
                if parent_id:
                    kwargs[self._parent_id_name] = parent_id
                fields_to_strip = self._exclude_attributes_by_policy(
                    request.context, item)
                objs.append(
                    self._filter_attributes(request.context,
                                            obj_creator(
                                                request.context, **kwargs),
                                            fields_to_strip=fields_to_strip))
            return objs
        # NOTE(salvatore-orlando): broad catch as in theory a plugin
        # could raise any kind of exception
        except Exception:
            with excutils.save_and_reraise_exception():
                for obj in objs:
                    obj_deleter = getattr(self._plugin,
                                          self._plugin_handlers[self.DELETE])
                    try:
                        kwargs = ({
                            self._parent_id_name: parent_id
                        } if parent_id else {})
                        obj_deleter(request.context, obj['id'], **kwargs)
                    except Exception:
                        # broad catch as our only purpose is to log the
                        # exception
                        LOG.exception(
                            _LE("Unable to undo add for "
                                "%(resource)s %(id)s"), {
                                    'resource': self._resource,
                                    'id': obj['id']
                                })
Example #14
def create_object_with_dependency(creator, dep_getter, dep_creator,
                                  dep_id_attr, dep_deleter):
    """Creates an object that binds to a dependency while handling races.

    creator is a function that is expected to take the result of either
    dep_getter or dep_creator.

    The result of dep_getter and dep_creator must have an attribute named
    dep_id_attr, which is used to determine whether the dependency changed
    during object creation.

    dep_deleter will be called with the result of dep_creator if the creator
    function fails for a non-dependency reason or the retries are exceeded.

    dep_getter should return None if the dependency does not exist.

    dep_creator can raise a DBDuplicateEntry to indicate that a concurrent
    create of the dependency occurred and the process will restart to get the
    concurrently created one.

    This function will return both the created object and the dependency it
    used/created.

    This function protects against all of the cases where the dependency can
    be concurrently removed by catching exceptions and restarting the
    process of creating the dependency if one no longer exists. It will
    give up after neutron.db.api.MAX_RETRIES and raise the exception it
    encounters after that.
    """
    result, dependency, dep_id, made_locally = None, None, None, False
    for attempts in range(1, db_api.MAX_RETRIES + 1):
        # we go to max + 1 here so the exception handlers can raise their
        # errors at the end
        try:
            dependency = dep_getter()
            if not dependency:
                dependency = dep_creator()
                made_locally = True
            dep_id = getattr(dependency, dep_id_attr)
        except db_exc.DBDuplicateEntry:
            # dependency was concurrently created.
            with excutils.save_and_reraise_exception() as ctx:
                if attempts < db_api.MAX_RETRIES:
                    # sleep for a random time between 0 and 1 second to
                    # make sure a concurrent worker doesn't retry again
                    # at exactly the same time
                    time.sleep(random.uniform(0, 1))
                    ctx.reraise = False
                    continue
        try:
            result = creator(dependency)
            break
        except Exception:
            with excutils.save_and_reraise_exception() as ctx:
                # check if dependency we tried to use was removed during
                # object creation
                if attempts < db_api.MAX_RETRIES:
                    dependency = dep_getter()
                    if not dependency or dep_id != getattr(
                            dependency, dep_id_attr):
                        ctx.reraise = False
                        continue
                # we have exceeded retries or have encountered a non-dependency
                # related failure so we try to clean up the dependency if we
                # created it before re-raising
                if made_locally and dependency:
                    try:
                        dep_deleter(dependency)
                    except Exception:
                        LOG.exception(_LE("Failed cleaning up dependency %s"),
                                      dep_id)
    return result, dependency
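A hedged usage sketch of the contract described in the docstring; every helper below is a placeholder standing in for real plugin or database code:

def dep_getter():
    return get_segment(network_id)              # None when it does not exist (placeholder)

def dep_creator():
    return allocate_segment(network_id)         # may raise DBDuplicateEntry (placeholder)

def creator(segment):
    return create_binding(network_id, segment)  # retried if the segment vanishes (placeholder)

binding, segment = create_object_with_dependency(
    creator, dep_getter, dep_creator,
    dep_id_attr='id', dep_deleter=release_segment)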
Example #15
    def __call__(self, target, creds, enforcer):
        if self.target_field not in target:
            # policy needs a plugin check
            # target field is in the form resource:field
            # however if they're not separated by a colon, use an underscore
            # as a separator for backward compatibility

            def do_split(separator):
                parent_res, parent_field = self.target_field.split(
                    separator, 1)
                return parent_res, parent_field

            for separator in (':', '_'):
                try:
                    parent_res, parent_field = do_split(separator)
                    break
                except ValueError:
                    LOG.debug("Unable to find ':' as separator in %s.",
                              self.target_field)
            else:
                # If we are here split failed with both separators
                err_reason = (_("Unable to find resource name in %s") %
                              self.target_field)
                LOG.error(err_reason)
                raise exceptions.PolicyCheckError(policy="%s:%s" %
                                                  (self.kind, self.match),
                                                  reason=err_reason)
            parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
                "%ss" % parent_res, None)
            if not parent_foreign_key:
                err_reason = (_("Unable to verify match:%(match)s as the "
                                "parent resource: %(res)s was not found") % {
                                    'match': self.match,
                                    'res': parent_res
                                })
                LOG.error(err_reason)
                raise exceptions.PolicyCheckError(policy="%s:%s" %
                                                  (self.kind, self.match),
                                                  reason=err_reason)
            # NOTE(salv-orlando): This check currently assumes the parent
            # resource is handled by the core plugin. It might be worth
            # having a way to map resources to plugins so to make this
            # check more general
            f = getattr(directory.get_plugin(), 'get_%s' % parent_res)
            # f *must* exist; if it is not found it is better to let neutron
            # explode. The check will be performed with an admin context
            context = importutils.import_module('neutron.context')
            try:
                data = f(context.get_admin_context(),
                         target[parent_foreign_key],
                         fields=[parent_field])
                target[self.target_field] = data[parent_field]
            except exceptions.NotFound as e:
                # NOTE(kevinbenton): a NotFound exception can occur if a
                # list operation is happening at the same time as one of
                # the parents and its children being deleted. So we issue
                # a RetryRequest so the API will redo the lookup and the
                # problem items will be gone.
                raise db_exc.RetryRequest(e)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE('Policy check error while calling %s!'),
                                  f)
        match = self.match % target
        if self.kind in creds:
            return match == six.text_type(creds[self.kind])
        return False
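For orientation, a hedged sketch of the target_field handling at the top of this check; the policy rule and field names are illustrative:

# A rule such as "tenant_id:%(network:tenant_id)s" produces
# target_field = 'network:tenant_id'; '_' is accepted as a legacy separator.
target_field = 'network:tenant_id'
parent_res, parent_field = target_field.split(':', 1)
assert (parent_res, parent_field) == ('network', 'tenant_id')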