def _load_all_extensions_from_path(self, path):
    """Load every extension module found in ``path`` and register it.

    Each ``*.py`` file whose name does not start with ``_`` is imported;
    the module is expected to define a class named after the module with
    its first letter capitalized. Any failure is logged and skipped so a
    single broken extension file cannot prevent the others from loading.
    """
    # Local import: the ``imp`` module is deprecated and was removed in
    # Python 3.12; importlib.util is the supported replacement for
    # imp.load_source(). NOTE(review): unlike load_source(), this does
    # not register the module in sys.modules — confirm no extension
    # relies on looking itself up there.
    import importlib.util

    # Sorting the extension list makes the order in which they
    # are loaded predictable across a cluster of load-balanced
    # Neutron Servers
    for f in sorted(os.listdir(path)):
        try:
            LOG.debug('Loading extension file: %s', f)
            mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
            ext_path = os.path.join(path, f)
            if file_ext.lower() == '.py' and not mod_name.startswith('_'):
                spec = importlib.util.spec_from_file_location(mod_name,
                                                              ext_path)
                mod = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(mod)
                # By convention the extension class is the module name
                # with its first letter upper-cased.
                ext_name = mod_name[0].upper() + mod_name[1:]
                new_ext_class = getattr(mod, ext_name, None)
                if not new_ext_class:
                    LOG.warning(
                        _LW('Did not find expected name '
                            '"%(ext_name)s" in %(file)s'), {
                                'ext_name': ext_name,
                                'file': ext_path
                            })
                    continue
                new_ext = new_ext_class()
                self.add_extension(new_ext)
        except Exception as exception:
            # Best effort: log the failure and keep loading the rest.
            LOG.warning(
                _LW("Extension file %(f)s wasn't loaded due to "
                    "%(exception)s"), {
                        'f': f,
                        'exception': exception
                    })
# Exemple #2
    def service_providers(self):
        """Return the service providers for the extension module."""
        # Attempt to read the config from cfg.CONF first; when passing
        # --config-dir, the option is merged from all the definitions
        # made across all the imported config files
        try:
            configured = cfg.CONF.service_providers.service_provider
        except cfg.NoSuchOptError:
            configured = []
        if configured:
            return configured

        # Alternatively, if the option is not available, try to load
        # it from the provider module's config file; this may be
        # necessary, if modules are loaded on the fly (DevStack may
        # be an example)
        fallback = self.ini().service_providers.service_provider
        if fallback:
            versionutils.report_deprecated_feature(
                LOG,
                _LW('Implicit loading of service providers from '
                    'neutron_*.conf files is deprecated and will be '
                    'removed in Ocata release.'))
        return fallback
 def unregister_events(self):
     """Detach the DB event handlers installed for this resource model."""
     try:
         # Remove both hooks; if the first is missing, sqlalchemy raises
         # and the second is never attempted (same as the original order).
         for event_name in ('after_insert', 'after_delete'):
             event.remove(self._model_class, event_name,
                          self._db_event_handler)
     except sql_exc.InvalidRequestError:
         LOG.warning(_LW("No sqlalchemy event for resource %s found"),
                     self.name)
 def _plugins_implement_interface(self, extension):
     if extension.get_plugin_interface() is None:
         return True
     for plugin in self.plugins.values():
         if isinstance(plugin, extension.get_plugin_interface()):
             return True
     LOG.warning(
         _LW("Loaded plugins do not implement extension "
             "%s interface"), extension.get_alias())
     return False
# Exemple #5
def _get_pagination_max_limit():
    """Return the configured maximum page size for paginated requests.

    Returns -1 (meaning "no limit") when pagination_max_limit is the
    "infinite" sentinel or holds an invalid value; otherwise returns the
    configured positive integer.
    """
    max_limit = -1
    if (cfg.CONF.pagination_max_limit.lower() !=
            constants.PAGINATION_INFINITE):
        try:
            max_limit = int(cfg.CONF.pagination_max_limit)
            # Zero is not a usable page size; treat it as invalid.
            if max_limit == 0:
                raise ValueError()
        except ValueError:
            # BUG FIX: when the config value was "0", max_limit had
            # already been assigned 0 before the raise, so the function
            # returned 0 instead of falling back to -1. Reset it here.
            max_limit = -1
            LOG.warning(
                _LW("Invalid value for pagination_max_limit: %s. It "
                    "should be an integer greater than 0"),
                cfg.CONF.pagination_max_limit)
    return max_limit
# Exemple #6
def get_provider_driver_class(driver, namespace=SERVICE_PROVIDERS):
    """Return path to provider driver class

    In order to keep backward compatibility with configs < Kilo, we need to
    translate driver class paths after advanced services split. This is done by
    defining old class path as entry point in neutron package.

    :param driver: configured driver path (possibly a pre-Kilo alias)
    :param namespace: stevedore entry-point namespace to look the alias up in
    :returns: the translated class path, or ``driver`` unchanged when no
        entry point matches
    """
    try:
        driver_manager = stevedore.driver.DriverManager(
            namespace, driver).driver
    # Idiom: the two clauses returned the same value; collapse them.
    # RuntimeError is what stevedore raises for an unknown entry point.
    except (ImportError, RuntimeError):
        return driver
    new_driver = "%s.%s" % (driver_manager.__module__,
                            driver_manager.__name__)
    LOG.warning(_LW(
        "The configured driver %(driver)s has been moved, automatically "
        "using %(new_driver)s instead. Please update your config files, "
        "as this automatic fixup will be removed in a future release."),
        {'driver': driver, 'new_driver': new_driver})
    return new_driver
# Exemple #7
 def call(self, ctxt, method, **kwargs):
     """Issue an RPC call with a per-method, adaptively increasing timeout.

     The timeout for ``method`` is read from the shared
     ``_METHOD_TIMEOUTS`` tracker before the call. On a
     ``MessagingTimeout`` the exception is re-raised (via
     ``save_and_reraise_exception``), but first the tracked timeout is
     doubled — capped at ten times ``rpc_response_timeout`` — and a
     random wait is performed so retrying callers back off.
     """
     # two methods with the same name in different namespaces should
     # be tracked independently
     if self._original_context.target.namespace:
         scoped_method = '%s.%s' % (self._original_context.target.namespace,
                                    method)
     else:
         scoped_method = method
     # set the timeout from the global method timeout tracker for this
     # method
     self._original_context.timeout = self._METHOD_TIMEOUTS[scoped_method]
     try:
         return self._original_context.call(ctxt, method, **kwargs)
     except oslo_messaging.MessagingTimeout:
         # The timeout is re-raised after this block; everything below is
         # backoff bookkeeping performed before propagating the error.
         with excutils.save_and_reraise_exception():
             # Random jitter (0..rpc_response_timeout) so a cluster of
             # agents does not retry in lock-step against a busy server.
             wait = random.uniform(0, TRANSPORT.conf.rpc_response_timeout)
             LOG.error(
                 _LE("Timeout in RPC method %(method)s. Waiting for "
                     "%(wait)s seconds before next attempt. If the "
                     "server is not down, consider increasing the "
                     "rpc_response_timeout option as Neutron "
                     "server(s) may be overloaded and unable to "
                     "respond quickly enough."), {
                         'wait': int(round(wait)),
                         'method': scoped_method
                     })
             # Exponential backoff of the tracked timeout, capped at
             # 10x the configured rpc_response_timeout.
             ceiling = TRANSPORT.conf.rpc_response_timeout * 10
             new_timeout = min(self._original_context.timeout * 2, ceiling)
             if new_timeout > self._METHOD_TIMEOUTS[scoped_method]:
                 LOG.warning(
                     _LW("Increasing timeout for %(method)s calls "
                         "to %(new)s seconds. Restart the agent to "
                         "restore it to the default value."), {
                             'method': scoped_method,
                             'new': new_timeout
                         })
                 self._METHOD_TIMEOUTS[scoped_method] = new_timeout
             time.sleep(wait)
def _build_subattr_match_rule(attr_name, attr, action, target):
    """Create the rule to match for sub-attribute policy checks."""
    # TODO(salv-orlando): Instead of relying on validator info, introduce
    # typing for API attributes
    # Expect a dict as type descriptor: look for a 'type:dict*' validator.
    validate = attr['validate']
    dict_keys = [spec for spec in validate if spec.startswith('type:dict')]
    if not dict_keys:
        LOG.warning(
            _LW("Unable to find data type descriptor "
                "for attribute %s"), attr_name)
        return
    descriptor = validate[dict_keys[0]]
    if not isinstance(descriptor, dict):
        LOG.debug(
            "Attribute type descriptor is not a dict. Unable to "
            "generate any sub-attr policy rule for %s.", attr_name)
        return
    # Build one RuleCheck per sub-attribute that the request actually
    # supplied, then AND them together.
    requested = target[attr_name]
    checks = []
    for sub_attr_name in descriptor:
        if sub_attr_name in requested:
            checks.append(policy.RuleCheck(
                'rule', '%s:%s:%s' % (action, attr_name, sub_attr_name)))
    return policy.AndCheck(checks)
 def register_resource(self, resource):
     """Register *resource* by name, wiring DB events for tracked ones.

     A duplicate registration is logged as a warning but still replaces
     the previously registered resource.
     """
     name = resource.name
     if name in self._resources:
         LOG.warning(_LW('%s is already registered'), name)
     if name in self._tracked_resource_mappings:
         resource.register_events()
     self._resources[name] = resource