Example #1
def monkey_patch():
    """If the CONF.monkey_patch set as True,
    this function patches a decorator
    for all functions in specified modules.
    You can set decorators for each modules
    using CONF.monkey_patch_modules.
    The format is "Module path:Decorator function".
    Parameters of the decorator is as follows.

    name - name of the function
    function - object of the function
    """
    # If CONF.monkey_patch is not True, this function do nothing.
    if not CONF.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in CONF.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = importutils.import_class("%s.%s" % (module, key))
                for method, func in inspect.getmembers(clz, inspect.ismethod):
                    setattr(clz, method,
                            decorator("%s.%s.%s" % (module, key, method), func))
            # set the decorator for the function
            if isinstance(module_data[key], pyclbr.Function):
                func = importutils.import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,
                        decorator("%s.%s" % (module, key), func))
Example #2
 def setUp(self):
     super(OVSAgentTestBase, self).setUp()
     conn_patcher = mock.patch("neutron.agent.ovsdb.native.connection.Connection.start")
     conn_patcher.start()
     self.addCleanup(conn_patcher.stop)
     self.br_int_cls = importutils.import_class(self._BR_INT_CLASS)
     self.br_phys_cls = importutils.import_class(self._BR_PHYS_CLASS)
     self.br_tun_cls = importutils.import_class(self._BR_TUN_CLASS)
Example #3
 def test_datasource_update_method_push(self):
     drivers = {driver: utils.import_class(self.conf[driver].driver)
                for driver in self.conf.datasources.types}
     push_drivers = ListenerService._get_push_drivers(
         drivers=drivers, conf=self.conf)
     self.assertSequenceEqual(set(push_drivers), {utils.import_class(
         self.conf[NOVA_INSTANCE_DATASOURCE].driver), utils.import_class(
         self.conf[ZABBIX_DATASOURCE_PUSH].driver)})
Example #4
    def __init__(self):
        mysql_app = importutils.import_class(MYSQL_APP)
        mysql_app_status = importutils.import_class(MYSQL_APP_STATUS)
        mysql_admin = importutils.import_class(MYSQL_ADMIN)

        super(Manager, self).__init__(mysql_app, mysql_app_status,
                                      mysql_admin, REPLICATION_STRATEGY,
                                      REPLICATION_NAMESPACE,
                                      REPLICATION_STRATEGY_CLASS, MANAGER)
Example #5
 def __init__(self, *args, **kwargs):
     super(ContainerShareDriver, self).__init__([True], *args, **kwargs)
     self.configuration.append_config_values(container_opts)
     self.backend_name = self.configuration.safe_get(
         "share_backend_name") or "Docker"
     self.container = importutils.import_class(
         self.configuration.container_helper)(
             configuration=self.configuration)
     self.storage = importutils.import_class(
         self.configuration.container_storage_helper)(
             configuration=self.configuration)
     self._helpers = {}
Example #6
def monkey_patch():
    """Patch decorator.

    If CONF.monkey_patch is set to True,
    this function patches a decorator
    onto all functions in the specified modules.
    You can set decorators for each module
    using CONF.monkey_patch_modules.
    The format is "Module path:Decorator function".
    Example: 'manila.api.ec2.cloud:' \
     'manila.openstack.common.notifier.api.notify_decorator'

    The parameters of the decorator are as follows.
    (See manila.openstack.common.notifier.api.notify_decorator)

    name - name of the function
    function - object of the function
    """
    # If CONF.monkey_patch is not True, this function does nothing.
    if not CONF.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in CONF.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = importutils.import_class("%s.%s" % (module, key))
                # NOTE(vponomaryov): we need to distinguish class method types
                # for py2 and py3, because the concept of 'unbound methods'
                # has been removed from Python 3.x
                if six.PY3:
                    member_type = inspect.isfunction
                else:
                    member_type = inspect.ismethod
                for method, func in inspect.getmembers(clz, member_type):
                    setattr(
                        clz, method,
                        decorator("%s.%s.%s" % (module, key, method), func))
            # set the decorator for the function
            if isinstance(module_data[key], pyclbr.Function):
                func = importutils.import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,
                        decorator("%s.%s" % (module, key), func))
Example #7
    def _prepare_calls(raw_calls):
        """Prepares delayed calls for invocation.

        After delayed calls are selected from the DB they still need to be
        prepared for further use: we need to build the final target methods
        and deserialize arguments, if needed.

        :param raw_calls: Delayed calls fetched from DB (DB models).
        :return: A list of tuples (target_auth_context, target_method,
         method_args) where all data is properly deserialized.
        """

        result = []

        for call in raw_calls:
            LOG.debug(
                'Preparing next delayed call. '
                '[ID=%s, factory_method_path=%s, target_method_name=%s, '
                'method_arguments=%s]', call.id, call.factory_method_path,
                call.target_method_name, call.method_arguments
            )

            target_auth_context = copy.deepcopy(call.auth_context)

            if call.factory_method_path:
                factory = importutils.import_class(call.factory_method_path)

                target_method = getattr(factory(), call.target_method_name)
            else:
                target_method = importutils.import_class(
                    call.target_method_name
                )

            method_args = copy.deepcopy(call.method_arguments)

            if call.serializers:
                # Deserialize arguments.
                for arg_name, ser_path in call.serializers.items():
                    serializer = importutils.import_class(ser_path)()

                    deserialized = serializer.deserialize(
                        method_args[arg_name]
                    )

                    method_args[arg_name] = deserialized

            result.append((target_auth_context, target_method, method_args))

        return result
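
The serializer classes referenced here are resolved by dotted path, instantiated with no arguments, and asked to deserialize each stored argument. A rough sketch of that contract follows, assuming a simple JSON representation; Mistral's real serializers may differ.

import json


class DictSerializer(object):
    """Illustrative serializer with the serialize/deserialize pair
    that code like _prepare_calls() expects."""

    def serialize(self, entity):
        # Convert a Python object into a DB-storable representation.
        return json.dumps(entity)

    def deserialize(self, data):
        # Restore the original object from the stored representation.
        return json.loads(data)
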
Example #8
 def check_for_setup_error(self, *args, **kwargs):
     host_id = self.configuration.safe_get("neutron_host_id")
     neutron_class = importutils.import_class(
         'manila.network.neutron.neutron_network_plugin.'
         'NeutronNetworkPlugin'
     )
     actual_class = importutils.import_class(
         self.configuration.safe_get("network_api_class"))
     if host_id is None and issubclass(actual_class, neutron_class):
         msg = _("%s requires neutron_host_id to be "
                 "specified.") % neutron_class
         raise exception.ManilaException(msg)
     elif host_id is None:
         LOG.warning(_LW("neutron_host_id is not specified. This driver "
                         "might not work as expected without it."))
Example #9
    def __init__(self, *args, **kwargs):
        """Initialize the driver."""

        super(IBMStorageDriver, self).__init__(*args, **kwargs)

        self.configuration.append_config_values(driver_opts)

        proxy = importutils.import_class(self.configuration.proxy)

        active_backend_id = kwargs.get('active_backend_id', None)

        # Driver additional flags should be specified in the cinder.conf
        # preferably in each backend configuration.

        self.proxy = proxy(
            {
                "user": self.configuration.san_login,
                "password": self.configuration.san_password,
                "address": self.configuration.san_ip,
                "vol_pool": self.configuration.san_clustername,
                "connection_type": self.configuration.connection_type,
                "chap": self.configuration.chap,
                "management_ips": self.configuration.management_ips
            },
            LOG,
            exception,
            driver=self,
            active_backend_id=active_backend_id)
Example #10
 def __init__(self, plugin):
     self._resource_name = RESOURCE_NAME
     self._plugin = plugin
     self._driver = importutils.import_class(
         cfg.CONF.QUOTAS.quota_driver
     )
     self._update_extended_attributes = True
Example #11
 def __init__(self, virtapi):
     super(DockerDriver, self).__init__(virtapi)
     self._docker = None
     vif_class = importutils.import_class(CONF.docker.vif_driver)
     self.vif_driver = vif_class()
     self.firewall_driver = firewall.load_driver(
         default='nova.virt.firewall.NoopFirewallDriver')
Example #12
    def __init__(self, service_instance_manager):
        self.get_config_option = service_instance_manager.get_config_option
        self.vif_driver = importutils.import_class(
            self.get_config_option("interface_driver"))()

        if service_instance_manager.driver_config:
            self._network_config_group = (
                service_instance_manager.driver_config.network_config_group or
                service_instance_manager.driver_config.config_group)
        else:
            self._network_config_group = None

        self.use_admin_port = False
        self.use_service_network = True
        self._neutron_api = None
        self._service_network_id = None
        self.connect_share_server_to_tenant_network = (
            self.get_config_option('connect_share_server_to_tenant_network'))

        self.admin_network_id = self.get_config_option('admin_network_id')
        self.admin_subnet_id = self.get_config_option('admin_subnet_id')

        if self.admin_network_id and self.admin_subnet_id:
            self.use_admin_port = True
        if self.use_admin_port and self.connect_share_server_to_tenant_network:
            self.use_service_network = False
Example #13
    def __init__(self, app, conf, **local_conf):
        # Determine the context class to use
        self.ctxcls = RequestContext
        if 'context_class' in local_conf:
            self.ctxcls = importutils.import_class(local_conf['context_class'])

        super(ContextMiddleware, self).__init__(app)
Example #14
    def __init__(self, application, limits=None, limiter=None, **kwargs):
        """Initialize new `RateLimitingMiddleware`.

        `RateLimitingMiddleware` wraps the given WSGI application and
        sets up the given limits.

        @param application: WSGI application to wrap
        @param limits: String describing limits
        @param limiter: String identifying class for representing limits

        Other parameters are passed to the constructor for the limiter.
        """
        base_wsgi.Middleware.__init__(self, application)

        # Select the limiter class
        if limiter is None:
            limiter = Limiter
        else:
            limiter = importutils.import_class(limiter)

        # Parse the limits, if any are provided
        if limits is not None:
            limits = limiter.parse_limits(limits)

        self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
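
Note that `limiter` may be passed either as None (falling back to the local Limiter class) or as a dotted class path that importutils resolves. A hedged wiring sketch; the WSGI application and the class path are placeholders.

# Hypothetical wiring: the middleware imports the named limiter class
# instead of using the default Limiter.
middleware = RateLimitingMiddleware(
    my_wsgi_app,                               # placeholder WSGI app
    limiter="mypackage.limits.CustomLimiter",  # hypothetical dotted path
)
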
Example #15
 def _get_response_code(self, req):
     req_method = req.environ['REQUEST_METHOD']
     controller = importutils.import_class('keystone.common.controller')
     code = None
     if isinstance(self, controller.V3Controller) and req_method == 'POST':
         code = (201, 'Created')
     return code
Example #16
 def start(self):
     LOG.debug("IPABackend start")
     self.request = requests.Session()
     authclassname = cfg.CONF[self.name].ipa_auth_driver_class
     authclass = importutils.import_class(authclassname)
     self.request.auth = authclass(cfg.CONF[self.name].ipa_client_keytab, cfg.CONF[self.name].ipa_host)
     ipa_base_url = cfg.CONF[self.name].ipa_base_url
     if ipa_base_url.startswith("http"):  # full URL
         self.baseurl = ipa_base_url
     else:  # assume relative to https://host[:port]
         self.baseurl = "https://" + cfg.CONF[self.name].ipa_host
         ipa_port = cfg.CONF[self.name].ipa_port
         if ipa_port != IPA_DEFAULT_PORT:
             self.baseurl += ":" + str(ipa_port)
         self.baseurl += ipa_base_url
     ipa_json_url = cfg.CONF[self.name].ipa_json_url
     if ipa_json_url.startswith("http"):  # full URL
         self.jsonurl = ipa_json_url
     else:  # assume relative to https://host[:port]
         self.jsonurl = self.baseurl + ipa_json_url
     xtra_hdrs = {"Content-Type": "application/json", "Referer": self.baseurl}
     self.request.headers.update(xtra_hdrs)
     self.request.verify = cfg.CONF[self.name].ipa_ca_cert
     self.ntries = cfg.CONF[self.name].ipa_connect_retries
     self.force = cfg.CONF[self.name].ipa_force_ns_use
Example #17
 def __init__(self, host=None, conf=None):
     super(DhcpAgent, self).__init__(host=host)
     self.needs_resync_reasons = collections.defaultdict(list)
     self.dhcp_ready_ports = set()
     self.conf = conf or cfg.CONF
      # If 'resync_throttle' is mistakenly configured to be greater than
      # 'resync_interval', raise an exception and log a message.
     if self.conf.resync_throttle > self.conf.resync_interval:
         msg = _("DHCP agent must have resync_throttle <= resync_interval")
         LOG.exception(msg)
         raise exceptions.InvalidConfigurationOption(
             opt_name='resync_throttle',
             opt_value=self.conf.resync_throttle)
     self._periodic_resync_event = threading.Event()
     self.cache = NetworkCache()
     self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
     self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, self.conf.host)
     # create dhcp dir to store dhcp info
     dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
     fileutils.ensure_tree(dhcp_dir, mode=0o755)
     self.dhcp_version = self.dhcp_driver_cls.check_version()
     self._populate_networks_cache()
     # keep track of mappings between networks and routers for
     # metadata processing
     self._metadata_routers = {}  # {network_id: router_id}
     self._process_monitor = external_process.ProcessMonitor(
         config=self.conf,
         resource_type='dhcp')
     self._pool_size = DHCP_PROCESS_GREENLET_MIN
     self._pool = eventlet.GreenPool(size=self._pool_size)
     self._queue = queue.ResourceProcessingQueue()
Example #18
    def setUp(self):
        super(NeutronPolicyTestCase, self).setUp()
        policy.refresh()
        # Add Fake resources to RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCES)
        self.rules = dict((k, common_policy.parse_rule(v)) for k, v in {
            "context_is_admin": "role:admin",
            "context_is_advsvc": "role:advsvc",
            "admin_or_network_owner": "rule:context_is_admin or "
                                      "tenant_id:%(network:tenant_id)s",
            "admin_or_owner": ("rule:context_is_admin or "
                               "tenant_id:%(tenant_id)s"),
            "admin_only": "rule:context_is_admin",
            "regular_user": "******",
            "shared": "field:networks:shared=True",
            "external": "field:networks:router:external=True",
            "default": '@',

            "create_network": "rule:admin_or_owner",
            "create_network:shared": "rule:admin_only",
            "update_network": '@',
            "update_network:shared": "rule:admin_only",
            "get_network": "rule:admin_or_owner or rule:shared or "
                           "rule:external or rule:context_is_advsvc",
            "create_subnet": "rule:admin_or_network_owner",
            "create_port:mac": "rule:admin_or_network_owner or "
                               "rule:context_is_advsvc",
            "update_port": "rule:admin_or_owner or rule:context_is_advsvc",
            "get_port": "rule:admin_or_owner or rule:context_is_advsvc",
            "delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
            "create_fake_resource": "rule:admin_or_owner",
            "create_fake_resource:attr": "rule:admin_or_owner",
            "create_fake_resource:attr:sub_attr_1": "rule:admin_or_owner",
            "create_fake_resource:attr:sub_attr_2": "rule:admin_only",

            "create_fake_policy:": "rule:admin_or_owner",
            "get_firewall_policy": "rule:admin_or_owner or "
                            "rule:shared",
            "get_firewall_rule": "rule:admin_or_owner or "
                            "rule:shared",

            "insert_rule": "rule:admin_or_owner",
            "remove_rule": "rule:admin_or_owner",
        }.items())

        def remove_fake_resource():
            del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME]

        self.patcher = mock.patch.object(neutron.policy,
                                         'init',
                                         new=self.fakepolicyinit)
        self.patcher.start()
        self.addCleanup(remove_fake_resource)
        self.context = context.Context('fake', 'fake', roles=['user'])
        plugin_klass = importutils.import_class(
            "neutron.db.db_base_plugin_v2.NeutronDbPluginV2")
        self.manager_patcher = mock.patch('neutron.manager.NeutronManager')
        fake_manager = self.manager_patcher.start()
        fake_manager_instance = fake_manager.return_value
        fake_manager_instance.plugin = plugin_klass()
Example #19
def schedule_call(factory_method_path, target_method_name,
                  run_after, serializers=None, **method_args):
    """Schedules call and lately invokes target_method.

    Add this call specification to DB, and then after run_after
    seconds service CallScheduler invokes the target_method.

    :param factory_method_path: Full python-specific path to
    factory method for target object construction.
    :param target_method_name: Name of target object method which
    will be invoked.
    :param run_after: Value in seconds.
    param serializers: map of argument names and their serializer class paths.
     Use when an argument is an object of specific type, and needs to be
      serialized. Example:
      { "result": "mistral.utils.serializer.ResultSerializer"}
      Serializer for the object type must implement serializer interface
       in mistral/utils/serializer.py
    :param method_args: Target method keyword arguments.
    """
    ctx_serializer = context.RpcContextSerializer(
        context.JsonPayloadSerializer()
    )

    ctx = (
        ctx_serializer.serialize_context(context.ctx())
        if context.has_ctx() else {}
    )

    execution_time = (datetime.datetime.now() +
                      datetime.timedelta(seconds=run_after))

    if serializers:
        for arg_name, serializer_path in serializers.items():
            if arg_name not in method_args:
                raise exc.MistralException(
                    "Serializable method argument %s"
                    " not found in method_args=%s"
                    % (arg_name, method_args))
            try:
                serializer = importutils.import_class(serializer_path)()
            except ImportError as e:
                raise ImportError("Cannot import class %s: %s"
                                  % (serializer_path, e))

            method_args[arg_name] = serializer.serialize(
                method_args[arg_name]
            )

    values = {
        'factory_method_path': factory_method_path,
        'target_method_name': target_method_name,
        'execution_time': execution_time,
        'auth_context': ctx,
        'serializers': serializers,
        'method_arguments': method_args,
        'processing': False
    }

    db_api.create_delayed_call(values)
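
A hedged sketch of scheduling such a call, matching the parameter descriptions in the docstring; every dotted path and argument below is a placeholder.

# Hypothetical invocation: run SomeFactory().do_work(result=...) roughly
# 30 seconds from now, serializing the 'result' argument with the named
# serializer class.
schedule_call(
    'mypackage.factories.SomeFactory',          # factory_method_path
    'do_work',                                  # target_method_name
    run_after=30,
    serializers={'result': 'mypackage.utils.ResultSerializer'},
    result=my_result_object,                    # **method_args
)
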
Example #20
 def _get_response_code(self, req):
     req_method = req.environ["REQUEST_METHOD"]
     controller = importutils.import_class("keystone.common.controller")
     code = None
     if isinstance(self, controller.V3Controller) and req_method == "POST":
         code = (201, "Created")
     return code
Example #21
 def _get_connector(self):
     connector = cinder_conf.volume_connector
     if not connector or connector not in volume_connector_conf:
         msg = _LE("Must provide an valid volume connector")
         LOG.error(msg)
         raise exceptions.FuxiException(msg)
     return importutils.import_class(volume_connector_conf[connector])()
Example #22
    def load_extension(self, ext_factory):
        """Execute an extension factory.

        Loads an extension.  The 'ext_factory' is the name of a
        callable that will be imported and called with one
        argument--the extension manager.  The factory callable is
        expected to call the register() method at least once.
        """

        LOG.debug("Loading extension %s", ext_factory)

        if isinstance(ext_factory, six.string_types):
            if ext_factory.startswith('nova.api.openstack.compute.contrib'):
                LOG.warning(_LW("The legacy v2 API module already moved into"
                             "'nova.api.openstack.compute.legacy_v2.contrib'. "
                             "Use new path instead of old path %s"),
                         ext_factory)
                ext_factory = ext_factory.replace('contrib',
                                                  'legacy_v2.contrib')
            # Load the factory
            factory = importutils.import_class(ext_factory)
        else:
            factory = ext_factory

        # Call it
        LOG.debug("Calling extension factory %s", ext_factory)
        factory(self)
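
The docstring says ext_factory names a callable that is imported, called with the extension manager, and expected to call register() at least once. A minimal sketch of such a factory; MyExtension and its module are hypothetical.

def my_extension_factory(ext_mgr):
    # MyExtension stands in for a real extension class understood by the
    # manager; load_extension() would import this callable by dotted name.
    ext_mgr.register(MyExtension(ext_mgr))
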
Example #23
    def __init__(self, *args, **kwargs):
        """Do initialization."""

        LOG.debug("Invoking base constructor for Manila HDS HNAS Driver.")
        super(HDSHNASDriver, self).__init__(False, *args, **kwargs)

        LOG.debug("Setting up attributes for Manila HDS HNAS Driver.")
        self.configuration.append_config_values(hds_hnas_opts)

        LOG.debug("Reading config parameters for Manila HDS HNAS Driver.")
        self.backend_name = self.configuration.safe_get('share_backend_name')
        hnas_helper = self.configuration.safe_get('hds_hnas_driver_helper')
        hnas_ip = self.configuration.safe_get('hds_hnas_ip')
        hnas_username = self.configuration.safe_get('hds_hnas_user')
        hnas_password = self.configuration.safe_get('hds_hnas_password')
        hnas_evs_id = self.configuration.safe_get('hds_hnas_evs_id')
        self.hnas_evs_ip = self.configuration.safe_get('hds_hnas_evs_ip')
        self.fs_name = self.configuration.safe_get('hds_hnas_file_system_name')
        ssh_private_key = self.configuration.safe_get(
            'hds_hnas_ssh_private_key')
        cluster_admin_ip0 = self.configuration.safe_get(
            'hds_hnas_cluster_admin_ip0')
        self.private_storage = kwargs.get('private_storage')
        job_timeout = self.configuration.safe_get(
            'hds_hnas_stalled_job_timeout')

        if hnas_helper is None:
            msg = _("The config parameter hds_hnas_driver_helper is not set.")
            raise exception.InvalidParameterValue(err=msg)

        if hnas_evs_id is None:
            msg = _("The config parameter hds_hnas_evs_id is not set.")
            raise exception.InvalidParameterValue(err=msg)

        if self.hnas_evs_ip is None:
            msg = _("The config parameter hds_hnas_evs_ip is not set.")
            raise exception.InvalidParameterValue(err=msg)

        if hnas_ip is None:
            msg = _("The config parameter hds_hnas_ip is not set.")
            raise exception.InvalidParameterValue(err=msg)

        if hnas_username is None:
            msg = _("The config parameter hds_hnas_user is not set.")
            raise exception.InvalidParameterValue(err=msg)

        if hnas_password is None and ssh_private_key is None:
            msg = _("Credentials configuration parameters missing: "
                    "you need to set hds_hnas_password or "
                    "hds_hnas_ssh_private_key.")
            raise exception.InvalidParameterValue(err=msg)

        LOG.debug("Initializing HNAS Layer.")

        helper = importutils.import_class(hnas_helper)

        self.hnas = helper(hnas_ip, hnas_username, hnas_password,
                           ssh_private_key, cluster_admin_ip0,
                           hnas_evs_id, self.hnas_evs_ip, self.fs_name,
                           job_timeout)
Example #24
    def __init__(self, storage=None, *args, **kwargs):
        """Init method.

        :param storage: None or inheritor of StorageDriver abstract class
        :param config_group: Optional -- Config group used for loading settings
        :param context: Optional -- Current context
        :param backend_host: Optional -- Driver host
        """

        config_group_name = kwargs.get('config_group')
        CONF.register_opts(private_data_opts, group=config_group_name)

        if storage is not None:
            self._storage = storage
        elif 'context' in kwargs and 'backend_host' in kwargs:
            if config_group_name:
                conf = getattr(CONF, config_group_name)
            else:
                conf = CONF
            storage_class = conf.drivers_private_storage_class
            cls = importutils.import_class(storage_class)
            self._storage = cls(kwargs.get('context'),
                                kwargs.get('backend_host'))
        else:
            msg = _("You should provide 'storage' parameter or"
                    " 'context' and 'backend_host' parameters.")
            raise ValueError(msg)
Example #25
    def new_task_executor(self, context):
        try:
            # NOTE(flaper87): Backwards compatibility layer.
            # It'll allow us to provide a deprecation path to
            # users that are currently consuming the `eventlet`
            # executor.
            task_executor = CONF.task.task_executor
            if task_executor == 'eventlet':
                # NOTE(jokke): Making sure we do not log the deprecation
                # warning 1000 times or anything crazy like that.
                if not TaskExecutorFactory.eventlet_deprecation_warned:
                    msg = _LW("The `eventlet` executor has been deprecated. "
                              "Use `taskflow` instead.")
                    LOG.warn(msg)
                    TaskExecutorFactory.eventlet_deprecation_warned = True
                task_executor = 'taskflow'

            executor_cls = ('glance.async_.%s_executor.'
                            'TaskExecutor' % task_executor)
            LOG.debug("Loading %s executor", task_executor)
            executor = importutils.import_class(executor_cls)
            return executor(context,
                            self.task_repo,
                            self.image_repo,
                            self.image_factory)
        except ImportError:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to load the %s executor provided "
                                  "in the config.") % CONF.task.task_executor)
Example #26
    def __init__(self, *args, **kwargs):
        """Initialize the driver."""

        super(XIVDS8KDriver, self).__init__(*args, **kwargs)

        self.configuration.append_config_values(xiv_ds8k_opts)

        proxy = importutils.import_class(self.configuration.xiv_ds8k_proxy)

        active_backend_id = kwargs.get('active_backend_id', None)

        # NOTE: All Array specific configurations are prefixed with:
        # "xiv_ds8k_array_"
        # These additional flags should be specified in the cinder.conf
        # preferably in each backend configuration.

        self.xiv_ds8k_proxy = proxy(
            {
                "xiv_ds8k_user": self.configuration.san_login,
                "xiv_ds8k_pass": self.configuration.san_password,
                "xiv_ds8k_address": self.configuration.san_ip,
                "xiv_ds8k_vol_pool": self.configuration.san_clustername,
                "xiv_ds8k_connection_type":
                self.configuration.xiv_ds8k_connection_type,
                "xiv_chap": self.configuration.xiv_chap,
                "management_ips": self.configuration.management_ips
            },
            LOG,
            exception,
            driver=self,
            active_backend_id=active_backend_id)
Example #27
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, *args, **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
Example #28
def load_class_by_alias_or_classname(namespace, name):
    """Load class using stevedore alias or the class name
    :param namespace: namespace where the alias is defined
    :param name: alias or class name of the class to be loaded
    :returns: class if it can be loaded
    :raises ImportError: if the class cannot be loaded
    """

    if not name:
        LOG.error(_LE("Alias or class name is not set"))
        raise ImportError(_("Class not found."))
    try:
        # Try to resolve class by alias
        mgr = driver.DriverManager(namespace, name)
        class_to_load = mgr.driver
    except RuntimeError:
        e1_info = sys.exc_info()
        # Fallback to class name
        try:
            class_to_load = importutils.import_class(name)
        except (ImportError, ValueError):
            LOG.error(_LE("Error loading class by alias"),
                      exc_info=e1_info)
            LOG.error(_LE("Error loading class by class name"),
                      exc_info=True)
            raise ImportError(_("Class not found."))
    return class_to_load
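
A hedged usage sketch: the name is first treated as a stevedore alias in the given namespace and only on failure as a dotted class path. The namespace, alias, and class path below are placeholders.

# Either call could resolve to the same class: the first via an
# entry-point alias registered under the namespace, the second via import.
cls = load_class_by_alias_or_classname(
    'mypackage.interface_drivers', 'openvswitch')
cls = load_class_by_alias_or_classname(
    'mypackage.interface_drivers', 'mypackage.drivers.OVSInterfaceDriver')
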
Example #29
    def do_request(self, method, **kwargs):
        """
        Simple do_request override. This method serializes
        the outgoing body and builds the command that will
        be sent.

        :param method: The remote python method to call
        :param kwargs: Dynamic parameters that will be
            passed to the remote method.
        """
        content = self.bulk_request([{'command': method,
                                      'kwargs': kwargs}])

        # NOTE(flaper87): Return the first result if
        # a single command was executed.
        content = content[0]

        # NOTE(flaper87): Check if content is an error
        # and re-raise it if raise_exc is True. Before
        # checking if content contains the '_error' key,
        # verify if it is an instance of dict - since the
        # RPC call may have returned something different.
        if self.raise_exc and (isinstance(content, dict)
                               and '_error' in content):
            error = content['_error']
            try:
                exc_cls = imp.import_class(error['cls'])
                raise exc_cls(error['val'])
            except ImportError:
                # NOTE(flaper87): The exception
                # class couldn't be imported, using
                # a generic exception.
                raise exception.RPCError(**error)
        return content
Example #30
    def _apply_region_proxy(self, proxy_list):
        if isinstance(proxy_list, list):
            proxies = []

            for item in proxy_list:
                if isinstance(item, str):
                    LOG.debug('Importing class %s as KVS proxy.', item)
                    pxy = importutils.import_class(item)
                else:
                    pxy = item

                if issubclass(pxy, proxy.ProxyBackend):
                    proxies.append(pxy)
                else:
                    pxy_cls_name = reflection.get_class_name(
                        pxy, fully_qualified=False)
                    LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
                                pxy_cls_name)

            for proxy_cls in reversed(proxies):
                proxy_cls_name = reflection.get_class_name(
                    proxy_cls, fully_qualified=False)
                LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'),
                         {'proxy': proxy_cls_name,
                          'name': self._region.name})
                self._region.wrap(proxy_cls)
Example #31
 def __init__(self, host=None):
     super(DhcpAgent, self).__init__(host=host)
     self.needs_resync_reasons = collections.defaultdict(list)
     self.conf = cfg.CONF
     self.cache = NetworkCache()
     self.root_helper = config.get_root_helper(self.conf)
     self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
     ctx = context.get_admin_context_without_session()
     self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, ctx,
                                     self.conf.use_namespaces)
     # create dhcp dir to store dhcp info
     dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
     if not os.path.isdir(dhcp_dir):
         os.makedirs(dhcp_dir, 0o755)
     self.dhcp_version = self.dhcp_driver_cls.check_version()
     self._populate_networks_cache()
     self._process_monitor = external_process.ProcessMonitor(
         config=self.conf,
         root_helper=self.root_helper,
         resource_type='dhcp')
Example #32
    def _get_manager(self):
        """Initialize a Manager object appropriate for this service.

        Use the service name to look up a Manager subclass from the
        configuration and initialize an instance. If no class name
        is configured, just return None.

        :returns: a Manager instance, or None.

        """
        fl = '%s_manager' % self.name
        if fl not in CONF:
            return None

        manager_class_name = CONF.get(fl, None)
        if not manager_class_name:
            return None

        manager_class = importutils.import_class(manager_class_name)
        return manager_class()
Example #33
    def get_class(api_name, version, version_map):
        """Returns the client class for the requested API version

        :param api_name: the name of the API, e.g. 'compute', 'image', etc
        :param version: the requested API version
        :param version_map: a dict of client classes keyed by version
        :rtype: a client class for the requested API version
        """
        try:
            client_path = version_map[str(version)]
        except (KeyError, ValueError):
            msg = _("Invalid %(api_name)s client version '%(version)s'. "
                    "Must be one of: %(version_map)s") % {
                        'api_name': api_name,
                        'version': version,
                        'version_map': ', '.join(version_map.keys())
                    }
            raise exceptions.UnsupportedVersion(msg)

        return importutils.import_class(client_path)
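
A hedged sketch of the version_map shape this helper expects: a dict keyed by version string whose values are dotted client class paths. The entries and constructor arguments are placeholders.

# Illustrative version map and lookup.
VERSION_MAP = {
    '1': 'mypackage.v1.client.Client',
    '2': 'mypackage.v2.client.Client',
}

client_cls = get_class('compute', '2', VERSION_MAP)
client = client_cls()   # real clients usually take session/auth arguments
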
Example #34
    def load_extension(self, ext_factory):
        """Execute an extension factory.

        Loads an extension.  The 'ext_factory' is the name of a
        callable that will be imported and called with one
        argument--the extension manager.  The factory callable is
        expected to call the register() method at least once.
        """

        LOG.debug("Loading extension %s", ext_factory)

        if isinstance(ext_factory, six.string_types):
            # Load the factory
            factory = importutils.import_class(ext_factory)
        else:
            factory = ext_factory

        # Call it
        LOG.debug("Calling extension factory %s", ext_factory)
        factory(self)
Example #35
def load_driver(plugin, ofc_manager):

    if (PROVIDER_OPENFLOW in ROUTER_DRIVER_MAP
            and not ofc_manager.driver.router_supported):
        LOG.warning(
            _LW('OFC does not support router with provider=%(provider)s, '
                'so removed it from supported provider '
                '(new router driver map=%(driver_map)s)'), {
                    'provider': PROVIDER_OPENFLOW,
                    'driver_map': ROUTER_DRIVER_MAP
                })
        del ROUTER_DRIVER_MAP[PROVIDER_OPENFLOW]

    if cfg.CONF.PROVIDER.default_router_provider not in ROUTER_DRIVER_MAP:
        LOG.error(
            _LE('default_router_provider %(default)s is not supported! '
                'Please specify one of %(supported)s'), {
                    'default': cfg.CONF.PROVIDER.default_router_provider,
                    'supported': ROUTER_DRIVER_MAP.keys()
                })
        raise SystemExit(1)

    enabled_providers = (set(cfg.CONF.PROVIDER.router_providers +
                             [cfg.CONF.PROVIDER.default_router_provider])
                         & set(ROUTER_DRIVER_MAP.keys()))

    for driver in enabled_providers:
        driver_klass = importutils.import_class(ROUTER_DRIVER_MAP[driver])
        ROUTER_DRIVERS[driver] = driver_klass(plugin, ofc_manager)

    LOG.info(_LI('Enabled router drivers: %s'), ROUTER_DRIVERS.keys())

    if not ROUTER_DRIVERS:
        LOG.error(
            _LE('No router provider is enabled. neutron-server '
                'terminated! (supported=%(supported)s, '
                'configured=%(config)s)'), {
                    'supported': ROUTER_DRIVER_MAP.keys(),
                    'config': cfg.CONF.PROVIDER.router_providers
                })
        raise SystemExit(1)
Example #36
    def generate_scenario(description):
        """Generates the test scenario list for a given description.

        :param description: A file or dictionary with the following entries:
            name (required) name for the api
            http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources: (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.
        """
        LOG.debug(description)
        generator = importutils.import_class(CONF.negative.test_generator)()
        generator.validate_schema(description)
        schema = description.get("json-schema", None)
        resources = description.get("resources", [])
        scenario_list = []
        expected_result = None
        for resource in resources:
            if isinstance(resource, dict):
                expected_result = resource['expected_result']
                resource = resource['name']
            LOG.debug("Add resource to test %s" % resource)
            scn_name = "inv_res_%s" % (resource)
            scenario_list.append((scn_name, {
                "resource": (resource, str(uuid.uuid4())),
                "expected_result": expected_result
            }))
        if schema is not None:
            for scenario in generator.generate_scenarios(schema):
                scenario_list.append((scenario['_negtest_name'], scenario))
        LOG.debug(scenario_list)
        return scenario_list
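
A hedged example of the description structure documented above (name, http-method, url, resources, json-schema); the values are invented for illustration.

description = {
    "name": "get-flavor-details",
    "http-method": "GET",
    "url": "flavors/%s",
    "resources": ["flavor"],
    "json-schema": {
        "type": "object",
        "properties": {
            "minRam": {"type": "integer"},
            "minDisk": {"type": "integer"},
        },
    },
}

scenario_list = generate_scenario(description)
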
Example #37
    def factory(protocol,
                root_helper,
                driver=None,
                use_multipath=False,
                device_scan_attempts=initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT,
                arch=None,
                *args,
                **kwargs):
        """Build a Connector object based upon protocol and architecture."""

        _mapping = get_connector_mapping(arch)

        LOG.debug("Factory for %(protocol)s on %(arch)s", {
            'protocol': protocol,
            'arch': arch
        })
        protocol = protocol.upper()

        # set any special kwargs needed by connectors
        if protocol in (initiator.NFS, initiator.GLUSTERFS, initiator.SCALITY,
                        initiator.QUOBYTE, initiator.VZSTORAGE):
            kwargs.update({'mount_type': protocol.lower()})
        elif protocol == initiator.ISER:
            kwargs.update({'transport': 'iser'})

        # now set all the default kwargs
        kwargs.update({
            'root_helper': root_helper,
            'driver': driver,
            'use_multipath': use_multipath,
            'device_scan_attempts': device_scan_attempts,
        })

        connector = _mapping.get(protocol)
        if not connector:
            msg = (_("Invalid InitiatorConnector protocol "
                     "specified %(protocol)s") % dict(protocol=protocol))
            raise exception.InvalidConnectorProtocol(msg)

        conn_cls = importutils.import_class(connector)
        return conn_cls(*args, **kwargs)
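
A hedged call sketch, assuming the factory above is exposed as a class-level method named InitiatorConnector.factory; the protocol value and root helper are placeholders, and the accepted keywords depend on the resolved connector class.

connector = InitiatorConnector.factory(
    'iscsi',                 # protocol; upper-cased before the lookup
    root_helper='sudo',      # placeholder privilege-escalation helper
    use_multipath=True,
)
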
Example #38
    def __init__(self, chassis_name):
        self.next_network_id = 0
        self.db_store = db_store.DbStore()
        self.chassis_name = chassis_name
        self.ip = cfg.CONF.df.local_ip
        self.tunnel_type = cfg.CONF.df.tunnel_type
        self.sync_finished = False
        nb_driver_class = importutils.import_class(cfg.CONF.df.nb_db_class)
        self.nb_api = api_nb.NbApi(nb_driver_class(),
                                   use_pubsub=cfg.CONF.df.enable_df_pub_sub)
        self.vswitch_api = ovsdb_vswitch_impl.OvsdbSwitchApi(
            self.ip, self.nb_api)
        kwargs = dict(nb_api=self.nb_api,
                      vswitch_api=self.vswitch_api,
                      db_store=self.db_store)
        app_mgr = AppManager.get_instance()
        self.open_flow_app = app_mgr.instantiate(RyuDFAdapter, **kwargs)

        self.topology = None
        self.enable_selective_topo_dist = \
            cfg.CONF.df.enable_selective_topology_distribution
Example #39
 def __init__(self, host, binary, topic, manager, report_interval=None,
              periodic_enable=None, periodic_fuzzy_delay=None,
              periodic_interval_max=None, db_allowed=True,
              *args, **kwargs):
     super(Service, self).__init__()
     self.host = host
     self.binary = binary
     self.topic = topic
     self.manager_class_name = manager
     self.servicegroup_api = servicegroup.API()
     manager_class = importutils.import_class(self.manager_class_name)
     self.manager = manager_class(host=self.host, *args, **kwargs)
     self.rpcserver = None
     self.report_interval = report_interval
     self.periodic_enable = periodic_enable
     self.periodic_fuzzy_delay = periodic_fuzzy_delay
     self.periodic_interval_max = periodic_interval_max
     self.saved_args, self.saved_kwargs = args, kwargs
     self.backdoor_port = None
     self.conductor_api = conductor.API(use_local=db_allowed)
     self.conductor_api.wait_until_ready(context.get_admin_context())
Example #40
def get_client_class(version):
    version = str(version)
    if version in DEPRECATED_VERSIONS:
        warnings.warn(
            _LW("Version %(deprecated_version)s is deprecated, using "
                "alternative version %(alternative)s instead.") % {
                    "deprecated_version": version,
                    "alternative": DEPRECATED_VERSIONS[version]
                })
        version = DEPRECATED_VERSIONS[version]
    try:
        return importutils.import_class("novaclient.v%s.client.Client" %
                                        version)
    except ImportError:
        available_versions = _get_available_client_versions()
        msg = _("Invalid client version '%(version)s'. must be one of: "
                "%(keys)s") % {
                    'version': version,
                    'keys': ', '.join(available_versions)
                }
        raise exceptions.UnsupportedVersion(msg)
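
A hedged usage note: the version argument may be an int or a string, and deprecated versions are remapped (with a warning) before the import; '1.1' appears here only as an example of such an alias.

client_cls = get_client_class(2)       # e.g. novaclient.v2.client.Client
legacy_cls = get_client_class('1.1')   # warns, then resolves the alternative
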
Example #41
def get_tenant_quotas(tenant_id, driver=None):
    if not driver:
        driver = importutils.import_class(cfg.CONF.QUOTAS.quota_driver)

    neutron_context = request.context.get('neutron_context')
    if tenant_id == 'tenant':
        # NOTE(salv-orlando): Read the following before the code in order
        # to avoid puking.
        # There is a weird undocumented behaviour of the Neutron quota API
        # as 'tenant' is used as an API action to return the identifier
        # of the tenant in the request context. This is used exclusively
        # for interaction with python-neutronclient and is a possibly
        # unnecessary 'whoami' API endpoint. Pending resolution of this
        # API issue, this controller will just treat the magic string
        # 'tenant' (and only that string) and return the response expected
        # by python-neutronclient
        return {'tenant': {'tenant_id': neutron_context.tenant_id}}
    tenant_quotas = driver.get_tenant_quotas(
        neutron_context, resource_registry.get_all_resources(), tenant_id)
    tenant_quotas['tenant_id'] = tenant_id
    return {RESOURCE_NAME: tenant_quotas}
Example #42
    def __init__(self,
                 host,
                 binary,
                 topic,
                 manager,
                 report_interval=None,
                 periodic_interval=None,
                 periodic_fuzzy_delay=None,
                 *args,
                 **kwargs):

        self.binary = binary
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=host, *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []
        super(Service, self).__init__(host, topic, manager=self.manager)
Example #43
 def __init__(self, *args, **kwargs):
     LOG.warning(_LW('The cells feature of Nova is considered experimental '
                     'by the OpenStack project because it receives much '
                     'less testing than the rest of Nova. This may change '
                     'in the future, but current deployers should be aware '
                     'that the use of it in production right now may be '
                     'risky. Also note that cells does not currently '
                     'support rolling upgrades, it is assumed that cells '
                     'deployments are upgraded lockstep so n-1 cells '
                     'compatibility does not work.'))
     # Mostly for tests.
     cell_state_manager = kwargs.pop('cell_state_manager', None)
     super(CellsManager, self).__init__(service_name='cells',
                                        *args, **kwargs)
     if cell_state_manager is None:
         cell_state_manager = cells_state.CellStateManager
     self.state_manager = cell_state_manager()
     self.msg_runner = messaging.MessageRunner(self.state_manager)
     cells_driver_cls = importutils.import_class(
             CONF.cells.driver)
     self.driver = cells_driver_cls()
     self.instances_to_heal = iter([])
Example #44
    def _apply_region_proxy(self, proxy_list):
        if isinstance(proxy_list, list):
            proxies = []

            for item in proxy_list:
                if isinstance(item, str):
                    LOG.debug('Importing class %s as KVS proxy.', item)
                    pxy = importutils.import_class(item)
                else:
                    pxy = item

                if issubclass(pxy, proxy.ProxyBackend):
                    proxies.append(pxy)
                else:
                    LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
                                pxy.__name__)

            for proxy_cls in reversed(proxies):
                LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'),
                         {'proxy': proxy_cls.__name__,
                          'name': self._region.name})
                self._region.wrap(proxy_cls)
Example #45
    def setUp(self):
        super(NeutronPolicyTestCase, self).setUp()
        policy.refresh()
        # Add Fake resources to RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCES)
        self._set_rules()

        def remove_fake_resource():
            del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME]

        self.patcher = mock.patch.object(neutron.policy,
                                         'init',
                                         new=self.fakepolicyinit)
        self.patcher.start()
        self.addCleanup(remove_fake_resource)
        self.context = context.Context('fake', 'fake', roles=['user'])
        plugin_klass = importutils.import_class(
            "neutron.db.db_base_plugin_v2.NeutronDbPluginV2")
        self.manager_patcher = mock.patch('neutron.manager.NeutronManager')
        fake_manager = self.manager_patcher.start()
        fake_manager_instance = fake_manager.return_value
        fake_manager_instance.plugin = plugin_klass()
Example #46
    def __init__(self, application, limits=None, limiter=None, **kwargs):
        """Initialize class, wrap WSGI app, and set up given limits.

        :param application: WSGI application to wrap
        :param limits: String describing limits
        :param limiter: String identifying class for representing limits

        Other parameters are passed to the constructor for the limiter.
        """
        base_wsgi.Middleware.__init__(self, application)

        # Select the limiter class
        if limiter is None:
            limiter = Limiter
        else:
            limiter = importutils.import_class(limiter)

        # Parse the limits, if any are provided
        if limits is not None:
            limits = limiter.parse_limits(limits)

        self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
Example #47
    def __init__(self,
                 host,
                 binary,
                 topic,
                 manager,
                 report_interval=None,
                 periodic_interval=None,
                 periodic_fuzzy_delay=None,
                 service_name=None,
                 *args,
                 **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        #modify by yanghc mantis:0000300 begin
        #manager_class = profiler.trace_cls("rpc")(manager_class)
        if CONF.profiler.profiler_enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)
        #modify by yanghc mantis:0000300 end

        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args,
                                     **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
Example #48
 def configure_driver(self):
     """
     Configure the driver for the cache and, if it fails to configure,
     fall back to using the SQLite driver which has no odd dependencies
     """
     try:
         self.driver = self.driver_class()
         self.driver.configure()
     except exception.BadDriverConfiguration as config_err:
         driver_module = self.driver_class.__module__
         LOG.warn(
             _LW("Image cache driver "
                 "'%(driver_module)s' failed to configure. "
                 "Got error: '%(config_err)s"), {
                     'driver_module': driver_module,
                     'config_err': config_err
                 })
         LOG.info(_LI("Defaulting to SQLite driver."))
         default_module = __name__ + '.drivers.sqlite.Driver'
         self.driver_class = importutils.import_class(default_module)
         self.driver = self.driver_class()
         self.driver.configure()
Example #49
    def _process_cmd(self, msgid, cmd, *args):
        """Executes the requested command in an execution thread.

        This executes a call within a thread executor and returns the results
        of the execution.

        :param msgid: The message identifier.
        :param cmd: The `Message` type indicating the command type.
        :param args: The function, args, and kwargs if a Message.CALL type.
        :return: A tuple of the return status, optional call output, and
                 optional error information.
        """
        if cmd == Message.PING:
            return (Message.PONG.value, )

        try:
            if cmd != Message.CALL:
                raise ProtocolError(_('Unknown privsep cmd: %s') % cmd)

            # Extract the callable and arguments
            name, f_args, f_kwargs = args
            func = importutils.import_class(name)
            if not self.context.is_entrypoint(func):
                msg = _('Invalid privsep function: %s not exported') % name
                raise NameError(msg)

            ret = func(*f_args, **f_kwargs)
            return (Message.RET.value, ret)
        except Exception as e:
            LOG.debug(
                'privsep: Exception during request[%(msgid)s]: '
                '%(err)s', {
                    'msgid': msgid,
                    'err': e
                },
                exc_info=True)
            cls = e.__class__
            cls_name = '%s.%s' % (cls.__module__, cls.__name__)
            return (Message.ERR.value, cls_name, e.args)
Example #50
    def _persist_job(job):
        ctx_serializer = context.RpcContextSerializer()

        ctx = (ctx_serializer.serialize_context(context.ctx())
               if context.has_ctx() else {})

        execute_at = (utils.utc_now_sec() +
                      datetime.timedelta(seconds=job.run_after))

        args = job.func_args
        arg_serializers = job.func_arg_serializers

        if arg_serializers:
            for arg_name, serializer_path in arg_serializers.items():
                if arg_name not in args:
                    raise exc.MistralException(
                        "Serializable function argument %s"
                        " not found in func_args=%s" % (arg_name, args))
                try:
                    serializer = importutils.import_class(serializer_path)()
                except ImportError as e:
                    raise ImportError("Cannot import class %s: %s" %
                                      (serializer_path, e))

                args[arg_name] = serializer.serialize(args[arg_name])

        values = {
            'run_after': job.run_after,
            'target_factory_func_name': job.target_factory_func_name,
            'func_name': job.func_name,
            'func_args': args,
            'func_arg_serializers': arg_serializers,
            'auth_ctx': ctx,
            'execute_at': execute_at,
            'captured_at': None,
            'key': job.key
        }

        return db_api.create_scheduled_job(values)
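
The scheduler code above instantiates each func_arg_serializers entry with import_class and only calls serialize() on it, so any importable class with a no-argument constructor and a serialize()/deserialize() pair would fit. A minimal sketch, with the module path purely illustrative:

import json


class DictJsonSerializer(object):
    """Illustrative argument serializer for _persist_job."""

    def serialize(self, value):
        # Turn the in-memory argument into a string that can be stored in the DB.
        return json.dumps(value)

    def deserialize(self, data):
        # Inverse operation, used when the job is later picked up again.
        return json.loads(data)


# A job could then declare, for example:
#     func_arg_serializers = {'payload': 'mypkg.serializers.DictJsonSerializer'}
# and _persist_job would store serializer.serialize(func_args['payload']).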
Example #51
0
    def get_matching_classes(self, loadable_class_names):
        """Get loadable classes from a list of names.

        Each name can be a full module path or the full path to a method that
        returns classes to use. The latter behavior is useful to specify a
        method that returns a list of classes to use in a default case.
        """

        classes = []
        for cls_name in loadable_class_names:
            obj = importutils.import_class(cls_name)
            if self._is_correct_class(obj):
                classes.append(obj)
            elif inspect.isfunction(obj):
                # Get list of classes from a function
                for cls in obj():
                    classes.append(cls)
            else:
                error_str = 'Not a class of the correct type'
                raise exception.ClassNotFound(class_name=cls_name,
                                              exception=error_str)
        return classes
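
As the docstring says, each name handed to get_matching_classes may point either at a class or at a function that returns classes. A sketch of the two forms, assuming _is_correct_class() checks for some loadable base class (all names below are illustrative):

# mypkg/loadables_demo.py -- illustrative module


class BaseFilter(object):
    """Stand-in for the base class _is_correct_class() would check for."""


class RamFilter(BaseFilter):
    pass


class DiskFilter(BaseFilter):
    pass


def all_filters():
    """Function form: return the classes to use in the default case."""
    return [RamFilter, DiskFilter]


# Both entries would then be accepted by get_matching_classes():
#     'mypkg.loadables_demo.RamFilter'    -> appended directly
#     'mypkg.loadables_demo.all_filters'  -> every class the function returns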
Example #52
0
 def __init__(self, host, binary, topic, manager, report_interval=None,
              periodic_enable=None, periodic_fuzzy_delay=None,
              periodic_interval_max=None, *args, **kwargs):
     super(Service, self).__init__()
     self.host = host
     self.binary = binary
     self.topic = topic
     self.manager_class_name = manager
     self.servicegroup_api = servicegroup.API()
     manager_class = importutils.import_class(self.manager_class_name)
     if objects_base.NovaObject.indirection_api:
         conductor_api = conductor.API()
         conductor_api.wait_until_ready(context.get_admin_context())
     self.manager = manager_class(host=self.host, *args, **kwargs)
     self.rpcserver = None
     self.report_interval = report_interval
     self.periodic_enable = periodic_enable
     self.periodic_fuzzy_delay = periodic_fuzzy_delay
     self.periodic_interval_max = periodic_interval_max
     self.saved_args, self.saved_kwargs = args, kwargs
     self.backdoor_port = None
     setup_profiler(binary, self.host)
Example #53
0
def include_var(name,
                rawtext,
                text,
                lineno,
                inliner,
                options=None,
                content=None):
    """


    :param name: The local name of the interpreted role, the role name
        actually used in the document.
    :param rawtext: A string containing the enitre interpreted text input,
        including the role and markup. Return it as a problematic node
        linked to a system message if a problem is encountered.
    :param text: The interpreted text content.
    :param lineno: The line number where the interpreted text begins.
    :param inliner: The docutils.parsers.rst.states.Inliner object that
        called include_var. It contains the several attributes useful for
        error reporting and document tree access.
    :param options: A dictionary of directive options for customization
        (from the "role" directive), to be interpreted by the role function.
        Used for additional attributes for the generated elements and other
        functionality.
    :param content: A list of strings, the directive content for
        customization (from the "role" directive). To be interpreted by the
        role function.

    :return:
    """
    obj = importutils.import_class(text)
    if isinstance(obj, (tuple, list)):
        obj = ", ".join(obj)
    elif isinstance(obj, dict):
        obj = json.dumps(obj, indent=4)
    else:
        obj = str(obj)
    return [nodes.Text(obj)], []
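
Since include_var follows the docutils role-function signature, it is presumably registered as an RST role before use. A hedged usage sketch (the role name and the dotted path are assumptions, not taken from the original project):

from docutils.parsers.rst import roles

# Register the role, e.g. from a Sphinx extension's setup() hook.
roles.register_local_role('include-var', include_var)

# In an .rst file the role would then expand the imported value inline:
#     The enabled backends are :include-var:`mypkg.defaults.ENABLED_BACKENDS`.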
Example #54
0
    def __init__(self,
                 host,
                 binary,
                 topic,
                 manager,
                 periodic_enable=None,
                 periodic_fuzzy_delay=None,
                 periodic_interval_max=None):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        self.rpcserver = None
        self.manager = manager_class(host=self.host)
        self.periodic_enable = periodic_enable
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.periodic_interval_max = periodic_interval_max
Example #55
0
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, *args, **kwargs):
        super(Service, self).__init__()

        rpc.init(CONF)

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        self.rpcserver = None
Example #56
0
 def _preprocess_logs(self, logs):
     resetting_roots = set()
     log_by_root = {}
     resource_paths = ('resource', 'service_graph', 'infra', 'tree',
                       'status')
     for log in logs:
         if log.action == aim_tree.ActionLog.RESET:
             resetting_roots.add(log.root_rn)
         action = log.action
         aim_res = None
         for path in resource_paths:
             try:
                 klass = importutils.import_class(
                     'aim.api.' + path + '.%s' % log.object_type)
                 aim_res = klass(**utils.json_loads(log.object_dict))
             except ImportError:
                 pass
         if not aim_res:
             LOG.warn('Aim resource for event %s not found' % log)
             continue
         log_by_root.setdefault(log.root_rn, []).append(
             (action, aim_res, log))
     return log_by_root, resetting_roots
Example #57
0
def get_config_hash(file_dir, resource_mapping, exts=['conf']):
    res = {}
    if not os.path.isdir(file_dir):
        logger.debug(
            "Directory {} not found. Returning emty dict".format(file_dir))
        return {}

    conf_files = [conf for conf in os.listdir(file_dir)
                  if conf.split('.')[-1] in exts]

    for conf_file in conf_files:
        if conf_file in resource_mapping.keys():
            drv = resource_mapping[conf_file].get(
                'driver',
                'fuel_external_git.drivers.openstack_config.OpenStackConfig'
            )
            drv_class = importutils.import_class(drv)
            config = drv_class(
                os.path.join(file_dir, conf_file),
                resource_mapping[conf_file]['resource']
            )
            res[config.config_name] = config.to_config_dict()
    return res
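
get_config_hash only considers files whose names appear in resource_mapping, and each entry may optionally override the driver class. A hedged sketch of the expected mapping shape and a call (paths and resource names are made up):

# Illustrative resource_mapping: file name -> driver + resource metadata.
resource_mapping = {
    'nova.conf': {
        # 'driver' is optional; the OpenStackConfig driver is the default.
        'resource': 'nova_config',
    },
    'neutron.conf': {
        'driver': 'fuel_external_git.drivers.openstack_config.OpenStackConfig',
        'resource': 'neutron_config',
    },
}

# configs = get_config_hash('/etc/fuel/configs', resource_mapping, exts=['conf'])
# -> {'<config_name>': <dict produced by to_config_dict()>, ...}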
Example #58
0
    def __init__(self,
                 host,
                 binary,
                 topic,
                 manager,
                 report_interval=None,
                 periodic_interval=None,
                 periodic_fuzzy_delay=None,
                 service_name=None,
                 coordination=False,
                 *args,
                 **kwargs):
        super(Service, self).__init__()
        if not rpc.initialized():
            rpc.init(CONF)
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled and profiler is not None:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        self.service = None
        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args,
                                     **kwargs)
        self.availability_zone = self.manager.availability_zone
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []
        self.coordinator = coordination

        setup_profiler(binary, host)
Example #59
0
    def _load_driver_provider(self):
        """Loads service drivers.
        """
        driver_providers = [
            driver_set.split(':')
            for driver_set in CONF.ml2.driver_list.split(',')
        ]

        while [''] in driver_providers:
            driver_providers.remove([''])

        if not driver_providers:
            LOG.info(_("Can't find service drivers"))
            return

        LOG.debug(_("Loading service drivers: %s"), driver_providers)
        for driver_name, driver_class in driver_providers:
            if driver_name is None or driver_class is None:
                continue

            LOG.debug(_("Loading service driver: %(dn)s, class path: %(cla)s"),
                      {
                          "dn": driver_name,
                          "cla": driver_class
                      })
            driver_cls = importutils.import_class(driver_class)
            driver_inst = driver_cls()

            self.driver_provider[driver_name] = driver_inst

            LOG.info(
                _("Successfully loaded %(name)s driver. "
                  "Description: %(desc)s"), {
                      "name": driver_name,
                      "desc": driver_class
                  })
Example #60
0
    def __init__(self, host, binary, topic, manager, periodic_interval=None,
                 periodic_fuzzy_delay=None, service_name=None,
                 *args, **kwargs):
        super(Service, self).__init__()

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        manager_class = profiler.trace_cls("rpc")(manager_class)

        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
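
Several of the service constructors above wrap the imported manager class with osprofiler's trace_cls before instantiating it. A minimal sketch of that pattern, assuming osprofiler is installed (the class and method below are made up):

from osprofiler import profiler


@profiler.trace_cls("rpc")
class DemoManager(object):
    """Public methods are traced as 'rpc' events when profiling is active."""

    def ping(self):
        return "pong"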