def with_dummy_plugin_distribution_1_and_3():
    """Restrict the plugin registry to dummy-dist-1 and dummy-dist-3.

    dummy-dist-1 carries test_plugin_1 and test_plugin_4 (one configuration
    file), dummy-dist-3 carries test_plugin_5 (models, notebook folder).
    Any previous state of plugins is restored during teardown.
    """
    _setup()

    dist_one = Mock(importlib_metadata.Distribution)
    dist_one.name = "dummy-dist-1"
    dist_three = Mock(importlib_metadata.Distribution)
    dist_three.name = "dummy-dist-3"

    # (entry point name, entry point value, owning distribution)
    plugin_specs = [
        ("test_plugin_1", "tests.dummy_plugins.dist_1.dummy_plugin_1", dist_one),
        ("test_plugin_4", "tests.dummy_plugins.dist_1.dummy_plugin_4", dist_one),
        ("test_plugin_5", "tests.dummy_plugins.dist_3.dummy_plugin_5", dist_three),
    ]

    entry_points = []
    for ep_name, ep_value, ep_dist in plugin_specs:
        ep = importlib_metadata.EntryPoint(
            name=ep_name, value=ep_value, group=MODEL_PLUGIN_ID,
        )
        ep.dist = ep_dist
        entry_points.append(ep)

    _update_entry_map(entry_points)
    yield
    _teardown()
def test_multiple_drivers(self):
    # The idea for this test was contributed by clayg:
    # https://gist.github.com/clayg/6311348
    # Two extensions registered under the same name must make the
    # DriverManager refuse to initialize.
    entry_values = ('pkg1:driver', 'pkg2:driver')
    extensions = [
        extension.Extension(
            'backend',
            importlib_metadata.EntryPoint('backend', value, 'backend'),
            'pkg backend',
            None,
        )
        for value in entry_values
    ]
    try:
        manager = driver.DriverManager.make_test_instance(extensions[0])
        # Call the initialization code that verifies the extension
        manager._init_plugins(extensions)
    except exception.MultipleMatches as err:
        self.assertIn("Multiple", str(err))
    else:
        self.fail('Should have had an error')
def with_dummy_plugins():
    """Restrict the plugin registry to the three dummy distributions.

    - dummy-dist-1: plugins test_plugin_1 and test_plugin_4 (one
      configuration file, no models, notebook folder).
    - dummy-dist-2: plugins test_plugin_2 and test_plugin_3 (3 configuration
      files, model folder, no notebooks).
    - dummy-dist-3: plugin test_plugin_5 (no configuration file, model
      folder, notebook folder).

    Any previous state of plugins is restored during teardown.
    """
    _setup()

    distributions = {}
    for dist_name in ("dummy-dist-1", "dummy-dist-2", "dummy-dist-3"):
        dist = Mock(importlib_metadata.Distribution)
        dist.name = dist_name
        distributions[dist_name] = dist

    # (entry point name, entry point value, owning distribution name)
    plugin_specs = [
        ("test_plugin_1", "tests.dummy_plugins.dist_1.dummy_plugin_1", "dummy-dist-1"),
        ("test_plugin_4", "tests.dummy_plugins.dist_1.dummy_plugin_4", "dummy-dist-1"),
        ("test_plugin_2", "tests.dummy_plugins.dist_2.dummy_plugin_2", "dummy-dist-2"),
        ("test_plugin_3", "tests.dummy_plugins.dist_2.dummy_plugin_3", "dummy-dist-2"),
        ("test_plugin_5", "tests.dummy_plugins.dist_3.dummy_plugin_5", "dummy-dist-3"),
    ]

    entry_points = []
    for ep_name, ep_value, dist_name in plugin_specs:
        ep = importlib_metadata.EntryPoint(
            name=ep_name, value=ep_value, group=MODEL_PLUGIN_ID,
        )
        ep.dist = distributions[dist_name]
        entry_points.append(ep)

    _update_entry_map(entry_points)
    yield
    _teardown()
def get_group_all(self, group, path=None):
    """Return every entry point registered under *group* for *path*.

    Looks up the raw tuples stored under ``data['groups'][group]`` and
    materializes each one as an ``importlib_metadata.EntryPoint``.
    Returns an empty list when the group is unknown.
    """
    data = self._get_data_for_path(path)
    raw_entries = data.get('groups', {}).get(group, [])
    return [importlib_metadata.EntryPoint(*entry) for entry in raw_entries]
def setUp(self):
    """Create two sample extensions: one entry point with an extra
    marker and attribute path, one with a plain module:attribute value."""
    def build_extension(ep_value):
        # All fixtures share the same name/group; only the entry point
        # value differs between the two extensions.
        return extension.Extension(
            'name',
            importlib_metadata.EntryPoint('name', ep_value, 'group_name'),
            mock.Mock(),
            None,
        )

    self.ext1 = build_extension('module.name:attribute.name [extra]')
    self.ext2 = build_extension('module:attribute')
def _make_ext(name, docstring):
    """Build an Extension named *name* whose plugin object carries
    *docstring* as its ``__doc__``."""
    def inner():
        pass

    inner.__doc__ = docstring
    ep = importlib_metadata.EntryPoint(
        name,
        '{}_module:{}'.format(name, name),
        'group',
    )
    return extension.Extension(name, ep, inner, None)
def with_dummy_plugin_4():
    """Restrict the plugin registry to dummy-dist-1 with only
    test_plugin_4 (no configuration file, no model folder, notebooks).

    Any previous state of plugins is restored during teardown.
    """
    _setup()

    dist = Mock(importlib_metadata.Distribution)
    dist.name = "dummy-dist-1"

    ep = importlib_metadata.EntryPoint(
        name="test_plugin_4",
        value="tests.dummy_plugins.dist_1.dummy_plugin_4",
        group=MODEL_PLUGIN_ID,
    )
    ep.dist = dist

    _update_entry_map([ep])
    yield
    _teardown()
def register_plugin(name, plugin_type, entry_point) -> None:
    """Registers a plugin dynamically without needing to install as a package.

    Args:
        name (str): Name of plugin to be referenced.
        plugin_type (str): Type to determine plugin namespace.
        entry_point (str): Entry point in the form: some.module:some.attr

    Raises:
        Exception: Raised when plugin_type is not supported.
    """
    # Map each supported plugin type to its stevedore namespace.
    namespaces = {
        "system": "roast.component.system",
        "testsuite": "roast.component.testsuite",
        "serial": "roast.serial",
        "board": "roast.board",
        "relay": "roast.relay",
    }
    namespace = namespaces.get(plugin_type)
    if namespace is None:
        err_msg = f"Plugin type {plugin_type} is not supported."
        log.error(err_msg)
        raise Exception(err_msg)

    ep = importlib_metadata.EntryPoint(name, entry_point, namespace)
    manager = ExtensionManager(namespace)
    cache = manager.ENTRY_POINT_CACHE
    if namespace in cache:
        cached = cache.get(namespace)
        # Only register the entry point once per name.
        if name not in {existing.name for existing in cached}:
            cached.append(ep)
        cache[namespace] = cached
    else:
        cache[namespace] = [ep]
    # Import the target module/attribute now so registration errors
    # surface immediately.
    ep.load()
def __init__(self, cloud=None, config=None, session=None,
             app_name=None, app_version=None,
             extra_services=None,
             strict=False,
             use_direct_get=False,
             task_manager=None,
             rate_limit=None,
             oslo_conf=None,
             service_types=None,
             global_request_id=None,
             strict_proxies=False,
             pool_executor=None,
             **kwargs):
    """Create a connection to a cloud.

    A connection needs information about how to connect, how to
    authenticate and how to select the appropriate services to use.

    The recommended way to provide this information is by referencing
    a named cloud config from an existing `clouds.yaml` file. The cloud
    name ``envvars`` may be used to consume a cloud configured via ``OS_``
    environment variables.

    A pre-existing :class:`~openstack.config.cloud_region.CloudRegion`
    object can be passed in lieu of a cloud name, for cases where the
    user already has a fully formed CloudRegion and just wants to use it.

    Similarly, if for some reason the user already has a
    :class:`~keystoneauth1.session.Session` and wants to use it, it may be
    passed in.

    :param str cloud: Name of the cloud from config to use.
    :param config: CloudRegion object representing the config for the
        region of the cloud in question.
    :type config: :class:`~openstack.config.cloud_region.CloudRegion`
    :param session: A session object compatible with
        :class:`~keystoneauth1.session.Session`.
    :type session: :class:`~keystoneauth1.session.Session`
    :param str app_name: Name of the application to be added to User Agent.
    :param str app_version: Version of the application to be added to
        User Agent.
    :param extra_services: List of
        :class:`~openstack.service_description.ServiceDescription`
        objects describing services that openstacksdk otherwise does not
        know about.
    :param bool use_direct_get:
        For get methods, make specific REST calls for server-side filtering
        instead of making list calls and filtering client-side. Default
        false.
    :param task_manager:
        Ignored. Exists for backwards compat during transition. Rate limit
        parameters should be passed directly to the `rate_limit` parameter.
    :param rate_limit:
        Client-side rate limit, expressed in calls per second. The
        parameter can either be a single float, or it can be a dict with
        keys as service-type and values as floats expressing the calls
        per second for that service. Defaults to None, which means no
        rate-limiting is performed.
    :param oslo_conf: An oslo.config CONF object.
    :type oslo_conf: :class:`~oslo_config.cfg.ConfigOpts`
        An oslo.config ``CONF`` object that has been populated with
        ``keystoneauth1.loading.register_adapter_conf_options`` in
        groups named by the OpenStack service's project name.
    :param service_types:
        A list/set of service types this Connection should support. All
        other service types will be disabled (will error if used).
        **Currently only supported in conjunction with the ``oslo_conf``
        kwarg.**
    :param global_request_id: A Request-id to send with all interactions.
    :param strict_proxies:
        If True, check proxies on creation and raise
        ServiceDiscoveryException if the service is unavailable.
    :type strict_proxies: bool
        Throw an ``openstack.exceptions.ServiceDiscoveryException`` if the
        endpoint for a given service doesn't work. This is useful for
        OpenStack services using sdk to talk to other OpenStack services
        where it can be expected that the deployer config is correct and
        errors should be reported immediately. Default false.
    :param pool_executor:
    :type pool_executor: :class:`~futurist.Executor`
        A futurist ``Executor`` object to be used for concurrent background
        activities. Defaults to None in which case a ThreadPoolExecutor
        will be created if needed.
    :param kwargs: If a config is not provided, the rest of the parameters
        provided are assumed to be arguments to be passed to the
        CloudRegion constructor.
    """
    self.config = config
    self._extra_services = {}
    self._strict_proxies = strict_proxies
    # Index any caller-supplied service descriptions by service type so
    # they can be looked up alongside the built-in ones.
    if extra_services:
        for service in extra_services:
            self._extra_services[service.service_type] = service

    # Resolve a CloudRegion if one was not passed in directly, in order
    # of preference: oslo.config, an existing session, then named-cloud /
    # envvar configuration.
    if not self.config:
        if oslo_conf:
            self.config = cloud_region.from_conf(
                oslo_conf, session=session, app_name=app_name,
                app_version=app_version, service_types=service_types)
        elif session:
            self.config = cloud_region.from_session(
                session=session,
                app_name=app_name, app_version=app_version,
                load_yaml_config=False,
                load_envvars=False,
                rate_limit=rate_limit,
                **kwargs)
        else:
            # Only consult clouds.yaml / environment variables when an
            # explicit cloud name was given.
            self.config = _config.get_cloud_region(
                cloud=cloud,
                app_name=app_name, app_version=app_version,
                load_yaml_config=cloud is not None,
                load_envvars=cloud is not None,
                rate_limit=rate_limit,
                **kwargs)

    self._session = None
    self._proxies = {}
    self.__pool_executor = pool_executor
    self._global_request_id = global_request_id
    self.use_direct_get = use_direct_get
    self.strict_mode = strict

    # Call the _*CloudMixin constructors while we work on
    # integrating things better.
    _cloud._OpenStackCloudMixin.__init__(self)
    _accelerator.AcceleratorCloudMixin.__init__(self)
    _baremetal.BaremetalCloudMixin.__init__(self)
    _block_storage.BlockStorageCloudMixin.__init__(self)
    _clustering.ClusteringCloudMixin.__init__(self)
    _coe.CoeCloudMixin.__init__(self)
    _compute.ComputeCloudMixin.__init__(self)
    _dns.DnsCloudMixin.__init__(self)
    _floating_ip.FloatingIPCloudMixin.__init__(self)
    _identity.IdentityCloudMixin.__init__(self)
    _image.ImageCloudMixin.__init__(self)
    _network_common.NetworkCommonCloudMixin.__init__(self)
    _network.NetworkCloudMixin.__init__(self)
    _object_store.ObjectStoreCloudMixin.__init__(self)
    _orchestration.OrchestrationCloudMixin.__init__(self)
    _security_group.SecurityGroupCloudMixin.__init__(self)

    # Allow vendors to provide hooks. They will normally only receive a
    # connection object and are responsible for registering additional
    # services. An explicit kwarg takes precedence over one found in the
    # cloud profile.
    vendor_hook = kwargs.get('vendor_hook')
    if not vendor_hook and 'vendor_hook' in self.config.config:
        # Get the one from profile
        vendor_hook = self.config.config.get('vendor_hook')
    if vendor_hook:
        try:
            # NOTE(gtema): no class name in the hook, plain module:function
            # Split string hook into module and function
            try:
                (package_name, function) = vendor_hook.rsplit(':')
                if package_name and function:
                    ep = importlib_metadata.EntryPoint(
                        name='vendor_hook',
                        value=vendor_hook,
                        group='vendor_hook',
                    )
                    hook = ep.load()
                    hook(self)
            except ValueError:
                # rsplit did not yield exactly two parts — malformed hook
                # string; warn instead of failing connection creation.
                self.log.warning('Hook should be in the entrypoint '
                                 'module:attribute format')
        except (ImportError, TypeError, AttributeError) as e:
            # A broken hook must not prevent the connection from being
            # constructed; log and continue.
            self.log.warning('Configured hook %s cannot be executed: %s',
                             vendor_hook, e)

    # Add additional metrics into the configuration according to the
    # selected connection. We don't want to deal with overall config in the
    # proxy, just pass required part.
    if (self.config._influxdb_config
            and 'additional_metric_tags' in self.config.config):
        self.config._influxdb_config['additional_metric_tags'] = \
            self.config.config['additional_metric_tags']