def load_plugins(self):
    self.plugins = []

    for entrypoint in pkg_resources.iter_entry_points('%s.plugin' %
                                                      self._project_name):
        try:
            pluginclass = entrypoint.load()
            plugin = pluginclass(self._service_name)
            self.plugins.append(plugin)
        except Exception as exc:
            LOG.error(_("Failed to load plugin %(plug)s: %(exc)s") %
                      {'plug': entrypoint, 'exc': exc})

    # Register individual notifiers.
    for plugin in self.plugins:
        for notifier in plugin.notifiers:
            notifier_api.add_driver(notifier)
def load_plugins(self):
    self.plugins = []

    for entrypoint in pkg_resources.iter_entry_points('%s.plugin' %
                                                      self._project_name):
        try:
            pluginclass = entrypoint.load()
            plugin = pluginclass(self._service_name)
            self.plugins.append(plugin)
        except Exception as exc:
            LOG.error(
                _("Failed to load plugin %(plug)s: %(exc)s") % {
                    'plug': entrypoint,
                    'exc': exc
                })

    # Register individual notifiers.
    for plugin in self.plugins:
        for notifier in plugin.notifiers:
            notifier_api.add_driver(notifier)
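
# Hypothetical illustration, not part of the original source: load_plugins()
# above relies on setuptools entry points in the '<project_name>.plugin'
# group (e.g. 'nova.plugin'). A third-party plugin package would make itself
# discoverable by declaring such an entry point in its own setup.py; the
# package, module, and class names below are assumptions for this sketch.
from setuptools import setup

setup(
    name='example-nova-plugin',
    version='0.1',
    packages=['example_nova_plugin'],
    entry_points={
        # Found by pkg_resources.iter_entry_points('nova.plugin') when the
        # PluginManager is constructed with project_name='nova'.
        'nova.plugin': [
            'example = example_nova_plugin.plugin:ExamplePlugin',
        ],
    },
)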
class PluginManager(object):
    """Manages plugin entrypoints and loading.

    For a service to implement this plugin interface for callback purposes:

      - Make use of the openstack-common notifier system
      - Instantiate this manager in each process (passing in
        project and service name)

    For an API service to extend itself using this plugin interface,
    it needs to query the plugin_extension_factory provided by
    the already-instantiated PluginManager.
    """

    def __init__(self, project_name, service_name):
        """Construct Plugin Manager; load and initialize plugins.

        project_name (e.g. 'nova' or 'glance') is used
        to construct the entry point that identifies plugins.

        The service_name (e.g. 'compute') is passed on to
        each plugin as a raw string for it to do what it will.
        """
        self._project_name = project_name
        self._service_name = service_name
        self.plugins = []

    def load_plugins(self):
        self.plugins = []

        for entrypoint in pkg_resources.iter_entry_points('%s.plugin' %
                                                          self._project_name):
            try:
                pluginclass = entrypoint.load()
                plugin = pluginclass(self._service_name)
                self.plugins.append(plugin)
            except Exception as exc:
                LOG.error(
                    _("Failed to load plugin %(plug)s: %(exc)s") % {
                        'plug': entrypoint,
                        'exc': exc
                    })

        # Register individual notifiers.
        for plugin in self.plugins:
            for notifier in plugin.notifiers:
                notifier_api.add_driver(notifier)
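
# Hypothetical usage sketch, not part of the original source: as the class
# docstring describes, each service process instantiates one PluginManager
# with its project and service name and then loads the plugins. The 'nova'
# and 'compute' values below are illustrative.
plugin_manager = PluginManager('nova', 'compute')
plugin_manager.load_plugins()

# After load_plugins() the plugin instances are available for callbacks,
# and their notifiers have already been registered with notifier_api.
for plugin in plugin_manager.plugins:
    LOG.debug("Loaded plugin %s", plugin)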
def setUp(self):
    super(TestNovaNotifier, self).setUp()
    nova_CONF.compute_driver = 'nova.virt.fake.FakeDriver'
    nova_CONF.notification_driver = [nova_notifier.__name__]
    nova_CONF.rpc_backend = 'nova.openstack.common.rpc.impl_fake'
    nova_CONF.vnc_enabled = False
    nova_CONF.spice.enabled = False
    self.compute = importutils.import_object(nova_CONF.compute_manager)
    self.context = context.get_admin_context()

    fake_network.set_stub_network_methods(self.stubs)

    self.instance = {"name": "instance-1",
                     'OS-EXT-SRV-ATTR:instance_name': 'instance-1',
                     "id": 1,
                     "image_ref": "FAKE",
                     "user_id": "FAKE",
                     "project_id": "FAKE",
                     "display_name": "FAKE NAME",
                     "hostname": "abcdef",
                     "reservation_id": "FAKE RID",
                     "instance_type_id": 1,
                     "architecture": "x86",
                     "memory_mb": "1024",
                     "root_gb": "20",
                     "ephemeral_gb": "0",
                     "vcpus": 1,
                     'node': "fakenode",
                     "host": "fakehost",
                     "availability_zone": "1e3ce043029547f1a61c1996d1a531a4",
                     "created_at": '2012-05-08 20:23:41',
                     "os_type": "linux",
                     "kernel_id": "kernelid",
                     "ramdisk_id": "ramdiskid",
                     "vm_state": vm_states.ACTIVE,
                     "access_ip_v4": "someip",
                     "access_ip_v6": "someip",
                     "metadata": {},
                     "uuid": "144e08f4-00cb-11e2-888e-5453ed1bbb5f",
                     "system_metadata": {}}

    self.stubs.Set(db, 'instance_info_cache_delete', self.do_nothing)
    self.stubs.Set(db, 'instance_destroy', self.do_nothing)
    self.stubs.Set(db, 'instance_system_metadata_get',
                   self.fake_db_instance_system_metadata_get)
    self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                   lambda context, instance: {})
    self.stubs.Set(db, 'instance_update_and_get_original',
                   lambda context, uuid, kwargs: (self.instance,
                                                  self.instance))
    self.stubs.Set(flavors, 'extract_flavor', lambda ref: {})

    # Set up to capture the notification messages generated by the
    # plugin and to invoke our notifier plugin.
    self.notifications = []
    notifier_api._reset_drivers()
    notifier_api.add_driver(self)
    notifier_api.add_driver(nova_notifier)

    ext_mgr = test_manager.TestExtensionManager([
        extension.Extension('test',
                            None,
                            None,
                            self.Pollster(),
                            ),
    ])
    self.ext_mgr = ext_mgr
    self.gatherer = nova_notifier.DeletedInstanceStatsGatherer(ext_mgr)
    nova_notifier.initialize_gatherer(self.gatherer)

    # Terminate the instance to trigger the notification.
    with contextlib.nested(
            # Under Grizzly, Nova has moved to no-db access on the
            # compute node. The compute manager uses RPC to talk to
            # the conductor. We need to disable communication between
            # the nova manager and the remote system since we can't
            # expect the message bus to be available, or the remote
            # controller to be there if the message bus is online.
            mock.patch.object(self.compute, 'conductor_api'),
            # The code that looks up the instance uses a global
            # reference to the API, so we also have to patch that to
            # return our fake data.
            mock.patch.object(nova_notifier.instance_info_source,
                              'instance_get_by_uuid',
                              self.fake_instance_ref_get),
    ):
        self.compute.terminate_instance(self.context,
                                        instance=self.instance)
def setUp(self):
    super(TestNovaNotifier, self).setUp()
    nova_CONF.compute_driver = 'nova.virt.fake.FakeDriver'
    nova_CONF.notification_driver = [nova_notifier.__name__]
    nova_CONF.rpc_backend = 'nova.openstack.common.rpc.impl_fake'
    nova_CONF.vnc_enabled = False
    nova_CONF.spice.enabled = False
    self.compute = importutils.import_object(nova_CONF.compute_manager)
    self.context = context.get_admin_context()

    fake_network.set_stub_network_methods(self.stubs)

    instance_data = {"display_name": "instance-1",
                     'OS-EXT-SRV-ATTR:instance_name': 'instance-1',
                     "id": 1,
                     "image_ref": "FAKE",
                     "user_id": "FAKE",
                     "project_id": "FAKE",
                     "display_name": "FAKE NAME",
                     "hostname": "abcdef",
                     "reservation_id": "FAKE RID",
                     "instance_type_id": 1,
                     "architecture": "x86",
                     "memory_mb": "1024",
                     "root_gb": "20",
                     "ephemeral_gb": "0",
                     "vcpus": 1,
                     'node': "fakenode",
                     "host": "fakehost",
                     "availability_zone": "1e3ce043029547f1a61c1996d1a531a4",
                     "created_at": '2012-05-08 20:23:41',
                     "launched_at": '2012-05-08 20:25:45',
                     "terminated_at": '2012-05-09 20:23:41',
                     "os_type": "linux",
                     "kernel_id": "kernelid",
                     "ramdisk_id": "ramdiskid",
                     "vm_state": vm_states.ACTIVE,
                     "task_state": None,
                     "access_ip_v4": "192.168.5.4",
                     "access_ip_v6": "2001:DB8::0",
                     "metadata": {},
                     "uuid": "144e08f4-00cb-11e2-888e-5453ed1bbb5f",
                     "system_metadata": {}}
    self.instance = nova_instance.Instance()
    for key, value in instance_data.iteritems():
        setattr(self.instance, key, value)

    self.stubs.Set(db, 'instance_info_cache_delete', self.do_nothing)
    self.stubs.Set(db, 'instance_destroy', self.do_nothing)
    self.stubs.Set(db, 'instance_system_metadata_get',
                   self.fake_db_instance_system_metadata_get)
    self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                   lambda context, instance: {})
    self.stubs.Set(
        db, 'instance_update_and_get_original',
        lambda context, uuid, kwargs: (self.instance, self.instance))
    self.stubs.Set(flavors, 'extract_flavor', lambda ref: {})

    # Set up to capture the notification messages generated by the
    # plugin and to invoke our notifier plugin.
    self.notifications = []
    notifier_api._reset_drivers()
    notifier_api.add_driver(self)
    notifier_api.add_driver(nova_notifier)

    ext_mgr = test_manager.TestExtensionManager([
        extension.Extension(
            'test',
            None,
            None,
            self.Pollster(),
        ),
    ])
    self.ext_mgr = ext_mgr
    self.gatherer = nova_notifier.DeletedInstanceStatsGatherer(ext_mgr)
    nova_notifier.initialize_gatherer(self.gatherer)

    # Terminate the instance to trigger the notification.
    with contextlib.nested(
            # Under Grizzly, Nova has moved to no-db access on the
            # compute node. The compute manager uses RPC to talk to
            # the conductor. We need to disable communication between
            # the nova manager and the remote system since we can't
            # expect the message bus to be available, or the remote
            # controller to be there if the message bus is online.
            mock.patch.object(self.compute, 'conductor_api'),
            # The code that looks up the instance uses a global
            # reference to the API, so we also have to patch that to
            # return our fake data.
            mock.patch.object(nova_notifier.instance_info_source,
                              'instance_get_by_uuid',
                              self.fake_instance_ref_get),
    ):
        self.compute.terminate_instance(self.context,
                                        instance=self.instance)
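
# Hypothetical sketch, not part of the original source: setUp() registers the
# test case itself as a notification driver via notifier_api.add_driver(self),
# so the test class is assumed to expose a notify() hook matching the driver
# interface that simply records each message emitted during
# terminate_instance() for later assertions.
def notify(self, context, message):
    self.notifications.append(message)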