def __init__(self):
    """Wire up the CNI daemon's child services.

    NOTE(mdulko): Default shutdown timeout is 60 seconds and K8s won't
    wait more by default anyway.
    """
    super(CNIDaemonServiceManager, self).__init__()
    # TODO(dulek): Use cotyledon.oslo_config_glue to support conf reload.
    # TODO(vikasc): Should be done using dynamically loadable OVO types
    # plugin.
    objects.register_locally_defined_vifs()
    os_vif.initialize()
    clients.setup_kubernetes_client()
    if CONF.sriov.enable_pod_resource_service:
        clients.setup_pod_resources_client()

    # Shared state between the watcher and server workers.
    self.manager = multiprocessing.Manager()
    vif_registry = self.manager.dict()  # For Watcher->Server communication.
    is_healthy = multiprocessing.Value(c_bool, True)

    self.add(CNIDaemonWatcherService, workers=1,
             args=(vif_registry, is_healthy))
    self._server_service = self.add(CNIDaemonServerService, workers=1,
                                    args=(vif_registry, is_healthy))
    self.add(CNIDaemonHealthServerService, workers=1, args=(is_healthy,))
    self.register_hooks(on_terminate=self.terminate)
def start():
    """Configure the controller and run the KuryrK8sService until exit."""
    config.init(sys.argv[1:])
    config.setup_logging()
    clients.setup_clients()
    os_vif.initialize()
    launcher = service.launch(config.CONF, KuryrK8sService())
    launcher.wait()
def setUp(self):
    """Patch out the real libvirt so tests exercise this fake module."""
    super(FakeLibvirtFixture, self).setUp()

    # Some modules load the libvirt library in a strange way
    for mod_name in ('driver', 'host', 'guest', 'firewall', 'migration'):
        target = 'nova.virt.libvirt.{module}.libvirt'.format(module=mod_name)
        # NOTE(mdbooth): The strange incantation below means 'this module'
        self.useFixture(fixtures.MonkeyPatch(target, sys.modules[__name__]))

    self.useFixture(
        fixtures.MockPatch('nova.virt.libvirt.utils.get_fs_info'))

    # Don't assume that the system running tests has a valid machine-id
    self.useFixture(fixtures.MockPatch(
        'nova.virt.libvirt.driver.LibvirtDriver'
        '._get_host_sysinfo_serial_os', return_value=uuids.machine_id))

    disable_event_thread(self)

    if self.stub_os_vif:
        # Make sure to never try and actually plug VIFs in os-vif unless
        # we're explicitly testing that code and the test itself will
        # handle the appropriate mocking.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.vif.LibvirtGenericVIFDriver._plug_os_vif',
            lambda *a, **kw: None))

    # os_vif.initialize is typically done in nova-compute startup; even if
    # nothing is plugged with os-vif in the test, the object model must be
    # initialized so guest config XML can be generated properly.
    import os_vif
    os_vif.initialize()
def main():
    """CNI executable entry point for the Zun CNI plugin."""
    conf_dict = jsonutils.load(sys.stdin.buffer)
    cni_conf = utils.CNIConfig(conf_dict)

    cli_args = []
    if 'zun_conf' in conf_dict:
        cli_args = ['--config-file', cni_conf.zun_conf]
    try:
        if cni_conf.debug:
            cli_args.append('-d')
    except AttributeError:
        pass
    config.init(cli_args)
    if os.environ.get('CNI_COMMAND') == 'VERSION':
        CONF.set_default('use_stderr', True)

    # Initialize o.vo registry.
    os_vif.initialize()

    runner = cni_api.CNIDaemonizedRunner()

    def _timeout(signum, frame):
        # Report a CNI timeout result before bailing out.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': consts.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)
    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
def test_host_info_all(self):
    """host_info() must report at least all the in-tree plugins."""
    os_vif.initialize()
    info = os_vif.host_info()
    # NOTE(sean-k-mooney): as out of tree plugins could be visible in the
    # path, assert only that at least all the in-tree plugins are loaded
    # instead of an exact match.
    # assertGreaterEqual gives the actual count in the failure message,
    # unlike assertTrue(len(...) >= n).
    self.assertGreaterEqual(len(info.plugin_info), 3)
    plugins = {p.plugin_name: p for p in info.plugin_info}
    in_tree_plugin_names = ("linux_bridge", "ovs", "noop")
    self.assertTrue(all(name in plugins for name in in_tree_plugin_names))
    lb = plugins["linux_bridge"]
    self.assertTrue(
        any("VIFBridge" == vif.vif_object_name for vif in lb.vif_info))
    ovs = plugins["ovs"]
    self.assertGreaterEqual(len(ovs.vif_info), 4)
    vif_names = (vif.vif_object_name for vif in ovs.vif_info)
    ovs_vifs = ("VIFBridge", "VIFOpenVSwitch", "VIFVHostUser",
                "VIFHostDevice")
    self.assertTrue(all(name in ovs_vifs for name in vif_names))
    noop = plugins["noop"]
    self.assertTrue(
        any("VIFVHostUser" == vif.vif_object_name for vif in noop.vif_info))
def setUp(self):
    """Replace the real libvirt module with this fake one for tests."""
    super(FakeLibvirtFixture, self).setUp()

    # Some modules load the libvirt library in a strange way
    for mod in ('driver', 'host', 'guest', 'firewall', 'migration'):
        name = 'nova.virt.libvirt.{module}.libvirt'.format(module=mod)
        # NOTE(mdbooth): The strange incantation below means 'this module'
        self.useFixture(fixtures.MonkeyPatch(name, sys.modules[__name__]))

    self.useFixture(
        fixtures.MockPatch('nova.virt.libvirt.utils.get_fs_info'))

    disable_event_thread(self)

    if self.stub_os_vif:
        # Make sure to never try and actually plug VIFs in os-vif unless
        # we're explicitly testing that code and the test itself will
        # handle the appropriate mocking.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.vif.LibvirtGenericVIFDriver._plug_os_vif',
            lambda *a, **kw: None))

    # os_vif.initialize is typically done in nova-compute startup; the
    # object model still needs initializing here so guest config XML can
    # be generated even when nothing is plugged via os-vif.
    import os_vif
    os_vif.initialize()
def test_initialize(self, mock_EM):
    """os_vif.initialize() must build the extension manager only once.

    Uses assertIsNone/assertIsNotNone instead of assertEqual(None, ...)
    per unittest idiom, matching the sibling tests in this suite.
    """
    self.assertIsNone(os_vif._EXT_MANAGER)
    # The duplicate call validates that the extension manager is only
    # constructed on the first initialize().
    os_vif.initialize()
    os_vif.initialize()
    mock_EM.assert_called_once_with(
        invoke_args={}, invoke_on_load=True, namespace='os_vif')
    self.assertIsNotNone(os_vif._EXT_MANAGER)
def __init__(self):
    """Set up the CNI daemon's watcher, server and health services."""
    super(CNIDaemonServiceManager, self).__init__()
    # TODO(dulek): Use cotyledon.oslo_config_glue to support conf reload.
    # TODO(vikasc): Should be done using dynamically loadable OVO types
    # plugin.
    objects.register_locally_defined_vifs()
    os_vif.initialize()
    clients.setup_kubernetes_client()

    # Shared state between the watcher and server workers.
    self.manager = multiprocessing.Manager()
    vif_registry = self.manager.dict()  # For Watcher->Server communication.
    is_healthy = multiprocessing.Value(c_bool, True)

    self.add(CNIDaemonWatcherService, workers=1,
             args=(vif_registry, is_healthy))
    self.add(CNIDaemonServerService, workers=1,
             args=(vif_registry, is_healthy))
    self.add(CNIDaemonHealthServerService, workers=1, args=(is_healthy,))
    self.register_hooks(on_terminate=self.terminate)
def run():
    """CNI executable entry point (daemonized kuryr CNI)."""
    conf_dict = jsonutils.load(sys.stdin.buffer)
    cni_conf = utils.CNIConfig(conf_dict)

    cli_args = []
    if 'kuryr_conf' in conf_dict:
        cli_args = ['--config-file', cni_conf.kuryr_conf]
    try:
        if cni_conf.debug:
            cli_args.append('-d')
    except AttributeError:
        pass
    config.init(cli_args)
    config.setup_logging()

    # Initialize o.vo registry.
    k_objects.register_locally_defined_vifs()
    os_vif.initialize()

    runner = cni_api.CNIDaemonizedRunner()

    def _timeout(signum, frame):
        # Report a CNI timeout result before bailing out.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': k_const.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)
    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
def test_unplug(self):
    """unplug() must delegate to the plugin registered for the VIF."""
    fake_plugin = mock.MagicMock()
    with mock.patch('stevedore.extension.ExtensionManager',
                    return_value={'foobar': fake_plugin}):
        os_vif.initialize()
        vif = objects.vif.VIF(id='uniq', plugin='foobar')
        os_vif.unplug(vif)
        fake_plugin.unplug.assert_called_once_with(vif)
def test_initialize(self, mock_EM):
    """os_vif.initialize() must build the extension manager only once.

    Uses assertIsNone/assertIsNotNone instead of assertEqual(None, ...)
    per unittest idiom.
    """
    self.assertIsNone(os_vif._EXT_MANAGER)
    # Call twice on purpose: only the first call may construct the
    # stevedore ExtensionManager.
    os_vif.initialize()
    os_vif.initialize()
    mock_EM.assert_called_once_with(invoke_args={},
                                    invoke_on_load=True,
                                    namespace='os_vif')
    self.assertIsNotNone(os_vif._EXT_MANAGER)
def main():
    """Entry point for the Zun CNI daemon process."""
    # Privsep must be initialized before any privileged call is made.
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    zun_service.prepare_service(sys.argv)
    config.parse_args(sys.argv)
    # Initialize o.vo registry.
    os_vif.initialize()
    service.CNIDaemonServiceManager().run()
def test_initialize(self, mock_EM):
    """Repeated initialize() calls build the extension manager once."""
    self.assertIsNone(os_vif._EXT_MANAGER)
    # Call twice on purpose: only the first call may construct the
    # stevedore ExtensionManager.
    os_vif.initialize()
    os_vif.initialize()
    mock_EM.assert_called_once_with(
        invoke_on_load=False, namespace='os_vif')
    self.assertIsNotNone(os_vif._EXT_MANAGER)
def test_initialize(self, mock_EM):
    """A second initialize() call must not rebuild the extension manager."""
    self.assertIsNone(os_vif._EXT_MANAGER)
    # The duplicate call below validates that the extension manager is
    # only initialized once.
    os_vif.initialize()
    os_vif.initialize()
    mock_EM.assert_called_once_with(invoke_on_load=False,
                                    namespace='os_vif')
    self.assertIsNotNone(os_vif._EXT_MANAGER)
def test_host_info_filtered(self):
    """Filtering by VIF type must narrow host_info to the ovs plugin."""
    os_vif.initialize()
    info = os_vif.host_info(permitted_vif_type_names=["VIFOpenVSwitch"])

    self.assertEqual(len(info.plugin_info), 1)
    plugin = info.plugin_info[0]
    self.assertEqual(plugin.plugin_name, "ovs")
    self.assertEqual(len(plugin.vif_info), 1)
    self.assertEqual(plugin.vif_info[0].vif_object_name, "VIFOpenVSwitch")
def test_plug(self):
    """plug() must delegate to the plugin registered for the VIF."""
    fake_plugin = mock.MagicMock()
    with mock.patch('stevedore.extension.ExtensionManager',
                    return_value={'foobar': fake_plugin}):
        os_vif.initialize()
        instance = mock.MagicMock()
        inst_info = objects.instance_info.InstanceInfo()
        vif = objects.vif.VIF(id='uniq', plugin='foobar',
                              instance_info=inst_info)
        os_vif.plug(vif, instance)
        fake_plugin.plug.assert_called_once_with(vif, instance)
def test_describe_noop_plugin(self):
    """The noop plugin must report VIFVlanNested at version 1.0."""
    os_vif.initialize()
    noop_plugin = NoOpPlugin.load('noop')
    expected = objects.host_info.HostPluginInfo(
        plugin_name='noop',
        vif_info=[
            objects.host_info.HostVIFInfo(
                vif_object_name=k_vif.VIFVlanNested.__name__,
                min_version="1.0",
                max_version="1.0"),
        ])
    self.assertEqual(expected, noop_plugin.describe())
def __init__(self):
    """Assemble all child services of the CNI daemon.

    NOTE(mdulko): Default shutdown timeout is 60 seconds and K8s won't
    wait more by default anyway.
    """
    super(CNIDaemonServiceManager, self).__init__()
    self._server_service = None
    # TODO(dulek): Use cotyledon.oslo_config_glue to support conf reload.
    # TODO(vikasc): Should be done using dynamically loadable OVO types
    # plugin.
    objects.register_locally_defined_vifs()
    os_vif.initialize()
    clients.setup_kubernetes_client()
    if CONF.sriov.enable_pod_resource_service:
        clients.setup_pod_resources_client()

    # Shared state between the watcher/server/exporter workers.
    self.manager = multiprocessing.Manager()
    vif_registry = self.manager.dict()  # For Watcher->Server communication.
    is_healthy = multiprocessing.Value(c_bool, True)
    metrics_queue = self.manager.Queue()

    self.add(watcher_service.KuryrPortWatcherService, workers=1,
             args=(vif_registry, is_healthy))
    self.add(watcher_service.PodWatcherService, workers=1,
             args=(vif_registry, is_healthy))
    self._server_service = self.add(
        CNIDaemonServerService, workers=1,
        args=(vif_registry, is_healthy, metrics_queue))
    self.add(CNIDaemonHealthServerService, workers=1, args=(is_healthy,))
    self.add(CNIDaemonExporterService, workers=1, args=(metrics_queue,))

    def shutdown_hook(service_id, worker_id, exit_code):
        # Any child dying takes the whole daemon down so K8s restarts it.
        LOG.critical(f'Child Service {service_id} had exited with code '
                     f'{exit_code}, stopping kuryr-daemon')
        self.shutdown()

    self.register_hooks(on_terminate=self.terminate,
                        on_dead_worker=shutdown_hook)
def test_plug(self, mock_plug):
    """plug() looks up the plugin by name and invokes its plug()."""
    ext = extension.Extension(name="demo", entry_point="os-vif",
                              plugin=DemoPlugin, obj=None)
    with mock.patch('stevedore.extension.ExtensionManager.names',
                    return_value=['foobar']), \
            mock.patch('stevedore.extension.ExtensionManager.__getitem__',
                       return_value=ext):
        os_vif.initialize()
        inst_info = objects.instance_info.InstanceInfo()
        vif = objects.vif.VIFBridge(
            id='9a12694f-f95e-49fa-9edb-70239aee5a2c', plugin='foobar')
        os_vif.plug(vif, inst_info)
        mock_plug.assert_called_once_with(vif, inst_info)
def test_unplug(self, mock_unplug):
    """unplug() dispatches to the plugin named by the VIF."""
    ext = extension.Extension(name="demo", entry_point="os-vif",
                              plugin=NoOpPlugin, obj=None)
    with mock.patch('stevedore.extension.ExtensionManager.names',
                    return_value=['foobar']), \
            mock.patch('stevedore.extension.ExtensionManager.__getitem__',
                       return_value=ext):
        os_vif.initialize()
        inst_info = mock.sentinel.info
        vif = mock.MagicMock()
        vif.plugin_name = 'noop'
        os_vif.unplug(vif, inst_info)
        mock_unplug.assert_called_once_with(vif, inst_info)
def setUp(self):
    """Prepare a driver instance plus fake image/context helpers."""
    super(_VirtDriverTestCase, self).setUp()

    self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
    self.connection = importutils.import_object(self.driver_module,
                                                fake.FakeVirtAPI())
    self.ctxt = test_utils.get_test_admin_context()
    self.image_service = fake_image.FakeImageService()

    # NOTE(dripton): resolve_driver_format does some file reading and
    # writing and chowning that complicate testing too much by requiring
    # using real directories with proper permissions. Just stub it out
    # here; we test it in test_imagebackend.py
    self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
                   imagebackend.Image._get_driver_format)

    os_vif.initialize()
def test_unplug(self, mock_unplug):
    """unplug() looks up the plugin by name and invokes its unplug()."""
    ext = extension.Extension(name="demo", entry_point="os-vif",
                              plugin=DemoPlugin, obj=None)
    with mock.patch('stevedore.extension.ExtensionManager.names',
                    return_value=['foobar']), \
            mock.patch('stevedore.extension.ExtensionManager.__getitem__',
                       return_value=ext):
        os_vif.initialize()
        inst_info = objects.instance_info.InstanceInfo()
        vif = objects.vif.VIFBridge(
            id='9a12694f-f95e-49fa-9edb-70239aee5a2c', plugin='foobar')
        os_vif.unplug(vif, inst_info)
        mock_unplug.assert_called_once_with(vif, inst_info)
def test_host_info_all(self):
    """host_info() reports the linux_bridge and ovs plugins in order."""
    os_vif.initialize()
    info = os_vif.host_info()

    self.assertEqual(len(info.plugin_info), 2)

    lb_info = info.plugin_info[0]
    self.assertEqual(lb_info.plugin_name, "linux_bridge")
    self.assertEqual(len(lb_info.vif_info), 1)
    self.assertEqual(lb_info.vif_info[0].vif_object_name, "VIFBridge")

    ovs_info = info.plugin_info[1]
    self.assertEqual(ovs_info.plugin_name, "ovs")
    self.assertEqual(len(ovs_info.vif_info), 3)
    self.assertEqual(ovs_info.vif_info[0].vif_object_name, "VIFBridge")
    self.assertEqual(ovs_info.vif_info[1].vif_object_name, "VIFOpenVSwitch")
    self.assertEqual(ovs_info.vif_info[2].vif_object_name, "VIFVHostUser")
def run():
    """CNI executable entry point (daemonized or deprecated standalone).

    REVISIT(ivc): current CNI implementation provided by this package is
    experimental and its primary purpose is to enable development of other
    components (e.g. functional tests, service/LBaaSv2 support).
    """
    cni_conf = utils.CNIConfig(jsonutils.load(sys.stdin))
    cli_args = ['--config-file', cni_conf.kuryr_conf]
    try:
        if cni_conf.debug:
            cli_args.append('-d')
    except AttributeError:
        pass
    config.init(cli_args)
    config.setup_logging()

    # Initialize o.vo registry.
    k_objects.register_locally_defined_vifs()
    os_vif.initialize()

    if CONF.cni_daemon.daemon_enabled:
        runner = cni_api.CNIDaemonizedRunner()
    else:
        # TODO(dulek): Switch that to versionutils.deprecation_warning once
        # bug 1754087 is fixed.
        versionutils.report_deprecated_feature(
            LOG,
            'Deploying kuryr-kubernetes without kuryr-daemon service is '
            'deprecated since Rocky release and may be removed in future '
            'releases.')
        runner = cni_api.CNIStandaloneRunner(k8s_cni.K8sCNIPlugin())
    LOG.info("Using '%s' ", runner.__class__.__name__)

    def _timeout(signum, frame):
        # Report a CNI timeout result before bailing out.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': k_const.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)
    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
def main():
    """Start the nova-compute service."""
    config.parse_args(sys.argv)
    logging.setup(CONF, 'nova')
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    objects.register_all()
    # Ensure os-vif objects are registered and plugins loaded
    os_vif.initialize()
    gmr.TextGuruMeditation.setup_autorun(version)

    # Compute never talks to the DB directly; everything goes through
    # the conductor.
    cmd_common.block_db_access('nova-compute')
    objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()

    server = service.Service.create(binary='nova-compute',
                                    topic=compute_rpcapi.RPC_TOPIC)
    service.serve(server)
    service.wait()
def main():
    """Start the nova-compute service."""
    config.parse_args(sys.argv)
    logging.setup(CONF, 'nova')
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    # Ensure os-vif objects are registered and plugins loaded
    os_vif.initialize()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    # Compute never talks to the DB directly; everything goes through
    # the conductor.
    cmd_common.block_db_access('nova-compute')
    objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()
    objects.Service.enable_min_version_cache()

    server = service.Service.create(binary='nova-compute',
                                    topic=compute_rpcapi.RPC_TOPIC)
    service.serve(server)
    service.wait()
def main():
    """Entry point for the kuryr-k8s-status CLI."""
    category_opt = cfg.SubCommandOpt(
        'category',
        title='command',
        description='kuryr-k8s-status command or category to execute',
        handler=add_parsers)
    conf = cfg.ConfigOpts()
    conf.register_cli_opt(category_opt)
    conf(sys.argv[1:])

    os_vif.initialize()
    objects.register_locally_defined_vifs()

    try:
        return conf.category.action_fn()
    except Exception:
        print('Error:\n%s' % traceback.format_exc())
        # This is 255 so it's not confused with the upgrade check exit codes.
        return 255
def run():
    """CNI executable entry point (daemonized or standalone).

    Bug fix: LOG.warning() was passed an extra 'R' argument while the
    message contains no %-placeholder, which made the logging module
    raise a formatting error on every standalone deployment.
    """
    # Python 2's sys.stdin has no .buffer attribute.
    if six.PY3:
        d = jsonutils.load(sys.stdin.buffer)
    else:
        d = jsonutils.load(sys.stdin)
    cni_conf = utils.CNIConfig(d)
    args = ['--config-file', cni_conf.kuryr_conf]
    try:
        if cni_conf.debug:
            args.append('-d')
    except AttributeError:
        pass
    config.init(args)
    config.setup_logging()

    # Initialize o.vo registry.
    k_objects.register_locally_defined_vifs()
    os_vif.initialize()

    if CONF.cni_daemon.daemon_enabled:
        runner = cni_api.CNIDaemonizedRunner()
    else:
        LOG.warning('Deploying kuryr-kubernetes without kuryr-daemon service')
        runner = cni_api.CNIStandaloneRunner(k8s_cni.K8sCNIPlugin())
    LOG.info("Using '%s' ", runner.__class__.__name__)

    def _timeout(signum, frame):
        # Report a CNI timeout result before bailing out.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': k_const.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)
    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
def setUp(self):
    """Configure physnet NUMA affinity and neutron fixtures."""
    # We need to enable neutron in this one
    self.flags(physnets=['foo', 'bar'], group='neutron')
    neutron_conf.register_dynamic_opts(CONF)
    self.flags(numa_nodes=[1], group='neutron_physnet_foo')
    self.flags(numa_nodes=[0], group='neutron_physnet_bar')
    self.flags(numa_nodes=[0, 1], group='neutron_tunnel')

    super(NUMAServersWithNetworksTest, self).setUp()

    # NOTE(mriedem): Unset the stub methods so we actually run our
    # neutronv2/api code and populate the net attributes on the
    # network model.
    fake_network.unset_stub_network_methods(self)

    self.neutron_fixture = self.useFixture(NUMAAffinityNeutronFixture(self))

    patcher = mock.patch('nova.virt.libvirt.host.Host.get_connection')
    self.mock_conn = patcher.start()
    self.addCleanup(patcher.stop)

    os_vif.initialize()
def _setup(self, params):
    """Initialize config, clients and the pod watcher for a CNI request."""
    cli_args = ['--config-file', params.config.kuryr_conf]
    try:
        if params.config.debug:
            cli_args.append('-d')
    except AttributeError:
        pass
    config.init(cli_args)
    config.setup_logging()
    os_vif.initialize()
    clients.setup_kubernetes_client()

    # Watch only the single pod this CNI invocation is about.
    self._pipeline = h_cni.CNIPipeline()
    self._watcher = k_watcher.Watcher(self._pipeline)
    self._watcher.add(
        "%(base)s/namespaces/%(namespace)s/pods"
        "?fieldSelector=metadata.name=%(pod)s" % {
            'base': k_const.K8S_API_BASE,
            'namespace': params.args.K8S_POD_NAMESPACE,
            'pod': params.args.K8S_POD_NAME})
def run():
    """CNI executable entry point (daemonized or standalone runner).

    REVISIT(ivc): current CNI implementation provided by this package is
    experimental and its primary purpose is to enable development of other
    components (e.g. functional tests, service/LBaaSv2 support).
    """
    cni_conf = utils.CNIConfig(jsonutils.load(sys.stdin))
    cli_args = ['--config-file', cni_conf.kuryr_conf]
    try:
        if cni_conf.debug:
            cli_args.append('-d')
    except AttributeError:
        pass
    config.init(cli_args)
    config.setup_logging()

    # Initialize o.vo registry.
    k_objects.register_locally_defined_vifs()
    os_vif.initialize()

    if CONF.cni_daemon.daemon_enabled:
        runner = cni_api.CNIDaemonizedRunner()
    else:
        runner = cni_api.CNIStandaloneRunner(K8sCNIPlugin())
    LOG.info("Using '%s' ", runner.__class__.__name__)

    def _timeout(signum, frame):
        # Report a CNI timeout result before bailing out.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': k_const.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)
    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
def main():
    """Start the nova-compute service."""
    config.parse_args(sys.argv)
    logging.setup(CONF, 'nova')
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    objects.register_all()
    # Ensure os-vif objects are registered and plugins loaded
    os_vif.initialize()
    gmr.TextGuruMeditation.setup_autorun(version)

    if CONF.conductor.use_local:
        LOG.warning(_LW('Conductor local mode is deprecated and will '
                        'be removed in a subsequent release'))
    else:
        # Compute must go through the conductor for all DB access.
        cmd_common.block_db_access('nova-compute')
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()

    server = service.Service.create(binary='nova-compute',
                                    topic=CONF.compute_topic,
                                    db_allowed=CONF.conductor.use_local)
    service.serve(server)
    service.wait()
def test_initialize(self, mock_EM):
    """initialize() constructs the stevedore extension manager."""
    self.assertIsNone(os_vif._EXT_MANAGER)
    os_vif.initialize()
    mock_EM.assert_called_once_with(
        invoke_on_load=False, namespace='os_vif')
    self.assertIsNotNone(os_vif._EXT_MANAGER)
# https://docs.openstack.org/os-vif/latest/user/usage.html import os_vif import uuid from nova import objects as nova_objects from os_vif import execption as vif_exc from os_vif import objects as vif_objects from os_vif import vnic_types os_vif.initialize() instance_uuid = 'd7a730ca-3c28-49c3-8f26-4662b909fe8a' instance = nova_objects.Instance.get_by_uuid(instance_uuid) instance_info = vif_objects.InstanceInfo( uuid=instance.uuid, name=instance.name, project_id=instance.project_id) subnet = vif_objects.Subnet(cidr='192.168.1.0/24') subnets = vif_objects.SubnetList([subnet]) network = vif_objects.Network(label='tenantnet', subnets=subnets, multi_host=False, should_provide_vlan=False, should_provide_bridge=False) vif_uuid = uuid.uuid4() vif = vif_objects.VIFVHostUser(id=vif_uuid,
import sys import os_vif from os_vif import objects from oslo_config import cfg from oslo_log import log as logging from nova import exception from nova.network import model LOG = logging.getLogger(__name__) CONF = cfg.CONF # Ensure os-vif objects are registered and plugins loaded os_vif.initialize() def _get_vif_name(vif): """Get a VIF device name :param vif: the nova.nework.model.VIF instance Get a string suitable for use as a host OS network device name :returns: a device name """ if vif.get('devname', None) is not None: return vif['devname']
def __init__(self):
    """Load the os-vif plugins so VIF objects can be handled."""
    os_vif.initialize()
def setUp(self):
    """Initialize config with no CLI arguments and load os-vif plugins."""
    super(TestCase, self).setUp()
    config.init(args=[])
    os_vif.initialize()