def __init__(self):
    """Register the CNI daemon's child services and their shared state.

    Sets up the o.vo registry, os-vif and the Kubernetes client, then adds
    the watcher, server and health-server workers with the shared registry
    dict and health flag they communicate through.
    """
    super(CNIDaemonServiceManager, self).__init__()
    # TODO(dulek): Use cotyledon.oslo_config_glue to support conf reload.
    # TODO(vikasc): Should be done using dynamically loadable OVO types
    # plugin.
    objects.register_locally_defined_vifs()
    os_vif.initialize()
    clients.setup_kubernetes_client()

    self.manager = multiprocessing.Manager()
    # Proxy dict shared across processes. For Watcher->Server communication.
    shared_registry = self.manager.dict()
    # Shared boolean handed to every child; the health server gets it alone.
    liveness_flag = multiprocessing.Value(c_bool, True)

    self.add(CNIDaemonWatcherService, workers=1,
             args=(shared_registry, liveness_flag,))
    self.add(CNIDaemonServerService, workers=1,
             args=(shared_registry, liveness_flag,))
    self.add(CNIDaemonHealthServerService, workers=1, args=(liveness_flag,))

    self.register_hooks(on_terminate=self.terminate)
def __init__(self):
    """Wire up the controller pipeline, the watcher and the two handlers."""
    super(KuryrK8sService, self).__init__()
    objects.register_locally_defined_vifs()

    pipeline = h_pipeline.ControllerPipeline(self.tg)
    self.watcher = watcher.Watcher(pipeline, self.tg)
    # TODO(ivc): pluggable resource/handler registration
    for resource in ("pods", "services", "endpoints"):
        self.watcher.add("%s/%s" % (constants.K8S_API_BASE, resource))
    pipeline.register(h_vif.VIFHandler())
    pipeline.register(h_lbaas.LBaaSSpecHandler())
def __init__(self):
    """Build the watcher/pipeline pair and register the loaded handlers."""
    super(KuryrK8sService, self).__init__()
    objects.register_locally_defined_vifs()

    pipeline = h_pipeline.ControllerPipeline(self.tg)
    self.watcher = watcher.Watcher(pipeline, self.tg)
    self.health_manager = health.HealthServer()

    # Each dynamically loaded handler both adds its watch path and joins
    # the processing pipeline.
    for handler in _load_kuryr_ctrlr_handlers():
        self.watcher.add(handler.get_watch_path())
        pipeline.register(handler)
def __init__(self):
    """Set up all kuryr-daemon child services under one service manager.

    Registers the shared multiprocessing state (registry dict, health
    flag, metrics queue), adds the watcher, server, health and exporter
    workers, and installs a hook that shuts the whole daemon down when
    any child worker dies.
    """
    # NOTE(mdulko): Default shutdown timeout is 60 seconds and K8s won't
    # wait more by default anyway.
    super(CNIDaemonServiceManager, self).__init__()
    self._server_service = None
    # TODO(dulek): Use cotyledon.oslo_config_glue to support conf reload.
    # TODO(vikasc): Should be done using dynamically loadable OVO types
    # plugin.
    objects.register_locally_defined_vifs()
    os_vif.initialize()
    clients.setup_kubernetes_client()
    if CONF.sriov.enable_pod_resource_service:
        # Optional client, only when the SR-IOV pod-resource service is on.
        clients.setup_pod_resources_client()

    # Shared state handed to the child processes below.
    self.manager = multiprocessing.Manager()
    registry = self.manager.dict()  # For Watcher->Server communication.
    healthy = multiprocessing.Value(c_bool, True)
    metrics = self.manager.Queue()  # Server -> exporter metrics channel.

    self.add(watcher_service.KuryrPortWatcherService, workers=1, args=(
        registry, healthy, ))
    self.add(watcher_service.PodWatcherService, workers=1, args=(
        registry, healthy, ))
    # Keep the server's service id so it can be referenced later
    # (presumably for targeted restart/termination — confirm with callers).
    self._server_service = self.add(CNIDaemonServerService, workers=1, args=(
        registry, healthy, metrics, ))
    self.add(CNIDaemonHealthServerService, workers=1, args=(healthy, ))
    self.add(CNIDaemonExporterService, workers=1, args=(metrics, ))

    def shutdown_hook(service_id, worker_id, exit_code):
        # A dead child leaves the daemon in an undefined state, so stop
        # everything instead of limping along.
        LOG.critical(f'Child Service {service_id} had exited with code '
                     f'{exit_code}, stopping kuryr-daemon')
        self.shutdown()

    self.register_hooks(on_terminate=self.terminate,
                        on_dead_worker=shutdown_hook)
def run():
    """CNI executable entry point: parse stdin config, pick a runner, run it.

    Installs a SIGALRM watchdog so a hung runner still reports a CNI
    timeout error on stdout before exiting.
    """
    # REVISIT(ivc): current CNI implementation provided by this package is
    # experimental and its primary purpose is to enable development of other
    # components (e.g. functional tests, service/LBaaSv2 support)
    cni_conf = utils.CNIConfig(jsonutils.load(sys.stdin))
    args = ['--config-file', cni_conf.kuryr_conf]
    # 'debug' is optional in the CNI config; absent means not enabled.
    if getattr(cni_conf, 'debug', False):
        args.append('-d')
    config.init(args)
    config.setup_logging()

    # Initialize o.vo registry.
    k_objects.register_locally_defined_vifs()
    os_vif.initialize()

    if CONF.cni_daemon.daemon_enabled:
        runner = cni_api.CNIDaemonizedRunner()
    else:
        # TODO(dulek): Switch that to versionutils.deprecation_warning once
        # bug 1754087 is fixed.
        versionutils.report_deprecated_feature(
            LOG,
            'Deploying kuryr-kubernetes without kuryr-daemon service is '
            'deprecated since Rocky release and may be removed in future '
            'releases.')
        runner = cni_api.CNIStandaloneRunner(k8s_cni.K8sCNIPlugin())
    LOG.info("Using '%s' ", runner.__class__.__name__)

    def _timeout(signum, frame):
        # Report a CNI timeout result and bail out non-zero.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': k_const.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)

    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
def __init__(self):
    """Initialize the controller: watcher, health server, handlers, pool.

    Statement order matters here: the pipeline must exist before the
    watcher, and handlers are registered before the VIF pool driver is
    resolved.
    """
    super(KuryrK8sService, self).__init__()
    periodic_task.PeriodicTasks.__init__(self, CONF)

    objects.register_locally_defined_vifs()
    pipeline = h_pipeline.ControllerPipeline(self.tg)
    # exit_on_stop=True: the service terminates when the watcher stops.
    self.watcher = watcher.Watcher(pipeline, self.tg, exit_on_stop=True)
    self.health_manager = health.HealthServer()
    # NOTE(review): current_leader starts unset — presumably updated by a
    # leader-election mechanism elsewhere; confirm against the rest of the
    # class.
    self.current_leader = None
    self.node_name = utils.get_node_name()

    handlers = _load_kuryr_ctrlr_handlers()
    for handler in handlers:
        self.watcher.add(handler.get_watch_path())
        pipeline.register(handler)
    # Multi-pool driver wraps the per-VIF-type pool drivers.
    self.pool_driver = drivers.VIFPoolDriver.get_instance(
        specific_driver='multi_pool')
    self.pool_driver.set_vif_driver()
def main():
    """Entry point for the kuryr-k8s-status CLI.

    Parses the subcommand from argv, initializes os-vif and the o.vo
    registry, then dispatches to the selected action. Any unexpected
    failure prints the traceback and returns 255.
    """
    category_opt = cfg.SubCommandOpt(
        'category',
        title='command',
        description='kuryr-k8s-status command or category to execute',
        handler=add_parsers)
    conf = cfg.ConfigOpts()
    conf.register_cli_opt(category_opt)
    conf(sys.argv[1:])

    os_vif.initialize()
    objects.register_locally_defined_vifs()

    try:
        return conf.category.action_fn()
    except Exception:
        print('Error:\n%s' % traceback.format_exc())
        # This is 255 so it's not confused with the upgrade check exit codes.
        return 255
def run():
    """CNI executable entry point: parse stdin config, pick a runner, run it.

    Reads the CNI JSON config from stdin (binary buffer on Python 3),
    initializes logging and the o.vo registry, selects the daemonized or
    standalone runner, and arms a SIGALRM watchdog that reports a CNI
    timeout before exiting.
    """
    if six.PY3:
        d = jsonutils.load(sys.stdin.buffer)
    else:
        d = jsonutils.load(sys.stdin)
    cni_conf = utils.CNIConfig(d)
    args = ['--config-file', cni_conf.kuryr_conf]
    try:
        # 'debug' is an optional key in the CNI config.
        if cni_conf.debug:
            args.append('-d')
    except AttributeError:
        pass
    config.init(args)
    config.setup_logging()

    # Initialize o.vo registry.
    k_objects.register_locally_defined_vifs()
    os_vif.initialize()

    if CONF.cni_daemon.daemon_enabled:
        runner = cni_api.CNIDaemonizedRunner()
    else:
        # Fix: the original call passed a stray positional argument ('R',
        # a leftover release tag from report_deprecated_feature) while the
        # format string has no placeholder, which makes the logging module
        # raise an internal "not all arguments converted" formatting error.
        LOG.warning('Deploying kuryr-kubernetes without kuryr-daemon service')
        runner = cni_api.CNIStandaloneRunner(k8s_cni.K8sCNIPlugin())
    LOG.info("Using '%s' ", runner.__class__.__name__)

    def _timeout(signum, frame):
        # Report a CNI timeout result and bail out non-zero.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': k_const.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)

    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
def run():
    """CNI executable entry point with a SIGALRM-based timeout guard."""
    # REVISIT(ivc): current CNI implementation provided by this package is
    # experimental and its primary purpose is to enable development of other
    # components (e.g. functional tests, service/LBaaSv2 support)
    cni_conf = utils.CNIConfig(jsonutils.load(sys.stdin))
    args = ['--config-file', cni_conf.kuryr_conf]
    # 'debug' is optional in the CNI config; absent means not enabled.
    if getattr(cni_conf, 'debug', False):
        args.append('-d')
    config.init(args)
    config.setup_logging()

    # Initialize o.vo registry.
    k_objects.register_locally_defined_vifs()
    os_vif.initialize()

    daemonized = CONF.cni_daemon.daemon_enabled
    if daemonized:
        runner = cni_api.CNIDaemonizedRunner()
    else:
        runner = cni_api.CNIStandaloneRunner(K8sCNIPlugin())
    LOG.info("Using '%s' ", runner.__class__.__name__)

    def _timeout(signum, frame):
        # Emit a CNI timeout result on stdout and exit non-zero.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': k_const.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)

    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
def run():
    """CNI executable entry point; always uses the daemonized runner.

    Reads the CNI JSON config from stdin (binary buffer on Python 3) and
    arms a SIGALRM watchdog that reports a CNI timeout before exiting.
    """
    stream = sys.stdin.buffer if six.PY3 else sys.stdin
    d = jsonutils.load(stream)
    cni_conf = utils.CNIConfig(d)
    # The kuryr.conf path is optional in the CNI config.
    if 'kuryr_conf' in d:
        args = ['--config-file', cni_conf.kuryr_conf]
    else:
        args = []
    # 'debug' is optional too; absent means not enabled.
    if getattr(cni_conf, 'debug', False):
        args.append('-d')
    config.init(args)
    config.setup_logging()

    # Initialize o.vo registry.
    k_objects.register_locally_defined_vifs()
    os_vif.initialize()

    runner = cni_api.CNIDaemonizedRunner()

    def _timeout(signum, frame):
        # Emit a CNI timeout result on stdout and exit non-zero.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': k_const.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _timeout)
    signal.alarm(_CNI_TIMEOUT)

    status = runner.run(os.environ, cni_conf, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)
def run():
    """Early CNI executable entry point: run the plugin with a timeout guard."""
    # REVISIT(ivc): current CNI implementation provided by this package is
    # experimental and its primary purpose is to enable development of other
    # components (e.g. functional tests, service/LBaaSv2 support)
    # TODO(vikasc): Should be done using dynamically loadable OVO types
    # plugin.
    objects.register_locally_defined_vifs()

    runner = cni_api.CNIRunner(K8sCNIPlugin())

    def _on_alarm(signum, frame):
        # Emit a CNI timeout result on stdout and exit non-zero.
        runner._write_dict(sys.stdout, {
            'msg': 'timeout',
            'code': k_const.CNI_TIMEOUT_CODE,
        })
        LOG.debug('timed out')
        sys.exit(1)

    signal.signal(signal.SIGALRM, _on_alarm)
    signal.alarm(_CNI_TIMEOUT)

    status = runner.run(os.environ, sys.stdin, sys.stdout)
    LOG.debug("Exiting with status %s", status)
    if status:
        sys.exit(status)