def test__signal_handlers_set(self, signal_handlers_set_mock):
    callables = set()
    l1 = service.ProcessLauncher(self.conf)
    callables.add(l1._handle_signal)
    self.assertEqual(1, len(service.ProcessLauncher._signal_handlers_set))
    l2 = service.ProcessLauncher(self.conf)
    callables.add(l2._handle_signal)
    self.assertEqual(2, len(service.ProcessLauncher._signal_handlers_set))
    self.assertEqual(callables, service.ProcessLauncher._signal_handlers_set)
def test_double_sighup(self, pipe_mock, green_pipe_mock, handle_signal_mock,
                       start_child_mock):
    # Test that issuing two SIGHUPs in a row does not exit; then send a
    # TERM that does cause an exit.
    pipe_mock.return_value = [None, None]
    launcher = service.ProcessLauncher(self.conf)
    serv = _Service()
    launcher.launch_service(serv, workers=0)

    def stager():
        # -1: start state
        # 0: post-init
        # 1: first HUP sent
        # 2: second HUP sent
        # 3: TERM sent
        stager.stage += 1
        if stager.stage < 3:
            launcher._handle_hup(1, mock.sentinel.frame)
        elif stager.stage == 3:
            launcher._handle_term(15, mock.sentinel.frame)
        else:
            self.fail("TERM did not kill launcher")

    stager.stage = -1
    handle_signal_mock.side_effect = stager

    launcher.wait()
    self.assertEqual(3, stager.stage)
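# A hedged illustration of the lifecycle the test above exercises: a process
# running a ProcessLauncher reloads on SIGHUP and exits on SIGTERM.
# launcher_pid below is a hypothetical stand-in for the real parent pid.
import os
import signal

os.kill(launcher_pid, signal.SIGHUP)   # first HUP: reload config, keep running
os.kill(launcher_pid, signal.SIGHUP)   # second HUP: still must not exit
os.kill(launcher_pid, signal.SIGTERM)  # TERM: clean shutdown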
def serve_rpc():
    plugin = manager.NeutronManager.get_plugin()

    # If 0 < rpc_workers then start_rpc_listeners would be called in a
    # subprocess and we cannot simply catch the NotImplementedError. It is
    # simpler to check this up front by testing whether the plugin supports
    # multiple RPC workers.
    if not plugin.rpc_workers_supported():
        LOG.debug("Active plugin doesn't implement start_rpc_listeners")
        if 0 < cfg.CONF.rpc_workers:
            LOG.error(_LE("'rpc_workers = %d' ignored because "
                          "start_rpc_listeners is not implemented."),
                      cfg.CONF.rpc_workers)
        raise NotImplementedError()

    try:
        rpc = RpcWorker(plugin)

        if cfg.CONF.rpc_workers < 1:
            rpc.start()
            return rpc
        else:
            # dispose the whole pool before os.fork, otherwise there will
            # be shared DB connections in child processes which may cause
            # DB errors.
            session.dispose()
            launcher = common_service.ProcessLauncher(cfg.CONF,
                                                      wait_interval=1.0)
            launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers)
            return launcher
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Unrecoverable error: please check log for '
                              'details.'))
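# The "dispose the whole pool before os.fork" comment above reflects the
# general SQLAlchemy rule that pooled connections must never cross a fork.
# A minimal sketch of that rule, with a placeholder database URL:
import os

from sqlalchemy import create_engine

engine = create_engine('postgresql://user:secret@localhost/example')
engine.dispose()   # parent closes its pooled connections before forking
pid = os.fork()    # children then open fresh connections on first use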
def test_stop(self, signal_mock, alarm_mock):
    signal_mock.SIGTERM = 15
    launcher = service.ProcessLauncher(self.conf)
    self.assertTrue(launcher.running)

    pid_nums = [22, 222]
    fakeServiceWrapper = service.ServiceWrapper(service.Service(), 1)
    launcher.children = {pid_nums[0]: fakeServiceWrapper,
                         pid_nums[1]: fakeServiceWrapper}
    with mock.patch('oslo_service.service.os.kill') as mock_kill:
        with mock.patch.object(launcher, '_wait_child') as _wait_child:
            def fake_wait_child():
                pid = pid_nums.pop()
                return launcher.children.pop(pid)

            _wait_child.side_effect = fake_wait_child
            with mock.patch('oslo_service.service.Service.stop') as \
                    mock_service_stop:
                mock_service_stop.side_effect = lambda: None
                launcher.stop()

    self.assertFalse(launcher.running)
    self.assertFalse(launcher.children)
    self.assertEqual([mock.call(222, signal_mock.SIGTERM),
                      mock.call(22, signal_mock.SIGTERM)],
                     mock_kill.mock_calls)
    mock_service_stop.assert_called_once_with()
def main():
    flocx_market_service.prepare_service(sys.argv)

    # Build and start the WSGI app
    launcher = service.ProcessLauncher(CONF, restart_method='mutate')
    server = wsgi_service.WSGIService('flocx_market_api')
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
def start(self, application, port, host='0.0.0.0', workers=0):
    """Run a WSGI server with the given application."""
    # Record the host and port the service will bind to.
    self._host = host
    self._port = port
    backlog = CONF.backlog

    # Create the listening socket.
    self._socket = self._get_socket(self._host,
                                    self._port,
                                    backlog=backlog)

    if workers < 1:
        # For the case where only one process is required.
        self._server = self.pool.spawn(self._run, application,
                                       self._socket)
        # Notify systemd that this process has finished starting up.
        systemd.notify_once()
    else:
        # Start in multi-process mode.
        # Minimize the cost of checking for child exit by extending the
        # wait interval past the default of 0.01s.
        self._launcher = common_service.ProcessLauncher(CONF,
                                                        wait_interval=1.0)
        self._server = WorkerService(self, application)
        self._launcher.launch_service(self._server, workers=workers)
def serve(api_service, conf, workers=1):
    global _launcher
    if _launcher:
        raise RuntimeError(_('serve() can only be called once'))

    _launcher = service.ProcessLauncher(conf, restart_method='mutate')
    _launcher.launch_service(api_service, workers=workers)
def launch_api():
    launcher = service.ProcessLauncher(cfg.CONF)
    server = api_service.WSGIService('mistral_api')
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()
def main():
    logging.register_options(CONF)
    CONF(sys.argv[1:], project='credsmgr', version=".1")
    logging.setup(CONF, "credsmgr")
    service_instance = service.WSGIService('credsmgr_api')
    service_launcher = oslo_service.ProcessLauncher(CONF)
    service_launcher.launch_service(service_instance,
                                    workers=service_instance.workers)
    service_launcher.wait()
def test_check_service_base_fails(self, pipe_mock, green_pipe_mock,
                                  handle_signal_mock, start_child_mock):
    pipe_mock.return_value = [None, None]
    launcher = service.ProcessLauncher(self.conf)

    class FooService(object):
        def __init__(self):
            pass

    serv = FooService()
    self.assertRaises(TypeError, launcher.launch_service, serv, 0)
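# The TypeError asserted above comes from ProcessLauncher.launch_service()
# validating that the launched object implements oslo.service's ServiceBase
# interface. A minimal conforming sketch; MyService and the project name are
# hypothetical:
from oslo_config import cfg
from oslo_service import service

CONF = cfg.CONF


class MyService(service.Service):
    def start(self):
        super(MyService, self).start()   # runs in each child after the fork

    def stop(self):
        super(MyService, self).stop()    # runs on SIGTERM/SIGINT

    def reset(self):
        super(MyService, self).reset()   # runs on SIGHUP (config reload)


if __name__ == '__main__':
    CONF([], project='myservice')
    launcher = service.ProcessLauncher(CONF)
    launcher.launch_service(MyService(), workers=2)
    launcher.wait()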
def main():
    config.setup_logging(CONF)
    config.init(sys.argv)

    process_launcher = service.ProcessLauncher(CONF)
    process_launcher.launch_service(
        conductor.Service(threads=CONF['service:conductor'].threads),
        workers=CONF['service:conductor'].workers)
    process_launcher.wait()
def launch_process(server, workers=1):
    try:
        global SERVER_PROCESS_MANAGER
        if not SERVER_PROCESS_MANAGER:
            SERVER_PROCESS_MANAGER = service.ProcessLauncher(CONF)
        SERVER_PROCESS_MANAGER.launch_service(server, workers=workers)
    except Exception as e:
        sys.stderr.write("ERROR: %s\n" % e)
        sys.exit(1)
def start(self):
    self.server = wsgi.Server(CONF, "m19k", self._app,
                              host=self.host,
                              port=self.port,
                              use_ssl=self.use_ssl)

    launcher = service.ProcessLauncher(CONF)
    launcher.launch_service(self.server, workers=self.workers)
    LOG.debug("launch service (%s:%s)." % (self.host, self.port))
    launcher.wait()
def start_plugin_workers():
    launchers = []
    # NOTE(twilson) get_service_plugins also returns the core plugin
    for plugin in manager.NeutronManager.get_unique_service_plugins():
        # TODO(twilson) Instead of defaulting here, come up with a good way to
        # share a common get_workers default between NeutronPluginBaseV2 and
        # ServicePluginBase
        for plugin_worker in getattr(plugin, 'get_workers', tuple)():
            launcher = common_service.ProcessLauncher(cfg.CONF)
            launcher.launch_service(plugin_worker)
            launchers.append(launcher)
    return launchers
def main():
    config.init(sys.argv[1:])
    worker = LogAgentWorker()
    launcher = service.ProcessLauncher(cfg.CONF, wait_interval=1.0)
    launcher.launch_service(worker, workers=cfg.CONF.rpc_workers)

    # Start everything.
    LOG.info('worker initialized successfully, now running.')
    pool = eventlet.GreenPool()
    pool.spawn(launcher.wait)
    pool.waitall()
def main():
    pdb.set_trace()
    # conf.register_opts(_conf_opt)
    conf.log_file = 'osp.log'
    conf.log_dir = '/var/log/'
    conf.debug = True
    logging.register_options(conf)
    logging.setup(conf, 'osp')
    launcher = service.ProcessLauncher(conf)
    launcher.launch_service(Service(topic='osprofiler', host='127.0.0.1'))
    LOG.info("launching service")
    launcher.wait()
def create_process(self, topic=None, host=None):
    metrics_task_server = service.MetricsService.create(
        binary='delfin-task',
        topic=topic,
        host=host,
        manager='delfin.task_manager.subprocess_manager.SubprocessManager',
        coordination=False)
    launcher = oslo_ser.ProcessLauncher(CONF)
    launcher.launch_service(metrics_task_server, workers=1)
    return launcher
def serve_rpc(self):
    """Launches configured # of workers per loaded plugin."""
    if cfg.CONF.QUARK_ASYNC.rpc_workers < 1:
        cfg.CONF.set_override('rpc_workers', 1, "QUARK_ASYNC")

    try:
        rpc = service.RpcWorker(self.plugins)
        launcher = common_service.ProcessLauncher(CONF, wait_interval=1.0)
        launcher.launch_service(rpc, workers=CONF.QUARK_ASYNC.rpc_workers)
        return launcher
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Unrecoverable error: please check log for '
                              'details.'))
def _start_workers(workers, neutron_api=None):
    process_workers = [
        plugin_worker for plugin_worker in workers
        if plugin_worker.worker_process_count > 0
    ]

    try:
        if process_workers:
            # Get eventual already existing instance from WSGI app
            worker_launcher = None
            if neutron_api:
                worker_launcher = neutron_api.wsgi_app.process_launcher
            if worker_launcher is None:
                worker_launcher = common_service.ProcessLauncher(
                    cfg.CONF, wait_interval=1.0, restart_method='mutate')

            # add extra process worker and spawn there all workers with
            # worker_process_count == 0
            thread_workers = [
                plugin_worker for plugin_worker in workers
                if plugin_worker.worker_process_count < 1
            ]
            if thread_workers:
                process_workers.append(
                    AllServicesNeutronWorker(thread_workers))

            # dispose the whole pool before os.fork, otherwise there will
            # be shared DB connections in child processes which may cause
            # DB errors.
            session.get_context_manager().dispose_pool()

            for worker in process_workers:
                worker_launcher.launch_service(worker,
                                               worker.worker_process_count)
        else:
            worker_launcher = common_service.ServiceLauncher(cfg.CONF)
            for worker in workers:
                worker_launcher.launch_service(worker)
        return worker_launcher
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Unrecoverable error: please check log for '
                          'details.')
def run(port_queue=None):
    eventlet.patcher.monkey_patch()
    launcher = service.ProcessLauncher(cfg.CONF)

    def hi_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'application/json')])
        yield 'hi'

    server = ServerWrapper(Server(hi_app), workers=3)
    server.launch_with(launcher)

    port = server.server.socket.getsockname()[1]
    port_queue.put(port)
    sys.stdout.flush()
    launcher.wait()
def main():
    config.setup_logging(CONF)
    config.init(sys.argv)

    process_launcher = service.ProcessLauncher(CONF)
    process_launcher.launch_service(
        api.Service(
            CONF, 'API',
            Loader(CONF).load_app('kosmos'),
            host=CONF['service:api'].bind_host,
            port=CONF['service:api'].bind_port),
        workers=CONF['service:api'].workers)
    process_launcher.wait()
def _launch(self, application, workers=0):
    service = WorkerService(self, application)
    if workers < 1:
        # The API service should run in the current process.
        self._server = service
        service.start()
        systemd.notify_once()
    else:
        # dispose the whole pool before os.fork, otherwise there will
        # be shared DB connections in child processes which may cause
        # DB errors.
        api.dispose()

        # The API service runs in a number of child processes.
        # Minimize the cost of checking for child exit by extending the
        # wait interval past the default of 0.01s.
        self._server = common_service.ProcessLauncher(cfg.CONF,
                                                      wait_interval=1.0)
        self._server.launch_service(service, workers=workers)
def serve(*servers):
    if max([server[1].workers for server in servers]) > 1:
        # TODO(arosen) - need to provide way to communicate with DSE services
        launcher = service.ProcessLauncher(cfg.CONF, restart_method='mutate')
    else:
        launcher = service.ServiceLauncher(cfg.CONF, restart_method='mutate')

    for name, server in servers:
        try:
            server.launch_with(launcher)
        except socket.error:
            LOG.exception(_('Failed to start the %s server'), name)
            raise

    try:
        launcher.wait()
    except KeyboardInterrupt:
        LOG.info("Congress server stopped by interrupt.")
def test_parent_process_reload_config(self,
                                      is_sighup_and_daemon_mock,
                                      reload_config_files_mock,
                                      notify_once_mock,
                                      log_opt_values_mock,
                                      handle_signal_mock,
                                      respawn_children_mock,
                                      stop_mock,
                                      kill_mock,
                                      alarm_mock):
    is_sighup_and_daemon_mock.return_value = True
    respawn_children_mock.side_effect = [None,
                                         eventlet.greenlet.GreenletExit()]
    launcher = service.ProcessLauncher(self.conf)
    launcher.sigcaught = 1
    launcher.children = {}

    wrap_mock = mock.Mock()
    launcher.children[222] = wrap_mock
    launcher.wait()

    reload_config_files_mock.assert_called_once_with()
    wrap_mock.service.reset.assert_called_once_with()
def serve(*servers):
    if max([server[1].workers for server in servers]) > 1:
        launcher = service.ProcessLauncher(CONF)
    else:
        launcher = service.ServiceLauncher(CONF)

    for name, server in servers:
        try:
            server.launch_with(launcher)
        except socket.error:
            logging.exception("Failed to start the %(name)s server" %
                              {"name": name})
            raise

    # notify calling process we are ready to serve
    systemd.notify_once()

    for name, server in servers:
        launcher.wait()
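# The worker-count dispatch above (ProcessLauncher for several workers,
# ServiceLauncher for one) is also available through oslo.service's launch()
# helper. A minimal sketch, where my_server stands in for any ServiceBase
# instance with a workers attribute:
from oslo_service import service

launcher = service.launch(CONF, my_server, workers=my_server.workers)
launcher.wait()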
def _launch(self, application, workers=0):
    service = WorkerService(self, application, self.disable_ssl, workers)
    if workers < 1:
        # The API service should run in the current process.
        self._server = service
        # Dump the initial option values
        cfg.CONF.log_opt_values(LOG, logging.DEBUG)
        service.start()
        systemd.notify_once()
    else:
        # dispose the whole pool before os.fork, otherwise there will
        # be shared DB connections in child processes which may cause
        # DB errors.
        api.context_manager.dispose_pool()

        # The API service runs in a number of child processes.
        # Minimize the cost of checking for child exit by extending the
        # wait interval past the default of 0.01s.
        self._server = common_service.ProcessLauncher(cfg.CONF,
                                                      wait_interval=1.0)
        self._server.launch_service(service,
                                    workers=service.worker_process_count)
def _start_workers(workers):
    process_workers = [
        plugin_worker for plugin_worker in workers
        if plugin_worker.worker_process_count > 0
    ]

    try:
        if process_workers:
            worker_launcher = common_service.ProcessLauncher(
                cfg.CONF, wait_interval=1.0)

            # add extra process worker and spawn there all workers with
            # worker_process_count == 0
            thread_workers = [
                plugin_worker for plugin_worker in workers
                if plugin_worker.worker_process_count < 1
            ]
            if thread_workers:
                process_workers.append(
                    AllServicesNeutronWorker(thread_workers))

            # dispose the whole pool before os.fork, otherwise there will
            # be shared DB connections in child processes which may cause
            # DB errors.
            session.context_manager.dispose_pool()

            for worker in process_workers:
                worker_launcher.launch_service(worker,
                                               worker.worker_process_count)
        else:
            worker_launcher = common_service.ServiceLauncher(cfg.CONF)
            for worker in workers:
                worker_launcher.launch_service(worker)
        return worker_launcher
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Unrecoverable error: please check log for '
                              'details.'))
def run():
    # ProcessLauncher calls cfg.CONF.log_opt_values(), which reads the
    # config_file option, so CONF() must be called first to register the
    # --config-file option.
    cfg.CONF()
    eventlet.patcher.monkey_patch()
    launcher = service.ProcessLauncher(cfg.CONF)

    def hi_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'application/json')])
        yield 'hi'

    server = ServerWrapper(Server(hi_app), workers=3)
    server.launch_with(launcher)

    print('%s' % server.server.socket.getsockname()[1])
    sys.stdout.flush()
    launcher.wait()
def serve(*servers):
    logging.warning(_('Running keystone via eventlet is deprecated as of Kilo '
                      'in favor of running in a WSGI server (e.g. mod_wsgi). '
                      'Support for keystone under eventlet will be removed in '
                      'the "M"-Release.'))
    if max([server[1].workers for server in servers]) > 1:
        launcher = service.ProcessLauncher(CONF)
    else:
        launcher = service.ServiceLauncher(CONF)

    for name, server in servers:
        try:
            server.launch_with(launcher)
        except socket.error:
            logging.exception(_('Failed to start the %(name)s server') %
                              {'name': name})
            raise

    # notify calling process we are ready to serve
    systemd.notify_once()

    for name, server in servers:
        launcher.wait()
def process_launcher():
    return service.ProcessLauncher(CONF, restart_method='mutate')
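# A hypothetical call site for the factory above; api_worker and
# CONF.api_workers are illustrative stand-ins, not names from the snippets:
launcher = process_launcher()
launcher.launch_service(api_worker, workers=CONF.api_workers)
launcher.wait()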