def main():
    """Entry point for the magnum-conductor service."""
    magnum_service.prepare_service(sys.argv)
    gmr.TextGuruMeditation.setup_autorun(version)

    LOG.info('Starting server in PID %s', os.getpid())
    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, logging.DEBUG)

    conductor_id = short_id.generate_id()
    endpoints = [
        indirection_api.Handler(),
        cluster_conductor.Handler(),
        conductor_listener.Handler(),
        ca_conductor.Handler(),
        federation_conductor.Handler(),
    ]

    server = rpc_service.Service.create(CONF.conductor.topic,
                                        conductor_id, endpoints,
                                        binary='magnum-conductor')

    # Fall back to one worker per CPU when the operator has not set a count.
    worker_count = CONF.conductor.workers
    if not worker_count:
        worker_count = processutils.get_worker_count()

    launcher = service.launch(CONF, server, workers=worker_count)

    # NOTE(mnaser): We create the periodic tasks here so that they
    #               can be attached to the main process and not
    #               duplicated in all the children if multiple
    #               workers are being used.
    server.create_periodic_tasks()
    server.start()
    launcher.wait()
def test_workers_set_default(self, mock_loader):
    """Workers default to the CPU count when not configured."""
    self.override_config('osapi_volume_listen_port',
                         CONF.test_service_listen_port)
    svc = service.WSGIService("osapi_volume")
    expected = processutils.get_worker_count()
    self.assertEqual(expected, svc.workers)
    self.assertTrue(mock_loader.called)
def launch_engine(setup_logging=True):
    """Configure and launch the heat-engine service.

    :param setup_logging: register and configure oslo logging when True.
    :returns: the oslo.service launcher wrapping the engine service.
    """
    if setup_logging:
        logging.register_options(cfg.CONF)
    cfg.CONF(project='heat', prog='heat-engine',
             version=version.version_info.version_string())
    if setup_logging:
        logging.setup(cfg.CONF, 'heat-engine')
    logging.set_defaults()
    messaging.setup()

    config.startup_sanity_check()

    # Refuse to start when no template format plugin is registered.
    mgr = None
    try:
        mgr = template._get_template_extension_manager()
    except template.TemplatePluginNotRegistered as ex:
        LOG.critical("%s", ex)
    if not mgr or not mgr.names():
        sys.exit("ERROR: No template format plugins registered")

    from heat.engine import service as engine  # noqa

    profiler.setup('heat-engine', cfg.CONF.host)
    gmr.TextGuruMeditation.setup_autorun(version)
    srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)

    # Default to at least four workers, but never fewer than the CPU count.
    workers = cfg.CONF.num_engine_workers
    if not workers:
        workers = max(4, processutils.get_worker_count())

    launcher = service.launch(cfg.CONF, srv, workers=workers,
                              restart_method='mutate')
    return launcher
def start_wsgi(self):
    """Start serving the WSGI application.

    With workers == 1 the application is served from a single green pool
    in this process (no fork); otherwise one child process is forked per
    worker, defaulting to one per CPU when workers == 0.
    """
    workers = self.conf.workers
    # childs == num of cores
    if workers == 0:
        childs_num = processutils.get_worker_count()
    # launch only one GreenPool without childs
    elif workers == 1:
        # Useful for profiling, test, debug etc.
        self.pool = eventlet.GreenPool(size=self.threads)
        self.pool.spawn_n(self._single_run, self.application, self.sock)
        return
    # childs equal specified value of workers
    else:
        childs_num = workers

    # BUGFIX: log the resolved child count, not the raw config value
    # (which is 0 when the operator lets it default to the CPU count).
    LOG.info("Starting %d workers", childs_num)
    signal.signal(signal.SIGTERM, self.kill_children)
    signal.signal(signal.SIGINT, self.kill_children)
    signal.signal(signal.SIGHUP, self.hup)

    # NOTE(review): this pipe presumably lets children detect parent
    # exit (write end held by the parent) — confirm against run_child.
    rfd, self.writepipe = os.pipe()
    self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')

    while len(self.children) < childs_num:
        self.run_child()
def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
    """Initialize, but do not start the WSGI server.

    :param name: The name of the WSGI server given to the loader.
    :param loader: Loads the WSGI application using the given name.
    :param use_ssl: Wrap the listening socket in SSL when True.
    :param max_url_len: Maximum request URL length accepted by the server.
    :returns: None
    """
    self.name = name
    self.manager = self._get_manager()
    self.loader = loader or wsgi.Loader()
    self.app = self.loader.load_app(name)
    # inherit all compute_api worker counts from osapi_compute
    if name.startswith("openstack_compute_api"):
        wname = "osapi_compute"
    else:
        wname = name
    self.host = getattr(CONF, "%s_listen" % name, "0.0.0.0")
    self.port = getattr(CONF, "%s_listen_port" % name, 0)
    self.workers = getattr(CONF, "%s_workers" % wname,
                           None) or processutils.get_worker_count()
    if self.workers and self.workers < 1:
        # BUGFIX: report the option that was actually consulted (wname,
        # not name) — compute API servers inherit osapi_compute_workers,
        # so an error naming "%s_workers" % name would mislead operators.
        worker_name = "%s_workers" % wname
        msg = _("%(worker_name)s value of %(workers)s is invalid, "
                "must be greater than 0") % {
            "worker_name": worker_name,
            "workers": str(self.workers),
        }
        raise exception.InvalidInput(msg)
    self.use_ssl = use_ssl
    self.server = wsgi.Server(
        name, self.app, host=self.host, port=self.port,
        use_ssl=self.use_ssl, max_url_len=max_url_len
    )
    # Pull back actual port used
    self.port = self.server.port
    self.backdoor_port = None
def build_all(self, deps=None):
    """Function that browse containers dependencies and build them.

    :params deps: Dictionary defining the container images dependencies.
    """
    if deps is None:
        deps = self.deps

    if isinstance(deps, (list,)):
        # Only a list of images can be multi-processed because they
        # are the last layer to build. Otherwise we could have issues
        # to build multiple times the same layer.
        # Number of workers will be based on CPU count with a min 2,
        # max 8. Concurrency in Buildah isn't that great so it's not
        # useful to go above 8.
        workers = min(8, max(2, processutils.get_worker_count()))
        with futures.ThreadPoolExecutor(max_workers=workers) as executor:
            pending = {executor.submit(self.build_all, image): image
                       for image in deps}
            futures.wait(pending, timeout=self.build_timeout,
                         return_when=futures.ALL_COMPLETED)
    elif isinstance(deps, (dict,)):
        # Build each image, then recurse into whatever depends on it.
        for image in deps:
            self._generate_container(image)
            self.build_all(deps.get(image))
    elif isinstance(deps, six.string_types):
        # A single image name: just build it.
        self._generate_container(deps)
def main():
    """Entry point for the magnum-api service."""
    service.prepare_service(sys.argv)
    gmr.TextGuruMeditation.setup_autorun(version)

    # Enable object backporting via the conductor
    base.MagnumObject.indirection_api = base.MagnumObjectIndirectionAPI()

    app = api_app.load_app()

    # Setup OSprofiler for WSGI service
    profiler.setup('magnum-api', CONF.host)

    # SSL configuration
    use_ssl = CONF.api.enabled_ssl

    # Create the WSGI server and start it
    host, port = CONF.api.host, CONF.api.port

    LOG.info(_LI('Starting server in PID %s'), os.getpid())
    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, logging.DEBUG)

    LOG.info(_LI('Serving on %(proto)s://%(host)s:%(port)s'),
             dict(proto="https" if use_ssl else "http",
                  host=host, port=port))

    # One worker process per CPU unless configured otherwise.
    worker_count = CONF.api.workers
    if not worker_count:
        worker_count = processutils.get_worker_count()
    LOG.info(_LI('Server will handle each request in a new process up to'
                 ' %s concurrent processes'), worker_count)

    serving.run_simple(host, port, app, processes=worker_count,
                       ssl_context=_get_ssl_configs(use_ssl))
def main():
    """Entry point for the heat-engine service."""
    logging.register_options(cfg.CONF)
    cfg.CONF(project='heat', prog='heat-engine',
             version=version.version_info.version_string())
    logging.setup(cfg.CONF, 'heat-engine')
    logging.set_defaults()
    messaging.setup()

    config.startup_sanity_check()

    # Bail out early when no template format plugin is available.
    mgr = None
    try:
        mgr = template._get_template_extension_manager()
    except template.TemplatePluginNotRegistered as ex:
        LOG.critical(_LC("%s"), ex)
    if not mgr or not mgr.names():
        sys.exit("ERROR: No template format plugins registered")

    from heat.engine import service as engine  # noqa

    profiler.setup('heat-engine', cfg.CONF.host)
    gmr.TextGuruMeditation.setup_autorun(version)
    srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)

    # At least four workers by default, but never fewer than the CPU count.
    workers = cfg.CONF.num_engine_workers
    if not workers:
        workers = max(4, processutils.get_worker_count())

    launcher = service.launch(cfg.CONF, srv, workers=workers)
    if cfg.CONF.enable_cloud_watch_lite:
        # We create the periodic tasks here, which mean they are created
        # only in the parent process when num_engine_workers>1 is specified
        srv.create_periodic_tasks()
    launcher.wait()
def __init__(self, name, loader=None):
    """Initialize, but do not start the WSGI server.

    :param name: The name of the WSGI server given to the loader.
    :param loader: Loads the WSGI application using the given name.
    :returns: None
    """
    self.name = name
    self.manager = self._get_manager()
    self.loader = loader or wsgi_common.Loader()
    # name must match a paste-deploy section (e.g. osapi_volume),
    # otherwise load_app fails with: LookupError: No section 'main'
    # (prefixed by 'app' or 'application' or 'composite' or 'composit'
    # or 'pipeline' or 'filter-app') found in config.
    self.app = self.loader.load_app(name)
    self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
    self.port = getattr(CONF, '%s_listen_port' % name, 0)
    # One worker per CPU when the per-service option is unset.
    configured = getattr(CONF, '%s_workers' % name, None)
    self.workers = configured or processutils.get_worker_count()
    if self.workers and self.workers < 1:
        worker_name = '%s_workers' % name
        msg = (_("%(worker_name)s value of %(workers)d is invalid, "
                 "must be greater than 0.")
               % {'worker_name': worker_name,
                  'workers': self.workers})
        raise exception.InvalidInput(msg)
    setup_profiler(name, self.host)
    self.server = wsgi.Server(name,
                              self.app,
                              host=self.host,
                              port=self.port)
def main(CONF):
    """Launch the transformer WSGI API and block until it exits."""
    from transformer.common import wsgi

    conf_file = CONF.find_file(CONF.api_paste_config)
    # One worker per CPU unless api_workers is set.
    workers = CONF.api_workers or processutils.get_worker_count()
    launcher = wsgi.launch('transformer', CONF.bind_port, conf_file,
                           host=CONF.bind_host, workers=workers)
    launcher.wait()
def __init__(self, name, loader=None):
    """Initialize, but do not start the WSGI server.

    :param name: The name of the WSGI server given to the loader.
    :param loader: Loads the WSGI application using the given name.
    :returns: None
    """
    self.name = name
    self.manager = self._get_manager()
    self.loader = loader or wsgi.Loader(CONF)
    self.app = self.loader.load_app(name)
    self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
    self.port = getattr(CONF, '%s_listen_port' % name, 0)
    # One worker per CPU when the per-service option is unset.
    configured = getattr(CONF, '%s_workers' % name, None)
    self.workers = configured or processutils.get_worker_count()
    if self.workers and self.workers < 1:
        worker_name = '%s_workers' % name
        msg = (_("%(worker_name)s value of %(workers)d is invalid, "
                 "must be greater than 0.")
               % {'worker_name': worker_name,
                  'workers': self.workers})
        raise exception.InvalidInput(msg)
    setup_profiler(name, self.host)
    self.server = wsgi.Server(CONF,
                              name,
                              self.app,
                              host=self.host,
                              port=self.port)
def _get_workers(worker_type_config_opt):
    """Return the worker count for the given eventlet_server option.

    Falls back to the CPU count (with a minimum of 2) when the option
    is not set.
    """
    configured = CONF.eventlet_server.get(worker_type_config_opt)
    if configured:
        return configured
    return max(2, processutils.get_worker_count())
def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
    """Initialize, but do not start the WSGI server.

    :param name: The name of the WSGI server given to the loader.
    :param loader: Loads the WSGI application using the given name.
    :returns: None
    """
    self.name = name
    self.manager = self._get_manager()
    self.loader = loader or wsgi.Loader()
    self.app = self.loader.load_app(name)
    self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
    self.port = getattr(CONF, '%s_listen_port' % name, 0)
    # One worker per CPU when the per-service option is unset.
    configured = getattr(CONF, '%s_workers' % name, None)
    self.workers = configured or processutils.get_worker_count()
    if self.workers and self.workers < 1:
        worker_name = '%s_workers' % name
        msg = (_("%(worker_name)s value of %(workers)s is invalid, "
                 "must be greater than 0")
               % {'worker_name': worker_name,
                  'workers': str(self.workers)})
        raise exception.InvalidInput(msg)
    self.use_ssl = use_ssl
    self.server = wsgi.Server(name,
                              self.app,
                              host=self.host,
                              port=self.port,
                              use_ssl=self.use_ssl,
                              max_url_len=max_url_len)
    # Pull back actual port used
    self.port = self.server.port
    self.backdoor_port = None
def test_workers_set_zero_user_setting(self, wsgi_server, mock_load_app,
                                       mock_find_config):
    """A configured value of 0 falls back to the CPU count."""
    self.override_config('osapi_karbor_workers', 0)
    svc = service.WSGIService("osapi_karbor")
    # If a value less than 1 is used, defaults to number of procs available
    self.assertEqual(processutils.get_worker_count(), svc.workers)
def test_workers_set_zero_user_setting(self, mock_loader):
    """A configured value of 0 falls back to the CPU count."""
    self.override_config('osapi_volume_workers', 0)
    svc = service.WSGIService("osapi_volume")
    # Values below 1 default to the number of procs available.
    self.assertEqual(processutils.get_worker_count(), svc.workers)
    self.assertTrue(mock_loader.called)
def start_server(conf):
    """Launch the trove API (port 8779 by default) and a fake taskmanager."""
    from trove.common import wsgi

    conf_file = conf.find_file(conf.api_paste_config)
    # One worker per CPU unless trove_api_workers is set.
    workers = conf.trove_api_workers or processutils.get_worker_count()
    launcher = wsgi.launch('trove', conf.bind_port or 8779, conf_file,
                           workers=workers)
    start_fake_taskmanager(conf)
    launcher.wait()
def main(CONF):
    """Launch the trove API service and block until it exits."""
    from trove.common import wsgi

    profile.setup_profiler('api', CONF.host)
    conf_file = CONF.find_file(CONF.api_paste_config)
    # One worker per CPU unless trove_api_workers is set.
    workers = CONF.trove_api_workers or processutils.get_worker_count()
    launcher = wsgi.launch('trove', CONF.bind_port, conf_file,
                           host=CONF.bind_host, workers=workers)
    launcher.wait()
def __init__(self, topic, endpoints, version):
    """Build the RPC server for *topic* on this host (not started).

    :param topic: messaging topic to listen on.
    :param endpoints: RPC endpoint objects exposed by the server.
    :param version: RPC API version advertised in the target.
    """
    target = messaging.Target(topic=topic,
                              server=utils.get_hostname(),
                              version=version)
    self._server = rpc.get_server(target, endpoints)
    # One worker per CPU unless messaging_workers is set.
    self._workers = (CONF.messaging_workers or
                     processutils.get_worker_count())
def test_workers_set_default(self, mock_server):
    """Workers default to the CPU count; server gets default host/port."""
    service_name = "ironic_api"
    svc = wsgi_service.WSGIService(service_name)
    self.assertEqual(processutils.get_worker_count(), svc.workers)
    mock_server.assert_called_once_with(CONF, service_name, svc.app,
                                        host='0.0.0.0', port=6385,
                                        use_ssl=False)
def test_api_http(self, mock_prep, mock_app, mock_run, mock_base):
    """Plain-HTTP startup wires the loaded app into the WSGI runner."""
    api.main()

    loaded_app = mock_app.load_app.return_value
    mock_prep.assert_called_once_with(mock.ANY)
    mock_app.load_app.assert_called_once_with()
    expected_workers = processutils.get_worker_count()
    mock_run.assert_called_once_with(base.CONF.api.host,
                                     base.CONF.api.port, loaded_app,
                                     processes=expected_workers,
                                     ssl_context=None)
def main(conf):
    """Launch the trove conductor RPC service and block until it exits."""
    from trove.common.rpc import service as rpc_service
    from trove.common.rpc import version as rpc_version

    topic = conf.conductor_queue
    server = rpc_service.RpcService(
        manager=conf.conductor_manager, topic=topic,
        rpc_api_version=rpc_version.RPC_API_VERSION)
    # One worker per CPU unless trove_conductor_workers is set.
    workers = conf.trove_conductor_workers or processutils.get_worker_count()
    launcher = openstack_service.launch(conf, server, workers=workers)
    launcher.wait()
def main():
    """Entry point for the nova-conductor service."""
    config.parse_args(sys.argv)
    logging.setup(CONF, "nova")
    utils.monkey_patch()
    objects.register_all()

    server = service.Service.create(binary='nova-conductor',
                                    topic=CONF.conductor.topic,
                                    manager=CONF.conductor.manager)
    # One worker per CPU unless conductor.workers is set.
    worker_count = CONF.conductor.workers or processutils.get_worker_count()
    service.serve(server, workers=worker_count)
    service.wait()
def test_conductor(self, mock_prep, mock_rpc, mock_launch):
    """main() creates the RPC service and launches it with CPU workers."""
    conductor.main()

    server = mock_rpc.Service.create.return_value
    launcher = mock_launch.return_value
    mock_prep.assert_called_once_with(mock.ANY)
    mock_rpc.Service.create.assert_called_once_with(
        base.CONF.conductor.topic, mock.ANY, mock.ANY,
        binary='magnum-conductor')
    expected_workers = processutils.get_worker_count()
    mock_launch.assert_called_once_with(base.CONF, server,
                                        workers=expected_workers)
    launcher.wait.assert_called_once_with()
def main():
    """Entry point for the nova-conductor service."""
    config.parse_args(sys.argv)
    logging.setup(CONF, "nova")
    utils.monkey_patch()
    objects.register_all()
    objects.Service.enable_min_version_cache()
    gmr.TextGuruMeditation.setup_autorun(version)

    server = service.Service.create(binary="nova-conductor",
                                    topic=CONF.conductor.topic,
                                    manager=CONF.conductor.manager)
    # One worker per CPU unless conductor.workers is set.
    worker_count = CONF.conductor.workers or processutils.get_worker_count()
    service.serve(server, workers=worker_count)
    service.wait()
def main():
    """Entry point for the nova-conductor service."""
    config.parse_args(sys.argv)
    logging.setup(CONF, "nova")
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    objects.Service.enable_min_version_cache()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)

    server = service.Service.create(binary='nova-conductor',
                                    topic=rpcapi.RPC_TOPIC)
    # One worker per CPU unless conductor.workers is set.
    worker_count = CONF.conductor.workers or processutils.get_worker_count()
    service.serve(server, workers=worker_count)
    service.wait()
def __init__(self, name):
    """Load the WSGI application *name* and build its server (not started).

    :param name: paste-deploy application name handed to the loader.
    """
    self._host = CONF.api_migration_listen
    self._port = CONF.api_migration_listen_port
    # One worker per CPU unless api_migration_workers is set.
    self._workers = (CONF.api_migration_workers or
                     processutils.get_worker_count())
    self._loader = wsgi.Loader(CONF)
    self._app = self._loader.load_app(name)
    self._server = wsgi.Server(CONF,
                               name,
                               self._app,
                               host=self._host,
                               port=self._port)
def main():
    """Entry point for the murano engine service."""
    try:
        config.parse_args()
        logging.setup(CONF, 'murano')

        # One worker per CPU unless engine.workers is set.
        worker_count = CONF.engine.workers
        if not worker_count:
            worker_count = processutils.get_worker_count()

        launcher = service.launch(CONF, engine.EngineService(),
                                  workers=worker_count)
        launcher.wait()
    except RuntimeError as e:
        sys.stderr.write("ERROR: %s\n" % e)
        sys.exit(1)
def main():
    """Entry point for the patron-verify service."""
    config.parse_args(sys.argv)
    logging.setup(CONF, "patron")
    utils.monkey_patch()
    objects.register_all()
    gmr.TextGuruMeditation.setup_autorun(version)

    server = service.Service.create(binary='patron-verify',
                                    topic=CONF.verify.topic,
                                    manager=CONF.verify.manager)
    # One worker per CPU unless verify.workers is set.
    worker_count = CONF.verify.workers or processutils.get_worker_count()
    service.serve(server, workers=worker_count)
    service.wait()
def __init__(self, name, use_ssl=False):
    """Initialize, but do not start the WSGI server.

    :param name: The name of the WSGI server given to the loader.
    :param use_ssl: Wraps the socket in an SSL context if True.
    """
    self.name = name
    self.app = app.VersionSelectorApplication()
    # One worker per CPU unless api.workers is set.
    self.workers = (CONF.api.workers or
                    processutils.get_worker_count())
    self.server = wsgi.Server(CONF,
                              name,
                              self.app,
                              host=CONF.api.host,
                              port=CONF.api.port,
                              use_ssl=use_ssl,
                              logger_name=name)
def main(CONF):
    """Launch the trove API with fault-persistence notification wired up."""
    from trove.common import cfg
    from trove.common import notification
    from trove.common import wsgi
    from trove.instance import models as inst_models

    # Persist instance faults raised during API-initiated operations.
    notification.DBaaSAPINotification.register_notify_callback(
        inst_models.persist_instance_fault)
    cfg.set_api_config_defaults()
    profile.setup_profiler('api', CONF.host)

    conf_file = CONF.find_file(CONF.api_paste_config)
    # One worker per CPU unless trove_api_workers is set.
    workers = CONF.trove_api_workers or processutils.get_worker_count()
    launcher = wsgi.launch('trove', CONF.bind_port, conf_file,
                           host=CONF.bind_host, workers=workers)
    launcher.wait()
def test_api_workers_default(self):
    """API workers default to one per CPU."""
    expected = processutils.get_worker_count()
    self.assertEqual(expected, neutron_service._get_api_workers())
# Need to register global_opts from cinder.common import config # noqa from cinder.db import api as session from cinder import i18n i18n.enable_lazy() from cinder import objects from cinder import service from cinder import utils from cinder import version CONF = cfg.CONF backup_workers_opt = cfg.IntOpt( 'backup_workers', default=1, min=1, max=processutils.get_worker_count(), help='Number of backup processes to launch. Improves performance with ' 'concurrent backups.') CONF.register_opt(backup_workers_opt) LOG = None # NOTE(mriedem): The default backup driver uses swift and performs read/write # operations in a thread. swiftclient will log requests and responses at DEBUG # level, which can cause a thread switch and break the backup operation. So we # set a default log level of WARN for swiftclient to try and avoid this issue. _EXTRA_DEFAULT_LOG_LEVELS = ['swiftclient=WARN'] def _launch_backup_process(launcher, num_process): try:
def test_get_worker_count_cpu_count_not_implemented(self, mock_cpu_count):
    """Fall back to a single worker when the CPU count is unavailable."""
    self.assertEqual(1, processutils.get_worker_count())
def test_workers_set_default(self, wsgi_server):
    """Workers default to the CPU count when not configured."""
    svc = service.WSGIService("osapi_volume")
    self.assertEqual(processutils.get_worker_count(), svc.workers)
def test_openstack_compute_api_workers_set_default(self):
    """Compute API workers default to the CPU count."""
    svc = service.WSGIService("openstack_compute_api_v2")
    self.assertEqual(svc.workers, processutils.get_worker_count())
def test_get_worker_count(self, mock_cpu_count):
    """get_worker_count() mirrors the (mocked) CPU count."""
    self.assertEqual(8, processutils.get_worker_count())
def test_workers_set_default(self, mock_loader):
    """Workers default to the CPU count and the app loader is invoked."""
    svc = service.WSGIService("osapi_volume")
    self.assertEqual(processutils.get_worker_count(), svc.workers)
    self.assertTrue(mock_loader.called)
help=_('The backlog value that will be used when creating the ' 'TCP listener socket.')), cfg.IntOpt('tcp_keepidle', default=600, help=_('The value for the socket option TCP_KEEPIDLE. This is ' 'the time in seconds that the connection must be idle ' 'before TCP starts sending keepalive probes.')), cfg.StrOpt('ca_file', help=_('CA certificate file to use to verify ' 'connecting clients.')), cfg.StrOpt('cert_file', help=_('Certificate file to use when starting API ' 'server securely.')), cfg.StrOpt('key_file', help=_('Private key file to use when starting API ' 'server securely.')), ] eventlet_opts = [ cfg.IntOpt('workers', default=processutils.get_worker_count(), help=_('The number of child process workers that will be ' 'created to service requests. The default will be ' 'equal to the number of CPUs available.')), cfg.IntOpt('max_header_line', default=16384, help=_('Maximum line size of message headers to be accepted. ' 'max_header_line may need to be increased when using ' 'large tokens (typically those generated by the ' 'Keystone v3 API with big service catalogs')), cfg.BoolOpt('http_keepalive', default=True, help=_('If False, server will return the header ' '"Connection: close", ' 'If True, server will return "Connection: Keep-Alive" ' 'in its responses. In order to close the client socket ' 'connection explicitly after the response is sent and ' 'read successfully by the client, you simply have to '
def test_workers_set_zero_setting(self, wsgi_server):
    """api_workers=0 falls back to the CPU count."""
    self.config(api_workers=0, group='api')
    svc = service.WSGIService("ironic_api")
    self.assertEqual(processutils.get_worker_count(), svc.workers)
def test_workers_default(self, launch, setup, parse_args):
    """The engine launches with one worker per CPU by default."""
    engine.main()
    expected = processutils.get_worker_count()
    launch.assert_called_once_with(mock.ANY, mock.ANY, workers=expected)
def _get_api_workers():
    """Return the configured API worker count, defaulting to the CPU count."""
    workers = cfg.CONF.api_workers
    if workers is None:
        return processutils.get_worker_count()
    return workers
def get_num_workers():
    """Return the configured number of workers.

    A configured value of None means one worker per available CPU.
    """
    configured = CONF.workers
    return (processutils.get_worker_count()
            if configured is None else configured)
def test_workers_set_zero_user_setting(self, wsgi_server):
    """osapi_volume_workers=0 falls back to the CPU count."""
    self.override_config('osapi_volume_workers', 0)
    svc = service.WSGIService("osapi_volume")
    # If a value less than 1 is used, defaults to number of procs available
    self.assertEqual(processutils.get_worker_count(), svc.workers)
def test_workers_set_default(self):
    """Masakari API workers default to the CPU count."""
    svc = service.WSGIService("masakari_api")
    self.assertEqual(svc.workers, processutils.get_worker_count())
def test_openstack_compute_api_workers_set_zero_user_setting(self):
    """osapi_compute_workers=0 falls back to the CPU count."""
    CONF.set_override('osapi_compute_workers', 0)
    svc = service.WSGIService("openstack_compute_api_v2")
    # If a value less than 1 is used, defaults to number of procs available
    self.assertEqual(svc.workers, processutils.get_worker_count())
'the time in seconds that the connection must be idle ' 'before TCP starts sending keepalive probes.')), cfg.StrOpt('ca_file', help=_('CA certificate file to use to verify ' 'connecting clients.')), cfg.StrOpt('cert_file', help=_('Certificate file to use when starting API ' 'server securely.')), cfg.StrOpt('key_file', help=_('Private key file to use when starting API ' 'server securely.')), ] eventlet_opts = [ cfg.IntOpt('workers', default=processutils.get_worker_count(), help=_('The number of child process workers that will be ' 'created to service requests. The default will be ' 'equal to the number of CPUs available.')), cfg.IntOpt('max_header_line', default=16384, help=_('Maximum line size of message headers to be accepted. ' 'max_header_line may need to be increased when using ' 'large tokens (typically those generated by the ' 'Keystone v3 API with big service catalogs')), cfg.BoolOpt('http_keepalive', default=True, help=_('If False, server will return the header ' '"Connection: close", ' 'If True, server will return "Connection: Keep-Alive" ' 'in its responses. In order to close the client socket '
def test_workers_zero_setting(self, launch, setup, parse_args):
    """engine_workers=0 launches with one worker per CPU."""
    self.override_config("engine_workers", 0, "engine")
    engine.main()
    expected = processutils.get_worker_count()
    launch.assert_called_once_with(mock.ANY, mock.ANY, workers=expected)