def subscribe():
    try:
        log.register_options(CONF)
    except cfg.ArgsAlreadyParsedError:
        pass
    log.setup(CONF, 'storyboard')
    CONF(project='storyboard')
    CONF.register_opts(NOTIFICATION_OPTS, "notifications")

    subscriber = Subscriber(CONF.notifications)
    subscriber.start()

    manager = enabled.EnabledExtensionManager(
        namespace='storyboard.plugin.worker',
        check_func=check_enabled,
        invoke_on_load=True,
        invoke_args=(CONF,)
    )

    while subscriber.started:
        (method, properties, body) = subscriber.get()

        if not method or not properties:
            LOG.debug(_("No messages available, sleeping for 5 seconds."))
            time.sleep(5)
            continue

        manager.map(handle_event, body)

        # Ack the message
        subscriber.ack(method.delivery_tag)
def main():
    CONF.register_cli_opt(category_opt)
    try:
        log.register_options(CONF)
        CONF(sys.argv[1:], project='rumster',
             version=version.version_info.version_string())
        log.setup(CONF, "rumster")
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_LI("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_LI('sudo failed, continuing as if nothing happened'))

        print(_LI('Please re-run rumster-manage as root.'))
        sys.exit(2)

    fn = CONF.category.action_fn
    fn_args = fetch_func_args(fn)
    fn(*fn_args)
def main(args=sys.argv[1:]):  # pragma: no cover
    log.register_options(CONF)
    CONF(args, project='ironic-inspector')

    debug = CONF.debug

    log.set_defaults(default_log_levels=[
        'urllib3.connectionpool=WARN',
        'keystonemiddleware.auth_token=WARN',
        'requests.packages.urllib3.connectionpool=WARN',
        ('ironicclient.common.http=INFO' if debug
         else 'ironicclient.common.http=ERROR')])
    log.setup(CONF, 'ironic_inspector')

    app_kwargs = {'host': CONF.listen_address,
                  'port': CONF.listen_port}

    context = create_ssl_context()
    if context:
        app_kwargs['ssl_context'] = context

    init()
    try:
        app.run(**app_kwargs)
    finally:
        firewall.clean_up()
def prepare_service(argv=None, config_files=None, share=False):
    conf = cfg.ConfigOpts()
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)
    db_options.set_defaults(conf)
    if profiler_opts:
        profiler_opts.set_defaults(conf)
    if not share:
        defaults.set_cors_middleware_defaults()
        oslo_i18n.enable_lazy()
        log.register_options(conf)

    if argv is None:
        argv = sys.argv
    conf(argv[1:], project='panko', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    if not share:
        log.setup(conf, 'panko')
    profiler.setup(conf)
    # NOTE(liusheng): guru meditation cannot run with a service under the
    # apache daemon, so when panko-api runs under mod_wsgi, argv is [] and
    # we do not start guru.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    return conf
def prepare_service(argv=None):
    if argv is None:
        argv = []
    log.register_options(CONF)
    config.parse_args(argv)
    config.set_config_defaults()
    log.setup(CONF, 'zun')
def new_config():
    conf = cfg.ConfigOpts()
    log.register_options(conf)
    conf.register_opts(context_opts)
    conf.register_opts(common_opts)
    conf.register_opts(host_opts)
    conf.register_opts(db_opts)
    conf.register_opts(_options.eventlet_backdoor_opts)
    conf.register_opts(_options.periodic_opts)

    conf.register_opts(_options.ssl_opts, "ssl")

    conf.register_group(retry_opt_group)
    conf.register_opts(retry_opts, group=retry_opt_group)

    conf.register_group(queue_opt_group)
    conf.register_opts(queue_opts, group=queue_opt_group)

    conf.register_group(ks_queue_opt_group)
    conf.register_opts(ks_queue_opts, group=ks_queue_opt_group)

    conf.register_group(quota_opt_group)
    conf.register_opts(quota_opts, group=quota_opt_group)

    # Update default values from libraries that carry their own oslo.config
    # initialization and configuration.
    set_middleware_defaults()

    return conf
def _log_setup(self):
    CONF = cfg.CONF
    self.set_request_context()
    DOMAIN = "masakari"
    CONF.log_file = self.conf_log.get("log_file")
    CONF.use_stderr = False

    logging.register_options(CONF)
    logging.setup(CONF, DOMAIN)

    log_dir = os.path.dirname(self.conf_log.get("log_file"))

    # Create the log directory if it does not already exist.
    try:
        os.makedirs(log_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(log_dir):
            pass
        else:
            raise
    return
def main():
    CONF.register_cli_opt(command_opt)
    try:
        logging.register_options(CONF)
        cfg_files = cfg.find_config_files(project='glance',
                                          prog='glance-registry')
        cfg_files.extend(cfg.find_config_files(project='glance',
                                               prog='glance-api'))
        cfg_files.extend(cfg.find_config_files(project='glance',
                                               prog='glance-manage'))
        config.parse_args(default_config_files=cfg_files,
                          usage="%(prog)s [options] <cmd>")
        logging.setup(CONF, 'glance')
    except RuntimeError as e:
        sys.exit("ERROR: %s" % e)

    try:
        if CONF.command.action.startswith('db'):
            return CONF.command.action_fn()
        else:
            func_kwargs = {}
            for k in CONF.command.action_kwargs:
                v = getattr(CONF.command, 'action_kwarg_' + k)
                if v is None:
                    continue
                if isinstance(v, six.string_types):
                    v = encodeutils.safe_decode(v)
                func_kwargs[k] = v
            func_args = [encodeutils.safe_decode(arg)
                         for arg in CONF.command.action_args]
            return CONF.command.action_fn(*func_args, **func_kwargs)
    except exception.GlanceException as e:
        sys.exit("ERROR: %s" % utils.exception_to_str(e))
def main():
    CONF.register_cli_opts(IMPORT_OPTS)

    try:
        log.register_options(CONF)
    except cfg.ArgsAlreadyParsedError:
        pass
    log.setup(CONF, 'storyboard')
    CONF(project='storyboard')

    # only_tags and exclude_tags are mutually exclusive
    if CONF.only_tags and CONF.exclude_tags:
        print('ERROR: only-tags and exclude-tags are mutually exclusive',
              file=sys.stderr)
        sys.exit(1)

    # If the user requested an autoincrement value, set that before we start
    # importing things. Note that mysql will automatically set the
    # autoincrement to the next available id equal to or larger than the
    # requested one.
    auto_increment = CONF.auto_increment
    if auto_increment:
        print('Setting stories.AUTO_INCREMENT to %d' % (auto_increment,))
        session = db_api.get_session(in_request=False)
        session.execute('ALTER TABLE stories AUTO_INCREMENT = %d;'
                        % (auto_increment,))

    # Compare the origin by value, not identity.
    if CONF.origin == 'launchpad':
        loader = LaunchpadLoader(CONF.from_project, CONF.to_project,
                                 set(CONF.only_tags),
                                 set(CONF.exclude_tags))
        loader.run()
    else:
        print('Unsupported import origin: %s' % CONF.origin)
        return
def parse_args(argv=None, config_file=None):
    """Load the application configuration.

    The entire application configuration is loaded just once.
    """
    global _CONF_LOADED
    if _CONF_LOADED:
        LOG.debug('Configuration has already been loaded')
        return

    log.set_defaults()
    log.register_options(CONF)

    argv = (argv if argv is not None else sys.argv[1:])
    args = ([] if _is_running_under_gunicorn() else argv or [])
    config_file = (_get_deprecated_config_file()
                   if config_file is None else config_file)

    CONF(args=args,
         prog='api',
         project='monasca',
         version=version.version_str,
         default_config_files=[config_file] if config_file else None,
         description='RESTful API for alarming in the cloud')

    log.setup(CONF,
              product_name='monasca-api',
              version=version.version_str)
    conf.register_opts()

    _CONF_LOADED = True
def main():
    CONF.register_cli_opts([
        cfg.Opt('os-username'),
        cfg.Opt('os-password'),
        cfg.Opt('os-auth-url'),
        cfg.Opt('os-tenant-name'),
    ])

    try:
        logging.register_options(CONF)
        cfg_files = cfg.find_config_files(project='glance',
                                          prog='glance-api')
        cfg_files.extend(cfg.find_config_files(project='glance',
                                               prog='glance-search'))
        config.parse_args(default_config_files=cfg_files)
        logging.setup(CONF, 'glance')

        namespace = 'glance.search.index_backend'
        ext_manager = stevedore.extension.ExtensionManager(
            namespace, invoke_on_load=True)
        for ext in ext_manager.extensions:
            try:
                ext.obj.setup()
            except Exception as e:
                LOG.error(_LE("Failed to setup index extension "
                              "%(ext)s: %(e)s") % {'ext': ext.name,
                                                   'e': e})
    except RuntimeError as e:
        sys.exit("ERROR: %s" % e)
def launch_engine(setup_logging=True):
    if setup_logging:
        logging.register_options(cfg.CONF)
    cfg.CONF(project='heat', prog='heat-engine',
             version=version.version_info.version_string())
    if setup_logging:
        logging.setup(cfg.CONF, 'heat-engine')
    logging.set_defaults()
    messaging.setup()

    config.startup_sanity_check()

    mgr = None
    try:
        mgr = template._get_template_extension_manager()
    except template.TemplatePluginNotRegistered as ex:
        LOG.critical("%s", ex)
    if not mgr or not mgr.names():
        sys.exit("ERROR: No template format plugins registered")

    from heat.engine import service as engine  # noqa

    profiler.setup('heat-engine', cfg.CONF.host)
    gmr.TextGuruMeditation.setup_autorun(version)
    srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)
    workers = cfg.CONF.num_engine_workers
    if not workers:
        workers = max(4, processutils.get_worker_count())

    launcher = service.launch(cfg.CONF, srv, workers=workers,
                              restart_method='mutate')
    return launcher
def set_default_for_default_log_levels():
    extra_log_level_defaults = [
    ]

    log.register_options(CONF)
    CONF.set_default("default_log_levels",
                     CONF.default_log_levels + extra_log_level_defaults)
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        print(_("\nOpenStack manila version: %(version)s\n") %
              {'version': version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print("\t%s" % category)
        sys.exit(2)

    try:
        log.register_options(CONF)
        CONF(sys.argv[1:], project='manila',
             version=version.version_string())
        log.setup(CONF, "manila")
    except cfg.ConfigFilesNotFoundError as e:
        cfg_files = e.config_files
        print(_("Failed to read configuration file(s): %s") % cfg_files)
        sys.exit(2)

    fn = CONF.category.action_fn
    fn_args = fetch_func_args(fn)
    fn(*fn_args)
def prepare_service(argv=None, config_files=None, conf=None):
    if argv is None:
        argv = sys.argv

    # FIXME(sileht): Use ConfigOpts() instead
    if conf is None:
        conf = cfg.CONF

    oslo_i18n.enable_lazy()
    log.register_options(conf)
    log_levels = (conf.default_log_levels +
                  ['futurist=INFO', 'neutronclient=INFO',
                   'keystoneclient=INFO'])
    log.set_defaults(default_log_levels=log_levels)
    defaults.set_cors_middleware_defaults()
    policy_opts.set_defaults(conf)

    conf(argv[1:], project='ceilometer', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    ka_loading.load_auth_from_conf_options(conf, "service_credentials")

    log.setup(conf, 'ceilometer')
    # NOTE(liusheng): guru meditation cannot run with a service under the
    # apache daemon, so when ceilometer-api runs under mod_wsgi, argv is []
    # and we do not start guru.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    messaging.setup()
    return conf
def main():
    logging.register_options(cfg.CONF)
    cfg.CONF(sys.argv[1:], project='magnum')
    logging.setup(cfg.CONF, 'magnum')

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    cfg.CONF.import_opt('topic', 'magnum.conductor.config',
                        group='conductor')

    conductor_id = short_id.generate_id()
    endpoints = [
        docker_conductor.Handler(),
        k8s_conductor.Handler(),
        bay_conductor.Handler(),
        conductor_listener.Handler(),
    ]

    if (not os.path.isfile(cfg.CONF.bay.k8s_atomic_template_path)
            and not os.path.isfile(cfg.CONF.bay.k8s_coreos_template_path)):
        LOG.error(_LE("The Heat template can not be found for either k8s "
                      "atomic %(atomic_template)s or coreos "
                      "%(coreos_template)s. Install the template first if "
                      "you want to create a bay.") %
                  {'atomic_template': cfg.CONF.bay.k8s_atomic_template_path,
                   'coreos_template': cfg.CONF.bay.k8s_coreos_template_path})

    server = service.Service(cfg.CONF.conductor.topic, conductor_id,
                             endpoints)
    server.serve()
def parse_args_with_log(project, argv=None, version=None, conf=None, log=True,
                        default_config_files=None, default_log_format=None,
                        default_log_levels=None):
    conf = conf if conf else cfg.CONF
    argv = argv if argv else sys.argv[1:]

    if not log:
        conf(argv, project=project, version=version,
             default_config_files=default_config_files)
        return

    from oslo_log import log

    if project not in _ROOTS:
        _DEFAULT_LOG_LEVELS.append('%s=INFO' % project)
        _ROOTS.append(project)
    log_fmt = default_log_format if default_log_format else _DEFAULT_LOG_FORMAT
    log_lvl = default_log_levels if default_log_levels else _DEFAULT_LOG_LEVELS

    log.set_defaults(log_fmt, log_lvl)
    log.register_options(conf)

    # TODO: configure the options of the other libraries; this must happen
    # before the configuration file is parsed.
    conf(argv, project=project, version=version,
         default_config_files=default_config_files)

    log.setup(conf, project, version)
def _setUp(self):
    log.register_options(cfg.CONF)
    CONF.set_default('host', 'fake-mini')
    CONF.set_default('connection', "sqlite://", group='database')
    CONF.set_default('sqlite_synchronous', False, group='database')
    config.parse_args([], default_config_files=[])
    self.addCleanup(CONF.reset)
def prepare_service(argv=None, config_files=None, conf=None):
    if argv is None:
        argv = sys.argv
    if conf is None:
        conf = cfg.ConfigOpts()
    oslo_i18n.enable_lazy()
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)
    keystone_client.register_keystoneauth_opts(conf)
    log.register_options(conf)
    log_levels = (conf.default_log_levels +
                  ['futurist=INFO', 'neutronclient=INFO',
                   'keystoneclient=INFO'])
    log.set_defaults(default_log_levels=log_levels)

    conf(argv[1:], project='ceilometer', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    keystone_client.post_register_keystoneauth_opts(conf)

    log.setup(conf, 'ceilometer')
    utils.setup_root_helper(conf)
    sample.setup(conf)

    gmr.TextGuruMeditation.setup_autorun(version)
    messaging.setup()
    return conf
def main():
    logging.register_options(cfg.CONF)
    cfg.CONF(project='heat', prog='heat-engine',
             version=version.version_info.version_string())
    logging.setup(cfg.CONF, 'heat-engine')
    logging.set_defaults()
    messaging.setup()

    config.startup_sanity_check()

    mgr = None
    try:
        mgr = template._get_template_extension_manager()
    except template.TemplatePluginNotRegistered as ex:
        LOG.critical(_LC("%s"), ex)
    if not mgr or not mgr.names():
        sys.exit("ERROR: No template format plugins registered")

    from heat.engine import service as engine  # noqa

    profiler.setup('heat-engine', cfg.CONF.host)
    gmr.TextGuruMeditation.setup_autorun(version)
    srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)
    workers = cfg.CONF.num_engine_workers
    if not workers:
        workers = max(4, processutils.get_worker_count())

    launcher = service.launch(cfg.CONF, srv, workers=workers)
    if cfg.CONF.enable_cloud_watch_lite:
        # We create the periodic tasks here, which means they are created
        # only in the parent process when num_engine_workers>1 is specified.
        srv.create_periodic_tasks()
    launcher.wait()
def _get_config_opts(self):
    config = cfg.ConfigOpts()
    config.register_opts(common_config.core_opts)
    config.register_opts(common_config.core_cli_opts)
    logging.register_options(config)
    agent_config.register_process_monitor_opts(config)
    return config
def main():
    try:
        logging.register_options(cfg.CONF)
        cfg.CONF(project='heat', prog='heat-api-cloudwatch',
                 version=version.version_info.version_string())
        logging.setup(cfg.CONF, 'heat-api-cloudwatch')
        logging.set_defaults()
        messaging.setup()

        app = config.load_paste_app()

        port = cfg.CONF.heat_api_cloudwatch.bind_port
        host = cfg.CONF.heat_api_cloudwatch.bind_host
        LOG.info(_LI('Starting Heat CloudWatch API on %(host)s:%(port)s'),
                 {'host': host, 'port': port})
        profiler.setup('heat-api-cloudwatch', host)
        gmr.TextGuruMeditation.setup_autorun(version)
        server = wsgi.Server('heat-api-cloudwatch',
                             cfg.CONF.heat_api_cloudwatch)
        server.start(app, default_port=port)
        systemd.notify_once()
        server.wait()
    except RuntimeError as e:
        msg = six.text_type(e)
        sys.exit("ERROR: %s" % msg)
def new_config():
    conf = cfg.ConfigOpts()
    log.register_options(conf)
    conf.register_opts(context_opts)
    conf.register_opts(common_opts)
    conf.register_opts(host_opts)
    conf.register_opts(db_opts)
    conf.register_opts(_options.eventlet_backdoor_opts)
    conf.register_opts(_options.periodic_opts)

    conf.register_opts(_options.ssl_opts, "ssl")

    conf.register_group(retry_opt_group)
    conf.register_opts(retry_opts, group=retry_opt_group)

    conf.register_group(queue_opt_group)
    conf.register_opts(queue_opts, group=queue_opt_group)

    conf.register_group(ks_queue_opt_group)
    conf.register_opts(ks_queue_opts, group=ks_queue_opt_group)

    conf.register_group(quota_opt_group)
    conf.register_opts(quota_opts, group=quota_opt_group)

    return conf
def launch(conf, config_file="/etc/monasca/events_api.conf"):
    log.register_options(cfg.CONF)
    log.set_defaults()
    cfg.CONF(args=[], project='monasca_events_api',
             default_config_files=[config_file])
    log.setup(cfg.CONF, 'monasca_events_api')

    app = falcon.API()

    versions = simport.load(cfg.CONF.dispatcher.versions)()
    app.add_route("/", versions)
    app.add_route("/{version_id}", versions)

    events = simport.load(cfg.CONF.dispatcher.events)()
    app.add_route("/v2.0/events", events)
    app.add_route("/v2.0/events/{event_id}", events)

    streams = simport.load(cfg.CONF.dispatcher.stream_definitions)()
    app.add_route("/v2.0/stream-definitions/", streams)
    app.add_route("/v2.0/stream-definitions/{stream_id}", streams)

    transforms = simport.load(cfg.CONF.dispatcher.transforms)()
    app.add_route("/v2.0/transforms", transforms)
    app.add_route("/v2.0/transforms/{transform_id}", transforms)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
def prepare_service(argv=[], conf=cfg.CONF):
    log.register_options(conf)
    config.parse_args(argv)
    cfg.set_defaults(_options.log_opts,
                     default_log_levels=_DEFAULT_LOG_LEVELS)
    log.setup(conf, 'python-watcher')
    conf.log_opt_values(LOG, logging.DEBUG)
def parse_args(argv, default_config_files=None, configure_db=True,
               init_rpc=True):
    log.register_options(CONF)
    # We use the oslo.log default log levels, which include suds=INFO,
    # and add only the extra levels that Nova needs.
    if CONF.glance.debug:
        extra_default_log_levels = ['glanceclient=DEBUG']
    else:
        extra_default_log_levels = ['glanceclient=WARN']
    log.set_defaults(default_log_levels=log.get_default_log_levels() +
                     extra_default_log_levels)
    rpc.set_defaults(control_exchange='nova')
    if profiler:
        profiler.set_defaults(CONF)
    config.set_middleware_defaults()
    CONF(argv[1:], project='nova',
         version=version.version_string(),
         default_config_files=default_config_files)

    if init_rpc:
        rpc.init(CONF)

    if configure_db:
        sqlalchemy_api.configure(CONF)
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        print(_("\nOpenStack manila version: %(version)s\n") %
              {"version": version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print("\t%s" % category)
        sys.exit(2)

    try:
        log.register_options(CONF)
        CONF(sys.argv[1:], project="manila",
             version=version.version_string())
        log.setup(CONF, "manila")
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp("sudo", ["sudo", "-u", "#%s" % st.st_uid] + sys.argv)
            except Exception:
                print(_("sudo failed, continuing as if nothing happened"))

        print(_("Please re-run manila-manage as root."))
        sys.exit(2)

    fn = CONF.category.action_fn
    fn_args = fetch_func_args(fn)
    fn(*fn_args)
def api_app(conf):
    log.set_defaults(constant.KILOEYES_LOGGING_CONTEXT_FORMAT,
                     constant.KILOEYES_LOG_LEVELS)
    log.register_options(cfg.CONF)

    if conf.get('name'):
        name = conf.get('name')
    else:
        name = 'kiloeyes'

    cfg.CONF(args=[], project=name)
    log.setup(cfg.CONF, name)

    dispatcher_manager = named.NamedExtensionManager(
        namespace=namespace.DISPATCHER_NS,
        names=cfg.CONF.dispatcher,
        invoke_on_load=True,
        invoke_args=[cfg.CONF])

    if not list(dispatcher_manager):
        LOG.error('Failed to load any dispatchers for %s' %
                  namespace.DISPATCHER_NS)
        return None

    # Create the application
    app = resource_api.ResourceAPI()

    # add each dispatcher to the application to serve requests offered by
    # each dispatcher
    for driver in dispatcher_manager:
        app.add_route(None, driver.obj)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
def parse_args(args=None, usage=None, default_config_files=None):
    logging.register_options(CONF)
    CONF(args=args,
         project='murano',
         version=version.version_string,
         usage=usage,
         default_config_files=default_config_files)
def __init__(self, parse_conf=True, config_path=None):
    """Initialize a configuration from a conf directory and conf file."""
    super(TempestConfigPrivate, self).__init__()
    config_files = []
    failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE

    if config_path:
        path = config_path
    else:
        # Environment variables override defaults...
        conf_dir = os.environ.get("TEMPEST_CONFIG_DIR",
                                  self.DEFAULT_CONFIG_DIR)
        conf_file = os.environ.get("TEMPEST_CONFIG",
                                   self.DEFAULT_CONFIG_FILE)

        path = os.path.join(conf_dir, conf_file)

    if not os.path.isfile(path):
        path = failsafe_path

    # only parse the config file if we expect one to exist. This is needed
    # to remove an issue with the config file up to date checker.
    if parse_conf:
        config_files.append(path)
    logging.register_options(_CONF)
    if os.path.isfile(path):
        _CONF([], project="tempest", default_config_files=config_files)
    else:
        _CONF([], project="tempest")
    logging.setup(_CONF, "tempest")
    LOG = logging.getLogger("tempest")
    LOG.info("Using tempest config file %s" % path)
    register_opts()
    self._set_attrs()
    if parse_conf:
        _CONF.log_opt_values(LOG, std_logging.DEBUG)