def api_app(conf):
    """Build the monasca API application from the configured dispatchers.

    Loads every dispatcher named in ``cfg.CONF.dispatcher`` through a
    stevedore ``NamedExtensionManager`` and adds each loaded driver to
    the application's routes.

    :param conf: configuration object (unused directly here; dispatchers
        receive ``cfg.CONF`` via ``invoke_args``)
    :returns: the configured application, or ``None`` when no dispatcher
        could be loaded
    """
    cfg.CONF(args=[], project='monasca')
    log_levels = (cfg.CONF.default_log_levels)
    cfg.set_defaults(log.log_opts, default_log_levels=log_levels)
    log.setup('monasca')

    dispatcher_manager = named.NamedExtensionManager(
        namespace=namespace.DISPATCHER_NS,
        names=cfg.CONF.dispatcher,
        invoke_on_load=True,
        invoke_args=[cfg.CONF])

    if not list(dispatcher_manager):
        # Lazy %-style arguments: formatting happens only if the record
        # is actually emitted (the original formatted eagerly with %).
        LOG.error('Failed to load any dispatchers for %s',
                  namespace.DISPATCHER_NS)
        return None

    # Create the application
    app = resource_api.ResourceAPI()

    # add each dispatcher to the application to serve requests offered by
    # each dispatcher
    for driver in dispatcher_manager:
        app.add_route(None, driver.obj)

    LOG.debug('Dispatcher drivers have been added to the routes!')

    return app
def prepare_service(argv=None):
    """Configure logging defaults and parse command-line args for kds.

    :param argv: command-line arguments; defaults to an empty list.
        ``None`` replaces the original mutable ``[]`` default, which is
        shared across calls and a classic Python pitfall.
    """
    if argv is None:
        argv = []
    cfg.set_defaults(log.log_opts,
                     default_log_levels=['sqlalchemy=WARN',
                                         'eventlet.wsgi.server=WARN'
                                         ])
    parse_args(argv)
    log.setup('kds')
def prepare_service(argv=None):
    """Prepare the gringotts service: eventlet, i18n, RPC, config, logging.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    eventlet.monkey_patch()
    gettextutils.install('gringotts', lazy=False)

    # Override the default control_exchange, default is 'openstack'
    rpc.set_defaults(control_exchange='gringotts')
    cfg.set_defaults(log.log_opts,
                     default_log_levels=['amqplib=WARN',
                                         'qpid.messaging=INFO',
                                         'sqlalchemy=WARN',
                                         'keystoneclient=INFO',
                                         'stevedore=INFO',
                                         'eventlet.wsgi.server=WARN'
                                         ])
    if argv is None:
        argv = sys.argv
    cfg.CONF(argv[1:], project='gringotts')
    log.setup('gringotts')

    # NOTE(suo): Import services/submodules to register methods.
    # Using `from gringotts.services import *` would cause a
    # SyntaxWarning, so we import every submodule explicitly.
    from gringotts import services
    for m in services.SUBMODULES:
        importutils.import_module("gringotts.services.%s" % m)
    # Logger.warn is a deprecated alias of warning(); use lazy %-args so
    # the message is only formatted when the record is emitted.
    LOG.warning('Loaded resources: %s', services.RESOURCE_GET_MAP.keys())
def main(argv=None):
    """Entry point: configure monasca-anomaly and run its processors.

    Spawns one process per anomaly processor, starts them all, then
    sleeps forever; on any error every processor is terminated.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    log_levels = (cfg.CONF.default_log_levels)
    cfg.set_defaults(log.log_opts, default_log_levels=log_levels)
    # BUG FIX: the original indexed argv[1:] unconditionally, raising
    # TypeError whenever main() was called with the default argv=None.
    if argv is None:
        argv = sys.argv
    cfg.CONF(argv[1:], project='monasca-anomaly')
    log.setup('monasca-anomaly')

    nupic_anomaly_processor = multiprocessing.Process(
        target=NupicAnomalyProcessor().run
    )
    processors.append(nupic_anomaly_processor)

    ks_anomaly_processor = multiprocessing.Process(
        target=KsAnomalyProcessor().run
    )
    processors.append(ks_anomaly_processor)

    try:
        LOG.info('Starting processes')
        for process in processors:
            process.start()

        # The signal handlers must be added after the processes start
        # otherwise they run on all processes
        # signal.signal(signal.SIGCHLD, clean_exit)
        #signal.signal(signal.SIGINT, clean_exit)
        #signal.signal(signal.SIGTERM, clean_exit)

        while True:
            time.sleep(5)
    except Exception:
        LOG.exception('Error! Exiting.')
        for process in processors:
            process.terminate()
def set_defaults(logging_context_format_string, default_log_levels=None):
    """Set default values for the logging options.

    :param logging_context_format_string: format string used for
        context-aware log records
    :param default_log_levels: list of ``logger=LEVEL`` strings; falls
        back to ``DEFAULT_LOG_LEVELS`` when omitted.

    BUG FIX: the original body read and assigned ``default_log_levels``
    without declaring it as a parameter, so every call raised
    UnboundLocalError.  Adding it as an optional keyword parameter is
    backward-compatible with existing single-argument callers.
    """
    if default_log_levels is None:
        default_log_levels = DEFAULT_LOG_LEVELS
    cfg.set_defaults(
        log_opts,
        logging_context_format_string=logging_context_format_string,
        default_log_levels=default_log_levels)
def set_transport_defaults(control_exchange):
    """Override the messaging transport's default exchange.

    :param control_exchange: the default exchange under which topics are
        scoped
    :type control_exchange: str
    """
    cfg.set_defaults(_transport_opts, control_exchange=control_exchange)
def setup_app(pecan_config=None):
    """Create and configure the Pecan WSGI application.

    :param pecan_config: optional pre-loaded Pecan configuration; when
        falsy the default configuration is loaded via get_pecan_config()
    :returns: the configured WSGI application
    """
    if not pecan_config:
        pecan_config = get_pecan_config()

    # Setup logging
    cfg.set_defaults(log.log_opts,
                     default_log_levels=[
                         'thirdpartydashboard=INFO',
                         'thirdpartydashboard.openstack.common.db=WARN',
                         'sqlalchemy=WARN'
                     ])
    log.setup('thirdpartydashboard')

    hooks = []
    # user_id_hook.UserIdHook()
    #]

    # Setup token storage
    #token_storage_type = CONF.token_storage_type
    #storage_cls = storage_impls.STORAGE_IMPLS[token_storage_type]
    #storage.set_storage(storage_cls())

    # Setup search engine
    search_engine_name = CONF.search_engine
    search_engine_cls = search_engine_impls.ENGINE_IMPLS[search_engine_name]
    search_engine.set_engine(search_engine_cls())

    # Load user preference plugins
    #initialize_user_preferences()

    # Setup notifier
    #if CONF.enable_notifications:
    #    hooks.append(NotificationHook())

    app = pecan.make_app(
        pecan_config.app.root,
        debug=CONF.debug,
        hooks=hooks,
        force_canonical=getattr(pecan_config.app, 'force_canonical', True),
        guess_content_type_from_ext=False
    )
    #app = token_middleware.AuthTokenMiddleware(app)

    # Setup CORS.  NOTE(review): the original wrapped this in an
    # unreachable `if False:` block; that dead code is removed here and
    # the intended condition kept commented for when CORS is re-enabled.
    #if CONF.cors.allowed_origins:
    #    app = CORSMiddleware(app,
    #                         allowed_origins=CONF.cors.allowed_origins,
    #                         allowed_methods=['GET', 'POST', 'PUT',
    #                                          'DELETE', 'OPTIONS'],
    #                         allowed_headers=['origin', 'authorization',
    #                                          'accept', 'x-total',
    #                                          'x-limit', 'x-marker',
    #                                          'x-client', 'content-type'],
    #                         max_age=CONF.cors.max_age)

    return app
def prepare_service(argv=None):
    """Quieten chatty libraries, parse CLI args and set up saladier logging.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    extra_quiet = ['stevedore=INFO', 'keystoneclient=INFO']
    cfg.set_defaults(log.log_opts,
                     default_log_levels=(cfg.CONF.default_log_levels
                                         + extra_quiet))
    args = sys.argv if argv is None else argv
    cfg.CONF(args[1:], project='saladier')
    log.setup('saladier')
def prepare_service(argv=None):
    """Prepare the kite service: DB defaults, logging defaults, CLI args.

    :param argv: command-line arguments; defaults to an empty list.
        ``None`` replaces the original mutable ``[]`` default, which is
        shared across calls.
    """
    if argv is None:
        argv = []
    options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
                         sqlite_db='kite.sqlite')
    cfg.set_defaults(log.log_opts,
                     default_log_levels=['sqlalchemy=WARN',
                                         'eventlet.wsgi.server=WARN'
                                         ])
    parse_args(argv)
    log.setup('kite')
def set_defaults(logging_context_format_string=None, default_log_levels=None):
    """Apply caller-supplied defaults to the logging options.

    Both arguments are optional; only values actually supplied are
    forwarded.  (default_log_levels was introduced later, in a
    backwards-incompatible change, so its absence must be tolerated.)

    :param logging_context_format_string: context log format string
    :param default_log_levels: list of ``logger=LEVEL`` strings
    """
    supplied = {
        'default_log_levels': default_log_levels,
        'logging_context_format_string': logging_context_format_string,
    }
    for opt_name, value in supplied.items():
        if value is not None:
            cfg.set_defaults(log_opts, **{opt_name: value})
def prepare_service(argv=None):
    """Configure logging defaults and parse CLI arguments for climate.

    :param argv: command-line arguments; defaults to an empty list.
        ``None`` replaces the original mutable ``[]`` default.
    """
    if argv is None:
        argv = []
    cfg.set_defaults(logging.log_opts,
                     default_log_levels=['amqplib=WARN',
                                         'qpid.messaging=INFO',
                                         'stevedore=INFO',
                                         'eventlet.wsgi.server=WARN'
                                         ])
    cfg.CONF(argv[1:], project='climate')
    logging.setup('climate')
def setup_service(argv=None):
    """Monkey-patch eventlet, set messaging/log defaults and parse args.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    eventlet.monkey_patch()
    # We can leave this as "openstack" default for simple stuff.
    rpc.set_defaults(control_exchange="worker")
    verbose_loggers = ["amqplib=DEBUG", "eventlet.wsgi.server=DEBUG"]
    cfg.set_defaults(log.log_opts, default_log_levels=verbose_loggers)
    args = sys.argv if argv is None else argv
    cfg.CONF(args[1:], project="worker")
    log.setup("worker")
def prepare_service(argv=None):
    """Install translations, configure logging and parse persister args.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    gettextutils.install('openstack')
    gettextutils.enable_lazy()
    cfg.set_defaults(log.log_opts,
                     default_log_levels=cfg.CONF.default_log_levels)
    args = sys.argv if argv is None else argv
    cfg.CONF(args[1:], project='persister')
    log.setup('persister')
    LOG.info('Service has started!')
def prepare_service(argv=None):
    """Prepare ceilometer: i18n, log defaults, CLI args and messaging.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    i18n.enable_lazy()
    quiet = ['stevedore=INFO', 'keystoneclient=INFO']
    cfg.set_defaults(log.log_opts,
                     default_log_levels=cfg.CONF.default_log_levels + quiet)
    args = sys.argv if argv is None else argv
    cfg.CONF(args[1:], project='ceilometer')
    log.setup('ceilometer')
    messaging.setup()
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
                 max_overflow=None, pool_timeout=None):
    """Set defaults for configuration variables.

    :param sql_connection: default SQL connection string
    :param sqlite_db: default SQLite database file name
    :param max_pool_size: optional QueuePool size override
    :param max_overflow: optional QueuePool overflow override
    :param pool_timeout: optional QueuePool timeout override
    """
    cfg.set_defaults(database_opts, connection=sql_connection)
    cfg.set_defaults(sqlite_db_opts, sqlite_db=sqlite_db)
    # Update the QueuePool defaults only for values actually supplied.
    pool_overrides = (('max_pool_size', max_pool_size),
                      ('max_overflow', max_overflow),
                      ('pool_timeout', pool_timeout))
    for opt_name, value in pool_overrides:
        if value is not None:
            cfg.set_defaults(database_opts, **{opt_name: value})
def prepare_service(argv=None):
    """Initialise configuration, logging and i18n for entropy.

    NOTE(review): the ordering here looks suspect -- logging is set up
    and option values are dumped *before* the command line is parsed,
    ``cfg.CONF`` is invoked twice (once with no CLI args, once with
    ``argv[1:]``), and ``cfg.set_defaults(log.log_opts)`` supplies no
    overrides so it is a no-op.  Left byte-identical pending
    confirmation of the intended sequence.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    cfg.CONF(project='entropy')
    log.setup('entropy')
    # Dump the effective option values at DEBUG level.
    cfg.CONF.log_opt_values(LOG, logging.DEBUG)
    gettextutils.install('entropy')
    gettextutils.enable_lazy()
    cfg.set_defaults(log.log_opts)  # no-op: no defaults supplied
    if argv is None:
        argv = sys.argv
    # Re-parse configuration, this time with the CLI arguments.
    cfg.CONF(argv[1:], project='entropy')
def prepare_service(argv=None):
    """Set messaging/logging defaults and parse ironic CLI arguments.

    :param argv: command-line arguments; defaults to an empty list.
        ``None`` replaces the original mutable ``[]`` default.
    """
    if argv is None:
        argv = []
    rpc.set_defaults(control_exchange='ironic')
    cfg.set_defaults(log.log_opts,
                     default_log_levels=['amqplib=WARN',
                                         'qpid.messaging=INFO',
                                         'sqlalchemy=WARN',
                                         'keystoneclient=INFO',
                                         'stevedore=INFO',
                                         'eventlet.wsgi.server=WARN'
                                         ])
    cfg.CONF(argv[1:], project='ironic')
    log.setup('ironic')
def prepare_service(argv=None):
    """Set logging defaults, parse solum CLI arguments and load objects.

    :param argv: command-line arguments; defaults to an empty list.
        ``None`` replaces the original mutable ``[]`` default.
    """
    if argv is None:
        argv = []
    cfg.set_defaults(log.log_opts,
                     default_log_levels=['amqplib=WARN',
                                         'qpid.messaging=INFO',
                                         'sqlalchemy=WARN',
                                         'keystoneclient=INFO',
                                         'stevedore=INFO',
                                         'eventlet.wsgi.server=WARN'
                                         ])
    cfg.CONF(argv[1:], project='solum')
    log.setup('solum')
    objects.load()
def prepare_service(argv=None):
    """Parse ironic config and quieten chatty third-party loggers.

    :param argv: command-line arguments; defaults to an empty list.
        ``None`` replaces the original mutable ``[]`` default.
    """
    if argv is None:
        argv = []
    config.parse_args(argv)
    cfg.set_defaults(log.log_opts,
                     default_log_levels=['amqplib=WARN',
                                         'qpid.messaging=INFO',
                                         'sqlalchemy=WARN',
                                         'keystoneclient=INFO',
                                         'stevedore=INFO',
                                         'eventlet.wsgi.server=WARN',
                                         'iso8601=WARN',
                                         'paramiko=WARN',
                                         ])
    log.setup('ironic')
def prepare_service(argv=None):
    """Prepare billingstack: eventlet, config file, RPC and logging.

    :param argv: command-line arguments; defaults to an empty list.
        ``None`` replaces the original mutable ``[]`` default.
    """
    if argv is None:
        argv = []
    eventlet.monkey_patch()
    # NOTE(review): read_config uses sys.argv while cfg.CONF below uses
    # the argv parameter -- presumably intentional; confirm with callers.
    utils.read_config('billingstack', sys.argv)
    rpc.set_defaults(control_exchange='billingstack')
    cfg.set_defaults(log.log_opts,
                     default_log_levels=['amqplib=WARN',
                                         'qpid.messaging=INFO',
                                         'sqlalchemy=WARN',
                                         'keystoneclient=INFO',
                                         'stevedore=INFO',
                                         'eventlet.wsgi.server=WARN'
                                         ])
    cfg.CONF(argv[1:], project='billingstack')
    log.setup('billingstack')
def prepare_service(argv=None):
    """Prepare ceilometer: eventlet patching, i18n, RPC and logging.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    eventlet.monkey_patch()
    gettextutils.install('ceilometer', lazy=False)
    rpc.set_defaults(control_exchange='ceilometer')
    quiet = ['amqplib=WARN',
             'qpid.messaging=INFO',
             'sqlalchemy=WARN',
             'keystoneclient=INFO',
             'stevedore=INFO',
             'eventlet.wsgi.server=WARN']
    cfg.set_defaults(log.log_opts, default_log_levels=quiet)
    args = sys.argv if argv is None else argv
    cfg.CONF(args[1:], project='ceilometer')
    log.setup('ceilometer')
def prepare_service(argv=None):
    """Configure logging defaults and parse CLI arguments for volt.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    # gettextutils.install('volt', lazy=True)
    quiet = ['amqplib=WARN',
             'qpid.messaging=INFO',
             'sqlalchemy=WARN',
             'keystoneclient=INFO',
             'stevedore=INFO',
             'eventlet.wsgi.server=WARN',
             'iso8601=WARN']
    cfg.set_defaults(log.log_opts, default_log_levels=quiet)
    args = sys.argv if argv is None else argv
    cfg.CONF(args[1:], project='volt',
             version=version.version_string())
    log.setup('volt')
def prepare_service(argv=None):
    """Prepare ceilometer: i18n, log defaults, CLI args and messaging.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    gettextutils.install('ceilometer', lazy=True)
    gettextutils.enable_lazy()
    quiet = ['amqplib=WARN',
             'qpid.messaging=INFO',
             'sqlalchemy=WARN',
             'keystoneclient=INFO',
             'stevedore=INFO',
             'eventlet.wsgi.server=WARN',
             'iso8601=WARN']
    cfg.set_defaults(log.log_opts, default_log_levels=quiet)
    args = sys.argv if argv is None else argv
    cfg.CONF(args[1:], project='ceilometer')
    log.setup('ceilometer')
    messaging.setup()
def prepare_service(argv=None):
    """Prepare ceilometer: monkey-patch sockets, i18n, RPC and logging.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    # NOTE(jd) We need to monkey patch the socket module for, at least,
    # oslo.rpc, otherwise everything's blocked on its first read()
    eventlet.monkey_patch(socket=True)
    gettextutils.install('ceilometer', lazy=True)
    rpc.set_defaults(control_exchange='ceilometer')
    quiet = ['amqplib=WARN',
             'qpid.messaging=INFO',
             'sqlalchemy=WARN',
             'keystoneclient=INFO',
             'stevedore=INFO',
             'eventlet.wsgi.server=WARN']
    cfg.set_defaults(log.log_opts, default_log_levels=quiet)
    args = sys.argv if argv is None else argv
    cfg.CONF(args[1:], project='ceilometer')
    log.setup('ceilometer')
def setup_app(pecan_config=None):
    """Create the storyboard Pecan application.

    Logging defaults are configured *before* the app is created so that
    any records emitted during ``pecan.make_app`` honour them (the
    original configured logging only after the app was built).

    :param pecan_config: optional pre-loaded Pecan configuration; when
        falsy the default configuration is loaded via get_pecan_config()
    :returns: the configured WSGI application
    """
    if not pecan_config:
        pecan_config = get_pecan_config()

    cfg.set_defaults(log.log_opts,
                     default_log_levels=[
                         'storyboard=INFO',
                         'sqlalchemy=WARN'
                     ])
    log.setup('storyboard')

    app = pecan.make_app(
        pecan_config.app.root,
        debug=CONF.debug,
        force_canonical=getattr(pecan_config.app, 'force_canonical', True),
        guess_content_type_from_ext=False
    )

    return app
def prepare_service(argv=None):
    """Sets global config from config file and sets up logging.

    :param argv: command-line arguments; falsy values become ``[]``
    """
    args = argv or []
    config.init(args[1:])
    LOG.info(_LI('Starting Octavia API server'))
    quiet = ['amqp=WARN',
             'amqplib=WARN',
             'qpid.messaging=INFO',
             'sqlalchemy=WARN',
             'keystoneclient=INFO',
             'stevedore=INFO',
             'eventlet.wsgi.server=WARN',
             'iso8601=WARN',
             'paramiko=WARN',
             'requests=WARN',
             'ironic.openstack.common=WARN']
    cfg.set_defaults(log.log_opts, default_log_levels=quiet)
    config.setup_logging(cfg.CONF)
def prepare_service(argv=None):
    """Prepare ceilometer: eventlet, i18n, RPC defaults and logging.

    :param argv: command-line arguments; defaults to ``sys.argv``
    """
    eventlet.monkey_patch()
    gettextutils.install("ceilometer", lazy=False)
    rpc.set_defaults(control_exchange="ceilometer")
    noisy_defaults = [
        "amqplib=WARN",
        "qpid.messaging=INFO",
        "sqlalchemy=WARN",
        "keystoneclient=INFO",
        "stevedore=INFO",
        "eventlet.wsgi.server=WARN",
    ]
    cfg.set_defaults(log.log_opts, default_log_levels=noisy_defaults)
    args = sys.argv if argv is None else argv
    cfg.CONF(args[1:], project="ceilometer")
    log.setup("ceilometer")
def prepare_service(argv=None):
    """Parse ironic configuration and quieten third-party loggers.

    :param argv: command-line arguments; defaults to an empty list.
        ``None`` replaces the original mutable ``[]`` default.
    """
    if argv is None:
        argv = []
    config.parse_args(argv)
    cfg.set_defaults(
        log.log_opts,
        default_log_levels=[
            "amqp=WARN",
            "amqplib=WARN",
            "qpid.messaging=INFO",
            "sqlalchemy=WARN",
            "keystoneclient=INFO",
            "stevedore=INFO",
            "eventlet.wsgi.server=WARN",
            "iso8601=WARN",
            "paramiko=WARN",
            "requests=WARN",
            "neutronclient=WARN",
            "glanceclient=WARN",
            "ironic.openstack.common=WARN",
        ],
    )
    log.setup("ironic")
def api_app(conf):
    """Build the monasca API application and register its resources.

    Every resource is served by the same configured dispatcher driver
    (``cfg.CONF.dispatcher.driver``); the original repeated the
    ``add_resource`` call six times verbatim (with a copy-pasted
    comment on the alarms resource), which is replaced by a loop here.

    :param conf: configuration object handed to every dispatcher driver
    :returns: the configured application
    """
    # Setup logs
    log_levels = (cfg.CONF.default_log_levels)
    cfg.set_defaults(log.log_opts, default_log_levels=log_levels)
    cfg.CONF(args=[], project='monasca')
    log.setup('monasca')

    # Create the application
    app = resource_api.ResourceAPI()

    # Route name -> stevedore namespace, in registration order.
    resources = (
        ('metrics', METRICS_DISPATCHER_NAMESPACE),
        ('events', EVENTS_DISPATCHER_NAMESPACE),
        ('transforms', TRANSFORMS_DISPATCHER_NAMESPACE),
        ('notifications', NOTIFICATIONS_DISPATCHER_NAMESPACE),
        ('alarm-definitions', ALARM_DEFINITIONS_DISPATCHER_NAMESPACE),
        ('alarms', ALARMS_DISPATCHER_NAMESPACE),
    )
    for name, dispatcher_namespace in resources:
        app.add_resource(name, dispatcher_namespace,
                         cfg.CONF.dispatcher.driver, [conf])

    return app
def set_defaults(lock_path):
    """Override the default ``lock_path`` utility option.

    :param lock_path: directory in which lock files are created
    """
    cfg.set_defaults(util_opts, lock_path=lock_path)
def set_defaults(logging_context_format_string):
    """Set the default format string for context-aware log records.

    :param logging_context_format_string: the format string to install
        as the ``logging_context_format_string`` default
    """
    cfg.set_defaults(
        log_opts,
        logging_context_format_string=logging_context_format_string)
def set_defaults(control_exchange):
    """Set the default control exchange for the RPC options.

    :param control_exchange: exchange name to install as the default
    """
    cfg.set_defaults(rpc_opts, control_exchange=control_exchange)
from rack import exception from rack.resourceoperator import openstack as os_client from rack.resourceoperator.openstack import networks from rack import test CONF = cfg.CONF CREDENTIALS = { "os_username": "******", "os_password": "******", "os_tenant_name": "fake", "os_auth_url": "fake", "os_region_name": "fake" } cfg.set_defaults(os_client.openstack_client_opts, **CREDENTIALS) CIDR = "10.0.0.0/24" class NetworkTestCase(test.NoDBTestCase): def setUp(self): super(NetworkTestCase, self).setUp() self.network_client = networks.NetworkAPI() self.neutron_mock = self.mox.CreateMock(neutron_client.Client) self.mox.StubOutWithMock(os_client, "get_neutron_client") os_client.get_neutron_client().AndReturn(self.neutron_mock) def test_network_list(self): network_list = [{"id": "fake_id1"}, {"id": "fake_id2"}] self.neutron_mock.list_networks()\
# Import logging-related options so they are registered on CONF and can
# be referenced/overridden below.
CONF.import_opt('log_file', 'highlander.openstack.common.log')
CONF.import_opt('log_config_append', 'highlander.openstack.common.log')
CONF.import_opt('log_format', 'highlander.openstack.common.log')
CONF.import_opt('log_date_format', 'highlander.openstack.common.log')
CONF.import_opt('use_syslog', 'highlander.openstack.common.log')
CONF.import_opt('syslog_log_facility', 'highlander.openstack.common.log')

# Extend oslo default_log_levels to include some that are useful for highlander
# some are in oslo logging already, this is just making sure it stays this
# way.
default_log_levels = cfg.CONF.default_log_levels
logs_to_quieten = [
    'sqlalchemy=WARN',
    'oslo.messaging=INFO',
    'iso8601=WARN',
    'eventlet.wsgi.server=WARN'
]

# NOTE(review): this appends to the *live* cfg.CONF.default_log_levels
# list in place (no copy) before handing the same list to set_defaults --
# presumably intentional so the running config is updated as well as the
# default; confirm before changing this to operate on a copy.
for chatty in logs_to_quieten:
    if chatty not in default_log_levels:
        default_log_levels.append(chatty)

cfg.set_defaults(log.log_opts, default_log_levels=default_log_levels)


def parse_args(args=None, usage=None, default_config_files=None):
    """Parse CLI arguments and configuration files for highlander.

    :param args: command-line arguments to parse
    :param usage: usage string shown in help output
    :param default_config_files: configuration files read by default
    """
    CONF(args=args,
         project='highlander',
         version=version,
         usage=usage,
         default_config_files=default_config_files)
def set_defaults(sql_connection, sqlite_db):
    """Install default values for the database configuration options.

    :param sql_connection: default SQL connection string
    :param sqlite_db: default SQLite database file name
    """
    overrides = ((database_opts, {'connection': sql_connection}),
                 (sqlite_db_opts, {'sqlite_db': sqlite_db}))
    for opts, kwargs in overrides:
        cfg.set_defaults(opts, **kwargs)
def set_defaults(lock_path):
    """Override the default ``lock_path``.

    Intended primarily for tests that need lock files written to a
    temporary directory.

    :param lock_path: directory in which lock files are created
    """
    cfg.set_defaults(_opts, lock_path=lock_path)
default=False, help="Use Neutron Networking (False indicates the use of Nova " "networking)."), cfg.BoolOpt('use_namespaces', default=False, help="Use network namespaces for communication (only valid to " "use in conjunction with use_neutron=True).") ] cfg.set_defaults(log.log_opts, default_log_levels=[ 'amqplib=WARN', 'qpid.messaging=INFO', 'stevedore=INFO', 'eventlet.wsgi.server=WARN', 'sqlalchemy=WARN', 'boto=WARN', 'suds=INFO', 'keystone=INFO', 'paramiko=WARN', 'requests=WARN', 'iso8601=WARN', ]) CONF = cfg.CONF CONF.register_cli_opts(cli_opts) CONF.register_opts(networking_opts) CONF.register_opts(edp_opts) def parse_configs(conf_files=None): try: