Example #1
0
def main(args=sys.argv[1:]):  # pragma: no cover
    """Entry point: parse config, set up logging and run the inspector app.

    NOTE(review): the default ``args=sys.argv[1:]`` is evaluated once at
    import time, not per call — presumably fine for a CLI entry point, but
    verify if this module can be imported before argv is final.
    """
    # Log options must be registered before CONF() parses the command line.
    log.register_options(CONF)
    CONF(args, project='ironic-inspector')

    # Quiet down chatty third-party libraries by default.
    log.set_defaults(default_log_levels=[
        'sqlalchemy=WARNING',
        'keystoneclient=INFO',
        'iso8601=WARNING',
        'requests=WARNING',
        'urllib3.connectionpool=WARNING',
        'keystonemiddleware=WARNING',
        'swiftclient=WARNING',
        'keystoneauth=WARNING',
        'ironicclient=WARNING'
    ])
    log.setup(CONF, 'ironic_inspector')

    app_kwargs = {'host': CONF.listen_address,
                  'port': CONF.listen_port}

    # Enable TLS only when an SSL context could be built from the config.
    context = create_ssl_context()
    if context:
        app_kwargs['ssl_context'] = context

    init()
    try:
        app.run(**app_kwargs)
    finally:
        # Always undo firewall changes, even if the server crashes.
        firewall.clean_up()
Example #2
0
def main(args=sys.argv[1:]):  # pragma: no cover
    """Entry point: parse config, set up logging and run the inspector app.

    NOTE(review): the default ``args=sys.argv[1:]`` is evaluated once at
    import time, not per call — presumably fine for a CLI entry point.
    """
    # Log options must be registered before CONF() parses the command line.
    log.register_options(CONF)
    CONF(args, project='ironic-inspector')
    debug = CONF.debug

    # Quiet chatty libraries; ironicclient gets INFO only in debug mode.
    log.set_defaults(default_log_levels=[
        'urllib3.connectionpool=WARN',
        'keystonemiddleware.auth_token=WARN',
        'requests.packages.urllib3.connectionpool=WARN',
        ('ironicclient.common.http=INFO' if debug else
         'ironicclient.common.http=ERROR')])
    log.setup(CONF, 'ironic_inspector')

    app_kwargs = {'host': CONF.listen_address,
                  'port': CONF.listen_port}

    # Enable TLS only when an SSL context could be built from the config.
    context = create_ssl_context()
    if context:
        app_kwargs['ssl_context'] = context

    init()
    try:
        app.run(**app_kwargs)
    finally:
        # Always undo firewall changes, even if the server crashes.
        firewall.clean_up()
Example #3
0
def main():
    """Start the standalone heat-api-cloudwatch WSGI server."""
    try:
        # Register log options before parsing config.
        logging.register_options(cfg.CONF)
        cfg.CONF(project='heat',
                 prog='heat-api-cloudwatch',
                 version=version.version_info.version_string())
        logging.setup(cfg.CONF, 'heat-api-cloudwatch')
        # NOTE(review): set_defaults() is called after setup(); defaults
        # applied here are unlikely to take effect — confirm intended order
        # (other services in this codebase call set_defaults() first).
        logging.set_defaults()
        messaging.setup()

        app = config.load_paste_app()

        port = cfg.CONF.heat_api_cloudwatch.bind_port
        host = cfg.CONF.heat_api_cloudwatch.bind_host
        LOG.info(_LI('Starting Heat CloudWatch API on %(host)s:%(port)s'),
                 {'host': host, 'port': port})
        profiler.setup('heat-api-cloudwatch', host)
        gmr.TextGuruMeditation.setup_autorun(version)
        server = wsgi.Server('heat-api-cloudwatch',
                             cfg.CONF.heat_api_cloudwatch)
        server.start(app, default_port=port)
        # Tell systemd we are ready, then block until the server exits.
        systemd.notify_once()
        server.wait()
    except RuntimeError as e:
        msg = six.text_type(e)
        sys.exit("ERROR: %s" % msg)
Example #4
0
 def test_change_default(self):
     """set_defaults() should override logging_context_format_string."""
     my_default = '%(asctime)s %(levelname)s %(name)s [%(request_id)s '\
                  '%(user_id)s %(project)s] %(instance)s'\
                  '%(message)s'
     log.set_defaults(logging_context_format_string=my_default)
     self.conf([])
     self.assertEqual(self.conf.logging_context_format_string, my_default)
Example #5
0
def main():
    """Entry point: load config, connect to memcached and process data.

    Returns 1 on failure to load the default data; exits with status 1
    when the runtime storage URI is not a memcached:// URI.
    """
    # init conf and logging
    conf = cfg.CONF
    conf.register_cli_opts(config.OPTS)
    conf.register_opts(config.OPTS)
    logging.register_options(conf)
    logging.set_defaults()

    conf(project='driverlog')

    logging.setup(conf, 'driverlog')
    LOG.info('Logging enabled')
    conf.log_opt_values(LOG, logging.DEBUG)

    MEMCACHED_URI_PREFIX = r'^memcached:\/\/'
    stripped = re.sub(MEMCACHED_URI_PREFIX, '', cfg.CONF.runtime_storage_uri)
    if not stripped:
        exit(1)

    memcached_uri = stripped.split(',')
    memcached_inst = memcache.Client(memcached_uri)

    default_data = utils.read_json_from_uri(cfg.CONF.default_data_uri)
    if not default_data:
        LOG.critical('Unable to load default data')
        # Was `return not 0` — an obscure spelling of a truthy failure
        # value; `1` is equivalent (True == 1) and explicit.
        return 1

    process(memcached_inst, default_data, cfg.CONF.force_update)
Example #6
0
def main():
    """Start the heat-engine RPC service with worker processes."""
    # Register log options before parsing config.
    logging.register_options(cfg.CONF)
    cfg.CONF(project='heat', prog='heat-engine',
             version=version.version_info.version_string())
    logging.setup(cfg.CONF, 'heat-engine')
    # NOTE(review): set_defaults() after setup() likely has no effect —
    # confirm the intended call order.
    logging.set_defaults()
    messaging.setup()

    config.startup_sanity_check()

    mgr = None
    try:
        mgr = template._get_template_extension_manager()
    except template.TemplatePluginNotRegistered as ex:
        LOG.critical(_LC("%s"), ex)
    # Refuse to start without at least one template format plugin.
    if not mgr or not mgr.names():
        sys.exit("ERROR: No template format plugins registered")

    # Imported late: the engine service pulls in heavy dependencies.
    from heat.engine import service as engine  # noqa

    profiler.setup('heat-engine', cfg.CONF.host)
    gmr.TextGuruMeditation.setup_autorun(version)
    srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)
    workers = cfg.CONF.num_engine_workers
    if not workers:
        # Default worker count: at least 4, or one per CPU if more.
        workers = max(4, processutils.get_worker_count())

    launcher = service.launch(cfg.CONF, srv, workers=workers)
    if cfg.CONF.enable_cloud_watch_lite:
        # We create the periodic tasks here, which mean they are created
        # only in the parent process when num_engine_workers>1 is specified
        srv.create_periodic_tasks()
    launcher.wait()
Example #7
0
def main():
    """Entry point: import or export memcached data to a file or stdio.

    Fix: files opened here were never closed; close them explicitly, but
    only when we opened them ourselves (never close stdin/stdout).
    """
    # init conf and logging
    conf = cfg.CONF
    conf.register_cli_opts(config.OPTS)
    conf.register_cli_opts(OPTS)
    conf.register_opts(config.OPTS)
    conf.register_opts(OPTS)
    logging.register_options(conf)
    logging.set_defaults()
    conf(project='stackalytics')

    logging.setup(conf, 'stackalytics')
    LOG.info('Logging enabled')
    conf.log_opt_values(LOG, std_logging.DEBUG)

    memcached_inst = _connect_to_memcached(cfg.CONF.runtime_storage_uri)

    filename = cfg.CONF.file

    if cfg.CONF.restore:
        fd = open(filename, 'r') if filename else sys.stdin
        try:
            import_data(memcached_inst, fd)
        finally:
            if filename:
                fd.close()
    else:
        fd = open(filename, 'w') if filename else sys.stdout
        try:
            export_data(memcached_inst, fd)
        finally:
            if filename:
                fd.close()
Example #8
0
def launch(conf, config_file="/etc/monasca/events_api.conf"):
    """Build and return the falcon WSGI application for the events API.

    Parses the given config file, configures logging, then wires each
    configured dispatcher driver to its URL routes.
    """
    log.register_options(cfg.CONF)
    log.set_defaults()
    cfg.CONF(args=[],
             project='monasca_events_api',
             default_config_files=[config_file])
    log.setup(cfg.CONF, 'monasca_events_api')

    application = falcon.API()

    # (dispatcher option name, routes served by that dispatcher)
    for option_name, routes in (
            ('versions', ("/", "/{version_id}")),
            ('events', ("/v2.0/events", "/v2.0/events/{event_id}")),
            ('stream_definitions',
             ("/v2.0/stream-definitions/",
              "/v2.0/stream-definitions/{stream_id}")),
            ('transforms',
             ("/v2.0/transforms", "/v2.0/transforms/{transform_id}"))):
        resource = simport.load(getattr(cfg.CONF.dispatcher, option_name))()
        for uri in routes:
            application.add_route(uri, resource)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return application
Example #9
0
 def test_tempest_set_log_file(self):
     """tempest_set_log_file() should become the log_file default."""
     log_file = 'foo.log'
     log.tempest_set_log_file(log_file)
     # Reset the tempest log file after the test so other tests are clean.
     self.addCleanup(log.tempest_set_log_file, None)
     log.set_defaults()
     self.conf([])
     self.assertEqual(log_file, self.conf.log_file)
Example #10
0
def prepare_service(argv=None):
    """Sets global config from config file and sets up logging."""
    argv = argv or []
    # argv[1:] drops the program name; harmless for the empty default.
    config.init(argv[1:])
    log.set_defaults()
    config.setup_logging(cfg.CONF)
    rpc.init()
Example #11
0
def prepare_service(argv=None, config_files=None, conf=None):
    """Prepare and return a fully-configured ConfigOpts for ceilometer.

    :param argv: command line (defaults to sys.argv); argv[0] is skipped.
    :param config_files: explicit config files to load, or None.
    :param conf: an existing ConfigOpts to populate; a new one by default.
    :returns: the configured ConfigOpts instance.
    """
    if argv is None:
        argv = sys.argv

    if conf is None:
        conf = cfg.ConfigOpts()

    oslo_i18n.enable_lazy()
    # Register every option group before parsing the command line.
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)
    keystone_client.register_keystoneauth_opts(conf)
    log.register_options(conf)
    # Quiet down some chatty client libraries by default.
    log_levels = (conf.default_log_levels +
                  ['futurist=INFO', 'neutronclient=INFO',
                   'keystoneclient=INFO'])
    log.set_defaults(default_log_levels=log_levels)

    conf(argv[1:], project='ceilometer', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    keystone_client.post_register_keystoneauth_opts(conf)

    log.setup(conf, 'ceilometer')
    utils.setup_root_helper(conf)
    sample.setup(conf)

    gmr.TextGuruMeditation.setup_autorun(version)
    messaging.setup()
    return conf
Example #12
0
def parse_args_with_log(project, argv=None, version=None, conf=None, log=True,
                        default_config_files=None, default_log_format=None,
                        default_log_levels=None):
    """Parse CLI/config args for *project*, optionally configuring logging.

    :param project: project name used for config lookup and the log domain.
    :param argv: command line args (defaults to sys.argv[1:]).
    :param version: version string passed to the config parser.
    :param conf: ConfigOpts to populate (defaults to the global cfg.CONF).
    :param log: when False, only parse config; skip all logging setup.
    :param default_config_files: explicit config files, or None.
    :param default_log_format: log format (falls back to module default).
    :param default_log_levels: log levels (falls back to module default).
    """
    conf = conf if conf else cfg.CONF
    argv = argv if argv else sys.argv[1:]

    if not log:
        conf(argv, project=project, version=version,
             default_config_files=default_config_files)
        return

    # Import under an alias: the original `from oslo_log import log`
    # shadowed the boolean `log` parameter for the rest of the function.
    from oslo_log import log as oslo_logging

    # Register this project's root logger level exactly once.
    if project not in _ROOTS:
        _DEFAULT_LOG_LEVELS.append('%s=INFO' % project)
        _ROOTS.append(project)
    log_fmt = default_log_format if default_log_format else _DEFAULT_LOG_FORMAT
    log_lvl = default_log_levels if default_log_levels else _DEFAULT_LOG_LEVELS

    oslo_logging.set_defaults(log_fmt, log_lvl)
    oslo_logging.register_options(conf)

    # TODO: configure the options of the other libraries; this must happen
    # before parsing the configuration file.

    conf(argv, project=project, version=version,
         default_config_files=default_config_files)

    oslo_logging.setup(conf, project, version)
Example #13
0
def parse_config(argv, default_config_files=DEFAULT_CONFIG_FILES):
    """Register log options, set rug-specific log defaults, and parse argv."""
    log.register_options(cfg.CONF)
    # Set the logging format to include the process and thread, since
    # those aren't included in standard openstack logs but are useful
    # for the rug
    extended = ':'.join('%(' + n + ')s'
                        for n in ['name',
                                  'process',
                                  'processName',
                                  'threadName'])
    log_format = ('%(asctime)s.%(msecs)03d %(levelname)s ' +
                  extended + ' %(message)s')

    # Configure the default log levels for some third-party packages
    # that are chatty
    log_levels = [
        'amqp=WARN',
        'amqplib=WARN',
        'qpid.messaging=INFO',
        'sqlalchemy=WARN',
        'keystoneclient=INFO',
        'stevedore=INFO',
        'eventlet.wsgi.server=WARN',
        'requests=WARN',
        'akanda.rug.openstack.common.rpc.amqp=INFO',
        'neutronclient.client=INFO',
        'oslo.messaging=INFO',
    ]
    cfg.CONF.set_default('logging_default_format_string', log_format)
    log.set_defaults(default_log_levels=log_levels)
    # Parse last so config files can still override the defaults above.
    cfg.CONF(argv,
             project='akanda-rug',
             default_config_files=default_config_files)
Example #14
0
def launch_engine(setup_logging=True):
    """Create and return a launcher running the heat-engine service.

    :param setup_logging: skip oslo.log configuration when False (e.g. the
        caller has already configured logging).
    :returns: the oslo.service launcher wrapping the engine service.
    """
    if setup_logging:
        logging.register_options(cfg.CONF)
    cfg.CONF(project='heat', prog='heat-engine',
             version=version.version_info.version_string())
    if setup_logging:
        logging.setup(cfg.CONF, 'heat-engine')
        # NOTE(review): set_defaults() after setup() likely has no effect —
        # confirm the intended call order.
        logging.set_defaults()
    messaging.setup()

    config.startup_sanity_check()

    mgr = None
    try:
        mgr = template._get_template_extension_manager()
    except template.TemplatePluginNotRegistered as ex:
        LOG.critical("%s", ex)
    # Refuse to start without at least one template format plugin.
    if not mgr or not mgr.names():
        sys.exit("ERROR: No template format plugins registered")

    # Imported late: the engine service pulls in heavy dependencies.
    from heat.engine import service as engine  # noqa

    profiler.setup('heat-engine', cfg.CONF.host)
    gmr.TextGuruMeditation.setup_autorun(version)
    srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)
    workers = cfg.CONF.num_engine_workers
    if not workers:
        # Default worker count: at least 4, or one per CPU if more.
        workers = max(4, processutils.get_worker_count())

    launcher = service.launch(cfg.CONF, srv, workers=workers,
                              restart_method='mutate')
    return launcher
Example #15
0
def setup_logging():
    """Set some oslo log defaults."""
    # disable freezer from logging to stderr
    CONF.set_default('use_stderr', False)
    # Log to the file prepared by prepare_logging() instead.
    CONF.set_default('log_file', prepare_logging())
    log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
    log.setup(CONF, 'freezer', version=FREEZER_VERSION)
Example #16
0
def main():
    """Start the cinder-backup service, single- or multi-process."""
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.set_defaults(
        default_log_levels=logging.get_default_log_levels() +
        _EXTRA_DEFAULT_LOG_LEVELS)
    logging.setup(CONF, "cinder")
    # Route warnings.warn() output through the logging system.
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    # Rebind the module-level LOG after logging has been configured.
    global LOG
    LOG = logging.getLogger(__name__)

    if CONF.backup_workers > 1:
        LOG.info('Backup running with %s processes.', CONF.backup_workers)
        launcher = service.get_launcher()

        # One backup service process per configured worker.
        for i in range(CONF.backup_workers):
            _launch_backup_process(launcher, i)

        launcher.wait()
    else:
        LOG.info('Backup running in single process mode.')
        server = service.Service.create(binary='cinder-backup',
                                        coordination=True,
                                        process_number=1)
        service.serve(server)
        service.wait()
Example #17
0
    def _log_setup(self):
        """Configure oslo.log from self.conf_log and ensure the log dir exists."""

        CONF = cfg.CONF

        level = self.conf_log.get('log_level')

        # Add the controller logger at the configured level on top of the
        # library defaults.
        logging.set_defaults(
            default_log_levels=logging.get_default_log_levels() +
            ['controller=' + level])

        DOMAIN = "masakari"
        # NOTE(review): CONF.log_file / CONF.use_stderr are assigned before
        # register_options(CONF); confirm oslo.config accepts attribute
        # assignment ahead of option registration here.
        CONF.log_file = self.conf_log.get("log_file")
        CONF.use_stderr = False

        logging.register_options(CONF)
        logging.setup(CONF, DOMAIN)

        log_dir = os.path.dirname(self.conf_log.get("log_file"))

        # create log dir if not created
        try:
            os.makedirs(log_dir)
        except OSError as exc:
            # Already existing as a directory is fine; anything else is not.
            if exc.errno == errno.EEXIST and os.path.isdir(log_dir):
                pass
            else:
                raise

        return
Example #18
0
def prepare_service(argv=None, config_files=None, conf=None):
    """Prepare and return the configured global conf for ceilometer.

    :param argv: command line (defaults to sys.argv); argv[0] is skipped.
    :param config_files: explicit config files to load, or None.
    :param conf: ConfigOpts to populate (defaults to the global cfg.CONF).
    :returns: the configured ConfigOpts instance.
    """
    if argv is None:
        argv = sys.argv

    # FIXME(sileht): Use ConfigOpts() instead
    if conf is None:
        conf = cfg.CONF

    oslo_i18n.enable_lazy()
    log.register_options(conf)
    # Quiet down some chatty client libraries by default.
    log_levels = (conf.default_log_levels +
                  ['futurist=INFO', 'neutronclient=INFO',
                   'keystoneclient=INFO'])
    log.set_defaults(default_log_levels=log_levels)
    defaults.set_cors_middleware_defaults()
    policy_opts.set_defaults(conf)

    conf(argv[1:], project='ceilometer', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    ka_loading.load_auth_from_conf_options(conf, "service_credentials")

    log.setup(conf, 'ceilometer')
    # NOTE(liusheng): guru cannot run with service under apache daemon, so when
    # ceilometer-api running with mod_wsgi, the argv is [], we don't start
    # guru.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    messaging.setup()
    return conf
Example #19
0
def parse_args(argv=None, config_file=None):
    """Loads application configuration.

    Loads entire application configuration just once.

    """
    global _CONF_LOADED
    if _CONF_LOADED:
        LOG.debug('Configuration has been already loaded')
        return

    log.set_defaults()
    log.register_options(CONF)

    argv = (argv if argv is not None else sys.argv[1:])
    # Under gunicorn the CLI belongs to gunicorn, so parse no args at all.
    args = ([] if _is_running_under_gunicorn() else argv or [])
    config_file = (_get_deprecated_config_file()
                   if config_file is None else config_file)

    CONF(args=args,
         prog='api',
         project='monasca',
         version=version.version_str,
         default_config_files=[config_file] if config_file else None,
         description='RESTful API for alarming in the cloud')

    log.setup(CONF,
              product_name='monasca-api',
              version=version.version_str)
    conf.register_opts()

    _CONF_LOADED = True
Example #20
0
def api_app(conf):
    """Build and return the kiloeyes API application, or None on failure.

    :param conf: a mapping with an optional 'name' key used as the
        project/log name (defaults to 'kiloeyes').
    """
    log.set_defaults(constant.KILOEYES_LOGGING_CONTEXT_FORMAT,
                     constant.KILOEYES_LOG_LEVELS)
    log.register_options(cfg.CONF)

    if conf.get('name'):
        name = conf.get('name')
    else:
        name = 'kiloeyes'

    cfg.CONF(args=[], project=name)

    log.setup(cfg.CONF, name)

    # Load every configured dispatcher extension, instantiated with CONF.
    dispatcher_manager = named.NamedExtensionManager(
        namespace=namespace.DISPATCHER_NS,
        names=cfg.CONF.dispatcher,
        invoke_on_load=True,
        invoke_args=[cfg.CONF])

    if not list(dispatcher_manager):
        LOG.error('Failed to load any dispatchers for %s' %
                  namespace.DISPATCHER_NS)
        return None

    # Create the application
    app = resource_api.ResourceAPI()

    # add each dispatcher to the application to serve requests offered by
    # each dispatcher
    for driver in dispatcher_manager:
        app.add_route(None, driver.obj)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
Example #21
0
def parse_args(argv, default_config_files=None, configure_db=True,
               init_rpc=True):
    """Parse nova's command line/config and initialize RPC and the DB.

    :param argv: full command line; argv[0] is skipped.
    :param default_config_files: explicit config files, or None.
    :param configure_db: when True, configure the sqlalchemy layer.
    :param init_rpc: when True, initialize the RPC layer.
    """
    log.register_options(CONF)
    # We use the oslo.log default log levels which includes suds=INFO
    # and add only the extra levels that Nova needs
    if CONF.glance.debug:
        extra_default_log_levels = ['glanceclient=DEBUG']
    else:
        extra_default_log_levels = ['glanceclient=WARN']
    log.set_defaults(default_log_levels=log.get_default_log_levels() +
                     extra_default_log_levels)
    rpc.set_defaults(control_exchange='nova')
    # profiler may be None when osprofiler is not installed.
    if profiler:
        profiler.set_defaults(CONF)
    config.set_middleware_defaults()

    CONF(argv[1:],
         project='nova',
         version=version.version_string(),
         default_config_files=default_config_files)

    if init_rpc:
        rpc.init(CONF)

    if configure_db:
        sqlalchemy_api.configure(CONF)
Example #22
0
def parse_args(argv, default_config_files=None, configure_db=True,
               init_rpc=True):
    """Parse nova's command line/config; initialize RPC and the databases.

    :param argv: full command line; argv[0] is skipped.
    :param default_config_files: explicit config files, or None.
    :param configure_db: when True, configure the nova and placement DBs.
    :param init_rpc: when True, initialize the RPC layer.
    """
    log.register_options(CONF)
    # We use the oslo.log default log levels which includes suds=INFO
    # and add only the extra levels that Nova needs
    if CONF.glance.debug:
        extra_default_log_levels = ['glanceclient=DEBUG']
    else:
        extra_default_log_levels = ['glanceclient=WARN']

    # NOTE(danms): DEBUG logging in privsep will result in some large
    # and potentially sensitive things being logged.
    extra_default_log_levels.append('oslo.privsep.daemon=INFO')

    log.set_defaults(default_log_levels=log.get_default_log_levels() +
                     extra_default_log_levels)
    rpc.set_defaults(control_exchange='nova')
    # profiler may be None when osprofiler is not installed.
    if profiler:
        profiler.set_defaults(CONF)
    config.set_middleware_defaults()

    CONF(argv[1:],
         project='nova',
         version=version.version_string(),
         default_config_files=default_config_files)

    if init_rpc:
        rpc.init(CONF)

    if configure_db:
        sqlalchemy_api.configure(CONF)
        placement_db.configure(CONF)
def _parse_config():
    """Build, parse and return the bifrost inventory configuration."""
    parsed_conf = cfg.ConfigOpts()
    log.register_options(parsed_conf)
    parsed_conf.register_cli_opts(opts)
    parsed_conf(prog='bifrost_inventory.py')
    log.set_defaults()
    log.setup(parsed_conf, "bifrost_inventory.py")
    return parsed_conf
def enable_logging(conf=None, app_name='castellan'):
    """Register and configure oslo.log for *app_name* on *conf*.

    Falls back to the global cfg.CONF when no conf is supplied.
    """
    conf = conf or cfg.CONF
    log.register_options(conf)
    log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
    log.setup(conf, app_name)
Example #25
0
def launch(conf, config_file="/etc/monasca/api-config.conf"):
    """Build and return the falcon WSGI application for the monasca API.

    Parses the given config file, configures logging, then wires each
    configured dispatcher driver to its URL routes.
    """
    log.register_options(cfg.CONF)
    log.set_defaults()
    cfg.CONF(args=[],
             project='monasca_api',
             default_config_files=[config_file])
    log.setup(cfg.CONF, 'monasca_api')

    app = falcon.API()

    versions = simport.load(cfg.CONF.dispatcher.versions)()
    app.add_route("/", versions)
    app.add_route("/{version_id}", versions)

    # The following resource is a workaround for a regression in falcon 0.3
    # which causes the path '/v2.0' to not route to the versions resource
    version_2_0 = simport.load(cfg.CONF.dispatcher.version_2_0)()
    app.add_route("/v2.0", version_2_0)

    metrics = simport.load(cfg.CONF.dispatcher.metrics)()
    app.add_route("/v2.0/metrics", metrics)

    metrics_measurements = simport.load(
        cfg.CONF.dispatcher.metrics_measurements)()
    app.add_route("/v2.0/metrics/measurements", metrics_measurements)

    metrics_statistics = simport.load(cfg.CONF.dispatcher.metrics_statistics)()
    app.add_route("/v2.0/metrics/statistics", metrics_statistics)

    metrics_names = simport.load(cfg.CONF.dispatcher.metrics_names)()
    app.add_route("/v2.0/metrics/names", metrics_names)

    alarm_definitions = simport.load(cfg.CONF.dispatcher.alarm_definitions)()
    app.add_route("/v2.0/alarm-definitions/", alarm_definitions)
    app.add_route("/v2.0/alarm-definitions/{alarm_definition_id}",
                  alarm_definitions)

    alarms = simport.load(cfg.CONF.dispatcher.alarms)()
    app.add_route("/v2.0/alarms", alarms)
    app.add_route("/v2.0/alarms/{alarm_id}", alarms)

    alarm_count = simport.load(cfg.CONF.dispatcher.alarms_count)()
    app.add_route("/v2.0/alarms/count/", alarm_count)

    alarms_state_history = simport.load(
        cfg.CONF.dispatcher.alarms_state_history)()
    app.add_route("/v2.0/alarms/state-history", alarms_state_history)
    app.add_route("/v2.0/alarms/{alarm_id}/state-history",
                  alarms_state_history)

    notification_methods = simport.load(
        cfg.CONF.dispatcher.notification_methods)()
    app.add_route("/v2.0/notification-methods", notification_methods)
    app.add_route("/v2.0/notification-methods/{notification_method_id}",
                  notification_methods)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
Example #26
0
def prepare_service(argv=None):
    """Parse config for glance-search and configure logging/messaging.

    :param argv: full command line (defaults to sys.argv); argv[0] skipped.
    """
    i18n.enable_lazy()
    # Fix: the level list was passed positionally, which binds it to
    # log.set_defaults()'s first parameter (logging_context_format_string),
    # not to default_log_levels. Pass it by keyword.
    log.set_defaults(default_log_levels=_DEFAULT_LOG_LEVELS)
    log.register_options(CONF)
    if argv is None:
        argv = sys.argv
    CONF(argv[1:], project='glance-search')
    log.setup(cfg.CONF, 'glance-search')
    oslo.messaging.set_transport_defaults('glance')
Example #27
0
def parse_args(argv, default_config_files=None):
    """Set logging/DB defaults and parse the ec2api command line.

    :param argv: full command line; argv[0] is skipped.
    :param default_config_files: explicit config files, or None.
    """
    log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
    log.register_options(CONF)
    # Default the database connection before config files can override it.
    options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION)

    cfg.CONF(argv[1:],
             project='ec2api',
             version=version.version_info.version_string(),
             default_config_files=default_config_files)
Example #28
0
def parse_args(argv, **kwargs):
    """Set logging/DB defaults and parse the daolicontroller command line.

    :param argv: full command line; argv[0] is skipped.
    :param kwargs: extra keyword arguments forwarded to CONF().
    """
    log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
    log.register_options(CONF)
    # Default the database connection before config files can override it.
    options.set_defaults(CONF,
                         connection=_DEFAULT_SQL_CONNECTION,
                         sqlite_db='daolicontroller.sqlite')
    CONF(argv[1:], project='daolicontroller', version='1.0', **kwargs)
Example #29
0
def parse_args(args=None, usage=None, default_config_files=None):
    """Set mistral's log-level defaults and parse the command line.

    :param args: argument list to parse (None lets oslo.config use argv).
    :param usage: usage string passed through to the config parser.
    :param default_config_files: explicit config files, or None.
    """
    log.set_defaults(default_log_levels=_DEFAULT_LOG_LEVELS)
    log.register_options(CONF)
    CONF(args=args, project='mistral', version=version, usage=usage,
         default_config_files=default_config_files)
Example #30
0
def start_agent():
    """Load config, set up logging, and run the metering agent to completion."""
    conf = load_config(cfg.CONF)
    log.set_defaults()
    log.setup(conf, "watcher_metering")

    # conf.agent supplies the Agent's keyword configuration.
    agent = Agent(conf, **conf.agent)

    agent.register_drivers()
    agent.start()
    # Block until the agent stops.
    agent.join()
Example #31
0
def main():
    """Start the trove guest agent RPC service for the local datastore.

    :raises RuntimeError: when no manager is registered for the configured
        datastore, or when guest_id is not set.
    """
    # Quiet the docker client on top of the oslo defaults.
    log_levels = [
        'docker=WARN',
    ]
    default_log_levels = logging.get_default_log_levels()
    default_log_levels.extend(log_levels)
    logging.set_defaults(default_log_levels=default_log_levels)
    logging.register_options(CONF)

    cfg.parse_args(sys.argv)
    logging.setup(CONF, None)
    debug_utils.setup()

    # Imported late so config/monkey-patching decisions happen first.
    from trove.guestagent import dbaas
    manager = dbaas.datastore_registry().get(CONF.datastore_manager)
    if not manager:
        msg = (_("Manager class not registered for datastore manager %s") %
               CONF.datastore_manager)
        raise RuntimeError(msg)

    if not CONF.guest_id:
        msg = (_("The guest_id parameter is not set. guest_info.conf "
               "was not injected into the guest or not read by guestagent"))
        raise RuntimeError(msg)

    # Create user and group for running docker container.
    LOG.info('Creating user and group for database service')
    uid = cfg.get_configuration_property('database_service_uid')
    operating_system.create_user('database', uid)

    # rpc module must be loaded after decision about thread monkeypatching
    # because if thread module is not monkeypatched we can't use eventlet
    # executor from oslo_messaging library.
    from trove import rpc
    rpc.init(CONF)

    from trove.common.rpc import service as rpc_service
    server = rpc_service.RpcService(
        key=CONF.instance_rpc_encr_key,
        topic="guestagent.%s" % CONF.guest_id,
        manager=manager, host=CONF.guest_id,
        rpc_api_version=guest_api.API.API_LATEST_VERSION)

    launcher = openstack_service.launch(CONF, server, restart_method='mutate')
    launcher.wait()
Example #32
0
def main():
    """Drive the stresshm load test: seed the DB, spawn amphora simulators,
    run for the configured time, then clean up."""
    logging.register_options(cfg.CONF)
    cfg.CONF(args=sys.argv[1:], project='stresshm', version='stresshm 1.0')
    # NOTE(review): set_defaults() after CONF() parsing — defaults set here
    # cannot influence already-parsed values; confirm intended order.
    logging.set_defaults()
    logging.setup(cfg.CONF, 'stresshm')
    LOG = logging.getLogger(__name__)

    # Session maker bound to the octavia database from the test config.
    octavia_context_manager = enginefacade.transaction_context()
    octavia_context_manager.configure(
        connection=CONF.test_params.octavia_db_connection)
    o_session_maker = octavia_context_manager.writer.get_sessionmaker()

    if CONF.db_create_only:
        LOG.info('Your run prefix ID is: %s' % PREFIX)
        setup_db(o_session_maker, PREFIX)
        return

    if CONF.clean_db:
        cleanup_db(o_session_maker, CONF.clean_db)
        return

    LOG.info('Your run prefix ID is: %s' % PREFIX)
    lbs = setup_db(o_session_maker, PREFIX)

    exit_event = multiprocessing.Event()
    processes = []

    # One simulator process per amphora of every load balancer.
    for i in range(CONF.test_params.load_balancers):
        for amp_id in lbs[i]['amphorae']:
            amp = multiprocessing.Process(name='amp' + str(i),
                                          target=amp_sim,
                                          args=(exit_event, amp_id, lbs[i]))
            processes.append(amp)
            amp.start()

    # Let the simulators run for the configured duration.
    time.sleep(CONF.test_params.test_runtime_secs)

    # Signal all simulators to stop, then wait for them.
    exit_event.set()

    for process in processes:
        process.join()

    cleanup_db(o_session_maker, PREFIX)

    return
Example #33
0
def main():
    """Start the abbot-engine RPC service and wait for it to exit."""
    logging.register_options(cfg.CONF)
    cfg.CONF(project='abbot',
             prog='abbot-engine',
             version=version.version_info.version_string())
    logging.setup(cfg.CONF, 'abbot-engine')
    # NOTE(review): set_defaults() after setup() likely has no effect —
    # confirm the intended call order.
    logging.set_defaults()
    messaging.setup()

    config.startup_sanity_check()

    # Imported late: the engine service pulls in heavy dependencies.
    from heat.engine import service as engine  # noqa

    srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)
    launcher = service.launch(cfg.CONF,
                              srv,
                              workers=cfg.CONF.num_engine_workers)
    launcher.wait()
Example #34
0
def set_default_for_default_log_levels():
    """Set the default for the default_log_levels option for keystone.

    Keystone uses some packages that other OpenStack services don't use
    that do logging; this sets the default_log_levels default level for
    those packages. Must be called before CONF().
    """
    extra_levels = ['dogpile=INFO', 'routes=INFO']

    log.register_options(CONF)
    log.set_defaults(
        default_log_levels=log.get_default_log_levels() + extra_levels)
Example #35
0
def set_log_defaults():
    """Extend oslo.log's default levels with the extras Nova needs."""
    # The oslo.log defaults already include suds=INFO; add only extras.
    extras = ['glanceclient=DEBUG' if CONF.glance.debug
              else 'glanceclient=WARN']

    # Allow cinderclient and os_brick to log at DEBUG without Nova.
    if CONF.cinder.debug:
        extras += ['cinderclient=DEBUG', 'os_brick=DEBUG']

    # NOTE(danms): DEBUG logging in privsep will result in some large
    # and potentially sensitive things being logged.
    extras.append('oslo.privsep.daemon=INFO')

    log.set_defaults(
        default_log_levels=log.get_default_log_levels() + extras)
Example #36
0
def main():
    """Start the senlin-engine RPC service and wait for it to exit."""
    logging.register_options(cfg.CONF)
    cfg.CONF(project='senlin', prog='senlin-engine')
    logging.setup(cfg.CONF, 'senlin-engine')
    # NOTE(review): set_defaults() after setup() likely has no effect —
    # confirm the intended call order.
    logging.set_defaults()
    objects.register_all()
    messaging.setup()

    # Imported late: the engine service pulls in heavy dependencies.
    from senlin.engine import service as engine

    profiler.setup('senlin-engine', cfg.CONF.host)
    srv = engine.EngineService(cfg.CONF.host, consts.ENGINE_TOPIC)
    launcher = service.launch(cfg.CONF, srv,
                              workers=cfg.CONF.num_engine_workers,
                              restart_method='mutate')
    # the following periodic tasks are intended serve as HA checking
    # srv.create_periodic_tasks()
    launcher.wait()
Example #37
0
    def setup_logging(self, args):
        """Parse args for ironic-inspector and configure oslo.log.

        :param args: command-line argument list passed to CONF().
        """
        # Log options must be registered before CONF() parses arguments.
        log.register_options(CONF)
        CONF(args, project='ironic-inspector')

        # Quiet down chatty third-party libraries by default.
        log.set_defaults(default_log_levels=[
            'sqlalchemy=WARNING',
            'iso8601=WARNING',
            'requests=WARNING',
            'urllib3.connectionpool=WARNING',
            'keystonemiddleware=WARNING',
            'swiftclient=WARNING',
            'keystoneauth=WARNING',
            'ironicclient=WARNING'
        ])
        log.setup(CONF, 'ironic_inspector')

        LOG.debug("Configuration:")
        CONF.log_opt_values(LOG, log.DEBUG)
Example #38
0
def init_config_and_logging(opts):
    """Register *opts*, parse shaker's config, and enable logging.

    Exits with status 1 when a required option is missing.
    """
    conf = cfg.CONF
    conf.register_cli_opts(opts)
    conf.register_opts(opts)
    logging.register_options(conf)
    logging.set_defaults()

    try:
        conf(project='shaker')
        validate_required_opts(conf, opts)
    except cfg.RequiredOptError as e:
        print('Error: %s' % e)
        conf.print_usage()
        # NOTE(review): this is the builtin exit(), not sys.exit() —
        # works in practice but depends on the site module; confirm.
        exit(1)

    logging.setup(conf, 'shaker')
    LOG.info('Logging enabled')
    conf.log_opt_values(LOG, std_logging.DEBUG)
Example #39
0
def main():
    """Start the senlin health-manager service and wait for it to exit."""
    config.parse_args(sys.argv, 'senlin-health-manager')
    logging.setup(CONF, 'senlin-health-manager')
    # NOTE(review): set_defaults() after setup() likely has no effect —
    # confirm the intended call order.
    logging.set_defaults()
    gmr.TextGuruMeditation.setup_autorun(version)
    objects.register_all()
    messaging.setup()

    # Imported late: the service pulls in heavy dependencies.
    from senlin.health_manager import service as health_manager

    profiler.setup('senlin-health-manager', CONF.host)
    srv = health_manager.HealthManagerService(CONF.host,
                                              consts.HEALTH_MANAGER_TOPIC)
    launcher = service.launch(CONF,
                              srv,
                              workers=CONF.health_manager.workers,
                              restart_method='mutate')
    launcher.wait()
Example #40
0
def main():
    """Start the senlin-engine RPC service and wait for it to exit."""
    logging.register_options(cfg.CONF)
    cfg.CONF(project='senlin', prog='senlin-engine')
    logging.setup(cfg.CONF, 'senlin-engine')
    # NOTE(review): set_defaults() after setup() likely has no effect —
    # confirm the intended call order.
    logging.set_defaults()
    gmr.TextGuruMeditation.setup_autorun(version)
    objects.register_all()
    messaging.setup()

    # Imported late: the engine service pulls in heavy dependencies.
    from senlin.engine import service as engine

    profiler.setup('senlin-engine', cfg.CONF.host)
    srv = engine.EngineService(cfg.CONF.host, consts.ENGINE_TOPIC)
    launcher = service.launch(cfg.CONF,
                              srv,
                              workers=cfg.CONF.engine.workers,
                              restart_method='mutate')
    launcher.wait()
Example #41
0
def update_opt_defaults():
    """Lower default log verbosity for chatty third-party modules."""
    quiet_levels = [
        'amqp=WARNING',
        'amqplib=WARNING',
        'qpid.messaging=INFO',
        'oslo_messaging=INFO',
        'sqlalchemy=WARNING',
        'stevedore=INFO',
        'eventlet.wsgi.server=INFO',
        'iso8601=WARNING',
        'paramiko=WARNING',
        'requests=WARNING',
        'neutronclient=WARNING',
        'glanceclient=WARNING',
        'urllib3.connectionpool=WARNING',
        'keystonemiddleware.auth_token=INFO',
        'keystoneauth.session=INFO',
    ]
    log.set_defaults(default_log_levels=quiet_levels)
Example #42
0
def main(ctx, debug, api, url, token):
    """
    Multi Helm Chart Deployment Manager

    Common actions from this point include:

    \b
    $ armada apply
    $ armada delete
    $ armada test
    $ armada tiller
    $ armada validate

    Environment:

        \b
        $TOKEN set auth token
        $HOST  set armada service host endpoint

    This tool will communicate with deployed Tiller in your Kubernetes cluster.
    """

    if not ctx.obj:
        ctx.obj = {}

    if api:
        # Remote API mode needs BOTH the endpoint URL and an auth token;
        # the previous message only mentioned the url and was ungrammatical.
        if not url or not token:
            raise click.ClickException(
                'When api option is enabled, both url and token must be '
                'passed')
        ctx.obj['api'] = api
        parsed_url = urlparse(url)
        ctx.obj['CLIENT'] = ArmadaClient(
            ArmadaSession(host=parsed_url.netloc,
                          scheme=parsed_url.scheme,
                          token=token))

    log.register_options(CONF)

    if debug:
        CONF.debug = debug

    log.set_defaults(default_log_levels=CONF.default_log_levels)
    log.setup(CONF, 'armada')
Example #43
0
def parse_args(argv,
               default_config_files=None,
               configure_db=True,
               init_rpc=True):
    """Parse CLI/config options for the 'similar' project.

    :param argv: full argument vector; argv[0] (program name) is skipped.
    :param default_config_files: optional list of configuration files.
    :param configure_db: accepted for interface compatibility; unused here.
    :param init_rpc: accepted for interface compatibility; unused here.
    """
    log.register_options(CONF)
    # Use the oslo.log default levels unchanged; previously dead,
    # commented-out code for a debug-dependent override sat here.
    log.set_defaults(default_log_levels=log.get_default_log_levels())
    # 'profiler' may be None when osprofiler is not installed.
    if profiler:
        profiler.set_defaults(CONF)
    config.set_middleware_defaults()
    CONF(argv[1:],
         project='similar',
         version=version.version_string(),
         default_config_files=default_config_files)
Example #44
0
def main():
    """Entry point for the senlin-health-manager service."""
    conf = cfg.CONF
    logging.register_options(conf)
    conf(project='senlin', prog='senlin-health-manager')
    logging.setup(conf, 'senlin-health-manager')
    logging.set_defaults()
    gmr.TextGuruMeditation.setup_autorun(version)
    objects.register_all()
    messaging.setup()

    # Deferred import, performed after configuration and logging are ready.
    from senlin.health_manager import service as health_manager

    profiler.setup('senlin-health-manager', conf.host)
    server = health_manager.HealthManagerService(
        conf.host, consts.HEALTH_MANAGER_TOPIC)
    launcher = service.launch(
        conf, server,
        workers=conf.health_manager.workers,
        restart_method='mutate')
    launcher.wait()
Example #45
0
def launch(conf, config_file='/etc/monasca/log-api-config.conf'):
    """Build and return the monasca-log-api falcon application."""
    # An explicit 'config_file' entry in the passed mapping overrides
    # the built-in default path.
    if conf and 'config_file' in conf:
        config_file = conf.get('config_file')

    log.set_defaults()
    CONF(args=[],
         project='monasca_log_api',
         default_config_files=[config_file])
    log.setup(CONF, 'monasca_log_api')

    app = falcon.API()

    # Attach every resource family to the application.
    for attach in (load_versions_resource,
                   load_logs_resource,
                   load_healthcheck_resource):
        attach(app)

    LOG.debug('Dispatcher drivers have been added to the routes!')

    return app
Example #46
0
def main():
    """Entry point for the senlin-conductor service."""
    config.parse_args(sys.argv, 'senlin-conductor')
    logging.setup(CONF, 'senlin-conductor')
    logging.set_defaults()
    gmr.TextGuruMeditation.setup_autorun(version)
    objects.register_all()
    messaging.setup()

    # Deferred import, performed after configuration and logging are ready.
    from senlin.conductor import service as conductor

    profiler.setup('senlin-conductor', CONF.host)
    server = conductor.ConductorService(CONF.host, consts.CONDUCTOR_TOPIC)
    launcher = service.launch(
        CONF, server,
        workers=CONF.conductor.workers,
        restart_method='mutate')
    # the following periodic tasks are intended to serve as HA checking
    # server.create_periodic_tasks()
    launcher.wait()
Example #47
0
def update_opt_defaults():
    """Lower default log verbosity for chatty third-party modules."""
    quiet_levels = [
        'amqp=WARNING',
        'amqplib=WARNING',
        'qpid.messaging=INFO',
        # TODO(therve): when bug #1685148 is fixed in oslo.messaging, we
        # should be able to remove one of those 2 lines.
        'oslo_messaging=INFO',
        'oslo.messaging=INFO',
        'sqlalchemy=WARNING',
        'stevedore=INFO',
        'eventlet.wsgi.server=INFO',
        'iso8601=WARNING',
        'requests=WARNING',
        'neutronclient=WARNING',
        'glanceclient=WARNING',
        'urllib3.connectionpool=WARNING',
        'keystonemiddleware.auth_token=INFO',
        'keystoneauth.session=INFO',
    ]
    log.set_defaults(default_log_levels=quiet_levels)
Example #48
0
def setup_logging():
    """Set some oslo log defaults."""
    default_levels = [
        'amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN',
        'stevedore=WARN', 'oslo_log=INFO', 'iso8601=WARN',
        'requests.packages.urllib3.connectionpool=WARN',
        'urllib3.connectionpool=WARN', 'websocket=WARN',
        'keystonemiddleware=WARN', 'freezer=INFO'
    ]

    context_format = (
        '%(asctime)s.%(msecs)03d %(process)d '
        '%(levelname)s %(name)s [%(request_id)s '
        '%(user_identity)s] %(instance)s'
        '%(message)s')
    # disable freezer from logging to stderr
    CONF.set_default('use_stderr', False)
    CONF.set_default('log_file', prepare_logging())
    log.set_defaults(logging_context_format_string=context_format,
                     default_log_levels=default_levels)
    log.setup(CONF, 'freezer', version=FREEZER_VERSION)
Example #49
0
def prepare_service(argv=None):
    """Register logging options/defaults and parse configuration for nca47.

    :param argv: optional list of CLI arguments; defaults to an empty list.
        (The original used the mutable default ``argv=[]``, a classic
        Python pitfall; ``None`` with an in-body default is equivalent
        and safe.)
    """
    if argv is None:
        argv = []
    log.register_options(CONF)
    log.set_defaults(default_log_levels=[
        'amqp=WARNING',
        'amqplib=WARNING',
        'qpid.messaging=INFO',
        'oslo_messaging=INFO',
        'sqlalchemy=WARNING',
        'keystoneclient=INFO',
        'stevedore=INFO',
        'eventlet.wsgi.server=WARNING',
        'iso8601=WARNING',
        'paramiko=WARNING',
        'requests=WARNING',
        'neutronclient=WARNING',
        'glanceclient=WARNING',
        'urllib3.connectionpool=WARNING',
    ])
    config.parse_args(argv)
    log.setup(CONF, 'nca47')
Example #50
0
def prepare_service(argv=None, config_files=None):
    """Parse configuration, set up logging and messaging for ceilometer."""
    oslo_i18n.enable_lazy()
    log.register_options(cfg.CONF)
    extra_levels = ['stevedore=INFO', 'keystoneclient=INFO']
    log.set_defaults(
        default_log_levels=cfg.CONF.default_log_levels + extra_levels)
    if argv is None:
        argv = sys.argv
    cfg.CONF(argv[1:],
             project='ceilometer',
             validate_default_values=True,
             version=version.version_info.version_string(),
             default_config_files=config_files)
    log.setup(cfg.CONF, 'ceilometer')
    # NOTE(liusheng): guru cannot run with service under apache daemon, so when
    # ceilometer-api running with mod_wsgi, the argv is [], we don't start
    # guru.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    messaging.setup()
Example #51
0
def init_application():
    """Configure heat-api-cfn and return its WSGI application."""
    i18n.enable_lazy()

    logging.register_options(CONF)
    CONF(project='heat',
         prog='heat-api-cfn',
         version=version.version_info.version_string())
    logging.setup(CONF, CONF.prog)
    logging.set_defaults()
    logger = logging.getLogger(CONF.prog)
    config.set_config_defaults()
    messaging.setup()

    bind_host = CONF.heat_api_cfn.bind_host
    bind_port = CONF.heat_api_cfn.bind_port
    logger.info('Starting Heat API on %(host)s:%(port)s',
                {'host': bind_host, 'port': bind_port})
    profiler.setup(CONF.prog, bind_host)

    return config.load_paste_app()
Example #52
0
def parse_args(argv,
               default_config_files=None,
               configure_db=True,
               init_rpc=True):
    """Parse masakari options, optionally initializing RPC and the DB layer.

    :param argv: full argument vector; argv[0] (program name) is skipped.
    :param default_config_files: optional list of configuration files.
    :param configure_db: when True, configure the sqlalchemy layer.
    :param init_rpc: when True, initialize the RPC layer.
    """
    log.register_options(CONF)
    # We use the oslo.log default log levels which includes suds=INFO
    # and add only the extra levels that Masakari needs
    default_levels = log.get_default_log_levels()
    log.set_defaults(default_log_levels=default_levels)
    rpc.set_defaults(control_exchange='masakari')
    config.set_middleware_defaults()

    CONF(argv[1:],
         project='masakari',
         version=version.version_string(),
         default_config_files=default_config_files)

    if init_rpc:
        rpc.init(CONF)

    if configure_db:
        sqlalchemy_api.configure(CONF)
Example #53
0
def main():
    """Parse configuration, set up logging and launch the API service.

    Dead, commented-out experimentation code (prepare() call and a
    log-level override) has been removed.
    """
    logging.register_options(CONF)

    CONF(project='learn_oslo', prog=PROG)

    logging.setup(CONF, PROG)
    logging.set_defaults()

    try:
        service.launch(CONF, Apiservice(), workers=2)
    except Exception:
        # Top-level boundary: log the failure with traceback.
        LOG.exception('Service launch failed!')
Example #54
0
def main():
    """Start the cinder-backup service with one process per backup worker."""
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder', version=version.version_string())
    logging.set_defaults(default_log_levels=logging.get_default_log_levels() +
                         _EXTRA_DEFAULT_LOG_LEVELS)
    logging.setup(CONF, "cinder")
    # Route Python warnings through the logging subsystem.
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    # Rebind the module-level LOG only after logging has been configured.
    global LOG
    LOG = logging.getLogger(__name__)

    LOG.info('Backup running with %s processes.', CONF.backup_workers)
    launcher = service.get_launcher()

    # Spawn worker processes numbered 1..CONF.backup_workers inclusive.
    for i in range(1, CONF.backup_workers + 1):
        _launch_backup_process(launcher, i)

    launcher.wait()
Example #55
0
def prepare_service(argv=None):
    """Build a fully-registered ConfigOpts for aodh and set up logging."""
    conf = cfg.ConfigOpts()
    oslo_i18n.enable_lazy()
    log.register_options(conf)
    extra_levels = ['stevedore=INFO', 'keystoneclient=INFO']
    log.set_defaults(default_log_levels=conf.default_log_levels + extra_levels)
    db_options.set_defaults(conf)
    policy_opts.set_defaults(conf)
    for grp, opt_list in ks_opts.list_auth_token_opts():
        conf.register_opts(list(opt_list), group=grp)
    from aodh import opts
    # Register our own Aodh options
    for grp, opt_list in opts.list_opts():
        conf.register_opts(list(opt_list),
                           group=None if grp == "DEFAULT" else grp)

    conf(argv, project='aodh', validate_default_values=True)
    log.setup(conf, 'aodh')
    messaging.setup()
    return conf
Example #56
0
def parse_args():
    """Load persister configuration exactly once and set up logging."""
    global _CONF_LOADED
    # Idempotent: a second call is a no-op.
    if _CONF_LOADED:
        LOG.debug('Configuration has been already loaded')
        return

    log.set_defaults()
    log.register_options(CONF)

    CONF(prog='persister', project='monasca',
         version=version.version_str,
         description='Persists metrics & alarm history in TSDB')

    log.setup(CONF, product_name='persister', version=version.version_str)

    conf.register_opts()

    _CONF_LOADED = True
Example #57
0
def create(enable_middleware=CONF.middleware):
    """Entry point for initializing Armada server.

    :param enable_middleware: Whether to enable middleware.
    :type enable_middleware: bool
    """

    if enable_middleware:
        api = falcon.API(
            request_type=ArmadaRequest,
            middleware=[
                AuthMiddleware(),
                ContextMiddleware(),
                LoggingMiddleware(),
            ])
    else:
        api = falcon.API(request_type=ArmadaRequest)

    logging.set_defaults(default_log_levels=CONF.default_log_levels)
    logging.setup(CONF, 'armada')

    # Configure API routing: (relative path, resource) pairs.
    url_routes_v1 = (
        ('health', Health()),
        ('apply', Apply()),
        ('releases', Release()),
        ('status', Status()),
        ('tests', TestReleasesManifestController()),
        ('test/{release}', TestReleasesReleaseNameController()),
        ('validatedesign', Validate()),
    )

    for route, resource in url_routes_v1:
        api.add_route("/api/v1.0/{}".format(route), resource)
    api.add_route('/versions', Versions())

    # Initialize policy config options.
    policy.Enforcer(CONF)

    return api
Example #58
0
def parse_config(argv, default_config_files=DEFAULT_CONFIG_FILES):
    """Register log options, tune formats/levels and parse astara config.

    :param argv: CLI arguments, passed straight through to oslo.config.
    :param default_config_files: candidate config file paths; entries that
        do not exist on disk are dropped.
    """
    log.register_options(cfg.CONF)
    # Set the logging format to include the process and thread, since
    # those aren't included in standard openstack logs but are useful
    # for the rug
    extended = ':'.join(
        '%(' + n + ')s'
        for n in ['name', 'process', 'processName', 'threadName'])
    log_format = ('%(asctime)s.%(msecs)03d %(levelname)s ' + extended +
                  ' %(message)s')

    # Configure the default log levels for some third-party packages
    # that are chatty
    log_levels = [
        'amqp=WARN',
        'amqplib=WARN',
        'qpid.messaging=INFO',
        'sqlalchemy=WARN',
        'keystoneclient=INFO',
        'stevedore=INFO',
        'eventlet.wsgi.server=WARN',
        'requests=WARN',
        'astara.openstack.common.rpc.amqp=INFO',
        'neutronclient.client=INFO',
        'oslo.messaging=INFO',
        'iso8601=INFO',
        'cliff.commandmanager=INFO',
    ]
    cfg.CONF.set_default('logging_default_format_string', log_format)
    log.set_defaults(default_log_levels=log_levels)

    # For legacy compatibility, resolve each candidate path and keep only
    # the files that actually exist. Materialized as a list (the previous
    # map/filter chain produced one-shot iterators on Python 3).
    default_config_files = [
        path
        for path in map(get_best_config_path, default_config_files)
        if os.path.isfile(path)
    ]

    cfg.CONF(argv,
             project='astara-orchestrator',
             default_config_files=default_config_files)
Example #59
0
def main():
    """Process default data and runtime records for stackalytics.

    Returns a non-zero value when the default data cannot be loaded;
    otherwise returns None.
    """
    # init conf and logging
    conf = cfg.CONF
    conf.register_cli_opts(config.OPTS)
    conf.register_opts(config.OPTS)
    logging.register_options(conf)
    logging.set_defaults()
    conf(project='stackalytics')

    logging.setup(conf, 'stackalytics')
    LOG.info('Logging enabled')
    conf.log_opt_values(LOG, std_logging.DEBUG)

    # 'conf' is cfg.CONF, so attribute access below is equivalent to the
    # original's mixed cfg.CONF usage, just consistent.
    runtime_storage_inst = runtime_storage.get_runtime_storage(
        conf.runtime_storage_uri)

    default_data = utils.read_json_from_uri(conf.default_data_uri)
    if not default_data:
        LOG.critical('Unable to load default data')
        # Was 'return not 0' (True); 1 is the conventional, equal-valued
        # failure code.
        return 1

    default_data_processor.process(runtime_storage_inst, default_data,
                                   conf.driverlog_data_uri)

    process_project_list(runtime_storage_inst, conf.project_list_uri)

    update_pids(runtime_storage_inst)

    record_processor_inst = record_processor.RecordProcessor(
        runtime_storage_inst)

    process(runtime_storage_inst, record_processor_inst)

    apply_corrections(conf.corrections_uri, runtime_storage_inst)

    # long operation should be the last
    update_members(runtime_storage_inst, record_processor_inst)

    runtime_storage_inst.set_by_key('runtime_storage_update_time',
                                    utils.date_to_timestamp('now'))
Example #60
0
def prepare_service(argv=None, config_files=None, conf=None):
    """Build a fully configured ConfigOpts for ceilometer services.

    :param argv: CLI argument vector; defaults to sys.argv.
    :param config_files: optional list of configuration files.
    :param conf: optional pre-built ConfigOpts; a fresh one is created
        when omitted.
    :returns: the configured ConfigOpts object.
    """
    argv = sys.argv if argv is None else argv
    conf = cfg.ConfigOpts() if conf is None else conf

    oslo_i18n.enable_lazy()
    for grp, opt_list in opts.list_opts():
        conf.register_opts(list(opt_list),
                           group=None if grp == "DEFAULT" else grp)
    keystone_client.register_keystoneauth_opts(conf)
    log.register_options(conf)
    extra_levels = ['futurist=INFO', 'neutronclient=INFO',
                    'keystoneclient=INFO']
    log.set_defaults(default_log_levels=conf.default_log_levels + extra_levels)
    defaults.set_cors_middleware_defaults()
    policy_opts.set_defaults(conf)
    db_options.set_defaults(conf)

    conf(argv[1:],
         project='ceilometer',
         validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    keystone_client.post_register_keystoneauth_opts(conf)

    log.setup(conf, 'ceilometer')
    utils.setup_root_helper(conf)
    sample.setup(conf)

    # NOTE(liusheng): guru cannot run with service under apache daemon, so when
    # ceilometer-api running with mod_wsgi, the argv is [], we don't start
    # guru.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    messaging.setup()
    return conf