Example #1
def main():
    utils.read_config('designate', sys.argv)

    logging.setup(CONF, 'designate')
    gmr.TextGuruMeditation.setup_autorun(version)

    # NOTE(timsim): This is to ensure people don't start the wrong
    #               services when the worker model is enabled.
    if cfg.CONF['service:worker'].enabled:
        LOG.error('You have designate-worker enabled, starting '
                  'designate-pool-manager is incompatible with '
                  'designate-worker. You need to start '
                  'designate-worker instead.')
        sys.exit(1)

    debtcollector.deprecate('designate-pool-manager is deprecated in favor of '
                            'designate-worker', version='newton',
                            removal_version='rocky')

    server = pool_manager_service.Service(
        threads=CONF['service:pool_manager'].threads
    )

    hookpoints.log_hook_setup()

    service.serve(server, workers=CONF['service:pool_manager'].workers)
    server.heartbeat_emitter.start()
    service.wait()
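# A minimal, self-contained sketch of the oslo.service launch/wait pattern
# that designate's serve()/wait() helpers wrap above (ExampleService and the
# worker count are illustrative assumptions, not designate code):
from oslo_config import cfg as example_cfg
from oslo_service import service as example_service

class ExampleService(example_service.Service):
    def start(self):
        super(ExampleService, self).start()
        # RPC servers, periodic tasks, etc. would be started here

launcher = example_service.launch(example_cfg.CONF, ExampleService(), workers=2)
launcher.wait()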
Example #2
def prepare_service(argv=None, config_files=None, conf=None):
    if argv is None:
        argv = sys.argv

    if conf is None:
        conf = cfg.ConfigOpts()

    oslo_i18n.enable_lazy()
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)
    keystone_client.register_keystoneauth_opts(conf)
    log.register_options(conf)
    log_levels = (conf.default_log_levels +
                  ['futurist=INFO', 'neutronclient=INFO',
                   'keystoneclient=INFO'])
    log.set_defaults(default_log_levels=log_levels)

    conf(argv[1:], project='ceilometer', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    keystone_client.post_register_keystoneauth_opts(conf)

    log.setup(conf, 'ceilometer')
    utils.setup_root_helper(conf)
    sample.setup(conf)

    gmr.TextGuruMeditation.setup_autorun(version)
    messaging.setup()
    return conf
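# A self-contained sketch of the register_opts loop used above: oslo.config
# treats group=None as the [DEFAULT] section, so entries under the "DEFAULT"
# key land there while every other key becomes a named group (the option
# names below are illustrative, not ceilometer's):
from oslo_config import cfg as sketch_cfg

sketch_conf = sketch_cfg.ConfigOpts()
sketch_opts = {
    "DEFAULT": [sketch_cfg.StrOpt("host", default="localhost")],
    "api": [sketch_cfg.PortOpt("port", default=8777)],
}
for sketch_group, sketch_options in sketch_opts.items():
    sketch_conf.register_opts(list(sketch_options),
                              group=None if sketch_group == "DEFAULT" else sketch_group)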
Example #3
def prepare_service(argv=None, config_files=None, conf=None):
    if argv is None:
        argv = sys.argv

    # FIXME(sileht): Use ConfigOpts() instead
    if conf is None:
        conf = cfg.CONF

    oslo_i18n.enable_lazy()
    log.register_options(conf)
    log_levels = (conf.default_log_levels +
                  ['futurist=INFO', 'neutronclient=INFO',
                   'keystoneclient=INFO'])
    log.set_defaults(default_log_levels=log_levels)
    defaults.set_cors_middleware_defaults()
    policy_opts.set_defaults(conf)

    conf(argv[1:], project='ceilometer', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    ka_loading.load_auth_from_conf_options(conf, "service_credentials")

    log.setup(conf, 'ceilometer')
    # NOTE(liusheng): the guru meditation report cannot run when the service
    # is hosted by an Apache daemon, so when ceilometer-api runs under
    # mod_wsgi the argv is [] and we do not start it.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    messaging.setup()
    return conf
Example #4
def parse_args_with_log(project, argv=None, version=None, conf=None, log=True,
                        default_config_files=None, default_log_format=None,
                        default_log_levels=None):

    conf = conf if conf else cfg.CONF
    argv = argv if argv else sys.argv[1:]

    if not log:
        conf(argv, project=project, version=version,
             default_config_files=default_config_files)
        return

    from oslo_log import log

    if project not in _ROOTS:
        _DEFAULT_LOG_LEVELS.append('%s=INFO' % project)
        _ROOTS.append(project)
    log_fmt = default_log_format if default_log_format else _DEFAULT_LOG_FORMAT
    log_lvl = default_log_levels if default_log_levels else _DEFAULT_LOG_LEVELS

    log.set_defaults(log_fmt, log_lvl)
    log.register_options(conf)

    # TODO: Configure options for the other libraries; this must be done
    # before the configuration file is parsed.

    conf(argv, project=project, version=version,
         default_config_files=default_config_files)

    log.setup(conf, project, version)
Example #5
def main_app(global_config):
    log.setup(config.CONF, 'bareon_api')
    return pecan.Pecan(
        root.V1Controller(),
        hooks=[],
        force_canonical=False
    )
Example #6
def api_app(conf):
    log.set_defaults(constant.KILOEYES_LOGGING_CONTEXT_FORMAT,
                     constant.KILOEYES_LOG_LEVELS)
    log.register_options(cfg.CONF)

    if conf.get('name'):
        name = conf.get('name')
    else:
        name = 'kiloeyes'

    cfg.CONF(args=[], project=name)

    log.setup(cfg.CONF, name)

    dispatcher_manager = named.NamedExtensionManager(
        namespace=namespace.DISPATCHER_NS,
        names=cfg.CONF.dispatcher,
        invoke_on_load=True,
        invoke_args=[cfg.CONF])

    if not list(dispatcher_manager):
        LOG.error('Failed to load any dispatchers for %s' %
                  namespace.DISPATCHER_NS)
        return None

    # Create the application
    app = resource_api.ResourceAPI()

    # add each dispatcher to the application to serve requests offered by
    # each dispatcher
    for driver in dispatcher_manager:
        app.add_route(None, driver.obj)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
Example #7
def main():
    try:
        logging.register_options(cfg.CONF)
        cfg.CONF(project='heat',
                 prog='heat-api-cloudwatch',
                 version=version.version_info.version_string())
        logging.setup(cfg.CONF, 'heat-api-cloudwatch')
        logging.set_defaults()
        messaging.setup()

        app = config.load_paste_app()

        port = cfg.CONF.heat_api_cloudwatch.bind_port
        host = cfg.CONF.heat_api_cloudwatch.bind_host
        LOG.info(_LI('Starting Heat CloudWatch API on %(host)s:%(port)s'),
                 {'host': host, 'port': port})
        profiler.setup('heat-api-cloudwatch', host)
        gmr.TextGuruMeditation.setup_autorun(version)
        server = wsgi.Server('heat-api-cloudwatch',
                             cfg.CONF.heat_api_cloudwatch)
        server.start(app, default_port=port)
        systemd.notify_once()
        server.wait()
    except RuntimeError as e:
        msg = six.text_type(e)
        sys.exit("ERROR: %s" % msg)
Example #8
def launch(conf, config_file="/etc/monasca/events_api.conf"):
    log.register_options(cfg.CONF)
    log.set_defaults()
    cfg.CONF(args=[],
             project='monasca_events_api',
             default_config_files=[config_file])
    log.setup(cfg.CONF, 'monasca_events_api')

    app = falcon.API()

    versions = simport.load(cfg.CONF.dispatcher.versions)()
    app.add_route("/", versions)
    app.add_route("/{version_id}", versions)

    events = simport.load(cfg.CONF.dispatcher.events)()
    app.add_route("/v2.0/events", events)
    app.add_route("/v2.0/events/{event_id}", events)

    streams = simport.load(cfg.CONF.dispatcher.stream_definitions)()
    app.add_route("/v2.0/stream-definitions/", streams)
    app.add_route("/v2.0/stream-definitions/{stream_id}", streams)

    transforms = simport.load(cfg.CONF.dispatcher.transforms)()
    app.add_route("/v2.0/transforms", transforms)
    app.add_route("/v2.0/transforms/{transform_id}", transforms)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
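# simport.load() above turns a dotted-path string from the config file into a
# class, and the trailing () instantiates it. A hedged sketch of the kind of
# falcon resource that could be routed this way (Versions is illustrative,
# not monasca's implementation; assumes falcon 1.x/2.x, where falcon.API is
# the application class and resp.body is writable):
import json

import falcon

class Versions(object):
    def on_get(self, req, resp):
        resp.body = json.dumps({"versions": ["v2.0"]})

example_app = falcon.API()
example_app.add_route("/", Versions())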
Example #9
def main():
    objects.register_all()
    CONF(sys.argv[1:], project='guts',
         version=version.version_string())
    logging.setup(CONF, "guts")
    LOG = logging.getLogger('guts.all')

    utils.monkey_patch()

    gmr.TextGuruMeditation.setup_autorun(version)

    rpc.init(CONF)

    launcher = service.process_launcher()
    # guts-api
    try:
        server = service.WSGIService('osapi_migration')
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load osapi_migration'))

    # guts-migration
    try:
        launcher.launch_service(
            service.Service.create(binary='guts-migration'))
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load guts-migration'))

    launcher.wait()
Example #10
def main():
    logging.register_options(cfg.CONF)
    cfg.CONF(project='heat', prog='heat-engine',
             version=version.version_info.version_string())
    logging.setup(cfg.CONF, 'heat-engine')
    logging.set_defaults()
    messaging.setup()

    config.startup_sanity_check()

    mgr = None
    try:
        mgr = template._get_template_extension_manager()
    except template.TemplatePluginNotRegistered as ex:
        LOG.critical(_LC("%s"), ex)
    if not mgr or not mgr.names():
        sys.exit("ERROR: No template format plugins registered")

    from heat.engine import service as engine  # noqa

    profiler.setup('heat-engine', cfg.CONF.host)
    gmr.TextGuruMeditation.setup_autorun(version)
    srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)
    workers = cfg.CONF.num_engine_workers
    if not workers:
        workers = max(4, processutils.get_worker_count())

    launcher = service.launch(cfg.CONF, srv, workers=workers)
    if cfg.CONF.enable_cloud_watch_lite:
        # We create the periodic tasks here, which means they are created
        # only in the parent process when num_engine_workers>1 is specified
        srv.create_periodic_tasks()
    launcher.wait()
Example #11
def main():
    logging.register_options(cfg.CONF)
    cfg.CONF(sys.argv[1:], project='magnum')
    logging.setup(cfg.CONF, 'magnum')

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    cfg.CONF.import_opt('topic', 'magnum.conductor.config', group='conductor')

    conductor_id = short_id.generate_id()
    endpoints = [
        docker_conductor.Handler(),
        k8s_conductor.Handler(),
        bay_conductor.Handler(),
        conductor_listener.Handler(),
    ]

    if (not os.path.isfile(cfg.CONF.bay.k8s_atomic_template_path)
            and not os.path.isfile(cfg.CONF.bay.k8s_coreos_template_path)):
        LOG.error(_LE("The Heat template can not be found for either k8s "
                      "atomic %(atomic_template)s or coreos "
                      "(coreos_template)%s. Install template first if you "
                      "want to create bay.") %
                  {'atomic_template': cfg.CONF.bay.k8s_atomic_template_path,
                   'coreos_template': cfg.CONF.bay.k8s_coreos_template_path})

    server = service.Service(cfg.CONF.conductor.topic,
                             conductor_id, endpoints)
    server.serve()
Example #12
    def _log_setup(self):

        CONF = cfg.CONF

        self.set_request_context()

        DOMAIN = "masakari"
        logging.register_options(CONF)
        CONF.log_file = self.conf_log.get("log_file")
        CONF.use_stderr = False

        logging.setup(CONF, DOMAIN)

        log_dir = os.path.dirname(self.conf_log.get("log_file"))

        # create log dir if not created
        try:
            os.makedirs(log_dir)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(log_dir):
                pass
            else:
                raise

        return
Example #13
def main():
    """The main function."""

    try:
        config.parse_args()
    except RuntimeError as e:
        sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e))
    except SystemExit as e:
        sys.exit("Please specify one command")

    # Setup logging
    logging.setup(CONF, 'xmonitor')

    if CONF.token:
        CONF.slavetoken = CONF.token
        CONF.mastertoken = CONF.token

    command = lookup_command(CONF.command)

    try:
        command(CONF, CONF.args)
    except TypeError as e:
        LOG.error(_LE(command.__doc__) % {'prog': command.__name__})  # noqa
        sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e))
    except ValueError as e:
        LOG.error(_LE(command.__doc__) % {'prog': command.__name__})  # noqa
        sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e))
Example #14
def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, 'gceapi')

    server = service.WSGIService('gceapi', max_url_len=16384)
    service.serve(server)
    service.wait()
Example #15
def prepare_service(argv=[], conf=cfg.CONF):
    log.register_options(conf)
    config.parse_args(argv)
    cfg.set_defaults(_options.log_opts,
                     default_log_levels=_DEFAULT_LOG_LEVELS)
    log.setup(conf, 'python-watcher')
    conf.log_opt_values(LOG, logging.DEBUG)
Example #16
def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, "ec2api")

    server = service.WSGIService("ec2api", max_url_len=16384)
    service.serve(server, workers=server.workers)
    service.wait()
Example #17
    def reload(self):
        """
        Reload and re-apply configuration settings

        Existing child processes are sent a SIGHUP signal
        and will exit after completing existing requests.
        New child processes, which will have the updated
        configuration, are spawned. This avoids
        interrupting the service.
        """
        def _has_changed(old, new, param):
            old = old.get(param)
            new = getattr(new, param)
            return (new != old)

        old_conf = utils.stash_conf_values()
        has_changed = functools.partial(_has_changed, old_conf, CONF)
        CONF.reload_config_files()
        os.killpg(self.pgid, signal.SIGHUP)
        self.stale_children = self.children
        self.children = set()

        # Ensure any logging config changes are picked up
        logging.setup(CONF, 'glance')
        config.set_config_defaults()

        self.configure(old_conf, has_changed)
        self.start_wsgi()
Example #18
def main():
    CONF.register_cli_opt(category_opt)

    try:
        log.register_options(CONF)

        CONF(sys.argv[1:], project='rumster',
             version=version.version_info.version_string())

        log.setup(CONF, "rumster")
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_LI("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_LI('sudo failed, continuing as if nothing happened'))

        print(_LI('Please re-run rumster-manage as root.'))
        sys.exit(2)

    fn = CONF.category.action_fn

    fn_args = fetch_func_args(fn)
    fn(*fn_args)
Example #19
def prepare_service(argv=None, config_files=None, share=False):
    conf = cfg.ConfigOpts()
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)
    db_options.set_defaults(conf)
    if profiler_opts:
        profiler_opts.set_defaults(conf)
    if not share:
        defaults.set_cors_middleware_defaults()
        oslo_i18n.enable_lazy()
        log.register_options(conf)

    if argv is None:
        argv = sys.argv
    conf(argv[1:], project='panko', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    if not share:
        log.setup(conf, 'panko')
    profiler.setup(conf)
    # NOTE(liusheng): the guru meditation report cannot run when the service
    # is hosted by an Apache daemon, so when panko-api runs under mod_wsgi
    # the argv is [] and we do not start it.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    return conf
Example #20
    def test_log_config_append_disable_existing_loggers(self):
        self.config(log_config_append=self.log_config_append)
        with mock.patch('logging.config.fileConfig') as fileConfig:
            log.setup(self.CONF, 'test_log_config_append')

        fileConfig.assert_called_once_with(self.log_config_append,
                                           disable_existing_loggers=False)
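# For reference, the behaviour this test pins down: when log_config_append is
# set, oslo.log hands configuration over to the stdlib, effectively calling
#
#     logging.config.fileConfig(<log_config_append path>,
#                               disable_existing_loggers=False)
#
# so loggers created before setup() keep emitting afterwards.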
Example #21
def proxy(host, port, target_host=None):

    if CONF.ssl_only and not os.path.exists(CONF.cert):
        exit_with_error("SSL only and %s not found" % CONF.cert)

    # Check to see if tty html/js/css files are present
    if CONF.web and not os.path.exists(CONF.web):
        exit_with_error("Can not find html/js files at %s." % CONF.web)

    logging.setup(CONF, "nova")

    gmr.TextGuruMeditation.setup_autorun(version)

    # Create and start the NovaWebSockets proxy
    websocketproxy.NovaWebSocketProxy(
        listen_host=host,
        listen_port=port,
        source_is_ipv6=CONF.source_is_ipv6,
        verbose=CONF.verbose,
        cert=CONF.cert,
        key=CONF.key,
        ssl_only=CONF.ssl_only,
        daemon=CONF.daemon,
        record=CONF.record,
        traffic=CONF.verbose and not CONF.daemon,
        web=CONF.web,
        target_host=target_host,
        file_only=True,
        RequestHandlerClass=websocketproxy.NovaProxyRequestHandler
    ).start_server()
Example #22
def main():
    # TODO(tmckay): Work on restricting the options
    # pulled in by imports which show up in the help.
    # If we find a nice way to do this the calls to
    # unregister_extra_cli_opt() can be removed
    CONF(project="sahara")

    # For some reason, this is necessary to clear cached values
    # and re-read configs.  For instance, if this is not done
    # here the 'plugins' value will not reflect the value from
    # the config file on the command line
    CONF.reload_config_files()
    log.setup(CONF, "sahara")

    # If we have to enforce extra option checks, like one option
    # requires another, do it here
    extra_option_checks()

    # Since this may be scripted, record the command in the log
    # so a user can know exactly what was done
    LOG.info(_LI("Command: {command}").format(command=" ".join(sys.argv)))

    api.set_logger(LOG)
    api.set_conf(CONF)

    CONF.command.func()

    LOG.info(_LI("Finished {command}").format(command=CONF.command.name))
Example #23
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        print(_("\nOpenStack manila version: %(version)s\n") % {"version": version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print("\t%s" % category)
        sys.exit(2)

    try:
        log.register_options(CONF)
        CONF(sys.argv[1:], project="manila", version=version.version_string())
        log.setup(CONF, "manila")
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp("sudo", ["sudo", "-u", "#%s" % st.st_uid] + sys.argv)
            except Exception:
                print(_("sudo failed, continuing as if nothing happened"))

        print(_("Please re-run manila-manage as root."))
        sys.exit(2)

    fn = CONF.category.action_fn

    fn_args = fetch_func_args(fn)
    fn(*fn_args)
Example #24
def main(args=sys.argv[1:]):  # pragma: no cover
    log.register_options(CONF)
    CONF(args, project='ironic-inspector')
    debug = CONF.debug

    log.set_defaults(default_log_levels=[
        'urllib3.connectionpool=WARN',
        'keystonemiddleware.auth_token=WARN',
        'requests.packages.urllib3.connectionpool=WARN',
        ('ironicclient.common.http=INFO' if debug else
         'ironicclient.common.http=ERROR')])
    log.setup(CONF, 'ironic_inspector')

    app_kwargs = {'host': CONF.listen_address,
                  'port': CONF.listen_port}

    context = create_ssl_context()
    if context:
        app_kwargs['ssl_context'] = context

    init()
    try:
        app.run(**app_kwargs)
    finally:
        firewall.clean_up()
Example #25
def setup():
    global logger

    CONF(sys.argv[1:], project='example_rpc_server')

    log.setup(CONF, 'example_rpc_server')
    logger = log.getLogger(__name__)
Example #26
def main():
    cfg.parse_args(sys.argv)
    logging.setup(CONF, None)

    debug_utils.setup()

    # Patch 'thread' module if debug is disabled
    if not debug_utils.enabled():
        eventlet.monkey_patch(thread=True)

    from trove.guestagent import dbaas
    manager = dbaas.datastore_registry().get(CONF.datastore_manager)
    if not manager:
        msg = ("Manager class not registered for datastore manager %s" %
               CONF.datastore_manager)
        raise RuntimeError(msg)

    # rpc module must be loaded after decision about thread monkeypatching
    # because if thread module is not monkeypatched we can't use eventlet
    # executor from oslo_messaging library.
    from trove import rpc
    rpc.init(CONF)

    from trove.common.rpc import service as rpc_service
    from trove.common.rpc import version as rpc_version
    server = rpc_service.RpcService(
        manager=manager, host=CONF.guest_id,
        rpc_api_version=rpc_version.RPC_API_VERSION)

    launcher = openstack_service.launch(CONF, server)
    launcher.wait()
Example #27
def prepare_service(argv=None):
    if argv is None:
        argv = []
    log.register_options(CONF)
    config.parse_args(argv)
    config.set_config_defaults()
    log.setup(CONF, 'zun')
Example #28
def main():
    CONF.register_cli_opts(IMPORT_OPTS)
    try:
        log.register_options(CONF)
    except cfg.ArgsAlreadyParsedError:
        pass
    log.setup(CONF, 'storyboard')
    CONF(project='storyboard')

    # only_tags and exclude_tags are mutually exclusive
    if CONF.only_tags and CONF.exclude_tags:
        print('ERROR: only-tags and exclude-tags are mutually exclusive',
              file=sys.stderr)
        exit(1)

    # If the user requested an autoincrement value, set that before we start
    # importing things. Note that mysql will automatically set the
    # autoincrement to the next-available id equal to or larger than the
    # requested one.
    auto_increment = CONF.auto_increment
    if auto_increment:
        print('Setting stories.AUTO_INCREMENT to %d' % (auto_increment,))
        session = db_api.get_session(in_request=False)
        session.execute('ALTER TABLE stories AUTO_INCREMENT = %d;'
                        % (auto_increment,))

    if CONF.origin == 'launchpad':
        loader = LaunchpadLoader(CONF.from_project, CONF.to_project,
                                 set(CONF.only_tags), set(CONF.exclude_tags))
        loader.run()
    else:
        print('Unsupported import origin: %s' % CONF.origin)
        return
Example #29
def main():
    """The main function."""

    try:
        config.parse_args()
    except RuntimeError as e:
        sys.exit("ERROR: %s" % utils.exception_to_str(e))

    # Setup logging
    logging.setup(CONF, 'glance')

    if CONF.token:
        CONF.slavetoken = CONF.token
        CONF.mastertoken = CONF.token

    command = lookup_command(CONF.command)

    try:
        command(CONF, CONF.args)
    except TypeError as e:
        LOG.error(_LE(command.__doc__) % {'prog': command.__name__})  # noqa
        sys.exit("ERROR: %s" % utils.exception_to_str(e))
    except ValueError as e:
        LOG.error(_LE(command.__doc__) % {'prog': command.__name__})  # noqa
        sys.exit("ERROR: %s" % utils.exception_to_str(e))
Example #30
    def __init__(self, config,
                 disable_update_pre=False,
                 disable_update_post=False,
                 enable_chart_cleanup=False,
                 skip_pre_flight=False,
                 dry_run=False,
                 wait=False,
                 timeout=None,
                 debug=False):
        '''
        Initialize the Armada Engine and establish
        a connection to Tiller
        '''
        self.disable_update_pre = disable_update_pre
        self.disable_update_post = disable_update_post
        self.enable_chart_cleanup = enable_chart_cleanup
        self.skip_pre_flight = skip_pre_flight
        self.dry_run = dry_run
        self.wait = wait
        self.timeout = timeout
        self.config = yaml.safe_load(config)
        self.tiller = Tiller()
        self.debug = debug

        # Set debug value
        CONF.set_default('debug', self.debug)
        logging.setup(CONF, DOMAIN)
Example #31
def main():
    """Main method for syncing neutron networks and ports with ovn nb db.

    The utility syncs neutron db with ovn nb db.
    """
    conf = setup_conf()

    # if no config file is passed or no configuration options are passed
    # then load configuration from /etc/neutron/neutron.conf
    try:
        conf(project='neutron')
    except TypeError:
        LOG.error(
            _LE('Error parsing the configuration values. '
                'Please verify.'))
        return

    logging.setup(conf, 'neutron_ovn_db_sync_util')
    LOG.info(_LI('Started Neutron OVN db sync'))
    mode = ovn_config.get_ovn_neutron_sync_mode()
    if mode not in [ovn_db_sync.SYNC_MODE_LOG, ovn_db_sync.SYNC_MODE_REPAIR]:
        LOG.error(
            _LE('Invalid sync mode : ["%s"]. Should be "log" or '
                '"repair"'), mode)
        return

    # Validate and modify core plugin and ML2 mechanism drivers for syncing.
    if cfg.CONF.core_plugin.endswith('.Ml2Plugin'):
        cfg.CONF.core_plugin = (
            'networking_ovn.cmd.neutron_ovn_db_sync_util.Ml2Plugin')
        if 'ovn' not in cfg.CONF.ml2.mechanism_drivers:
            LOG.error(_LE('No "ovn" mechanism driver found : "%s".'),
                      cfg.CONF.ml2.mechanism_drivers)
            return
        cfg.CONF.set_override('mechanism_drivers', ['ovn-sync'], 'ml2')
        conf.service_plugins = ['networking_ovn.l3.l3_ovn.OVNL3RouterPlugin']
    else:
        LOG.error(_LE('Invalid core plugin : ["%s"].'), cfg.CONF.core_plugin)
        return

    try:
        ovn_api = impl_idl_ovn.OvsdbNbOvnIdl(None)
    except RuntimeError:
        LOG.error(_LE('Invalid --ovn-ovn_nb_connection parameter provided.'))
        return

    core_plugin = manager.NeutronManager.get_plugin()
    ovn_driver = core_plugin.mechanism_manager.mech_drivers['ovn-sync'].obj
    ovn_driver._nb_ovn = ovn_api

    synchronizer = ovn_db_sync.OvnNbSynchronizer(core_plugin, ovn_api, mode,
                                                 ovn_driver)

    ctx = context.get_admin_context()

    LOG.info(_LI('Syncing the networks and ports with mode : %s'), mode)
    try:
        synchronizer.sync_address_sets(ctx)
    except Exception:
        LOG.exception(
            _LE("Error syncing  the Address Sets. Check the "
                "--database-connection value again"))
        return
    try:
        synchronizer.sync_networks_ports_and_dhcp_opts(ctx)
    except Exception:
        LOG.exception(
            _LE("Error syncing  Networks, Ports and DHCP options "
                "for unknown reason please try again"))
        return
    try:
        synchronizer.sync_acls(ctx)
    except Exception:
        LOG.exception(
            _LE("Error syncing  ACLs for unknown "
                "reason please try again"))
        return
    try:
        synchronizer.sync_routers_and_rports(ctx)
    except Exception:
        LOG.exception(
            _LE("Error syncing  Routers and Router ports "
                "please try again"))
        return
    LOG.info(_LI('Sync completed'))
Example #32
def setup_logging():
    global LOG
    logging.setup(CONF, __name__)
    LOG = logging.getLogger(__name__)
Example #33
def main(argv=sys.argv[1:]):
    """Main Entry point into the akanda-rug

    This is the main entry point into the akanda-rug. On invocation of
    this method, logging and local network connectivity setup are performed.
    This information is obtained through the 'ak-config' file, passed as an
    argument to this method. Worker threads are spawned for handling
    various tasks that are associated with processing as well as
    responding to different Neutron events prior to starting a notification
    dispatch loop.

    :param argv: list of Command line arguments

    :returns: None

    :raises: None

    """
    # TODO(rama) Error Handling to be added as part of the docstring
    # description

    # Change the process and thread name so the logs are cleaner.
    p = multiprocessing.current_process()
    p.name = 'pmain'
    t = threading.current_thread()
    t.name = 'tmain'
    ak_cfg.parse_config(argv)
    log.setup(cfg.CONF, 'akanda-rug')
    cfg.CONF.log_opt_values(LOG, logging.INFO)

    neutron = neutron_api.Neutron(cfg.CONF)

    # TODO(mark): develop better way restore after machine reboot
    # neutron.purge_management_interface()

    # bring the mgt tap interface up
    neutron.ensure_local_service_port()

    # bring up the external port
    if cfg.CONF.plug_external_port:
        neutron.ensure_local_external_port()

    # Set up the queue to move messages between the eventlet-based
    # listening process and the scheduler.
    notification_queue = multiprocessing.Queue()

    # Ignore signals that might interrupt processing.
    daemon.ignore_signals()

    # If we see a SIGINT, stop processing.
    def _stop_processing(*args):
        notification_queue.put((None, None))

    signal.signal(signal.SIGINT, _stop_processing)

    # Listen for notifications.
    notification_proc = multiprocessing.Process(
        target=notifications.listen,
        kwargs={'notification_queue': notification_queue},
        name='notification-listener',
    )
    notification_proc.start()

    mgt_ip_address = neutron_api.get_local_service_ip(cfg.CONF).split('/')[0]
    metadata_proc = multiprocessing.Process(target=metadata.serve,
                                            args=(mgt_ip_address, ),
                                            name='metadata-proxy')
    metadata_proc.start()

    from akanda.rug.api import rug as rug_api
    rug_api_proc = multiprocessing.Process(target=rug_api.serve,
                                           args=(mgt_ip_address, ),
                                           name='rug-api')
    rug_api_proc.start()

    # Set up the notifications publisher
    Publisher = (notifications.Publisher if cfg.CONF.ceilometer.enabled else
                 notifications.NoopPublisher)
    publisher = Publisher(topic=cfg.CONF.ceilometer.topic, )

    # Set up a factory to make Workers that know how many threads to
    # run.
    worker_factory = functools.partial(worker.Worker, notifier=publisher)

    # Set up the scheduler that knows how to manage the routers and
    # dispatch messages.
    sched = scheduler.Scheduler(worker_factory=worker_factory, )

    # Prepopulate the workers with existing routers on startup
    populate.pre_populate_workers(sched)

    # Set up the periodic health check
    health.start_inspector(cfg.CONF.health_check_period, sched)

    # Block the main process, copying messages from the notification
    # listener to the scheduler
    try:
        shuffle_notifications(notification_queue, sched)
    finally:
        LOG.info(_LI('Stopping scheduler.'))
        sched.stop()
        LOG.info(_LI('Stopping notification publisher.'))
        publisher.stop()

        # Terminate the subprocesses
        for subproc in [notification_proc, metadata_proc, rug_api_proc]:
            LOG.info(_LI('Stopping %s.'), subproc.name)
            subproc.terminate()
Example #34
def clean_command(sql_url, min_num_days, do_clean_unassociated_projects,
                  do_soft_delete_expired_secrets, verbose, log_file):
    """Clean command to clean up the database.

    :param sql_url: sql connection string to connect to a database
    :param min_num_days: clean up soft deletions older than this date
    :param do_clean_unassociated_projects: If True, clean up
                                           unassociated projects
    :param do_soft_delete_expired_secrets: If True, soft delete secrets
                                           that have expired
    :param verbose: If True, log and print more information
    :param log_file: If set, override the log_file configured
    """
    if verbose:
        # The verbose flag prints out log events to the screen, otherwise
        # the log events will only go to the log file
        CONF.set_override('debug', True)

    if log_file:
        CONF.set_override('log_file', log_file)

    LOG.info("Cleaning up soft deletions in the barbican database")
    log.setup(CONF, 'barbican')

    cleanup_total = 0
    current_time = timeutils.utcnow()
    stop_watch = timeutils.StopWatch()
    stop_watch.start()
    try:
        if sql_url:
            CONF.set_override('sql_connection', sql_url)
        repo.setup_database_engine_and_factory()

        if do_clean_unassociated_projects:
            cleanup_total += cleanup_unassociated_projects()

        if do_soft_delete_expired_secrets:
            cleanup_total += soft_delete_expired_secrets(
                threshold_date=current_time)

        threshold_date = None
        if min_num_days >= 0:
            threshold_date = current_time - datetime.timedelta(
                days=min_num_days)
        else:
            threshold_date = current_time
        cleanup_total += cleanup_all(threshold_date=threshold_date)
        repo.commit()

    except Exception as ex:
        LOG.exception('Failed to clean up soft deletions in database.')
        repo.rollback()
        cleanup_total = 0  # rollback happened, no entries affected
        raise ex
    finally:
        stop_watch.stop()
        elapsed_time = stop_watch.elapsed()
        if verbose:
            CONF.clear_override('debug')

        if log_file:
            CONF.clear_override('log_file')
        repo.clear()

        if sql_url:
            CONF.clear_override('sql_connection')

        log.setup(CONF, 'barbican')  # reset the overrides

        LOG.info("Cleaning of database affected %s entries", cleanup_total)
        LOG.info('DB clean up finished in %s seconds', elapsed_time)
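# An illustrative invocation of clean_command (all argument values below are
# hypothetical, not from barbican):
#
#     clean_command(sql_url='mysql+pymysql://barbican:secret@localhost/barbican',
#                   min_num_days=90,
#                   do_clean_unassociated_projects=True,
#                   do_soft_delete_expired_secrets=True,
#                   verbose=True,
#                   log_file='/var/log/barbican/cleanup.log')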
Example #35
from oslo_config import cfg
from oslo_log import log as logging

from osc_lib.i18n import _
from osc_lib import utils

from tripleoclient import command
from tripleoclient import constants
from tripleoclient import utils as oooutils
from tripleoclient.workflows import package_update

CONF = cfg.CONF
logging.register_options(CONF)
logging.setup(CONF, '')


class ExternalUpgradeRun(command.Command):
    """Run external major upgrade Ansible playbook

       This will run the external major upgrade Ansible playbook,
       executing tasks from the undercloud. The upgrade playbooks are
       made available after completion of the 'overcloud upgrade
       prepare' command.

    """

    log = logging.getLogger(__name__ + ".ExternalUpgradeRun")

    def get_parser(self, prog_name):
Example #36
import fixtures
import tempfile
import testscenarios

from oslo_config import cfg
from oslo_log import log as logging
from oslotest import base

from blazar import context
from blazar.db.sqlalchemy import api as db_api
from blazar.db.sqlalchemy import facade_wrapper

cfg.CONF.set_override('use_stderr', False)

logging.setup(cfg.CONF, 'blazar')
_DB_CACHE = None


class Database(fixtures.Fixture):

    def setUp(self):
        super(Database, self).setUp()

        fd = tempfile.NamedTemporaryFile(delete=False)
        self.db_path = fd.name
        database_connection = 'sqlite:///' + self.db_path
        cfg.CONF.set_override('connection', str(database_connection),
                              group='database')
        facade_wrapper._clear_engine()
        self.engine = facade_wrapper.get_engine()
Example #37
def setup_logging(project=""):
    log.setup(CONF, project)
Example #38
import os
import sys

import fixtures
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import six
import testtools

from murano.cmd import test_runner

CONF = cfg.CONF
logging.register_options(CONF)
logging.setup(CONF, 'murano')


class TestCaseShell(testtools.TestCase):
    def setUp(self):
        super(TestCaseShell, self).setUp()
        self.auth_params = {'username': '******',
                            'password': '******',
                            'project_name': 'test',
                            'auth_url': 'http://localhost:5000'}
        self.args = ['test-runner.py']
        for k, v in six.iteritems(self.auth_params):
            k = '--os-' + k.replace('_', '-')
            self.args.extend([k, v])

        sys.stdout = six.StringIO()
Example #39
def config_and_logging(argv, default_config_files=None):
    prepare_logging()
    parse_args(argv, default_config_files=default_config_files)
    log.setup(cfg.CONF, "deepaas")
Example #40
def init():
    """Initialize logging. """
    product_name = "nfp"
    oslo_logging.setup(oslo_cfg.CONF, product_name)
Example #41
def setup_logging():
    product_name = "server"
    log.setup(oslo_cfg.CONF, product_name)
Example #42
import sys

import fixtures
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log import log as logging
import testtools

from ironic.common import config as ironic_config
from ironic.common import context as ironic_context
from ironic.common import hash_ring
from ironic.objects import base as objects_base
from ironic.tests.unit import policy_fixture


CONF = cfg.CONF
CONF.import_opt('host', 'ironic.common.service')
logging.register_options(CONF)
logging.setup(CONF, 'ironic')


class ReplaceModule(fixtures.Fixture):
    """Replace a module with a fake module."""

    def __init__(self, name, new_value):
        self.name = name
        self.new_value = new_value

    def _restore(self, old_value):
        sys.modules[self.name] = old_value

    def setUp(self):
        super(ReplaceModule, self).setUp()
        old_value = sys.modules.get(self.name)
Example #43
    def configure_logging(self):
        super(ArmadaApp, self).configure_logging()
        log.register_options(CONF)
        log.set_defaults(default_log_levels=CONF.default_log_levels)
        log.setup(CONF, 'armada')
Example #44
from jmilkfansblog.tasks import on_reminder_save
from jmilkfansblog.controllers import admin
from jmilkfansblog.i18n import _LI, _LE


LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DOMAIN = "jmilkfansblog"

# log_level_opts = [
#     cfg.BoolOpt('debug',
#                 default=True)]
# CONF.register_opts(log_level_opts)

logging.register_options(CONF)
logging.setup(CONF, DOMAIN)


def create_app(object_name):
    """Create the app instance via `Factory Method`"""

    LOG.info(_LI("Create the flask application object %s"), object_name)

    app = Flask(__name__)
    # Set the config for app instance
    app.config.from_object(object_name)
    
    #### Init the Flask-SQLAlchemy via the app object
    # Loads SQLALCHEMY_DATABASE_URI from config.py into the db object
    db.init_app(app)
    # Use SQLAlchemy's event system
Example #45
conf.register_group(sqlalchemy)
conf.register_cli_opts([
    cfg.BoolOpt('echo', default=True),
    cfg.BoolOpt('autoflush', default=True),
    cfg.IntOpt('pool_size', default=10),
    cfg.IntOpt('pool_recycle', default=3600)
], sqlalchemy)

# mysql
mysql = cfg.OptGroup(name='mysql', title="MySQL configuration")
conf.register_group(mysql)
conf.register_cli_opts([
    cfg.StrOpt('unitymob', default='localhost'),
], mysql)

# redis
redis = cfg.OptGroup(name='redis', title="Redis configuration")
conf.register_group(redis)
conf.register_cli_opts([
    cfg.StrOpt('host', default='127.0.0.1'),
    cfg.IntOpt('port', default=6379),
    cfg.IntOpt('db', default=0),
    cfg.StrOpt('password', default='123456'),
], redis)

conf(default_config_files=[
    join(dirname(__file__), '.'.join(['config', 'ini']))
])

logging.setup(conf, "unitymob")
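# Once conf() has parsed config.ini, the grouped options registered above are
# available as attributes; a usage sketch (values shown are the defaults):
#
#     print(conf.redis.host)       # '127.0.0.1'
#     print(conf.redis.port)       # 6379
#     print(conf.sqlalchemy.echo)  # True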
Example #46
import os

from oslo_config import cfg
from oslo_log import log as logging
import six
import sqlalchemy
import testtools

import storyboard.common.working_dir as working_dir
from storyboard.db.api import base as db_api_base
from storyboard.db.migration.cli import get_alembic_config
import storyboard.tests.mock_data as mock_data

CONF = cfg.CONF
_TRUE_VALUES = ('true', '1', 'yes')

_DB_CACHE = None

logging.register_options(CONF)
logging.setup(CONF, 'storyboard')
LOG = logging.getLogger(__name__)


class TestCase(testtools.TestCase):
    """Test case base class for all unit tests."""
    def setUp(self):
        """Run before each test method to initialize test environment."""

        super(TestCase, self).setUp()
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid do not set a timeout.
            test_timeout = 0
Example #47
def main():
    # FIXME: Split these up into separate components?
    #   host, port, username, password, database
    cli_opts = [
        cfg.SubCommandOpt('command',
                          title="Commands",
                          help="Available commands",
                          handler=add_subcommands),
        cfg.URIOpt('source',
                   required=False,
                   help='connection URL to the src DB server'),
        cfg.URIOpt('target',
                   required=False,
                   help='connection URL to the target server'),
        cfg.StrOpt('batch',
                   required=False,
                   help='YAML file containing connection URLs'),
        cfg.BoolOpt('exclude-deleted',
                    default=True,
                    help='Exclude table rows marked as deleted. '
                    'True by default.'),
        cfg.IntOpt('chunk-size',
                   default=10000,
                   min=0,
                   help='Number of records to move per chunk. Set to 0 to '
                   'disable, default is 10,000.')
    ]

    cfg.CONF.register_cli_opts(cli_opts)
    logging.register_options(cfg.CONF)
    logging.set_defaults()

    # read config and initialize logging
    cfg.CONF(project='psql2mysql')
    #    cfg.CONF.set_override("use_stderr", True)

    logging.setup(cfg.CONF, 'psql2mysql')

    # We expect batch file with this syntax:
    #
    # keystone:
    #   source: postgresql://keystone:[email protected]/keystone
    #   target: mysql+pymysql://keystone:[email protected]/keystone?charset=utf8
    # cinder:
    #   source: postgresql://cinder:[email protected]/cinder
    #   target:
    if cfg.CONF.batch:
        try:
            with open(cfg.CONF.batch, 'r') as f:
                for db_name, db in yaml.safe_load(f).items():
                    print('Processing database "%s"... ' % db_name)
                    check_source_schema(db['source'])
                    if db['target']:
                        check_target_schema(db['target'])
                    cfg.CONF.command.func(cfg, db['source'], db['target'])

        except Psql2MysqlRuntimeError as e:
            print(e, file=sys.stderr)
            sys.exit(1)
        except IOError:
            print('Batch file "%s" does not exist or cannot be read' %
                  cfg.CONF.batch)
            sys.exit(2)

        print("Batch processing done.")
        sys.exit(0)

    if not cfg.CONF.source:
        print("Source database was not specified.")
        sys.exit(1)

    check_source_schema(cfg.CONF.source)

    if cfg.CONF.target:
        check_target_schema(cfg.CONF.target)

    try:
        cfg.CONF.command.func(cfg, cfg.CONF.source, cfg.CONF.target)
    except Psql2MysqlRuntimeError as e:
        print(e, file=sys.stderr)
        sys.exit(1)
Example #48
from barbican.common import config
from barbican.model import models
from barbican.model import repositories as repo
from oslo_log import log
from oslo_utils import timeutils

from sqlalchemy import sql as sa_sql

import datetime

# Import and configure logging.
CONF = config.CONF
log.setup(CONF, 'barbican')
LOG = log.getLogger(__name__)


def cleanup_unassociated_projects():
    """Clean up unassociated projects.

    This looks for projects that have no children entries on the dependent
    tables and removes them.
    """
    LOG.debug("Cleaning up unassociated projects")
    session = repo.get_session()
    project_children_tables = [
        models.Order, models.KEKDatum, models.Secret,
        models.ContainerConsumerMetadatum, models.Container,
        models.PreferredCertificateAuthority, models.CertificateAuthority,
"""WSGI script for Nova API

EXPERIMENTAL support script for running Nova API under Apache2 etc.

"""

from oslo_config import cfg
from oslo_log import log as logging
from paste import deploy

from nova import config
from nova import objects
from nova import service  # noqa
from nova import utils

CONF = cfg.CONF

config_files = ['/etc/nova/api-paste.ini', '/etc/nova/nova.conf']
config.parse_args([], default_config_files=config_files)

logging.setup(CONF, "nova")
utils.monkey_patch()
objects.register_all()

conf = config_files[0]
name = "osapi_compute"

options = deploy.appconfig('config:%s' % conf, name=name)

application = deploy.loadapp('config:%s' % conf, name=name)
Example #50
def setup_logging():
    """Sets up the logging options for a log with supplied name."""
    product_name = "aim"
    logging.setup(cfg.CONF, product_name)
Example #51
from oslo_cache import core as cache
from oslo_config import cfg
from oslo_log import log as logging

from nova import context
from nova.db.main import api as db_api
from nova import exception
from nova import objects
from nova.objects import base as objects_base
from nova import quota
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import matchers
from nova import utils
from nova.virt import images

CONF = cfg.CONF

logging.register_options(CONF)
CONF.set_override('use_stderr', False)
logging.setup(CONF, 'nova')
cache.configure(CONF)
LOG = logging.getLogger(__name__)

_TRUE_VALUES = ('True', 'true', '1', 'yes')
CELL1_NAME = 'cell1'

# For compatibility with the large number of tests which use test.nested
nested = utils.nested_contexts


class TestingException(Exception):
    pass


# NOTE(claudiub): this needs to be called before any mock.patch calls are
Example #52
# -*- coding: utf-8 -*-
import sys
from webob import Request
# Import the configuration module
from oslo_config import cfg
# Import logging with request-context (call chain) support
from oslo_log import log as logging
from oslo_context import context
# Import the REST service helpers
from oslo_service import service
from oslo_service import wsgi

CONF = cfg.CONF
LOG = logging.getLogger(__name__)
logging.register_options(CONF)
logging.setup(CONF, "m19k")

# Mini service
class MiniService:
    def __init__(self, host = "0.0.0.0", port = "9000", workers = 1, use_ssl = False, cert_file = None, ca_file = None):
        self.host = host
        self.port = port
        self.workers = workers
        self.use_ssl = use_ssl
        self.cert_file = cert_file
        self.ca_file = ca_file
        self._actions = {}

    def add_action(self, url_path, action):
        if (url_path.lower() == "default") or (url_path == "/") or (url_path == ""):
            url_path = "default"
Example #53
def main():
    def actions(subparser):
        repo_path_help = 'SQLAlchemy Migrate repository path.'

        parser = subparser.add_parser(
            'db_sync', description='Populate the database structure')
        parser.add_argument('--repo_path', help=repo_path_help)

        parser = subparser.add_parser(
            'db_upgrade',
            description='Upgrade the database to the '
            'specified version.')
        parser.add_argument('--version',
                            help='Target version. Defaults to the '
                            'latest version.')
        parser.add_argument('--repo_path', help=repo_path_help)

        parser = subparser.add_parser(
            'db_downgrade',
            description='Downgrade the database to the '
            'specified version.')
        parser.add_argument('version', help='Target version.')
        parser.add_argument('--repo_path', help=repo_path_help)

        parser = subparser.add_parser(
            'datastore_update',
            description='Add or update a datastore. '
            'If the datastore already exists, the default version will be '
            'updated.')
        parser.add_argument('datastore_name', help='Name of the datastore.')
        parser.add_argument(
            'default_version',
            help='Name or ID of an existing datastore '
            'version to set as the default. When adding a new datastore, use '
            'an empty string.')

        parser = subparser.add_parser(
            'datastore_version_update',
            description='Add or update a '
            'datastore version. If the datastore version already exists, all '
            'values except the datastore name and version will be updated.')
        parser.add_argument('datastore', help='Name of the datastore.')
        parser.add_argument('version_name',
                            help='Name of the datastore version.')
        parser.add_argument(
            'manager',
            help='Name of the manager that will administer the '
            'datastore version.')
        parser.add_argument(
            'image_id',
            help='ID of the image used to create an instance of '
            'the datastore version.')
        parser.add_argument(
            'packages',
            help='Packages required by the datastore version that '
            'are installed on the guest image.')
        parser.add_argument(
            'active',
            help='Whether the datastore version is active or not. '
            'Accepted values are 0 and 1.')

        parser = subparser.add_parser(
            'db_recreate', description='Drop the database and recreate it.')
        parser.add_argument('--repo_path', help=repo_path_help)

        parser = subparser.add_parser(
            'db_load_datastore_config_parameters',
            description='Loads configuration group parameter validation rules '
            'for a datastore version into the database.')
        parser.add_argument('datastore', help='Name of the datastore.')
        parser.add_argument('datastore_version',
                            help='Name of the datastore version.')
        parser.add_argument(
            'config_file_location',
            help='Fully qualified file path to the configuration group '
            'parameter validation rules.')

        parser = subparser.add_parser(
            'datastore_version_flavor_add',
            help='Adds flavor association to '
            'a given datastore and datastore version.')
        parser.add_argument('datastore_name', help='Name of the datastore.')
        parser.add_argument('datastore_version_name',
                            help='Name of the '
                            'datastore version.')
        parser.add_argument('flavor_ids',
                            help='Comma separated list of '
                            'flavor ids.')

        parser = subparser.add_parser(
            'datastore_version_flavor_delete',
            help='Deletes a flavor '
            'associated with a given datastore and datastore version.')
        parser.add_argument('datastore_name', help='Name of the datastore.')
        parser.add_argument('datastore_version_name',
                            help='Name of the '
                            'datastore version.')
        parser.add_argument('flavor_id',
                            help='The flavor to be deleted for '
                            'a given datastore and datastore version.')

    cfg.custom_parser('action', actions)
    cfg.parse_args(sys.argv)

    try:
        logging.setup(CONF, None)

        Commands().execute()
        sys.exit(0)
    except TypeError as e:
        print(_("Possible wrong number of arguments supplied %s.") % e)
        sys.exit(2)
    except Exception:
        print(_("Command failed, please check log for more info."))
        raise
Example #54
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opt(category_opt)
    try:
        config.parse_args(sys.argv)
        logging.setup(CONF, "conveyor")
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_('sudo failed, continuing as if nothing happened'))

        print(_('Please re-run gw-manage as root.'))
        return (2)

    if CONF.category.name == "version":
        print(version.version_string_with_package())
        return (0)

    if CONF.category.name == "bash-completion":
        if not CONF.category.query_category:
            print(" ".join(CATEGORIES.keys()))
        elif CONF.category.query_category in CATEGORIES:
            fn = CATEGORIES[CONF.category.query_category]
            command_object = fn()
            actions = methods_of(command_object)
            print(" ".join([k for (k, v) in actions]))
        return (0)

    fn = CONF.category.action_fn
    fn_args = [arg.decode('utf-8') for arg in CONF.category.action_args]
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, 'action_kwarg_' + k)
        if v is None:
            continue
        if isinstance(v, six.string_types):
            v = v.decode('utf-8')
        fn_kwargs[k] = v

    # call the action with the remaining arguments
    # check arguments
    try:
        cliutils.validate_args(fn, *fn_args, **fn_kwargs)
    except cliutils.MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to know
        # if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print(e)
        return (1)
    try:
        ret = fn(*fn_args, **fn_kwargs)
        rpc.cleanup()
        return (ret)
    except Exception:
        print(_("Command failed, please check log for more info"))
        raise
Example #55
import sys
from oslo_config import cfg
from oslo_log import log as logging

CONF = cfg.CONF

logging.register_options(CONF)
logging.setup(CONF, 'demo')
LOG = logging.getLogger(__name__)

LOG.info("Oslo Info Logging")
LOG.warning("Oslo Warning Logging")
LOG.error("Oslo Error Logging")
LOG.info("%(key)s, %(age)d", {"key":"hello", "age":10})
Example #56
def setup_logging():
    logging.setup(CONF, 'vdibroker')
Example #57
def setup_logging():
    logging.setup(CONF, 'coriolis')
Example #58
def setup_logging():
    """Sets up logging for the congress package."""
    logging.setup(cfg.CONF, 'congress')
Example #59
def main(argv):
    oslo_config.CONF.register_opts(PROXY_OPTS, 'proxy')
    oslo_config.CONF(args=sys.argv[1:])
    oslo_logging.setup(oslo_config.CONF, 'nfp')
    conf = Configuration(oslo_config.CONF)
    Proxy(conf).start()
Example #60
from __future__ import absolute_import

import os

import fixtures
import mock
from oslo_config import cfg
from oslo_log import log as logging
import testtools

from deckhand.db.sqlalchemy import api as db_api

CONF = cfg.CONF
logging.register_options(CONF)
logging.setup(CONF, 'deckhand')


class DeckhandTestCase(testtools.TestCase):
    def setUp(self):
        super(DeckhandTestCase, self).setUp()
        self.useFixture(fixtures.FakeLogger('deckhand'))

    def override_config(self, name, override, group=None):
        CONF.set_override(name, override, group)
        self.addCleanup(CONF.clear_override, name, group)

    def assertEmpty(self, collection):
        if isinstance(collection, list):
            self.assertEqual(0, len(collection))
        elif isinstance(collection, dict):