Example 1
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        print(_("\nOpenStack manila version: %(version)s\n") % {"version": version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print("\t%s" % category)
        sys.exit(2)

    try:
        log.register_options(CONF)
        CONF(sys.argv[1:], project="manila", version=version.version_string())
        log.setup(CONF, "manila")
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp("sudo", ["sudo", "-u", "#%s" % st.st_uid] + sys.argv)
            except Exception:
                print(_("sudo failed, continuing as if nothing happened"))

        print(_("Please re-run manila-manage as root."))
        sys.exit(2)

    fn = CONF.category.action_fn

    fn_args = fetch_func_args(fn)
    fn(*fn_args)
Example 2
def parse_args(args=None, usage=None, default_config_files=None):
    logging.register_options(CONF)
    CONF(args=args,
         project='murano',
         version=version.version_string,
         usage=usage,
         default_config_files=default_config_files)
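
All of these examples share one ordering constraint: oslo.log's options must be registered on the ConfigOpts object before the command line is parsed, and log.setup() may only run after parsing. A minimal, self-contained sketch of that sequence (the 'example' project name is illustrative, not taken from any project above):

from oslo_config import cfg
from oslo_log import log

CONF = cfg.CONF

def init_logging(argv):
    log.register_options(CONF)     # must happen before CONF(...) parses argv
    CONF(argv, project='example')  # parse CLI args and any config files
    log.setup(CONF, 'example')     # safe now that the log options have values
    return log.getLogger(__name__)

if __name__ == '__main__':
    import sys
    LOG = init_logging(sys.argv[1:])
    LOG.info('logging initialized')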
Example 3
def main(args=sys.argv[1:]):  # pragma: no cover
    log.register_options(CONF)
    CONF(args, project='ironic-inspector')
    debug = CONF.debug

    log.set_defaults(default_log_levels=[
        'urllib3.connectionpool=WARN',
        'keystonemiddleware.auth_token=WARN',
        'requests.packages.urllib3.connectionpool=WARN',
        ('ironicclient.common.http=INFO' if debug else
         'ironicclient.common.http=ERROR')])
    log.setup(CONF, 'ironic_inspector')

    app_kwargs = {'host': CONF.listen_address,
                  'port': CONF.listen_port}

    context = create_ssl_context()
    if context:
        app_kwargs['ssl_context'] = context

    init()
    try:
        app.run(**app_kwargs)
    finally:
        firewall.clean_up()
Example 4
def prepare_service(argv=[], conf=cfg.CONF):
    log.register_options(conf)
    config.parse_args(argv)
    cfg.set_defaults(_options.log_opts,
                     default_log_levels=_DEFAULT_LOG_LEVELS)
    log.setup(conf, 'python-watcher')
    conf.log_opt_values(LOG, logging.DEBUG)
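
Example 4 uses argv=[] as a default argument; since the list is never mutated this happens to be safe, but Examples 3, 7, and 42 use the None-sentinel form instead, which avoids the shared-mutable-default pitfall entirely. A sketch of that form:

from oslo_config import cfg
from oslo_log import log

def prepare_service(argv=None, conf=cfg.CONF):
    # a fresh list per call instead of one list shared across all calls
    if argv is None:
        argv = []
    log.register_options(conf)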
Example 5
def main():
    CONF.register_cli_opt(category_opt)

    try:
        log.register_options(CONF)

        CONF(sys.argv[1:], project='rumster',
             version=version.version_info.version_string())

        log.setup(CONF, "rumster")
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_LI("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_LI('sudo failed, continuing as if nothing happened'))

        print(_LI('Please re-run rumster-manage as root.'))
        sys.exit(2)

    fn = CONF.category.action_fn

    fn_args = fetch_func_args(fn)
    fn(*fn_args)
Example 6
def new_config():
    conf = cfg.ConfigOpts()
    log.register_options(conf)
    conf.register_opts(context_opts)
    conf.register_opts(common_opts)
    conf.register_opts(host_opts)
    conf.register_opts(db_opts)
    conf.register_opts(_options.eventlet_backdoor_opts)
    conf.register_opts(_options.periodic_opts)

    conf.register_opts(_options.ssl_opts, "ssl")

    conf.register_group(retry_opt_group)
    conf.register_opts(retry_opts, group=retry_opt_group)

    conf.register_group(queue_opt_group)
    conf.register_opts(queue_opts, group=queue_opt_group)

    conf.register_group(ks_queue_opt_group)
    conf.register_opts(ks_queue_opts, group=ks_queue_opt_group)

    conf.register_group(quota_opt_group)
    conf.register_opts(quota_opts, group=quota_opt_group)

    return conf
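
Examples 6, 8, 9, and 13 build a private cfg.ConfigOpts() instead of using the global cfg.CONF, which keeps option registration isolated (useful in tests and libraries). A hedged sketch with a single illustrative option:

from oslo_config import cfg
from oslo_log import log

def new_config():
    conf = cfg.ConfigOpts()          # private instance, not the global CONF
    log.register_options(conf)
    conf.register_opts([cfg.StrOpt('host', default='localhost',
                                   help='illustrative option')])
    return conf

conf = new_config()
conf([], project='example')          # parse with no CLI arguments
assert conf.host == 'localhost'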
Example 7
def prepare_service(argv=None):
    if argv is None:
        argv = []
    log.register_options(CONF)
    config.parse_args(argv)
    config.set_config_defaults()
    log.setup(CONF, 'zun')
Example 8
    def _get_config_opts(self):
        config = cfg.ConfigOpts()
        config.register_opts(common_config.core_opts)
        config.register_opts(common_config.core_cli_opts)
        logging.register_options(config)
        agent_config.register_process_monitor_opts(config)
        return config
Example 9
def prepare_service(argv=None, config_files=None, share=False):
    conf = cfg.ConfigOpts()
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)
    db_options.set_defaults(conf)
    if profiler_opts:
        profiler_opts.set_defaults(conf)
    if not share:
        defaults.set_cors_middleware_defaults()
        oslo_i18n.enable_lazy()
        log.register_options(conf)

    if argv is None:
        argv = sys.argv
    conf(argv[1:], project='panko', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    if not share:
        log.setup(conf, 'panko')
    profiler.setup(conf)
    # NOTE(liusheng): guru cannot run with a service under the apache daemon,
    # so when panko-api is running with mod_wsgi the argv is [] and we don't
    # start guru.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    return conf
Example 10
def prepare_service(argv=None, config_files=None, conf=None):
    if argv is None:
        argv = sys.argv

    if conf is None:
        conf = cfg.ConfigOpts()

    oslo_i18n.enable_lazy()
    for group, options in opts.list_opts():
        conf.register_opts(list(options),
                           group=None if group == "DEFAULT" else group)
    keystone_client.register_keystoneauth_opts(conf)
    log.register_options(conf)
    log_levels = (conf.default_log_levels +
                  ['futurist=INFO', 'neutronclient=INFO',
                   'keystoneclient=INFO'])
    log.set_defaults(default_log_levels=log_levels)

    conf(argv[1:], project='ceilometer', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    keystone_client.post_register_keystoneauth_opts(conf)

    log.setup(conf, 'ceilometer')
    utils.setup_root_helper(conf)
    sample.setup(conf)

    gmr.TextGuruMeditation.setup_autorun(version)
    messaging.setup()
    return conf
Example 11
    def _log_setup(self):

        CONF = cfg.CONF

        self.set_request_context()

        DOMAIN = "masakari"
        CONF.log_file = self.conf_log.get("log_file")
        CONF.use_stderr = False

        logging.register_options(CONF)
        logging.setup(CONF, DOMAIN)

        log_dir = os.path.dirname(self.conf_log.get("log_file"))

        # create the log dir if it does not already exist
        try:
            os.makedirs(log_dir)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(log_dir):
                pass
            else:
                raise

        return
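
The EEXIST try/except in Example 11 predates Python 3; on Python 3, os.makedirs accepts exist_ok=True and the whole block collapses to one call. A sketch:

import os

def ensure_log_dir(log_file):
    # equivalent to the try/except above: a no-op if the directory exists
    os.makedirs(os.path.dirname(log_file), exist_ok=True)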
Example 12
def parse_args_with_log(project, argv=None, version=None, conf=None, log=True,
                        default_config_files=None, default_log_format=None,
                        default_log_levels=None):

    conf = conf if conf else cfg.CONF
    argv = argv if argv else sys.argv[1:]

    if not log:
        conf(argv, project=project, version=version,
             default_config_files=default_config_files)
        return

    from oslo_log import log

    if project not in _ROOTS:
        _DEFAULT_LOG_LEVELS.append('%s=INFO' % project)
        _ROOTS.append(project)
    log_fmt = default_log_format if default_log_format else _DEFAULT_LOG_FORMAT
    log_lvl = default_log_levels if default_log_levels else _DEFAULT_LOG_LEVELS

    log.set_defaults(log_fmt, log_lvl)
    log.register_options(conf)

    # TODO: configure options for the other libraries; this must be done
    # before the configuration files are parsed.

    conf(argv, project=project, version=version,
         default_config_files=default_config_files)

    log.setup(conf, project, version)
Example 13
def new_config():
    conf = cfg.ConfigOpts()
    log.register_options(conf)
    conf.register_opts(context_opts)
    conf.register_opts(common_opts)
    conf.register_opts(host_opts)
    conf.register_opts(db_opts)
    conf.register_opts(_options.eventlet_backdoor_opts)
    conf.register_opts(_options.periodic_opts)

    conf.register_opts(_options.ssl_opts, "ssl")

    conf.register_group(retry_opt_group)
    conf.register_opts(retry_opts, group=retry_opt_group)

    conf.register_group(queue_opt_group)
    conf.register_opts(queue_opts, group=queue_opt_group)

    conf.register_group(ks_queue_opt_group)
    conf.register_opts(ks_queue_opts, group=ks_queue_opt_group)

    conf.register_group(quota_opt_group)
    conf.register_opts(quota_opts, group=quota_opt_group)

    # Update default values from libraries that carry their own oslo.config
    # initialization and configuration.
    set_middleware_defaults()

    return conf
Example 14
def api_app(conf):
    log.set_defaults(constant.KILOEYES_LOGGING_CONTEXT_FORMAT,
                     constant.KILOEYES_LOG_LEVELS)
    log.register_options(cfg.CONF)

    if conf.get('name'):
        name = conf.get('name')
    else:
        name = 'kiloeyes'

    cfg.CONF(args=[], project=name)

    log.setup(cfg.CONF, name)

    dispatcher_manager = named.NamedExtensionManager(
        namespace=namespace.DISPATCHER_NS,
        names=cfg.CONF.dispatcher,
        invoke_on_load=True,
        invoke_args=[cfg.CONF])

    if not list(dispatcher_manager):
        LOG.error('Failed to load any dispatchers for %s' %
                  namespace.DISPATCHER_NS)
        return None

    # Create the application
    app = resource_api.ResourceAPI()

    # add each dispatcher to the application to serve requests offered by
    # each dispatcher
    for driver in dispatcher_manager:
        app.add_route(None, driver.obj)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
Example 15
def prepare_service(argv=None, config_files=None, conf=None):
    if argv is None:
        argv = sys.argv

    # FIXME(sileht): Use ConfigOpts() instead
    if conf is None:
        conf = cfg.CONF

    oslo_i18n.enable_lazy()
    log.register_options(conf)
    log_levels = (conf.default_log_levels +
                  ['futurist=INFO', 'neutronclient=INFO',
                   'keystoneclient=INFO'])
    log.set_defaults(default_log_levels=log_levels)
    defaults.set_cors_middleware_defaults()
    policy_opts.set_defaults(conf)

    conf(argv[1:], project='ceilometer', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    ka_loading.load_auth_from_conf_options(conf, "service_credentials")

    log.setup(conf, 'ceilometer')
    # NOTE(liusheng): guru cannot run with a service under the apache daemon,
    # so when ceilometer-api is running with mod_wsgi the argv is [] and we
    # don't start guru.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    messaging.setup()
    return conf
Example 16
def launch(conf, config_file="/etc/monasca/events_api.conf"):
    log.register_options(cfg.CONF)
    log.set_defaults()
    cfg.CONF(args=[],
             project='monasca_events_api',
             default_config_files=[config_file])
    log.setup(cfg.CONF, 'monasca_events_api')

    app = falcon.API()

    versions = simport.load(cfg.CONF.dispatcher.versions)()
    app.add_route("/", versions)
    app.add_route("/{version_id}", versions)

    events = simport.load(cfg.CONF.dispatcher.events)()
    app.add_route("/v2.0/events", events)
    app.add_route("/v2.0/events/{event_id}", events)

    streams = simport.load(cfg.CONF.dispatcher.stream_definitions)()
    app.add_route("/v2.0/stream-definitions/", streams)
    app.add_route("/v2.0/stream-definitions/{stream_id}", streams)

    transforms = simport.load(cfg.CONF.dispatcher.transforms)()
    app.add_route("/v2.0/transforms", transforms)
    app.add_route("/v2.0/transforms/{transform_id}", transforms)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
Example 17
def parse_args(argv, default_config_files=None, configure_db=True,
               init_rpc=True):
    log.register_options(CONF)
    # We use the oslo.log default log levels which includes suds=INFO
    # and add only the extra levels that Nova needs
    if CONF.glance.debug:
        extra_default_log_levels = ['glanceclient=DEBUG']
    else:
        extra_default_log_levels = ['glanceclient=WARN']
    log.set_defaults(default_log_levels=log.get_default_log_levels() +
                     extra_default_log_levels)
    rpc.set_defaults(control_exchange='nova')
    if profiler:
        profiler.set_defaults(CONF)
    config.set_middleware_defaults()

    CONF(argv[1:],
         project='nova',
         version=version.version_string(),
         default_config_files=default_config_files)

    if init_rpc:
        rpc.init(CONF)

    if configure_db:
        sqlalchemy_api.configure(CONF)
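
Examples 3, 10, 15, and 17 all extend the default log levels before setup; get_default_log_levels() returns oslo.log's built-in list, to which extra 'logger=LEVEL' strings are appended. A minimal sketch (the urllib3 entry is illustrative):

from oslo_config import cfg
from oslo_log import log

CONF = cfg.CONF
log.register_options(CONF)
log.set_defaults(default_log_levels=log.get_default_log_levels() +
                 ['urllib3.connectionpool=WARN'])
CONF([], project='example')
log.setup(CONF, 'example')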
Example 18
def main():
    logging.register_options(cfg.CONF)
    cfg.CONF(project='heat', prog='heat-engine',
             version=version.version_info.version_string())
    logging.setup(cfg.CONF, 'heat-engine')
    logging.set_defaults()
    messaging.setup()

    config.startup_sanity_check()

    mgr = None
    try:
        mgr = template._get_template_extension_manager()
    except template.TemplatePluginNotRegistered as ex:
        LOG.critical(_LC("%s"), ex)
    if not mgr or not mgr.names():
        sys.exit("ERROR: No template format plugins registered")

    from heat.engine import service as engine  # noqa

    profiler.setup('heat-engine', cfg.CONF.host)
    gmr.TextGuruMeditation.setup_autorun(version)
    srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)
    workers = cfg.CONF.num_engine_workers
    if not workers:
        workers = max(4, processutils.get_worker_count())

    launcher = service.launch(cfg.CONF, srv, workers=workers)
    if cfg.CONF.enable_cloud_watch_lite:
        # We create the periodic tasks here, which means they are created
        # only in the parent process when num_engine_workers>1 is specified
        srv.create_periodic_tasks()
    launcher.wait()
Example 19
def main():
    try:
        logging.register_options(cfg.CONF)
        cfg.CONF(project='heat',
                 prog='heat-api-cloudwatch',
                 version=version.version_info.version_string())
        logging.setup(cfg.CONF, 'heat-api-cloudwatch')
        logging.set_defaults()
        messaging.setup()

        app = config.load_paste_app()

        port = cfg.CONF.heat_api_cloudwatch.bind_port
        host = cfg.CONF.heat_api_cloudwatch.bind_host
        LOG.info(_LI('Starting Heat CloudWatch API on %(host)s:%(port)s'),
                 {'host': host, 'port': port})
        profiler.setup('heat-api-cloudwatch', host)
        gmr.TextGuruMeditation.setup_autorun(version)
        server = wsgi.Server('heat-api-cloudwatch',
                             cfg.CONF.heat_api_cloudwatch)
        server.start(app, default_port=port)
        systemd.notify_once()
        server.wait()
    except RuntimeError as e:
        msg = six.text_type(e)
        sys.exit("ERROR: %s" % msg)
Example 20
def main():
    logging.register_options(cfg.CONF)
    cfg.CONF(sys.argv[1:], project='magnum')
    logging.setup(cfg.CONF, 'magnum')

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    cfg.CONF.import_opt('topic', 'magnum.conductor.config', group='conductor')

    conductor_id = short_id.generate_id()
    endpoints = [
        docker_conductor.Handler(),
        k8s_conductor.Handler(),
        bay_conductor.Handler(),
        conductor_listener.Handler(),
    ]

    if (not os.path.isfile(cfg.CONF.bay.k8s_atomic_template_path)
            and not os.path.isfile(cfg.CONF.bay.k8s_coreos_template_path)):
        LOG.error(_LE("The Heat template can not be found for either k8s "
                      "atomic %(atomic_template)s or coreos "
                      "(coreos_template)%s. Install template first if you "
                      "want to create bay.") %
                  {'atomic_template': cfg.CONF.bay.k8s_atomic_template_path,
                   'coreos_template': cfg.CONF.bay.k8s_coreos_template_path})

    server = service.Service(cfg.CONF.conductor.topic,
                             conductor_id, endpoints)
    server.serve()
Example 21
    def _setUp(self):
        log.register_options(cfg.CONF)
        CONF.set_default('host', 'fake-mini')
        CONF.set_default('connection', "sqlite://", group='database')
        CONF.set_default('sqlite_synchronous', False, group='database')
        config.parse_args([], default_config_files=[])
        self.addCleanup(CONF.reset)
Example 22
def main():
    CONF.register_cli_opt(command_opt)
    try:
        logging.register_options(CONF)
        cfg_files = cfg.find_config_files(project='glance',
                                          prog='glance-registry')
        cfg_files.extend(cfg.find_config_files(project='glance',
                                               prog='glance-api'))
        cfg_files.extend(cfg.find_config_files(project='glance',
                                               prog='glance-manage'))
        config.parse_args(default_config_files=cfg_files,
                          usage="%(prog)s [options] <cmd>")
        logging.setup(CONF, 'glance')
    except RuntimeError as e:
        sys.exit("ERROR: %s" % e)

    try:
        if CONF.command.action.startswith('db'):
            return CONF.command.action_fn()
        else:
            func_kwargs = {}
            for k in CONF.command.action_kwargs:
                v = getattr(CONF.command, 'action_kwarg_' + k)
                if v is None:
                    continue
                if isinstance(v, six.string_types):
                    v = encodeutils.safe_decode(v)
                func_kwargs[k] = v
            func_args = [encodeutils.safe_decode(arg)
                         for arg in CONF.command.action_args]
            return CONF.command.action_fn(*func_args, **func_kwargs)
    except exception.GlanceException as e:
        sys.exit("ERROR: %s" % utils.exception_to_str(e))
Example 23
def parse_args(argv=None, config_file=None):
    """Loads application configuration.

    Loads entire application configuration just once.

    """
    global _CONF_LOADED
    if _CONF_LOADED:
        LOG.debug('Configuration has been already loaded')
        return

    log.set_defaults()
    log.register_options(CONF)

    argv = (argv if argv is not None else sys.argv[1:])
    args = ([] if _is_running_under_gunicorn() else argv or [])
    config_file = (_get_deprecated_config_file()
                   if config_file is None else config_file)

    CONF(args=args,
         prog='api',
         project='monasca',
         version=version.version_str,
         default_config_files=[config_file] if config_file else None,
         description='RESTful API for alarming in the cloud')

    log.setup(CONF,
              product_name='monasca-api',
              version=version.version_str)
    conf.register_opts()

    _CONF_LOADED = True
Example 24
def main():
    CONF.register_cli_opts([
        cfg.Opt('os-username'),
        cfg.Opt('os-password'),
        cfg.Opt('os-auth-url'),
        cfg.Opt('os-tenant-name'),
        ])
    try:
        logging.register_options(CONF)
        cfg_files = cfg.find_config_files(project='glance',
                                          prog='glance-api')
        cfg_files.extend(cfg.find_config_files(project='glance',
                                               prog='glance-search'))
        config.parse_args(default_config_files=cfg_files)
        logging.setup(CONF, 'glance')

        namespace = 'glance.search.index_backend'
        ext_manager = stevedore.extension.ExtensionManager(
            namespace, invoke_on_load=True)
        for ext in ext_manager.extensions:
            try:
                ext.obj.setup()
            except Exception as e:
                LOG.error(_LE("Failed to setup index extension "
                              "%(ext)s: %(e)s") % {'ext': ext.name,
                                                   'e': e})
    except RuntimeError as e:
        sys.exit("ERROR: %s" % e)
Example 25
def set_default_for_default_log_levels():
    extra_log_level_defaults = [
    ]

    log.register_options(CONF)
    CONF.set_default("default_log_levels",
                     CONF.default_log_levels + extra_log_level_defaults)
Example 26
def launch_engine(setup_logging=True):
    if setup_logging:
        logging.register_options(cfg.CONF)
    cfg.CONF(project='heat', prog='heat-engine',
             version=version.version_info.version_string())
    if setup_logging:
        logging.setup(cfg.CONF, 'heat-engine')
        logging.set_defaults()
    messaging.setup()

    config.startup_sanity_check()

    mgr = None
    try:
        mgr = template._get_template_extension_manager()
    except template.TemplatePluginNotRegistered as ex:
        LOG.critical("%s", ex)
    if not mgr or not mgr.names():
        sys.exit("ERROR: No template format plugins registered")

    from heat.engine import service as engine  # noqa

    profiler.setup('heat-engine', cfg.CONF.host)
    gmr.TextGuruMeditation.setup_autorun(version)
    srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)
    workers = cfg.CONF.num_engine_workers
    if not workers:
        workers = max(4, processutils.get_worker_count())

    launcher = service.launch(cfg.CONF, srv, workers=workers,
                              restart_method='mutate')
    return launcher
Example 27
def main():
    CONF.register_cli_opts(IMPORT_OPTS)
    try:
        log.register_options(CONF)
    except cfg.ArgsAlreadyParsedError:
        pass
    log.setup(CONF, 'storyboard')
    CONF(project='storyboard')

    # only_tags and exclude_tags are mutually exclusive
    if CONF.only_tags and CONF.exclude_tags:
        print('ERROR: only-tags and exclude-tags are mutually exclusive',
              file=sys.stderr)
        exit(1)

    # If the user requested an autoincrement value, set that before we start
    # importing things. Note that mysql will automatically set the
    # autoincrement to the next-available id equal to or larger than the
    # requested one.
    auto_increment = CONF.auto_increment
    if auto_increment:
        print('Setting stories.AUTO_INCREMENT to %d' % (auto_increment,))
        session = db_api.get_session(in_request=False)
        session.execute('ALTER TABLE stories AUTO_INCREMENT = %d;'
                        % (auto_increment,))

    if CONF.origin == 'launchpad':
        loader = LaunchpadLoader(CONF.from_project, CONF.to_project,
                                 set(CONF.only_tags), set(CONF.exclude_tags))
        loader.run()
    else:
        print('Unsupported import origin: %s' % CONF.origin)
        return
Example 28
def subscribe():
    try:
        log.register_options(CONF)
    except cfg.ArgsAlreadyParsedError:
        pass

    log.setup(CONF, 'storyboard')
    CONF(project='storyboard')
    CONF.register_opts(NOTIFICATION_OPTS, "notifications")

    subscriber = Subscriber(CONF.notifications)
    subscriber.start()

    manager = enabled.EnabledExtensionManager(
        namespace='storyboard.plugin.worker',
        check_func=check_enabled,
        invoke_on_load=True,
        invoke_args=(CONF,)
    )

    while subscriber.started:
        (method, properties, body) = subscriber.get()

        if not method or not properties:
            LOG.debug(_("No messages available, sleeping for 5 seconds."))
            time.sleep(5)
            continue

        manager.map(handle_event, body)

        # Ack the message
        subscriber.ack(method.delivery_tag)
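
Examples 27 and 28 wrap log.register_options() in a try/except for cfg.ArgsAlreadyParsedError, making the call safe to repeat after the command line has already been parsed. A sketch of the same idempotent guard, assuming the global cfg.CONF:

from oslo_config import cfg
from oslo_log import log

def register_log_options_once(conf=cfg.CONF):
    # a second registration after parsing raises ArgsAlreadyParsedError;
    # treat it as "already registered" and move on
    try:
        log.register_options(conf)
    except cfg.ArgsAlreadyParsedError:
        pass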
Example 29
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        print(_("\nOpenStack manila version: %(version)s\n") %
              {'version': version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print("\t%s" % category)
        sys.exit(2)

    try:
        log.register_options(CONF)
        CONF(sys.argv[1:], project='manila',
             version=version.version_string())
        log.setup(CONF, "manila")
    except cfg.ConfigFilesNotFoundError as e:
        cfg_files = e.config_files
        print(_("Failed to read configuration file(s): %s") % cfg_files)
        sys.exit(2)

    fn = CONF.category.action_fn

    fn_args = fetch_func_args(fn)
    fn(*fn_args)
Example 30
    def __init__(self, parse_conf=True, config_path=None):
        """Initialize a configuration from a conf directory and conf file."""
        super(TempestConfigPrivate, self).__init__()
        config_files = []
        failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE

        if config_path:
            path = config_path
        else:
            # Environment variables override defaults...
            conf_dir = os.environ.get("TEMPEST_CONFIG_DIR", self.DEFAULT_CONFIG_DIR)
            conf_file = os.environ.get("TEMPEST_CONFIG", self.DEFAULT_CONFIG_FILE)

            path = os.path.join(conf_dir, conf_file)

        if not os.path.isfile(path):
            path = failsafe_path

        # only parse the config file if we expect one to exist. This is needed
        # to remove an issue with the config file up to date checker.
        if parse_conf:
            config_files.append(path)
        logging.register_options(_CONF)
        if os.path.isfile(path):
            _CONF([], project="tempest", default_config_files=config_files)
        else:
            _CONF([], project="tempest")
        logging.setup(_CONF, "tempest")
        LOG = logging.getLogger("tempest")
        LOG.info("Using tempest config file %s" % path)
        register_opts()
        self._set_attrs()
        if parse_conf:
            _CONF.log_opt_values(LOG, std_logging.DEBUG)
Example 31
def register_cli_opts():
    cfg.CONF.register_cli_opts(core_cli_opts)
    logging.register_options(cfg.CONF)
Example 32
    def setUp(self):
        super(TestDvrRouterOperations, self).setUp()
        mock.patch('eventlet.spawn').start()
        self.conf = agent_config.setup_conf()
        self.conf.register_opts(base_config.core_opts)
        log.register_options(self.conf)
        self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
        self.conf.register_opts(l3_config.OPTS)
        self.conf.register_opts(ha.OPTS)
        agent_config.register_interface_driver_opts_helper(self.conf)
        agent_config.register_use_namespaces_opts_helper(self.conf)
        agent_config.register_process_monitor_opts(self.conf)
        self.conf.register_opts(interface.OPTS)
        self.conf.register_opts(external_process.OPTS)
        self.conf.set_override('router_id', 'fake_id')
        self.conf.set_override('interface_driver',
                               'neutron.agent.linux.interface.NullDriver')
        self.conf.set_override('send_arp_for_ha', 1)
        self.conf.set_override('state_path', '')

        self.device_exists_p = mock.patch(
            'neutron.agent.linux.ip_lib.device_exists')
        self.device_exists = self.device_exists_p.start()

        self.ensure_dir = mock.patch('neutron.agent.linux.utils'
                                     '.ensure_dir').start()

        mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
                   '.get_full_config_file_path').start()

        self.utils_exec_p = mock.patch('neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()

        self.utils_replace_file_p = mock.patch(
            'neutron.agent.linux.utils.replace_file')
        self.utils_replace_file = self.utils_replace_file_p.start()

        self.external_process_p = mock.patch(
            'neutron.agent.linux.external_process.ProcessManager')
        self.external_process = self.external_process_p.start()
        self.process_monitor = mock.patch(
            'neutron.agent.linux.external_process.ProcessMonitor').start()

        self.send_adv_notif_p = mock.patch(
            'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
        self.send_adv_notif = self.send_adv_notif_p.start()

        self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
        driver_cls = self.dvr_cls_p.start()
        self.mock_driver = mock.MagicMock()
        self.mock_driver.DEV_NAME_LEN = (
            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
        driver_cls.return_value = self.mock_driver

        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
        ip_cls = self.ip_cls_p.start()
        self.mock_ip = mock.MagicMock()
        ip_cls.return_value = self.mock_ip

        ip_rule = mock.patch('neutron.agent.linux.ip_lib.IPRule').start()
        self.mock_rule = mock.MagicMock()
        ip_rule.return_value = self.mock_rule

        ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
        self.mock_ip_dev = mock.MagicMock()
        ip_dev.return_value = self.mock_ip_dev

        self.l3pluginApi_cls_p = mock.patch(
            'neutron.agent.l3.agent.L3PluginApi')
        l3pluginApi_cls = self.l3pluginApi_cls_p.start()
        self.plugin_api = mock.MagicMock()
        l3pluginApi_cls.return_value = self.plugin_api

        self.looping_call_p = mock.patch(
            'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
        self.looping_call_p.start()

        subnet_id_1 = _uuid()
        subnet_id_2 = _uuid()
        self.snat_ports = [{
            'subnets': [{
                'cidr': '152.2.0.0/16',
                'gateway_ip': '152.2.0.1',
                'id': subnet_id_1
            }],
            'network_id':
            _uuid(),
            'device_owner':
            'network:router_centralized_snat',
            'mac_address':
            'fa:16:3e:80:8d:80',
            'fixed_ips': [{
                'subnet_id': subnet_id_1,
                'ip_address': '152.2.0.13',
                'prefixlen': 16
            }],
            'id':
            _uuid(),
            'device_id':
            _uuid()
        }, {
            'subnets': [{
                'cidr': '152.10.0.0/16',
                'gateway_ip': '152.10.0.1',
                'id': subnet_id_2
            }],
            'network_id':
            _uuid(),
            'device_owner':
            'network:router_centralized_snat',
            'mac_address':
            'fa:16:3e:80:8d:80',
            'fixed_ips': [{
                'subnet_id': subnet_id_2,
                'ip_address': '152.10.0.13',
                'prefixlen': 16
            }],
            'id':
            _uuid(),
            'device_id':
            _uuid()
        }]

        self.ri_kwargs = {
            'agent_conf': self.conf,
            'interface_driver': self.mock_driver
        }

        self._callback_manager = manager.CallbacksManager()
        mock.patch.object(registry,
                          '_get_callback_manager',
                          return_value=self._callback_manager).start()
Example 33
ks_loading.register_auth_conf_options(cfg.CONF, NOVA_CONF_SECTION)
ks_loading.register_session_conf_options(cfg.CONF, NOVA_CONF_SECTION)

# Register the nova configuration options
common_config.register_nova_opts()

ks_loading.register_auth_conf_options(cfg.CONF,
                                      common_config.PLACEMENT_CONF_SECTION)
ks_loading.register_session_conf_options(cfg.CONF,
                                         common_config.PLACEMENT_CONF_SECTION)

# Register the placement configuration options
common_config.register_placement_opts()

logging.register_options(cfg.CONF)


def init(args, default_config_files=None, **kwargs):
    cfg.CONF(args=args,
             project='neutron',
             version='%%(prog)s %s' % version.version_info.release_string(),
             default_config_files=default_config_files,
             **kwargs)

    n_rpc.init(cfg.CONF)

    # Validate that the base_mac is of the correct format
    msg = validators.validate_regex(cfg.CONF.base_mac, validators.MAC_PATTERN)
    if msg:
        msg = _("Base MAC: %s") % msg
Example 34
def register_reconn_opts():
    '''Register oslo logger opts & CLI opts,
    and reconn default opts & CLI opts.
    '''

    global _default_action_message_format
    _default_message_format_help = "Default format of message for all " \
        "survey action group. Message will be composed " \
        "of this format on matched pattern. " \
        "Variables within {} will be substituted " \
        "with its value. These variables should " \
        "be part of msg_user_data option. Fields " \
        "{timestamp}, {line} and {matched_pattern} " \
        "are computed. Field {name} is substituted " \
        "by the value defined for parameter name " \
        "of matching survey config group. " \
        "Rest all characters will be sent as it is in message. " \
        "Logging { or } requires escape by doubling " \
        "{{, }}. Defaults to :" + _default_action_message_format

    logging.register_options(CONF)

    reconn_opts = [
        cfg.StrOpt('target_file',
                   required=True,
                   default=None,
                   help='Absolute file path of console.log '
                        'of a VM instance, RECONN is supposed '
                        'to stream read and look out for VM '
                        'boot up stage'),

        cfg.IntOpt('timeout',
                   default=20,
                   help='terminate reconn after timeout minutes. '
                        'Defaults to 20 minutes'),

        cfg.StrOpt('survey_action_message_format',
                   default=_default_action_message_format,
                   help=_default_message_format_help),

        cfg.DictOpt('msg_user_data',
                    default={},
                    help="User data is a set of key:value pairs, where the "
                         "key is looked up in survey_action_message_format "
                         "string within {} and it is substituted with the "
                         "value. This helps in forming "
                         "custom message to be sent to RMQ"),

        cfg.StrOpt('end_reconn',
                   default=None,
                   help='A [CONFIG] group name that defines a '
                        'parameter called "pattern", which is a '
                        'regular expression that will be looked '
                        'out in file. On encountering end reconn '
                        'pattern, reconn will be stopped'),

        cfg.StrOpt('survey_group',
                   required=True,
                   default=None,
                   help='Survey pattern groups name'),
    ]

    CONF.register_opts(reconn_opts)

    CONF.register_cli_opts(reconn_opts[:-2])
Example 35
    $ gunicorn zaqar.transport.wsgi.app:app

NOTE: As for external containers, it is necessary
to put config files in the standard paths. There's
no common way to specify / pass configuration files
to the WSGI app when it is called from other apps.
"""

from oslo_config import cfg
from oslo_log import log
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts

from zaqar import bootstrap
from zaqar import version

# Use the global CONF instance
conf = cfg.CONF
gmr_opts.set_defaults(conf)
log.register_options(conf)
conf(project='zaqar', prog='zaqar-queues', args=[])
log.setup(conf, 'zaqar')

gmr.TextGuruMeditation.setup_autorun(version, conf=conf)

boot = bootstrap.Bootstrap(conf)
conf.drivers.transport = 'wsgi'
application = boot.transport.app
# Keep the old name for compatibility
app = application
Example 36
def parse_args(argv, default_config_files=None):
    log.register_options(CONF)
    CONF(argv[1:],
         project='prometheus-alertmanager-dingtalk',
         version='0.1',
         default_config_files=default_config_files)
Example 37
    def setUp(self):
        super(TestDvrRouterOperations, self).setUp()
        mock.patch('eventlet.spawn').start()
        self.conf = agent_config.setup_conf()
        self.conf.register_opts(base_config.core_opts)
        log.register_options(self.conf)
        self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
        l3_config.register_l3_agent_config_opts(l3_config.OPTS, self.conf)
        ha_conf.register_l3_agent_ha_opts(self.conf)
        agent_config.register_interface_driver_opts_helper(self.conf)
        agent_config.register_process_monitor_opts(self.conf)
        agent_config.register_interface_opts(self.conf)
        agent_config.register_external_process_opts(self.conf)
        self.conf.set_override('interface_driver',
                               'neutron.agent.linux.interface.NullDriver')
        self.conf.set_override('state_path', cfg.CONF.state_path)

        self.device_exists_p = mock.patch(
            'neutron.agent.linux.ip_lib.device_exists')
        self.device_exists = self.device_exists_p.start()

        self.ensure_dir = mock.patch(
            'oslo_utils.fileutils.ensure_tree').start()

        mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
                   '.get_full_config_file_path').start()

        self.utils_exec_p = mock.patch('neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()

        self.utils_replace_file_p = mock.patch(
            'neutron_lib.utils.file.replace_file')
        self.utils_replace_file = self.utils_replace_file_p.start()

        self.external_process_p = mock.patch(
            'neutron.agent.linux.external_process.ProcessManager')
        self.external_process = self.external_process_p.start()
        self.process_monitor = mock.patch(
            'neutron.agent.linux.external_process.ProcessMonitor').start()

        self.send_adv_notif_p = mock.patch(
            'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
        self.send_adv_notif = self.send_adv_notif_p.start()

        self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
        driver_cls = self.dvr_cls_p.start()
        self.mock_driver = mock.MagicMock()
        self.mock_driver.DEV_NAME_LEN = (
            interface.LinuxInterfaceDriver.DEV_NAME_LEN)
        driver_cls.return_value = self.mock_driver

        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
        ip_cls = self.ip_cls_p.start()
        self.mock_ip = mock.MagicMock()
        ip_cls.return_value = self.mock_ip

        self.mock_delete_ip_rule = mock.patch.object(ip_lib,
                                                     'delete_ip_rule').start()

        ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
        self.mock_ip_dev = mock.MagicMock()
        ip_dev.return_value = self.mock_ip_dev

        self.l3pluginApi_cls_p = mock.patch(
            'neutron.agent.l3.agent.L3PluginApi')
        l3pluginApi_cls = self.l3pluginApi_cls_p.start()
        self.plugin_api = mock.MagicMock()
        l3pluginApi_cls.return_value = self.plugin_api

        self.looping_call_p = mock.patch(
            'oslo_service.loopingcall.FixedIntervalLoopingCall')
        self.looping_call_p.start()

        subnet_id_1 = _uuid()
        subnet_id_2 = _uuid()
        self.snat_ports = [{
            'subnets': [{
                'cidr': '152.2.0.0/16',
                'gateway_ip': '152.2.0.1',
                'id': subnet_id_1
            }],
            'network_id':
            _uuid(),
            'device_owner':
            lib_constants.DEVICE_OWNER_ROUTER_SNAT,
            'mac_address':
            'fa:16:3e:80:8d:80',
            'fixed_ips': [{
                'subnet_id': subnet_id_1,
                'ip_address': '152.2.0.13',
                'prefixlen': 16
            }],
            'id':
            _uuid(),
            'device_id':
            _uuid()
        }, {
            'subnets': [{
                'cidr': '152.10.0.0/16',
                'gateway_ip': '152.10.0.1',
                'id': subnet_id_2
            }],
            'network_id':
            _uuid(),
            'device_owner':
            lib_constants.DEVICE_OWNER_ROUTER_SNAT,
            'mac_address':
            'fa:16:3e:80:8d:80',
            'fixed_ips': [{
                'subnet_id': subnet_id_2,
                'ip_address': '152.10.0.13',
                'prefixlen': 16
            }],
            'id':
            _uuid(),
            'device_id':
            _uuid()
        }]

        self.ri_kwargs = {
            'agent_conf': self.conf,
            'interface_driver': self.mock_driver
        }
Example 38
    def setUp(self):
        super(FaultMiddlewareTest, self).setUp()
        log.register_options(cfg.CONF)
Example 39
    def setup(self):
        logging.register_options(CONF)
        CONF(sys.argv[1:], version=self.version)
        logging.setup(CONF, self.name)
Example 40
def launch(conf, config_file="/etc/monasca/api-config.conf"):
    log.register_options(cfg.CONF)
    log.set_defaults()
    cfg.CONF(args=[],
             project='monasca_api',
             default_config_files=[config_file])
    log.setup(cfg.CONF, 'monasca_api')

    app = falcon.API()

    versions = simport.load(cfg.CONF.dispatcher.versions)()
    app.add_route("/", versions)
    app.add_route("/{version_id}", versions)

    # The following resource is a workaround for a regression in falcon 0.3
    # which causes the path '/v2.0' to not route to the versions resource
    version_2_0 = simport.load(cfg.CONF.dispatcher.version_2_0)()
    app.add_route("/v2.0", version_2_0)

    metrics = simport.load(cfg.CONF.dispatcher.metrics)()
    app.add_route("/v2.0/metrics", metrics)

    metrics_measurements = simport.load(
        cfg.CONF.dispatcher.metrics_measurements)()
    app.add_route("/v2.0/metrics/measurements", metrics_measurements)

    metrics_statistics = simport.load(cfg.CONF.dispatcher.metrics_statistics)()
    app.add_route("/v2.0/metrics/statistics", metrics_statistics)

    metrics_names = simport.load(cfg.CONF.dispatcher.metrics_names)()
    app.add_route("/v2.0/metrics/names", metrics_names)

    alarm_definitions = simport.load(cfg.CONF.dispatcher.alarm_definitions)()
    app.add_route("/v2.0/alarm-definitions/", alarm_definitions)
    app.add_route("/v2.0/alarm-definitions/{alarm_definition_id}",
                  alarm_definitions)

    alarms = simport.load(cfg.CONF.dispatcher.alarms)()
    app.add_route("/v2.0/alarms", alarms)
    app.add_route("/v2.0/alarms/{alarm_id}", alarms)

    alarm_count = simport.load(cfg.CONF.dispatcher.alarms_count)()
    app.add_route("/v2.0/alarms/count/", alarm_count)

    alarms_state_history = simport.load(
        cfg.CONF.dispatcher.alarms_state_history)()
    app.add_route("/v2.0/alarms/state-history", alarms_state_history)
    app.add_route("/v2.0/alarms/{alarm_id}/state-history",
                  alarms_state_history)

    notification_methods = simport.load(
        cfg.CONF.dispatcher.notification_methods)()
    app.add_route("/v2.0/notification-methods", notification_methods)
    app.add_route("/v2.0/notification-methods/{notification_method_id}",
                  notification_methods)

    dimension_values = simport.load(cfg.CONF.dispatcher.dimension_values)()
    app.add_route("/v2.0/metrics/dimensions/names/values", dimension_values)

    dimension_names = simport.load(cfg.CONF.dispatcher.dimension_names)()
    app.add_route("/v2.0/metrics/dimensions/names", dimension_names)

    notification_method_types = simport.load(
        cfg.CONF.dispatcher.notification_method_types)()
    app.add_route("/v2.0/notification-methods/types",
                  notification_method_types)

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
Example 41
def config(args=[]):
    CONF.register_opts(_COMMON)
    CONF.register_cli_opts(_COMMON)
    log.register_options(CONF)
    CONF(args=args, project='freezer', version=FREEZER_VERSION)
Example 42
def _setup_logger(args=None):
    args = [] if args is None else args
    log.register_options(CONF)
    opts.set_config_defaults()
    opts.parse_args(args)
    log.setup(CONF, 'ironic_inspector')
Example 43
def _setup_logging():
    # TODO(aznashwan): setup logging for OpenStack client libs too:
    logging.register_options(conf.CONF)
    logging.setup(conf.CONF, 'coriolis')
Example 44
# Copyright 2018 Cloudbase Solutions Srl
# All Rights Reserved.

import argparse

from oslo_log import log as logging

from coriolis_openstack_utils import actions
from coriolis_openstack_utils import conf
from coriolis_openstack_utils import constants

# Setup logging:
logging.register_options(conf.CONF)
logging.setup(conf.CONF, 'coriolis')
LOG = logging.getLogger(__name__)

# Setup argument parsing:
PARSER = argparse.ArgumentParser(description="Coriolis Openstack utils.")
PARSER.add_argument("-v",
                    "--verbose",
                    action="store_true",
                    help="Increase log verbosity")
PARSER.add_argument("--config-file",
                    metavar="CONF_FILE",
                    dest="conf_file",
                    help="Path to the config file.")
PARSER.add_argument(
    "--dont-recreate-tenants",
    dest="dont_recreate_tenants",
    default=False,
    action="store_true",
Example 45
def main():
    if len(sys.argv) == 1:
        print('Error: Config file must be specified.')
        print('a10_nlbaas2oct --config-file <filename>')
        return 1
    logging.register_options(cfg.CONF)
    cfg.CONF(args=sys.argv[1:],
             project='a10_nlbaas2oct',
             version='a10_nlbaas2oct 1.0')
    logging.set_defaults()
    logging.setup(cfg.CONF, 'a10_nlbaas2oct')
    LOG = logging.getLogger('a10_nlbaas2oct')
    CONF.log_opt_values(LOG, logging.DEBUG)

    XOR_CLI_COMMANDS = (CONF.all, CONF.lb_id, CONF.project_id)

    if CONF.flavor_id and not CONF.lb_id:
        print('Error: --lb-id should be specified with --flavor-id')
        return 1

    if not any(XOR_CLI_COMMANDS) and not CONF.migration.lb_id_list:
        print('Error: One of --all, --lb-id, --project-id must be '
              'specified or lb_id_list must be set in the config file.')
        return 1

    if CONF.flavor_id and (XOR_CLI_COMMANDS[0] or XOR_CLI_COMMANDS[2]):
        print(
            'Error: --flavor-id can only be used in conjunction with --lb-id')
        return 1

    if CONF.migration.flavor_id_list and CONF.migration.lb_id_list:
        if len(CONF.migration.flavor_id_list) > len(CONF.migration.lb_id_list):
            print('Error: flavor_id_list cannot be mapped with lb_id_list '
                  'as flavor_id_list holds more entries than lb_id_list')
            return 1

    commands = [command for command in XOR_CLI_COMMANDS if command]
    if len(commands) > 1:
        print('Error: Only one of --all, --lb-id, --project-id allowed.')
        return 1
    elif len(commands) == 1:
        if CONF.migration.lb_id_list and not (CONF.lb_id or CONF.cleanup):
            print(
                'Error: Only --lb-id and --cleanup are allowed with lb_id_list set in the config file.'
            )
            return

    CLEANUP_ONLY = False
    if CONF.cleanup:
        full_success_msg, lb_success_msg = _cleanup_confirmation(LOG)
        if full_success_msg and lb_success_msg:
            CLEANUP_ONLY = True
        else:
            print('Exiting...')
            return
    else:
        LOG.info('=== Starting migration ===')
        full_success_msg = '\n\nMigration completed successfully'
        lb_success_msg = 'migration of loadbalancer %s'

    db_sessions = _setup_db_sessions()
    n_session = db_sessions['n_session']
    o_session = db_sessions['o_session']

    a10_config = a10_cfg.A10Config(config_path=CONF.migration.a10_config_path,
                                   provider=CONF.migration.provider_name)

    conf_lb_id_list = CONF.migration.lb_id_list
    if not conf_lb_id_list:
        conf_lb_id_list = []

    if CONF.lb_id and CONF.lb_id not in conf_lb_id_list:
        conf_lb_id_list.append(CONF.lb_id)

    if CONF.project_id and db_sessions.get('k_session'):
        if not db_utils.get_project_entry(db_sessions['k_session'],
                                          CONF.project_id):
            print('Error: Provide valid --project-id value.')
            return

    lb_id_list = db_utils.get_loadbalancer_ids(n_session,
                                               conf_lb_id_list=conf_lb_id_list,
                                               conf_project_id=CONF.project_id,
                                               conf_all=CONF.all)
    conf_flavor_id_list = []
    flavor_map = {}

    if CONF.migration.flavor_id_list and CONF.migration.lb_id_list:
        conf_flavor_id_list.extend(CONF.migration.flavor_id_list)
        fl_list_length = len(CONF.migration.flavor_id_list)
        lb_list_length = len(CONF.migration.lb_id_list)
        if fl_list_length < lb_list_length:
            if CONF.migration.default_flavor_id:
                for flavor_id in range(fl_list_length, lb_list_length):
                    conf_flavor_id_list.insert(
                        flavor_id, CONF.migration.default_flavor_id)

    fl_id = None
    fl_id_list = []
    failure_count = 0
    tenant_bindings = []
    curr_device_name = None
    lb_idx = 0
    for lb_id in lb_id_list:
        lb_id = lb_id[0]
        try:
            LOG.info('Locking Neutron LBaaS load balancer: %s', lb_id)
            db_utils.lock_loadbalancer(n_session, lb_id)
            n_lb = db_utils.get_loadbalancer_entry(n_session, lb_id)
            provider = n_lb[0]
            tenant_id = n_lb[1]
            tenant_name = None
            if not db_utils.get_project_entry(db_sessions['k_session'],
                                              tenant_id):
                LOG.info(
                    'Tenant with id %s does not exist. Attempting to lookup '
                    'the tenant using %s as the name instead of id.',
                    tenant_id, tenant_id)
                tenant_id = db_utils.get_tenant_by_name(
                    db_sessions['k_session'], tenant_id)
                if not tenant_id:
                    LOG.warning(
                        'Skipping loadbalancer with id %s. It is owned by a tenant with id '
                        'or name %s that does not exist.', lb_id, n_lb[1])
                    continue
                elif len(tenant_id) > 1:
                    LOG.warning(
                        'Skipping loadbalancer with id %s. It was created with the tenant name %s '
                        'instead of tenant id. This tenant name is used by multiple projects so it '
                        'cannot be looked up. Please update the loadbalancer tenant_id in the db to '
                        'match the intended tenant.', lb_id, n_lb[1])
                    continue
                else:
                    tenant_name = n_lb[1]
                    tenant_id = tenant_id[0][0]

            if provider != CONF.migration.provider_name:
                LOG.info(
                    'Skipping loadbalancer with provider %s. '
                    'Does not match specified provider %s.', provider,
                    CONF.migration.provider_name)
                continue

            tenant_device_binding = tenant_name if tenant_name else tenant_id
            tenant_bindings.append(tenant_device_binding)
            if not CLEANUP_ONLY:
                device_info = _migrate_device(LOG, a10_config, db_sessions,
                                              lb_id, tenant_id,
                                              tenant_device_binding)
                if not device_info:
                    continue
                device_name = device_info['name']

                fl_id = _flavor_selection(LOG, a10_config, o_session, lb_id,
                                          conf_flavor_id_list, lb_idx,
                                          flavor_map, device_name)
                _migrate_slb(LOG, n_session, o_session, lb_id, fl_id,
                             tenant_id, n_lb,
                             CONF.migration.ignore_l7rule_status)
                lb_idx = lb_idx + 1

            _cleanup_slb(LOG, n_session, lb_id, CLEANUP_ONLY)

            # Rollback everything if we are in a trial run otherwise commit
            if CONF.migration.trial_run:
                o_session.rollback()
                n_session.rollback()
                LOG.info('Simulated ' + lb_success_msg + ' successful', lb_id)
            else:
                o_session.commit()
                n_session.commit()
                LOG.info('Successful ' + lb_success_msg, lb_id)

        except Exception as e:
            n_session.rollback()
            o_session.rollback()
            LOG.exception("Skipping load balancer %s due to: %s.", lb_id,
                          str(e))
            failure_count += 1
        finally:
            # Attempt to unlock the loadbalancer even if an error occurred or it was deleted.
            # This ensures we don't get stuck in pending states
            LOG.info('Unlocking Neutron LBaaS load balancer: %s', lb_id)
            db_utils.unlock_loadbalancer(n_session, lb_id)
            n_session.commit()

    failure_count += _cleanup_tenant_bindings(
        LOG, db_sessions['a10_nlbaas_session'], a10_config, tenant_bindings,
        CLEANUP_ONLY)

    if failure_count:
        LOG.warning("%d failures were detected", failure_count)
        sys.exit(1)

    print(full_success_msg)
Example 46
def prepare_service(argv=None):
    if argv is None:
        argv = []
    logging.register_options(cfg.CONF)
    cfg.CONF(argv[1:], project='magnum')
    logging.setup(cfg.CONF, 'magnum')
Example 47
def main():
    """Executes model's methods with corresponding parameters"""

    # we may add deepaas config, but then too many options...
    # config.config_and_logging(sys.argv)

    log.register_options(CONF)
    log.set_defaults(default_log_levels=log.get_default_log_levels())

    CONF(sys.argv[1:], project='deepaas', version=deepaas.__version__)

    log.setup(CONF, "deepaas-cli")

    LOG.info("[INFO, Method] {} was called.".format(CONF.methods.name))

    # put all variables in dict, makes life easier...
    conf_vars = vars(CONF._namespace)

    if CONF.deepaas_with_multiprocessing:
        mp.set_start_method('spawn', force=True)

    # TODO(multi-file): change to many files ('for' iteration)
    if CONF.methods.__contains__('files'):
        if CONF.methods.files:
            # create a tmp file, as it is later supposed
            # to be deleted by the application
            temp = tempfile.NamedTemporaryFile()
            temp.close()
            # copy original file into tmp file
            with open(conf_vars['files'], "rb") as f:
                with open(temp.name, "wb") as f_tmp:
                    for line in f:
                        f_tmp.write(line)

            # create file object
            file_type = mimetypes.MimeTypes().guess_type(conf_vars['files'])[0]
            file_obj = v2_wrapper.UploadedFile(
                name="data",
                filename=temp.name,
                content_type=file_type,
                original_filename=conf_vars['files'])
            # re-write 'files' parameter in conf_vars
            conf_vars['files'] = file_obj

    # debug of input parameters
    LOG.debug("[DEBUG provided options, conf_vars]: {}".format(conf_vars))

    if CONF.methods.name == 'get_metadata':
        meta = model_obj.get_metadata()
        meta_json = json.dumps(meta)
        LOG.debug("[DEBUG, get_metadata, Output]: {}".format(meta_json))
        if CONF.deepaas_method_output:
            _store_output(meta_json, CONF.deepaas_method_output)

        return meta_json

    elif CONF.methods.name == 'warm':
        # await model_obj.warm()
        model_obj.warm()
        LOG.info("[INFO, warm] Finished warm() method")

    elif CONF.methods.name == 'predict':
        # call predict method
        predict_vars = _get_subdict(conf_vars, predict_args)
        task = model_obj.predict(**predict_vars)

        if CONF.deepaas_method_output:
            out_file = CONF.deepaas_method_output
            out_path = os.path.dirname(os.path.abspath(out_file))
            if not os.path.exists(out_path):  # create the path if it does not exist
                os.makedirs(out_path)
            # check extension of the output file
            out_filename, out_extension = os.path.splitext(out_file)

            # set default extension for the data returned
            # by the application to .json
            extension = ".json"
            # check what the application is asked to return (if --accept is set)
            if CONF.methods.__contains__('accept'):
                if CONF.methods.accept:
                    extension = mimetypes.guess_extension(CONF.methods.accept)

            if (extension is not None and out_extension is not None
                    and extension != out_extension):  # noqa: W503
                out_file = out_file + extension
                LOG.warn("[WARNING] You are trying to store {} "
                         "type data in the file "
                         "with {} extension!\n"
                         "===================> "
                         "New output is {}".format(extension, out_extension,
                                                   out_file))
            if extension == ".json" or extension is None:
                results_json = json.dumps(task)
                LOG.debug("[DEBUG, predict, Output]: {}".format(results_json))
                with open(out_file, "w+") as f:
                    f.write(results_json)
            else:
                out_results = task.name
                shutil.copy(out_results, out_file)

            LOG.info("[INFO, Output] Output is saved in {}".format(out_file))

        return task

    elif CONF.methods.name == 'train':
        train_vars = _get_subdict(conf_vars, train_args)
        start = time.time()
        task = model_obj.train(**train_vars)
        LOG.info("[INFO] Elapsed time:  %s", str(time.time() - start))
        # we assume that train always returns JSON
        results_json = json.dumps(task)
        LOG.debug("[DEBUG, train, Output]: {}".format(results_json))
        if CONF.deepaas_method_output:
            _store_output(results_json, CONF.deepaas_method_output)

        return results_json

    else:
        LOG.warn("[WARNING] No Method was requested! Return get_metadata()")
        meta = model_obj.get_metadata()
        meta_json = json.dumps(meta)
        LOG.debug("[DEBUG, get_metadata, Output]: {}".format(meta_json))

        return meta_json
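
Esempio 47 calls a _store_output() helper that is not shown. A plausible minimal implementation — purely an assumption inferred from how it is used above (a serialized payload plus an output path):

import os


def _store_output(content, out_file):
    # Assumed helper: write the serialized result to out_file,
    # creating the parent directory first if needed.
    out_path = os.path.dirname(os.path.abspath(out_file))
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    with open(out_file, "w") as f:
        f.write(content)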
Esempio n. 48
0
def read_config(prog, argv):
    logging.register_options(cfg.CONF)
    config_files = find_config('%s.conf' % prog)
    cfg.CONF(argv[1:], project='designate', prog=prog,
             default_config_files=config_files)
    config.set_defaults()
Esempio n. 49
0
def init(args, **kwargs):
    # Register the configuration options
    logging.register_options(cfg.CONF)
    cfg.CONF(args=args, project='valence', **kwargs)
Esempio n. 50
0
#    under the License.

import eventlet
eventlet.monkey_patch()

import argparse
import ConfigParser
from gbpservice.nfp.core import log as nfp_logging
import os
from oslo_config import cfg
from oslo_log import log as oslo_logging
import socket
import sys
import time

oslo_logging.register_options(cfg.CONF)

LOG = nfp_logging.getLogger(__name__)

# Queue of proxy connections which workers will handle
ConnQ = eventlet.queue.Queue(maxsize=0)

tcp_open_connection_count = 0
tcp_close_connection_count = 0


class ConnectionIdleTimeOut(Exception):
    '''
    Exception raised when a connection is idle for the configured timeout
    '''
    pass
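
ConnQ above is a green-thread-safe queue: workers block on get() without blocking the process. A minimal consumer sketch reusing the module's ConnQ and eventlet import — handle() is an illustrative placeholder, not part of the original module:

def worker():
    while True:
        # get() yields to other greenthreads while the queue is empty.
        conn = ConnQ.get()
        try:
            handle(conn)  # placeholder for the actual proxy logic
        finally:
            conn.close()


pool = eventlet.GreenPool(size=16)
for _ in range(16):
    pool.spawn_n(worker)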
Esempio n. 51
0
 def log_file(self, file_dir):
     logging.register_options(self.CONF)
     self.CONF(default_config_files=[file_dir])
     logging.setup(self.CONF, __name__)
Esempio n. 52
0
def parse_args(argv, default_config_files=None):
    logging.register_options(CONF)
    CONF(argv[1:],
         project='smart_proj',
         version='smart_ver 0.0.1',
         default_config_files=default_config_files)
Esempio n. 53
0
#    License for the specific language governing permissions and limitations
#    under the License.

import eventlet
eventlet.monkey_patch()

from gbpservice._i18n import _
from gbpservice.nfp.core import log as nfp_logging
import os
from oslo_config import cfg as oslo_config
from oslo_log import log as oslo_logging
import socket
import sys
import time

oslo_logging.register_options(oslo_config.CONF)

if not hasattr(oslo_config.CONF, 'module'):
    module_opts = [
        oslo_config.StrOpt('module',
                           default='proxy',
                           help='component name for logging.')
    ]
    oslo_config.CONF.register_opts(module_opts)

LOG = nfp_logging.getLogger(__name__)

# Queue of proxy connections which workers will handle
ConnQ = eventlet.queue.Queue(maxsize=0)

tcp_open_connection_count = 0
Esempio n. 54
0
 def init_log(self):
     logging.register_options(self.CONF)
     logging.setup(self.CONF, __name__)
Esempio n. 55
0
def init(__file__, OPTS, argv=None):
    """
    Standard init: initialize CONF and LOG.
    :param __file__:
    :param OPTS:
    :return:
    """

    self_package_name = get_package_name(__file__)
    # print self_package_name
    self_package_folder = get_script_location(__file__)
    # print self_package_folder
    SELF_PACKAGE_OPTS = [
        cfg.StrOpt('self_package_name',
                   default=self_package_name,
                   help='the self package name'),
        cfg.StrOpt('self_package_folder',
                   default=self_package_folder,
                   help='the self package folder'),
    ]

    # done: important! Temporarily set argv to empty here; oslo.config
    # forcibly validates argv, and it is best to make it skip that check.
    # done: for now, back up argv and restore it afterwards.
    backup_argv = []
    backup_argv[:] = sys.argv
    sys.argv[1:] = []

    CONF_FILE = self_package_name + '.conf'
    global get_logger
    # print __file__
    CONF.register_opts(SELF_PACKAGE_OPTS)
    CONF.register_opts(OPTS)
    which_log = 'oslo'
    # which_log='smartlog'

    if which_log == 'smartlog':
        OPTS = [
            # when DEBUG is False, 'all' gets computed.
            cfg.BoolOpt('debug',
                        default=True,
                        help='debug toggle')

        ]
        CONF.register_opts(OPTS)

        # todo: changing the root level has no effect; also, this block
        # should run after the config file is parsed.
        if CONF.debug:
            log_level = "DEBUG"
        else:
            log_level = "WARNING"
        # The log namespace is global and levels are set by name only, so the
        # root log only needs to be created once.
        # LOG=get_logger(name=CONF.self_package_name,level=log_level,console=True)
        # Use root directly, otherwise general messages can't be captured. The
        # default format lives in smartlog; set it here when needed.
        set_root_logger(level=log_level)
    elif which_log == 'oslo':
        oslo_i18n.enable_lazy()
        log.register_options(CONF)
        log_levels = (CONF.default_log_levels +
                      ['loader=ERROR'])
        # todo: how to adjust the oslo log format, and how to set the level of
        # each individual logger.
        # done: use our own log for now
        log.set_defaults(default_log_levels=log_levels)
        log.setup(CONF, CONF.self_package_name)
        get_logger = log.getLogger
    else:
        print("please set which log system to use")
        exit(0)

    script_location = get_script_location(__file__)

    # Path configuration of labkit packages and config.
    config_file_location = os.path.join(os.environ['HOME'], '.labkit', 'config', self_package_name)
    config_folder = os.path.join(os.environ['HOME'], '.labkit', 'config')
    local_packages_folder = os.path.join(os.environ['HOME'], '.labkit', 'packages')
    packages_folder = os.path.dirname(os.path.dirname(self_package_folder))

    OPTS = [
        cfg.StrOpt('config_file_location',
                   default=config_file_location,
                   help='config file location'),
        cfg.StrOpt('local_packages_folder',
                   default=local_packages_folder,
                   help='local packages folder'),
        cfg.StrOpt('config_folder',
                   default=config_folder,
                   help='config folder'),
        cfg.StrOpt('packages_folder',
                   default=packages_folder,
                   help='labkit packages folder')

    ]
    CONF.register_opts(OPTS)

    config_files = [os.path.join(config_file_location, CONF_FILE)]

    # version=importlib.import_module(CONF.self_package_name+'.version')
    if argv is None:
        argv = sys.argv

    CONF(args=None, project=CONF.self_package_name, validate_default_values=False,
         # version=version.version_info.version_string(),
         default_config_files=config_files)

    sys.argv[:] = backup_argv[:]
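
The backup_argv dance above deserves a distilled version: oslo.config parses sys.argv when args is None, so emptying sys.argv[1:] before the call and restoring it afterwards lets library code load config files without tripping over the host program's CLI flags. A minimal sketch (project name is a placeholder):

import sys

from oslo_config import cfg

CONF = cfg.CONF


def load_config_files_only(config_files):
    backup_argv = list(sys.argv)
    sys.argv[1:] = []  # hide the real CLI args from oslo.config
    try:
        CONF(args=None, project='example',
             default_config_files=config_files)
    finally:
        sys.argv[:] = backup_argv  # restore for the rest of the program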
Esempio n. 56
0
 def __init__(self):
     self.logger = LOG
     logging.register_options(CONF)
     logging.set_defaults(default_log_levels=CONF.default_log_levels)
     logging.setup(CONF, 'armada')
Esempio n. 57
0
               help=("Pass in your authentication token if you have "
                     "one. This is the token used for the master.")),
    cfg.StrOpt('slavetoken',
               short='S',
               default='',
               help=("Pass in your authentication token if you have "
                     "one. This is the token used for the slave.")),
    cfg.StrOpt('command',
               positional=True,
               help="Command to be given to replicator"),
    cfg.ListOpt('args', positional=True, help="Arguments for the command"),
]

CONF = cfg.CONF
CONF.register_cli_opts(cli_opts)
logging.register_options(CONF)

# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(
    os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
    sys.path.insert(0, possible_topdir)

COMMANDS = """Commands:

    help <command>  Output help for one of the commands below

    compare         What is missing from the slave glance?
    dump            Dump the contents of a glance instance to local disk.
    livecopy        Load the contents of one glance instance into another.
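
Because 'command' and 'args' are declared positional, the resulting CLI reads as subcommands. A hypothetical invocation, assuming the usual glance-replicator entry point and that -M is the short form of mastertoken (only -S for slavetoken is visible above):

    glance-replicator livecopy master-host:9292 slave-host:9292 -M <mastertoken> -S <slavetoken>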
Esempio n. 58
0
 def setUp(self):
     super(TestApp, self).setUp()
     self.CONF = self.useFixture(fixture_config.Config()).conf
     log.register_options(cfg.CONF)
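
Esempio 58 pairs the oslo.config test fixture with log option registration. A minimal sketch of typical usage — oslotest's base class is an assumption, and the 'debug' override only works because register_options has registered the log opts:

from oslo_config import cfg
from oslo_config import fixture as fixture_config
from oslo_log import log
from oslotest import base


class TestApp(base.BaseTestCase):
    def setUp(self):
        super(TestApp, self).setUp()
        # The fixture snapshots CONF and restores it on cleanup,
        # so overrides cannot leak between tests.
        self.CONF = self.useFixture(fixture_config.Config(cfg.CONF)).conf
        log.register_options(cfg.CONF)
        self.CONF.set_override('debug', True)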
Esempio n. 59
0
def parse_args(argv, default_config_files=None):
    log.register_options(CONF)
    CONF(argv[1:], project='hagent', version='0.1',
         default_config_files=default_config_files)
Esempio n. 60
0
 def setUp(self):
     super(TestEvent, self).setUp()
     logging.register_options(cfg.CONF)