Exemplo n.º 1
0
 def setUp(self):
     """Set up the test case.

     Loads the global config with no CLI arguments for the
     ceilometermiddleware project and registers a cleanup that resets
     the shared CONF singleton so state does not leak between tests.
     """
     super(TestSwift, self).setUp()
     # Parse an empty argument list so CONF is initialised for this project.
     cfg.CONF([], project='ceilometermiddleware')
     # CONF is a process-wide singleton; reset it after each test.
     self.addCleanup(cfg.CONF.reset)
Exemplo n.º 2
0
def init(args, **kwargs):
    """Initialise oslo.config for the octavia project.

    Parses ``args`` (plus any extra keyword options), then runs the
    project's deprecation-compatibility handling and remote debugger
    setup.
    """
    release = version.version_info.release_string()
    cfg.CONF(args=args,
             project='octavia',
             version='%%prog %s' % release,
             **kwargs)
    handle_deprecation_compatibility()
    setup_remote_debugger()
Exemplo n.º 3
0
def get_enforcer():
    """Return the module's policy enforcer, initialising config first."""
    # Load the congress project configuration with no CLI arguments.
    cfg.CONF(args=[], project='congress')
    init()
    return _ENFORCER
Exemplo n.º 4
0
def parse_args(argv, default_config_files=None):
    """Parse CLI arguments and optional config files for magnum.

    ``argv[0]`` (the program name) is skipped, mirroring ``sys.argv``.
    """
    cfg.CONF(
        argv[1:],
        project='magnum',
        version=version.version_string,
        default_config_files=default_config_files,
    )
Exemplo n.º 5
0
def main():
    """Load the fm configuration and synchronise the database schema."""
    cfg.CONF(sys.argv[1:], project='fm')
    migration.db_sync()
Exemplo n.º 6
0
def main(argv):
    """Start the NFP proxy.

    :param argv: full process argument vector; ``argv[0]`` is the
        program name and is skipped, mirroring ``sys.argv`` usage.
    """
    oslo_config.CONF.register_opts(PROXY_OPTS, 'proxy')
    # Bug fix: the ``argv`` parameter was previously ignored in favour of
    # ``sys.argv``, so callers could not inject their own arguments.
    oslo_config.CONF(args=argv[1:])
    oslo_logging.setup(oslo_config.CONF, 'nfp')
    conf = Configuration(oslo_config.CONF)
    Proxy(conf).start()
Exemplo n.º 7
0
def main():
    """CLI entry point for the oslo.messaging profiler tool.

    Parses the command line, builds an RPC or notification transport,
    then runs the selected server or client mode and reports stats.
    """

    def _as_bool(value):
        # Bug fix: argparse's ``type=bool`` is broken — any non-empty
        # string, including "False", is truthy.  Parse common spellings
        # explicitly instead so e.g. ``--is-cast False`` works.
        return str(value).strip().lower() in ('1', 'true', 'yes', 'on')

    parser = argparse.ArgumentParser(
        description='Tools to play with oslo.messaging\'s RPC',
        usage=USAGE,
    )
    parser.add_argument('--url',
                        dest='url',
                        default='rabbit://*****:*****@localhost/',
                        help="oslo.messaging transport url")
    parser.add_argument('-d',
                        '--debug',
                        dest='debug',
                        type=_as_bool,
                        default=False,
                        help="Turn on DEBUG logging level instead of WARN")
    parser.add_argument('-tp',
                        '--topic',
                        dest='topic',
                        default="profiler_topic",
                        help="Topics to publish/receive messages to/from.")
    parser.add_argument('-s',
                        '--server',
                        dest='server',
                        default="profiler_server",
                        help="Servers to publish/receive messages to/from.")
    parser.add_argument('-tg',
                        '--targets',
                        dest='targets',
                        nargs="+",
                        default=["profiler_topic.profiler_server"],
                        help="Targets to publish/receive messages to/from.")
    parser.add_argument('-l',
                        dest='duration',
                        type=int,
                        help='send messages for certain time')
    parser.add_argument('-j',
                        '--json',
                        dest='json_filename',
                        help='File name to store results in JSON format')
    parser.add_argument('--config-file',
                        dest='config_file',
                        type=str,
                        help="Oslo messaging config file")

    subparsers = parser.add_subparsers(dest='mode',
                                       help='notify/rpc server/client mode')

    server = subparsers.add_parser('notify-server')
    server.add_argument('-w', dest='wait_before_answer', type=int, default=-1)
    server.add_argument('--requeue', dest='requeue', action='store_true')

    server = subparsers.add_parser('batch-notify-server')
    server.add_argument('-w', dest='wait_before_answer', type=int, default=-1)
    server.add_argument('--requeue', dest='requeue', action='store_true')

    client = subparsers.add_parser('notify-client')
    client.add_argument('-p',
                        dest='threads',
                        type=int,
                        default=1,
                        help='number of client threads')
    client.add_argument('-m',
                        dest='messages',
                        type=int,
                        default=1,
                        help='number of call per threads')
    client.add_argument('-w',
                        dest='wait_after_msg',
                        type=float,
                        default=-1,
                        help='sleep time between two messages')
    client.add_argument('--timeout',
                        dest='timeout',
                        type=int,
                        default=3,
                        help='client timeout')

    server = subparsers.add_parser('rpc-server')
    server.add_argument('-w', dest='wait_before_answer', type=int, default=-1)
    server.add_argument('-e',
                        '--executor',
                        dest='executor',
                        type=str,
                        default='eventlet',
                        help='name of a message executor')

    client = subparsers.add_parser('rpc-client')
    client.add_argument('-p',
                        dest='threads',
                        type=int,
                        default=1,
                        help='number of client threads')
    client.add_argument('-m',
                        dest='messages',
                        type=int,
                        default=1,
                        help='number of call per threads')
    client.add_argument('-w',
                        dest='wait_after_msg',
                        type=float,
                        default=-1,
                        help='sleep time between two messages')
    client.add_argument('--timeout',
                        dest='timeout',
                        type=int,
                        default=3,
                        help='client timeout')
    client.add_argument('--exit-wait',
                        dest='exit_wait',
                        type=int,
                        default=0,
                        help='Keep connections open N seconds after calls '
                        'have been done')
    client.add_argument('--is-cast',
                        dest='is_cast',
                        type=_as_bool,
                        default=False,
                        help='Use `call` or `cast` RPC methods')
    client.add_argument('--is-fanout',
                        dest='is_fanout',
                        type=_as_bool,
                        default=False,
                        help='fanout=True for CAST messages')

    args = parser.parse_args()

    _setup_logging(is_debug=args.debug)

    if args.config_file:
        cfg.CONF(["--config-file", args.config_file])

    global TRANSPORT
    if args.mode in ['rpc-server', 'rpc-client']:
        TRANSPORT = messaging.get_transport(cfg.CONF, url=args.url)
    else:
        TRANSPORT = messaging.get_notification_transport(cfg.CONF,
                                                         url=args.url)

    if args.mode in ['rpc-client', 'notify-client']:
        # always generate maximum number of messages for duration-limited tests
        generate_messages(MESSAGES_LIMIT if args.duration else args.messages)

    # oslo.config defaults
    cfg.CONF.heartbeat_interval = 5
    cfg.CONF.prog = os.path.basename(__file__)
    cfg.CONF.project = 'oslo.messaging'

    # Install handlers so SIGTERM/SIGINT shut the tool down cleanly.
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    if args.mode == 'rpc-server':
        target = messaging.Target(topic=args.topic, server=args.server)
        if args.url.startswith('zmq'):
            cfg.CONF.rpc_zmq_matchmaker = "redis"

        endpoint = rpc_server(TRANSPORT, target, args.wait_before_answer,
                              args.executor, args.duration)
        show_server_stats(endpoint, args.json_filename)

    elif args.mode == 'notify-server':
        endpoint = notify_server(TRANSPORT, args.topic,
                                 args.wait_before_answer, args.duration,
                                 args.requeue)
        show_server_stats(endpoint, args.json_filename)

    elif args.mode == 'batch-notify-server':
        endpoint = batch_notify_server(TRANSPORT, args.topic,
                                       args.wait_before_answer, args.duration,
                                       args.requeue)
        show_server_stats(endpoint, args.json_filename)

    elif args.mode == 'notify-client':
        spawn_notify_clients(args.threads, args.topic, TRANSPORT,
                             args.messages, args.wait_after_msg, args.timeout,
                             args.duration)
        show_client_stats(CLIENTS, args.json_filename)

    elif args.mode == 'rpc-client':
        # Split "topic.server" strings into (topic, server) pairs.
        targets = [target.partition('.')[::2] for target in args.targets]
        targets = [
            messaging.Target(topic=topic,
                             server=server_name,
                             fanout=args.is_fanout)
            for topic, server_name in targets
        ]
        spawn_rpc_clients(args.threads, TRANSPORT, targets,
                          args.wait_after_msg, args.timeout, args.is_cast,
                          args.messages, args.duration)

        show_client_stats(CLIENTS, args.json_filename, not args.is_cast)

        if args.exit_wait:
            LOG.info("Finished. waiting for %d seconds", args.exit_wait)
            time.sleep(args.exit_wait)
Exemplo n.º 8
0
def prepare_service():
    """Initialise configuration and logging for the cloudkitty service."""
    conf = cfg.CONF
    logging.register_options(conf)
    conf(sys.argv[1:], project='cloudkitty')
    logging.setup(conf, 'cloudkitty')
Exemplo n.º 9
0
 def __init__(self):
     """Initialise the fixture with an empty configuration.

     Loads CONF with no CLI arguments and no config files, registers
     the project's options, then delegates to the base fixture class.
     """
     cfg.CONF([], default_config_files=[])
     config.register_opts()
     super(ConfigFixture, self).__init__()
Exemplo n.º 10
0
import logging
import logging.config
import sys, os.path as path
import logconf
from oslo_config import cfg
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from server import session

# Configure logging from the shared dictConfig and grab this module's logger.
logging.config.dictConfig(logconf.conf_dict)
log = logging.getLogger('server.test_os')


def test():
    """Stop and restart every VM visible to the admin session.

    Errors from individual VMs are logged and do not abort the run.
    """
    user = session.Session('user1', '123456')
    admin = session.AdminSession('user1')
    for vm in admin.get_vms():
        log.debug(vm)
        try:
            user.stop_vm(vm['id'])
            user.start_vm(vm['id'])
        except session.VMError as e:
            log.error(e)


if __name__ == '__main__':
    # Load the deployment configuration before running the smoke test.
    cfg.CONF(default_config_files=['/etc/foldex/foldex.conf'])
    test()
Exemplo n.º 11
0
def launch(conf):
    """Build and return the monasca-api falcon WSGI application.

    :param conf: mapping that may provide a ``config_file`` path;
        falls back to ``/etc/monasca/api-config.conf``.
    :returns: a configured ``falcon.API`` instance with all dispatcher
        resources routed.
    """
    # use default, but try to access one passed from conf first
    config_file = conf.get('config_file', "/etc/monasca/api-config.conf")

    log.register_options(cfg.CONF)
    log.set_defaults()
    cfg.CONF(args=[],
             project='monasca_api',
             default_config_files=[config_file])
    log.setup(cfg.CONF, 'monasca_api')

    app = falcon.API(request_type=request.Request)

    def _add_resource(dispatcher_path, *routes):
        # Instantiate the configured dispatcher class and bind it to the
        # given routes; factored out because the pattern repeats 13 times.
        resource = simport.load(dispatcher_path)()
        for route in routes:
            app.add_route(route, resource)

    _add_resource(cfg.CONF.dispatcher.versions, "/", "/{version_id}")

    # The following resource is a workaround for a regression in falcon 0.3
    # which causes the path '/v2.0' to not route to the versions resource
    _add_resource(cfg.CONF.dispatcher.version_2_0, "/v2.0")

    _add_resource(cfg.CONF.dispatcher.metrics, "/v2.0/metrics")
    _add_resource(cfg.CONF.dispatcher.metrics_measurements,
                  "/v2.0/metrics/measurements")
    _add_resource(cfg.CONF.dispatcher.metrics_statistics,
                  "/v2.0/metrics/statistics")
    _add_resource(cfg.CONF.dispatcher.metrics_names,
                  "/v2.0/metrics/names")
    _add_resource(cfg.CONF.dispatcher.alarm_definitions,
                  "/v2.0/alarm-definitions/",
                  "/v2.0/alarm-definitions/{alarm_definition_id}")
    _add_resource(cfg.CONF.dispatcher.alarms,
                  "/v2.0/alarms",
                  "/v2.0/alarms/{alarm_id}")
    _add_resource(cfg.CONF.dispatcher.alarms_count,
                  "/v2.0/alarms/count/")
    _add_resource(cfg.CONF.dispatcher.alarms_state_history,
                  "/v2.0/alarms/state-history",
                  "/v2.0/alarms/{alarm_id}/state-history")
    _add_resource(cfg.CONF.dispatcher.notification_methods,
                  "/v2.0/notification-methods",
                  "/v2.0/notification-methods/{notification_method_id}")
    _add_resource(cfg.CONF.dispatcher.dimension_values,
                  "/v2.0/metrics/dimensions/names/values")
    _add_resource(cfg.CONF.dispatcher.dimension_names,
                  "/v2.0/metrics/dimensions/names")
    _add_resource(cfg.CONF.dispatcher.notification_method_types,
                  "/v2.0/notification-methods/types")

    LOG.debug('Dispatcher drivers have been added to the routes!')
    return app
Exemplo n.º 12
0
def parse_args(argv, default_config_files=None):
    """Load deepaas configuration from CLI args and optional config files."""
    cfg.CONF(argv[1:],
             project='deepaas',
             default_config_files=default_config_files,
             version=deepaas.__version__)
Exemplo n.º 13
0
def init(args, **kwargs):
    """Initialise oslo.config for the fuxi project."""
    release = version_info.release_string()
    cfg.CONF(args=args, project='fuxi', version=release, **kwargs)
Exemplo n.º 14
0
#!/usr/bin/env python
# coding=utf-8
import oslo_messaging
from oslo_config import cfg
import messaging
from endpoint import TestEndpoint

# TRANSPORT_URL = 'rabbit://*****:*****@10.0.0.3/'
TOPIC = 'notifications'
# Load nova's configuration so the transport can be built from it.
cfg.CONF(default_config_files=['/etc/nova/nova.conf'])

# NOTE(review): stop() expects this to hold the running listener, but
# start() as written assigns a *local* name instead — confirm intent.
listener = None


def start():
    """Create a notification listener on TOPIC and block until it stops."""
    # Bug fix: previously ``listener`` was a function-local variable, so
    # the module-level ``listener`` stayed None and stop() failed.
    global listener
    # Init transport
    transport = messaging.get_transport()
    # Init endpoint
    endpoints = [TestEndpoint()]
    # Init target
    targets = [oslo_messaging.Target(topic=TOPIC)]
    listener = messaging.get_notification_listener(transport, targets,
                                                   endpoints)
    listener.start()
    listener.wait()


def stop():
    # Stop the module-level listener started by start().
    # NOTE(review): as written, start() binds a *local* ``listener``, so
    # this would call .stop() on the initial ``None`` — confirm intent.
    listener.stop()
Exemplo n.º 15
0
def init(*args, **kwargs):
    """Initialize the cfg.CONF object for the octavia project."""
    version_string = '%%prog %s' % version.version_info.release_string()
    cfg.CONF(*args, project='octavia', version=version_string, **kwargs)
Exemplo n.º 16
0
 def __init__(self):
     """Initialise the fixture with an empty configuration.

     Registers two fake service groups in addition to the standard
     options before delegating to the base fixture class.
     """
     cfg.CONF([], default_config_files=[])
     # Append the fake groups so register_opts() below picks them up.
     config._opts.append((fake_service1_group, FakeService1Group))
     config._opts.append((fake_service2_group, FakeService2Group))
     config.register_opts()
     super(ServiceClientsConfigFixture, self).__init__()
Exemplo n.º 17
0
def read_config():
    """Load configuration from the located config file, ignoring CLI args."""
    config_files = [find_config_file()]
    cfg.CONF(args=[], default_config_files=config_files)
Exemplo n.º 18
0
def init(args, **kwargs):
    """Register logging options and load the plasma configuration."""
    logging.register_options(cfg.CONF)
    cfg.CONF(args=args, project='plasma', **kwargs)
Exemplo n.º 19
0
def main():
    """CLI entry point for the oslo.messaging profiler tool (v1).

    Parses the command line, builds an RPC or notification transport,
    then runs the selected server or client mode.
    """

    def _as_bool(value):
        # Bug fix: argparse's ``type=bool`` is broken — any non-empty
        # string, including "False", is truthy.  Parse common spellings
        # explicitly instead so e.g. ``--show-stats False`` works.
        return str(value).strip().lower() in ('1', 'true', 'yes', 'on')

    parser = argparse.ArgumentParser(
        description='Tools to play with oslo.messaging\'s RPC',
        usage=USAGE,
    )
    parser.add_argument('--url',
                        dest='url',
                        default='rabbit://*****:*****@localhost/',
                        help="oslo.messaging transport url")
    parser.add_argument('-d',
                        '--debug',
                        dest='debug',
                        type=_as_bool,
                        default=False,
                        help="Turn on DEBUG logging level instead of WARN")
    parser.add_argument('-tp',
                        '--topic',
                        dest='topic',
                        default="profiler_topic",
                        help="Topics to publish/receive messages to/from.")
    parser.add_argument('-s',
                        '--server',
                        dest='server',
                        default="profiler_server",
                        help="Servers to publish/receive messages to/from.")
    parser.add_argument('-tg',
                        '--targets',
                        dest='targets',
                        nargs="+",
                        default=["profiler_topic.profiler_server"],
                        help="Targets to publish/receive messages to/from.")
    parser.add_argument('-l',
                        dest='duration',
                        type=int,
                        help='send messages for certain time')
    parser.add_argument('--config-file',
                        dest='config_file',
                        type=str,
                        help="Oslo messaging config file")

    subparsers = parser.add_subparsers(dest='mode',
                                       help='notify/rpc server/client mode')

    server = subparsers.add_parser('notify-server')
    server.add_argument('--show-stats',
                        dest='show_stats',
                        type=_as_bool,
                        default=True)
    server = subparsers.add_parser('batch-notify-server')
    server.add_argument('--show-stats',
                        dest='show_stats',
                        type=_as_bool,
                        default=True)
    client = subparsers.add_parser('notify-client')
    client.add_argument('-p',
                        dest='threads',
                        type=int,
                        default=1,
                        help='number of client threads')
    client.add_argument('-m',
                        dest='messages',
                        type=int,
                        default=1,
                        help='number of call per threads')
    client.add_argument('-w',
                        dest='wait_after_msg',
                        type=int,
                        default=-1,
                        help='sleep time between two messages')
    client.add_argument('-t',
                        dest='timeout',
                        type=int,
                        default=3,
                        help='client timeout')

    server = subparsers.add_parser('rpc-server')
    server.add_argument('-w', dest='wait_before_answer', type=int, default=-1)
    server.add_argument('--show-stats',
                        dest='show_stats',
                        type=_as_bool,
                        default=True)
    server.add_argument('-e',
                        '--executor',
                        dest='executor',
                        type=str,
                        default='eventlet',
                        help='name of a message executor')

    client = subparsers.add_parser('rpc-client')
    client.add_argument('-p',
                        dest='threads',
                        type=int,
                        default=1,
                        help='number of client threads')
    client.add_argument('-m',
                        dest='messages',
                        type=int,
                        default=1,
                        help='number of call per threads')
    client.add_argument('-w',
                        dest='wait_after_msg',
                        type=int,
                        default=-1,
                        help='sleep time between two messages')
    client.add_argument('-t',
                        dest='timeout',
                        type=int,
                        default=3,
                        help='client timeout')
    client.add_argument('--exit-wait',
                        dest='exit_wait',
                        type=int,
                        default=0,
                        help='Keep connections open N seconds after calls '
                        'have been done')
    client.add_argument('--is-cast',
                        dest='is_cast',
                        type=_as_bool,
                        default=False,
                        help='Use `call` or `cast` RPC methods')

    args = parser.parse_args()

    _setup_logging(is_debug=args.debug)

    if args.config_file:
        cfg.CONF(["--config-file", args.config_file])

    if args.mode in ['rpc-server', 'rpc-client']:
        transport = messaging.get_transport(cfg.CONF, url=args.url)
    else:
        transport = messaging.get_notification_transport(cfg.CONF,
                                                         url=args.url)
        cfg.CONF.oslo_messaging_notifications.topics = "notif"
        cfg.CONF.oslo_messaging_notifications.driver = "messaging"

    # oslo.config defaults
    cfg.CONF.heartbeat_interval = 5
    cfg.CONF.prog = os.path.basename(__file__)
    cfg.CONF.project = 'oslo.messaging'

    if args.mode == 'rpc-server':
        target = messaging.Target(topic=args.topic, server=args.server)
        if args.url.startswith('zmq'):
            cfg.CONF.rpc_zmq_matchmaker = "redis"
        rpc_server(transport, target, args.wait_before_answer, args.executor,
                   args.show_stats, args.duration)
    elif args.mode == 'notify-server':
        notify_server(transport, args.show_stats)
    elif args.mode == 'batch-notify-server':
        batch_notify_server(transport, args.show_stats)
    elif args.mode == 'notify-client':
        threads_spawner(args.threads, notifier, transport, args.messages,
                        args.wait_after_msg, args.timeout)
    elif args.mode == 'rpc-client':
        init_msg(args.messages)
        # Split "topic.server" strings into (topic, server) pairs.
        targets = [target.partition('.')[::2] for target in args.targets]
        start = datetime.datetime.now()
        targets = [
            messaging.Target(topic=topic, server=server_name)
            for topic, server_name in targets
        ]
        spawn_rpc_clients(args.threads, transport, targets,
                          args.wait_after_msg, args.timeout, args.is_cast,
                          args.messages, args.duration)
        time_elapsed = (datetime.datetime.now() - start).total_seconds()

        # Aggregate per-client counters for the final bandwidth report.
        msg_count = 0
        total_bytes = 0
        for client in RPC_CLIENTS:
            msg_count += client.msg_sent
            total_bytes += client.bytes

        LOG.info(
            '%d messages were sent for %d seconds. '
            'Bandwidth was %d msg/sec', msg_count, time_elapsed,
            (msg_count / time_elapsed))
        log_msg = '%s bytes were sent for %d seconds. Bandwidth is %d b/s' % (
            total_bytes, time_elapsed, (total_bytes / time_elapsed))
        LOG.info(log_msg)
        with open('./oslo_res_%s.txt' % args.server, 'a+') as f:
            f.write(log_msg + '\n')

        LOG.info("calls finished, wait %d seconds", args.exit_wait)
        time.sleep(args.exit_wait)
Exemplo n.º 20
0
def init(args, **kwargs):
    """Initialise oslo.config for the easyovs project."""
    version_string = '%%prog %s' % VERSION
    cfg.CONF(args=args,
             project='easyovs',
             version=version_string,
             **kwargs)
Exemplo n.º 21
0
def main():
    """Query the BaGPipe BGP looking glass over HTTP and pretty-print it.

    Reads the API port from the BGP config file when available, parses
    CLI options, fetches the requested looking-glass path (following a
    single ``href`` indirection) and prints the result.
    """
    api_config.register_config()
    try:
        cfg.CONF(args=[],
                 project='bagpipe-looking-glass',
                 default_config_files=['/etc/bagpipe-bgp/bgp.conf'])
        api_port = cfg.CONF.API.port
    except cfg.ConfigFilesNotFoundError:
        # No config file present; fall back to the library default port.
        api_port = api_config.DEFAULT_PORT

    usage = """ %prog [--server <ip>] path to object in looking-glass

e.g.: %prog vpns instances"""
    parser = optparse.OptionParser(usage)

    parser.add_option(
        "--server", dest="server", default="127.0.0.1",
        help="IP address of BaGPipe BGP (optional, default: %default)")

    parser.add_option(
        "--port", dest="port", type="int", default=api_port,
        help="Port of BaGPipe BGP (optional, default: %default)")

    parser.add_option(
        "--prefix", dest="prefix", default=LOOKING_GLASS_BASE,
        help="Looking-glass URL Prefix (optional, default: %default)")

    parser.add_option(
        "-r", "--recurse", dest="recurse", action="store_true", default=False,
        help="Recurse down into the whole looking-glass (disabled by default)")

    (options, args) = parser.parse_args()

    quoted_args = [urllib.parse.quote(arg) for arg in args]
    target_url = "http://%s:%d/%s/%s" % (options.server, options.port,
                                         options.prefix, "/".join(quoted_args))
    try:
        os.environ['NO_PROXY'] = options.server
        response = urllib.request.urlopen(target_url)

        if response.getcode() == 200:
            data = jsonutils.load(response)

            if (isinstance(data, dict) and "href" in data):
                # Follow one level of indirection when the object
                # advertises its canonical location.
                target_url_bis = data["href"]
                response_bis = urllib.request.urlopen(target_url_bis)
                # Bug fix: check the status of the *second* response
                # rather than re-checking the first one's.
                if response_bis.getcode() == 200:
                    target_url = target_url_bis
                    data = jsonutils.load(response_bis)

            pretty_print_recurse(data, 0, options.recurse, target_url,
                                 already_anew_line=True)

    except urllib.error.HTTPError as e:
        if e.code == 404:
            print("No such looking glass path: %s\n(%s)" %
                  (" ".join(quoted_args), target_url))
        else:
            print("Error code %d: %s" % (e.getcode(), e.read()))
        return
    except urllib.error.URLError as e:
        print("No server at http://%s:%d : %s" % (options.server,
                                                  options.port, e))
Exemplo n.º 22
0
def prepare_service(argv=None):
    """Load magnum configuration and set up logging.

    :param argv: optional argument vector; ``argv[0]`` is skipped.
        Defaults to an empty invocation (no CLI arguments).
    """
    # Idiom fix: a mutable default argument (``argv=[]``) is an
    # anti-pattern; use None as the sentinel instead.
    if argv is None:
        argv = []
    cfg.CONF(argv[1:], project='magnum')
    logging.setup('magnum')
Exemplo n.º 23
0
# Redis connection options for the unitymob service.
redis = cfg.OptGroup(name='redis', title="Redis 相关配置")
# The option group must be registered before its options are registered.
conf.register_group(redis)
conf.register_cli_opts([
    cfg.StrOpt('host', default='127.0.0.1'),
    cfg.IntOpt('port', default=6379),
    cfg.StrOpt('password', default='unitymob'),
    cfg.StrOpt('prefix', default='unitymob_'),
], redis)

# rabbitmq
rabbitmq = cfg.OptGroup(name='rabbitmq', title="Rabbitmq 相关配置")
# The option group must be registered before its options are registered.
conf.register_group(rabbitmq)
conf.register_cli_opts([
    cfg.StrOpt('dsn', default=''),
], rabbitmq)

# Pick the config file by environment name (debug/pre/conf); anything
# unrecognised falls back to 'conf'.
# NOTE(review): ``conf.environ`` is used as the environment-variable
# name to look up — confirm this option exists on ``conf``.
env = environ.get(conf.environ, 'conf')
env = env if env in ['debug', 'pre', 'conf'] else 'conf'
conf(default_config_files=[join(dirname(__file__), '.'.join([env, 'ini']))])

logging.setup(conf, "unitymob")

if __name__ == '__main__':
    # Invoke the config container, passing the file(s) to parse (may be several).
    cfg.CONF(default_config_files=['conf.ini'])
    print(cfg.CONF['rabbitmq']['dsn'])
    for i in cfg.CONF.rabbitmq:
        print(i)
Exemplo n.º 24
0
# Option group and options for the delfin Prometheus exporter endpoint.
grp = cfg.OptGroup('PROMETHEUS_EXPORTER')

prometheus_opts = [
    cfg.StrOpt('metric_server_ip',
               default='0.0.0.0',
               help='The exporter server host  ip'),
    cfg.IntOpt('metric_server_port',
               default=8195,
               help='The exporter server port'),
    cfg.StrOpt('metrics_cache_file',
               default='/var/lib/delfin/delfin_exporter'
               '.txt',
               help='The temp cache file used for persisting metrics'),
]
cfg.CONF.register_opts(prometheus_opts, group=grp)
# Parse CLI arguments (e.g. --config-file) for the exporter process.
cfg.CONF(sys.argv[1:])


@app.route("/metrics", methods=['GET'])
def getfile():
    """Serve the cached metrics payload and empty the cache file."""
    cache_path = cfg.CONF.PROMETHEUS_EXPORTER.metrics_cache_file
    with open(cache_path, "r+") as cache:
        payload = cache.read()
        # Clear the cache so each scrape only sees new metrics.
        cache.truncate(0)
    return payload


if __name__ == '__main__':
    # Run the Flask app on the configured exporter address/port.
    app.run(host=cfg.CONF.PROMETHEUS_EXPORTER.metric_server_ip,
            port=cfg.CONF.PROMETHEUS_EXPORTER.metric_server_port)
Exemplo n.º 25
0
def parse_args(args=None):
    """Parse configuration arguments, reporting the system version string."""
    cfg.CONF(args=args, version=sys_constants.VERSION_STRING)
Exemplo n.º 26
0
def parse(args):
    """Load helix configuration and return the parsed config file list."""
    release = version.version_info.release_string()
    cfg.CONF(args=args, project='helix', version='%%prog %s' % release)
    return cfg.CONF.config_file
Exemplo n.º 27
0
def parse_args(args=None):
    """Register the project's options, then parse configuration args."""
    register_opts()
    cfg.CONF(args=args, version=VERSION_STRING)
Exemplo n.º 28
0
def prepare_service(argv=None):
    """Initialise the app configuration from ``argv``.

    Defaults to ``sys.argv`` when no vector is supplied; ``argv[0]``
    (the program name) is skipped.
    """
    argv = sys.argv if argv is None else argv
    cfg.CONF(argv[1:], project='app', validate_default_values=True)
Exemplo n.º 29
0
def parse_args(args=None):
    """Parse args against the default config file and version string."""
    cfg.CONF(args=args,
             version=VERSION_STRING,
             default_config_files=[DEFAULT_CONFIG_FILE_PATH])
Exemplo n.º 30
0
def main():
    """Schedule OpenStack deployment nodes via QUADS and render templates.

    Python 2 code (print statements, ``iteritems``).  Loads the QUADS
    config, queries per-cloud OpenStack settings, schedules controller/
    ceph/compute nodes, tags them in the instackenv file and renders the
    deploy/network/overcloud templates.
    """
    # TODO(sai): Make this relative to QUADS install location
    config_file = os.path.dirname(__file__) + '/../conf/openstack.conf'
    cfg.CONF(default_config_files=[config_file])
    quads = initialize_quads_object()
    # TODO(sai): uncomment below lines to dynamically get inventory for the
    # cloud as we currently hardcoded inventory for testing purposes
    # global inventory
    # inventory = quads.query_cloud_host_types(None, cfg.CONF.cloud)
    # Remove undercloud from inventory
    # undercloud_type = quads.get_host_type(cfg.CONF.undercloud)
    # inventory[undercloud_type] -=1
    version = query_openstack_config(quads, cfg.CONF.cloud, 'version')
    build = query_openstack_config(quads, cfg.CONF.cloud, 'build')
    if cfg.CONF.query:
        # Query-only mode: report version/build and exit.
        print version
        print build
        sys.exit(0)
    # Bail out early when required input files/directories are missing.
    if not os.path.isfile(cfg.CONF.instackenv):
        sys.exit(1)
    if not os.path.isdir(cfg.CONF.templates):
        sys.exit(1)
    controller_count = query_openstack_config(quads, cfg.CONF.cloud,
                                              'controllers')
    compute_count = query_openstack_config(quads, cfg.CONF.cloud, 'computes')
    ceph_count = query_openstack_config(quads, cfg.CONF.cloud, 'ceph')
    # Schedule in the order of controller, ceph and compute
    # NOTE(review): ceph/compute counts are hardcoded to 2 and 3 below even
    # though ceph_count/compute_count were just queried — presumably the
    # deliberate "hardcoded for testing" mentioned in the TODO; confirm.
    controller_nodes = schedule_nodes(controller_count, controller_priority,
                                      'control')
    ceph_nodes = schedule_nodes(2, ceph_priority)
    compute_nodes = schedule_nodes(3, compute_priority)
    try:
        instack_data = load_json(cfg.CONF.instackenv)
    except IOError:
        print("File {} doesn't exist").format(cfg.CONF.instackenv)
        sys.exit(1)
    tag_instack(instack_data, controller_nodes, 'control', quads,
                cfg.CONF.instackenv)
    # NOTE(review): the next two loads read a hardcoded 'sai.json' while the
    # error message names cfg.CONF.instackenv — confirm which is intended.
    try:
        instack_data = load_json('sai.json')
    except IOError:
        print("File {} doesn't exist").format(cfg.CONF.instackenv)
        sys.exit(1)
    tag_instack(instack_data, compute_nodes, 'compute', quads,
                cfg.CONF.instackenv)
    try:
        instack_data = load_json('sai.json')
    except IOError:
        print("File {} doesn't exist").format(cfg.CONF.instackenv)
        sys.exit(1)
    tag_instack(instack_data, ceph_nodes, 'ceph', quads, cfg.CONF.instackenv)
    # Take the (last) host type from the scheduled controllers as the
    # controller type used in the network-environment template.
    for type, count in controller_nodes.iteritems():
        controller_type = type
    controller = {'type': controller_type}
    # NOTE(review): ``composable_role`` is not defined in this function —
    # presumably a module-level mapping populated elsewhere; confirm.
    deploy = {
        'controller_count': composable_role['control'],
        'r930compute_count': composable_role['r930compute'],
        'r730compute_count': composable_role['r730compute'],
        'r630compute_count': composable_role['r630compute'],
        'r620compute_count': composable_role['r620compute'],
        'r6048compute_count': composable_role['6048rcompute'],
        'r6018compute_count': composable_role['6018rcompute'],
        'r930ceph_count': composable_role['r930ceph'],
        'r730ceph_count': composable_role['r730ceph'],
        'r630ceph_count': composable_role['r630ceph'],
        'r620ceph_count': composable_role['r620ceph'],
        'r6048ceph_count': composable_role['6048rceph'],
        'r6018ceph_count': composable_role['6018rceph']
    }
    # Render the jinja2 templates into their deployable counterparts.
    deploy_template = os.path.join(cfg.CONF.templates, 'deploy.yaml.j2')
    overcloud_script_template = os.path.join(cfg.CONF.templates,
                                             'overcloud_deploy.sh.j2')
    deploy_file = os.path.join(cfg.CONF.templates, 'deploy.yaml')
    overcloud_script_file = os.path.join(cfg.CONF.templates,
                                         'overcloud_deploy.sh')
    network_environment_template = os.path.join(cfg.CONF.templates,
                                                'network-environment.yaml.j2')
    network_environment_file = os.path.join(cfg.CONF.templates,
                                            'network-environment.yaml')
    with open(deploy_file, 'w') as f:
        result = render(deploy_template, deploy)
        f.write(result)
    with open(network_environment_file, 'w') as f:
        result = render(network_environment_template, controller)
        f.write(result)
    context = {'version': version}
    with open(overcloud_script_file, 'w') as f:
        result = render(overcloud_script_template, context)
        f.write(result)