Example #1
def main():
    magnum_service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    LOG.info(_LI('Starting server in PID %s'), os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, logging.DEBUG)

    cfg.CONF.import_opt('topic', 'magnum.conductor.config', group='conductor')

    conductor_id = short_id.generate_id()
    endpoints = [
        indirection_api.Handler(),
        docker_conductor.Handler(),
        k8s_conductor.Handler(),
        bay_conductor.Handler(),
        conductor_listener.Handler(),
        ca_conductor.Handler(),
    ]

    server = rpc_service.Service.create(cfg.CONF.conductor.topic,
                                        conductor_id, endpoints,
                                        binary='magnum-conductor')
    launcher = service.launch(cfg.CONF, server)
    launcher.wait()
Example #2
def main():
    magnum_service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    LOG.info('Starting server in PID %s', os.getpid())
    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, logging.DEBUG)

    conductor_id = short_id.generate_id()
    endpoints = [
        indirection_api.Handler(),
        cluster_conductor.Handler(),
        conductor_listener.Handler(),
        ca_conductor.Handler(),
    ]

    server = rpc_service.Service.create(CONF.conductor.topic,
                                        conductor_id,
                                        endpoints,
                                        binary='magnum-conductor')
    workers = CONF.conductor.workers
    if not workers:
        workers = processutils.get_worker_count()
    launcher = service.launch(CONF, server, workers=workers)
    launcher.wait()
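
Example #2 and several later examples fall back to processutils.get_worker_count() from oslo.concurrency when the conductor workers option is unset. As a rough sketch (a reimplementation for illustration, not the library source), that fallback resolves to the host CPU count:

import multiprocessing


def get_worker_count():
    # Mirrors oslo.concurrency's behaviour: use the CPU count when it can
    # be determined, otherwise fall back to a single worker.
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        return 1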
Example #3
def main():
    service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    # Enable object backporting via the conductor
    base.MagnumObject.indirection_api = base.MagnumObjectIndirectionAPI()

    app = api_app.load_app()

    # Setup OSprofiler for WSGI service
    profiler.setup('magnum-api', CONF.host)

    # SSL configuration
    use_ssl = CONF.api.enabled_ssl

    # Create the WSGI server and start it
    host, port = CONF.api.host, CONF.api.port

    LOG.info(_LI('Starting server in PID %s'), os.getpid())
    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, logging.DEBUG)

    LOG.info(_LI('Serving on %(proto)s://%(host)s:%(port)s'),
             dict(proto="https" if use_ssl else "http", host=host, port=port))

    workers = CONF.api.workers
    if not workers:
        workers = processutils.get_worker_count()
    LOG.info(_LI('Server will handle each request in a new process up to'
                 ' %s concurrent processes'), workers)
    serving.run_simple(host, port, app, processes=workers,
                       ssl_context=_get_ssl_configs(use_ssl))
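
The _get_ssl_configs helper used by the API examples is not shown. A minimal sketch of what it could look like, assuming the certificate and key paths come from the [api] group as ssl_cert_file and ssl_key_file (these option names are assumptions) and that CONF is the same config object used in the example:

import os


def _get_ssl_configs(use_ssl):
    # Return a (cert, key) tuple usable as werkzeug's ssl_context,
    # or None to serve plain HTTP.
    if not use_ssl:
        return None

    cert_file = CONF.api.ssl_cert_file
    key_file = CONF.api.ssl_key_file

    if not cert_file or not os.path.exists(cert_file):
        raise RuntimeError("Unable to find cert_file: %s" % cert_file)
    if not key_file or not os.path.exists(key_file):
        raise RuntimeError("Unable to find key_file: %s" % key_file)

    return cert_file, key_file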
Example #4
def main():
    magnum_service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    LOG.info('Starting server in PID %s', os.getpid())
    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, logging.DEBUG)

    conductor_id = short_id.generate_id()
    endpoints = [
        indirection_api.Handler(),
        cluster_conductor.Handler(),
        conductor_listener.Handler(),
        ca_conductor.Handler(),
        federation_conductor.Handler(),
    ]

    server = rpc_service.Service.create(CONF.conductor.topic,
                                        conductor_id, endpoints,
                                        binary='magnum-conductor')
    workers = CONF.conductor.workers
    if not workers:
        workers = processutils.get_worker_count()
    launcher = service.launch(CONF, server, workers=workers)

    # NOTE(mnaser): We create the periodic tasks here so that they
    #               can be attached to the main process and not
    #               duplicated in all the children if multiple
    #               workers are being used.
    server.create_periodic_tasks()
    server.start()

    launcher.wait()
Example #5
def main():
    service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    # Enable object backporting via the conductor
    base.MagnumObject.indirection_api = base.MagnumObjectIndirectionAPI()

    app = api_app.setup_app()

    # Create the WSGI server and start it
    host, port = cfg.CONF.api.host, cfg.CONF.api.port
    srv = simple_server.make_server(host, port, app)

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    if host == '0.0.0.0':
        LOG.info(_LI('serving on 0.0.0.0:%(port)s, '
                     'view at http://127.0.0.1:%(port)s') %
                 dict(port=port))
    else:
        LOG.info(_LI('serving on http://%(host)s:%(port)s') %
                 dict(host=host, port=port))

    srv.serve_forever()
Example #6
def main():
    magnum_service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    LOG.info(_LI('Starting server in PID %s'), os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, logging.DEBUG)

    cfg.CONF.import_opt('topic', 'magnum.conductor.config', group='conductor')

    conductor_id = short_id.generate_id()
    endpoints = [
        indirection_api.Handler(),
        cluster_conductor.Handler(),
        conductor_listener.Handler(),
        ca_conductor.Handler(),
    ]

    server = rpc_service.Service.create(cfg.CONF.conductor.topic,
                                        conductor_id,
                                        endpoints,
                                        binary='magnum-conductor')
    launcher = service.launch(cfg.CONF, server)
    launcher.wait()
Example #7
def main():
    service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    # Enable object backporting via the conductor
    base.MagnumObject.indirection_api = base.MagnumObjectIndirectionAPI()

    app = api_app.load_app()

    # SSL configuration
    use_ssl = CONF.api.enabled_ssl

    # Create the WSGI server and start it
    host, port = CONF.api.host, CONF.api.port

    LOG.info(_LI('Starting server in PID %s'), os.getpid())
    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, logging.DEBUG)

    LOG.info(_LI('Serving on %(proto)s://%(host)s:%(port)s'),
             dict(proto="https" if use_ssl else "http", host=host, port=port))

    serving.run_simple(host, port, app,
                       ssl_context=_get_ssl_configs(use_ssl))
Example #8
def main():
    magnum_service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    cfg.CONF.import_opt('topic', 'magnum.conductor.config', group='conductor')

    conductor_id = short_id.generate_id()
    endpoints = [
        docker_conductor.Handler(),
        k8s_conductor.Handler(),
        bay_conductor.Handler(),
        x509keypair_conductor.Handler(),
        conductor_listener.Handler(),
        ca_conductor.Handler(),
    ]

    if (not os.path.isfile(cfg.CONF.bay.k8s_atomic_template_path)
            and not os.path.isfile(cfg.CONF.bay.k8s_coreos_template_path)):
        LOG.error(_LE("The Heat template can not be found for either k8s "
                      "atomic %(atomic_template)s or coreos "
                      "(coreos_template)%s. Install template first if you "
                      "want to create bay.") %
                  {'atomic_template': cfg.CONF.bay.k8s_atomic_template_path,
                   'coreos_template': cfg.CONF.bay.k8s_coreos_template_path})

    server = rpc_service.Service.create(cfg.CONF.conductor.topic,
                                        conductor_id, endpoints)
    launcher = service.launch(cfg.CONF, server)
    launcher.wait()
Example #9
def main():
    magnum_service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    LOG.info(_LI('Starting server in PID %s'), os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, logging.DEBUG)

    cfg.CONF.import_opt('topic', 'magnum.conductor.config', group='conductor')

    conductor_id = short_id.generate_id()
    endpoints = [
        indirection_api.Handler(),
        docker_conductor.Handler(),
        k8s_conductor.Handler(),
        bay_conductor.Handler(),
        conductor_listener.Handler(),
        ca_conductor.Handler(),
    ]

    if (not os.path.isfile(cfg.CONF.bay.k8s_atomic_template_path)
            and not os.path.isfile(cfg.CONF.bay.k8s_coreos_template_path)):
        LOG.error(_LE("The Heat template can not be found for either k8s "
                      "atomic %(atomic_template)s or coreos "
                      "%(coreos_template)s. Install template first if you "
                      "want to create bay.") %
                  {'atomic_template': cfg.CONF.bay.k8s_atomic_template_path,
                   'coreos_template': cfg.CONF.bay.k8s_coreos_template_path})

    server = rpc_service.Service.create(cfg.CONF.conductor.topic,
                                        conductor_id, endpoints,
                                        binary='magnum-conductor')
    launcher = service.launch(cfg.CONF, server)
    launcher.wait()
Example #10
def main():
    magnum_service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    LOG.info('Starting server in PID %s', os.getpid())
    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, logging.DEBUG)

    conductor_id = short_id.generate_id()
    endpoints = [
        indirection_api.Handler(),
        cluster_conductor.Handler(),
        conductor_listener.Handler(),
        ca_conductor.Handler(),
        federation_conductor.Handler(),
    ]

    server = rpc_service.Service.create(CONF.conductor.topic,
                                        conductor_id,
                                        endpoints,
                                        binary='magnum-conductor')
    workers = CONF.conductor.workers
    if not workers:
        workers = processutils.get_worker_count()
    launcher = service.launch(CONF, server, workers=workers)

    # NOTE(mnaser): We create the periodic tasks here so that they
    #               can be attached to the main process and not
    #               duplicated in all the children if multiple
    #               workers are being used.
    server.create_periodic_tasks()
    server.start()

    launcher.wait()
Example #11
def main():
    service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    app = api_app.setup_app()

    # Create the WSGI server and start it
    host, port = cfg.CONF.api.host, cfg.CONF.api.port
    srv = simple_server.make_server(host, port, app)

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    if host == '0.0.0.0':
        LOG.info(
            _LI('serving on 0.0.0.0:%(port)s, '
                'view at http://127.0.0.1:%(port)s') % dict(port=port))
    else:
        LOG.info(
            _LI('serving on http://%(host)s:%(port)s') %
            dict(host=host, port=port))

    srv.serve_forever()
Example #12
def main():
    service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    # Enable object backporting via the conductor
    base.MagnumObject.indirection_api = base.MagnumObjectIndirectionAPI()

    app = api_app.load_app()

    # Setup OSprofiler for WSGI service
    profiler.setup('magnum-api', CONF.host)

    # SSL configuration
    use_ssl = CONF.api.enabled_ssl

    # Create the WSGI server and start it
    host, port = CONF.api.host, CONF.api.port

    LOG.info('Starting server in PID %s' % os.getpid())
    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, logging.DEBUG)

    LOG.info('Serving on %(proto)s://%(host)s:%(port)s' %
             dict(proto="https" if use_ssl else "http", host=host, port=port))

    workers = CONF.api.workers
    if not workers:
        workers = processutils.get_worker_count()
    LOG.info('Server will handle each request in a new process up to'
             ' %s concurrent processes' % workers)
    serving.run_simple(host, port, app, processes=workers,
                       ssl_context=_get_ssl_configs(use_ssl))
Example #13
def main():
    service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    # Enable object backporting via the conductor
    base.MagnumObject.indirection_api = base.MagnumObjectIndirectionAPI()

    app = api_app.load_app()

    # Setup OSprofiler for WSGI service
    profiler.setup('magnum-api', CONF.host)

    # SSL configuration
    use_ssl = CONF.api.enabled_ssl

    # Create the WSGI server and start it
    host, port = CONF.api.host, CONF.api.port

    LOG.info(_LI('Starting server in PID %s'), os.getpid())
    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, logging.DEBUG)

    LOG.info(_LI('Serving on %(proto)s://%(host)s:%(port)s'),
             dict(proto="https" if use_ssl else "http", host=host, port=port))

    serving.run_simple(host, port, app,
                       ssl_context=_get_ssl_configs(use_ssl))
Example #14
    def test_prepare_service_with_argv_none(self, mock_parse, mock_set,
                                            mock_setup, mock_reg):
        argv = None
        mock_parse.side_effect = lambda *args, **kwargs: None

        service.prepare_service(argv)

        mock_parse.assert_called_once_with([])
        mock_setup.assert_called_once_with(base.CONF, 'magnum')
        mock_reg.assert_called_once_with(base.CONF)
        mock_set.assert_called_once_with()
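
The decorator stack for the test above is not shown. A hedged sketch of how the four mock arguments are usually wired up; the patch targets below are placeholders (assumptions inferred from the assertions), and the point is only that decorators bind bottom-up, so the innermost one supplies mock_parse:

from unittest import mock


class TestPrepareService(base.TestCase):

    @mock.patch('magnum.common.service.log.register_options')  # -> mock_reg
    @mock.patch('magnum.common.service.log.setup')              # -> mock_setup
    @mock.patch('magnum.common.service.log.set_defaults')       # -> mock_set
    @mock.patch('magnum.common.service.config.parse_args')      # -> mock_parse
    def test_prepare_service_with_argv_none(self, mock_parse, mock_set,
                                            mock_setup, mock_reg):
        ...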
Example #15
def main():
    service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    # Enable object backporting via the conductor
    base.MagnumObject.indirection_api = base.MagnumObjectIndirectionAPI()

    app = api_app.load_app()

    # Create the WSGI server and start it
    host, port = cfg.CONF.api.host, cfg.CONF.api.port
    srv = simple_server.make_server(host, port, app)

    LOG.info(_LI('Starting server in PID %s'), os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, logging.DEBUG)

    LOG.info(_LI('serving on http://%(host)s:%(port)s'),
             dict(host=host, port=port))

    srv.serve_forever()
Example #16
def main():
    service.prepare_service(sys.argv)

    app = api_app.setup_app()

    # Create the WSGI server and start it
    host, port = cfg.CONF.api.host, cfg.CONF.api.port
    srv = simple_server.make_server(host, port, app)

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    if host == '0.0.0.0':
        LOG.info(_LI('serving on 0.0.0.0:%(port)s, '
                     'view at http://127.0.0.1:%(port)s') %
                 dict(port=port))
    else:
        LOG.info(_LI('serving on http://%(host)s:%(port)s') %
                 dict(host=host, port=port))

    srv.serve_forever()
Example #17
def main():
    magnum_service.prepare_service(sys.argv)

    LOG.info(_LI("Starting server in PID %s") % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    cfg.CONF.import_opt("topic", "magnum.conductor.config", group="conductor")

    conductor_id = short_id.generate_id()
    endpoints = [
        docker_conductor.Handler(),
        k8s_conductor.Handler(),
        bay_conductor.Handler(),
        conductor_listener.Handler(),
    ]

    if not os.path.isfile(cfg.CONF.bay.k8s_atomic_template_path) and not os.path.isfile(
        cfg.CONF.bay.k8s_coreos_template_path
    ):
        LOG.error(
            _LE(
                "The Heat template can not be found for either k8s "
                "atomic %(atomic_template)s or coreos "
                "(coreos_template)%s. Install template first if you "
                "want to create bay."
            )
            % {
                "atomic_template": cfg.CONF.bay.k8s_atomic_template_path,
                "coreos_template": cfg.CONF.bay.k8s_coreos_template_path,
            }
        )

    server = rpc_service.Service.create(cfg.CONF.conductor.topic, conductor_id, endpoints)
    launcher = service.launch(cfg.CONF, server)
    launcher.wait()
Example #18
def build_wsgi_app(argv=None):
    service.prepare_service(sys.argv)
    return load_app()
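
build_wsgi_app returns a standard WSGI callable, so it can be handed to any WSGI server instead of the development servers used in the earlier examples. A hedged usage sketch (the bind address and port are assumptions for illustration):

from wsgiref import simple_server

application = build_wsgi_app()
server = simple_server.make_server('0.0.0.0', 9511, application)
server.serve_forever()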