Example 1
def _run_server():
    host = cfg.CONF.stream.host
    port = cfg.CONF.stream.port

    LOG.info(
        "(PID=%s) ST2 Stream API is serving on http://%s:%s.", os.getpid(), host, port
    )

    max_pool_size = eventlet.wsgi.DEFAULT_MAX_SIMULTANEOUS_REQUESTS
    worker_pool = eventlet.GreenPool(max_pool_size)
    sock = eventlet.listen((host, port))

    def queue_shutdown(signal_number, stack_frame):
        eventlet.spawn_n(
            shutdown_server_kill_pending_requests,
            sock=sock,
            worker_pool=worker_pool,
            wait_time=WSGI_SERVER_REQUEST_SHUTDOWN_TIME,
        )

    # We register a custom SIGINT handler which allows us to kill long running active requests.
    # Note: Eventually we will support draining (waiting for short-running requests), but we
    # will still want to kill long running stream requests.
    register_stream_signal_handlers(handler_func=queue_shutdown)

    wsgi.server(sock, app.setup_app(), custom_pool=worker_pool)
    return 0
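
The snippet relies on a few names defined elsewhere in the module: cfg and LOG come from the service's config and logging setup, app.setup_app() builds the WSGI application, and the shutdown helpers are StackStorm-internal. Below is a minimal sketch of what those signal-handling helpers could look like; the function bodies, the wait_time default, and the use of SystemExit are assumptions for illustration only, not the project's actual implementation.

# Minimal sketch only: assumed shapes for the helpers used above,
# not StackStorm's actual implementation.
import signal

import eventlet

# Assumed grace period (seconds) before still-running requests are killed.
WSGI_SERVER_REQUEST_SHUTDOWN_TIME = 2


def register_stream_signal_handlers(handler_func):
    # Install handler_func as the SIGINT handler so Ctrl+C / `kill -INT`
    # triggers the asynchronous shutdown path instead of a plain KeyboardInterrupt.
    signal.signal(signal.SIGINT, handler_func)


def shutdown_server_kill_pending_requests(sock, worker_pool, wait_time=2):
    # Give in-flight requests a short grace period to complete.
    eventlet.sleep(wait_time)

    # Kill any request greenthreads still running in the pool (for the stream
    # service these are typically long-lived event-stream connections).
    for green_thread in list(worker_pool.coroutines_running):
        green_thread.kill()

    # Close the listening socket so wsgi.server() stops accepting connections,
    # then exit the process.
    sock.close()
    raise SystemExit(0)

Spawning the shutdown work with eventlet.spawn_n keeps the signal handler itself tiny, which is the usual pattern: only minimal work should happen inside a Python signal handler, and the actual teardown runs in its own green thread outside the worker pool.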