Example #1
0
def load_dependencies():
    """Launch TensorFlow Model Server for the exported SavedModel.

    Starts the server as a detached child process on the fixed
    TF_SERVING_PORT; does not wait for it to come up.
    """
    env = cs.HostingEnvironment()
    model_base_path = os.path.join(env.model_dir, 'export/Servo')
    server_args = [
        'tensorflow_model_server',
        '--port={}'.format(TF_SERVING_PORT),
        '--model_name={}'.format(GENERIC_MODEL_NAME),
        '--model_base_path={}'.format(model_base_path),
    ]
    subprocess.Popen(server_args)
Example #2
0
def transformer(user_module):
    """Build a Transformer backed by a gRPC proxy client to TF Serving.

    Blocks until the model has loaded (or the load timeout elapses),
    then wires the user module and the proxy client together.
    """
    env = cs.HostingEnvironment()

    # Pick a safe port from the configured range, else fall back to the default.
    if env.port_range:
        serving_port = int(cs.Server.next_safe_port(env.port_range))
    else:
        serving_port = DEFAULT_TF_SERVING_PORT

    client = proxy_client.GRPCProxyClient(serving_port)
    _wait_model_to_load(client, TF_SERVING_MAXIMUM_LOAD_MODEL_TIME_IN_SECONDS)

    return Transformer.from_module(user_module, client)
Example #3
0
def load_dependencies():
    """Start TensorFlow Model Server on a dynamically chosen port.

    Uses the hosting environment's port range when available, otherwise the
    default serving port; launches the server as a detached child process.
    """
    env = cs.HostingEnvironment()

    # Choose a safe port from the range, else the default.
    if env.port_range:
        serving_port = cs.Server.next_safe_port(env.port_range)
    else:
        serving_port = DEFAULT_TF_SERVING_PORT

    model_base_path = os.path.join(env.model_dir, 'export/Servo')
    subprocess.Popen([
        'tensorflow_model_server',
        '--port={}'.format(serving_port),
        '--model_name={}'.format(GENERIC_MODEL_NAME),
        '--model_base_path={}'.format(model_base_path),
    ])
Example #4
0
    def start(cls):
        """Prepare the container for model serving, configure and launch the model server stack.

        Loads framework dependencies, optionally starts nginx as a reverse
        proxy, launches gunicorn, installs a SIGTERM handler, and blocks
        until one of the managed processes exits.
        """

        logger.info("reading config")
        env = cs.HostingEnvironment()
        env.start_metrics_if_enabled()

        if env.user_script_name:
            Server._download_user_module(env)

        logger.info('loading framework-specific dependencies')
        framework = cs.ContainerEnvironment.load_framework()
        framework.load_dependencies()

        nginx_pid = 0
        gunicorn_bind_address = '0.0.0.0:8080'
        if env.use_nginx:
            logger.info("starting nginx")
            nginx_conf = pkg_resources.resource_filename(
                'container_support', 'etc/nginx.conf')
            # Route nginx logs to the container's stdout/stderr streams.
            subprocess.check_call(
                ['ln', '-sf', '/dev/stdout', '/var/log/nginx/access.log'])
            subprocess.check_call(
                ['ln', '-sf', '/dev/stderr', '/var/log/nginx/error.log'])
            # With nginx in front, gunicorn listens on a unix socket instead.
            gunicorn_bind_address = 'unix:/tmp/gunicorn.sock'
            nginx_pid = subprocess.Popen(['nginx', '-c', nginx_conf]).pid

        logger.info("starting gunicorn")
        gunicorn_pid = subprocess.Popen([
            "gunicorn", "--timeout",
            str(env.model_server_timeout), "-k", "gevent", "-b",
            gunicorn_bind_address, "--worker-connections",
            str(1000 * env.model_server_workers), "-w",
            str(env.model_server_workers), "container_support.wsgi:app"
        ]).pid

        signal.signal(
            signal.SIGTERM,
            lambda a, b: Server._sigterm_handler(nginx_pid, gunicorn_pid))

        # FIX: previously, when nginx was disabled, `children` was the bare
        # int gunicorn_pid, so `pid in children` below raised
        # "TypeError: argument of type 'int' is not iterable".
        # Always build a set of the pids we are waiting on.
        children = {nginx_pid, gunicorn_pid} if nginx_pid else {gunicorn_pid}
        logger.info("inference server started. waiting on processes: %s" %
                    children)

        # Reap children until one of ours dies, then tear everything down.
        while True:
            pid, _ = os.wait()
            if pid in children:
                break

        Server._sigterm_handler(nginx_pid, gunicorn_pid)
Example #5
0
    def from_env(cls):
        """Build a Server instance configured from the hosting environment.

        Installs user requirements, imports the optional user module, asks
        the framework for a transformer, and wraps it in a Server.
        """
        cs.configure_logging()
        logger.info("creating Server instance")
        env = cs.HostingEnvironment()

        env.pip_install_requirements()
        logger.info("importing user module")
        # A user module is only present when a script name was configured.
        if env.user_script_name:
            user_module = env.import_user_module()
        else:
            user_module = None

        framework = cs.ContainerEnvironment.load_framework()
        model_transformer = framework.transformer(user_module)

        server = Server("model server", model_transformer)
        logger.info("returning initialized server")
        return server