Exemplo n.º 1
0
def test_logging_file_directory():
    """Default log directory is <bentoml_home>/logs; a config file can override it."""
    container = BentoMLContainer
    container.config.set(BentoMLConfiguration().as_dict())

    expected_default = os.path.join(mock_bentoml_home(), "logs")
    assert container.logging_file_directory.get() == expected_default

    cfg = tempfile.NamedTemporaryFile(delete=False)
    cfg.write(
        b"""
logging:
  file:
    directory: /tmp/logs
"""
    )
    cfg.close()

    container.config.set(
        BentoMLConfiguration(override_config_file=cfg.name).as_dict()
    )
    assert container.logging_file_directory.get() == "/tmp/logs"

    os.remove(cfg.name)
Exemplo n.º 2
0
def test_yatai_database_url():
    """Default DB URL is sqlite under BentoML home; an override replaces it."""
    container = BentoMLContainer
    container.config.set(BentoMLConfiguration().as_dict())

    default_url = "{}:///{}".format(
        "sqlite", os.path.join(mock_bentoml_home(), "storage.db")
    )
    assert container.yatai_database_url.get() == default_url

    cfg = tempfile.NamedTemporaryFile(delete=False)
    cfg.write(
        b"""
yatai:
  database:
    url: customized_url
"""
    )
    cfg.close()

    container.config.set(
        BentoMLConfiguration(override_config_file=cfg.name).as_dict()
    )
    assert container.yatai_database_url.get() == "customized_url"

    os.remove(cfg.name)
Exemplo n.º 3
0
def inject_dependencies():
    """Inject dependencies and configuration to BentoML packages"""

    from timeit import default_timer as timer

    start = timer()

    logger.debug("Start dependency injection")

    from bentoml.configuration.containers import BentoMLContainer, BentoMLConfiguration

    config_file = get_local_config_file()
    # Fix: get_local_config_file() may return None (no local config present);
    # calling .endswith on None raises AttributeError. Guard first.
    if config_file and config_file.endswith(".yml"):
        configuration = BentoMLConfiguration(override_config_file=config_file)
    else:
        configuration = BentoMLConfiguration()

    container = BentoMLContainer()
    container.config.from_dict(configuration.as_dict())

    from bentoml import marshal, server, tracing, cli

    container.wire(packages=[marshal, server, tracing, cli])

    end = timer()

    logger.debug("Dependency injection completed in %.3f seconds", end - start)
Exemplo n.º 4
0
def test_yatai_logging_path():
    """Yatai web-server log path defaults under home/logs and honors overrides."""
    container = BentoMLContainer
    container.config.set(BentoMLConfiguration().as_dict())

    expected_default = os.path.join(
        mock_bentoml_home(), "logs", "yatai_web_server.log"
    )
    assert container.yatai_logging_path.get() == expected_default

    cfg = tempfile.NamedTemporaryFile(delete=False)
    cfg.write(
        b"""
yatai:
  logging:
    path: /tmp/customized.log
"""
    )
    cfg.close()

    container.config.set(
        BentoMLConfiguration(override_config_file=cfg.name).as_dict()
    )
    assert container.yatai_logging_path.get() == "/tmp/customized.log"

    os.remove(cfg.name)
Exemplo n.º 5
0
def _start_prod_server(
    saved_bundle_path: str,
    config: BentoMLConfiguration,
    port: Optional[int] = None,
    prometheus_lock: Optional[multiprocessing.Lock] = None,
):
    """Run a Gunicorn-backed BentoML API server configured from `config`."""
    logger.info("Starting BentoML API server in production mode..")

    container = BentoMLContainer()
    container.config.from_dict(config.as_dict())
    container.wire(packages=[sys.modules[__name__]])

    from bentoml.server.gunicorn_server import GunicornBentoServer

    # Forward `port` only when the caller supplied one, so the server's own
    # configured default applies otherwise.
    server_kwargs = dict(prometheus_lock=prometheus_lock)
    if port is not None:
        server_kwargs["port"] = port
    GunicornBentoServer(saved_bundle_path, **server_kwargs).run()
Exemplo n.º 6
0
def _start_prod_batching_server(
    saved_bundle_path: str,
    api_server_port: int,
    config: BentoMLConfiguration,
    prometheus_lock: Optional[multiprocessing.Lock] = None,
):
    """Run the Gunicorn marshal (batching) server in front of the API server."""
    logger.info("Starting BentoML Batching server in production mode..")

    container = BentoMLContainer()
    container.config.from_dict(config.as_dict())

    from bentoml import marshal
    from bentoml.server.marshal_server import GunicornMarshalServer

    container.wire(packages=[sys.modules[__name__], marshal])

    # avoid load model before gunicorn fork
    GunicornMarshalServer(
        bundle_path=saved_bundle_path,
        prometheus_lock=prometheus_lock,
        outbound_host="localhost",
        outbound_port=api_server_port,
    ).run()
Exemplo n.º 7
0
def test_legacy_compatibility():
    """Zero/Null values in bento_server config are replaced by legacy defaults."""
    cfg = tempfile.NamedTemporaryFile(delete=False)
    cfg.write(b"""
bento_server:
  port: 0
  workers: 0
  max_request_size: 0
  metrics:
    namespace: Null
adapters:
  image_input:
    extensions: []
""")
    cfg.close()

    config_dict = BentoMLConfiguration(
        default_config_file=cfg.name,
        legacy_compatibility=True,
        validate_schema=False,
    ).as_dict()
    os.remove(cfg.name)

    assert config_dict is not None
    server_cfg = config_dict["bento_server"]
    assert server_cfg["port"] == 5000
    assert server_cfg["workers"] == 1
    assert server_cfg["max_request_size"] == 20971520
    assert server_cfg["metrics"]["namespace"] == "BENTOML"
    expected_extensions = ['.jpg', '.png', '.jpeg', '.tiff', '.webp', '.bmp']
    assert (
        config_dict["adapters"]["image_input"]["default_extensions"]
        == expected_extensions
    )
Exemplo n.º 8
0
def test_legacy_compatibiltiy():
    """Legacy api_server/marshal_server/yatai/instrument keys get legacy defaults.

    NOTE(review): the function name carries a pre-existing typo
    ("compatibiltiy"); it is kept to avoid changing the public test name.
    """
    cfg = tempfile.NamedTemporaryFile(delete=False)
    cfg.write(b"""
api_server:
  port: 0
  max_request_size: 0
marshal_server:
  request_header_flag: Null
yatai:
  url: Null
instrument:
  namespace: Null
""")
    cfg.close()

    config_dict = BentoMLConfiguration(
        default_config_file=cfg.name,
        legacy_compatibility=True,
        validate_schema=False,
    ).as_dict()
    os.remove(cfg.name)

    assert config_dict is not None
    assert config_dict["api_server"]["port"] == 5000
    assert config_dict["api_server"]["max_request_size"] == 20971520
    expected_flag = "BentoML-Is-Merged-Request"
    assert config_dict["marshal_server"]["request_header_flag"] == expected_flag
    assert config_dict["yatai"]["url"] == ""
    assert config_dict["instrument"]["namespace"] == "BENTOML"
Exemplo n.º 9
0
def test_prometheus_multiproc_dir():
    """Prometheus multiproc dir defaults to <bentoml_home>/prometheus_multiproc_dir."""
    container = BentoMLContainer
    container.config.set(BentoMLConfiguration().as_dict())

    expected = os.path.join(mock_bentoml_home(), "prometheus_multiproc_dir")
    assert container.prometheus_multiproc_dir.get() == expected
Exemplo n.º 10
0
def inject_dependencies():
    """Inject dependencies and configuration to BentoML packages"""

    from timeit import default_timer as timer

    start = timer()

    logger.debug("Start dependency injection")

    from bentoml.configuration.containers import BentoMLContainer, BentoMLConfiguration

    config_file = get_local_config_file()
    # Only a .yml local config is treated as an override; None or any other
    # extension falls back to the packaged defaults.
    if config_file and config_file.endswith(".yml"):
        configuration = BentoMLConfiguration(override_config_file=config_file)
    else:
        configuration = BentoMLConfiguration()

    container = BentoMLContainer()
    container.config.from_dict(configuration.as_dict())

    from bentoml import (
        adapters,
        cli,
        marshal,
        saved_bundle,
        server,
        service,
        tracing,
    )
    from bentoml.yatai import yatai_service, yatai_service_impl
    from bentoml.yatai.repository import gcs_repository, s3_repository

    container.wire(
        modules=[
            yatai_service, s3_repository, gcs_repository, yatai_service_impl
        ],
        packages=[
            marshal, server, tracing, cli, adapters, saved_bundle, service
        ],
    )

    logger.debug("Dependency injection completed in %.3f seconds", timer() - start)
Exemplo n.º 11
0
def test_yatai_tls_root_ca_cert():
    """root_ca_cert falls back to client_certificate_file unless set explicitly."""
    container = BentoMLContainer
    container.config.set(BentoMLConfiguration().as_dict())

    assert container.yatai_tls_root_ca_cert.get() is None

    # Only client_certificate_file given: root_ca_cert falls back to it.
    fallback_cfg = tempfile.NamedTemporaryFile(delete=False)
    fallback_cfg.write(
        b"""
yatai:
  remote:
    tls:
      client_certificate_file: value1
"""
    )
    fallback_cfg.close()

    container.config.set(
        BentoMLConfiguration(override_config_file=fallback_cfg.name).as_dict()
    )
    assert container.yatai_tls_root_ca_cert.get() == "value1"
    os.remove(fallback_cfg.name)

    # Explicit root_ca_cert wins over client_certificate_file.
    explicit_cfg = tempfile.NamedTemporaryFile(delete=False)
    explicit_cfg.write(
        b"""
yatai:
  remote:
    tls:
      root_ca_cert: value1
      client_certificate_file: value2
"""
    )
    explicit_cfg.close()

    container.config.set(
        BentoMLConfiguration(override_config_file=explicit_cfg.name).as_dict()
    )
    assert container.yatai_tls_root_ca_cert.get() == "value1"
    os.remove(explicit_cfg.name)
Exemplo n.º 12
0
    def run(api_name, config, run_args, bento=None):
        """Resolve a saved bundle and invoke one of its APIs from the CLI."""
        container = BentoMLContainer()
        container.config.from_dict(
            BentoMLConfiguration(override_config_file=config).as_dict()
        )

        from bentoml import tracing

        container.wire(modules=[tracing])

        # Pull an optional --yatai-url out of the raw CLI args; everything
        # else is forwarded untouched to the API handler.
        parser = argparse.ArgumentParser()
        parser.add_argument('--yatai-url', type=str, default=None)
        parsed_args, _ = parser.parse_known_args(run_args)
        saved_bundle_path = resolve_bundle_path(
            bento, pip_installed_bundle_path, parsed_args.yatai_url
        )

        api = load_bento_service_api(saved_bundle_path, api_name)
        sys.exit(api.handle_cli(run_args))
Exemplo n.º 13
0
def test_api_server_workers():
    """workers: Null resolves to an automatic positive count; explicit value sticks."""
    container = BentoMLContainer

    auto_cfg = tempfile.NamedTemporaryFile(delete=False)
    auto_cfg.write(
        b"""
bento_server:
  workers: Null
"""
    )
    auto_cfg.close()

    container.config.set(
        BentoMLConfiguration(
            default_config_file=auto_cfg.name, validate_schema=False,
        ).as_dict(),
    )
    os.remove(auto_cfg.name)
    auto_workers = container.api_server_workers.get()
    assert auto_workers is not None
    assert auto_workers > 0

    manual_cfg = tempfile.NamedTemporaryFile(delete=False)
    manual_cfg.write(
        b"""
bento_server:
  workers: 42
"""
    )
    manual_cfg.close()

    container.config.set(
        BentoMLConfiguration(
            default_config_file=manual_cfg.name, validate_schema=False,
        ).as_dict(),
    )
    os.remove(manual_cfg.name)
    manual_workers = container.api_server_workers.get()
    assert manual_workers is not None
    assert manual_workers == 42
Exemplo n.º 14
0
def test_customized_bento_bundle_deployment_version():
    """A deployment_version supplied via override file is surfaced verbatim."""
    cfg = tempfile.NamedTemporaryFile(delete=False)
    cfg.write(
        b"""
bento_bundle:
  deployment_version: 0.0.1
"""
    )
    cfg.close()

    container = BentoMLContainer
    container.config.set(
        BentoMLConfiguration(override_config_file=cfg.name).as_dict()
    )

    assert container.bento_bundle_deployment_version.get() == "0.0.1"
    os.remove(cfg.name)
Exemplo n.º 15
0
def test_validate_schema():
    """Unknown top-level keys must be rejected when schema validation is enabled."""
    bad_cfg = tempfile.NamedTemporaryFile(delete=False)
    bad_cfg.write(b"""
invalid_key1:
  invalid_key2: Null
""")
    bad_cfg.close()

    with pytest.raises(BentoMLConfigException) as e:
        BentoMLConfiguration(
            default_config_file=bad_cfg.name,
            validate_schema=True,
            legacy_compatibility=False,
        )
    assert e is not None
    os.remove(bad_cfg.name)
Exemplo n.º 16
0
def _start_dev_server(
    saved_bundle_path: str,
    api_server_port: int,
    config: BentoMLConfiguration,
):
    """Load a saved bundle and serve it with the development API server."""
    logger.info("Starting BentoML API server in development mode..")

    from bentoml.saved_bundle import load_from_dir

    # Load the service before wiring the container, matching the original
    # initialization order.
    bento_service = load_from_dir(saved_bundle_path)

    from bentoml.server.api_server import BentoAPIServer

    container = BentoMLContainer()
    container.config.from_dict(config.as_dict())
    container.wire(packages=[sys.modules[__name__]])

    BentoAPIServer(bento_service).start(port=api_server_port)
Exemplo n.º 17
0
def test_config_file_override():
    """Override-file values win over defaults; keys absent from the override
    keep their default values.

    Fix: removed a leftover debug `print(default_config_file.name)` that ran
    after the temp file had already been deleted.
    """
    default_config_file = tempfile.NamedTemporaryFile(delete=False)
    default_config_file.write(
        b"""
key1:
  key2:
    key3: value3
    key4: value4
    key5: value5
"""
    )
    default_config_file.close()

    override_config_file = tempfile.NamedTemporaryFile(delete=False)
    override_config_file.write(
        b"""
key1:
  key2:
    key3: override3
    key5: override5
"""
    )
    override_config_file.close()

    config = BentoMLConfiguration(
        default_config_file=default_config_file.name,
        override_config_file=override_config_file.name,
        validate_schema=False,
    ).as_dict()

    os.remove(default_config_file.name)
    os.remove(override_config_file.name)

    assert config is not None
    assert config["key1"] is not None
    assert config["key1"]["key2"] is not None
    assert config["key1"]["key2"]["key3"] == "override3"
    assert config["key1"]["key2"]["key4"] == "value4"
    assert config["key1"]["key2"]["key5"] == "override5"
Exemplo n.º 18
0
def _start_dev_proxy(
    saved_bundle_path: str,
    api_server_port: int,
    config: BentoMLConfiguration,
):
    """Run the marshal proxy in front of a dev API server on `api_server_port`."""
    logger.info("Starting BentoML API proxy in development mode..")

    from bentoml import marshal

    container = BentoMLContainer()
    container.config.from_dict(config.as_dict())
    container.wire(packages=[marshal])

    from bentoml.marshal.marshal import MarshalService

    MarshalService(
        saved_bundle_path,
        outbound_host="localhost",
        outbound_port=api_server_port,
    ).fork_start_app()
Exemplo n.º 19
0
def test_override_none_value():
    """Overriding with None must leave the default value (5000) untouched."""
    config = BentoMLConfiguration(legacy_compatibility=False)
    config.override(["bento_server", "port"], None)

    config_dict = config.as_dict()
    assert config_dict is not None
    assert config_dict["bento_server"]["port"] == 5000
Exemplo n.º 20
0
def start_prod_server(
    saved_bundle_path: str,
    port: Optional[int] = None,
    workers: Optional[int] = None,
    timeout: Optional[int] = None,
    enable_microbatch: Optional[bool] = None,
    enable_swagger: Optional[bool] = None,
    mb_max_batch_size: Optional[int] = None,
    mb_max_latency: Optional[int] = None,
    microbatch_workers: Optional[int] = None,
    config_file: Optional[str] = None,
):
    """Start the production API server, optionally fronted by a batching server.

    CLI values left as None do not disturb the configured defaults (the
    sibling test_override_none_value shows override(..., None) is a no-op).
    POSIX-only: the Gunicorn-based production stack requires fork.
    """
    import psutil

    assert (
        psutil.POSIX
    ), "BentoML API Server production mode only supports POSIX platforms"

    # Layer CLI arguments on top of the (optional) override config file.
    config = BentoMLConfiguration(override_config_file=config_file)
    config.override(["api_server", "port"], port)
    config.override(["api_server", "workers"], workers)
    config.override(["api_server", "timeout"], timeout)
    config.override(["api_server", "enable_microbatch"], enable_microbatch)
    config.override(["api_server", "enable_swagger"], enable_swagger)
    config.override(["marshal_server", "max_batch_size"], mb_max_batch_size)
    config.override(["marshal_server", "max_latency"], mb_max_latency)
    config.override(["marshal_server", "workers"], microbatch_workers)

    if config.config['api_server'].get('enable_microbatch'):
        prometheus_lock = multiprocessing.Lock()
        # Reserve a free port, then immediately release it so the child
        # process can bind to it. NOTE(review): a small window exists where
        # another process could grab the port before the child binds.
        with reserve_free_port() as api_server_port:
            pass

        # API server runs in a daemon child process; the batching server
        # runs in this process and proxies requests to it.
        model_server_job = multiprocessing.Process(
            target=_start_prod_server,
            kwargs=dict(
                saved_bundle_path=saved_bundle_path,
                port=api_server_port,
                config=config,
                prometheus_lock=prometheus_lock,
            ),
            daemon=True,
        )
        model_server_job.start()

        try:
            _start_prod_batching_server(
                saved_bundle_path=saved_bundle_path,
                config=config,
                api_server_port=api_server_port,
                prometheus_lock=prometheus_lock,
            )
        finally:
            # Always tear down the API server child when the batching
            # server exits (normally or via exception).
            model_server_job.terminate()
    else:
        # No microbatching: run the API server directly in this process.
        _start_prod_server(saved_bundle_path=saved_bundle_path, config=config)
Exemplo n.º 21
0
def test_default_bento_bundle_deployment_version():
    """A deployment version is always available, even without any override."""
    container = BentoMLContainer
    container.config.set(BentoMLConfiguration().as_dict())

    assert container.bento_bundle_deployment_version.get() is not None
Exemplo n.º 22
0
    def serve_gunicorn(
        port,
        workers,
        timeout,
        bento,
        enable_microbatch,
        mb_max_batch_size,
        mb_max_latency,
        microbatch_workers,
        yatai_url,
        enable_swagger,
        config,
    ):
        """Serve a saved bundle in production mode, applying CLI overrides."""
        if not psutil.POSIX:
            _echo(
                "The `bentoml serve-gunicorn` command is only supported on POSIX. "
                "On windows platform, use `bentoml serve` for local API testing and "
                "docker for running production API endpoint: "
                "https://docs.docker.com/docker-for-windows/ ")
            return
        saved_bundle_path = resolve_bundle_path(
            bento, pip_installed_bundle_path, yatai_url
        )

        # Layer each CLI flag over the (optional) config file; None values
        # leave the configured defaults in place.
        configuration = BentoMLConfiguration(override_config_file=config)
        for keys, value in (
            (["api_server", "port"], port),
            (["api_server", "workers"], workers),
            (["api_server", "timeout"], timeout),
            (["api_server", "enable_microbatch"], enable_microbatch),
            (["api_server", "enable_swagger"], enable_swagger),
            (["marshal_server", "max_batch_size"], mb_max_batch_size),
            (["marshal_server", "max_latency"], mb_max_latency),
            (["marshal_server", "workers"], microbatch_workers),
        ):
            configuration.override(keys, value)

        container = BentoMLContainer()
        container.config.from_dict(configuration.as_dict())

        from bentoml import marshal, server

        container.wire(packages=[marshal, server])

        start_prod_server(saved_bundle_path)
Exemplo n.º 23
0
    def serve(
        port,
        bento,
        enable_microbatch,
        mb_max_batch_size,
        mb_max_latency,
        run_with_ngrok,
        yatai_url,
        enable_swagger,
        config,
    ):
        """Serve a saved bundle in development mode, applying CLI overrides."""
        saved_bundle_path = resolve_bundle_path(
            bento, pip_installed_bundle_path, yatai_url
        )

        # CLI flags override the (optional) config file; None values are no-ops.
        configuration = BentoMLConfiguration(override_config_file=config)
        for keys, value in (
            (["api_server", "port"], port),
            (["api_server", "enable_microbatch"], enable_microbatch),
            (["api_server", "run_with_ngrok"], run_with_ngrok),
            (["api_server", "enable_swagger"], enable_swagger),
            (["marshal_server", "max_batch_size"], mb_max_batch_size),
            (["marshal_server", "max_latency"], mb_max_latency),
        ):
            configuration.override(keys, value)

        container = BentoMLContainer()
        container.config.from_dict(configuration.as_dict())

        from bentoml import marshal, server

        container.wire(packages=[marshal, server])

        start_dev_server(saved_bundle_path)
Exemplo n.º 24
0
def test_override():
    """An explicit override value replaces the default bento_server port."""
    config = BentoMLConfiguration()
    config.override(["bento_server", "port"], 6000)

    config_dict = config.as_dict()
    assert config_dict is not None
    assert config_dict["bento_server"]["port"] == 6000
Exemplo n.º 25
0
def test_override_empty_key():
    """Overriding with an empty key path must raise BentoMLConfigException."""
    config = BentoMLConfiguration()

    with pytest.raises(BentoMLConfigException) as e:
        config.override([], 6000)

    assert e is not None
Exemplo n.º 26
0
def start_dev_server(
    bundle_path: str,
    port: Optional[int] = None,
    enable_microbatch: Optional[bool] = None,
    mb_max_batch_size: Optional[int] = None,
    mb_max_latency: Optional[int] = None,
    run_with_ngrok: Optional[bool] = None,
    enable_swagger: Optional[bool] = None,
    config_file: Optional[str] = None,
):
    """Start the development API server (child process) behind a marshal proxy.

    CLI values left as None do not disturb the configured defaults.

    Fix: replaced deprecated `Thread.setDaemon(True)` with the `daemon`
    attribute (setDaemon is deprecated since Python 3.10).
    """
    # Layer CLI overrides on top of the (optional) config file.
    config = BentoMLConfiguration(override_config_file=config_file)
    config.override(["api_server", "port"], port)
    config.override(["api_server", "enable_microbatch"], enable_microbatch)
    config.override(["api_server", "enable_swagger"], enable_swagger)
    config.override(["marshal_server", "max_batch_size"], mb_max_batch_size)
    config.override(["marshal_server", "max_latency"], mb_max_latency)

    if run_with_ngrok:
        from threading import Timer

        from bentoml.utils.flask_ngrok import start_ngrok

        # Open the ngrok tunnel shortly after startup, in a daemon thread so
        # it never blocks interpreter shutdown.
        thread = Timer(1, start_ngrok, args=(port, ))
        thread.daemon = True
        thread.start()

    with reserve_free_port() as api_server_port:
        # start server right after port released
        #  to reduce potential race

        model_server_proc = multiprocessing.Process(
            target=_start_dev_server,
            kwargs=dict(
                api_server_port=api_server_port,
                saved_bundle_path=bundle_path,
                config=config,
            ),
            daemon=True,
        )
    model_server_proc.start()

    try:
        _start_dev_proxy(
            api_server_port=api_server_port,
            saved_bundle_path=bundle_path,
            config=config,
        )
    finally:
        # Always tear down the API server child when the proxy exits.
        model_server_proc.terminate()
Exemplo n.º 27
0
def test_override():
    """An explicit override value replaces the default api_server port."""
    config = BentoMLConfiguration(legacy_compatibility=False)
    config.override(["api_server", "port"], 6000)

    config_dict = config.as_dict()
    assert config_dict is not None
    assert config_dict["api_server"]["port"] == 6000
Exemplo n.º 28
0
def test_override_schema_violation():
    """A type-violating override (string for an int port) must be rejected."""
    config = BentoMLConfiguration()

    with pytest.raises(BentoMLConfigException) as e:
        config.override(["api_server", "port"], "non-integer")

    assert e is not None
Exemplo n.º 29
0
def test_override_empty_key():
    """An empty override key path must raise BentoMLConfigException."""
    config = BentoMLConfiguration(legacy_compatibility=False)

    with pytest.raises(BentoMLConfigException) as e:
        config.override([], 6000)

    assert e is not None
Exemplo n.º 30
0
def test_override_nonexist_key():
    """Overriding a key path absent from the schema must be rejected."""
    config = BentoMLConfiguration()

    with pytest.raises(BentoMLConfigException) as e:
        config.override(["non-existent", "non-existent"], 6000)

    assert e is not None