Example #1
def server(
    backend_store_uri,
    default_artifact_root,
    serve_artifacts,
    artifacts_only,
    artifacts_destination,
    host,
    port,
    workers,
    static_prefix,
    gunicorn_opts,
    waitress_opts,
    expose_prometheus,
):
    """
    Run the MLflow tracking server.

    The server listens on http://localhost:5000 by default and only accepts connections
    from the local machine. To let the server accept connections from other machines, pass
    ``--host 0.0.0.0`` to listen on all network interfaces
    (or a specific interface address).
    """
    from mlflow.server import _run_server
    from mlflow.server.handlers import initialize_backend_stores

    _validate_server_args(gunicorn_opts=gunicorn_opts, workers=workers, waitress_opts=waitress_opts)

    # Ensure that both backend_store_uri and default_artifact_root are set correctly.
    if not backend_store_uri:
        backend_store_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH

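    # With `--serve-artifacts` enabled, artifact requests are proxied through
    # the tracking server instead of going to the storage location directly,
    # so the resolved root may be an `mlflow-artifacts:` URI.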
    default_artifact_root = resolve_default_artifact_root(
        serve_artifacts, default_artifact_root, backend_store_uri
    )

    try:
        initialize_backend_stores(backend_store_uri, default_artifact_root)
    except Exception as e:
        _logger.error("Error initializing backend store")
        _logger.exception(e)
        sys.exit(1)

    try:
        _run_server(
            backend_store_uri,
            default_artifact_root,
            serve_artifacts,
            artifacts_only,
            artifacts_destination,
            host,
            port,
            static_prefix,
            workers,
            gunicorn_opts,
            waitress_opts,
            expose_prometheus,
        )
    except ShellCommandException:
        eprint("Running the mlflow server failed. Please see the logs above for details.")
        sys.exit(1)
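
A minimal usage sketch (not from the source): launching this server command through the installed ``mlflow`` CLI from Python. The ``--backend-store-uri``, ``--host``, and ``--port`` options map onto the parameters above; the SQLite URI is a hypothetical illustration, not a value from the source.

import subprocess

# A minimal sketch, assuming the `mlflow` CLI entry point is on PATH.
# `sqlite:///mlflow.db` is a hypothetical backend store URI, not a default.
subprocess.run(
    [
        "mlflow", "server",
        "--backend-store-uri", "sqlite:///mlflow.db",
        "--host", "0.0.0.0",  # accept connections from other machines
        "--port", "5000",
    ],
    check=True,  # raise CalledProcessError on a non-zero exit
)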
Example #2
def ui(
    backend_store_uri,
    default_artifact_root,
    serve_artifacts,
    artifacts_destination,
    port,
    host,
):
    """
    Launch the MLflow tracking UI for local viewing of run results. To launch a production
    server, use the "mlflow server" command instead.

    The UI is visible at http://localhost:5000 by default and only accepts connections
    from the local machine. To let the UI server accept connections from other machines,
    pass ``--host 0.0.0.0`` to listen on all network interfaces (or a specific interface
    address).
    """
    from mlflow.server import _run_server
    from mlflow.server.handlers import initialize_backend_stores

    # Ensure that both backend_store_uri and default_artifact_root are set correctly.
    if not backend_store_uri:
        backend_store_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH

    # `resolve_to_local=True` lets the artifact root fall back to the local
    # filesystem default when it cannot be derived from the backend store URI.
    default_artifact_root = resolve_default_artifact_root(
        serve_artifacts, default_artifact_root, backend_store_uri, resolve_to_local=True
    )

    try:
        initialize_backend_stores(backend_store_uri, default_artifact_root)
    except Exception as e:
        _logger.error("Error initializing backend store")
        _logger.exception(e)
        sys.exit(1)

    # TODO: We eventually want to disable the write path in this version of the server.
    try:
        _run_server(
            backend_store_uri,
            default_artifact_root,
            serve_artifacts,
            False,  # artifacts_only
            artifacts_destination,
            host,
            port,
            None,  # static_prefix
            1,  # workers
        )
    except ShellCommandException:
        eprint("Running the mlflow server failed. Please see the logs above for details.")
        sys.exit(1)
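
Similarly, a hedged sketch for the UI command, assuming the installed ``mlflow`` CLI; as the docstring notes, the UI accepts only local connections unless ``--host 0.0.0.0`` is passed.

import subprocess

# A minimal sketch: launch the local tracking UI on the default port (5000).
subprocess.run(["mlflow", "ui", "--port", "5000"], check=True)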