def server(
    backend_store_uri,
    default_artifact_root,
    serve_artifacts,
    artifacts_only,
    artifacts_destination,
    host,
    port,
    workers,
    static_prefix,
    gunicorn_opts,
    waitress_opts,
    expose_prometheus,
):
    """
    Run the MLflow tracking server.

    The server which listen on http://localhost:5000 by default, and only accept
    connections from the local machine. To let the server accept connections from
    other machines, you will need to pass ``--host 0.0.0.0`` to listen on all
    network interfaces
    (or a specific interface address).
    """
    from mlflow.server import _run_server
    from mlflow.server.handlers import initialize_backend_stores

    _validate_server_args(gunicorn_opts=gunicorn_opts, workers=workers, waitress_opts=waitress_opts)

    # Fall back to the local default when no backend store was supplied, then
    # derive a compatible artifact root from the chosen backend.
    backend_store_uri = backend_store_uri or DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
    default_artifact_root = resolve_default_artifact_root(
        serve_artifacts, default_artifact_root, backend_store_uri
    )

    # Stores must be reachable before a worker process is ever forked; fail fast
    # here rather than inside gunicorn/waitress.
    try:
        initialize_backend_stores(backend_store_uri, default_artifact_root)
    except Exception as e:
        _logger.error("Error initializing backend store")
        _logger.exception(e)
        sys.exit(1)

    try:
        _run_server(
            backend_store_uri,
            default_artifact_root,
            serve_artifacts,
            artifacts_only,
            artifacts_destination,
            host,
            port,
            static_prefix,
            workers,
            gunicorn_opts,
            waitress_opts,
            expose_prometheus,
        )
    except ShellCommandException:
        eprint("Running the mlflow server failed. Please see the logs above for details.")
        sys.exit(1)
def ui(backend_store_uri, default_artifact_root, port):
    """
    Launch the MLflow tracking UI for local viewing of run results. To launch a production
    server, use the "mlflow server" command instead.

    The UI will be visible at http://localhost:5000 by default.
    """
    # Substitute local defaults for any unset store/artifact locations. A
    # non-local backend store cannot double as the artifact root, so it falls
    # back to the local default path instead.
    if not backend_store_uri:
        backend_store_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH

    if not default_artifact_root:
        default_artifact_root = (
            backend_store_uri
            if is_local_uri(backend_store_uri)
            else DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
        )

    # Verify the stores are usable before starting the server process.
    try:
        initialize_backend_stores(backend_store_uri, default_artifact_root)
    except Exception as e:  # pylint: disable=broad-except
        _logger.error("Error initializing backend store")
        _logger.exception(e)
        sys.exit(1)

    # TODO: We eventually want to disable the write path in this version of the server.
    try:
        _run_server(backend_store_uri, default_artifact_root, "127.0.0.1", port, None, 1)
    except ShellCommandException:
        eprint("Running the mlflow server failed. Please see the logs above for details.")
        sys.exit(1)
def ui(backend_store_uri, default_artifact_root, serve_artifacts, artifacts_destination, port, host):
    """
    Launch the MLflow tracking UI for local viewing of run results. To launch a production
    server, use the "mlflow server" command instead.

    The UI will be visible at http://localhost:5000 by default, and only accept connections
    from the local machine. To let the UI server accept connections from other machines, you
    will need to pass ``--host 0.0.0.0`` to listen on all network interfaces (or a specific
    interface address).
    """
    from mlflow.server import _run_server
    from mlflow.server.handlers import initialize_backend_stores

    # Default to the local file store when none was given; resolve_to_local
    # lets the resolver fall back to a local artifact path for the UI case.
    backend_store_uri = backend_store_uri or DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
    default_artifact_root = resolve_default_artifact_root(
        serve_artifacts, default_artifact_root, backend_store_uri, resolve_to_local=True
    )

    # Fail fast if the stores cannot be initialized.
    try:
        initialize_backend_stores(backend_store_uri, default_artifact_root)
    except Exception as e:
        _logger.error("Error initializing backend store")
        _logger.exception(e)
        sys.exit(1)

    # TODO: We eventually want to disable the write path in this version of the server.
    try:
        # artifacts_only is hard-wired False: the UI always serves the full API.
        _run_server(
            backend_store_uri,
            default_artifact_root,
            serve_artifacts,
            False,
            artifacts_destination,
            host,
            port,
            None,
            1,
        )
    except ShellCommandException:
        eprint("Running the mlflow server failed. Please see the logs above for details.")
        sys.exit(1)
def server(
    backend_store_uri,
    default_artifact_root,
    host,
    port,
    workers,
    static_prefix,
    gunicorn_opts,
    waitress_opts,
    expose_prometheus,
):
    """
    Run the MLflow tracking server.

    The server which listen on http://localhost:5000 by default, and only accept
    connections from the local machine. To let the server accept connections from other
    machines, you will need to pass ``--host 0.0.0.0`` to listen on all network interfaces
    (or a specific interface address).
    """
    _validate_server_args(gunicorn_opts=gunicorn_opts, workers=workers, waitress_opts=waitress_opts)

    # Default the backend store to the local file path when unset.
    if not backend_store_uri:
        backend_store_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH

    # A local backend store can double as the artifact root; a remote one
    # cannot, so the option becomes mandatory in that case.
    if not default_artifact_root:
        if not is_local_uri(backend_store_uri):
            eprint(
                "Option 'default-artifact-root' is required, when backend store is not "
                "local file based."
            )
            sys.exit(1)
        default_artifact_root = backend_store_uri

    # Verify store connectivity before spawning any server workers.
    try:
        initialize_backend_stores(backend_store_uri, default_artifact_root)
    except Exception as e:  # pylint: disable=broad-except
        _logger.error("Error initializing backend store")
        _logger.exception(e)
        sys.exit(1)

    try:
        _run_server(
            backend_store_uri,
            default_artifact_root,
            host,
            port,
            static_prefix,
            workers,
            gunicorn_opts,
            waitress_opts,
            expose_prometheus,
        )
    except ShellCommandException:
        eprint("Running the mlflow server failed. Please see the logs above for details.")
        sys.exit(1)
def start_mlflow_server(backend_store_uri: str, default_artifact_root: str) -> None:
    """Start the MLflow tracking server on the default host and port.

    Initializes the backend/artifact stores first so that configuration
    problems abort the process before the server is launched.

    :param backend_store_uri: URI to a database back-end.
    :param default_artifact_root: Location to use for storing artefacts.
    """
    try:
        initialize_backend_stores(backend_store_uri, default_artifact_root)
    except Exception:
        # log.exception records the full traceback; the eager f-string
        # log.error it replaces flattened the exception to str(e) and
        # discarded the traceback, hiding the root cause.
        log.exception("Error initializing backend store")
        sys.exit(1)

    try:
        _run_server(
            backend_store_uri,
            default_artifact_root,
            DEFAULT_HOST,
            DEFAULT_PORT,
            workers=1,
        )
    except ShellCommandException:
        log.exception("Running the mlflow server failed")
        sys.exit(1)