Code example #1
def _init_server(backend_uri, root_artifact_uri):
    """
    Launch a new REST server using the tracking store specified by backend_uri and root artifact
    directory specified by root_artifact_uri.
    :returns: A tuple (url, process) containing the string URL of the server and a handle to the
             server process (a multiprocessing.Process object).
    """
    mlflow.set_tracking_uri(None)
    server_port = get_safe_port()
    env = {
        BACKEND_STORE_URI_ENV_VAR: backend_uri,
        ARTIFACT_ROOT_ENV_VAR: path_to_local_file_uri(
            tempfile.mkdtemp(dir=local_file_uri_to_path(root_artifact_uri))),
    }
    with mock.patch.dict(os.environ, env):
        cmd = ["python",
               "-c",
               'from mlflow.server import app; app.run("{hostname}", {port})'.format(
                   hostname=LOCALHOST, port=server_port)]
        process = Popen(cmd)

    _await_server_up_or_die(server_port)
    url = "http://{hostname}:{port}".format(hostname=LOCALHOST, port=server_port)
    print("Launching tracking server against backend URI %s. Server URL: %s" % (backend_uri, url))
    return url, process
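These examples lean on small test utilities such as get_safe_port and _await_server_up_or_die, whose definitions are not shown in the excerpts. A minimal sketch of what they might look like (the implementations below are assumptions for illustration, not the project's actual helpers):

import socket
import time


def get_safe_port():
    # Ask the OS for a currently free port by binding to port 0, then release it.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("127.0.0.1", 0))
        return sock.getsockname()[1]


def _await_server_up_or_die(port, timeout=60):
    # Poll the port until a connection succeeds, or raise after `timeout` seconds.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection(("127.0.0.1", port), timeout=2):
                return
        except OSError:
            time.sleep(0.5)
    raise RuntimeError(f"Server on port {port} did not come up within {timeout} seconds")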
Code example #2
def test_build_docker(iris_data, sk_model):
    with mlflow.start_run() as active_run:
        mlflow.sklearn.log_model(sk_model, "model")
        model_uri = "runs:/{run_id}/model".format(run_id=active_run.info.run_id)
    x, _ = iris_data
    df = pd.DataFrame(x)
    image_name = pyfunc_build_image(model_uri, extra_args=["--install-mlflow"])
    host_port = get_safe_port()
    scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port)
    _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model)
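_validate_with_rest_endpoint is another shared helper used by several of these examples; example #10 below inlines essentially the same checks. A hedged sketch of what the helper could look like, reusing the names (RestEndpoint, the content-type constants, np, json) that appear in example #10; only the call signature is taken from the excerpts, the body is an assumption:

def _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model):
    # Wrap the serving process in a REST endpoint and confirm that predictions
    # match the model output for each supported content type (see example #10).
    with RestEndpoint(proc=scoring_proc, port=host_port) as endpoint:
        for content_type in [CONTENT_TYPE_JSON_SPLIT_ORIENTED, CONTENT_TYPE_CSV, CONTENT_TYPE_JSON]:
            scoring_response = endpoint.invoke(df, content_type)
            assert scoring_response.status_code == 200
            np.testing.assert_array_equal(
                np.array(json.loads(scoring_response.text)), sk_model.predict(x))

Example #9 passes an additional enable_mlserver flag to the same helper, so the real signature presumably accepts an optional final argument.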
Code example #3
def test_mlflow_server_command(command):
    port = get_safe_port()
    cmd = ["mlflow", command, "--port", str(port)]
    process = subprocess.Popen(cmd)
    try:
        _await_server_up_or_die(port, timeout=10)
        resp = requests.get(f"http://localhost:{port}/health")
        augmented_raise_for_status(resp)
        assert resp.text == "OK"
    finally:
        process.kill()
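The command argument implies this test is parametrized over the CLI entry points that launch a server. A plausible parametrization (an assumption; the actual values are not shown in the excerpt) would be:

import pytest


@pytest.mark.parametrize("command", ["server", "ui"])  # assumed values
def test_mlflow_server_command(command):
    ...  # body as shown in example #3 above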
Code example #4
def test_mlflow_tracking_disabled_in_artifacts_only_mode():
    port = get_safe_port()
    cmd = ["mlflow", "server", "--port", str(port), "--artifacts-only"]
    process = subprocess.Popen(cmd)
    try:
        _await_server_up_or_die(port, timeout=10)
        resp = requests.get(
            f"http://localhost:{port}/api/2.0/mlflow/experiments/list")
        assert (
            "Endpoint: /api/2.0/mlflow/experiments/list disabled due to the mlflow server running "
            "in `--artifacts-only` mode." in resp.text)
    finally:
        process.kill()
Code example #5
def test_mlflow_artifact_list_in_artifacts_only_mode():
    port = get_safe_port()
    cmd = ["mlflow", "server", "--port", str(port), "--artifacts-only", "--serve-artifacts"]
    process = subprocess.Popen(cmd)
    try:
        _await_server_up_or_die(port, timeout=10)
        resp = requests.get(f"http://localhost:{port}/api/2.0/mlflow-artifacts/artifacts")
        augmented_raise_for_status(resp)
        assert resp.status_code == 200
        assert resp.text == "{}"
    finally:
        process.kill()
Code example #6
File: test_cli.py Project: bkbonde/mlflow
def test_build_docker_virtualenv(iris_data, sk_model):
    with mlflow.start_run():
        model_info = mlflow.sklearn.log_model(sk_model, "model")

    x, _ = iris_data
    df = pd.DataFrame(x)

    extra_args = ["--install-mlflow", "--env-manager", "virtualenv"]
    image_name = pyfunc_build_image(model_info.model_uri,
                                    extra_args=extra_args)
    host_port = get_safe_port()
    scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port)
    _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model)
Code example #7
def test_mlflow_artifact_service_unavailable_without_config():
    port = get_safe_port()
    cmd = ["mlflow", "server", "--port", str(port)]
    process = subprocess.Popen(cmd)
    try:
        _await_server_up_or_die(port, timeout=10)
        endpoint = "/api/2.0/mlflow-artifacts/artifacts"
        resp = requests.get(f"http://localhost:{port}{endpoint}")
        assert (
            f"Endpoint: {endpoint} disabled due to the mlflow server running without "
            "`--serve-artifacts`" in resp.text)
    finally:
        process.kill()
Code example #8
def artifacts_server():
    with tempfile.TemporaryDirectory() as tmpdir:
        port = get_safe_port()
        backend_store_uri = os.path.join(tmpdir, "mlruns")
        artifacts_destination = os.path.join(tmpdir, "mlartifacts")
        url = f"http://{LOCALHOST}:{port}"
        default_artifact_root = f"{url}/api/2.0/mlflow-artifacts/artifacts"
        uri_prefix = "file:///" if is_windows() else ""
        process = _launch_server(
            LOCALHOST,
            port,
            uri_prefix + backend_store_uri,
            default_artifact_root,
            uri_prefix + artifacts_destination,
        )
        yield ArtifactsServer(backend_store_uri, default_artifact_root,
                              artifacts_destination, url, process)
        process.kill()
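Because artifacts_server yields its result, it is presumably a pytest fixture whose decorator was dropped from this excerpt. A hedged usage sketch, assuming the fixture is registered under the same name and that ArtifactsServer exposes the url attribute set above:

import mlflow


def test_log_artifact_through_proxy(artifacts_server, tmp_path):
    # Point the client at the proxied tracking server and log a local file;
    # the server should persist it under artifacts_destination.
    mlflow.set_tracking_uri(artifacts_server.url)
    artifact = tmp_path / "hello.txt"
    artifact.write_text("hello")
    with mlflow.start_run():
        mlflow.log_artifact(str(artifact))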
Code example #9
File: test_cli.py Project: ConsultingMD/mlflow
def test_build_docker(iris_data, sk_model, enable_mlserver):
    with mlflow.start_run() as active_run:
        if enable_mlserver:
            # MLServer requires Python 3.7, so we'll force that Python version
            with mock.patch("mlflow.utils.environment.PYTHON_VERSION", "3.7"):
                mlflow.sklearn.log_model(sk_model, "model")
        else:
            mlflow.sklearn.log_model(sk_model, "model")
        model_uri = "runs:/{run_id}/model".format(run_id=active_run.info.run_id)

    x, _ = iris_data
    df = pd.DataFrame(x)

    extra_args = ["--install-mlflow"]
    if enable_mlserver:
        extra_args.append("--enable-mlserver")

    image_name = pyfunc_build_image(model_uri, extra_args=extra_args)
    host_port = get_safe_port()
    scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port)
    _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model, enable_mlserver)
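enable_mlserver arrives here as a test argument, so the original is presumably parametrized or backed by a fixture; a plausible (assumed) parametrization:

import pytest


@pytest.mark.parametrize("enable_mlserver", [True, False])  # assumed values
def test_build_docker(iris_data, sk_model, enable_mlserver):
    ...  # body as shown in example #9 above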
Code example #10
File: test_cli.py Project: yzongyue/mlflow
def test_build_docker(iris_data, sk_model):
    with mlflow.start_run() as active_run:
        mlflow.sklearn.log_model(sk_model, "model")
        model_uri = "runs:/{run_id}/model".format(
            run_id=active_run.info.run_id)
    x, _ = iris_data
    df = pd.DataFrame(x)
    image_name = pyfunc_build_image(model_uri, extra_args=["--install-mlflow"])
    host_port = get_safe_port()
    scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port)
    with RestEndpoint(proc=scoring_proc, port=host_port) as endpoint:
        for content_type in [
                CONTENT_TYPE_JSON_SPLIT_ORIENTED, CONTENT_TYPE_CSV,
                CONTENT_TYPE_JSON
        ]:
            scoring_response = endpoint.invoke(df, content_type)
            assert scoring_response.status_code == 200, "Failed to serve prediction, got " \
                                                        "response %s" % scoring_response.text
            np.testing.assert_array_equal(
                np.array(json.loads(scoring_response.text)),
                sk_model.predict(x))
        # Try examples of bad input, verify we get a non-200 status code
        for content_type in [
                CONTENT_TYPE_JSON_SPLIT_ORIENTED, CONTENT_TYPE_CSV,
                CONTENT_TYPE_JSON
        ]:
            scoring_response = endpoint.invoke(data="",
                                               content_type=content_type)
            assert scoring_response.status_code == 500, \
                "Expected server failure with error code 500, got response with status code %s " \
                "and body %s" % (scoring_response.status_code, scoring_response.text)
            scoring_response_dict = json.loads(scoring_response.content)
            assert "error_code" in scoring_response_dict
            assert scoring_response_dict["error_code"] == ErrorCode.Name(
                MALFORMED_REQUEST)
            assert "message" in scoring_response_dict
            assert "stack_trace" in scoring_response_dict