Example #1
import mlflow
import mlflow.sklearn
import pandas as pd

def test_build_docker(iris_data, sk_model):
    # Log the sklearn model and capture its runs:/ URI.
    with mlflow.start_run() as active_run:
        mlflow.sklearn.log_model(sk_model, "model")
        model_uri = f"runs:/{active_run.info.run_id}/model"
    x, _ = iris_data
    df = pd.DataFrame(x)
    # Build a Docker image that serves the model, then validate it over REST.
    image_name = pyfunc_build_image(model_uri, extra_args=["--install-mlflow"])
    host_port = get_safe_port()
    scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port)
    _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model)
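The helpers used here (`pyfunc_build_image`, `pyfunc_serve_from_docker_image`, `get_safe_port`, `_validate_with_rest_endpoint`) come from the surrounding MLflow test suite, not the public API. The `runs:/.../model` URI itself can be exercised with the public pyfunc loader; a minimal sanity-check sketch (the function name `check_model_uri` is ours):

def check_model_uri(model_uri, x):
    # Resolve the runs:/<run_id>/model URI and load the model via the
    # generic pyfunc flavor, i.e. the same flavor the Docker image serves.
    model = mlflow.pyfunc.load_model(model_uri)
    # pyfunc models expose a uniform predict() over DataFrames/ndarrays.
    return model.predict(x)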
Example #2
def test_build_docker_virtualenv(iris_data, sk_model):
    with mlflow.start_run():
        # log_model returns a ModelInfo object carrying the model URI.
        model_info = mlflow.sklearn.log_model(sk_model, "model")

    x, _ = iris_data
    df = pd.DataFrame(x)

    # Build the image with a virtualenv-based environment instead of conda.
    extra_args = ["--install-mlflow", "--env-manager", "virtualenv"]
    image_name = pyfunc_build_image(model_info.model_uri, extra_args=extra_args)
    host_port = get_safe_port()
    scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port)
    _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model)
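`get_safe_port` is another test-suite helper. A common way to implement it, and plausibly what it does here (a sketch, not MLflow's verbatim code), is to let the OS hand out a free ephemeral port:

import socket

def get_safe_port():
    # Bind to port 0 so the OS picks any free ephemeral port, read the
    # chosen port back, then release the socket for the server to use.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("localhost", 0))
        return sock.getsockname()[1]

This is inherently racy (another process could grab the port before the container binds it), which is acceptable in tests.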
Example #3
from unittest import mock

def test_build_docker(iris_data, sk_model, enable_mlserver):
    with mlflow.start_run() as active_run:
        if enable_mlserver:
            # MLServer requires Python 3.7, so force that Python version
            # in the logged model's environment.
            with mock.patch("mlflow.utils.environment.PYTHON_VERSION", "3.7"):
                mlflow.sklearn.log_model(sk_model, "model")
        else:
            mlflow.sklearn.log_model(sk_model, "model")
        model_uri = f"runs:/{active_run.info.run_id}/model"

    x, _ = iris_data
    df = pd.DataFrame(x)

    # Optionally swap the default scoring server for MLServer in the image.
    extra_args = ["--install-mlflow"]
    if enable_mlserver:
        extra_args.append("--enable-mlserver")

    image_name = pyfunc_build_image(model_uri, extra_args=extra_args)
    host_port = get_safe_port()
    scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port)
    _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model, enable_mlserver)
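`pyfunc_build_image` and `pyfunc_serve_from_docker_image` wrap the `mlflow models build-docker` workflow. A plausible sketch of both, assuming the helpers shell out to the documented CLI and to `docker` (the `_sketch` names are ours; the real helpers live in MLflow's test suite):

import subprocess

def pyfunc_build_image_sketch(model_uri, extra_args=(), image_name="mlflow-pyfunc-test"):
    # `mlflow models build-docker` is the documented CLI; --install-mlflow,
    # --enable-mlserver and --env-manager are real flags of this command.
    cmd = ["mlflow", "models", "build-docker", "-m", model_uri, "-n", image_name, *extra_args]
    subprocess.run(cmd, check=True)
    return image_name

def pyfunc_serve_from_docker_image_sketch(image_name, host_port):
    # Images built this way serve the scoring server on port 8080 inside
    # the container, so publish host_port against it.
    cmd = ["docker", "run", "--rm", "-p", f"{host_port}:8080", image_name]
    return subprocess.Popen(cmd)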
Example #4
import json

import numpy as np
# Import paths as in the MLflow 1.x code base these tests come from;
# RestEndpoint and the pyfunc_* helpers are from the surrounding test module.
from mlflow.protos.databricks_pb2 import MALFORMED_REQUEST, ErrorCode
from mlflow.pyfunc.scoring_server import (
    CONTENT_TYPE_CSV,
    CONTENT_TYPE_JSON,
    CONTENT_TYPE_JSON_SPLIT_ORIENTED,
)

def test_build_docker(iris_data, sk_model):
    with mlflow.start_run() as active_run:
        mlflow.sklearn.log_model(sk_model, "model")
        model_uri = f"runs:/{active_run.info.run_id}/model"
    x, _ = iris_data
    df = pd.DataFrame(x)
    image_name = pyfunc_build_image(model_uri, extra_args=["--install-mlflow"])
    host_port = get_safe_port()
    scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port)
    content_types = [CONTENT_TYPE_JSON_SPLIT_ORIENTED, CONTENT_TYPE_CSV, CONTENT_TYPE_JSON]
    with RestEndpoint(proc=scoring_proc, port=host_port) as endpoint:
        # Valid input: every supported content type should return 200 and
        # predictions matching the in-process model.
        for content_type in content_types:
            scoring_response = endpoint.invoke(df, content_type)
            assert scoring_response.status_code == 200, (
                f"Failed to serve prediction, got response {scoring_response.text}"
            )
            np.testing.assert_array_equal(
                np.array(json.loads(scoring_response.text)), sk_model.predict(x)
            )
        # Bad input: an empty body should produce a 500 with a structured
        # MALFORMED_REQUEST error payload.
        for content_type in content_types:
            scoring_response = endpoint.invoke(data="", content_type=content_type)
            assert scoring_response.status_code == 500, (
                f"Expected server failure with error code 500, got response with status "
                f"code {scoring_response.status_code} and body {scoring_response.text}"
            )
            scoring_response_dict = json.loads(scoring_response.content)
            assert "error_code" in scoring_response_dict
            assert scoring_response_dict["error_code"] == ErrorCode.Name(MALFORMED_REQUEST)
            assert "message" in scoring_response_dict
            assert "stack_trace" in scoring_response_dict