Example no. 1
0
def _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model):
    """Exercise a running scoring server with both valid and invalid requests.

    Valid payloads in every supported content type must return HTTP 200 with
    predictions equal to ``sk_model.predict(x)``.  An empty payload must fail
    with HTTP 500 and a structured MALFORMED_REQUEST error body.
    """
    content_types = [
        CONTENT_TYPE_JSON_SPLIT_ORIENTED, CONTENT_TYPE_CSV,
        CONTENT_TYPE_JSON
    ]
    with RestEndpoint(proc=scoring_proc, port=host_port) as endpoint:
        # Well-formed input: each content type should score successfully.
        for ctype in content_types:
            response = endpoint.invoke(df, ctype)
            assert response.status_code == 200, (
                "Failed to serve prediction, got response %s" % response.text
            )
            actual = np.array(json.loads(response.text))
            np.testing.assert_array_equal(actual, sk_model.predict(x))
        # Malformed (empty) input: verify we get a structured server error.
        for ctype in content_types:
            response = endpoint.invoke(data="", content_type=ctype)
            assert response.status_code == 500, (
                "Expected server failure with error code 500, got response with status code %s "
                "and body %s" % (response.status_code, response.text)
            )
            error_payload = json.loads(response.content)
            assert "error_code" in error_payload
            assert error_payload["error_code"] == ErrorCode.Name(MALFORMED_REQUEST)
            assert "message" in error_payload
            assert "stack_trace" in error_payload
Example no. 2
0
def _validate_with_rest_endpoint(scoring_proc, host_port, df, x, sk_model, enable_mlserver=False):
    """Exercise a running scoring server with both valid and invalid requests.

    Valid payloads in every supported content type must return HTTP 200 with
    predictions equal to ``sk_model.predict(x)``.  An empty payload must fail:
    HTTP 400 with a structured BAD_REQUEST error body for the default scoring
    server, or HTTP 500 when MLServer is enabled (MLServer's error body is not
    checked here — see the linked upstream issue).
    """
    content_types = [CONTENT_TYPE_JSON_SPLIT_ORIENTED, CONTENT_TYPE_CSV, CONTENT_TYPE_JSON]
    # MLServer surfaces malformed requests as 500s; the default server uses 400.
    expected_status_code = 500 if enable_mlserver else 400
    with RestEndpoint(proc=scoring_proc, port=host_port) as endpoint:
        # Well-formed input: each content type should score successfully.
        for ctype in content_types:
            response = endpoint.invoke(df, ctype)
            assert response.status_code == 200, (
                "Failed to serve prediction, got response %s" % response.text
            )
            actual = np.array(json.loads(response.text))
            np.testing.assert_array_equal(actual, sk_model.predict(x))
        # Malformed (empty) input: verify we get the expected failure code.
        for ctype in content_types:
            response = endpoint.invoke(data="", content_type=ctype)
            assert response.status_code == expected_status_code, (
                "Expected server failure with error code %s, got response with status code %s "
                "and body %s"
                % (expected_status_code, response.status_code, response.text)
            )

            if enable_mlserver:
                # MLServer returns a different set of errors.
                # Skip these assertions until this issue gets tackled:
                # https://github.com/SeldonIO/MLServer/issues/360)
                continue

            error_payload = json.loads(response.content)
            assert "error_code" in error_payload
            assert error_payload["error_code"] == ErrorCode.Name(BAD_REQUEST)
            assert "message" in error_payload
            assert "stack_trace" in error_payload
Example no. 3
0
def test_build_docker(iris_data, sk_model):
    """Build a pyfunc Docker image for a logged sklearn model and verify the
    containerized server returns correct predictions for valid requests and a
    structured MALFORMED_REQUEST error (HTTP 500) for an empty request body."""
    with mlflow.start_run() as active_run:
        mlflow.sklearn.log_model(sk_model, "model")
        model_uri = "runs:/{run_id}/model".format(run_id=active_run.info.run_id)
    x, _ = iris_data
    df = pd.DataFrame(x)
    # Build the serving image and launch a container scoring on a free port.
    image_name = pyfunc_build_image(model_uri, extra_args=["--install-mlflow"])
    host_port = get_safe_port()
    scoring_proc = pyfunc_serve_from_docker_image(image_name, host_port)
    content_types = [CONTENT_TYPE_JSON_SPLIT_ORIENTED, CONTENT_TYPE_CSV, CONTENT_TYPE_JSON]
    with RestEndpoint(proc=scoring_proc, port=host_port) as endpoint:
        # Well-formed input: each content type should score successfully.
        for ctype in content_types:
            response = endpoint.invoke(df, ctype)
            assert response.status_code == 200, (
                "Failed to serve prediction, got response %s" % response.text
            )
            actual = np.array(json.loads(response.text))
            np.testing.assert_array_equal(actual, sk_model.predict(x))
        # Malformed (empty) input: verify we get a structured server error.
        for ctype in content_types:
            response = endpoint.invoke(data="", content_type=ctype)
            assert response.status_code == 500, (
                "Expected server failure with error code 500, got response with status code %s "
                "and body %s" % (response.status_code, response.text)
            )
            error_payload = json.loads(response.content)
            assert "error_code" in error_payload
            assert error_payload["error_code"] == ErrorCode.Name(MALFORMED_REQUEST)
            assert "message" in error_payload
            assert "stack_trace" in error_payload