Example #1
def test_model_save_load(build_model, model_path, data):
    x, _ = data
    keras_model = build_model(data)
    if build_model == tf_keras_model:
        model_path = os.path.join(model_path, "tf")
    else:
        model_path = os.path.join(model_path, "plain")
    expected = keras_model.predict(x)
    mlflow.keras.save_model(keras_model, model_path)
    # Loading Keras model
    model_loaded = mlflow.keras.load_model(model_path)
    assert type(keras_model) == type(model_loaded)
    assert all(expected == model_loaded.predict(x))
    # Loading pyfunc model
    pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)
    assert all(pyfunc_loaded.predict(x).values == expected)

    # pyfunc serve
    scoring_response = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=pd.DataFrame(x),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
    assert all(
        pd.read_json(scoring_response.content, orient="records").values.astype(
            np.float32) == expected)
    # test spark udf
    spark_udf_preds = score_model_as_udf(model_uri=os.path.abspath(model_path),
                                         pandas_df=pd.DataFrame(x),
                                         result_type="float")
    np.testing.assert_array_almost_equal(np.array(spark_udf_preds),
                                         expected.reshape(
                                             len(spark_udf_preds)),
                                         decimal=4)
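These tests rely on pytest fixtures (build_model, model_path, data, predicted) defined elsewhere in the suite. Below is a minimal sketch of what the simpler fixtures might look like; the shapes and names here are assumptions, not the suite's actual definitions.

import os

import numpy as np
import pandas as pd
import pytest


@pytest.fixture
def data():
    # Hypothetical stand-in: a small (features, targets) regression dataset.
    x = pd.DataFrame(np.random.uniform(size=(32, 4)).astype(np.float32))
    y = pd.Series(np.random.uniform(size=32).astype(np.float32))
    return x, y


@pytest.fixture
def model_path(tmp_path):
    # A fresh, not-yet-existing directory for mlflow.keras.save_model.
    return os.path.join(str(tmp_path), "model")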
Example #2
def test_model_save_load(model, model_path, data, predicted):
    x, y = data
    mlflow.keras.save_model(model, model_path)

    # Loading Keras model
    model_loaded = mlflow.keras.load_model(model_path)
    assert all(model_loaded.predict(x) == predicted)

    # Loading pyfunc model
    pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)
    assert all(pyfunc_loaded.predict(x).values == predicted)

    # pyfunc serve
    scoring_response = pyfunc_serve_and_score_model(
        model_path=os.path.abspath(model_path),
        data=pd.DataFrame(x),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
    assert all(
        pd.read_json(scoring_response.content, orient="records").values.astype(
            np.float32) == predicted)

    # test spark udf
    spark_udf_preds = score_model_as_udf(os.path.abspath(model_path),
                                         run_id=None,
                                         pandas_df=pd.DataFrame(x),
                                         result_type="float")
    np.testing.assert_array_almost_equal(np.array(spark_udf_preds),
                                         predicted.reshape(
                                             len(spark_udf_preds)),
                                         decimal=4)
Example #3
def test_model_save_load_evaluate_pyfunc_format_multiple_inputs(
    onnx_model_multiple_inputs_float64, data_multiple_inputs, predicted_multiple_inputs, model_path
):
    mlflow.onnx.save_model(onnx_model_multiple_inputs_float64, model_path)

    # Loading pyfunc model
    pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)
    assert np.allclose(
        pyfunc_loaded.predict(data_multiple_inputs).values,
        predicted_multiple_inputs.values,
        rtol=1e-05,
        atol=1e-05,
    )

    # pyfunc serve
    scoring_response = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=data_multiple_inputs,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
    )
    assert np.allclose(
        pd.read_json(scoring_response.content, orient="records").values,
        predicted_multiple_inputs.values,
        rtol=1e-05,
        atol=1e-05,
    )
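CONTENT_TYPE_JSON_SPLIT_ORIENTED asks the scoring server to parse the request body as a pandas DataFrame serialized with orient="split". For reference, a sketch of what such a payload looks like:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
# Produces: {"columns":["a","b"],"index":[0,1],"data":[[1,3.0],[2,4.0]]}
payload = df.to_json(orient="split")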
Example #5
def test_pyfunc_serve_and_score_transformers():
    from transformers import BertModel, BertConfig  # pylint: disable=import-error

    class MyBertModel(BertModel):
        def forward(self, *args, **kwargs):  # pylint: disable=arguments-differ
            return super().forward(*args, **kwargs).last_hidden_state

    model = MyBertModel(
        BertConfig(
            vocab_size=16,
            hidden_size=2,
            num_hidden_layers=2,
            num_attention_heads=2,
            intermediate_size=2,
        ))
    model.eval()

    with mlflow.start_run():
        mlflow.pytorch.log_model(model, artifact_path="model")
        model_uri = mlflow.get_artifact_uri("model")

    input_ids = model.dummy_inputs["input_ids"]
    data = json.dumps({"inputs": input_ids.tolist()})
    resp = pyfunc_serve_and_score_model(
        model_uri,
        data,
        pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
    )
    np.testing.assert_array_equal(json.loads(resp.content),
                                  model(input_ids).detach().numpy())
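EXTRA_PYFUNC_SERVING_TEST_ARGS is defined in the suite's shared helpers and is not shown here. A hypothetical stand-in that matches how it is used above (extra CLI flags forwarded to mlflow models serve):

# Hypothetical stand-in; the real definition lives in the suite's helpers.
# These flags skip creating an isolated conda/virtualenv environment so the
# server reuses the current Python environment.
EXTRA_PYFUNC_SERVING_TEST_ARGS = ["--env-manager", "local"]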
def test_pyfunc_model_serving_with_conda_env_activation_succeeds_with_main_scoped_class(
        sklearn_knn_model, main_scoped_model_class, iris_data, tmpdir):
    sklearn_model_path = os.path.join(str(tmpdir), "sklearn_model")
    mlflow.sklearn.save_model(sk_model=sklearn_knn_model, path=sklearn_model_path)

    def test_predict(sk_model, model_input):
        return sk_model.predict(model_input) * 2

    pyfunc_model_path = os.path.join(str(tmpdir), "pyfunc_model")
    mlflow.pyfunc.save_model(path=pyfunc_model_path,
                             artifacts={
                                 "sk_model": sklearn_model_path
                             },
                             python_model=main_scoped_model_class(test_predict),
                             conda_env=_conda_env())
    loaded_pyfunc_model = mlflow.pyfunc.load_pyfunc(model_uri=pyfunc_model_path)

    sample_input = pd.DataFrame(iris_data[0])
    scoring_response = pyfunc_serve_and_score_model(
        model_uri=pyfunc_model_path,
        data=sample_input,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
    assert scoring_response.status_code == 200
    np.testing.assert_array_equal(
        np.array(json.loads(scoring_response.text)),
        loaded_pyfunc_model.predict(sample_input))
def test_diviner_pyfunc_serve_and_score_groups(grouped_prophet, diviner_data):
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.diviner.log_model(
            grouped_prophet,
            artifact_path,
        )
        model_uri = mlflow.get_artifact_uri(artifact_path)

    groups = []
    for i in [0, -1]:
        key_entries = []
        for value in diviner_data.df[diviner_data.key_columns].iloc[[i]].to_dict().values():
            key_entries.append(list(value.values())[0])
        groups.append(tuple(key_entries))

    local_predict = grouped_prophet.predict_groups(groups=groups, horizon=10, frequency="W")

    inference_data = pd.DataFrame({"groups": [groups], "horizon": 10, "frequency": "W"}, index=[0])

    resp = pyfunc_serve_and_score_model(
        model_uri,
        data=inference_data,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_RECORDS_ORIENTED,
        extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
    )
    scores = pd.read_json(resp.content.decode("utf-8"), orient="records")
    scores["ds"] = pd.to_datetime(scores["ds"], format=DS_FORMAT)
    scores["multiplicative_terms"] = scores["multiplicative_terms"].astype("float64")
    pd.testing.assert_frame_equal(local_predict, scores)
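The nested loop above extracts the first and last group keys as tuples. Assuming diviner_data.key_columns is a list of column names, an equivalent and more direct extraction would be:

# Equivalent to the nested loop above: first and last group keys as tuples.
groups = [
    tuple(row)
    for row in diviner_data.df[diviner_data.key_columns]
    .iloc[[0, -1]]
    .itertuples(index=False)
]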
Example #8
def test_pyfunc_serve_and_score():
    X, y = shap.datasets.boston()
    reg = sklearn.ensemble.RandomForestRegressor(n_estimators=10).fit(X, y)
    model = shap.Explainer(
        reg.predict,
        masker=X,
        algorithm="permutation",
        # `link` defaults to `shap.links.identity` which is decorated by `numba.jit` and causes
        # the following error when loading the explainer for serving:
        # ```
        # Exception: The passed link function needs to be callable and have a callable .inverse property!  # noqa
        # ```
        # As a workaround, use an identity function that's NOT decorated by `numba.jit`.
        link=create_identity_function(),
    )
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.shap.log_explainer(model, artifact_path)
        model_uri = mlflow.get_artifact_uri(artifact_path)

    resp = pyfunc_serve_and_score_model(
        model_uri,
        data=pd.DataFrame(X[:3]),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
    )
    scores = pd.read_json(resp.content, orient="records").values
    np.testing.assert_allclose(scores, model(X[:3]).values, rtol=100, atol=100)
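create_identity_function is not shown here. Per the comment above, the link only needs to be a plain callable carrying a callable .inverse, so a minimal sketch would be:

def create_identity_function():
    # A plain-Python identity link: callable, with a callable .inverse,
    # and crucially not wrapped by numba.jit.
    def identity(x):
        return x

    identity.inverse = identity
    return identity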
Example #9
def test_serve_gunicorn_opts(iris_data, sk_model):
    if sys.platform == "win32":
        pytest.skip(
            "This test requires gunicorn, which is not available on Windows.")
    with mlflow.start_run() as active_run:
        mlflow.sklearn.log_model(sk_model,
                                 "model",
                                 registered_model_name="imlegit")
        run_id = active_run.info.run_id

    model_uris = [
        "models:/{name}/{stage}".format(name="imlegit", stage="None"),
        "runs:/{run_id}/model".format(run_id=run_id),
    ]
    for model_uri in model_uris:
        with TempDir() as tmp:
            output_file_path = tmp.path("stdout")
            with open(output_file_path, "w") as output_file:
                x, _ = iris_data
                scoring_response = pyfunc_serve_and_score_model(
                    model_uri,
                    pd.DataFrame(x),
                    content_type=CONTENT_TYPE_JSON_SPLIT_ORIENTED,
                    stdout=output_file,
                    extra_args=["-w", "3"],
                )
            with open(output_file_path, "r") as output_file:
                stdout = output_file.read()
        actual = pd.read_json(scoring_response.content, orient="records")
        actual = actual[actual.columns[0]].values
        expected = sk_model.predict(x)
        assert all(expected == actual)
        expected_command_pattern = re.compile(
            "gunicorn.*-w 3.*mlflow.pyfunc.scoring_server.wsgi:app")
        assert expected_command_pattern.search(stdout) is not None
Example #10
def test_scoring_server_successfully_evaluates_correct_dataframes_with_pandas_split_orientation(
        sklearn_model, model_path):
    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)

    pandas_split_content = pd.DataFrame(
        sklearn_model.inference_data).to_json(orient="split")
    response_default_content_type = pyfunc_serve_and_score_model(
        model_path=os.path.abspath(model_path),
        data=pandas_split_content,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON)
    assert response_default_content_type.status_code == 200

    response = pyfunc_serve_and_score_model(
        model_path=os.path.abspath(model_path),
        data=pandas_split_content,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
    assert response.status_code == 200
Example #11
def serve_and_score(model_uri, data, extra_args=None):
    resp = pyfunc_serve_and_score_model(
        model_uri,
        data=data,
        content_type=CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        extra_args=["--env-manager=virtualenv"] + (extra_args or []),
    )
    return pd.read_json(resp.content, orient="records").values.squeeze()
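A hypothetical call site for the serve_and_score helper above; model_uri and inference_dataframe are assumed to exist in the calling test:

# Score three rows against an already-logged model, then again with an
# extra `mlflow models serve` flag forwarded through extra_args.
preds = serve_and_score(model_uri, inference_dataframe.head(3))
preds_again = serve_and_score(
    model_uri, inference_dataframe.head(3), extra_args=["--timeout", "60"]
)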
def test_scoring_server_responds_to_invalid_content_type_request_with_unsupported_content_type_code(
        sklearn_model, model_path):
    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)

    pandas_split_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="split")
    response = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=pandas_split_content,
        content_type="not_a_supported_content_type")
    assert response.status_code == 415
def test_load_model_succeeds_with_dependencies_specified_via_code_paths(
    module_scoped_subclassed_model, model_path, data
):
    # Save a PyTorch model whose class is defined in the current test suite. Because the
    # `tests` module is not available when the model is deployed for local scoring, we include
    # the test suite file as a code dependency
    mlflow.pytorch.save_model(
        path=model_path,
        pytorch_model=module_scoped_subclassed_model,
        conda_env=None,
        code_paths=[__file__],
    )

    # Define a custom pyfunc model that loads a PyTorch model artifact using
    # `mlflow.pytorch.load_model`
    class TorchValidatorModel(pyfunc.PythonModel):
        def load_context(self, context):
            # pylint: disable=attribute-defined-outside-init
            self.pytorch_model = mlflow.pytorch.load_model(context.artifacts["pytorch_model"])

        def predict(self, context, model_input):
            with torch.no_grad():
                input_tensor = torch.from_numpy(model_input.values.astype(np.float32))
                output_tensor = self.pytorch_model(input_tensor)
                return pd.DataFrame(output_tensor.numpy())

    pyfunc_artifact_path = "pyfunc_model"
    with mlflow.start_run():
        pyfunc.log_model(
            artifact_path=pyfunc_artifact_path,
            python_model=TorchValidatorModel(),
            artifacts={"pytorch_model": model_path},
        )
        pyfunc_model_path = _download_artifact_from_uri(
            "runs:/{run_id}/{artifact_path}".format(
                run_id=mlflow.active_run().info.run_id, artifact_path=pyfunc_artifact_path
            )
        )

    # Deploy the custom pyfunc model and ensure that it is able to successfully load its
    # constituent PyTorch model via `mlflow.pytorch.load_model`
    scoring_response = pyfunc_serve_and_score_model(
        model_uri=pyfunc_model_path,
        data=data[0],
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        extra_args=["--no-conda"],
    )
    assert scoring_response.status_code == 200

    deployed_model_preds = pd.DataFrame(json.loads(scoring_response.content))
    np.testing.assert_array_almost_equal(
        deployed_model_preds.values[:, 0],
        _predict(model=module_scoped_subclassed_model, data=data),
        decimal=4,
    )
Example #14
def test_scoring_server_successfully_evaluates_correct_tf_serving_sklearn(
        sklearn_model, model_path):
    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)

    inp_dict = {"instances": sklearn_model.inference_data.tolist()}
    response_records_content_type = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=json.dumps(inp_dict),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
    )
    assert response_records_content_type.status_code == 200
Example #15
def test_scoring_server_successfully_evaluates_correct_split_to_numpy(
        sklearn_model, model_path):
    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)

    pandas_split_content = pd.DataFrame(
        sklearn_model.inference_data).to_json(orient="split")
    response_records_content_type = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=pandas_split_content,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_NUMPY)
    assert response_records_content_type.status_code == 200
Example #16
def test_scoring_server_successfully_evaluates_correct_dataframes_with_pandas_records_orientation(
        sklearn_model, model_path):
    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)

    pandas_record_content = pd.DataFrame(
        sklearn_model.inference_data).to_json(orient="records")
    response_records_content_type = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=pandas_record_content,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_RECORDS_ORIENTED)
    assert response_records_content_type.status_code == 200
Example #17
def test_serving_model_with_schema(pandas_df_with_all_types):
    class TestModel(PythonModel):
        def predict(self, context, model_input):
            return [[k, str(v)] for k, v in model_input.dtypes.items()]

    schema = Schema([ColSpec(c, c) for c in pandas_df_with_all_types.columns])
    df = _shuffle_pdf(pandas_df_with_all_types)
    with TempDir(chdr=True):
        with mlflow.start_run() as run:
            mlflow.pyfunc.log_model("model",
                                    python_model=TestModel(),
                                    signature=ModelSignature(schema))
        response = pyfunc_serve_and_score_model(
            model_uri="runs:/{}/model".format(run.info.run_id),
            data=json.dumps(df.to_dict(orient="split"), cls=NumpyEncoder),
            content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
            extra_args=["--no-conda"],
        )
        response_json = json.loads(response.content)

        # objects are not converted to pandas Strings at the moment
        expected_types = {
            **pandas_df_with_all_types.dtypes, "string": np.dtype(object)
        }
        assert response_json == [[k, str(v)]
                                 for k, v in expected_types.items()]
        response = pyfunc_serve_and_score_model(
            model_uri="runs:/{}/model".format(run.info.run_id),
            data=json.dumps(pandas_df_with_all_types.to_dict(orient="records"),
                            cls=NumpyEncoder),
            content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_RECORDS_ORIENTED,
            extra_args=["--no-conda"],
        )
        response_json = json.loads(response.content)
        assert response_json == [[k, str(v)]
                                 for k, v in expected_types.items()]
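_shuffle_pdf comes from the suite's helpers. Given that the test expects schema enforcement to restore column order, a plausible sketch is a column-order shuffle:

import numpy as np

def _shuffle_pdf(pdf):
    # Hypothetical helper: return the frame with its columns randomly
    # reordered, so the test exercises schema-based column reordering.
    cols = list(pdf.columns)
    np.random.shuffle(cols)
    return pdf[cols]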
def test_scoring_server_responds_to_malformed_json_input_with_stacktrace_and_error_code(
        sklearn_model, model_path):
    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)

    malformed_json_content = "this is,,,, not valid json"
    response = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=malformed_json_content,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
    response_json = json.loads(response.content)
    assert "error_code" in response_json
    assert response_json["error_code"] == ErrorCode.Name(MALFORMED_REQUEST)
    assert "message" in response_json
    assert "stack_trace" in response_json
def test_scoring_server_responds_to_invalid_json_input_with_stacktrace_and_error_code(
        sklearn_model, model_path):
    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)

    incorrect_json_content = json.dumps({"not": "a serialized dataframe"})
    response = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=incorrect_json_content,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
    response_json = json.loads(response.content)
    assert "error_code" in response_json
    assert response_json["error_code"] == ErrorCode.Name(MALFORMED_REQUEST)
    assert "message" in response_json
    assert "stack_trace" in response_json
Example #20
def test_pyfunc_serve_and_score(pd_model):
    model, inference_dataframe = pd_model
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.paddle.log_model(model, artifact_path)
        model_uri = mlflow.get_artifact_uri(artifact_path)

    resp = pyfunc_serve_and_score_model(
        model_uri,
        data=pd.DataFrame(inference_dataframe),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
    )
    scores = pd.read_json(resp.content.decode("utf-8"), orient="records").values.squeeze()
    np.testing.assert_array_almost_equal(scores, model(inference_dataframe).squeeze())
def test_scoring_server_responds_to_incompatible_inference_dataframe_with_stacktrace_and_error_code(
        sklearn_model, model_path):
    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
    incompatible_df = pd.DataFrame(np.array(range(10)))

    response = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=incompatible_df,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
    response_json = json.loads(response.content)
    assert "error_code" in response_json
    assert response_json["error_code"] == ErrorCode.Name(BAD_REQUEST)
    assert "message" in response_json
    assert "stack_trace" in response_json
Example #22
def test_pyfunc_serve_and_score():
    model, _, inference_dataframe = ols_model()
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.statsmodels.log_model(model, artifact_path)
        model_uri = mlflow.get_artifact_uri(artifact_path)

    resp = pyfunc_serve_and_score_model(
        model_uri,
        data=pd.DataFrame(inference_dataframe),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
    )
    scores = pd.read_json(resp.content, orient="records").values.squeeze()
    np.testing.assert_array_almost_equal(scores, model.predict(inference_dataframe))
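ols_model() is a suite helper; the three-element unpacking above suggests it returns (fitted_results, something, inference_data). A minimal compatible sketch, with everything beyond that shape being an assumption:

import numpy as np
import statsmodels.api as sm


def ols_model():
    # Hypothetical helper: fit a small OLS model and return
    # (fitted_results, training_endog, inference_exog).
    rng = np.random.RandomState(0)
    exog = sm.add_constant(rng.uniform(size=(100, 2)))
    endog = exog @ np.array([1.0, 2.0, 3.0]) + rng.normal(scale=0.1, size=100)
    results = sm.OLS(endog, exog).fit()
    return results, endog, exog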
Example #23
def test_pyfunc_serve_and_score(spacy_model_with_data):
    model, inference_dataframe = spacy_model_with_data
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.spacy.log_model(model, artifact_path)
        model_uri = mlflow.get_artifact_uri(artifact_path)

    resp = pyfunc_serve_and_score_model(
        model_uri,
        data=inference_dataframe,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
    )
    scores = pd.read_json(resp.content, orient="records")
    pd.testing.assert_frame_equal(scores, _predict(model, inference_dataframe))
def test_pyfunc_serve_and_score(reg_model):
    model, inference_dataframe = reg_model
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.catboost.log_model(model, artifact_path)
        model_uri = mlflow.get_artifact_uri(artifact_path)

    resp = pyfunc_serve_and_score_model(
        model_uri,
        data=inference_dataframe,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
    )
    scores = pd.read_json(resp.content, orient="records").values.squeeze()
    np.testing.assert_array_almost_equal(scores,
                                         model.predict(inference_dataframe))
Example #25
def test_pyfunc_serve_and_score(h2o_iris_model):
    model, inference_dataframe = h2o_iris_model
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.h2o.log_model(model, artifact_path)
        model_uri = mlflow.get_artifact_uri(artifact_path)

    resp = pyfunc_serve_and_score_model(
        model_uri,
        data=inference_dataframe.as_data_frame(),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
    )
    scores = pd.read_json(resp.content.decode("utf-8"), orient="records").drop("predict", axis=1)
    preds = model.predict(inference_dataframe).as_data_frame().drop("predict", axis=1)
    np.testing.assert_array_almost_equal(scores, preds)
def test_scoring_server_responds_to_invalid_csv_input_with_stacktrace_and_error_code(
        sklearn_model, model_path):
    mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)

    # An empty string is not valid pandas CSV
    incorrect_csv_content = ""
    response = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=incorrect_csv_content,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_CSV)
    response_json = json.loads(response.content)
    assert "error_code" in response_json
    assert response_json["error_code"] == ErrorCode.Name(MALFORMED_REQUEST)
    assert "message" in response_json
    assert "stack_trace" in response_json
Example #27
def test_pyfunc_serve_and_score(xgb_model):
    model, inference_dataframe, inference_dmatrix = xgb_model
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.xgboost.log_model(model, artifact_path)
        model_uri = mlflow.get_artifact_uri(artifact_path)

    resp = pyfunc_serve_and_score_model(
        model_uri,
        data=inference_dataframe,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
    )
    scores = pd.read_json(resp.content.decode("utf-8"), orient="records").values.squeeze()
    np.testing.assert_array_almost_equal(scores, model.predict(inference_dmatrix))
Example #28
def test_pyfunc_serve_and_score_sklearn(reg_model):
    _, inference_dataframe = reg_model
    model = Pipeline([("model", reg_model.model)])

    with mlflow.start_run():
        mlflow.sklearn.log_model(model, artifact_path="model")
        model_uri = mlflow.get_artifact_uri("model")

    resp = pyfunc_serve_and_score_model(
        model_uri,
        inference_dataframe.head(3),
        pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
    )
    scores = pd.read_json(resp.content.decode("utf-8"), orient="records").values.squeeze()
    np.testing.assert_array_almost_equal(scores, model.predict(inference_dataframe.head(3)))
Example #29
def test_scoring_server_successfully_evaluates_correct_tf_serving_keras_inputs(
        keras_model, model_path):
    mlflow.keras.save_model(keras_model.model, model_path)

    inp_dict = {
        "inputs": {
            "a": keras_model.inference_data[:, :2].tolist(),
            "b": keras_model.inference_data[:, -2:].tolist(),
        }
    }
    response_records_content_type = pyfunc_serve_and_score_model(
        model_uri=os.path.abspath(model_path),
        data=json.dumps(inp_dict),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
    )
    assert response_records_content_type.status_code == 200
Example #30
def test_pyfunc_serve_and_score_sklearn(model):
    X, y = datasets.load_iris(return_X_y=True, as_frame=True)
    model.fit(X, y)

    with mlflow.start_run():
        mlflow.sklearn.log_model(model, artifact_path="model")
        model_uri = mlflow.get_artifact_uri("model")

    resp = pyfunc_serve_and_score_model(
        model_uri,
        X.head(3),
        pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
        extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
    )
    scores = pd.read_json(resp.content.decode("utf-8"), orient="records").values.squeeze()
    np.testing.assert_array_equal(scores, model.predict(X.head(3)))