Example #1
def test_model_save_load(fastai_model, model_path):
    model = fastai_model.model

    kiwi.fastai.save_model(fastai_learner=model, path=model_path)
    reloaded_model = kiwi.fastai.load_model(model_uri=model_path)
    reloaded_pyfunc = pyfunc.load_model(model_uri=model_path)

    # Verify reloaded model computes same predictions as original model
    test_data = TabularList.from_df(fastai_model.inference_dataframe)
    model.data.add_test(test_data)
    reloaded_model.data.add_test(test_data)

    real_preds, real_target = map(lambda output: output.numpy(),
                                  model.get_preds(DatasetType.Test))
    reloaded_preds, reloaded_target = map(
        lambda output: output.numpy(),
        reloaded_model.get_preds(DatasetType.Test))

    np.testing.assert_array_almost_equal(real_preds, reloaded_preds)
    np.testing.assert_array_almost_equal(real_target, reloaded_target)

    model_wrapper = kiwi.fastai._FastaiModelWrapper(model)
    reloaded_model_wrapper = kiwi.fastai._FastaiModelWrapper(reloaded_model)

    model_result = model_wrapper.predict(fastai_model.inference_dataframe)
    reloaded_result = reloaded_model_wrapper.predict(
        fastai_model.inference_dataframe)
    pyfunc_result = reloaded_pyfunc.predict(fastai_model.inference_dataframe)

    compare_wrapper_results(model_result, reloaded_result)
    compare_wrapper_results(reloaded_result, pyfunc_result)
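
The test above round-trips a fastai Learner through kiwi's fastai flavor and through the generic pyfunc flavor. A minimal sketch of the same round trip outside a test, assuming a trained fastai Learner named learn and a pandas DataFrame inference_df (both hypothetical), and using only the calls exercised above:

import kiwi.fastai
from kiwi import pyfunc

# Persist the trained Learner to a local directory (the path is a placeholder).
kiwi.fastai.save_model(fastai_learner=learn, path="fastai_model")

# Reload it in the native fastai flavor and as a generic pyfunc model.
reloaded_learner = kiwi.fastai.load_model(model_uri="fastai_model")
reloaded_pyfunc = pyfunc.load_model(model_uri="fastai_model")

# The pyfunc flavor scores a pandas DataFrame directly.
predictions = reloaded_pyfunc.predict(inference_df)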
Example #2
def main(argv):
    with kiwi.start_run():
        args = parser.parse_args(argv[1:])

        # Builds, trains and evaluates a tf.estimator. Then, exports it for inference,
        # logs the exported model with MLflow, and loads the fitted model back as a PyFunc.
        (x_train,
         y_train), (x_test,
                    y_test) = tf.keras.datasets.boston_housing.load_data()

        # There are 13 features we are using for inference.
        feat_cols = [
            tf.feature_column.numeric_column(key="features",
                                             shape=(x_train.shape[1], ))
        ]
        feat_spec = {
            "features":
            tf.placeholder("float",
                           name="features",
                           shape=[None, x_train.shape[1]])
        }

        hidden_units = [50, 20]
        steps = args.steps

        regressor = tf.estimator.DNNRegressor(hidden_units=hidden_units,
                                              feature_columns=feat_cols)
        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            {"features": x_train}, y_train, num_epochs=None, shuffle=True)
        regressor.train(train_input_fn, steps=steps)
        test_input_fn = tf.estimator.inputs.numpy_input_fn(
            {"features": x_test}, y_test, num_epochs=None, shuffle=True)
        # Compute mean squared error
        mse = regressor.evaluate(test_input_fn, steps=steps)

        # Building a receiver function for exporting
        receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(
            feat_spec)
        temp = tempfile.mkdtemp()
        try:
            # The model is automatically logged when export_savedmodel() is called.
            saved_estimator_path = regressor.export_savedmodel(
                temp, receiver_fn).decode("utf-8")

            # Since the model was automatically logged as an artifact (more specifically
            # an MLflow Model), we don't need to use saved_estimator_path to load back the model.
            # MLflow takes care of it!
            pyfunc_model = pyfunc.load_model(kiwi.get_artifact_uri('model'))
            df = pd.DataFrame(data=x_test,
                              columns=["features"] * x_train.shape[1])

            # Print the PyFunc's predictions alongside the original labels for comparison.
            predict_df = pyfunc_model.predict(df)
            predict_df['original_labels'] = y_test
            print(predict_df)
        finally:
            shutil.rmtree(temp)
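
Because the exported estimator is logged as an MLflow Model under the artifact path 'model', the same pyfunc model can also be loaded once the run has finished. A sketch, assuming kiwi follows MLflow's runs:/<run_id>/<artifact_path> model-URI convention (the run ID and DataFrame are placeholders):

from kiwi import pyfunc

run_id = "..."  # placeholder: the ID of the run that logged the model
loaded_model = pyfunc.load_model("runs:/{}/model".format(run_id))
print(loaded_model.predict(df))  # df: a feature DataFrame like the one built above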
Example #3
def test_iris_data_model_can_be_loaded_and_evaluated_as_pyfunc(
        saved_tf_iris_model, model_path):
    kiwi.tensorflow.save_model(
        tf_saved_model_dir=saved_tf_iris_model.path,
        tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags,
        tf_signature_def_key=saved_tf_iris_model.signature_def_key,
        path=model_path)

    pyfunc_wrapper = pyfunc.load_model(model_path)
    results_df = pyfunc_wrapper.predict(saved_tf_iris_model.inference_df)
    for key in results_df.keys():
        assert (np.array_equal(results_df[key],
                               saved_tf_iris_model.raw_df[key]))
Example #4
def test_iris_data_model_can_be_loaded_and_evaluated_as_pyfunc(
        saved_tf_iris_model, model_path):
    kiwi.tensorflow.save_model(
        tf_saved_model_dir=saved_tf_iris_model.path,
        tf_meta_graph_tags=saved_tf_iris_model.meta_graph_tags,
        tf_signature_def_key=saved_tf_iris_model.signature_def_key,
        path=model_path)

    pyfunc_wrapper = pyfunc.load_model(model_path)
    results_df = pyfunc_wrapper.predict(saved_tf_iris_model.inference_df)
    pandas.testing.assert_frame_equal(results_df,
                                      saved_tf_iris_model.expected_results_df,
                                      check_less_precise=1)
Example #5
def test_categorical_model_can_be_loaded_and_evaluated_as_pyfunc(
        saved_tf_categorical_model, model_path):
    kiwi.tensorflow.save_model(
        tf_saved_model_dir=saved_tf_categorical_model.path,
        tf_meta_graph_tags=saved_tf_categorical_model.meta_graph_tags,
        tf_signature_def_key=saved_tf_categorical_model.signature_def_key,
        path=model_path)

    pyfunc_wrapper = pyfunc.load_model(model_path)
    results_df = pyfunc_wrapper.predict(
        saved_tf_categorical_model.inference_df)
    # Precision is less accurate for the categorical model when we load back the saved model.
    pandas.testing.assert_frame_equal(
        results_df,
        saved_tf_categorical_model.expected_results_df,
        check_less_precise=3)
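
Examples 3 through 5 all follow the same pattern: persist an already-exported TensorFlow SavedModel with kiwi.tensorflow.save_model, then score it through the generic pyfunc interface. A minimal sketch of that pattern, using only the calls shown above; the directory names, the "serve" tag, the "predict" signature key, and the input DataFrame are assumptions for illustration:

import pandas as pd
import kiwi.tensorflow
from kiwi import pyfunc

kiwi.tensorflow.save_model(
    tf_saved_model_dir="exported_saved_model",  # directory produced by the TF export step
    tf_meta_graph_tags=["serve"],               # tags the graph was exported with (assumed)
    tf_signature_def_key="predict",             # signature to expose through pyfunc (assumed)
    path="tf_pyfunc_model")

model = pyfunc.load_model("tf_pyfunc_model")
inference_df = pd.DataFrame({"SepalLength": [5.1], "SepalWidth": [3.3],
                             "PetalLength": [1.7], "PetalWidth": [0.5]})  # illustrative inputs
results_df = model.predict(inference_df)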
Example #6
def _predict(model_uri, input_path, output_path, content_type, json_format):
    pyfunc_model = load_model(model_uri)
    if input_path is None:
        input_path = sys.stdin

    if content_type == "json":
        df = parse_json_input(input_path, orient=json_format)
    elif content_type == "csv":
        df = parse_csv_input(input_path)
    else:
        raise Exception("Unknown content type '{}'".format(content_type))

    if output_path is None:
        predictions_to_json(pyfunc_model.predict(df), sys.stdout)
    else:
        with open(output_path, "w") as fout:
            predictions_to_json(pyfunc_model.predict(df), fout)
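
A hypothetical invocation of the helper above, scoring a CSV file and writing JSON predictions (all paths are placeholders; json_format is only consulted for JSON input):

_predict(model_uri="path/to/model",
         input_path="input.csv",
         output_path="predictions.json",
         content_type="csv",
         json_format=None)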
Example #7
def _serve(model_uri, port, host):
    pyfunc_model = load_model(model_uri)
    init(pyfunc_model).run(port=port, host=host)
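
And a hypothetical call that serves a loaded model over HTTP; the URI, port, and host are placeholders:

_serve(model_uri="path/to/model", port=5000, host="127.0.0.1")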
Example #8
import os
from kiwi.pyfunc import scoring_server
from kiwi.pyfunc import load_model


app = scoring_server.init(load_model(os.environ[scoring_server._SERVER_MODEL_PATH]))
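
The module above builds the scoring server's WSGI app at import time, reading the model location from an environment variable. A sketch of how the same pieces might be exercised directly, reusing the .run(port=..., host=...) call from the previous example; the model path and port are placeholders:

import os
from kiwi.pyfunc import scoring_server, load_model

os.environ[scoring_server._SERVER_MODEL_PATH] = "/path/to/pyfunc/model"  # placeholder path
app = scoring_server.init(load_model(os.environ[scoring_server._SERVER_MODEL_PATH]))
app.run(port=5000, host="127.0.0.1")  # development server; a production deployment would sit behind a WSGI server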
Example #9
def main(argv):
    with kiwi.start_run():
        args = parser.parse_args(argv[1:])

        # Fetch the data
        (train_x, train_y), (test_x, test_y) = load_data()

        # Feature columns describe how to use the input.
        my_feature_columns = []
        for key in train_x.keys():
            my_feature_columns.append(
                tf.feature_column.numeric_column(key=key))

        # Two hidden layers of 10 nodes each.
        hidden_units = [10, 10]

        # Build 2 hidden layer DNN with 10, 10 units respectively.
        classifier = tf.estimator.DNNClassifier(
            feature_columns=my_feature_columns,
            hidden_units=hidden_units,
            # The model must choose between 3 classes.
            n_classes=3)

        # Train the Model.
        classifier.train(
            input_fn=lambda: train_input_fn(train_x, train_y, args.batch_size),
            steps=args.train_steps)

        # Evaluate the model.
        eval_result = classifier.evaluate(
            input_fn=lambda: eval_input_fn(test_x, test_y, args.batch_size))

        print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

        # Generate predictions from the model
        expected = ['Setosa', 'Versicolor', 'Virginica']
        predict_x = {
            'SepalLength': [5.1, 5.9, 6.9],
            'SepalWidth': [3.3, 3.0, 3.1],
            'PetalLength': [1.7, 4.2, 5.4],
            'PetalWidth': [0.5, 1.5, 2.1],
        }

        predictions = classifier.predict(input_fn=lambda: eval_input_fn(
            predict_x, labels=None, batch_size=args.batch_size))

        old_predictions = []
        template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'

        for pred_dict, expec in zip(predictions, expected):
            class_id = pred_dict['class_ids'][0]
            probability = pred_dict['probabilities'][class_id]

            print(template.format(SPECIES[class_id], 100 * probability, expec))

            old_predictions.append(SPECIES[class_id])

        # Create tf.Variables that describe the raw serving inputs of the exported model.
        feat_specifications = {
            'SepalLength': tf.Variable([],
                                       dtype=tf.float64,
                                       name="SepalLength"),
            'SepalWidth': tf.Variable([], dtype=tf.float64, name="SepalWidth"),
            'PetalLength': tf.Variable([],
                                       dtype=tf.float64,
                                       name="PetalLength"),
            'PetalWidth': tf.Variable([], dtype=tf.float64, name="PetalWidth")
        }

        receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(
            feat_specifications)
        temp = tempfile.mkdtemp()
        try:
            # The model is automatically logged when export_saved_model() is called.
            saved_estimator_path = classifier.export_saved_model(
                temp, receiver_fn).decode("utf-8")

            # Since the model was automatically logged as an artifact (more specifically
            # an MLflow Model), we don't need to use saved_estimator_path to load back the model.
            # MLflow takes care of it!
            pyfunc_model = pyfunc.load_model(kiwi.get_artifact_uri('model'))

            predict_data = [[5.1, 3.3, 1.7, 0.5], [5.9, 3.0, 4.2, 1.5],
                            [6.9, 3.1, 5.4, 2.1]]
            df = pd.DataFrame(data=predict_data,
                              columns=[
                                  "SepalLength", "SepalWidth", "PetalLength",
                                  "PetalWidth"
                              ])

            # Predicting on the loaded Python Function and a DataFrame containing the
            # original data we predicted on.
            predict_df = pyfunc_model.predict(df)

            # Checking the PyFunc's predictions are the same as the original model's predictions.
            template = '\nOriginal prediction is "{}", reloaded prediction is "{}"'
            for expec, pred in zip(old_predictions, predict_df['classes']):
                class_id = predict_df['class_ids'][predict_df.loc[
                    predict_df['classes'] == pred].index[0]]
                reloaded_label = SPECIES[class_id]
                print(template.format(expec, reloaded_label))
        finally:
            shutil.rmtree(temp)