Code Example #1
File: scoring.py  Project: dartov/AoaPmmlDemo-1
import json

import pandas as pd
from jpmml_evaluator import make_evaluator
from jpmml_evaluator.pyjnius import PyJNIusBackend, jnius_configure_classpath
from sklearn import metrics
from sklearn.model_selection import train_test_split


def evaluate(data_conf, model_conf, **kwargs):
    """Python evaluate method called by AOA framework

    Parameters:
    data_conf (dict): The dataset metadata
    model_conf (dict): The model configuration to use

    Returns:
    None: No return value

    """

    predict_df = pd.read_csv(data_conf['location'])
    _, test = train_test_split(predict_df, test_size=0.5, random_state=42)
    X_predict = test.drop("species", axis=1)
    y_test = test['species']

    jnius_configure_classpath()
    backend = PyJNIusBackend()

    evaluator = make_evaluator(backend, "models/model.pmml").verify()

    y_predict = evaluator.evaluateAll(X_predict)

    scores = {}
    scores['accuracy'] = metrics.accuracy_score(y_test, y_predict['y'])
    print("model accuracy is ", scores['accuracy'])

    # dump results as json file evaluation.json to models/ folder
    with open("models/evaluation.json", "w+") as f:
        json.dump(scores, f)
    print("Evaluation complete...")
Code Example #2
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

import numpy as np
import pandas as pd
import xgboost
from jpmml_evaluator import make_evaluator
from jpmml_evaluator.pyjnius import PyJNIusBackend, jnius_configure_classpath
from sqlflow_submitter.xgboost.train import save_model_to_local_file

# Configure JVM
jnius_configure_classpath()

# Construct a PyJNIus backend
PMML_BACKEND = PyJNIusBackend()


# TODO(sneaxiy): Add XGBRanker unittest. XGBRanker requires group information
# but we do not support group now.
class TestXGBoostModelSavingBase(unittest.TestCase):
    def tearDown(self):
        filename = self.filename()
        if filename is not None and os.path.exists(filename):
            os.remove(filename)
            os.remove(self.pmml_filename())

    def filename(self):