Code Example #1
File: scoring.py Project: dartov/AoaPmmlDemo-1
import json

import pandas as pd
from sklearn import metrics
from sklearn.model_selection import train_test_split

from jpmml_evaluator import make_evaluator
from jpmml_evaluator.pyjnius import PyJNIusBackend, jnius_configure_classpath


def evaluate(data_conf, model_conf, **kwargs):
    """Python evaluate method called by AOA framework

    Parameters:
    data_conf (dict): The dataset metadata
    model_conf (dict): The model configuration to use

    Returns:
    None: No return

    """

    predict_df = pd.read_csv(data_conf['location'])
    _, test = train_test_split(predict_df, test_size=0.5, random_state=42)
    X_predict = test.drop(columns="species")
    y_test = test['species']

    jnius_configure_classpath()
    backend = PyJNIusBackend()

    evaluator = make_evaluator(backend, "models/model.pmml") \
        .verify()

    y_predict = evaluator.evaluateAll(X_predict)

    scores = {}
    scores['accuracy'] = metrics.accuracy_score(y_test, y_predict['y'])
    print("model accuracy is ", scores['accuracy'])

    # dump results as json file evaluation.json to models/ folder
    with open("models/evaluation.json", "w+") as f:
        json.dump(scores, f)
    print("Evaluation complete...")
Code Example #2
    def save_and_load_model(self, booster, params):
        save_model_to_local_file(booster, params, self.filename())
        self.assertTrue(os.path.exists(self.filename()))
        self.assertTrue(os.path.exists(self.pmml_filename()))

        loaded_booster = xgboost.Booster({"n_thread": 4})
        loaded_booster.load_model(self.filename())

        pmml_evaluator = make_evaluator(PMML_BACKEND,
                                        self.pmml_filename()).verify()
        return loaded_booster, pmml_evaluator
Code Example #3
    def load(self) -> bool:
        model_path = kserve.Storage.download(self.model_dir)
        paths = [os.path.join(model_path, MODEL_BASENAME + model_extension)
                 for model_extension in MODEL_EXTENSIONS]
        for path in paths:
            if os.path.exists(path):
                self._gateway = launch_gateway()
                self._backend = Py4JBackend(self._gateway)
                self.evaluator = make_evaluator(self._backend, path).verify()
                self.input_fields = [inputField.getName() for inputField in self.evaluator.getInputFields()]
                self.ready = True
                break
        return self.ready
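A possible companion predict method for this kserve model, sketched under the assumption of the common {"instances": [...]} request payload; only the evaluator and input_fields usage is taken from the load method above:

    def predict(self, request: dict, headers=None) -> dict:
        # Assumed payload shape: {"instances": [{feature: value, ...}, ...]}.
        outputs = []
        for instance in request["instances"]:
            # Keep only the fields the PMML model declares as inputs.
            arguments = {name: instance.get(name) for name in self.input_fields}
            outputs.append(self.evaluator.evaluate(arguments))
        return {"predictions": outputs}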
Code Example #4
File: pmml.py Project: fact-project/aict-tools
    def __init__(self, path):
        if not HAS_PMML:
            raise ImportError("You need `jpmml_evaluator` to load pmml models. "
                              "Use pip install -U aict-tools[pmml] or [all]")
        self.backend = PyJNIusBackend()
        self.evaluator = jpmml_evaluator.make_evaluator(self.backend,
                                                        path).verify()
        self.feature_names = sorted(
            [i.getName() for i in self.evaluator.getInputFields()])

        names = [o.getName() for o in self.evaluator.getTargetFields()]
        if len(names) > 1:
            raise ValueError("Model has more than one output")
        self.target_name = names[0]
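A minimal usage sketch for this wrapper; the predict helper below is not part of the original file and simply bulk-evaluates a pandas DataFrame using the fields resolved in __init__:

    def predict(self, df):
        # Evaluate all rows at once and return the single target column.
        results = self.evaluator.evaluateAll(df[self.feature_names])
        return results[self.target_name]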
Code Example #5
    def workflow(self, backend):
        evaluator = make_evaluator(backend, _resource("DecisionTreeIris.pmml"), reporting = True) \
            .verify()

        self.assertEqual(2, len(evaluator.getInputFields()))
        self.assertEqual(1, len(evaluator.getTargetFields()))
        self.assertEqual(4, len(evaluator.getOutputFields()))

        targetField = evaluator.getTargetFields()[0]

        self.assertEqual("Species", targetField.getName())
        self.assertEqual("string", targetField.getDataType())
        self.assertEqual("categorical", targetField.getOpType())

        arguments = {
            "Sepal.Length": 5.1,
            "Sepal.Width": 3.5,
            "Petal.Length": 1.4,
            "Petal.Width": 0.2
        }
        print(arguments)

        results = evaluator.evaluate(arguments)
        print(results)

        self.assertEqual(5, len(results))

        self.assertEqual("setosa", results["Species"])
        self.assertEqual(1.0, results["probability(setosa)"])
        self.assertEqual(0.0, results["probability(versicolor)"])
        self.assertEqual(0.0, results["probability(virginica)"])
        # See https://github.com/kivy/pyjnius/issues/408
        #self.assertTrue(results["report(probability(versicolor))"].startswith("<math "))
        self.assertTrue("report(probability(versicolor))" in results)

        arguments_df = pandas.read_csv(_resource("Iris.csv"), sep=",")
        print(arguments_df.head(5))

        results_df = evaluator.evaluateAll(arguments_df)
        print(results_df.head(5))

        self.assertEqual((150, 5), results_df.shape)
Code Example #6
    def workflow(self, backend, lax):
        pyArguments = {
            "missing": None,
            "str": str("one"),
            "int": int(1),
            "float": float(1.0),
            "bool": bool(True)
        }
        pyResults = _argumentsToResults(backend, pyArguments)

        self.assertDictEqual(pyArguments, pyResults)

        numpyArguments = {
            "int8": numpy.int8(1),
            "int16": numpy.int16(1),
            "int32": numpy.int32(1),
            "float32": numpy.float32(1.0),
            "float64": numpy.float64(1.0)
        }
        numpyResults = _argumentsToResults(backend, numpyArguments)

        self.assertDictEqual(
            {
                "int8": 1,
                "int16": 1,
                "int32": 1,
                "float32": float(1.0),
                "float64": float(1.0)
            }, numpyResults)

        evaluator = make_evaluator(backend, _resource("DecisionTreeIris.pmml"), lax = lax, reporting = True) \
            .verify()

        self.assertEqual(2, len(evaluator.getInputFields()))
        self.assertEqual(1, len(evaluator.getTargetFields()))
        self.assertEqual(4, len(evaluator.getOutputFields()))

        targetField = evaluator.getTargetFields()[0]

        self.assertEqual("Species", targetField.getName())
        self.assertEqual("string", targetField.getDataType())
        self.assertEqual("categorical", targetField.getOpType())

        arguments = {
            "Sepal.Length": "error",
            "Sepal.Width": "error",
            "Petal.Length": "error",
            "Petal.Width": "error"
        }
        print(arguments)

        try:
            results = evaluator.evaluate(arguments)

            self.fail()
        except JavaError as je:
            self.assertIsNotNone(je.className)
            self.assertIsNotNone(je.message)
            self.assertTrue(len(je.stackTraceElements) > 0)
            self.assertFalse(je.isInstance("java.lang.String"))
            self.assertTrue(
                je.isInstance("org.jpmml.evaluator.ValueCheckException"))
            self.assertTrue(
                je.isInstance("org.jpmml.evaluator.EvaluationException"))
            self.assertFalse(
                je.isInstance("org.jpmml.model.InvalidMarkupException"))
            self.assertFalse(
                je.isInstance(
                    "org.jpmml.evaluator.UnsupportedMarkupException"))
            self.assertTrue(je.isInstance("org.jpmml.model.PMMLException"))
            self.assertTrue(je.isInstance("java.lang.RuntimeException"))

        arguments = {
            "Sepal.Length": 5.1,
            "Sepal.Width": 3.5,
            "Petal.Length": 1.4,
            "Petal.Width": 0.2
        }
        print(arguments)

        results = evaluator.evaluate(arguments)
        print(results)

        self.assertEqual(5, len(results))

        self.assertEqual("setosa", results["Species"])
        self.assertEqual(1.0, results["probability(setosa)"])
        self.assertEqual(0.0, results["probability(versicolor)"])
        self.assertEqual(0.0, results["probability(virginica)"])
        self.assertTrue(
            results["report(probability(versicolor))"].startswith("<math "))

        arguments_df = pandas.read_csv(_resource("Iris.csv"), sep=",")
        print(arguments_df.head(5))

        results_df = evaluator.evaluateAll(arguments_df)
        print(results_df.head(5))

        self.assertEqual((150, 5), results_df.shape)
Code Example #7
	def workflow(self, backend):
		pyDict = {
			"str" : str("one"),
			"int" : int(1),
			"float" : float(1.0),
			"bool" : bool(True)
		}

		javaMap = backend.dict2map(pyDict)
		pyJavaDict = backend.map2dict(javaMap)

		self.assertDictEqual(pyDict, pyJavaDict)

		numpyDict = {
			"int8" : numpy.int8(1),
			"int16" : numpy.int16(1),
			"int32" : numpy.int32(1),
			"float32" : numpy.float32(1.0),
			"float64" : numpy.float64(1.0)
		}
		
		javaMap = backend.dict2map(numpyDict)

		evaluator = make_evaluator(backend, _resource("DecisionTreeIris.pmml"), reporting = True) \
			.verify()

		self.assertEqual(2, len(evaluator.getInputFields()))
		self.assertEqual(1, len(evaluator.getTargetFields()))
		self.assertEqual(4, len(evaluator.getOutputFields()))

		targetField = evaluator.getTargetFields()[0]

		self.assertEqual("Species", targetField.getName())
		self.assertEqual("string", targetField.getDataType())
		self.assertEqual("categorical", targetField.getOpType())

		arguments = {
			"Sepal.Length" : 5.1,
			"Sepal.Width" : 3.5,
			"Petal.Length" : 1.4,
			"Petal.Width" : 0.2
		}
		print(arguments)

		results = evaluator.evaluate(arguments)
		print(results)

		self.assertEqual(5, len(results))

		self.assertEqual("setosa", results["Species"])
		self.assertEqual(1.0, results["probability(setosa)"])
		self.assertEqual(0.0, results["probability(versicolor)"])
		self.assertEqual(0.0, results["probability(virginica)"])
		self.assertTrue(results["report(probability(versicolor))"].startswith("<math "))

		arguments_df = pandas.read_csv(_resource("Iris.csv"), sep = ",")
		print(arguments_df.head(5))

		results_df = evaluator.evaluateAll(arguments_df)
		print(results_df.head(5))

		self.assertEqual((150, 5), results_df.shape)