Example No. 1
  def __init__(self, model_name, model_base_path, verbose=False):
    """
    Initialize the service.
        
    Args:
      model_name: The name of the model.
      model_base_path: The file path of the model.
      verbose: Whether to log verbose inference details.
    Returns:
      None
    """

    super(PmmlInferenceService, self).__init__()

    self.model_name = model_name
    self.model_base_path = model_base_path
    self.model_version_list = [1]
    self.model_graph_signature = ""
    self.platform = "PMML"

    # Load model
    from openscoring import Openscoring
    openscoring_server_endpoint = "localhost:8080"
    kwargs = {"auth" : ("admin", "adminadmin")}
    self.openscoring = Openscoring("http://{}/openscoring".format(openscoring_server_endpoint))
    self.openscoring.deployFile(self.model_name, self.model_base_path, **kwargs)

    self.model_graph_signature = "No signature for PMML models"
    self.verbose = verbose
Example No. 2
 def testMergeDictOverride(self):
     self.assertEqual({"A": {
         "one": 1
     }}, Openscoring._merge({"A": {
         "one": 1
     }}))
     self.assertEqual({"A": {
         "one": "1"
     }}, Openscoring._merge({"A": {
         "one": 1
     }}, A={"one": "1"}))
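
Together with the other _merge tests below (Examples 4, 6-8, 12-16, 21 and 23), the assertions above pin down the behaviour of Openscoring._merge: a missing user dictionary is treated as empty, keyword arguments are merged in, dictionary values are merged key by key with the keyword side winning, and conflicting non-equal values raise an exception. A minimal sketch consistent with those tests (a hypothetical re-implementation, not the library's actual code):

def merge(user_dict=None, **kwargs):
    # Hypothetical stand-in for Openscoring._merge, inferred from the tests only.
    result = dict(user_dict) if user_dict is not None else {}
    for key, value in kwargs.items():
        if key not in result:
            result[key] = value
        elif isinstance(result[key], dict) and isinstance(value, dict):
            merged = dict(result[key])
            merged.update(value)  # keyword values override the user dictionary
            result[key] = merged
        elif result[key] != value:
            raise ValueError("conflicting values for key {!r}".format(key))
    return result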
Example No. 3
import json

from openscoring import Openscoring


class FNDOpenScoring:
    def __init__(self):
        self.scoring = Openscoring("http://localhost:8080/openscoring")
        self.arguments = {"sourceTaggedAsFakeCount": 2.0, "reporterScore": 3.0}

    def _deploy(self):
        self.scoring.deployFile("FakeNewsDetector", "FakeNewsAIModel.pmml",
                                **{"auth": ("admin", "adminadmin")})

    def _setArgs(self, jsonString):
        jsonObj = json.loads(jsonString)
        self.arguments["similarWebAvgScore"] = jsonObj['average']
        self.arguments["similarWebStdScore"] = jsonObj['variance']
        self.arguments["fakeFactSitesCount"] = jsonObj['blacklisted']

    def __call__(self, jsonString):
        self._deploy()
        self._setArgs(jsonString)
        result = self.scoring.evaluate("FakeNewsDetector", self.arguments)
        return result
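
A hypothetical invocation of the detector above, assuming an Openscoring server on localhost:8080 and FakeNewsAIModel.pmml in the working directory (the numeric values are made up):

detector = FNDOpenScoring()
payload = json.dumps({"average": 0.42, "variance": 0.17, "blacklisted": 1})
print(detector(payload))  # evaluation result of the "FakeNewsDetector" model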
Example No. 4
 def testMergeDict(self):
     self.assertEqual({"A": {
         "one": 1,
         "two": 2,
         "three": 3
     }}, Openscoring._merge({"A": {
         "one": 1
     }}, A={
         "two": 2,
         "three": 3
     }))
Example No. 5
from dash import html  # Dash >= 2.0; older versions used `import dash_html_components as html`


def get_predictions(pw, pl, sw, sl):
    """
    Get a prediction from the deployed Iris model.
    :param pw: petal width
    :param pl: petal length
    :param sw: sepal width
    :param sl: sepal length
    :return: a Dash html.Div with the predicted species
    """
    arguments = {
        "sepal_length": float(sl),
        "sepal_width": float(sw),
        "petal_length": float(pl),
        "petal_width": float(pw)
    }
    print(arguments)
    from openscoring import Openscoring
    os = Openscoring("http://localhost:8080/openscoring")
    result = os.evaluate("Iris", arguments)

    print(result)
    return html.Div([
        html.H3("The class detected was  : {}".format(result['species'])),
    ])
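
Since get_predictions returns a Dash html.Div, it is presumably wired up as a callback output. A hedged sketch of such a callback (the app object and the component ids are assumptions, not taken from the original application):

from dash.dependencies import Input, Output

@app.callback(
    Output("prediction-output", "children"),
    [Input("petal-width", "value"),
     Input("petal-length", "value"),
     Input("sepal-width", "value"),
     Input("sepal-length", "value")])
def update_prediction(pw, pl, sw, sl):
    return get_predictions(pw, pl, sw, sl)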
Example No. 6
	def testMergeDict(self):
		self.assertEqual({"A" : {"one" : 1, "two" : 2, "three" : 3}}, Openscoring._merge({"A" : {"one" : 1}}, A = {"two" : 2, "three" : 3}))
Example No. 7
	def testMergeValueConflict(self):
		with self.assertRaises(Exception):
			Openscoring._merge({"A" : 1}, A = "1")
Example No. 8
 def testMissingUserDict(self):
     self.assertEqual({}, Openscoring._merge(None))
     self.assertEqual({"A": 1}, Openscoring._merge(None, A=1))
     self.assertEqual({"A": {
         "one": 1
     }}, Openscoring._merge(None, A={"one": 1}))
Example No. 9
#!/usr/bin/env python

from openscoring import Openscoring

os = Openscoring("http://localhost:8080/openscoring")

kwargs = {"auth": ("admin", "adminadmin")}

model_file_path = "../../models/pmml_iris/DecisionTreeIris.pmml"
model_name = "PmmlModel"
os.deployFile(model_name, model_file_path, **kwargs)

arguments = {
    "Sepal_Length": 5.1,
    "Sepal_Width": 3.5,
    "Petal_Length": 1.4,
    "Petal_Width": 0.2
}

result = os.evaluate(model_name, arguments)
print(result)
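
The same client object can also score tabular data in bulk; evaluateCsv and evaluateCsvFile are exercised in the test examples further down (Examples 19 and 22). A hedged follow-on to the script above, assuming a local iris_input.csv whose column names match the PMML input fields:

import pandas

df = pandas.read_csv("iris_input.csv")  # hypothetical input file
results = os.evaluateCsv(model_name, df)  # returns a pandas DataFrame with the predictions
print(results.head())

os.evaluateCsvFile(model_name, "iris_input.csv", "iris_output.csv")  # file-to-file scoring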
Example No. 10

app.config['suppress_callback_exceptions'] = True

if __name__ == '__main__':
    import pandas
    from sklearn.tree import DecisionTreeClassifier
    from sklearn2pmml.pipeline import PMMLPipeline
    from sklearn2pmml import sklearn2pmml

    iris_df = pandas.read_csv("iris.csv")
    #
    pipeline = PMMLPipeline([("classifier", DecisionTreeClassifier())])
    pipeline.fit(iris_df[iris_df.columns.difference(["species"])],
                 iris_df["species"])
    #
    sklearn2pmml(pipeline, "DecisionTreeIris.pmml", with_repr=True)

    ## Actual openscoring code
    from openscoring import Openscoring
    import subprocess

    p = subprocess.Popen('java -jar openscoring-server-executable-1.4.3.jar',
                         shell=True)

    os = Openscoring("http://localhost:8080/openscoring")
    #
    kwargs = {"auth": ("admin", "adminadmin")}
    os.deployFile("Iris", "DecisionTreeIris.pmml", **kwargs)

    app.run_server(debug=True, port=9000)
Example No. 11
                          ("Income", ContinuousDomain()),
                          (["Hours", "Income"],
                           Alias(ExpressionTransformer("X[1] / (X[0] * 52)"),
                                 "Hourly_Income"))])
classifier = H2ORandomForestEstimator(ntrees=17)

predict_proba_transformer = Pipeline([
    ("expression", ExpressionTransformer("X[1]")),
    ("cut",
     Alias(CutTransformer(bins=[0.0, 0.75, 0.90, 1.0],
                          labels=["no", "maybe", "yes"]),
           "Decision",
           prefit=True))
])

pipeline = PMMLPipeline([("local_mapper", mapper),
                         ("uploader", H2OFrameCreator()),
                         ("remote_classifier", classifier)],
                        predict_proba_transformer=predict_proba_transformer)
pipeline.fit(audit_X, H2OFrame(audit_y.to_frame(),
                               column_types=["categorical"]))

pipeline.verify(audit_X.sample(100))

sklearn2pmml(pipeline, "pmml/RandomForestAudit.pmml")

if "--deploy" in sys.argv:
    from openscoring import Openscoring

    os = Openscoring("http://localhost:8080/openscoring")
    os.deployFile("RandomForestAudit", "pmml/RandomForestAudit.pmml")
Example No. 12
 def testMergeValueConflict(self):
     with self.assertRaises(Exception):
         Openscoring._merge({"A": 1}, A="1")
Example No. 13
 def testMergeValueEqual(self):
     self.assertEqual({"A": 1}, Openscoring._merge({"A": 1}, A=1))
Example No. 14
 def testMergeValue(self):
     self.assertEqual({
         "A": 1,
         "B": 2,
         "C": 3
     }, Openscoring._merge({"A": 1}, B=2, C=3))
Example No. 15
	def testMergeDictOverride(self):
		self.assertEqual({"A" : {"one" : 1}}, Openscoring._merge({"A" : {"one" : 1}}))
		self.assertEqual({"A" : {"one" : "1"}}, Openscoring._merge({"A" : {"one" : 1}}, A = {"one" : "1"}))
Example No. 16
	def testMissingUserDict(self):
		self.assertEqual({}, Openscoring._merge(None))
		self.assertEqual({"A" : 1}, Openscoring._merge(None, A = 1))
		self.assertEqual({"A" : {"one" : 1}}, Openscoring._merge(None, A = {"one" : 1}))
Example No. 17
        LabelBinarizer()
    ]), ("Hours", ContinuousDomain()), ("Income", ContinuousDomain()),
    (["Hours", "Income"],
     Alias(ExpressionTransformer("X[1] / (X[0] * 52)"), "Hourly_Income"))
])
interaction_mapper = DataFrameMapper([
    ("Gender", [CategoricalDomain(), LabelBinarizer()]),
    ("Marital", [CategoricalDomain(), LabelBinarizer()])
])
classifier = XGBClassifier()

pipeline = PMMLPipeline([
    ("mapper",
     FeatureUnion([("scalar_mapper", scalar_mapper),
                   ("interaction",
                    Pipeline([("interaction_mapper", interaction_mapper),
                              ("polynomial", PolynomialFeatures())]))])),
    ("classifier", classifier)
])
pipeline.fit(audit_X, audit_y)

pipeline.configure(compact=True)
pipeline.verify(audit_X.sample(100), zeroThreshold=1e-6, precision=1e-6)

sklearn2pmml(pipeline, "pmml/XGBoostAudit.pmml")

if "--deploy" in sys.argv:
    from openscoring import Openscoring

    os = Openscoring("http://localhost:8080/openscoring")
    os.deployFile("XGBoostAudit", "pmml/XGBoostAudit.pmml")
Example No. 18
from sklearn2pmml import sklearn2pmml
from sklearn2pmml.pipeline import PMMLPipeline
from sklearn2pmml.ruleset import RuleSetClassifier

import pandas
import sys

iris_df = pandas.read_csv("csv/Iris.csv")
#print(iris_df.head(5))

iris_X = iris_df[iris_df.columns.difference(["Species"])]
iris_y = iris_df["Species"]

classifier = RuleSetClassifier([
	("X['Petal_Length'] < 2.45", "setosa"),
	("X['Petal_Width'] < 1.75", "versicolor"),
], default_score = "virginica")

pipeline = PMMLPipeline([
	("classifier", classifier)
])
pipeline.fit(iris_X, iris_y)

sklearn2pmml(pipeline, "pmml/RuleSetIris.pmml")

if "--deploy" in sys.argv:
	from openscoring import Openscoring

	os = Openscoring("http://localhost:8080/openscoring")
	os.deployFile("RuleSetIris", "pmml/RuleSetIris.pmml")
Example No. 19
    def testReadme(self):
        openscoring = Openscoring()

        pmml = os.path.join(os.path.dirname(__file__), "resources",
                            "DecisionTreeIris.pmml")

        with open(pmml, "rb") as instream:
            pmmlBytes = instream.read()
        self.assertTrue(isinstance(pmmlBytes, bytes))
        self.assertEqual(4306, len(pmmlBytes))
        modelResponse = openscoring.deploy("Iris", pmmlBytes)
        self.assertEqual("Iris", modelResponse.id)

        modelResponse = openscoring.deployFile("Iris", pmml)
        self.assertEqual("Iris", modelResponse.id)

        arguments = {
            "Sepal_Length": 5.1,
            "Sepal_Width": 3.5,
            "Petal_Length": 1.4,
            "Petal_Width": 0.2
        }
        result = openscoring.evaluate("Iris", arguments)
        self.assertEqual(
            {
                "Species": "setosa",
                "Probability_setosa": 1.0,
                "Probability_versicolor": 0.0,
                "Probability_virginica": 0.0,
                "Node_Id": "2"
            }, result)
        evaluationRequest = EvaluationRequest("record-001", arguments)
        evaluationResponse = openscoring.evaluate("Iris", evaluationRequest)
        self.assertEqual(evaluationRequest.id, evaluationResponse.id)
        self.assertEqual("setosa", evaluationResponse.result["Species"])

        inCsv = os.path.join(os.path.dirname(__file__), "resources",
                             "input.csv")
        outCsv = os.path.join(tempfile.gettempdir(), "output.csv")

        df = pandas.read_csv(inCsv, sep=",")

        result = openscoring.evaluateCsv("Iris", df)
        self.assertEqual(df["Id"].tolist(), result["Id"].tolist())
        self.assertEqual(["setosa", "versicolor", "virginica"],
                         result["Species"].tolist())

        self.assertFalse(os.path.isfile(outCsv))
        openscoring.evaluateCsvFile("Iris", inCsv, outCsv)
        self.assertTrue(
            os.path.isfile(outCsv) and os.path.getsize(outCsv) > 10)

        os.remove(outCsv)

        openscoring.undeploy("Iris")

        with self.assertRaises(Exception) as context:
            openscoring.evaluate("Iris", arguments)
        self.assertEqual("Model \"Iris\" not found", str(context.exception))
Example No. 20
class PmmlInferenceService(AbstractInferenceService):
  """
  The service to load PMML model and make inference.
  """

  def __init__(self, model_name, model_base_path, verbose=False):
    """
    Initialize the service.
        
    Args:
      model_name: The name of the model.
      model_base_path: The file path of the model.
      verbose: Whether to log verbose inference details.
    Returns:
      None
    """

    super(PmmlInferenceService, self).__init__()

    self.model_name = model_name
    self.model_base_path = model_base_path
    self.model_version_list = [1]
    self.model_graph_signature = ""
    self.platform = "PMML"

    # Load model
    from openscoring import Openscoring
    openscoring_server_endpoint = "localhost:8080"
    kwargs = {"auth" : ("admin", "adminadmin")}
    self.openscoring = Openscoring("http://{}/openscoring".format(openscoring_server_endpoint))
    self.openscoring.deployFile(self.model_name, self.model_base_path, **kwargs)

    self.model_graph_signature = "No signature for PMML models"
    self.verbose = verbose

  def inference(self, json_data):
    """
    Make inference with the current Session object and JSON request data.
        
    Args:
      json_data: The JSON serialized object with key and array data.
                 Example is {"model_version": 1, "data": {"keys": [[1.0], [2.0]], "features": [[10, 10, 10, 8, 6, 1, 8, 9, 1], [6, 2, 1, 1, 1, 1, 7, 1, 1]]}}.
    Returns:
      The dictionary with key and array data.
      Example is {"keys": [[11], [2]], "softmax": [[0.61554497, 0.38445505], [0.61554497, 0.38445505]], "prediction": [0, 0]}.
    """

    # 1. Build inference data
    # Example: arguments = {"Sepal_Length" : 5.1, "Sepal_Width" : 3.5, "Petal_Length" : 1.4, "Petal_Width" : 0.2}
    request_json_data = json_data["data"]

    # 2. Do inference
    if self.verbose:
      start_time = time.time()

    # Example: {'Probability_setosa': 1.0, 'Probability_versicolor': 0.0, 'Node_Id': '2', 'Species': 'setosa', 'Probability_virginica': 0.0}
    predict_result = self.openscoring.evaluate(self.model_name, request_json_data)

    if self.verbose:
      logging.debug("Inference time: {} s".format(time.time() - start_time))

    # 3. Build return data
    result = {
        "result": predict_result,
    }

    if self.verbose:
      logging.debug("Inference result: {}".format(result))

    return result
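
A hypothetical way to exercise the class above, assuming an Openscoring server is already listening on localhost:8080 and an Iris decision-tree PMML file is available locally (the model name and file name are assumptions):

# Hypothetical usage of PmmlInferenceService; the data format follows the inline comments above.
service = PmmlInferenceService("Iris", "DecisionTreeIris.pmml")
request = {
    "model_version": 1,
    "data": {
        "Sepal_Length": 5.1,
        "Sepal_Width": 3.5,
        "Petal_Length": 1.4,
        "Petal_Width": 0.2
    }
}
print(service.inference(request))
# e.g. {"result": {"Species": "setosa", "Probability_setosa": 1.0, ...}}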
Example No. 21
	def testMergeValue(self):
		self.assertEqual({"A" : 1, "B" : 2, "C" : 3}, Openscoring._merge({"A" : 1}, B = 2, C = 3))
Example No. 22
	def testReadme(self):
		openscoring = Openscoring(base_url = "http://localhost:8080/openscoring", token = os.getenv("OPENSCORING_TOKEN", None))

		pmml = os.path.join(os.path.dirname(__file__), "resources", "DecisionTreeIris.pmml")

		with open(pmml, "rb") as instream:
			pmmlBytes = instream.read()
		self.assertTrue(isinstance(pmmlBytes, bytes))
		self.assertEqual(2919, len(pmmlBytes))
		modelResponse = openscoring.deploy("Iris", pmmlBytes)
		self.assertEqual("Iris", modelResponse.id)

		modelResponse = openscoring.deployFile("Iris", pmml)
		self.assertEqual("Iris", modelResponse.id)

		arguments = {
			"Sepal.Length" : 5.1,
			"Sepal.Width" : 3.5,
			"Petal.Length" : 1.4,
			"Petal.Width" : 0.2
		}
		results = openscoring.evaluate("Iris", arguments)
		self.assertEqual({"Species" : "setosa", "probability(setosa)" : 1.0, "probability(versicolor)" : 0.0, "probability(virginica)" : 0.0}, results)
		evaluationRequest = EvaluationRequest("record-001", arguments)
		evaluationResponse = openscoring.evaluate("Iris", evaluationRequest)
		self.assertEqual(evaluationRequest.id, evaluationResponse.id)
		self.assertEqual("setosa", evaluationResponse.results["Species"])

		batchArguments = [
			{
				"Petal.Length" : 1.4,
				"Petal.Width" : 0.2
			},
			{
				"Petal.Length" : 4.7,
				"Petal.Width" : 1.4
			},
			{
				"Petal.Length" : 3.6,
				"Petal.Width" : 2.5
			}
		]
		batchResults = openscoring.evaluateBatch("Iris", batchArguments)
		self.assertEqual(3, len(batchResults))
		self.assertEqual({"Species" : "setosa", "probability(setosa)" : 1.0, "probability(versicolor)" : 0.0, "probability(virginica)" : 0.0}, batchResults[0])
		self.assertEqual({"Species" : "versicolor", "probability(setosa)" : 0.0, "probability(versicolor)" : (49.0 / 54.0), "probability(virginica)" : (5.0 / 54.0)}, batchResults[1])
		self.assertEqual({"Species" : "virginica", "probability(setosa)" : 0.0, "probability(versicolor)" : (1.0 / 46.0), "probability(virginica)" : (45.0 / 46.0)}, batchResults[2])
		evaluationRequests = [EvaluationRequest(None, arguments) for arguments in batchArguments]
		batchEvaluationRequest = BatchEvaluationRequest("batch-A", evaluationRequests)
		batchEvaluationResponse = openscoring.evaluateBatch("Iris", batchEvaluationRequest)
		self.assertEqual(batchEvaluationRequest.id, batchEvaluationResponse.id)
		evaluationResponses = batchEvaluationResponse.responses
		self.assertEqual(3, len(evaluationResponses))
		self.assertEqual("setosa", evaluationResponses[0].results["Species"])
		self.assertEqual("versicolor", evaluationResponses[1].results["Species"])
		self.assertEqual("virginica", evaluationResponses[2].results["Species"])

		inCsv = os.path.join(os.path.dirname(__file__), "resources", "input.csv")
		outCsv = os.path.join(tempfile.gettempdir(), "output.csv")

		dfArguments = pandas.read_csv(inCsv, sep = ",")
		dfResults = openscoring.evaluateCsv("Iris", dfArguments)
		self.assertEqual((3, 1 + 4), dfResults.shape)
		self.assertEqual(dfArguments["Id"].tolist(), dfResults["Id"].tolist())
		self.assertEqual(["setosa", "versicolor", "virginica"], dfResults["Species"].tolist())

		self.assertFalse(os.path.isfile(outCsv))
		openscoring.evaluateCsvFile("Iris", inCsv, outCsv)
		self.assertTrue(os.path.isfile(outCsv) and os.path.getsize(outCsv) > 10)

		os.remove(outCsv)

		openscoring.undeploy("Iris")

		with self.assertRaises(Exception) as context:
			openscoring.evaluate("Iris", arguments)
		self.assertEqual("Not Found", str(context.exception))
Example No. 23
	def testMergeValueEqual(self):
		self.assertEqual({"A" : 1}, Openscoring._merge({"A" : 1}, A = 1))
Example No. 24
 def __init__(self):
     self.scoring = Openscoring("http://localhost:8080/openscoring")
     self.arguments = {"sourceTaggedAsFakeCount": 2.0, "reporterScore": 3.0}
Example No. 25
## Logging configuration
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(message)s')
logging.getLogger('requests').setLevel(logging.NOTSET)

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message

## Callback specification
client.message_callback_add("pmml/allongement", on_message_allongement)
client.message_callback_add("pmml/durete", on_message_durete)
client.message_callback_add("pmml/rupture", on_message_rupture)
client.message_callback_add("pmml/100", on_message_100)

## Openscoring definition and model deployment
os = Openscoring("http://localhost:8080/openscoring")

os.deployFile("rupture", "model/rank_contrainterupture.pmml")
os.deployFile("100", "model/rank_contrainte100.pmml")
os.deployFile("allongement", "model/rank_allongement.pmml")
os.deployFile("durete", "model/rank_durete.pmml")

## Global variable definition
urllib3.contrib.pyopenssl.inject_into_urllib3()
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())

reset_data()

batch_consumed = ""

server_rheo = 'tcp:SLVDSQL02'
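
The topic-specific handlers registered above (on_message_allongement, on_message_durete, on_message_rupture, on_message_100) are defined elsewhere in the original script; a hedged sketch of what one of them could look like, scoring the matching deployed model (the JSON payload format is an assumption):

import json

def on_message_durete(client, userdata, msg):
    ## Hypothetical handler: decode a JSON dictionary of input fields and
    ## evaluate it against the "durete" model deployed above.
    arguments = json.loads(msg.payload.decode("utf-8"))
    result = os.evaluate("durete", arguments)
    logging.debug("durete prediction: %s", result)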