Example #1
  def __init__(self, model_name, model_base_path, verbose=False):
    """
    Initialize the service.
        
    Args:
      model_name: The name of the model.
      model_base_path: The file path of the model.
      verbose: Whether to print verbose logs.
    Returns:
      None
    """

    super(PmmlInferenceService, self).__init__()

    self.model_name = model_name
    self.model_base_path = model_base_path
    self.model_version_list = [1]
    self.model_graph_signature = ""
    self.platform = "PMML"

    # Load model
    from openscoring import Openscoring
    openscoring_server_endpoint = "localhost:8080"
    kwargs = {"auth" : ("admin", "adminadmin")}
    self.openscoring = Openscoring("http://{}/openscoring".format(openscoring_server_endpoint))
    self.openscoring.deployFile(self.model_name, self.model_base_path, **kwargs)

    self.model_graph_signature = "No signature for PMML models"
    self.verbose = verbose
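
The constructor above only deploys the PMML file to the Openscoring server; requests are scored through the same client. As a rough, hedged sketch (not part of the original snippet, and assuming an inference method that receives parsed JSON with a "data" list of records), the service could evaluate requests like this:

  def inference(self, json_data):
    """Hypothetical method: score each record against the PMML model deployed in __init__."""
    records = json_data["data"]
    # Openscoring.evaluate(model_id, arguments) returns a dict of output fields.
    return [self.openscoring.evaluate(self.model_name, record) for record in records]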
Example #2
    def testReadme(self):
        openscoring = Openscoring()

        pmml = os.path.join(os.path.dirname(__file__), "resources",
                            "DecisionTreeIris.pmml")

        with open(pmml, "rb") as instream:
            pmmlBytes = instream.read()
        self.assertTrue(isinstance(pmmlBytes, bytes))
        self.assertEqual(4306, len(pmmlBytes))
        modelResponse = openscoring.deploy("Iris", pmmlBytes)
        self.assertEqual("Iris", modelResponse.id)

        modelResponse = openscoring.deployFile("Iris", pmml)
        self.assertEqual("Iris", modelResponse.id)

        arguments = {
            "Sepal_Length": 5.1,
            "Sepal_Width": 3.5,
            "Petal_Length": 1.4,
            "Petal_Width": 0.2
        }
        result = openscoring.evaluate("Iris", arguments)
        self.assertEqual(
            {
                "Species": "setosa",
                "Probability_setosa": 1.0,
                "Probability_versicolor": 0.0,
                "Probability_virginica": 0.0,
                "Node_Id": "2"
            }, result)
        evaluationRequest = EvaluationRequest("record-001", arguments)
        evaluationResponse = openscoring.evaluate("Iris", evaluationRequest)
        self.assertEqual(evaluationRequest.id, evaluationResponse.id)
        self.assertEqual("setosa", evaluationResponse.result["Species"])

        inCsv = os.path.join(os.path.dirname(__file__), "resources",
                             "input.csv")
        outCsv = os.path.join(tempfile.gettempdir(), "output.csv")

        df = pandas.read_csv(inCsv, sep=",")

        result = openscoring.evaluateCsv("Iris", df)
        self.assertEqual(df["Id"].tolist(), result["Id"].tolist())
        self.assertEqual(["setosa", "versicolor", "virginica"],
                         result["Species"].tolist())

        self.assertFalse(os.path.isfile(outCsv))
        openscoring.evaluateCsvFile("Iris", inCsv, outCsv)
        self.assertTrue(
            os.path.isfile(outCsv) and os.path.getsize(outCsv) > 10)

        os.remove(outCsv)

        openscoring.undeploy("Iris")

        with self.assertRaises(Exception) as context:
            openscoring.evaluate("Iris", arguments)
        self.assertEqual("Model \"Iris\" not found", str(context.exception))
Example #3
File: app.py Project: nmud19/hosting_test
def get_predictions(pw, pl, sw, sl):
    """
    Get predictions by source
    :param pw:
    :param pl:
    :param sw:
    :param sl:
    :return:
    """
    arguments = {
        "sepal_length": float(sl),
        "sepal_width": float(sw),
        "petal_length": float(pl),
        "petal_width": float(pw)
    }
    print(arguments)
    from openscoring import Openscoring
    os = Openscoring("http://localhost:8080/openscoring")
    result = os.evaluate("Iris", arguments)

    print(result)
    return html.Div([
        html.H3("The class detected was  : {}".format(result['species'])),
    ])
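
For context, get_predictions returns a Dash html.Div, so it is presumably invoked from a Dash callback. Below is a minimal, hedged sketch of such wiring under Dash >= 2.0; the component ids, layout, and app setup are assumptions for illustration, not taken from the nmud19/hosting_test project.

import dash
from dash import dcc, html
from dash.dependencies import Input, Output

app = dash.Dash(__name__)
app.layout = html.Div([
    dcc.Input(id="pw", value="0.2"), dcc.Input(id="pl", value="1.4"),
    dcc.Input(id="sw", value="3.5"), dcc.Input(id="sl", value="5.1"),
    html.Div(id="prediction"),
])

@app.callback(Output("prediction", "children"),
              [Input("pw", "value"), Input("pl", "value"),
               Input("sw", "value"), Input("sl", "value")])
def update_prediction(pw, pl, sw, sl):
    # Delegate to get_predictions, which queries the Openscoring server.
    return get_predictions(pw, pl, sw, sl)

if __name__ == "__main__":
    app.run_server(debug=True)  # development server (app.run on Dash 3+)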
Example #4
 def __init__(self):
     self.scoring = Openscoring("http://localhost:8080/openscoring")
     self.arguments = {"sourceTaggedAsFakeCount": 2.0, "reporterScore": 3.0}
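
A hedged sketch of how a class like this might use these fields; the score method and the "FakeNews" model id are assumptions for illustration, not taken from the original source:

 def score(self):
     # Hypothetical: evaluate the stored arguments against a model assumed
     # to be deployed on the server under the id "FakeNews".
     return self.scoring.evaluate("FakeNews", self.arguments)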
Example #5
	def testReadme(self):
		openscoring = Openscoring(base_url = "http://localhost:8080/openscoring", token = os.getenv("OPENSCORING_TOKEN", None))

		pmml = os.path.join(os.path.dirname(__file__), "resources", "DecisionTreeIris.pmml")

		with open(pmml, "rb") as instream:
			pmmlBytes = instream.read()
		self.assertTrue(isinstance(pmmlBytes, bytes))
		self.assertEqual(2919, len(pmmlBytes))
		modelResponse = openscoring.deploy("Iris", pmmlBytes)
		self.assertEqual("Iris", modelResponse.id)

		modelResponse = openscoring.deployFile("Iris", pmml)
		self.assertEqual("Iris", modelResponse.id)

		arguments = {
			"Sepal.Length" : 5.1,
			"Sepal.Width" : 3.5,
			"Petal.Length" : 1.4,
			"Petal.Width" : 0.2
		}
		results = openscoring.evaluate("Iris", arguments)
		self.assertEqual({"Species" : "setosa", "probability(setosa)" : 1.0, "probability(versicolor)" : 0.0, "probability(virginica)" : 0.0}, results)
		evaluationRequest = EvaluationRequest("record-001", arguments)
		evaluationResponse = openscoring.evaluate("Iris", evaluationRequest)
		self.assertEqual(evaluationRequest.id, evaluationResponse.id)
		self.assertEqual("setosa", evaluationResponse.results["Species"])

		batchArguments = [
			{
				"Petal.Length" : 1.4,
				"Petal.Width" : 0.2
			},
			{
				"Petal.Length" : 4.7,
				"Petal.Width" : 1.4
			},
			{
				"Petal.Length" : 3.6,
				"Petal.Width" : 2.5
			}
		]
		batchResults = openscoring.evaluateBatch("Iris", batchArguments)
		self.assertEqual(3, len(batchResults))
		self.assertEqual({"Species" : "setosa", "probability(setosa)" : 1.0, "probability(versicolor)" : 0.0, "probability(virginica)" : 0.0}, batchResults[0])
		self.assertEqual({"Species" : "versicolor", "probability(setosa)" : 0.0, "probability(versicolor)" : (49.0 / 54.0), "probability(virginica)" : (5.0 / 54.0)}, batchResults[1])
		self.assertEqual({"Species" : "virginica", "probability(setosa)" : 0.0, "probability(versicolor)" : (1.0 / 46.0), "probability(virginica)" : (45.0 / 46.0)}, batchResults[2])
		evaluationRequests = [EvaluationRequest(None, arguments) for arguments in batchArguments]
		batchEvaluationRequest = BatchEvaluationRequest("batch-A", evaluationRequests)
		batchEvaluationResponse = openscoring.evaluateBatch("Iris", batchEvaluationRequest)
		self.assertEqual(batchEvaluationRequest.id, batchEvaluationResponse.id)
		evaluationResponses = batchEvaluationResponse.responses
		self.assertEqual(3, len(evaluationResponses))
		self.assertEqual("setosa", evaluationResponses[0].results["Species"])
		self.assertEqual("versicolor", evaluationResponses[1].results["Species"])
		self.assertEqual("virginica", evaluationResponses[2].results["Species"])

		inCsv = os.path.join(os.path.dirname(__file__), "resources", "input.csv")
		outCsv = os.path.join(tempfile.gettempdir(), "output.csv")

		dfArguments = pandas.read_csv(inCsv, sep = ",")
		dfResults = openscoring.evaluateCsv("Iris", dfArguments)
		self.assertEqual((3, 1 + 4), dfResults.shape)
		self.assertEqual(dfArguments["Id"].tolist(), dfResults["Id"].tolist())
		self.assertEqual(["setosa", "versicolor", "virginica"], dfResults["Species"].tolist())

		self.assertFalse(os.path.isfile(outCsv))
		openscoring.evaluateCsvFile("Iris", inCsv, outCsv)
		self.assertTrue(os.path.isfile(outCsv) and os.path.getsize(outCsv) > 10)

		os.remove(outCsv)

		openscoring.undeploy("Iris")

		with self.assertRaises(Exception) as context:
			openscoring.evaluate("Iris", arguments)
		self.assertEqual("Not Found", str(context.exception))
Example #6
#!/usr/bin/env python

from openscoring import Openscoring

os = Openscoring("http://localhost:8080/openscoring")

kwargs = {"auth": ("admin", "adminadmin")}

model_file_path = "../../models/pmml_iris/DecisionTreeIris.pmml"
model_name = "PmmlModel"
os.deployFile(model_name, model_file_path, **kwargs)

arguments = {
    "Sepal_Length": 5.1,
    "Sepal_Width": 3.5,
    "Petal_Length": 1.4,
    "Petal_Width": 0.2
}

result = os.evaluate(model_name, arguments)
print(result)
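
When the model is no longer needed it can be removed from the server. A short follow-up sketch; passing the same auth kwargs to undeploy is an assumption about this server's configuration:

# Remove the deployed model from the Openscoring server.
os.undeploy(model_name, **kwargs)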