Example #1
    def testPredictionSavedModelWithBadInput(self):
        data_path = os.path.join(FLAGS.test_srcdir, SAVED_TESTDATA_BAD)
        with open(data_path) as f:
            # Read and parse two JSON input records (one per line).
            instances = [
                json.loads(next(f).rstrip("\n")),
                json.loads(next(f).rstrip("\n"))
            ]

        model_path = os.path.join(FLAGS.test_srcdir, INPUT_MODEL)
        with self.assertRaises(mlprediction.PredictionError) as error:
            mlprediction.local_predict(model_dir=model_path,
                                       instances=instances)

        self.assertTrue(
            "Unexpected tensor name: x" in error.exception.error_detail)
Example #2
    def testLocalPredictionTensorflowModelWithStrings(self,
                                                      mock_create_client):

        signature_def = meta_graph_pb2.SignatureDef()
        signature_def.outputs["x_bytes"].dtype = types_pb2.DT_STRING
        signature_def.inputs["x_bytes"].dtype = types_pb2.DT_STRING

        mock_client = mock.Mock()
        mock_client.signature_map = {
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            signature_def
        }
        mock_client.predict.return_value = {"x_bytes": "to_encode"}
        mock_create_client.return_value = mock_client
        predictions = mlprediction.local_predict(model_dir=None,
                                                 instances=[{
                                                     "x_bytes": [1, 2, 3]
                                                 }])
        # Validate that the output is correctly base64 encoded (and only once)
        self.assertEqual(predictions, {
            "predictions": [{
                "x_bytes": {
                    "b64": base64.b64encode("to_encode")
                }
            }]
        })
Example #3
    def testPredictionWithSavedModel(self):
        data_path = os.path.join(FLAGS.test_srcdir, SAVED_TESTDATA)

        with open(data_path) as f:
            # Read and parse two JSON input records (one per line).
            instances = [
                json.loads(next(f).rstrip("\n")),
                json.loads(next(f).rstrip("\n"))
            ]

        model_path = os.path.join(FLAGS.test_srcdir, INPUT_MODEL)
        predictions = mlprediction.local_predict(model_dir=model_path,
                                                 instances=instances)

        prediction_result = [{
            "key": p["key"],
            "prediction": p["prediction"]
        } for p in predictions["predictions"]]
        # Only check the key and prediction, not the individual scores,
        # which are floating-point numbers.
        self.assertEqual(2, len(prediction_result))
        for r in prediction_result:
            if r["key"] == 0:
                self.assertEqual(3, r["prediction"])
            elif r["key"] == 9:
                self.assertEqual(4, r["prediction"])
            else:
                self.fail()
Example #4
    def testLocalPredictionXgboostModel(self):
        # Uses a trained xgboost model that computes x+y
        instances = [[10, 20], [1, 2], [5, 6]]
        model_path = os.path.join(FLAGS.test_srcdir, XGBOOST_MODEL)
        predictions = mlprediction.local_predict(
            model_dir=model_path,
            instances=instances,
            framework=mlprediction.XGBOOST_FRAMEWORK_NAME)
        self.assertEqual([round(i) for i in predictions["predictions"]],
                         [30, 3, 11])
Example #5
    def testLocalPredictionSklearnModel(self):
        # Uses a trained sklearn model that computes x+y
        instances = [[10, 20], [1, 2], [5, 6]]
        model_path = os.path.join(FLAGS.test_srcdir, SKLEARN_JOBLIB_MODEL)
        predictions = mlprediction.local_predict(
            model_dir=model_path,
            instances=instances,
            framework=mlprediction.SCIKIT_LEARN_FRAMEWORK_NAME)

        self.assertEqual(predictions, {"predictions": [30, 3, 11]})
Example #6
    def testPredictWithSavedModelWithCustomSignature(self):
        model_dir = os.path.join(FLAGS.test_tmpdir, "identity_model_predict")
        model_test_util.create_identity_model(model_dir=model_dir,
                                              signature_name="mysignature",
                                              tags=("tag1", "tag2"))
        result = mlprediction.local_predict(model_dir,
                                            tags=("tag1", "tag2"),
                                            signature_name="mysignature",
                                            instances=[{
                                                "in": "check"
                                            }])
        self.assertEqual(result["predictions"], [{"out": "check"}])

        # There is only one signature_def in the graph, so specifying it is optional.
        result = mlprediction.local_predict(model_dir,
                                            tags=("tag1", "tag2"),
                                            instances=[{
                                                "in": "check"
                                            }])
        self.assertEqual(result["predictions"], [{"out": "check"}])
Example #7
    def testPredictWithSavedModelMultipleSignatures(self):
        model_dir = os.path.join(FLAGS.test_tmpdir, "constant_model_predict")
        model_test_util.create_constant_model(model_dir,
                                              "mysignature",
                                              1,
                                              "serving_default",
                                              2,
                                              tags=("tag1", "tag2"))
        # Predict with specified signature.
        result = mlprediction.local_predict(model_dir,
                                            tags=("tag1", "tag2"),
                                            signature_name="mysignature",
                                            instances=[{
                                                "in": "check"
                                            }])
        self.assertEqual(result["predictions"], [{"out": 1}])

        # Predicting without a specified signature uses the serving default.
        result = mlprediction.local_predict(model_dir,
                                            tags=("tag1", "tag2"),
                                            instances=[{
                                                "in": "check"
                                            }])
        self.assertEqual(result["predictions"], [{"out": 2}])

        # Predicting with a nonexistent signature name raises an error.
        with self.assertRaises(mlprediction.PredictionError) as error:
            result = mlprediction.local_predict(
                model_dir,
                tags=("tag1", "tag2"),
                signature_name="wrongsignature",
                instances=[{
                    "in": "check"
                }])
        self.assertEqual(
            "No signature found for signature key wrongsignature.",
            error.exception.error_detail)
Example #8
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--model-dir', required=True, help='Path of the model.')
  args, _ = parser.parse_known_args()
  if not _verify_tensorflow():
    sys.exit(-1)

  instances = []
  for line in sys.stdin:
    instance = json.loads(line.rstrip('\n'))
    instances.append(instance)

  prediction = _import_prediction_lib()
  predictions = prediction.local_predict(model_dir=args.model_dir,
                                         instances=instances)
  print(json.dumps(predictions))
Example #9
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--model-dir', required=True, help='Path of the model.')
  args, _ = parser.parse_known_args()
  if not _has_required_package():
    sys.exit(-1)

  instances = []
  for line in sys.stdin:
    instance = json.loads(line.rstrip('\n'))
    instances.append(instance)

  # pylint: disable=g-import-not-at-top
  from google.cloud.ml import prediction
  # pylint: enable=g-import-not-at-top
  predictions = prediction.local_predict(model_dir=args.model_dir,
                                         instances=instances)
  print(json.dumps(predictions))
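Examples #8 and #9 read one JSON instance per line from stdin and print the resulting predictions as a single JSON object. As a minimal sketch of how such a script could be driven (assuming it is saved as a hypothetical predict_script.py and a model has been exported to ./model; both names are placeholders):

import json
import subprocess

# Hypothetical names used only for illustration.
SCRIPT = "predict_script.py"
MODEL_DIR = "./model"

# One JSON instance per input line, matching the stdin loop in main().
payload = "\n".join(json.dumps(i) for i in [{"in": "check"}, {"in": "again"}])

proc = subprocess.run(
    ["python", SCRIPT, "--model-dir", MODEL_DIR],
    input=payload,
    capture_output=True,
    text=True,
    check=True,
)
# main() prints a single JSON object such as {"predictions": [...]}.
print(json.loads(proc.stdout)["predictions"])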