Example #1
    def validate(self):
        if "inputs" not in self.request:
            raise InferenceError("Expected key `inputs` in request body")
        # Assumes a single input; only the first entry is inspected
        inputs = self.request["inputs"][0]
        data_type = inputs["datatype"]

        if data_type == "BYTES":
            raise InferenceError(
                "KFServing protocol BYTES data can not be presently handled")
Example #2
    async def invocations(self, request: Request) -> Response:
        """
        This custom handler is meant to mimic the behaviour of the existing
        scoring server in MLflow.
        For details about its implementation, please consult the original
        implementation in the MLflow repository:

            https://github.com/mlflow/mlflow/blob/master/mlflow/pyfunc/scoring_server/__init__.py
        """
        content_type = request.headers.get("content-type", None)
        raw_data = await request.body()
        as_str = raw_data.decode("utf-8")

        if content_type == CONTENT_TYPE_CSV:
            csv_input = StringIO(as_str)
            data = parse_csv_input(csv_input=csv_input)
        elif content_type == CONTENT_TYPE_JSON:
            data = infer_and_parse_json_input(as_str, self._input_schema)
        elif content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:
            data = parse_json_input(
                json_input=StringIO(as_str),
                orient="split",
                schema=self._input_schema,
            )
        elif content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:
            data = parse_json_input(
                json_input=StringIO(as_str),
                orient="records",
                schema=self._input_schema,
            )
        elif content_type == CONTENT_TYPE_JSON_SPLIT_NUMPY:
            data = parse_split_oriented_json_input_to_numpy(as_str)
        else:
            content_type_error_message = (
                "This predictor only supports the following content types, "
                f"{CONTENT_TYPES}. Got '{content_type}'.")
            raise InferenceError(content_type_error_message)

        try:
            raw_predictions = self._model.predict(data)
        except MlflowException as e:
            raise InferenceError(e.message)
        except Exception:
            error_message = (
                "Encountered an unexpected error while evaluating the model. Verify"
                " that the serialized input Dataframe is compatible with the model for"
                " inference.")
            raise InferenceError(error_message)

        result = StringIO()
        predictions_to_json(raw_predictions, result)
        return Response(content=result.getvalue(),
                        media_type="application/json")
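For context, a hypothetical client-side call against this handler. The `/invocations` route, the port, and the `format=pandas-records` content-type string follow MLflow scoring-server conventions, but none of them appear in the snippet above, so treat all three as assumptions.

    import requests  # assumed to be available

    records_payload = '[{"sepal_length": 5.1, "sepal_width": 3.5}]'
    resp = requests.post(
        "http://localhost:8080/invocations",  # hypothetical host and route
        data=records_payload,
        headers={"Content-Type": "application/json; format=pandas-records"},
    )
    print(resp.status_code, resp.text)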
Example #3
    def _check_request(
            self, payload: types.InferenceRequest) -> types.InferenceRequest:
        if len(payload.inputs) != 1:
            raise InferenceError(
                "AlibiDetector only supports a single input tensor "
                f"({len(payload.inputs)} were received)")
        return payload
Example #4
    def _check_request(
            self, payload: types.InferenceRequest) -> types.InferenceRequest:
        if len(payload.inputs) != 1:
            raise InferenceError(
                "SKLearnModel only supports a single input tensor "
                f"({len(payload.inputs)} were received)")

        if not payload.outputs:
            # By default, only return the result of `predict()`
            payload.outputs = [types.RequestOutput(name=PREDICT_OUTPUT)]
        else:
            for request_output in payload.outputs:
                if request_output.name not in VALID_OUTPUTS:
                    raise InferenceError(
                        f"SKLearnModel only supports '{PREDICT_OUTPUT}' and "
                        f"'{PREDICT_PROBA_OUTPUT}' as outputs "
                        f"({request_output.name} was received)")

        return payload
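The defaulting logic is easiest to see with the server-specific types stripped away. A minimal sketch, with `PREDICT_OUTPUT` and `PREDICT_PROBA_OUTPUT` re-declared here as assumed string constants for illustration:

    from typing import List

    PREDICT_OUTPUT = "predict"                 # assumed values
    PREDICT_PROBA_OUTPUT = "predict_proba"
    VALID_OUTPUTS = {PREDICT_OUTPUT, PREDICT_PROBA_OUTPUT}

    def normalise_outputs(requested: List[str]) -> List[str]:
        if not requested:
            # By default, only return the result of `predict()`
            return [PREDICT_OUTPUT]
        for name in requested:
            if name not in VALID_OUTPUTS:
                raise ValueError(f"Unsupported output: {name}")
        return requested

    print(normalise_outputs([]))                 # ['predict']
    print(normalise_outputs(["predict_proba"]))  # ['predict_proba']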
Example #5
    def _check_request(
            self, payload: types.InferenceRequest) -> types.InferenceRequest:
        if len(payload.inputs) != 1:
            raise InferenceError(
                "LightGBM only supports a single input tensor "
                f"({len(payload.inputs)} were received)")

        # Convert to `numpy.ndarray` and store in parameters
        try:
            model_input = payload.inputs[0]
            array_data = to_ndarray(model_input)

            model_input.parameters = {"data": array_data}
        except Exception as e:
            # A few things can go wrong here, e.g. the array has fewer than
            # two dimensions, or the input data is not compatible with a
            # numpy array
            raise InferenceError("Invalid input to LightGBM") from e

        return payload
Example #6
def _get_request_ty(request: dict) -> SeldonPayload:
    data_def = request["data"]
    if "tensor" in data_def:
        return SeldonPayload.TENSOR
    elif "ndarray" in data_def:
        return SeldonPayload.NDARRAY
    elif "tftensor" in data_def:
        return SeldonPayload.TFTENSOR
    else:
        raise InferenceError("Unknown Seldon payload %s" % data_def)
Example #7
    def _check_request(
            self, payload: types.InferenceRequest) -> types.InferenceRequest:
        if len(payload.inputs) != 1:
            raise InferenceError(
                "XGBoostModel only supports a single input tensor "
                f"({len(payload.inputs)} were received)")

        # Convert to `xgboost.DMatrix` and store in parameters
        # TODO: Move this out to "types conversion" pipeline, once it's there.
        try:
            model_input = payload.inputs[0]
            array_data = to_ndarray(model_input)
            dmatrix_data = xgb.DMatrix(array_data)

            # TODO: Use Parameters object
            model_input.parameters = {
                "dmatrix_data": dmatrix_data
            }  # type: ignore
        except Exception as e:
            # A few things can go wrong here, e.g. the array has fewer than
            # two dimensions, or the input data is not compatible with a
            # numpy array
            raise InferenceError("Invalid input to XGBoostModel") from e

        return payload
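The conversion step in isolation, with the runtime-specific pieces inlined: a plain dict stands in for the input tensor, and `to_ndarray` is approximated by a reshape of the flat data (an assumption for illustration, not the real helper):

    import numpy as np
    import xgboost as xgb

    model_input = {"name": "input-0", "datatype": "FP32",
                   "shape": [2, 2], "data": [1.0, 2.0, 3.0, 4.0]}

    # Approximates `to_ndarray`: flat data reshaped to the declared shape
    array_data = np.array(model_input["data"]).reshape(model_input["shape"])
    dmatrix_data = xgb.DMatrix(array_data)
    print(dmatrix_data.num_row(), dmatrix_data.num_col())  # 2 2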
Example #8
def _extract_list(body: dict) -> np.ndarray:
    """Extract the request payload into a numpy array, handling the
    `tensor`, `ndarray` and `tftensor` encodings."""
    data_def = body["data"]
    if "tensor" in data_def:
        arr = np.array(data_def.get("tensor").get("values")).reshape(
            data_def.get("tensor").get("shape"))
        return arr
    elif "ndarray" in data_def:
        return np.array(data_def.get("ndarray"))
    elif "tftensor" in data_def:
        arr = np.array(data_def["tftensor"]["float_val"])
        shape = []
        for dim in data_def["tftensor"]["tensor_shape"]["dim"]:
            shape.append(dim["size"])
        arr = arr.reshape(shape)
        return arr
    else:
        raise InferenceError("Unknown Seldon payload %s" % body)
Example #9
    async def detect(self, request: Request) -> Response:
        """
        This custom handler is meant to mimic the prediction behaviour of
        alibi-detect.
        """
        raw_data = await request.body()
        as_str = raw_data.decode("utf-8")

        try:
            body = orjson.loads(as_str)
        except orjson.JSONDecodeError as e:
            raise InferenceError("Unrecognized request format: %s" % e)

        request_handler = get_request_handler(
            Protocol(self.alibi_detect_settings.protocol), body)
        request_handler.validate()
        input_data = request_handler.extract_request()

        y = await self.predict_fn(input_data)
        output_data = orjson.dumps(y, option=orjson.OPT_SERIALIZE_NUMPY)

        return Response(content=output_data, media_type="application/json")
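A small self-contained illustration of the `orjson` call on the response path: `OPT_SERIALIZE_NUMPY` lets numpy arrays be serialized directly, which is why `y` can carry arrays without manual `.tolist()` conversion. The payload shape here is made up for illustration.

    import numpy as np
    import orjson

    y = {"data": {"is_outlier": np.array([0, 1, 0])}}
    print(orjson.dumps(y, option=orjson.OPT_SERIALIZE_NUMPY))
    # b'{"data":{"is_outlier":[0,1,0]}}'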
Example #10
    def validate(self):
        if "data" not in self.request:
            raise InferenceError("Expected key `data` in request body")

        _get_request_ty(self.request)
Example #11
    def validate(self):
        if "instances" not in self.request:
            raise InferenceError("Expected key `instances` in request body")