Example #1
    async def predict(self, request: InferenceRequest) -> InferenceResponse:

        request_dict = request.dict()

        insights_wrapper = InsightsWrapper(self.insights_manager)
        # TODO: Add request_id, response_headers, request_headers, etc
        payload_context = PayloadContext(request_id=request.id,
                                         request=request_dict)
        # Expose the per-request context to user code via a context variable
        tempo_wrapper = TempoContextWrapper(payload_context, insights_wrapper,
                                            self.state)
        tempo_context.set(tempo_wrapper)

        # The user model may be sync or async; await the result if needed
        response_dict = self._model.request(request_dict)
        if self._is_coroutine:
            response_dict = await response_dict  # type: ignore

        # TODO: Ensure model_version is added by mlserver
        response_dict["model_version"] = "NOTIMPLEMENTED"

        # TODO: Move to functions declared upfront with logic contained to avoid if
        if self._model.get_insights_mode == InsightRequestModes.ALL:
            insights_wrapper.log(request_dict,
                                 insights_type=InsightsTypes.INFER_REQUEST)
            insights_wrapper.log(response_dict,
                                 insights_type=InsightsTypes.INFER_RESPONSE)
        else:
            if (self._model.get_insights_mode == InsightRequestModes.REQUEST
                    or insights_wrapper.set_log_request):
                insights_wrapper.log(request_dict,
                                     insights_type=InsightsTypes.INFER_REQUEST)
            if (self._model.get_insights_mode == InsightRequestModes.RESPONSE
                    or insights_wrapper.set_log_response):
                insights_wrapper.log(response_dict,
                                     insights_type=InsightsTypes.INFER_RESPONSE)

        return InferenceResponse(**response_dict)
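Note: Example #1 publishes the per-request wrapper through tempo_context before calling the user model, which is Python's standard contextvars pattern. Below is a minimal, self-contained sketch of that pattern; RequestContext and every name in it are invented for illustration and are not Tempo's actual classes.

import contextvars
from typing import Optional

# Hypothetical stand-in for the TempoContextWrapper set inside predict() above.
class RequestContext:
    def __init__(self, request_id: str):
        self.request_id = request_id

tempo_context: contextvars.ContextVar = contextvars.ContextVar(
    "tempo_context", default=None)

def inside_user_model() -> None:
    # User code reads the per-request context without it being passed
    # explicitly through the call chain.
    ctx: Optional[RequestContext] = tempo_context.get()
    if ctx is not None:
        print(f"handling request {ctx.request_id}")

tempo_context.set(RequestContext(request_id="abc-123"))
inside_user_model()  # prints: handling request abc-123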
Example #2
    async def predict(self, payload: InferenceRequest) -> InferenceResponse:
        print("------ Encoded Input (request) ------")
        as_dict = payload.dict(exclude=_to_exclude)  # type: ignore
        print(json.dumps(as_dict, indent=2))
        print("------ Decoded input (request) ------")
        decoded_request = None
        if payload.parameters:
            decoded_request = getattr(payload.parameters, DecodedParameterName)
        print(decoded_request)

        outputs = []
        for request_input in payload.inputs:
            outputs.append(
                ResponseOutput(
                    name=request_input.name,
                    datatype=request_input.datatype,
                    shape=request_input.shape,
                    data=request_input.data,
                ))

        return InferenceResponse(model_name=self.name, outputs=outputs)
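Note: for context, this is the kind of V2 payload the echo runtime above receives. InferenceRequest and RequestInput are MLServer's real dataplane types; the tensor name, shape and values are made up.

import json

from mlserver.types import InferenceRequest, RequestInput

# Build the kind of payload predict() receives; names and values are illustrative.
request = InferenceRequest(
    inputs=[
        RequestInput(name="input-0", datatype="FP32", shape=[1, 3],
                     data=[1.0, 2.0, 3.0]),
    ]
)

# The runtime echoes every input back as a ResponseOutput with the same
# name, datatype, shape and data, so the response mirrors this request.
print(json.dumps(request.dict(), indent=2))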
Example #3
    async def predict(self, request: InferenceRequest) -> InferenceResponse:

        # Make the insights wrapper available to user code via a context variable
        insights_wrapper = InsightsWrapper(self.insights_manager)
        insights_context.set(insights_wrapper)

        request_dict = request.dict()

        response_dict = self._model.request(request_dict)
        if self._is_coroutine:
            response_dict = await response_dict  # type: ignore

        # TODO: Move to functions declared upfront with logic contained to avoid if
        if self._model.get_insights_mode == InsightRequestModes.ALL:
            self.insights_manager.log(request_dict)
            self.insights_manager.log(response_dict)
        else:
            if (self._model.get_insights_mode == InsightRequestModes.REQUEST
                    or insights_wrapper.set_log_request):
                self.insights_manager.log(request_dict)
            if (self._model.get_insights_mode == InsightRequestModes.RESPONSE
                    or insights_wrapper.set_log_response):
                self.insights_manager.log(response_dict)

        return InferenceResponse(**response_dict)
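Note: the TODO above asks for the branching to move into functions declared upfront. One possible shape is a dispatch table keyed on the mode; everything below (the enum stub, the handler table, dispatch_insights) is an illustrative sketch rather than Tempo's actual refactor, and it leaves the set_log_request/set_log_response overrides aside.

from enum import Enum

class InsightRequestModes(Enum):  # assumed shape, mirroring the names above
    ALL = "ALL"
    REQUEST = "REQUEST"
    RESPONSE = "RESPONSE"

# Handlers declared upfront, as the TODO suggests; names are invented.
_MODE_HANDLERS = {
    InsightRequestModes.ALL: lambda log, req, res: (log(req), log(res)),
    InsightRequestModes.REQUEST: lambda log, req, res: log(req),
    InsightRequestModes.RESPONSE: lambda log, req, res: log(res),
}

def dispatch_insights(mode, log, request_dict, response_dict):
    # Look up the handler for this mode and run it; unknown modes are a no-op.
    handler = _MODE_HANDLERS.get(mode)
    if handler is not None:
        handler(log, request_dict, response_dict)

# Usage with a stand-in logger:
dispatch_insights(InsightRequestModes.ALL, print, {"in": 1}, {"out": 2})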
Example #4
    async def predict(self, payload: InferenceRequest) -> InferenceResponse:
        prediction = self._pipeline.request(payload.dict())
        return InferenceResponse(**prediction)
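Note: Example #4 only works if the pipeline's request() returns a dict whose keys match InferenceResponse's fields (model_name, outputs, ...). A stub illustrating that contract; FakePipeline and all values are invented.

from mlserver.types import InferenceResponse

# Hypothetical stand-in for self._pipeline: request() must return a dict
# that unpacks cleanly into InferenceResponse(**prediction).
class FakePipeline:
    def request(self, request_dict: dict) -> dict:
        return {
            "model_name": "my-pipeline",  # required field on InferenceResponse
            "outputs": [
                {"name": "output-0", "datatype": "FP32",
                 "shape": [1], "data": [0.5]},
            ],
        }

prediction = FakePipeline().request({"inputs": []})
response = InferenceResponse(**prediction)
print(response.model_name, response.outputs[0].data)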