def transform(self, truth, response, request=None):
        """
        Perform a multiclass one-hot comparison between truth and response.

        Parameters
        ----------
        truth
            Actual value, given as an array (or array of arrays) of one-hot encodings or class probabilities.
        response
            Predicted value, given as an array (or array of arrays) of one-hot encodings or class probabilities.
        request
            Input data, given as an array (or array of arrays) of one-hot encodings or class probabilities.
        """

        metrics = []
        response = (response[0]
                    if isinstance(response[0], (list, np.ndarray))
                    else response)
        truth = truth[0] if isinstance(truth[0], (list, np.ndarray)) else truth
        response_class = max(enumerate(response), key=lambda x: x[1])[0]
        truth_class = max(enumerate(truth), key=lambda x: x[1])[0]

        correct = response_class == truth_class

        if correct:
            metrics.append({
                "key": "seldon_metric_true_positive",
                "type": "COUNTER",
                "value": 1,
                "tags": {
                    "class": f"CLASS_{truth_class}"
                },
            })
            metrics.append({
                "key": "seldon_metric_true_negative",
                "type": "COUNTER",
                "value": 1,
                "tags": {
                    "class": f"CLASS_{response_class}"
                },
            })
        else:
            metrics.append({
                "key": "seldon_metric_false_negative",
                "type": "COUNTER",
                "value": 1,
                "tags": {
                    "class": f"CLASS_{truth_class}"
                },
            })
            metrics.append({
                "key": "seldon_metric_false_positive",
                "type": "COUNTER",
                "value": 1,
                "tags": {
                    "class": f"CLASS_{response_class}"
                },
            })

        return SeldonResponse(None, None, metrics)
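For a concrete sense of the comparison above, here is a small standalone sketch (independent of Seldon; the helper name argmax_class is illustrative only) showing how a one-hot truth vector and a probability vector resolve to class indices before being compared:

import numpy as np

def argmax_class(values):
    # Index of the largest entry, i.e. the class the vector encodes
    return int(np.argmax(values))

truth = [[0, 0, 1]]            # one-hot truth for class 2
response = [[0.1, 0.2, 0.7]]   # predicted probabilities

truth_class = argmax_class(truth[0])
response_class = argmax_class(response[0])
assert truth_class == response_class == 2   # counted as a true positive above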
Example no. 2
    def transform(self, truth, response, request=None):
        """
        Perform a binary classification comparison between truth and response.

        Parameters
        ----------
        truth
            Actual value, given as 0, 1, [0], or [1]
        response
            Predicted value, given as 0, 1, [0], or [1]
        request
            Input value, given as 0, 1, [0], or [1]
        """

        response_class = (
            int(response[0]) if isinstance(response, list) else int(response)
        )
        truth_class = int(truth[0]) if isinstance(truth, list) else int(truth)

        correct = response_class == truth_class

        if truth_class:
            if correct:
                key = "seldon_metric_true_positive"
            else:
                key = "seldon_metric_false_negative"
        else:
            if correct:
                key = "seldon_metric_true_negative"
            else:
                key = "seldon_metric_false_positive"

        metrics = [{"key": key, "type": "COUNTER", "value": 1}]

        return SeldonResponse(None, None, metrics)
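The key selection above follows the usual confusion-matrix convention; a quick, purely illustrative re-implementation that enumerates all four (truth, response) combinations:

def outcome_key(truth_class, response_class):
    # Mirrors the branching in the transform above
    if truth_class:
        return ("seldon_metric_true_positive" if response_class == truth_class
                else "seldon_metric_false_negative")
    return ("seldon_metric_true_negative" if response_class == truth_class
            else "seldon_metric_false_positive")

for t in (0, 1):
    for r in (0, 1):
        print(f"truth={t} response={r} -> {outcome_key(t, r)}")
# truth=0 response=0 -> seldon_metric_true_negative
# truth=0 response=1 -> seldon_metric_false_positive
# truth=1 response=0 -> seldon_metric_false_negative
# truth=1 response=1 -> seldon_metric_true_positive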
Example no. 3
  def predict(self, X, features_names):
    start_time = time.time()
    res = self.session.run([self.output_name], {self.input_name: X.astype('float32')})

    runtime_metrics = [{"type": "TIMER", "key": "prediction_time", "value": ((time.time() - start_time) * 1000)}]

    return SeldonResponse(data=res, metrics=runtime_metrics)
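The snippet assumes self.session, self.input_name and self.output_name were prepared elsewhere; a minimal sketch of that setup using onnxruntime (the model path and class name here are hypothetical):

import onnxruntime as ort

class OnnxModel:
    def load(self):
        # Hypothetical path; in practice it would come from configuration
        self.session = ort.InferenceSession("model.onnx")
        self.input_name = self.session.get_inputs()[0].name
        self.output_name = self.session.get_outputs()[0].name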
Example no. 4
    def transform(self, truth, response, request=None):
        """
        Perform a multiclass numeric comparison between truth and response.

        Parameters
        ----------
        truth
            Actual value, given as <number> or [<number>]
        response
            Predicted value, given as <number> or [<number>]
        request
            Input value, given as <number> or [<number>]
        """

        metrics = []

        response_class = (response[0] if isinstance(
            response, (list, np.ndarray)) else response)
        truth_class = truth[0] if isinstance(truth,
                                             (list, np.ndarray)) else truth

        correct = response_class == truth_class

        if correct:
            metrics.append({
                "key": "seldon_metric_true_positive",
                "type": "COUNTER",
                "value": 1,
                "tags": {
                    "class": f"CLASS_{truth_class}"
                },
            })
            metrics.append({
                "key": "seldon_metric_true_negative",
                "type": "COUNTER",
                "value": 1,
                "tags": {
                    "class": f"CLASS_{response_class}"
                },
            })
        else:
            metrics.append({
                "key": "seldon_metric_false_negative",
                "type": "COUNTER",
                "value": 1,
                "tags": {
                    "class": f"CLASS_{truth_class}"
                },
            })
            metrics.append({
                "key": "seldon_metric_false_positive",
                "type": "COUNTER",
                "value": 1,
                "tags": {
                    "class": f"CLASS_{response_class}"
                },
            })

        return SeldonResponse(None, None, metrics)
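Unlike the one-hot variant in the first example, this transform treats the values as class labels directly; a trivial illustration of the unwrapping and comparison (values are examples only):

import numpy as np

truth, response = [2], 2.0   # a label may arrive wrapped in a list or as a bare number

truth_class = truth[0] if isinstance(truth, (list, np.ndarray)) else truth
response_class = response[0] if isinstance(response, (list, np.ndarray)) else response

assert response_class == truth_class   # 2.0 == 2 -> counted as a true positive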
Example no. 5
    def process_event(self, inputs: List, headers: Dict) -> Optional[Dict]:
        """
        Accumulate inputs and run Alibi Detect drift detection once a full batch has been collected.

        Parameters
        ----------
        inputs
             Input data
        headers
             Header options

        Returns
        -------
             SeldonResponse with the drift prediction and metrics, or None until
             drift_batch_size rows have been accumulated

        """
        logging.info("PROCESSING EVENT.")
        logging.info(str(headers))
        logging.info("----")
        try:
            X = np.array(inputs)
        except Exception as e:
            raise Exception(
                "Failed to initialize NumPy array from inputs: %s, %s" %
                (e, inputs))

        if self.batch is None:
            self.batch = X
        else:
            self.batch = np.vstack((self.batch, X))

        if self.batch.shape[0] >= self.drift_batch_size:
            logging.info(
                "Running drift detection. Batch size is %d. Needed %d",
                self.batch.shape[0],
                self.drift_batch_size,
            )
            cd_preds = self.model.predict(self.batch)
            self.batch = None

            output = json.loads(json.dumps(cd_preds, cls=NumpyEncoder))

            metrics = _drift_to_metrics(output.get("data", {}))

            seldon_response = SeldonResponse(output, None, metrics)

            return seldon_response
        else:
            logging.info(
                "Not running drift detection. Batch size is %d. Need %d",
                self.batch.shape[0],
                self.drift_batch_size,
            )
            return None
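A standalone sketch of the batching behaviour above: rows are stacked with np.vstack until drift_batch_size is reached, the detector runs once, and the buffer resets (the batch size and data here are illustrative):

import numpy as np

drift_batch_size = 3
batch = None

for rows in ([[1.0, 2.0]], [[3.0, 4.0], [5.0, 6.0]], [[7.0, 8.0]]):
    X = np.array(rows)
    batch = X if batch is None else np.vstack((batch, X))
    if batch.shape[0] >= drift_batch_size:
        print("would run drift detection on", batch.shape[0], "rows")
        batch = None
    else:
        print("buffering, have", batch.shape[0], "of", drift_batch_size, "rows")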
Example no. 6
    def predict(self, features, names=[], meta={}):
        X = reshape(features)

        logging.info(f"model features: {features}")
        logging.info(f"model names: {names}")
        logging.info(f"model meta: {meta}")

        logging.info(f"model X: {X}")

        runtime_metrics = [{"type": "COUNTER", "key": "instance_counter", "value": len(X)}]
        runtime_tags = {"runtime": "tag", "shared": "right one"}
        return SeldonResponse(data=X, metrics=runtime_metrics, tags=runtime_tags)
Example no. 7
    def process_event(self, inputs: Union[List, Dict], headers: Dict) -> Dict:
        """
        Process the feedback event and compute metrics by comparing truth against the response.

        Parameters
        ----------
        inputs
             Input data
        headers
             Header options

        Returns
        -------
             SeldonResponse carrying the computed metrics

        """
        logging.info("PROCESSING Feedback Event.")
        logging.info(str(headers))
        logging.info("----")

        metrics = []
        output = {}

        if "truth" not in inputs:
            raise SeldonMicroserviceException(
                f"No truth value provided in: {json.dumps(inputs)}",
                status_code=400,
                reason="NO_TRUTH_VALUE",
            )

        # We automatically add any metrics provided in the incoming request
        if "metrics" in inputs:
            metrics.extend(inputs["metrics"])

        # If response is provided then we can perform a comparison
        # TODO: If Header UUID provided we could fetch from ELK to do the evaluation
        if "response" in inputs:
            response = inputs["response"]
            truth = inputs["truth"]
            r = self.model.transform(truth, response)
            metrics.extend(r.metrics)

        seldon_response = SeldonResponse(output or None, None, metrics)

        return seldon_response
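The shape of the feedback payload this handler consumes can be read off the code above; an illustrative instance (field values are examples only, and the extra metric key is hypothetical):

example_inputs = {
    "truth": [0, 0, 1],
    "response": [0.1, 0.2, 0.7],
    # Entries here are forwarded verbatim into the returned metrics
    "metrics": [
        {"type": "GAUGE", "key": "client_side_latency_ms", "value": 12.5},
    ],
}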
Example no. 8
    def process_event(self, inputs: Union[List, Dict], headers: Dict) -> Dict:
        """
        Process the feedback event and compute metrics by comparing truth against the
        response, falling back to Elasticsearch to fetch the response when only a
        request ID header is provided.

        Parameters
        ----------
        inputs
             Input data
        headers
             Header options

        Returns
        -------
             SeldonResponse carrying the computed metrics

        """
        logging.info("PROCESSING Feedback Event.")
        logging.info(str(headers))
        logging.info("----")

        metrics = []
        output = {}
        truth = None
        response = None
        error = None

        if "truth" not in inputs:
            raise SeldonMicroserviceException(
                f"No truth value provided in: {json.dumps(inputs)}",
                status_code=400,
                reason="NO_TRUTH_VALUE",
            )
        else:
            truth = inputs["truth"]

        # We automatically add any metrics provided in the incoming request
        if "metrics" in inputs:
            metrics.extend(inputs["metrics"])

        # If response is provided then we can perform a comparison
        if "response" in inputs:
            response = inputs["response"]

        elif REQUEST_ID_HEADER_NAME in headers:
            # Otherwise if UUID is provided we can fetch from elasticsearch
            if not self.elasticsearch_client:
                error = "Seldon-Puid provided but elasticsearch client not configured"
            else:
                try:
                    seldon_puid = headers.get(REQUEST_ID_HEADER_NAME, "")
                    seldon_namespace = headers.get(NAMESPACE_HEADER_NAME, "")

                    # Currently only supports SELDON inference type (not kfserving)
                    elasticsearch_index = f"inference-log-seldon-{seldon_namespace}-{SELDON_DEPLOYMENT_ID}-{SELDON_PREDICTOR_ID}"

                    doc = self.elasticsearch_client.get(
                        index=elasticsearch_index, id=seldon_puid)
                    response = (doc.get("_source", {})
                                .get("response", {})
                                .get("instance", None))
                    if not response:
                        error = f"Elasticsearch index {elasticsearch_index} with id {seldon_puid} did not contain response value"
                except NotFoundError:
                    error = f"Elasticsearch index {elasticsearch_index} with id {seldon_puid} not found"
        else:
            error = "Neither response nor request Puid provided in headers"

        if error:
            raise SeldonMicroserviceException(error,
                                              status_code=400,
                                              reason="METRICS_SERVER_ERROR")

        logging.error(f"{truth}, {response}")
        output = self.model.transform(truth, response)
        seldon_response = SeldonResponse.create(output or None)

        seldon_response.metrics.extend(metrics)

        return seldon_response
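When only the request ID header is supplied, the lookup above expects the inference-log document to expose the prediction under _source.response.instance; a sketch of that shape (values are illustrative):

example_doc = {
    "_source": {
        "response": {
            # Read as `response` and passed to self.model.transform(truth, response)
            "instance": [0.1, 0.9],
        },
    },
}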
Example no. 9
 def send_feedback(self, X, feature_names, reward, truth, routing):
     return SeldonResponse(data=X, metrics=RUNTIME_METRICS, tags=RUNTIME_TAGS)
Example no. 10
 def route(self, X, feature_names):
     logging.info("Route called")
     return SeldonResponse(data=22, metrics=RUNTIME_METRICS, tags=RUNTIME_TAGS)
Example no. 11
 def transform_output(self, X, feature_names):
     logging.info("Transform output called")
     return SeldonResponse(data=X, metrics=RUNTIME_METRICS, tags=RUNTIME_TAGS)
Example no. 12
 def aggregate(self, X, features_names):
     logging.info("Aggregate called")
     return SeldonResponse(data=X[0], metrics=RUNTIME_METRICS, tags=RUNTIME_TAGS)
Example no. 13
 def predict(self, X, features_names):
     logging.info("Predict called")
     return SeldonResponse(data=X, metrics=RUNTIME_METRICS, tags=RUNTIME_TAGS)
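The send_feedback, route, transform_output, aggregate and predict examples above all reference module-level RUNTIME_METRICS and RUNTIME_TAGS constants; illustrative definitions consistent with the earlier snippets might look like this (the exact originals are not shown here):

RUNTIME_METRICS = [{"type": "COUNTER", "key": "instance_counter", "value": 1}]
RUNTIME_TAGS = {"runtime": "tag", "shared": "right one"}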
Example no. 14
    def process_event(self, inputs: Union[List, Dict], headers: Dict) -> Dict:
        """
        Process the event and return Alibi Detect score

        Parameters
        ----------
        inputs
             Input data
        headers
             Header options

        Returns
        -------
             SeldonResponse with the outlier prediction and outlier metrics

        """
        logging.info("PROCESSING EVENT.")
        logging.info(str(headers))
        logging.info("----")
        try:
            X = np.array(inputs)
        except Exception as e:
            raise Exception(
                "Failed to initialize NumPy array from inputs: %s, %s" %
                (e, inputs))

        ret_instance_score = False
        if (HEADER_RETURN_INSTANCE_SCORE in headers
                and headers[HEADER_RETURN_INSTANCE_SCORE]
                == "true") or RETURN_INSTANCE_SCORE:
            ret_instance_score = True

        outlier_type = "instance"
        if HEADER_OUTLIER_TYPE in headers and headers[HEADER_OUTLIER_TYPE]:
            outlier_type = headers[HEADER_OUTLIER_TYPE]
        ret_feature_score = False
        if (HEADER_RETURN_FEATURE_SCORE in headers
                and headers[HEADER_RETURN_FEATURE_SCORE]
                == "true") or RETURN_FEATURE_SCORE:
            ret_feature_score = True
        od_preds = {}
        name = self.model.meta["name"]
        if name in ("IForest", "OutlierAEGMM", "Mahalanobis",
                    "SpectralResidual", "OutlierVAEGMM"):
            od_preds = self.model.predict(
                X,
                # scores used to determine outliers
                return_instance_score=ret_instance_score,
            )
        else:
            od_preds = self.model.predict(
                X,
                outlier_type=outlier_type,
                # use 'feature' or 'instance' level
                return_feature_score=ret_feature_score,
                # scores used to determine outliers
                return_instance_score=ret_instance_score,
            )

        # Register metrics
        metrics = []
        _append_outlier_metrcs(metrics, od_preds, "is_outlier")
        _append_outlier_metrcs(metrics,
                               od_preds,
                               "instance_score",
                               is_count=False)

        # clean result
        if ("data" in od_preds and "instance_score" in od_preds["data"]
                and od_preds["data"]["instance_score"] is None):
            del od_preds["data"]["instance_score"]
        if ("data" in od_preds and "feature_score" in od_preds["data"]
                and od_preds["data"]["feature_score"] is None):
            del od_preds["data"]["feature_score"]

        resp_data = json.loads(json.dumps(od_preds, cls=NumpyEncoder))

        return SeldonResponse(resp_data, None, metrics)
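For orientation, a rough sketch of the Alibi Detect prediction dict handled above (values are illustrative); the cleanup step removes instance_score and feature_score entries that come back as None:

od_preds_example = {
    "data": {
        "is_outlier": [0, 1],          # per-instance outlier flags
        "instance_score": [0.2, 3.4],  # kept only when requested
        "feature_score": None,         # deleted by the cleanup above
    },
    "meta": {"name": "IForest"},
}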
Example no. 15
    def post(self):
        """
        Handle a POST request: extract the payload, call the event handler, and optionally send a reply event.

        """
        if not self.model.ready:
            self.model.load()

        try:
            body = json.loads(self.request.body)
        except json.decoder.JSONDecodeError as e:
            raise tornado.web.HTTPError(
                status_code=HTTPStatus.BAD_REQUEST,
                reason="Unrecognized request format: %s" % e,
            )

        # Extract payload from request
        request_handler: RequestHandler = get_request_handler(
            self.protocol, body)
        request_handler.validate()
        request = request_handler.extract_request()

        # Create event from request body
        event = v02.Event()
        http_marshaller = marshaller.NewDefaultHTTPMarshaller()
        event = http_marshaller.FromRequest(event, self.request.headers,
                                            self.request.body, json.loads)
        logging.debug(json.dumps(event.Properties()))

        # Extract any desired request headers
        headers = {}

        for (key, val) in self.request.headers.get_all():
            headers[key] = val

        response = self.model.process_event(request, headers)
        seldon_response = SeldonResponse.create(response)

        runtime_metrics = seldon_response.metrics
        if runtime_metrics is not None:
            if validate_metrics(runtime_metrics):
                self.seldon_metrics.update(runtime_metrics, self.event_type)
            else:
                logging.error("Metrics returned are invalid: " +
                              str(runtime_metrics))

        if seldon_response.data is not None:
            responseStr = json.dumps(seldon_response.data)

            # Create event from response if reply_url is active
            if self.reply_url != "":
                if event.EventID() is None or event.EventID() == "":
                    resp_event_id = uuid.uuid1().hex
                else:
                    resp_event_id = event.EventID()
                revent = (
                    v02.Event().SetContentType("application/json").SetData(
                        responseStr).SetEventID(resp_event_id).SetSource(
                            self.event_source).SetEventType(
                                self.event_type).SetExtensions(
                                    event.Extensions()))
                logging.debug(json.dumps(revent.Properties()))
                sendCloudEvent(revent, self.reply_url)
            self.write(responseStr)
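Finally, the runtime metrics validated and recorded above use the same list-of-dicts shape seen throughout these examples; a minimal well-formed value (the metric keys here are illustrative):

runtime_metrics = [
    {"type": "COUNTER", "key": "requests_seen", "value": 1},
    {"type": "TIMER", "key": "prediction_time", "value": 12.3},
]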