Example #1
def compute_metrics(predictions, ground_truth):
    from alectio_sdk.metrics.object_detection import Metrics, batch_to_numpy

    det_boxes, det_labels, det_scores, true_boxes, true_labels = batch_to_numpy(
        predictions, ground_truth)

    m = Metrics(
        det_boxes=det_boxes,
        det_labels=det_labels,
        det_scores=det_scores,
        true_boxes=true_boxes,
        true_labels=true_labels,
        num_classes=60,  # total number of classes in this dataset (hard-coded)
    )

    metrics = {
        "mAP": m.getmAP(),
        "AP": m.getAP(),
        "precision": m.getprecision(),
        "recall": m.getrecall(),
        "confusion_matrix": m.getCM().tolist(),
        "class_labels": None,
    }

    print("========= TEST METRICS =========")
    print(metrics)
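
A note on usage: the sketch below shows how this function might be invoked. The input layout expected by batch_to_numpy (dicts keyed by sample index with "boxes", "labels", and "scores" entries) is an assumption here, so treat it as illustrative rather than the SDK's documented contract.

# Hypothetical input layout -- adjust to whatever your alectio_sdk version expects.
predictions = {
    0: {"boxes": [[10, 20, 110, 220]], "labels": [3], "scores": [0.91]},
}
ground_truth = {
    0: {"boxes": [[12, 18, 108, 225]], "labels": [3]},
}
compute_metrics(predictions, ground_truth)  # prints the metrics dict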
Example #2
    def compute_metrics(self, predictions, ground_truth):
        # Assumes module-level imports: os, numpy as np, sklearn.metrics,
        # plus Metrics and batch_to_numpy from alectio_sdk.
        metrics = {}
        if self.type == "Object Detection":
            det_boxes, det_labels, det_scores, true_boxes, true_labels = batch_to_numpy(
                predictions, ground_truth)

            m = Metrics(
                det_boxes=det_boxes,
                det_labels=det_labels,
                det_scores=det_scores,
                true_boxes=true_boxes,
                true_labels=true_labels,
                num_classes=len(self.meta_data["class_labels"]),
            )

            metrics = {
                "mAP": m.getmAP(),
                "AP": m.getAP(),
                "precision": m.getprecision(),
                "recall": m.getrecall(),
                "confusion_matrix": m.getCM().tolist(),
                "class_labels": self.meta_data["class_labels"],
            }

        if self.type == "Classification" or self.type == "Text Classification":
            confusion_matrix = sklearn.metrics.confusion_matrix(
                ground_truth, predictions)
            num_queried_per_class = {
                k: v
                for k, v in enumerate(confusion_matrix.sum(axis=1))
            }
            acc_per_class = {
                k: v.round(3)
                for k, v in enumerate(confusion_matrix.diagonal() /
                                      confusion_matrix.sum(axis=1))
            }
            accuracy = sklearn.metrics.accuracy_score(ground_truth,
                                                      predictions)
            FP = confusion_matrix.sum(axis=0) - np.diag(confusion_matrix)
            FN = confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)
            TP = confusion_matrix.diagonal()
            TN = confusion_matrix.sum() - (FP + FN + TP)
            label_disagreement = {
                k: v.round(3)
                for k, v in enumerate(FP / (FP + TN))
            }

            metrics = {
                "accuracy": accuracy,
                "confusion_matrix": confusion_matrix,
                "acc_per_class": acc_per_class,
                "label_disagreement": label_disagreement,
                "num_queried_per_class": num_queried_per_class,
            }

        # save metrics to S3
        object_key = os.path.join(self.expt_dir,
                                  "metrics_{}.pkl".format(self.cur_loop))
        self.client.multi_part_upload_with_s3(metrics, self.bucket_name,
                                              object_key, "pickle")
        return
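
The TP/FP/FN/TN arithmetic above reads each per-class count straight off the confusion matrix: the diagonal holds true positives, a column sum minus the diagonal gives that class's false positives, and a row sum minus the diagonal gives its false negatives. A self-contained sketch with a made-up 3-class matrix:

import numpy as np

# Toy 3-class confusion matrix: rows are true classes, columns are predictions.
cm = np.array([[5, 1, 0],
               [2, 7, 1],
               [0, 1, 4]])

TP = cm.diagonal()              # correct predictions per class
FP = cm.sum(axis=0) - TP        # predicted as the class but actually another
FN = cm.sum(axis=1) - TP        # belong to the class but predicted as another
TN = cm.sum() - (FP + FN + TP)  # everything else

# Label disagreement as used above is the per-class false positive rate.
print((FP / (FP + TN)).round(3))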
Example #3
    def compute_metrics(self, predictions, ground_truth):
        # Assumes module-level imports of os and the alectio_sdk metric
        # helpers: Metrics, batch_to_numpy, ClassificationMetrics,
        # SegMetrics, MultiLabelClassificationMetrics.
        metrics = {}
        if self.type == "2d_object_detection":
            det_boxes, det_labels, det_scores, true_boxes, true_labels = batch_to_numpy(
                predictions, ground_truth)

            m = Metrics(
                det_boxes=det_boxes,
                det_labels=det_labels,
                det_scores=det_scores,
                true_boxes=true_boxes,
                true_labels=true_labels,
                num_classes=len(self.meta_data["class_labels"]),
            )

            metrics = {
                "mAP": m.getmAP(),
                "AP": m.getAP(),
                "precision": m.getprecision(),
                "recall": m.getrecall(),
                "confusion_matrix": m.getCM().tolist(),
                "class_labels": self.meta_data["class_labels"],
            }

        elif self.type == "text_classification" or self.type == "image_classification":

            m = ClassificationMetrics(predictions, ground_truth)

            metrics = {
                "accuracy": m.get_accuracy(),
                "f1_score_per_class": m.get_f1_score_per_class(),
                "f1_score": m.get_f1_score(),
                "precision_per_class": m.get_precision_per_class(),
                "precision": m.get_precision(),
                "recall_per_class": m.get_recall_per_class(),
                "recall": m.get_recall(),
                "confusion_matrix": m.get_confusion_matrix(),
                "acc_per_class": m.get_acc_per_class(),
                "label_disagreement": m.get_label_disagreement(),
            }

        elif self.type == "2d_segmentation":

            m = SegMetrics(
                n_classes=len(self.meta_data["class_labels"]),
                labels=self.meta_data["class_labels"],
                return_2D=True,
                return_3D=False,
                rangenet=False,
                default_ranges=None,
                include_ranges=False,
                range_labels=None,
            )
            m.evaluate2D(ground_truth, predictions)

            metrics = {
                'confusion_matrix': m.get2DCM(),
                'pixel_acc': m.get2DmAcc(),
                'classwise_pixel_acc': m.get2DAcc(),
                'freqw_iou': m.get2DfwIOU(),
                'mean_iou': m.get2DmIOU(),
                'classwise_iou': m.get2DIOU(),
                'classwise_dice': m.get2DDICE()
            }

        elif self.type == "multilabel_text_classification" or self.type == "multi_label_text_classification":

            m = MultiLabelClassificationMetrics(predictions, ground_truth)

            metrics = {
                "accuracy": m.get_accuracy(),
                "micro_f1": m.get_f1_score_micro(),
                "macro_f1": m.get_f1_score_macro(),
                "micro_precision": m.get_precision_micro(),
                "macro_precision": m.get_precision_macro(),
                "micro_recall": m.get_recall_micro(),
                "macro_recall": m.get_recall_macro(),
                "multilabel_confusion_matrix": m.get_confusion_matrix(),
                "hamming_loss": m.get_hamming_loss()
            }

        else:
            raise ValueError(
                "The selected task type is currently not supported, "
                "received type: {}".format(self.type))

        # save metrics to S3
        object_key = os.path.join(self.expt_dir,
                                  "metrics_{}.pkl".format(self.cur_loop))
        self.client.multi_part_upload_with_s3(metrics, self.bucket_name,
                                              object_key, "pickle")

        return
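
For reference, the micro/macro aggregates in the multilabel branch can be reproduced with plain scikit-learn. The sketch below uses made-up binary indicator data and standard sklearn calls, not the MultiLabelClassificationMetrics wrapper:

import numpy as np
from sklearn.metrics import (f1_score, hamming_loss,
                             multilabel_confusion_matrix)

# Toy multilabel data: 4 samples, 3 labels, binary indicator format.
y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0], [0, 0, 1]])
y_pred = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 1, 1]])

print("micro_f1:", f1_score(y_true, y_pred, average="micro"))
print("macro_f1:", f1_score(y_true, y_pred, average="macro"))
print("hamming_loss:", hamming_loss(y_true, y_pred))
print(multilabel_confusion_matrix(y_true, y_pred))  # one 2x2 matrix per label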
Example #4
    def compute_metrics(self, predictions, ground_truth):
        metrics = {}
        if self.type == "Object Detection":
            det_boxes, det_labels, det_scores, true_boxes, true_labels = batch_to_numpy(
                predictions, ground_truth)

            m = Metrics(
                det_boxes=det_boxes,
                det_labels=det_labels,
                det_scores=det_scores,
                true_boxes=true_boxes,
                true_labels=true_labels,
                num_classes=len(self.meta_data["class_labels"]),
            )

            metrics = {
                "mAP": m.getmAP(),
                "AP": m.getAP(),
                "precision": m.getprecision(),
                "recall": m.getrecall(),
                "confusion_matrix": m.getCM().tolist(),
                "class_labels": self.meta_data["class_labels"],
            }

        if self.type == "Classification" or self.type == "Text Classification":
            confusion_matrix = sklearn.metrics.confusion_matrix(
                ground_truth, predictions)
            acc_per_class = {
                k: v.round(3)
                for k, v in enumerate(confusion_matrix.diagonal() /
                                      confusion_matrix.sum(axis=1))
            }
            accuracy = sklearn.metrics.accuracy_score(ground_truth,
                                                      predictions)
            FP = confusion_matrix.sum(axis=0) - np.diag(confusion_matrix)
            FN = confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)
            TP = confusion_matrix.diagonal()
            TN = confusion_matrix.sum() - (FP + FN + TP)
            precision = TP / (TP + FP)
            recall = TP / (TP + FN)
            f1_score = 2 * precision * recall / (precision + recall)
            label_disagreement = {
                k: v.round(3)
                for k, v in enumerate(FP / (FP + TN))
            }

            metrics = {
                "accuracy": accuracy,
                "f1_score_per_class": {k: v for (k, v) in enumerate(f1_score)},
                "f1_score": f1_score.mean(),
                "precision_per_class": {k: v for (k, v) in enumerate(precision)},
                "precision": precision.mean(),
                "recall_per_class": {k: v for (k, v) in enumerate(recall)},
                "recall": recall.mean(),
                "confusion_matrix": confusion_matrix.tolist(),
                "acc_per_class": acc_per_class,
                "label_disagreement": label_disagreement,
            }

        # save metrics to S3
        object_key = os.path.join(self.expt_dir,
                                  "metrics_{}.pkl".format(self.cur_loop))
        self.client.multi_part_upload_with_s3(metrics, self.bucket_name,
                                              object_key, "pickle")
        if "onprem" in self.args and not self.args["onprem"]:
            demometricsobject_key = os.path.join(
                self.demoexpt_dir, "metrics_{}.pkl".format(self.cur_loop))
            self.client.multi_part_upload_with_s3(
                metrics,
                self.demopayload["bucket_name"],
                demometricsobject_key,
                "pickle",
            )
        return
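
As a sanity check (not part of the SDK code above), the hand-rolled per-class precision and recall in this example can be verified against scikit-learn's precision_recall_fscore_support; the labels below are made up for illustration:

import numpy as np
import sklearn.metrics

ground_truth = [0, 0, 1, 1, 2, 2, 2]
predictions = [0, 1, 1, 1, 2, 0, 2]

cm = sklearn.metrics.confusion_matrix(ground_truth, predictions)
TP = cm.diagonal()
precision = TP / (TP + cm.sum(axis=0) - TP)  # TP / (TP + FP)
recall = TP / (TP + cm.sum(axis=1) - TP)     # TP / (TP + FN)

p, r, f1, _ = sklearn.metrics.precision_recall_fscore_support(
    ground_truth, predictions)
assert np.allclose(precision, p) and np.allclose(recall, r)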