Example #1
    def evaluate(self, results, metric='mIoU', logger=None, **kwargs):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str, float]: Default metrics.
        """

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mIoU']
        if metric not in allowed_metrics:
            raise KeyError('metric {} is not supported'.format(metric))

        eval_results = {}
        gt_seg_maps = self.get_gt_seg_maps()
        if self.CLASSES is None:
            num_classes = len(
                reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
        else:
            num_classes = len(self.CLASSES)

        all_acc, acc, iou = mean_iou(
            results, gt_seg_maps, num_classes, ignore_index=self.ignore_index)
        summary_str = ''
        summary_str += 'per class results:\n'

        line_format = '{:<15} {:>10} {:>10}\n'
        summary_str += line_format.format('Class', 'IoU', 'Acc')
        if self.CLASSES is None:
            class_names = tuple(range(num_classes))
        else:
            class_names = self.CLASSES
        for i in range(num_classes):
            iou_str = '{:.2f}'.format(iou[i] * 100)
            acc_str = '{:.2f}'.format(acc[i] * 100)
            summary_str += line_format.format(class_names[i], iou_str, acc_str)
        summary_str += 'Summary:\n'
        line_format = '{:<15} {:>10} {:>10} {:>10}\n'
        summary_str += line_format.format('Scope', 'mIoU', 'mAcc', 'aAcc')

        iou_str = '{:.2f}'.format(np.nanmean(iou) * 100)
        acc_str = '{:.2f}'.format(np.nanmean(acc) * 100)
        all_acc_str = '{:.2f}'.format(all_acc * 100)
        summary_str += line_format.format('global', iou_str, acc_str,
                                          all_acc_str)
        print_log(summary_str, logger)

        eval_results['mIoU'] = np.nanmean(iou)
        eval_results['mAcc'] = np.nanmean(acc)
        eval_results['aAcc'] = all_acc

        return eval_results
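
The mean_iou call above comes from the project's evaluation utilities and is not shown in this example. As a point of reference only, here is a minimal, self-contained sketch of the three quantities it is assumed to return here (overall accuracy, per-class accuracy, per-class IoU); mean_iou_sketch is a hypothetical helper, not the library implementation:

import numpy as np

def mean_iou_sketch(results, gt_seg_maps, num_classes, ignore_index=255):
    """Hypothetical stand-in for mean_iou: (all_acc, per-class acc, per-class IoU)."""
    total_intersect = np.zeros(num_classes)
    total_union = np.zeros(num_classes)
    total_label = np.zeros(num_classes)
    for pred, label in zip(results, gt_seg_maps):
        pred = np.asarray(pred).ravel()
        label = np.asarray(label).ravel()
        keep = label != ignore_index              # drop ignored pixels
        pred, label = pred[keep], label[keep]
        bins = np.arange(num_classes + 1)
        intersect = np.histogram(pred[pred == label], bins=bins)[0]
        area_pred = np.histogram(pred, bins=bins)[0]
        area_label = np.histogram(label, bins=bins)[0]
        total_intersect += intersect
        total_union += area_pred + area_label - intersect
        total_label += area_label
    all_acc = total_intersect.sum() / total_label.sum()
    acc = total_intersect / total_label           # per-class pixel accuracy
    iou = total_intersect / total_union           # per-class IoU
    return all_acc, acc, iou

Classes that never appear in the ground truth produce NaN entries in acc and iou, which is why the evaluate method above aggregates with np.nanmean.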
Example #2
def compute_metrics(
    seg_pred,
    seg_gt,
    n_cls,
    ignore_index=None,
    ret_cat_iou=False,
    tmp_dir=None,
    distributed=False,
):
    ret_metrics_mean = torch.zeros(3, dtype=float, device=ptu.device)
    if ptu.dist_rank == 0:
        list_seg_pred = []
        list_seg_gt = []
        keys = sorted(seg_pred.keys())
        for k in keys:
            list_seg_pred.append(np.asarray(seg_pred[k]))
            list_seg_gt.append(np.asarray(seg_gt[k]))
        ret_metrics = mean_iou(
            results=list_seg_pred,
            gt_seg_maps=list_seg_gt,
            num_classes=n_cls,
            ignore_index=ignore_index,
        )
        ret_metrics = [
            ret_metrics["aAcc"], ret_metrics["Acc"], ret_metrics["IoU"]
        ]
        ret_metrics_mean = torch.tensor(
            [
                np.round(np.nanmean(ret_metric.astype(float)) * 100, 2)
                for ret_metric in ret_metrics
            ],
            dtype=float,
            device=ptu.device,
        )
        cat_iou = ret_metrics[2]
    # broadcast metrics from 0 to all nodes
    if distributed:
        dist.broadcast(ret_metrics_mean, 0)
    pix_acc, mean_acc, miou = ret_metrics_mean
    ret = dict(pixel_accuracy=pix_acc, mean_accuracy=mean_acc, mean_iou=miou)
    if ret_cat_iou and ptu.dist_rank == 0:
        ret["cat_iou"] = cat_iou
    return ret
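
compute_metrics above gathers per-image predictions on rank 0 before calling mean_iou. A small hypothetical illustration of that aggregation step follows (dicts keyed by image id, flattened into aligned lists in sorted-key order; ptu and dist are omitted, and the image ids and maps here are made up):

import numpy as np

# made-up image ids and tiny 2x2 label maps, for illustration only
seg_pred = {"img_b": np.array([[0, 1], [1, 1]]), "img_a": np.array([[0, 0], [1, 1]])}
seg_gt = {"img_b": np.array([[0, 1], [1, 0]]), "img_a": np.array([[0, 0], [1, 1]])}

keys = sorted(seg_pred.keys())                        # deterministic ordering
list_seg_pred = [np.asarray(seg_pred[k]) for k in keys]
list_seg_gt = [np.asarray(seg_gt[k]) for k in keys]
# these aligned lists are what rank 0 passes to mean_iou in compute_metrics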
Example #3
    def evaluate(self, results, metric='mIoU', logger=None, **kwargs):
        """Evaluate the dataset.

        Args:
            results (list[list]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str, float]: Default metrics.
        """

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mIoU']
        if metric not in allowed_metrics:
            raise KeyError('metric {} is not supported'.format(metric))

        eval_results = {}
        gt_seg_maps_0, gt_seg_maps_1, gt_seg_maps_2 = self.get_gt_seg_maps()
        results_0 = results[0]
        results_1 = results[1]
        results_2 = results[2]
        if self.CLASSES is None:
            num_classes_0 = 3
            num_classes_1 = 3
            num_classes_2 = 9
        else:
            # num_classes = len(self.CLASSES)  # TODO fix
            # fall back to the fixed per-head class counts so evaluation still runs
            num_classes_0, num_classes_1, num_classes_2 = 3, 3, 9
        num_classes = [num_classes_0, num_classes_1, num_classes_2]

        all_acc_0, acc_0, iou_0 = mean_iou(results_0,
                                           gt_seg_maps_0,
                                           num_classes_0,
                                           ignore_index=self.ignore_index)
        all_acc_1, acc_1, iou_1 = mean_iou(results_1,
                                           gt_seg_maps_1,
                                           num_classes_1,
                                           ignore_index=self.ignore_index)
        all_acc_2, acc_2, iou_2 = mean_iou(results_2,
                                           gt_seg_maps_2,
                                           num_classes_2,
                                           ignore_index=self.ignore_index)
        all_acc = [all_acc_0, all_acc_1, all_acc_2]
        acc = [acc_0, acc_1, acc_2]
        iou = [iou_0, iou_1, iou_2]

        summary_str = ''
        summary_str += 'per class results:\n'

        line_format = '{:<15} {:>10} {:>10}\n'
        summary_str += line_format.format('Class', 'IoU', 'Acc')
        if self.CLASSES is None:
            class_names = [
                tuple(range(num_classes_i)) for num_classes_i in num_classes
            ]
        else:
            # class_names = self.CLASSES  # TODO fix
            # fall back to numeric class names so the summary table can be built
            class_names = [
                tuple(range(num_classes_i)) for num_classes_i in num_classes
            ]
        for idx in range(3):
            for i in range(num_classes[idx]):
                iou_str = '{:.2f}'.format(iou[idx][i] * 100)
                acc_str = '{:.2f}'.format(acc[idx][i] * 100)
                summary_str += line_format.format(
                    LABEL_NAMES[idx] + '_' + str(class_names[idx][i]), iou_str,
                    acc_str)
        summary_str += 'Summary:\n'
        line_format = '{:<15} {:>10} {:>10} {:>10}\n'
        summary_str += line_format.format('Scope', 'mIoU', 'mAcc', 'aAcc')

        for idx in range(3):
            iou_str = '{:.2f}'.format(np.nanmean(iou[idx]) * 100)
            acc_str = '{:.2f}'.format(np.nanmean(acc[idx]) * 100)
            all_acc_str = '{:.2f}'.format(all_acc[idx] * 100)
            summary_str += line_format.format(LABEL_NAMES[idx], iou_str,
                                              acc_str, all_acc_str)
        print_log(summary_str, logger)

        for idx in range(3):
            eval_results[LABEL_NAMES[idx] + '_' + 'mIoU'] = np.nanmean(
                iou[idx])
            eval_results[LABEL_NAMES[idx] + '_' + 'mAcc'] = np.nanmean(
                acc[idx])
            eval_results[LABEL_NAMES[idx] + '_' + 'aAcc'] = all_acc[idx]

        return eval_results
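
This variant evaluates three prediction heads at once, so results is expected to be a list of three per-head result lists. A hypothetical sketch of that layout, with made-up head names and random data standing in for real predictions:

import numpy as np

LABEL_NAMES = ['head_0', 'head_1', 'head_2']   # placeholder names, assumption
num_classes = [3, 3, 9]                        # per-head class counts from above
n_samples = 4

# results[idx][sample] is the predicted label map of head idx for one sample
results = [
    [np.random.randint(0, n_cls, size=(8, 8)) for _ in range(n_samples)]
    for n_cls in num_classes
]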
    def evaluate(self, results, metric='mIoU', logger=None, **kwargs):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str, float]: Default metrics.
        """

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mIoU']
        if metric not in allowed_metrics:
            raise KeyError('metric {} is not supported'.format(metric))

        eval_results = {}
        gt_seg_maps = self.get_gt_seg_maps()
        if self.CLASSES is None:
            num_classes = len(
                reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
        else:
            num_classes = len(self.CLASSES)

        all_acc, TP, FP, TN, FN, acc, iou, dice = mean_iou(
            results, gt_seg_maps, num_classes, ignore_index=self.ignore_index)
        summary_str = ''
        summary_str += 'per class results:\n'
        line_format = '{:<15} {:>10} {:>10} {:>10}\n'
        summary_str += line_format.format('Class', 'IoU', 'Dice', 'Acc')
        if self.CLASSES is None:
            class_names = tuple(range(num_classes))
        else:
            class_names = self.CLASSES

        # to record class-wise dice scores
        dice_pos_str = ''
        dice_neg_str = ''
        pl_dice = 0
        bg_dice = 0

        for i in range(num_classes):
            iou_str = '{:.2f}'.format(iou[i] * 100)
            acc_str = '{:.2f}'.format(acc[i] * 100)

            dice_score = dice[i] * 100
            dice_str = '{:.2f}'.format(dice_score)

            if i == 0:  # background class
                dice_neg_str = '{:.2f}'.format(dice_score)
                bg_dice = dice_score

            else:  # positive class
                dice_pos_str = '{:.2f}'.format(dice_score)
                pl_dice = dice_score

            summary_str += line_format.format(class_names[i], iou_str,
                                              dice_str, acc_str)
        summary_str += 'Summary:\n'
        line_format = '{:<15} {:>10} {:>10} {:>10} {:>20} {:>20} {:>10} {:>10} {:>10} {:>10} {:>10}\n'
        summary_str += line_format.format('Scope', 'mIoU', 'mDice', 'mAcc',
                                          'Powerline Dice', 'Background Dice',
                                          'aAcc', 'TPR', 'TNR', 'FDR',
                                          'Precision')

        iou_str = '{:.2f}'.format(np.nanmean(iou) * 100)
        dice_str = '{:.2f}'.format(np.nanmean(dice) * 100)
        acc_str = '{:.2f}'.format(np.nanmean(acc) * 100)
        all_acc_str = '{:.2f}'.format(all_acc * 100)

        TPR = (TP / (TP + FN)) if (TP + FN) != 0 else 0
        TNR = (TN / (TN + FP)) if (TN + FP) != 0 else 0
        FDR = (FP / (FP + TP)) if (FP + TP) != 0 else 0
        precision = (TP / (TP + FP)) if (TP + FP) != 0 else 0

        TPR_str = '{:.2f}'.format(TPR * 100)
        TNR_str = '{:.2f}'.format(TNR * 100)
        FDR_str = '{:.2f}'.format(FDR * 100)
        precision_str = '{:.2f}'.format(precision * 100)
        summary_str += line_format.format('global', iou_str, dice_str, acc_str,
                                          dice_pos_str, dice_neg_str,
                                          all_acc_str, TPR_str, TNR_str,
                                          FDR_str, precision_str)
        print_log(summary_str, logger)

        eval_results['mIoU'] = np.nanmean(iou)
        eval_results['mDice'] = np.nanmean(dice)
        eval_results['mAcc'] = np.nanmean(acc)

        eval_results['plDice'] = pl_dice
        eval_results['bgDice'] = bg_dice
        eval_results['aAcc'] = all_acc

        eval_results['TPR'] = TPR
        eval_results['TNR'] = TNR
        eval_results['FDR'] = FDR
        eval_results['Precision'] = precision

        return eval_results
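
The TPR, TNR, FDR and precision values above follow the standard confusion-matrix definitions. A small standalone helper that mirrors the same formulas and zero-division guards (a sketch, assuming scalar TP/FP/TN/FN counts as in the code above):

def confusion_rates(TP, FP, TN, FN):
    """Derived rates from scalar confusion counts (hypothetical helper)."""
    TPR = TP / (TP + FN) if (TP + FN) != 0 else 0        # sensitivity / recall
    TNR = TN / (TN + FP) if (TN + FP) != 0 else 0        # specificity
    FDR = FP / (FP + TP) if (FP + TP) != 0 else 0        # false discovery rate
    precision = TP / (TP + FP) if (TP + FP) != 0 else 0
    return TPR, TNR, FDR, precision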