Example #1
def _sseg_metrics(output, label, labels, ignore_label=-100):
    """

    Ignore:
        >>> from clab.torch.sseg_train import *
        >>> from clab.torch.metrics import *
        >>> datasets = load_task_dataset(taskname='camvid')
        >>> train = datasets['train']
        >>> loader = torch.utils.data.DataLoader(train, batch_size=3)
        >>> inputs, label = map(torch.autograd.Variable, next(iter(loader)))
        >>> model = models.UNet(in_channels=train.n_channels, n_classes=train.n_classes)
        >>> output = model(inputs)
        >>> labels = train.task.labels
        >>> ignore_label = train.ignore_label
        >>> metrics = _sseg_metrics(output, label, labels, ignore_label)
    """
    pred = output.data.max(dim=1)[1]  # argmax over the class dimension
    true = label.data
    # Mask out pixels marked with the ignore class in either tensor
    mask = (true != ignore_label) & (pred != ignore_label)
    y_pred = pred[mask].cpu().numpy()
    y_true = true[mask].cpu().numpy()

    cfsn = confusion_matrix(y_pred, y_true, labels)
    cfsn = pd.DataFrame(cfsn, index=labels, columns=labels)
    cfsn = cfsn.drop(ignore_label, axis=0).drop(ignore_label, axis=1)

    ious = jaccard_score_from_confusion(cfsn)
    miou = ious.mean()

    # TODO: fix runtime warnings: "Mean of empty slice" and "invalid value
    # encountered in true_divide"

    pixel_accuracy = pixel_accuracy_from_confusion(cfsn)  # same as tpr
    perclass_acc = perclass_accuracy_from_confusion(cfsn)
    perclass_acc = perclass_acc.fillna(0)
    class_accuracy = perclass_acc.mean()

    metrics_dict = ub.odict()
    metrics_dict['miou'] = miou
    metrics_dict['pixel_tpr'] = pixel_accuracy
    metrics_dict['class_tpr'] = class_accuracy
    # if len(perclass_acc) < 3:
    #     for k, acc in perclass_acc.to_dict().items():
    #         metrics_dict['class{}_tpr'.format(k)] = acc
    return metrics_dict
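
The *_from_confusion helpers used above live in clab.torch.metrics and are not shown in this excerpt. A minimal sketch of what they compute, assuming a square confusion-matrix DataFrame whose rows index the true labels and whose diagonal holds the correct counts; the actual clab implementations may differ in details such as NaN handling. Division by empty rows is also the likely source of the true_divide warnings mentioned in the TODO.

import numpy as np
import pandas as pd


def jaccard_score_from_confusion(cfsn):
    # Per-class IoU: TP / (TP + FP + FN)
    cfsn = pd.DataFrame(cfsn)
    values = cfsn.values.astype(float)
    tp = np.diag(values)
    fp = values.sum(axis=0) - tp
    fn = values.sum(axis=1) - tp
    with np.errstate(divide='ignore', invalid='ignore'):
        iou = tp / (tp + fp + fn)
    return pd.Series(iou, index=cfsn.index)


def pixel_accuracy_from_confusion(cfsn):
    # Fraction of all pixels that were classified correctly
    values = pd.DataFrame(cfsn).values.astype(float)
    return np.diag(values).sum() / values.sum()


def perclass_accuracy_from_confusion(cfsn):
    # Per-class recall: correct pixels of a class / all pixels of that class.
    # Classes that never appear divide by zero and come back as NaN, which is
    # why the caller fills them with 0 before averaging.
    cfsn = pd.DataFrame(cfsn)
    values = cfsn.values.astype(float)
    with np.errstate(divide='ignore', invalid='ignore'):
        acc = np.diag(values) / values.sum(axis=1)
    return pd.Series(acc, index=cfsn.index)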
Example #2
    def custom_metrics(harn, outputs, labels):
        # Score only the first output/label pair from the batch lists
        label = labels[0]
        output = outputs[0]

        # Argmax over the class dimension gives the hard predictions
        y_pred = output.data.max(dim=1)[1].cpu().numpy()
        y_true = label.data.cpu().numpy()

        # `all_labels` comes from the enclosing scope of this excerpt
        cfsn = confusion_matrix(y_pred, y_true, labels=all_labels)

        global_acc = pixel_accuracy_from_confusion(cfsn)  # same as acc
        perclass_acc = perclass_accuracy_from_confusion(cfsn)
        # class_accuracy = perclass_acc.fillna(0).mean()
        class_accuracy = np.nan_to_num(perclass_acc).mean()

        metrics_dict = ub.odict()
        metrics_dict['global_acc'] = global_acc
        metrics_dict['class_acc'] = class_accuracy
        return metrics_dict
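
The commented-out pandas variant and the numpy variant above are interchangeable here: both replace NaN (the per-class accuracy of classes that never occur) with 0 before averaging. A quick sanity check, assuming perclass_accuracy_from_confusion returns a pandas Series of floats:

import numpy as np
import pandas as pd

perclass_acc = pd.Series([0.9, np.nan, 0.5])
assert np.isclose(perclass_acc.fillna(0).mean(),
                  np.nan_to_num(perclass_acc).mean())  # both ~0.4667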
Example #3
def _clf_metrics(output, label, labels, ignore_label=-100):
    """

    Ignore:
        >>> from clab.torch.sseg_train import *
        >>> from clab.torch.metrics import *
        >>> datasets = load_task_dataset(taskname='camvid')
        >>> train = datasets['train']
        >>> loader = torch.utils.data.DataLoader(train, batch_size=3)
        >>> inputs, label = map(torch.autograd.Variable, next(iter(loader)))
        >>> model = models.UNet(in_channels=train.n_channels, n_classes=train.n_classes)
        >>> output = model(inputs)
        >>> labels = train.task.labels
        >>> ignore_label = train.ignore_label
        >>> metrics = _clf_metrics(output, label, labels, ignore_label)
    """
    pred = output.data.max(dim=1)[1]
    true = label.data
    mask = (true != ignore_label) & (pred != ignore_label)
    y_pred = pred[mask].cpu().numpy()
    y_true = true[mask].cpu().numpy()

    cfsn = confusion_matrix(y_pred, y_true, labels)
    cfsn = pd.DataFrame(cfsn, index=labels, columns=labels)

    if ignore_label >= 0:
        cfsn = cfsn.drop(ignore_label, axis=0)
        cfsn = cfsn.drop(ignore_label, axis=1)

    global_tpr = pixel_accuracy_from_confusion(cfsn)  # same as tpr
    perclass_acc = perclass_accuracy_from_confusion(cfsn)
    perclass_acc = perclass_acc.fillna(0)
    class_accuracy = perclass_acc.mean()

    metrics_dict = ub.odict()
    metrics_dict['global_tpr'] = global_tpr
    metrics_dict['class_tpr'] = class_accuracy
    return metrics_dict
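
With the default ignore_label=-100 the drop branch is skipped, so _clf_metrics can be exercised on toy data. A hypothetical invocation, assuming the module-level names it relies on (confusion_matrix, pd, ub, and the *_from_confusion helpers) are in scope, e.g. alongside clab.torch.metrics:

import torch

# Fake logits for 4 samples over 3 classes, plus integer class labels
output = torch.autograd.Variable(torch.randn(4, 3))
label = torch.autograd.Variable(torch.LongTensor([0, 2, 1, 1]))
metrics = _clf_metrics(output, label, labels=[0, 1, 2])
print(metrics)  # odict with 'global_tpr' and 'class_tpr'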
Example #4
File: _old.py  Project: afcarl/clab
                            pred2 = (test_dataset.task.instance_label(
                                big_pred, k=k, n_iters=n_iters,
                                watershed=watershed) > 0).astype(np.int8)

                            # # cfsn1 += confusion_matrix(big_gt.ravel(), big_pred.ravel(), labels=[0, 1, 2])
                            # if k > 1:
                            #     kernel = np.ones((k, k), np.uint8)
                            #     opening = cv2.morphologyEx(big_pred, cv2.MORPH_OPEN, kernel, iterations=n_iters)
                            #     # opening = filters.watershed_filter(opening)
                            #     # n_ccs, cc_labels = cv2.connectedComponents(opening.astype(np.uint8), connectivity=4)
                            #     # pred2 = (cc_labels > 0).astype(np.int)
                            #     pred2 = opening
                            # else:
                            #     pred2 = big_pred

                            cfsn2 += confusion_matrix(big_gt.ravel(), pred2.ravel(), labels=[0, 1, 2])

                        miou = jaccard_score_from_confusion(cfsn2)[0:2].mean()
                        scores[(mode, k, n_iters, watershed)] = miou
                        print('mode={}, k={:3d}, n_iters={}, w={} miou = {!r}'.format(mode, k, n_iters, int(watershed), miou))

        print(pd.Series(scores).sort_values())


        # if False:
        #     loader = torch.utils.data.DataLoader(
        #         pharn.dataset, shuffle=False,
        #         pin_memory=True,
        #         num_workers=0,
        #         batch_size=1,
        #     )
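
The pattern in this snippet (sum one confusion matrix over every evaluation image via cfsn2 +=, then score it once at the end) weights every pixel equally, unlike averaging per-image IoUs. A stripped-down sketch of that accumulation, with a hypothetical predict_fn and (gt, image) pairs, reusing the jaccard_score_from_confusion sketched after Example #1; sklearn's confusion_matrix stands in for whichever implementation the original file used:

import numpy as np
from sklearn.metrics import confusion_matrix


def dataset_miou(pairs, predict_fn, labels=(0, 1, 2), score_classes=slice(0, 2)):
    # Accumulate per-image confusion matrices into one dataset-level matrix
    total = np.zeros((len(labels), len(labels)), dtype=np.int64)
    for gt, img in pairs:
        pred = predict_fn(img)
        total += confusion_matrix(gt.ravel(), pred.ravel(), labels=list(labels))
    # Per-class IoU, then the mean over the scored (foreground) classes only,
    # mirroring the [0:2] slice in the snippet above
    ious = jaccard_score_from_confusion(total)
    return np.asarray(ious)[score_classes].mean()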