Example #1
    def create_generalized_dice_metric(self, cm: ConfusionMatrix,
                                       weight: torch.Tensor):
        """
        Computes the Sørensen–Dice Coefficient (https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient)
        Args:
            cm (:obj:`ignite.metrics.ConfusionMatrix`): A confusion matrix representing the classification of data.
            weight (:obj:`torch.Tensor`): A weight vector which length equals to the number of classes.
        Returns:
            ignite.Metric: The Generalized Dice Coefficient Metric object.
        """

        # Increase floating point precision
        cm = cm.type(torch.float64)
        dice = 2 * (cm.diag() * weight) / (
            (cm.sum(dim=1) + cm.sum(dim=0)) * weight + EPSILON)

        if self._ignore_index != -100:

            def remove_index(dice_vector):
                try:
                    indices = list(range(len(dice_vector)))
                    indices.remove(self._ignore_index)
                    return dice_vector[indices]
                except ValueError as e:
                    raise IndexError(
                        "'ignore_index' must be non-negative, and lower than the number of classes in confusion matrix, but {} was given."
                        .format(self._ignore_index)) from e

            return MetricsLambda(remove_index, dice)
        else:
            return dice
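For reference, the per-class expression above can be checked by hand on a small matrix. A minimal sketch, assuming EPSILON is a small module-level constant (e.g. 1e-15) and using a hypothetical 3-class confusion matrix:

import torch

EPSILON = 1e-15  # assumed value of the module-level constant

# Hypothetical 3-class confusion matrix (rows: ground truth, columns: prediction)
cm = torch.tensor([[10., 2., 0.],
                   [1., 15., 4.],
                   [0., 3., 12.]], dtype=torch.float64)
weight = torch.tensor([0.5, 0.3, 0.2], dtype=torch.float64)

# Same per-class expression as in create_generalized_dice_metric
dice = 2 * (cm.diag() * weight) / (
    (cm.sum(dim=1) + cm.sum(dim=0)) * weight + EPSILON)
print(dice)  # one weighted Dice value per class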
Example #2
def save_confusion_matrix(data_root,
                          output_root,
                          segmenter,
                          data_subset="val"):
    dataset = SegmentationDataset(data_root, data_subset)

    confusion_matrix_calculator = ConfusionMatrix(num_classes=2,
                                                  average="precision")
    accuracy_calculator = Accuracy()

    for image, mask_gt in dataset:
        mask_pred = segmenter.get_raw_prediction(image)
        mask_gt = torch.from_numpy(mask_gt).to(
            mask_pred.device).unsqueeze(0).unsqueeze(0)

        output = (mask_pred, mask_gt)

        confusion_matrix_calculator.update(
            output_transform_confusion_matrix(output))
        accuracy_calculator.update(output_transform_accuracy(output))

    confusion_matrix = confusion_matrix_calculator.compute()
    accuracy = accuracy_calculator.compute()

    cm_figure = plot_confusion_matrix(confusion_matrix)

    filename_base = f"confusion_matrix_acc={accuracy:.6f}"

    cm_figure.savefig(os.path.join(output_root, filename_base + ".pdf"))
    cm_figure.savefig(os.path.join(output_root, filename_base + ".png"))
Example #3
def cmAccuracy(cm: ConfusionMatrix,
               ignore_index: Optional[int] = None) -> MetricsLambda:
    """Calculates accuracy using :class:`~ignite.metrics.ConfusionMatrix` metric.
    Args:
        cm (ConfusionMatrix): instance of confusion matrix metric
    Returns:
        MetricsLambda
    """
    # Increase floating point precision and pass to CPU
    cm = cm.type(torch.DoubleTensor)

    correct_pixels = cm.diag()
    total_class_pixels = cm.sum(dim=1)

    pix_accs = correct_pixels / (total_class_pixels + 1e-15)

    if ignore_index is not None:

        def ignore_index_fn(pix_accs_vector):
            if ignore_index >= len(pix_accs_vector):
                raise ValueError(
                    "ignore_index {} is larger than the length of pix_accs vector {}"
                    .format(ignore_index, len(pix_accs_vector)))
            indices = list(range(len(pix_accs_vector)))
            indices.remove(ignore_index)
            return pix_accs_vector[indices]

        return MetricsLambda(ignore_index_fn, pix_accs)
    else:
        return pix_accs
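As a quick sanity check of the expression above, the same per-class quantity (diagonal over row sums, i.e. class-wise recall treated as pixel accuracy) can be computed by hand on a hypothetical 2-class confusion matrix:

import torch

cm_values = torch.tensor([[8., 2.],
                          [1., 9.]], dtype=torch.float64)
pix_accs = cm_values.diag() / (cm_values.sum(dim=1) + 1e-15)
print(pix_accs)  # tensor([0.8000, 0.9000], dtype=torch.float64)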
Example #4
class IoU(EvaluationMetric):
    """ Intersection over Union (IoU) metric per class

    The metric is defined for a pair of grayscale semantic segmentation images.

    Args:
        num_classes: number of classes in the ground truth image
        output_transform: function that transforms the output pair of images

    Attributes:
        cm (ignite.metrics.ConfusionMatrix): pytorch ignite confusion matrix
        object.
    """
    def __init__(self, num_classes, output_transform=lambda x: x):
        self.cm = ConfusionMatrix(
            num_classes=num_classes,
            average=None,
            output_transform=output_transform,
        )

    def reset(self):
        self.cm.reset()

    def update(self, output):
        self.cm.update(output)

    def compute(self):
        cm = self.cm.compute()
        iou = cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) - cm.diag() + 1e-15)

        return iou
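The compute() formula above follows from the confusion matrix directly: for class i, the diagonal entry is TP, the row sum adds FN, and the column sum adds FP, so the denominator is the union. A quick numeric check on a hypothetical 2-class matrix:

import torch

# Rows: ground truth, columns: prediction
cm = torch.tensor([[6., 2.],
                   [1., 3.]])
iou = cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) - cm.diag() + 1e-15)
# Class 0: TP=6, FN=2, FP=1 -> 6 / 9 = 0.667
# Class 1: TP=3, FN=1, FP=2 -> 3 / 6 = 0.5
print(iou)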
Example #5
 def __init__(self,
              num_classes: int,
              reduction: Union[None, str] = "mean",
              average: Union[None, str] = None,
              weight: Union[None, torch.Tensor] = None,
              ignore_index: int = -100,
              output_transform: callable = lambda x: x) -> None:
     """
     Metric initializer.
     Args:
         num_classes (int): The number of classes in the problem. In case of images, num_classes should also count the background index 0.
         average (str, optional): Confusion matrix values averaging schema: None, "samples", "recall", "precision".
             Default is None. If `average="samples"` then confusion matrix values are normalized by the number of seen
             samples. If `average="recall"` then confusion matrix values are normalized such that diagonal values
             represent class recalls. If `average="precision"` then confusion matrix values are normalized such that
             diagonal values represent class precisions.
          reduction (str, optional): The type of reduction to apply (e.g. "mean"). Default: "mean".
          weight (torch.Tensor, optional): A manual rescaling weight applied to each class. Default: None.
          ignore_index (int, optional): Index of a class to ignore in the Dice computation. Default: -100 (no class ignored).
         output_transform (callable, optional): a callable that is used to transform the
             output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and
             you want to compute the metric with respect to one of the outputs.
     """
     if reduction not in self.SUPPORTED_REDUCTIONS:
         raise NotImplementedError("Reduction type not supported.")
     self._num_classes = num_classes
     self._ignore_index = ignore_index
     self._reduction = reduction
     self._weight = weight
     self._cm = ConfusionMatrix(num_classes=num_classes,
                                average=average,
                                output_transform=output_transform)
     self._metric = self.create_dice_metric(self._cm)
     super(Dice, self).__init__(output_transform=output_transform)
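SUPPORTED_REDUCTIONS, EPSILON, and create_dice_metric are referenced but not defined in this excerpt (create_dice_metric appears in Example #22). A plausible sketch of the missing class-level constants, inferred from how they are used here:

# Inferred from usage; not the original source
SUPPORTED_REDUCTIONS = [None, "mean"]
EPSILON = 1e-15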
Example #6
def test_no_update():
    cm = ConfusionMatrix(10)
    with pytest.raises(
            NotComputableError,
            match=r"Confusion matrix must have at least one example before it "
    ):
        cm.compute()
Example #7
def test_ignored_out_of_num_classes_indices():
    num_classes = 21
    cm = ConfusionMatrix(num_classes=num_classes)

    y_pred = torch.rand(4, num_classes, 12, 10)
    y = torch.randint(0, 255, size=(4, 12, 10)).long()
    cm.update((y_pred, y))
    np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
    np_y = y.numpy().ravel()
    assert np.all(confusion_matrix(np_y, np_y_pred, labels=list(range(num_classes))) == cm.compute().numpy())
Example #8
def test_indexing_metric():
    def _test(ignite_metric, sklearn_metric, sklearn_args, index, num_classes=5):
        y_pred = torch.rand(15, 10, num_classes).float()
        y = torch.randint(0, num_classes, size=(15, 10)).long()

        def update_fn(engine, batch):
            y_pred, y = batch
            return y_pred, y

        metrics = {'metric': ignite_metric[index],
                   'metric_wo_index': ignite_metric}

        validator = Engine(update_fn)

        for name, metric in metrics.items():
            metric.attach(validator, name)

        def data(y_pred, y):
            for i in range(y_pred.shape[0]):
                yield (y_pred[i], y[i])

        d = data(y_pred, y)
        state = validator.run(d, max_epochs=1)

        sklearn_output = sklearn_metric(y.view(-1).numpy(),
                                        y_pred.view(-1, num_classes).argmax(dim=1).numpy(),
                                        **sklearn_args)

        assert (state.metrics['metric_wo_index'][index] == state.metrics['metric']).all()
        assert (np.allclose(state.metrics['metric'].numpy(), sklearn_output))

    num_classes = 5

    labels = list(range(0, num_classes, 2))
    _test(Precision(), precision_score, {'labels': labels, 'average': None}, index=labels)
    labels = list(range(num_classes - 1, 0, -2))
    _test(Precision(), precision_score, {'labels': labels, 'average': None}, index=labels)
    labels = [1]
    _test(Precision(), precision_score, {'labels': labels, 'average': None}, index=labels)

    labels = list(range(0, num_classes, 2))
    _test(Recall(), recall_score, {'labels': labels, 'average': None}, index=labels)
    labels = list(range(num_classes - 1, 0, -2))
    _test(Recall(), recall_score, {'labels': labels, 'average': None}, index=labels)
    labels = [1]
    _test(Recall(), recall_score, {'labels': labels, 'average': None}, index=labels)

    # np.ix_ is used to take a 2D slice of a matrix. This is required to get an accurate result from
    # ConfusionMatrix, which must be sliced the same way row-wise and column-wise.
    labels = list(range(0, num_classes, 2))
    _test(ConfusionMatrix(num_classes), confusion_matrix, {'labels': labels}, index=np.ix_(labels, labels))
    labels = list(range(num_classes - 1, 0, -2))
    _test(ConfusionMatrix(num_classes), confusion_matrix, {'labels': labels}, index=np.ix_(labels, labels))
    labels = [1]
    _test(ConfusionMatrix(num_classes), confusion_matrix, {'labels': labels}, index=np.ix_(labels, labels))
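To illustrate the np.ix_ comment above, this is what the 2D slice does on a small matrix:

import numpy as np

m = np.arange(25).reshape(5, 5)
labels = [0, 2, 4]
sub = m[np.ix_(labels, labels)]  # rows 0, 2, 4 crossed with columns 0, 2, 4 -> shape (3, 3)
print(sub)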
Example #9
def test_multiclass_images():
    num_classes = 3
    cm = ConfusionMatrix(num_classes=num_classes)

    y_true, y_pred = get_y_true_y_pred()

    # Compute confusion matrix with sklearn
    true_res = confusion_matrix(y_true.reshape(-1), y_pred.reshape(-1))

    th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)

    # Update metric
    output = (th_y_logits, th_y_true)
    cm.update(output)

    res = cm.compute().numpy()

    assert np.all(true_res == res)

    # Another test on batch of 2 images
    num_classes = 3
    cm = ConfusionMatrix(num_classes=num_classes)

    # Create a batch of two images:
    th_y_true1 = torch.from_numpy(y_true).reshape(1, 30, 30)
    th_y_true2 = torch.from_numpy(y_true.transpose()).reshape(1, 30, 30)
    th_y_true = torch.cat([th_y_true1, th_y_true2], dim=0)

    # Create a batch of 2 logits tensors
    y_probas = np.ones((3, 30, 30)) * -10
    y_probas[0, (y_pred == 0)] = 720
    y_probas[1, (y_pred == 1)] = 720
    y_probas[2, (y_pred == 2)] = 768
    th_y_logits1 = torch.from_numpy(y_probas).reshape(1, 3, 30, 30)

    y_probas = np.ones((3, 30, 30)) * -10
    y_probas[0, (y_pred.transpose() == 0)] = 720
    y_probas[1, (y_pred.transpose() == 2)] = 720
    y_probas[2, (y_pred.transpose() == 1)] = 768
    th_y_logits2 = torch.from_numpy(y_probas).reshape(1, 3, 30, 30)

    th_y_logits = torch.cat([th_y_logits1, th_y_logits2], dim=0)

    # Update metric & compute
    output = (th_y_logits, th_y_true)
    cm.update(output)
    res = cm.compute().numpy()

    # Compute confusion matrix with sklearn
    true_res = confusion_matrix(
        th_y_true.numpy().reshape(-1),
        np.argmax(th_y_logits.numpy(), axis=1).reshape(-1),
    )

    assert np.all(true_res == res)
Example #10
def test_cm_recall():

    y_true, y_pred = np.random.randint(0, 10, size=(1000,)), np.random.randint(0, 10, size=(1000,))
    th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)

    true_re = recall_score(y_true.reshape(-1), y_pred.reshape(-1), average="macro")

    cm = ConfusionMatrix(num_classes=10)
    re_metric = cmRecall(cm, average=True)

    # Update metric
    output = (th_y_logits, th_y_true)
    cm.update(output)

    res = re_metric.compute().numpy()

    assert pytest.approx(res) == true_re

    true_re = recall_score(y_true.reshape(-1), y_pred.reshape(-1), average=None)
    cm = ConfusionMatrix(num_classes=10)
    re_metric = cmRecall(cm, average=False)

    # Update metric
    output = (th_y_logits, th_y_true)
    cm.update(output)

    res = re_metric.compute().numpy()

    assert np.all(res == true_re)
Example #11
def test_iou():

    y_true, y_pred = get_y_true_y_pred()
    th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)

    true_res = [0, 0, 0]
    for index in range(3):
        bin_y_true = y_true == index
        bin_y_pred = y_pred == index
        intersection = bin_y_true & bin_y_pred
        union = bin_y_true | bin_y_pred
        true_res[index] = intersection.sum() / union.sum()

    cm = ConfusionMatrix(num_classes=3)
    iou_metric = IoU(cm)

    # Update metric
    output = (th_y_logits, th_y_true)
    cm.update(output)

    res = iou_metric.compute().numpy()

    assert np.all(res == true_res)

    for ignore_index in range(3):
        cm = ConfusionMatrix(num_classes=3)
        iou_metric = IoU(cm, ignore_index=ignore_index)
        # Update metric
        output = (th_y_logits, th_y_true)
        cm.update(output)
        res = iou_metric.compute().numpy()
        true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1:]
        assert np.all(res == true_res_), "{}: {} vs {}".format(
            ignore_index, res, true_res_)
Example #12
def test_dice_coefficient():

    y_true, y_pred = get_y_true_y_pred()
    th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)

    true_res = [0, 0, 0]
    for index in range(3):
        bin_y_true = y_true == index
        bin_y_pred = y_pred == index
        # dice coefficient: 2*intersection(x, y) / (|x| + |y|)
        # union(x, y) = |x| + |y| - intersection(x, y)
        intersection = bin_y_true & bin_y_pred
        union = bin_y_true | bin_y_pred
        true_res[index] = 2.0 * intersection.sum() / (union.sum() + intersection.sum())

    cm = ConfusionMatrix(num_classes=3)
    dice_metric = DiceCoefficient(cm)

    # Update metric
    output = (th_y_logits, th_y_true)
    cm.update(output)

    res = dice_metric.compute().numpy()
    np.testing.assert_allclose(res, true_res)

    for ignore_index in range(3):
        cm = ConfusionMatrix(num_classes=3)
        dice_metric = DiceCoefficient(cm, ignore_index=ignore_index)
        # Update metric
        output = (th_y_logits, th_y_true)
        cm.update(output)
        res = dice_metric.compute().numpy()
        true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1 :]
        assert np.all(res == true_res_), f"{ignore_index}: {res} vs {true_res_}"
Example #13
    def _test(average=None):

        y_true, y_pred = get_y_true_y_pred()
        th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)

        true_res = [0, 0, 0]
        for index in range(3):
            bin_y_true = y_true == index
            bin_y_pred = y_pred == index
            intersection = bin_y_true & bin_y_pred
            union = bin_y_true | bin_y_pred
            true_res[index] = intersection.sum() / union.sum()

        cm = ConfusionMatrix(num_classes=3, average=average)
        jaccard_index = JaccardIndex(cm)

        # Update metric
        output = (th_y_logits, th_y_true)
        cm.update(output)

        res = jaccard_index.compute().numpy()

        assert np.all(res == true_res)

        for ignore_index in range(3):
            cm = ConfusionMatrix(num_classes=3)
            jaccard_index_metric = JaccardIndex(cm, ignore_index=ignore_index)
            # Update metric
            output = (th_y_logits, th_y_true)
            cm.update(output)
            res = jaccard_index_metric.compute().numpy()
            true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1 :]
            assert np.all(res == true_res_), f"{ignore_index}: {res} vs {true_res_}"
Example #14
def test_miou():

    y_true, y_pred = get_y_true_y_pred()
    th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)

    true_res = [0, 0, 0]
    for index in range(3):
        bin_y_true = y_true == index
        bin_y_pred = y_pred == index
        intersection = bin_y_true & bin_y_pred
        union = bin_y_true | bin_y_pred
        true_res[index] = intersection.sum() / union.sum()

    true_res_ = np.mean(true_res)

    cm = ConfusionMatrix(num_classes=3)
    iou_metric = mIoU(cm)

    # Update metric
    output = (th_y_logits, th_y_true)
    cm.update(output)

    res = iou_metric.compute().numpy()

    assert res == true_res_

    for ignore_index in range(3):
        cm = ConfusionMatrix(num_classes=3)
        iou_metric = mIoU(cm, ignore_index=ignore_index)
        # Update metric
        output = (th_y_logits, th_y_true)
        cm.update(output)
        res = iou_metric.compute().numpy()
        true_res_ = np.mean(true_res[:ignore_index] + true_res[ignore_index + 1 :])
        assert res == true_res_, f"{ignore_index}: {res} vs {true_res_}"
Example #15
def eval_model(model, val_loader, device='cpu', num_classes=21):
    def evaluate_function(engine, batch):
        model.eval()
        with torch.no_grad():
            img, mask = batch
            img = img.to(device)
            mask = mask.to(device)
            mask_pred = model(img)
            # torchvision segmentation models return a dict of outputs;
            # plain models return the logits tensor directly
            if isinstance(mask_pred, dict):
                mask_pred = mask_pred['out']
            return mask_pred, mask

    val_evaluator = Engine(evaluate_function)
    cm = ConfusionMatrix(num_classes=num_classes)
    mIoU(cm).attach(val_evaluator, 'mean IoU')
    Accuracy().attach(val_evaluator, "accuracy")
    Loss(loss_fn=nn.CrossEntropyLoss()).attach(val_evaluator, "CE Loss")

    state = val_evaluator.run(val_loader)

    return state
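A hypothetical call site for eval_model; model and val_loader are assumed to be a segmentation model and a DataLoader of (image, mask) batches:

state = eval_model(model, val_loader, device="cuda", num_classes=21)
print(state.metrics["mean IoU"], state.metrics["accuracy"], state.metrics["CE Loss"])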
Example #16
def test_cm_accuracy():

    y_true, y_pred = get_y_true_y_pred()
    th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)

    true_acc = accuracy_score(y_true.reshape(-1), y_pred.reshape(-1))

    cm = ConfusionMatrix(num_classes=3)
    acc_metric = cmAccuracy(cm)

    # Update metric
    output = (th_y_logits, th_y_true)
    cm.update(output)

    res = acc_metric.compute().numpy()

    assert pytest.approx(res) == true_acc
Example #17
def evaluation(local_rank, config, logger, with_clearml):

    rank = idist.get_rank()
    device = idist.device()
    manual_seed(config.seed + local_rank)

    data_loader = config.data_loader
    model = config.model.to(device)

    # Load weights:
    state_dict = get_model_weights(config, logger, with_clearml)
    model.load_state_dict(state_dict)

    # Adapt model to dist config
    model = idist.auto_model(model)

    # Setup evaluators
    num_classes = config.num_classes
    cm_metric = ConfusionMatrix(num_classes=num_classes)

    val_metrics = {
        "IoU": IoU(cm_metric),
        "mIoU_bg": mIoU(cm_metric),
    }

    if ("val_metrics" in config) and isinstance(config.val_metrics, dict):
        val_metrics.update(config.val_metrics)

    evaluator = create_evaluator(model,
                                 val_metrics,
                                 config,
                                 with_clearml,
                                 tag="val")

    # Setup Tensorboard logger
    if rank == 0:
        tb_logger = common.TensorboardLogger(
            log_dir=config.output_path.as_posix())
        tb_logger.attach_output_handler(
            evaluator,
            event_name=Events.COMPLETED,
            tag="validation",
            metric_names="all",
        )

    # Log confusion matrix to ClearML:
    if with_clearml:
        evaluator.add_event_handler(Events.COMPLETED, compute_and_log_cm,
                                    cm_metric, evaluator.state.iteration)

    state = evaluator.run(data_loader)
    utils.log_metrics(logger, 0, state.times["COMPLETED"], "Validation",
                      state.metrics)

    if rank == 0:
        tb_logger.close()
Example #18
def _test_distrib_accumulator_device(device):

    metric_devices = [torch.device("cpu")]
    if device.type != "xla":
        metric_devices.append(idist.device())
    for metric_device in metric_devices:

        cm = ConfusionMatrix(num_classes=3, device=metric_device)
        assert cm._device == metric_device
        assert (
            cm.confusion_matrix.device == metric_device
        ), f"{type(cm.confusion_matrix.device)}:{cm.confusion_matrix.device} vs {type(metric_device)}:{metric_device}"

        y_true, y_pred = get_y_true_y_pred()
        th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)
        cm.update((th_y_logits, th_y_true))

        assert (
            cm.confusion_matrix.device == metric_device
        ), f"{type(cm.confusion_matrix.device)}:{cm.confusion_matrix.device} vs {type(metric_device)}:{metric_device}"
Example #19
def make_engine(process_function):
    evaluator = Engine(process_function)

    num_classes = getattr(datasets, CONFIG["dataset"]["name"]).N_LABELS
    cm = ConfusionMatrix(num_classes=num_classes,
                         output_transform=output_transform)
    IoU(cm, ignore_index=0).attach(evaluator, 'IoU')
    mIoU(cm, ignore_index=0).attach(evaluator, 'mIoU')
    Accuracy(output_transform=output_transform).attach(evaluator, 'Accuracy')
    cmAccuracy(cm, ignore_index=0).attach(evaluator, 'ClasswiseAccuracy')

    return evaluator
Example #20
def get_metrics(loss):
    metrics = {
        "loss":
        Loss(loss),
        "accuracy":
        Accuracy(output_transform_accuracy),
        "confusion matrix":
        ConfusionMatrix(num_classes=2,
                        output_transform=output_transform_confusion_matrix,
                        average="precision"),
    }
    return metrics
Example #21
def test_multiclass_input():
    def _test(y_pred, y, num_classes, cm, batch_size):
        cm.reset()
        if batch_size > 1:
            n_iters = y.shape[0] // batch_size + 1
            for i in range(n_iters):
                idx = i * batch_size
                cm.update(
                    (y_pred[idx:idx + batch_size], y[idx:idx + batch_size]))
        else:
            cm.update((y_pred, y))

        np_y_pred = y_pred.numpy().argmax(axis=1).ravel()
        np_y = y.numpy().ravel()
        assert np.all(
            confusion_matrix(np_y, np_y_pred, labels=list(range(num_classes)))
            == cm.compute().numpy())

    def get_test_cases():
        return [
            # Multiclass input data of shape (N, )
            (torch.rand(10, 4), torch.randint(0, 4, size=(10, )).long(), 4, 1),
            (torch.rand(4, 10), torch.randint(0, 10,
                                              size=(4, )).long(), 10, 1),
            (torch.rand(4, 2), torch.randint(0, 2, size=(4, )).long(), 2, 1),
            (torch.rand(100, 5), torch.randint(0, 5,
                                               size=(100, )).long(), 5, 16),
            # Multiclass input data of shape (N, L)
            (torch.rand(10, 4, 5), torch.randint(0, 4,
                                                 size=(10, 5)).long(), 4, 1),
            (torch.rand(4, 10, 5), torch.randint(0, 10,
                                                 size=(4, 5)).long(), 10, 1),
            (torch.rand(100, 9,
                        7), torch.randint(0, 9, size=(100, 7)).long(), 9, 16),
            # Multiclass input data of shape (N, H, W, ...)
            (torch.rand(4, 5, 12,
                        10), torch.randint(0, 5,
                                           size=(4, 12, 10)).long(), 5, 1),
            (torch.rand(4, 5, 10, 12,
                        8), torch.randint(0, 5,
                                          size=(4, 10, 12, 8)).long(), 5, 1),
            (torch.rand(100, 3, 8,
                        8), torch.randint(0, 3,
                                          size=(100, 8, 8)).long(), 3, 16),
        ]

    # check several random inputs, as exact random occurrences are rare
    for _ in range(5):
        for y_pred, y, num_classes, batch_size in get_test_cases():
            cm = ConfusionMatrix(num_classes=num_classes)
            _test(y_pred, y, num_classes, cm, batch_size)
Example #22
    def create_dice_metric(self, cm: ConfusionMatrix):
        """
        Computes the Sørensen–Dice Coefficient (https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient)
        Args:
            cm (:obj:`ignite.metrics.ConfusionMatrix`): A confusion matrix representing the classification of data.
        Returns:
            array or float: The Sørensen–Dice Coefficient for each class or the mean Sørensen–Dice Coefficient.
        """
        # Increase floating point precision
        cm = cm.type(torch.float64)
        dice = 2 * cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) + EPSILON)

        if self._ignore_index != -100:

            def remove_index(dice_vector):
                try:
                    indices = list(range(len(dice_vector)))
                    indices.remove(self._ignore_index)
                    return dice_vector[indices]
                except ValueError as e:
                    raise IndexError(
                        "'ignore_index' must be non-negative, and lower than the number of classes in confusion matrix, but {} was given."
                        .format(self._ignore_index)) from e

            dice = MetricsLambda(remove_index, dice)

        if self._weight is not None:

            def multiply_weights(dice_vector):
                return self._weight * dice_vector

            dice = MetricsLambda(multiply_weights, dice)

        if self._reduction == "mean":
            dice = dice.mean()

        return dice
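Note that each MetricsLambda above wraps the previous metric lazily: nothing is evaluated until compute() is called on the final object, so remove_index, multiply_weights, and the mean reduction all chain off the single underlying confusion matrix. A minimal sketch of the same pattern:

import torch
from ignite.metrics import ConfusionMatrix, MetricsLambda

cm = ConfusionMatrix(num_classes=3)
# The lambda receives cm's computed tensor only when compute() is called
dice = MetricsLambda(
    lambda m: 2 * m.diag() / (m.sum(dim=1) + m.sum(dim=0) + 1e-15), cm)
mean_dice = dice.mean()  # metric arithmetic yields yet another MetricsLambda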
Example #23
def test_network(model, test_dataset):
    with torch.no_grad():
        evaluator = create_supervised_evaluator(model,
                                                metrics={
                                                    "accuracy": Accuracy(),
                                                    "confusion": ConfusionMatrix(10)
                                                })
        data_loader = DataLoader(test_dataset,
                                 batch_size=BATCH_SIZE,
                                 shuffle=False)
        evaluator.run(data_loader)
        accuracy = evaluator.state.metrics["accuracy"] * 100
        return accuracy, evaluator.state.metrics["confusion"]
 def __load_metrics(self):
     precision = Precision(average=False)
     recall = Recall(average=False)
     F1 = precision * recall * 2 / (precision + recall + 1e-20)
     F1 = MetricsLambda(lambda t: torch.mean(t).item(), F1)
     confusion_matrix = ConfusionMatrix(self.n_class, average="recall")
     # TODO: Add metric by patient
     self.metrics = {
         'accuracy': Accuracy(),
         "f1": F1,
         "confusion_matrix": confusion_matrix,
         "precision": precision.mean(),
         "recall": recall.mean(),
         'loss': Loss(self.loss)
     }
Example #25
def evaluate(model, dataset, task_id, batch_size, device, out_id=0):
    val_loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    n_classes = dataset.tensors[1][:, 0].unique().numel()
    out_transform = get_attr_transform(out_id)
    eval_metrics = {
        'accuracy': Accuracy(output_transform=out_transform),
        'confusion': ConfusionMatrix(num_classes=n_classes,
                                     output_transform=out_transform)
    }
    evaluator = create_supervised_evaluator(model, metrics=eval_metrics,
                                            device=device)
    evaluator._logger.setLevel(logging.WARNING)
    evaluator.run(val_loader)
    metrics = evaluator.state.metrics
    return metrics['accuracy'], metrics['confusion']
Example #26
        def compute_and_log_cm():
            cm = cm_metric.compute()
            # CM: values are normalized such that diagonal values represent class recalls
            cm = ConfusionMatrix.normalize(cm, "recall").cpu().numpy()

            if idist.get_rank() == 0:
                from trains import Task

                trains_logger = Task.current_task().get_logger()
                trains_logger.report_confusion_matrix(
                    title="Final Confusion Matrix",
                    series="cm-preds-gt",
                    matrix=cm,
                    iteration=trainer.state.iteration,
                    xlabels=VOCSegmentationOpencv.target_names,
                    ylabels=VOCSegmentationOpencv.target_names,
                )
Example #27
def build_metrics():
    metrics = {
        'precision':
        Precision(lambda x: (x[0].argmax(dim=1), x[1])),
        'recall':
        Recall(lambda x: (x[0].argmax(dim=1), x[1])),
        'accuracy':
        Accuracy(lambda x: (x[0].argmax(dim=1), x[1])),
        'confusion_matrix':
        ConfusionMatrix(2, output_transform=prepare_confusion_matrix),
        'metric_output_results':
        MetricOutputResults(lambda x: (x[0].argmax(dim=1), x[1], x[0], x[2])),
        'metric_last_layer':
        MetricLastLayer(lambda x: (x[3])),
        #'auroc':AUROC(lambda x: (x[0], x[1])),
    }
    return metrics
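prepare_confusion_matrix, MetricOutputResults, and MetricLastLayer are project-specific and not shown. A plausible sketch of the confusion-matrix transform, assuming the engine output is a tuple whose first two entries are (logits, y):

def prepare_confusion_matrix(output):
    logits, y = output[0], output[1]
    # ConfusionMatrix argmaxes over dim 1 itself, so raw (batch, 2) scores are fine
    return logits, y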
Example #28
def test_dice_coefficient_wrong_input():

    with pytest.raises(TypeError, match="Argument cm should be instance of ConfusionMatrix"):
        DiceCoefficient(None)

    cm = ConfusionMatrix(num_classes=10)
    with pytest.raises(ValueError, match="ignore_index should be non-negative integer"):
        DiceCoefficient(cm, ignore_index=-1)

    with pytest.raises(ValueError, match="ignore_index should be non-negative integer"):
        DiceCoefficient(cm, ignore_index="a")

    with pytest.raises(ValueError, match="ignore_index should be non-negative integer"):
        DiceCoefficient(cm, ignore_index=10)

    with pytest.raises(ValueError, match="ignore_index should be non-negative integer"):
        DiceCoefficient(cm, ignore_index=11)
Example #29
def test_iou():
    def _test(average=None):

        y_true, y_pred = get_y_true_y_pred()
        th_y_true, th_y_logits = compute_th_y_true_y_logits(y_true, y_pred)

        true_res = [0, 0, 0]
        for index in range(3):
            bin_y_true = y_true == index
            bin_y_pred = y_pred == index
            intersection = bin_y_true & bin_y_pred
            union = bin_y_true | bin_y_pred
            true_res[index] = intersection.sum() / union.sum()

        cm = ConfusionMatrix(num_classes=3, average=average)
        iou_metric = IoU(cm)

        # Update metric
        output = (th_y_logits, th_y_true)
        cm.update(output)

        res = iou_metric.compute().numpy()

        assert np.all(res == true_res)

        for ignore_index in range(3):
            cm = ConfusionMatrix(num_classes=3)
            iou_metric = IoU(cm, ignore_index=ignore_index)
            # Update metric
            output = (th_y_logits, th_y_true)
            cm.update(output)
            res = iou_metric.compute().numpy()
            true_res_ = true_res[:ignore_index] + true_res[ignore_index + 1:]
            assert np.all(res == true_res_), "{}: {} vs {}".format(
                ignore_index, res, true_res_)

    _test()
    _test(average="samples")

    with pytest.raises(
            ValueError,
            match=r"ConfusionMatrix should have average attribute either"):
        cm = ConfusionMatrix(num_classes=3, average="precision")
        IoU(cm)
Example #30
def step_train_supervised(model,
                          train_loader,
                          criterion,
                          optimizer,
                          device='cpu',
                          num_classes=21):
    """
        A step of fully supervised segmentation model training.
    """
    def train_function(engine, batch):
        optimizer.zero_grad()
        model.train()
        img, mask = batch
        img = img.to(device)
        mask = mask.to(device)
        mask_pred = model(img)
        # torchvision segmentation models return a dict of outputs;
        # plain models return the logits tensor directly
        if isinstance(mask_pred, dict):
            mask_pred = mask_pred['out']
        loss = criterion(mask_pred, mask)
        loss.backward()
        optimizer.step()

        return mask_pred, mask

    train_engine = Engine(train_function)
    cm = ConfusionMatrix(num_classes=num_classes)
    mIoU(cm).attach(train_engine, 'mean IoU')
    Accuracy().attach(train_engine, "accuracy")
    Loss(loss_fn=nn.CrossEntropyLoss()).attach(train_engine, "CE Loss")
    state = train_engine.run(train_loader)

    return state
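A hypothetical outer loop around the step above; model, train_loader, and optimizer are assumed to exist:

criterion = nn.CrossEntropyLoss()
for epoch in range(10):
    state = step_train_supervised(model, train_loader, criterion,
                                  optimizer, device="cuda", num_classes=21)
    print(epoch, state.metrics["mean IoU"], state.metrics["CE Loss"])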