Example #1
0
    def update(self, output):
        """Accumulate true-positive and actual-positive counts from one batch.

        ``output`` is a ``(y_pred, y)`` pair; shapes are validated by
        ``self._check_shape`` and the problem type by ``self._check_type``.
        """
        y_pred, y = self._check_shape(output)
        self._check_type((y_pred, y))

        # Remember the prediction dtype so both tensors agree before the multiply.
        pred_dtype = y_pred.type()

        if self._type == "binary":
            # Threshold probabilities at 0.5 and flatten both tensors.
            y_pred, y = torch.round(y_pred).view(-1), y.view(-1)
        elif self._type == "multiclass":
            n_cls = y_pred.size(1)
            y = to_onehot(y.view(-1), num_classes=n_cls)
            winners = torch.max(y_pred, dim=1)[1].view(-1)
            y_pred = to_onehot(winners, num_classes=n_cls)

        y_pred, y = y_pred.type(pred_dtype), y.type(pred_dtype)
        hits = y * y_pred
        actual_positives = y.sum(dim=0)

        # zeros_like keeps dtype/device consistent when there are no hits at all.
        true_positives = (torch.zeros_like(actual_positives)
                          if hits.sum() == 0 else hits.sum(dim=0))

        self._true_positives += true_positives
        self._positives += actual_positives
Example #2
0
    def update(self, output):
        """Accumulate per-class true-negative and total counts from one batch.

        ``y`` has shape (batch_size, ...); ``y_pred`` has shape
        (batch_size, num_classes, ...) or (batch_size, ...), in which case it
        is promoted to a two-class categorical prediction.
        """
        y_pred, y = output
        dtype = y_pred.type()

        rank_y, rank_pred = y.ndimension(), y_pred.ndimension()
        if rank_y != rank_pred and rank_y + 1 != rank_pred:
            raise ValueError(
                "y must have shape of (batch_size, ...) and y_pred "
                "must have shape of (batch_size, num_classes, ...) or (batch_size, ...)."
            )

        # Collapse a singleton second dimension on either tensor.
        if y.ndimension() > 1 and y.shape[1] == 1:
            y = y.squeeze(dim=1)
        if y_pred.ndimension() > 1 and y_pred.shape[1] == 1:
            y_pred = y_pred.squeeze(dim=1)

        compare_shape = y_pred.shape
        if y.ndimension() + 1 == y_pred.ndimension():
            # Skip the class dimension when checking compatibility.
            compare_shape = (compare_shape[0], ) + compare_shape[2:]
        if y.shape != compare_shape:
            raise ValueError("y and y_pred must have compatible shapes.")

        if y_pred.ndimension() == y.ndimension():
            # Maps Binary Case to Categorical Case with 2 classes
            y_pred = y_pred.unsqueeze(dim=1)
            y_pred = torch.cat([1.0 - y_pred, y_pred], dim=1)

        n_cls = y_pred.size(1)
        y = to_onehot(y.view(-1), num_classes=n_cls)
        winners = torch.max(y_pred, dim=1)[1].view(-1)
        y_pred = to_onehot(winners, num_classes=n_cls)

        y_pred, y = y_pred.type(dtype), y.type(dtype)

        agree = y * y_pred
        disagree = (1 - y) * y_pred

        # Per-class counts of agreements, with the class axis reversed
        # (index 0 holds the count for the last class, and vice versa).
        true_negatives = agree.sum(dim=0)
        flipped = torch.arange(true_negatives.size(0) - 1, -1,
                               -1).to(true_negatives.device)
        true_negatives = true_negatives.index_select(0, flipped)

        false_positives = disagree.sum(dim=0)
        batch_n = true_negatives + false_positives

        if self._n is None:
            self._n = batch_n
            self._true_negatives = true_negatives
        else:
            self._n += batch_n
            self._true_negatives += true_negatives
Example #3
0
    def update(self, output):
        """Accumulate per-class true-positive and predicted-positive counts.

        ``y`` has shape (batch_size, ...); ``y_pred`` has shape
        (batch_size, num_classes, ...) or (batch_size, ...), in which case it
        is promoted to a two-class categorical prediction.
        """
        y_pred, y = output
        dtype = y_pred.type()

        rank_y, rank_pred = y.ndimension(), y_pred.ndimension()
        if rank_y != rank_pred and rank_y + 1 != rank_pred:
            raise ValueError(
                "y must have shape of (batch_size, ...) and y_pred "
                "must have shape of (batch_size, num_classes, ...) or (batch_size, ...)."
            )

        # Collapse a singleton second dimension on either tensor.
        if y.ndimension() > 1 and y.shape[1] == 1:
            y = y.squeeze(dim=1)
        if y_pred.ndimension() > 1 and y_pred.shape[1] == 1:
            y_pred = y_pred.squeeze(dim=1)

        compare_shape = y_pred.shape
        if y.ndimension() + 1 == y_pred.ndimension():
            # Skip the class dimension when checking compatibility.
            compare_shape = (compare_shape[0], ) + compare_shape[2:]
        if y.shape != compare_shape:
            raise ValueError("y and y_pred must have compatible shapes.")

        if y_pred.ndimension() == y.ndimension():
            # Maps Binary Case to Categorical Case with 2 classes
            y_pred = y_pred.unsqueeze(dim=1)
            y_pred = torch.cat([1.0 - y_pred, y_pred], dim=1)

        n_cls = y_pred.size(1)
        y = to_onehot(y.view(-1), num_classes=n_cls)
        winners = torch.max(y_pred, dim=1)[1].view(-1)
        y_pred = to_onehot(winners, num_classes=n_cls)

        y_pred, y = y_pred.type(dtype), y.type(dtype)

        hits = y * y_pred
        all_positives = y_pred.sum(dim=0)

        # zeros_like keeps dtype/device consistent when nothing was correct.
        true_positives = (torch.zeros_like(all_positives)
                          if hits.sum() == 0 else hits.sum(dim=0))

        if self._all_positives is None:
            self._all_positives = all_positives
            self._true_positives = true_positives
        else:
            self._all_positives += all_positives
            self._true_positives += true_positives
Example #4
0
 def update(self, output):
     """Accumulate per-class true-positive and ground-truth counts (recall)."""
     y_pred, y = output
     n_cls = y_pred.size(1)
     predicted = torch.max(y_pred, 1)[1]
     hit_mask = torch.eq(predicted, y)

     # Per-class counts of ground-truth labels in this batch.
     actual = to_onehot(y, n_cls).sum(dim=0)

     if hit_mask.sum() == 0:
         true_positives = torch.zeros_like(actual)
     else:
         true_positives = to_onehot(predicted[hit_mask], n_cls).sum(dim=0)

     if self._actual is None:
         self._actual = actual
         self._true_positives = true_positives
     else:
         self._actual += actual
         self._true_positives += true_positives
Example #5
0
 def update(self, output):
     """Accumulate per-class true-positive and predicted counts (precision)."""
     y_pred, y = output
     y = y.long()
     n_cls = y_pred.size(1)
     predicted = torch.max(y_pred, 1)[1]
     hit_mask = torch.eq(predicted, y)

     # Per-class counts of predictions made in this batch.
     all_positives = to_onehot(predicted, n_cls).sum(dim=0)

     if hit_mask.sum() == 0:
         true_positives = torch.zeros_like(all_positives)
     else:
         true_positives = to_onehot(predicted[hit_mask], n_cls).sum(dim=0)

     if self._all_positives is None:
         self._all_positives = all_positives
         self._true_positives = true_positives
     else:
         self._all_positives += all_positives
         self._true_positives += true_positives
Example #6
0
    def update(self, output):
        """Buffer one-hot targets and exponentiated predictions as Python lists.

        ``y_pred`` is (batch_size, num_classes) and ``y`` is (batch_size,)
        class indices. ``exp`` is applied to the predictions — presumably they
        are log-probabilities; confirm against the attached model's output.
        """
        y_pred, y = output
        n_cls = y_pred.size(1)
        probs = torch.exp(y_pred)
        target_onehot = to_onehot(y, n_cls)

        self._actual.extend(target_onehot.tolist())
        self._pred.extend(probs.tolist())
Example #7
0
    def update(self, output):
        """Accumulate true positives and actual positives from one batch.

        Handles binary, multiclass and multilabel inputs. For multilabel the
        per-batch counts are concatenated onto the running tensors instead of
        summed element-wise.
        """
        y_pred, y = self._check_shape(output)
        self._check_type((y_pred, y))

        if self._type == "binary":
            y_pred, y = y_pred.view(-1), y.view(-1)
        elif self._type == "multiclass":
            n_cls = y_pred.size(1)
            y = to_onehot(y.view(-1), num_classes=n_cls)
            winners = torch.max(y_pred, dim=1)[1].view(-1)
            y_pred = to_onehot(winners, num_classes=n_cls)
        elif self._type == "multilabel":
            # if y, y_pred shape is (N, C, ...) -> (C, N x ...)
            n_cls = y_pred.size(1)
            y_pred = torch.transpose(y_pred, 1, 0).reshape(n_cls, -1)
            y = torch.transpose(y, 1, 0).reshape(n_cls, -1)

        y = y.type_as(y_pred)
        hits = y * y_pred
        # Convert from int cuda/cpu to double cpu; double precision is needed
        # for the later division true_positives / actual_positives.
        actual_positives = y.sum(dim=0).type(torch.DoubleTensor)

        if hits.sum() == 0:
            true_positives = torch.zeros_like(actual_positives)
        else:
            true_positives = hits.sum(dim=0)

        # Same conversion for the hit counts (a no-op on the zeros branch,
        # which is already a double cpu tensor).
        true_positives = true_positives.type(torch.DoubleTensor)

        if self._type == "multilabel":
            self._true_positives = torch.cat(
                [self._true_positives, true_positives], dim=0)
            self._positives = torch.cat(
                [self._positives, actual_positives], dim=0)
        else:
            self._true_positives += true_positives
            self._positives += actual_positives
 def forward(self, input, target):
     # One-hot encode the integer class targets so the BCE-with-logits base
     # class can compare them element-wise against per-class logits in `input`.
     # NOTE(review): `n_classes` is not defined in this method — it must come
     # from an enclosing scope or the module namespace; confirm it is in scope
     # where this class is defined.
     target = to_onehot(target, num_classes=n_classes).float()
     return super(_BCEWithLogitsLoss, self).forward(input, target)
Example #9
0
def test_to_onehot():
    """to_onehot on indices 0..3 with 4 classes yields the 4x4 identity."""
    labels = torch.LongTensor([0, 1, 2, 3])
    onehot = to_onehot(labels, 4)
    assert onehot.equal(torch.eye(4))