Example 1
0
def test_to_prob(an_array, a_binary_array):
    """Check that ``to_prob`` rescales raw scores but leaves probabilities alone.

    Raw (unnormalized) inputs must come back changed; inputs that are already
    valid probabilities (softmax over axis 1, or sigmoid for the binary case)
    must pass through untouched.
    """
    # Unnormalized inputs should be rescaled by to_prob.
    for raw in (an_array, a_binary_array):
        assert not np.allclose(to_prob(raw), raw)

    # Already-normalized inputs should be returned unchanged.
    multiclass_probs = softmax(an_array, 1)
    binary_probs = expit(a_binary_array)
    for probs in (multiclass_probs, binary_probs):
        assert np.allclose(to_prob(probs), probs)
Example 2
0
    def update(self, output=None, target=None):
        """
        Accumulate the confusion-matrix statistics from a batch.

        For every class and every configured threshold, a report is computed
        from the class's predicted probabilities against the binarized target,
        and its fp/tp/fn counts are added to the running totals.

        Args:
            output (tensor): predictions of model
            target (tensor): labels
        """
        # Move tensors to host memory and normalize scores to probabilities.
        output = to_prob(output.detach().cpu().numpy())
        target = target.detach().cpu().numpy()

        assert output.ndim > target.ndim, "Only multiclass classification is supported."
        for cls in range(self.num_classes):
            # One-vs-rest binarization of the labels for this class.
            cls_target = (target == cls).astype(np.int8)
            for threshold in self.threshold:
                report = self._make_report(output[:, cls, ...], cls_target, threshold)
                stats = self._data[cls][threshold]
                stats.fp += report.fp
                stats.tp += report.tp
                stats.fn += report.fn
Example 3
0
    def update(self, output=None, target=None):
        """
        Updating the true positive (tp) and number of samples in each bin.

        Per class, each sample's predicted probability is mapped to a
        confidence bin; the bin's sample count always grows, and its tp
        count grows when the sample's label matches the class.

        Args:
            output (tensor): logits or predictions of model
            target (tensor): labels
        """
        # Move tensors to host memory and normalize scores to probabilities.
        output = to_prob(output.detach().cpu().numpy())
        target = target.detach().cpu().numpy()

        # Clip so a confidence of exactly 1.0 still falls into the top bin
        # instead of indexing one past the end.
        output = np.clip(output, 0, 0.9999)

        for cls in range(self.n_cls):
            for confidence, label in zip(output[:, cls], target):
                bin_id = int(math.floor(confidence * self.n_bins))
                self.samples[cls, bin_id] += 1
                self.tp[cls, bin_id] += int(cls == label)
Example 4
0
    def update(self, output=None, target=None):
        """
        Updating the true positive (tp) and number of samples in each bin.

        Each sample is binned by its top predicted probability under the
        row of its true label; tp grows when the argmax prediction agrees
        with that label.

        Args:
            output (tensor): logits or predictions of model
            target (tensor): labels
        """
        # Move tensors to host memory and normalize scores to probabilities.
        output = to_prob(output.detach().cpu().numpy())
        target = target.detach().cpu().numpy()

        # Clip so a confidence of exactly 1.0 still falls into the top bin
        # instead of indexing one past the end.
        output = np.clip(output, 0, 0.9999)

        for sample_probs, label in zip(output, target):
            # Labels may arrive as a float dtype; indexing needs an int.
            label = int(label)
            confidence = sample_probs.max()
            predicted = sample_probs.argmax()
            bin_id = int(math.floor(confidence * self.n_bins))
            self.samples[label, bin_id] += 1
            self.tp[label, bin_id] += int(predicted == label)
Example 5
0
 def wrapper(self, probabilities):
     # Expected shape : [n_sample, n_classes, ..., n_iterations]
     # Normalize raw scores to probabilities, then delegate to the wrapped
     # method; `fn` is captured from the enclosing (decorator) scope —
     # presumably the method being decorated, but that scope is not visible here.
     probabilities = to_prob(probabilities)
     return fn(self, probabilities)