Example #1
    def test_weighted_entropy(self):
        # The two classes sit at different distances, so the weighted class
        # distribution is no longer uniform and the entropy falls below
        # the 1-bit binary maximum
        sample = torch.Tensor([[0, 0, 1, 1]])
        distances = torch.Tensor([[1, 1, 2, 2]])
        self.assertLess(entropy(sample, distances, use_weights=True), 1.)

        # Both classes see the same distance profile, so the weighted
        # distribution stays uniform and the entropy is exactly 1 bit
        sample = torch.Tensor([[0, 0, 0, 1, 1, 1]])
        distances = torch.Tensor([[1, 2, 3, 1, 2, 3]])
        self.assertEqual(entropy(sample, distances, use_weights=True), 1.)
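The tests above pin down the contract of `entropy` without showing its body. Below is a minimal sketch consistent with those assertions, assuming row-wise base-2 Shannon entropy and inverse-distance weighting; the exact weighting scheme is a guess, since the tests only fix the resulting inequalities.

import torch
from torch import Tensor
from typing import Optional


def entropy(sample: Tensor,
            distances: Optional[Tensor] = None,
            use_weights: bool = False) -> Tensor:
    # Row-wise Shannon entropy (base 2) of the class labels in `sample`.
    # With use_weights=True each label contributes 1/distance instead of 1,
    # so closer neighbours carry more mass (assumed scheme).
    if use_weights:
        assert distances is not None, 'Distances are required for weighted entropy'

    entropies = []
    for row in range(sample.shape[0]):
        labels = sample[row]
        weights = 1. / distances[row] if use_weights else torch.ones_like(labels)
        # Per-class mass: sum of the weights attached to each distinct label
        mass = torch.stack([weights[labels == c].sum() for c in torch.unique(labels)])
        probs = mass / mass.sum()
        entropies.append(-(probs * torch.log2(probs)).sum())
    return torch.stack(entropies)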
Example #2
    def knn_mse_loss(prediction: Tensor, target: Tensor, input_data: Tensor) -> Tensor:

        assert prediction.shape == target.shape, 'Invalid target shape!'

        # Find the k most similar vectors in the training dataset
        # (k + 1 because the query comes back as its own nearest neighbour)
        scores, indexes = index.search(input_data.numpy(), k + 1)

        # Get classes of the most similar vectors, dropping the first
        # column (the query point itself)
        knn = train_y[indexes][:, 1:]

        # Calculate entropy
        entropies = entropy(knn)
        assert entropies.shape == target.shape, 'Invalid entropies shape!'

        entropies = entropies.reshape(-1, 1)
        loss = torch.exp(-1 * entropies) * ((target - prediction) ** 2)

        assert loss.shape == prediction.shape, 'Invalid loss shape!'
        return loss.mean()
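`knn_mse_loss` closes over `index`, `k` and `train_y`, none of which are defined in the snippet. One plausible setup, assuming a faiss exact-L2 index built over the training embeddings (the variable names mirror the snippet; the data here is a placeholder):

import faiss
import numpy as np
import torch

k = 5                                                  # number of neighbours used for weighting
train_x = np.random.rand(1000, 32).astype('float32')   # training embeddings (placeholder)
train_y = torch.randint(0, 2, (1000,)).float()         # training labels (placeholder)

index = faiss.IndexFlatL2(train_x.shape[1])            # exact L2 search, no training step
index.add(train_x)
# index.search(queries, k + 1) now returns each query as its own first
# neighbour, which is why the loss above searches for k + 1 and drops
# the first column of the retrieved labels.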
Example #3
    def entropy_weighted_bin_loss(
            prediction: Tensor,
            target: Tensor,
            inputs: Tensor,
            reduction: str = 'mean') -> Union[float, Tensor]:

        # Classes of the nearest training points; exclude_query keeps the
        # query point out of its own neighbour list
        _, _, classes = knn.get(inputs.numpy(), exclude_query=True)
        classes = Tensor(classes)

        entropies = entropy(classes)
        assert entropies.shape == target.shape, 'Invalid entropies shape!'

        base_loss = base_loss_function(prediction, target, reduction='none')
        assert base_loss.shape == target.shape, 'Invalid base loss shape!'

        # Down-weight examples whose neighbourhood labels are ambiguous:
        # the higher the local entropy, the smaller the weight
        loss = torch.exp(-1 * entropies) * base_loss

        reduction_method = get_reduction_method(reduction)
        return reduction_method(loss)
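The element-wise loss is handed to `get_reduction_method`, which also appears in the next example. A minimal version consistent with the `reduction: str = 'mean'` signature, mirroring torch's own 'mean' / 'sum' / 'none' convention (the 'none' pass-through is an assumption):

from typing import Callable

import torch
from torch import Tensor


def get_reduction_method(reduction: str) -> Callable[[Tensor], Tensor]:
    # Map the reduction name onto a callable; 'none' returns the loss unreduced
    methods = {
        'mean': torch.mean,
        'sum': torch.sum,
        'none': lambda loss: loss,
    }
    assert reduction in methods, f'Unknown reduction: {reduction}'
    return methods[reduction]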
Example #4
    def weighted_cross_entropy(
            prediction: Tensor,
            target: Tensor,
            inputs: Tensor,
            reduction: str = 'mean') -> Union[float, Tensor]:

        n_classes = prediction.shape[1]

        # Retrieve nearest points and their classes
        _, _, nn_classes = knn.get(inputs.numpy(), exclude_query=True)
        nn_classes = Tensor(nn_classes)
        nn_class_entropy = entropy(nn_classes)

        # Convert the target vector into a probability distribution
        target = convert_logits_to_class_distribution(target, n_classes)

        # Apply log-softmax to the model output
        prediction = logsoftmax(prediction)

        loss = (-torch.sum(target * prediction, dim=1).reshape(-1, 1) *
                torch.exp(-nn_class_entropy))

        reduction_method = get_reduction_method(reduction)
        return reduction_method(loss)
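Two more names are assumed by `weighted_cross_entropy`: `logsoftmax` and `convert_logits_to_class_distribution`. The sketches below are guesses from the call sites alone; `logsoftmax` is presumably a `LogSoftmax` module over the class dimension, and the target conversion is taken to be a one-hot encoding of integer class labels:

import torch
import torch.nn.functional as F
from torch import Tensor

logsoftmax = torch.nn.LogSoftmax(dim=1)  # log-probabilities over the class dimension


def convert_logits_to_class_distribution(target: Tensor, n_classes: int) -> Tensor:
    # One-hot encode integer class labels into a (batch, n_classes)
    # distribution (assumed behaviour, inferred from the call site)
    return F.one_hot(target.long().flatten(), num_classes=n_classes).float()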
Example #5
    def test_weighted_entropy_without_distances(self):
        sample = torch.Tensor([[0, 0, 1, 1]])
        with self.assertRaises(AssertionError):
            entropy(sample, use_weights=True)
Example #6
    def test_entropy_multiple_vectors(self):
        sample = torch.Tensor([[0, 0, 0, 0], [1, 1, 1, 1]])
        self.assertTrue(torch.all(torch.eq(entropy(sample), torch.Tensor([0., 0.]))))
Example #7
    def test_multiple_class_entropy(self):
        num_classes = 5
        sample_size = 10
        sample = torch.Tensor([np.random.choice(np.arange(num_classes), size=sample_size)])
        self.assertGreaterEqual(entropy(sample), 0.)
        # Base-2 entropy over num_classes classes is at most log2(num_classes)
        self.assertLessEqual(entropy(sample), np.log2(num_classes))
Example #8
    def test_max_entropy(self):
        sample = torch.Tensor(np.array([[0, 0, 0, 1, 1, 1]]))
        self.assertEqual(entropy(sample), 1.)
Example #9
    def test_mixed_entropy(self):
        sample = torch.Tensor(np.array([[0, 0, 0, 0, 1, 1]]))
        expected_result = -1 * ((1/3) * np.log2(1/3) + (2/3) * np.log2(2/3))
        self.assertEqual(entropy(sample), expected_result)
Example #10
    def test_zero_entropy(self):
        sample = torch.Tensor(np.array([[0, 0, 0, 0, 0]]))
        self.assertEqual(entropy(sample), 0.)