Code example #1
    def test_entropy_for_uniform_distribution(self, device: str):
        """A uniform distribution over 4 classes has entropy ln(4) ~= 1.38629436."""
        metric = Entropy()
        uniform_logits = torch.ones(2, 4, dtype=torch.float, device=device)
        metric(uniform_logits)
        assert_allclose(metric.get_metric()["entropy"], 1.38629436)
        # The magnitude of the logits shouldn't affect a uniform distribution:
        scaled_logits = torch.full((2, 4), 2.0, dtype=torch.float, device=device)
        metric(scaled_logits)
        assert_allclose(metric.get_metric()["entropy"], 1.38629436)

        # Resetting must clear the running accumulators.
        metric.reset()
        assert metric._entropy == 0.0
        assert metric._count == 0.0
Code example #2
    def test_entropy_for_uniform_distribution(self):
        """Uniform logits over four classes give entropy ln(4) ~= 1.38629436."""
        metric = Entropy()
        metric(torch.ones(2, 4))
        numpy.testing.assert_almost_equal(metric.get_metric(), 1.38629436)
        # Scaling all logits equally shouldn't affect a uniform distribution:
        metric(torch.ones(2, 4) * 2)
        numpy.testing.assert_almost_equal(metric.get_metric(), 1.38629436)

        # Resetting must clear the running accumulators.
        metric.reset()
        assert metric._entropy == 0.0
        assert metric._count == 0.0
Code example #3
    def test_entropy_for_uniform_distribution(self):
        """Entropy of a uniform 4-way distribution is ln(4) ~= 1.38629436."""
        expected_entropy = 1.38629436
        metric = Entropy()

        logits = torch.full((2, 4), 1.0)
        metric(logits)
        numpy.testing.assert_almost_equal(metric.get_metric(), expected_entropy)

        # Shifting every logit by the same amount shouldn't affect a
        # uniform distribution:
        metric(torch.full((2, 4), 2.0))
        numpy.testing.assert_almost_equal(metric.get_metric(), expected_entropy)

        # Resetting must clear the running accumulators.
        metric.reset()
        assert metric._entropy == 0.0
        assert metric._count == 0.0