Example no. 1
0
    def test_bad(self):
        """A poorly matching synthetic sample should land in the lower half of the score range."""
        score = DiscreteKLDivergence.compute(self.real(), self.bad())
        scaled = DiscreteKLDivergence.normalize(score)

        # Raw and normalized scores both stay within [0, 0.5).
        assert score >= 0
        assert score < 0.5
        assert scaled >= 0
        assert scaled < 0.5
Example no. 2
0
    def test_awful(self):
        """Completely disjoint distributions (ones vs. zeros) should score near zero."""
        score = DiscreteKLDivergence.compute(self.ones(), self.zeros())
        scaled = DiscreteKLDivergence.normalize(score)

        # Both values must fall within [0.0, 0.1).
        assert 0.0 <= score and score < 0.1
        assert 0.0 <= scaled and scaled < 0.1
Example no. 3
0
    def test_good(self):
        """A close synthetic sample should land in the upper half of the score range."""
        score = DiscreteKLDivergence.compute(self.real(), self.good())
        scaled = DiscreteKLDivergence.normalize(score)

        # Both values must lie in (0.5, 1].
        assert score > 0.5
        assert score <= 1
        assert scaled > 0.5
        assert scaled <= 1
Example no. 4
0
    def test_perfect(self):
        """Identical distributions should yield the maximum score of exactly 1."""
        score = DiscreteKLDivergence.compute(self.ones(), self.ones())
        scaled = DiscreteKLDivergence.normalize(score)

        assert 1 == score
        assert 1 == scaled
Example no. 5
0
    def test_bad(self):
        """A poorly matching synthetic sample should score in the lower half of the range."""
        score = DiscreteKLDivergence.compute(self.real(), self.bad())

        # The raw score must fall within [0, 0.5).
        assert score >= 0
        assert score < 0.5
Example no. 6
0
    def test_good(self):
        """A close synthetic sample should score in the upper half of the range."""
        score = DiscreteKLDivergence.compute(self.real(), self.good())

        # The raw score must lie in (0.5, 1].
        assert score > 0.5
        assert score <= 1
Example no. 7
0
    def test_awful(self):
        """Completely disjoint distributions (ones vs. zeros) should score near zero."""
        score = DiscreteKLDivergence.compute(self.ones(), self.zeros())

        # The raw score must fall within [0.0, 0.1).
        assert 0.0 <= score and score < 0.1
Example no. 8
0
    def test_perfect(self):
        """Identical distributions should yield the maximum score of exactly 1."""
        score = DiscreteKLDivergence.compute(self.ones(), self.ones())

        assert 1 == score