Example #1
0
    def test_bad(self):
        output = DiscreteKLDivergence.compute(self.real(), self.bad())
        normalized = DiscreteKLDivergence.normalize(output)

        assert 0 <= output < 0.5
        assert 0 <= normalized < 0.5
Example #2
0
    def test_awful(self):
        output = DiscreteKLDivergence.compute(self.ones(), self.zeros())
        normalized = DiscreteKLDivergence.normalize(output)

        assert 0.0 <= output < 0.1
        assert 0.0 <= normalized < 0.1
Example #3
0
    def test_good(self):
        output = DiscreteKLDivergence.compute(self.real(), self.good())
        normalized = DiscreteKLDivergence.normalize(output)

        assert 0.5 < output <= 1
        assert 0.5 < normalized <= 1
Example #4
0
    def test_perfect(self):
        output = DiscreteKLDivergence.compute(self.ones(), self.ones())
        normalized = DiscreteKLDivergence.normalize(output)

        assert output == 1
        assert normalized == 1
Example #5
0
    def test_bad(self):
        output = DiscreteKLDivergence.compute(self.real(), self.bad())

        assert 0 <= output < 0.5
Example #6
0
    def test_good(self):
        output = DiscreteKLDivergence.compute(self.real(), self.good())

        assert 0.5 < output <= 1
Example #7
0
    def test_awful(self):
        output = DiscreteKLDivergence.compute(self.ones(), self.zeros())

        assert 0.0 <= output < 0.1
Example #8
0
    def test_perfect(self):
        output = DiscreteKLDivergence.compute(self.ones(), self.ones())

        assert output == 1