Example #1
    def test_perplexity(self):
        nll = NLLLoss()
        ppl = Perplexity()
        # Accumulate the same batch into both criteria.
        nll.eval_batch(self.outputs, self.batch)
        ppl.eval_batch(self.outputs, self.batch)

        nll_loss = nll.get_loss()
        ppl_loss = ppl.get_loss()

        # Perplexity is defined as exp(NLL), so the two losses must agree.
        self.assertAlmostEqual(ppl_loss, math.exp(nll_loss))
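
All three variants check the same identity: perplexity is the exponential of the average negative log-likelihood, so ppl.get_loss() must equal math.exp(nll.get_loss()). Example #1 also assumes a setUp that provides self.outputs and self.batch, which the snippet does not show. A minimal standalone sketch of the identity itself in plain PyTorch (names, shapes, and the log_softmax input are my own choices, not taken from the snippets):

    import math

    import torch
    import torch.nn.functional as F

    # Log-probabilities for a batch of 4 samples over 5 classes.
    log_probs = F.log_softmax(torch.randn(4, 5), dim=-1)
    targets = torch.randint(0, 5, (4,))

    nll = F.nll_loss(log_probs, targets)  # mean negative log-likelihood
    ppl = torch.exp(nll)                  # perplexity = exp(NLL)
    assert math.isclose(ppl.item(), math.exp(nll.item()), rel_tol=1e-6)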
Example #2
    def test_perplexity(self):
        nll = NLLLoss()
        ppl = Perplexity()
        # Feed each (output, target) batch from the fixture to both criteria.
        for output, target in zip(self.outputs, self.targets):
            nll.eval_batch(output, target)
            ppl.eval_batch(output, target)

        nll_loss = nll.get_loss()
        ppl_loss = ppl.get_loss()

        self.assertAlmostEqual(ppl_loss, math.exp(nll_loss))
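
Example #2 reads self.outputs and self.targets off the test fixture, so it only runs with a matching setUp. A minimal sketch of that fixture, assuming the same data generation that Example #3 inlines (the class name is hypothetical):

    import random
    import unittest

    import torch
    import torch.nn.functional as F

    class PerplexityTest(unittest.TestCase):  # hypothetical class name
        def setUp(self):
            num_class, num_batch, batch_size = 5, 10, 5
            # Same data generation as Example #3, moved into the fixture.
            self.outputs = [F.softmax(torch.randn(batch_size, num_class), dim=-1)
                            for _ in range(num_batch)]
            self.targets = [torch.LongTensor([random.randint(0, num_class - 1)
                                              for _ in range(batch_size)])
                            for _ in range(num_batch)]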
Example #3
    def test_perplexity(self):
        num_class = 5
        num_batch = 10
        batch_size = 5

        # Random class distributions and integer targets for each batch.
        outputs = [F.softmax(torch.randn(batch_size, num_class), dim=-1)
                   for _ in range(num_batch)]
        targets = [torch.LongTensor([random.randint(0, num_class - 1)
                                     for _ in range(batch_size)])
                   for _ in range(num_batch)]

        nll = NLLLoss()
        ppl = Perplexity()
        for output, target in zip(outputs, targets):
            nll.eval_batch(output, target)
            ppl.eval_batch(output, target)

        nll_loss = nll.get_loss()
        ppl_loss = ppl.get_loss()

        self.assertAlmostEqual(ppl_loss, math.exp(nll_loss))
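
None of the snippets show their imports. A module containing these tests would need roughly the following; the seq2seq.loss import path matches the IBM/pytorch-seq2seq project these loss classes appear to come from, and should be treated as an assumption:

    import math
    import random
    import unittest

    import torch
    import torch.nn.functional as F

    # Assumed import path for the loss classes (pytorch-seq2seq layout).
    from seq2seq.loss import NLLLoss, Perplexity

One caveat worth noting: Example #3 feeds softmax probabilities rather than log-probabilities to the criteria. The assertion still passes because both losses see identical inputs, and the test only checks the exp relation between them, not that the inputs are valid log-probabilities.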