Example #1
def test_loss(self):
    metric = Loss()
    # Before any update, the running loss is 0
    self.assertEqual(metric.result(), 0)
    metric.update(torch.tensor(1.), self.batch_size)
    # A non-negative loss keeps the running average non-negative
    self.assertGreaterEqual(metric.result(), 0)
    # reset() clears the accumulated state
    metric.reset()
    self.assertEqual(metric.result(), 0)
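
This test exercises a running-average loss metric through update(loss, patterns), result(), and reset(); the snippet does not show where Loss is imported from (the API resembles continual-learning metric classes such as Avalanche's Loss). A minimal sketch of a class with the behavior the test asserts, as an illustration rather than the library implementation:

import torch


class Loss:
    """Running-average loss metric: a minimal sketch, not the library code."""

    def __init__(self):
        self._weighted_sum = 0.0
        self._count = 0

    def update(self, loss, patterns):
        # Accumulate the batch mean weighted by the number of patterns
        self._weighted_sum += torch.mean(loss).item() * patterns
        self._count += patterns

    def result(self):
        # 0 before any update; weighted mean over all batches afterwards
        return self._weighted_sum / self._count if self._count > 0 else 0.0

    def reset(self):
        self._weighted_sum = 0.0
        self._count = 0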
Example #2
def test_loss_multi_task(self):
    metric = Loss()
    # In multi-task mode, result() returns a per-task dict (empty at start)
    self.assertEqual(metric.result(), {})
    # The third argument is the task label
    metric.update(torch.tensor(1.0), 1, 0)
    metric.update(torch.tensor(2.0), 1, 1)
    out = metric.result()
    for k, v in out.items():
        self.assertIn(k, [0, 1])
        if k == 0:
            self.assertEqual(v, 1)
        else:
            self.assertEqual(v, 2)
    metric.reset()
    self.assertEqual(metric.result(), {})
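
Here update takes a third argument, a task label, and result() returns a dict mapping each task label to its mean loss. A hypothetical sketch of how such a variant could keep per-task state (the class name MultiTaskLoss and its internals are assumptions; the test above still calls the class Loss):

import torch
from collections import defaultdict


class MultiTaskLoss:
    """Hypothetical per-task running-average loss, for illustration only."""

    def __init__(self):
        self._sums = defaultdict(float)
        self._counts = defaultdict(int)

    def update(self, loss, patterns, task_label):
        # One weighted sum and pattern count per task label
        self._sums[task_label] += torch.mean(loss).item() * patterns
        self._counts[task_label] += patterns

    def result(self):
        # {} before any update; {task_label: mean loss} afterwards
        return {t: self._sums[t] / self._counts[t] for t in self._sums}

    def reset(self):
        self._sums.clear()
        self._counts.clear()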
Example #3
def test_standalone_forgetting(self):
    uut = Loss()

    # Initial loss should be 0
    self.assertEqual(0.0, uut.result())

    loss = torch.as_tensor([0.0, 0.1, 0.2, 0.3, 0.4])  # Avg = 0.2
    uut.update(loss, 5)

    self.assertAlmostEqual(0.2, uut.result())

    loss = torch.as_tensor([0.1, 0.2, 0.3, 0.4, 0.5])  # Avg = 0.3
    uut.update(loss, 5)

    # Running average over both batches: (0.2 * 5 + 0.3 * 5) / 10 = 0.25
    self.assertAlmostEqual(0.25, uut.result())

    # After reset, the loss should be 0 again
    uut.reset()
    self.assertEqual(0.0, uut.result())

    # Check that an all-zero loss is handled
    loss = torch.as_tensor([0.0, 0.0, 0.0])
    uut.update(loss, 3)

    self.assertEqual(0.0, uut.result())

    # Check that an unreduced (per-element) loss tensor is averaged correctly
    uut.reset()
    loss = torch.rand((5, 20))
    expected_mean = loss.mean().item()
    uut.update(loss, loss.shape[0])

    self.assertAlmostEqual(expected_mean, uut.result())

    # Check that calling result() again does not change the value
    self.assertAlmostEqual(expected_mean, uut.result())
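
For reference, the 0.25 assertion is just the weighted running mean; the sketch class from Example #1 reproduces it:

metric = Loss()  # the sketch class above, not the library class
metric.update(torch.as_tensor([0.0, 0.1, 0.2, 0.3, 0.4]), 5)  # batch mean 0.2
metric.update(torch.as_tensor([0.1, 0.2, 0.3, 0.4, 0.5]), 5)  # batch mean 0.3
print(metric.result())  # (0.2 * 5 + 0.3 * 5) / 10 = 0.25, up to float precision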