Example #1
    def test_cupy_and_non_cupy_same(self):
        layer = nn.GlobalNormFlipFlop(12, 4).cuda()

        # Perform calculation using cupy
        x1 = torch.randn((100, 4, 12)).cuda()
        x1.requires_grad = True
        loss1 = layer(x1).sum()
        loss1.backward()

        # Repeat calculation using pure pytorch
        x2 = x1.detach()
        x2.requires_grad = True
        layer._never_use_cupy = True
        loss2 = layer(x2).sum()
        loss2.backward()

        # Results and gradients should match
        self.assertTrue(torch.allclose(loss1, loss2))
        # Use a higher atol when comparing gradients: the final operation is a
        # softmax, so a relative tolerance before the softmax corresponds to an
        # absolute tolerance after it. The atol here is therefore set to the
        # default rtol value.
        self.assertTrue(torch.allclose(x1.grad, x2.grad, atol=1e-05))
Example #2
    def setUp(self):
        self.layer = nn.GlobalNormFlipFlop(12, 4)
Example #3
    def setUp(self):
        torch.manual_seed(0xdeadbeef)
        np.random.seed(0xD00D00)
        self.layer = nn.GlobalNormFlipFlop(12, 4)
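
For context, these methods sit inside a unittest.TestCase. Below is a minimal sketch of the surrounding scaffolding, assuming GlobalNormFlipFlop is importable from the project's layers module under the alias nn (the exact import path is an assumption) and, for Example #1, that a CUDA-capable GPU is available:

import unittest

import numpy as np
import torch

from taiyaki import layers as nn  # assumed import path; adjust to your project


class GlobalNormFlipFlopTest(unittest.TestCase):
    def setUp(self):
        # Fix the seeds so the random inputs and weight initialisation
        # are reproducible across runs
        torch.manual_seed(0xdeadbeef)
        np.random.seed(0xD00D00)
        self.layer = nn.GlobalNormFlipFlop(12, 4)


if __name__ == '__main__':
    unittest.main()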