def test_add(self):
    a = torch.empty(5, 5, device='msnpu', requires_grad=True)
    self.assertEqual(msnpu_extension.get_test_int(), 0)

    b = torch.empty(5, 5, device='msnpu')
    self.assertEqual(msnpu_extension.get_test_int(), 0)

    # a + b dispatches to the extension's add override, which reports 1.
    c = a + b
    self.assertEqual(msnpu_extension.get_test_int(), 1)
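# Note: torch.add(a, b) takes the same dispatch path as a + b, so either
# spelling exercises the extension's add override.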
def test_zeros(self):
    a = torch.empty(5, 5, device='cpu')
    self.assertEqual(a.device, torch.device('cpu'))

    b = torch.empty(5, 5, device='msnpu')
    self.assertEqual(b.device, torch.device('msnpu', 0))
    self.assertEqual(msnpu_extension.get_test_int(), 0)
    self.assertEqual(torch.get_default_dtype(), b.dtype)

    c = torch.empty((5, 5), dtype=torch.int64, device='msnpu')
    self.assertEqual(msnpu_extension.get_test_int(), 0)
    self.assertEqual(torch.int64, c.dtype)
def test_backwards(self):
    a = torch.zeros(5, 5, device='msnpu', requires_grad=True)
    self.assertEqual(msnpu_extension.get_test_int(), 0)

    b = torch.zeros(5, 5, device='msnpu')
    self.assertEqual(msnpu_extension.get_test_int(), 0)

    # Forward: kl_div reports 3, the subsequent sum reports 2.
    c = torch.kl_div(a, b)
    self.assertEqual(msnpu_extension.get_test_int(), 3)

    d = c.sum()
    self.assertEqual(msnpu_extension.get_test_int(), 2)

    # Backward: the last override hit during the backward pass reports 4.
    d.backward()
    self.assertEqual(msnpu_extension.get_test_int(), 4)
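# For reference, the ids that get_test_int() reports in the assertions above
# and below (the extension itself defines the mapping): 0 after tensor
# construction, 1 after add, 2 after sum and the convolution forward, 3 after
# kl_div and the convolution backward, 4 after kl_div backward.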
def test_conv_backend_override(self):
    # To simplify the test, we use 4d input here to avoid the view4d call
    # in _convolution (which would need more overrides).
    input = torch.empty(2, 4, 10, 2, device='msnpu', requires_grad=True)
    weight = torch.empty(6, 4, 2, 2, device='msnpu', requires_grad=True)
    bias = torch.empty(6, device='msnpu')

    # Make sure forward is overridden
    out = torch.nn.functional.conv1d(input, weight, bias, 2, 0, 1, 1)
    self.assertEqual(msnpu_extension.get_test_int(), 2)
    self.assertEqual(out.shape[0], input.shape[0])
    self.assertEqual(out.shape[1], weight.shape[0])

    # Make sure backward is overridden.
    # Double backward is dispatched to _convolution_double_backward.
    # It is not tested here as it involves more computation/overrides.
    grad = torch.autograd.grad(out, input, out, create_graph=True)
    self.assertEqual(msnpu_extension.get_test_int(), 3)
    self.assertEqual(grad[0].shape, input.shape)
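# How msnpu_extension gets built and imported is outside this excerpt. A
# minimal sketch, assuming a JIT build via torch.utils.cpp_extension.load
# (the source file name "msnpu_extension.cpp" is an assumption; the real
# build setup may differ):
#
#     import torch.utils.cpp_extension
#
#     msnpu_extension = torch.utils.cpp_extension.load(
#         name='msnpu_extension',
#         sources=['msnpu_extension.cpp'],
#         verbose=True,
#     )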