def _test_binary_method(self):
    a = utils.gen_float_tensor(1, (2, 3))
    b = utils.gen_float_tensor(2, (2, 3))
    c = utils.gen_float_tensor(3, (2, 3))

    # The constructor is supposed to copy!
    a1 = nestedtensor.nested_tensor([a, b])
    a2 = nestedtensor.nested_tensor([b, c])
    a3 = nestedtensor.nested_tensor([
        getattr(a, "__" + func + "__")(b),
        getattr(b, "__" + func + "__")(c)
    ])
    self.assertEqual(a3, getattr(a1, "__" + func + "__")(a2))

    # The constructor is supposed to copy!
    a1 = nestedtensor.nested_tensor([a, b])
    a2 = c
    a3 = nestedtensor.nested_tensor([
        getattr(a, "__" + func + "__")(a2),
        getattr(b, "__" + func + "__")(a2)
    ])
    self.assertEqual(a3, getattr(a1, "__" + func + "__")(a2))

    a1 = c
    a2 = nestedtensor.nested_tensor([a, b])
    a3 = nestedtensor.nested_tensor([
        getattr(a1, "__" + func + "__")(a),
        getattr(a1, "__" + func + "__")(b)
    ])
    self.assertEqual(a3, getattr(a2, "__r" + func + "__")(a1))
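The `func` name above is not defined inside the test body; it is presumably captured from an enclosing generator that stamps out one test method per operator name. A minimal sketch of that pattern, as an assumption: the `_gen_test_binary_method` name, the registration loop, and the operator list are illustrative only.

import unittest

def _gen_test_binary_method(func):
    # `func` is captured by this closure, so each generated test
    # exercises exactly one operator name (e.g. "add" -> __add__/__radd__).
    def _test_binary_method(self):
        self.assertTrue(hasattr(float, "__" + func + "__"))
        self.assertTrue(hasattr(float, "__r" + func + "__"))
    return _test_binary_method

class TestBinaryMethods(unittest.TestCase):
    pass

# Register one test method per operator on the TestCase.
for _func in ["add", "mul", "sub"]:
    setattr(TestBinaryMethods, "test_" + _func,
            _gen_test_binary_method(_func))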
def test_as_nested_tensor(self):
    tensors = []
    num_tensors = 16
    for i in range(num_tensors):
        tensors.append(utils.gen_float_tensor(i, (i + 1, 128, 128)))

    # This should NOT create references
    nested_tensor = nestedtensor.as_nested_tensor(tensors)
    for i in range(num_tensors):
        tensors[i].mul_(i + 2)
    for i in range(num_tensors):
        self.assertNotEqual(tensors[i], nested_tensor.unbind()[i])

    # This should NOT create references
    nested_tensor = nestedtensor.nested_tensor(tensors)
    for i in range(num_tensors):
        tensors[i].mul_(i + 2)
    for i in range(num_tensors):
        self.assertNotEqual(tensors[i], nested_tensor.unbind()[i])

    nested_tensor1 = nestedtensor.as_nested_tensor(nested_tensor)
    self.assertTrue(nested_tensor1 is nested_tensor)

    self.assertRaises(
        NotImplementedError,
        lambda: nestedtensor.as_nested_tensor(nested_tensor, dtype=torch.int64))
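The copy semantics asserted above can be seen directly. A minimal usage sketch, assuming only that the `nestedtensor` package is importable and that `unbind()` returns regular Tensors, as the tests themselves rely on:

import torch
import nestedtensor

t = torch.ones(2, 3)
nt = nestedtensor.nested_tensor([t])  # constructor copies its inputs
t.mul_(2.0)                           # in-place edit of the source tensor
# The NestedTensor still holds the original values, so they now differ.
assert not torch.equal(t, nt.unbind()[0])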
def _test_binary(self):
    a = utils.gen_float_tensor(1, (2, 3))
    b = utils.gen_float_tensor(2, (2, 3))
    c = utils.gen_float_tensor(3, (2, 3))

    # The constructor is supposed to copy!
    a1 = nestedtensor.nested_tensor([a, b])
    a2 = nestedtensor.nested_tensor([b, c])
    a1_l = nestedtensor.as_nested_tensor([a.clone(), b.clone()])
    a2_l = nestedtensor.as_nested_tensor([b.clone(), c.clone()])
    a3 = nestedtensor.nested_tensor(
        [getattr(torch, func)(a, b),
         getattr(torch, func)(b, c)])
    a3_l = nestedtensor.as_nested_tensor(a3)
    self.assertEqual(a3_l, getattr(torch, func)(a1_l, a2_l))
    self.assertEqual(a3_l, getattr(torch, func)(a1, a2))
    self.assertEqual(a3, getattr(a1, func)(a2))
    self.assertEqual(a3, getattr(a1, func + "_")(a2))
    self.assertEqual(a3, a1)
def _test_binary(self):
    a = utils.gen_float_tensor(1, (2, 3))
    b = utils.gen_float_tensor(2, (2, 3))
    c = utils.gen_float_tensor(3, (2, 3))

    # The constructor is supposed to copy!
    a1 = nestedtensor.nested_tensor([a, b])
    a2 = nestedtensor.nested_tensor([b, c])
    a3 = nestedtensor.nested_tensor(
        [getattr(torch, func)(a, b),
         getattr(torch, func)(b, c)])
    self.assertEqual(a3, getattr(torch, func)(a1, a2))
    self.assertEqual(a3, getattr(a1, func)(a2))
    self.assertEqual(a3, getattr(a1, func + "_")(a2))
    self.assertEqual(a3, a1)

    # The constructor is supposed to copy!
    a1 = nestedtensor.nested_tensor([a, b])
    a2 = c
    a3 = nestedtensor.nested_tensor(
        [getattr(torch, func)(a, a2),
         getattr(torch, func)(b, a2)])
    self.assertEqual(a3, getattr(torch, func)(a1, a2))
    self.assertEqual(a3, getattr(a1, func)(a2))
    self.assertEqual(a3, getattr(a1, func + "_")(a2))
    self.assertEqual(a3, a1)

    # The constructor is supposed to copy!
    a1 = c
    a2 = nestedtensor.nested_tensor([a, b])
    a3 = nestedtensor.nested_tensor(
        [getattr(torch, func)(c, a),
         getattr(torch, func)(c, b)])
    self.assertEqual(a3, getattr(torch, func)(a1, a2))
    # TODO: This depends on https://github.com/pytorch/rfcs/pull/3
    # (RFC-0001: Add method __torch_function__).
    # TODO: This causes a segfault, likely due to https://github.com/pytorch/pytorch/pull/37091
    self.assertEqual(a3, getattr(a1, func)(a2))
    # In-place methods cannot be applied to regular Tensors given a
    # NestedTensor as the other argument.
    # TODO: Only sub doesn't adhere to this rule, and with irregular behavior.
    if func != "sub":
        self.assertRaises(RuntimeError,
                          lambda: getattr(a1, func + "_")(a2))
def test_unbind(self):
    a = torch.tensor([1, 2])
    b = torch.tensor([7, 8])
    nt = nestedtensor.nested_tensor([a, b])
    a1, b1 = nt.unbind()
    self.assertEqual(a, a1)
    self.assertEqual(b, b1)

    a = utils.gen_float_tensor(1, (2, 3)).add_(1)
    nt = nestedtensor.nested_tensor([a])
    self.assertEqual(a, nt.unbind()[0])
def test_list_constructor(self):
    """
    This tests whether nestedtensor.as_nested_tensor stores Variables that
    share storage with the input Variables used for construction.
    """
    tensors = []
    num_tensors = 16
    for i in range(num_tensors):
        tensors.append(utils.gen_float_tensor(i, (i + 1, 128, 128)))
    nested_tensor = nestedtensor.as_nested_tensor(tensors)
    for i in range(num_tensors):
        tensors[i].mul_(i + 2)
    for i in range(num_tensors):
        self.assertNotEqual(tensors[i], nested_tensor.unbind()[i])
        self.assertNotEqual(tensors[i].storage().data_ptr(),
                            nested_tensor.unbind()[i].storage().data_ptr())
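The `storage().data_ptr()` comparison above is the standard aliasing check in PyTorch: two tensors share memory exactly when their storages report the same base pointer. A small self-contained illustration using only core torch:

import torch

x = torch.ones(4)
y = x.view(2, 2)   # a view shares storage with x
z = x.clone()      # a clone gets fresh storage
assert x.storage().data_ptr() == y.storage().data_ptr()
assert x.storage().data_ptr() != z.storage().data_ptr()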
def test_constructor(self):
    tensors = []
    num_tensors = 16
    for i in range(num_tensors):
        tensors.append(utils.gen_float_tensor(i, (i + 1, 128, 128)))
    nested_tensor = nestedtensor.nested_tensor(tensors)
    for i in range(num_tensors):
        tensors[i].mul_(i + 2)
    for i in range(num_tensors):
        self.assertNotEqual(tensors[i], nested_tensor.unbind()[i])

    self.assertRaises(
        ValueError, lambda: nestedtensor.nested_tensor(torch.tensor([3.0])))
    self.assertRaises(
        ValueError, lambda: nestedtensor.nested_tensor(
            nestedtensor.nested_tensor([torch.tensor([3.0])])))
    self.assertRaises(
        TypeError, lambda: nestedtensor.nested_tensor([
            torch.tensor([2.0]),
            nestedtensor.nested_tensor([torch.tensor([3.0])])
        ]))
    self.assertRaises(TypeError, lambda: nestedtensor.nested_tensor(4.0))
def _test(a, b, c, d, e):
    nt = nestedtensor.nested_tensor([a, b])
    a1, b1 = nt.unbind()
    self.assertTrue(a is not a1)
    self.assertTrue(b is not b1)

    nt1 = nestedtensor.nested_tensor([[c, d], [e]])
    nt11, nt12 = unbind_fn(nt1, 0)
    c1, d1 = unbind_fn(nt11, 0)
    e1 = unbind_fn(nt12, 0)[0]
    self.assertTrue(c is not c1)
    self.assertTrue(d is not d1)
    self.assertTrue(e is not e1)

    nt = nestedtensor.nested_tensor([a, b])
    a1, b1 = unbind_fn(nt, 0)
    self.assertEqual(a, a1)
    self.assertEqual(b, b1)

    a = utils.gen_float_tensor(1, (2, 3)).add_(1)
    nt = nestedtensor.nested_tensor([a])
    self.assertEqual(a, unbind_fn(nt, 0)[0])
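The identity checks above pin down the unbind contract: constituents come back equal in value but as distinct objects, because the constructor copied its inputs. A minimal sketch of that behavior (`unbind_fn` is a helper passed in from outside this section; the sketch uses the plain `unbind()` method instead):

import torch
import nestedtensor

a = torch.tensor([1, 2])
nt = nestedtensor.nested_tensor([a])
a1 = nt.unbind()[0]
assert torch.equal(a, a1)  # same values...
assert a is not a1         # ...but a distinct Tensor object (copied)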
def _test_binary(self):
    a = utils.gen_float_tensor(1, (2, 3))  # * 0 + 1
    b = utils.gen_float_tensor(2, (2, 3))  # * 0 + 2
    c = utils.gen_float_tensor(3, (2, 3))  # * 0 + 3
    d = utils.gen_float_tensor(4, (3, 2))  # * 0 + 4
    s = utils.gen_float_tensor(5, (1,))  # * 0 + 5
    torch_func = getattr(torch, func)

    a1 = ntnt([a, b])
    if no_grad:
        a2 = ntnt_nograd([b, c])
    else:
        a2 = ntnt([b, c])
    a3 = ntnt([torch_func(a, b), torch_func(b, c)])
    res1 = torch_func(a1, a2)
    if not no_grad:
        res1.sum().backward()
        self.assertIsNotNone(a1.grad)
    if no_grad:
        self.assertIsNone(a2.grad)
    else:
        self.assertIsNotNone(a2.grad)
    self.assertEqual(a3, torch_func(a1, a2))
    self.assertEqual(a3, getattr(a1, func)(a2))
    a1.detach_()
    a2.detach_()
    a3.detach_()
    self.assertEqual(a3, getattr(a1, func + "_")(a2))
    self.assertEqual(a3, a1)

    # Test NT x T
    a1 = ntnt([a, b])
    a2 = c
    a3 = ntnt([torch_func(a, a2), torch_func(b, a2)])
    self.assertEqual(a3, torch_func(a1, a2))
    self.assertEqual(a3, getattr(a1, func)(a2))

    # Test NT x T with broadcasting
    if func not in ["pow", "atan2"]:
        a1 = ntnt([a, b])
        a2 = torch.tensor([1, 2]).reshape(-1, 1, 1)
        a3 = ntnt([torch_func(a, 1), torch_func(b, 2)])
        self.assertEqual(a3, torch_func(a1, a2))
        self.assertEqual(a3, getattr(a1, func)(a2))
        a1 = ntnt([a, d])
        self.assertEqual(ntnt([torch_func(a, s), torch_func(d, s)]),
                         torch_func(a1, s))
        a1 = ntnt([a, b])
        self.assertEqual(ntnt([torch_func(a, c), torch_func(b, c)]),
                         torch_func(a1, c.reshape(1, 2, 3)))
        result = ntnt([torch_func(c, a), torch_func(c, b)])
        if no_grad:
            a1.detach_()
            result.detach_()
        self.assertEqual(result, torch_func(c.reshape(1, 2, 3), a1))

    a1 = a1.detach()
    a3 = a3.detach()
    self.assertEqual(a3, getattr(a1, func + "_")(a2))
    self.assertEqual(a3, a1)

    # The constructor is supposed to copy!
    a1 = c
    a2 = ntnt([a, b])
    a3 = ntnt([torch_func(c, a), torch_func(c, b)])
    if no_grad:
        a2.detach_()
        a3.detach_()
    self.assertEqual(a3, torch_func(a1, a2))
    self.assertEqual(a3, getattr(a1, func)(a2))
    # In-place methods cannot be applied to regular Tensors given a
    # NestedTensor as the other argument.
    # TODO: Only sub doesn't adhere to this rule, and with irregular behavior.
    if func == "add":
        self.assertEqual(c + a + b, getattr(a1, func + "_")(a2))

    # Test autograd.
    a = utils.gen_float_tensor(1, (2, 3)).requires_grad_()
    b = utils.gen_float_tensor(2, (2, 3)).requires_grad_()
    c = utils.gen_float_tensor(3, (2, 3)).requires_grad_()
    a1 = ntnt([a, b])
    if no_grad:
        a2 = ntnt_nograd([b, c])
    else:
        a2 = ntnt([b, c])
    if no_grad:
        a3 = ntnt([torch_func(a, b.detach()), torch_func(b, c.detach())])
    else:
        a3 = ntnt([torch_func(a, b), torch_func(b, c)])
    result = torch_func(a1, a2)
    if not no_grad:
        result.sum().backward()
    if no_grad:
        c.detach_()
    if not no_grad:
        # This is used to exercise the tree reduction in the
        # gradient calculation.
        a1 = ntnt([a, b, c])
        result = torch_func(a1, c)
        result.sum().backward()
        a_0 = a.clone().detach().requires_grad_()
        b_0 = b.clone().detach().requires_grad_()
        c_0 = c.clone().detach().requires_grad_()
        c_1 = c.clone().detach().requires_grad_()
        result_a = torch_func(a_0, c_1)
        result_b = torch_func(b_0, c_1)
        result_c = torch_func(c_0, c_1)
        result_a.sum().backward()
        result_b.sum().backward()
        result_c.sum().backward()
        self.assertEqual(c.grad, c_1.grad)
    if no_grad:
        a1.detach_()
    result = torch_func(c, a1)
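The helpers `ntnt` and `ntnt_nograd` used above are defined outside this section. A plausible sketch of their definitions, inferred from how the tests use them (an assumption; the real helpers may differ, including the exact constructor keyword):

import nestedtensor

def ntnt(tensors):
    # NestedTensor that participates in autograd.
    return nestedtensor.nested_tensor(tensors, requires_grad=True)

def ntnt_nograd(tensors):
    # NestedTensor with autograd disabled.
    return nestedtensor.nested_tensor(tensors)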
def _test_binary(self):
    a = utils.gen_float_tensor(1, (2, 3))  # * 0 + 1
    b = utils.gen_float_tensor(2, (2, 3))  # * 0 + 2
    c = utils.gen_float_tensor(3, (2, 3))  # * 0 + 3
    d = utils.gen_float_tensor(4, (3, 2))  # * 0 + 4
    s = utils.gen_float_tensor(5, (1,))  # * 0 + 5
    torch_func = getattr(torch, func)

    a1 = ntnt([a, b])
    if no_grad:
        a2 = ntnt_nograd([b, c])
    else:
        a2 = ntnt([b, c])
    a3 = ntnt([torch_func(a, b), torch_func(b, c)])
    res1 = torch_func(a1, a2)
    if not no_grad:
        res1.sum().backward()
        self.assertIsNotNone(a1.grad)
    if no_grad:
        self.assertIsNone(a2.grad)
    else:
        self.assertIsNotNone(a2.grad)
    self.assertEqual(a3, torch_func(a1, a2))
    self.assertEqual(a3, getattr(a1, func)(a2))
    # a1.detach_()
    # a2.detach_()
    # a3.detach_()
    self.assertEqual(a3, getattr(a1, func + "_")(a2))
    self.assertEqual(a3, a1)

    # Test NT x T
    a1 = ntnt([a, b])
    a2 = c
    a3 = ntnt([torch_func(a, a2), torch_func(b, a2)])
    self.assertEqual(a3, torch_func(a1, a2))
    self.assertEqual(a3, getattr(a1, func)(a2))

    # Test NT x T with broadcasting
    if func not in ["pow", "atan2"]:
        a1 = ntnt([a, b])
        a2 = torch.tensor([1, 2]).reshape(-1, 1, 1)
        a3 = ntnt([torch_func(a, 1), torch_func(b, 2)])
        self.assertEqual(a3, torch_func(a1, a2))
        self.assertEqual(a3, getattr(a1, func)(a2))
        a1 = ntnt([a, d])
        self.assertEqual(ntnt([torch_func(a, s), torch_func(d, s)]),
                         torch_func(a1, s))
        a1 = ntnt([a, b])
        self.assertEqual(ntnt([torch_func(a, c), torch_func(b, c)]),
                         torch_func(a1, c.reshape(1, 2, 3)))
        result = ntnt([torch_func(c, a), torch_func(c, b)])
        # if no_grad:
        #     a1.detach_()
        #     result.detach_()
        self.assertEqual(result, torch_func(c.reshape(1, 2, 3), a1))

    # a1 = a1.detach()
    # a3 = a3.detach()
    self.assertEqual(a3, getattr(a1, func + "_")(a2))
    self.assertEqual(a3, a1)

    # The constructor is supposed to copy!
    a1 = c
    a2 = ntnt([a, b])
    a3 = ntnt([torch_func(c, a), torch_func(c, b)])
    # if no_grad:
    #     a2.detach_()
    #     a3.detach_()
    self.assertEqual(a3, torch_func(a1, a2))
    self.assertEqual(a3, getattr(a1, func)(a2))
    # In-place methods cannot be applied to regular Tensors given a
    # NestedTensor as the other argument.
    # TODO: Only sub doesn't adhere to this rule, and with irregular behavior.
    if func == "add":
        self.assertEqual(c + a + b, getattr(a1, func + "_")(a2))
def test_nll_loss(self):
    # Placeholder: generates candidate inputs but does not yet exercise
    # nll_loss; the results are discarded.
    utils.gen_float_tensor(1, (40, 5))
    utils.gen_float_tensor(1, (40,))
def _test_binary(self):
    a = utils.gen_float_tensor(1, (2, 3)) * 0 + 1
    b = utils.gen_float_tensor(2, (2, 3)) * 0 + 2
    c = utils.gen_float_tensor(3, (2, 3)) * 0 + 3

    # The constructor is supposed to copy!
    a1 = ntnt([a, b])
    if func == "remainder":
        a2 = ntnt_nograd([b, c])
    else:
        a2 = ntnt([b, c])
    a3 = ntnt([getattr(torch, func)(a, b),
               getattr(torch, func)(b, c)])
    res1 = getattr(torch, func)(a1, a2)
    res1.sum().backward()
    self.assertIsNotNone(a1.grad)
    if func == "remainder":
        self.assertIsNone(a2.grad)
    else:
        self.assertIsNotNone(a2.grad)
    self.assertEqual(a3, getattr(torch, func)(a1, a2))
    self.assertEqual(a3, getattr(a1, func)(a2))
    a1 = a1.detach()
    a3.detach_()
    self.assertEqual(a3, getattr(a1, func + "_")(a2))
    self.assertEqual(a3, a1)

    # The constructor is supposed to copy!
    a1 = ntnt([a, b])
    a2 = c
    a3 = ntnt([getattr(torch, func)(a, a2),
               getattr(torch, func)(b, a2)])
    self.assertEqual(a3, getattr(torch, func)(a1, a2))
    self.assertEqual(a3, getattr(a1, func)(a2))
    # TODO: Add check for broadcasting smaller tensors / tensor constituents.
    # self.assertRaisesRegex(RuntimeError,
    #     "tensor dimension of self must match or be greater than dimension of other.",
    #     lambda: getattr(torch, func)(a1, c.reshape(1, 2, 3)))
    # if func == "remainder":
    #     a1.detach_()
    #     self.assertRaisesRegex(RuntimeError,
    #         "tensor dimension of other must match or be greater than dimension of self.",
    #         lambda: getattr(torch, func)(c.reshape(1, 2, 3), a1))
    # self.assertRaisesRegex(RuntimeError,
    #     "tensor dimension of other must match or be greater than dimension of self.",
    #     lambda: getattr(torch, func)(c.reshape(1, 2, 3), a1))
    a1 = a1.detach()
    a3 = a3.detach()
    self.assertEqual(a3, getattr(a1, func + "_")(a2))
    self.assertEqual(a3, a1)

    # The constructor is supposed to copy!
    a1 = c
    a2 = ntnt([a, b])
    a3 = ntnt([getattr(torch, func)(c, a),
               getattr(torch, func)(c, b)])
    if func == "remainder":
        a2.detach_()
        a3.detach_()
    self.assertEqual(a3, getattr(torch, func)(a1, a2))
    # TODO: This depends on https://github.com/pytorch/rfcs/pull/3
    # (RFC-0001: Add method __torch_function__).
    # TODO: This causes a segfault, likely due to https://github.com/pytorch/pytorch/pull/37091
    self.assertEqual(a3, getattr(a1, func)(a2))
    # In-place methods cannot be applied to regular Tensors given a
    # NestedTensor as the other argument.
    # TODO: Only sub doesn't adhere to this rule, and with irregular behavior.
    if func == "add":
        self.assertEqual(c + a + b, getattr(a1, func + "_")(a2))

    # Test autograd.
    a = utils.gen_float_tensor(1, (2, 3)).requires_grad_()
    b = utils.gen_float_tensor(2, (2, 3)).requires_grad_()
    c = utils.gen_float_tensor(3, (2, 3)).requires_grad_()
    a1 = ntnt([a, b])
    if func == "remainder":
        a2 = ntnt_nograd([b, c])
    else:
        a2 = ntnt([b, c])
    if func == "remainder":
        a3 = ntnt([getattr(torch, func)(a, b.detach()),
                   getattr(torch, func)(b, c.detach())])
    else:
        a3 = ntnt([getattr(torch, func)(a, b),
                   getattr(torch, func)(b, c)])
    result = getattr(torch, func)(a1, a2)
    result.sum().backward()
    if func == "remainder":
        c.detach_()
    if func != "remainder":
        # This is used to exercise the tree reduction in the
        # gradient calculation.
        a1 = ntnt([a, b, c])
        result = getattr(torch, func)(a1, c)
        result.sum().backward()
        a_0 = a.clone().detach().requires_grad_()
        b_0 = b.clone().detach().requires_grad_()
        c_0 = c.clone().detach().requires_grad_()
        c_1 = c.clone().detach().requires_grad_()
        result_a = getattr(torch, func)(a_0, c_1)
        result_b = getattr(torch, func)(b_0, c_1)
        result_c = getattr(torch, func)(c_0, c_1)
        result_a.sum().backward()
        result_b.sum().backward()
        result_c.sum().backward()
        self.assertEqual(c.grad, c_1.grad)
    if func == "remainder":
        a1.detach_()
    result = getattr(torch, func)(c, a1)
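The autograd checks above reduce to one recurring pattern: build a gradient-tracking NestedTensor, apply the binary op, reduce with sum() to a scalar, and call backward(). A minimal sketch of that flow, assuming the `requires_grad` constructor keyword mirrors torch.tensor's flag and that binary torch ops dispatch on NestedTensors as the tests rely on:

import torch
import nestedtensor

a = torch.randn(2, 3)
b = torch.randn(2, 3)
nt = nestedtensor.nested_tensor([a, b], requires_grad=True)
other = nestedtensor.nested_tensor([torch.randn(2, 3), torch.randn(2, 3)])

out = torch.add(nt, other)   # elementwise op on matching nested shapes
out.sum().backward()         # scalar reduction, then reverse pass
assert nt.grad is not None   # gradients land on the NestedTensor itself,
                             # not on a/b, since the constructor copied them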