Code Example #1
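    # Traces callables through torch.jit.trace: as a decorator with and
    # without example_inputs, on a plain lambda, and on an nn.Module
    # instance. self.func1 is a traced function defined elsewhere on the
    # test class; func4 is traced with a single example input, so calling
    # it with two tensors may raise ValueError (swallowed below).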
    def test_trace(self):
        @torch.jit.trace(example_inputs=[None, None])
        def func2(a, b):
            return a + b

        @torch.jit.trace
        def func3(a, b):
            return a + b

        @torch.jit.trace(example_inputs=[None])
        def func4(a, b):
            return a + b

        class TestModule(torch.nn.Module):
            def forward(self, a, b):
                return a + b

        func5 = torch.jit.trace(lambda a, b: a + b)
        m = torch.jit.trace(TestModule())
        a, b, c = torch.tensor([1, 2]), torch.tensor([3, 4]), torch.tensor(1)
        self.assertEqual(self.func1(a, b).numpy().tolist(), [4, 6])
        self.assertEqual(func2(a, b).numpy().tolist(), [4, 6])
        self.assertEqual(func3(a, b).numpy().tolist(), [4, 6])
        self.assertEqual(func5(a, b).numpy().tolist(), [4, 6])
        self.assertEqual(m(a, b).numpy().tolist(), [4, 6])
        self.assertEqual(self.func1(a, b, c=c).numpy().tolist(), [4, 6])
        try:
            func4(a, b)
        except ValueError:
            pass
Code Example #2
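 # Drives torch.autograd.backward with valid and deliberately invalid
 # (tensors, grad_tensors) pairs; the invalid ones are expected to raise
 # ValueError or TypeError, which the test swallows.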
 def test_backward(self):
     x = torch.tensor(1., dtype=torch.float32, requires_grad=True)
     y = x + 1
     entries = [([y], []), ([y], [1]), ([torch.tensor(1.)], []),
                ([y], [torch.tensor([1., 1.])]), ([y], [y])]
     for tensors, grad_tensors in entries:
         try:
             torch.autograd.backward(tensors, grad_tensors)
             self.assertLessEqual(float(x.grad) - 2., 1e-5)
         except (ValueError, TypeError):
             pass
Code Example #3
File: test_nn.py  Project: seetaresearch/dragon
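 # Exercises nn.Module bookkeeping: add_module, parameter/buffer
 # registration, dtype/device casts, train/eval flags, state_dict
 # round-trips, and the named_* / plain iterators. `logging` is the
 # logging helper imported at module level in the original test_nn.py.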
 def test_properties(self):
     m = torch.nn.Module()
     m.add_module('sub1', torch.nn.Module().cuda().half())
     m.sub2 = torch.nn.Module().double()
     m.sub2.register_parameter('weight',
                               torch.nn.Parameter(torch.tensor(1)))
     m.add_module('sub3', None)
     m.register_parameter('weight', torch.nn.Parameter(torch.tensor(1)))
     m.weight = torch.nn.Parameter(torch.tensor(1))
     m.register_buffer('bias', torch.tensor(1))
     m.bias = torch.tensor(1)
     m.sub2 = None
     m.sub3 = torch.nn.Conv2d(2, 3, 3)
     m.cpu().float()
     self.assertEqual(m.train().training, True)
     self.assertEqual(m.eval().training, False)
     self.assertEqual(m.sub1.training, False)
     self.assertEqual(m.weight.requires_grad, True)
     self.assertEqual(m.bias.requires_grad, False)
     m.apply(lambda m: m.train())
     self.assertEqual(m.training, True)
     logging.set_verbosity('FATAL')
     m.load_state_dict(m.state_dict())
     logging.set_verbosity('INFO')
     m.load_state_dict(m.state_dict(to_numpy=True))
     try:
         m.load_state_dict({'!@#$%^&*()': 1})
     except RuntimeError:
         pass
     (m.sub3.weight + 1).sum().backward()
     m.zero_grad()
     for _, _ in m.named_modules():
         pass
     for _ in m.modules():
         pass
     for _, _ in m.named_parameters():
         pass
     for _, _ in m.named_buffers():
         pass
     for _ in m.parameters():
         pass
     for _ in m.buffers():
         pass
     _, _ = repr(m), repr(m.weight)
     try:
         m.weight = 1
     except TypeError:
         m.weight = None
     try:
         m.bias = 1
     except TypeError:
         m.bias = None
Code Example #4
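 # Verifies that no_grad, enable_grad, and set_grad_enabled control
 # whether results computed from a requires_grad tensor track gradients.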
 def test_set_grad_enabled(self):
     a = torch.tensor(1., requires_grad=True)
     b = torch.tensor(1., requires_grad=False)
     with torch.no_grad():
         self.assertEqual((a + 1).requires_grad, False)
         self.assertEqual((b + 1).requires_grad, False)
     with torch.enable_grad():
         self.assertEqual((a + 1).requires_grad, True)
         self.assertEqual((b + 1).requires_grad, False)
     with torch.set_grad_enabled(False):
         self.assertEqual((a + 1).requires_grad, False)
         self.assertEqual((b + 1).requires_grad, False)
     with torch.set_grad_enabled(True):
         self.assertEqual((a + 1).requires_grad, True)
         self.assertEqual((b + 1).requires_grad, False)
Code Example #5
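 # Covers basic Tensor properties and the typed constructors of this
 # torch-compatible API (dragon), where dtype is reported as a string
 # such as 'float32' and invalid constructor arguments raise ValueError.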
 def test_properties(self):
     a = torch.tensor([0.]).cpu()
     b = torch.Tensor([0., 1.], dtype=torch.float64).zero_()
     a.requires_grad = True
     c = a + b
     c.retain_grad()
     c.backward()
     self.assertEqual(a.is_leaf, True)
     self.assertEqual(a.is_floating_point(), True)
     self.assertEqual(a.is_contiguous(), True)
     self.assertEqual(a.contiguous().is_contiguous(), True)
     self.assertEqual(a.volatile, False)
     self.assertEqual(a.numel(), 1)
     self.assertEqual(a.grad_fn, None)
     self.assertEqual(float(a.grad), 2.)
     self.assertEqual(b.grad, None)
     self.assertEqual(int(a.detach()), 0)
     self.assertEqual(torch.Tensor([0]).dim(), 1)
     self.assertEqual(float(torch.Tensor(1).one_()), 1.)
     self.assertEqual(torch.tensor(2.333).item(), 2.333)
     self.assertEqual(torch.tensor([2, 3]).tolist(), [2, 3])
     self.assertEqual(torch.empty(2, 3).ndimension(), 2)
     self.assertEqual(torch.empty(3).new_empty(2, 3).ndimension(), 2)
     self.assertEqual(repr(torch.tensor(1)), '1')
     self.assertEqual(repr(torch.tensor(1).new_tensor(1)), '1')
     self.assertNotEqual(a.__hash__(), b.__hash__())
     self.assertNotEqual(a.__repr__(), b.__repr__())
     self.assertEqual(torch.BoolTensor(1).dtype, 'bool')
     self.assertEqual(torch.ByteTensor(1).dtype, 'uint8')
     self.assertEqual(torch.CharTensor(1).dtype, 'int8')
     self.assertEqual(torch.DoubleTensor(1).dtype, 'float64')
     self.assertEqual(torch.FloatTensor(1).dtype, 'float32')
     self.assertEqual(torch.HalfTensor(1).dtype, 'float16')
     self.assertEqual(torch.IntTensor(1).dtype, 'int32')
     self.assertEqual(torch.LongTensor(1).dtype, 'int64')
     self.assertEqual(torch.autograd.Variable(torch.Tensor(1)).requires_grad, False)
     try:
         _ = torch.Tensor(5.)
     except ValueError:
         pass
     try:
         _ = torch.Tensor(2, 3.)
     except ValueError:
         pass
     try:
         torch.Tensor(2).retain_grad()
     except RuntimeError:
         pass
Code Example #6
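 # Round-trips a tensor through DLPack and checks that shape, dtype, and
 # values survive the conversion (assumes `import numpy as np`).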
 def test_dlpack_converter(self):
     data = np.array([0., 1., 2.], 'float32')
     x = torch.tensor(data)
     x_to_dlpack = torch.utils.dlpack.to_dlpack(x)
     x_from_dlpack = torch.utils.dlpack.from_dlpack(x_to_dlpack)
     self.assertEqual(x_from_dlpack.shape, data.shape)
     self.assertEqual(x_from_dlpack.dtype, str(data.dtype))
     self.assertLessEqual(np.abs(x_from_dlpack.numpy() - data).max(), 1e-5)
Code Example #7
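 # Converts a tensor with Tensor.to() targeting a dtype, a device, and
 # another tensor; unsupported targets (a numpy array, a 'gpu' device)
 # are expected to raise ValueError (assumes `import numpy as np`).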
 def test_internal_converter(self):
     data = np.array([0., 1., 2.], 'float32')
     x = torch.tensor(data)
     y = x.to(torch.int32)
     self.assertEqual(y.dtype, 'int32')
     y = x.to(torch.device('cpu'))
     self.assertEqual(y.device, torch.device('cpu'))
     y = x.to(torch.FloatTensor(1))
     self.assertEqual(y.dtype, 'float32')
     self.assertEqual(y.device, torch.device('cpu'))
     try:
         _ = x.to(data)
     except ValueError:
         pass
     try:
         _ = x.to(torch.device('gpu'))
     except ValueError:
         pass
Code Example #8
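# Copies `data` (anything exposing a .dtype attribute, such as a numpy
# array or an existing tensor) into a new tensor with the same data type.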
def new_tensor(data, requires_grad=False):
    """Create a new tensor from data."""
    return torch.tensor(data, dtype=data.dtype, requires_grad=requires_grad)
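A minimal usage sketch for new_tensor, assuming numpy and the same torch module are in scope; if this torch.tensor does not accept numpy dtype objects directly, pass str(data.dtype) instead:

import numpy as np

data = np.array([1., 2., 3.], 'float32')
t = new_tensor(data, requires_grad=True)  # float32 tensor that tracks gradients
print(t.dtype, t.requires_grad)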