Ejemplo n.º 1
0
    def test_trivial_fancy_out_of_bounds(self):
        a = zeros(5)
        ind = ones(20).long()
        ind[-1] = 10
        self.assertRaises(RuntimeError, a.__getitem__, ind)
        self.assertRaises(RuntimeError, a.__setitem__, ind, 0)
        ind = ones(20).long()
        ind[0] = 11
        self.assertRaises(RuntimeError, a.__getitem__, ind)
        self.assertRaises(RuntimeError, a.__setitem__, ind, 0)

    def test_index_is_larger(self):
        # Simple case of fancy index broadcasting of the index.
        a = zeros((5, 5))
        a[[[0], [1], [2]], [0, 1, 2]] = tensor([2, 3, 4])

        self.assertTrue((a[:3, :3] == tensor([2, 3, 4])).all())

    def test_broadcast_subspace(self):
        a = zeros((100, 100))
        v = Variable(torch.arange(0, 100))[:, None]
        b = Variable(torch.arange(99, -1, -1).long())
        a[b] = v
        expected = b.double().unsqueeze(1).expand(100, 100)
        self.assertEqual(a, expected)


# Allow running this test file directly from the command line.
# NOTE(review): `run_tests` is not defined in this chunk — presumably imported
# from the shared test harness earlier in the file; confirm against full file.
if __name__ == '__main__':
    run_tests()
Ejemplo n.º 2
0
        x = torch.randn(4, 4)
        y = torch.randn(4, 4)
        z = cpp_extension.sigmoid_add(x, y)
        self.assertEqual(z, x.sigmoid() + y.sigmoid())

    def test_extension_module(self):
        """The compiled MatrixMultiplier must agree with plain torch `mm`."""
        multiplier = cpp_extension.MatrixMultiplier(4, 8)
        weights = torch.rand(8, 4)
        # Reference result: multiply the extension's stored matrix in torch.
        want = multiplier.get().mm(weights)
        got = multiplier.forward(weights)
        self.assertEqual(want, got)

    def test_jit_compile_extension(self):
        """JIT-compile a C++ extension at test time and exercise `tanh_add`."""
        ext = torch.utils.cpp_extension.load(
            name='jit_extension',
            sources=[
                'cpp_extensions/jit_extension.cpp',
                'cpp_extensions/jit_extension2.cpp'
            ],
            extra_include_paths=['cpp_extensions'],
            extra_cflags=['-g'],
            verbose=True)
        lhs = torch.randn(4, 4)
        rhs = torch.randn(4, 4)
        # The extension should match the same computation done in torch.
        self.assertEqual(ext.tanh_add(lhs, rhs), lhs.tanh() + rhs.tanh())


# Allow running this test file directly from the command line.
# NOTE(review): `common` is not imported in this chunk — presumably the shared
# test-utilities module imported earlier in the file; confirm against full file.
if __name__ == '__main__':
    common.run_tests()
Ejemplo n.º 3
0
        self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (0., 0.), (1,)).is_nonzero())
        self.assertFalse(torch.sparse_coo_tensor(([0, 0],), (-1., 1.), (1,)).is_nonzero())
        # NB: We should test "scalar" sparse tensors, but they don't actually
        # work at the moment (in principle, they should)


class TestUncoalescedSparse(TestSparse):
    """Re-run the whole TestSparse suite with uncoalesced sparse inputs."""

    def setUp(self):
        super(TestUncoalescedSparse, self).setUp()
        # Flag consulted by the base suite; presumably steers its fixture
        # construction toward uncoalesced tensors — confirm in TestSparse.
        self.is_uncoalesced = True


@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
class TestCudaSparse(TestSparse):
    """Re-run the whole TestSparse suite on CUDA sparse tensors."""

    def setUp(self):
        super(TestCudaSparse, self).setUp()
        self.is_cuda = True
        # Repoint the tensor factories used by the base suite at their
        # CUDA counterparts.
        self.IndexTensor = torch.cuda.LongTensor
        self.ValueTensor = torch.cuda.DoubleTensor
        self.SparseTensor = torch.cuda.sparse.DoubleTensor


@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
class TestCudaUncoalescedSparse(TestCudaSparse):
    """Re-run the CUDA sparse suite with uncoalesced inputs."""

    def setUp(self):
        super(TestCudaUncoalescedSparse, self).setUp()
        # Flag consulted by the base suite when building fixtures.
        self.is_uncoalesced = True

# Allow running this test file directly from the command line.
# NOTE(review): `run_tests` is not defined in this chunk — presumably imported
# from the shared test harness earlier in the file; confirm against full file.
if __name__ == '__main__':
    run_tests()