Example #1
def test_io_3d():
    if not ti.has_pytorch():
        return
    import torch
    import numpy as np
    n = 16

    @ti.kernel
    def torch_kernel(t: ti.ext_arr(), o: ti.ext_arr()):
        for i in range(n):
            for j in range(n):
                for k in range(n):
                    o[i, j, k] = t[i, j, k] * t[i, j, k]

    class Sqr(torch.autograd.Function):
        # Forward only: no backward() is defined, so this Function cannot backprop.
        @staticmethod
        def forward(ctx, inp):
            outp = torch.zeros_like(inp)
            torch_kernel(inp, outp)
            return outp

    sqr = Sqr.apply
    X = torch.tensor(2 * np.ones((n, n, n), dtype=np.float32),
                     requires_grad=True)
    val = sqr(X).sum()
    assert val == 2 * 2 * n * n * n
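Since this Sqr defines only forward(), it supports the value check above but not backprop. An element-wise cross-check could look like this (a sketch assuming the definitions above; not part of the original test):

Y = sqr(X)
assert torch.allclose(Y, X * X)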
Example #2
def torch_test(func):
    import taichi as ti
    if ti.has_pytorch():
        # OpenGL somehow crashes the torch tests for no apparent reason, unfortunately
        return ti.archs_excluding(ti.opengl)(func)
    else:
        return lambda: None
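A usage sketch for this decorator (the test body is illustrative, not from the source):

@torch_test
def test_add():
    import torch
    assert torch.ones(4).sum() == 4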
Example #3
def test_io_simple():
    if not ti.has_pytorch():
        return
    import torch
    import numpy as np
    n = 32

    x1 = ti.var(ti.f32, shape=(n, n))
    t1 = torch.tensor(2 * np.ones((n, n), dtype=np.float32))

    x2 = ti.Matrix(2, 3, ti.f32, shape=(n, n))
    t2 = torch.tensor(2 * np.ones((n, n, 2, 3), dtype=np.float32))

    x1.from_torch(t1)
    for i in range(n):
        for j in range(n):
            assert x1[i, j] == 2

    x2.from_torch(t2)
    for i in range(n):
        for j in range(n):
            for k in range(2):
                for l in range(3):
                    assert x2[i, j][k, l] == 2

    t3 = x2.to_torch()
    assert (t2 == t3).all()
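The layout convention shown here is that to_torch/from_torch tensors use the field shape followed by the matrix shape. A sketch (assuming the same legacy ti.Matrix API as above):

x = ti.Matrix(2, 3, ti.f32, shape=(4, 5))
t = x.to_torch()
assert t.shape == (4, 5, 2, 3)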
Example #4
def test_io_simple():
    if not ti.has_pytorch():
        return
    import torch
    mat = ti.Matrix(2, 6, dt=ti.f32, shape=(), needs_grad=True)
    zeros = torch.zeros((2, 6))
    zeros[1, 2] = 3
    mat.from_torch(zeros + 1)

    assert mat[None][1, 2] == 4

    t = mat.to_torch()
    assert t[1, 2] == 4
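The [None] index is how fields declared with shape=() (zero-dimensional) are read and written. A minimal sketch:

s = ti.var(ti.f32, shape=())
s[None] = 1.5
assert s[None] == 1.5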
Example #5
def test_io():
    if not ti.has_pytorch():
        return
    import torch
    import numpy as np

    n = 32

    @ti.kernel
    def torch_kernel(t: ti.ext_arr(), o: ti.ext_arr()):
        for i in range(n):
            o[i] = t[i] * t[i]

    # Backward kernel: d(t*t)/dt = 2*t, chained with the incoming gradient.
    @ti.kernel
    def torch_kernel_2(t_grad: ti.ext_arr(), t: ti.ext_arr(),
                       o_grad: ti.ext_arr()):
        for i in range(n):
            t_grad[i] = 2 * t[i] * o_grad[i]

    class Sqr(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            outp = torch.zeros_like(inp)
            ctx.save_for_backward(inp)
            torch_kernel(inp, outp)
            return outp

        @staticmethod
        def backward(ctx, outp_grad):
            outp_grad = outp_grad.contiguous()
            inp_grad = torch.zeros_like(outp_grad)
            inp, = ctx.saved_tensors
            torch_kernel_2(inp_grad, inp, outp_grad)
            return inp_grad

    # To run this test on GPU, pass device=torch.device('cuda:0') to the tensor below.

    sqr = Sqr.apply
    X = torch.tensor(2 * np.ones((n, ), dtype=np.float32), requires_grad=True)
    sqr(X).sum().backward()
    ret = X.grad.cpu()
    for i in range(n):
        assert ret[i] == 4
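As a cross-check (not in the original test), the custom Function's gradient should match PyTorch's native autograd on the same expression:

X2 = torch.tensor(2 * np.ones((n, ), dtype=np.float32), requires_grad=True)
(X2 * X2).sum().backward()
assert torch.allclose(X.grad.cpu(), X2.grad.cpu())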
Example #6
def test_torch_ad():
    if not ti.has_pytorch():
        return
    import torch
    import numpy as np
    n = 32

    x = ti.var(ti.f32, shape=n, needs_grad=True)
    y = ti.var(ti.f32, shape=n, needs_grad=True)

    @ti.kernel
    def torch_kernel():
        for i in range(n):
            # Do whatever complex operations here
            y[n - i - 1] = x[i] * x[i]

    class Sqr(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            x.from_torch(inp)
            torch_kernel()
            outp = y.to_torch()
            return outp

        @staticmethod
        def backward(ctx, outp_grad):
            ti.clear_all_gradients()      # zero the grads of all Taichi fields
            y.grad.from_torch(outp_grad)  # seed the output gradient
            torch_kernel.grad()           # run the kernel in reverse (autodiff)
            inp_grad = x.grad.to_torch()  # pull the input gradient back to torch
            return inp_grad

    sqr = Sqr.apply
    for i in range(10):
        X = torch.tensor(2 * np.ones((n, ), dtype=np.float32),
                         requires_grad=True)
        sqr(X).sum().backward()
        ret = X.grad.cpu().numpy()
        for j in range(n):
            assert ret[j] == 4
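To use such a Function inside a larger model, it can be wrapped in an nn.Module; a minimal sketch (the class name is illustrative):

class SqrLayer(torch.nn.Module):
    def forward(self, inp):
        # Delegates to the Taichi-backed autograd Function defined above.
        return Sqr.apply(inp)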
Example #7
import taichi as ti
import numpy as np

if ti.has_pytorch():
    import torch


@ti.torch_test
def test_io_devices():
    n = 32
    x = ti.var(dt=ti.i32, shape=n)

    @ti.kernel
    def load(y: ti.ext_arr()):
        for i in x:
            x[i] = y[i] + 10

    @ti.kernel
    def inc():
        for i in x:
            x[i] += i

    @ti.kernel
    def store(y: ti.ext_arr()):
        for i in x:
            y[i] = x[i] * 2

    devices = ['cpu']
    if torch.cuda.is_available():
        devices.append('cuda:0')
    for device in devices:
        # NOTE: the loop body is truncated in the source; what follows is a
        # minimal sketch of the intended round-trip.
        y = torch.ones(n, dtype=torch.int32, device=device)
        load(y)   # x[i] = y[i] + 10  ->  11
        inc()     # x[i] += i         ->  11 + i
        store(y)  # y[i] = x[i] * 2   ->  (11 + i) * 2
        y = y.cpu().numpy()
        for i in range(n):
            assert y[i] == (11 + i) * 2
Example #8
    with pytest.raises(TypeError,
                       match=r"unsupported operand type\(s\) for '&'"):
        bitwise_float()


# @ti.test(arch=ti.cpu)
# def test_ternary_op():
#     @ti.kernel
#     def select():
#         a = 1.1
#         b = 3
#         c = 3.6
#         d = b if a else c
#
#     with pytest.raises(TypeError,
#                        match="for 'select': 'f32', 'i32' and 'f32'"):
#         select()


@pytest.mark.skipif(not ti.has_pytorch(), reason='PyTorch not installed.')
@ti.test(arch=[ti.cpu, ti.opengl])
def test_subscript():
    a = ti.ndarray(ti.i32, shape=(10, 10))

    @ti.kernel
    def any_array(x: ti.any_arr()):
        b = x[3, 1.1]

    with pytest.raises(TypeError, match="indices must be integers"):
        any_array(a)
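For contrast, a sketch of the same kind of ndarray written with valid integer indices (the fill kernel is illustrative, not from the source):

arr = ti.ndarray(ti.i32, shape=(10, 10))

@ti.kernel
def fill(x: ti.any_arr()):
    for i, j in ti.ndrange(10, 10):
        x[i, j] = i + j

fill(arr)  # integer indices pass the type check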
Example #9
def torch_test(func):
    import taichi as ti
    if ti.has_pytorch():
        return ti.all_archs(func)
    else:
        return lambda: None
Example #10
def torch_test(_func):
    if ti.has_pytorch():
        # OpenGL somehow crashes the torch tests for no apparent reason, unfortunately
        return ti.test(exclude=[ti.opengl])(_func)
    return lambda: None