Example #1
def test_permute(data, t1):
    permutation = data.draw(permutations(range(len(t1.shape))))

    def permute(a):
        return a.permute(*permutation)

    grad_check(permute, t1)
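
For context, minitorch's grad_check compares the gradient computed by autodiff against a central-difference estimate at sampled positions. The idea in miniature, using plain Python floats rather than minitorch tensors (a sketch of the technique, not the library's actual implementation):

def central_difference(f, x, eps=1e-6):
    # Two-sided estimate: (f(x + eps) - f(x - eps)) / (2 * eps)
    return (f(x + eps) - f(x - eps)) / (2 * eps)

def scalar_grad_check(f, df, x, rel_tol=1e-4):
    # Compare the claimed derivative df(x) against the numeric estimate.
    numeric = central_difference(f, x)
    assert abs(df(x) - numeric) <= rel_tol * max(1.0, abs(numeric))

# d/dx of x**2 is 2x; check at x = 3.0.
scalar_grad_check(lambda x: x * x, lambda x: 2 * x, 3.0)
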
Example #2
def test_log_softmax(t):
    q = minitorch.softmax(t, 3)
    q2 = minitorch.logsoftmax(t, 3).exp()
    for i in q._tensor.indices():
        assert_close(q[i], q2[i])

    minitorch.grad_check(lambda a: minitorch.logsoftmax(a, dim=2), t)
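
The identity under test is exp(logsoftmax(x)) == softmax(x). In practice logsoftmax is computed via the log-sum-exp trick rather than literally as log(softmax(x)); a numpy sketch of that formulation (assumed here, not minitorch's code):

import numpy as np

def log_softmax(x, axis):
    # log softmax(x) = x - logsumexp(x), shifted by the max for stability.
    m = x.max(axis=axis, keepdims=True)
    return x - (m + np.log(np.exp(x - m).sum(axis=axis, keepdims=True)))

x = np.random.randn(2, 3, 4, 4)
probs = np.exp(log_softmax(x, axis=3))
assert np.allclose(probs.sum(axis=3), 1.0)
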
Example #3
def test_custom():
    fn = lambda t1, t2: t1 * t2
    t1 = minitorch.tensor_fromlist([[1, 2], [4, 5]])  # shape (2, 2)
    t2 = minitorch.tensor_fromlist([[1, 4]])  # shape (1, 2), broadcasts against t1
    ts = [
        t1,
        t2,
    ]
    minitorch.grad_check(fn, *ts)
Example #4
def test_permute(backend, data):
    "Check permutations for all backends."
    t1 = data.draw(tensors(backend=shared[backend]))
    permutation = data.draw(permutations(range(len(t1.shape))))

    def permute(a):
        return a.permute(*permutation)

    minitorch.grad_check(permute, t1)
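
Listings like this one strip the test's decorators: data.draw only works inside a Hypothesis @given test, and backend arrives via pytest parametrization. A plausible reconstruction of the scaffolding (the tensors strategy and the shared backend table are minitorch test helpers, and the backend names are assumptions):

import pytest
from hypothesis import given
from hypothesis.strategies import data, permutations

@pytest.mark.parametrize("backend", ["fast", "cuda"])  # names assumed
@given(data())
def test_permute(backend, data):
    ...
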
Example #5
def test_softmax(t):
    q = minitorch.softmax(t, 3)
    x = q.sum(dim=3)
    assert_close(x[0, 0, 0, 0], 1.0)

    q = minitorch.softmax(t, 1)
    x = q.sum(dim=1)
    assert_close(x[0, 0, 0, 0], 1.0)

    minitorch.grad_check(lambda a: minitorch.softmax(a, dim=2), t)
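
The invariant asserted here, that softmax outputs sum to 1 along the reduced dimension, is quick to confirm in plain numpy:

import numpy as np

def softmax(x, axis):
    # Shift by the max first; softmax is invariant to adding a constant.
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

x = np.random.randn(2, 3, 4, 4)
assert np.allclose(softmax(x, axis=3).sum(axis=3), 1.0)
assert np.allclose(softmax(x, axis=1).sum(axis=1), 1.0)
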
Example #6
def test_max(t):
    out = minitorch.nn.max(t, 2)
    assert out[0, 0, 0] == max(t[0, 0, i] for i in range(4))
    out = minitorch.nn.max(t, 1)
    assert out[0, 0, 0] == max(t[0, i, 0] for i in range(3))
    out = minitorch.nn.max(t, 0)
    assert out[0, 0, 0] == max(t[i, 0, 0] for i in range(2))
    rand_tensor = minitorch.rand(t.shape) * 1e-5
    t = t + rand_tensor
    minitorch.grad_check(lambda t: minitorch.nn.max(t, 2), t)
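
The tiny random perturbation before grad_check is there to break ties: where two inputs share the maximum, max has an ambiguous subgradient, and a finite-difference estimate will disagree with whatever choice autodiff makes. The mismatch in one dimension:

# At x == y, nudging x up changes max(x, y) but nudging it down does
# not, so the central difference reports 0.5, while an autodiff
# backend typically assigns the full gradient 1.0 to one argument.
eps = 1e-6
x = y = 1.0
numeric = (max(x + eps, y) - max(x - eps, y)) / (2 * eps)
print(numeric)  # ~0.5
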
Example #7
def test_mm2():
    a = minitorch.rand((2, 3), backend=FastTensorBackend)
    b = minitorch.rand((3, 4), backend=FastTensorBackend)
    c = a @ b

    c2 = (a.view(2, 3, 1) * b.view(1, 3, 4)).sum(1).view(2, 4)

    for ind in c._tensor.indices():
        assert_close(c[ind], c2[ind])

    minitorch.grad_check(lambda a, b: a @ b, a, b)
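
The reference computation c2 uses the identity that matrix multiplication is a broadcast elementwise product summed over the shared dimension. The same check in numpy:

import numpy as np

a = np.random.randn(2, 3)
b = np.random.randn(3, 4)
# (2, 3, 1) * (1, 3, 4) broadcasts to (2, 3, 4); summing axis 1
# contracts the shared dimension, which is exactly a @ b.
c2 = (a.reshape(2, 3, 1) * b.reshape(1, 3, 4)).sum(axis=1)
assert np.allclose(a @ b, c2)
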
Example #8
def test_conv1d_simple2():
    t = minitorch.tensor_fromlist([[1, 2, 1, 2, 3, 4],
                                   [2, 1, 3, 2, 3, 2.]]).view(1, 2, 6)
    t.requires_grad_(True)
    t2 = minitorch.tensor_fromlist([[[3, 2, 1],
                                     [1, 2, 3.]], [[3, 2, 1],
                                                   [1, 2, 3.]]]).view(2, 2, 3)
    out = minitorch.Conv1dFun.apply(t, t2)
    print(out)
    assert out[0, 1, 2] == 26
    minitorch.grad_check(minitorch.Conv1dFun.apply, t, t2)
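
The expected value 26 can be verified by hand. Assuming minitorch's Conv1dFun correlates the kernel with the input starting at the output position (no kernel flipping, no left padding), out[0, 1, 2] combines output-channel-1 weights with input columns 2..4:

t = [[1, 2, 1, 2, 3, 4],      # input channel 0
     [2, 1, 3, 2, 3, 2]]      # input channel 1
w = [[3, 2, 1],               # out-channel 1, in-channel 0
     [1, 2, 3]]               # out-channel 1, in-channel 1
# Sum over input channels c and kernel taps k:
val = sum(w[c][k] * t[c][2 + k] for c in range(2) for k in range(3))
assert val == 26              # (3 + 4 + 3) + (3 + 4 + 9)
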
Example #9
def test_conv2():
    t = minitorch.tensor_fromlist([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3],
                                   [0, 1, 2, 3]]).view(1, 1, 4, 4)
    t.requires_grad_(True)

    t2 = minitorch.tensor_fromlist([[1, 1], [1, 1]]).view(1, 1, 2, 2)
    t2.requires_grad_(True)
    out = minitorch.Conv2dFun.apply(t, t2)
    out.sum().backward()

    minitorch.grad_check(minitorch.Conv2dFun.apply, t, t2)
Example #10
def test_avg(t):
    out = minitorch.avgpool2d(t, (2, 2))
    assert_close(
        out[0, 0, 0, 0],
        sum([t[0, 0, i, j] for i in range(2) for j in range(2)]) / 4.0)

    out = minitorch.avgpool2d(t, (2, 1))
    assert_close(
        out[0, 0, 0, 0],
        sum([t[0, 0, i, j] for i in range(2) for j in range(1)]) / 2.0)

    out = minitorch.avgpool2d(t, (1, 2))
    assert_close(
        out[0, 0, 0, 0],
        sum([t[0, 0, i, j] for i in range(1) for j in range(2)]) / 2.0)
    minitorch.grad_check(lambda t: minitorch.avgpool2d(t, (2, 2)), t)
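
Average pooling with a (kh, kw) window averages each non-overlapping tile, which is what each assertion above spells out element by element. A compact numpy equivalent for the (2, 2) case:

import numpy as np

x = np.random.randn(1, 1, 4, 4)
# Split H and W into (tiles, window) axes and average the window axes.
pooled = x.reshape(1, 1, 2, 2, 2, 2).mean(axis=(3, 5))
assert np.isclose(pooled[0, 0, 0, 0], x[0, 0, :2, :2].mean())
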
Example #11
def test_two_grad_broadcast(fn, ts):
    t1, t2 = ts
    minitorch.grad_check(fn[1], t1, t2)

    # broadcast check
    minitorch.grad_check(fn[1], t1.sum(0), t2)
    minitorch.grad_check(fn[1], t1, t2.sum(0))
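
Summing one argument over dim 0 gives it a broadcastable shape, so the backward pass must reduce the upstream gradient back down to that shape; this is the bookkeeping the broadcast check exercises. In numpy terms, for an elementwise product:

import numpy as np

a = np.random.randn(3, 4)
b = np.random.randn(1, 4)           # broadcasts against a
grad_out = np.ones((3, 4))          # upstream gradient of a * b
# d(a*b)/db arrives at shape (3, 4); it must be summed over the
# broadcast dimension to match b's shape (1, 4).
grad_b = (grad_out * a).sum(axis=0, keepdims=True)
assert grad_b.shape == b.shape
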
Example #12
def test_two_grad_broadcast(fn, ts):
    name, base_fn, tensor_fn = fn
    t1, t2 = ts
    grad_check(tensor_fn, t1, t2)

    # broadcast check
    grad_check(tensor_fn, t1.sum(0), t2)
    grad_check(tensor_fn, t1, t2.sum(0))
Example #13
def test_softmax(t):
    # Replace the drawn tensor with a fixed all-zeros input for a
    # deterministic, hand-checkable case.
    t = minitorch.tensor_fromlist([
        [
            [
                [0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00]]]])
    # softmax is taken over dim 2 but the sum below is over dim 3; this
    # still yields 1.0 only because the input is uniform and both dims
    # have size 4.
    q = minitorch.softmax(t, 2)
    print('=====t')
    print(t)
    print('=====q')
    print(q)
    x = q.sum(dim=3)
    print('=====x')
    print(x)
    assert_close(x[0, 0, 0, 0], 1.0)

    q = minitorch.softmax(t, 1)
    x = q.sum(dim=1)
    assert_close(x[0, 0, 0, 0], 1.0)

    minitorch.grad_check(lambda a: minitorch.softmax(a, dim=2), t)
Example #14
def test_two_grad_broadcast(fn, backend, data):
    "Run backward for all two arg functions above with broadcast."

    t1, t2 = data.draw(shaped_tensors(2, backend=backend))
    minitorch.grad_check(fn[1], t1, t2)

    # broadcast check
    minitorch.grad_check(fn[1], t1.sum(0), t2)
    minitorch.grad_check(fn[1], t1, t2.sum(0))
Example #15
def test_two_grad_broadcast(fn, backend, data):
    "Run backward for all two arg functions above with broadcast."
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
    name, base_fn, tensor_fn = fn

    grad_check(tensor_fn, t1, t2)

    # broadcast check
    grad_check(tensor_fn, t1.sum(0), t2)
    grad_check(tensor_fn, t1, t2.sum(0))
Example #16
def test_max(t):
    t_np = t.to_numpy()

    compare_0 = t_np.max(axis=0)
    out_0 = minitorch.Max.apply(t, 0)
    assert out_0.shape == (1, 3, 4)
    assert np.array_equal(compare_0.reshape(-1), out_0.to_numpy().reshape(-1))
    minitorch.grad_check(lambda t: minitorch.Max.apply(t, 0),
                         t + (minitorch.rand(t.shape) * 1e-5))

    compare_1 = t_np.max(axis=1)
    out_1 = minitorch.Max.apply(t, 1)
    assert out_1.shape == (2, 1, 4)
    assert np.array_equal(compare_1.reshape(-1), out_1.to_numpy().reshape(-1))
    minitorch.grad_check(lambda t: minitorch.Max.apply(t, 1),
                         t + (minitorch.rand(t.shape) * 1e-5))

    compare_2 = t_np.max(axis=2)
    out_2 = minitorch.Max.apply(t, 2)
    assert out_2.shape == (2, 3, 1)
    assert np.array_equal(compare_2.reshape(-1), out_2.to_numpy().reshape(-1))
    minitorch.grad_check(lambda t: minitorch.Max.apply(t, 2),
                         t + (minitorch.rand(t.shape) * 1e-5))
Example #17
def test_two_grad(fn, backend, data):
    "Run backward for all two arg functions above."
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
    name, _, tensor_fn = fn
    grad_check(tensor_fn, t1, t2)
Example #18
def test_one_derivative(fn, backend, data):
    "Run backward for all one arg functions above."
    t1 = data.draw(tensors(backend=shared[backend]))
    name, _, tensor_fn = fn
    grad_check(tensor_fn, t1)
Example #19
def test_conv1d(input, weight):
    out = minitorch.Conv1dFun.apply(input, weight)
    minitorch.grad_check(minitorch.Conv1dFun.apply, input, weight)
    print('=============')
    print(input, weight)
    print(out)
Example #20
def test_conv_batch(input, weight):
    minitorch.grad_check(minitorch.Conv2dFun.apply, input, weight)
Example #21
def test_two_grad(fn, ts):
    t1, t2 = ts
    minitorch.grad_check(fn[1], t1, t2)
Example #22
def test_back_view(t1):
    def view(a):
        # view() re-strides existing storage, so the tensor must be
        # contiguous first (it may not be, e.g., after a permute).
        a = a.contiguous()
        return a.view(a.size)

    grad_check(view, t1)
Example #23
def test_one_derivative(fn, t1):
    name, _, tensor_fn = fn
    grad_check(tensor_fn, t1)
Example #24
def test_conv1d(input, weight):
    minitorch.grad_check(minitorch.Conv1dFun.apply, input, weight)
Example #25
def test_reduce(fn, t1):
    name, _, tensor_fn = fn
    grad_check(tensor_fn, t1)
Example #26
def test_one_derivative(fn, t1):
    minitorch.grad_check(fn[1], t1)
Example #27
def test_reduce(fn, t1):
    minitorch.grad_check(fn[1], t1)
Example #28
def test_conv_channel(input, weight):
    # Repeat several times, since grad_check samples positions at random.
    for _ in range(5):
        minitorch.grad_check(minitorch.Conv2dFun.apply, input, weight)
Example #29
def test_reduce(fn, backend, data):
    "Run backward for all reduce functions above."
    t1 = data.draw(tensors(backend=shared[backend]))
    name, _, tensor_fn = fn
    grad_check(tensor_fn, t1)
Example #30
def test_two_grad(fn, ts):
    name, _, tensor_fn = fn
    t1, t2 = ts
    grad_check(tensor_fn, t1, t2)