Code Example #1
File: test_tensor.py Project: minitorch/Module-2
def test_permute(data, t1):
    permutation = data.draw(permutations(range(len(t1.shape))))

    def permute(a):
        return a.permute(*permutation)

    grad_check(permute, t1)
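All of the examples on this page call grad_check, which compares the gradients produced by backward() against central-difference estimates. The snippet below is a minimal NumPy sketch of that idea, not minitorch's implementation; the tolerance and the x**2 example are illustrative only.

import numpy as np

def numeric_grad(f, x, eps=1e-6):
    # Central-difference estimate of d sum(f(x)) / dx for every element of x.
    grad = np.zeros_like(x)
    for idx in np.ndindex(x.shape):
        up, down = x.copy(), x.copy()
        up[idx] += eps
        down[idx] -= eps
        grad[idx] = (f(up).sum() - f(down).sum()) / (2 * eps)
    return grad

# For f(x) = x**2 the analytic gradient is 2x; the estimate should agree closely.
x = np.random.rand(2, 3)
assert np.allclose(2 * x, numeric_grad(lambda a: a ** 2, x), atol=1e-4)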
Code Example #2
def test_log_softmax(t):
    q = minitorch.softmax(t, 3)
    q2 = minitorch.logsoftmax(t, 3).exp()
    for i in q._tensor.indices():
        assert_close(q[i], q2[i])

    minitorch.grad_check(lambda a: minitorch.logsoftmax(a, dim=2), t)
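The loop above asserts, element by element, the identity exp(logsoftmax(x)) == softmax(x). The same identity checked in plain NumPy, computed directly from the definitions (illustrative only):

import numpy as np

x = np.random.rand(2, 3, 4, 5)
denom = np.exp(x).sum(axis=3, keepdims=True)
softmax = np.exp(x) / denom
log_softmax = x - np.log(denom)
assert np.allclose(softmax, np.exp(log_softmax))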
Code Example #3
File: test_tensor.py Project: tkukurin/MiniTorch
def test_custom():
    fn = lambda t1, t2: t1 * t2
    t1 = minitorch.tensor_fromlist([[1, 2], [4, 5]])
    t2 = minitorch.tensor_fromlist([[1, 4]])
    ts = [
        t1,
        t2,
    ]
    minitorch.grad_check(fn, *ts)
Code Example #4
def test_permute(backend, data):
    "Check permutations for all backends."
    t1 = data.draw(tensors(backend=shared[backend]))
    permutation = data.draw(permutations(range(len(t1.shape))))

    def permute(a):
        return a.permute(*permutation)

    minitorch.grad_check(permute, t1)
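The data, backend, and t1 arguments in tests like this one are injected by decorators that these snippets omit: hypothesis's @given(data()) plus a pytest parametrization over the shared backend table. As a self-contained stand-in, the following uses hypothesis's NumPy extra instead of minitorch's tensors strategy to show the same draw-an-array, draw-a-permutation pattern:

import numpy as np
from hypothesis import given
from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays

@given(st.data())
def test_permute_numpy(data):
    # Draw an array, then draw a permutation of its axes, as the snippet above does.
    a = data.draw(arrays(np.float64, (2, 3, 4), elements=st.floats(-10, 10)))
    perm = data.draw(st.permutations(range(a.ndim)))
    b = np.transpose(a, perm)
    assert b.shape == tuple(a.shape[i] for i in perm)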
Code Example #5
def test_softmax(t):
    q = minitorch.softmax(t, 3)
    x = q.sum(dim=3)
    assert_close(x[0, 0, 0, 0], 1.0)

    q = minitorch.softmax(t, 1)
    x = q.sum(dim=1)
    assert_close(x[0, 0, 0, 0], 1.0)

    minitorch.grad_check(lambda a: minitorch.softmax(a, dim=2), t)
Code Example #6
def test_max(t):
    out = minitorch.nn.max(t, 2)
    assert out[0, 0, 0] == max(t[0, 0, i] for i in range(4))
    out = minitorch.nn.max(t, 1)
    assert out[0, 0, 0] == max(t[0, i, 0] for i in range(3))
    out = minitorch.nn.max(t, 0)
    assert out[0, 0, 0] == max(t[i, 0, 0] for i in range(2))
    rand_tensor = minitorch.rand(t.shape) * 1e-5
    t = t + rand_tensor
    minitorch.grad_check(lambda t: minitorch.nn.max(t, 2), t)
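The 1e-5 noise added just before the gradient check breaks ties in the max: at a tied maximum the central-difference estimate falls between the subgradients and cannot match an argmax-style backward pass. A plain NumPy illustration (numeric_dmax is a hypothetical helper, not part of minitorch):

import numpy as np

def numeric_dmax(x, i, eps=1e-6):
    # Central-difference estimate of d max(x) / d x[i].
    up, down = x.copy(), x.copy()
    up[i] += eps
    down[i] -= eps
    return (up.max() - down.max()) / (2 * eps)

print(numeric_dmax(np.array([1.0, 1.0]), 0))         # ~0.5 at a tie
print(numeric_dmax(np.array([1.0, 1.0 + 1e-5]), 1))  # ~1.0 once the tie is broken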
Code Example #7
def test_mm2():
    a = minitorch.rand((2, 3), backend=FastTensorBackend)
    b = minitorch.rand((3, 4), backend=FastTensorBackend)
    c = a @ b

    c2 = (a.view(2, 3, 1) * b.view(1, 3, 4)).sum(1).view(2, 4)

    for ind in c._tensor.indices():
        assert_close(c[ind], c2[ind])

    minitorch.grad_check(lambda a, b: a @ b, a, b)
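test_mm2 re-derives the matmul by broadcasting: (a.view(2, 3, 1) * b.view(1, 3, 4)).sum(1) pairs every a[i, k] with every b[k, j] and then sums over the shared k axis, which is exactly c[i, j] = sum_k a[i, k] * b[k, j]. The same identity checked in plain NumPy:

import numpy as np

a = np.random.rand(2, 3)
b = np.random.rand(3, 4)
# Broadcast (2, 3, 1) * (1, 3, 4) -> (2, 3, 4), then sum out the shared axis.
c2 = (a.reshape(2, 3, 1) * b.reshape(1, 3, 4)).sum(axis=1)
assert np.allclose(a @ b, c2)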
Code Example #8
def test_conv1d_simple2():
    t = minitorch.tensor_fromlist([[1, 2, 1, 2, 3, 4],
                                   [2, 1, 3, 2, 3, 2.]]).view(1, 2, 6)
    t.requires_grad_(True)
    t2 = minitorch.tensor_fromlist([[[3, 2, 1],
                                     [1, 2, 3.]], [[3, 2, 1],
                                                   [1, 2, 3.]]]).view(2, 2, 3)
    out = minitorch.Conv1dFun.apply(t, t2)
    print(out)
    assert out[0, 1, 2] == 26
    minitorch.grad_check(minitorch.Conv1dFun.apply, t, t2)
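The expected value 26 can be checked by hand, assuming an unflipped, left-anchored kernel: out[0, 1, 2] correlates the input window starting at position 2 with output channel 1's weights, giving (1*3 + 2*2 + 3*1) + (3*1 + 2*2 + 3*3) = 10 + 16 = 26. The same window sum in NumPy:

import numpy as np

x = np.array([[1, 2, 1, 2, 3, 4],
              [2, 1, 3, 2, 3, 2.]])    # (in_channels=2, width=6)
w = np.array([[3, 2, 1],
              [1, 2, 3.]])             # output channel 1 weights: (in_channels=2, k=3)
print((x[:, 2:5] * w).sum())           # 26.0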
Code Example #9
def test_conv2():
    t = minitorch.tensor_fromlist([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3],
                                   [0, 1, 2, 3]]).view(1, 1, 4, 4)
    t.requires_grad_(True)

    t2 = minitorch.tensor_fromlist([[1, 1], [1, 1]]).view(1, 1, 2, 2)
    t2.requires_grad_(True)
    out = minitorch.Conv2dFun.apply(t, t2)
    out.sum().backward()

    minitorch.grad_check(minitorch.Conv2dFun.apply, t, t2)
Code Example #10
def test_avg(t):
    out = minitorch.avgpool2d(t, (2, 2))
    assert_close(
        out[0, 0, 0, 0],
        sum([t[0, 0, i, j] for i in range(2) for j in range(2)]) / 4.0)

    out = minitorch.avgpool2d(t, (2, 1))
    assert_close(
        out[0, 0, 0, 0],
        sum([t[0, 0, i, j] for i in range(2) for j in range(1)]) / 2.0)

    out = minitorch.avgpool2d(t, (1, 2))
    assert_close(
        out[0, 0, 0, 0],
        sum([t[0, 0, i, j] for i in range(1) for j in range(2)]) / 2.0)
    minitorch.grad_check(lambda t: minitorch.avgpool2d(t, (2, 2)), t)
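For non-overlapping windows, average pooling is equivalent to splitting each spatial axis into (blocks, window) pairs and averaging over the window axes, which is the identity the assertions above spell out element by element. A NumPy sketch of that reshape-plus-mean view:

import numpy as np

x = np.random.rand(1, 1, 4, 4)
kh, kw = 2, 2
b, c, h, w = x.shape
pooled = x.reshape(b, c, h // kh, kh, w // kw, kw).mean(axis=(3, 5))
assert pooled.shape == (1, 1, 2, 2)
assert np.isclose(pooled[0, 0, 0, 0], x[0, 0, :2, :2].mean())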
Code Example #11
def test_two_grad_broadcast(fn, ts):
    t1, t2 = ts
    minitorch.grad_check(fn[1], t1, t2)

    # broadcast check
    minitorch.grad_check(fn[1], t1.sum(0), t2)
    minitorch.grad_check(fn[1], t1, t2.sum(0))
Code Example #12
File: test_tensor.py Project: minitorch/Module-2
def test_two_grad_broadcast(fn, ts):
    name, base_fn, tensor_fn = fn
    t1, t2 = ts
    grad_check(tensor_fn, t1, t2)

    # broadcast check
    grad_check(tensor_fn, t1.sum(0), t2)
    grad_check(tensor_fn, t1, t2.sum(0))
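In these parametrized tests fn is a (name, base_fn, tensor_fn) triple: a human-readable name, a reference implementation, and the tensor version whose gradients are checked. The triple layout comes from the unpacking above; everything below is an illustrative NumPy stand-in, not minitorch's actual function table:

import numpy as np

two_arg = [
    ("add", lambda a, b: a + b, lambda a, b: np.add(a, b)),
    ("mul", lambda a, b: a * b, lambda a, b: np.multiply(a, b)),
]

for name, base_fn, tensor_fn in two_arg:
    a, b = np.random.rand(3), np.random.rand(3)
    assert np.allclose(base_fn(a, b), tensor_fn(a, b)), name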
Code Example #13
def test_softmax(t):
    t = minitorch.tensor_fromlist([
        [
            [
                [0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00]]]])
    q = minitorch.softmax(t, 2)
    print('=====t')
    print(t)
    print('=====q')
    print(q)
    x = q.sum(dim=3)
    print('=====x')
    print(x)
    assert_close(x[0, 0, 0, 0], 1.0)

    q = minitorch.softmax(t, 1)
    x = q.sum(dim=1)
    assert_close(x[0, 0, 0, 0], 1.0)

    minitorch.grad_check(lambda a: minitorch.softmax(a, dim=2), t)
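A detail of this variant: with an all-zero input every softmax entry is 1/4, so summing along dim 3 still returns 1.0 even though the softmax was taken along dim 2. Quickly confirmed in NumPy:

import numpy as np

t = np.zeros((1, 1, 4, 4))
q = np.exp(t) / np.exp(t).sum(axis=2, keepdims=True)   # softmax along dim 2
print(q.sum(axis=3)[0, 0, 0])                          # 1.0 for the all-zero input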
Code Example #14
def test_two_grad_broadcast(fn, backend, data):
    "Run backward for all two arg functions above with broadcast."

    t1, t2 = data.draw(shaped_tensors(2, backend=backend))
    minitorch.grad_check(fn[1], t1, t2)

    # broadcast check
    minitorch.grad_check(fn[1], t1.sum(0), t2)
    minitorch.grad_check(fn[1], t1, t2.sum(0))
Code Example #15
def test_two_grad_broadcast(fn, backend, data):
    "Run backward for all two arg functions above with broadcast."
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
    name, base_fn, tensor_fn = fn

    grad_check(tensor_fn, t1, t2)

    # broadcast check
    grad_check(tensor_fn, t1.sum(0), t2)
    grad_check(tensor_fn, t1, t2.sum(0))
Code Example #16
def test_max(t):
    t_np = t.to_numpy()

    compare_0 = t_np.max(axis=0)
    out_0 = minitorch.Max.apply(t, 0)
    assert out_0.shape == (1, 3, 4)
    assert np.array_equal(compare_0.reshape(-1), out_0.to_numpy().reshape(-1))
    minitorch.grad_check(lambda t: minitorch.Max.apply(t, 0),
                         t + (minitorch.rand(t.shape) * 1e-5))

    compare_1 = t_np.max(axis=1)
    out_1 = minitorch.Max.apply(t, 1)
    assert out_1.shape == (2, 1, 4)
    assert np.array_equal(compare_1.reshape(-1), out_1.to_numpy().reshape(-1))
    minitorch.grad_check(lambda t: minitorch.Max.apply(t, 1),
                         t + (minitorch.rand(t.shape) * 1e-5))

    compare_2 = t_np.max(axis=2)
    out_2 = minitorch.Max.apply(t, 2)
    assert out_2.shape == (2, 3, 1)
    assert np.array_equal(compare_2.reshape(-1), out_2.to_numpy().reshape(-1))
    minitorch.grad_check(lambda t: minitorch.Max.apply(t, 2),
                         t + (minitorch.rand(t.shape) * 1e-5))
Code Example #17
def test_two_grad(fn, backend, data):
    "Run backward for all two arg functions above."
    t1, t2 = data.draw(shaped_tensors(2, backend=shared[backend]))
    name, _, tensor_fn = fn
    grad_check(tensor_fn, t1, t2)
Code Example #18
def test_one_derivative(fn, backend, data):
    "Run backward for all one arg functions above."
    t1 = data.draw(tensors(backend=shared[backend]))
    name, _, tensor_fn = fn
    grad_check(tensor_fn, t1)
Code Example #19
def test_conv1d(input, weight):
    out = minitorch.Conv1dFun.apply(input, weight)
    minitorch.grad_check(minitorch.Conv1dFun.apply, input, weight)
    print('=============')
    print(input, weight)
    print(out)
Code Example #20
def test_conv_batch(input, weight):
    minitorch.grad_check(minitorch.Conv2dFun.apply, input, weight)
Code Example #21
def test_two_grad(fn, ts):
    t1, t2 = ts
    minitorch.grad_check(fn[1], t1, t2)
Code Example #22
File: test_tensor.py Project: minitorch/Module-2
def test_back_view(t1):
    def view(a):
        a = a.contiguous()
        return a.view(a.size)

    grad_check(view, t1)
Code Example #23
File: test_tensor.py Project: minitorch/Module-2
def test_one_derivative(fn, t1):
    name, _, tensor_fn = fn
    grad_check(tensor_fn, t1)
Code Example #24
def test_conv1d(input, weight):
    minitorch.grad_check(minitorch.Conv1dFun.apply, input, weight)
Code Example #25
File: test_tensor.py Project: minitorch/Module-2
def test_reduce(fn, t1):
    name, _, tensor_fn = fn
    grad_check(tensor_fn, t1)
Code Example #26
def test_one_derivative(fn, t1):
    minitorch.grad_check(fn[1], t1)
Code Example #27
def test_reduce(fn, t1):
    minitorch.grad_check(fn[1], t1)
Code Example #28
def test_conv_channel(input, weight):
    # Run several times for random seed
    for _ in range(5):
        minitorch.grad_check(minitorch.Conv2dFun.apply, input, weight)
Code Example #29
def test_reduce(fn, backend, data):
    "Run backward for all reduce functions above."
    t1 = data.draw(tensors(backend=shared[backend]))
    name, _, tensor_fn = fn
    grad_check(tensor_fn, t1)
Code Example #30
File: test_tensor.py Project: minitorch/Module-2
def test_two_grad(fn, ts):
    name, _, tensor_fn = fn
    t1, t2 = ts
    grad_check(tensor_fn, t1, t2)