Example 1
def test_softmax(t):
    q = minitorch.softmax(t, 3)
    x = q.sum(dim=3)
    assert_close(x[0, 0, 0, 0], 1.0)

    q = minitorch.softmax(t, 1)
    x = q.sum(dim=1)
    assert_close(x[0, 0, 0, 0], 1.0)

    minitorch.grad_check(lambda a: minitorch.softmax(a, dim=2), t)
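
A softmax over a dimension just exponentiates and normalizes, so the sums checked
above are 1 by construction. The sketch below is plain Python rather than minitorch
and works one row by hand, which is all the assertion relies on.

import math

# Hand-computed softmax of one row: exponentiate each entry, divide by the total.
row = [0.0, 1.0, 2.0]
exps = [math.exp(v) for v in row]
total = sum(exps)
soft = [e / total for e in exps]
print(soft)       # roughly [0.090, 0.245, 0.665]
print(sum(soft))  # 1.0 up to float rounding, which is what assert_close verifies
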
Example 2
def test_conv2():
    # A 1x1x4x4 input "image" whose rows are [0, 1, 2, 3].
    t = minitorch.tensor_fromlist([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3],
                                   [0, 1, 2, 3]]).view(1, 1, 4, 4)
    t.requires_grad_(True)

    # A 1x1x2x2 all-ones kernel.
    t2 = minitorch.tensor_fromlist([[1, 1], [1, 1]]).view(1, 1, 2, 2)
    t2.requires_grad_(True)
    out = minitorch.Conv2dFun.apply(t, t2)
    out.sum().backward()

    minitorch.grad_check(minitorch.Conv2dFun.apply, t, t2)
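
Both conv examples lean on minitorch.grad_check, which compares the gradient produced
by backward against a numerical estimate. The idea can be shown with a generic central
difference on a scalar function; this is an illustration of the principle, not
minitorch's implementation.

def central_difference(f, x, eps=1e-6):
    # Numerical derivative estimate: (f(x + eps) - f(x - eps)) / (2 * eps).
    return (f(x + eps) - f(x - eps)) / (2 * eps)

def cube(x):
    return x ** 3  # analytic derivative: 3 * x ** 2

x0 = 2.0
numeric = central_difference(cube, x0)
analytic = 3 * x0 ** 2
assert abs(numeric - analytic) < 1e-4  # both are ~12.0
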
Example 3
def test_avg(t):
    # Each pooled cell should equal the mean of the corresponding window.
    out = minitorch.avgpool2d(t, (2, 2))
    assert out[0, 0, 0, 0] == (
        sum(t[0, 0, i, j] for i in range(2) for j in range(2)) / 4.0
    )

    out = minitorch.avgpool2d(t, (2, 1))
    assert out[0, 0, 0, 0] == (
        sum(t[0, 0, i, j] for i in range(2) for j in range(1)) / 2.0
    )

    out = minitorch.avgpool2d(t, (1, 2))
    assert out[0, 0, 0, 0] == (
        sum(t[0, 0, i, j] for i in range(1) for j in range(2)) / 2.0
    )

    minitorch.grad_check(lambda a: minitorch.avgpool2d(a, (2, 2)), t)
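
For a concrete picture of the first assertion, the snippet below average-pools a
4x4 grid with a 2x2 window in plain Python (not minitorch): every output cell is
the mean of one non-overlapping 2x2 block.

# Hand-computed 2x2 average pooling on a 4x4 grid.
grid = [[0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]]

kh, kw = 2, 2
pooled = [
    [
        sum(grid[i * kh + di][j * kw + dj]
            for di in range(kh) for dj in range(kw)) / (kh * kw)
        for j in range(len(grid[0]) // kw)
    ]
    for i in range(len(grid) // kh)
]
print(pooled)  # [[2.5, 4.5], [10.5, 12.5]]
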
Example 4
def test_two_grad_broadcast(fn, ts):
    # fn is a (name, function) pair from the parametrization; fn[1] is the callable.
    t1, t2 = ts
    minitorch.grad_check(fn[1], t1, t2)

    # Broadcast check: reducing one argument away forces fn to broadcast.
    minitorch.grad_check(fn[1], t1.sum(0), t2)
    minitorch.grad_check(fn[1], t1, t2.sum(0))
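
The fn[1] indexing suggests fn is a (name, function) pair supplied by the test
parametrization, and ts is a pair of tensors produced by a strategy. The sketch below
shows one plausible wiring with fixed tensors standing in for the generated ones; the
test name, function list, and decorator here are assumptions, not the suite's actual
setup.

import pytest
import minitorch

# Hypothetical parametrization: each entry is a (name, function) pair,
# so fn[1] inside the test body is the callable handed to grad_check.
two_arg = [
    ("add", lambda a, b: a + b),
    ("mul", lambda a, b: a * b),
]

@pytest.mark.parametrize("fn", two_arg)
def test_two_grad_pair(fn):
    # Small fixed tensors stand in for the strategy-generated `ts` pair.
    t1 = minitorch.tensor_fromlist([[0.3, -0.2], [0.5, 0.1]])
    t2 = minitorch.tensor_fromlist([[1.2, 0.4], [-0.7, 0.9]])
    t1.requires_grad_(True)
    t2.requires_grad_(True)
    minitorch.grad_check(fn[1], t1, t2)
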
Example 5
def test_two_grad(fn, ts):
    t1, t2 = ts
    minitorch.grad_check(fn[1], t1, t2)
Example 6
def test_reduce(fn, t1):
    minitorch.grad_check(fn[1], t1)
Example 7
def test_one_derivative(fn, t1):
    minitorch.grad_check(fn[1], t1)
Example 8
def test_conv_channel(input, weight):
    minitorch.grad_check(minitorch.Conv2dFun.apply, input, weight)
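
Here input and weight come from the test harness; for Conv2dFun.apply the usual layout
is input shaped (batch, in_channels, height, width) and weight shaped (out_channels,
in_channels, kernel_height, kernel_width). The sketch below makes those shapes explicit;
the particular sizes and the use of minitorch.rand to build the tensors are assumptions,
not the suite's actual strategy.

import minitorch

# Assumed shapes following the (batch, channels, height, width) convention.
input = minitorch.rand((2, 3, 6, 6))   # batch of 2, 3 input channels, 6x6 images
weight = minitorch.rand((4, 3, 2, 2))  # 4 filters over 3 channels, 2x2 kernels
input.requires_grad_(True)
weight.requires_grad_(True)

out = minitorch.Conv2dFun.apply(input, weight)
out.sum().backward()
minitorch.grad_check(minitorch.Conv2dFun.apply, input, weight)
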