Example 1
def test_tensor_mul():
    a = tensor(data=[1, 2, 3])
    b = tensor(data=[-1, 3, 1])
    amb = a * b

    assert amb.tolist() == [-1, 6, 3]
    assert not amb.requires_grad
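The snippets in this collection use tensor, tor4, nn, and np without showing the test module's header; presumably the original file opens with imports roughly like the following (the exact paths are an assumption about how the tor4 package is laid out):

# Assumed imports for these snippets (the actual tor4 layout may differ):
import numpy as np

import tor4
from tor4 import nn, tensor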
Example 2
def test_softmax3d_backward():
    a = tor4.tensor(
        data=[[[0, 1, -1.0], [1, -2, 3]], [[1, 4, -2], [0, 0, -3]]],
        requires_grad=True,
    )
    a_sm = nn.functional.softmax(a, dim=1)
    a_sm.backward(
        tor4.tensor([[[-5, 3, 0.0], [0, 0, 1]], [[3, 0, -3], [1, 2, 3]]]))

    assert a_sm.requires_grad
    assert np.allclose(
        a_sm.tolist(),
        [
            [[0.2689, 0.9526, 0.018], [0.7311, 0.0474, 0.982]],
            [[0.7311, 0.982, 0.7311], [0.2689, 0.018, 0.2689]],
        ],
        atol=1e-4,
        rtol=1e-4,
    )
    assert np.allclose(
        a.grad.tolist(),
        [
            [[-0.9831, 0.1355, -0.0177], [0.9831, -0.1355, 0.0177]],
            [[0.3932, -0.0353, -1.1797], [-0.3932, 0.0353, 1.1797]],
        ],
        atol=1e-4,
        rtol=1e-4,
    )
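The expected values here follow from the usual softmax Jacobian-vector product, dL/dx = s * (g - sum(g * s)) taken along the softmax dimension. A minimal NumPy cross-check, assuming only that tor4's softmax matches this standard definition:

import numpy as np

x = np.array([[[0, 1, -1.0], [1, -2, 3]], [[1, 4, -2], [0, 0, -3]]])
g = np.array([[[-5, 3, 0.0], [0, 0, 1]], [[3, 0, -3], [1, 2, 3]]])

# Softmax along dim=1 (each column of the 2x3 slices is normalized).
e = np.exp(x - x.max(axis=1, keepdims=True))
s = e / e.sum(axis=1, keepdims=True)

# Jacobian-vector product of softmax with the upstream gradient g.
grad_x = s * (g - (g * s).sum(axis=1, keepdims=True))

print(np.round(s, 4))       # matches the a_sm assertion above
print(np.round(grad_x, 4))  # matches the a.grad assertion above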
Example 3
def test_conv2d_backward5():
    a = tor4.tensor(
        [
            [
                [[0, 1, 2], [3, 4, 5.0]],
                [[0, 1, 2], [3, 4, 5.0]],
                [[0, 1, 2], [3, 4, 5.0]],
            ]
        ],
        requires_grad=True,
    )
    w = tor4.tensor(
        [[[[1, 0], [1, 1]]], [[[0, 1], [0, 1.0]]], [[[1, 0], [1, 1]]]],
        requires_grad=True,
    )

    aw = nn.functional.conv2d(a, w, groups=3)
    aw.backward(tor4.tensor([[[[2, -1]], [[1, 3]], [[3, -2.0]]]]))

    assert aw.tolist() == [[[[7, 10]], [[5, 7]], [[7, 10]]]]
    assert np.allclose(
        a.grad.sum(1).tolist(), [[[5, -2, 3], [5, 3, 0]]], rtol=1e-4, atol=1e-4
    )
    # assert np.allclose(a.grad.tolist(), [[[[2, -1, 0], [2, 1, -1]], [[0, 1, 3], [0, 1, 3.]], [[3, -2, 0], [3, 1, -2.]]]], rtol=1e-4, atol=1e-4)
    assert np.allclose(
        w.grad.tolist(),
        [[[[-1, 0], [2, 3]]], [[[3, 7], [15, 19]]], [[[-2, -1], [1, 2]]]],
        rtol=1e-4,
        atol=1e-4,
    )
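With groups=3 and three input/output channels, each output channel is the valid cross-correlation of a single input channel with its own 2x2 filter, which is where the [7, 10], [5, 7], [7, 10] rows come from. A small NumPy sketch of just the forward pass (not the tor4 API):

import numpy as np

a = np.array([[[0, 1, 2], [3, 4, 5.0]]] * 3)  # 3 input channels of shape 2x3
w = np.array([[[1, 0], [1, 1.0]],             # one single-channel 2x2 filter per group
              [[0, 1], [0, 1.0]],
              [[1, 0], [1, 1.0]]])

out = np.empty((3, 1, 2))
for c in range(3):        # groups=3: output channel c only sees input channel c
    for j in range(2):    # two valid horizontal positions
        out[c, 0, j] = (a[c, :, j:j + 2] * w[c]).sum()

print(out.tolist())  # [[[7.0, 10.0]], [[5.0, 7.0]], [[7.0, 10.0]]]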
Example 4
def test_tensor_matmul():
    a = tensor(data=[1, 2, 3])
    b = tensor(data=[4, 5, 6])
    amb = a @ b

    assert amb.tolist() == 32
    assert not amb.requires_grad
Example 5
def test_tensor_neg_backward():
    a = tensor(data=[1, 2, 3.0], requires_grad=True)
    na = -a
    na.backward(tensor([1, 1, 1]))

    assert na.tolist() == [-1, -2, -3]
    assert na.requires_grad
    assert a.grad.tolist() == [-1, -1, -1]
Example 6
def test_view_backward():
    a = tor4.tensor([1.0, 2, 3, 4, 5, 6, 7, 8, 9], requires_grad=True)
    b = a.view(-1, 3)
    b.backward(tor4.tensor([[1, 1, 1], [2, 2, 2], [3, 4, 5.0]]))

    assert b.shape == (3, 3)
    assert b.tolist() == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    assert a.grad.tolist() == [1, 1, 1, 2, 2, 2, 3, 4, 5]
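Since view only reshapes, its backward simply reshapes the upstream gradient back to the input's shape, which is why a.grad is the flat [1, 1, 1, 2, 2, 2, 3, 4, 5] version of the 3x3 gradient passed to backward; the next example is the same idea in the other direction.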
Example 7
def test_view_backward2():
    a = tor4.tensor([[[1, 2], [3, 4], [5, 6.0]]], requires_grad=True)
    b = a.view(2, 3)
    b.backward(tor4.tensor([[0, 1, 0], [6, 3, 1.0]]))

    assert b.shape == (2, 3)
    assert b.tolist() == [[1, 2, 3], [4, 5, 6.0]]
    assert a.grad.tolist() == [[[0, 1], [0, 6], [3, 1]]]
Example 8
def test_tensor_imul_backward():
    a = tensor(data=[1, 2, 3.0], requires_grad=True)
    b = tensor(data=[-1, 3, 1])
    try:
        a *= b
        raise AssertionError()
    except RuntimeError:
        assert True
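This presumably mirrors PyTorch's behavior: an in-place operation on a leaf tensor that requires grad is rejected with a RuntimeError, since mutating the leaf would invalidate the gradient graph. test_tensor_iadd_backward below checks the same contract for +=.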
Example 9
def test_tensor_sum_backward2():
    a = tensor(data=[-1, 1, 2.0], requires_grad=True)
    a_sum = a.sum()
    a_sum.backward(tensor(3))

    assert a_sum.tolist() == 2
    assert a_sum.requires_grad
    assert a.grad.tolist() == [3, 3, 3]
Example 10
def test_tensor_sum_keepdim1_backward():
    a = tensor(data=[[-1, 1, 2], [1, 2, 3.0]], requires_grad=True)
    a_sum = a.sum(dim=1, keepdim=True)
    a_sum.backward(tensor(data=[[2], [3]]))

    assert a_sum.tolist() == [[2], [6]]
    assert a_sum.requires_grad
    assert a.grad.tolist() == [[2, 2, 2], [3, 3, 3]]
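The backward of a reduction along dim=1 just broadcasts the upstream gradient back across the reduced axis; roughly np.broadcast_to(np.array([[2], [3]]), (2, 3)) gives the [[2, 2, 2], [3, 3, 3]] expected for a.grad here.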
Example 11
def test_tensor_sum_keepdim2_backward():
    a = tensor(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)
    a_sum = a.sum(dim=1, keepdim=True)
    a_sum.backward(tensor(data=[[[2]], [[3]]]))

    assert a_sum.tolist() == [[[2]], [[6]]]
    assert a_sum.requires_grad
    assert a.grad.tolist() == [[[2], [2], [2]], [[3], [3], [3]]]
Example 12
def test_sigmoid_backward():
    a = tor4.tensor(data=[0.0, 0, 0], requires_grad=True)
    a_sigmoid = a.sigmoid()
    a_sigmoid.backward(tor4.tensor(data=[1, 1, 1]))

    assert a_sigmoid.tolist() == [0.5, 0.5, 0.5]
    assert a_sigmoid.requires_grad
    assert a.grad.tolist() == [0.25, 0.25, 0.25]
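The expected numbers follow directly from sigmoid(0) = 0.5 and the standard derivative sigmoid'(x) = s * (1 - s), which is 0.25 at x = 0. A quick NumPy check:

import numpy as np

x = np.zeros(3)
s = 1 / (1 + np.exp(-x))  # sigmoid(0) = 0.5
g = s * (1 - s)           # sigmoid'(0) = 0.25
print(s.tolist(), g.tolist())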
Example 13
def test_softmax_backward2():
    a = tor4.tensor(data=[0, 0, 0.0], requires_grad=True)
    a_sm = nn.functional.softmax(a, dim=-1)
    a_sm.backward(tor4.tensor([0, 1, -1.0]))

    assert a_sm.requires_grad
    assert a_sm.tolist() == [1 / 3, 1 / 3, 1 / 3]
    assert a.grad.tolist() == [0, 1 / 3, -1 / 3]
Example 14
def test_logsoftmax_backward():
    a = tor4.tensor([0.0, 0, 0], requires_grad=True)
    lsm = nn.functional.log_softmax(a, dim=-1)
    lsm.backward(tor4.tensor([1, 1, 2.0]))

    assert np.allclose(lsm.tolist(), np.log([1 / 3] * 3))
    assert lsm.requires_grad
    assert np.allclose(a.grad.tolist(), [-1 / 3, -1 / 3, 2 / 3])
Example 15
def test_tensor_add():
    a = tensor([1, 2, 3])
    b = tensor([4, 5, 6])
    apb = a + b

    assert apb.tolist() == [5, 7, 9]
    assert not apb.requires_grad
    assert apb.is_leaf
Example 16
def test_conv2d_backward3():
    a = tor4.tensor([[[[0, 1, 2], [3, 4, 5.0]]]])
    w = tor4.tensor([[[[1, 0], [1, 1]]], [[[0, 1], [0, 1.0]]]], requires_grad=True)

    try:
        nn.functional.conv2d(a, w, dilation=2)
        raise AssertionError()
    except RuntimeError:
        assert True
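The error is expected because dilation=2 stretches the 2x2 kernel over a 3x3 receptive field, which cannot fit the 2x3 input; tor4 presumably matches PyTorch, which raises a RuntimeError when the dilated kernel is larger than the input.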
Example 17
def test_tensor_rdiv_scalar_backward():
    a = tensor(data=[1.0, 2.0, -3.0], requires_grad=True)
    ad2 = 1 / a
    ad2.backward(tensor(data=[1.0, 1.0, 1.0]))

    assert ad2.tolist() == [1, 1 / 2, -1 / 3]
    assert ad2.requires_grad
    assert a.requires_grad
    assert a.grad.tolist() == [-1, -1 / 4, -1 / 9]
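Worked out: d(1/a)/da = -1/a**2, so with a = [1, 2, -3] and an all-ones upstream gradient the expected a.grad is [-1, -1/4, -1/9], exactly as asserted.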
Example 18
def test_tensor_div_scalar_backward():
    a = tensor(data=[1.0, 2.0, 3.0], requires_grad=True)
    ad2 = a / 2
    ad2.backward(tensor(data=[1.0, 1.0, 1.0]))

    assert ad2.tolist() == [1 / 2, 1, 3 / 2]
    assert ad2.requires_grad
    assert a.requires_grad
    assert a.grad.tolist() == [1 / 2, 1 / 2, 1 / 2]
Example 19
def test_tensor_iadd_backward():
    a = tensor([1, 2, 3.0], requires_grad=True)
    b = tensor([-1, 0, 1])

    try:
        a += b
        raise AssertionError()
    except RuntimeError:
        assert True
Example 20
def test_tensor_pow_scalar_backward():
    a = tensor(data=[1, 2, 3.0], requires_grad=True)
    ap2 = a ** 2
    ap2.backward(tensor(data=[1, 1, 1]))

    assert ap2.tolist() == [1, 4, 9]
    assert ap2.requires_grad
    assert a.requires_grad
    assert a.grad.tolist() == [2, 4, 6]
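Here d(a**2)/da = 2a, so the all-ones upstream gradient yields a.grad = [2, 4, 6].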
Example 21
def test_tensor_from_scalar():
    a = tensor(data=1)
    assert a.tolist() == 1
    assert not a.requires_grad
    assert a.is_leaf

    a = tensor(data=2.0, requires_grad=True)
    assert a.tolist() == 2.0
    assert a.requires_grad
    assert a.is_leaf
Example 22
def test_tensor_matmul2():
    a = tensor(data=[[1, 2, 3], [3, 2, 1]])
    b = tensor(data=[[1, 1], [1, 1], [1, 1]])
    amb = a @ b

    assert amb.tolist() == [
        [6, 6],
        [6, 6],
    ]
    assert not amb.requires_grad
    assert amb.shape == (2, 2)
Example 23
def test_max4_backward():
    a = tor4.tensor(data=[[10, 3, -1], [-3, 4, 10.0]], requires_grad=True)
    a_max, arg_max = a.max(dim=1, keepdim=True)
    a_max.backward(tor4.tensor([[1], [1]]))

    assert a_max.tolist() == [[10], [10]]
    assert a_max.requires_grad
    assert arg_max.tolist() == [[0], [2]]
    assert not arg_max.requires_grad
    assert a.grad.tolist() == [[1, 0, 0], [0, 0, 1]]
    assert a.requires_grad
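max along a dimension routes the upstream gradient only to the arg-max positions and leaves zeros elsewhere. A NumPy sketch of that scatter for this example (illustrative only, not tor4 code):

import numpy as np

a = np.array([[10, 3, -1], [-3, 4, 10.0]])
g = np.array([[1], [1.0]])                  # upstream gradient, keepdim=True

idx = a.argmax(axis=1)                      # [0, 2]
grad = np.zeros_like(a)
grad[np.arange(a.shape[0]), idx] = g[:, 0]  # scatter the gradient to the max entries
print(grad.tolist())                        # [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]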
Example 24
def test_max3_backward():
    a = tor4.tensor(data=[[1, 3, -1], [-3, 4, 1.0]], requires_grad=True)
    a_max, arg_max = a.max(dim=0)
    a_max.backward(tor4.tensor([1, 1, 1]))

    assert a_max.tolist() == [1, 4, 1]
    assert a_max.requires_grad
    assert arg_max.tolist() == [0, 1, 1]
    assert not arg_max.requires_grad
    assert a.grad.tolist() == [[1, 0, 0], [0, 1, 1]]
    assert a.requires_grad
Example 25
def test_dropout2d_eval_backward():
    a = tor4.tensor(data=[[[1, 2, 3], [1, 2, 3.0]]],
                    dtype="float32",
                    requires_grad=True)
    a_drop = nn.functional.dropout2d(a, p=0.8, training=False)
    a_drop.backward(tor4.tensor(data=[[[2, 2, 2], [2, 2, 2.0]]]))

    mask = a_drop.detach().numpy() != 0
    assert mask.all()
    assert a_drop.requires_grad
    assert a.grad.tolist() == [[[2, 2, 2], [2, 2, 2.0]]]
Example 26
def test_conv2d_backward31():
    a = tor4.arange(9, dtype=tor4.float32, requires_grad=True)
    b = a.view(1, 1, 3, 3)
    w = tor4.tensor([[[[1, 0], [1, 1]]], [[[0, 1], [0, 1.0]]]], requires_grad=True)

    aw = nn.functional.conv2d(b, w, dilation=2)
    aw.backward(tor4.tensor([[[[2]], [[1.0]]]]))

    assert aw.tolist() == [[[[14]], [[10]]]]
    assert np.allclose(a.grad.tolist(), [2, 0, 1, 0, 0, 0, 2, 0, 3])
    assert np.allclose(w.grad.tolist(), [[[[0, 4], [12, 16]]], [[[0, 2], [6, 8]]]])
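With dilation=2 the 2x2 kernel samples rows/columns 0 and 2 of the 3x3 input [[0, 1, 2], [3, 4, 5], [6, 7, 8]], i.e. the corners 0, 2, 6, 8. That gives 1*0 + 0*2 + 1*6 + 1*8 = 14 for the first filter and 0*0 + 1*2 + 0*6 + 1*8 = 10 for the second, matching the forward assertion; the weight gradients are those same corner values scaled by the upstream gradients 2 and 1.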
Example 27
def test_dropout_eval_backward():
    a = tor4.tensor(data=[1, 2, 3, 4, 5, 6, 7, 8.0],
                    dtype="float32",
                    requires_grad=True)
    a_drop = nn.functional.dropout(a, p=0.8, training=False)
    a_drop.backward(tor4.tensor(data=[10, 10, 10.0, 10, 10, 10, 10, 10]))

    mask = a_drop.detach().numpy() != 0
    assert mask.all()
    assert a_drop.requires_grad
    assert a.grad.tolist() == [10, 10, 10, 10, 10, 10, 10, 10]
Example 28
def test_logsoftmax_backward1():
    a = tor4.tensor([1.0, 2, 3], requires_grad=True)
    lsm = nn.functional.log_softmax(a, dim=-1)
    lsm.backward(tor4.tensor([1, 1, 2.0]))

    assert np.allclose(lsm.tolist(), [-2.4076, -1.4076, -0.4076],
                       rtol=1e-4,
                       atol=1e-4)
    assert lsm.requires_grad
    assert np.allclose(a.grad.tolist(), [0.6399, 0.0211, -0.661],
                       atol=1e-4,
                       rtol=1e-4)
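For log-softmax the backward pass is g - softmax(x) * sum(g). A NumPy cross-check of both asserted arrays, assuming the standard definition:

import numpy as np

x = np.array([1.0, 2, 3])
g = np.array([1, 1, 2.0])

s = np.exp(x - x.max())
s = s / s.sum()           # softmax(x)
lsm = np.log(s)           # log_softmax(x) ~ [-2.4076, -1.4076, -0.4076]
grad_x = g - s * g.sum()  # ~ [0.6399, 0.0211, -0.6610]

print(np.round(lsm, 4), np.round(grad_x, 4))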
Example 29
def test_tensor_mul_broadcast2_backward():
    a = tensor(data=[[1, 2, 3], [1, 1, 2]])
    b = tensor(data=[[-1, 3, 1.0]], requires_grad=True)
    amb = a * b
    amb.backward(tensor([[1, 1, 1], [1, 1, 1]]))

    assert amb.tolist() == [[-1, 6, 3], [-1, 3, 2]]
    assert not a.requires_grad
    assert b.requires_grad
    assert amb.requires_grad
    assert a.grad is None
    assert b.grad.tolist() == [[2, 3, 5]]
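When b is broadcast along the row dimension, its gradient accumulates over that dimension: b.grad = (upstream * a).sum(axis=0, keepdims=True), and with an all-ones upstream that is just the column sums of a, [[2, 3, 5]].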
Example 30
def test_tensor_mul_broadcast3_backward():
    a = tensor(data=[[[1, 2, 3], [1, 1, 2]], [[1, 2, 3], [1, 1, 2]]])
    b = tensor(data=[[1], [0.0]], requires_grad=True)
    amb = a * b
    amb.backward(tensor([[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]]))

    assert amb.tolist() == [[[1, 2, 3], [0, 0, 0]], [[1, 2, 3], [0, 0, 0]]]
    assert not a.requires_grad
    assert b.requires_grad
    assert amb.requires_grad
    assert a.grad is None
    assert b.grad.tolist() == [[12], [8]]
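Same rule in the 3-D case: b has shape (2, 1) and is broadcast over the leading and trailing axes, so its gradient is the upstream gradient times a, summed over those axes. A NumPy check:

import numpy as np

a = np.array([[[1, 2, 3], [1, 1, 2]], [[1, 2, 3], [1, 1, 2.0]]])
g = np.ones_like(a)                          # upstream gradient of ones

# b is broadcast from (2, 1) to (2, 2, 3): sum the gradient over axes 0 and 2.
b_grad = (g * a).sum(axis=(0, 2))[:, None]
print(b_grad.tolist())                       # [[12.0], [8.0]]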