Example #1
def test_tensor_matmul():
    a = sn.ones((3, 4), requires_grad=True)
    b = sn.ones((4, 3), requires_grad=True)
    c = sn.matmul(a, b)
    c.set_retain_grad()  # keep the gradient of the intermediate (3, 3) result
    d = c.sum()
    d.backward()
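The expected gradients follow from the chain rule: with d = sum(a @ b), the gradient of a is ones((3, 3)) @ b.T and the gradient of b is a.T @ ones((3, 3)), so both come out as constant 3.0 here. A minimal NumPy sketch of that check, independent of the sn API:

import numpy as np

A, B = np.ones((3, 4)), np.ones((4, 3))
upstream = np.ones((3, 3))  # gradient of sum() w.r.t. c
grad_a = upstream @ B.T     # every entry is 3.0
grad_b = A.T @ upstream     # every entry is 3.0
assert np.allclose(grad_a, 3.0) and np.allclose(grad_b, 3.0)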
Example #2
def test_tensor_sub():
    a = sn.zeros((3, 4), requires_grad=True)
    a = a - 1.0  # tensor - scalar
    a = 2.0 - a  # scalar - tensor (reflected operand order)
    b = sn.ones((3, 1))
    b = b - a    # broadcast (3, 1) against (3, 4)
    c = sn.ones((1, 4))
    c = c - b    # broadcast (1, 4) against (3, 4)
    d = sn.random((3, 4))
    d = d - c
    e = d.sum()
    e.backward()
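When a broadcast operand receives its gradient, the upstream gradient has to be summed over the broadcast axes so the shapes match again; subtraction additionally flips the sign on the right-hand operand. A small NumPy illustration of that reduction rule, using the shapes from this test (not the sn internals):

import numpy as np

upstream = np.ones((3, 4))                      # gradient arriving at a (3, 4) result
grad_col = upstream.sum(axis=1, keepdims=True)  # (3, 1) left operand of b - a
grad_row = upstream.sum(axis=0, keepdims=True)  # (1, 4) left operand of c - b
grad_sub = -upstream                            # subtrahend: sign flip, shape already matches
assert grad_col.shape == (3, 1) and grad_row.shape == (1, 4)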
Example #3
def test_tensor_add():
    a = sn.zeros((3, 4), requires_grad=True)
    a = a + 1.0  # tensor + scalar
    a = 2.0 + a  # scalar + tensor (reflected operand order)
    b = sn.ones((3, 1))
    b = b + a    # broadcast (3, 1) against (3, 4)
    c = sn.ones((1, 4))
    c = c + b    # broadcast (1, 4) against (3, 4)
    d = sn.random((3, 4))
    d = d + c
    e = d.sum()
    e.backward()
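Since every operation in this chain is an addition, the per-element derivative along each path is 1 and the broadcast operands only change shapes, so the leaf created by sn.zeros should end up with a gradient of all ones regardless of what sn.random produced.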
Example #4
def test_tensor_opt():
    x = sn.ones((2, 1), requires_grad=True)
    b = sn.ones((2, 1))
    data = b.detach()
    data[0] = 0.5  # writes through the detached view: b becomes [[0.5], [1.0]]

    for i in range(1000):
        y = sn.sum((x * b - 1)**2)
        y.backward()
        x.update_data(0.01)  # gradient-descent step with step size 0.01
        x.zero_grad()
    data = x.to_cpu().detach()
    assert np.linalg.norm(data - np.array([[2], [1]])) < 1e-2
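With b fixed at [[0.5], [1.0]], the loss sum((x * b - 1)**2) separates per element and is minimized at x = 1 / b = [[2.0], [1.0]], which is exactly what the assertion checks. A plain-NumPy version of the same descent, assuming x.update_data(lr) amounts to x -= lr * x.grad:

import numpy as np

x = np.ones((2, 1))
b = np.array([[0.5], [1.0]])
for _ in range(1000):
    grad = 2 * (x * b - 1) * b  # d/dx of sum((x * b - 1)**2)
    x -= 0.01 * grad
assert np.linalg.norm(x - np.array([[2.0], [1.0]])) < 1e-2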
Example #5
def test_tensor_asstride():
    x = sn.ones((3, 4), requires_grad=True)
    b = x[:, 2]         # strided view of column 2
    c = x[:, 2] * 0.33  # second path through the same column
    d = c + b
    c = d.sum()
    c.backward()
    print(c, x, x.grad)
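Both b and c are views of column 2 of x, so their gradients should accumulate during backward: x.grad is expected to be zero everywhere except column 2, where each entry collects 1.0 from b plus 0.33 from c, i.e. 1.33.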
Example #6
def test_tensor_div():
    a = 0.5 / sn.ones((3, 4), requires_grad=True)  # scalar / tensor (reflected)
    a = a / 2.0
    b = sn.random((3, 1))
    c = b / a    # broadcast (3, 1) against (3, 4)
    d = sn.random((1, 4))
    d = d / c    # broadcast (1, 4) against (3, 4)
    e = d.sum()
    e.backward()
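Unlike add and sub, division has value-dependent gradients: for y = u / v, dy/du = 1 / v and dy/dv = -u / v**2 (the reflected 0.5 / t likewise contributes -0.5 / t**2). A quick central-difference check of the denominator rule in plain NumPy:

import numpy as np

rng = np.random.default_rng(0)
u = rng.random((3, 4)) + 1.0  # keep values away from zero
v = rng.random((3, 4)) + 1.0
eps = 1e-6
analytic = -u / v**2
numeric = (u / (v + eps) - u / (v - eps)) / (2 * eps)
assert np.allclose(analytic, numeric, atol=1e-4)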
Example #7
def test_tensor_mul():
    a = 0.5 * sn.ones((3, 4), requires_grad=True)  # scalar * tensor (reflected)
    a = a * 2.0
    b = sn.random((3, 1))
    c = b * a    # broadcast (3, 1) against (3, 4)
    d = sn.random((1, 4))
    d = d * c    # broadcast (1, 4) against (3, 4)
    e = d.sum()
    e.backward()
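For products the rule is symmetric: each operand receives the upstream gradient multiplied by the other operand's value, reduced over the broadcast axes exactly as in the sub test above.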
Example #8
def optimize_and_store(name="sgd"):
    # n, create_optimizer and optim_options are defined elsewhere in the test module
    storage = np.zeros((n, 2))

    x = sn.ones((2, 1), requires_grad=True)
    opt = create_optimizer(name, {"x": x}, optim_options)
    for i in range(n):
        storage[i] = x.data.reshape((-1,))
        x1, x2 = x[0], x[1]
        y = x1 ** 2 + 2 * x2 ** 2 - 2 * x1 * x2 - 4 * x1
        y.backward()
        opt.step()
        opt.zero_grad()
    return storage
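The quadratic y = x1**2 + 2*x2**2 - 2*x1*x2 - 4*x1 has a unique minimum where its gradient vanishes, i.e. where 2*x1 - 2*x2 - 4 = 0 and 4*x2 - 2*x1 = 0, which gives (x1, x2) = (4, 2); every stored trajectory should head toward that point. Verifying the stationary point in NumPy:

import numpy as np

# y = 0.5 * x^T H x + g^T x with H = [[2, -2], [-2, 4]] and g = [-4, 0];
# H is positive definite, so the minimizer solves H x = -g
H = np.array([[2.0, -2.0], [-2.0, 4.0]])
g = np.array([-4.0, 0.0])
assert np.allclose(np.linalg.solve(H, -g), [4.0, 2.0])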
Example #9
def test_tensor_log():
    a = sn.ones((3, 4), requires_grad=True)
    b = sn.log(a)
    c = b.sum()
    c.backward()
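Since d(log a)/da = 1 / a and a is all ones here, a.grad after backward should be a (3, 4) tensor of ones.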
Example #10
def test_tensor_pow():
    a = sn.ones((3, 4), requires_grad=True)
    b = sn.pow(a, 2)
    c = b.sum()
    c.backward()
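By the power rule d(a**2)/da = 2 * a, so with a all ones the expected a.grad is a (3, 4) tensor filled with 2.0.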