Beispiel #1
0
 def test_tensor_mul():
     """Multiply with a scalar on both sides, then broadcast a column and a row; backward must run."""
     t = 0.5 * sn.ones((3, 4), requires_grad=True)
     t = t * 2.0
     col = sn.random((3, 1))  # broadcasts over columns
     t = col * t
     row = sn.random((1, 4))  # broadcasts over rows
     t = row * t
     t.sum().backward()
Beispiel #2
0
 def test_tensor_div():
     """Divide with a scalar on both sides, then broadcast a column and a row; backward must run."""
     t = 0.5 / sn.ones((3, 4), requires_grad=True)
     t = t / 2.0
     col = sn.random((3, 1))  # broadcasts over columns
     t = col / t
     row = sn.random((1, 4))  # broadcasts over rows
     t = row / t
     t.sum().backward()
Beispiel #3
0
 def test_cross_entropy_layer():
     """CrossEntropyLayer on random inputs vs. one-hot targets; backward must run."""
     logits = sn.random((3, 4), requires_grad=True)
     targets = sn.zeros((3, 4))
     # One-hot rows written in place over the zero tensor.
     targets.set_values(np.array([[0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0]]))
     layer = CrossEntropyLayer()
     layer(logits, targets).backward()
Beispiel #4
0
 def test_tensor_sub():
     """Subtract with scalars on both sides, then chain column/row/full-shape tensors; backward must run."""
     t = sn.zeros((3, 4), requires_grad=True)
     t = t - 1.0
     t = 2.0 - t
     col = sn.ones((3, 1))
     col = col - t
     row = sn.ones((1, 4))
     row = row - col
     noise = sn.random((3, 4))
     noise = noise - row
     noise.sum().backward()
Beispiel #5
0
 def test_tensor_add():
     """Add with scalars on both sides, then chain column/row/full-shape tensors; backward must run."""
     t = sn.zeros((3, 4), requires_grad=True)
     t = t + 1.0
     t = 2.0 + t
     col = sn.ones((3, 1))
     col = col + t
     row = sn.ones((1, 4))
     row = row + col
     noise = sn.random((3, 4))
     noise = noise + row
     noise.sum().backward()
Beispiel #6
0
 def test_run_on_gpu():
     """Build a small graph on the CUDA device and backprop through it."""
     t = sn.random((3, 4), device="cuda", requires_grad=True)
     scaled = t * 2 + 1
     total = sn.sum(scaled)
     total = total * 2
     total.backward()
Beispiel #7
0
 def __init__(self, features, samples):
     """Create a fixed random dataset: inputs of shape (samples, features)
     and targets of shape (samples, 1), both excluded from autograd.

     Args:
         features: number of input features per sample.
         samples: number of samples (rows).
     """
     # requires_grad=False: this data is a constant fixture, not trainable.
     self.initial_x = sn.random((samples, features), requires_grad=False)
     # presumably a regression-style single-column target; verify against caller
     self.initial_y = sn.random((samples, 1), requires_grad=False)
Beispiel #8
0
 def test_mse_layer():
     """MSELayer between a grad-tracking prediction and a random target; backward must run."""
     pred = sn.random((10, 3), requires_grad=True)
     target = sn.random((10, 3))
     layer = MSELayer()
     layer(pred, target).backward()
Beispiel #9
0
 def test_relu_layer():
     # NOTE(review): this test is named test_relu_layer but instantiates
     # SigmoidLayer — it is byte-for-byte the body of test_sigmoid_layer,
     # so it looks like a copy-paste error. Presumably the project's ReLU
     # layer class should be constructed here instead; its exact name is
     # not visible in this file. TODO(owner): confirm and fix.
     r = SigmoidLayer()
     x = sn.random((3, 4), requires_grad=True)
     x = r(x)
     loss = x.sum()
     loss.backward()
Beispiel #10
0
 def test_tanh_layer():
     """TanhLayer forward pass followed by sum-reduction and backward."""
     layer = TanhLayer()
     out = layer(sn.random((3, 4), requires_grad=True))
     out.sum().backward()
Beispiel #11
0
 def test_sigmoid_layer():
     """SigmoidLayer forward pass followed by sum-reduction and backward."""
     layer = SigmoidLayer()
     out = layer(sn.random((3, 4), requires_grad=True))
     out.sum().backward()
Beispiel #12
0
 def test_linear_layer():
     """LinearLayer (3 inputs -> 1 output) forward pass, sum-reduction, backward."""
     layer = LinearLayer(input_nodes=3, output_nodes=1)
     out = layer(sn.random((1, 3)))
     out.sum().backward()
Beispiel #13
0
 def test_tensor_cross_entropy():
     """Functional cross-entropy against one-hot targets; backward must run."""
     logits = sn.random((3, 4), requires_grad=True)
     onehot = sn.zeros((3, 4))
     # One-hot rows written in place over the zero tensor.
     onehot.set_values(np.array([[0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0]]))
     sn.cross_entropy(logits, onehot).backward()
Beispiel #14
0
 def test_tensor_mse():
     """Functional mse between two grad-tracking tensors; backward must run."""
     pred = sn.random((3, 4), requires_grad=True)
     target = sn.random((3, 4), requires_grad=True)
     sn.mse(pred, target).backward()
Beispiel #15
0
 def test_tensor_relu():
     """Functional relu with a retained gradient on the intermediate node."""
     src = sn.random((3, 4), requires_grad=True)
     activated = sn.relu(src)
     activated.set_retain_grad()  # keep grad on this non-leaf tensor
     activated.sum().backward()