Example #1
 def setUp(self):
     self.a = Variable([[1,2], [2,3]])
     self.b = Variable([[4,5], [6,7]])
     self.f = BinaryFunctionImpl(a=self.a,
                                 b=self.b)
     self.f.register(self.f)
     self.f.new_context()
Example #2
    def test_all(self):
        x = Variable([[1, 2, 3]])
        y = (x > 0).all()
        self.assertTrue(y())

        x = Variable([[1, 2, 3]])
        y = (x > 1).all()
        self.assertFalse(y())
Example #3
 def test_running_multiple_tensors(self):
     a = Variable(3)
     b = Variable(2)
     c = a * b
     ans_a, ans_b, ans_c = run([a, b, c])
     self.assertEqual(ans_a, 3)
     self.assertEqual(ans_b, 2)
     self.assertEqual(ans_c, 6)
Example #4
 def test_valid_function_result(self):
     a = Variable(1)
     b = Variable(2)
     c = a + b
     expected = 3
     actual = run(c)
     self.assertIsNotNone(actual)
     self.assertEqual(expected, actual)
Example #5
    def test_any(self):
        x = Variable([[1, 2, 3], [4, 5, 6]])
        y = (x > 5).any()
        self.assertTrue(y())

        x = Variable([[1, 2, 3], [4, 5, 6]])
        y = (x > 6).any()
        self.assertFalse(y())
Example #6
 def test_correct_context_assigned(self):
     a = Variable(1)
     b = Variable(2)
     c = a + b
     d = a + 1
     new_backward_context({a, b, c})
     self.assertEqual({c}, set(a.ctx))
     self.assertEqual({c}, set(b.ctx))
     self.assertEqual(set(), set(c.ctx))
Example #7
 def test_ignore_invalid_nodes(self):
     a = Variable(1)
     b = Variable(2)
     c = Variable(3)
     d = a + b
     e = b + c
     f = d + e
     expected = 3
     actual = run(d)
     self.assertEqual(expected, actual)
Example #8
 def test_correct_dependencies(self):
     a = Variable([1, 2, 3])
     b = two(a)
     x = a + b
     y = two(x)
     z = Variable(3)
     c = z + x
     self.assertEqual({a}, a.dependencies)
     self.assertEqual({b, a}, b.dependencies)
     self.assertEqual({x, a, b}, x.dependencies)
     self.assertEqual({y, x, a, b}, y.dependencies)
     self.assertEqual({z}, z.dependencies)
     self.assertEqual({c, z, x, a, b}, c.dependencies)
Example #9
 def test_correct_dependencies(self):
     a = Variable(0)
     b = Variable(1)
     c = a+b
     d = Variable(2)
     e = b+d
     f = c+d
     self.assertEqual({a}, a.dependencies)
     self.assertEqual({b}, b.dependencies)
     self.assertEqual({c,a,b}, c.dependencies)
     self.assertEqual({d}, d.dependencies)
     self.assertEqual({e,b,d}, e.dependencies)
     self.assertEqual({f,c,d,a,b}, f.dependencies)
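
The dependency assertions in Examples #8 and #9 encode a simple rule: a leaf node depends only on itself, and a derived node depends on itself plus the union of its operands' dependencies. A minimal, self-contained sketch of that rule (the Node class below is purely illustrative, not the library's implementation):

    class Node:
        """Toy stand-in for Variable: tracks only the dependency set."""
        def __init__(self, *operands):
            self.dependencies = {self}
            for operand in operands:
                self.dependencies |= operand.dependencies

    a, b, d = Node(), Node(), Node()
    c = Node(a, b)   # c = a + b
    e = Node(b, d)   # e = b + d
    f = Node(c, d)   # f = c + d
    assert f.dependencies == {f, c, d, a, b}   # mirrors the last assertion above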
Example #10
 def test_no_gradient_is_propagated(self):
     x = Variable(1)
     y = x + two(x)
     y.register(y)
     y.new_context()
     y.accumulate(y, 1)
     self.assertEqual(x.gradient, 1)
Example #11
 def test_dot_product_of_matrix_with_same_shape(self):
     a = Variable([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
     b = Variable([[2, 3, 4], [5, 6, 7], [8, 9, 0]])
     op = MatMul(a, b)
     op.register(op)
     op.new_context()
     expected = np.array([[36, 42, 18], [81, 96, 51], [126, 150, 84]])
     actual = op.forward()
     self.assertTrue((expected == actual).all())
     gradient = np.ones((3, 3))
     op.accumulate(op, gradient)
     expected1 = np.array([[9, 18, 17], [9, 18, 17], [9, 18, 17]])
     expected2 = np.array([[12, 12, 12], [15, 15, 15], [18, 18, 18]])
     actual1 = a.gradient
     actual2 = b.gradient
     self.assertTrue((expected1 == actual1).all())
     self.assertTrue((expected2 == actual2).all())
Example #12
 def test_dot_product_of_matrix_with_vector(self):
     a = Variable([[1, 2], [3, 4], [5, 6]])
     b = Variable([2, 3])
     op = MatMul(a, b)
     op.register(op)
     op.new_context()
     expected = np.array([8, 18, 28])
     actual = op.forward()
     self.assertTrue((expected == actual).all())
     gradient = np.ones((3, ))
     op.accumulate(op, gradient)
     expected1 = np.array([[2, 3], [2, 3], [2, 3]])
     expected2 = np.array([9, 12])
     actual1 = a.gradient
     actual2 = b.gradient
     self.assertTrue((expected1 == actual1).all())
     self.assertTrue((expected2 == actual2).all())
Example #13
 def test_dot_product_of_matrix_with_different_row_and_col(self):
     a = Variable([[1, 2, 3]])
     b = Variable([[4], [5], [6]])
     op = MatMul(a, b)
     op.register(op)
     op.new_context()
     expected = np.array([[32]])
     actual = op.forward()
     self.assertTrue((expected == actual).all())
     gradient = np.ones((1, 1))
     op.accumulate(op, gradient)
     expected1 = np.array([[4, 5, 6]])
     expected2 = np.array([[1], [2], [3]])
     actual1 = a.gradient
     actual2 = b.gradient
     self.assertTrue((expected1 == actual1).all())
     self.assertTrue((expected2 == actual2).all())
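
The expected gradients in Examples #11-#13 are consistent with the standard matmul backward rule: for C = A @ B with upstream gradient G, dL/dA = G @ B.T and dL/dB = A.T @ G. A quick NumPy cross-check of Example #13's numbers under that assumption:

    import numpy as np

    A = np.array([[1, 2, 3]])
    B = np.array([[4], [5], [6]])
    G = np.ones((1, 1))                                   # upstream gradient
    assert (A @ B == np.array([[32]])).all()              # forward result
    assert (G @ B.T == np.array([[4, 5, 6]])).all()       # gradient w.r.t. A
    assert (A.T @ G == np.array([[1], [2], [3]])).all()   # gradient w.r.t. B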
Example #14
 def test_correct_dependencies(self):
     x = Variable([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
     b = len(x)
     c = x * b
     d = b + x
     self.assertEqual({x}, x.dependencies)
     self.assertEqual({b, x}, b.dependencies)
     self.assertEqual({c, b, x}, c.dependencies)
     self.assertEqual({d, b, x}, d.dependencies)
Example #15
 def test_len_does_not_affect_gradient(self):
     x = Variable([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
     op = x * len(x)
     self.assertTrue((op.forward() == [[3, 6, 9], [12, 15, 18], [21, 24, 27]]).all())
     op.register(op)
     op.new_context()
     op.accumulate(op, [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
     self.assertTrue((x.gradient == [[3, 3, 3], [3, 3, 3], [3, 3, 3]]).all())
Example #16
 def test_correct_gradient_propagated(self):
     x = Variable([[1, 2], [3, 4]])
     y = (x > 2) * x
     expected = np.array([[0, 0], [3, 4]])
     y.register(y)
     actual = y()
     self.assertTrue((expected == actual).all())
     y.accumulate(y, np.ones_like(actual))
     expected = np.array([[0, 0], [1, 1]])
     actual = x.gradient
     self.assertTrue((expected == actual).all())
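
The expected gradient above follows from treating the comparison as a constant mask: for y = (x > 2) * x, the gradient w.r.t. x is the mask times the upstream gradient, so an all-ones upstream gradient yields [[0, 0], [1, 1]]. A small NumPy cross-check under that assumption (names here are illustrative only):

    import numpy as np

    x = np.array([[1, 2], [3, 4]])
    mask = (x > 2).astype(x.dtype)          # [[0, 0], [1, 1]]
    upstream = np.ones_like(x)
    assert (mask * upstream == np.array([[0, 0], [1, 1]])).all()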
Example #17
 def test_padding_with_4_pad_width(self):
     x = Variable(np.ones((2,2)))
     op = Pad(x, pad_width=((1,2), (3,4)))
     expected = np.pad(x.data, pad_width=((1,2), (3,4)))
     op.register(op)
     op.new_context()
     actual = op.forward()
     self.assertTrue((expected == actual).all())
     op.accumulate(op, np.ones_like(expected))
     expected = np.ones((2,2))
     actual = x.gradient
     self.assertTrue((expected == actual).all())
Example #18
 def test_gradient_reshaped(self):
     x = Variable([[1,2,3,4,5],
                   [6,7,8,9,10]])
     op = Reshape(x, (5,2))
     op.register(op)
     op.new_context()
     expected = (np.arange(10) + 1).reshape(5,2)
     actual = op.forward()
     self.assertTrue((expected == actual).all())
     op.accumulate(op, np.ones((5,2)))
     expected = np.ones_like(x.data)
     actual = x.gradient
     self.assertTrue((expected == actual).all())
Example #19
 def test_padding_with_scalar_pad_width(self):
     x = Variable([[1,2,3,4,5],
                   [6,7,8,9,10]])
     op = Pad(x, pad_width=1, constant_values=-1)
     expected = np.pad(x.data, pad_width=1, constant_values=-1)
     op.register(op)
     op.new_context()
     actual = op.forward()
     self.assertTrue((expected == actual).all())
     op.accumulate(op, np.ones_like(expected))
     expected = np.ones((2,5))
     actual = x.gradient
     self.assertTrue((expected == actual).all())
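
In Examples #17 and #19 the backward pass of Pad reduces to slicing the padded border away: an upstream gradient shaped like the padded output maps back onto the original region unchanged, which is why x.gradient is all ones with x's shape. A hedged NumPy sketch of that slicing (not the library's Pad implementation):

    import numpy as np

    x = np.arange(1, 11).reshape(2, 5)
    padded = np.pad(x, pad_width=1, constant_values=-1)   # shape (4, 7)
    upstream = np.ones_like(padded)
    grad_x = upstream[1:-1, 1:-1]                         # strip the pad again
    assert grad_x.shape == x.shape and (grad_x == 1).all()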
Example #20
 def test_gradient_flattened_excluding_dim_0(self):
     x = Variable([[[1, 2], [3, 4], [5, 6]],
                   [[7, 8], [9, 10], [11, 12]],
                   [[13, 14], [15, 16], [17, 18]]])
     op = Flatten(x)
     op.register(op)
     op.new_context()
     expected = (np.arange(18) + 1).reshape(3, 6)
     actual = op.forward()
     self.assertTrue((expected == actual).all())
     op.accumulate(op, (np.arange(18) + 1).reshape(3, 6))
     expected = (np.arange(18) + 1).reshape(3, 3, 2)
     actual = x.gradient
     self.assertTrue((expected == actual).all())
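
Reshape-style operations (Examples #18 and #20) only rearrange elements, so their backward pass is just another reshape: the upstream gradient is mapped back to the input's shape with every value preserved. A quick NumPy cross-check of Example #20's expectation:

    import numpy as np

    upstream = (np.arange(18) + 1).reshape(3, 6)   # gradient fed to Flatten above
    grad_x = upstream.reshape(3, 3, 2)             # back to the input's shape
    assert (grad_x == (np.arange(18) + 1).reshape(3, 3, 2)).all()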
Example #21
 def test_previous_gradients_wrt_x_are_accounted(self):
     a = Variable(np.arange(18).reshape(2, 3, 3))
     op = a * Conv1D(self.x, self.y, padding='VALID', stride=1)
     op.register(op)
     op.new_context()
     forward_result = op.forward()
     expected = [[[0, 252, 546], [999, 1464, 1995], [2610, 3360, 4200]],
                 [[6669, 8220, 9933], [10116, 12168, 14406],
                  [14175, 16800, 19635]]]
     self.assertTrue((expected == forward_result).all())
     gradient = np.ones(forward_result.shape)
     op.accumulate(op, gradient)
     expected = [[[8, 17], [52, 97], [186, 294], [340, 439], [296, 359]],
                 [[62, 152], [322, 529], [834, 1185], [934, 1195],
                  [674, 818]]]
     actual = self.x.gradient
     self.assertTrue((expected == actual).all())
Example #22
 def test_previous_gradients_wrt_y_are_accounted(self):
     a = Variable(np.arange(18).reshape(2, 3, 3))
     op = a * Conv1D(self.x, self.y, padding='VALID', stride=1)
     op.register(op)
     op.new_context()
     forward_result = op.forward()
     expected = [[[0, 252, 546], [999, 1464, 1995], [2610, 3360, 4200]],
                 [[6669, 8220, 9933], [10116, 12168, 14406],
                  [14175, 16800, 19635]]]
     self.assertTrue((expected == forward_result).all())
     gradient = np.ones(forward_result.shape)
     op.accumulate(op, gradient)
     expected = [[[519, 567, 615], [564, 618, 672]],
                 [[609, 669, 729], [654, 720, 786]],
                 [[699, 771, 843], [744, 822, 900]]]
     actual = self.y.gradient
     self.assertTrue((expected == actual).all())
Example #23
 def setUp(self) -> None:
     self.a = Variable([3., 2., 1., 0., 3., 0])
     self.b = Variable([0., 1., 2., 3., 4., 0])
Example #24
 def setUp(self) -> None:
     self.a = Variable([[1, 2, 3], [4, 5, 6]])
Example #25
 def test_gradient_is_propagated(self):
     x = Variable(1)
     y = Gradient(x)
     y.new_context()
     y.forward()
     self.assertTrue(x.gradient == 1)
Example #26
 def setUp(self) -> None:
     self.a = Variable([1, 2, 3, 4])
Example #27
 def setUp(self) -> None:
     self.a = Variable([[1, 2, 6], [3, 4, 5]])
     self.b = Variable([[1, 1, 1], [1, 1, 1]])
     self.c = Variable([[1, 1, 1]])
     self.d = Variable([[1], [2]])
     self.e = Variable(2)
Example #28
 def setUp(self) -> None:
     self.a = Variable(1)
     self.b = Variable(2)
     self.c = 3
Example #29
 def test_matmul(self):
     self.assertEqual(Variable([1]).matmul(Variable([2]))(), [2])
     self.assertEqual(Variable([1]).dot(Variable([2]))(), [2])
     self.assertEqual((Variable([1]) @ Variable([2]))(), [2])
Example #30
 def setUp(self) -> None:
     self.a = Variable([[0, 1], [1, 0], [1, 1], [2, 4]])