    def test_convolve2d(self):
        np.random.seed(1234)

        img = np.random.randn(1, 5, 5, 1)
        kernel = np.random.randn(3, 3, 1, 1)
        output = bn.convolve2d(img, kernel)
        self.assertTrue(
            np.allclose(
                output.value[0, ..., 0],
                correlate(img[0, ..., 0], kernel[..., 0, 0])[1:-1, 1:-1]
            )
        )

        p = bn.Parameter(kernel)
        output = bn.convolve2d(img, p, 2, 1)
        loss = bn.sum(bn.square(output))
        loss.backward()
        grad_backprop = p.grad
        grad_numerical = np.zeros_like(grad_backprop)
        eps = 1e-8
        for i, j in itertools.product(range(3), repeat=2):
            e = np.zeros_like(kernel)
            e[i, j] += eps
            loss_p = bn.sum(bn.square(bn.convolve2d(img, kernel + e, 2, 1))).value
            loss_m = bn.sum(bn.square(bn.convolve2d(img, kernel - e, 2, 1))).value
            grad_numerical[i, j] = (loss_p - loss_m) / (2 * eps)

        self.assertTrue(np.allclose(grad_backprop, grad_numerical))
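The gradient check above compares p.grad from backpropagation against a central-difference estimate, (loss(k + eps*e) - loss(k - eps*e)) / (2 * eps) for each kernel entry. The same technique in a standalone NumPy sketch, applied to a toy loss with a known gradient (the helper name numerical_grad is illustrative, not part of bn):

import itertools
import numpy as np

def numerical_grad(f, x, eps=1e-6):
    # Central-difference estimate of df/dx, one entry at a time.
    grad = np.zeros_like(x)
    for idx in itertools.product(*map(range, x.shape)):
        e = np.zeros_like(x)
        e[idx] = eps
        grad[idx] = (f(x + e) - f(x - e)) / (2 * eps)
    return grad

# Toy check: loss = sum(x ** 2) has the analytic gradient 2 * x.
x = np.random.randn(3, 3)
assert np.allclose(numerical_grad(lambda v: np.sum(v ** 2), x), 2 * x, atol=1e-4)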
Example #2
    def test_abs(self):
        np.random.seed(1234)
        x = bn.Parameter(np.random.randn(5, 7))
        sign = np.sign(x.value)
        y = bn.abs(x)
        self.assertTrue((y.value == np.abs(x.value)).all())

        for _ in range(10000):
            x.cleargrad()
            y = bn.abs(x)
            bn.square(y - 0.01).sum().backward()
            x.value -= x.grad * 0.001
        self.assertTrue(np.allclose(x.value, 0.01 * sign))
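This loop works because d|x|/dx = sign(x) for x != 0, so minimizing (|x| - 0.01) ** 2 drives every entry to magnitude 0.01 while keeping its original sign. A plain-NumPy sketch of the same descent, with the gradient written out by hand instead of relying on bn's autodiff:

import numpy as np

np.random.seed(1234)
x = np.random.randn(5, 7)
sign = np.sign(x)
for _ in range(10000):
    # d/dx sum((|x| - 0.01) ** 2) = 2 * (|x| - 0.01) * sign(x)
    x -= 0.001 * 2 * (np.abs(x) - 0.01) * np.sign(x)
assert np.allclose(x, 0.01 * sign)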
Example #3
    def test_logdet(self):
        A = np.array([[2., 1.], [1., 3.]])
        logdetA = np.linalg.slogdet(A)[1]
        self.assertTrue((logdetA == bn.linalg.logdet(A).value).all())

        A = bn.Parameter(A)
        for _ in range(100):
            A.cleargrad()
            logdetA = bn.linalg.logdet(A)
            loss = bn.square(logdetA - 1)
            loss.backward()
            A.value -= 0.1 * A.grad
        self.assertAlmostEqual(logdetA.value, 1)
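The descent above relies on the identity d(log det A)/dA = inv(A).T, which reduces to inv(A) for the symmetric A used here. A quick standalone NumPy check of that identity against central differences (a sketch, independent of bn):

import numpy as np

A = np.array([[2., 1.], [1., 3.]])
analytic = np.linalg.inv(A).T          # d(log det A) / dA

eps = 1e-6
numeric = np.zeros_like(A)
for i in range(2):
    for j in range(2):
        E = np.zeros_like(A)
        E[i, j] = eps
        numeric[i, j] = (np.linalg.slogdet(A + E)[1]
                         - np.linalg.slogdet(A - E)[1]) / (2 * eps)
assert np.allclose(analytic, numeric, atol=1e-6)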
Example #4
    def test_trace(self):
        arrays = [np.random.normal(size=(2, 2)), np.random.normal(size=(3, 4))]

        for arr in arrays:
            arr = bn.Parameter(arr)
            tr_arr = bn.linalg.trace(arr)
            self.assertEqual(tr_arr.value, np.trace(arr.value))

        a = np.array([[1.5, 0], [-0.1, 1.1]])
        a = bn.Parameter(a)
        for _ in range(100):
            a.cleargrad()
            loss = bn.square(bn.linalg.trace(a) - 2)
            loss.backward()
            a.value -= 0.1 * a.grad
        self.assertEqual(bn.linalg.trace(a).value, 2)
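Since d(tr(a))/da is the identity matrix, the gradient of (tr(a) - 2) ** 2 is 2 * (tr(a) - 2) * I: each update only shifts the diagonal, and the trace contracts geometrically toward 2. The same loop written by hand in NumPy (a sketch):

import numpy as np

a = np.array([[1.5, 0.], [-0.1, 1.1]])
for _ in range(100):
    # d/da (tr(a) - 2) ** 2 = 2 * (tr(a) - 2) * I
    a -= 0.1 * 2 * (np.trace(a) - 2) * np.eye(2)
assert np.isclose(np.trace(a), 2)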
Example #5
    def test_cholesky(self):
        A = np.array([[2., -1], [-1., 5.]])
        L = np.linalg.cholesky(A)
        Ap = bn.Parameter(A)
        L_test = bn.linalg.cholesky(Ap)
        self.assertTrue((L == L_test.value).all())

        T = np.array([[1., 0.], [-1., 2.]])
        for _ in range(1000):
            Ap.cleargrad()
            L_ = bn.linalg.cholesky(Ap)
            loss = bn.square(T - L_).sum()
            loss.backward()
            Ap.value -= 0.1 * Ap.grad

        self.assertTrue(np.allclose(Ap.value, T @ T.T))
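The final assertion holds because the Cholesky factorization of a symmetric positive-definite matrix is unique once the factor's diagonal is required to be positive: driving L_ toward the lower-triangular target T therefore drives Ap toward T @ T.T. The uniqueness itself is easy to confirm in plain NumPy (a sketch):

import numpy as np

T = np.array([[1., 0.], [-1., 2.]])
# T is lower triangular with a positive diagonal, so it is exactly
# the Cholesky factor of T @ T.T.
assert np.allclose(np.linalg.cholesky(T @ T.T), T)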
Example #6
    def test_solve(self):
        A = np.array([
            [2., 1.],
            [1., 3.]
        ])
        B = np.array([1., 2.])[:, None]
        AinvB = np.linalg.solve(A, B)
        self.assertTrue((AinvB == bn.linalg.solve(A, B).value).all())

        A = bn.Parameter(A)
        B = bn.Parameter(B)
        for _ in range(100):
            A.cleargrad()
            B.cleargrad()
            AinvB = bn.linalg.solve(A, B)
            loss = bn.square(AinvB - 1).sum()
            loss.backward()
            A.value -= A.grad
            B.value -= B.grad
        self.assertTrue(np.allclose(AinvB.value, 1))
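For x = solve(A, B) and a scalar loss L, the standard matrix-calculus results are dL/dB = solve(A.T, dL/dx) and dL/dA = -solve(A.T, dL/dx) @ x.T. A standalone NumPy sketch checking those formulas against central differences for the loss used above, sum((x - 1) ** 2) (independent of bn):

import numpy as np

A = np.array([[2., 1.], [1., 3.]])
B = np.array([[1.], [2.]])

def loss(A, B):
    return np.sum((np.linalg.solve(A, B) - 1) ** 2)

x = np.linalg.solve(A, B)
g = 2 * (x - 1)                  # dL/dx
dB = np.linalg.solve(A.T, g)     # analytic dL/dB
dA = -dB @ x.T                   # analytic dL/dA

eps = 1e-6
num_dA = np.zeros_like(A)
for i in range(2):
    for j in range(2):
        E = np.zeros_like(A)
        E[i, j] = eps
        num_dA[i, j] = (loss(A + E, B) - loss(A - E, B)) / (2 * eps)
assert np.allclose(dA, num_dA, atol=1e-6)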
Example #7
    def test_inverse(self):
        A = np.array([
            [2., 1.],
            [1., 3.]
        ])
        Ainv = np.linalg.inv(A)
        self.assertTrue((Ainv == bn.linalg.inv(A).value).all())

        B = np.array([
            [-1., 1.],
            [1., 0.5]
        ])
        A = bn.Parameter(np.array([
            [-0.4, 0.7],
            [0.7, 0.7]
        ]))
        for _ in range(100):
            A.cleargrad()
            Ainv = bn.linalg.inv(A)
            loss = bn.square(Ainv - B).sum()
            loss.backward()
            A.value -= 0.1 * A.grad

        self.assertTrue(np.allclose(A.value, np.linalg.inv(B)))
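The loop converges because the loss is zero exactly when inv(A) equals B, i.e. at A = inv(B); the starting value of A was chosen near that optimum. The target is easy to compute directly (a sketch):

import numpy as np

B = np.array([[-1., 1.], [1., 0.5]])
A_star = np.linalg.inv(B)        # approximately [[-1/3, 2/3], [2/3, 2/3]]
# At A = inv(B) the loss sum((inv(A) - B) ** 2) vanishes.
assert np.isclose(np.sum((np.linalg.inv(A_star) - B) ** 2), 0)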
Example #8
    def test_square(self):
        x = bn.Parameter(2.)
        y = bn.square(x)
        self.assertEqual(y.value, 4)
        y.backward()
        # d(x ** 2)/dx = 2x, so the gradient at x = 2 is 4.
        self.assertEqual(x.grad, 4)