Example #1
    def test_power(self):
        x = bn.Parameter(2.)
        y = 2 ** x
        self.assertEqual(y.value, 4)
        y.backward()
        self.assertEqual(x.grad, 4 * np.log(2))

        x = np.random.rand(10, 2)
        xp = bn.Parameter(x)
        y = xp ** 3
        self.assertTrue((y.value == x ** 3).all())
        y.backward(np.ones((10, 2)))
        self.assertTrue(np.allclose(xp.grad, 3 * x ** 2))
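
The scalar case leans on the identity d(2**x)/dx = 2**x * log(2), so at x = 2 the expected gradient is 4 * log(2). A minimal central-difference check of that identity, assuming only numpy:

import numpy as np

# Central difference of 2**x at x = 2 against the analytic gradient.
x, eps = 2.0, 1e-6
numeric = (2 ** (x + eps) - 2 ** (x - eps)) / (2 * eps)
assert np.isclose(numeric, 2 ** x * np.log(2))  # both ~ 2.7726
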
Example #2
    def test_negative(self):
        x = bn.Parameter(2.)
        y = -x
        self.assertEqual(y.value, -2)
        y.backward()
        self.assertEqual(x.grad, -1)

        x = np.random.rand(2, 3)
        xp = bn.Parameter(x)
        y = -xp
        self.assertTrue((y.value == -x).all())
        y.backward(np.ones((2, 3)))
        self.assertTrue((xp.grad == -np.ones((2, 3))).all())
Example #3
    def test_exp(self):
        x = bn.Parameter(2.)
        y = bn.exp(x)
        self.assertEqual(y.value, np.exp(2))
        y.backward()
        self.assertEqual(x.grad, np.exp(2))

        x = np.random.rand(5, 3)
        p = bn.Parameter(x)
        y = bn.exp(p)
        self.assertTrue((y.value == np.exp(x)).all())
        y.backward(np.ones((5, 3)))
        self.assertTrue((p.grad == np.exp(x)).all())
Example #4
    def test_log(self):
        x = bn.Parameter(2.)
        y = bn.log(x)
        self.assertEqual(y.value, np.log(2))
        y.backward()
        self.assertEqual(x.grad, 0.5)

        x = np.random.rand(4, 6)
        p = bn.Parameter(x)
        y = bn.log(p)
        self.assertTrue((y.value == np.log(x)).all())
        y.backward(np.ones((4, 6)))
        self.assertTrue((p.grad == 1 / x).all())
Example #5
    def test_forward_backward(self):
        x = bn.Parameter(2)
        z = x - 5
        self.assertEqual(z.value, -3)
        z.backward()
        self.assertEqual(x.grad, 1)

        x = np.random.rand(5, 4)
        y = np.random.rand(4)
        p = bn.Parameter(y)
        z = x - p
        self.assertTrue((z.value == x - y).all())
        z.backward(np.ones((5, 4)))
        self.assertTrue((p.grad == -np.ones(4) * 5).all())
Example #6
    def test_add(self):
        x = bn.Parameter(2)
        z = x + 5
        self.assertEqual(z.value, 7)
        z.backward()
        self.assertEqual(x.grad, 1)

        x = np.random.rand(5, 4)
        y = np.random.rand(4)
        p = bn.Parameter(y)
        z = x + p
        self.assertTrue((z.value == x + y).all())
        z.backward(np.ones((5, 4)))
        self.assertTrue((p.grad == np.ones(4) * 5).all())
Example #7
    def test_multiply(self):
        x = bn.Parameter(2)
        y = x * 5
        self.assertEqual(y.value, 10)
        y.backward()
        self.assertEqual(x.grad, 5)

        x = np.random.rand(5, 4)
        y = np.random.rand(4)
        yp = bn.Parameter(y)
        z = x * yp
        self.assertTrue((z.value == x * y).all())
        z.backward(np.ones((5, 4)))
        self.assertTrue((yp.grad == x.sum(axis=0)).all())
Example #8
    def test_matmul(self):
        x = np.random.rand(10, 3)
        y = np.random.rand(3, 5)
        g = np.random.rand(10, 5)
        xp = bn.Parameter(x)
        z = xp @ y
        self.assertTrue((z.value == x @ y).all())
        z.backward(g)
        self.assertTrue((xp.grad == g @ y.T).all())

        yp = bn.Parameter(y)
        z = x @ yp
        self.assertTrue((z.value == x @ y).all())
        z.backward(g)
        self.assertTrue((yp.grad == x.T @ g).all())
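
For Z = X @ Y with upstream gradient G, the rules asserted above are dX = G @ Y.T and dY = X.T @ G. A numerical spot check of the first rule on a single entry, using only numpy:

import numpy as np

# Perturb X[1, 2] and compare the change in sum(G * (X @ Y)) with the
# corresponding entry of the analytic gradient G @ Y.T.
rng = np.random.default_rng(0)
X, Y, G = rng.random((4, 3)), rng.random((3, 2)), rng.random((4, 2))
eps = 1e-6
E = np.zeros_like(X)
E[1, 2] = eps
numeric = (np.sum(G * ((X + E) @ Y)) - np.sum(G * ((X - E) @ Y))) / (2 * eps)
assert np.isclose(numeric, (G @ Y.T)[1, 2])
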
Example #9
    def test_cauchy(self):
        np.random.seed(1234)
        obs = np.random.standard_cauchy(size=10000)
        obs = 2 * obs + 1
        loc = bn.Parameter(0)
        s = bn.Parameter(1)
        for _ in range(100):
            loc.cleargrad()
            s.cleargrad()
            x = bn.random.Cauchy(loc, bn.softplus(s), data=obs)
            x.log_pdf().sum().backward()
            loc.value += loc.grad * 0.001
            s.value += s.grad * 0.001
        self.assertAlmostEqual(x.loc.value, 1, places=1)
        self.assertAlmostEqual(x.scale.value, 2, places=1)
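
The Cauchy location and scale have no closed-form MLE, so the test climbs the log-density directly, with softplus keeping the scale positive. For reference, a pure-numpy version of the density being ascended (the function name here is ours, not part of bn):

import numpy as np

# log of the Cauchy density 1 / (pi * scale * (1 + ((x - loc) / scale)**2))
def cauchy_log_pdf(x, loc, scale):
    z = (x - loc) / scale
    return -np.log(np.pi * scale * (1 + z ** 2))
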
Example #10
    def test_divide(self):
        x = bn.Parameter(10.)
        z = x / 2
        self.assertEqual(z.value, 5)
        z.backward()
        self.assertEqual(x.grad, 0.5)

        x = np.random.rand(5, 10, 3)
        y = np.random.rand(10, 1)
        p = bn.Parameter(y)
        z = x / p
        self.assertTrue((z.value == x / y).all())
        z.backward(np.ones((5, 10, 3)))
        d = np.sum(-x / y**2, axis=0).sum(axis=1, keepdims=True)
        self.assertTrue((p.grad == d).all())
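
The expected gradient d combines the pointwise derivative d(x / y)/dy = -x / y**2 with a reduction over the axes broadcasting added: y has shape (10, 1) against x's (5, 10, 3), so axis 0 and the size-3 axis are summed away. Restated with numpy only:

import numpy as np

# Pointwise derivative, then fold the broadcast axes back to y's shape.
x = np.random.rand(5, 10, 3)
y = np.random.rand(10, 1)
d = np.sum(-x / y ** 2, axis=0).sum(axis=1, keepdims=True)
assert d.shape == y.shape  # (10, 1)
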
Example #11
    def test_laplace(self):
        obs = np.arange(3)
        loc = bn.Parameter(0)
        s = bn.Parameter(1)
        for _ in range(1000):
            loc.cleargrad()
            s.cleargrad()
            x = bn.random.Laplace(loc, bn.softplus(s), data=obs)
            x.log_pdf().sum().backward()
            loc.value += loc.grad * 0.01
            s.value += s.grad * 0.01
        self.assertAlmostEqual(x.loc.value, np.median(obs), places=1)
        self.assertAlmostEqual(x.scale.value,
                               np.mean(np.abs(obs - x.loc.value)),
                               places=1)
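
The targets here are the closed-form Laplace maximum-likelihood estimates: the sample median for the location and the mean absolute deviation around it for the scale. In plain numpy:

import numpy as np

# Closed-form Laplace MLEs the ascent loop converges toward.
obs = np.arange(3)
loc_mle = np.median(obs)                    # 1.0
scale_mle = np.mean(np.abs(obs - loc_mle))  # 2/3
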
Example #12
    def test_trace(self):
        arrays = [np.random.normal(size=(2, 2)), np.random.normal(size=(3, 4))]

        for arr in arrays:
            arr = bn.Parameter(arr)
            tr_arr = bn.linalg.trace(arr)
            self.assertEqual(tr_arr.value, np.trace(arr.value))

        a = np.array([[1.5, 0], [-0.1, 1.1]])
        a = bn.Parameter(a)
        for _ in range(100):
            a.cleargrad()
            loss = bn.square(bn.linalg.trace(a) - 2)
            loss.backward()
            a.value -= 0.1 * a.grad
        self.assertEqual(bn.linalg.trace(a).value, 2)
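
Since d(trace(A))/dA is the identity matrix, the descent only ever touches the diagonal of a. A one-step sketch of the update direction, assuming the usual chain rule through bn.square:

import numpy as np

# Gradient of (trace(A) - 2)**2 is 2 * (trace(A) - 2) * I: diagonal only.
A = np.array([[1.5, 0.], [-0.1, 1.1]])
grad = 2 * (np.trace(A) - 2) * np.eye(2)
assert np.count_nonzero(grad - np.diag(np.diag(grad))) == 0
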
Example #13
    def test_convolve2d(self):
        np.random.seed(1234)

        img = np.random.randn(1, 5, 5, 1)
        kernel = np.random.randn(3, 3, 1, 1)
        output = bn.convolve2d(img, kernel)
        self.assertTrue(
            np.allclose(
                output.value[0, ..., 0],
                correlate(img[0, ..., 0], kernel[..., 0, 0])[1:-1, 1:-1]
            )
        )

        p = bn.Parameter(kernel)
        output = bn.convolve2d(img, p, 2, 1)
        loss = bn.sum(bn.square(output))
        loss.backward()
        grad_backprop = p.grad
        grad_numerical = np.zeros_like(grad_backprop)
        eps = 1e-8
        for i, j in itertools.product(range(3), repeat=2):
            e = np.zeros_like(kernel)
            e[i, j] += eps
            loss_p = bn.sum(bn.square(bn.convolve2d(img, kernel + e, 2, 1))).value
            loss_m = bn.sum(bn.square(bn.convolve2d(img, kernel - e, 2, 1))).value
            grad_numerical[i, j] = (loss_p - loss_m) / (2 * eps)

        self.assertTrue(np.allclose(grad_backprop, grad_numerical))
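
The hand-rolled loop is a central-difference scheme, (f(k + e) - f(k - e)) / (2 * eps), whose truncation error is O(eps**2). A generic version of the same idea (this helper is our sketch, not part of bn):

import numpy as np

def numerical_grad(f, theta, eps=1e-6):
    """Central-difference gradient of a scalar-valued f at theta."""
    grad = np.zeros_like(theta)
    it = np.nditer(theta, flags=['multi_index'])
    while not it.finished:
        e = np.zeros_like(theta)
        e[it.multi_index] = eps
        grad[it.multi_index] = (f(theta + e) - f(theta - e)) / (2 * eps)
        it.iternext()
    return grad
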
Example #14
    def test_broadcast(self):
        x = bn.Parameter(np.ones((1, 1)))
        shape = (5, 2, 3)
        y = broadcast_to(x, shape)
        self.assertEqual(y.shape, shape)
        y.backward(np.ones(shape))
        self.assertTrue((x.grad == np.ones((1, 1)) * 30).all())
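
Backward through broadcast_to is a sum over the broadcast axes, which is why all 5 * 2 * 3 = 30 upstream ones collapse onto the single source cell:

import numpy as np

# Fold the upstream gradient back to the source's (1, 1) shape.
g = np.ones((5, 2, 3))
folded = g.sum(axis=(0, 1, 2)).reshape(1, 1)
assert folded.item() == 30.0
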
Example #15
    def test_gamma(self):
        self.assertEqual(24, bn.gamma(5).value)
        a = bn.Parameter(2.5)
        eps = 1e-5
        b = bn.gamma(a)
        b.backward()
        num_grad = ((bn.gamma(a + eps) - bn.gamma(a - eps)) / (2 * eps)).value
        self.assertAlmostEqual(a.grad, num_grad)
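
Analytically, the quantity the central difference approximates is Gamma'(a) = Gamma(a) * psi(a), with psi the digamma function. A standalone check, assuming scipy is available:

import numpy as np
from scipy.special import digamma, gamma

# Central difference of Gamma at a = 2.5 against Gamma(a) * digamma(a).
a, eps = 2.5, 1e-5
numeric = (gamma(a + eps) - gamma(a - eps)) / (2 * eps)
assert np.isclose(numeric, gamma(a) * digamma(a))
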
Example #16
    def test_reshape(self):
        self.assertRaises(ValueError, bn.reshape, 1, (2, 3))

        x = np.random.rand(2, 6)
        p = bn.Parameter(x)
        y = p.reshape(3, 4)
        self.assertTrue((x.reshape(3, 4) == y.value).all())
        y.backward(np.ones((3, 4)))
        self.assertTrue((p.grad == np.ones((2, 6))).all())
Example #17
    def test_flatten(self):
        self.assertRaises(TypeError, bn.flatten, "abc")
        self.assertRaises(ValueError, bn.flatten, np.ones(1))

        x = np.random.rand(5, 4)
        p = bn.Parameter(x)
        y = p.flatten()
        self.assertTrue((y.value == x.flatten()).all())
        y.backward(np.ones(20))
        self.assertTrue((p.grad == np.ones((5, 4))).all())
Example #18
    def test_bernoulli(self):
        np.random.seed(1234)
        obs = np.random.choice(2, 1000, p=[0.1, 0.9])
        a = bn.Parameter(0)
        for _ in range(100):
            a.cleargrad()
            x = bn.random.Bernoulli(logit=a, data=obs)
            x.log_pdf().sum().backward()
            a.value += a.grad * 0.01
        self.assertAlmostEqual(x.mu.value, np.mean(obs))
Example #19
    def test_exponential(self):
        np.random.seed(1234)
        obs = np.random.gamma(1, 1 / 0.5, size=1000)
        a = bn.Parameter(0)
        for _ in range(100):
            a.cleargrad()
            x = bn.random.Exponential(bn.softplus(a), data=obs)
            x.log_pdf().sum().backward()
            a.value += a.grad * 0.001
        self.assertAlmostEqual(x.rate.value, 0.475135117)
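
Gradient ascent on the exponential log-likelihood converges to its MLE, rate = 1 / mean(obs); the hard-coded constant above should simply be that closed-form value for this seed. Restated with numpy:

import numpy as np

# MLE of the exponential rate is the reciprocal of the sample mean.
np.random.seed(1234)
obs = np.random.gamma(1, 1 / 0.5, size=1000)
rate_mle = 1 / obs.mean()  # the value the test's constant encodes
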
Example #20
    def test_nansum(self):
        x = np.random.rand(5, 1, 2)
        x[0, 0, 0] = np.nan
        xp = bn.Parameter(x)
        z = bn.nansum(xp)
        self.assertEqual(z.value, np.nansum(x))
        z.backward()
        g = np.ones((5, 1, 2))
        g[0, 0, 0] = 0
        self.assertTrue((xp.grad == g).all())
Example #21
    def test_split(self):
        x = np.random.rand(10, 7)
        a = bn.Parameter(x)
        b, c = bn.split(a, (3,), axis=-1)
        self.assertTrue((b.value == x[:, :3]).all())
        self.assertTrue((c.value == x[:, 3:]).all())
        b.backward(np.ones((10, 3)))
        self.assertIs(a.grad, None)
        c.backward(np.ones((10, 4)))
        self.assertTrue((a.grad == np.ones((10, 7))).all())
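
This test also pins down accumulation semantics: the parent's grad stays None until every branch of the split has run backward, after which the branch gradients occupy their own column blocks, i.e. a concatenation along the split axis:

import numpy as np

# The two branch gradients reassemble along the split axis.
gb, gc = np.ones((10, 3)), np.ones((10, 4))
ga = np.concatenate([gb, gc], axis=-1)
assert ga.shape == (10, 7) and (ga == 1).all()
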
Example #22
    def test_solve(self):
        A = np.array([
            [2., 1.],
            [1., 3.]
        ])
        B = np.array([1., 2.])[:, None]
        AinvB = np.linalg.solve(A, B)
        self.assertTrue((AinvB == bn.linalg.solve(A, B).value).all())

        A = bn.Parameter(A)
        B = bn.Parameter(B)
        for _ in range(100):
            A.cleargrad()
            B.cleargrad()
            AinvB = bn.linalg.solve(A, B)
            loss = bn.square(AinvB - 1).sum()
            loss.backward()
            A.value -= A.grad
            B.value -= B.grad
        self.assertTrue(np.allclose(AinvB.value, 1))
Example #23
    def test_categorical(self):
        np.random.seed(1234)
        obs = np.random.choice(3, 100, p=[0.2, 0.3, 0.5])
        obs = np.eye(3)[obs]
        a = bn.Parameter(np.zeros(3))
        for _ in range(100):
            a.cleargrad()
            x = bn.random.Categorical(logit=a, data=obs)
            x.log_pdf().sum().backward()
            a.value += 0.01 * a.grad
        self.assertTrue(np.allclose(np.mean(obs, 0), x.mu.value))
Example #24
    def test_multivariate_gaussian(self):
        self.assertRaises(ValueError, bn.random.MultivariateGaussian, np.zeros(2), np.eye(3))
        self.assertRaises(ValueError, bn.random.MultivariateGaussian, np.zeros(2), np.eye(2) * -1)

        x_train = np.array([
            [1., 1.],
            [1., -1],
            [-1., 1.],
            [-1., -2.]
        ])
        mu = bn.Parameter(np.ones(2))
        cov = bn.Parameter(np.eye(2) * 2)
        optimizer = bn.optimizer.GradientAscent([mu, cov], 0.1)
        for _ in range(1000):
            optimizer.cleargrad()
            x = bn.random.MultivariateGaussian(mu, cov @ cov.transpose(), data=x_train)
            log_likelihood = x.log_pdf().sum()
            log_likelihood.backward()
            optimizer.update()
        self.assertTrue(np.allclose(mu.value, x_train.mean(axis=0)))
        self.assertTrue(np.allclose(np.cov(x_train, rowvar=False, bias=True), x.cov.value))
Example #25
    def test_abs(self):
        np.random.seed(1234)
        x = bn.Parameter(np.random.randn(5, 7))
        sign = np.sign(x.value)
        y = bn.abs(x)
        self.assertTrue((y.value == np.abs(x.value)).all())

        for _ in range(10000):
            x.cleargrad()
            y = bn.abs(x)
            bn.square(y - 0.01).sum().backward()
            x.value -= x.grad * 0.001
        self.assertTrue(np.allclose(x.value, 0.01 * sign))
Example #26
    def test_logdet(self):
        A = np.array([[2., 1.], [1., 3.]])
        logdetA = np.linalg.slogdet(A)[1]
        self.assertTrue((logdetA == bn.linalg.logdet(A).value).all())

        A = bn.Parameter(A)
        for _ in range(100):
            A.cleargrad()
            logdetA = bn.linalg.logdet(A)
            loss = bn.square(logdetA - 1)
            loss.backward()
            A.value -= 0.1 * A.grad
        self.assertAlmostEqual(logdetA.value, 1)
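
The descent relies on d(logdet(A))/dA = inv(A).T. A finite-difference spot check of one entry, with numpy alone:

import numpy as np

# Perturb A[0, 1] and compare against the (0, 1) entry of inv(A).T.
A = np.array([[2., 1.], [1., 3.]])
eps = 1e-6
E = np.zeros_like(A)
E[0, 1] = eps
numeric = (np.linalg.slogdet(A + E)[1] - np.linalg.slogdet(A - E)[1]) / (2 * eps)
assert np.isclose(numeric, np.linalg.inv(A).T[0, 1])  # both -0.2
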
Example #27
    def test_sum(self):
        x = np.random.rand(5, 1, 2)
        xp = bn.Parameter(x)
        z = xp.sum()
        self.assertEqual(z.value, x.sum())
        z.backward()
        self.assertTrue((xp.grad == np.ones((5, 1, 2))).all())
        xp.cleargrad()

        z = xp.sum(axis=0, keepdims=True)
        self.assertEqual(z.shape, (1, 1, 2))
        self.assertTrue((z.value == x.sum(axis=0, keepdims=True)).all())
        z.backward(np.ones((1, 1, 2)))
        self.assertTrue((xp.grad == np.ones((5, 1, 2))).all())
Example #28
    def test_transpose(self):
        arrays = [
            np.random.normal(size=(2, 3)),
            np.random.normal(size=(2, 3, 4))
        ]
        axes = [None, (2, 0, 1)]

        for arr, ax in zip(arrays, axes):
            arr = bn.Parameter(arr)
            arr_t = bn.transpose(arr, ax)
            self.assertEqual(arr_t.shape, np.transpose(arr.value, ax).shape)
            da = np.random.normal(size=arr_t.shape)
            arr_t.backward(da)
            self.assertEqual(arr.grad.shape, arr.shape)
Example #29
    def test_swapaxes(self):
        arrays = [
            np.random.normal(size=(2, 3)),
            np.random.normal(size=(2, 3, 4))
        ]
        axes = [(0, 1), (-1, -2)]

        for arr, ax in zip(arrays, axes):
            arr = bn.Parameter(arr)
            arr_swapped = bn.swapaxes(arr, ax[0], ax[1])
            self.assertEqual(arr_swapped.shape,
                             np.swapaxes(arr.value, ax[0], ax[1]).shape)
            da = np.random.normal(size=arr_swapped.shape)
            arr_swapped.backward(da)
            self.assertEqual(arr.grad.shape, arr.shape)
Example #30
    def test_cholesky(self):
        A = np.array([[2., -1], [-1., 5.]])
        L = np.linalg.cholesky(A)
        Ap = bn.Parameter(A)
        L_test = bn.linalg.cholesky(Ap)
        self.assertTrue((L == L_test.value).all())

        T = np.array([[1., 0.], [-1., 2.]])
        for _ in range(1000):
            Ap.cleargrad()
            L_ = bn.linalg.cholesky(Ap)
            loss = bn.square(T - L_).sum()
            loss.backward()
            Ap.value -= 0.1 * Ap.grad

        self.assertTrue(np.allclose(Ap.value, T @ T.T))
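
The final assertion follows from the fixed point of the loss: once the factor matches T (lower triangular with a positive diagonal), the parameter must equal T @ T.T, since the Cholesky factorization is unique. In numpy terms:

import numpy as np

# chol(T @ T.T) recovers T exactly for a valid lower-triangular T.
T = np.array([[1., 0.], [-1., 2.]])
assert np.allclose(np.linalg.cholesky(T @ T.T), T)
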