def test_initialization(self): """ Test initialization methods of GaussianARD """ X = GaussianARD(1, 2, shape=(2, ), plates=(3, )) # Prior initialization mu = 1 * np.ones((3, 2)) alpha = 2 * np.ones((3, 2)) X.initialize_from_prior() u = X._message_to_child() self.assertAllClose(u[0] * np.ones((3, 2)), mu) self.assertAllClose( u[1] * np.ones((3, 2, 2)), linalg.outer(mu, mu, ndim=1) + misc.diag(1 / alpha, ndim=1)) # Parameter initialization mu = np.random.randn(3, 2) alpha = np.random.rand(3, 2) X.initialize_from_parameters(mu, alpha) u = X._message_to_child() self.assertAllClose(u[0], mu) self.assertAllClose( u[1], linalg.outer(mu, mu, ndim=1) + misc.diag(1 / alpha, ndim=1)) # Value initialization x = np.random.randn(3, 2) X.initialize_from_value(x) u = X._message_to_child() self.assertAllClose(u[0], x) self.assertAllClose(u[1], linalg.outer(x, x, ndim=1)) # Random initialization X.initialize_from_random() pass
def test_initialization(self): """ Test initialization methods of GaussianARD """ X = GaussianARD(1, 2, shape=(2,), plates=(3,)) # Prior initialization mu = 1 * np.ones((3, 2)) alpha = 2 * np.ones((3, 2)) X.initialize_from_prior() u = X._message_to_child() self.assertAllClose(u[0]*np.ones((3,2)), mu) self.assertAllClose(u[1]*np.ones((3,2,2)), linalg.outer(mu, mu, ndim=1) + misc.diag(1/alpha, ndim=1)) # Parameter initialization mu = np.random.randn(3, 2) alpha = np.random.rand(3, 2) X.initialize_from_parameters(mu, alpha) u = X._message_to_child() self.assertAllClose(u[0], mu) self.assertAllClose(u[1], linalg.outer(mu, mu, ndim=1) + misc.diag(1/alpha, ndim=1)) # Value initialization x = np.random.randn(3, 2) X.initialize_from_value(x) u = X._message_to_child() self.assertAllClose(u[0], x) self.assertAllClose(u[1], linalg.outer(x, x, ndim=1)) # Random initialization X.initialize_from_random() pass
x = np.random.randn(2, 100)
data = np.dot(c, x) + 0.1 * np.random.randn(10, 100)  # data is 10 x 100
Y.observe(data)
# (Missing values)
Y.observe(data, mask=[[True], [False], [False], [True], [True],
                      [False], [True], [True], [True], [False]])

# 2: Choosing the inference method
from bayespy.inference import VB
Q = VB(Y, C, X, alpha, tau)

# 3: Initializing the posterior approximation
X.initialize_from_parameters(np.random.randn(1, 100, D), 10)

# 4: Running the inference algorithm
# Q.update()
# Q.update(C, X)
# Q.update(C, X, C, tau)
# Q.update(repeat=10)
# Q.update(repeat=1000)
Q.update(repeat=10000, tol=1e-5)
# C.update()

# 5: Parameter expansion (when convergence is slow)
# from bayespy.inference.vmp import transformations
# rotX = transformations.RotateGaussianARD(X)
# rotC = transformations.RotateGaussianARD(C, alpha)
# R = transformations.RotationOptimizer(rotC, rotX, D)
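# To actually use the parameter expansion above, the rotation optimizer is
# run after every VB iteration. In the BayesPy documentation this is done by
# registering it as a callback on the VB object; sketch below, treat the
# callback attribute and the update settings as assumptions:
# Q.callback = R.rotate
# Q.update(repeat=1000, tol=1e-6)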
def test_annealing(self):

    X = GaussianARD(3, 4)
    X.initialize_from_parameters(-1, 6)
    Q = VB(X)
    Q.set_annealing(0.1)

    #
    # Check that the gradient is correct
    #

    # Initial parameters
    phi0 = X.phi

    # Gradient
    rg = X.get_riemannian_gradient()
    g = X.get_gradient(rg)

    # Numerical gradient of the first parameter
    eps = 1e-6
    p0 = X.get_parameters()
    l0 = Q.compute_lowerbound(ignore_masked=False)
    g_num = [(), ()]
    e = eps
    p1 = p0[0] + e
    X.set_parameters([p1, p0[1]])
    l1 = Q.compute_lowerbound(ignore_masked=False)
    g_num[0] = (l1 - l0) / eps

    # Numerical gradient of the second parameter
    p1 = p0[1] + e
    X.set_parameters([p0[0], p1])
    l1 = Q.compute_lowerbound(ignore_masked=False)
    g_num[1] = (l1 - l0) / eps

    # Check
    self.assertAllClose(g[0], g_num[0])
    self.assertAllClose(g[1], g_num[1])

    #
    # Gradient should be zero after updating
    #

    X.update()

    # Initial parameters
    phi0 = X.phi

    # Numerical gradient of the first parameter
    eps = 1e-8
    p0 = X.get_parameters()
    l0 = Q.compute_lowerbound(ignore_masked=False)
    g_num = [(), ()]
    e = eps
    p1 = p0[0] + e
    X.set_parameters([p1, p0[1]])
    l1 = Q.compute_lowerbound(ignore_masked=False)
    g_num[0] = (l1 - l0) / eps

    # Numerical gradient of the second parameter
    p1 = p0[1] + e
    X.set_parameters([p0[0], p1])
    l1 = Q.compute_lowerbound(ignore_masked=False)
    g_num[1] = (l1 - l0) / eps

    # Check
    self.assertAllClose(0, g_num[0], atol=1e-5)
    self.assertAllClose(0, g_num[1], atol=1e-5)

    # Not at the optimum
    X.initialize_from_parameters(-1, 6)

    # Initial parameters
    phi0 = X.phi

    # Gradient
    g = X.get_riemannian_gradient()

    # Parameters after VB-EM update
    X.update()
    phi1 = X.phi

    # Check
    self.assertAllClose(g[0], phi1[0] - phi0[0])
    self.assertAllClose(g[1], phi1[1] - phi0[1])

    pass
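# How annealing is typically used outside this test (sketch only; the
# schedule below is illustrative and uses only the set_annealing/update
# calls exercised above): start with a small annealing factor and raise
# it toward 1.0 as the VB iteration proceeds.
#
# beta = 0.1
# while beta < 1.0:
#     Q.set_annealing(beta)
#     Q.update(repeat=20, tol=1e-4)
#     beta = min(1.0, 1.5 * beta)
# Q.set_annealing(1.0)
# Q.update(repeat=1000, tol=1e-6)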