def predict(self, x, return_var=False):
    # Posterior of the noiseless function values for the inputs x
    y = SumMultiply('i,i', self.weights, x)
    # get_moments() returns [E[y], E[y^2]], so the variance is
    # E[y^2] - E[y]**2, not the second moment itself.
    y_hat, y2 = y.get_moments()
    if return_var:
        return y_hat, y2 - y_hat ** 2
    else:
        return y_hat
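The rewrite above reads the variance off the moments. A quick standalone check of that convention, as a sketch assuming only that BayesPy is installed (the numbers are illustrative):

from bayespy.nodes import GaussianARD

y = GaussianARD(0, 4)            # prior: mean 0, precision 4, so variance 0.25
mean, second = y.get_moments()   # BayesPy Gaussian moments are [E[y], E[y^2]]
print(mean, second - mean ** 2)  # expect roughly 0.0 and 0.25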
import numpy as np
from bayespy.nodes import GaussianARD, Gamma, SumMultiply
from bayespy.inference import VB


def _setup_linear_regression():
    """
    Setup code for the pdf and contour tests.

    This code is from http://www.bayespy.org/examples/regression.html
    """
    np.random.seed(1)
    k = 2  # slope
    c = 5  # bias
    s = 2  # noise standard deviation
    x = np.arange(10)
    y = k * x + c + s * np.random.randn(10)
    X = np.vstack([x, np.ones(len(x))]).T
    B = GaussianARD(0, 1e-6, shape=(2,))
    F = SumMultiply('i,i', B, X)
    tau = Gamma(1e-3, 1e-3)
    Y = GaussianARD(F, tau)
    Y.observe(y)
    Q = VB(Y, B, tau)
    Q.update(repeat=1000)
    xh = np.linspace(-5, 15, 100)
    Xh = np.vstack([xh, np.ones(len(xh))]).T
    Fh = SumMultiply('i,i', B, Xh)
    return locals()
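Since the helper returns locals(), a caller can pull any fitted node out of the returned dict. A usage sketch in the style of the cited regression example (bpplt.plot with x and scale is the call that example uses; treat the exact kwargs as an assumption):

import bayespy.plot as bpplt

env = _setup_linear_regression()
bpplt.plot(env['Fh'], x=env['xh'], scale=2)  # posterior mean with ~2-std band
bpplt.plot(env['y'], x=env['x'], color='r', marker='x', linestyle='None')
bpplt.pyplot.show()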
import numpy as np
import bayespy.plot as bpplt
from bayespy.nodes import Gamma, GaussianARD, GaussianMarkovChain, SumMultiply
from bayespy.inference import VB


def model(M=10, N=100, D=3):
    """
    Construct linear state-space model.

    See, for instance, the following publication:
    "Fast variational Bayesian linear state-space model"
    Luttinen (ECML 2013)
    """
    # Dynamics matrix with ARD
    alpha = Gamma(1e-5, 1e-5, plates=(D,), name='alpha')
    A = GaussianARD(0, alpha,
                    shape=(D,),
                    plates=(D,),
                    plotter=bpplt.GaussianHintonPlotter(rows=0, cols=1, scale=0),
                    name='A')
    A.initialize_from_value(np.identity(D))

    # Latent states with dynamics
    X = GaussianMarkovChain(np.zeros(D),            # mean of x0
                            1e-3 * np.identity(D),  # prec of x0
                            A,                      # dynamics
                            np.ones(D),             # innovation
                            n=N,                    # time instances
                            plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
                            name='X')
    X.initialize_from_value(np.random.randn(N, D))

    # Mixing matrix from latent space to observation space using ARD
    gamma = Gamma(1e-5, 1e-5, plates=(D,), name='gamma')
    gamma.initialize_from_value(1e-2 * np.ones(D))
    C = GaussianARD(0, gamma,
                    shape=(D,),
                    plates=(M, 1),
                    plotter=bpplt.GaussianHintonPlotter(rows=0, cols=2, scale=0),
                    name='C')
    C.initialize_from_value(np.random.randn(M, 1, D))

    # Observation noise
    tau = Gamma(1e-5, 1e-5, name='tau')
    tau.initialize_from_value(1e2)

    # Underlying noiseless function
    F = SumMultiply('i,i', C, X, name='F')

    # Noisy observations
    Y = GaussianARD(F, tau, name='Y')

    Q = VB(Y, F, C, gamma, X, A, alpha, tau)

    return Q
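A minimal run sketch for this model. The data array y is a placeholder here; Q['Y'] works because VB exposes its nodes by the names given above:

import numpy as np

M, N, D = 10, 100, 3
y = np.random.randn(M, N)  # placeholder (M, N) observations

Q = model(M=M, N=N, D=D)
Q['Y'].observe(y)
Q.update(repeat=100)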
def fit(self, X, y):
    # Requires: from bayespy.nodes import GaussianARD, Gamma, SumMultiply
    #           from bayespy.inference import VB
    self.weights = GaussianARD(0, 1e-6, shape=(X.shape[-1],))
    y_mean = SumMultiply('i,i', self.weights, X)
    precision = Gamma(1, 0.1)
    y_obs = GaussianARD(y_mean, precision)
    y_obs.observe(y)
    Q = VB(y_obs, self.weights, precision)
    Q.update(repeat=self.n_iter, tol=self.tolerance, verbose=False)
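This fit() reads self.n_iter and self.tolerance, which together with the predict() at the top of this section suggests a small scikit-learn-style estimator. A hypothetical usage sketch; the class name BayesianLinearRegression and its constructor signature are assumptions, not an API from the source:

import numpy as np

rng = np.random.RandomState(0)
X = np.hstack([rng.randn(50, 1), np.ones((50, 1))])  # one feature plus a bias column
y = X @ np.array([2.0, 5.0]) + 0.1 * rng.randn(50)

reg = BayesianLinearRegression(n_iter=200, tolerance=1e-5)  # hypothetical wrapper class
reg.fit(X, y)
y_hat, var = reg.predict(X, return_var=True)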
def fit(self, X, y):
    # Requires: from bayespy.nodes import GaussianARD, Gamma, SumMultiply
    #           from bayespy.inference import VB
    self._init_weights()
    self.tau = Gamma(self.prior_a, self.prior_b)
    F = SumMultiply('i,i', self.weights, X)
    y_obs = GaussianARD(F, self.tau)
    y_obs.observe(y)
    # Include tau in the VB node list so the noise precision is updated too;
    # passing only the weights would leave tau at its prior.
    Q = VB(y_obs, self.weights, self.tau)
    Q.update(repeat=10, tol=1e-4, verbose=False)
import numpy as np
import bayespy.plot as bpplt
from bayespy.nodes import GaussianARD, Gamma, SumMultiply
from bayespy.inference import VB


def pca():
    np.random.seed(41)

    M = 10
    N = 3000
    D = 5

    # Construct the PCA model
    alpha = Gamma(1e-3, 1e-3, plates=(D,), name='alpha')
    W = GaussianARD(0, alpha, plates=(M, 1), shape=(D,), name='W')
    X = GaussianARD(0, 1, plates=(1, N), shape=(D,), name='X')
    tau = Gamma(1e-3, 1e-3, name='tau')
    W.initialize_from_random()
    F = SumMultiply('d,d->', W, X)
    Y = GaussianARD(F, tau, name='Y')

    # Observe data generated from a rank-(D-1) model plus noise
    data = (np.sum(np.random.randn(M, 1, D - 1) * np.random.randn(1, N, D - 1),
                   axis=-1)
            + 1e-1 * np.random.randn(M, N))
    Y.observe(data)

    # Initialize VB engine
    Q = VB(Y, X, W, alpha, tau)

    # Take one update step (so the natural parameters phi are valid)
    Q.update(repeat=1)
    Q.save()

    # Run VB-EM
    Q.update(repeat=200)
    bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'k-')

    # Restore the state
    Q.load()

    # Run Riemannian conjugate gradient
    #Q.optimize(X, alpha, maxiter=100, collapsed=[W, tau])
    Q.optimize(W, tau, maxiter=100, collapsed=[X, alpha])
    bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'r:')

    bpplt.pyplot.show()
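A small follow-up that could be appended inside pca() after the optimization: the data were generated with rank D - 1 = 4, so the ARD precisions in alpha should reveal one pruned component (Gamma moments are [E[alpha], E[log alpha]], so the first moment is the posterior mean precision). A sketch; the expected pattern in the output is an expectation, not a guarantee:

    # Appended inside pca(): a large mean precision marks a pruned column of W
    print(np.sort(alpha.get_moments()[0]))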
import numpy as np
from bayespy.nodes import GaussianARD, Gamma, SumMultiply
from bayespy.inference import VB
from bayespy.inference.vmp.transformations import (RotateGaussianARD,
                                                   RotationOptimizer)
import bayespy.plot as bpplt

np.random.seed(1)

# Generate data: M noisy observation dimensions of N samples from a rank-2 model
M = 20
N = 100
x = np.random.randn(N, 2)
w = np.random.randn(M, 2)
f = np.einsum('ik,jk->ij', w, x)
y = f + 0.1 * np.random.randn(M, N)

# PCA model with D components and an ARD prior for pruning the extra ones
D = 10
X = GaussianARD(0, 1, plates=(1, N), shape=(D,))
alpha = Gamma(1e-5, 1e-5, plates=(D,))
C = GaussianARD(0, alpha, plates=(M, 1), shape=(D,))
F = SumMultiply('d,d->', X, C)
tau = Gamma(1e-5, 1e-5)
Y = GaussianARD(F, tau)
Y.observe(y)

Q = VB(Y, X, C, alpha, tau)
C.initialize_from_random()

# Speed up convergence by optimizing a rotation of the latent space
# after each VB update
rot_X = RotateGaussianARD(X)
rot_C = RotateGaussianARD(C, alpha)
R = RotationOptimizer(rot_X, rot_C, D)
Q.set_callback(R.rotate)

Q.update(repeat=1000)

bpplt.plot(F)
bpplt.plot(f, color='r', marker='x', linestyle='None')
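A quick convergence check to see what the rotation callback buys (a sketch; Q.L is the per-iteration lower bound that VB records, as the PCA snippet above also relies on):

bpplt.pyplot.figure()
bpplt.pyplot.plot(Q.L)  # variational lower bound after each update
bpplt.pyplot.show()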
# Note: this snippet assumes x2 (an array of rows), the flag firstline, and the
# constants k (slope), c (bias) and s (noise std) are defined earlier.
for xx in x2:
    if firstline:
        y = np.mean(xx)
        firstline = False
        continue
    y = np.vstack([y, (k * np.mean(xx) + c + s * np.random.randn(1))])
y = y.reshape(y.shape[0],)
X = x2.reshape(x2.shape[0], 1)

from bayespy.nodes import GaussianARD, SumMultiply, Gamma
from bayespy.inference import VB

B = GaussianARD(0, 1e-6, shape=(X.shape[1],))
F = SumMultiply('i,i', B, X)
tau = Gamma(1e-3, 1e-3)
Y = GaussianARD(F, tau)
Y.observe(y)
Q = VB(Y, B, tau)
#Q.update(repeat=100990)

# F.get_moments() returns [E[F], E[F^2]] (mean and second moment), not a
# (min, max) pair, so the posterior means can be read off directly instead
# of averaging the two moments.
moments = F.get_moments()
result = list(moments[0])
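An alternative to handling the moments by hand (a sketch; it assumes the commented-out Q.update(...) above has actually been run so the posterior is fitted): bayespy.plot can draw the posterior mean of F with a ~2-standard-deviation band, in the style of the BayesPy regression example.

import bayespy.plot as bpplt

Q.update(repeat=100)
bpplt.plot(F, x=X[:, 0], scale=2)  # posterior mean with ~2-std band
bpplt.pyplot.show()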
import numpy as np
import bayespy.plot as bpplt
from bayespy.nodes import (Dirichlet, CategoricalMarkovChain, Gamma,
                           GaussianARD, SwitchingGaussianMarkovChain,
                           SumMultiply)
from bayespy.inference import VB


def model(M=20, N=100, D=10, K=3):
    """
    Construct the linear state-space model with switching dynamics.
    """

    #
    # Switching dynamics (HMM)
    #

    # Prior for initial state probabilities
    rho = Dirichlet(1e-3 * np.ones(K), name='rho')

    # Prior for state transition probabilities
    V = Dirichlet(1e-3 * np.ones(K), plates=(K,), name='V')
    v = 10 * np.identity(K) + 1 * np.ones((K, K))
    v /= np.sum(v, axis=-1, keepdims=True)
    V.initialize_from_value(v)

    # Hidden states (with unknown initial state probabilities and state
    # transition probabilities)
    Z = CategoricalMarkovChain(rho, V,
                               states=N - 1,
                               name='Z',
                               plotter=bpplt.CategoricalMarkovChainPlotter(),
                               initialize=False)
    Z.u[0] = np.random.dirichlet(np.ones(K))
    Z.u[1] = np.reshape(np.random.dirichlet(0.5 * np.ones(K * K), size=(N - 2)),
                        (N - 2, K, K))

    #
    # Linear state-space models
    #

    # Dynamics matrix with ARD
    # alpha : (K,1,D) x ()
    alpha = Gamma(1e-5, 1e-5, plates=(K, 1, D), name='alpha')
    # A : (K,D) x (D)
    A = GaussianARD(0, alpha,
                    shape=(D,),
                    plates=(K, D),
                    name='A',
                    plotter=bpplt.GaussianHintonPlotter())
    A.initialize_from_value(np.identity(D) * np.ones((K, D, D))
                            + 0.1 * np.random.randn(K, D, D))

    # Latent states with dynamics
    # X : () x (N,D)
    X = SwitchingGaussianMarkovChain(np.zeros(D),             # mean of x0
                                     1e-3 * np.identity(D),   # prec of x0
                                     A,                       # dynamics
                                     Z,                       # dynamics selection
                                     np.ones(D),              # innovation
                                     n=N,                     # time instances
                                     name='X',
                                     plotter=bpplt.GaussianMarkovChainPlotter())
    X.initialize_from_value(10 * np.random.randn(N, D))

    # Mixing matrix from latent space to observation space using ARD
    # gamma : (D) x ()
    gamma = Gamma(1e-5, 1e-5, plates=(D,), name='gamma')
    # C : (M,1) x (D)
    C = GaussianARD(0, gamma,
                    shape=(D,),
                    plates=(M, 1),
                    name='C',
                    plotter=bpplt.GaussianHintonPlotter(rows=-3, cols=-1))
    C.initialize_from_value(np.random.randn(M, 1, D))

    # Underlying noiseless function
    # F : (M,N) x ()
    F = SumMultiply('i,i', C, X, name='F')

    #
    # Mixing the models
    #

    # Observation noise
    tau = Gamma(1e-5, 1e-5, name='tau')
    tau.initialize_from_value(1e2)

    # Emission/observation distribution
    Y = GaussianARD(F, tau, name='Y')

    Q = VB(Y, F, Z, rho, V, C, gamma, X, A, alpha, tau)

    return Q
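The run pattern is the same as for the plain linear state-space model earlier; a minimal sketch with placeholder data:

import numpy as np

y = np.random.randn(20, 100)  # placeholder (M, N) observations

Q = model()  # defaults: M=20, N=100, D=10, K=3
Q['Y'].observe(y)
Q.update(repeat=100)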
import numpy as np
import bayespy.plot as bpplt
from bayespy.nodes import (Gamma, GaussianARD, GaussianMarkovChain,
                           VaryingGaussianMarkovChain, SumMultiply)
from bayespy.inference.vmp.nodes.gaussian import GaussianMoments
from bayespy.inference import VB


def model(M, N, D, K):
    """
    Construct the linear state-space model with time-varying dynamics.

    For reference, see the following publication:
    (TODO)
    """

    #
    # The model block for the latent mixing weight process
    #

    # Dynamics matrix with ARD
    # beta : (K) x ()
    beta = Gamma(1e-5, 1e-5, plates=(K,), name='beta')
    # B : (K) x (K)
    B = GaussianARD(np.identity(K), beta,
                    shape=(K,),
                    plates=(K,),
                    name='B',
                    plotter=bpplt.GaussianHintonPlotter(rows=0, cols=1, scale=0),
                    initialize=False)
    B.initialize_from_value(np.identity(K))

    # Mixing weight process, that is, the weights in the linear combination of
    # state dynamics matrices
    # S : () x (N,K)
    S = GaussianMarkovChain(np.ones(K),
                            1e-6 * np.identity(K),
                            B,
                            np.ones(K),
                            n=N,
                            name='S',
                            plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
                            initialize=False)
    s = 10 * np.random.randn(N, K)
    s[:, 0] = 10
    S.initialize_from_value(s)

    #
    # The model block for the latent states
    #

    # Projection matrix of the dynamics matrix
    # alpha : (D,K) x ()
    alpha = Gamma(1e-5, 1e-5, plates=(D, K), name='alpha')
    alpha.initialize_from_value(1 * np.ones((D, K)))
    # A : (D) x (D,K)
    A = GaussianARD(0, alpha,
                    shape=(D, K),
                    plates=(D,),
                    name='A',
                    plotter=bpplt.GaussianHintonPlotter(rows=0, cols=1, scale=0),
                    initialize=False)

    # Initialize S and A such that A*S is almost an identity matrix
    a = np.zeros((D, D, K))
    a[:, :, 0] = np.identity(D) / s[0, 0]
    a[:, :, 1:] = 0.1 / s[0, 0] * np.random.randn(D, D, K - 1)
    A.initialize_from_value(a)

    # Latent states with dynamics
    # X : () x (N,D)
    X = VaryingGaussianMarkovChain(
        np.zeros(D),                      # mean of x0
        1e-3 * np.identity(D),            # prec of x0
        A,                                # dynamics matrices
        S._convert(GaussianMoments)[1:],  # temporal weights
        np.ones(D),                       # innovation
        n=N,                              # time instances
        name='X',
        plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
        initialize=False)
    X.initialize_from_value(np.random.randn(N, D))

    #
    # The model block for observations
    #

    # Mixing matrix from latent space to observation space using ARD
    # gamma : (D) x ()
    gamma = Gamma(1e-5, 1e-5, plates=(D,), name='gamma')
    gamma.initialize_from_value(1e-2 * np.ones(D))
    # C : (M,1) x (D)
    C = GaussianARD(0, gamma,
                    shape=(D,),
                    plates=(M, 1),
                    name='C',
                    plotter=bpplt.GaussianHintonPlotter(rows=0, cols=2, scale=0))
    C.initialize_from_value(np.random.randn(M, 1, D))

    # Noiseless process
    # F : (M,N) x ()
    F = SumMultiply('d,d', C, X, name='F')

    # Observation noise
    # tau : () x ()
    tau = Gamma(1e-5, 1e-5, name='tau')
    tau.initialize_from_value(1e2)

    # Observations
    # Y : (M,N) x ()
    Y = GaussianARD(F, tau, name='Y')

    # Construct inference machine
    Q = VB(Y, F, C, gamma, X, A, alpha, tau, S, B, beta)

    return Q