Exemplo n.º 1
0
 def __init__(self, Y, input_dim, X=None, kernel=None, init='PCA', num_inducing=10):
     """Initialise a sparse GP-LVM.

     When no latent locations ``X`` are supplied, they are derived from the
     observations ``Y`` via ``initialize_latent`` using strategy ``init``
     (default ``'PCA'``).
     """
     if X is None:
         # Deferred import: only needed when latent points must be created.
         from ..util.initialization import initialize_latent
         X, _fracs = initialize_latent(init, input_dim, Y)
     SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
Exemplo n.º 2
0
 def log_likelihood(self):
     """Return the marginal log-likelihood computed by the sparse GP base."""
     ll = SparseGPRegression.log_likelihood(self)
     return ll
Exemplo n.º 3
0
 def _set_params(self, x):
     """Split ``x``: the leading ``X.size`` entries refresh the latent
     positions, the remainder is forwarded to the sparse GP."""
     n_latent = self.X.size
     self.X = x[:n_latent].reshape(self.num_data, self.input_dim).copy()
     SparseGPRegression._set_params(self, x[n_latent:])
Exemplo n.º 4
0
 def _get_params(self):
     """Parameter vector: flattened latent ``X`` then the GP's own parameters."""
     latent = self.X.flatten()
     gp_params = SparseGPRegression._get_params(self)
     return np.hstack((latent, gp_params))
Exemplo n.º 5
0
 def _get_param_names(self):
     """Names ``'X_n_q'`` for each latent coordinate (row-major over data
     points, then dimensions), followed by the GP parameter names."""
     latent_names = ['X_%i_%i' % (n, q)
                     for n in range(self.num_data)
                     for q in range(self.input_dim)]
     return latent_names + SparseGPRegression._get_param_names(self)
Exemplo n.º 6
0
 def __init__(self, Y, input_dim, kernel=None, init='PCA', num_inducing=10):
     """Construct the model, deriving latent locations from ``Y``.

     NOTE(review): ``initialise_latent`` is not defined in this file —
     presumably supplied by the class or a mixin; confirm before reuse.
     """
     latent = self.initialise_latent(init, input_dim, Y)
     SparseGPRegression.__init__(self, latent, Y,
                                 kernel=kernel,
                                 num_inducing=num_inducing)
Exemplo n.º 7
0
 def __init__(self,
              Y,
              input_dim,
              X=None,
              kernel=None,
              init='PCA',
              num_inducing=10):
     """Sparse GP-LVM constructor; initialises ``X`` from ``Y`` when absent."""
     if X is None:
         # Import here so the initialisation utilities are only loaded on demand.
         from ..util.initialization import initialize_latent
         X, _fracs = initialize_latent(init, input_dim, Y)
     SparseGPRegression.__init__(self,
                                 X,
                                 Y,
                                 kernel=kernel,
                                 num_inducing=num_inducing)
Exemplo n.º 8
0
 def _log_likelihood_gradients(self):
     """Gradient w.r.t. the flattened latent ``X``, then the GP gradients."""
     latent_grad = self.dL_dX().flatten()
     gp_grad = SparseGPRegression._log_likelihood_gradients(self)
     return np.hstack((latent_grad, gp_grad))
Exemplo n.º 9
0
 def _set_params(self, x):
     """Write the first ``X.size`` entries of ``x`` into the latent ``X``
     and hand the remaining entries to the sparse GP."""
     cut = self.X.size
     self.X = x[:cut].reshape(self.num_data, self.input_dim).copy()
     SparseGPRegression._set_params(self, x[cut:])
Exemplo n.º 10
0
 def log_likelihood(self):
     """Delegate the marginal log-likelihood to SparseGPRegression."""
     base_ll = SparseGPRegression.log_likelihood
     return base_ll(self)
Exemplo n.º 11
0
 def _get_params(self):
     """Flattened latent ``X`` stacked ahead of the GP's parameter vector."""
     parts = (self.X.flatten(), SparseGPRegression._get_params(self))
     return np.hstack(parts)
Exemplo n.º 12
0
 def _get_param_names(self):
     """Per-coordinate latent names ``'X_n_q'`` plus the GP parameter names."""
     latent = []
     for n in range(self.num_data):
         for q in range(self.input_dim):
             latent.append('X_%i_%i' % (n, q))
     return latent + SparseGPRegression._get_param_names(self)
Exemplo n.º 13
0
 def __init__(self, Y, input_dim, kernel=None, init='PCA', num_inducing=10):
     """Build the model from ``Y`` and apply the default constraints.

     NOTE(review): ``initialise_latent`` is not visible in this file;
     assumed to be provided by the class or a mixin — confirm.
     """
     X0 = self.initialise_latent(init, input_dim, Y)
     SparseGPRegression.__init__(self, X0, Y, kernel=kernel,
                                 num_inducing=num_inducing)
     self.ensure_default_constraints()
Exemplo n.º 14
0
 def _log_likelihood_gradients(self):
     """Stack the latent-``X`` gradient ahead of the GP parameter gradients."""
     pieces = (self.dL_dX().flatten(),
               SparseGPRegression._log_likelihood_gradients(self))
     return np.hstack(pieces)
Exemplo n.º 15
0
 def plot(self):
     """Plot the latent space via GPLVM, then overlay the model's predicted
     means at the (slightly jittered) inducing inputs ``Z`` as black dots.
     (``pb`` is presumably pylab/matplotlib — confirm against imports.)"""
     GPLVM.plot(self)
     # Passing Z without a small amount of jitter will induce the white
     # (noise) kernel where we don't want it!
     mu, var, upper, lower = SparseGPRegression.predict(self, self.Z + np.random.randn(*self.Z.shape) * 0.0001)
     pb.plot(mu[:, 0] , mu[:, 1], 'ko')
Exemplo n.º 16
0
 def plot(self):
     """Plot the latent space via GPLVM, then mark the predicted means at
     the jittered inducing inputs ``Z`` with black circles.
     (``pb`` is presumably pylab/matplotlib — confirm against imports.)"""
     GPLVM.plot(self)
     # Passing Z without a small amount of jitter will induce the white
     # (noise) kernel where we don't want it!
     mu, var, upper, lower = SparseGPRegression.predict(self, self.Z + np.random.randn(*self.Z.shape) * 0.0001)
     pb.plot(mu[:, 0] , mu[:, 1], 'ko')
Exemplo n.º 17
0
 def model_builder(X, Y, kernel):
     """Create a SparseGPRegression model with its inducing inputs Z fixed."""
     model = SparseGPRegression(X, Y, kernel=kernel)
     model.Z.fix()
     return model