Example #1
 def __init__(self,
              Y,
              input_dim,
              X=None,
              kernel=None,
              init='PCA',
              num_inducing=10):
     if X is None:
         # no latent positions supplied: initialize them (PCA by default);
         # fracs gives the fraction of variance captured per latent dimension
         from ..util.initialization import initialize_latent
         X, fracs = initialize_latent(init, input_dim, Y)
     SparseGPRegression.__init__(self,
                                 X,
                                 Y,
                                 kernel=kernel,
                                 num_inducing=num_inducing)
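For reference, below is a minimal sketch of what a PCA-based latent initializer such as initialize_latent might compute. The function name, signature, and the returned variance fractions here are illustrative assumptions, not GPy's actual implementation.

 import numpy as np

 def initialize_latent_pca(input_dim, Y):
     # Illustrative stand-in for initialize_latent with init='PCA' (not GPy's real code).
     Yc = Y - Y.mean(axis=0)                        # center the observations
     U, S, Vt = np.linalg.svd(Yc, full_matrices=False)
     X = U[:, :input_dim] * S[:input_dim]           # principal-component scores as latent X
     fracs = S[:input_dim] ** 2 / np.sum(S ** 2)    # fraction of variance per latent dimension
     return X, fracs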
Example #2
 def log_likelihood(self):
     return SparseGPRegression.log_likelihood(self)
Example #3
 def _set_params(self, x):
     self.X = x[:self.X.size].reshape(self.num_data, self.input_dim).copy()
     SparseGPRegression._set_params(self, x[self.X.size:])
Example #4
 def _get_params(self):
     return np.hstack((self.X.flatten(), SparseGPRegression._get_params(self)))
Example #5
 def _get_param_names(self):
     return (sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], [])
             + SparseGPRegression._get_param_names(self))
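To make the packing convention of _get_params, _set_params, and _get_param_names concrete, here is a toy round trip with made-up sizes (num_data=2, input_dim=3); the values are arbitrary.

 import numpy as np

 X = np.arange(6.0).reshape(2, 3)          # toy latent matrix: num_data=2, input_dim=3
 theta = np.array([0.5, 1.0])              # stand-in for SparseGPRegression's own parameters
 packed = np.hstack((X.flatten(), theta))  # latent coordinates first, then the rest
 names = sum([['X_%i_%i' % (n, q) for q in range(3)] for n in range(2)], [])
 # names == ['X_0_0', 'X_0_1', 'X_0_2', 'X_1_0', 'X_1_1', 'X_1_2']
 X_back = packed[:X.size].reshape(2, 3)    # what _set_params recovers from the vector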
Example #6
 def __init__(self, Y, input_dim, kernel=None, init='PCA', num_inducing=10):
     X = self.initialise_latent(init, input_dim, Y)
     SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
Example #7
File: sparse_gplvm.py Project: jaidevd/GPy
 def _log_likelihood_gradients(self):
     # latent-X gradients come first, matching the parameter ordering in _get_params
     return np.hstack((self.dL_dX().flatten(), SparseGPRegression._log_likelihood_gradients(self)))
Example #8
File: sparse_gplvm.py Project: jaidevd/GPy
 def __init__(self, Y, input_dim, kernel=None, init='PCA', num_inducing=10):
     X = self.initialise_latent(init, input_dim, Y)
     SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
     # apply GPy's default constraints (e.g. positivity on variance parameters)
     self.ensure_default_constraints()
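Assuming the class is exposed as GPy.models.SparseGPLVM, as in the jaidevd/GPy project above, constructing and fitting a model might look like the following; the data are random placeholders and the exact API varies between GPy versions.

 import numpy as np
 import GPy

 Y = np.random.randn(40, 5)  # toy high-dimensional observations
 m = GPy.models.SparseGPLVM(Y, input_dim=2, num_inducing=10)
 m.optimize()                # fit the latent X together with kernel and inducing inputs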
Example #9
File: sparse_gplvm.py Project: jaidevd/GPy
 def plot(self):
     GPLVM.plot(self)
     # passing Z without a small amount of jitter will induce the white kernel where we don't want it!
     mu, var, upper, lower = SparseGPRegression.predict(self, self.Z + np.random.randn(*self.Z.shape) * 0.0001)
     pb.plot(mu[:, 0], mu[:, 1], 'ko')
Example #10
 def model_builder(X, Y, kernel):
     m = SparseGPRegression(X, Y, kernel=kernel)
     m.Z.fix()  # keep the inducing inputs fixed during optimization
     return m
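A possible way to call model_builder, assuming SparseGPRegression has been imported from GPy.models and using GPy's RBF kernel (the kernel class name varies by GPy version; the data are random placeholders):

 import numpy as np
 import GPy
 from GPy.models import SparseGPRegression

 X = np.random.rand(50, 2)                              # toy inputs
 Y = np.sin(X[:, :1]) + 0.05 * np.random.randn(50, 1)   # noisy toy targets
 m = model_builder(X, Y, GPy.kern.RBF(2))               # Z stays fixed inside model_builder
 m.optimize()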