def _log_likelihood_gradients(self):
    # Gradients w.r.t. the latent points come first, mirroring the parameter
    # ordering used by _get_params and _set_params.
    return np.hstack((self.dL_dX().flatten(), SparseGPRegression._log_likelihood_gradients(self)))
def plot(self):
    GPLVM.plot(self)
    # Passing Z without a small amount of jitter will induce the white kernel
    # where we don't want it!
    mu, var, upper, lower = SparseGPRegression.predict(
        self, self.Z + np.random.randn(*self.Z.shape) * 0.0001)
    pb.plot(mu[:, 0], mu[:, 1], 'ko')
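# Design note on the jitter above: a white-noise kernel term contributes
# variance only where inputs coincide exactly, so perturbing Z by a tiny
# N(0, 0.0001^2) offset keeps that term out of the predictions at the
# inducing inputs (a reading of the comment above, not a guarantee of the
# kernel's implementation).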
def _set_params(self, x):
    # The first num_data * input_dim entries of x are the latent points;
    # the remainder belongs to the parent SparseGPRegression model.
    self.X = x[:self.X.size].reshape(self.num_data, self.input_dim).copy()
    SparseGPRegression._set_params(self, x[self.X.size:])
def log_likelihood(self):
    return SparseGPRegression.log_likelihood(self)
def _get_params(self):
    return np.hstack((self.X.flatten(), SparseGPRegression._get_params(self)))
def _get_param_names(self):
    return (sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)]
                 for n in range(self.num_data)], [])
            + SparseGPRegression._get_param_names(self))
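# Sketch of the flat parameter vector shared by _set_params, _get_params and
# _get_param_names above, for illustrative shapes num_data=2, input_dim=2:
#
#     [X_0_0, X_0_1, X_1_0, X_1_1, <SparseGPRegression parameters...>]
#
# i.e. the latent coordinates in row-major order, followed by whatever the
# parent class exposes (inducing inputs, kernel and noise parameters in a
# GPy-style sparse GP).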
def setstate(self, state):
    return SparseGPRegression.setstate(self, state)
def getstate(self):
    return SparseGPRegression.getstate(self)
def __init__(self, Y, input_dim, kernel=None, init='PCA', num_inducing=10):
    # Initialise the latent positions (e.g. by PCA), then build the sparse
    # GP regression model from the latent space to the observed data.
    X = initialise_latent(init, input_dim, Y)
    SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
    self.ensure_default_constraints()
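# Minimal usage sketch. Assumptions: the enclosing class is the sparse GPLVM
# built on SparseGPRegression (called SparseGPLVM here for illustration), and
# the model exposes a GPy-style .optimize(); neither name is confirmed by
# this fragment.
#
#     import numpy as np
#     Y = np.random.randn(100, 5)                  # 100 observations in 5D
#     m = SparseGPLVM(Y, input_dim=2, num_inducing=10)
#     m.optimize()                                 # fit latents, Z and kernel
#     m.plot()                                     # latent points + inducing outputs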