def s_variance(self, x):
    """Symbolic Gaussian Process predictive variance at query points ``x``.

    Computes ``1 - diag(k_*^T (K + var_y*I)^{-1} k_*)`` — the constant 1
    plays the role of the prior variance here (presumably a unit-variance
    kernel; confirm against ``self.K_fn``).

    :raises TypeError: if the resulting dtype disagrees with ``self.dtype``.
    """
    gram, targets, noise, n_train = self.kyn()
    # Regularized gram matrix, wrapped in psd() to mark it positive
    # semi-definite for the symbolic optimizer.
    reg_gram = psd(gram + noise * tensor.eye(n_train))
    k_star = self.K_fn(self.x, x)
    predictive_var = 1 - diag(dots(k_star.T, matrix_inverse(reg_gram), k_star))
    if predictive_var.dtype != self.dtype:
        raise TypeError('var_x dtype', predictive_var.dtype)
    return predictive_var
def s_mean(self, x):
    """Symbolic Gaussian Process predictive mean at query points ``x``.

    Computes ``alpha^T k_*`` with ``alpha = (K + var_y*I)^{-1} y``.

    :raises TypeError: if the resulting dtype disagrees with ``self.dtype``.
    """
    gram, targets, noise, n_train = self.kyn()
    reg_gram = psd(gram + noise * tensor.eye(n_train))
    # Weight vector alpha = rK^{-1} y.
    weights = tensor.dot(matrix_inverse(reg_gram), targets)
    k_star = self.K_fn(self.x, x)
    mean_x = tensor.dot(weights, k_star)
    if mean_x.dtype != self.dtype:
        raise TypeError('y_x dtype', mean_x.dtype)
    return mean_x
def s_nll(self):
    """Symbolic marginal negative log likelihood of the model.

    Sum of a data-fit term, a model-complexity term and a normalization
    constant.

    :note: See RW.pdf page 37, Eq. 2.30.
    :raises TypeError: if the resulting dtype disagrees with ``self.dtype``.
    """
    K, y, var_y, N = self.kyn()
    rK = psd(K + var_y * tensor.eye(N))
    data_fit = 0.5 * dots(y, matrix_inverse(rK), y)
    complexity = 0.5 * tensor.log(det(rK))
    normalization = N / 2.0 * tensor.log(2 * numpy.pi)
    nll = data_fit + complexity + normalization
    if nll.dtype != self.dtype:
        raise TypeError('nll dtype', nll.dtype)
    return nll
def s_deg_of_freedom(self):
    """Degrees of freedom ("effective number of parameters") of the
    kernel smoother: ``trace(K (K + var_y*I)^{-1})``.

    Defined pg. 25 of Rasmussen & Williams.

    :raises TypeError: if the resulting dtype disagrees with ``self.dtype``.
    """
    K, y, var_y, N = self.kyn()
    reg_gram = psd(K + var_y * tensor.eye(N))
    dof = trace(tensor.dot(K, matrix_inverse(reg_gram)))
    if dof.dtype != self.dtype:
        raise TypeError('dof dtype', dof.dtype)
    return dof
def s_nll(K, y, var_y, prior_var):
    """Symbolic marginal negative log likelihood of the GP model.

    K - gram matrix (matrix-like)
    y - the training targets (vector-like)
    var_y - the variance of uncertainty about y (vector-like)
    prior_var - scale applied to the gram matrix (presumably the prior
        signal variance — confirm against callers)

    :note: See RW.pdf page 37, Eq. 2.30.
    """
    n = y.shape[0]
    reg_gram = psd(prior_var * K + var_y * TT.eye(n))
    # Eq. 2.30 term by term: data fit + complexity penalty + constant.
    data_term = 0.5 * dots(y, matrix_inverse(reg_gram), y)
    penalty_term = 0.5 * TT.log(det(reg_gram))
    const_term = n / 2.0 * TT.log(2 * np.pi)
    return data_term + penalty_term + const_term
def s_variance(K, y, var_y, prior_var, K_new, var_min): rK = psd(prior_var * K + var_y * TT.eye(y.shape[0])) L = cholesky(rK) v = dots(matrix_inverse(L), prior_var * K_new) var_x = TT.maximum(prior_var - (v ** 2).sum(axis=0), var_min) return var_x
def s_mean(K, y, var_y, prior_var, K_new):
    """Symbolic GP predictive mean at the points described by ``K_new``.

    Computes ``alpha^T (prior_var * k_*)`` with
    ``alpha = (prior_var*K + var_y*I)^{-1} y``.
    """
    reg_gram = psd(prior_var * K + var_y * TT.eye(y.shape[0]))
    weights = TT.dot(matrix_inverse(reg_gram), y)
    return TT.dot(weights, prior_var * K_new)