Ejemplo n.º 1
0
 def s_variance(self, x):
     """Return the Gaussian Process predictive variance at query points x.

     Builds the noise-regularized gram matrix from the training data and
     subtracts the explained variance from the prior variance.
     """
     K, y, var_y, N = self.kyn()
     # Kernel matrix plus observation noise on the diagonal.
     reg_K = psd(K + var_y * tensor.eye(N))
     K_x = self.K_fn(self.x, x)
     # Explained part: diag(K_x^T reg_K^{-1} K_x).
     quad_form = dots(K_x.T, matrix_inverse(reg_K), K_x)
     # The leading 1 is the prior variance at a point — assumes a
     # normalized kernel; TODO confirm.
     var_x = 1 - diag(quad_form)
     if self.dtype != var_x.dtype:
         raise TypeError('var_x dtype', var_x.dtype)
     return var_x
Ejemplo n.º 2
0
 def s_variance(self, x):
     """Gaussian Process predictive variance evaluated at the points x."""
     K, y, var_y, N = self.kyn()
     # Noise-regularized gram matrix of the training inputs.
     gram = K + var_y * tensor.eye(N)
     rK = psd(gram)
     K_x = self.K_fn(self.x, x)
     # Variance explained by the training data at each query point.
     explained = diag(dots(K_x.T, matrix_inverse(rK), K_x))
     var_x = 1 - explained
     # Guard against silent dtype drift through the symbolic graph.
     if var_x.dtype != self.dtype:
         raise TypeError('var_x dtype', var_x.dtype)
     return var_x
Ejemplo n.º 3
0
    def s_mean(self, x):
        """Return the Gaussian Process posterior mean at points x."""
        K, y, var_y, N = self.kyn()
        # Kernel matrix regularized by the observation noise.
        noisy_K = psd(K + var_y * tensor.eye(N))
        # Dual weights: alpha = (K + var_y I)^{-1} y.
        alpha = tensor.dot(matrix_inverse(noisy_K), y)

        # Cross-covariance between training inputs and query points.
        cross_K = self.K_fn(self.x, x)
        y_x = tensor.dot(alpha, cross_K)
        # Guard against silent dtype drift through the symbolic graph.
        if self.dtype != y_x.dtype:
            raise TypeError('y_x dtype', y_x.dtype)
        return y_x
Ejemplo n.º 4
0
    def s_mean(self, x):
        """Gaussian Process mean prediction at the points x.

        Solves the regularized kernel system once for the dual weights,
        then projects them onto the cross-covariance with x.
        """
        K, y, var_y, N = self.kyn()
        regularized = psd(K + var_y * tensor.eye(N))
        inv_rK = matrix_inverse(regularized)
        alpha = tensor.dot(inv_rK, y)

        k_star = self.K_fn(self.x, x)
        y_x = tensor.dot(alpha, k_star)
        if y_x.dtype != self.dtype:
            raise TypeError('y_x dtype', y_x.dtype)
        return y_x
Ejemplo n.º 5
0
    def s_nll(self):
        """Marginal negative log likelihood of the model.

        :note: See RW.pdf page 37, Eq. 2.30.
        """
        K, y, var_y, N = self.kyn()
        rK = psd(K + var_y * tensor.eye(N))
        # The three terms of Eq. 2.30: data fit, complexity penalty,
        # and the Gaussian normalization constant.
        fit = 0.5 * dots(y, matrix_inverse(rK), y)
        complexity = 0.5 * tensor.log(det(rK))
        norm_const = N / 2.0 * tensor.log(2 * numpy.pi)
        nll = fit + complexity + norm_const
        if nll.dtype != self.dtype:
            raise TypeError('nll dtype', nll.dtype)
        return nll
Ejemplo n.º 6
0
    def s_deg_of_freedom(self):
        """Effective number of parameters ("degrees of freedom") of the
        kernel smoother.

        Defined pg. 25 of Rasmussen & Williams.
        """
        K, y, var_y, N = self.kyn()
        rK = psd(K + var_y * tensor.eye(N))
        # dof = trace(K (K + var_y I)^{-1}) — trace of the smoother matrix.
        smoother = tensor.dot(K, matrix_inverse(rK))
        dof = trace(smoother)
        if self.dtype != dof.dtype:
            raise TypeError('dof dtype', dof.dtype)
        return dof
Ejemplo n.º 7
0
    def s_nll(self):
        """Marginal negative log likelihood of the model.

        :note: See RW.pdf page 37, Eq. 2.30.
        """
        K, y, var_y, N = self.kyn()
        regularized = psd(K + var_y * tensor.eye(N))
        inv_rK = matrix_inverse(regularized)
        # Eq. 2.30: 0.5 y^T rK^{-1} y + 0.5 log|rK| + (N/2) log(2 pi)
        nll = (0.5 * dots(y, inv_rK, y)
               + 0.5 * tensor.log(det(regularized))
               + N / 2.0 * tensor.log(2 * numpy.pi))
        # Guard against silent dtype drift through the symbolic graph.
        if self.dtype != nll.dtype:
            raise TypeError('nll dtype', nll.dtype)
        return nll
Ejemplo n.º 8
0
    def s_deg_of_freedom(self):
        """
        Degrees of freedom aka "effective number of parameters"
        of kernel smoother.

        Defined pg. 25 of Rasmussen & Williams.
        """
        K, y, var_y, N = self.kyn()
        noisy_gram = psd(K + var_y * tensor.eye(N))
        inv_gram = matrix_inverse(noisy_gram)
        # Trace of K (K + var_y I)^{-1}.
        dof = trace(tensor.dot(K, inv_gram))
        if dof.dtype != self.dtype:
            raise TypeError('dof dtype', dof.dtype)
        return dof
Ejemplo n.º 9
0
def s_nll(K, y, var_y, prior_var):
    """
    Marginal negative log likelihood of the model.

    K - gram matrix (matrix-like)
    y - the training targets (vector-like)
    var_y - the variance of uncertainty about y (vector-like)
    prior_var - scalar multiplier on the gram matrix (prior signal
        variance — presumably; confirm against callers)

    :note: See RW.pdf page 37, Eq. 2.30.

    """
    n = y.shape[0]
    # Scaled gram matrix regularized by the observation noise.
    rK = psd(prior_var * K + var_y * TT.eye(n))
    inv_rK = matrix_inverse(rK)

    # The three terms of Eq. 2.30.
    data_fit = 0.5 * dots(y, inv_rK, y)
    complexity_penalty = 0.5 * TT.log(det(rK))
    norm_term = n / 2.0 * TT.log(2 * np.pi)
    return data_fit + complexity_penalty + norm_term
Ejemplo n.º 10
0
def s_variance(K, y, var_y, prior_var, K_new, var_min):
    rK = psd(prior_var * K + var_y * TT.eye(y.shape[0]))
    L = cholesky(rK)
    v = dots(matrix_inverse(L), prior_var * K_new)
    var_x = TT.maximum(prior_var - (v ** 2).sum(axis=0), var_min)
    return var_x
Ejemplo n.º 11
0
def s_mean(K, y, var_y, prior_var, K_new):
    """Posterior mean at new points given gram matrix K and targets y."""
    n = y.shape[0]
    # Noise-regularized, scaled gram matrix.
    rK = psd(prior_var * K + var_y * TT.eye(n))
    # Dual weights: alpha = rK^{-1} y.
    alpha = TT.dot(matrix_inverse(rK), y)
    return TT.dot(alpha, prior_var * K_new)