Example #1
0
 def logp(self, X):
     """Log-density of X under this prior; returns self.invalid unless v > p - 1."""
     v = self.v
     p = self.p
     # Determinant term and quadratic (trace) term of the density kernel.
     shape_term = log(det(X)) * -(v + p + 1.) / 2.
     trace_term = trace(self.S.dot(matrix_inverse(X))) / 2.
     density = -self.Z + shape_term - trace_term
     # Guard the degrees-of-freedom constraint symbolically.
     return ifelse(gt(v, p - 1), density, self.invalid)
Example #2
0
 def logp(self, X):
     """Log-density of X using the precomputed inverse scale matrix self.inv_S."""
     v = self.v
     p = self.p
     # Kernel: determinant term minus the trace quadratic form.
     log_det_part = log(det(X)) * (v - p - 1) / 2.
     trace_part = trace(self.inv_S.dot(X)) / 2.
     value = -self.Z + log_det_part - trace_part
     # Valid only when the degrees of freedom exceed p - 1.
     return ifelse(gt(v, p - 1), value, self.invalid)
Example #3
0
    def logp(X):
        """Log-density of X; n, p, V are taken from the enclosing scope."""
        log_det_V = log(det(V))
        # Unnormalized kernel: determinant term minus solve-based quadratic form.
        kernel = (n - p - 1) * log_det_V - trace(solve(V, X))
        return bound(
            (kernel - n * p * log(2) - n * log_det_V
             - 2 * multigammaln(p, n / 2)) / 2,
            n > p - 1)
Example #4
0
    def logp(X):
        """Log-density of X; n, p, V come from the enclosing scope."""
        log_det_V = log(det(V))
        # Quadratic form written via an explicit matrix inverse rather than solve().
        kernel = (n - p - 1) * log_det_V - trace(matrix_inverse(V).dot(X))
        return bound(
            (kernel - n * p * log(2) - n * log_det_V
             - 2 * multigammaln(p, n / 2)) / 2,
            all(n > p - 1))
Example #5
0
    def s_deg_of_freedom(self):
        """
        Degrees of freedom aka "effective number of parameters"
        of the kernel smoother.

        Defined pg. 25 of Rasmussen & Williams.
        """
        K, _y, var_y, N = self.kyn()
        # Regularized Gram matrix K + var_y * I, wrapped in the psd hint.
        regularized = psd(K + var_y * tensor.eye(N))
        dof = trace(tensor.dot(K, matrix_inverse(regularized)))
        # Enforce the model's dtype contract on the symbolic result.
        if dof.dtype != self.dtype:
            raise TypeError('dof dtype', dof.dtype)
        return dof
Example #6
0
    def s_deg_of_freedom(self):
        """
        Degrees of freedom aka "effective number of parameters"
        of the kernel smoother.

        Defined pg. 25 of Rasmussen & Williams.
        """
        K, y, var_y, N = self.kyn()
        ridge = K + var_y * tensor.eye(N)
        # trace(K (K + var_y I)^-1), the effective parameter count.
        dof = trace(tensor.dot(K, matrix_inverse(psd(ridge))))
        if dof.dtype != self.dtype:
            # The symbolic graph must produce the model's declared dtype.
            raise TypeError('dof dtype', dof.dtype)
        return dof
Example #7
0
    def logp(self, X):
        """Log-density of X given self.n (dof), self.p (dimension), self.V (scale)."""
        n = self.n
        p = self.p
        V = self.V

        log_det_V = log(det(V))
        # Determinant term minus the trace quadratic form.
        kernel = (n - p - 1) * log_det_V - trace(matrix_inverse(V).dot(X))

        return bound(
            (kernel - n * p * log(2) - n * log_det_V
             - 2 * multigammaln(p, n / 2)) / 2,
            n > (p - 1))
Example #8
0
 def trace(self):
     """Return the trace of this matrix variable as a symbolic expression."""
     # Imported lazily and aliased to avoid shadowing this method's name.
     from theano.sandbox.linalg import trace as linalg_trace
     return linalg_trace(self)
Example #9
0
File: var.py  Project: Donghuan/Theano
 def trace(self):
     """Symbolic trace of this matrix variable, delegated to the linalg sandbox op."""
     # Import the submodule rather than the bare name to keep the call explicit.
     from theano.sandbox import linalg
     return linalg.trace(self)
Example #10
0
File: niw.py  Project: afcarl/trmix
# NOTE(review): this excerpt appears truncated by the scraper -- b1, s1, C1,
# v1, b2, s2, tt (presumably theano.tensor), tl, np, and log_partf are defined
# above the visible region, and KL_divergence is cut off before its return.

# Symbolic natural parameters of the second distribution.
C2 = tt.dmatrix('C2')
v2 = tt.dscalar('v2')

# Log-determinants supplied externally as scalar inputs
# (precomputed numerically in KL_divergence below).
logdet1 = tt.dscalar('logdet1')
logdet2 = tt.dscalar('logdet2')

# log-partition functions
A1 = log_partf(b1, s1, C1, v1)
A2 = log_partf(b2, s2, C2, v2)
# Variants that take the precomputed log-determinants as an extra argument.
A1s = log_partf(b1, s1, C1, v1, logdet1)
A2s = log_partf(b2, s2, C2, v2, logdet2)

# KL divergence
# Exponential-family Bregman form: A(eta2) - A(eta1) + <eta1 - eta2, grad A(eta1)>,
# with one inner-product term per natural parameter (b, C, s, v).
D_KL = A2s - A1s \
    + tt.dot((b1 - b2).T, tt.grad(A1, b1)) \
    + tl.trace(tt.dot(C1 - C2, tt.grad(A1, C1))) \
    + (s1 - s2) * tt.grad(A1, s1) \
    + (v1 - v2) * tt.grad(A1s, v1)


def KL_divergence(m1, s1, P1, v1, m2, s2, P2, v2):
    """Numeric KL divergence between two NIW distributions (body truncated in this excerpt)."""
    # natural parameters
    b1 = -2. * s1 * m1
    b2 = -2. * s2 * m2
    C1 = P1 + s1 * np.dot(m1, m1.T)
    C2 = P2 + s2 * np.dot(m2, m2.T)

    # precompute log-determinants
    # slogdet returns (sign, log|det|); index [1] keeps the log-magnitude.
    logdet1 = np.linalg.slogdet(P1)[1]
    logdet2 = np.linalg.slogdet(P2)[1]
Example #11
0
 def logp(X):
     """Log-density of X; n, p, V are closed over from the enclosing scope."""
     log_det_V = log(det(V))
     # Kernel: determinant term minus the solve-based quadratic form.
     kernel = (n - p - 1) * log_det_V - trace(solve(V, X))
     return bound(
         (kernel - n * p * log(2) - n * log_det_V
          - 2 * multigammaln(p, n / 2)) / 2, n > p - 1)