Example #1
  def E_step(self, Data, LocalP ):
    GroupIDs = Data['GroupIDs']
    X = Data['X']
    N,D = X.shape
    assert self.D == D

    # Create lpr : N x K matrix
    #   where lpr[n,k] =def= log r[n,k], as in Bishop PRML eq 10.67
    lpr = np.empty( (N, self.K) )

    # LIKELIHOOD Terms
    for k in range(self.K):
      lpr[:,k] = -0.5*self.qObs[k].dF*self.qObs[k].dist_mahalanobis( X ) \
                 -0.5*D/self.qObs[k].kappa
    lpr += 0.5*self.logdetLam

    # PRIOR Terms
    for gg in range(len(GroupIDs)):
      lpr[ GroupIDs[gg] ] += LocalP['Elogw_perGroup'][gg]

    lprSUM = logsumexp(lpr, axis=1)
    resp   = np.exp(lpr - lprSUM[:, np.newaxis])
    resp   /= resp.sum( axis=1)[:,np.newaxis] # row normalize

    return resp
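
For reference, the quantity assembled above is the variational responsibility of Bishop PRML eq. 10.67, with qObs[k].dF playing the role of the Wishart degrees of freedom \nu_k, kappa the precision scale \beta_k, logdetLam the expectation E[log |\Lambda_k|], and dist_mahalanobis(X) the quadratic form (x_n - m_k)' W_k (x_n - m_k):

    log r[n,k] = E[log \pi_k] + 0.5*E[log |\Lambda_k|]
                 - 0.5*D/\beta_k - 0.5*\nu_k*(x_n - m_k)' W_k (x_n - m_k) + const

The E[log \pi_k] term is what the PRIOR loop adds per group; the remaining terms are the LIKELIHOOD block.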
Example #2
    def E_step(self, Data, LocalP):
        GroupIDs = Data['GroupIDs']
        X = Data['X']
        N, D = X.shape
        assert self.D == D

        # Create lpr : N x K matrix
        #   where lpr[n,k] =def= log r[n,k], as in Bishop PRML eq 10.67
        lpr = np.empty((N, self.K))

        # LIKELIHOOD Terms
        for k in range(self.K):
            lpr[:,k] = -0.5*self.qObs[k].dF*self.qObs[k].dist_mahalanobis( X ) \
                       -0.5*D/self.qObs[k].kappa
        lpr += 0.5 * self.logdetLam

        # PRIOR Terms
        for gg in range(len(GroupIDs)):
            lpr[GroupIDs[gg]] += LocalP['Elogw_perGroup'][gg]

        lprSUM = logsumexp(lpr, axis=1)
        resp = np.exp(lpr - lprSUM[:, np.newaxis])
        resp /= resp.sum(axis=1)[:, np.newaxis]  # row normalize

        return resp
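
The grouped examples (#1, #2) differ from the flat ones (#9, #10) only in the prior term: each group of rows gets its own E[log w] row via fancy indexing rather than one shared vector. A minimal sketch of that indexing pattern with hypothetical toy values, assuming each entry of GroupIDs is an integer index array selecting that group's rows:

    import numpy as np

    N, K = 6, 3
    lpr = np.zeros((N, K))
    GroupIDs = [np.arange(0, 3), np.arange(3, 6)]  # rows 0-2 = group 0, rows 3-5 = group 1
    Elogw_perGroup = np.log([[0.7, 0.2, 0.1],
                             [0.1, 0.2, 0.7]])
    for gg in range(len(GroupIDs)):
        lpr[GroupIDs[gg]] += Elogw_perGroup[gg]  # broadcasts one K-vector over the group's rows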
Example #3
 def E_step( self, X):
   lpr = np.log( self.gmm.w ) + self.gmm.calc_soft_evidence_mat( X )
   lprPerItem = logsumexp( lpr, axis=1 )
   resp   = np.exp( lpr-lprPerItem[:,np.newaxis] ) 
   if self.doVerify:
     if not np.allclose( np.sum(resp,axis=1), 1.0 ):
       np.set_printoptions( linewidth=120, precision=3, suppress=True )      
       raise Exception('Responsibilities do not sum to one!')
   logEvidence = lprPerItem.sum()
   return resp, logEvidence
Example #4
 def E_step(self, X):
     lpr = np.log(self.gmm.w) + self.gmm.calc_soft_evidence_mat(X)
     lprPerItem = logsumexp(lpr, axis=1)
     resp = np.exp(lpr - lprPerItem[:, np.newaxis])
     if self.doVerify:
         if not np.allclose(np.sum(resp, axis=1), 1.0):
             np.set_printoptions(linewidth=120, precision=3, suppress=True)
              raise Exception('Responsibilities do not sum to one!')
     logEvidence = lprPerItem.sum()
     return resp, logEvidence
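
Since lpr[n,k] = log(w_k) + log N(x_n | \mu_k, \Sigma_k), the logEvidence returned here is the exact data log-likelihood of Bishop PRML eq. 9.28, accumulated in the log domain:

    logEvidence = \sum_n logsumexp_k lpr[n,k]
                = \sum_n log( \sum_k w_k * N(x_n | \mu_k, \Sigma_k) )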
Example #5
    def E_step(self, Xchunk):
        '''Expectation step

        Returns
        -------
           resp : NxK matrix, resp[n,k] = Pr(Z[n]=k | X[n],mu[k],Sigma[k])
        '''
        lpr = np.log(self.gmm.w) + self.gmm.calc_soft_evidence_mat(Xchunk)
        lprPerItem = logsumexp(lpr, axis=1)
        logEvidence = lprPerItem.sum()
        resp = np.exp(lpr - lprPerItem[:, np.newaxis])
        return resp, logEvidence
Example #6
  def E_step( self, Xchunk ):
    '''Expectation step

       Returns
       -------
          resp : NxK matrix, resp[n,k] = Pr(Z[n]=k | X[n],mu[k],Sigma[k])
    '''
    lpr = np.log( self.gmm.w ) + self.gmm.calc_soft_evidence_mat( Xchunk )
    lprPerItem = logsumexp(lpr, axis=1)
    logEvidence = lprPerItem.sum()
    resp   = np.exp(lpr - lprPerItem[:, np.newaxis])
    return resp, logEvidence
Example #7
  def calc_evidence(self, X):
    """Compute evidence for given data X under current model
       Parameters:
         X : array_like, N x D

       Returns:  
         evBound : scalar real
               =  \sum_n log( \sum_k w_k * N( x_n | \mu_k, \Sigma_k ) )
                  see Bishop PRML eq. 9.28 (p. 439)
    """
    lpr = np.log( self.w ) + self.calc_soft_evidence_mat( X )
    evidenceBound = logsumexp(lpr, axis=1).sum()
    return evidenceBound
Example #8
  def calc_posterior_prob_mat(self, X):
    """Compute posterior probability for hidden component assignment Z
          under current model parameters mu,sigma given data X
       Parameters:
         X : array_like, N x D

       Returns:  
         resp : N x K
              =  Pr( z_n = k | x_n, \mu_k, \Sigma_k )
                  see Bishop PRML eq. 9.23 (p. 438)
    """
    lpr = np.log( self.w ) + self.calc_soft_evidence_mat( X )
    lprSUM = logsumexp(lpr, axis=1)
    return np.exp(lpr - lprSUM[:, np.newaxis])
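
In exact arithmetic this returns the standard GMM responsibilities of Bishop PRML eq. 9.23; the log-domain subtraction is just a numerically safer way to form the ratio:

    resp[n,k] = w_k * N(x_n | \mu_k, \Sigma_k) / \sum_j w_j * N(x_n | \mu_j, \Sigma_j)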
Example #9
 def E_step(self, X):
     N, D = X.shape
     assert self.D == D
     # Create lpr : N x K matrix
     #   where lpr[n,k] =def= log r[n,k], as in Bishop PRML eq 10.67
     lpr = np.empty((N, self.K))
     for k in range(self.K):
         # calculate the ( x_n - m )'*W*(x_n-m) term
         lpr[:,k] = -0.5*self.qObs[k].dF*self.qObs[k].dist_mahalanobis( X ) \
                    -0.5*D/self.qObs[k].kappa
     lpr += self.Elogw
     lpr += 0.5 * self.logdetLam
     lprSUM = logsumexp(lpr, axis=1)
     resp = np.exp(lpr - lprSUM[:, np.newaxis])
     resp /= resp.sum(axis=1)[:, np.newaxis]  # row normalize
     return resp
Example #10
 def E_step(self, X):
   N,D = X.shape
   assert self.D == D
   # Create lpr : N x K matrix
   #   where lpr[n,k] =def= log r[n,k], as in Bishop PRML eq 10.67
   lpr = np.empty( (N, self.K) )
   for k in range(self.K):
     # calculate the ( x_n - m )'*W*(x_n-m) term
     lpr[:,k] = -0.5*self.qObs[k].dF*self.qObs[k].dist_mahalanobis( X ) \
                -0.5*D/self.qObs[k].kappa
   lpr += self.Elogw
   lpr += 0.5*self.logdetLam
   lprSUM = logsumexp(lpr, axis=1)
   resp   = np.exp(lpr - lprSUM[:, np.newaxis])
   resp   /= resp.sum( axis=1)[:,np.newaxis] # row normalize
   return resp
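
Every example above normalizes in the log domain instead of exponentiating lpr directly, which matters whenever the log terms are large in magnitude. A minimal self-contained sketch of the trick (assuming scipy.special.logsumexp, since the snippets do not show their imports):

    import numpy as np
    from scipy.special import logsumexp

    lpr = np.log([[0.5, 0.5], [0.9, 0.1]]) - 1000.0  # deliberately badly scaled
    # naive np.exp(lpr) underflows to all zeros, so row-normalizing would divide by zero
    lprPerItem = logsumexp(lpr, axis=1)              # stable log of each row's sum
    resp = np.exp(lpr - lprPerItem[:, np.newaxis])   # the shift cancels the -1000 offset
    print(resp)                                      # [[0.5 0.5], [0.9 0.1]]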