Example #1
def test_BPlanner_makePlanAtBatch_someDisqualifiedForPrevFailures(K=10):
    SS = SuffStatBag(K=K)
    SS.setField('N', np.arange(K), dims='K')
    SSbatch = SS.copy()

    # Do the same test, while eliminating some uids
    MoveRecordsByUID = defaultdict(lambda: defaultdict(int))
    for uid in [0, 6, 9]:
        MoveRecordsByUID[uid]['b_nFail'] = 1
        MoveRecordsByUID[uid]['b_nFailRecent'] = 1
        MoveRecordsByUID[uid]['b_batchIDsWhoseProposalFailed'] = set([0])

    for b_minNumAtomsForTargetComp in [2, 5, K]:
        BArgs['b_minNumAtomsForTargetComp'] = b_minNumAtomsForTargetComp
        MovePlans = selectCompsForBirthAtCurrentBatch(
            SS=SS,
            SSbatch=SSbatch,
            MovePlans=dict(),
            MoveRecordsByUID=MoveRecordsByUID,
            **BArgs)
        nChosen = len(MovePlans['b_targetUIDs'])
        nFailPerUID = list()
        for uid in SS.uids:
            bIDs = MoveRecordsByUID[uid]['b_batchIDsWhoseProposalFailed']
            if isinstance(bIDs, set):
                nFailPerUID.append(len(bIDs))
            else:
                nFailPerUID.append(0)
        nFailPerUID = np.asarray(nFailPerUID)
        nExpected = np.sum(
            np.logical_and(SS.N >= b_minNumAtomsForTargetComp,
                           nFailPerUID < 1))
        assert nChosen == nExpected
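
Note: the BPlanner tests in this listing (Examples #1, #10, #13) assume module-level imports and a shared BArgs keyword dict that the snippets do not show. A minimal sketch of the assumed fixture follows; the exact set of b_* defaults is a guess, and only the key the tests overwrite is included.

import numpy as np
from collections import defaultdict

# Assumed module-level fixture: default keyword options for birth moves.
# Each test overwrites b_minNumAtomsForTargetComp inside its loop; any
# other b_* options required by selectCompsForBirthAtCurrentBatch are
# elided here.
BArgs = dict(b_minNumAtomsForTargetComp=2)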
Example #2
class TestMixModelEMUnifAlpha(object):
    def shortDescription(self):
        return None

    def setUp(self):
        ''' Create a deliberately simple case to verify the calculations.
        '''
        self.alpha0 = 1.0
        self.allocM = MixModel('EM', dict(alpha0=self.alpha0))
        self.N = np.asarray([1., 2., 3, 4, 5.])
        self.SS = SuffStatBag(K=5, D=1)
        self.SS.setField('N', self.N, dims='K')
        self.resp = np.random.rand(100, 3)
        self.precompEntropy = np.sum(self.resp * np.log(self.resp), axis=0)

    def test_update_global_params_EM(self):
        self.allocM.update_global_params_EM(self.SS)
        wTrue = (self.N + self.alpha0 - 1.0)
        wTrue = wTrue / np.sum(wTrue)
        wEst = self.allocM.w
        print(wTrue)
        print(wEst)
        assert np.allclose(wTrue, wEst)

    def test_get_global_suff_stats(self):
        Data = bnpy.data.XData(np.random.randn(10, 1))
        SS = self.allocM.get_global_suff_stats(Data,
                                               dict(resp=self.resp),
                                               doPrecompEntropy=True)
        assert np.allclose(self.precompEntropy, SS.getELBOTerm('ElogqZ'))
        assert np.allclose(np.sum(self.resp, axis=0), SS.N)
Example #3
    def get_global_suff_stats(self, Data, LP, doPrecompEntropy=None, **kwargs):
        """ Calculate the sufficient statistics for global parameter updates
        Only adds stats relevant for this allocModel. 
        Other stats are added by the obsModel.
        
        Args
        -------
        Data : bnpy data object
        LP : local param dict with fields
              resp : Data.nObs x K array,
                       where resp[n,k] = posterior resp of comp k
        doPrecompEntropy : boolean flag
                      indicates whether to precompute ELBO terms in advance
                      used for memoized learning algorithms (moVB)

        Returns
        -------
        SS : SuffStats for K components, with field
              N : vector of length-K,
                   effective number of observations assigned to each comp
    """
        Nvec = np.sum(LP["resp"], axis=0)
        SS = SuffStatBag(K=Nvec.size, D=Data.dim)
        SS.setField("N", Nvec, dims=("K"))
        if doPrecompEntropy is not None:
            ElogqZ_vec = self.E_logqZ(LP)
            SS.setELBOTerm("ElogqZ", ElogqZ_vec, dims=("K"))
        return SS
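
As a quick, self-contained check of the contract documented above: the N field is the column sums of resp, and ElogqZ sums resp * log(resp) per component. A numpy-only sketch (no bnpy objects needed):

import numpy as np

resp = np.random.rand(100, 3)
resp /= resp.sum(axis=1, keepdims=True)   # rows sum to 1, like posterior resp

Nvec = np.sum(resp, axis=0)               # effective count per component
assert np.isclose(Nvec.sum(), 100.0)      # total mass equals nObs

ElogqZ_vec = np.sum(resp * np.log(resp), axis=0)
assert np.all(ElogqZ_vec <= 0)            # each r log r term is nonpositive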
Example #4
class TestMixModelEMUnifAlpha(object):
  def shortDescription(self):
    return None

  def setUp(self):
    '''
    Create a deliberately simple case to verify the calculations.
    '''
    self.alpha0 = 1.0
    self.allocM = MixModel('EM', dict(alpha0=self.alpha0))
    self.N = np.asarray([1.,2.,3,4,5.])
    self.SS = SuffStatBag(K=5, D=1)
    self.SS.setField('N', self.N, dims='K')
    self.resp = np.random.rand(100,3)
    self.precompEntropy = np.sum(self.resp * np.log(self.resp), axis=0)
    
  def test_update_global_params_EM(self):
    self.allocM.update_global_params_EM(self.SS)
    wTrue = (self.N + self.alpha0 - 1.0)
    wTrue = wTrue / np.sum(wTrue)
    wEst = self.allocM.w
    print(wTrue)
    print(wEst)
    assert np.allclose(wTrue, wEst)
    
  def test_get_global_suff_stats(self):
    Data = bnpy.data.XData(np.random.randn(10,1))
    SS = self.allocM.get_global_suff_stats(Data, dict(resp=self.resp), doPrecompEntropy=True)
    assert np.allclose(self.precompEntropy, SS.getELBOTerm('ElogqZ'))
    assert np.allclose( np.sum(self.resp, axis=0), SS.N)
Example #5
class TestMixModelEMUnifGamma(object):
    def shortDescription(self):
        return None

    def setUp(self):
        ''' Create simple case to double-check calculations.
        '''
        self.gamma = 1.0
        self.allocM = FiniteMixtureModel('EM', dict(gamma=self.gamma))
        self.N = np.asarray([1., 2., 3, 4, 5.])
        self.SS = SuffStatBag(K=5, D=1)
        self.SS.setField('N', self.N, dims='K')
        self.resp = np.random.rand(100, 3)
        self.precompEntropy = -1 * np.sum(self.resp * np.log(self.resp),
                                          axis=0)

    def test_update_global_params_EM(self):
        K = self.N.size
        self.allocM.update_global_params_EM(self.SS)
        wTrue = (self.N + self.gamma / float(K) - 1.0)
        wTrue = wTrue / np.sum(wTrue)
        wEst = self.allocM.w
        print(wTrue)
        print(wEst)
        assert np.allclose(wTrue, wEst)

    def test_get_global_suff_stats(self):
        Data = bnpy.data.XData(np.random.randn(10, 1))
        SS = self.allocM.get_global_suff_stats(Data,
                                               dict(resp=self.resp),
                                               doPrecompEntropy=True)
        print(self.precompEntropy)
        print(SS.getELBOTerm('Hresp'))
        assert np.allclose(self.precompEntropy, SS.getELBOTerm('Hresp'))
        assert np.allclose(np.sum(self.resp, axis=0), SS.N)
Example #6
 def setUp(self):
     self.alpha0 = 2.0
     self.allocM = MixModel('EM', dict(alpha0=self.alpha0))
     self.N = np.asarray([1., 2., 3, 4, 5.])
     self.SS = SuffStatBag(K=5, D=1)
     self.SS.setField('N', self.N, dims='K')
     self.resp = np.random.rand(100, 3)
     self.precompEntropy = np.sum(self.resp * np.log(self.resp), axis=0)
Example #7
class TestMixModelEMNonunifAlpha(TestMixModelEMUnifAlpha):
  def setUp(self):
    self.alpha0 = 2.0
    self.allocM = MixModel('EM', dict(alpha0=self.alpha0))
    self.N = np.asarray([1.,2.,3,4,5.])
    self.SS = SuffStatBag(K=5, D=1)
    self.SS.setField('N', self.N, dims='K')
    self.resp = np.random.rand(100,3)
    self.precompEntropy = np.sum(self.resp * np.log(self.resp), axis=0)
Example #8
 def setUp(self):
     self.gamma = 2.0
     self.allocM = FiniteMixtureModel('EM', dict(gamma=self.gamma))
     self.N = np.asarray([1., 2., 3, 4, 5.])
     self.SS = SuffStatBag(K=5, D=1)
     self.SS.setField('N', self.N, dims='K')
     self.resp = np.random.rand(100, 3)
     self.precompEntropy = -1 * np.sum(self.resp * np.log(self.resp),
                                       axis=0)
Example #9
 def setUp(self):
     ''' Create simple case to double-check calculations.
     '''
     self.gamma = 1.0
     self.allocM = FiniteMixtureModel('EM', dict(gamma=self.gamma))
     self.N = np.asarray([1., 2., 3, 4, 5.])
     self.SS = SuffStatBag(K=5, D=1)
     self.SS.setField('N', self.N, dims='K')
     self.resp = np.random.rand(100, 3)
     self.precompEntropy = -1 * np.sum(self.resp * np.log(self.resp),
                                       axis=0)
Example #10
def test_BPlanner_makePlanAtBatch_noPrevFailures(K=10):
	SS = SuffStatBag(K=K)
	SS.setField('N',  np.arange(K), dims='K')
	SSbatch = SS.copy()

	for b_minNumAtomsForTargetComp in [2, 5, K]:
		BArgs['b_minNumAtomsForTargetComp'] = b_minNumAtomsForTargetComp
		MovePlans = selectCompsForBirthAtCurrentBatch(
			SS=SS, SSbatch=SSbatch, MovePlans=dict(), **BArgs)
		nChosen = len(MovePlans['b_targetUIDs'])
		assert nChosen == np.sum(SS.N >= b_minNumAtomsForTargetComp)
Example #11
 def setUp(self):
     ''' Create a deliberately simple case to verify the calculations.
     '''
     self.alpha0 = 1.0
     self.allocM = MixModel('EM', dict(alpha0=self.alpha0))
     self.N = np.asarray([1., 2., 3, 4, 5.])
     self.SS = SuffStatBag(K=5, D=1)
     self.SS.setField('N', self.N, dims='K')
     self.resp = np.random.rand(100, 3)
     self.precompEntropy = np.sum(self.resp * np.log(self.resp), axis=0)
Example #12
    def calcHardMergeGap(self, SS, kA, kB):
        ''' Calculate scalar improvement in ELBO for hard merge of comps kA, kB

        Does *not* include any entropy.

        Returns
        ---------
        L : scalar
        '''
        m_K = SS.K - 1
        m_SS = SuffStatBag(K=SS.K, D=0)
        m_SS.setField('StartStateCount', SS.StartStateCount.copy(), dims='K')
        m_SS.setField('TransStateCount',
                      SS.TransStateCount.copy(),
                      dims=('K', 'K'))
        m_SS.mergeComps(kA, kB)

        # Create candidate beta vector
        m_beta = StickBreakUtil.rho2beta(self.rho)
        m_beta[kA] += m_beta[kB]
        m_beta = np.delete(m_beta, kB, axis=0)

        # Create candidate rho and omega vectors
        m_rho = StickBreakUtil.beta2rho(m_beta, m_K)
        m_omega = np.delete(self.omega, kB)

        # Create candidate startTheta
        m_startTheta = self.startAlpha * m_beta.copy()
        m_startTheta[:m_K] += m_SS.StartStateCount

        # Create candidate transTheta
        m_transTheta = self.alpha * np.tile(m_beta, (m_K, 1))
        if self.kappa > 0:
            m_transTheta[:, :m_K] += self.kappa * np.eye(m_K)
        m_transTheta[:, :m_K] += m_SS.TransStateCount

        # Evaluate objective func. for both candidate and current model
        Lcur = calcELBO_LinearTerms(SS=SS,
                                    rho=self.rho,
                                    omega=self.omega,
                                    startTheta=self.startTheta,
                                    transTheta=self.transTheta,
                                    alpha=self.alpha,
                                    startAlpha=self.startAlpha,
                                    gamma=self.gamma,
                                    kappa=self.kappa)

        Lprop = calcELBO_LinearTerms(SS=m_SS,
                                     rho=m_rho,
                                     omega=m_omega,
                                     startTheta=m_startTheta,
                                     transTheta=m_transTheta,
                                     alpha=self.alpha,
                                     startAlpha=self.startAlpha,
                                     gamma=self.gamma,
                                     kappa=self.kappa)

        # Note: this gap relies on the fact that all nonlinear ELBO terms
        # are entropies, which are handled separately from this linear gap.
        return Lprop - Lcur
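
A toy numpy check of the candidate-beta step used above, independent of bnpy: merging comps kA and kB pools their beta mass into kA and preserves normalization.

import numpy as np

beta = np.asarray([0.4, 0.3, 0.2, 0.1])
kA, kB = 1, 3
m_beta = beta.copy()
m_beta[kA] += m_beta[kB]        # pool mass into kA
m_beta = np.delete(m_beta, kB)  # drop kB
assert np.isclose(m_beta.sum(), beta.sum())
assert m_beta.size == beta.size - 1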
Example #13
def test_BPlanner_makePlanAtBatch_someDQForPrevFailuresWithOtherBatches(K=20):
    print('')
    SS = SuffStatBag(K=K)
    SS.setField('N', np.arange(K), dims='K')
    SSbatch = SS.copy()

    # Select some subset of uids to be disqualified
    PRNG = np.random.RandomState(11)
    dqUIDs = PRNG.choice(K, size=3, replace=False)
    otherfailUIDs = PRNG.choice(K, size=3, replace=False)

    # Do the same test, while eliminating some uids
    MoveRecordsByUID = defaultdict(lambda: defaultdict(int))
    for uid in dqUIDs:
        MoveRecordsByUID[uid]['b_nFail'] = 1
        MoveRecordsByUID[uid]['b_nFailRecent'] = 1
        MoveRecordsByUID[uid]['b_batchIDsWhoseProposalFailed'] = set([0])
        print('PREV FAIL AT THIS BATCH: uid ', uid)
    for uid in otherfailUIDs:
        if uid in dqUIDs:
            continue
        MoveRecordsByUID[uid]['b_nFail'] = 1
        MoveRecordsByUID[uid]['b_nFailRecent'] = 1
        MoveRecordsByUID[uid]['b_batchIDsWhoseProposalFailed'] = set([1])
        print('PREV FAIL AT ANOTHER BATCH: uid ', uid)

    for b_minNumAtomsForTargetComp in [2, 5, 10, K]:
        BArgs['b_minNumAtomsForTargetComp'] = b_minNumAtomsForTargetComp
        MovePlans = selectCompsForBirthAtCurrentBatch(
            SS=SS,
            SSbatch=SSbatch,
            MovePlans=dict(),
            MoveRecordsByUID=MoveRecordsByUID,
            **BArgs)
        nChosen = len(MovePlans['b_targetUIDs'])
        nFailPerUID = list()
        for uid in SS.uids:
            bIDs = MoveRecordsByUID[uid]['b_batchIDsWhoseProposalFailed']
            if isinstance(bIDs, set) and 0 in bIDs:
                nFailPerUID.append(len(bIDs))
            else:
                nFailPerUID.append(0)
        nFailPerUID = np.asarray(nFailPerUID)
        nExpected = np.sum(
            np.logical_and(SS.N >= b_minNumAtomsForTargetComp,
                           nFailPerUID < 1))
        assert nChosen == nExpected
Example #14
  def test_entropy_posterior_gets_smaller(self, N=1):
    PRNG = np.random.RandomState(seed=8675309)
    for trial in range(3):
      X = PRNG.randn(N, self.distr.D)
      xxT = np.dot(X.T, X)

      SS = SuffStatBag(K=1, D=self.distr.D)
      SS.setField('N', [N], dims='K')
      SS.setField('xxT', [xxT], dims=('K','D','D'))

      postD = self.distr.get_post_distr(SS, 0)
      assert postD.D == self.distr.D
      Hpost = postD.get_entropy()
      Hprior = self.distr.get_entropy()
      print('Prior %.3g, Post %.3g' % (Hprior, Hpost))
      print(self.distr.invW)
      print(postD.invW)
      assert Hpost < Hprior
Example #15
    def get_global_suff_stats(self, Data, LP, doPrecompEntropy=None, **kwargs):
        ''' Create sufficient stats needed for global param updates

        Args
        -------
        Data : bnpy data object
        LP : Dictionary containing the local parameters. Expected to contain:
            resp : Data.nObs x K array
            respPair : Data.nObs x K x K array (from the def. of respPair, note
                       respPair[0,:,:] is undefined)

        Returns
        -------
        SS : SuffStatBag with fields
            StartStateCount : A vector of length K with entry k being
                             resp(z_{1k}) = resp[0,k]
            TransStateCount : A K x K matrix where TransStateCount[j,k] =
                           sum_{n=2}^{Data.nObs} respPair(z_{n-1,j}, z_{n,k})
            N : A vector of length K with entry k being
                sum_{n=1}^{Data.nObs} resp(z_{nk})

            The first two of these are used by FiniteHMM.update_global_params,
            and the third is used by ObsModel.update_global_params.

        (see the documentation for information about resp and respPair)
        '''
        resp = LP['resp']
        respPair = LP['respPair']
        K = resp.shape[1]
        startLocIDs = Data.doc_range[:-1]

        StartStateCount = np.sum(resp[startLocIDs], axis=0)
        N = np.sum(resp, axis=0)
        TransStateCount = np.sum(respPair, axis=0)

        SS = SuffStatBag(K=K, D=Data.dim)
        SS.setField('StartStateCount', StartStateCount, dims=('K'))
        SS.setField('TransStateCount', TransStateCount, dims=('K', 'K'))
        SS.setField('N', N, dims=('K'))

        if doPrecompEntropy is not None:
            entropy = self.elbo_entropy(Data, LP)
            SS.setELBOTerm('Elogqz', entropy, dims=None)
        return SS
Example #16
def calcSummaryStats(Data, SS, LP, **kwargs):
    ''' Calculate summary statistics for given dataset and local parameters

    Returns
    --------
    SS : SuffStatBag object, with K components.
    '''
    X = Data.X
    if 'resp' in LP:
        resp = LP['resp']
        K = resp.shape[1]
        # 1/2: Compute mean statistic
        S_x = dotATB(resp, X)
        # 2/2: Compute expected outer-product statistic
        S_xxT = np.zeros((K, Data.dim, Data.dim))
        sqrtResp_k = np.sqrt(resp[:, 0])
        sqrtRX_k = sqrtResp_k[:, np.newaxis] * Data.X
        S_xxT[0] = dotATA(sqrtRX_k)
        for k in range(1, K):
            np.sqrt(resp[:, k], out=sqrtResp_k)
            np.multiply(sqrtResp_k[:, np.newaxis], Data.X, out=sqrtRX_k)
            S_xxT[k] = dotATA(sqrtRX_k)
    else:
        spR = LP['spR']
        K = spR.shape[1]
        # 1/2: Compute mean statistic
        S_x = spR.T * X
        # 2/2: Compute expected outer-product statistic
        S_xxT = calcSpRXXT(X=X, spR_csr=spR)

    if SS is None:
        SS = SuffStatBag(K=K, D=Data.dim)
    # Expected mean for each state k
    SS.setField('x', S_x, dims=('K', 'D'))
    # Expected outer-product for each state k
    SS.setField('xxT', S_xxT, dims=('K', 'D', 'D'))
    # Expected count for each k
    #  Usually computed by allocmodel. But just in case...
    if not hasattr(SS, 'N'):
        if 'resp' in LP:
            SS.setField('N', LP['resp'].sum(axis=0), dims='K')
        else:
            SS.setField('N', as1D(toCArray(LP['spR'].sum(axis=0))), dims='K')
    return SS
Example #17
    def calcSummaryStatsForContigBlock(self, Data, SS=None, a=0, b=0):
        ''' Calculate sufficient stats for a single contiguous block of data
        '''
        if SS is None:
            SS = SuffStatBag(K=1, D=Data.dim)

        SS.setField('N', (b - a) * np.ones(1), dims='K')
        SS.setField(
            'x', np.sum(Data.X[a:b], axis=0)[np.newaxis, :], dims=('K', 'D'))
        SS.setField(
            'xxT', dotATA(Data.X[a:b])[np.newaxis, :, :], dims=('K', 'D', 'D'))
        return SS
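
The contiguous-block stats above equal dense-resp stats with resp[a:b] = 1 and 0 elsewhere; a numpy spot check for the 'x' field:

import numpy as np

X = np.random.randn(20, 3)
a, b = 5, 12
x_block = np.sum(X[a:b], axis=0)

resp = np.zeros(20)
resp[a:b] = 1.0
assert np.allclose(x_block, resp @ X)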
Example #18
 def setUp(self):
   ''' Create a deliberately simple case to verify the calculations.
   '''
   self.alpha0 = 1.0
   self.allocM = MixModel('EM', dict(alpha0=self.alpha0))
   self.N = np.asarray([1.,2.,3,4,5.])
   self.SS = SuffStatBag(K=5, D=1)
   self.SS.setField('N', self.N, dims='K')
   self.resp = np.random.rand(100,3)
   self.precompEntropy = np.sum(self.resp * np.log(self.resp), axis=0)
Example #19
  def get_global_suff_stats(self, Data, LP, doPrecompEntropy=None, **kwargs):
    ''' Calculate the sufficient statistics for global parameter updates
        Only adds stats relevant for this allocModel. 
        Other stats are added by the obsModel.
        
        Args
        -------
        Data : bnpy data object
        LP : local param dict with fields
              resp : Data.nObs x K array,
                       where resp[n,k] = posterior resp of comp k
        doPrecompEntropy : boolean flag
                      indicates whether to precompute ELBO terms in advance
                      used for memoized learning algorithms (moVB)

        Returns
        -------
        SS : SuffStats for K components, with field
              N : vector of length-K,
                   effective number of observations assigned to each comp
    '''
    Nvec = np.sum( LP['resp'], axis=0 )
    SS = SuffStatBag(K=Nvec.size, D=Data.dim)
    SS.setField('N', Nvec, dims=('K'))
    if doPrecompEntropy is not None:
      ElogqZ_vec = self.E_logqZ(LP)
      SS.setELBOTerm('ElogqZ', ElogqZ_vec, dims=('K'))
    return SS
Example #20
 def get_global_suff_stats(self, Data, LP, doPrecompEntropy=None, **kwargs):
     ''' Calculate sufficient statistics.
         Admixture models have no suff stats for allocation   
     '''
     wv = LP['word_variational']
     _, K = wv.shape
     SS = SuffStatBag(K=K, D=Data.vocab_size)
     SS.setField('nDoc', Data.nDoc, dims=None)
     if doPrecompEntropy:
         SS.setELBOTerm('ElogpZ', self.E_log_pZ(Data, LP), dims='K')
         SS.setELBOTerm('ElogqZ', self.E_log_qZ(Data, LP), dims='K')
         SS.setELBOTerm('ElogpPi', self.E_log_pPI(Data, LP), dims=None)
         SS.setELBOTerm('ElogqPi', self.E_log_qPI(Data, LP), dims=None)
     return SS
Example #21
    def calcSummaryStatsForContigBlock(self, Data, a=0, b=0, **kwargs):
        ''' Calculate summary stats for a contiguous block of the data.

        Returns
        --------
        SS : SuffStatBag object, with 1 component.
        '''
        Xab = Data.X[a:b]  # 2D array, Nab x D
        CountON = np.sum(Xab, axis=0)[np.newaxis, :]
        CountOFF = (b - a) - CountON

        SS = SuffStatBag(K=1, D=Data.dim)
        SS.setField('N', np.asarray([b - a], dtype=np.float64), dims='K')
        SS.setField('Count1', CountON, dims=('K', 'D'))
        SS.setField('Count0', CountOFF, dims=('K', 'D'))
        return SS
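
A sanity check of the fields above: for binary data, Count1 + Count0 in every dimension equals the number of rows in the block.

import numpy as np

Xab = (np.random.rand(8, 4) > 0.5).astype(np.float64)
CountON = np.sum(Xab, axis=0)
CountOFF = Xab.shape[0] - CountON
assert np.allclose(CountON + CountOFF, Xab.shape[0])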
Example #22
 def test_entropy_posterior_gets_smaller(self, N=10):
     PRNG = np.random.RandomState(seed=8675309)
     for trial in range(3):
         X = PRNG.randn(N, self.distr.D) + self.distr.m
         x = np.sum(X, axis=0)
         xxT = np.dot(X.T, X)
         SS = SuffStatBag(K=1, D=self.distr.D)
         SS.setField('N', [N], dims='K')
         SS.setField('x', [x], dims=('K', 'D'))
         SS.setField('xxT', [xxT], dims=('K', 'D', 'D'))
         postD = self.distr.get_post_distr(SS, 0)
         assert postD.D == self.distr.D
         Hpost = postD.entropyWish()
         Hprior = self.distr.entropyWish()
         print('Prior %.3g, Post %.3g' % (Hprior, Hpost))
         assert Hpost < Hprior
Example #23
  def get_global_suff_stats(self, Data, LP,
                            doPrecompEntropy=False,
                            doPrecompMergeEntropy=False, mPairIDs=None):
    ''' Calculate the sufficient statistics for global parameter updates
        Only adds stats relevant for this allocModel. 
        Other stats are added by the obsModel.
        
        Args
        -------
        Data : bnpy data object
        LP : local param dict with fields
              resp : Data.nObs x K array,
                       where resp[n,k] = posterior resp of comp k
        doPrecompEntropy : boolean flag
                      indicates whether to precompute ELBO terms in advance
                      used for memoized learning algorithms (moVB)
        doPrecompMergeEntropy : boolean flag
                      indicates whether to precompute ELBO terms in advance
                      for all possible merges of pairs of components
                      used for optional merge moves

        Returns
        -------
        SS : SuffStats for K components, with field
              N : vector of length-K,
                   effective number of observations assigned to each comp
    '''
    Nvec = np.sum(LP['resp'], axis=0)
    SS = SuffStatBag(K=Nvec.size, D=Data.dim)
    SS.setField('N', Nvec, dims=('K'))
    if doPrecompEntropy:
      ElogqZ_vec = self.E_logqZ(LP)
      SS.setELBOTerm('ElogqZ', ElogqZ_vec, dims=('K'))
    if doPrecompMergeEntropy:
      # Hmerge : KxK matrix of entropies for all possible pair-wise merges
      # for example, if we had only 3 components {0,1,2}
      # Hmerge = [ 0 H(0,1) H(0,2)
      #            0   0    H(1,2)
      #            0   0      0 ]      
      #  where H(i,j) is entropy if components i and j merged.
      Hmerge = np.zeros((self.K, self.K))
      for jj in range(self.K):
        compIDs = np.arange(jj+1, self.K)
        Rcombo = LP['resp'][:,jj][:,np.newaxis] + LP['resp'][:,compIDs]
        Hmerge[jj,compIDs] = np.sum(Rcombo*np.log(Rcombo+EPS), axis=0)
      SS.setMergeTerm('ElogqZ', Hmerge, dims=('K','K'))
    return SS
Example #24
    def get_global_suff_stats(self,
                              Data,
                              LP,
                              doPrecompEntropy=False,
                              doPrecompMergeEntropy=False,
                              mPairIDs=None):
        ''' Calculate the sufficient statistics for global parameter updates
        Only adds stats relevant for this allocModel. 
        Other stats are added by the obsModel.
        
        Args
        -------
        Data : bnpy data object
        LP : local param dict with fields
              resp : Data.nObs x K array,
                       where resp[n,k] = posterior resp of comp k
        doPrecompEntropy : boolean flag
                      indicates whether to precompute ELBO terms in advance
                      used for memoized learning algorithms (moVB)
        doPrecompMergeEntropy : boolean flag
                      indicates whether to precompute ELBO terms in advance
                      for all possible merges of pairs of components
                      used for optional merge moves

        Returns
        -------
        SS : SuffStats for K components, with field
              N : vector of length-K,
                   effective number of observations assigned to each comp
        '''
        Nvec = np.sum(LP['resp'], axis=0)
        SS = SuffStatBag(K=Nvec.size, D=Data.dim)
        SS.setField('N', Nvec, dims=('K'))
        if doPrecompEntropy:
            ElogqZ_vec = self.E_logqZ(LP)
            SS.setELBOTerm('ElogqZ', ElogqZ_vec, dims=('K'))
        if doPrecompMergeEntropy:
            # Hmerge : KxK matrix of entropies for all possible pair-wise merges
            # for example, if we had only 3 components {0,1,2}
            # Hmerge = [ 0 H(0,1) H(0,2)
            #            0   0    H(1,2)
            #            0   0      0 ]
            #  where H(i,j) is entropy if components i and j merged.
            Hmerge = np.zeros((self.K, self.K))
            for jj in range(self.K):
                compIDs = np.arange(jj + 1, self.K)
                Rcombo = LP['resp'][:, jj][:, np.newaxis] \
                    + LP['resp'][:, compIDs]
                Hmerge[jj, compIDs] = np.sum(
                    Rcombo * np.log(Rcombo + EPS), axis=0)
            SS.setMergeTerm('ElogqZ', Hmerge, dims=('K', 'K'))
        return SS
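
A direct per-pair check of one row of the Hmerge computation above. EPS here is a stand-in value; the real module defines its own constant.

import numpy as np

EPS = 1e-8  # assumption: stand-in for the module's EPS constant

resp = np.random.rand(100, 4)
resp /= resp.sum(axis=1, keepdims=True)
K = resp.shape[1]
jj = 1
compIDs = np.arange(jj + 1, K)
Rcombo = resp[:, jj][:, np.newaxis] + resp[:, compIDs]
row = np.sum(Rcombo * np.log(Rcombo + EPS), axis=0)
for i, kk in enumerate(compIDs):
    r = resp[:, jj] + resp[:, kk]
    assert np.isclose(row[i], np.sum(r * np.log(r + EPS)))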
Example #25
def calcSummaryStats(Data, SS, LP, **kwargs):
    ''' Calculate summary statistics for given dataset and local parameters

    Returns
    --------
    SS : SuffStatBag object, with K components.
    '''
    X = Data.X
    if 'resp' in LP:
        resp = LP['resp']
        K = resp.shape[1]
        # 1/2: Compute mean statistic
        S_x = dotATB(resp, X)
        # 2/2: Compute expected outer-product statistic
        S_xx = calcRXX_withDenseResp(resp, X)
    else:
        spR = LP['spR']
        K = spR.shape[1]
        # 1/2: Compute mean statistic
        S_x = spR.T * X
        # 2/2: Compute expected outer-product statistic
        S_xx = calcSpRXX(X=X, spR_csr=spR)
    if SS is None:
        SS = SuffStatBag(K=K, D=Data.dim)
    # Expected mean for each state k
    SS.setField('x', S_x, dims=('K', 'D'))
    # Expected sum-of-squares for each state k
    SS.setField('xx', S_xx, dims=('K', 'D'))
    # Expected count for each k
    #  Usually computed by allocmodel. But sometimes not (eg TopicModel)
    if not hasattr(SS, 'N'):
        if 'resp' in LP:
            SS.setField('N', LP['resp'].sum(axis=0), dims='K')
        else:
            SS.setField('N', as1D(toCArray(LP['spR'].sum(axis=0))), dims='K')
    return SS
Example #26
 def get_global_suff_stats(self, Data, LP, doPrecompEntropy=None, **kwargs):
     ''' Calculate sufficient statistics.
         Admixture models have no suff stats for allocation   
     '''
     wv = LP['word_variational']
     _, K = wv.shape
     SS = SuffStatBag(K=K, D=Data.vocab_size)
     SS.setField('nDoc', Data.nDoc, dims=None)
     if doPrecompEntropy:
         SS.setELBOTerm('ElogpZ', self.E_log_pZ(Data, LP), dims='K')
         SS.setELBOTerm('ElogqZ', self.E_log_qZ(Data, LP), dims='K')
         SS.setELBOTerm('ElogpPi', self.E_log_pPI(Data, LP), dims=None)
         SS.setELBOTerm('ElogqPi', self.E_log_qPI(Data, LP), dims=None)
     return SS
Example #27
    def get_global_suff_stats(self, Data, LP, doPrecompEntropy=0, **kwargs):
        ''' Compute sufficient stats for provided dataset and local params

        Returns
        -------
        SS : SuffStatBag
            Updated fields
            * NodeStateCount : 2D array, nNodes x K
            * N : 2D array, size K x K
        '''
        K = LP['resp'].shape[-1]

        V = Data.nNodes
        SS = SuffStatBag(K=K, D=Data.dim, V=V)

        # NodeStateCount_src[i,k]
        #   Num edges assigned to topic k associated with node i as source
        srcResp = LP['resp'].sum(axis=2)
        NodeStateCount_src = Data.getSparseSrcNodeMat() * srcResp
        # Equivalent but slower: for loop
        # NodeStateCount_src = np.zeros((Data.nNodes, K))
        # for i in range(Data.nNodes):
        #     mask_i = Data.edges[:,0] == i
        #     NodeStateCount_src[i,:] = srcResp[mask_i].sum(axis=0)

        # NodeStateCount_rcv[i,k]
        #   Num edges assigned to topic k associated with node i as receiver
        rcvResp = LP['resp'].sum(axis=1)
        NodeStateCount_rcv = Data.getSparseRcvNodeMat() * rcvResp

        # Summing src counts and rcv counts gives the total
        SS.setField('NodeStateCount',
                    NodeStateCount_src + NodeStateCount_rcv,
                    dims=('V', 'K'))
        # Compute total atoms assigned to each cluster pair
        Nresp = np.sum(LP['resp'], axis=0)
        SS.setField('N', Nresp, dims=('K', 'K'))

        if doPrecompEntropy:
            # Remember, resp has shape nEdges x K x K
            # So, need to sum so we track scalar entropy, not K x K
            Hresp = calcLentropyAsScalar(LP)
            SS.setELBOTerm('Hresp', Hresp, dims=None)
        return SS
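
A self-contained scipy sketch of the sparse node-count trick above: an indicator matrix of shape nNodes x nEdges times the per-edge resp sums edge mass into source nodes, matching the commented-out loop.

import numpy as np
import scipy.sparse as sp

edges = np.array([[0, 1], [0, 2], [1, 2]])
nNodes, K = 3, 2
srcResp = np.random.rand(len(edges), K)

rows = edges[:, 0]            # source node of each edge
cols = np.arange(len(edges))  # one column per edge
srcMat = sp.csr_matrix((np.ones(len(edges)), (rows, cols)),
                       shape=(nNodes, len(edges)))
NodeStateCount_src = srcMat * srcResp  # sparse matrix product

direct = np.zeros((nNodes, K))
for i in range(nNodes):
    direct[i] = srcResp[edges[:, 0] == i].sum(axis=0)
assert np.allclose(NodeStateCount_src, direct)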
Example #28
def calcSummaryStats(Data, SS, LP, DataAtomType='doc', **kwargs):
    ''' Calculate summary statistics for given dataset and local parameters

    Returns
    --------
    SS : SuffStatBag object, with K components.
    '''
    if 'resp' in LP:
        K = LP['resp'].shape[1]
    else:
        K = LP['spR'].shape[1]
        nnzPerRow = LP['nnzPerRow']
    if SS is None:
        SS = SuffStatBag(K=K, D=Data.vocab_size)
    if DataAtomType == 'doc':
        # X : 2D sparse matrix, size nDoc x vocab_size
        X = Data.getSparseDocTypeCountMatrix()
        # WordCounts : 2D array, size K x vocab_size
        # obtained by sparse matrix multiply
        # here, '*' operator does this because X is sparse matrix type
        Nvec = None
        if 'resp' in LP:
            WordCounts = LP['resp'].T * X
            if not hasattr(SS, 'N'):
                Nvec = LP['resp'].sum(axis=0)
        else:
            WordCounts = (LP['spR'].T * X).toarray()
            if not hasattr(SS, 'N'):
                Nvec = as1D(toCArray(LP['spR'].sum(axis=0)))
        if Nvec is not None:
            SS.setField('N', Nvec, dims=('K'))
    else:
        # 2D sparse matrix, size V x N
        X = Data.getSparseTokenTypeCountMatrix()
        if 'resp' in LP:
            WordCounts = (X * LP['resp']).T  # matrix-matrix product
        else:
            WordCounts = (X * LP['spR']).T.toarray()
    SS.setField('WordCounts', WordCounts, dims=('K', 'D'))
    SS.setField('SumWordCounts', np.sum(WordCounts, axis=1), dims=('K'))
    return SS
    """
Example #29
    def calcSummaryStatsForContigBlock(self, Data, SS=None, a=0, b=0):
        ''' Calculate sufficient stats for a single contiguous block of data
        '''
        D = Data.X.shape[1]
        E = Data.Xprev.shape[1]

        if SS is None:
            SS = SuffStatBag(K=1, D=D, E=E)
        elif not hasattr(SS, 'E'):
            SS._Fields.E = E

        ppT = dotATA(Data.Xprev[a:b])[np.newaxis, :, :]
        xxT = dotATA(Data.X[a:b])[np.newaxis, :, :]
        pxT = dotATB(Data.Xprev[a:b], Data.X[a:b])[np.newaxis, :, :]

        SS.setField('N', (b - a) * np.ones(1), dims='K')
        SS.setField('xxT', xxT, dims=('K', 'D', 'D'))
        SS.setField('ppT', ppT, dims=('K', 'E', 'E'))
        SS.setField('pxT', pxT, dims=('K', 'E', 'D'))
        return SS
Example #30
def calcSummaryStats(Data, SS, LP,
                     **kwargs):
    ''' Calculate sufficient statistics for local params at data slice.

    Returns
    -------
    SS
    '''
    X = Data.X
    Xprev = Data.Xprev
    resp = LP['resp']
    K = resp.shape[1]
    D = Data.X.shape[1]
    E = Data.Xprev.shape[1]

    if SS is None:
        SS = SuffStatBag(K=K, D=D, E=E)
    elif not hasattr(SS, 'E'):
        SS._Fields.E = E

    # Expected count for each k
    #  Usually computed by allocmodel. But just in case...
    if not hasattr(SS, 'N'):
        SS.setField('N', np.sum(resp, axis=0), dims='K')

    # Expected outer products
    sqrtResp = np.sqrt(resp)
    xxT = np.empty((K, D, D))
    ppT = np.empty((K, E, E))
    pxT = np.empty((K, E, D))
    for k in range(K):
        sqrtResp_k = sqrtResp[:, k][:, np.newaxis]
        xxT[k] = dotATA(sqrtResp_k * Data.X)
        ppT[k] = dotATA(sqrtResp_k * Data.Xprev)
        pxT[k] = np.dot(Data.Xprev.T, resp[:, k][:, np.newaxis] * Data.X)
    SS.setField('xxT', xxT, dims=('K', 'D', 'D'))
    SS.setField('ppT', ppT, dims=('K', 'E', 'E'))
    SS.setField('pxT', pxT, dims=('K', 'E', 'D'))
    return SS
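
An equivalence check for the pxT update above: Xprev^T diag(resp[:, k]) X can equally be written as an einsum over resp-weighted rows.

import numpy as np

N, D, E = 40, 3, 2
X = np.random.randn(N, D)
Xprev = np.random.randn(N, E)
r_k = np.random.rand(N)

loop = np.dot(Xprev.T, r_k[:, np.newaxis] * X)
ein = np.einsum('n,ne,nd->ed', r_k, Xprev, X)
assert np.allclose(loop, ein)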
Example #31
    def init_global_params(self, Data, K=0, **initArgs):
        ''' Initialize rho, omega, and theta to reasonable values.

        This is only called by "from scratch" init routines.
        '''
        self.K = K
        self.rho = OptimizerRhoOmega.create_initrho(K)
        self.omega = (1.0 + self.gamma) * np.ones(K)

        # To initialize theta, perform standard update given rho, omega
        # but with "empty" sufficient statistics.
        SS = SuffStatBag(K=self.K, D=Data.dim)
        SS.setField('StartStateCount', np.ones(K), dims=('K'))
        SS.setField('TransStateCount', np.ones((K, K)), dims=('K', 'K'))
        self.transTheta, self.startTheta = self._calcTheta(SS)
Example #32
    def calcSummaryStatsForContigBlock(self,
                                       Data,
                                       SS=None,
                                       a=None,
                                       b=None,
                                       **kwargs):
        ''' Calculate summary statistics for specific block of dataset

        Returns
        --------
        SS : SuffStatBag object, with K components.
        '''
        SS = SuffStatBag(K=1, D=Data.dim)

        # Expected count
        SS.setField('N', (b - a) * np.ones(1, dtype=np.float64), dims='K')

        # Expected outer-product
        xxT = dotATA(Data.X[a:b])[np.newaxis, :, :]
        SS.setField('xxT', xxT, dims=('K', 'D', 'D'))
        return SS
Example #33
def init_global_params(obsModel,
                       Data,
                       K=0,
                       seed=0,
                       initname='randexamples',
                       initBlockLen=20,
                       **kwargs):
    ''' Initialize parameters for Gaussian obsModel, in place.

    Parameters
    -------
    obsModel : bnpy.obsModel subclass
        Observation model object to initialize.
    Data : bnpy.data.DataObj
        Dataset to use to drive initialization.
        obsModel dimensions must match this dataset.
    initname : str
        name of routine used to do initialization
        Options: ['randexamples', 'randexamplesbydist', 'kmeans',
                  'randcontigblocks', 'randsoftpartition',
                 ]

    Post Condition
    -------
    obsModel has valid global parameters.
    Either its EstParams or Post attribute will be contain K components.
    '''
    K = int(K)
    PRNG = np.random.RandomState(seed)
    X = Data.X
    if initname == 'randexamples':
        # Choose K items uniformly at random from the Data
        #    then component params by M-step given those single items
        resp = np.zeros((Data.nObs, K))
        permIDs = PRNG.permutation(Data.nObs).tolist()
        for k in range(K):
            resp[permIDs[k], k] = 1.0

    elif initname == 'randexamplesbydist':
        # Choose K items from the Data,
        #  selecting the first at random,
        # then subsequently proportional to euclidean distance to the closest
        # item
        objID = PRNG.choice(Data.nObs)
        chosenObjIDs = list([objID])
        minDistVec = np.inf * np.ones(Data.nObs)
        for k in range(1, K):
            curDistVec = np.sum((Data.X - Data.X[objID])**2, axis=1)
            minDistVec = np.minimum(minDistVec, curDistVec)
            objID = PRNG.choice(Data.nObs, p=minDistVec / minDistVec.sum())
            chosenObjIDs.append(objID)
        resp = np.zeros((Data.nObs, K))
        for k in range(K):
            resp[chosenObjIDs[k], k] = 1.0

    elif initname == 'randcontigblocks':
        # Choose K contig blocks of provided size from the Data,
        #  selecting each block at random from a particular sequence
        if hasattr(Data, 'doc_range'):
            doc_range = Data.doc_range.copy()
        else:
            doc_range = np.asarray([0, Data.X.shape[0]])
        nDoc = doc_range.size - 1
        docIDs = np.arange(nDoc)
        PRNG.shuffle(docIDs)
        resp = np.zeros((Data.nObs, K))
        for k in range(K):
            n = docIDs[k % nDoc]
            start = doc_range[n]
            stop = doc_range[n + 1]
            T = stop - start
            if initBlockLen >= T:
                a = start
                b = stop
            else:
                a = start + PRNG.choice(T - initBlockLen)
                b = a + initBlockLen
            resp[a:b, k] = 1.0

    elif initname == 'randsoftpartition':
        # Randomly assign all data items some mass in each of K components
        #  then create component params by M-step given that soft partition
        resp = PRNG.gamma(1.0 / (K * K), 1, size=(Data.nObs, K))
        resp[resp < 1e-3] = 0
        rsum = np.sum(resp, axis=1)
        badIDs = rsum < 1e-8
        # if any rows have no content, just set them to unif resp.
        if np.any(badIDs):
            resp[badIDs] = 1.0 / K
            rsum[badIDs] = 1
        resp = resp / rsum[:, np.newaxis]
        assert np.allclose(np.sum(resp, axis=1), 1.0)

    elif initname == 'kmeans':
        # Fill in resp matrix with hard-clustering from K-means
        # using an initialization with K randomly selected points from X
        np.random.seed(seed)
        centroids, labels = kmeans2(data=Data.X, k=K, minit='points')
        resp = np.zeros((Data.nObs, K))
        for t in range(Data.nObs):
            resp[t, labels[t]] = 1

    else:
        raise NotImplementedError('Unrecognized initname ' + initname)

    tempLP = dict(resp=resp)
    SS = SuffStatBag(K=K, D=Data.dim)
    SS = obsModel.get_global_suff_stats(Data, SS, tempLP)
    obsModel.update_global_params(SS)
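
A property check for the 'randsoftpartition' branch above: after zeroing tiny entries and repairing any empty rows, every row of resp sums to one.

import numpy as np

PRNG = np.random.RandomState(0)
nObs, K = 100, 5
resp = PRNG.gamma(1.0 / (K * K), 1, size=(nObs, K))
resp[resp < 1e-3] = 0
rsum = np.sum(resp, axis=1)
badIDs = rsum < 1e-8
if np.any(badIDs):
    resp[badIDs] = 1.0 / K  # empty rows become uniform
    rsum[badIDs] = 1
resp = resp / rsum[:, np.newaxis]
assert np.allclose(resp.sum(axis=1), 1.0)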
Example #34
  def get_global_suff_stats(self, Data, LP, doPrecompEntropy=False,
                            doPrecompMergeEntropy=False,
                            mPairIDs=None):
    ''' Count expected number of times each topic is used across all docs.
    '''
    K = LP['DocTopicCount'].shape[1]
    SS = SuffStatBag(K=K, D=Data.vocab_size)
    SS.setField('nDoc', Data.nDoc, dims=None)
    sumLogPi = np.sum(LP['E_logPi'], axis=0)
    SS.setField('sumLogPiActive', sumLogPi[:K], dims='K')
    SS.setField('sumLogPiUnused', sumLogPi[-1], dims=None)

    if doPrecompEntropy:
      # ---------------- Z terms
      SS.setELBOTerm('ElogpZ', self.E_logpZ(Data, LP), dims='K')
      logFactData, logFactZ = self.E_logfactorialZ(Data, LP)
      SS.setELBOTerm('logFactData', logFactData, dims=None)
      SS.setELBOTerm('logFactZ', logFactZ, dims='K')

      # ---------------- Pi terms
      # Note: no terms needed for ElogpPI
      # SS already has field sumLogPi, which is sufficient for this term
      ElogqPiC, ElogqPiA, ElogqPiU = self.E_logqPi_Memoized_from_LP(LP)
      SS.setELBOTerm('ElogqPiConst', ElogqPiC, dims=None)
      SS.setELBOTerm('ElogqPiActive', ElogqPiA, dims='K')
      SS.setELBOTerm('ElogqPiUnused', ElogqPiU, dims=None)

    if doPrecompMergeEntropy:
      ElogpZMat, sLgPiMat, ElogqPiMat = self.memo_elbo_terms_for_merge(LP)
      SS.setMergeTerm('ElogpZ', ElogpZMat, dims=('K','K'))
      SS.setMergeTerm('ElogqPiActive', ElogqPiMat, dims=('K','K'))
      SS.setMergeTerm('sumLogPiActive', sLgPiMat, dims=('K','K'))

      SS.setMergeTerm('logFactZ', 
                     self.memo_factorial_term_for_merge(LP, mPairIDs),
                     dims=('K', 'K'))
    return SS
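
The sumLogPi split above assumes E_logPi has K+1 columns: K active topics plus one trailing 'unused' remainder column. A sketch of the slicing convention:

import numpy as np

nDoc, K = 10, 4
E_logPi = np.random.randn(nDoc, K + 1)  # last column = unused topic
sumLogPi = np.sum(E_logPi, axis=0)
sumLogPiActive = sumLogPi[:K]           # stored with dims='K'
sumLogPiUnused = sumLogPi[-1]           # scalar, stored with dims=None
assert sumLogPiActive.shape == (K,)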
Example #35
def calcSummaryStats(Dslice,
                     LP=None,
                     alpha=None,
                     doPrecompEntropy=False,
                     cslice=(0, None),
                     **kwargs):
    """ Calculate summary from local parameters for given data slice.

    Parameters
    -------
    Data : bnpy data object
    LP : local param dict with fields
        resp : Data.nObs x K array,
            where resp[n,k] = posterior resp of comp k
    doPrecompEntropy : boolean flag
        indicates whether to precompute ELBO terms in advance
        used for memoized learning algorithms (moVB)

    Returns
    -------
    SS : SuffStatBag with K components
        * nDoc : scalar float
            Counts total documents available in provided data.

        Also has optional ELBO field when precompELBO is True
        * Hvec : 1D array, size K
            Vector of entropy contributions from each comp.
            Hvec[k] = -\sum_{n=1}^N resp[n,k] \log resp[n,k], a function of 'resp'
    """
    K = LP['DocTopicCount'].shape[1]
    SS = SuffStatBag(K=K, D=Dslice.dim)

    if cslice[1] is None:
        SS.setField('nDoc', Dslice.nDoc, dims=None)
    else:
        SS.setField('nDoc', cslice[1] - cslice[0], dims=None)

    if doPrecompEntropy:
        assert 'theta' in LP
        Lalloc = L_alloc(Dslice, LP, alpha=alpha)
        SS.setELBOTerm('L_alloc', Lalloc, dims=None)

        if 'nnzPerRow' in LP and LP['nnzPerRow'] == 1:
            SS.setELBOTerm('Hvec', 0.0, dims=None)
        else:
            Hvec = L_entropy(Dslice, LP, returnVector=1)
            SS.setELBOTerm('Hvec', Hvec, dims='K')
    return SS
Example #36
def calcSummaryStats(Data, SS, LP, **kwargs):
    ''' Calculate summary statistics for given dataset and local parameters

    Returns
    --------
    SS : SuffStatBag object, with K components.
    '''
    if not hasattr(Data, 'X_NE'):
        Data.X_NE = np.hstack([Data.X, np.ones(Data.nObs)[:, np.newaxis]])

    Y_N = Data.Y
    X_NE = Data.X_NE
    E = X_NE.shape[1]

    if 'resp' in LP:
        # Dense responsibility calculations
        resp = LP['resp']
        K = resp.shape[1]
        S_yy_K = dotATB(resp, np.square(Y_N)).flatten()
        S_yx_KE = dotATB(resp, Y_N * X_NE)

        # Expected outer product
        S_xxT_KEE = np.zeros((K, E, E))
        sqrtResp_k_N = np.sqrt(resp[:, 0])
        sqrtR_X_k_NE = sqrtResp_k_N[:, np.newaxis] * X_NE
        S_xxT_KEE[0] = dotATA(sqrtR_X_k_NE)
        for k in range(1, K):
            np.sqrt(resp[:, k], out=sqrtResp_k_N)
            np.multiply(sqrtResp_k_N[:, np.newaxis], X_NE, out=sqrtR_X_k_NE)
            S_xxT_KEE[k] = dotATA(sqrtR_X_k_NE)
    else:
        raise ValueError("TODO")
        spR = LP['spR']
        K = spR.shape[1]

    if SS is None:
        SS = SuffStatBag(K=K, D=Data.dim, E=E)
    elif not hasattr(SS, 'E'):
        SS._Fields.E = E
    SS.setField('xxT_KEE', S_xxT_KEE, dims=('K', 'E', 'E'))
    SS.setField('yx_KE', S_yx_KE, dims=('K', 'E'))
    SS.setField('yy_K', S_yy_K, dims=('K'))
    # Expected count for each k
    # Usually computed by allocmodel. But just in case...
    if not hasattr(SS, 'N'):
        if 'resp' in LP:
            SS.setField('N', LP['resp'].sum(axis=0), dims='K')
        else:
            SS.setField('N', as1D(toCArray(LP['spR'].sum(axis=0))), dims='K')

    #SS.setField("N_K", SS.N, dims="K")
    return SS
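
A numpy-only sketch of the regression sufficient statistics above for a single component k, using the bias-augmented design X_NE = [X, 1]:

import numpy as np

N, D = 30, 2
X = np.random.randn(N, D)
Y = np.random.randn(N, 1)
X_NE = np.hstack([X, np.ones((N, 1))])  # append a bias column
r_k = np.random.rand(N)                 # resp[:, k]

S_yy = np.sum(r_k * np.square(Y[:, 0]))               # one entry of yy_K
S_yx = np.sum(r_k[:, np.newaxis] * Y * X_NE, axis=0)  # one row of yx_KE
S_xxT = X_NE.T @ (r_k[:, np.newaxis] * X_NE)          # one slab of xxT_KEE
assert S_xxT.shape == (D + 1, D + 1)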
Example #37
def calcSummaryStats(Data,
                     LP,
                     doPrecompEntropy=False,
                     doPrecompMergeEntropy=False,
                     mPairIDs=None,
                     mergePairSelection=None,
                     trackDocUsage=False,
                     **kwargs):
    """ Calculate sufficient statistics for global updates.

    Parameters
    -------
    Data : bnpy data object
    LP : local param dict with fields
        resp : Data.nObs x K array,
            where resp[n,k] = posterior resp of comp k
    doPrecompEntropy : boolean flag
        indicates whether to precompute ELBO terms in advance
        used for memoized learning algorithms (moVB)
    doPrecompMergeEntropy : boolean flag
        indicates whether to precompute ELBO terms in advance
        for certain merge candidates.

    Returns
    -------
    SS : SuffStatBag with K components
        Summarizes for this mixture model, with fields
        * N : 1D array, size K
            N[k] = expected number of items assigned to comp k

        Also has optional ELBO field when precompELBO is True
        * ElogqZ : 1D array, size K
            Vector of entropy contributions from each comp.
            ElogqZ[k] = \sum_{n=1}^N resp[n,k] log resp[n,k]

        Also has optional Merge field when precompMergeELBO is True
        * ElogqZ : 2D array, size K x K
            Each term is scalar entropy of merge candidate
    """
    if mPairIDs is not None and len(mPairIDs) > 0:
        M = len(mPairIDs)
    else:
        M = 0
    if 'resp' in LP:
        Nvec = np.sum(LP['resp'], axis=0)
        K = Nvec.size
    else:
        # Sparse assignment case
        Nvec = as1D(toCArray(LP['spR'].sum(axis=0)))
        K = LP['spR'].shape[1]

    if hasattr(Data, 'dim'):
        SS = SuffStatBag(K=K, D=Data.dim, M=M)
    else:
        SS = SuffStatBag(K=K, D=Data.vocab_size, M=M)
    SS.setField('N', Nvec, dims=('K'))
    if doPrecompEntropy:
        Mdict = calcELBO_NonlinearTerms(LP=LP, returnMemoizedDict=1)
        if type(Mdict['Hresp']) == float:
            # SPARSE HARD ASSIGNMENTS
            SS.setELBOTerm('Hresp', Mdict['Hresp'], dims=None)
        else:
            SS.setELBOTerm('Hresp', Mdict['Hresp'], dims=('K', ))

    if doPrecompMergeEntropy:
        m_Hresp = None
        if 'resp' in LP:
            m_Hresp = -1 * NumericUtil.calcRlogR_specificpairs(
                LP['resp'], mPairIDs)
        elif 'spR' in LP:
            if LP['nnzPerRow'] > 1:
                m_Hresp = calcSparseMergeRlogR(spR_csr=LP['spR'],
                                               nnzPerRow=LP['nnzPerRow'],
                                               mPairIDs=mPairIDs)
        else:
            raise ValueError("Need resp or spR in LP")
        if m_Hresp is not None:
            assert m_Hresp.size == len(mPairIDs)
            SS.setMergeTerm('Hresp', m_Hresp, dims=('M'))
    if trackDocUsage:
        Usage = np.sum(LP['resp'] > 0.01, axis=0)
        SS.setSelectionTerm('DocUsageCount', Usage, dims='K')

    return SS
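
For dense resp, the Hresp term above is the per-component entropy vector Hresp[k] = -sum_n resp[n,k] log resp[n,k], matching the sign convention in Example #5. A quick sketch:

import numpy as np

resp = np.random.rand(200, 5)
resp /= resp.sum(axis=1, keepdims=True)
Hresp = -1 * np.sum(resp * np.log(resp), axis=0)
assert Hresp.shape == (5,)
assert np.all(Hresp >= 0)  # -r log r >= 0 for r in (0, 1]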
Example #38
  def get_global_suff_stats(self, Data, LP, doPrecompEntropy=False,
                            doPrecompMergeEntropy=False,
                            mPairIDs=None):
        ''' Count expected number of times each topic is used across all docs.
        '''
        wv = LP['word_variational']
        _, K = wv.shape
        # Turn dim checking off, since some stats have dim K+1 instead of K
        SS = SuffStatBag(K=K, D=Data.vocab_size)
        SS.setField('nDoc', Data.nDoc, dims=None)
        sumLogPi = np.sum(LP['E_logPi'], axis=0)
        SS.setField('sumLogPiActive', sumLogPi[:K], dims='K')
        SS.setField('sumLogPiUnused', sumLogPi[-1], dims=None)

        if 'DocTopicFrac' in LP:
          Nmajor = LP['DocTopicFrac']
          Nmajor[Nmajor < 0.05] = 0
          SS.setField('Nmajor', np.sum(Nmajor, axis=0), dims='K')
        if doPrecompEntropy:
            # Z terms
            SS.setELBOTerm('ElogpZ', self.E_logpZ(Data, LP), dims='K')
            SS.setELBOTerm('ElogqZ', self.E_logqZ(Data, LP), dims='K')
            # Pi terms
            # Note: no terms needed for ElogpPI
            # SS already has field sumLogPi, which is sufficient for this term
            ElogqPiC, ElogqPiA, ElogqPiU = self.E_logqPi_Memoized_from_LP(LP)
            SS.setELBOTerm('ElogqPiConst', ElogqPiC, dims=None)
            SS.setELBOTerm('ElogqPiActive', ElogqPiA, dims='K')
            SS.setELBOTerm('ElogqPiUnused', ElogqPiU, dims=None)

        if doPrecompMergeEntropy:
            ElogpZMat, sLgPiMat, ElogqPiMat = self.memo_elbo_terms_for_merge(LP)
            ElogqZMat = self.E_logqZ_memo_terms_for_merge(Data, LP, mPairIDs)
            SS.setMergeTerm('ElogpZ', ElogpZMat, dims=('K','K'))
            SS.setMergeTerm('ElogqZ', ElogqZMat, dims=('K','K'))
            SS.setMergeTerm('ElogqPiActive', ElogqPiMat, dims=('K','K'))
            SS.setMergeTerm('sumLogPiActive', sLgPiMat, dims=('K','K'))
        return SS
Example #39
  def get_global_suff_stats(self, Data, LP, doPrecompEntropy=False,
                            doPrecompMergeEntropy=False,
                            mPairIDs=None):
        ''' Theta is a global parameter here, so we need its sufficient stats.
        Computing these requires precomputing certain terms.
        '''
        E, K = LP['E_logsoftev_EdgeLik'].shape
        # Turn dim checking off, since some stats have dim K+1 instead of K
        N = Data.nNodeTotal
        SS = SuffStatBag(K=K, D=N)

        # Summary statistics
        node_ss = np.zeros((N, K))
        node_z_ss = np.zeros((N, K))
        node_offset = np.zeros((E, K)) # used to cache ELBO
        ev = LP['edge_variational']
        edgeEps = LP['E_logsoftev_EdgeEps']

        for e in range(E):
          ii = Data.edges[e,0]
          jj = Data.edges[e,1]
          node_ss[ii,:] += ev[e]
          node_ss[jj,:] += ev[e]
          node_z_ss[ii,:] += edgeEps[e]  # TODO: check whether there's a better way

        SS.setField('nNodeTotal', N, dims=None)
        SS.setField('nEdgeTotal', E, dims=None)
        SS.setField('node_ss', node_ss, dims=('D','K'))
        SS.setField('node_z_ss', node_z_ss, dims=('D', 'K'))
        SS.setField('sumLogPiActive', LP['E_logPiSumK'][:self.K], dims='K')
        SS.setField('sumLogPiUnused', LP['E_logPiSumK'][-1], dims=None)

        return SS
Example #40
    def get_global_suff_stats(self, Data, LP, doPrecompEntropy=None, **kwargs):
        ''' Calculate sufficient statistics.
        '''
        resp = LP['resp']
        _, K = resp.shape
        SS = SuffStatBag(K=K, D=Data.get_dim())
        SS.setField('nDoc', Data.nDoc, dims=None)
        SS.setField('sumLogVd', np.sum(LP['ElogV'], axis=0), dims='K')
        SS.setField('sumLog1mVd', np.sum(LP['Elog1mV'], axis=0), dims='K')

        if doPrecompEntropy:
            ElogqZ = self.E_logqZ(Data, LP)
            VZlocal = self.E_logpVZ_logqV(Data, LP)
            SS.setELBOTerm('ElogqZ', ElogqZ, dims='K')
            SS.setELBOTerm('VZlocal', VZlocal, dims=None)
        return SS
Example #41
    def get_global_suff_stats(self, Data, LP, doPrecompEntropy=False, doPrecompMergeEntropy=False, mPairIDs=None):
        """ Count expected number of times each topic is used across all docs    
    """
        wv = LP["word_variational"]
        _, K = wv.shape
        # Turn dim checking off, since some stats have dim K+1 instead of K
        SS = SuffStatBag(K=K, D=Data.vocab_size)
        SS.setField("nDoc", Data.nDoc, dims=None)
        sumLogPi = np.sum(LP["E_logPi"], axis=0)
        SS.setField("sumLogPiActive", sumLogPi[:K], dims="K")
        SS.setField("sumLogPiUnused", sumLogPi[-1], dims=None)

        if "DocTopicFrac" in LP:
            Nmajor = LP["DocTopicFrac"]
            Nmajor[Nmajor < 0.05] = 0
            SS.setField("Nmajor", np.sum(Nmajor, axis=0), dims="K")
        if doPrecompEntropy:
            # ---------------- Z terms
            SS.setELBOTerm("ElogpZ", self.E_logpZ(Data, LP), dims="K")
            # ---------------- Pi terms
            # Note: no terms needed for ElogpPI
            # SS already has field sumLogPi, which is sufficient for this term
            ElogqPiC, ElogqPiA, ElogqPiU = self.E_logqPi_Memoized_from_LP(LP)
            SS.setELBOTerm("ElogqPiConst", ElogqPiC, dims=None)
            SS.setELBOTerm("ElogqPiActive", ElogqPiA, dims="K")
            SS.setELBOTerm("ElogqPiUnused", ElogqPiU, dims=None)

        if doPrecompMergeEntropy:
            ElogpZMat, sLgPiMat, ElogqPiMat = self.memo_elbo_terms_for_merge(LP)
            SS.setMergeTerm("ElogpZ", ElogpZMat, dims=("K", "K"))
            SS.setMergeTerm("ElogqPiActive", ElogqPiMat, dims=("K", "K"))
            SS.setMergeTerm("sumLogPiActive", sLgPiMat, dims=("K", "K"))
        return SS