Example #1
import numpy as np
# calcRlogR is a helper from the surrounding project (returns the
# column-wise sums of r * log(r)); it is assumed to be in scope.

def calcLentropyAsScalar(LP):
    ''' Compute entropy term of objective as a scalar.

    Returns
    -------
    Hresp : scalar
    '''
    return -1.0 * np.sum(calcRlogR(LP['resp']))
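A minimal usage sketch with hypothetical values; it assumes calcRlogR is in scope as noted above:

import numpy as np

# Hypothetical 3-item, 2-component responsibility matrix;
# rows sum to one.
LP = dict(resp=np.asarray([
    [0.9, 0.1],
    [0.5, 0.5],
    [0.2, 0.8]]))
Hresp = calcLentropyAsScalar(LP)  # nonnegative scalar entropy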
Example #2
import numpy as np
from scipy.special import digamma, gammaln
# calcRlogR / calcRlogRdotv are entropy helpers from the surrounding
# project (column-wise sums of r * log(r), optionally weighted by a
# vector); they are assumed to be in scope.

def calcMergeTermsFromSeparateLP(
        Data=None,
        LPa=None, SSa=None,
        LPb=None, SSb=None,
        mUIDPairs=None):
    ''' Compute merge terms that combine two comps from separate LP dicts.
    
    Returns
    -------
    Mdict : dict of key, array-value pairs
    '''
    M = len(mUIDPairs)
    m_sumLogPi = np.zeros(M)
    m_gammalnTheta = np.zeros(M)
    m_slackTheta = np.zeros(M)
    m_Hresp = np.zeros(M)

    assert np.allclose(LPa['digammaSumTheta'], LPb['digammaSumTheta'])
    for m, (uidA, uidB) in enumerate(mUIDPairs):
        kA = SSa.uid2k(uidA)
        kB = SSb.uid2k(uidB)

        m_resp = LPa['resp'][:, kA] + LPb['resp'][:, kB]
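        # For bag-of-words data, each row of resp corresponds to a
        # unique (doc, word) token, so the merged entropy term is
        # weighted by the token's word count; otherwise use the
        # plain unweighted sum.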
        if hasattr(Data, 'word_count') and \
                Data.nUniqueToken == m_resp.shape[0]:
            m_Hresp[m] = -1 * calcRlogRdotv(
                m_resp[:,np.newaxis], Data.word_count)
        else:
            m_Hresp[m] = -1 * calcRlogR(m_resp[:,np.newaxis])

        DTC_vec = LPa['DocTopicCount'][:, kA] + LPb['DocTopicCount'][:, kB]
        theta_vec = LPa['theta'][:, kA] + LPb['theta'][:, kB]
        m_gammalnTheta[m] = np.sum(gammaln(theta_vec))
        ElogPi_vec = digamma(theta_vec) - LPa['digammaSumTheta']
        m_sumLogPi[m] = np.sum(ElogPi_vec)
        # slack = (Ndm - theta_dm) * E[log pi_dm]
        slack_vec = ElogPi_vec
        slack_vec *= (DTC_vec - theta_vec)
        m_slackTheta[m] = np.sum(slack_vec)
    return dict(
        Hresp=m_Hresp,
        gammalnTheta=m_gammalnTheta,
        slackTheta=m_slackTheta,
        sumLogPi=m_sumLogPi)
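A sketch of the expected call, using hypothetical stand-ins: only the uid2k lookup is needed from the suff-stat bags, and the LP dicts below carry arbitrary but shape-consistent values (digammaSumTheta must match between LPa and LPb):

import numpy as np
from scipy.special import digamma

class FakeSS(object):
    # Hypothetical stand-in exposing only the uid2k lookup,
    # which maps a component's uid to its column index.
    def __init__(self, uids):
        self.uids = list(uids)

    def uid2k(self, uid):
        return self.uids.index(uid)

nDoc, K = 3, 2
theta = np.full((nDoc, K), 1.5)
LPa = dict(
    resp=np.full((5, K), 0.25),
    DocTopicCount=np.ones((nDoc, K)),
    theta=theta,
    digammaSumTheta=digamma(theta.sum(axis=1)))
LPb = dict(LPa)  # same shapes; shares digammaSumTheta with LPa
Mdict = calcMergeTermsFromSeparateLP(
    Data=None,
    LPa=LPa, SSa=FakeSS([101, 102]),
    LPb=LPb, SSb=FakeSS([201, 202]),
    mUIDPairs=[(101, 201), (102, 202)])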
Example #3
import numpy as np
# calcRlogR (column-wise sums of r * log(r)) is assumed in scope.

def calcEvalMetricForStates_KL(LP_n=None, **kwargs):
    ''' Compute KL between empirical and model distr. for each state.

    Returns
    -------
    scores : 1D array, size K
        scores[k] gives the score of comp k
        if comp k doesn't appear in the sequence, scores[k] is 0
    '''
    np.maximum(LP_n['resp'], 1e-100, out=LP_n['resp'])
    N = np.sum(LP_n['resp'], axis=0)

    # Pemp_log_Pemp : 1D array, size K
    # equals the (negative) entropy of the empirical distribution:
    #   \sum_n (r_nk / N_k) \log (r_nk / N_k)
    #     = -\log(N_k) + (1 / N_k) \sum_n r_nk \log(r_nk)
    Pemp_log_Pemp = -1 * np.log(N) + 1.0 / N * calcRlogR(LP_n['resp'])

    # Pemp_log_Pmodel : 1D array, size K
    # cross term: empirical distribution scored under the model.
    # Sum over timesteps (axis=0) so each state keeps its own score.
    Pemp_log_Pmodel = 1.0 / N * np.sum(
        LP_n['resp'] * LP_n['E_log_soft_ev'], axis=0)

    KLscore = -1 * Pemp_log_Pmodel + Pemp_log_Pemp
    return KLscore
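A minimal call sketch with hypothetical values. Each column k is scored as KL(Pemp_k || Pmodel_k) = \sum_n Pemp \log Pemp - \sum_n Pemp \log Pmodel, matching the two terms above:

import numpy as np

# Hypothetical local params for 4 timesteps, 2 states
LP_n = dict(
    resp=np.asarray([
        [0.7, 0.3],
        [0.6, 0.4],
        [0.1, 0.9],
        [0.2, 0.8]]),
    E_log_soft_ev=np.log(np.asarray([
        [0.5, 0.5],
        [0.4, 0.6],
        [0.3, 0.7],
        [0.2, 0.8]])))
scores = calcEvalMetricForStates_KL(LP_n=LP_n)  # 1D array, size 2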
Example #4
    def calc_local_params(self, Data, LP, **kwargs):
        ''' Compute local parameters for provided dataset.

        Args
        -------
        Data : GraphData object
        LP : dict of local params, with fields
            * E_log_soft_ev : nEdges x K

        Returns
        -------
        LP : dict of local params, with fields
            * resp : nEdges x K
                resp[e,k] = prob that edge e is explained by
                connection from state/block combination k,k
        '''
        K = self.K
        ElogPi = self.E_logPi()

        # logepsEvVec : 1D array, size nEdges
        #    log-likelihood that each edge was generated by the bg
        #    state "epsilon": each binary feature x_ed is
        #    Bernoulli(epsilon) under the background model
        logepsEvVec = np.sum(
            np.log(self.epsilon) * Data.X + \
            np.log(1-self.epsilon) * (1-Data.X),
            axis=1)
        epsEvVec = np.exp(logepsEvVec)

        # resp : 2D array, nEdges x K
        resp = ElogPi[Data.edges[:,0], :] + \
            ElogPi[Data.edges[:,1], :] + \
            LP['E_log_soft_ev']
        np.exp(resp, out=resp)

        expElogPi = np.exp(ElogPi)

        # sumPi_fg : 1D array, size nEdges
        #    sumPi_fg[e] = \sum_k \pi[s,k] \pi[t,k] for edge e=(s,t)
        sumPi_fg = np.sum(expElogPi[Data.edges[:, 0]] *
                          expElogPi[Data.edges[:, 1]],
                          axis=1)
        # sumPi : 1D array, size nEdges
        #    sumPi[e] = \sum_{j,k} \pi[s,j] \pi[t,k] for edge e=(s,t)
        sumexpElogPi = expElogPi.sum(axis=1)
        sumPi = sumexpElogPi[Data.edges[:,0]] * \
            sumexpElogPi[Data.edges[:,1]]

        # respNormConst : 1D array, size nEdges
        respNormConst = resp.sum(axis=1)
        respNormConst += (sumPi - sumPi_fg) * epsEvVec
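        # At this point,
        #   respNormConst[e] = \sum_k resp[e,k]
        #       + (\sum_{j != k} pi[s,j] pi[t,k]) * epsEv[e]
        # Diagonal (j == k) pairs use the state-specific likelihood;
        # all off-diagonal pairs share the background likelihood.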
        # Normalize the rows of resp
        resp /= respNormConst[:, np.newaxis]
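        # Floor tiny responsibilities to avoid log(0) in the
        # entropy terms computed below.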
        np.maximum(resp, 1e-100, out=resp)
        LP['resp'] = resp

        # Compute resp_bg : 1D array, size nEdges
        resp_bg = 1.0 - resp.sum(axis=1)
        LP['resp_bg'] = resp_bg

        # src/rcv resp_bg : 2D arrays, size nEdges x K
        #     srcresp_bg[n,k] = sum of resp mass
        #         when edge n's src is assigned to k, but rcv is not
        #     rcvresp_bg[n,k] = sum of resp mass
        #         when edge n's rcv is assigned to k, but src is not
        epsEvVec /= respNormConst
        expElogPi_bg = sumexpElogPi[:, np.newaxis] - expElogPi
        srcresp_bg = epsEvVec[:,np.newaxis] * \
                expElogPi[Data.edges[:,0]] * \
                expElogPi_bg[Data.edges[:,1]]
        rcvresp_bg = epsEvVec[:,np.newaxis] * \
                expElogPi[Data.edges[:,1]] * \
                expElogPi_bg[Data.edges[:,0]]
        # NodeStateCount_bg : 2D array, size nNodes x K
        #     NodeStateCount_bg[v,k] = count of node v assigned to
        #         state k when the other node in the edge is NOT
        #         assigned to state k
        NodeStateCount_bg = \
            Data.getSparseSrcNodeMat() * srcresp_bg + \
            Data.getSparseRcvNodeMat() * rcvresp_bg
        # NodeStateCount_fg : 2D array, size nNodes x K
        nodeMat = Data.getSparseSrcNodeMat() + Data.getSparseRcvNodeMat()
        NodeStateCount_fg = nodeMat * LP['resp']
        LP['NodeStateCount'] = NodeStateCount_bg + NodeStateCount_fg
        LP['N_fg'] = NodeStateCount_fg.sum(axis=0)

        # Ldata_bg : scalar
        #     cached value of ELBO term Ldata for background component
        LP['Ldata_bg'] = np.inner(resp_bg, logepsEvVec)

        LP['Lentropy_fg'] = -1 * calcRlogR(LP['resp'])

        # Sanity check: recompute the foreground entropy from its
        # definition and verify it matches the cached value.
        Lentropy_fg = \
            -1 * np.sum(NodeStateCount_fg * ElogPi, axis=0) + \
            -1 * np.sum(LP['resp'] * LP['E_log_soft_ev'], axis=0) + \
            np.dot(np.log(respNormConst), LP['resp'])
        assert np.allclose(Lentropy_fg, LP['Lentropy_fg'])
        """
        LP['Lentropy_normConst'] = np.sum(np.log(respNormConst))
        LP['Lentropy_lik_fg'] = -1 * np.sum(
            LP['resp']*LP['E_log_soft_ev'], axis=0)
        LP['Lentropy_prior'] = -1 * np.sum(
            LP['NodeStateCount'] * ElogPi, axis=0)
        LP['Lentropy_lik_bg'] = -1 * LP['Ldata_bg']
        """
        # Lentropy_bg : scalar
        #     Cached value of entropy of all background resp values
        #     Equal to -1 * \sum_n \sum_{j \neq k} r_{njk} \log r_{njk}
        #     This is strictly lower-bounded by (but NOT equal to)
        #         -1 * calcRlogR(LP['resp_bg'])
        LP['Lentropy_bg'] = \
            -1 * np.sum(NodeStateCount_bg * ElogPi) + \
            -1 * LP['Ldata_bg'] + \
            np.inner(np.log(respNormConst), LP['resp_bg'])
        return LP
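A sketch of the call pattern. Data and the model instance come from the surrounding project; the names and attributes below (Data.nEdges, model.K) are only for orientation, not a confirmed API:

import numpy as np

LP = dict(E_log_soft_ev=np.zeros((Data.nEdges, model.K)))
LP = model.calc_local_params(Data, LP)
assert LP['resp'].shape == (Data.nEdges, model.K)
# Foreground and background responsibilities sum to one per edge
assert np.allclose(LP['resp'].sum(axis=1) + LP['resp_bg'], 1.0)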