Example #1
def slda_update_log_phi(text, log_phi, log_gamma, log_beta, y_d, eta, sigma_squared):
    """
        Same as update_phi_lda_E_step but in log probability space.
    """
    (N, K) = log_phi.shape

    log_phi_sum = logsumexp(log_phi, axis=0)
    Ns = (N * sigma_squared)
    ElogTheta = graphlib.dirichlet_expectation(np.exp(log_gamma))  # gamma is stored in log space

    front = (-1.0 / (2 * N * Ns))
    pC = (1.0 * y_d / Ns * eta)  
    eta_dot_eta = front * (eta * eta)
    # combine the linear-space terms, then move them into log space
    # (this assumes the quantities passed to np.log come out positive)
    log_const = np.log(ElogTheta + pC + eta_dot_eta)

    log_right_eta_times_const = np.log(front * 2 * eta)

    ensure(isinstance(text, np.ndarray))

    # text is guaranteed to be an ndarray, so do the approximate fast matrix update
    # (approximate log-space analogue of phi_sum - phi[n])
    log_phi_minus_n = -1 + logsumexp([log_phi, (-1 + log_phi_sum)], axis=0)

    # note: np.log(eta) below assumes eta is elementwise positive
    log_phi[:,:] = logsumexp([log_beta[:,text].T,
                              logdotexp(np.matrix(logdotexp(log_phi_minus_n, np.log(eta))).T,
                                        np.matrix(log_right_eta_times_const)),
                              log_const], axis=0)

    graphlib.log_row_normalize(log_phi)

    return log_phi
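Example #1 leans on logdotexp, presumably graphlib's log-space matrix product. A minimal sketch of that operation, assuming it computes log(exp(A) @ exp(B)) stably (graphlib's own implementation is not shown here):

import numpy as np
from scipy.special import logsumexp

def logdotexp(log_A, log_B):
    # out[i, j] = logsumexp_k(log_A[i, k] + log_B[k, j]) = log((exp(log_A) @ exp(log_B))[i, j])
    return logsumexp(log_A[:, :, None] + log_B[None, :, :], axis=1)

A = np.random.rand(3, 4)
B = np.random.rand(4, 2)
assert np.allclose(np.exp(logdotexp(np.log(A), np.log(B))), A @ B)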
Example #2
def lda_update_log_gamma(log_alpha, log_phi, log_gamma):
    """
     Same as lda_update_gamma, 
        but in log probability space.
    """
    ensure(log_phi.shape[1] == len(log_gamma))
    # gamma = alpha + sum_n phi_n, computed entirely in log space
    log_gamma[:] = logsumexp([log_alpha, logsumexp(log_phi, axis=0)], axis=0)
    return log_gamma
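The update above is the standard LDA gamma update, γ = α + Σ_n φ_n, carried out in log space. A quick equivalence check, using scipy.special.logsumexp as a stand-in for graphlib's logsumexp (an assumption):

import numpy as np
from scipy.special import logsumexp

alpha = np.array([0.1, 0.2, 0.3])                # K = 3 topics
phi = np.random.dirichlet(np.ones(3), size=4)    # N = 4 words, rows sum to 1

# linear-space update: gamma = alpha + sum_n phi_n
gamma = alpha + phi.sum(axis=0)

# log-space update, as in lda_update_log_gamma
log_gamma = logsumexp([np.log(alpha), logsumexp(np.log(phi), axis=0)], axis=0)

assert np.allclose(np.exp(log_gamma), gamma)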
Example #3
def test_log_row_normalize():
    m = np.log(np.array([[2,2,4], [3,2,1]]))
    answer = np.log(np.array([[0.25, 0.25, 0.5], [0.5, 0.333333333333333, 0.166666666666667]]))

    assert abs(graphlib.logsumexp(m[0,:]) - np.log(8)) < .0000000001
    assert abs(graphlib.logsumexp(m[1,:]) - np.log(6)) < .0000000001
    assert abs(graphlib.logsumexp(answer[0,:])) < .0000000001

    out = graphlib.log_row_normalize(m)
    assert same(out, answer)
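The test pins down log_row_normalize's contract: subtract each row's logsumexp so that exp of every row sums to one. A version consistent with that contract (an assumption about graphlib's actual implementation):

import numpy as np
from scipy.special import logsumexp

def log_row_normalize(m):
    # subtract each row's logsumexp in place; exp(m) then has rows summing to 1
    m -= logsumexp(m, axis=1, keepdims=True)
    return m

m = np.log(np.array([[2.0, 2.0, 4.0], [3.0, 2.0, 1.0]]))
log_row_normalize(m)
assert np.allclose(np.exp(m).sum(axis=1), 1.0)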
Example #4
def calculate_EZ_from_small_log_phis(log_phi1, log_phi2):
    """
        Accepts a two small phi matrices (like (NdxK) and (NcxJ))
        Calculates E[Zd].
        Returns the final vector (K+J).

        E[Z] = φ := (1/N)ΣNφn
    """
    Ndc = log_phi1.shape[0] + log_phi2.shape[0]
    # logsumexp(..., axis=0) yields 1-D vectors, so concatenate along the only axis
    ez = np.concatenate((logsumexp(log_phi1, axis=0), logsumexp(log_phi2, axis=0)))
    return ez - np.log(Ndc)
Example #5
def calculate_EZ_from_big_log_phi(big_log_phi):
    """
        Accepts a big phi matrix (like ((Nd+Nc) x (K+J))
        Calculates E[Zd].
        Returns the final vector (K+J).

        E[Z] = φ := (1/N)ΣNφn
    """
    Ndc,KJ = big_log_phi.shape
    return logsumexp(big_log_phi, axis=0) - np.log(Ndc)
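Both of the last two helpers rely on the same identity: the log of a column mean is the column logsumexp minus log N. A quick sanity check (scipy.special.logsumexp again standing in for the library's own):

import numpy as np
from scipy.special import logsumexp

phi = np.random.dirichlet(np.ones(5), size=8)    # 8 words x 5 topics
log_phi = np.log(phi)

ez = phi.mean(axis=0)                                        # E[Z] in linear space
log_ez = logsumexp(log_phi, axis=0) - np.log(phi.shape[0])   # same, in log space

assert np.allclose(np.exp(log_ez), ez)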
Example #6
def _unoptimized_slda_update_phi(text, phi, gamma, beta, y_d, eta, sigma_squared):
    """
        Update phi in LDA. 
        phi is N x K matrix.
        gamma is a K-size vector

     update phid:
     φd,n ∝ exp{ E[log θ|γ] + 
                 E[log p(wn|β1:K)] + 
                 (y / Nσ2) η  — 
                 [2(ηTφd,-n)η + (η∘η)] / (2N2σ2) }
     
     Note that E[log p(wn|β1:K)] = log βTwn
    """
    (N, K) = phi.shape
    #assert len(eta) == K
    #assert len(gamma) == K
    #assert beta.shape[0] == K

    phi_sum = np.sum(phi, axis=0)
    Ns = (N * sigma_squared)
    ElogTheta = graphlib.dirichlet_expectation(gamma)
    ensure(len(ElogTheta) == K)

    pC = (1.0 * y_d / Ns * eta)  
    eta_dot_eta = (eta * eta)
    front = (-1.0 / (2 * N * Ns))

    for n,word,count in iterwords(text):
        phi_sum -= phi[n]
        ensure(len(phi_sum) == K)

        pB = np.log(beta[:,word])
        pD = front * ((2 * np.dot(eta, phi_sum) * eta) + eta_dot_eta)
        ensure(len(pB) == K)
        ensure(len(pC) == K)
        ensure(len(pD) == K)

        # normalize in log space before exponentiating, for numerical stability
        phi[n,:] = ElogTheta + pB + pC + pD
        phi[n,:] -= graphlib.logsumexp(phi[n,:])
        phi[n,:] = np.exp(phi[n,:])

        # add this back into the sum
        # unlike in LDA, this cannot be computed in parallel
        phi_sum += phi[n]

    return phi
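The E[log θ|γ] term used throughout these updates is the standard Dirichlet expectation, E[log θ_k] = ψ(γ_k) − ψ(Σ_j γ_j). A stand-alone version, assuming graphlib.dirichlet_expectation computes exactly this:

import numpy as np
from scipy.special import psi  # digamma

def dirichlet_expectation(gamma):
    # E[log theta_k] for theta ~ Dirichlet(gamma): psi(gamma_k) - psi(sum_j gamma_j)
    return psi(gamma) - psi(np.sum(gamma))

print(dirichlet_expectation(np.array([1.0, 2.0, 3.0])))  # K negative numbers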
Example #7
def partial_slda_update_phi(text, phi, gamma, beta, y_d, eta, sigma_squared):
    """Same as slda update phi, but eta may be smaller than total number of topics.
        So only some of the topics contribute to y.
    """
    (N, K) = phi.shape
    Ks = len(eta)

    phi_sum = np.sum(phi[:,:Ks], axis=0)
    Ns = (N * sigma_squared)
    ElogTheta = graphlib.dirichlet_expectation(gamma)

    front = (-1.0 / (2 * N * Ns))
    eta_dot_eta = front * (eta * eta)
    pC = ((1.0 * y_d / Ns) * eta) + eta_dot_eta

    right_eta_times_const = (front * 2 * eta)

    if isinstance(text, np.ndarray):
        # if text is in array form, do an approximate fast matrix update
        phi_minus_n = -(phi[:,:Ks] - phi_sum)
        phi[:,:] = ElogTheta + np.log(beta[:,text].T)
        phi[:,:Ks] += pC
        phi[:,:Ks] += np.dot(np.matrix(np.dot(phi_minus_n, eta)).T, np.matrix(right_eta_times_const))
        graphlib.log_row_normalize(phi)
        phi[:,:] = np.exp(phi[:,:])
    else:
        # otherwise, iterate through each word
        for n,word,count in iterwords(text):
            phi_sum -= phi[n,:Ks]

            pB = np.log(beta[:,word])
            pD = (np.dot(eta, phi_sum) * right_eta_times_const) 

            # must exponentiate and normalize immediately!
            phi[n,:] = ElogTheta + pB
            phi[n,:] += pC + pD
            phi[n,:] -= graphlib.logsumexp(phi[n,:]) # normalize in logspace
            phi[n,:] = np.exp(phi[n,:])

            # add this back into the sum
            # unlike in LDA, this cannot be computed in parallel
            phi_sum += phi[n,:Ks]
    return phi
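The fast path indexes beta by the whole document at once: when text is an array of word ids, beta[:, text].T is the N x K matrix whose row n holds the topic probabilities of word n. A small illustration (the shapes and toy data are assumptions):

import numpy as np

K, V = 3, 5                                       # topics, vocabulary size
beta = np.random.dirichlet(np.ones(V), size=K)    # K x V topic-word probabilities
text = np.array([0, 2, 2, 4])                     # document as word ids, N = 4

per_word = beta[:, text].T                        # N x K
assert per_word.shape == (4, K)
assert np.allclose(per_word[1], beta[:, 2])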
Example #8
def calculate_EZZT_from_small_log_phis(phi1, phi2):
    """
        Accepts a big phi matrix (like ((Nd+Nc) x (K+J))
        Calculates E[ZdZdT].
        Returns the final matrix ((K+J) x (K+J)).

        (Also, E[ZdZdT] = (1/N2)(ΣNΣm!=nφd,nφd,mT  +  ΣNdiag{φd,n})
    """
    Nd,K = phi1.shape
    Nc,J = phi2.shape
    (Ndc, KJ) = (Nd+Nc, K+J)
    inner_sum = np.zeros((KJ, KJ))

    p1 = np.matrix(phi1)
    p2 = np.matrix(phi2)

    for i in xrange(K):
        for j in xrange(K):
            m = logdotexp(np.matrix(p1[:,i]), np.matrix(p1[:,j]).T)
            # mask the diagonal (enforce m != n) by adding a large negative value in log space
            m += np.diag(np.ones(Nd) * -1000)
            inner_sum[i,j] = logsumexp(m.flatten())

    for i in xrange(J):
        for j in xrange(J):
            m = logdotexp(np.matrix(p2[:,i]), np.matrix(p2[:,j]).T)
            m += np.diag(np.ones(Nc) * -1000)   # same diagonal mask as above
            inner_sum[K+i,K+j] = logsumexp(m.flatten())

    for i in xrange(K):
        for j in xrange(J):
            m = logdotexp(np.matrix(p1[:,i]), np.matrix(p2[:,j]).T)
            inner_sum[i,K+j] = logsumexp(m.flatten())

    for i in xrange(J):
        for j in xrange(K):
            m = logdotexp(np.matrix(p2[:,i]), np.matrix(p1[:,j]).T)
            inner_sum[K+i,j] = logsumexp(m.flatten())

    big_phi_sum = np.concatenate((logsumexp(phi1, axis=0),
                                  logsumexp(phi2, axis=0)))
    ensure(big_phi_sum.shape == (KJ,))
    for i in xrange(KJ):
        inner_sum[i,i] = logsumexp([inner_sum[i,i], big_phi_sum[i]])

    inner_sum -= np.log(Ndc * Ndc)
    return inner_sum
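Adding -1000 on the diagonal is the log-space equivalent of zeroing those entries out, which is how the m ≠ n restriction in the formula is enforced. A small demonstration (scipy.special.logsumexp assumed):

import numpy as np
from scipy.special import logsumexp

a = np.log(np.array([[1.0, 2.0],
                     [3.0, 4.0]]))

# masked diagonal entries contribute exp(-1000) ~ 0 to the log-space sum
masked = a + np.diag(np.full(2, -1000.0))
off_diag_sum = logsumexp(masked)

assert np.isclose(np.exp(off_diag_sum), 2.0 + 3.0)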
Example #9
def test_initialize_random():
    original = np.ones((4,7))
    out = original.copy()
    graphlib.initialize_random(out)
    assert original.shape == out.shape

    assert not same(out, original)

    sumrows = np.sum(out, axis=1)
    assert same(sumrows, np.ones(out.shape[0]))

    # now test log of the same
    original = np.ones((4,7))
    out = original.copy()
    graphlib.initialize_log_random(out)
    assert original.shape == out.shape

    assert not same(out, original)

    sumrows = graphlib.logsumexp(out, axis=1)
    assert same(np.exp(sumrows), np.ones(out.shape[0]))
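From these assertions, initialize_log_random must fill the matrix in place with row-stochastic values in log space. A version consistent with the test (an assumption; graphlib's real initializer may differ, e.g. in its random source):

import numpy as np
from scipy.special import logsumexp

def initialize_log_random(m):
    # fill m in place with random log-probabilities; each row of exp(m) sums to 1
    m[:, :] = np.log(np.random.rand(*m.shape))
    m -= logsumexp(m, axis=1, keepdims=True)
    return m

m = np.ones((4, 7))
initialize_log_random(m)
assert np.allclose(np.exp(logsumexp(m, axis=1)), 1.0)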