def _calc_q_pi(self, alpha):
    '''
    Update the annotator models.
    '''
    # E[ln pi] under the Dirichlet posterior over each annotator model
    psi_alpha_sum = psi(np.sum(alpha, 1))[:, None, :]
    self.lnPi = psi(alpha) - psi_alpha_sum
    return self.lnPi
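All of these snippets compute the same variational quantity: for a Dirichlet posterior with pseudo-counts alpha, E_q[ln pi_l] = psi(alpha_l) - psi(sum_l' alpha_l'), where psi is the digamma function (typically imported from scipy.special, with numpy as np; the last two examples use scipy.special as sp). Below is a minimal standalone sketch of the update above; the 3-D shape (true class x observed label x annotator) is my assumption, not taken from the source:

import numpy as np
from scipy.special import psi

J, L, K = 3, 3, 2                          # assumed: true classes x label values x annotators
alpha = 1.0 + np.random.rand(J, L, K)      # Dirichlet pseudo-counts

# E_q[ln pi_{j,l,k}] = psi(alpha_{j,l,k}) - psi(sum_l alpha_{j,l,k})
ElnPi = psi(alpha) - psi(np.sum(alpha, 1))[:, None, :]

# exp(ElnPi) need not sum exactly to 1 over the label axis
print(np.exp(ElnPi).sum(axis=1))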
Example #2
def _calc_q_pi(alpha):
    '''
    Update the annotator models.
    '''
    psi_alpha_sum = psi(np.sum(alpha, 1))[:, None, :]
    q_pi = psi(alpha) - psi_alpha_sum
    return q_pi
Example #3
    def _init_lnPi(alpha0):
        # Returns the initial values for alpha and lnPi
        psi_alpha_sum = psi(np.sum(alpha0, 1))
        lnPi = psi(alpha0) - psi_alpha_sum[:, None, :]

        # init to prior
        alpha = np.copy(alpha0)
        return alpha, lnPi
Example #4
    def init_lnPi(self, N):
        # Returns the initial values for alpha and lnPi
        psi_alpha_sum = psi(np.sum(self.alpha0, 1))
        self.lnPi = psi(self.alpha0) - psi_alpha_sum[:, None, :]

        # init to prior
        self.alpha = np.copy(self.alpha0)
        self.alpha_taggers = {}
        for midx in range(self.nModels):
            self.alpha_taggers[midx] = np.copy(self.alpha0_taggers[midx])
Example #5
    def _calc_q_pi(self, alpha):
        '''
        Update the annotator models.
        '''
        # rows 0 and 1 share a single normaliser; the remaining rows are
        # normalised together
        psi_alpha_sum = np.zeros_like(alpha)
        psi_alpha_sum[0, :] = psi(alpha[0, :] + alpha[1, :])
        psi_alpha_sum[1, :] = psi_alpha_sum[0, :]
        psi_alpha_sum[2:, :] = psi(np.sum(alpha[2:, :], 0))[None, :]

        return psi(alpha) - psi_alpha_sum
Example #6
    def fit_predict(self, Et):

        # update the Dirichlet pseudo-counts with the expected number of
        # occurrences of each feature for each label value

        beta = self.beta0 + self.features_mat.dot(Et)

        # expectation of ln rho under the updated Dirichlet posterior
        self.ElnRho = psi(beta) - psi(np.sum(beta, 0)[None, :])

        lnptext_given_t = self.ElnRho[self.features, :]

        # normalise, assuming equal prior here
        pt_given_text = np.exp(lnptext_given_t -
                               logsumexp(lnptext_given_t, 1)[:, None])

        return pt_given_text
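The normalisation at the end of fit_predict is the standard log-sum-exp softmax over the label axis; a standalone sketch of just that step, with made-up numbers:

import numpy as np
from scipy.special import logsumexp

lnp = np.array([[0.2, -1.0, 0.5],
                [-0.3, 0.1, 0.0]])             # unnormalised log-probabilities, one row per data point
p = np.exp(lnp - logsumexp(lnp, 1)[:, None])   # each row now sums to 1
print(p.sum(axis=1))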
Example #7
    def _init_lnPi(alpha0):
        # Returns the initial values for alpha and lnPi

        psi_alpha_sum = np.zeros_like(alpha0)
        psi_alpha_sum[0, :] = psi(alpha0[0, :] + alpha0[1, :])
        psi_alpha_sum[1, :] = psi_alpha_sum[0, :]

        psi_alpha_sum[2:, :] = psi(np.sum(alpha0[2:, :], 0))[None, :]

        lnPi = psi(alpha0) - psi_alpha_sum

        # init to prior
        alpha = np.copy(alpha0)

        return alpha, lnPi
Example #8
    def _calc_q_pi(alpha):
        '''
        Update the annotator models.
        '''
        psi_alpha_sum = np.zeros_like(alpha)
        psi_alpha_sum[0, :] = psi(alpha[0, :] + alpha[1, :])
        psi_alpha_sum[1, :] = psi_alpha_sum[0, :]
        psi_alpha_sum[2:, :] = psi(np.sum(alpha[2:, :], 0))[None, :]

        ElnPi = psi(alpha) - psi_alpha_sum

        # ElnPi[0, :] = np.log(0.5)
        # ElnPi[1, :] = np.log(0.5)
        # ElnPi[2:, :] = np.log(1.0 / float(alpha.shape[1] - 2))

        return ElnPi
Example #9
    def init_lnPi(self, N):
        # Returns the initial values for alpha and lnPi

        psi_alpha_sum = np.zeros_like(self.alpha0)
        psi_alpha_sum[0, :] = psi(self.alpha0[0, :] + self.alpha0[1, :])
        psi_alpha_sum[1, :] = psi_alpha_sum[0, :]

        psi_alpha_sum[2:, :] = psi(np.sum(self.alpha0[2:, :], 0))[None, :]

        self.lnPi = psi(self.alpha0) - psi_alpha_sum

        # init to prior
        self.alpha = np.copy(self.alpha0)
        self.alpha_taggers = {}
        for midx in range(self.nModels):
            self.alpha_taggers[midx] = np.copy(self.alpha0_taggers[midx])
Example #10
    def _calc_q_pi(self, alpha):
        '''
        Update the annotator models.
        '''
        psi_alpha_sum = psi(np.sum(alpha, 1))[:, None, :]
        self.lnpi = psi(alpha) - psi_alpha_sum

        # expected log-probability of producing an incorrect label, spread
        # evenly over the L-1 incorrect label values
        lnpi_incorrect = psi(np.sum(alpha, 1)
                             - alpha[np.arange(alpha.shape[0]), np.arange(alpha.shape[0]), :]) \
                         - psi_alpha_sum[:, 0, :]
        lnpi_incorrect = np.log(
            np.exp(lnpi_incorrect) / float(alpha.shape[1] - 1))  # J x K

        for j in range(alpha.shape[0]):
            for l in range(alpha.shape[1]):
                if j == l:
                    continue
                self.lnpi[j, l, :] = lnpi_incorrect[j, :]

        return self.lnpi
Example #11
    def _calc_q_pi(alpha):
        '''
        Update the annotator models.

        TODO Representing using a full matrix might break lower bound implementation
        '''
        psi_alpha_sum = psi(np.sum(alpha, 1))[:, None, :]
        q_pi = psi(alpha) - psi_alpha_sum

        q_pi_incorrect = psi(np.sum(alpha, 1) - alpha[np.arange(alpha.shape[0]), np.arange(alpha.shape[0]), :]) \
                         - psi_alpha_sum[:, 0, :]
        q_pi_incorrect = np.log(
            np.exp(q_pi_incorrect) / float(alpha.shape[1] - 1))  # J x K

        for j in range(alpha.shape[0]):
            for l in range(alpha.shape[1]):
                if j == l:
                    continue
                q_pi[j, l, :] = q_pi_incorrect[j, :]

        return q_pi
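The loop above ties every off-diagonal (incorrect-label) entry of each row to a single shared value. A quick check of that property, assuming _calc_q_pi is callable at module level and alpha is square in its first two dimensions (both are my assumptions):

import numpy as np
from scipy.special import psi

J, K = 3, 2
alpha = 1.0 + np.random.rand(J, J, K)

q_pi = _calc_q_pi(alpha)

for j in range(J):
    off_diag = np.delete(q_pi[j], j, axis=0)   # all incorrect-label rows for true class j
    assert np.allclose(off_diag, off_diag[0])  # they are all tied to the same value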
Example #12
def calc_grad_weight(nks, alphaS, alphaB, N, AB, AS):
    """ Calculate the weights for each bin k. Returns k-length vector."""
    K = nks.size
    w = sp.psi(nks + alphaS) - sp.psi(nks + alphaB) + sp.psi(N + AB) - sp.psi(N + AS)

    return w
Example #13
def calc_Qdiff(nVec, alphaS, alphaB):
    """ Calculate Q_k - Q_base, for each bin k. Returns k-length vector."""
    N = nVec.sum()
    Qdiff = sp.psi(nVec + alphaS) - sp.psi(nVec + alphaB) - sp.psi(N + alphaS.sum()) + sp.psi(N + alphaB.sum())
    return Qdiff
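A tiny usage sketch for the last two functions; interpreting N as the total count and AB, AS as the summed prior pseudo-counts is my assumption, inferred from calc_Qdiff, and the toy numbers are made up:

import numpy as np
import scipy.special as sp

nks = np.array([4.0, 1.0, 0.0, 3.0])      # observed counts per bin
alphaS = np.full(nks.size, 0.5)           # one Dirichlet prior (e.g. "signal")
alphaB = np.full(nks.size, 2.0)           # the other Dirichlet prior (e.g. "background")

w = calc_grad_weight(nks, alphaS, alphaB, nks.sum(), alphaB.sum(), alphaS.sum())
q = calc_Qdiff(nks, alphaS, alphaB)
print(w, q)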