Example #1
    def compute_forward_vec(self, seq_potential):
        """Compute the alpha (forward) matrix.

            Args:
                seq_potential: potential computed at every time step using :func:`self.compute_potential`.
                               It has the shape (seqlen, tagset_size, tagset_size).
        """
        #         print("we are in compute_forward_vec")
        y_codebook = self.y_codebook
        T = seq_potential.size(0)

        offset = 1  # one extra row for the _START_ state
        feat_score = seq_potential
        if self.transmatrix_flag:
            feat_score = seq_potential + self.y_transparams
        if self.stopstate_symb:
            # when a _STOP_ state is defined, a transition matrix is defined too
            offset = 2  # plus one more row for the _STOP_ state

        num_rows = T + offset
        # create the alpha matrix: row 0 holds the _START_ state,
        # rows 1..T hold the forward log-scores per time step
        alpha = torch.Tensor(num_rows, self.tagset_size).fill_(
            self.neginf).type(self.fdtype)
        alpha[0, y_codebook[self.startstate_symb]] = 0.  # log(1) for the start state
        alpha = autograd.Variable(alpha)
        for t in range(T):
            # sum (in log space) over all previous tags reaching each current tag
            score = alpha[t, :].view(-1, 1) + feat_score[t, :, :]
            log_score = logsumexp_var(score, dim=0)
            alpha[t + 1, :] = log_score
        if self.stopstate_symb:
            # one final transition from the last time step into the _STOP_ state
            t = T
            score = alpha[t, :].view(-1, 1) + self.y_transparams
            log_score = logsumexp_var(score, dim=0)
            alpha[t + 1, :] = log_score
            # the partition function is the score of ending in the _STOP_ state
            Z = alpha[t + 1, y_codebook[self.stopstate_symb]]
        else:
            t = T
            # the partition function sums (in log space) over all ending tags
            Z = logsumexp_var(alpha[t, :].view(-1, 1), dim=0)[0]
#         print("alpha \n", alpha)
#         print("Z: ", Z)
        return (Z, alpha)
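
The helper logsumexp_var used above is not shown in these examples. A minimal sketch of a numerically stable log-sum-exp along a chosen dimension, behaving the way the calls above assume (the original helper's exact implementation is an assumption), could look like this:

import torch


def logsumexp_var(score, dim=0):
    # hypothetical stand-in for the helper used above; modern PyTorch
    # provides torch.logsumexp(score, dim) directly
    max_score, _ = score.max(dim=dim, keepdim=True)
    # subtract the max before exponentiating to avoid overflow
    stable = (score - max_score).exp().sum(dim=dim, keepdim=True).log()
    return (stable + max_score).squeeze(dim)


# quick check against the built-in
x = torch.randn(4, 4)
assert torch.allclose(logsumexp_var(x, dim=0), torch.logsumexp(x, dim=0))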
Example #2
    def compute_backward_vec(self, seq_potential):
        """Compute the beta (backward) matrix.

            Args:
                seq_potential: potential computed at every time step using :func:`self.compute_potential`.
                               It has the shape (seqlen, tagset_size, tagset_size).
        """
        #         print("we are in compute_backward_vec")
        y_codebook = self.y_codebook
        T = seq_potential.size(0)

        offset = 1  # one extra row appended after the last time step
        feat_score = seq_potential
        if self.transmatrix_flag:
            feat_score = seq_potential + self.y_transparams
        if self.stopstate_symb:
            offset = 2  # a second extra row for the explicit _STOP_ state

        num_rows = T + offset
        # create the beta matrix; the extra final row(s) seed the recursion
        beta = torch.Tensor(num_rows, self.tagset_size).fill_(
            self.neginf).type(self.fdtype)
        if self.stopstate_symb:
            beta[-1, y_codebook[self.stopstate_symb]] = 0.  # log(1) for the stop state
            beta = autograd.Variable(beta)
            # one transition from the last time step into the _STOP_ state
            t = T
            score = beta[t + 1, :].view(1, -1) + self.y_transparams
            log_score = logsumexp_var(score, dim=1)
            beta[t, :] = log_score
        else:
            beta[-1, :] = 0.
            beta = autograd.Variable(beta)

#         print("beta matrix: \n", beta)
        for t in reversed(range(T)):
            # sum (in log space) over all next tags reachable from each current tag
            score = beta[t + 1, :].view(1, -1) + feat_score[t, :, :]
            log_score = logsumexp_var(score, dim=1)
            beta[t, :] = log_score
        Z = beta[0, y_codebook[self.startstate_symb]]  # partition function
        #         print("beta \n", beta)
        #         print("Z: ", Z)
        return (Z, beta)
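
A natural sanity check is that both passes recover the same log-partition function, and that combining them yields per-position tag marginals. A hedged sketch, assuming a crf object exposing the two methods above together with a compute_potential method (the object name and surrounding setup are assumptions, not shown in the source):

import torch

# seq_potential has shape (seqlen, tagset_size, tagset_size)
Z_fwd, alpha = crf.compute_forward_vec(seq_potential)
Z_bwd, beta = crf.compute_backward_vec(seq_potential)

# the forward and backward passes must agree on the log-partition function
assert torch.allclose(Z_fwd, Z_bwd, atol=1e-4)

# posterior marginal log-probabilities per time step:
# log p(y_t = y | x) = alpha[t + 1, y] + beta[t + 1, y] - Z
# (row 0 of alpha holds the _START_ state, hence the +1 row shift)
T = seq_potential.size(0)
log_marginals = alpha[1:T + 1, :] + beta[1:T + 1, :] - Z_fwd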