Example #1
    def forward(self, input, lengths=None, hidden=None, ent=None):
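        # Word-level embedding dropout (embedded_dropout) zeroes whole embedding rows while training.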
        if self.training and (self.dropword > 0):
            emb = embedded_dropout(self.embeddings,
                                   input,
                                   dropout=self.dropword)
        else:
            emb = self.embeddings(input)
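        # Optionally concatenate entity embeddings along the feature dimension.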
        if self.ent_embedding is not None:
            emb_ent = self.ent_embedding(ent)
            emb = torch.cat((emb, emb_ent), 2)
        if self.word_dropout is not None:
            emb = self.word_dropout(emb)
        # s_len, batch, emb_dim = emb.size()

        packed_emb = emb
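        # Pack padded sequences so the RNN skips padded positions.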
        need_pack = (lengths is not None) and (not self.no_pack_padded_seq)
        if need_pack:
            # `lengths` may be a Tensor; packing expects a flat Python list.
            if not isinstance(lengths, list):
                lengths = lengths.view(-1).tolist()
            packed_emb = pack(emb, lengths)

        outputs, hidden_t = self.rnn(packed_emb, hidden)

        if need_pack:
            outputs = unpack(outputs)[0]

        return hidden_t, outputs
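
A minimal, self-contained sketch of how an encoder like this might be built and called, assuming `pack`/`unpack` are the usual aliases for torch.nn.utils.rnn.pack_padded_sequence / pad_packed_sequence and a plain nn.LSTM; the class, sizes, and field names below are illustrative assumptions, not the original project's code.

# Illustrative sketch only; class and parameter names are assumptions.
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack, pad_packed_sequence as unpack

class ToyEncoder(nn.Module):
    def __init__(self, vocab_size=100, emb_dim=16, hidden_size=32):
        super().__init__()
        self.embeddings = nn.Embedding(vocab_size, emb_dim)
        self.rnn = nn.LSTM(emb_dim, hidden_size)  # expects (len, batch, emb_dim)

    def forward(self, input, lengths=None, hidden=None):
        emb = self.embeddings(input)
        packed_emb = emb
        if lengths is not None:
            # Pack so the RNN skips padded positions.
            packed_emb = pack(emb, lengths, enforce_sorted=False)
        outputs, hidden_t = self.rnn(packed_emb, hidden)
        if lengths is not None:
            # Restore the padded (len, batch, hidden) layout.
            outputs = unpack(outputs)[0]
        return hidden_t, outputs

tokens = torch.randint(0, 100, (7, 2))    # (len, batch)
lengths = [7, 5]                          # true lengths per batch element
hidden_t, outputs = ToyEncoder()(tokens, lengths)
print(outputs.shape)                      # torch.Size([7, 2, 32])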
Example #2
    def forward(self, inp, context, state, parent_index):
        """
        Forward through the decoder.
        Args:
            input (LongTensor): a sequence of input tokens tensors
                                of size (len x batch x nfeats).
            context (FloatTensor): output(tensor sequence) from the encoder
                        RNN of size (src_len x batch x hidden_size).
            state (FloatTensor): hidden state from the encoder RNN for
                                 initializing the decoder.
        Returns:
            outputs (FloatTensor): a Tensor sequence of output from the decoder
                                   of shape (len x batch x hidden_size).
            state (FloatTensor): final hidden state from the decoder.
            attns (dict of (str, FloatTensor)): a dictionary of different
                                type of attention Tensor from the decoder
                                of shape (src_len x batch).
        """
        # Args Check
        assert isinstance(state, RNNDecoderState)
        # END Args Check

        if self.embeddings is not None:
            if self.training and (self.dropword > 0):
                emb = embedded_dropout(self.embeddings,
                                       inp,
                                       dropout=self.dropword)
            else:
                emb = self.embeddings(inp)
        else:
            emb = inp
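        # Optional dropout on the embedded (or raw) decoder input.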
        if self.word_dropout is not None:
            emb = self.word_dropout(emb)

        # Run the forward pass of the RNN.
        hidden, outputs, attns, rnn_output, concat_c = self._run_forward_pass(
            emb, context, state, parent_index)

        # Update the state with the result.
        state.update_state(hidden)

        # Concatenates sequence of tensors along a new dimension.
        outputs = torch.stack(outputs)
        attns = torch.stack(attns)

        return outputs, state, attns, rnn_output, concat_c
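
A rough sketch of the stacking step at the end of this forward, assuming `_run_forward_pass` returns one output and one attention tensor per target step; the toy shapes below are illustrative assumptions, not taken from the original project.

# Illustrative sketch of stacking per-step decoder outputs; shapes are assumptions.
import torch

tgt_len, batch, hidden_size, src_len = 4, 2, 8, 6

# Pretend the RNN pass produced one output and one attention tensor per target step.
outputs = [torch.randn(batch, hidden_size) for _ in range(tgt_len)]
attns = [torch.softmax(torch.randn(batch, src_len), dim=-1) for _ in range(tgt_len)]

# torch.stack adds a new leading time dimension, matching the documented shapes.
outputs = torch.stack(outputs)   # (tgt_len, batch, hidden_size)
attns = torch.stack(attns)       # (tgt_len, batch, src_len)
print(outputs.shape, attns.shape)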