Code example #1
class RecurrentAttention(nn.Module):
    def __init__(self,
                 input_size,
                 context_size,
                 hidden_size,
                 num_layers=1,
                 bias=True,
                 batch_first=False,
                 dropout=0):

        super(RecurrentAttention, self).__init__()

        self.rnn = nn.LSTM(input_size, hidden_size, num_layers, bias,
                           batch_first)

        self.attn = BahdanauAttention(hidden_size,
                                      context_size,
                                      context_size,
                                      normalize=True,
                                      batch_first=batch_first)

        self.dropout = nn.Dropout(dropout)

    def forward(self, inputs, hidden, context, context_len):
        # Set the attention mask: sequences have different lengths, and this
        # mask restricts the attention softmax to the valid elements of
        # context.
        self.attn.set_mask(context_len, context)

        rnn_outputs, hidden = self.rnn(inputs, hidden)
        attn_outputs, scores = self.attn(rnn_outputs, context)
        rnn_outputs = self.dropout(rnn_outputs)

        return rnn_outputs, hidden, attn_outputs, scores
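
These excerpts assume import torch, import torch.nn as nn, and the BahdanauAttention module from the source project (seq2seq/models/attention.py). A minimal usage sketch under those assumptions, using the default batch_first=False layout; the sizes below are illustrative only, not values from the project:

import torch

# Illustrative sizes only (hypothetical).
seq_len, batch, src_len = 5, 3, 7
input_size = context_size = hidden_size = 16

# Assumes RecurrentAttention (above) and its BahdanauAttention dependency
# are importable from the source project.
layer = RecurrentAttention(input_size, context_size, hidden_size)

inputs = torch.randn(seq_len, batch, input_size)      # decoder inputs: (seq, batch, feature)
context = torch.randn(src_len, batch, context_size)   # encoder outputs
context_len = torch.tensor([7, 5, 3])                 # valid source length per batch element
hidden = None                                         # let the LSTM create a zero initial state

rnn_outputs, hidden, attn_outputs, scores = layer(inputs, hidden, context, context_len)
# rnn_outputs has shape (seq_len, batch, hidden_size)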
Code example #2
    def __init__(self,
                 input_size,
                 context_size,
                 hidden_size,
                 num_layers=1,
                 bias=True,
                 batch_first=False,
                 dropout=0):
        """
        Constructor for the RecurrentAttention.

        :param input_size: number of features in input tensor
        :param context_size: number of features in output from encoder
        :param hidden_size: internal hidden size
        :param num_layers: number of layers in LSTM
        :param bias: enables bias in LSTM layers
        :param batch_first: if True the model uses (batch, seq, feature)
            tensors, if False it uses (seq, batch, feature)
        :param dropout: probability of dropout
        """

        super(RecurrentAttention, self).__init__()

        self.rnn = nn.LSTM(input_size, hidden_size, num_layers, bias,
                           batch_first)

        self.attn = BahdanauAttention(hidden_size,
                                      context_size,
                                      context_size,
                                      normalize=True,
                                      batch_first=batch_first)

        self.dropout = nn.Dropout(dropout)
Code example #3
class RecurrentAttention(nn.Module):
    """
    LSTM with an attention module.
    """
    def __init__(self,
                 input_size,
                 context_size,
                 hidden_size,
                 num_layers=1,
                 bias=True,
                 batch_first=False,
                 dropout=0):
        """
        Constructor for the RecurrentAttention.

        :param input_size: number of features in input tensor
        :param context_size: number of features in output from encoder
        :param hidden_size: internal hidden size
        :param num_layers: number of layers in LSTM
        :param bias: enables bias in LSTM layers
        :param batch_first: if True the model uses (batch, seq, feature)
            tensors, if False it uses (seq, batch, feature)
        :param dropout: probability of dropout
        """

        super(RecurrentAttention, self).__init__()

        self.rnn = nn.LSTM(input_size, hidden_size, num_layers, bias,
                           batch_first)

        self.attn = BahdanauAttention(hidden_size,
                                      context_size,
                                      context_size,
                                      normalize=True,
                                      batch_first=batch_first)

        self.dropout = nn.Dropout(dropout)

    def forward(self, inputs, hidden, context, context_len):
        """
        Execute RecurrentAttention.

        :param inputs: tensor with inputs
        :param hidden: hidden state for LSTM layer
        :param context: context tensor from encoder
        :param context_len: vector of encoder sequence lengths

        :returns: (rnn_outputs, hidden, attn_outputs, attn_scores)
        """
        # Set the attention mask: sequences have different lengths, and this
        # mask restricts the attention softmax to the valid elements of
        # context.
        self.attn.set_mask(context_len, context)

        rnn_outputs, hidden = self.rnn(inputs, hidden)
        attn_outputs, scores = self.attn(rnn_outputs, context)
        rnn_outputs = self.dropout(rnn_outputs)

        return rnn_outputs, hidden, attn_outputs, scores
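
The comment in forward() describes masking padded positions out of the attention softmax. set_mask itself is not shown in these excerpts; the helper below is one common way such a mask can be built from context_len, given purely as an illustrative assumption rather than the project's actual implementation:

import torch

def build_context_mask(context_len, context, batch_first=False):
    """Boolean mask of shape (batch, max_src_len); True marks padded positions."""
    max_len = context.size(1) if batch_first else context.size(0)
    positions = torch.arange(max_len, device=context.device)
    # A position is padding if its index is at or beyond the sequence's true length.
    return positions.unsqueeze(0) >= context_len.unsqueeze(1)

# The attention module would typically fill masked positions with -inf in the
# raw scores so that the softmax assigns them zero weight.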
Code example #4
File: decoder.py  Project: shengyushen/training
    def __init__(self, input_size, context_size, hidden_size, num_layers=1,
                 bias=True, batch_first=False, dropout=0):

        super(RecurrentAttention, self).__init__()

        self.rnn = nn.LSTM(input_size, hidden_size, num_layers, bias,
                           batch_first)
        # SSY: BahdanauAttention is defined in seq2seq/models/attention.py
        self.attn = BahdanauAttention(hidden_size, context_size, context_size,
                                      normalize=True, batch_first=batch_first)

        self.dropout = nn.Dropout(dropout)
Code example #5
File: decoder.py  Project: shengyushen/training
class RecurrentAttention(nn.Module):
    def __init__(self,
                 input_size,
                 context_size,
                 hidden_size,
                 num_layers=1,
                 bias=True,
                 batch_first=False,
                 dropout=0):

        super(RecurrentAttention, self).__init__()

        self.rnn = nn.LSTM(input_size, hidden_size, num_layers, bias,
                           batch_first)
        # SSY 2.1.1: BahdanauAttention (seq2seq/models/attention.py) uses nn.Linear and torch.bmm
        self.attn = BahdanauAttention(hidden_size,
                                      context_size,
                                      context_size,
                                      normalize=True,
                                      batch_first=batch_first)

        self.dropout = nn.Dropout(dropout)

    def forward(self, inputs, hidden, context, context_len):
        # Set the attention mask: sequences have different lengths, and this
        # mask restricts the attention softmax to the valid elements of
        # context.
        self.attn.set_mask(context_len, context)

        rnn_outputs, hidden = self.rnn(bf16cutfp.apply(inputs), hidden)
        # SSY it seems to be None
        #bf16cutfp.apply(hidden))
        rnn_outputs = bf16cutbp.apply(rnn_outputs)
        hidden = bf16cutbp.apply(hidden)

        attn_outputs, scores = self.attn(rnn_outputs, context)
        rnn_outputs = self.dropout(rnn_outputs)

        return rnn_outputs, hidden, attn_outputs, scores
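
This variant additionally wraps the LSTM input and outputs in bf16cutfp and bf16cutbp, which are defined elsewhere in the project. Judging only by the names, they appear to emulate bfloat16 precision on the forward and backward passes; a hypothetical minimal sketch (not the project's actual code) could look like this:

import torch

class bf16cutfp(torch.autograd.Function):
    """Hypothetical: round activations to bfloat16 precision on the forward pass."""
    @staticmethod
    def forward(ctx, x):
        return x.to(torch.bfloat16).to(x.dtype)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output


class bf16cutbp(torch.autograd.Function):
    """Hypothetical: round the incoming gradient to bfloat16 precision on the backward pass."""
    @staticmethod
    def forward(ctx, x):
        return x

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output.to(torch.bfloat16).to(grad_output.dtype)

# Note: the excerpt above also applies bf16cutbp to the LSTM hidden state, which
# is an (h_n, c_n) tuple; the project's real implementation presumably handles
# that case as well.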
Code example #6
    def __init__(self,
                 input_size=1024,
                 context_size=1024,
                 hidden_size=1024,
                 num_layers=1,
                 batch_first=False,
                 dropout=0.2,
                 init_weight=0.1,
                 fusion=True):
        """
        Constructor for the RecurrentAttention.

        :param input_size: number of features in input tensor
        :param context_size: number of features in output from encoder
        :param hidden_size: internal hidden size
        :param num_layers: number of layers in LSTM
        :param batch_first: if True the model uses (batch, seq, feature)
            tensors, if False it uses (seq, batch, feature)
        :param dropout: probability of dropout (on input to LSTM layer)
        :param init_weight: range for the uniform initializer
        :param fusion: passed through to BahdanauAttention
        """

        super(RecurrentAttention, self).__init__()

        self.rnn = nn.LSTM(input_size,
                           hidden_size,
                           num_layers,
                           bias=True,
                           batch_first=batch_first)
        init_lstm_(self.rnn, init_weight)

        self.attn = BahdanauAttention(hidden_size,
                                      context_size,
                                      context_size,
                                      normalize=True,
                                      batch_first=batch_first,
                                      fusion=fusion)

        self.dropout = nn.Dropout(dropout)
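
init_lstm_ is another project helper that is not shown in these excerpts. A minimal sketch consistent with the init_weight docstring entry, assuming a plain uniform initializer (the project's helper may treat weights and biases differently):

import torch.nn as nn

def init_lstm_(lstm, init_weight=0.1):
    """Hypothetical: initialize every LSTM parameter from U(-init_weight, init_weight), in place."""
    for _, param in lstm.named_parameters():
        nn.init.uniform_(param.data, -init_weight, init_weight)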