    def __init__(self,
                 embedding,
                 is_bidirectional=True,
                 lstm_size=512,
                 lstm_num_layer=3):
        """
        Output last hidden state of LSTM
        :param vocab_size:
        """
        super(Encoder, self).__init__()

        self.lstm_size = lstm_size
        self.lstm_num_layer = lstm_num_layer
        self.is_bidirectional = is_bidirectional
        pytorch_utils.register_buffer(self, 'dropout_rate', 0.3)

        self.embedding = embedding
        self.embedding_size = self.embedding.weight.size(1)
        self.lstm = nn.LSTM(input_size=self.embedding_size,
                            hidden_size=self.lstm_size,
                            num_layers=self.lstm_num_layer,
                            bidirectional=self.is_bidirectional,
                            dropout=self.dropout_rate.item())

        self.dropout = nn.Dropout(self.dropout_rate.item())
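
All of these snippets stash scalar hyperparameters via pytorch_utils.register_buffer, whose source is not shown here. The call sites read the values back with .item() (e.g. self.dropout_rate.item()), which suggests the helper simply wraps the scalar in a tensor and registers it as a module buffer, so it is saved in state_dict() and follows .to(device). A minimal sketch of that assumed behaviour:

import torch
import torch.nn as nn

def register_buffer(module: nn.Module, name: str, value) -> None:
    """Assumed behaviour of pytorch_utils.register_buffer: store a scalar
    hyperparameter as a tensor buffer on the module (persisted in state_dict,
    moved by .to(device), read back via .item())."""
    module.register_buffer(name, torch.tensor(value))

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        register_buffer(self, 'dropout_rate', 0.3)

m = Toy()
print(m.dropout_rate.item())             # 0.3 (approximately, stored as float32)
print('dropout_rate' in m.state_dict())  # True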
Example 2
    def __init__(self, vocab_size, enc_output_size, enc_embedding_size):
        """
        Common use for both Training and Inference
        :param vocab_size:
        """
        super(AttnRawDecoderWithSrc, self).__init__()
        pytorch_utils.register_buffer(self, 'embedding_size', 256)
        pytorch_utils.register_buffer(self, 'lstm_size', 512)
        pytorch_utils.register_buffer(self, 'lstm_num_layer', 3)
        pytorch_utils.register_buffer(self, 'dropout_rate', 0.3)
        pytorch_utils.register_buffer(self, 'half_window_size', 3)
        __embedding_size = self.embedding_size.item()
        __lstm_size = self.lstm_size.item()
        __lstm_num_layer = self.lstm_num_layer.item()
        __dropout_rate = self.dropout_rate.item()

        self.dec_embedding = nn.Embedding(num_embeddings=vocab_size,
                                          embedding_dim=__embedding_size)
        self.lstm = nn.LSTM(input_size=__embedding_size + enc_embedding_size,
                            hidden_size=__lstm_size,
                            num_layers=__lstm_num_layer,
                            bidirectional=False,
                            dropout=__dropout_rate)
        self.attention = Attention(enc_output_size=enc_output_size,
                                   dec_output_size=__lstm_size)
        __shrink_size = 512
        self.shrink_mapping = nn.Linear(__lstm_size + enc_output_size,
                                        __shrink_size)
        self.output_mapping = nn.Linear(__shrink_size, vocab_size)
        self.dropout = nn.Dropout(p=__dropout_rate)
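
The Attention module's interface is not shown, but the constructor sizes pin down the shape bookkeeping of a decoding step: the LSTM output (lstm_size) is concatenated with an attention context over the encoder outputs (enc_output_size), shrunk to 512, then projected to the vocabulary. A standalone sketch of that flow using plain linear layers (the real Attention and forward pass are assumptions, not reproduced here):

import torch
import torch.nn as nn

# Assumed sizes; vocab_size and enc_output_size come from the caller in the real module.
vocab_size, enc_output_size, lstm_size, shrink_size = 8000, 1024, 512, 512
batch = 4

dec_out = torch.randn(batch, lstm_size)        # one decoder LSTM step
context = torch.randn(batch, enc_output_size)  # attention-weighted encoder summary

shrink_mapping = nn.Linear(lstm_size + enc_output_size, shrink_size)
output_mapping = nn.Linear(shrink_size, vocab_size)

# Concatenate decoder state and context, shrink, then score the vocabulary.
logits = output_mapping(shrink_mapping(torch.cat([dec_out, context], dim=-1)))
print(logits.shape)  # torch.Size([4, 8000])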
Example 3
    def __init__(self, core_decoder, max_length, start_idx):
        """
        Greedy decoding at inference time, wrapping `core_decoder`.
        :param core_decoder: decoder module used for each step
        :param max_length: scalar int
        :param start_idx: scalar int
        """
        super(DecoderGreedyInfer, self).__init__()
        self.core_decoder = core_decoder
        self.register_buffer('start_idx', torch.tensor([[start_idx]]))
        pytorch_utils.register_buffer(self, 'max_length', max_length)
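
DecoderGreedyInfer's forward pass is not included, but the buffers (a (1, 1) start_idx and max_length) point at the usual greedy loop: feed the start token, take the argmax at every step, and stop after max_length tokens. A generic sketch with a hypothetical single-step callable standing in for core_decoder:

import torch

def greedy_decode(step, start_idx: int, max_length: int, batch: int = 1):
    """Generic greedy-decoding sketch.

    `step(tokens, state) -> (logits, state)` is a hypothetical single-step
    interface; the real core_decoder's API is not shown in the snippet.
    """
    tokens = torch.full((1, batch), start_idx, dtype=torch.long)  # (1, batch)
    state = None
    outputs = []
    for _ in range(max_length):
        logits, state = step(tokens, state)  # logits: (1, batch, vocab)
        tokens = logits.argmax(dim=-1)       # pick the best token greedily
        outputs.append(tokens)
    return torch.cat(outputs, dim=0)         # (max_length, batch)

# Dummy step function so the sketch runs end to end.
vocab = 10
dummy_step = lambda tok, st: (torch.randn(tok.size(0), tok.size(1), vocab), st)
print(greedy_decode(dummy_step, start_idx=1, max_length=5).shape)  # torch.Size([5, 1])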
Example 4
    def __init__(self, vocab_size):
        """
        Common use for both Training and Inference
        :param vocab_size:
        """
        super(RawDecoder, self).__init__()
        self.embedding_size = 256
        self.lstm_size = 512
        self.lstm_num_layer = 3
        pytorch_utils.register_buffer(self, 'dropout_rate', 0.3)

        self.embedding = nn.Embedding(num_embeddings=vocab_size,
                                      embedding_dim=self.embedding_size)
        self.lstm = nn.LSTM(input_size=self.embedding_size,
                            hidden_size=self.lstm_size,
                            num_layers=self.lstm_num_layer,
                            bidirectional=False,
                            dropout=self.dropout_rate.item())
        self.output_mapping = nn.Linear(self.lstm_size, vocab_size)
        self.dropout = nn.Dropout(p=self.dropout_rate.item())
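
RawDecoder's forward is likewise not shown; under a standard teacher-forcing assumption its layers compose as embedding -> dropout -> LSTM -> vocabulary projection. A sketch of one such step with the sizes above (the dropout placement is an assumption):

import torch
import torch.nn as nn

vocab_size, embedding_size, lstm_size, num_layers = 8000, 256, 512, 3
seq_len, batch = 7, 4

embedding = nn.Embedding(vocab_size, embedding_size)
lstm = nn.LSTM(embedding_size, lstm_size, num_layers, dropout=0.3)
output_mapping = nn.Linear(lstm_size, vocab_size)
dropout = nn.Dropout(p=0.3)

tgt = torch.randint(0, vocab_size, (seq_len, batch))  # (seq_len, batch), time-major
out, _ = lstm(dropout(embedding(tgt)))                # (seq_len, batch, lstm_size)
logits = output_mapping(out)                          # (seq_len, batch, vocab_size)
print(logits.shape)  # torch.Size([7, 4, 8000])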
Example 5
    def __init__(self, src_vocab_size, tgt_vocab_size, start_idx, padding_idx,
                 max_length):
        super(Seq2SeqChunk, self).__init__()

        pytorch_utils.register_buffer(self, 'lr_rate', 1e-3)
        pytorch_utils.register_buffer(self, 'max_length', max_length)
        pytorch_utils.register_buffer(self, 'chunk_size', 10)

        self.__start_idx_int = start_idx
        self.__padding_idx_int = padding_idx

        self.encoder = Encoder(vocab_size=src_vocab_size,
                               is_bidirectional=False)
        _enc_output_size = (2 * self.encoder.lstm_size.item()
                            if self.encoder.is_bidirectional.item()
                            else self.encoder.lstm_size.item())
        self.flatten_hidden_lstm = FlattenHiddenLSTM(
            lstm_num_layer=3,
            is_bidirectional=bool(self.encoder.is_bidirectional.item()))
        self.core_decoder = AttnRawDecoderWithSrc(
            vocab_size=tgt_vocab_size,
            enc_output_size=_enc_output_size,
            enc_embedding_size=self.encoder.embedding_size.item())
        self.greedy_infer = DecoderGreedyWithSrcInfer(
            core_decoder=self.core_decoder)

        self.xent = None
        self.optimizer = None

        self.register_buffer('start_idx', torch.Tensor([start_idx]).long())
        self.register_buffer('padding_idx',
                             torch.Tensor([[padding_idx]]).long())
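
self.xent and self.optimizer are left as None, so the loss and optimizer are presumably wired up elsewhere (e.g. once the module is on its device). A hedged sketch of the kind of setup the registered buffers suggest; the helper name configure_training is made up for illustration:

import torch
import torch.nn as nn

def configure_training(model) -> None:
    """Hypothetical helper: build the loss and optimizer the __init__ defers.

    Assumes `model` is a Seq2SeqChunk-like module with the `padding_idx` and
    `lr_rate` buffers registered above.
    """
    model.xent = nn.CrossEntropyLoss(ignore_index=int(model.padding_idx.item()))
    model.optimizer = torch.optim.Adam(model.parameters(),
                                       lr=model.lr_rate.item())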
Example 6
    def __init__(self, vocab_size, is_bidirectional=True):
        """

        :param vocab_size:
        :param is_bidirectional:
        """
        super(Encoder, self).__init__()

        self.embedding_size = 512
        self.lstm_size = 512
        self.lstm_num_layer = 3
        self.is_bidirectional = is_bidirectional
        pytorch_utils.register_buffer(self, 'dropout_rate', 0.3)
        __dropout_rate = self.dropout_rate.item()

        self.embedding = nn.Embedding(num_embeddings=vocab_size,
                                      embedding_dim=self.embedding_size)
        self.lstm = nn.LSTM(input_size=self.embedding_size,
                            hidden_size=self.lstm_size,
                            num_layers=self.lstm_num_layer,
                            bidirectional=self.is_bidirectional,
                            dropout=__dropout_rate)

        self.dropout = nn.Dropout(__dropout_rate)
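
Since Seq2SeqChunk above derives _enc_output_size as 2 * lstm_size for a bidirectional encoder, a quick shape check of this Encoder's LSTM makes that concrete (the Encoder's own forward is not shown, so the LSTM is called directly):

import torch
import torch.nn as nn

vocab_size, embedding_size, lstm_size, num_layers = 8000, 512, 512, 3
seq_len, batch = 9, 4

embedding = nn.Embedding(vocab_size, embedding_size)
lstm = nn.LSTM(embedding_size, lstm_size, num_layers,
               bidirectional=True, dropout=0.3)

src = torch.randint(0, vocab_size, (seq_len, batch))  # time-major token ids
out, (h_n, c_n) = lstm(embedding(src))

print(out.shape)  # torch.Size([9, 4, 1024]) -> 2 * lstm_size features per step
print(h_n.shape)  # torch.Size([6, 4, 512])  -> num_layers * num_directions states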