Example #1
    def __init__(self, vocab_size, word_dim, n_sentiment_tag, tag_dim,
                 lstm_dim, mlp_dim, n_class, we):
        super(BiLSTMWordTag, self).__init__()

        self.vocab_size = vocab_size
        self.word_dim = word_dim

        self.n_sentiment_tag = n_sentiment_tag
        self.tag_dim = tag_dim

        self.lstm_dim = lstm_dim

        # word and tag embeddings are presumably concatenated before the BiLSTM,
        # so the encoder input size must equal word_dim + tag_dim
        assert self.lstm_dim == self.word_dim + self.tag_dim, \
            "lstm_dim must equal word_dim + tag_dim"

        self.mlp_dim = mlp_dim
        self.n_class = n_class
        self.drop = nn.Dropout(p=0.5)

        self.word_embedding = TokenEmbedding(self.vocab_size,
                                             self.word_dim,
                                             pretrained_emb=we)
        # index 0 is reserved for padding, hence n_sentiment_tag + 1 rows
        self.tag_embedding = nn.Embedding(self.n_sentiment_tag + 1,
                                          self.tag_dim,
                                          padding_idx=0)

        self.bilstm = BiLstmEncoder(self.lstm_dim, self.lstm_dim)
        self.interaction_layer = SelfAttention(2 * self.lstm_dim,
                                               self.lstm_dim)

        self.mlp_layer = nn.Linear(2 * self.lstm_dim, self.mlp_dim)

        self.classify_layer = nn.Linear(self.mlp_dim, self.n_class)
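
TokenEmbedding is used throughout these examples but is not defined in the snippets. Judging from the call sites (vocab size, embedding dimension, optional pretrained_emb=we), it is presumably a thin wrapper around nn.Embedding that can be initialized from a pretrained weight matrix. A minimal sketch under that assumption:

import torch
import torch.nn as nn

class TokenEmbedding(nn.Module):
    """Hypothetical stand-in for the TokenEmbedding used in these examples:
    an nn.Embedding that can optionally copy in a pretrained weight matrix."""

    def __init__(self, vocab_size, emb_dim, pretrained_emb=None):
        super(TokenEmbedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        if pretrained_emb is not None:
            # assumed to be an array/tensor of shape (vocab_size, emb_dim)
            self.embedding.weight.data.copy_(torch.as_tensor(pretrained_emb))

    def forward(self, token_ids):
        # token_ids: (batch, seq_len) -> (batch, seq_len, emb_dim)
        return self.embedding(token_ids)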

Example #2

    def __init__(self,
                 n_sentiment_tag=5,
                 tag_dim=50,
                 lstm_dim=50,
                 mlp_dim=100,
                 n_class=2):

        super(BilstmClassifier, self).__init__()

        self.n_sentiment_tag = n_sentiment_tag
        self.tag_dim = tag_dim
        self.lstm_tag_dim = lstm_dim

        self.mlp_dim = mlp_dim
        self.n_class = n_class
        self.drop = nn.Dropout(p=0.5)

        # 1 + n_sentiment_tag rows: presumably one extra row reserved for a
        # padding index, as in Example #1
        self.tag_embedding = TokenEmbedding(1 + self.n_sentiment_tag,
                                            self.tag_dim)

        self.tag_bilstm = BiLstmEncoder(self.tag_dim, self.lstm_tag_dim)

        self.interaction_layer = SelfAttention(2 * self.lstm_tag_dim,
                                               self.lstm_tag_dim)

        self.mlp_layer = nn.Linear(2 * self.lstm_tag_dim, self.mlp_dim)

        self.classify_layer = nn.Linear(self.mlp_dim, self.n_class)
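
BiLstmEncoder(input_dim, hidden_dim) also has no definition in these snippets. The 2 * lstm_dim sizes of the layers that consume its output suggest it returns the concatenated forward and backward states for every token. A minimal sketch under that assumption:

import torch.nn as nn

class BiLstmEncoder(nn.Module):
    """Hypothetical stand-in for the BiLstmEncoder used in these examples:
    a single-layer bidirectional LSTM whose per-token output has size
    2 * hidden_dim."""

    def __init__(self, input_dim, hidden_dim):
        super(BiLstmEncoder, self).__init__()
        self.lstm = nn.LSTM(input_dim, hidden_dim,
                            batch_first=True, bidirectional=True)

    def forward(self, x):
        # x: (batch, seq_len, input_dim) -> (batch, seq_len, 2 * hidden_dim)
        output, _ = self.lstm(x)
        return output
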
Example #3
    def __init__(self,
                 vocab_size,
                 word_dim,
                 lstm_dim,
                 mlp_dim,
                 n_class,
                 we=None):
        super(BiLSTMAttention, self).__init__()

        self.vocab_size = vocab_size
        self.word_dim = word_dim
        self.lstm_dim = lstm_dim
        self.mlp_dim = mlp_dim
        self.n_class = n_class
        self.drop = nn.Dropout(p=0.5)

        self.word_embedding = TokenEmbedding(self.vocab_size,
                                             self.word_dim,
                                             pretrained_emb=we)
        self.bilstm = BiLstmEncoder(self.word_dim, self.lstm_dim)

        self.interaction_layer = SelfAttention(2 * self.lstm_dim,
                                               self.lstm_dim)

        self.mlp_layer = nn.Linear(2 * self.lstm_dim, self.mlp_dim)
        self.classify_layer = nn.Linear(self.mlp_dim, self.n_class)
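
SelfAttention(2 * lstm_dim, lstm_dim) is likewise undefined here. Since mlp_layer still expects 2 * lstm_dim features after it, it presumably scores each time step and returns an attention-weighted sum of the BiLSTM states. A minimal sketch of that kind of self-attention pooling (the additive tanh scoring is an assumption):

import torch
import torch.nn as nn

class SelfAttention(nn.Module):
    """Hypothetical stand-in for the SelfAttention used in these examples:
    additive scoring of each time step followed by a weighted sum, so the
    pooled vector keeps the input feature size."""

    def __init__(self, input_dim, att_dim):
        super(SelfAttention, self).__init__()
        self.scorer = nn.Sequential(
            nn.Linear(input_dim, att_dim),
            nn.Tanh(),
            nn.Linear(att_dim, 1),
        )

    def forward(self, x):
        # x: (batch, seq_len, input_dim)
        scores = self.scorer(x).squeeze(-1)                   # (batch, seq_len)
        weights = torch.softmax(scores, dim=-1)               # (batch, seq_len)
        return torch.bmm(weights.unsqueeze(1), x).squeeze(1)  # (batch, input_dim)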

Example #4

    def __init__(self, vocab_size, word_dim, lstm_dim, n_sentiment_tag,
                 mlp_dim, n_class, we, interaction_type):
        super(BilstmClassifier, self).__init__()

        self.vocab_size = vocab_size
        self.word_dim = word_dim
        self.lstm_dim = lstm_dim
        self.n_sentiment_tag = n_sentiment_tag
        self.mlp_dim = mlp_dim
        self.n_class = n_class

        self.word_embedding = TokenEmbedding(self.vocab_size,
                                             self.word_dim,
                                             pretrained_emb=we)
        self.bilstm = BiLstmEncoder(self.word_dim, self.lstm_dim)
        # interaction_type selects how the per-token BiLSTM states are pooled
        # into a single sentence representation of size 2 * lstm_dim
        if interaction_type == "max":
            self.interaction_layer = MaxPooling()
        elif interaction_type == "avg":
            self.interaction_layer = AveragePooling()
        else:
            self.interaction_layer = SelfAttention(2 * self.lstm_dim,
                                                   self.lstm_dim)

        self.mlp_layer = nn.Linear(2 * self.lstm_dim, self.mlp_dim)
        self.classify_layer = nn.Linear(self.mlp_dim, self.n_class)

        # auxiliary projection from each 2 * lstm_dim BiLSTM state to the
        # sentiment tag space (presumably a per-token tagging head)
        self.word2sentiment_tag = nn.Linear(2 * self.lstm_dim,
                                            self.n_sentiment_tag)
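
MaxPooling and AveragePooling are also not shown. Because mlp_layer expects 2 * lstm_dim features no matter which branch is taken, they presumably pool over the time dimension only. Minimal sketches under that assumption:

import torch
import torch.nn as nn

class MaxPooling(nn.Module):
    """Hypothetical max-over-time pooling: (batch, seq_len, dim) -> (batch, dim)."""

    def forward(self, x):
        return torch.max(x, dim=1).values

class AveragePooling(nn.Module):
    """Hypothetical mean-over-time pooling: (batch, seq_len, dim) -> (batch, dim)."""

    def forward(self, x):
        return torch.mean(x, dim=1)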

Example #5

    def __init__(self, vocab_size, word_dim, lstm_dim, mlp_dim, n_class, we=None):
        super(BiLSTMLast, self).__init__()

        self.vocab_size = vocab_size
        self.word_dim = word_dim
        self.lstm_dim = lstm_dim
        self.mlp_dim = mlp_dim
        self.n_class = n_class

        self.word_embedding = TokenEmbedding(self.vocab_size, self.word_dim, pretrained_emb=we)
        # nn.LSTM used directly; with bidirectional=True the final forward and
        # backward hidden states together give the 2 * lstm_dim features below
        self.bilstm = nn.LSTM(self.word_dim, self.lstm_dim, batch_first=True, bidirectional=True)
        self.mlp_layer = nn.Linear(2 * self.lstm_dim, self.mlp_dim)
        self.classify_layer = nn.Linear(self.mlp_dim, self.n_class)
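
The forward method of BiLSTMLast is not part of this snippet, but the class name and the 2 * lstm_dim input of mlp_layer suggest that the last forward and backward hidden states are concatenated. A hedged sketch of how those features could be extracted from the layers declared above:

import torch

def bilstm_last_features(model, token_ids):
    """Hypothetical helper: concatenate the final forward/backward hidden
    states of model.bilstm into a (batch, 2 * lstm_dim) feature vector."""
    emb = model.word_embedding(token_ids)        # (batch, seq_len, word_dim)
    _, (h_n, _) = model.bilstm(emb)              # h_n: (2, batch, lstm_dim)
    return torch.cat([h_n[0], h_n[1]], dim=-1)   # (batch, 2 * lstm_dim)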

Example #6

    def __init__(self, vocab_size, word_dim, lstm_dim, mlp_dim, n_class, we=None):
        super(BiLSTMAverage, self).__init__()

        self.vocab_size = vocab_size
        self.word_dim = word_dim
        self.lstm_dim = lstm_dim
        self.mlp_dim = mlp_dim
        self.n_class = n_class

        self.word_embedding = TokenEmbedding(self.vocab_size, self.word_dim, pretrained_emb=we)
        self.bilstm = BiLstmEncoder(self.word_dim, self.lstm_dim)
        self.average_pooling_layer = AveragePooling()
        self.mlp_layer = nn.Linear(2 * self.lstm_dim, self.mlp_dim)
        self.classify_layer = nn.Linear(self.mlp_dim, self.n_class)
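
None of these snippets include a forward pass, but the layer order in BiLSTMAverage suggests embedding, BiLSTM encoding, average pooling, MLP, then classification. A hedged sketch of that flow (the ReLU between mlp_layer and classify_layer, and BiLstmEncoder returning per-token states, are assumptions):

import torch

def bilstm_average_logits(model, token_ids):
    """Hypothetical forward pass for BiLSTMAverage, chaining the layers in
    the order they are declared above."""
    emb = model.word_embedding(token_ids)            # (batch, seq_len, word_dim)
    hidden = model.bilstm(emb)                       # (batch, seq_len, 2 * lstm_dim), assumed
    pooled = model.average_pooling_layer(hidden)     # (batch, 2 * lstm_dim)
    mlp_out = torch.relu(model.mlp_layer(pooled))    # activation choice is an assumption
    return model.classify_layer(mlp_out)             # (batch, n_class)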