Example #1
    def __init__(self, embedding_matrix, opt):
        super(AEN, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()

        self.attn_k = Attention(opt.embed_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.attn_q = Attention(opt.embed_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.ffn_c = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)
        self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)

        self.attn_s1 = Attention(opt.hidden_dim,
                                 n_head=8,
                                 score_function='mlp',
                                 dropout=opt.dropout)

        self.dense = nn.Linear(opt.hidden_dim * 3, opt.polarities_dim)
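For orientation, here is a minimal sketch of how a constructor like this might be driven. The opt namespace and the values below are assumptions chosen to match the attributes the code reads (embed_dim, hidden_dim, dropout, polarities_dim), not the original training script.

from types import SimpleNamespace

import numpy as np

opt = SimpleNamespace(
    embed_dim=300,       # GloVe vector size (assumed)
    hidden_dim=300,      # attention / feed-forward output size (assumed)
    dropout=0.1,
    polarities_dim=3,    # negative / neutral / positive
)

# Illustrative embedding matrix: 5000 random GloVe-sized vectors.
embedding_matrix = np.random.uniform(-0.25, 0.25, (5000, opt.embed_dim)).astype('float32')

model = AEN(embedding_matrix, opt)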
Example #2
    def __init__(self, embedding_matrix, opt):
        super(LCF_GLOVE, self).__init__()
        self.config = BertConfig.from_json_file("config.json")
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.mha_global = SelfAttention(self.config, opt)
        self.mha_local = SelfAttention(self.config, opt)
        self.ffn_global = PositionwiseFeedForward(self.opt.hidden_dim,
                                                  dropout=self.opt.dropout)
        self.ffn_local = PositionwiseFeedForward(self.opt.hidden_dim,
                                                 dropout=self.opt.dropout)
        self.mha_local_SA = SelfAttention(self.config, opt)
        self.mha_global_SA = SelfAttention(self.config, opt)
        self.mha_SA_single = SelfAttention(self.config, opt)
        self.bert_pooler = BertPooler(self.config)

        self.bert_pooler1 = BertPooler(self.config)
        self.bert_pooler2 = BertPooler(self.config)
        self.dense1 = nn.Linear(opt.hidden_dim, opt.polarities_dim)
        self.dense2 = nn.Linear(opt.hidden_dim, opt.polarities_dim)
        self.sentiment_pool = nn.Linear(6, 3)

        self.dropout = nn.Dropout(opt.dropout)
        self.mean_pooling_double = nn.Linear(opt.embed_dim * 2, opt.hidden_dim)
        self.mean_pooling_single = nn.Linear(opt.embed_dim, opt.hidden_dim)
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
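The SelfAttention blocks here are parameterized by a local BertConfig JSON file ("config.json"). Below is a hedged sketch of producing a compatible file; the field values (a hidden_size matching the GloVe embed_dim, six heads) are assumptions rather than the repository's shipped configuration.

import json

from transformers import BertConfig  # the original repo may use pytorch_pretrained_bert instead

# Assumed settings: hidden_size equals the embedding size so the attention
# layers can consume the GloVe vectors directly; heads must divide hidden_size.
cfg = {
    "hidden_size": 300,
    "num_attention_heads": 6,
    "num_hidden_layers": 1,
    "intermediate_size": 600,
    "hidden_dropout_prob": 0.1,
    "attention_probs_dropout_prob": 0.1,
    "max_position_embeddings": 512,
}
with open("config.json", "w") as f:
    json.dump(cfg, f, indent=2)

config = BertConfig.from_json_file("config.json")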
Example #3
 def __init__(self, bert, opt):
     super(AEN_BERT, self).__init__()
     # BERT backbone plus embedding squeeze and dropout
     self.opt = opt
     self.bert = bert
     self.squeeze_embedding = SqueezeEmbedding()
     self.dropout = nn.Dropout(opt.dropout)
     # context (attn_k) and target (attn_q) attention layers
     self.attn_k = Attention(opt.bert_dim,
                             out_dim=opt.hidden_dim,
                             n_head=8,
                             score_function='mlp',
                             dropout=opt.dropout)
     self.attn_q = Attention(opt.bert_dim,
                             out_dim=opt.hidden_dim,
                             n_head=8,
                             score_function='mlp',
                             dropout=opt.dropout)
     self.ffn_c = PositionwiseFeedForward(opt.hidden_dim,
                                          dropout=opt.dropout)
     self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                          dropout=opt.dropout)
     # target-specific attention layer
     self.attn_s1 = Attention(opt.hidden_dim,
                              n_head=8,
                              score_function='mlp',
                              dropout=opt.dropout)
     self.hat = False
     self.last = torch.nn.ModuleList()
     for t in range(self.opt.taskcla):
         self.last.append(nn.Linear(opt.hidden_dim * 3, opt.polarities_dim))
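The ModuleList of per-task linear heads suggests a continual-learning setup where each task gets its own classifier over the concatenated 3 * hidden_dim feature. A hedged usage sketch, with classify_for_task and features as illustrative names:

def classify_for_task(model, features, t):
    # features: (batch, hidden_dim * 3) representation produced by the rest of
    # the forward pass; t: integer task id selecting one head from model.last.
    logits = model.last[t](features)
    return logits.argmax(dim=-1)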
Example #4
 def __init__(self, bert, opt):
     """
     注意力编码器网络, Attentional Encoder Network for Targeted Sentiment Classification
     :param bert:
     :param opt:
     """
     super(AEN_BERT, self).__init__()
     self.opt = opt
     self.bert = bert
     self.squeeze_embedding = SqueezeEmbedding()
     self.dropout = nn.Dropout(opt.dropout)
      # initialize attn_k and attn_q
     self.attn_k = Attention(opt.bert_dim,
                             out_dim=opt.hidden_dim,
                             n_head=8,
                             score_function='mlp',
                             dropout=opt.dropout)
     self.attn_q = Attention(opt.bert_dim,
                             out_dim=opt.hidden_dim,
                             n_head=8,
                             score_function='mlp',
                             dropout=opt.dropout)
      # initialize the PCT layers (ffn_c, ffn_t)
     self.ffn_c = PositionwiseFeedForward(opt.hidden_dim,
                                          dropout=opt.dropout)
     self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                          dropout=opt.dropout)
      # initialize the target-specific attention layer
     self.attn_s1 = Attention(opt.hidden_dim,
                              n_head=8,
                              score_function='mlp',
                              dropout=opt.dropout)
      # define the final output layer
     self.dense = nn.Linear(opt.hidden_dim * 3, opt.polarities_dim)
Example #5
    def __init__(self, bert, opt):
        super(AEN_BERT, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.attn_k = Attention(opt.bert_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.attn_q = Attention(opt.bert_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.ffn_c = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)
        self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)

        self.attn_s1 = Attention(opt.hidden_dim,
                                 n_head=8,
                                 score_function='mlp',
                                 dropout=opt.dropout)

        self.dense = nn.Linear(opt.hidden_dim * 3, opt.polarities_dim)
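For readers unfamiliar with how these pieces fit together, below is a minimal sketch of an AEN-style forward pass, written as a free function over this module. It assumes the Attention layers return an (output, score) pair, that bert returns (sequence_output, pooled_output), and that padding id 0 marks sequence ends; it is an illustration, not the repository's forward method.

import torch

def aen_forward_sketch(model, context_ids, target_ids):
    # Trim padded sequences to their true lengths before encoding.
    ctx_len = torch.sum(context_ids != 0, dim=-1)
    tgt_len = torch.sum(target_ids != 0, dim=-1)
    context = model.squeeze_embedding(context_ids, ctx_len)
    target = model.squeeze_embedding(target_ids, tgt_len)

    context, _ = model.bert(context)      # assumed: (sequence_output, pooled_output)
    context = model.dropout(context)
    target, _ = model.bert(target)
    target = model.dropout(target)

    # Intra-attention over the context, inter-attention context -> target,
    # each followed by a point-wise feed-forward (PCT) layer.
    hc, _ = model.attn_k(context, context)
    hc = model.ffn_c(hc)
    ht, _ = model.attn_q(context, target)
    ht = model.ffn_t(ht)

    # Target-specific attention, then mean-pool and concatenate the three views.
    s1, _ = model.attn_s1(hc, ht)
    pooled = torch.cat((hc.mean(dim=1), s1.mean(dim=1), ht.mean(dim=1)), dim=-1)
    return model.dense(pooled)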
Example #6
 def __init__(self, embedding_matrix, opt):
     super(LCF_GLOVE, self).__init__()
     self.config = BertConfig.from_json_file("utils/bert_config.json")
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     self.mha_global = SelfAttention(self.config, opt)
     self.mha_local = SelfAttention(self.config, opt)
     self.ffn_global = PositionwiseFeedForward(self.opt.embed_dim, dropout=self.opt.dropout)
     self.ffn_local = PositionwiseFeedForward(self.opt.embed_dim, dropout=self.opt.dropout)
     self.mha_local_SA = SelfAttention(self.config, opt)
     self.mha_global_SA = SelfAttention(self.config, opt)
     self.pool = BertPooler(self.config)
     self.dropout = nn.Dropout(opt.dropout)
     self.linear = nn.Linear(opt.embed_dim * 2, opt.embed_dim)
     self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)
Example #7
    def __init__(self, bert, opt):
        super(AEN_BERT_HAT, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.attn_k = Attention(opt.bert_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.attn_q = Attention(opt.bert_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.ffn_c = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)
        self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)

        self.attn_s1 = Attention(opt.hidden_dim,
                                 n_head=8,
                                 score_function='mlp',
                                 dropout=opt.dropout)

        self.gate = torch.nn.Sigmoid()

        self.ec0 = torch.nn.Embedding(self.opt.taskcla, opt.hidden_dim)
        self.ec1 = torch.nn.Embedding(self.opt.taskcla, opt.hidden_dim)
        self.ec2 = torch.nn.Embedding(self.opt.taskcla, opt.hidden_dim)
        self.ec3 = torch.nn.Embedding(self.opt.taskcla, opt.hidden_dim)
        self.ec4 = torch.nn.Embedding(self.opt.taskcla, opt.hidden_dim)

        self.hat = True

        self.last = torch.nn.ModuleList()
        for t in range(self.opt.taskcla):
            self.last.append(nn.Linear(opt.hidden_dim * 3, opt.polarities_dim))
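The ec0..ec4 task embeddings combined with the sigmoid gate are the signature of HAT-style (hard attention to the task) masking: each task id is mapped to per-unit gates in (0, 1) that scale layer activations. A hedged sketch of how such masks are typically computed, with s as an assumed annealing temperature and hat_masks_sketch as an illustrative name:

import torch

def hat_masks_sketch(model, t, s=1.0):
    # Hypothetical helper: one gate vector per gated layer for task id t.
    # Larger s drives the sigmoid toward hard 0/1 gates.
    t = torch.as_tensor([t], dtype=torch.long)
    return [model.gate(s * emb(t))
            for emb in (model.ec0, model.ec1, model.ec2, model.ec3, model.ec4)]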
Example #8
    def __init__(self, embedding_matrix, opt):
        super(AEN_GloVe, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()

        self.attn_k = Attention(opt.embed_dim, out_dim=opt.hidden_dim, n_head=8, score_function='mlp', dropout=opt.dropout)
        self.attn_q = Attention(opt.embed_dim, out_dim=opt.hidden_dim, n_head=8, score_function='mlp', dropout=opt.dropout)
        self.ffn_c = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)
        self.ffn_t = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)

        self.attn_s1 = Attention(opt.hidden_dim, n_head=8, score_function='mlp', dropout=opt.dropout)
        self.dense = nn.Linear(opt.hidden_dim * 3, opt.polarities_dim)

        self.cls_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.cls_asp_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.dense_total = nn.Linear(3 * opt.hidden_dim * 2, opt.polarities_dim)
        self.h_h_proj = nn.Linear(4 * opt.hidden_dim, 3 * opt.hidden_dim)
        self.cls_dense = nn.Linear(3 * opt.hidden_dim, 3)

        self.conv = nn.Conv1d(300, 2 * opt.hidden_dim, 3)
Example #9
    def __init__(self, bert, opt):
        super(AEN_SIMPLE, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.attn_q = Attention(opt.bert_dim,
                                out_dim=opt.hidden_dim,
                                n_head=opt.mha_heads,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)

        self.lstm = nn.LSTM(opt.hidden_dim,
                            opt.lstm_hid,
                            opt.lstm_layer,
                            batch_first=True,
                            bidirectional=opt.lstm_bidir)

        self.dense = nn.Linear(opt.lstm_hid * (2 if opt.lstm_bidir else 1),
                               opt.polarities_dim)
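Relative to the full AEN variants, this simplified constructor keeps only the target-attention branch and routes it through an LSTM before classification. A quick, self-contained shape check of the LSTM-to-dense hand-off, using assumed dimensions:

import torch
import torch.nn as nn

batch, seq, hidden_dim, lstm_hid = 4, 32, 300, 128   # assumed sizes
x = torch.randn(batch, seq, hidden_dim)              # stand-in for the attn_q/ffn_t output
lstm = nn.LSTM(hidden_dim, lstm_hid, 1, batch_first=True, bidirectional=True)
out, _ = lstm(x)                                     # (batch, seq, lstm_hid * 2)
dense = nn.Linear(lstm_hid * 2, 3)                   # 3 = polarities_dim (assumed)
print(dense(out[:, -1, :]).shape)                    # torch.Size([4, 3])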
Example #10
    def __init__(self, embedding_matrix, opt):
        super(TargetedTransformer, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()

        #         self.position_enc = nn.Embedding.from_pretrained(
        #             get_sinusoid_encoding_table(n_position, d_word_vec, padding_idx=0),
        #             freeze=True)
        self.attn_text = BearAttention(opt.embed_dim,
                                       hidden_dim=opt.att_dim,
                                       out_dim=opt.hidden_dim,
                                       n_head=opt.heads,
                                       score_function='dot_product',
                                       dropout=opt.dropout)
        #         self.attn_text = Attention(
        #             opt.embed_dim,
        #             out_dim=opt.hidden_dim,
        #             n_head=8,
        #             score_function='dot_product',
        #             dropout=opt.dropout)

        self.attn_text2 = Attention(opt.att_dim,
                                    hidden_dim=opt.att_dim,
                                    out_dim=opt.hidden_dim,
                                    n_head=opt.heads,
                                    score_function='dot_product',
                                    dropout=opt.dropout)

        self.attn_aspect = BearAttention(opt.embed_dim,
                                         hidden_dim=opt.att_dim,
                                         out_dim=opt.hidden_dim,
                                         n_head=opt.heads,
                                         score_function='dot_product',
                                         dropout=opt.dropout)

        self.attn_aspect2 = Attention(opt.att_dim,
                                      hidden_dim=opt.att_dim,
                                      out_dim=opt.hidden_dim,
                                      n_head=opt.heads,
                                      score_function='dot_product',
                                      dropout=opt.dropout)

        self.ffn_c = PositionwiseFeedForward(opt.att_dim, dropout=opt.dropout)

        self.ffn_c2 = PositionwiseFeedForward(opt.hidden_dim,
                                              dropout=opt.dropout)

        self.ffn_t = PositionwiseFeedForward(opt.att_dim, dropout=opt.dropout)

        self.ffn_t2 = PositionwiseFeedForward(opt.hidden_dim,
                                              dropout=opt.dropout)

        self.attn_s1 = Attention(opt.hidden_dim,
                                 n_head=opt.heads,
                                 score_function='dot_product',
                                 dropout=opt.dropout)
        #         self.layer_norm1 = nn.LayerNorm(opt.hidden_dim)
        #         self.layer_norm2 = nn.LayerNorm(opt.hidden_dim)

        #         self.lstm =  DynamicLSTM(
        #             opt.hidden_dim,
        #             opt.hidden_dim,
        #             num_layers=1,
        #             only_use_last_hidden_state=True,
        #             batch_first=True)
        #         self.lstm = nn.LSTM(
        #                 opt.hidden_dim, opt.hidden_dim, num_layers=1,
        #                 bias=True, batch_first=True, dropout=opt.dropout, bidirectional=False)
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)