Example #1
 def __init__(self, embedding_matrix, opt):
     super(RAM, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.bi_lstm_context = DynamicLSTM(opt.embed_dim,
                                        opt.hidden_dim,
                                        num_layers=1,
                                        batch_first=True,
                                        bidirectional=True)
     self.bi_lstm_aspect = DynamicLSTM(opt.embed_dim,
                                       opt.hidden_dim,
                                       num_layers=1,
                                       batch_first=True,
                                       bidirectional=True)
     self.attention = Attention(opt.hidden_dim * 2, score_function='mlp')
     self.gru_cell = nn.GRUCell(opt.hidden_dim * 2, opt.hidden_dim * 2)
     self.dense = nn.Linear(opt.hidden_dim * 2, opt.polarities_dim)
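The GRUCell above drives RAM's multi-hop recurrent attention: each hop attends over the BiLSTM context memory and feeds the weighted summary into the GRU to refine an episode vector. A minimal, self-contained sketch of that loop (the dot-product scoring and the names memory/episode are illustrative assumptions; the repo scores hops with its mlp Attention instead):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    hidden = 8                            # stands in for opt.hidden_dim * 2
    gru_cell = nn.GRUCell(hidden, hidden)

    memory = torch.randn(4, 10, hidden)   # (batch, seq_len, hidden): BiLSTM context output
    episode = torch.zeros(4, hidden)      # the evolving aspect representation

    for _ in range(3):                    # a fixed number of hops
        scores = torch.bmm(memory, episode.unsqueeze(2)).squeeze(2)   # (batch, seq_len)
        weights = F.softmax(scores, dim=1)
        summary = torch.bmm(weights.unsqueeze(1), memory).squeeze(1)  # weighted memory read
        episode = gru_cell(summary, episode)                          # GRU refines the episode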
Example #2
 def __init__(self, embedding_matrix, opt):
     super(LSTM, self).__init__()
     self.embed = nn.Embedding.from_pretrained(
         torch.FloatTensor(embedding_matrix))
     self.lstm = DynamicLSTM(opt.embed_dim,
                             opt.hidden_dim,
                             num_layers=1,
                             batch_first=True)
     self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
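Every example on this page delegates its recurrence to DynamicLSTM, a repo-local wrapper whose definition is not shown. Its usual job is to pack variable-length batches so padding never enters the recurrence; a minimal sketch under that assumption:

    import torch
    import torch.nn as nn
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

    class PackedLSTM(nn.Module):
        """Sketch of a DynamicLSTM-style wrapper (assumed behavior)."""
        def __init__(self, input_size, hidden_size, **kwargs):
            super().__init__()
            self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True, **kwargs)

        def forward(self, x, lengths):
            # pack by true lengths so padded positions are skipped
            packed = pack_padded_sequence(x, lengths.cpu(), batch_first=True,
                                          enforce_sorted=False)
            out, (h, c) = self.lstm(packed)
            out, _ = pad_packed_sequence(out, batch_first=True)
            return out, (h, c)

    x = torch.randn(2, 5, 16)             # padded batch of embeddings
    lengths = torch.tensor([5, 3])        # true sequence lengths
    out, (h, c) = PackedLSTM(16, 32)(x, lengths)
    print(out.shape)                      # torch.Size([2, 5, 32])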
Example #3
 def __init__(self, embedding_matrix, opt):
     super(PWCN_DEP, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     self.embed_dropout = nn.Dropout(opt.dropout)
     self.txt_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
     self.proximity = DependencyProximity(opt)
     self.convs3 = nn.Conv1d(2*opt.hidden_dim, 2*opt.hidden_dim, 3, padding=1)   
     self.fc = nn.Linear(2*opt.hidden_dim, opt.polarities_dim)
Example #4
 def __init__(self, embedding_matrix, opt):
     super(IAN, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.lstm_context = DynamicLSTM(opt.embed_dim,
                                     opt.hidden_dim,
                                     num_layers=1,
                                     batch_first=True)
     self.lstm_aspect = DynamicLSTM(opt.embed_dim,
                                    opt.hidden_dim,
                                    num_layers=1,
                                    batch_first=True)
     self.attention_aspect = Attention(opt.hidden_dim,
                                       score_function='bi_linear')
     self.attention_context = Attention(opt.hidden_dim,
                                        score_function='bi_linear')
     self.dense = nn.Linear(opt.hidden_dim * 2, opt.polarities_dim)
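IAN attends the context states with the pooled aspect representation (and vice versa) using a bi_linear score. Assuming the standard bilinear form score(k, q) = k W q^T, a sketch of one direction:

    import torch
    import torch.nn.functional as F

    hidden = 8
    W = torch.randn(hidden, hidden)       # the learned bilinear weight

    k = torch.randn(4, 10, hidden)        # keys: context LSTM states
    q = torch.randn(4, 1, hidden)         # query: mean-pooled aspect state

    scores = torch.bmm(k.matmul(W), q.transpose(1, 2)).squeeze(2)  # (batch, seq_len)
    weights = F.softmax(scores, dim=1)
    attended = torch.bmm(weights.unsqueeze(1), k).squeeze(1)       # (batch, hidden)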
Example #5
 def __init__(self, embedding, opt):
     super(AT_LSTM, self).__init__()
     self.embed = nn.Embedding.from_pretrained(torch.FloatTensor(embedding))
     self.lstm = DynamicLSTM(opt.embed_dim,
                             opt.hidden_dim,
                             num_layers=1,
                             batch_first=True)
     self.attention = Attention(opt.hidden_dim,
                                out_dim=opt.polarities_dim,
                                score_function='mlp')
Example #6
 def __init__(self, embedding_matrix, opt):
     super(AOA, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.ctx_lstm = DynamicLSTM(opt.embed_dim,
                                 opt.hidden_dim,
                                 num_layers=1,
                                 batch_first=True,
                                 bidirectional=True)
     self.asp_lstm = DynamicLSTM(opt.embed_dim,
                                 opt.hidden_dim,
                                 num_layers=1,
                                 batch_first=True,
                                 bidirectional=True)
     self.dense = nn.Linear(2 * opt.hidden_dim, opt.polarities_dim)
     self.align_mat = None
     self.col_align = None
     self.row_align = None
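The three placeholders cache AOA's alignment tensors. Assuming the usual attention-over-attention computation, they are derived from a single dot-product matrix between context and aspect states:

    import torch
    import torch.nn.functional as F

    ctx = torch.randn(4, 10, 16)   # context BiLSTM states (batch, n, 2*hidden)
    asp = torch.randn(4, 3, 16)    # aspect BiLSTM states (batch, m, 2*hidden)

    align_mat = torch.bmm(ctx, asp.transpose(1, 2))   # (batch, n, m)
    col_align = F.softmax(align_mat, dim=1)           # softmax over context positions
    row_align = F.softmax(align_mat, dim=2)           # softmax over aspect positions

    # attention over attention: average the row attention, reweight the columns
    asp_importance = row_align.mean(dim=1, keepdim=True)                # (batch, 1, m)
    ctx_weights = torch.bmm(col_align, asp_importance.transpose(1, 2))  # (batch, n, 1)
    final = torch.bmm(ctx.transpose(1, 2), ctx_weights).squeeze(2)      # (batch, 2*hidden)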
Example #7
    def __init__(self, embedding_matrix, opt):
        super(AELSTM, self).__init__()
        self.opt = opt
        self.n_head = 1
        self.embed_dim = opt.embed_dim
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float), freeze=False)

        self.lstm = DynamicLSTM(opt.embed_dim*2, opt.hidden_dim, num_layers=1, batch_first=True)
        self.attention = Attention(opt.hidden_dim, score_function='mlp')
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
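Note freeze=False: unlike most examples here, AELSTM fine-tunes the pretrained embeddings during training. The flag simply toggles requires_grad on the embedding weight:

    import torch
    import torch.nn as nn

    matrix = torch.randn(100, 50)  # toy pretrained embedding matrix

    frozen = nn.Embedding.from_pretrained(matrix)               # freeze=True is the default
    tuned = nn.Embedding.from_pretrained(matrix, freeze=False)  # updated by the optimizer

    print(frozen.weight.requires_grad)  # False
    print(tuned.weight.requires_grad)   # True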
Example #8
    def __init__(self, embedding_matrix, opt):
        super(MGAN, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.ctx_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.asp_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.location = LocationEncoding(opt)
        self.w_a2c = nn.Parameter(torch.Tensor(2*opt.hidden_dim, 2*opt.hidden_dim))
        self.w_c2a = nn.Parameter(torch.Tensor(2*opt.hidden_dim, 2*opt.hidden_dim))
        self.alignment = AlignmentMatrix(opt)
        self.dense = nn.Linear(8*opt.hidden_dim, opt.polarities_dim)
        self.dense_total = nn.Linear(8*opt.hidden_dim*2, opt.polarities_dim)

        self.cls_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.cls_asp_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.h_h_proj = nn.Linear(4*opt.hidden_dim,8*opt.hidden_dim)
        self.cls_dense = nn.Linear(8*opt.hidden_dim,3)
        
        self.conv = nn.Conv1d(300, 2*opt.hidden_dim, 3)  # 300: hard-coded embedding dim (GloVe-300, presumably)
Example #9
 def __init__(self, embedding_matrix, opt):
     super(LSTM, self).__init__()
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.lstm = DynamicLSTM(opt.embed_dim,
                             opt.hidden_dim,
                             bidirectional=True,
                             num_layers=1,
                             batch_first=True)
     self.fc = nn.Linear(opt.hidden_dim * 2, opt.polarities_dim)  # 2x: the LSTM above is bidirectional
Example #10
    def __init__(self, bert, opt):
        super(SDGCN, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.lstm_context = DynamicLSTM(opt.bert_dim,
                                        opt.hidden_dim,
                                        num_layers=1,
                                        batch_first=True)
        self.lstm_aspect = DynamicLSTM(opt.bert_dim,
                                       opt.hidden_dim,
                                       num_layers=1,
                                       batch_first=True)

        self.attention_aspect = Bilinear_Attention(opt.hidden_dim)
        self.attention_context = Bilinear_Attention(opt.hidden_dim)

        self.dense = nn.Linear(opt.hidden_dim * 2, opt.polarities_dim)
Example #11
 def __init__(self, embedding_matrix, opt):
     super(SELA, self).__init__()
     self.opt = opt
     self.fc_dim = 512
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.ctx_lstm = DynamicLSTM(opt.embed_dim,
                                 opt.hidden_dim,
                                 num_layers=1,
                                 batch_first=True,
                                 bidirectional=True)
     self.ctxR_lstm = DynamicLSTM(opt.embed_dim,
                                  opt.hidden_dim,
                                  num_layers=1,
                                  batch_first=True,
                                  bidirectional=True)
     self.inputdim = 3 * 2 * opt.hidden_dim
     self.dense = nn.Sequential(nn.Linear(self.inputdim, self.fc_dim),
                                nn.Linear(self.fc_dim, self.fc_dim),
                                nn.Linear(self.fc_dim, opt.class_dim))
Example #12
 def __init__(self, embedding_matrix, opt):
     super(SynGCN, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     # print('self.embed ->', type(self.embed), )
     # self.embeddings = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
     self.text_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
     self.gc1 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
     self.gc2 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
     self.fc = nn.Linear(2*opt.hidden_dim, opt.polarities_dim)
     self.text_embed_dropout = nn.Dropout(0.3)
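GraphConvolution is the repo's GCN layer over a dependency-tree adjacency matrix; its definition is not shown. A minimal sketch of the layer as it commonly appears in ABSA GCNs (degree-normalized neighborhood average plus a linear map; the repo's exact variant may differ):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class GraphConvolution(nn.Module):
        """Sketch: h' = relu(A h W / degree), A including self-loops."""
        def __init__(self, in_dim, out_dim):
            super().__init__()
            self.weight = nn.Parameter(torch.empty(in_dim, out_dim))
            nn.init.xavier_uniform_(self.weight)

        def forward(self, x, adj):
            # x: (batch, n, in_dim), adj: (batch, n, n)
            support = x.matmul(self.weight)
            degree = adj.sum(dim=2, keepdim=True).clamp(min=1)
            return F.relu(torch.bmm(adj, support) / degree)

    x = torch.randn(2, 6, 16)
    adj = torch.eye(6).repeat(2, 1, 1)    # self-loops only, for the demo
    print(GraphConvolution(16, 16)(x, adj).shape)   # torch.Size([2, 6, 16])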
Example #13
 def __init__(self, embedding_matrix_list, opt):
     super(BILSTM, self).__init__()
     self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix_list[0], dtype=torch.float), freeze=True)
     # self.input_drop = nn.Dropout(bilstm_config['input_drop'])
     # NOTE: bilstm_config is presumably a module-level config dict (not shown in this example)
     self.bilstm = DynamicLSTM(bilstm_config['embed_dim'],
                             bilstm_config['hidden_dim'],
                             num_layers=bilstm_config['num_layers'],
                             batch_first=True,
                             bidirectional=True
                             )           
     self.dense = nn.Linear(bilstm_config['hidden_dim'] * 2, opt.polarities_dim)
Example #14
 def __init__(self, config, opt):
     super(MGAN, self).__init__()
     self.opt = opt
     self.bert = BertModel(config)
     self.ctx_lstm = DynamicLSTM(opt.embed_dim,
                                 opt.hidden_dim,
                                 num_layers=1,
                                 batch_first=True,
                                 bidirectional=True)
     self.asp_lstm = DynamicLSTM(opt.embed_dim,
                                 opt.hidden_dim,
                                 num_layers=1,
                                 batch_first=True,
                                 bidirectional=True)
     self.location = LocationEncoding(opt)
     self.w_a2c = nn.Parameter(
         torch.Tensor(2 * opt.hidden_dim, 2 * opt.hidden_dim))
     self.w_c2a = nn.Parameter(
         torch.Tensor(2 * opt.hidden_dim, 2 * opt.hidden_dim))
     self.alignment = AlignmentMatrix(opt)
     self.dense = nn.Linear(8 * opt.hidden_dim, opt.output_dim)
Example #15
 def __init__(self, embedding_matrix, opt):
     super(TD_LSTM, self).__init__()
     self.embed = nn.Embedding.from_pretrained(torch.tensor(
         embedding_matrix, dtype=torch.float),
                                               freeze=False)
     self.lstm_entity1 = DynamicLSTM(opt.embed_dim,
                                     opt.hidden_dim * 2,
                                     num_layers=1,
                                     batch_first=True,
                                     bidirectional=True)
     self.lstm_entity2 = DynamicLSTM(opt.embed_dim,
                                     opt.hidden_dim * 2,
                                     num_layers=1,
                                     batch_first=True,
                                     bidirectional=True)
     self.lstm_context = DynamicLSTM(opt.embed_dim,
                                     opt.hidden_dim * 2,
                                     num_layers=1,
                                     batch_first=True,
                                     bidirectional=True)
     self.dense = nn.Linear(opt.hidden_dim * 6, opt.hidden_dim)
Example #16
 def __init__(self, embedding_matrix, opt):
     super(BiLSTM, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     self.dropout = nn.Dropout(0.5)
     self.clause_cnn = ClauseCNN(opt)
     self.bilstm = DynamicLSTM(opt.hidden_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
     self.emotion_fc = nn.Linear(2 * opt.hidden_dim, 2 * opt.MLP_out_dim)
     self.cause_fc = nn.Linear(2 * opt.hidden_dim, 2 * opt.MLP_out_dim)
     self.pair_biaffine = Biaffine(opt, opt.MLP_out_dim, opt.MLP_out_dim, 1, bias=(True, False))
     self.emotion_fc1 = nn.Linear(opt.MLP_out_dim, opt.polarities_dim)
     self.cause_fc1 = nn.Linear(opt.MLP_out_dim, opt.polarities_dim)
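The Biaffine scorer pairs emotion-clause and cause-clause vectors into a clause-by-clause score table. The repo's class is not shown; a sketch of the standard biaffine form that the bias=(True, False) signature suggests (a bias feature appended to the first input only):

    import torch
    import torch.nn as nn

    class BiaffineSketch(nn.Module):
        """score[b, i, j] = x1[b, i] @ U @ x2[b, j] (assumed form)."""
        def __init__(self, dim1, dim2, bias=(True, False)):
            super().__init__()
            self.bias = bias
            self.U = nn.Parameter(torch.randn(dim1 + bias[0], dim2 + bias[1]))

        def forward(self, x1, x2):
            if self.bias[0]:
                x1 = torch.cat([x1, x1.new_ones(*x1.shape[:-1], 1)], dim=-1)
            if self.bias[1]:
                x2 = torch.cat([x2, x2.new_ones(*x2.shape[:-1], 1)], dim=-1)
            return torch.bmm(x1.matmul(self.U), x2.transpose(1, 2))

    x1 = torch.randn(2, 5, 8)   # e.g. emotion-clause representations
    x2 = torch.randn(2, 5, 8)   # e.g. cause-clause representations
    print(BiaffineSketch(8, 8)(x1, x2).shape)   # torch.Size([2, 5, 5])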
Example #17
 def __init__(self, opt):
     super(Bert_MGAN, self).__init__()
     self.opt = opt
     self.bert_embedding = bert_base.Bert_Base(opt)
     self.ctx_lstm = DynamicLSTM(opt.embed_dim,
                                 opt.hidden_dim,
                                 num_layers=1,
                                 batch_first=True,
                                 bidirectional=True)
     self.asp_lstm = DynamicLSTM(opt.embed_dim,
                                 opt.hidden_dim,
                                 num_layers=1,
                                 batch_first=True,
                                 bidirectional=True)
     self.location = LocationEncoding(opt)
     self.w_a2c = nn.Parameter(
         torch.Tensor(2 * opt.hidden_dim, 2 * opt.hidden_dim))
     self.w_c2a = nn.Parameter(
         torch.Tensor(2 * opt.hidden_dim, 2 * opt.hidden_dim))
     self.alignment = AlignmentMatrix(opt)
     self.dense = nn.Linear(8 * opt.hidden_dim, opt.polarities_dim)
Example #18
 def __init__(self, embedding_matrix, opt):
     super(ATAE_LSTM, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.squeeze_embedding = SqueezeEmbedding()
     self.lstm = DynamicLSTM(opt.embed_dim * 2,
                             opt.hidden_dim,
                             num_layers=1,
                             batch_first=True)
     self.attention = NoQueryAttention(opt.hidden_dim + opt.embed_dim,
                                       score_function='bi_linear')
     self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
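The opt.embed_dim * 2 LSTM input is the ATAE trick: every word embedding is concatenated with the (tiled) aspect embedding before the recurrence. A sketch, assuming a mean-pooled aspect:

    import torch

    embed_dim, batch, seq_len = 50, 4, 12
    words = torch.randn(batch, seq_len, embed_dim)        # embedded sentence
    aspect = torch.randn(batch, 3, embed_dim)             # embedded aspect phrase

    aspect_pooled = aspect.mean(dim=1, keepdim=True)      # (batch, 1, embed_dim)
    aspect_tiled = aspect_pooled.expand(-1, seq_len, -1)  # repeat along the sequence
    lstm_input = torch.cat([words, aspect_tiled], dim=-1)
    print(lstm_input.shape)   # torch.Size([4, 12, 100]) -> matches opt.embed_dim * 2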
Example #19
    def __init__(self, embedding_matrix, opt):
        super(MIMN, self).__init__()
        self.opt = opt
        self.img_extractor = nn.Sequential(
            *list(Models.resnet18(pretrained=True).children())[:-1])  # Models: presumably torchvision.models; drops the final fc layer

        self.embed = nn.Embedding.from_pretrained(torch.tensor(
            embedding_matrix, dtype=torch.float),
                                                  freeze=False)
        self.bi_lstm_context = DynamicLSTM(opt.embed_dim,
                                           opt.hidden_dim,
                                           num_layers=1,
                                           batch_first=True,
                                           bidirectional=True)
        self.bi_lstm_aspect = DynamicLSTM(opt.embed_dim,
                                          opt.hidden_dim,
                                          num_layers=1,
                                          batch_first=True,
                                          bidirectional=True)
        self.bi_lstm_img = DynamicLSTM(opt.embed_dim_img,
                                       opt.hidden_dim,
                                       num_layers=1,
                                       batch_first=True,
                                       bidirectional=True)

        self.attention_text = Attention(opt.hidden_dim * 2,
                                        score_function='mlp')
        self.attention_img = Attention(opt.hidden_dim * 2,
                                       score_function='mlp')
        self.attention_text2img = Attention(opt.hidden_dim * 2,
                                            score_function='mlp')
        self.attention_img2text = Attention(opt.hidden_dim * 2,
                                            score_function='mlp')

        self.gru_cell_text = nn.GRUCell(opt.hidden_dim * 2, opt.hidden_dim * 2)
        self.gru_cell_img = nn.GRUCell(opt.hidden_dim * 2, opt.hidden_dim * 2)

        self.bn = nn.BatchNorm1d(opt.hidden_dim * 2, affine=False)
        self.fc = nn.Linear(opt.hidden_dim * 4, opt.polarities_dim)
Example #20
    def __init__(self, embedding_matrix, opt):
        super(AEN_GloVe, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()

        self.attn_k = Attention(opt.embed_dim, out_dim=opt.hidden_dim, n_head=8, score_function='mlp', dropout=opt.dropout)
        self.attn_q = Attention(opt.embed_dim, out_dim=opt.hidden_dim, n_head=8, score_function='mlp', dropout=opt.dropout)
        self.ffn_c = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)
        self.ffn_t = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)

        self.attn_s1 = Attention(opt.hidden_dim, n_head=8, score_function='mlp', dropout=opt.dropout)
        self.dense = nn.Linear(opt.hidden_dim*3, opt.polarities_dim)


        self.cls_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.cls_asp_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.dense_total = nn.Linear(3*opt.hidden_dim*2, opt.polarities_dim)
        self.h_h_proj = nn.Linear(4*opt.hidden_dim,3*opt.hidden_dim)
        self.cls_dense = nn.Linear(3*opt.hidden_dim,3)
        
        self.conv = nn.Conv1d(300, 2*opt.hidden_dim, 3)  # 300: hard-coded embedding dim (GloVe-300, presumably)
Example #21
    def __init__(self, embeddings, opt):
        super().__init__()
        self.opt = opt
        self.in_dim = opt.embed_dim
        self.emb = nn.Embedding.from_pretrained(
            torch.tensor(embeddings, dtype=torch.float))
        self.mem_dim = 2 * opt.hidden_dim  # 200

        self.input_dropout = nn.Dropout(opt.input_dropout)

        # rnn layer
        if not self.opt.no_word_rnn:
            self.input_W_R = nn.Linear(self.in_dim, opt.rnn_hidden)
            self.w_rnn = DynamicLSTM(opt.embed_dim,
                                     opt.hidden_dim,
                                     num_layers=opt.rnn_layer,
                                     batch_first=True,
                                     bidirectional=True)  # (32,75,45,200)
            self.in_dim = opt.hidden_dim * 2
            self.w_rnn_drop = nn.Dropout(
                opt.rnn_word_dropout)  # use on last layer output

        if not self.opt.no_clause_rnn:
            self.input_W_R = nn.Linear(self.in_dim, opt.rnn_hidden)
            self.c_rnn = DynamicLSTM(self.in_dim,
                                     opt.hidden_dim,
                                     num_layers=opt.rnn_layer,
                                     batch_first=True,
                                     bidirectional=True)  # (32,75,45,200)
            self.in_dim = opt.hidden_dim * 2
            self.c_rnn_drop = nn.Dropout(
                opt.rnn_clause_dropout)  # use on last layer output

        self.in_drop = nn.Dropout(opt.input_dropout)

        # self.emo_trans = nn.Linear(self.in_dim, 100)

        self.clause_encode = Attention(2 * opt.hidden_dim, 1, opt.max_sen_len,
                                       opt)  # (32,75,200)
Example #22
 def __init__(self, embedding_matrix, opt):
     super(CAER, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.text_lstm = DynamicLSTM(opt.embed_dim,
                                  opt.embed_dim,
                                  num_layers=1,
                                  batch_first=True,
                                  bidirectional=True)
     self.text_dfc = nn.Linear(2 * opt.embed_dim, 1)
     self.text_fc = nn.Linear(opt.embed_dim, 1)
     self.text_embed_dropout = nn.Dropout(0.3)
     self.fc_dropout = nn.Dropout(0.3)
Example #23
 def __init__(self, config, opt):
     super(TNet_LF, self).__init__()
     print("this is TNet_LF model")
     self.bert = BertModel(config)
     self.position = Absolute_Position_Embedding(opt)
     self.opt = opt
      D = opt.embed_dim  # word-embedding dimension
      C = opt.output_dim  # number of classes
     L = opt.max_seq_length
     HD = opt.hidden_dim
     self.lstm1 = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              bidirectional=True)
     self.lstm2 = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              bidirectional=True)
     self.convs3 = nn.Conv1d(2 * HD, 50, 3, padding=1)
     self.fc1 = nn.Linear(4 * HD, 2 * HD)
     self.fc = nn.Linear(50, C)
Example #24
 def __init__(self, config, opt):
     super(RAM, self).__init__()
     self.opt = opt
     # self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     self.bert = BertModel(config)
     self.bi_lstm_context = DynamicLSTM(opt.embed_dim,
                                        opt.hidden_dim,
                                        num_layers=1,
                                        batch_first=True,
                                        bidirectional=True)
     self.att_linear = nn.Linear(opt.hidden_dim * 2 + 1 + opt.embed_dim * 2,
                                 1)
     self.gru_cell = nn.GRUCell(opt.hidden_dim * 2 + 1, opt.embed_dim)
     self.dense = nn.Linear(opt.embed_dim, opt.output_dim)
Example #25
 def __init__(self, embedding_matrix, opt):
     super(LSTM_Attention, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(torch.tensor(
         embedding_matrix, dtype=torch.float),
                                               freeze=False)
     self.lstm_context = DynamicLSTM(opt.embed_dim,
                                     opt.hidden_dim,
                                     num_layers=1,
                                     batch_first=True)
     self.attention_context = SelfAttention(opt.hidden_dim,
                                            score_function='bi_linear')
     # self.pooling = nn.AvgPool1d(kernel_size=5)
     self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
Example #26
 def __init__(self, config, opt, _type='c'):
     super(Cabasc, self).__init__()
     self.opt = opt
     self.type = _type
     self.bert = BertModel(config)
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
     self.linear1 = nn.Linear(3 * opt.embed_dim, opt.embed_dim)
     self.linear2 = nn.Linear(opt.embed_dim, 1, bias=False)
     self.mlp = nn.Linear(opt.embed_dim, opt.embed_dim)
     self.dense = nn.Linear(opt.embed_dim, opt.output_dim)
     # context attention layer
     self.rnn_l = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              rnn_type='GRU')
     self.rnn_r = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              rnn_type='GRU')
     self.mlp_l = nn.Linear(opt.hidden_dim, 1)
     self.mlp_r = nn.Linear(opt.hidden_dim, 1)
Example #27
    def __init__(self, bert, opt):
        super(ATAE_LSTM, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.lstm = DynamicLSTM(opt.bert_dim,
                                opt.hidden_dim,
                                num_layers=1,
                                batch_first=True)
        self.attention = NoQueryAttention(opt.hidden_dim + opt.bert_dim,
                                          score_function='bi_linear')
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
Example #28
 def __init__(self, embedding, opt):
     super(LRG, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(torch.FloatTensor(embedding))
     self.bi_all_ctx = DynamicLSTM(opt.embed_dim,
                                   opt.hidden_dim,
                                   num_layers=1,
                                   batch_first=True,
                                   bidirectional=True)
     self.bi_left_ctx = DynamicLSTM(opt.embed_dim,
                                    opt.hidden_dim,
                                    num_layers=1,
                                    batch_first=True,
                                    bidirectional=True)
     self.bi_right_ctx = DynamicLSTM(opt.embed_dim,
                                     opt.hidden_dim,
                                     num_layers=1,
                                     batch_first=True,
                                     bidirectional=True)
     self.bi_lstm_aspect = DynamicLSTM(opt.embed_dim,
                                       opt.hidden_dim,
                                       num_layers=1,
                                       batch_first=True,
                                       bidirectional=True)
     self.attention = Attention(opt.hidden_dim * 2, score_function='mlp')
     self.left_attention = Attention(opt.hidden_dim * 2,
                                     score_function='mlp')
     self.right_attention = Attention(opt.hidden_dim * 2,
                                      score_function='mlp')
     self.left_gru_cell = nn.GRUCell(opt.hidden_dim * 2, opt.hidden_dim * 2)
     self.right_gru_cell = nn.GRUCell(opt.hidden_dim * 2,
                                      opt.hidden_dim * 2)
     self.w1 = nn.Linear(opt.hidden_dim * 2, opt.hidden_dim * 2)
     self.w2 = nn.Linear(opt.hidden_dim * 2, opt.hidden_dim * 2)
     self.w3 = nn.Linear(opt.hidden_dim * 2, opt.hidden_dim * 2)
     self.dense = nn.Linear(opt.hidden_dim * 2, opt.polarities_dim)
Example #29
 def __init__(self, embedding_matrix, opt):
     super(TNet_LF, self).__init__()
     print("this is TNet_LF model")
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.position = Absolute_Position_Embedding(opt)
     self.opt = opt
      D = opt.embed_dim  # word-embedding dimension
      C = opt.polarities_dim  # number of classes
     L = opt.max_seq_len
     HD = opt.hidden_dim
     self.lstm1 = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              bidirectional=True)
     self.lstm2 = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              bidirectional=True)
     self.convs3 = nn.Conv1d(2 * HD, 50, 3, padding=1)
     self.fc1 = nn.Linear(4 * HD, 2 * HD)
     self.fc = nn.Linear(50, C)
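Both TNet_LF examples end with Conv1d(2 * HD, 50, 3, padding=1) followed by Linear(50, C), i.e. a convolution plus max-over-time pooling as the classification head. A sketch of that tail, assuming the standard TNet layout:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    HD, C, batch, seq_len = 32, 3, 4, 20
    conv = nn.Conv1d(2 * HD, 50, 3, padding=1)
    fc = nn.Linear(50, C)

    h = torch.randn(batch, seq_len, 2 * HD)       # position-weighted BiLSTM features
    z = F.relu(conv(h.transpose(1, 2)))           # Conv1d expects (batch, channels, len)
    z = F.max_pool1d(z, z.shape[2]).squeeze(2)    # max over time -> (batch, 50)
    logits = fc(z)
    print(logits.shape)   # torch.Size([4, 3])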
Example #30
 def __init__(self, embedding_matrix, opt, _type='c'):
     super(Cabasc, self).__init__()
     self.opt = opt
     self.type = _type
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
     self.linear1 = nn.Linear(3 * opt.embed_dim, opt.embed_dim)
     self.linear2 = nn.Linear(opt.embed_dim, 1, bias=False)
     self.mlp = nn.Linear(opt.embed_dim, opt.embed_dim)
     self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)
     # context attention layer
     self.rnn_l = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              rnn_type='GRU')
     self.rnn_r = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              rnn_type='GRU')
     self.mlp_l = nn.Linear(opt.hidden_dim, 1)
     self.mlp_r = nn.Linear(opt.hidden_dim, 1)