Example #1
    def __init__(self, embedding_matrix_list, opt):
        super(BILSTM_SSA, self).__init__()
        self.embed0 = nn.Embedding.from_pretrained(torch.tensor(
            embedding_matrix_list[0], dtype=torch.float),
                                                   freeze=True)
        self.input_drop = nn.Dropout(bilstm_ssa_config['input_drop'])
        self.lstm = DynamicLSTM(bilstm_ssa_config['embed_dim'],
                                bilstm_ssa_config['hidden_dim'],
                                num_layers=bilstm_ssa_config['num_layers'],
                                batch_first=True,
                                bidirectional=True,
                                dropout=bilstm_ssa_config['lstm_drop'])

        self.self_attn = _SSA(input_size=bilstm_ssa_config['hidden_dim'] * 2,
                              hidden_size=bilstm_ssa_config['hidden_dim'],
                              n_hop=bilstm_ssa_config['n_hop'])

        self.drop = nn.Dropout(0.5)
        self.dense = nn.Linear(bilstm_ssa_config['hidden_dim'] * 2,
                               opt.polarities_dim)
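Every snippet on this page builds its recurrent layer through a DynamicLSTM helper rather than calling nn.LSTM directly. The helper itself is repo-specific, but it typically stands for the usual pack/pad pattern for variable-length batches. The class below is a minimal sketch of that assumed behaviour, not the code of any of the repos quoted here:

import torch
import torch.nn as nn

class DynamicLSTM(nn.Module):
    """Sketch of a pack/pad wrapper around nn.LSTM for variable-length,
    batch-first inputs (assumed behaviour; the real helper may differ)."""

    def __init__(self, input_size, hidden_size, num_layers=1,
                 batch_first=True, bidirectional=False, dropout=0.0):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers=num_layers,
                            batch_first=batch_first,
                            bidirectional=bidirectional, dropout=dropout)

    def forward(self, x, lengths):
        # Sort by true length, pack, run the LSTM, unpack, restore batch order.
        lengths_sorted, idx = torch.sort(lengths, descending=True)
        packed = nn.utils.rnn.pack_padded_sequence(
            x[idx], lengths_sorted.cpu(), batch_first=True)
        packed_out, (h, c) = self.lstm(packed)
        out, _ = nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=True)
        _, inv = torch.sort(idx)
        return out[inv], (h[:, inv], c[:, inv])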
Example #2
 def __init__(self, embedding_matrix, opt):
     super(BiLSTM, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.dropout = nn.Dropout(0.5)
     self.clause_cnn = ClauseCNN(opt)
     self.bilstm = DynamicLSTM(opt.hidden_dim,
                               opt.hidden_dim,
                               num_layers=1,
                               batch_first=True,
                               bidirectional=True)
     self.emotion_fc = nn.Linear(2 * opt.hidden_dim, 2 * opt.MLP_out_dim)
     self.cause_fc = nn.Linear(2 * opt.hidden_dim, 2 * opt.MLP_out_dim)
     self.pair_biaffine = Biaffine(opt,
                                   opt.MLP_out_dim,
                                   opt.MLP_out_dim,
                                   1,
                                   bias=(True, False))
     self.emotion_fc1 = nn.Linear(opt.MLP_out_dim, opt.polarities_dim)
     self.cause_fc1 = nn.Linear(opt.MLP_out_dim, opt.polarities_dim)
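The Biaffine module in Example #2 scores emotion-cause clause pairs; its implementation is not shown on this page. As a rough sketch, a biaffine scorer in the Dozat-and-Manning style computes a bilinear form between the two clause representations, with optional bias terms controlled per input (the signature below is simplified from the call above, and the names are assumptions):

import torch
import torch.nn as nn

class BiaffineSketch(nn.Module):
    """Minimal biaffine pair scorer (assumed; the repo's Biaffine also takes opt)."""

    def __init__(self, in1_dim, in2_dim, out_dim=1, bias=(True, False)):
        super().__init__()
        self.bias = bias
        # Appending a constant 1 to an input lets the bilinear form absorb
        # linear and constant terms for that side.
        self.U = nn.Parameter(torch.empty(out_dim,
                                          in1_dim + int(bias[0]),
                                          in2_dim + int(bias[1])))
        nn.init.xavier_uniform_(self.U)

    def forward(self, x1, x2):
        # x1: (batch, n1, in1_dim), x2: (batch, n2, in2_dim)
        if self.bias[0]:
            x1 = torch.cat([x1, x1.new_ones(*x1.shape[:-1], 1)], dim=-1)
        if self.bias[1]:
            x2 = torch.cat([x2, x2.new_ones(*x2.shape[:-1], 1)], dim=-1)
        # Pairwise scores of shape (batch, out_dim, n1, n2).
        return torch.einsum('bxi,oij,byj->boxy', x1, self.U, x2)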
Example #3
 def __init__(self, _, hidden_dim, batch_size, max_length, n_class, n_tag, embedding_matrix):
     super(Joint_model, self).__init__()
     self.hidden_dim = hidden_dim
     self.batch_size = batch_size
     self.max_length = max_length
     self.n_class = n_class
     self.n_tag = n_tag
     self.LayerNorm = LayerNorm(self.hidden_dim, eps=1e-12)
     self.emb_drop = nn.Dropout(config.emb_dropout)
     self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float), padding_idx=0)
     self.embed.weight.requires_grad = True
     self.biLSTM = DynamicLSTM(config.emb_dim, config.hidden_dim // 2, bidirectional=True, batch_first=True,
                               dropout=config.lstm_dropout, num_layers=1)
     self.intent_fc = nn.Linear(self.hidden_dim, self.n_class)
     self.slot_fc = nn.Linear(self.hidden_dim, self.n_tag)
     self.I_S_Emb = Label_Attention(self.intent_fc, self.slot_fc)
     self.T_block1 = I_S_Block(self.intent_fc, self.slot_fc, self.hidden_dim)
     self.T_block2 = I_S_Block(self.intent_fc, self.slot_fc, self.hidden_dim)
     self.T_block3 = I_S_Block(self.intent_fc, self.slot_fc, self.hidden_dim)
     self.crflayer = CRF(self.n_tag)
     self.criterion = nn.CrossEntropyLoss()
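One detail worth noting in Example #3: nn.Embedding.from_pretrained freezes the weights by default (freeze=True), so the explicit requires_grad = True line is what makes the embeddings trainable again. Passing freeze=False at construction, as Example #8 does further down, has the same effect:

import torch
import torch.nn as nn

matrix = torch.randn(100, 300)  # toy pretrained embedding matrix

frozen = nn.Embedding.from_pretrained(matrix)  # freeze=True is the default
assert not frozen.weight.requires_grad

trainable = nn.Embedding.from_pretrained(matrix, freeze=False)
assert trainable.weight.requires_grad  # equivalent to flipping requires_grad afterwards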
Example #4
 def __init__(self, embedding_matrix, opt):
     super(ASBIGCN, self).__init__()
     self.opt = opt
     #        self.mul1=mutualatt(2*opt.hidden_dim)
     #        self.mul2=mutualatt(2*opt.hidden_dim)
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.text_lstm = DynamicLSTM(768,
                                  opt.hidden_dim,
                                  num_layers=1,
                                  batch_first=True,
                                  bidirectional=True)
     #        self.text_lstm1 = DynamicLSTM(2*opt.hidden_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
     #        self.text_lstm2 = DynamicLSTM(opt.embed_dim, 384, num_layers=1, batch_first=True, bidirectional=True)
     #        self.gc1 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
     #        self.gc2 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
     self.gc = simpleGraphConvolutionalignment(2 * opt.hidden_dim,
                                               2 * opt.hidden_dim,
                                               opt.edge_size,
                                               bias=True)
     self.fc = nn.Linear(8 * opt.hidden_dim, opt.polarities_dim)
     #        self.fc1 = nn.Linear(768*2,768)
     self.text_embed_dropout = nn.Dropout(0.1)
Example #5
    def __init__(self, embedding_matrix_list, opt):
        super(BILSTM_ATTENTION, self).__init__()
        self.embed = nn.Embedding.from_pretrained(torch.tensor(
            embedding_matrix_list[0], dtype=torch.float),
                                                  freeze=True)
        # self.input_drop = nn.Dropout(bilstm_attention_config['input_drop'])
        self.bilstm = DynamicLSTM(
            bilstm_attention_config['embed_dim'],
            bilstm_attention_config['hidden_dim'],
            num_layers=bilstm_attention_config['num_layers'],
            batch_first=True,
            bidirectional=True)
        self.weight1 = nn.Parameter(
            torch.Tensor(bilstm_attention_config['hidden_dim'] * 2,
                         bilstm_attention_config['hidden_dim'] * 2))
        self.weight2 = nn.Parameter(
            torch.Tensor(bilstm_attention_config['hidden_dim'] * 2, 1))

        nn.init.uniform_(self.weight1, -0.1, 0.1)
        nn.init.uniform_(self.weight2, -0.1, 0.1)

        self.dense = nn.Linear(bilstm_attention_config['hidden_dim'] * 2,
                               opt.polarities_dim)
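Example #5 only shows the constructor; weight1 and weight2 are the parameters of a soft-attention readout over the BiLSTM states, which is what the final dense layer's 2*hidden_dim input implies. A hedged sketch of how a forward pass typically applies them (the actual forward in the source may differ):

import torch
import torch.nn.functional as F

def attentive_pooling(H, weight1, weight2):
    """H: (batch, seq_len, 2*hidden_dim) BiLSTM outputs.
    Returns a (batch, 2*hidden_dim) sentence vector (assumed readout)."""
    M = torch.tanh(torch.matmul(H, weight1))  # (batch, seq_len, 2*hidden_dim)
    scores = torch.matmul(M, weight2)         # (batch, seq_len, 1)
    alpha = F.softmax(scores, dim=1)          # attention weights over time steps
    return torch.sum(alpha * H, dim=1)        # weighted sum of hidden states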
Example #6
    def __init__(self,
                 embedding_matrix,
                 opt,
                 dropout=0.6,
                 alpha=0.2,
                 nheads=6):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.text_lstm = DynamicLSTM(opt.embed_dim,
                                     opt.hidden_dim,
                                     num_layers=1,
                                     batch_first=True,
                                     bidirectional=True)
        self.opt = opt

        # nfeat, nhid
        self.attentions = [
            GraphAttentionLayer(2 * opt.hidden_dim,
                                2 * opt.hidden_dim,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = GraphAttentionLayer(2 * opt.hidden_dim,
                                           2 * opt.hidden_dim,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=False)
        self.text_embed_dropout = nn.Dropout(0.3)
        self.fc = nn.Linear(2 * opt.hidden_dim, opt.polarities_dim)
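A small point about Example #6: the attention heads are kept in a plain Python list, so the add_module loop is needed to register them as submodules (otherwise their parameters would not show up in model.parameters() or move with .to(device)). An nn.ModuleList achieves the same registration in one step; a drop-in replacement for those lines of the constructor could look like this:

self.attentions = nn.ModuleList(
    GraphAttentionLayer(2 * opt.hidden_dim,
                        2 * opt.hidden_dim,
                        dropout=dropout,
                        alpha=alpha,
                        concat=True) for _ in range(nheads))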
Example #7
 def __init__(self, embedding_matrix, opt):
     super(TD_LSTM, self).__init__()
     self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     self.lstm_l = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True)
     self.lstm_r = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True)
     self.dense = nn.Linear(opt.hidden_dim*2, opt.polarities_dim)
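Example #7 is the constructor of a TD-LSTM style model: one LSTM reads the left context up to the target, the other reads the right context (usually reversed), and the two final hidden states are concatenated, which is why dense takes opt.hidden_dim*2. A rough sketch of the forward pass that would pair with this constructor (assumed, not the repo's code):

def forward(self, x_l, x_l_len, x_r, x_r_len):
    # x_l: token ids of left context + target; x_r: right context + target, reversed
    x_l, x_r = self.embed(x_l), self.embed(x_r)
    _, (h_l, _) = self.lstm_l(x_l, x_l_len)   # final hidden state of left LSTM
    _, (h_r, _) = self.lstm_r(x_r, x_r_len)   # final hidden state of right LSTM
    h = torch.cat((h_l[0], h_r[0]), dim=-1)   # (batch, 2*hidden_dim)
    return self.dense(h)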
Example #8
 def __init__(self, embedding_matrix, opt):
     super(LSTM, self).__init__()
     self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float), freeze=False)
     self.lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
     # Note: with bidirectional=True the LSTM outputs 2*opt.hidden_dim features per step,
     # so the forward pass (not shown) presumably reduces them before this classifier.
     self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
Example #9
    def __init__(self, embedding_matrix, opt):
        super(MMFUSION, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.lstm_aspect = DynamicLSTM(
            opt.embed_dim, opt.hidden_dim, num_layers=1,
            batch_first=True)  #, dropout = opt.dropout_rate
        self.lstm_l = DynamicLSTM(
            opt.embed_dim, opt.hidden_dim, num_layers=1,
            batch_first=True)  #, dropout = opt.dropout_rate
        self.lstm_r = DynamicLSTM(
            opt.embed_dim, opt.hidden_dim, num_layers=1,
            batch_first=True)  #, dropout = opt.dropout_rate
        self.attention_l = Attention2(opt.hidden_dim,
                                      score_function='bi_linear',
                                      dropout=opt.dropout_rate)
        self.attention_r = Attention2(opt.hidden_dim,
                                      score_function='bi_linear',
                                      dropout=opt.dropout_rate)
        self.visaspect_att_l = MMAttention(opt.hidden_dim,
                                           score_function='bi_linear',
                                           dropout=opt.dropout_rate)
        self.visaspect_att_r = MMAttention(opt.hidden_dim,
                                           score_function='bi_linear',
                                           dropout=opt.dropout_rate)
        self.ltext2hidden = nn.Linear(opt.hidden_dim, opt.hidden_dim)
        self.laspect2hidden = nn.Linear(opt.hidden_dim, opt.hidden_dim)
        self.rtext2hidden = nn.Linear(opt.hidden_dim, opt.hidden_dim)
        self.raspect2hidden = nn.Linear(opt.hidden_dim, opt.hidden_dim)
        self.dropout = nn.Dropout(self.opt.dropout_rate)
        #self.viscontext_att_aspect = MMAttention(opt.hidden_dim, score_function='mlp', dropout=opt.dropout_rate)
        #self.visaspect_att_context = MMAttention(opt.hidden_dim, score_function='mlp', dropout=opt.dropout_rate)

        self.aspect2text = nn.Linear(opt.hidden_dim, opt.hidden_dim)
        self.vismap2text = nn.Linear(2048, opt.hidden_dim)
        self.vis2text = nn.Linear(2048, opt.hidden_dim)
        self.gate = nn.Linear(2048 + 4 * opt.hidden_dim, opt.hidden_dim)

        self.madality_attetion = nn.Linear(opt.hidden_dim, 1)

        # bilinear interaction between text vectors and image vectors
        #self.text2hidden = nn.Linear(opt.hidden_dim*3, opt.hidden_dim)
        #self.vis2hidden = nn.Linear(opt.hidden_dim, opt.hidden_dim)
        #self.hidden2final = nn.Linear(opt.hidden_dim, opt.hidden_dim)

        #self.text2hiddentext = nn.Linear(opt.hidden_dim*4, opt.hidden_dim*4)
        #self.vis2hiddentext = nn.Linear(opt.hidden_dim, opt.hidden_dim*4)

        self.text2hiddenvis = nn.Linear(opt.hidden_dim * 4, opt.hidden_dim)
        self.vis2hiddenvis = nn.Linear(opt.hidden_dim, opt.hidden_dim)

        #self.dense_2 = nn.Linear(opt.hidden_dim*2, opt.polarities_dim)
        #self.dense_3 = nn.Linear(opt.hidden_dim*3, opt.polarities_dim)
        #self.dense_4 = nn.Linear(opt.hidden_dim*4, opt.polarities_dim)
        #self.dense_5 = nn.Linear(opt.hidden_dim*5, opt.polarities_dim)
        #self.dense_10 = nn.Linear(opt.hidden_dim*10, opt.polarities_dim)
        if self.opt.att_mode == 'vis_concat_attimg' or self.opt.att_mode == 'vis_concat':
            self.dense_5 = nn.Linear(opt.hidden_dim * 5, opt.polarities_dim)
        elif self.opt.att_mode == 'vis_concat_attimg_gate':
            if self.opt.tfn:
                self.dense_6 = nn.Linear(opt.hidden_dim * opt.hidden_dim,
                                         opt.polarities_dim)
            else:
                self.dense_6 = nn.Linear(opt.hidden_dim * 6,
                                         opt.polarities_dim)
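In the tfn branch of Example #9, dense_6 expects opt.hidden_dim * opt.hidden_dim input features, which points to an outer-product fusion of a text vector and an image vector (in the style of a Tensor Fusion Network). A hedged sketch of that fusion step, with hypothetical names:

import torch

def outer_product_fusion(text_vec, vis_vec):
    """text_vec, vis_vec: (batch, hidden_dim) each.
    Returns (batch, hidden_dim * hidden_dim) fused features (assumed fusion)."""
    fused = torch.bmm(text_vec.unsqueeze(2), vis_vec.unsqueeze(1))  # (batch, h, h)
    return fused.flatten(start_dim=1)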