Example #1
    def __init__(self, bert, opt):
        super(ATAE_LSTM, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.lstm = DynamicLSTM(opt.bert_dim,
                                opt.hidden_dim,
                                num_layers=1,
                                batch_first=True)
        self.attention = NoQueryAttention(opt.hidden_dim + opt.bert_dim,
                                          score_function='bi_linear')
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
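
Every example here routes its inputs through SqueezeEmbedding before the recurrent or attention layers. A minimal, self-contained sketch of the idea (an assumption about what that layer achieves, not its actual implementation): trim a padded batch down to the longest real sequence so downstream layers never process trailing pad positions.

import torch

def squeeze_embedding(x, x_len):
    """Minimal stand-in for SqueezeEmbedding: keep only the first max(x_len) positions."""
    # x: (batch, max_seq_len, embed_dim); x_len: (batch,) true sequence lengths
    max_len = int(x_len.max().item())
    return x[:, :max_len, :]

x = torch.randn(4, 80, 300)               # batch padded to 80 tokens
x_len = torch.tensor([12, 7, 33, 20])     # true lengths
print(squeeze_embedding(x, x_len).shape)  # torch.Size([4, 33, 300])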
Example #2
    def __init__(self, embedding_matrix, opt):
        super(AEN, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()

        self.attn_k = Attention(opt.embed_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.attn_q = Attention(opt.embed_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.ffn_c = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)
        self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)

        self.attn_s1 = Attention(opt.hidden_dim,
                                 n_head=8,
                                 score_function='mlp',
                                 dropout=opt.dropout)

        self.dense = nn.Linear(opt.hidden_dim * 3, opt.polarities_dim)
Example #3
    def __init__(self, bert, opt):
        super(AEN_BERT, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.attn_k = Attention(opt.bert_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.attn_q = Attention(opt.bert_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.ffn_c = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)
        self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)

        self.attn_s1 = Attention(opt.hidden_dim,
                                 n_head=8,
                                 score_function='mlp',
                                 dropout=opt.dropout)

        self.dense = nn.Linear(opt.hidden_dim * 3, opt.polarities_dim)
Example #4
    def __init__(self, embedding_matrix, opt):
        super(Test, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()

        self.layer_stack = nn.ModuleList([
            EncoderLayer(d_model,
                         d_inner,
                         n_head,
                         d_k,
                         d_v,
                         dropout=opt.dropout) for _ in range(n_layers)
        ])

        # self.ffn_c = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)
        # self.ffn_t = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)

        self.attn_s1 = Attention(opt.hidden_dim,
                                 n_head=8,
                                 score_function='mlp',
                                 dropout=opt.dropout)

        self.dense = nn.Linear(opt.hidden_dim * 3, opt.polarities_dim)
Example #5
 def __init__(self, bert, opt):
     """
     注意力编码器网络, Attentional Encoder Network for Targeted Sentiment Classification
     :param bert:
     :param opt:
     """
     super(AEN_BERT, self).__init__()
     self.opt = opt
     self.bert = bert
     self.squeeze_embedding = SqueezeEmbedding()
     self.dropout = nn.Dropout(opt.dropout)
     # initialize attn_k and attn_q
     self.attn_k = Attention(opt.bert_dim,
                             out_dim=opt.hidden_dim,
                             n_head=8,
                             score_function='mlp',
                             dropout=opt.dropout)
     self.attn_q = Attention(opt.bert_dim,
                             out_dim=opt.hidden_dim,
                             n_head=8,
                             score_function='mlp',
                             dropout=opt.dropout)
     # initialize the PCT layers ffn_c and ffn_t
     self.ffn_c = PositionwiseFeedForward(opt.hidden_dim,
                                          dropout=opt.dropout)
     self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                          dropout=opt.dropout)
     # initialize the target-specific attention layer
     self.attn_s1 = Attention(opt.hidden_dim,
                              n_head=8,
                              score_function='mlp',
                              dropout=opt.dropout)
     # define the final output layer
     self.dense = nn.Linear(opt.hidden_dim * 3, opt.polarities_dim)
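
The comments above describe the AEN flow: two attention encoders for context and target, PCT feed-forward layers, a target-specific attention layer, and a classifier over opt.hidden_dim * 3 features. A minimal sketch of why the classifier input is three times hidden_dim (my reading of the usual AEN head; the forward pass is not shown in these snippets): the context, target-specific, and target representations are each mean-pooled over the sequence dimension and concatenated.

import torch
import torch.nn as nn

hidden_dim, polarities_dim = 300, 3
hc = torch.randn(8, 40, hidden_dim)  # context encoding, e.g. attn_k followed by ffn_c
ht = torch.randn(8, 5, hidden_dim)   # target encoding, e.g. attn_q followed by ffn_t
s1 = torch.randn(8, 5, hidden_dim)   # target-specific attention output, e.g. attn_s1(hc, ht)

pooled = torch.cat([hc.mean(dim=1), s1.mean(dim=1), ht.mean(dim=1)], dim=-1)
dense = nn.Linear(hidden_dim * 3, polarities_dim)
print(dense(pooled).shape)           # torch.Size([8, 3])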
Example #6
 def __init__(self, bert, opt):
     super(AEN_BERT, self).__init__()
     #print(" 1 In AEN_BERT  ")
     self.opt = opt
     self.bert = bert
     self.squeeze_embedding = SqueezeEmbedding()
     self.dropout = nn.Dropout(opt.dropout)
     #print(" 2 In AEN_BERT  ")
     self.attn_k = Attention(opt.bert_dim,
                             out_dim=opt.hidden_dim,
                             n_head=8,
                             score_function='mlp',
                             dropout=opt.dropout)
     self.attn_q = Attention(opt.bert_dim,
                             out_dim=opt.hidden_dim,
                             n_head=8,
                             score_function='mlp',
                             dropout=opt.dropout)
     self.ffn_c = PositionwiseFeedForward(opt.hidden_dim,
                                          dropout=opt.dropout)
     self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                          dropout=opt.dropout)
     #print(" 3 In AEN_BERT  ")
     self.attn_s1 = Attention(opt.hidden_dim,
                              n_head=8,
                              score_function='mlp',
                              dropout=opt.dropout)
     self.hat = False
     self.last = torch.nn.ModuleList()
     for t in range(self.opt.taskcla):
         self.last.append(nn.Linear(opt.hidden_dim * 3, opt.polarities_dim))
Example #7
 def __init__(self, embedding_matrix, opt, type='cabasc'):
     super(Cabasc, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
     self.attention = Attention(opt.embed_dim,
                                score_function='mlp',
                                dropout=opt.dropout)  # content attention
     self.m_linear = nn.Linear(opt.embed_dim, opt.embed_dim, bias=False)
     self.mlp = nn.Linear(opt.embed_dim, opt.embed_dim)  # W4
     self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)  # W5
     # context attention layer
     self.rnn_l = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              rnn_type='GRU')
     self.rnn_r = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              rnn_type='GRU')
     self.mlp_l = nn.Linear(opt.hidden_dim, 1)
     self.mlp_r = nn.Linear(opt.hidden_dim, 1)
Example #8
    def __init__(self, embedding_matrix, opt):
        super(ATAE_LSTM, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()
        self.lstm = DynamicLSTM(opt.embed_dim * 2,
                                opt.hidden_dim,
                                num_layers=1,
                                batch_first=True)
        self.attention = NoQueryAttention(opt.hidden_dim + opt.embed_dim,
                                          score_function='bi_linear')
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)

        self.cls_lstm = DynamicLSTM(opt.embed_dim,
                                    opt.hidden_dim,
                                    num_layers=1,
                                    batch_first=True,
                                    bidirectional=True)
        self.cls_asp_lstm = DynamicLSTM(opt.embed_dim,
                                        opt.hidden_dim,
                                        num_layers=1,
                                        batch_first=True,
                                        bidirectional=True)
        self.h_h_proj = nn.Linear(4 * opt.hidden_dim, opt.hidden_dim)
        self.cls_dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
        self.dense_total = nn.Linear(opt.hidden_dim + opt.hidden_dim,
                                     opt.polarities_dim)

        self.conv = nn.Conv1d(300, 2 * opt.hidden_dim, 3)
Example #9
 def __init__(self, embedding_matrix, opt):
     super(MemNet, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
     self.attention = Attention(opt.embed_dim, score_function='mlp')
     self.x_linear = nn.Linear(opt.embed_dim, opt.embed_dim)
     self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)
Example #10
 def __init__(self, config, opt):
     super(MemNet, self).__init__()
     self.opt = opt
     self.bert = BertModel(config)
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
     self.attention = Attention(opt.embed_dim, score_function='mlp')
     self.x_linear = nn.Linear(opt.embed_dim, opt.embed_dim)
     self.dense = nn.Linear(opt.embed_dim, opt.output_dim)
Example #11
	def __init__(self, embedding_matrix, opt):
		super(ATAE_LSTM, self).__init__()
		self.opt = opt #Parser arguments assigned
		self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float)) #Initialized the word embedding matrix with the GloVe word embeddings for fine tuning
		self.squeeze_embedding = SqueezeEmbedding() #Instance of Squeeze Embedding Class from layers.squeeze_embedding created
		self.lstm = DynamicLSTM(opt.embed_dim*2, opt.hidden_dim, num_layers=1, batch_first=True) #Instance of DynamicLSTM class from layers.dynamic_rnn, created with input_size=opt.embed_dim*2 and hidden_size=opt.hidden_dim
		self.attention = NoQueryAttention(opt.hidden_dim+opt.embed_dim, score_function='bi_linear') #Instance of NoQueryAttention class from layers.attention, created with embed_dim=opt.hidden_dim+opt.embed_dim and score_function='bi_linear'
		self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim) #A linear layer is created with in_features=opt.hidden_dim, out_features=opt.polarities_dim
Example #12
    def __init__(self, embedding_matrix, opt):
        super(Gated_CNN, self).__init__()
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))

        self.in_layer = GLU_Block(opt.kernel_size, opt.embed_dim,
                                  opt.in_channels, opt.downbot)
        self.squeeze_embedding = SqueezeEmbedding()
        self.glu_layers = self.make_glu_layers(opt)
        self.attention = NoQueryAttention(opt.in_channels,
                                          score_function='bi_linear')
        self.dense = nn.Linear(opt.in_channels, opt.polarities_dim)
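
This example stacks GLU_Block layers over the embeddings. As an illustration of the general gated-linear-unit idea (an assumption about what such a block computes, not the GLU_Block implementation itself), a 1-D convolution can emit twice the target channels, and F.glu then splits them into a linear half and a sigmoid gate that are multiplied elementwise.

import torch
import torch.nn.functional as F

conv = torch.nn.Conv1d(in_channels=300, out_channels=2 * 128, kernel_size=3, padding=1)
x = torch.randn(8, 300, 40)   # (batch, embed_dim, seq_len)
y = F.glu(conv(x), dim=1)     # split channels into value and gate: (8, 128, 40)
print(y.shape)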
Example #13
    def __init__(self, embedding_matrix, opt):
        super(AEGCN, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()  #
        self.text_lstm = DynamicLSTM(opt.embed_dim,
                                     opt.hidden_dim,
                                     num_layers=1,
                                     batch_first=True,
                                     bidirectional=True)
        self.aspect_lstm = DynamicLSTM(opt.embed_dim,
                                       opt.hidden_dim,
                                       num_layers=1,
                                       batch_first=True,
                                       bidirectional=True)
        self.attn_k = Attention(opt.embed_dim * 2,
                                out_dim=opt.hidden_dim,
                                n_head=opt.head,
                                score_function='mlp',
                                dropout=opt.dropout)  #
        self.attn_a = Attention(opt.embed_dim * 2,
                                out_dim=opt.hidden_dim,
                                n_head=opt.head,
                                score_function='mlp',
                                dropout=opt.dropout)  #
        # self.attn_s1 = Attention(opt.embed_dim*2, out_dim=opt.hidden_dim, n_head=3, score_function='mlp', dropout=0.5)

        self.attn_q = Attention(opt.embed_dim * 2,
                                out_dim=opt.hidden_dim,
                                n_head=opt.head,
                                score_function='mlp',
                                dropout=opt.dropout)  #

        self.gc1 = GraphConvolution(2 * opt.hidden_dim, 2 * opt.hidden_dim,
                                    opt)
        self.gc2 = GraphConvolution(2 * opt.hidden_dim, 2 * opt.hidden_dim,
                                    opt)
        self.attn_k_q = Attention(opt.hidden_dim,
                                  n_head=opt.head,
                                  score_function='mlp',
                                  dropout=opt.dropout)  #
        self.attn_k_a = Attention(opt.hidden_dim,
                                  n_head=opt.head,
                                  score_function='mlp',
                                  dropout=opt.dropout)
        #self.fc = nn.Linear(2*opt.hidden_dim, opt.polarities_dim)

        self.text_embed_dropout = nn.Dropout(opt.dropout)
        self.aspect_embed_dropout = nn.Dropout(opt.dropout)

        self.dense = nn.Linear(opt.hidden_dim * 3, opt.polarities_dim)
Example #14
 def __init__(self, config, opt):
     super(ATAE_LSTM, self).__init__()
     self.opt = opt
     # self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     self.bert = BertModel(config)
     self.squeeze_embedding = SqueezeEmbedding()
     self.lstm = DynamicLSTM(opt.embed_dim * 2,
                             opt.hidden_dim,
                             num_layers=1,
                             batch_first=True)
     self.attention = NoQueryAttention(opt.hidden_dim + opt.embed_dim,
                                       score_function='bi_linear')
     self.dense = nn.Linear(opt.hidden_dim, opt.output_dim)
Example #15
 def __init__(self, embedding_matrix, opt):
     super(GCAE, self).__init__()
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.squeeze_embedding = SqueezeEmbedding()
     self.cnn_context_x = nn.ModuleList(
         [self.cnn_layer(opt, k) for k in [3, 4, 5]])
     self.cnn_context_y = nn.ModuleList(
         [self.cnn_layer(opt, k) for k in [3, 4, 5]])
     self.cnn_aspect = nn.Sequential(
         nn.ConstantPad2d((0, 0, 2, 0), 0),
         nn.Conv2d(opt.embed_dim, opt.in_channels, (3, 1)))
     self.dropout = nn.Dropout(opt.dropout)
     self.dense = nn.Linear(3 * opt.in_channels, opt.polarities_dim)
Example #16
 def __init__(self, embedding_matrix, opt):
     super(BaseB, self).__init__()
     self.opt = opt
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
     self.attention = Attention(opt.embed_dim, score_function='mlp')
     self.m_linear = nn.Linear(
         opt.embed_dim, opt.embed_dim, bias=False
     )  # FwNN3: I think this should be moved to attention module
     self.x_linear = nn.Linear(opt.embed_dim, opt.embed_dim, bias=False)
     self.s_linear = nn.Linear(opt.embed_dim, opt.embed_dim, bias=False)
     self.mlp = nn.Linear(opt.embed_dim, opt.embed_dim)  # W4
     self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)  # W5
Example #17
    def __init__(self, bert, opt):
        super(SDGCN_NEW, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.lstm_context = sLSTM(opt.bert_dim, 768, window_size=2)
        self.lstm_aspect = sLSTM(opt.bert_dim, 768, window_size=2)
        # self.lstm_context = DynamicLSTM(opt.bert_dim, opt.hidden_dim, num_layers=1, batch_first=True,bidirectional=True)
        # self.lstm_aspect = DynamicLSTM(opt.bert_dim, opt.hidden_dim, num_layers=1, batch_first=True,bidirectional=True)

        self.attention_aspect = Attention(768, score_function='bi_linear')
        self.attention_context = Attention(768, score_function='bi_linear')
        self.dense = nn.Linear(768 * 2, opt.polarities_dim)
Example #18
    def __init__(self, embedding_matrix, opt):
        super(MemNet2, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(torch.tensor(
            embedding_matrix, dtype=torch.float),
                                                  freeze=False)
        self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
        self.bi_lstm_img = DynamicLSTM(opt.embed_dim_img,
                                       opt.embed_dim,
                                       num_layers=1,
                                       batch_first=True,
                                       bidirectional=False)

        self.attention = Attention(opt.embed_dim, score_function='mlp')
        self.x_linear = nn.Linear(opt.embed_dim, opt.embed_dim)
        self.dense = nn.Linear(opt.embed_dim * 2, opt.polarities_dim)
Example #19
 def __init__(self, embedding_matrix, opt):
     super(Cabasc, self).__init__()
     self.opt = opt
     self.dropout = nn.Dropout(0.5)
     self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     if opt.freeze_embeddings == "yes":
         self.embed.weight.requires_grad = False
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)         
     self.linear1 = nn.Linear(3*opt.embed_dim, opt.embed_dim)
     self.linear2 = nn.Linear(opt.embed_dim, 1, bias=False)    
     self.mlp = nn.Linear(opt.embed_dim, opt.embed_dim)                           
     self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)  
     if opt.model_name == 'cabasc':                  
         self.rnn_l = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, rnn_type = 'GRU') 
         self.rnn_r = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, rnn_type = 'GRU')
         self.mlp_l = nn.Linear(opt.hidden_dim, 1)
         self.mlp_r = nn.Linear(opt.hidden_dim, 1)
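
The freeze_embeddings branch above fixes the pretrained vectors by turning off their gradient. A small self-contained sketch (illustrative only) of the two equivalent ways to do this: pass freeze=True to nn.Embedding.from_pretrained, or flip requires_grad on the weight afterwards, as the example does.

import torch
import torch.nn as nn

embedding_matrix = torch.randn(1000, 300)      # placeholder pretrained vectors
embed_a = nn.Embedding.from_pretrained(embedding_matrix, freeze=True)
embed_b = nn.Embedding.from_pretrained(embedding_matrix, freeze=False)
embed_b.weight.requires_grad = False           # same effect as freeze=True
print(embed_a.weight.requires_grad, embed_b.weight.requires_grad)  # False False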
Example #20
    def __init__(self, bert, opt):
        super(SDGCN, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.lstm_context = DynamicLSTM(opt.bert_dim,
                                        opt.hidden_dim,
                                        bidirectional=True,
                                        num_layers=1,
                                        batch_first=True)
        # self.lstm_aspect = DynamicLSTM(opt.bert_dim, opt.hidden_dim, num_layers=1, batch_first=True)
        # self.attention_aspect = Attention(opt.hidden_dim, score_function='bi_linear')
        # self.attention_context = Attention(opt.hidden_dim, score_function='bi_linear')

        self.gc1 = GraphConvolution(2 * opt.hidden_dim, 2 * opt.hidden_dim)
        self.gc2 = GraphConvolution(2 * opt.hidden_dim, 2 * opt.hidden_dim)
        self.fc = nn.Linear(2 * opt.hidden_dim, opt.polarities_dim)
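
Here the BiLSTM output (2 * opt.hidden_dim features per token) is refined by two GraphConvolution layers before the classifier. A generic, self-contained sketch of what a graph-convolution layer of this shape typically computes (an assumption, not the GraphConvolution class used above): project the node features, then aggregate them over a degree-normalized adjacency matrix.

import torch
import torch.nn as nn

class SimpleGraphConv(nn.Module):
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.weight = nn.Parameter(0.01 * torch.randn(in_dim, out_dim))

    def forward(self, x, adj):
        # x: (batch, nodes, in_dim), adj: (batch, nodes, nodes)
        hidden = torch.matmul(x, self.weight)
        denom = adj.sum(dim=2, keepdim=True) + 1   # degree normalization
        return torch.matmul(adj, hidden) / denom

gc = SimpleGraphConv(2 * 300, 2 * 300)
out = gc(torch.randn(4, 40, 600), torch.ones(4, 40, 40))
print(out.shape)  # torch.Size([4, 40, 600])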
Example #21
    def __init__(self, bert, opt):
        super(RAM, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.bi_lstm_context = DynamicLSTM(opt.bert_dim,
                                           opt.hidden_dim,
                                           num_layers=1,
                                           batch_first=True,
                                           bidirectional=True)

        self.att_linear = nn.Linear(opt.hidden_dim * 2 + 1 + opt.bert_dim * 2,
                                    1)

        self.gru_cell = nn.GRUCell(opt.hidden_dim * 2 + 1, opt.bert_dim)

        self.dense = nn.Linear(opt.bert_dim, opt.polarities_dim)
Example #22
    def __init__(self, bert, opt):
        super(SDGCN, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.lstm_context = DynamicLSTM(opt.bert_dim,
                                        opt.hidden_dim,
                                        num_layers=1,
                                        batch_first=True)
        self.lstm_aspect = DynamicLSTM(opt.bert_dim,
                                       opt.hidden_dim,
                                       num_layers=1,
                                       batch_first=True)

        self.attention_aspect = Bilinear_Attention(opt.hidden_dim)
        self.attention_context = Bilinear_Attention(opt.hidden_dim)

        self.dense = nn.Linear(opt.hidden_dim * 2, opt.polarities_dim)
Example #23
 def __init__(self, bert_or_embedding_matrix, opt):
     super(BERT_ALBERT_GCN, self).__init__()
     self.opt = opt
     bert = bert_or_embedding_matrix
     self.context_bert = bert
     self.squeeze_embedding = SqueezeEmbedding()
     if not opt.no_gnn:
         self.gcn = GNN_RELU_DIFF(opt.bert_dim,
                                  opt.bert_dim,
                                  step=opt.gnn_step,
                                  drop=0.0)
         self.encoder_fn = nn.Linear(opt.bert_dim, opt.bert_dim)
         self.norm = LayerNorm(opt.bert_dim)
     if not opt.no_sa:
         self.bert_self_att = SelfAttention(bert.config, opt)
     if self.opt.pool_tp == 'bert_pool':
         self.bert_pooler = BertPooler(bert.config)
     elif self.opt.pool_tp == 'max_pool':
         self.pool_fc = nn.Linear(opt.bert_dim, opt.bert_dim)
     self.fc = nn.Linear(opt.bert_dim, 3)
Example #24
    def __init__(self, bert, opt):
        super(AEN_BERT_HAT, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.attn_k = Attention(opt.bert_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.attn_q = Attention(opt.bert_dim,
                                out_dim=opt.hidden_dim,
                                n_head=8,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.ffn_c = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)
        self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)

        self.attn_s1 = Attention(opt.hidden_dim,
                                 n_head=8,
                                 score_function='mlp',
                                 dropout=opt.dropout)

        self.gate = torch.nn.Sigmoid()

        self.ec0 = torch.nn.Embedding(self.opt.taskcla, opt.hidden_dim)
        self.ec1 = torch.nn.Embedding(self.opt.taskcla, opt.hidden_dim)
        self.ec2 = torch.nn.Embedding(self.opt.taskcla, opt.hidden_dim)
        self.ec3 = torch.nn.Embedding(self.opt.taskcla, opt.hidden_dim)
        self.ec4 = torch.nn.Embedding(self.opt.taskcla, opt.hidden_dim)

        self.hat = True

        self.last = torch.nn.ModuleList()
        for t in range(self.opt.taskcla):
            self.last.append(nn.Linear(opt.hidden_dim * 3, opt.polarities_dim))
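
Compared with the plain AEN_BERT examples, this variant adds per-task embeddings ec0..ec4, a sigmoid gate, and one output head per task. A minimal sketch of the HAT-style gating these pieces suggest (my assumption; the scaling constant and usage are illustrative, not taken from this file): a task id indexes an embedding, and a scaled sigmoid of it becomes a per-unit mask applied to a hidden activation.

import torch
import torch.nn as nn

taskcla, hidden_dim, s = 5, 300, 100.0   # s: hypothetical gate-scaling factor
ec0 = nn.Embedding(taskcla, hidden_dim)
gate = nn.Sigmoid()

t = torch.tensor([2])                    # current task id
h = torch.randn(8, hidden_dim)           # some hidden activation
mask = gate(s * ec0(t))                  # (1, hidden_dim) gate values in (0, 1)
print((h * mask).shape)                  # torch.Size([8, 300])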
Example #25
    def __init__(self, embedding_matrix, opt):
        super(AEN_GloVe, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()

        self.attn_k = Attention(opt.embed_dim, out_dim=opt.hidden_dim, n_head=8, score_function='mlp', dropout=opt.dropout)
        self.attn_q = Attention(opt.embed_dim, out_dim=opt.hidden_dim, n_head=8, score_function='mlp', dropout=opt.dropout)
        self.ffn_c = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)
        self.ffn_t = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)

        self.attn_s1 = Attention(opt.hidden_dim, n_head=8, score_function='mlp', dropout=opt.dropout)
        self.dense = nn.Linear(opt.hidden_dim*3, opt.polarities_dim)


        self.cls_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.cls_asp_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.dense_total = nn.Linear(3*opt.hidden_dim*2, opt.polarities_dim)
        self.h_h_proj = nn.Linear(4*opt.hidden_dim,3*opt.hidden_dim)
        self.cls_dense = nn.Linear(3*opt.hidden_dim,3)
        
        self.conv = nn.Conv1d(300,2*opt.hidden_dim,3)
Example #26
    def __init__(self, embedding_matrix, opt):
        super(LifelongABSA, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
        self.attention = Attention(opt.embed_dim, score_function='mlp')
        #self.x_linear = nn.Linear(opt.embed_dim, opt.embed_dim)
        # self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)
        self.polarities_dim = opt.polarities_dim
        self.last = torch.nn.ModuleList()
        self.hat = False
        self.word_in_domain = torch.zeros(1, dtype=torch.int64)

        self.context_attention = dict()
        self.aspect_context_attention = dict()
        self.currenttask = -1
        self.currentSentence = 0

        for t in range(self.opt.taskcla):
            self.last.append(nn.Linear(opt.polarities_dim, opt.polarities_dim))
            self.context_attention[t] = dict()
            self.aspect_context_attention[t] = dict()

        #Parameter(torch.Tensor(out_features, in_features))
        self.W = torch.nn.Parameter(torch.randn(opt.embed_dim, opt.polarities_dim))

        self.opt.initializer(self.W)

        # where the negative, neutral, and positive classes are denoted
        # as [1, 0, 0], [0, 1, 0] and [0, 0, 1] respectively

        self.An = torch.tensor( np.array([[1,-1,-1]]), dtype=torch.float32, requires_grad=False )
        self.Bn = torch.tensor(np.array([[1, 0, 0]]), dtype=torch.float32, requires_grad=False)

        self.Ap = torch.tensor(np.array([[-1, -1, 1]]), dtype=torch.float32, requires_grad=False)
        self.Bp = torch.tensor(np.array([[0, 0, 1]]), dtype=torch.float32, requires_grad=False)

        self.L2MN = False
Example #27
    def __init__(self, embedding_matrix, opt):
        super(AEN, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()

        self.attn_k = Attention(opt.embed_dim, out_dim=opt.hidden_dim, n_head=8, score_function='mlp', dropout=0.1)
        self.attn_q = Attention(opt.embed_dim, out_dim=opt.hidden_dim, n_head=8, score_function='mlp', dropout=0.1)

        ###
        D = opt.hidden_dim
        Co = 100
        Ks = [3, 4, 5]
        C = 3
        self.convs1 = nn.ModuleList([nn.Conv1d(D, Co, K) for K in Ks])
        self.convs2 = nn.ModuleList([nn.Conv1d(D, Co, K) for K in Ks])
        self.convs3 = nn.ModuleList([nn.Conv1d(D, Co, K, padding=K - 2) for K in [3]])

        # self.convs3 = nn.Conv1d(D, 300, 3, padding=1), smaller is better
        self.dropout = nn.Dropout(opt.dropout)

        self.fc1 = nn.Linear(len(Ks) * Co, C)
        self.fc_aspect = nn.Linear(100, Co)
Example #28
 def __init__(self, config, opt, _type='c'):
     super(Cabasc, self).__init__()
     self.opt = opt
     self.type = _type
     self.bert = BertModel(config)
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
     self.linear1 = nn.Linear(3 * opt.embed_dim, opt.embed_dim)
     self.linear2 = nn.Linear(opt.embed_dim, 1, bias=False)
     self.mlp = nn.Linear(opt.embed_dim, opt.embed_dim)
     self.dense = nn.Linear(opt.embed_dim, opt.output_dim)
     # context attention layer
     self.rnn_l = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              rnn_type='GRU')
     self.rnn_r = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              rnn_type='GRU')
     self.mlp_l = nn.Linear(opt.hidden_dim, 1)
     self.mlp_r = nn.Linear(opt.hidden_dim, 1)
Example #29
    def __init__(self, bert, opt):
        super(AEN_SIMPLE, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.attn_q = Attention(opt.bert_dim,
                                out_dim=opt.hidden_dim,
                                n_head=opt.mha_heads,
                                score_function='mlp',
                                dropout=opt.dropout)
        self.ffn_t = PositionwiseFeedForward(opt.hidden_dim,
                                             dropout=opt.dropout)

        self.lstm = nn.LSTM(opt.hidden_dim,
                            opt.lstm_hid,
                            opt.lstm_layer,
                            batch_first=True,
                            bidirectional=opt.lstm_bidir)

        self.dense = nn.Linear(opt.lstm_hid * (2 if opt.lstm_bidir else 1),
                               opt.polarities_dim)
Example #30
 def __init__(self, embedding_matrix, opt, _type='c'):
     super(Cabasc, self).__init__()
     self.opt = opt
     self.type = _type
     self.embed = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
     self.linear1 = nn.Linear(3 * opt.embed_dim, opt.embed_dim)
     self.linear2 = nn.Linear(opt.embed_dim, 1, bias=False)
     self.mlp = nn.Linear(opt.embed_dim, opt.embed_dim)
     self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)
     # context attention layer
     self.rnn_l = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              rnn_type='GRU')
     self.rnn_r = DynamicLSTM(opt.embed_dim,
                              opt.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              rnn_type='GRU')
     self.mlp_l = nn.Linear(opt.hidden_dim, 1)
     self.mlp_r = nn.Linear(opt.hidden_dim, 1)