Example #1
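The constructors below rely on the standard PyTorch imports shown here. DynamicRNN, SqueezeEmbedding, Attention, BasicAttention and AttentionAspect are custom layers defined elsewhere in this project; their import paths are not reproduced in this excerpt.

import torch
import torch.nn as nn

# DynamicRNN, SqueezeEmbedding, Attention, BasicAttention and AttentionAspect are
# project-specific layers and are assumed to be imported from the surrounding code base.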

 # TD-LSTM: one LSTM over the left context plus target and one over the target plus
 # right context; their final hidden states are concatenated and fed to the classifier.
 def __init__(self,
              args,
              embedding_matrix=None,
              aspect_embedding_matrix=None):
     super(TD_LSTM, self).__init__()
     self.args = args
     self.encoder = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.encoder_aspect = nn.Embedding.from_pretrained(
         torch.tensor(aspect_embedding_matrix, dtype=torch.float))
     self.lstm_l = DynamicRNN(args.embed_dim,
                              args.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              dropout=args.dropout,
                              rnn_type="LSTM")
     self.lstm_r = DynamicRNN(args.embed_dim,
                              args.hidden_dim,
                              num_layers=1,
                              batch_first=True,
                              dropout=args.dropout,
                              rnn_type="LSTM")
     self.dense = nn.Linear(args.hidden_dim * 2, args.polarities_dim)
     self.dropout = nn.Dropout(args.dropout)
     self.softmax = nn.Softmax(dim=-1)  # explicit dim; nn.Softmax() without one is deprecated
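For orientation, here is a minimal self-contained sketch of the computation TD-LSTM performs, written with plain nn.LSTM instead of this project's DynamicRNN wrapper and with made-up sizes: each side of the target is encoded by its own LSTM, and the two final hidden states are concatenated and classified.

import torch
import torch.nn as nn

batch, len_l, len_r, embed_dim, hidden_dim, polarities = 2, 7, 5, 300, 200, 3

lstm_l = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
lstm_r = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
dense = nn.Linear(hidden_dim * 2, polarities)

x_l = torch.randn(batch, len_l, embed_dim)   # embedded left context + target
x_r = torch.randn(batch, len_r, embed_dim)   # embedded right context + target

_, (h_l, _) = lstm_l(x_l)                    # final hidden state, shape (1, batch, hidden_dim)
_, (h_r, _) = lstm_r(x_r)
h = torch.cat((h_l[-1], h_r[-1]), dim=-1)    # (batch, hidden_dim * 2)
logits = dense(h)                            # (batch, polarities)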

 # RAM: bidirectional LSTMs encode the context and the aspect; a GRU cell then refines
 # an "episode" vector over repeated attention hops before classification.
 def __init__(self,
              args,
              embedding_matrix=None,
              aspect_embedding_matrix=None):
     super(RAM, self).__init__()
     self.args = args
     self.encoder = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.encoder_aspect = nn.Embedding.from_pretrained(
         torch.tensor(aspect_embedding_matrix, dtype=torch.float))
     self.bi_lstm_context = DynamicRNN(args.embed_dim,
                                       args.hidden_dim,
                                       num_layers=1,
                                       batch_first=True,
                                       bidirectional=True,
                                       dropout=args.dropout,
                                       rnn_type="LSTM")
     self.bi_lstm_aspect = DynamicRNN(args.embed_dim,
                                      args.hidden_dim,
                                      num_layers=1,
                                      batch_first=True,
                                      bidirectional=True,
                                      dropout=args.dropout,
                                      rnn_type="LSTM")
     self.attention = Attention(args.hidden_dim * 2, score_function='mlp')
     self.gru_cell = nn.GRUCell(args.hidden_dim * 2, args.hidden_dim * 2)
     self.dense = nn.Linear(args.hidden_dim * 2, args.polarities_dim)
     self.softmax = nn.Softmax(dim=-1)
     self.dropout = nn.Dropout(args.dropout)
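A rough, self-contained sketch of the multi-hop recurrence the gru_cell above serves (dot-product attention stands in for the project's MLP-scored Attention module; sizes are hypothetical): each hop attends over the BiLSTM memory and feeds the attended vector to the GRU cell to update the episode state.

import torch
import torch.nn as nn
import torch.nn.functional as F

batch, seq_len, hidden = 2, 9, 400            # hidden = args.hidden_dim * 2 for a BiLSTM
hops, polarities = 3, 3

memory = torch.randn(batch, seq_len, hidden)  # BiLSTM outputs over the context
gru_cell = nn.GRUCell(hidden, hidden)
dense = nn.Linear(hidden, polarities)

e = torch.zeros(batch, hidden)                # episode state
for _ in range(hops):
    scores = torch.bmm(memory, e.unsqueeze(-1)).squeeze(-1)  # (batch, seq_len)
    alpha = F.softmax(scores, dim=-1)
    i_t = torch.bmm(alpha.unsqueeze(1), memory).squeeze(1)   # attended memory vector
    e = gru_cell(i_t, e)                                      # recurrent episode update
logits = dense(e)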

 # HAN: two-level (word- and sentence-level) bidirectional LSTMs with attention at each
 # level, plus word- and segment-position embeddings.
 def __init__(self, args, embedding_matrix=None, aspect_embedding_matrix=None):
     super(HAN, self).__init__()
     self.position_dim = 100
     self.args = args
     self.encoder = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     self.encoder_aspect = nn.Embedding.from_pretrained(torch.tensor(aspect_embedding_matrix, dtype=torch.float))
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
     self.word_rnn = DynamicRNN(args.embed_dim + self.position_dim, args.hidden_dim, num_layers=1, batch_first=True,
                                dropout=args.dropout, bidirectional=True, rnn_type="LSTM")
     self.sentence_rnn = DynamicRNN(args.hidden_dim * 2 + self.position_dim, args.hidden_dim, num_layers=1,
                                    batch_first=True,
                                    dropout=args.dropout, bidirectional=True, rnn_type="LSTM")
     # attention parameters for the word/sentence attention; note they are created
     # uninitialized (torch.Tensor) and are presumably initialized elsewhere in the project
     self.word_W = nn.Parameter(
         torch.Tensor(args.hidden_dim * 2 + self.position_dim, args.hidden_dim * 2 + self.position_dim))
     self.word_bias = nn.Parameter(torch.Tensor(args.hidden_dim * 2 + self.position_dim, 1))
     self.word_weight_proj = nn.Parameter(torch.Tensor(args.hidden_dim * 2 + self.position_dim, 1))
     self.word_attention = BasicAttention(hidden_dim=args.hidden_dim * 2, score_function="basic")
     self.sentence_W = nn.Parameter(
         torch.Tensor(args.hidden_dim * 2 + self.position_dim, args.hidden_dim * 2 + self.position_dim))
     self.sentence_bias = nn.Parameter(torch.Tensor(args.hidden_dim * 2 + self.position_dim, 1))
     self.sentence_weight_proj = nn.Parameter(torch.Tensor(args.hidden_dim * 2 + self.position_dim, 1))
     self.sentence_attention = BasicAttention(hidden_dim=args.hidden_dim * 2, score_function="basic")
     self.dense = nn.Linear(args.hidden_dim * 4, args.polarities_dim)
     self.word_position_embed = nn.Embedding(1005, self.position_dim)
     self.segment_position_embed = nn.Embedding(25, self.position_dim)
     self.softmax = nn.Softmax(dim=-1)
     self.dropout = nn.Dropout(args.dropout)
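A self-contained sketch (hypothetical sizes) of how word-level attention is typically computed from parameters shaped like word_W, word_bias and word_weight_proj above: hidden states are projected through a tanh layer, scored against a learned context vector, and pooled with the resulting softmax weights.

import torch
import torch.nn.functional as F

batch, seq_len, d = 2, 12, 500               # d = hidden_dim * 2 + position_dim
H = torch.randn(batch, seq_len, d)           # word BiLSTM outputs (+ position features)
W = torch.randn(d, d)                        # stands in for word_W
b = torch.randn(d)                           # stands in for word_bias
proj = torch.randn(d, 1)                     # stands in for word_weight_proj

u = torch.tanh(torch.matmul(H, W) + b)       # (batch, seq_len, d)
scores = torch.matmul(u, proj).squeeze(-1)   # (batch, seq_len)
alpha = F.softmax(scores, dim=-1)            # attention weights over words
sentence_vec = torch.bmm(alpha.unsqueeze(1), H).squeeze(1)   # (batch, d)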

 # ATAE-BiGRU: a bidirectional GRU over the context followed by aspect-conditioned
 # attention (the aspect embedding acts as the query), then a linear classifier.
 def __init__(self,
              args,
              embedding_matrix=None,
              aspect_embedding_matrix=None):
     super(ATAE_BiGRU, self).__init__()
     self.args = args
     self.encoder = nn.Embedding.from_pretrained(
         torch.tensor(embedding_matrix, dtype=torch.float))
     self.encoder_aspect = nn.Embedding.from_pretrained(
         torch.tensor(aspect_embedding_matrix, dtype=torch.float))
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
     self.gru = DynamicRNN(args.embed_dim,
                           args.hidden_dim,
                           num_layers=1,
                           batch_first=True,
                           dropout=args.dropout,
                           bidirectional=True,
                           rnn_type="GRU")
     # self.attention = BasicAttention(hidden_dim=args.hidden_dim*2, score_function="aspect")
     self.attention = AttentionAspect(embed_dim_k=args.hidden_dim * 2,
                                      embed_dim_q=args.embed_dim,
                                      score_function="mlp")
     self.dense = nn.Linear(args.hidden_dim * 2, args.polarities_dim)
     self.softmax = nn.Softmax(dim=-1)
     self.dropout = nn.Dropout(args.dropout)
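A minimal stand-in for the AttentionAspect step above (hypothetical sizes, a simple concat-MLP scorer): every BiGRU hidden state is scored against the aspect embedding, and the softmax-weighted sum becomes the aspect-aware sentence representation.

import torch
import torch.nn as nn
import torch.nn.functional as F

batch, seq_len, hidden2, embed_dim, polarities = 2, 10, 400, 300, 3

H = torch.randn(batch, seq_len, hidden2)     # BiGRU outputs (keys)
aspect = torch.randn(batch, embed_dim)       # pooled aspect embedding (query)
score_mlp = nn.Linear(hidden2 + embed_dim, 1)
dense = nn.Linear(hidden2, polarities)

q = aspect.unsqueeze(1).expand(-1, seq_len, -1)             # repeat the query per time step
scores = score_mlp(torch.cat((H, q), dim=-1)).squeeze(-1)   # (batch, seq_len)
alpha = F.softmax(scores, dim=-1)
r = torch.bmm(alpha.unsqueeze(1), H).squeeze(1)             # aspect-aware sentence vector
logits = dense(r)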

 # AT-GRU: a single GRU over the context with a basic attention layer on top.
 def __init__(self, args, embedding_matrix=None, aspect_embedding_matrix=None):
     super(AT_GRU, self).__init__()
     self.args = args
     self.encoder = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     self.encoder_aspect = nn.Embedding.from_pretrained(torch.tensor(aspect_embedding_matrix, dtype=torch.float))
     self.gru = DynamicRNN(args.embed_dim, args.hidden_dim, num_layers=1, dropout=args.dropout, rnn_type="GRU")
     self.attention = BasicAttention(hidden_dim=args.hidden_dim)
     self.dense = nn.Linear(args.hidden_dim, args.polarities_dim)
     self.softmax = nn.Softmax(dim=-1)
     self.dropout = nn.Dropout(args.dropout)

 # CABASC: content attention over the word memory, combined with a context-attention
 # module in which left-to-right and right-to-left GRUs gate each position.
 def __init__(self, args, embedding_matrix=None, aspect_embedding_matrix=None):
     super(CABASC, self).__init__()
     self.args = args
     self.encoder = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     self.encoder_aspect = nn.Embedding.from_pretrained(torch.tensor(aspect_embedding_matrix, dtype=torch.float))
     self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
     self.attention = Attention(args.embed_dim, score_function='mlp')  # content attention
     self.m_linear = nn.Linear(args.embed_dim, args.embed_dim, bias=False)
     self.mlp = nn.Linear(args.embed_dim, args.embed_dim)  # W4
     self.dense = nn.Linear(args.embed_dim, args.polarities_dim)  # W5
     # context attention layer
     self.rnn_l = DynamicRNN(args.embed_dim, args.hidden_dim, num_layers=1, batch_first=True, dropout=args.dropout,
                             rnn_type='GRU')
     self.rnn_r = DynamicRNN(args.embed_dim, args.hidden_dim, num_layers=1, batch_first=True, dropout=args.dropout,
                             rnn_type='GRU')
     self.mlp_l = nn.Linear(args.hidden_dim, 1)
     self.mlp_r = nn.Linear(args.hidden_dim, 1)
     self.dropout = nn.Dropout(args.dropout)
     self.softmax = nn.Softmax(dim=-1)
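A rough illustration (hypothetical sizes, simplified combination) of what rnn_l / rnn_r and mlp_l / mlp_r above are for: each directional GRU scores every position with a sigmoid gate, and the gates re-weight the word memory before the content attention. The exact way CABASC combines and offsets the two gates differs in detail.

import torch
import torch.nn as nn

batch, seq_len, embed_dim, hidden = 2, 11, 300, 300
memory = torch.randn(batch, seq_len, embed_dim)            # embedded context words

rnn_l = nn.GRU(embed_dim, hidden, batch_first=True)
rnn_r = nn.GRU(embed_dim, hidden, batch_first=True)
mlp_l = nn.Linear(hidden, 1)
mlp_r = nn.Linear(hidden, 1)

out_l, _ = rnn_l(memory)                                   # left-to-right pass
out_r, _ = rnn_r(torch.flip(memory, dims=[1]))             # right-to-left pass
beta_l = torch.sigmoid(mlp_l(out_l)).squeeze(-1)           # per-position gates in (0, 1)
beta_r = torch.sigmoid(mlp_r(torch.flip(out_r, dims=[1]))).squeeze(-1)
weights = (beta_l + beta_r) / 2                            # simplified combination of the gates
weighted_memory = memory * weights.unsqueeze(-1)           # down-weight less relevant words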

 # LCR-S: separate LSTMs over the left context, the target (center) and the right context,
 # with target-to-context and context-to-target attention in both directions.
 def __init__(self, args, embedding_matrix, aspect_embedding_matrix=None, memory_weighter='no'):
     super(LCRS, self).__init__()
     self.args = args
     self.memory_weighter = memory_weighter
     self.encoder = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
     self.encoder_aspect = nn.Embedding.from_pretrained(torch.tensor(aspect_embedding_matrix, dtype=torch.float))
     self.blstm_l = DynamicRNN(args.embed_dim, args.hidden_dim, num_layers=1, batch_first=True, dropout=args.dropout,
                               rnn_type='LSTM')
     self.blstm_c = DynamicRNN(args.embed_dim, args.hidden_dim, num_layers=1, batch_first=True, dropout=args.dropout,
                               rnn_type='LSTM')
     self.blstm_r = DynamicRNN(args.embed_dim, args.hidden_dim, num_layers=1, batch_first=True, dropout=args.dropout,
                               rnn_type='LSTM')
     self.dense = nn.Linear(args.hidden_dim * 4, args.polarities_dim)
     # target to context attention
     self.t2c_l_attention = Attention(args.hidden_dim, score_function='bi_linear')
     self.t2c_r_attention = Attention(args.hidden_dim, score_function='bi_linear')
     # context to target attention
     self.c2t_l_attention = Attention(args.hidden_dim, score_function='bi_linear')
     self.c2t_r_attention = Attention(args.hidden_dim, score_function='bi_linear')
     self.softmax = nn.Softmax(dim=-1)
     self.dropout = nn.Dropout(args.dropout)
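Finally, a hedged sketch of how any of these constructors would typically be called, assuming the classes above are importable from the project: args is a simple namespace carrying the hyper-parameter fields the constructors read, and the embedding matrices are (vocab_size, embed_dim) arrays of pretrained vectors (random here for illustration).

import argparse
import numpy as np

# Hypothetical hyper-parameters; the field names mirror the args attributes used above.
args = argparse.Namespace(embed_dim=300, hidden_dim=200, polarities_dim=3, dropout=0.5)
vocab_size = 5000
embedding_matrix = np.random.uniform(-0.25, 0.25, (vocab_size, args.embed_dim))

model = TD_LSTM(args,
                embedding_matrix=embedding_matrix,
                aspect_embedding_matrix=embedding_matrix)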