import torch
import torch.nn as nn

# Helper layers from the surrounding project; the import paths below are
# assumed. LocationEncoding, AlignmentMatrix and Absolute_Position_Embedding
# are assumed to be defined alongside the models that use them.
from layers.attention import Attention, NoQueryAttention
from layers.dynamic_rnn import DynamicLSTM
from layers.squeeze_embedding import SqueezeEmbedding


class IAN(nn.Module):
    def __init__(self, embedding_matrix, opt):
        super(IAN, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        # Separate LSTMs encode the context and the aspect term.
        self.lstm_context = DynamicLSTM(opt.embed_dim, opt.hidden_dim,
                                        num_layers=1, batch_first=True)
        self.lstm_aspect = DynamicLSTM(opt.embed_dim, opt.hidden_dim,
                                       num_layers=1, batch_first=True)
        # Interactive attention: each side attends over the other's states.
        self.attention_aspect = Attention(opt.hidden_dim, score_function='bi_linear')
        self.attention_context = Attention(opt.hidden_dim, score_function='bi_linear')
        # The two attended vectors are concatenated, hence hidden_dim * 2.
        self.dense = nn.Linear(opt.hidden_dim * 2, opt.polarities_dim)
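
# A minimal sketch of how IAN's layers might compose in a forward pass. It
# assumes DynamicLSTM takes (x, lengths) and returns (output, (h_n, c_n)),
# and that Attention(k, q) returns (weighted_sum, scores); the function is
# illustrative, not the project's exact code.
def ian_forward_sketch(model, text_indices, aspect_indices):
    text_len = torch.sum(text_indices != 0, dim=-1)
    aspect_len = torch.sum(aspect_indices != 0, dim=-1)
    context, (_, _) = model.lstm_context(model.embed(text_indices), text_len)
    aspect, (_, _) = model.lstm_aspect(model.embed(aspect_indices), aspect_len)
    # Mean-pool each side to use as the attention query for the other side.
    context_pool = context.sum(dim=1) / text_len.unsqueeze(1).float()
    aspect_pool = aspect.sum(dim=1) / aspect_len.unsqueeze(1).float()
    aspect_final, _ = model.attention_aspect(aspect, context_pool)
    context_final, _ = model.attention_context(context, aspect_pool)
    x = torch.cat((aspect_final.squeeze(dim=1), context_final.squeeze(dim=1)), dim=-1)
    return model.dense(x)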

class TD_LSTM(nn.Module):
    def __init__(self, embedding_matrix, opt):
        super(TD_LSTM, self).__init__()
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        # One LSTM reads the left context up to the target, the other reads
        # the right context (reversed) down to the target.
        self.lstm_l = DynamicLSTM(opt.embed_dim, opt.hidden_dim,
                                  num_layers=1, batch_first=True)
        self.lstm_r = DynamicLSTM(opt.embed_dim, opt.hidden_dim,
                                  num_layers=1, batch_first=True)
        # Classifier over the concatenated final hidden states of both LSTMs.
        self.dense = nn.Linear(opt.hidden_dim * 2, opt.polarities_dim)
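
# A minimal sketch of TD-LSTM's forward pass under the same DynamicLSTM
# assumptions as above, with h_n of shape (1, batch, hidden_dim); the split
# of the input into left/right segments is taken as given. Illustrative only.
def td_lstm_forward_sketch(model, x_l_indices, x_r_indices):
    x_l_len = torch.sum(x_l_indices != 0, dim=-1)
    x_r_len = torch.sum(x_r_indices != 0, dim=-1)
    _, (h_n_l, _) = model.lstm_l(model.embed(x_l_indices), x_l_len)
    _, (h_n_r, _) = model.lstm_r(model.embed(x_r_indices), x_r_len)
    # Concatenating both final states yields the hidden_dim * 2 features
    # that model.dense expects.
    return model.dense(torch.cat((h_n_l[0], h_n_r[0]), dim=-1))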

class AOA(nn.Module):
    def __init__(self, embedding_matrix, opt):
        super(AOA, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.ctx_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1,
                                    batch_first=True, bidirectional=True)
        self.asp_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1,
                                    batch_first=True, bidirectional=True)
        self.dense = nn.Linear(2 * opt.hidden_dim, opt.polarities_dim)
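
# A minimal sketch of the attention-over-attention step: an interaction
# matrix between context and aspect states is softmaxed along each axis,
# and the column attention is averaged to weight the row attention. Shapes
# and the DynamicLSTM return convention are assumptions.
def aoa_forward_sketch(model, text_indices, aspect_indices):
    ctx_len = torch.sum(text_indices != 0, dim=-1)
    asp_len = torch.sum(aspect_indices != 0, dim=-1)
    ctx_out, (_, _) = model.ctx_lstm(model.embed(text_indices), ctx_len)    # (B, N, 2H)
    asp_out, (_, _) = model.asp_lstm(model.embed(aspect_indices), asp_len)  # (B, M, 2H)
    interaction = torch.matmul(ctx_out, asp_out.transpose(1, 2))            # (B, N, M)
    alpha = torch.softmax(interaction, dim=1)   # attention over context words
    beta = torch.softmax(interaction, dim=2)    # attention over aspect words
    beta_avg = beta.mean(dim=1, keepdim=True)                               # (B, 1, M)
    gamma = torch.matmul(alpha, beta_avg.transpose(1, 2))                   # (B, N, 1)
    weighted = torch.matmul(ctx_out.transpose(1, 2), gamma).squeeze(-1)     # (B, 2H)
    return model.dense(weighted)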

class ATAE_LSTM(nn.Module):
    def __init__(self, embedding_matrix, opt):
        super(ATAE_LSTM, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()
        # Input size is embed_dim * 2 because the pooled aspect embedding is
        # concatenated to every word embedding before the LSTM.
        self.lstm = DynamicLSTM(opt.embed_dim * 2, opt.hidden_dim,
                                num_layers=1, batch_first=True)
        # Attention over [hidden state; aspect embedding] features.
        self.attention = NoQueryAttention(opt.hidden_dim + opt.embed_dim,
                                          score_function='bi_linear')
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
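
# A minimal sketch of the aspect concatenation that motivates the
# embed_dim * 2 LSTM input. It assumes SqueezeEmbedding trims padding to the
# batch's longest sequence and NoQueryAttention(x) returns (output, scores);
# illustrative, not the project's exact code.
def atae_forward_sketch(model, text_indices, aspect_indices):
    x_len = torch.sum(text_indices != 0, dim=-1)
    aspect_len = torch.sum(aspect_indices != 0, dim=-1).float()
    x = model.squeeze_embedding(model.embed(text_indices), x_len)
    # Mean-pool the aspect embeddings and append the result to each word.
    aspect = model.embed(aspect_indices).sum(dim=1) / aspect_len.unsqueeze(1)
    aspect = aspect.unsqueeze(1).expand(-1, x.size(1), -1)
    h, (_, _) = model.lstm(torch.cat((x, aspect), dim=-1), x_len)
    _, score = model.attention(torch.cat((h, aspect), dim=-1))
    output = torch.squeeze(torch.bmm(score, h), dim=1)
    return model.dense(output)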

class MGAN(nn.Module):
    def __init__(self, embedding_matrix, opt):
        super(MGAN, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.ctx_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1,
                                    batch_first=True, bidirectional=True)
        self.asp_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1,
                                    batch_first=True, bidirectional=True)
        self.location = LocationEncoding(opt)
        # Bilinear weights for the fine-grained aspect-to-context and
        # context-to-aspect attention.
        self.w_a2c = nn.Parameter(torch.Tensor(2 * opt.hidden_dim, 2 * opt.hidden_dim))
        self.w_c2a = nn.Parameter(torch.Tensor(2 * opt.hidden_dim, 2 * opt.hidden_dim))
        self.alignment = AlignmentMatrix(opt)
        # Four pooled vectors of size 2 * hidden_dim (coarse- and fine-grained,
        # in both directions) are concatenated, hence 8 * hidden_dim.
        self.dense = nn.Linear(8 * opt.hidden_dim, opt.polarities_dim)
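
# A minimal sketch of the bilinear scoring that w_a2c parameterizes
# (w_c2a works symmetrically with the arguments swapped). Shapes assume
# ctx_out: (B, N, 2H) and asp_out: (B, M, 2H); the pooling that turns these
# scores into the four concatenated feature vectors is left out.
def mgan_a2c_score_sketch(model, ctx_out, asp_out):
    # score[b, i, j] = ctx_out[b, i] @ w_a2c @ asp_out[b, j]
    return torch.matmul(torch.matmul(ctx_out, model.w_a2c),
                        asp_out.transpose(1, 2))  # (B, N, M)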

class LCA_LSTM(nn.Module):
    def __init__(self, embedding_matrix, opt):
        super(LCA_LSTM, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.lstm = DynamicLSTM(opt.embed_dim, opt.embed_dim,
                                num_layers=1, batch_first=True)
        self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)
        # Embedding for the binary local-context flag of each token.
        self.lc_embed = nn.Embedding(2, opt.embed_dim)
        self.linear = nn.Linear(opt.embed_dim * 2, opt.embed_dim)
        # Auxiliary head predicting whether a token lies in the local context.
        self.classifier = nn.Linear(opt.embed_dim, 2)
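
# A minimal sketch of the local-context fusion suggested by lc_embed and
# linear: a binary mask marks tokens near the aspect, its embedding is
# concatenated to the word embeddings, and the result is projected back to
# embed_dim. The mask construction and exact fusion point are assumptions.
def lca_fusion_sketch(model, text_indices, lc_mask):
    # lc_mask: (B, N) long tensor, 1 for tokens inside the local context window.
    x = model.embed(text_indices)                    # (B, N, E)
    lc = model.lc_embed(lc_mask)                     # (B, N, E)
    return model.linear(torch.cat((x, lc), dim=-1))  # (B, N, E)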

class RAM(nn.Module):
    def __init__(self, embedding_matrix, opt):
        super(RAM, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.bi_lstm_context = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1,
                                           batch_first=True, bidirectional=True)
        # Scores each memory slot against the current episode and the aspect.
        self.att_linear = nn.Linear(opt.hidden_dim * 2 + 1 + opt.embed_dim * 2, 1)
        # GRU cell that refines the episode vector across attention hops.
        self.gru_cell = nn.GRUCell(opt.hidden_dim * 2 + 1, opt.embed_dim)
        self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)
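
# A minimal sketch of one RAM attention hop. It assumes memory of shape
# (B, N, 2H + 1) (bi-LSTM states plus a position-weight column), an aspect
# vector of shape (B, E) and an episode e of shape (B, E); memory
# construction and the number of hops are left out. Illustrative only.
def ram_hop_sketch(model, memory, aspect, e):
    seq_len = memory.size(1)
    g = model.att_linear(torch.cat([
        memory,                                       # (B, N, 2H + 1)
        e.unsqueeze(1).expand(-1, seq_len, -1),       # (B, N, E)
        aspect.unsqueeze(1).expand(-1, seq_len, -1),  # (B, N, E)
    ], dim=-1))                                       # (B, N, 1)
    alpha = torch.softmax(g, dim=1)
    i_al = torch.bmm(alpha.transpose(1, 2), memory).squeeze(1)  # (B, 2H + 1)
    # The GRU cell folds the attended memory into the episode.
    return model.gru_cell(i_al, e)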

class Cabasc(nn.Module):
    def __init__(self, embedding_matrix, opt, _type='c'):
        super(Cabasc, self).__init__()
        self.opt = opt
        self.type = _type
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
        # Content attention: scores three concatenated embed_dim-sized
        # features per position.
        self.linear1 = nn.Linear(3 * opt.embed_dim, opt.embed_dim)
        self.linear2 = nn.Linear(opt.embed_dim, 1, bias=False)
        self.mlp = nn.Linear(opt.embed_dim, opt.embed_dim)
        self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)
        # Context attention layer: GRUs read the left and right segments and
        # per-token gates reweight the memory.
        self.rnn_l = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1,
                                 batch_first=True, rnn_type='GRU')
        self.rnn_r = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1,
                                 batch_first=True, rnn_type='GRU')
        self.mlp_l = nn.Linear(opt.hidden_dim, 1)
        self.mlp_r = nn.Linear(opt.hidden_dim, 1)
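
# A minimal sketch of the context-attention gates that mlp_l / mlp_r
# compute: sigmoid weights in (0, 1) for each token of the left and right
# segments. DynamicLSTM with rnn_type='GRU' is assumed to return
# (output, h_n); the rest of the model is left out.
def cabasc_context_gates_sketch(model, x_l_indices, x_r_indices):
    x_l_len = torch.sum(x_l_indices != 0, dim=-1)
    x_r_len = torch.sum(x_r_indices != 0, dim=-1)
    ctx_l, _ = model.rnn_l(model.embed(x_l_indices), x_l_len)  # (B, L, H)
    ctx_r, _ = model.rnn_r(model.embed(x_r_indices), x_r_len)  # (B, R, H)
    beta_l = torch.sigmoid(model.mlp_l(ctx_l))                 # (B, L, 1)
    beta_r = torch.sigmoid(model.mlp_r(ctx_r))                 # (B, R, 1)
    return beta_l, beta_r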

class TNet_LF(nn.Module):
    def __init__(self, embedding_matrix, opt):
        super(TNet_LF, self).__init__()
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.position = Absolute_Position_Embedding(opt)
        self.opt = opt
        D = opt.embed_dim       # word embedding dimension
        C = opt.polarities_dim  # number of polarity classes
        L = opt.max_seq_len     # maximum sequence length
        HD = opt.hidden_dim
        self.lstm1 = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1,
                                 batch_first=True, bidirectional=True)
        self.lstm2 = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1,
                                 batch_first=True, bidirectional=True)
        # Convolutional feature extractor over the transformed context.
        self.convs3 = nn.Conv1d(2 * HD, 50, 3, padding=1)
        # Projection for the context-preserving transformation (CPT).
        self.fc1 = nn.Linear(4 * HD, 2 * HD)
        self.fc = nn.Linear(50, C)
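
# A hypothetical usage example: `opt` is assumed to be a plain namespace of
# hyperparameters and `embedding_matrix` a (vocab_size, embed_dim) array of
# pretrained vectors; all names and values here are illustrative.
from types import SimpleNamespace

import numpy as np

opt = SimpleNamespace(embed_dim=300, hidden_dim=300, polarities_dim=3,
                      max_seq_len=80, device='cpu')
embedding_matrix = np.random.rand(5000, opt.embed_dim)  # stand-in for GloVe
model = IAN(embedding_matrix, opt)
text = torch.randint(1, 5000, (4, 20))   # a batch of 4 padded sequences
aspect = torch.randint(1, 5000, (4, 3))
logits = ian_forward_sketch(model, text, aspect)  # (4, polarities_dim)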