def __init__(self, bert, opt):
    super(AEN_BERT, self).__init__()
    self.opt = opt
    self.bert = bert
    self.squeeze_embedding = SqueezeEmbedding()
    self.dropout = nn.Dropout(opt.dropout)
    # multi-head attention encoders for the context (k) and target (q) sequences
    self.attn_k = Attention(opt.bert_dim, out_dim=opt.hidden_dim, n_head=8,
                            score_function='mlp', dropout=opt.dropout)
    self.attn_q = Attention(opt.bert_dim, out_dim=opt.hidden_dim, n_head=8,
                            score_function='mlp', dropout=opt.dropout)
    # position-wise feed-forward layers for context and target
    self.ffn_c = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)
    self.ffn_t = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)
    self.attn_s1 = Attention(opt.hidden_dim, n_head=8,
                             score_function='mlp', dropout=opt.dropout)
    # three pooled vectors are concatenated before classification
    self.dense = nn.Linear(opt.hidden_dim * 3, opt.polarities_dim)
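# Usage sketch for AEN_BERT (a minimal sketch: the SimpleNamespace fields are
# exactly the ones this __init__ reads, but the values and the use of
# transformers.BertModel are illustrative assumptions, not repo defaults).
from types import SimpleNamespace
from transformers import BertModel

opt = SimpleNamespace(dropout=0.1, bert_dim=768, hidden_dim=300, polarities_dim=3)
bert = BertModel.from_pretrained('bert-base-uncased')
model = AEN_BERT(bert, opt)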
def __init__(self, embedding_matrix, opt):
    super(AEN_GloVe, self).__init__()
    self.opt = opt
    self.embed = nn.Embedding.from_pretrained(
        torch.tensor(embedding_matrix, dtype=torch.float))
    self.squeeze_embedding = SqueezeEmbedding()
    self.attn_k = Attention(opt.embed_dim, out_dim=opt.hidden_dim, n_head=8,
                            score_function='mlp', dropout=opt.dropout)
    self.attn_q = Attention(opt.embed_dim, out_dim=opt.hidden_dim, n_head=8,
                            score_function='mlp', dropout=opt.dropout)
    self.ffn_c = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)
    self.ffn_t = PositionwiseFeedForward(opt.hidden_dim, dropout=opt.dropout)
    self.attn_s1 = Attention(opt.hidden_dim, n_head=8,
                             score_function='mlp', dropout=opt.dropout)
    self.dense = nn.Linear(opt.hidden_dim * 3, opt.polarities_dim)
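# Constructor sketch for AEN_GloVe: embedding_matrix can be any
# (vocab_size, embed_dim) float array, normally filled with GloVe vectors.
# The random stand-in matrix and the field values are illustrative assumptions.
import numpy as np
from types import SimpleNamespace

opt = SimpleNamespace(dropout=0.1, embed_dim=300, hidden_dim=300, polarities_dim=3)
embedding_matrix = np.random.rand(5000, opt.embed_dim)  # stand-in for GloVe weights
model = AEN_GloVe(embedding_matrix, opt)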
def __init__(self, embedding_matrix, opt):
    super(IAN, self).__init__()
    self.opt = opt
    self.embed = nn.Embedding.from_pretrained(
        torch.tensor(embedding_matrix, dtype=torch.float))
    # separate LSTMs encode the context and the aspect term
    self.lstm_context = DynamicLSTM(opt.embed_dim, opt.hidden_dim,
                                    num_layers=1, batch_first=True)
    self.lstm_aspect = DynamicLSTM(opt.embed_dim, opt.hidden_dim,
                                   num_layers=1, batch_first=True)
    # interactive attention: each sequence attends over the other's representation
    self.attention_aspect = Attention(opt.hidden_dim, score_function='bi_linear')
    self.attention_context = Attention(opt.hidden_dim, score_function='bi_linear')
    # concatenated aspect and context vectors feed the classifier
    self.dense = nn.Linear(opt.hidden_dim * 2, opt.polarities_dim)
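# Hedged sketch of the interactive-attention dataflow these layers support.
# The actual forward pass is defined elsewhere in the class; this outline is
# an assumption following the IAN design, where each sequence attends over
# the other's pooled summary:
#   context, _ = lstm_context(embed(context_ids), context_len)
#   aspect,  _ = lstm_aspect(embed(aspect_ids), aspect_len)
#   aspect_final, _  = attention_aspect(aspect, mean(context))   # aspect attends to context summary
#   context_final, _ = attention_context(context, mean(aspect))  # context attends to aspect summary
#   logits = dense(concat(aspect_final, context_final))          # hidden_dim * 2 -> polarities_dim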
def __init__(self, args, word_embeddings: TextFieldEmbedder, vocab: Vocabulary,
             domain_info: bool = True) -> None:
    super().__init__(vocab)
    # parameters
    self.args = args
    self.word_embeddings = word_embeddings
    self.domain = domain_info
    # layers
    self.event_embedding = EventEmbedding(args, self.word_embeddings)
    self.event_type_embedding = EventTypeEmbedding(args, self.word_embeddings)
    self.lstm = LSTM(input_size=self.args.embedding_size,
                     hidden_size=self.args.hidden_size)
    self.W_c = Linear(self.args.embedding_size, self.args.hidden_size, bias=False)
    self.W_e = Linear(self.args.hidden_size, self.args.hidden_size, bias=False)
    self.relu = ReLU()
    self.linear = Linear(self.args.hidden_size, self.args.embedding_size)
    self.attention = Attention(self.args.hidden_size, score_function='mlp')
    self.score = Score(self.args.embedding_size, self.args.embedding_size,
                       threshold=self.args.threshold)
    # metrics
    self.accuracy = BooleanAccuracy()
    self.f1_score = F1Measure(positive_label=1)
    self.loss_function = BCELoss()
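# Constructor-contract sketch: `args` must expose embedding_size, hidden_size,
# and threshold, which are the fields read above; the values below are
# illustrative assumptions. The TextFieldEmbedder and Vocabulary come from
# the usual AllenNLP data pipeline.
from types import SimpleNamespace

args = SimpleNamespace(embedding_size=300, hidden_size=128, threshold=0.5)
# model = <this class>(args, word_embeddings=embedder, vocab=vocab)  # class name not shown above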
def __init__(self, args, word_embeddings: TextFieldEmbedder, vocab: Vocabulary) -> None:
    super().__init__(vocab)
    # parameters
    self.args = args
    self.word_embeddings = word_embeddings
    # gate: paired W_*/U_* projections in the GRU style, each producing a scalar
    self.W_z = nn.Linear(self.args.embedding_size, 1, bias=False)
    self.U_z = nn.Linear(self.args.embedding_size, 1, bias=False)
    self.W_r = nn.Linear(self.args.embedding_size, 1, bias=False)
    self.U_r = nn.Linear(self.args.embedding_size, 1, bias=False)
    self.W = nn.Linear(self.args.embedding_size, 1, bias=False)
    self.U = nn.Linear(self.args.embedding_size, 1, bias=False)
    # layers
    self.event_embedding = EventEmbedding(args, self.word_embeddings)
    self.attention = Attention(self.args.embedding_size, score_function='mlp')
    self.sigmoid = Sigmoid()
    self.tanh = Tanh()
    self.score = Score(self.args.embedding_size, self.args.embedding_size,
                       threshold=self.args.threshold)
    # metrics
    self.accuracy = BooleanAccuracy()
    self.f1_score = F1Measure(positive_label=1)
    self.loss_function = BCELoss()
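# Hedged reading of the gate above. The W_z/U_z, W_r/U_r, W/U pairing with
# Sigmoid and Tanh matches standard GRU gating; the forward pass is not shown
# here, so the following is an assumption, for two event embeddings e1, e2:
#   z = sigmoid(W_z(e1) + U_z(e2))   # scalar update gate
#   r = sigmoid(W_r(e1) + U_r(e2))   # scalar reset gate
#   h = tanh(W(e1) + U(r * e2))      # candidate score
# with the outputs combined in the usual GRU update before scoring.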
def __init__(self, embedding_matrix, opt):
    super(MemNet, self).__init__()
    self.opt = opt
    self.embed = nn.Embedding.from_pretrained(
        torch.tensor(embedding_matrix, dtype=torch.float))
    self.squeeze_embedding = SqueezeEmbedding(batch_first=True)
    # content attention over the external memory (context word embeddings)
    self.attention = Attention(opt.embed_dim, score_function='mlp')
    # linear transform applied to the aspect vector between hops
    self.x_linear = nn.Linear(opt.embed_dim, opt.embed_dim)
    self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)
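# Hedged sketch of the deep-memory-network hops these layers implement,
# following Tang et al.'s MemNet design; the hop count and the exact forward
# wiring live outside this constructor, so this outline is an assumption:
#   memory = squeeze_embedding(embed(context_ids), context_len)  # external memory
#   x = mean(embed(aspect_ids))                                  # initial aspect vector
#   for each hop:
#       x = attention(memory, x) + x_linear(x)                   # attended memory + linear carry
#   logits = dense(x)                                            # embed_dim -> polarities_dim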