Example #1
    def __init__(self, embedding_matrix, opt):
        super(ATAE_LSTM, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()
        self.lstm = DynamicLSTM(opt.embed_dim * 2,
                                opt.hidden_dim,
                                num_layers=1,
                                batch_first=True)
        self.attention = NoQueryAttention(opt.hidden_dim + opt.embed_dim,
                                          score_function='bi_linear')
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)

        self.cls_lstm = DynamicLSTM(opt.embed_dim,
                                    opt.hidden_dim,
                                    num_layers=1,
                                    batch_first=True,
                                    bidirectional=True)
        self.cls_asp_lstm = DynamicLSTM(opt.embed_dim,
                                        opt.hidden_dim,
                                        num_layers=1,
                                        batch_first=True,
                                        bidirectional=True)
        self.h_h_proj = nn.Linear(4 * opt.hidden_dim, opt.hidden_dim)
        self.cls_dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
        self.dense_total = nn.Linear(opt.hidden_dim + opt.hidden_dim,
                                     opt.polarities_dim)

        self.conv = nn.Conv1d(300, 2 * opt.hidden_dim, 3)
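
For orientation, here is a minimal instantiation sketch for the constructor above, assuming the ATAE_LSTM class and its layer dependencies are importable. Only the attribute names (embed_dim, hidden_dim, polarities_dim) come from the code; the concrete values and the placeholder embedding matrix are assumptions.

import numpy as np
from types import SimpleNamespace

# Hypothetical hyperparameters; only the field names are taken from the constructor.
opt = SimpleNamespace(embed_dim=300, hidden_dim=300, polarities_dim=3)
# Placeholder standing in for a real (vocab_size x embed_dim) GloVe matrix.
embedding_matrix = np.zeros((5000, opt.embed_dim), dtype='float32')
model = ATAE_LSTM(embedding_matrix, opt)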
Example #2
    def __init__(self, embedding_matrix, opt):
        super(ATAE_LSTM, self).__init__()
        self.opt = opt  # parsed command-line arguments
        # word embedding layer initialized with the pre-trained GloVe matrix for fine-tuning
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()  # instance of SqueezeEmbedding from layers.squeeze_embedding
        # DynamicLSTM from layers.dynamic_rnn with input_size=opt.embed_dim*2 and hidden_size=opt.hidden_dim
        self.lstm = DynamicLSTM(opt.embed_dim*2, opt.hidden_dim, num_layers=1, batch_first=True)
        # NoQueryAttention from layers.attention with embed_dim=opt.hidden_dim+opt.embed_dim and a 'bi_linear' score function
        self.attention = NoQueryAttention(opt.hidden_dim+opt.embed_dim, score_function='bi_linear')
        # linear output layer with in_features=opt.hidden_dim, out_features=opt.polarities_dim
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
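
The constructors in Examples #1 and #2 only declare layers. As a rough guide to how they are meant to fit together, here is a self-contained sketch of the ATAE-LSTM forward pattern using stock PyTorch instead of the repo's SqueezeEmbedding/DynamicLSTM/NoQueryAttention helpers; padding and sequence packing are omitted, and all names here are illustrative rather than the repo's API.

import torch
import torch.nn as nn

class ATAELSTMSketch(nn.Module):
    # Stock-PyTorch sketch of the aspect-embedding + attention pattern above.
    def __init__(self, embedding_matrix, embed_dim=300, hidden_dim=300, polarities_dim=3):
        super().__init__()
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.lstm = nn.LSTM(embed_dim * 2, hidden_dim, batch_first=True)
        self.attn = nn.Linear(hidden_dim + embed_dim, 1)
        self.dense = nn.Linear(hidden_dim, polarities_dim)

    def forward(self, text_indices, aspect_indices):
        x = self.embed(text_indices)                        # (B, T, E) word embeddings
        asp = self.embed(aspect_indices).mean(dim=1)        # (B, E) averaged aspect embedding
        asp_rep = asp.unsqueeze(1).expand(-1, x.size(1), -1)
        h, _ = self.lstm(torch.cat([x, asp_rep], dim=-1))   # aspect-aware hidden states (B, T, H)
        score = torch.softmax(self.attn(torch.cat([h, asp_rep], dim=-1)).squeeze(-1), dim=-1)
        r = torch.bmm(score.unsqueeze(1), h).squeeze(1)     # attention-weighted sentence summary (B, H)
        return self.dense(r)                                # sentiment logits (B, polarities_dim)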
Example #3
    def __init__(self, embedding_matrix, opt):
        super(Gated_CNN, self).__init__()
        self.embed = nn.Embedding.from_pretrained(
            torch.tensor(embedding_matrix, dtype=torch.float))

        self.in_layer = GLU_Block(opt.kernel_size, opt.embed_dim,
                                  opt.in_channels, opt.downbot)
        self.squeeze_embedding = SqueezeEmbedding()
        self.glu_layers = self.make_glu_layers(opt)
        self.attention = NoQueryAttention(opt.in_channels,
                                          score_function='bi_linear')
        self.dense = nn.Linear(opt.in_channels, opt.polarities_dim)
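
As with Example #1, a hypothetical instantiation for the Gated_CNN constructor. The attribute names (kernel_size, embed_dim, in_channels, downbot, polarities_dim) come from the code above; the values, and the reading of downbot as a bottleneck factor for GLU_Block, are assumptions, and make_glu_layers may read further opt fields not visible here.

import numpy as np
from types import SimpleNamespace

# Hypothetical settings; only the attribute names are taken from the constructor.
opt = SimpleNamespace(kernel_size=3, embed_dim=300, in_channels=128,
                      downbot=2, polarities_dim=3)
embedding_matrix = np.zeros((5000, opt.embed_dim), dtype='float32')
model = Gated_CNN(embedding_matrix, opt)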
Example #4
    def __init__(self, config, opt):
        super(ATAE_LSTM, self).__init__()
        self.opt = opt
        # self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.bert = BertModel(config)
        self.squeeze_embedding = SqueezeEmbedding()
        self.lstm = DynamicLSTM(opt.embed_dim * 2,
                                opt.hidden_dim,
                                num_layers=1,
                                batch_first=True)
        self.attention = NoQueryAttention(opt.hidden_dim + opt.embed_dim,
                                          score_function='bi_linear')
        self.dense = nn.Linear(opt.hidden_dim, opt.output_dim)
Example #5
    def __init__(self, bert, opt):
        super(ATAE_LSTM, self).__init__()
        self.opt = opt
        self.bert = bert
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        self.lstm = DynamicLSTM(opt.bert_dim,
                                opt.hidden_dim,
                                num_layers=1,
                                batch_first=True)
        self.attention = NoQueryAttention(opt.hidden_dim + opt.bert_dim,
                                          score_function='bi_linear')
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
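
Unlike Example #4, which builds a BERT encoder from a config object, this variant receives an already constructed bert module. A minimal wiring sketch, assuming the HuggingFace transformers package and that opt carries the fields read above (bert_dim, hidden_dim, dropout, polarities_dim); the concrete values are assumptions.

from types import SimpleNamespace
from transformers import BertModel

bert = BertModel.from_pretrained('bert-base-uncased')  # 768-dim hidden states
opt = SimpleNamespace(bert_dim=768, hidden_dim=300, dropout=0.1, polarities_dim=3)
model = ATAE_LSTM(bert, opt)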
Example #6
    def __init__(
            self,
            config='a class with num_attention_heads, hidden_size, attention_probs_dropout_prob, output_attentions',
            bert_dir='/mnt/sda1/bert/uncased_L-12_H-768_A-12',
            drop=0.0,
            head_type_vec=[1] * 3 + [0] * 9,
            L=80,
            win_size=5,
            bert_dim=768,
            num_class=3):
        super(VH_BERT_TSA, self).__init__()
        self.register_buffer('att_mask',
                             self.get_atten_mask(head_type_vec, L, win_size))
        self.text_bert = VHBertModel.from_pretrained(bert_dir)
        self.aspect_bert = BertModel.from_pretrained(bert_dir)
        self.aspect_self_att_pooler = SelfAttention(config, L)
        self.aspect_emphasize_att = NoQueryAttention(embed_dim=bert_dim * 2,
                                                     hidden_dim=bert_dim,
                                                     out_dim=bert_dim)
        self.reduce2_bert_dim_linear = nn.Linear(bert_dim * 2, bert_dim)
        self.reduce2_num_class_linear = nn.Linear(bert_dim, num_class)
        self.drop = drop
        self.L = L
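
A note on register_buffer in Example #6: the attention mask is stored as a buffer, so it follows the module across devices and is saved in state_dict, but it is not a trainable parameter. A minimal standalone illustration (MaskHolder is a hypothetical class used only for this demonstration):

import torch
import torch.nn as nn

class MaskHolder(nn.Module):
    def __init__(self, L=80):
        super().__init__()
        # Buffers move with .to(device)/.cuda() and appear in state_dict,
        # but are excluded from parameters() and receive no gradients.
        self.register_buffer('att_mask', torch.ones(L, L))

m = MaskHolder()
print('att_mask' in m.state_dict())                   # True
print(any(b is m.att_mask for b in m.buffers()))      # True
print(any(p is m.att_mask for p in m.parameters()))   # False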