Example #1
    def __init__(self, hidden_size, output_size, context_dim, embed, trained_aspect):
        super(align_WdeRnnEncoder, self).__init__()
        self.hidden_size = hidden_size
        # Bidirectional LSTM over the embedded tokens (hidden size 300 per direction).
        self.blstm = nn.LSTM(hidden_size, 300, bidirectional=True, batch_first=True)
        # Frozen lookup tables built from the pretrained word and aspect embeddings.
        self.embedded = nn.Embedding.from_pretrained(embed)
        self.aspect_embed = nn.Embedding.from_pretrained(trained_aspect)
        self.tanh = nn.Tanh()
        # Projects the concatenated forward/backward LSTM states back to hidden_size.
        self.hidden_layer = nn.Linear(hidden_size * 2, hidden_size)
        self.context_input_ = nn.Linear(600, 50)
        # context_dim is not used here, hence the 0 + hidden_size input width.
        self.embedding_layers = nn.Linear(0 + hidden_size, output_size)
        # Two-class classification head.
        self.classifier_layer = nn.Linear(output_size, 2)
        self.softmax = nn.Softmax(dim=2)
        # self.slf_attention = attention.MultiHeadAttention(600, 3)
        # self.slf_attention = attention.MultiHeadAttentionDotProduct(3, 600, 300, 300, 0.01)
        # self.Position_wise = attention.PositionwiseFeedForward(600, 600, 0.01)
        self.min_context = nn.Linear(300, 50)
        # attention, Gate, and config are project-local modules.
        self.attention = attention.NormalAttention(600, 50, 50)
        self.gate = Gate.Gate(300, 50, 50, 300)
        self.dropout = nn.Dropout(config.dropout)
Example #2
    def __init__(self, hidden_size, output_size, context_dim, embed):
        super(AttentionEncoder, self).__init__()
        # Multi-head dot-product self-attention from the project-local attention module.
        self.slf_attention = attention.MultiHeadAttentionDotProduct(3, 300, 300, 300)
        # self.slf_attention = attention.MultiHeadAttention(300, 300, 300, 3)
        self.hidden_size = hidden_size
        # Frozen lookup table built from the pretrained word embeddings.
        self.embedded = nn.Embedding.from_pretrained(embed)
        self.tanh = nn.Tanh()
        self.hidden_layer = nn.Linear(hidden_size, hidden_size)
        self.context_input_ = nn.Linear(300, 50)
        # context_dim is not used here, hence the 0 + hidden_size input width.
        self.embedding_layers = nn.Linear(0 + hidden_size, output_size)
        # Two-class classification head.
        self.classifier_layer = nn.Linear(output_size, 2)
        self.softmax = nn.Softmax(dim=2)
        # self.slf_attention = attention.MultiHeadAttention(600, 3)
        self.Position_wise = attention.PositionwiseFeedForward(300, 300)
        self.attention = attention.NormalAttention(300, 50, 50)
        self.gate = Gate.Gate(300, 50, 50, 300)
        self.dropout = nn.Dropout(config.dropout)
        # Frozen sinusoidal positional-encoding table (301 positions, 300 dims).
        self.position_enc = nn.Embedding.from_pretrained(
            get_sinusoid_encoding_table(301, 300, padding_idx=200),
            freeze=True)
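
The positional-encoding table above comes from a project-local helper. As an assumption based on the standard Transformer sinusoid formulation, get_sinusoid_encoding_table likely computes something like the sketch below, returning an (n_position, d_hid) tensor that nn.Embedding.from_pretrained then wraps with freeze=True:

import numpy as np
import torch

def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
    # angle[pos, i] = pos / 10000^(2 * (i // 2) / d_hid)
    table = np.array([[pos / np.power(10000, 2 * (i // 2) / d_hid)
                       for i in range(d_hid)]
                      for pos in range(n_position)])
    table[:, 0::2] = np.sin(table[:, 0::2])   # even dimensions use sine
    table[:, 1::2] = np.cos(table[:, 1::2])   # odd dimensions use cosine
    if padding_idx is not None:
        table[padding_idx] = 0.0              # all-zero vector for the padding position
    return torch.FloatTensor(table)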
Example #3
    def __init__(self, hidden_dim, output_size, context_dim, embed):
        super(WdeRnnEncoderFix, self).__init__()
        self.hidden_dim = hidden_dim
        self.embed_dim = config.embed_dim
        # Bidirectional LSTM over the embedded tokens.
        self.blstm = nn.LSTM(self.embed_dim,
                             self.hidden_dim,
                             bidirectional=True,
                             batch_first=True)
        # Frozen lookup table built from the pretrained word embeddings.
        self.embedded = nn.Embedding.from_pretrained(embed)
        # self.aspect_embed = nn.Embedding.from_pretrained(trained_aspect)
        self.tanh = nn.Tanh()
        # Projects the concatenated forward/backward LSTM states back to hidden_dim.
        self.hidden_layer = nn.Linear(hidden_dim * 2, hidden_dim)
        self.context_input_ = nn.Linear(600, 50)
        # context_dim is not used here, hence the 0 + hidden_dim input width.
        self.embedding_layers = nn.Linear(0 + hidden_dim, output_size)
        # self.slf_attention = attention.MultiHeadAttention(600, 3)
        # self.slf_attention = attention.MultiHeadAttentionDotProduct(3, 600, 300, 300, 0.01)
        # self.Position_wise = attention.PositionwiseFeedForward(600, 600, 0.01)
        # attention, Gate, config, and AspectMean are project-local modules.
        self.attention = attention.NormalAttention(600, 50, 50)
        self.gate = Gate.Gate(300, 50, 50, 300)
        self.min_context = nn.Linear(300, 50)

        self.aspect_mean = AspectMean(config.maxlen, if_expand=False)
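
A minimal instantiation sketch for the Example #3 encoder, assuming the project-local attention, Gate, config, and AspectMean modules are importable and config.embed_dim is 300; the vocabulary size and layer widths below are hypothetical placeholders:

import torch

# Hypothetical pretrained embedding matrix: 10,000-token vocabulary, 300-dim vectors.
pretrained_embed = torch.randn(10000, 300)

encoder = WdeRnnEncoderFix(hidden_dim=300,
                           output_size=100,
                           context_dim=50,
                           embed=pretrained_embed)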