Example #1
0
 def __init__(self, config, pretrained_word_embedding):
     """Build the NAML-style news encoder.

     Args:
         config: hyper-parameter namespace (embedding dims, filter count,
             window size, query dim, dropout, category vocabulary size).
         pretrained_word_embedding: tensor of pretrained word vectors used
             to seed the shared word-embedding table.
     """
     super().__init__()
     self.config = config
     # One word-embedding table shared by the title and abstract encoders.
     shared_word_embedding = nn.Embedding.from_pretrained(
         pretrained_word_embedding,
         freeze=config.freeze_word_embeddings,
         padding_idx=0)
     # One category table shared by category and sub-category encoders.
     shared_category_embedding = nn.Embedding(config.num_categories,
                                              config.category_embedding_dim,
                                              padding_idx=0)
     # Title and abstract use identically-configured (but separately
     # parameterized) text encoders, applied per news item.
     text_encoder_args = (shared_word_embedding, config.word_embedding_dim,
                          config.num_filters, config.window_size,
                          config.query_vector_dim,
                          config.dropout_probability)
     self.title_encoder = TimeDistributed(TextEncoder(*text_encoder_args),
                                          batch_first=True)
     self.abstract_encoder = TimeDistributed(TextEncoder(*text_encoder_args),
                                             batch_first=True)
     self.category_encoder = CategoryEncoder(shared_category_embedding,
                                             config.category_embedding_dim,
                                             config.num_filters)
     self.sub_category_encoder = CategoryEncoder(shared_category_embedding,
                                                 config.category_embedding_dim,
                                                 config.num_filters)
     # Fuses the per-view vectors into a single news representation.
     self.final_attention = AdditiveAttention(config.query_vector_dim,
                                              config.num_filters)
Example #2
0
 def __init__(self, config):
     """Build the user encoder: multi-head self-attention over clicked-news
     vectors followed by additive-attention pooling.

     Args:
         config: hyper-parameter namespace (embedding dim, attention heads,
             query vector dim).
     """
     super().__init__()
     self.config = config
     embed_dim = config.word_embedding_dim
     self.mh_selfattention = nn.MultiheadAttention(
         embed_dim, config.num_attention_heads)
     self.additive_attention = AdditiveAttention(config.query_vector_dim,
                                                 embed_dim)
Example #3
0
 def __init__(self, word_embedding, word_embedding_dim, num_filters,
              window_size, query_vector_dim, dropout_probability):
     """Build a CNN + additive-attention text encoder.

     Args:
         word_embedding: embedding module used to look up word vectors.
         word_embedding_dim: dimensionality of each word vector.
         num_filters: number of convolutional output channels.
         window_size: CNN window width; must be a positive odd integer so
             the symmetric padding keeps the sequence length unchanged.
         query_vector_dim: dimension of the additive-attention query.
         dropout_probability: dropout rate (stored for the forward pass).

     Raises:
         ValueError: if window_size is not a positive odd integer.
     """
     super(TextEncoder, self).__init__()
     self.dropout_probability = dropout_probability
     self.word_embedding = word_embedding
     # Explicit exception instead of `assert`: asserts are silently
     # stripped under `python -O`, which would let a bad window_size
     # through and silently change the output sequence length.
     if window_size < 1 or window_size % 2 != 1:
         raise ValueError(
             'window_size must be a positive odd integer, got '
             '{!r}'.format(window_size))
     # (window_size - 1) // 2 rows of zero padding keep the output
     # length equal to the input length for odd window sizes.
     self.CNN = nn.Conv1d(1,
                          num_filters, (window_size, word_embedding_dim),
                          padding=((window_size - 1) // 2, 0))
     self.additive_attention = AdditiveAttention(query_vector_dim,
                                                 num_filters)
Example #4
0
 def __init__(self, config, pretrained_word_embedding):
     """Build the NRMS-style news encoder: pretrained word embeddings,
     multi-head self-attention over words, then additive pooling.

     Args:
         config: hyper-parameter namespace (embedding dim, attention heads,
             query vector dim, freeze flag).
         pretrained_word_embedding: tensor of pretrained word vectors.
     """
     super().__init__()
     self.config = config
     embed_dim = config.word_embedding_dim
     # Index 0 is the padding token; optionally frozen via config.
     self.word_embedding = nn.Embedding.from_pretrained(
         pretrained_word_embedding,
         padding_idx=0,
         freeze=config.freeze_word_embeddings)
     self.mh_selfattention = nn.MultiheadAttention(
         embed_dim, config.num_attention_heads)
     self.additive_attention = AdditiveAttention(config.query_vector_dim,
                                                 embed_dim)
Example #5
0
 def __init__(self, config, pretrained_word_embedding):
     """Build a CNN-based title encoder over pretrained word embeddings.

     Args:
         config: hyper-parameter namespace; must provide window_size (a
             positive odd integer), num_filters, word_embedding_dim,
             query_vector_dim and freeze_word_embeddings.
         pretrained_word_embedding: tensor of pretrained word vectors.

     Raises:
         ValueError: if config.window_size is not a positive odd integer.
     """
     super(NewsEncoder, self).__init__()
     self.config = config
     self.word_embedding = nn.Embedding.from_pretrained(
         pretrained_word_embedding,
         freeze=config.freeze_word_embeddings,
         padding_idx=0)
     # Explicit exception instead of `assert`: asserts are silently
     # stripped under `python -O`, which would let a bad window_size
     # through and silently change the output sequence length.
     if config.window_size < 1 or config.window_size % 2 != 1:
         raise ValueError(
             'config.window_size must be a positive odd integer, got '
             '{!r}'.format(config.window_size))
     # (window_size - 1) // 2 rows of zero padding keep the output
     # length equal to the title length for odd window sizes.
     self.title_CNN = nn.Conv1d(
         1,
         config.num_filters,
         (config.window_size, config.word_embedding_dim),
         padding=((config.window_size - 1) // 2, 0))
     self.title_attention = AdditiveAttention(config.query_vector_dim,
                                              config.num_filters)
Example #6
0
 def __init__(self, config):
     """Build the user encoder: additive-attention pooling of the
     clicked-news vectors into a single user vector.

     Args:
         config: hyper-parameter namespace (query vector dim, filter count).
     """
     super().__init__()
     query_dim = config.query_vector_dim
     candidate_dim = config.num_filters
     self.additive_attention = AdditiveAttention(query_dim, candidate_dim)