Example #1
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.rnn_hidden_size = int(config['rnn_hidden_size'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len1 = config['left_slots'][0]
     self.right_name, self.seq_len2 = config['right_slots'][0]
     self.task_mode = config['training_mode']
     self.emb_layer = layers.EmbeddingEnhancedLayer(self.vocab_size,
                                                    self.emb_size,
                                                    zero_pad=True,
                                                    scale=False)
     self.rnn = layers.LSTMLayer(self.rnn_hidden_size)
     self.extract = layers.ExtractLastLayer()
     if self.task_mode == "pointwise":
         self.n_class = int(config['n_class'])
         self.relu_layer = layers.ReluLayer()
         self.fc1_layer = layers.FCLayer(self.rnn_hidden_size * 2,
                                         self.hidden_size)
         self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
     elif self.task_mode == "pairwise":
         self.fc1_layer = layers.FCLayer(self.rnn_hidden_size * 1,
                                         self.hidden_size)
         self.cos_layer = layers.CosineLayer()
     else:
         logging.error("training mode not supported")
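
Example #1 wires up an LSTM-based matching model: both texts share an enhanced embedding layer and an LSTM encoder, the last hidden state is extracted, and the head is either a two-layer classifier (pointwise) or a cosine-similarity scorer (pairwise). The constructor only reads a handful of config keys; a minimal sketch of a config dict that would satisfy it is shown below. The key names come from the code above, while the values and slot names are illustrative assumptions.

# Hypothetical config for the LSTM matching constructor above.
# Key names are taken from the code; values and slot names are assumptions.
config = {
    'vocabulary_size': 50000,
    'embedding_dim': 128,
    'rnn_hidden_size': 128,
    'hidden_size': 128,
    'left_slots': [('left', 32)],     # (slot name, sequence length)
    'right_slots': [('right', 32)],
    'training_mode': 'pointwise',     # or 'pairwise'
    'n_class': 2,                     # only read in pointwise mode
}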
Example #2
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.kernel_size = int(config['num_filters'])
     self.win_size1 = int(config['window_size_left'])
     self.win_size2 = int(config['window_size_right'])
     self.dpool_size1 = int(config['dpool_size_left'])
     self.dpool_size2 = int(config['dpool_size_right'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len1 = config['left_slots'][0]
     self.right_name, self.seq_len2 = config['right_slots'][0]
     self.emb_layer = layers.EmbeddingEnhancedLayer(self.vocab_size,
                                                    self.emb_size,
                                                    zero_pad=True,
                                                    scale=False)
     self.cnn_layer = layers.CNNDynamicPoolingLayer(
         self.seq_len1, self.seq_len2, self.win_size1, self.win_size2,
         self.dpool_size1, self.dpool_size2, self.kernel_size)
     self.relu_layer = layers.ReluLayer()
     self.tanh_layer = layers.TanhLayer()
     if 'match_mask' in config and config['match_mask'] != 0:
         self.match_mask = True
     else:
         self.match_mask = False
     self.fc1_layer = layers.FCLayer(
         self.kernel_size * self.dpool_size1 * self.dpool_size2,
         self.hidden_size)
     self.n_class = int(config['n_class'])
     self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
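
In Example #2 the first fully connected layer consumes the flattened output of the dynamic-pooling CNN, so its input width is num_filters * dpool_size_left * dpool_size_right. A quick sanity check of that width, using assumed values rather than numbers from the original config:

# Illustrative width check for fc1 above; the concrete values are assumptions.
num_filters = 8
dpool_size_left = 3
dpool_size_right = 10
fc1_in = num_filters * dpool_size_left * dpool_size_right
print(fc1_in)  # 240, i.e. kernel_size * dpool_size1 * dpool_size2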
Example #3
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.kernel_size = int(config['num_filters'])
     self.win_size1 = int(config['window_size_left'])
     self.win_size2 = int(config['window_size_right'])
     self.dpool_size1 = int(config['dpool_size_left'])
     self.dpool_size2 = int(config['dpool_size_right'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len1 = config['left_slots'][0]
     self.right_name, self.seq_len2 = config['right_slots'][0]
     self.task_mode = config['training_mode']
     self.emb_layer = layers.EmbeddingEnhancedLayer(self.vocab_size,
                                                    self.emb_size,
                                                    zero_pad=True,
                                                    scale=False)  # enhanced word-embedding layer
     self.cnn_layer = layers.CNNDynamicPoolingLayer(
         self.seq_len1, self.seq_len2, self.win_size1, self.win_size2,
         self.dpool_size1, self.dpool_size2, self.kernel_size)
     self.relu_layer = layers.ReluLayer()
     self.tanh_layer = layers.TanhLayer()
     if 'match_mask' in config and config['match_mask'] != 0:
         self.match_mask = True  # whether to account for padded regions
     else:
         self.match_mask = False
     self.fc1_layer = layers.FCLayer(
         self.kernel_size * self.dpool_size1 * self.dpool_size2,
         self.hidden_size)
     if self.task_mode == "pointwise":
         self.n_class = int(config['n_class'])
         self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
     elif self.task_mode == "pairwise":
         self.fc2_layer = layers.FCLayer(self.hidden_size, 1)
     else:
         logging.error("training mode not supported")
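
Example #3 extends the constructor of Example #2 with a training_mode switch: pointwise mode attaches an n_class classifier, while pairwise mode outputs a single matching score. A minimal config sketch, with key names taken from the code and assumed values:

# Hypothetical config for the dynamic-pooling CNN constructor above; values are assumptions.
config = {
    'vocabulary_size': 50000,
    'embedding_dim': 128,
    'num_filters': 8,
    'window_size_left': 3,
    'window_size_right': 3,
    'dpool_size_left': 3,
    'dpool_size_right': 10,
    'hidden_size': 128,
    'left_slots': [('left', 32)],
    'right_slots': [('right', 32)],
    'training_mode': 'pointwise',   # or 'pairwise'
    'match_mask': 1,                # non-zero enables masking of padded regions
    'n_class': 2,                   # only read in pointwise mode
}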
Example #4
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.kernel_size = int(config['num_filters'])
     self.win_size = int(config['window_size'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len = config['left_slots'][0]  # left text slot name and length
     self.right_name, self.seq_len = config['right_slots'][0]  # right text slot name and length (reuses seq_len; both sides share one length)
     self.task_mode = config['training_mode']
     self.emb_layer = layers.EmbeddingLayer(self.vocab_size,
                                            self.emb_size)  # embedding matrix
     self.cnn_layer = layers.CNNLayer(self.seq_len, self.emb_size,
                                      self.win_size,
                                      self.kernel_size)  # convolution + pooling layer
     self.relu_layer = layers.ReluLayer()  # activation layer
     self.concat_layer = layers.ConcatLayer()  # concatenation layer
     if self.task_mode == "pointwise":
         self.n_class = int(config['n_class'])
         self.fc1_layer = layers.FCLayer(
             2 * self.kernel_size,
             self.hidden_size)  # fully connected layer 1: 2*kernel_size -> hidden_size
         self.fc2_layer = layers.FCLayer(
             self.hidden_size, self.n_class)  # fully connected layer 2: hidden_size -> n_class
     elif self.task_mode == "pairwise":
         self.fc1_layer = layers.FCLayer(self.kernel_size, self.hidden_size)
         self.cos_layer = layers.CosineLayer()
     else:
         logging.error("training mode not supported")
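
Example #4 runs each text through a shared CNN encoder; in pointwise mode the two pooled feature vectors (kernel_size each) are concatenated before the classifier, which is why fc1 takes 2 * kernel_size inputs, whereas pairwise mode feeds a single kernel_size vector into fc1 and scores the pair with cosine similarity. A shapes-only sketch of that concatenation, with assumed sizes and plain Python lists standing in for tensors:

# Shapes-only illustration of the pointwise branch above; sizes are assumptions.
kernel_size = 256                        # num_filters
left_feat = [0.0] * kernel_size          # pooled CNN features of the left text
right_feat = [0.0] * kernel_size         # pooled CNN features of the right text
concat_feat = left_feat + right_feat     # what a concat layer would produce
assert len(concat_feat) == 2 * kernel_size   # matches fc1_layer's input width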
Example #5
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.kernel_size = int(config['num_filters'])
     self.win_size = int(config['window_size'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len = config['left_slots'][0]
     self.right_name, self.seq_len = config['right_slots'][0]
     self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
     self.cnn_layer = layers.CNNLayer(self.seq_len, self.emb_size,
                                      self.win_size, self.kernel_size)
     self.relu_layer = layers.ReluLayer()
     self.tanh_layer = layers.TanhLayer()
     self.concat_layer = layers.ConcatLayer()
     self.fc1_layer = layers.FCLayer(self.kernel_size, self.hidden_size)
     self.n_class = int(config['n_class'])
     self.fc2_layer = layers.FCLayer(2 * self.hidden_size, self.n_class)
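
Example #5 is a pointwise-only variant. Judging by the layer widths, each text's kernel_size feature vector appears to pass through fc1 separately, and the two hidden_size outputs are concatenated before fc2, which is why fc2 takes 2 * hidden_size inputs. A quick width check, with assumed numbers:

# Width check for the layer sizes above; the concrete numbers are assumptions.
kernel_size, hidden_size, n_class = 256, 128, 2
fc1_in, fc1_out = kernel_size, hidden_size        # per-text projection
fc2_in, fc2_out = 2 * hidden_size, n_class        # concatenation of both hidden vectors
print(fc1_in, fc1_out, fc2_in, fc2_out)           # 256 128 256 2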
Example #6
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.bow_size = int(config["bow_size"])
     self.left_name, self.seq_len = config["left_slots"][0]
     self.right_name, self.seq_len = config["right_slots"][0]
     self.task_mode = config['training_mode']
     self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
     self.seq_pool_layer = layers.SequencePoolingLayer()  # simple average pooling over the word vectors
     self.softsign_layer = layers.SoftsignLayer()  # activation layer
     if self.task_mode == "pointwise":
         self.n_class = int(config['n_class'])
         self.bow_layer = layers.FCLayer(self.emb_size * 2, self.bow_size)
         self.relu_layer = layers.ReluLayer()
         self.fc_layer = layers.FCLayer(self.bow_size, self.n_class)
     elif self.task_mode == "pairwise":
         self.bow_layer = layers.FCLayer(self.emb_size, self.bow_size)
         self.cos_layer = layers.CosineLayer()  # cosine similarity
     else:
         logging.error("training mode not supported")
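
Example #6 is a bag-of-words model: the word vectors of each text are average-pooled and squashed with softsign; the pointwise head's bow_layer then takes 2 * embedding_dim inputs (presumably the concatenation of both pooled texts), while the pairwise head projects each text separately and compares them with cosine similarity. A minimal config sketch, with key names from the code and assumed values:

# Hypothetical config for the BOW constructor above; values are assumptions.
config = {
    'vocabulary_size': 50000,
    'embedding_dim': 128,
    'bow_size': 128,
    'left_slots': [('left', 32)],
    'right_slots': [('right', 32)],
    'training_mode': 'pairwise',    # or 'pointwise'
    'n_class': 2,                   # only read in pointwise mode
}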