Example #1
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.kernel_size = int(config['num_filters'])
     self.win_size1 = int(config['window_size_left'])
     self.win_size2 = int(config['window_size_right'])
     self.dpool_size1 = int(config['dpool_size_left'])
     self.dpool_size2 = int(config['dpool_size_right'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len1 = config['left_slots'][0]
     self.right_name, self.seq_len2 = config['right_slots'][0]
     self.emb_layer = layers.EmbeddingEnhancedLayer(self.vocab_size,
                                                    self.emb_size,
                                                    zero_pad=True,
                                                    scale=False)
     self.cnn_layer = layers.CNNDynamicPoolingLayer(
         self.seq_len1, self.seq_len2, self.win_size1, self.win_size2,
         self.dpool_size1, self.dpool_size2, self.kernel_size)
     self.relu_layer = layers.ReluLayer()
     self.tanh_layer = layers.TanhLayer()
     if 'match_mask' in config and config['match_mask'] != 0:
         self.match_mask = True
     else:
         self.match_mask = False
     self.fc1_layer = layers.FCLayer(
         self.kernel_size * self.dpool_size1 * self.dpool_size2,
         self.hidden_size)
     self.n_class = int(config['n_class'])
     self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
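Every constructor in these examples is driven by a plain config dict. A minimal sketch that satisfies every key read above, with hypothetical values and slot names (left_slots/right_slots hold (slot name, sequence length) pairs):

 # Hypothetical config for the CNN-with-dynamic-pooling example above.
 config = {
     'vocabulary_size': 10000,
     'embedding_dim': 128,
     'num_filters': 8,
     'window_size_left': 3,
     'window_size_right': 3,
     'dpool_size_left': 4,
     'dpool_size_right': 4,
     'hidden_size': 64,
     'left_slots': [('left', 32)],    # (slot name, sequence length)
     'right_slots': [('right', 32)],
     'match_mask': 1,                 # non-zero sets self.match_mask = True
     'n_class': 2,
 }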
Example #2
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.k_max_num = int(config['k_max_num'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len1 = config['left_slots'][0]
     self.right_name, self.seq_len2 = config['right_slots'][0]
     self.task_mode = config['training_mode']
     self.emb_layer = layers.EmbeddingEnhancedLayer(self.vocab_size,
                                                    self.emb_size,
                                                    zero_pad=True,
                                                    scale=False)
     self.fw_cell = tf.nn.rnn_cell.LSTMCell(num_units=self.hidden_size,
                                            state_is_tuple=True)
     self.bw_cell = tf.nn.rnn_cell.LSTMCell(num_units=self.hidden_size,
                                            state_is_tuple=True)
     if 'match_mask' in config and config['match_mask'] != 0:
         self.match_mask = True
     else:
         self.match_mask = False
     if self.task_mode == "pointwise":
         self.n_class = int(config['n_class'])
         self.fc2_layer = layers.FCLayer(self.k_max_num, self.n_class)
     elif self.task_mode == "pairwise":
         self.fc2_layer = layers.FCLayer(self.k_max_num, 1)
     else:
         logging.error("training mode not supported")
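The output layer above switches on config['training_mode']: 'pointwise' maps the k_max_num-dimensional features to n_class outputs, while 'pairwise' maps them to a single matching score. A hypothetical pointwise config:

 # Hypothetical config for the bidirectional-LSTM example above.
 config = {
     'vocabulary_size': 10000,
     'embedding_dim': 128,
     'k_max_num': 5,
     'hidden_size': 64,
     'left_slots': [('left', 32)],
     'right_slots': [('right', 32)],
     'training_mode': 'pointwise',   # or 'pairwise' for a single score
     'match_mask': 0,
     'n_class': 2,
 }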
Example #3
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.rnn_hidden_size = int(config['rnn_hidden_size'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len1 = config['left_slots'][0]
     self.right_name, self.seq_len2 = config['right_slots'][0]
     self.task_mode = config['training_mode']
     self.emb_layer = layers.EmbeddingEnhancedLayer(self.vocab_size,
                                                    self.emb_size,
                                                    zero_pad=True,
                                                    scale=False)
     self.rnn = layers.LSTMLayer(self.rnn_hidden_size)
     self.extract = layers.ExtractLastLayer()
     if self.task_mode == "pointwise":
         self.n_class = int(config['n_class'])
         self.fc1_layer = layers.FCLayer(self.rnn_hidden_size * 2,
                                         self.hidden_size)
         self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
     elif self.task_mode == "pairwise":
         self.fc1_layer = layers.FCLayer(self.rnn_hidden_size * 1,
                                         self.hidden_size)
         self.cos_layer = layers.CosineLayer()
     else:
         logging.error("training mode not supported")
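In the pointwise branch above, fc1 takes rnn_hidden_size * 2 inputs, presumably the concatenated LSTM representations of the left and right texts, while the pairwise branch projects each side separately and compares them with the cosine layer. A hypothetical config:

 # Hypothetical config for the single-LSTM example above.
 config = {
     'vocabulary_size': 10000,
     'embedding_dim': 128,
     'rnn_hidden_size': 128,
     'hidden_size': 128,
     'left_slots': [('left', 32)],
     'right_slots': [('right', 32)],
     'training_mode': 'pointwise',   # 'pairwise' routes through the cosine layer
     'n_class': 2,
 }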
Example #4
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.kernel_size = int(config['num_filters'])
     self.win_size1 = int(config['window_size_left'])
     self.win_size2 = int(config['window_size_right'])
     self.dpool_size1 = int(config['dpool_size_left'])
     self.dpool_size2 = int(config['dpool_size_right'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len1 = config['left_slots'][0]
     self.right_name, self.seq_len2 = config['right_slots'][0]
     self.task_mode = config['training_mode']
     self.emb_layer = layers.EmbeddingEnhancedLayer(self.vocab_size,
                                                    self.emb_size,
                                                    zero_pad=True,
                                                    scale=False)  # enhanced embedding layer
     self.cnn_layer = layers.CNNDynamicPoolingLayer(
         self.seq_len1, self.seq_len2, self.win_size1, self.win_size2,
         self.dpool_size1, self.dpool_size2, self.kernel_size)
     self.relu_layer = layers.ReluLayer()
     self.tanh_layer = layers.TanhLayer()
     if 'match_mask' in config and config['match_mask'] != 0:
         self.match_mask = True  # whether to mask the padded regions
     else:
         self.match_mask = False
     self.fc1_layer = layers.FCLayer(
         self.kernel_size * self.dpool_size1 * self.dpool_size2,
         self.hidden_size)
     if self.task_mode == "pointwise":
         self.n_class = int(config['n_class'])
         self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
     elif self.task_mode == "pairwise":
         self.fc2_layer = layers.FCLayer(self.hidden_size, 1)
     else:
         logging.error("training mode not supported")
Example #5
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.kernel_size = int(config['num_filters'])
     self.win_size = int(config['window_size'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len = config['left_slots'][0]  # left text length
     self.right_name, self.seq_len = config['right_slots'][0]  # right text length
     self.task_mode = config['training_mode']
     self.emb_layer = layers.EmbeddingLayer(self.vocab_size,
                                            self.emb_size)  # embedding matrix
     self.cnn_layer = layers.CNNLayer(self.seq_len, self.emb_size,
                                      self.win_size,
                                      self.kernel_size)  # convolution + pooling layer
     self.relu_layer = layers.ReluLayer()  # activation layer
     self.concat_layer = layers.ConcatLayer()  # concatenation layer
     if self.task_mode == "pointwise":
         self.n_class = int(config['n_class'])
         self.fc1_layer = layers.FCLayer(
             2 * self.kernel_size,
             self.hidden_size)  # fully connected layer 1: 2 * kernel_size -> hidden_size
         self.fc2_layer = layers.FCLayer(
             self.hidden_size, self.n_class)  # fully connected layer 2: hidden_size -> n_class
     elif self.task_mode == "pairwise":
         self.fc1_layer = layers.FCLayer(self.kernel_size, self.hidden_size)
         self.cos_layer = layers.CosineLayer()
     else:
         logging.error("training mode not supported")
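The same pointwise/pairwise split appears here: fc1 takes 2 * num_filters inputs in pointwise mode, presumably because the pooled CNN features of both texts are concatenated, and num_filters per side in pairwise mode before the cosine comparison. A hypothetical config:

 # Hypothetical config for the single-window CNN example above.
 config = {
     'vocabulary_size': 10000,
     'embedding_dim': 128,
     'num_filters': 256,
     'window_size': 3,
     'hidden_size': 128,
     'left_slots': [('left', 32)],
     'right_slots': [('right', 32)],
     'training_mode': 'pairwise',    # single matching score via cosine
     'n_class': 2,                   # only read in pointwise mode
 }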
Example #6
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.bow_size = int(config["bow_size"])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len = config["left_slots"][0]
     self.right_name, self.seq_len = config["right_slots"][0]
     self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
     self.seq_pool_layer = layers.SequencePoolingLayer()
     self.softsign_layer = layers.SoftsignLayer()
     self.bow_layer = layers.FCLayer(self.emb_size, self.bow_size)
     self.n_class = int(config['n_class'])
     self.fc_layer = layers.FCLayer(2 * self.hidden_size, self.n_class)
Example #7
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.rnn_hidden_size = int(config['rnn_hidden_size'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len1 = config['left_slots'][0]
     self.right_name, self.seq_len2 = config['right_slots'][0]
     self.emb_layer = layers.EmbeddingEnhancedLayer(self.vocab_size,
                                                    self.emb_size,
                                                    zero_pad=True,
                                                    scale=False)
     self.rnn = layers.LSTMLayer(self.rnn_hidden_size)
     self.extract = layers.ExtractLastLayer()
     self.n_class = int(config['n_class'])
     self.fc1_layer = layers.FCLayer(self.rnn_hidden_size * 2,
                                     self.hidden_size)
     self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
Example #8
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.kernel_size = int(config['num_filters'])
     self.win_size = int(config['window_size'])
     self.hidden_size = int(config['hidden_size'])
     self.left_name, self.seq_len = config['left_slots'][0]
     self.right_name, self.seq_len = config['right_slots'][0]
     self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
     self.cnn_layer = layers.CNNLayer(self.seq_len, self.emb_size,
                                      self.win_size, self.kernel_size)
     self.relu_layer = layers.ReluLayer()
     self.tanh_layer = layers.TanhLayer()
     self.concat_layer = layers.ConcatLayer()
     self.fc1_layer = layers.FCLayer(self.kernel_size, self.hidden_size)
     self.n_class = int(config['n_class'])
     self.fc2_layer = layers.FCLayer(2 * self.hidden_size, self.n_class)
Example #9
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.kernel_num = int(config['kernel_num'])
     self.left_name, self.seq_len1 = config['left_slots'][0]
     self.right_name, self.seq_len2 = config['right_slots'][0]
     self.lamb = float(config['lamb'])
     self.task_mode = config['training_mode']
     self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
     self.sim_mat_layer = layers.SimilarityMatrixLayer()
     self.kernel_pool_layer = layers.KernelPoolingLayer(self.kernel_num, self.lamb)
     self.tanh_layer = layers.TanhLayer()
     if self.task_mode == "pointwise":
         self.n_class = int(config['n_class'])
         self.fc_layer = layers.FCLayer(self.kernel_num, self.n_class)
     elif self.task_mode == "pairwise":
         self.fc_layer = layers.FCLayer(self.kernel_num, 1)
     else:
         logging.error("training mode not supported")
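This example scores a word-level similarity matrix with kernel pooling, K-NRM style; 'lamb' is passed straight into KernelPoolingLayer and presumably scales the kernel widths (an assumption, not confirmed by the snippet). A hypothetical config:

 # Hypothetical config for the kernel-pooling example above.
 config = {
     'vocabulary_size': 10000,
     'embedding_dim': 128,
     'kernel_num': 11,               # number of pooling kernels
     'lamb': 0.5,                    # assumed kernel-width scaling factor
     'left_slots': [('left', 32)],
     'right_slots': [('right', 32)],
     'training_mode': 'pointwise',
     'n_class': 2,
 }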
Example #10
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.bow_size = int(config["bow_size"])
     self.left_name, self.seq_len = config["left_slots"][0]
     self.right_name, self.seq_len = config["right_slots"][0]
     self.task_mode = config['training_mode']
     self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
     self.seq_pool_layer = layers.SequencePoolingLayer()  # simple averaging of the word embeddings
     self.softsign_layer = layers.SoftsignLayer()  # activation layer
     if self.task_mode == "pointwise":
         self.n_class = int(config['n_class'])
         self.bow_layer = layers.FCLayer(self.emb_size * 2, self.bow_size)
         self.relu_layer = layers.ReluLayer()
         self.fc_layer = layers.FCLayer(self.bow_size, self.n_class)
     elif self.task_mode == "pairwise":
         self.bow_layer = layers.FCLayer(self.emb_size, self.bow_size)
         self.cos_layer = layers.CosineLayer()  # cosine similarity
     else:
         logging.error("training mode not supported")
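For the bag-of-words model above, the pointwise branch feeds a 2 * emb_size vector (presumably the concatenated pooled embeddings of both texts) through the bow layer into the classifier, while the pairwise branch projects each side to bow_size and compares them with cosine similarity. A hypothetical config:

 # Hypothetical config for the bag-of-words example above.
 config = {
     'vocabulary_size': 10000,
     'embedding_dim': 128,
     'bow_size': 128,
     'left_slots': [('left', 32)],
     'right_slots': [('right', 32)],
     'training_mode': 'pointwise',   # 'pairwise' routes through the cosine layer
     'n_class': 2,
 }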
Example #11
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.kernel_num = int(config['num_filters'])
     self.win_size = int(config['window_size'])
     self.pool_size = int(config['pool_size'])
     self.hidden_size = int(config['hidden_size'])
     self.dropout_rate = float(config['dropout_rate'])
     self.drop_out = layers.DropoutLayer(drop_rate=self.dropout_rate)
     self.tanh_layer = layers.TanhLayer()
     self.left_name, self.seq_len = config['left_slots'][0]
     self.right_name, self.seq_len = config['right_slots'][0]
     self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
     self.cnn_layer = layers.CNNLayerConfig(seq_len=self.seq_len, emb_dim=self.emb_size, win_height=1,
                                            win_width=self.win_size, kernel_num=self.kernel_num, same_conv=True,
                                            activate=False, pooling=True, pool_size=self.pool_size)
     self.concat_layer = layers.ConcatLayer()
     self.fc1_layer = layers.FCLayer(self.kernel_num, self.hidden_size)
     self.n_class = int(config['n_class'])
     self.fc2_layer = layers.FCLayer(2 * self.hidden_size, self.n_class)
Example #12
 def __init__(self, config):
     self.vocab_size = int(config['vocabulary_size'])
     self.emb_size = int(config['embedding_dim'])
     self.kernel_num = int(config['kernel_num'])
     self.left_name, self.seq_len1 = config['left_slots'][0]
     self.right_name, self.seq_len2 = config['right_slots'][0]
     self.lamb = float(config['lamb'])
     self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
     self.sim_mat_layer = layers.SimilarityMatrixLayer()
     self.kernel_pool_layer = layers.KernelPoolingLayer(
         self.kernel_num, self.lamb)
     self.tanh_layer = layers.TanhLayer()
     self.n_class = int(config['n_class'])
     self.fc_layer = layers.FCLayer(self.kernel_num, self.n_class)
Example #13
    def __init__(self, config):
        self.vocab_size = int(config['vocabulary_size'])
        self.emb_size = int(config['embedding_dim'])
        self.dropout_rate = float(config['dropout_rate'])

        self.left_name, self.seq_len = config['left_slots'][0]
        self.right_name, self.seq_len = config['right_slots'][0]

        self.kernel_num1 = int(config['num_filters_1'])
        self.win_size1 = int(config['window_size_1'])
        self.kernel_num2 = int(config['num_filters_2'])
        self.win_size2 = int(config['window_size_2'])
        self.pool_size2 = int(config['pool_size2'])

        self.drop_out = layers.DropoutLayer(drop_rate=self.dropout_rate)
        self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
        self.cnn1_layer = layers.CNNLayerConfig(seq_len=self.seq_len,
                                                emb_dim=self.emb_size,
                                                win_height=self.win_size1,
                                                win_width=self.emb_size,
                                                kernel_num=self.kernel_num1,
                                                same_conv=False,
                                                pooling=False,
                                                activate=False)
        self.cnn_out_len = self.seq_len - self.win_size1 + 1
        self.cnn2_layer = layers.CNNLayerConfig(seq_len=self.seq_len,
                                                emb_dim=self.seq_len,
                                                win_height=self.win_size2,
                                                win_width=self.win_size2,
                                                kernel_num=self.kernel_num2,
                                                same_conv=True,
                                                pooling=True,
                                                activate=True,
                                                pool_size=self.pool_size2)
        self.cnn2_out_size = (self.cnn_out_len - self.pool_size2 + 1) * (
            self.cnn_out_len - self.pool_size2 + 1) * self.kernel_num2
        self.n_class = int(config['n_class'])
        self.fc_layer = layers.FCLayer(self.cnn2_out_size, self.n_class)
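The two derived sizes above determine the final fully connected layer: the first convolution (same_conv=False) shrinks the sequence to seq_len - window_size_1 + 1, per the cnn_out_len formula, and cnn2_out_size is then computed from that length, pool_size2 and num_filters_2. A hypothetical config and the sizes it implies:

 # Hypothetical config for the two-stage CNN example above.
 config = {
     'vocabulary_size': 10000,
     'embedding_dim': 128,
     'dropout_rate': 0.1,
     'left_slots': [('left', 32)],
     'right_slots': [('right', 32)],
     'num_filters_1': 64,
     'window_size_1': 3,
     'num_filters_2': 32,
     'window_size_2': 3,
     'pool_size2': 2,
     'n_class': 2,
 }
 # Sizes implied by the formulas in the constructor:
 #   cnn_out_len   = 32 - 3 + 1 = 30
 #   cnn2_out_size = (30 - 2 + 1) * (30 - 2 + 1) * 32 = 26912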