def __init__(self, config):
    """Assemble the matching network's layers from the *config* dict.

    Reads scalar hyper-parameters, the (name, length) slot pairs for the
    left/right inputs, and instantiates embedding, dynamic-pooling CNN,
    activation and fully-connected layers.
    """
    # Scalar hyper-parameters.
    self.vocab_size = int(config['vocabulary_size'])
    self.emb_size = int(config['embedding_dim'])
    self.kernel_size = int(config['num_filters'])
    self.win_size1 = int(config['window_size_left'])
    self.win_size2 = int(config['window_size_right'])
    self.dpool_size1 = int(config['dpool_size_left'])
    self.dpool_size2 = int(config['dpool_size_right'])
    self.hidden_size = int(config['hidden_size'])
    # Each slot entry is a (name, sequence_length) pair.
    self.left_name, self.seq_len1 = config['left_slots'][0]
    self.right_name, self.seq_len2 = config['right_slots'][0]
    # Layer construction.
    self.emb_layer = layers.EmbeddingEnhancedLayer(
        self.vocab_size, self.emb_size, zero_pad=True, scale=False)
    self.cnn_layer = layers.CNNDynamicPoolingLayer(
        self.seq_len1, self.seq_len2,
        self.win_size1, self.win_size2,
        self.dpool_size1, self.dpool_size2,
        self.kernel_size)
    self.relu_layer = layers.ReluLayer()
    self.tanh_layer = layers.TanhLayer()
    # Any non-zero 'match_mask' entry enables masking.
    self.match_mask = ('match_mask' in config and config['match_mask'] != 0)
    # Flattened dynamic-pooling output feeds the first FC layer.
    self.fc1_layer = layers.FCLayer(
        self.kernel_size * self.dpool_size1 * self.dpool_size2,
        self.hidden_size)
    self.n_class = int(config['n_class'])
    self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
def __init__(self, config):
    """Assemble the matching network's layers from the *config* dict.

    Supports two training modes: "pointwise" (n-class classifier head)
    and "pairwise" (single-score head); any other mode is logged as an
    error and leaves the final FC layer unset.
    """
    # Scalar hyper-parameters.
    self.vocab_size = int(config['vocabulary_size'])
    self.emb_size = int(config['embedding_dim'])
    self.kernel_size = int(config['num_filters'])
    self.win_size1 = int(config['window_size_left'])
    self.win_size2 = int(config['window_size_right'])
    self.dpool_size1 = int(config['dpool_size_left'])
    self.dpool_size2 = int(config['dpool_size_right'])
    self.hidden_size = int(config['hidden_size'])
    # Each slot entry is a (name, sequence_length) pair.
    self.left_name, self.seq_len1 = config['left_slots'][0]
    self.right_name, self.seq_len2 = config['right_slots'][0]
    self.task_mode = config['training_mode']
    # Enhanced embedding layer (zero-padded, unscaled).
    self.emb_layer = layers.EmbeddingEnhancedLayer(
        self.vocab_size, self.emb_size, zero_pad=True, scale=False)
    self.cnn_layer = layers.CNNDynamicPoolingLayer(
        self.seq_len1, self.seq_len2,
        self.win_size1, self.win_size2,
        self.dpool_size1, self.dpool_size2,
        self.kernel_size)
    self.relu_layer = layers.ReluLayer()
    self.tanh_layer = layers.TanhLayer()
    # Whether to mask out the padded region during matching.
    self.match_mask = ('match_mask' in config and config['match_mask'] != 0)
    # Flattened dynamic-pooling output feeds the first FC layer.
    self.fc1_layer = layers.FCLayer(
        self.kernel_size * self.dpool_size1 * self.dpool_size2,
        self.hidden_size)
    # Output head depends on the training mode.
    if self.task_mode == "pointwise":
        self.n_class = int(config['n_class'])
        self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
    elif self.task_mode == "pairwise":
        self.fc2_layer = layers.FCLayer(self.hidden_size, 1)
    else:
        logging.error("training mode not supported")
def __init__(self, config):
    """Assemble a kernel-pooling matching model from the *config* dict.

    Builds embedding, similarity-matrix, kernel-pooling and activation
    layers, plus an n-class fully-connected output head.
    """
    # Scalar hyper-parameters.
    self.vocab_size = int(config['vocabulary_size'])
    self.emb_size = int(config['embedding_dim'])
    self.kernel_num = int(config['kernel_num'])
    self.lamb = float(config['lamb'])
    # Each slot entry is a (name, sequence_length) pair.
    self.left_name, self.seq_len1 = config['left_slots'][0]
    self.right_name, self.seq_len2 = config['right_slots'][0]
    # Layer construction.
    self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
    self.sim_mat_layer = layers.SimilarityMatrixLayer()
    self.kernel_pool_layer = layers.KernelPoolingLayer(
        self.kernel_num, self.lamb)
    self.tanh_layer = layers.TanhLayer()
    # Classification head over the pooled kernel features.
    self.n_class = int(config['n_class'])
    self.fc_layer = layers.FCLayer(self.kernel_num, self.n_class)
def __init__(self, config):
    """Assemble a siamese CNN matching model from the *config* dict.

    Both sides share one CNN over sequences of the same length; their
    pooled features are concatenated before the classification head.
    """
    # Scalar hyper-parameters.
    self.vocab_size = int(config['vocabulary_size'])
    self.emb_size = int(config['embedding_dim'])
    self.kernel_size = int(config['num_filters'])
    self.win_size = int(config['window_size'])
    self.hidden_size = int(config['hidden_size'])
    # NOTE: both slots write self.seq_len — the two sides are assumed
    # to share one sequence length (right slot wins, as in original).
    self.left_name, self.seq_len = config['left_slots'][0]
    self.right_name, self.seq_len = config['right_slots'][0]
    # Layer construction.
    self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
    self.cnn_layer = layers.CNNLayer(
        self.seq_len, self.emb_size, self.win_size, self.kernel_size)
    self.relu_layer = layers.ReluLayer()
    self.tanh_layer = layers.TanhLayer()
    self.concat_layer = layers.ConcatLayer()
    self.fc1_layer = layers.FCLayer(self.kernel_size, self.hidden_size)
    # Concatenated left+right hidden vectors feed the classifier.
    self.n_class = int(config['n_class'])
    self.fc2_layer = layers.FCLayer(2 * self.hidden_size, self.n_class)
def __init__(self, config):
    """Assemble a kernel-pooling matching model from the *config* dict.

    Supports two training modes: "pointwise" (n-class head) and
    "pairwise" (single-score head); any other mode is logged as an
    error and leaves the FC layer unset.
    """
    # Scalar hyper-parameters.
    self.vocab_size = int(config['vocabulary_size'])
    self.emb_size = int(config['embedding_dim'])
    self.kernel_num = int(config['kernel_num'])
    self.lamb = float(config['lamb'])
    self.task_mode = config['training_mode']
    # Each slot entry is a (name, sequence_length) pair.
    self.left_name, self.seq_len1 = config['left_slots'][0]
    self.right_name, self.seq_len2 = config['right_slots'][0]
    # Layer construction.
    self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
    self.sim_mat_layer = layers.SimilarityMatrixLayer()
    self.kernel_pool_layer = layers.KernelPoolingLayer(
        self.kernel_num, self.lamb)
    self.tanh_layer = layers.TanhLayer()
    # Output head depends on the training mode.
    if self.task_mode == "pointwise":
        self.n_class = int(config['n_class'])
        self.fc_layer = layers.FCLayer(self.kernel_num, self.n_class)
    elif self.task_mode == "pairwise":
        self.fc_layer = layers.FCLayer(self.kernel_num, 1)
    else:
        logging.error("training mode not supported")
def __init__(self, config):
    """Assemble a siamese CNN matching model (with pooling and dropout)
    from the *config* dict.

    Both sides share one same-padded, pooled CNN; their features are
    concatenated before the two-layer classification head.
    """
    # Scalar hyper-parameters.
    self.vocab_size = int(config['vocabulary_size'])
    self.emb_size = int(config['embedding_dim'])
    self.kernel_num = int(config['num_filters'])
    self.win_size = int(config['window_size'])
    self.pool_size = int(config['pool_size'])
    self.hidden_size = int(config['hidden_size'])
    self.dropout_rate = float(config['dropout_rate'])
    self.drop_out = layers.DropoutLayer(drop_rate=self.dropout_rate)
    self.tanh_layer = layers.TanhLayer()
    # NOTE: both slots write self.seq_len — the two sides are assumed
    # to share one sequence length (right slot wins).
    self.left_name, self.seq_len = config['left_slots'][0]
    self.right_name, self.seq_len = config['right_slots'][0]
    self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
    self.cnn_layer = layers.CNNLayerConfig(
        seq_len=self.seq_len, emb_dim=self.emb_size,
        win_height=1, win_width=self.win_size,
        kernel_num=self.kernel_num,
        same_conv=True, activate=False,
        pooling=True, pool_size=self.pool_size)
    self.concat_layer = layers.ConcatLayer()
    # BUG FIX: the original read self.kernel_size, which is never set in
    # this initializer (the filter count is stored as self.kernel_num),
    # so constructing fc1_layer raised AttributeError. Sibling models in
    # this file feed the filter count into their first FC layer.
    self.fc1_layer = layers.FCLayer(self.kernel_num, self.hidden_size)
    # Concatenated left+right hidden vectors feed the classifier.
    self.n_class = int(config['n_class'])
    self.fc2_layer = layers.FCLayer(2 * self.hidden_size, self.n_class)
def __init__(self, config):
    """Assemble a siamese CNN matching model from the *config* dict.

    In "pointwise" mode the concatenated features feed an n-class FC
    head; in "pairwise" mode a cosine layer scores the pair instead.
    Any other mode is logged as an error.
    """
    # Scalar hyper-parameters.
    self.vocab_size = int(config['vocabulary_size'])
    self.emb_size = int(config['embedding_dim'])
    self.kernel_size = int(config['num_filters'])
    self.win_size = int(config['window_size'])
    self.hidden_size = int(config['hidden_size'])
    # NOTE: both slots write self.seq_len — the two sides are assumed
    # to share one sequence length (right slot wins).
    self.left_name, self.seq_len = config['left_slots'][0]
    self.right_name, self.seq_len = config['right_slots'][0]
    self.task_mode = config['training_mode']
    # Layer construction.
    self.emb_layer = layers.EmbeddingLayer(self.vocab_size, self.emb_size)
    self.cnn_layer = layers.CNNLayer(
        self.seq_len, self.emb_size, self.win_size, self.kernel_size)
    self.relu_layer = layers.ReluLayer()
    self.tanh_layer = layers.TanhLayer()
    self.concat_layer = layers.ConcatLayer()
    self.fc1_layer = layers.FCLayer(self.kernel_size, self.hidden_size)
    # Output head depends on the training mode.
    if self.task_mode == "pointwise":
        self.n_class = int(config['n_class'])
        self.fc2_layer = layers.FCLayer(2 * self.hidden_size, self.n_class)
    elif self.task_mode == "pairwise":
        self.cos_layer = layers.CosineLayer()
    else:
        logging.error("training mode not supported")