Example #1
0
    def siamese(self, seg):
        """
        Siamese network: per-side embeddings, cross-transformer, then
        self-attention over each side.

        :param seg: segmentation input — unused in this variant; kept for
            interface compatibility with sibling models (TODO confirm).
        :return: (query, key) — attended representations of the left and
            right input sequences, respectively.
        """
        # Each side gets its own embedding table (distinct scopes).
        left_embed = embedding(self.left_x,
                               vocab_size=self.vocab_size,
                               num_units=hp.num_units,
                               scale=True,
                               scope="lembed")
        right_embed = embedding(self.right_x,
                                vocab_size=self.vocab_size,
                                num_units=hp.num_units,
                                scale=True,
                                scope="rembed")

        # Cross-encode the two sides, then self-attend each one.
        query, key = self.transformer(left_embed, right_embed)
        query = self.attention(query, query)
        key = self.attention(key, key)
        return query, key
Example #2
0
	def match_pyramid(self):
		"""
		MatchPyramid: embed both inputs, build their matching matrix,
		then refine it through two stacked CNN stages.
		:return: feature map produced by the second CNN stage.
		"""
		embed_l = embedding(self.left_x, vocab_size=self.vocab_size, num_units=self.embedding_size, scale=True,
							scope="left_embed")
		embed_r = embedding(self.right_x, vocab_size=self.vocab_size, num_units=self.embedding_size, scale=True,
							scope="right_embed")
		# Pairwise matching matrix between the two embedded sequences.
		features = self.match_text(embed_l, embed_r)
		# Two convolutional stages, applied in order.
		for stage in (1, 2):
			features = self.cnn_layer(features, stage)
		return features
Example #3
0
    def __init__(self, vocab_size, num_tags):
        """
        Build the tagging graph: embedding -> RNN -> CNN -> logits -> CRF.

        :param vocab_size: size of the input vocabulary.
        :param num_tags: number of output tag classes.
        """
        self.vocab_size = vocab_size
        self.num_tags = num_tags
        self.graph = tf.Graph()
        with self.graph.as_default():
            # Model inputs: token ids, gold tags, and true sequence lengths.
            self.x = tf.placeholder(dtype=tf.int32, shape=[None, hp.max_len])
            self.y = tf.placeholder(dtype=tf.int32, shape=[None, hp.max_len])
            self.seq_lens = tf.placeholder(dtype=tf.int32, shape=[None])
            self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                    name="keep_prob")
            self.global_step = tf.train.create_global_step()

            # Embed tokens, then stack the RNN and CNN feature extractors.
            net = embedding(self.x,
                            vocab_size=self.vocab_size,
                            num_units=hp.num_units,
                            scale=True,
                            scope="embed")
            net = self.rnn_layer(net)
            net = self.cnn_layer(net)
            self.logits = self.logits_layer(net)
            # CRF layer yields the training loss and transition matrix.
            self.loss, self.transition = self.crf_layer()
            # Training op from the configured optimizer.
            self.train_op = self.optimize()
Example #4
0
	def siamese(self):
		"""
		Siamese network: shared embedding + two CNN stages + attention.

		Left and right inputs are concatenated along the batch axis so a
		single forward pass (with shared weights) serves both sides; the
		result is split back into halves at the end.
		:return: (key, value) — representations of the left and right
			inputs, respectively.
		"""
		# Stack left/right along the batch dim; one pass serves both sides.
		x = tf.concat([self.left_x, self.right_x], axis=0)
		embed = embedding(x, vocab_size=self.vocab_size, num_units=self.embedding_size, scale=True, scope="embed")

		# Two CNN stages; each expects a trailing channel dimension.
		inputs = tf.expand_dims(embed, -1)
		output = self.cnn_layer(inputs, 1)
		output = tf.expand_dims(output, -1)
		output = self.cnn_layer(output, 2)
		# Attend CNN features using the raw embeddings as queries.
		output = self.attention(embed, output)
		# Split back into the left ("key") and right ("value") halves.
		key, value = tf.split(output, 2, axis=0)
		return key, value