def contextual_embedding(self):
    """Contextual embedding layer: encode the paragraph and the question with
    separate RNN encoders, applying dropout to the outputs if enabled."""
    with tf.variable_scope('paragraph_encoding'):
        self.h = rnn(self.x_embed, self.hidden_size, self.x_length)
    with tf.variable_scope('question_encoding'):
        self.u = rnn(self.q_embed, self.hidden_size, self.q_length)
    if self.use_dropout:
        self.h = tf.nn.dropout(self.h, self.dropout_keep_prob)
        self.u = tf.nn.dropout(self.u, self.dropout_keep_prob)
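The rnn helper is not part of this excerpt. A minimal sketch of what this call signature suggests, assuming a single-layer bidirectional LSTM whose forward and backward outputs are concatenated (so self.h and self.u have a last dimension of 2 * hidden_size); TensorFlow 1.x API throughout:

import tensorflow as tf

def rnn(inputs, hidden_size, seq_len):
    # Bidirectional LSTM encoder; concatenating both directions gives
    # outputs of shape [batch, time, 2 * hidden_size].
    cell_fw = tf.nn.rnn_cell.LSTMCell(hidden_size)
    cell_bw = tf.nn.rnn_cell.LSTMCell(hidden_size)
    (out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
        cell_fw, cell_bw, inputs, sequence_length=seq_len, dtype=tf.float32)
    return tf.concat([out_fw, out_bw], axis=-1)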
def encode(self):
    """Encode the passage and the question with single-layer GRU encoders,
    applying dropout to the outputs if enabled."""
    with tf.variable_scope("passage_encoding"):
        self.u_p = rnn('gru', self.p_embed, self.hidden_size, self.p_length,
                       layer_num=1)
    with tf.variable_scope("question_encoding"):
        self.u_q = rnn('gru', self.q_embed, self.hidden_size, self.q_length,
                       layer_num=1)
    if self.use_dropout:
        self.u_p = tf.nn.dropout(self.u_p, self.dropout_keep_prob)
        self.u_q = tf.nn.dropout(self.u_q, self.dropout_keep_prob)
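This second model calls rnn with a different signature (cell type, layer count, optional dropout), so it is presumably a separate helper. A hedged sketch consistent with the calls in this excerpt, again assuming bidirectional encoding:

import tensorflow as tf

def rnn(cell_type, inputs, hidden_size, seq_len, layer_num=1,
        dropout_keep_prob=1.0):
    # Multi-layer bidirectional RNN with either GRU or LSTM cells; the outputs
    # of the two directions are concatenated along the feature axis.
    def make_cell():
        cell = (tf.nn.rnn_cell.GRUCell(hidden_size) if cell_type == 'gru'
                else tf.nn.rnn_cell.LSTMCell(hidden_size))
        return tf.nn.rnn_cell.DropoutWrapper(cell,
                                             output_keep_prob=dropout_keep_prob)
    cell_fw = tf.nn.rnn_cell.MultiRNNCell([make_cell() for _ in range(layer_num)])
    cell_bw = tf.nn.rnn_cell.MultiRNNCell([make_cell() for _ in range(layer_num)])
    (out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
        cell_fw, cell_bw, inputs, sequence_length=seq_len, dtype=tf.float32)
    return tf.concat([out_fw, out_bw], axis=-1)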
def output(self):
    # Start-position logits come from [g; m]; end-position logits come from
    # [g; m_], where m_ is m passed through one more LSTM layer.
    self.p1 = linear(self.hidden_size * 10, self.g, self.m, '1')
    with tf.variable_scope("output_rnn"):
        m_ = rnn('lstm', self.m, self.hidden_size, self.x_length, layer_num=1)
    self.p2 = linear(self.hidden_size * 10, self.g, m_, '2')
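linear is also not defined here. The hidden_size * 10 argument suggests the concatenation [g; m], with g being 8 × hidden_size and m being 2 × hidden_size (i.e. bidirectional encoders), matching BiDAF's output layer. A minimal sketch, assuming it projects [g; m] to one unnormalized start/end logit per passage position; the scope name is hypothetical:

import tensorflow as tf

def linear(input_size, g, m, scope_suffix):
    # Project the concatenation [g; m] (expected width input_size) down to a
    # single logit per passage position; softmax / cross-entropy is applied later.
    with tf.variable_scope('span_logits_' + scope_suffix):
        gm = tf.concat([g, m], axis=-1)                  # [batch, time, input_size]
        gm.get_shape().assert_is_compatible_with([None, None, input_size])
        logits = tf.layers.dense(gm, 1, use_bias=False)  # [batch, time, 1]
        return tf.squeeze(logits, axis=-1)               # [batch, time]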
def modeling(self):
    with tf.variable_scope("modeling"):
        self.m = rnn('lstm', self.g, self.hidden_size, self.x_length, layer_num=1)
    if self.use_dropout:
        self.m = tf.nn.dropout(self.m, self.dropout_keep_prob)
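For orientation, a plausible build order for the BiDAF-style methods above (contextual_embedding, modeling, output). The attention layer that produces self.g is not shown in this excerpt, so the method name below is hypothetical; shapes assume bidirectional encoders, with B = batch size, T = passage length, J = question length, d = hidden_size:

model.contextual_embedding()   # self.h: [B, T, 2d],  self.u: [B, J, 2d]
model.attention_flow()         # self.g: [B, T, 8d]   (hypothetical; not in this excerpt)
model.modeling()               # self.m: [B, T, 2d]
model.output()                 # self.p1 / self.p2: start / end logits over T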
def output_layer(self):
    """Run one more GRU over the passage representation (with a residual
    connection) and predict the answer span with a pointer network."""
    params = [([self.params["w_h_p"], self.params["w_h_a"]], self.params["v"]),
              ([self.params["w_u_q_"], self.params["w_v_q"]], self.params["v"])]
    with tf.variable_scope("output_layer"):
        self.h_p = self.h_p + rnn("gru", self.h_p, self.hidden_size, self.p_length,
                                  dropout_keep_prob=self.dropout_keep_prob)
        self.p1, self.p2 = answer_pointer(self.h_p, self.p_length, self.u_q,
                                          self.q_length, self.hidden_size,
                                          params, self.batch_size)
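answer_pointer is not shown either. Its arguments and the params layout, ([w_h_p, w_h_a], v) and ([w_u_q_, w_v_q], v), follow R-Net's pointer-network output layer: attention pooling over the question initializes the decoder state, then two decoding steps over the passage produce start and end scores. A simplified sketch under those assumptions (the trainable query vector V_r^Q of the original formulation is omitted, so the second weight of the question branch goes unused here; batch_size is accepted only for interface compatibility):

import tensorflow as tf

def answer_pointer(h_p, p_length, u_q, q_length, hidden_size, params, batch_size):
    # Pointer network over the passage (simplified R-Net-style sketch);
    # returns masked, pre-softmax start and end scores of shape [B, T].
    (ptr_weights, v_ptr), (init_weights, v_init) = params

    def attend(memory, lengths, weights, v, state=None):
        # Additive attention: score_j = v^T tanh(W_m memory_j + W_s state).
        w_m, w_s = weights
        s = tf.tensordot(memory, w_m, axes=[[2], [0]])          # [B, T, attn]
        if state is not None:
            s += tf.expand_dims(tf.matmul(state, w_s), 1)
        scores = tf.tensordot(tf.tanh(s), v, axes=[[2], [0]])   # [B, T]
        mask = tf.sequence_mask(lengths, tf.shape(memory)[1], dtype=tf.float32)
        scores += (1.0 - mask) * -1e30                          # mask padding
        probs = tf.nn.softmax(scores)
        context = tf.reduce_sum(tf.expand_dims(probs, -1) * memory, axis=1)
        return scores, context

    # Question pooling gives the initial decoder state (V_r^Q term omitted).
    _, state = attend(u_q, q_length, init_weights, v_init)
    cell = tf.nn.rnn_cell.GRUCell(2 * hidden_size)   # state width matches u_q / h_p
    start_scores, context = attend(h_p, p_length, ptr_weights, v_ptr, state)
    _, state = cell(context, state)                  # advance the decoder one step
    end_scores, _ = attend(h_p, p_length, ptr_weights, v_ptr, state)
    return start_scores, end_scores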