Example #1
def _encode(self):
    # Encode the answer embeddings with a stacked cuDNN GRU.
    with tf.variable_scope('answer_encoding', reuse=tf.AUTO_REUSE):
        a_rnn = cudnn_gru(num_layers=2 * self.layer_num,
                          num_units=self.hidden_size,
                          batch_size=self.N,
                          input_size=self.a_emb.get_shape().as_list()[-1],
                          keep_prob=self.dropout_keep_prob,
                          is_train=self.is_train)
        # Run the GRU over the answer, respecting the true sequence lengths.
        self.a_encodes = a_rnn(self.a_emb, seq_len=self.a_len)
    # Encode the question embeddings with an identically configured GRU,
    # in its own variable scope so the two encoders do not share weights.
    with tf.variable_scope('question_encoding', reuse=tf.AUTO_REUSE):
        q_rnn = cudnn_gru(num_layers=2 * self.layer_num,
                          num_units=self.hidden_size,
                          batch_size=self.N,
                          input_size=self.q_emb.get_shape().as_list()[-1],
                          keep_prob=self.dropout_keep_prob,
                          is_train=self.is_train)
        self.q_encodes = q_rnn(self.q_emb, seq_len=self.q_len)
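
For context, `_encode` builds two identically shaped encoders in separate variable scopes, so answer and question are encoded with independent weights. Below is a minimal standalone sketch of the same pattern, assuming TF 1.x and an R-Net-style `cudnn_gru` wrapper; the `func` module, embedding size, and hyperparameter values are illustrative assumptions, not from the original:

import tensorflow as tf
from func import cudnn_gru  # assumption: R-Net-style wrapper in a local func.py

batch_size, emb_dim, hidden_size = 32, 300, 75
emb = tf.placeholder(tf.float32, [batch_size, None, emb_dim])  # [B, T, D] embeddings
lengths = tf.placeholder(tf.int32, [batch_size])               # true length per example

with tf.variable_scope('encoding', reuse=tf.AUTO_REUSE):
    rnn = cudnn_gru(num_layers=2, num_units=hidden_size,
                    batch_size=batch_size, input_size=emb_dim,
                    keep_prob=0.8, is_train=tf.constant(True))
    encodes = rnn(emb, seq_len=lengths)  # encoded sequence, [B, T, H']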
Example #2
def _self_attention(self):
    with tf.variable_scope('self_attention', reuse=tf.AUTO_REUSE):
        # Match the gated passage representation against itself
        # (self-attention), masking out padded positions.
        self.pp_att = dot_attention(self.gated_att,
                                    self.gated_att,
                                    mask=self.p_mask,
                                    hidden=self.hidden_size,
                                    keep_prob=self.dropout_keep_prob,
                                    is_train=self.is_train)
        # Re-encode the attended output with a cuDNN GRU.
        self_rnn = cudnn_gru(num_layers=self.layer_num,
                             num_units=self.hidden_size,
                             batch_size=self.N,
                             input_size=self.pp_att.get_shape().as_list()[-1],
                             keep_prob=self.dropout_keep_prob,
                             is_train=self.is_train)
        self.self_att = self_rnn(self.pp_att, seq_len=self.p_len)
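
The pattern in `_self_attention` is R-Net-style self-matching: the passage representation attends over itself with masked dot-product attention, and the attended output is re-encoded by a GRU. Below is a minimal sketch of the attention core as a simplified stand-in for the `dot_attention` helper; real implementations typically add input projections, dropout, and an output gate on top of this:

import tensorflow as tf

def simple_dot_attention(inputs, memory, mask):
    # Simplified stand-in: score every input position against every
    # memory position with a plain dot product.
    scores = tf.matmul(inputs, memory, transpose_b=True)   # [B, Ti, Tm]
    # Push padded memory positions toward -inf so softmax ignores them.
    pad = (1.0 - tf.cast(tf.expand_dims(mask, axis=1), tf.float32)) * -1e30
    weights = tf.nn.softmax(scores + pad, axis=-1)
    context = tf.matmul(weights, memory)                   # [B, Ti, D]
    # Concatenate inputs with their attended context, as attention
    # helpers of this kind usually do before any gating.
    return tf.concat([inputs, context], axis=-1)           # [B, Ti, 2D]

In the self-attention case above, `inputs` and `memory` would both be `self.gated_att`, and `mask` would be `self.p_mask`.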