import tensorflow as tf
from tensorflow.contrib.rnn import GRUCell
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn, dynamic_rnn

# MatchRNNCell is this codebase's attention wrapper cell; it is assumed to be
# importable from the surrounding project (a sketch is given at the end of
# this section).


def rnet_matching_layer(layer_size: int,
                        att_size: int,
                        par_vecs: tf.Tensor,
                        qu_vecs: tf.Tensor,
                        par_num_words: tf.Tensor,
                        parallel_iterations: int = 64) -> tf.Tensor:
    """Bidirectional R-NET matching layer: aligns each passage position with
    the question via attention while running a GRU over the passage."""
    with tf.variable_scope('alignment_par_qu') as scope:
        # Separate scopes so the forward and backward cells get their own
        # attention variables.
        with tf.variable_scope('fw/match_rnn_cell/attention'):
            fw_cell = MatchRNNCell(GRUCell(layer_size), qu_vecs, att_size)
        with tf.variable_scope('bw/match_rnn_cell/attention'):
            bw_cell = MatchRNNCell(GRUCell(layer_size), qu_vecs, att_size)
        (fw_out, bw_out), _ = bidirectional_dynamic_rnn(
            fw_cell, bw_cell,
            inputs=par_vecs,
            dtype=tf.float32,
            sequence_length=par_num_words,
            scope=scope,
            parallel_iterations=parallel_iterations,
            swap_memory=True)
        # Concatenate forward and backward outputs along the feature axis.
        match_par_qu_out = tf.concat([fw_out, bw_out], axis=2)
    return match_par_qu_out
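
# Usage sketch. All placeholder names and shapes below are illustrative
# assumptions, not part of the original code; layer_size=75 and att_size=75
# follow a typical R-NET configuration.
par_vecs_ph = tf.placeholder(tf.float32, [None, None, 150])  # [B, par_len, dim]
qu_vecs_ph = tf.placeholder(tf.float32, [None, None, 150])   # [B, qu_len, dim]
par_len_ph = tf.placeholder(tf.int32, [None])                # true passage lengths

match_out = rnet_matching_layer(
    layer_size=75, att_size=75,
    par_vecs=par_vecs_ph, qu_vecs=qu_vecs_ph,
    par_num_words=par_len_ph)
# match_out has shape [B, par_len, 2 * 75]: fw and bw outputs concatenated.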

# Method variant from the model class (note `self`): unidirectional matching
# followed by a bidirectional re-encoding step.
def match_par_qu_layer(self):
    """Match the passage against the question, then re-encode with a bi-GRU."""
    with tf.variable_scope('alignment_par_qu') as scope:
        rnn_cell = MatchRNNCell(GRUCell(self.conf_layer_size),
                                self.qu_encoded,
                                self.conf_att_size)
        outputs, _ = dynamic_rnn(
            rnn_cell,
            self.par_encoded,
            sequence_length=self.par_num_words,
            parallel_iterations=self.conf_rnn_parallelity,
            scope=scope,
            swap_memory=True,
            dtype=tf.float32)
        with tf.variable_scope('encoding'):
            # Re-encode the matched representation with a bidirectional GRU,
            # applying dropout to its inputs.
            outputs, _, _ = bi_gru_layer([self.conf_layer_size],
                                         self.apply_dropout(outputs),
                                         self.par_num_words,
                                         self.apply_dropout)
    return outputs
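
# `bi_gru_layer` and `apply_dropout` above are helpers from the surrounding
# codebase. A minimal sketch of what `bi_gru_layer` could look like, assuming
# a stacked bidirectional GRU with dropout between stacked layers (the actual
# helper may differ):
def bi_gru_layer(layer_sizes, inputs, seq_len, dropout_fn):
    """Stacked bi-GRU; returns (outputs, fw_final_state, bw_final_state)."""
    outputs = inputs
    fw_state = bw_state = None
    for i, size in enumerate(layer_sizes):
        if i > 0:
            # Inter-layer dropout only; the caller is expected to drop out
            # the first layer's inputs (as `match_par_qu_layer` does above).
            outputs = dropout_fn(outputs)
        with tf.variable_scope('bi_gru_%d' % i):
            (fw_out, bw_out), (fw_state, bw_state) = bidirectional_dynamic_rnn(
                GRUCell(size), GRUCell(size), outputs,
                sequence_length=seq_len, dtype=tf.float32)
            # Concatenate the two directions along the feature axis.
            outputs = tf.concat([fw_out, bw_out], axis=2)
    return outputs, fw_state, bw_state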

def rnet_matching_layer_unidirectional(layer_size: int,
                                       att_size: int,
                                       par_vecs: tf.Tensor,
                                       qu_vecs: tf.Tensor,
                                       par_num_words: tf.Tensor,
                                       parallel_iterations: int = 64
                                       ) -> tf.Tensor:
    """Unidirectional variant of the matching layer: a single forward
    MatchRNNCell instead of a bidirectional pair."""
    with tf.variable_scope('alignment_par_qu') as scope:
        with tf.variable_scope('fw/match_rnn_cell/attention'):
            rnn_cell = MatchRNNCell(GRUCell(layer_size), qu_vecs, att_size)
        output, _ = dynamic_rnn(
            rnn_cell,
            inputs=par_vecs,
            dtype=tf.float32,
            sequence_length=par_num_words,
            scope=scope,
            parallel_iterations=parallel_iterations,
            swap_memory=True)
    return output
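
# All three functions above depend on MatchRNNCell, the codebase's attention
# wrapper cell. A minimal sketch, assuming plain additive attention over the
# question at every passage step; the real cell may additionally gate its
# inputs, as in the R-NET paper. The `fw/match_rnn_cell/attention` scopes
# above suggest the real cell builds its attention variables at construction
# time, whereas this sketch builds them lazily on first call.
from tensorflow.contrib.rnn import RNNCell


class MatchRNNCell(RNNCell):
    """Wraps an inner cell; at each step, attends over `qu_vecs` using the
    current input and state, and feeds [input; attended question] to the
    inner cell."""

    def __init__(self, inner_cell, qu_vecs, att_size):
        super(MatchRNNCell, self).__init__()
        self._cell = inner_cell
        self._qu_vecs = qu_vecs  # [B, qu_len, dim]
        self._att_size = att_size

    @property
    def state_size(self):
        return self._cell.state_size

    @property
    def output_size(self):
        return self._cell.output_size

    def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or 'match_rnn_cell'):
            with tf.variable_scope('attention'):
                # Additive attention: score each question position against
                # the current input and recurrent state.
                qu_proj = tf.layers.dense(self._qu_vecs, self._att_size,
                                          name='qu_proj')      # [B, L, A]
                step_proj = tf.layers.dense(
                    tf.concat([inputs, state], axis=1),
                    self._att_size, name='step_proj')          # [B, A]
                scores = tf.layers.dense(
                    tf.tanh(qu_proj + tf.expand_dims(step_proj, 1)),
                    1, name='score')                           # [B, L, 1]
                weights = tf.nn.softmax(scores, axis=1)
                # Attention-weighted summary of the question.
                context = tf.reduce_sum(weights * self._qu_vecs, axis=1)
            # Feed the input concatenated with the question summary to the
            # wrapped cell.
            return self._cell(tf.concat([inputs, context], axis=1), state)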