def _build_cudnn_rnn(self, units, n_hidden_list, cell_type, intra_layer_dropout, mask):
    # actual (unpadded) sequence lengths, recovered from the binary mask
    sequence_lengths = tf.to_int32(tf.reduce_sum(mask, axis=1))
    for n, n_hidden in enumerate(n_hidden_list):
        with tf.variable_scope(cell_type.upper() + '_' + str(n)):
            if cell_type.lower() == 'lstm':
                units, _ = cudnn_bi_lstm(units, n_hidden, sequence_lengths)
            elif cell_type.lower() == 'gru':
                units, _ = cudnn_bi_gru(units, n_hidden, sequence_lengths)
            else:
                raise RuntimeError('Wrong cell type "{}"! Only "gru" and "lstm"!'.format(cell_type))
            # concatenate forward and backward outputs along the feature axis
            units = tf.concat(units, -1)
            if intra_layer_dropout and n != len(n_hidden_list) - 1:
                units = variational_dropout(units, self._dropout_ph)
    return units
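# Illustrative usage sketch (not from the original source): how _build_cudnn_rnn might be
# called when building a tagging graph. `token_emb` and `mask_ph` are hypothetical tensors,
# a [batch, time, emb_dim] embedded-token tensor and a [batch, time] 0/1 mask respectively;
# cudnn_bi_gru / cudnn_bi_lstm / variational_dropout come from the surrounding library.
#
#     units = self._build_cudnn_rnn(units=token_emb,
#                                   n_hidden_list=[128, 128],
#                                   cell_type='gru',
#                                   intra_layer_dropout=True,
#                                   mask=mask_ph)
#     # after the call, units has shape [batch, time, 2 * 128]: the forward and backward
#     # states of the last bi-GRU layer concatenated along the last axis.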
def _init_graph(self):
    self._init_placeholders()

    self.word_emb = tf.get_variable("word_emb",
                                    initializer=tf.constant(self.init_word_emb, dtype=tf.float32),
                                    trainable=False)
    self.char_emb = tf.get_variable("char_emb",
                                    initializer=tf.constant(self.init_char_emb, dtype=tf.float32),
                                    trainable=self.opt['train_char_emb'])

    # token-level masks and lengths for context (c) and question (q)
    self.c_mask = tf.cast(self.c_ph, tf.bool)
    self.q_mask = tf.cast(self.q_ph, tf.bool)
    self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
    self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)

    bs = tf.shape(self.c_ph)[0]
    self.c_maxlen = tf.reduce_max(self.c_len)
    self.q_maxlen = tf.reduce_max(self.q_len)

    # trim batch padding down to the longest sequence in the batch
    self.c = tf.slice(self.c_ph, [0, 0], [bs, self.c_maxlen])
    self.q = tf.slice(self.q_ph, [0, 0], [bs, self.q_maxlen])
    self.c_mask = tf.slice(self.c_mask, [0, 0], [bs, self.c_maxlen])
    self.q_mask = tf.slice(self.q_mask, [0, 0], [bs, self.q_maxlen])
    self.cc = tf.slice(self.cc_ph, [0, 0, 0], [bs, self.c_maxlen, self.char_limit])
    self.qc = tf.slice(self.qc_ph, [0, 0, 0], [bs, self.q_maxlen, self.char_limit])

    # per-token character sequence lengths, flattened to [bs * maxlen]
    self.cc_len = tf.reshape(tf.reduce_sum(tf.cast(tf.cast(self.cc, tf.bool), tf.int32), axis=2), [-1])
    self.qc_len = tf.reshape(tf.reduce_sum(tf.cast(tf.cast(self.qc, tf.bool), tf.int32), axis=2), [-1])

    # one-hot targets for answer start (y1) and end (y2) positions
    self.y1 = tf.one_hot(self.y1_ph, depth=self.context_limit)
    self.y2 = tf.one_hot(self.y2_ph, depth=self.context_limit)
    self.y1 = tf.slice(self.y1, [0, 0], [bs, self.c_maxlen])
    self.y2 = tf.slice(self.y2, [0, 0], [bs, self.c_maxlen])

    with tf.variable_scope("emb"):
        with tf.variable_scope("char"):
            # char-level token embeddings: a bi-GRU over characters, shared between
            # context and question (reuse=True for the second call)
            cc_emb = tf.reshape(tf.nn.embedding_lookup(self.char_emb, self.cc),
                                [bs * self.c_maxlen, self.char_limit, self.char_emb_dim])
            qc_emb = tf.reshape(tf.nn.embedding_lookup(self.char_emb, self.qc),
                                [bs * self.q_maxlen, self.char_limit, self.char_emb_dim])

            cc_emb = variational_dropout(cc_emb, keep_prob=self.keep_prob_ph)
            qc_emb = variational_dropout(qc_emb, keep_prob=self.keep_prob_ph)

            _, (state_fw, state_bw) = cudnn_bi_gru(cc_emb, self.char_hidden_size,
                                                   seq_lengths=self.cc_len,
                                                   trainable_initial_states=True)
            cc_emb = tf.concat([state_fw, state_bw], axis=1)

            _, (state_fw, state_bw) = cudnn_bi_gru(qc_emb, self.char_hidden_size,
                                                   seq_lengths=self.qc_len,
                                                   trainable_initial_states=True,
                                                   reuse=True)
            qc_emb = tf.concat([state_fw, state_bw], axis=1)

            cc_emb = tf.reshape(cc_emb, [bs, self.c_maxlen, 2 * self.char_hidden_size])
            qc_emb = tf.reshape(qc_emb, [bs, self.q_maxlen, 2 * self.char_hidden_size])

        with tf.name_scope("word"):
            c_emb = tf.nn.embedding_lookup(self.word_emb, self.c)
            q_emb = tf.nn.embedding_lookup(self.word_emb, self.q)

        # final token representation: [word embedding; char-level embedding]
        c_emb = tf.concat([c_emb, cc_emb], axis=2)
        q_emb = tf.concat([q_emb, qc_emb], axis=2)

    with tf.variable_scope("encoding"):
        # shared 3-layer bi-GRU encoder over context and question
        rnn = CudnnGRU(num_layers=3, num_units=self.hidden_size, batch_size=bs,
                       input_size=c_emb.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)
        c = rnn(c_emb, seq_len=self.c_len)
        q = rnn(q_emb, seq_len=self.q_len)

    with tf.variable_scope("attention"):
        # question-aware context representation
        qc_att = dot_attention(c, q, mask=self.q_mask, att_size=self.attention_hidden_size,
                               keep_prob=self.keep_prob_ph)
        rnn = CudnnGRU(num_layers=1, num_units=self.hidden_size, batch_size=bs,
                       input_size=qc_att.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)
        att = rnn(qc_att, seq_len=self.c_len)

    with tf.variable_scope("match"):
        # self-matching attention over the question-aware context
        self_att = dot_attention(att, att, mask=self.c_mask, att_size=self.attention_hidden_size,
                                 keep_prob=self.keep_prob_ph)
        rnn = CudnnGRU(num_layers=1, num_units=self.hidden_size, batch_size=bs,
                       input_size=self_att.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)
        match = rnn(self_att, seq_len=self.c_len)

    with tf.variable_scope("pointer"):
        # pointer network predicts answer start and end logits over context positions
        init = simple_attention(q, self.hidden_size, mask=self.q_mask, keep_prob=self.keep_prob_ph)
        pointer = PtrNet(cell_size=init.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)
        logits1, logits2 = pointer(init, match, self.hidden_size, self.c_mask)

    with tf.variable_scope("predict"):
        # joint start/end probabilities, restricted to spans of at most 15 tokens
        outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2),
                          tf.expand_dims(tf.nn.softmax(logits2), axis=1))
        outer = tf.matrix_band_part(outer, 0, tf.cast(tf.minimum(15, self.c_maxlen), tf.int64))
        self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
        self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)

        loss_1 = tf.nn.softmax_cross_entropy_with_logits(logits=logits1, labels=self.y1)
        loss_2 = tf.nn.softmax_cross_entropy_with_logits(logits=logits2, labels=self.y2)
        self.loss = tf.reduce_mean(loss_1 + loss_2)

    if self.weight_decay < 1.0:
        # exponential moving average of the weights; shadow values can later be copied
        # back into the live variables via self.assign_vars
        self.var_ema = tf.train.ExponentialMovingAverage(self.weight_decay)
        ema_op = self.var_ema.apply(tf.trainable_variables())
        with tf.control_dependencies([ema_op]):
            self.loss = tf.identity(self.loss)

            self.shadow_vars = []
            self.global_vars = []
            for var in tf.global_variables():
                v = self.var_ema.average(var)
                if v:
                    self.shadow_vars.append(v)
                    self.global_vars.append(var)
            self.assign_vars = []
            for g, v in zip(self.global_vars, self.shadow_vars):
                self.assign_vars.append(tf.assign(g, v))
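# Usage sketch (an assumption, not shown in the original class): self.assign_vars only
# collects assign ops, so to evaluate with the EMA ("shadow") weights one would run them
# explicitly, e.g. assuming the model keeps its TF1 session in a hypothetical `self.sess`:
#
#     if self.weight_decay < 1.0:
#         self.sess.run(self.assign_vars)   # copy averaged values into the live variables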
def _init_graph(self):
    self._init_placeholders()

    self.word_emb = tf.get_variable("word_emb",
                                    initializer=tf.constant(self.init_word_emb, dtype=tf.float32),
                                    trainable=False)
    self.char_emb = tf.get_variable("char_emb",
                                    initializer=tf.constant(self.init_char_emb, dtype=tf.float32),
                                    trainable=self.train_char_emb)

    self.c_mask = tf.cast(self.c_ph, tf.bool)
    self.q_mask = tf.cast(self.q_ph, tf.bool)
    self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
    self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)

    bs = tf.shape(self.c_ph)[0]
    self.c_maxlen = tf.reduce_max(self.c_len)
    self.q_maxlen = tf.reduce_max(self.q_len)

    self.c = tf.slice(self.c_ph, [0, 0], [bs, self.c_maxlen])
    self.q = tf.slice(self.q_ph, [0, 0], [bs, self.q_maxlen])
    self.c_mask = tf.slice(self.c_mask, [0, 0], [bs, self.c_maxlen])
    self.q_mask = tf.slice(self.q_mask, [0, 0], [bs, self.q_maxlen])
    self.cc = tf.slice(self.cc_ph, [0, 0, 0], [bs, self.c_maxlen, self.char_limit])
    self.qc = tf.slice(self.qc_ph, [0, 0, 0], [bs, self.q_maxlen, self.char_limit])

    self.cc_len = tf.reshape(tf.reduce_sum(tf.cast(tf.cast(self.cc, tf.bool), tf.int32), axis=2), [-1])
    self.qc_len = tf.reshape(tf.reduce_sum(tf.cast(tf.cast(self.qc, tf.bool), tf.int32), axis=2), [-1])
    # clamp char sequence lengths to at least one to avoid zero-length sequences (padded tokens)
    self.cc_len = tf.maximum(tf.ones_like(self.cc_len), self.cc_len)
    self.qc_len = tf.maximum(tf.ones_like(self.qc_len), self.qc_len)

    self.y1 = tf.one_hot(self.y1_ph, depth=self.context_limit)
    self.y2 = tf.one_hot(self.y2_ph, depth=self.context_limit)
    self.y1 = tf.slice(self.y1, [0, 0], [bs, self.c_maxlen])
    self.y2 = tf.slice(self.y2, [0, 0], [bs, self.c_maxlen])

    if self.noans_token:
        # an additional 'no answer' token lets the model decline to answer a question;
        # it is later prepended as the first token of the question-aware context representation
        self.y1 = tf.one_hot(self.y1_ph, depth=self.context_limit + 1)
        self.y2 = tf.one_hot(self.y2_ph, depth=self.context_limit + 1)
        self.y1 = tf.slice(self.y1, [0, 0], [bs, self.c_maxlen + 1])
        self.y2 = tf.slice(self.y2, [0, 0], [bs, self.c_maxlen + 1])

    with tf.variable_scope("emb"):
        with tf.variable_scope("char"):
            cc_emb = tf.reshape(tf.nn.embedding_lookup(self.char_emb, self.cc),
                                [bs * self.c_maxlen, self.char_limit, self.char_emb_dim])
            qc_emb = tf.reshape(tf.nn.embedding_lookup(self.char_emb, self.qc),
                                [bs * self.q_maxlen, self.char_limit, self.char_emb_dim])

            cc_emb = variational_dropout(cc_emb, keep_prob=self.keep_prob_ph)
            qc_emb = variational_dropout(qc_emb, keep_prob=self.keep_prob_ph)

            _, (state_fw, state_bw) = cudnn_bi_gru(cc_emb, self.char_hidden_size,
                                                   seq_lengths=self.cc_len,
                                                   trainable_initial_states=True)
            cc_emb = tf.concat([state_fw, state_bw], axis=1)

            _, (state_fw, state_bw) = cudnn_bi_gru(qc_emb, self.char_hidden_size,
                                                   seq_lengths=self.qc_len,
                                                   trainable_initial_states=True,
                                                   reuse=True)
            qc_emb = tf.concat([state_fw, state_bw], axis=1)

            cc_emb = tf.reshape(cc_emb, [bs, self.c_maxlen, 2 * self.char_hidden_size])
            qc_emb = tf.reshape(qc_emb, [bs, self.q_maxlen, 2 * self.char_hidden_size])

        with tf.name_scope("word"):
            c_emb = tf.nn.embedding_lookup(self.word_emb, self.c)
            q_emb = tf.nn.embedding_lookup(self.word_emb, self.q)

        c_emb = tf.concat([c_emb, cc_emb], axis=2)
        q_emb = tf.concat([q_emb, qc_emb], axis=2)

    with tf.variable_scope("encoding"):
        rnn = self.GRU(num_layers=3, num_units=self.hidden_size, batch_size=bs,
                       input_size=c_emb.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)
        c = rnn(c_emb, seq_len=self.c_len)
        q = rnn(q_emb, seq_len=self.q_len)

    with tf.variable_scope("attention"):
        qc_att = dot_attention(c, q, mask=self.q_mask, att_size=self.attention_hidden_size,
                               keep_prob=self.keep_prob_ph)
        rnn = self.GRU(num_layers=1, num_units=self.hidden_size, batch_size=bs,
                       input_size=qc_att.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)
        att = rnn(qc_att, seq_len=self.c_len)

    with tf.variable_scope("match"):
        self_att = dot_attention(att, att, mask=self.c_mask, att_size=self.attention_hidden_size,
                                 keep_prob=self.keep_prob_ph)
        rnn = self.GRU(num_layers=1, num_units=self.hidden_size, batch_size=bs,
                       input_size=self_att.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)
        match = rnn(self_att, seq_len=self.c_len)

    with tf.variable_scope("pointer"):
        init = simple_attention(q, self.hidden_size, mask=self.q_mask, keep_prob=self.keep_prob_ph)
        pointer = PtrNet(cell_size=init.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)

        if self.noans_token:
            # learnable 'no answer' vector, prepended to the context representation
            # (dtype passed as a keyword: the original passed tf.float32 positionally,
            # where tf.Variable expects the `trainable` flag)
            noans_token = tf.Variable(tf.random_uniform((match.get_shape().as_list()[-1],), -0.1, 0.1),
                                      dtype=tf.float32)
            noans_token = tf.nn.dropout(noans_token, keep_prob=self.keep_prob_ph)
            noans_token = tf.expand_dims(tf.tile(tf.expand_dims(noans_token, axis=0), [bs, 1]), axis=1)
            match = tf.concat([noans_token, match], axis=1)
            self.c_mask = tf.concat([tf.ones(shape=(bs, 1), dtype=tf.bool), self.c_mask], axis=1)

        logits1, logits2 = pointer(init, match, self.hidden_size, self.c_mask)

    with tf.variable_scope("predict"):
        max_ans_length = tf.cast(tf.minimum(15, self.c_maxlen), tf.int64)

        outer_logits = tf.exp(tf.expand_dims(logits1, axis=2) + tf.expand_dims(logits2, axis=1))
        outer_logits = tf.matrix_band_part(outer_logits, 0, max_ans_length)

        outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2),
                          tf.expand_dims(tf.nn.softmax(logits2), axis=1))
        outer = tf.matrix_band_part(outer, 0, max_ans_length)

        self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
        self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)
        self.yp_logits = tf.reduce_max(tf.reduce_max(outer_logits, axis=2), axis=1)

        if self.noans_token:
            # probability that the answer is NOT the 'no answer' token
            self.yp_score = 1 - tf.nn.softmax(logits1)[:, 0] * tf.nn.softmax(logits2)[:, 0]

        loss_1 = tf.nn.softmax_cross_entropy_with_logits(logits=logits1, labels=self.y1)
        loss_2 = tf.nn.softmax_cross_entropy_with_logits(logits=logits2, labels=self.y2)
        self.loss = tf.reduce_mean(loss_1 + loss_2)
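# Inference sketch for the no-answer head (an assumption; the original prediction code is
# not shown here). self.yp_score is one minus the product of the start and end probabilities
# at the artificial first 'no answer' position, so a small score suggests the question is
# unanswerable. `noans_threshold` and `feed` are hypothetical:
#
#     yp1, yp2, score = sess.run([self.yp1, self.yp2, self.yp_score], feed_dict=feed)
#     if score < noans_threshold:   # threshold would be tuned on a development set
#         answer = ''               # treat the question as unanswerable
#     # otherwise yp1 / yp2 index the span, shifted by the prepended no-answer token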
def _init_graph(self):
    self._init_placeholders()

    self.word_emb = tf.get_variable("word_emb",
                                    initializer=tf.constant(self.init_word_emb, dtype=tf.float32),
                                    trainable=False)
    self.char_emb = tf.get_variable("char_emb",
                                    initializer=tf.constant(self.init_char_emb, dtype=tf.float32),
                                    trainable=self.train_char_emb)

    self.c_mask = tf.cast(self.c_ph, tf.bool)
    self.q_mask = tf.cast(self.q_ph, tf.bool)
    self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1)
    self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1)

    bs = tf.shape(self.c_ph)[0]
    self.c_maxlen = tf.reduce_max(self.c_len)
    self.q_maxlen = tf.reduce_max(self.q_len)

    self.c = tf.slice(self.c_ph, [0, 0], [bs, self.c_maxlen])
    self.q = tf.slice(self.q_ph, [0, 0], [bs, self.q_maxlen])
    self.c_mask = tf.slice(self.c_mask, [0, 0], [bs, self.c_maxlen])
    self.q_mask = tf.slice(self.q_mask, [0, 0], [bs, self.q_maxlen])
    self.cc = tf.slice(self.cc_ph, [0, 0, 0], [bs, self.c_maxlen, self.char_limit])
    self.qc = tf.slice(self.qc_ph, [0, 0, 0], [bs, self.q_maxlen, self.char_limit])

    self.cc_len = tf.reshape(tf.reduce_sum(tf.cast(tf.cast(self.cc, tf.bool), tf.int32), axis=2), [-1])
    self.qc_len = tf.reshape(tf.reduce_sum(tf.cast(tf.cast(self.qc, tf.bool), tf.int32), axis=2), [-1])

    self.y1 = tf.one_hot(self.y1_ph, depth=self.context_limit)
    self.y2 = tf.one_hot(self.y2_ph, depth=self.context_limit)
    self.y1 = tf.slice(self.y1, [0, 0], [bs, self.c_maxlen])
    self.y2 = tf.slice(self.y2, [0, 0], [bs, self.c_maxlen])

    with tf.variable_scope("emb"):
        with tf.variable_scope("char"):
            cc_emb = tf.reshape(tf.nn.embedding_lookup(self.char_emb, self.cc),
                                [bs * self.c_maxlen, self.char_limit, self.char_emb_dim])
            qc_emb = tf.reshape(tf.nn.embedding_lookup(self.char_emb, self.qc),
                                [bs * self.q_maxlen, self.char_limit, self.char_emb_dim])

            cc_emb = variational_dropout(cc_emb, keep_prob=self.keep_prob_ph)
            qc_emb = variational_dropout(qc_emb, keep_prob=self.keep_prob_ph)

            _, (state_fw, state_bw) = cudnn_bi_gru(cc_emb, self.char_hidden_size,
                                                   seq_lengths=self.cc_len,
                                                   trainable_initial_states=True)
            cc_emb = tf.concat([state_fw, state_bw], axis=1)

            _, (state_fw, state_bw) = cudnn_bi_gru(qc_emb, self.char_hidden_size,
                                                   seq_lengths=self.qc_len,
                                                   trainable_initial_states=True,
                                                   reuse=True)
            qc_emb = tf.concat([state_fw, state_bw], axis=1)

            cc_emb = tf.reshape(cc_emb, [bs, self.c_maxlen, 2 * self.char_hidden_size])
            qc_emb = tf.reshape(qc_emb, [bs, self.q_maxlen, 2 * self.char_hidden_size])

        with tf.name_scope("word"):
            c_emb = tf.nn.embedding_lookup(self.word_emb, self.c)
            q_emb = tf.nn.embedding_lookup(self.word_emb, self.q)

        c_emb = tf.concat([c_emb, cc_emb], axis=2)
        q_emb = tf.concat([q_emb, qc_emb], axis=2)

    with tf.variable_scope("encoding"):
        rnn = self.GRU(num_layers=3, num_units=self.hidden_size, batch_size=bs,
                       input_size=c_emb.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)
        c = rnn(c_emb, seq_len=self.c_len)
        q = rnn(q_emb, seq_len=self.q_len)

    with tf.variable_scope("attention"):
        qc_att = dot_attention(c, q, mask=self.q_mask, att_size=self.attention_hidden_size,
                               keep_prob=self.keep_prob_ph)
        rnn = self.GRU(num_layers=1, num_units=self.hidden_size, batch_size=bs,
                       input_size=qc_att.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)
        att = rnn(qc_att, seq_len=self.c_len)

    with tf.variable_scope("match"):
        self_att = dot_attention(att, att, mask=self.c_mask, att_size=self.attention_hidden_size,
                                 keep_prob=self.keep_prob_ph)
        rnn = self.GRU(num_layers=1, num_units=self.hidden_size, batch_size=bs,
                       input_size=self_att.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)
        match = rnn(self_att, seq_len=self.c_len)

    with tf.variable_scope("pointer"):
        init = simple_attention(q, self.hidden_size, mask=self.q_mask, keep_prob=self.keep_prob_ph)
        pointer = PtrNet(cell_size=init.get_shape().as_list()[-1], keep_prob=self.keep_prob_ph)
        logits1, logits2 = pointer(init, match, self.hidden_size, self.c_mask)

    with tf.variable_scope("predict"):
        outer_logits = tf.exp(tf.expand_dims(logits1, axis=2) + tf.expand_dims(logits2, axis=1))
        outer = tf.matmul(tf.expand_dims(tf.nn.softmax(logits1), axis=2),
                          tf.expand_dims(tf.nn.softmax(logits2), axis=1))
        outer = tf.matrix_band_part(outer, 0, tf.cast(tf.minimum(15, self.c_maxlen), tf.int64))
        self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
        self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)
        self.yp_logits = tf.reduce_max(tf.reduce_max(outer_logits, axis=2), axis=1)

        loss_1 = tf.nn.softmax_cross_entropy_with_logits(logits=logits1, labels=self.y1)
        loss_2 = tf.nn.softmax_cross_entropy_with_logits(logits=logits2, labels=self.y2)
        self.loss = tf.reduce_mean(loss_1 + loss_2)

    if self.weight_decay < 1.0:
        self.var_ema = tf.train.ExponentialMovingAverage(self.weight_decay)
        ema_op = self.var_ema.apply(tf.trainable_variables())
        with tf.control_dependencies([ema_op]):
            self.loss = tf.identity(self.loss)

            self.shadow_vars = []
            self.global_vars = []
            for var in tf.global_variables():
                v = self.var_ema.average(var)
                if v:
                    self.shadow_vars.append(v)
                    self.global_vars.append(var)
            self.assign_vars = []
            for g, v in zip(self.global_vars, self.shadow_vars):
                self.assign_vars.append(tf.assign(g, v))
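# For clarity, a small self-contained NumPy illustration (not part of the original code) of
# the span-selection trick used in the "predict" scope: the outer product of start and end
# probabilities is kept only on the band where end >= start and end - start <= 15, which is
# what the tf.matrix_band_part call above does, and the start/end indices are then read off
# from row/column maxima exactly as yp1 / yp2 are.
import numpy as np

def pick_span(p_start, p_end, max_len=15):
    """p_start, p_end: 1-D arrays of per-position probabilities for a single example."""
    outer = np.outer(p_start, p_end)                  # outer[i, j] = P(start=i) * P(end=j)
    keep = np.triu(np.ones_like(outer), k=0)          # end >= start
    keep *= np.tril(np.ones_like(outer), k=max_len)   # end - start <= max_len
    outer *= keep
    start = int(outer.max(axis=1).argmax())           # mirrors yp1 = argmax(reduce_max(outer, 2), 1)
    end = int(outer.max(axis=0).argmax())             # mirrors yp2 = argmax(reduce_max(outer, 1), 1)
    return start, end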