Example #1
File: mrc.py Project: wangpeng3891/MRC
 def r_net(self):
     hps = self._hps
     size = hps.size
     q_rep = self.question_inputs
     c_rep = self.context_inputs
     with tf.variable_scope('embedding_encoder_layer'):
         with tf.variable_scope('stacked_embedding_encoder_block'):
             # question encoding
             q_rep = encoder_block_v1(q_rep, self.batch_size, self.max_q_length, hps.dropout_rate, 4, 7, 3, size,
                                      self.dropout)
         # reuse variables so the question and context share one embedding encoder
         tf.get_variable_scope().reuse_variables()
         with tf.variable_scope('stacked_embedding_encoder_block'):
             # context encoding
             c_rep = encoder_block_v1(c_rep, self.batch_size, self.max_c_length, hps.dropout_rate, 4, 7, 3, size,
                                      self.dropout)
     with tf.variable_scope('context_question_attention_layer'):
         with tf.variable_scope('question_aware_context'):
             with tf.variable_scope('context'):
                 context_c = multihead_attention(q_rep, c_rep)
             with tf.variable_scope('question_semantic_fusion'):
                 q_rep = tf.concat([q_rep, context_c, q_rep * context_c], axis=-1)
                 q_rep = encoder_block_v1(q_rep, self.batch_size, self.max_q_length, hps.dropout_rate, 2, 7, 3,
                                          size, self.dropout)
         with tf.variable_scope('context_aware_question'):
             with tf.variable_scope('context'):
                 context_q = multihead_attention(c_rep, q_rep)
             with tf.variable_scope('context_semantic_fusion'):
                 c_rep = tf.concat([c_rep, context_q, c_rep * context_q], axis=-1)
                 for i in range(hps.num_stacks):
                     with tf.variable_scope('stack_%d' % i):
                         c_rep = encoder_block_v1(c_rep, self.batch_size, self.max_c_length,
                                                  hps.dropout_rate, 2, 7, 3, size, self.dropout)
                     # with tf.variable_scope('residual_drop_%d' % i):
                     #     death_rate = self.set_death_rate(i, hps.num_stacks, hps.last_rate)
                     #     rand = tf.random_uniform([], minval=0.0, maxval=1.0)
                     #     gate = tf.Variable(rand > death_rate, trainable=False)
                     #     c_rep = tf.cond(self.dropout,
                     #                     lambda: residual_drop_train(c_rep, c_rep_new, gate),
                     #                     lambda: residual_drop_test(c_rep, c_rep_new, 1.0 - death_rate))
     with tf.variable_scope('memory_based_answer_pointer'):
         with tf.variable_scope('init_state'):
             z_s = tf.reduce_mean(q_rep, axis=1, keep_dims=True)
             z_s = tf.cond(self.dropout, lambda: tf.nn.dropout(z_s, keep_prob=1.0 - hps.dropout_rate), lambda: z_s)
         for i in range(hps.T):
             if i > 0:
                 tf.get_variable_scope().reuse_variables()
             with tf.variable_scope('start_position'):
                 start_pos_scores, u_s = fn(c_rep, z_s)
             with tf.variable_scope('start_pos_memory_semantic_fusion_unit'):
                 z_e = sfu(z_s, u_s)
                 z_e = tf.cond(self.dropout, lambda: tf.nn.dropout(z_e, keep_prob=1.0 - hps.dropout_rate),
                               lambda: z_e)
             with tf.variable_scope('end_position'):
                 end_pos_scores, u_e = fn(c_rep, z_e)
             with tf.variable_scope('end_pos_memory_semantic_fusion_unit'):
                 z_s = sfu(z_e, u_e)
                 z_s = tf.cond(self.dropout, lambda: tf.nn.dropout(z_s, keep_prob=1.0 - hps.dropout_rate),
                               lambda: z_s)
         self.pos_scores = [start_pos_scores, end_pos_scores]
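The snippet above leans on helpers defined elsewhere in mrc.py (encoder_block_v1, multihead_attention, sfu, fn) that are not shown on this page. As a reading aid, here is a minimal sketch of what the semantic fusion unit sfu(x, fusion) might look like, following the formulation in the Reinforced Mnemonic Reader paper (Hu et al., 2017); the layer names and shapes are assumptions, not the project's actual code.

 import tensorflow as tf

 def sfu(x, fusion):
     # Hypothetical semantic fusion unit: gated combination of the input x
     # with a fused evidence vector of the same width.
     size = x.get_shape()[-1].value
     inputs = tf.concat([x, fusion], axis=-1)
     # candidate state computed from the concatenated inputs
     r = tf.layers.dense(inputs, size, activation=tf.tanh, name='sfu_r')
     # gate controlling how much of the candidate replaces the original input
     g = tf.layers.dense(inputs, size, activation=tf.sigmoid, name='sfu_g')
     return g * r + (1.0 - g) * x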
Example #2
File: mrc.py Project: zhaodaolimeng/MRC
 def r_net(self):
     hps = self._hps
     with tf.variable_scope('question_encoding'):
         q_rep = self.question_inputs
         q_states = []
         for i in range(hps.num_layers):
             with tf.variable_scope('layer%d' % i):
                 # a single GRUCell instance is passed as both the forward and backward cell
                 q_cell = tf.contrib.rnn.GRUCell(hps.size)
                 q_rep, q_state = tf.nn.bidirectional_dynamic_rnn(
                     q_cell,
                     q_cell,
                     q_rep,
                     sequence_length=self.question_lens,
                     dtype=self.dtype)
                 q_rep = tf.concat(q_rep, axis=-1)
                 q_states.append(q_state)
         assert q_rep.get_shape()[-1].value == 2 * hps.size
     with tf.variable_scope('context_encoding'):
         c_rep = self.context_inputs
         for i in range(hps.num_layers):
             with tf.variable_scope('layer%d' % i):
                 c_cell = tf.contrib.rnn.GRUCell(hps.size)
                 c_rep, c_state = tf.nn.bidirectional_dynamic_rnn(
                     c_cell,
                     c_cell,
                     c_rep,
                     initial_state_fw=q_states[i][0],
                     initial_state_bw=q_states[i][1],
                     sequence_length=self.context_lens)
                 c_rep = tf.concat(c_rep, axis=-1)
         assert c_rep.get_shape()[-1].value == 2 * hps.size
     with tf.variable_scope('question_aware'):
         q_a_cell = tf.contrib.rnn.GRUCell(hps.size)
         context_q = multihead_attention(c_rep, q_rep)
         inputs = sfu(c_rep, context_q)
         c_rep, state = tf.nn.bidirectional_dynamic_rnn(q_a_cell,
                                                        q_a_cell,
                                                        inputs,
                                                        self.context_lens,
                                                        dtype=self.dtype)
         c_rep = tf.concat(c_rep, axis=-1)
     with tf.variable_scope('self_attention'):
         s_a_cell = tf.contrib.rnn.GRUCell(hps.size)
         context_c = multihead_attention(c_rep, c_rep)
         inputs = sfu(c_rep, context_c)
         c_rep, state = tf.nn.bidirectional_dynamic_rnn(s_a_cell,
                                                        s_a_cell,
                                                        inputs,
                                                        self.context_lens,
                                                        dtype=self.dtype)
         c_rep = tf.concat(c_rep, axis=-1)
         # if hps.mode == 'train':
         #     c_rep = tf.nn.dropout(c_rep, 1.0 - hps.dropout_rate)
         assert c_rep.get_shape()[-1].value == 2 * hps.size
     with tf.variable_scope('output_layer'):
         answer_cell = tf.contrib.rnn.GRUCell(2 * hps.size)
         with tf.variable_scope('pointer'):
             v_q = tf.get_variable('question_parameters',
                                   [hps.batch_size, 2 * hps.size],
                                   self.dtype,
                                   tf.truncated_normal_initializer())
             # pool the question with the learned query v_q to initialize the pointer state
             _, state = pointer(q_rep, v_q, answer_cell)
             tf.get_variable_scope().reuse_variables()
             start_pos_scores, state = pointer(c_rep, state, answer_cell)
             tf.get_variable_scope().reuse_variables()
             end_pos_scores, state = pointer(c_rep, state, answer_cell)
             self.pos_scores = [start_pos_scores, end_pos_scores]
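The pointer helper is likewise not shown. A plausible minimal sketch, modeled on R-Net's answer-pointer step (additive attention over a sequence conditioned on the decoder state, followed by one step of the answer cell), is below; the weight names and the exact scoring features are assumptions.

 import tensorflow as tf

 def pointer(seq, state, cell):
     # Hypothetical pointer-network step: score every position in `seq`
     # against `state`, then advance the answer cell with the attended input.
     hidden = seq.get_shape()[-1].value
     # broadcast the state across all time steps
     tiled = tf.tile(tf.expand_dims(state, 1), [1, tf.shape(seq)[1], 1])
     features = tf.tanh(tf.layers.dense(tf.concat([seq, tiled], axis=-1),
                                        hidden, use_bias=False, name='w'))
     scores = tf.squeeze(tf.layers.dense(features, 1, use_bias=False, name='v'),
                         axis=-1)  # [batch, time]
     probs = tf.nn.softmax(scores)
     # attention-pooled input drives one step of the answer cell
     attended = tf.reduce_sum(tf.expand_dims(probs, -1) * seq, axis=1)
     _, new_state = cell(attended, state)
     return scores, new_state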
Example #3
File: mrc.py Project: zhaodaolimeng/MRC
 def mnemonic_reader(self):
     hps = self._hps
     with tf.variable_scope('question_encoding'):
         q_rep = self.question_inputs
         q_states = []
         for i in range(hps.num_layers):
             with tf.variable_scope('layer%d' % i):
                 q_cell = tf.contrib.rnn.GRUCell(hps.size)
                 q_rep, q_state = tf.nn.bidirectional_dynamic_rnn(
                     q_cell,
                     q_cell,
                     q_rep,
                     sequence_length=self.question_lens,
                     dtype=self.dtype)
                 q_rep = tf.concat(q_rep, axis=-1)
                 q_states.append(q_state)
         assert q_rep.get_shape()[-1].value == 2 * hps.size
     with tf.variable_scope('context_encoding'):
         c_rep = self.context_inputs
         for i in range(hps.num_layers):
             with tf.variable_scope('layer%d' % i):
                 c_cell = tf.contrib.rnn.GRUCell(hps.size)
                 c_rep, c_state = tf.nn.bidirectional_dynamic_rnn(
                     c_cell,
                     c_cell,
                     c_rep,
                     initial_state_fw=q_states[i][0],
                     initial_state_bw=q_states[i][1],
                     sequence_length=self.context_lens)
                 c_rep = tf.concat(c_rep, axis=-1)
         assert c_rep.get_shape()[-1].value == 2 * hps.size
     with tf.variable_scope('iterative_aligner'):
         for i in range(hps.T):
             with tf.variable_scope('question_aware_%d' % i):
                 q_a_cell = tf.contrib.rnn.GRUCell(hps.size)
                 context_q = multihead_attention(c_rep, q_rep)
                 inputs = sfu(c_rep, context_q)
                 c_rep, state = tf.nn.bidirectional_dynamic_rnn(
                     q_a_cell,
                     q_a_cell,
                     inputs,
                     self.context_lens,
                     dtype=self.dtype)
                 c_rep = tf.concat(c_rep, axis=-1)
             with tf.variable_scope('self_attention_%d' % i):
                 s_a_cell = tf.contrib.rnn.GRUCell(hps.size)
                 context_c = multihead_attention(c_rep, c_rep)
                 inputs = sfu(c_rep, context_c)
                 c_rep, state = tf.nn.bidirectional_dynamic_rnn(
                     s_a_cell,
                     s_a_cell,
                     inputs,
                     self.context_lens,
                     dtype=self.dtype)
                 c_rep = tf.concat(c_rep, axis=-1)
                 # if hps.mode == 'train':
                 #     c_rep = tf.nn.dropout(c_rep, 1.0 - hps.dropout_rate)
                 assert c_rep.get_shape()[-1].value == 2 * hps.size
     with tf.variable_scope('memory_based_answer_pointer'):
         # q_state holds the final fw/bw states of the last question-encoding layer
         z_s = tf.expand_dims(tf.concat(q_state, axis=1), axis=1)
         for i in range(hps.L):
             with tf.variable_scope('start_position_%d' % i):
                 start_pos_scores, u_s = fn(c_rep, z_s)
             with tf.variable_scope(
                     'start_pos_memory_semantic_fusion_unit_%d' % i):
                 z_e = sfu(z_s, u_s)
             with tf.variable_scope('end_position_%d' % i):
                 end_pos_scores, u_e = fn(c_rep, z_e)
             with tf.variable_scope(
                     'end_pos_memory_semantic_fusion_unit_%d' % i):
                 z_s = sfu(z_e, u_e)
         self.pos_scores = [start_pos_scores, end_pos_scores]
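Here fn(c_rep, z) scores every context position against the current memory z and also returns a pooled evidence vector u that feeds the next sfu call. A hedged sketch consistent with the Mnemonic Reader's memory-based answer pointer follows; the concatenated scoring features are an assumption.

 import tensorflow as tf

 def fn(c_rep, z):
     # Hypothetical scoring function: z is [batch, 1, size], c_rep is
     # [batch, time, size]; returns per-position scores and an evidence
     # vector shaped like z.
     size = c_rep.get_shape()[-1].value
     z_tiled = tf.tile(z, [1, tf.shape(c_rep)[1], 1])
     features = tf.concat([c_rep, z_tiled, c_rep * z_tiled], axis=-1)
     hidden = tf.layers.dense(features, size, activation=tf.tanh, name='fn_w')
     scores = tf.squeeze(tf.layers.dense(hidden, 1, use_bias=False, name='fn_v'),
                         axis=-1)  # [batch, time]
     probs = tf.nn.softmax(scores)
     # evidence vector: probability-weighted sum of the context states
     u = tf.reduce_sum(tf.expand_dims(probs, -1) * c_rep, axis=1, keep_dims=True)
     return scores, u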
Example #4
File: mrc.py Project: wangpeng3891/MRC
    def transformer(self):

        hps = self._hps
        with tf.variable_scope('question_convolution_encoding'):
            q_rep = self.question_inputs
            q_output = conv_glu_v2(q_rep, 3, 1, hps.size, self.batch_size)
            q_output = tf.cond(self.dropout, lambda: tf.nn.dropout(q_output, keep_prob=1.0 - hps.dropout_rate),
                               lambda: q_output)
            q_rep = short_cut(q_rep, q_output, q_output.get_shape()[-1].value)
        with tf.variable_scope('context_convolution_encoding'):
            c_rep = self.context_inputs
            c_output = conv_glu_v2(c_rep, 3, 1, hps.size, self.batch_size)
            c_output = tf.cond(self.dropout, lambda: tf.nn.dropout(c_output, keep_prob=1.0 - hps.dropout_rate),
                               lambda: c_output)
            c_rep = short_cut(c_rep, c_output, c_output.get_shape()[-1].value)
        with tf.variable_scope('question_encoding'):
            for i in range(hps.num_layers):
                with tf.variable_scope('layer%d' % i):
                    q_rep, q_state = bi_sru(
                        x=q_rep,
                        output_size=hps.size,
                        sequence_length=self.question_lens,
                        dtype=self.dtype
                    )
                    q_rep = tf.concat(q_rep, axis=-1)
                    q_rep = tf.layers.dense(q_rep, units=hps.size, use_bias=False, name='q_rep')
                    q_rep = tf.cond(self.dropout, lambda: tf.nn.dropout(q_rep, keep_prob=1.0 - hps.dropout_rate),
                                    lambda: q_rep)
            assert q_rep.get_shape()[-1].value == hps.size
        with tf.variable_scope('context_encoding'):
            for i in range(hps.num_layers):
                with tf.variable_scope('layer%d' % i):
                    c_rep, c_state = bi_sru(
                        x=c_rep,
                        output_size=hps.size,
                        sequence_length=self.context_lens,
                        dtype=self.dtype
                    )
                    c_rep = tf.concat(c_rep, axis=-1)
                    c_rep = tf.layers.dense(c_rep, units=hps.size, use_bias=False, name='c_rep')
                    c_rep = tf.cond(self.dropout, lambda: tf.nn.dropout(c_rep, keep_prob=1.0 - hps.dropout_rate),
                                    lambda: c_rep)
            assert c_rep.get_shape()[-1].value == hps.size
        with tf.variable_scope('iterative_aligner'):
            for i in range(hps.T):
                with tf.variable_scope('question_aware_%d' % i):
                    with tf.variable_scope('multihead_attention'):
                        context_q = multihead_attention(c_rep, q_rep)
                    with tf.variable_scope('gate'):
                        inputs = gate(c_rep, context_q)
                    with tf.variable_scope('GRU'):  # scope name is historical; the cell below is a bi-directional SRU
                        c_rep, c_state = bi_sru(
                            x=inputs,
                            output_size=hps.size,
                            sequence_length=self.context_lens,
                            dtype=self.dtype
                        )
                        c_rep = tf.concat(c_rep, axis=-1)
                        c_rep = tf.layers.dense(c_rep, units=hps.size, use_bias=False, name='c_rep')
                        c_rep = tf.cond(self.dropout, lambda: tf.nn.dropout(c_rep, keep_prob=1.0 - hps.dropout_rate),
                                        lambda: c_rep)
                with tf.variable_scope('self_attention_%d' % i):
                    with tf.variable_scope('multihead_attention'):
                        context_c = multihead_attention(c_rep, c_rep)
                    with tf.variable_scope('semantic_fusion_unit'):  # despite the name, this branch applies gate(), not sfu()
                        inputs = gate(c_rep, context_c)
                    with tf.variable_scope('GRU'):
                        c_rep, c_state = bi_sru(
                            x=inputs,
                            output_size=hps.size,
                            sequence_length=self.context_lens,
                            dtype=self.dtype
                        )
                        c_rep = tf.concat(c_rep, axis=-1)
                        c_rep = tf.layers.dense(c_rep, units=hps.size, use_bias=False, name='c_rep')
                        c_rep = tf.cond(self.dropout, lambda: tf.nn.dropout(c_rep, keep_prob=1.0 - hps.dropout_rate),
                                        lambda: c_rep)
                        assert c_rep.get_shape()[-1].value == hps.size
        with tf.variable_scope('output_layer'):
            with tf.variable_scope('init_state'):
                z_s = tf.layers.dense(tf.concat(q_state, axis=-1), units=hps.size, use_bias=False, name='z_s')
                z_s = tf.expand_dims(z_s, axis=1)
                z_s = tf.cond(self.dropout, lambda: tf.nn.dropout(z_s, keep_prob=1.0 - hps.dropout_rate), lambda: z_s)
            for i in range(hps.T):
                if i > 0:
                    tf.get_variable_scope().reuse_variables()
                with tf.variable_scope('start_position'):
                    start_pos_scores, u_s = fn(c_rep, z_s)
                with tf.variable_scope('start_pos_memory_semantic_fusion_unit'):
                    z_e = sfu(z_s, u_s)
                    z_e = tf.cond(self.dropout, lambda: tf.nn.dropout(z_e, keep_prob=1.0 - hps.dropout_rate),
                                  lambda: z_e)
                with tf.variable_scope('end_position'):
                    end_pos_scores, u_e = fn(c_rep, z_e)
                with tf.variable_scope('end_pos_memory_semantic_fusion_unit'):
                    z_s = sfu(z_e, u_e)
                    z_s = tf.cond(self.dropout, lambda: tf.nn.dropout(z_s, keep_prob=1.0 - hps.dropout_rate),
                                  lambda: z_s)
            self.pos_scores = [start_pos_scores, end_pos_scores]
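Example #4 replaces the semantic fusion unit with a simpler gate(c, a) when mixing attention output back into the context. A minimal sketch of such a gate, in the spirit of R-Net's gated attention (the names are assumptions), might look like this:

 import tensorflow as tf

 def gate(x, attended):
     # Hypothetical R-Net-style gate: a sigmoid gate computed from the
     # concatenated inputs rescales that same concatenation.
     inputs = tf.concat([x, attended], axis=-1)
     g = tf.layers.dense(inputs, inputs.get_shape()[-1].value,
                         activation=tf.sigmoid, use_bias=False, name='g')
     return g * inputs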