Example 1
def cal_max_question_representation(question_representation, atten_scores):
    atten_positions = tf.argmax(
        atten_scores, axis=2,
        output_type=tf.int32)  # [batch_size, passage_len]
    max_question_reps = layer_utils.collect_representation(
        question_representation, atten_positions)
    return max_question_reps
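The gather step above is delegated to layer_utils.collect_representation, whose source is not shown in these examples. Below is a minimal stand-in sketch, assuming the helper picks one vector per index inside each batch element; the name collect_representation_sketch and the tf.gather_nd formulation are illustrative, not the library's actual implementation.

import tensorflow as tf

def collect_representation_sketch(representation, positions):
    # Hypothetical stand-in for layer_utils.collect_representation.
    # representation: [batch_size, seq_len, dim]
    # positions:      [batch_size, num_positions] int32 indices into seq_len
    # returns:        [batch_size, num_positions, dim]
    batch_size = tf.shape(positions)[0]
    num_positions = tf.shape(positions)[1]
    batch_idx = tf.tile(tf.expand_dims(tf.range(batch_size), 1),
                        [1, num_positions])             # [batch, num_positions]
    indices = tf.stack([batch_idx, positions], axis=2)  # [batch, num_positions, 2]
    return tf.gather_nd(representation, indices)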
Example 2
def cal_max_node_2_representation(node_2_rep, relevancy_matrix):
    # [batch_size, single_graph_1_nodes_size]
    atten_positions = tf.argmax(relevancy_matrix, axis=2, output_type=tf.int32)
    max_node_2_reps = layer_utils.collect_representation(
        node_2_rep, atten_positions)

    # [batch_size, single_graph_1_nodes_size, dim]
    return max_node_2_reps
Example 3
def cal_max_question_representation(self, question_representation,
                                    atten_scores):
    # question_representation: [batch_size, q_len, dim]
    # atten_scores: [batch_size, passage_len, q_len]
    atten_positions = tf.argmax(atten_scores, axis=2, output_type=tf.int32)
    max_question_reps = layer_utils.collect_representation(
        question_representation, atten_positions)
    # [batch_size, passage_len, dim]
    return max_question_reps
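The argmax-plus-gather pattern in Examples 1 through 3 can also be written without the external helper by turning the argmax positions into a one-hot matrix and multiplying it with the question representation. A small equivalent sketch under that assumption (pure TensorFlow, illustrative only; assumes float32 inputs):

import tensorflow as tf

def max_attended_question(question_representation, atten_scores):
    # question_representation: [batch_size, q_len, dim]
    # atten_scores:            [batch_size, passage_len, q_len]
    # For every passage position, select the question vector with the
    # highest attention score, without relying on layer_utils.
    q_len = tf.shape(question_representation)[1]
    atten_positions = tf.argmax(atten_scores, axis=2, output_type=tf.int32)
    one_hot = tf.one_hot(atten_positions, depth=q_len)   # [batch, passage_len, q_len]
    return tf.matmul(one_hot, question_representation)   # [batch, passage_len, dim]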
Example 4
def onelayer_BiMPM_match(in_dim,
                         passage,
                         question,
                         passage_mask,
                         question_mask,
                         accum_dim=0,
                         passage_accum=None,
                         question_accum=None,
                         options=None,
                         scope_name='onelayer_BiMPM_match',
                         is_training=True,
                         dropout_rate=0.2,
                         reuse=False):
    if passage_accum is None:
        passage_accum = passage
        question_accum = question
        accum_dim = in_dim
    match_results = []
    match_dim = 0
    QoP_reps = None
    with tf.variable_scope(scope_name, reuse=reuse):
        # attention passage over question
        PoQ_atten = layer_utils.calcuate_attention(passage_accum,
                                                   question_accum,
                                                   accum_dim,
                                                   accum_dim,
                                                   scope_name="PoQ_atten",
                                                   att_type=options.att_type,
                                                   att_dim=options.att_dim,
                                                   remove_diagnoal=False,
                                                   mask1=passage_mask,
                                                   mask2=question_mask,
                                                   is_training=is_training,
                                                   dropout_rate=dropout_rate)
        PoQ_reps = tf.matmul(
            PoQ_atten,
            layer_utils.dropout_layer(question,
                                      dropout_rate,
                                      is_training=is_training))
        if options.with_QoP:
            # attention question over passage
            QoP_atten = layer_utils.calcuate_attention(
                question_accum,
                passage_accum,
                accum_dim,
                accum_dim,
                scope_name="QoP_atten",
                att_type=options.att_type,
                att_dim=options.att_dim,
                remove_diagnoal=False,
                mask1=question_mask,
                mask2=passage_mask,
                is_training=is_training,
                dropout_rate=dropout_rate)
            QoP_reps = tf.matmul(
                QoP_atten,
                layer_utils.dropout_layer(passage,
                                          dropout_rate,
                                          is_training=is_training))

        # attentive matching
        (att_match_rep,
         att_match_dim) = multi_perspective_match(in_dim,
                                                  passage,
                                                  PoQ_reps,
                                                  is_training=is_training,
                                                  dropout_rate=dropout_rate,
                                                  options=options,
                                                  scope_name='att_match')
        match_results.append(att_match_rep)
        match_dim += att_match_dim

        # max attentive matching
        PoQ_max_reps = layer_utils.collect_representation(
            question, tf.argmax(PoQ_atten, axis=2, output_type=tf.int32))
        (max_att_match_rep, max_att_match_dim) = multi_perspective_match(
            in_dim,
            passage,
            PoQ_max_reps,
            is_training=is_training,
            dropout_rate=dropout_rate,
            options=options,
            scope_name='max_att_match')
        match_results.append(max_att_match_rep)
        match_dim += max_att_match_dim

    match_results = tf.concat(axis=2, values=match_results)
    return (match_results, match_dim, PoQ_reps, QoP_reps)
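Example 4 builds two aligned question views for every passage token: PoQ_reps is the attention-weighted average of question vectors, and PoQ_max_reps keeps only the single best-scoring question vector. The toy sketch below illustrates just that shape flow, using plain dot-product attention in place of layer_utils.calcuate_attention; the shapes and the scoring function are assumptions for illustration.

import tensorflow as tf

batch_size, p_len, q_len, dim = 2, 7, 5, 4
passage = tf.random_normal([batch_size, p_len, dim])
question = tf.random_normal([batch_size, q_len, dim])

# Plain dot-product attention of passage over question (one possible att_type).
scores = tf.matmul(passage, question, transpose_b=True)         # [batch, p_len, q_len]
PoQ_atten = tf.nn.softmax(scores, axis=-1)

# Attentive matching input: soft average of question vectors per passage token.
PoQ_reps = tf.matmul(PoQ_atten, question)                        # [batch, p_len, dim]

# Max-attentive matching input: hard pick of the best question vector.
positions = tf.argmax(PoQ_atten, axis=2, output_type=tf.int32)   # [batch, p_len]
PoQ_max_reps = tf.matmul(tf.one_hot(positions, depth=q_len), question)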
Example 5
def multi_granularity_match(feature_dim,
                            passage,
                            question,
                            passage_length,
                            question_length,
                            passage_mask=None,
                            question_mask=None,
                            is_training=True,
                            dropout_rate=0.2,
                            options=None,
                            with_full_matching=False,
                            with_attentive_matching=True,
                            with_max_attentive_matching=True,
                            scope_name='mgm',
                            reuse=False):
    '''
        passage: [batch_size, passage_length, feature_dim]
        question: [batch_size, question_length, feature_dim]
        passage_length: [batch_size]
        question_length: [batch_size]
    '''
    input_shape = tf.shape(passage)
    batch_size = input_shape[0]
    passage_len = input_shape[1]

    match_reps = []
    with tf.variable_scope(scope_name, reuse=reuse):
        match_dim = 0
        if with_full_matching:
            passage_fw = passage[:, :, 0:feature_dim // 2]
            passage_bw = passage[:, :, feature_dim // 2:feature_dim]

            question_fw = question[:, :, 0:feature_dim // 2]
            question_bw = question[:, :, feature_dim // 2:feature_dim]
            question_fw = layer_utils.collect_final_step_of_lstm(
                question_fw,
                question_length - 1)  # [batch_size, feature_dim/2]
            question_bw = question_bw[:, 0, :]

            question_fw = tf.expand_dims(question_fw, axis=1)
            question_fw = tf.tile(
                question_fw,
                [1, passage_len, 1])  # [batch_size, passage_len, feature_dim/2]

            question_bw = tf.expand_dims(question_bw, axis=1)
            question_bw = tf.tile(
                question_bw,
                [1, passage_len, 1])  # [batch_size, passage_len, feature_dim/2]
            (fw_full_match_reps, fw_full_match_dim) = multi_perspective_match(
                feature_dim // 2,
                passage_fw,
                question_fw,
                is_training=is_training,
                dropout_rate=dropout_rate,
                options=options,
                scope_name='fw_full_match')
            (bw_full_match_reps, bw_full_match_dim) = multi_perspective_match(
                feature_dim // 2,
                passage_bw,
                question_bw,
                is_training=is_training,
                dropout_rate=dropout_rate,
                options=options,
                scope_name='bw_full_match')
            match_reps.append(fw_full_match_reps)
            match_reps.append(bw_full_match_reps)
            match_dim += fw_full_match_dim
            match_dim += bw_full_match_dim

        if with_attentive_matching or with_max_attentive_matching:
            atten_scores = layer_utils.calcuate_attention(
                passage,
                question,
                feature_dim,
                feature_dim,
                scope_name="attention",
                att_type=options.attn_type,
                att_dim=options.attn_depth,
                remove_diagnoal=False,
                mask1=passage_mask,
                mask2=question_mask,
                is_training=is_training,
                dropout_rate=dropout_rate)
            # match_reps.append(tf.reduce_max(atten_scores, axis=2, keep_dims=True))
            # match_reps.append(tf.reduce_mean(atten_scores, axis=2, keep_dims=True))
            # match_dim += 2

        if with_max_attentive_matching:
            atten_positions = tf.argmax(
                atten_scores, axis=2,
                output_type=tf.int32)  # [batch_size, passage_len]
            max_question_reps = layer_utils.collect_representation(
                question, atten_positions)
            (max_att_match_rep, max_att_match_dim) = multi_perspective_match(
                feature_dim,
                passage,
                max_question_reps,
                is_training=is_training,
                dropout_rate=dropout_rate,
                options=options,
                scope_name='max_att_match')
            match_reps.append(max_att_match_rep)
            match_dim += max_att_match_dim

        if with_attentive_matching:
            att_rep = tf.matmul(atten_scores, question)
            (attentive_match_rep,
             attentive_match_dim) = multi_perspective_match(
                 feature_dim,
                 passage,
                 att_rep,
                 is_training=is_training,
                 dropout_rate=dropout_rate,
                 options=options,
                 scope_name='att_match')
            match_reps.append(attentive_match_rep)
            match_dim += attentive_match_dim
    match_reps = tf.concat(axis=2, values=match_reps)
    return (match_reps, match_dim)
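The full-matching branch of Example 5 also relies on layer_utils.collect_final_step_of_lstm to read out the last valid forward state of the question encoder. Here is a minimal sketch of that helper's presumed behavior; the _sketch suffix and the tf.gather_nd formulation are illustrative, not the library code.

import tensorflow as tf

def collect_final_step_of_lstm_sketch(lstm_representation, positions):
    # Hypothetical stand-in for layer_utils.collect_final_step_of_lstm.
    # lstm_representation: [batch_size, seq_len, dim]
    # positions:           [batch_size] index of the last valid step (length - 1)
    # returns:             [batch_size, dim]
    positions = tf.cast(positions, tf.int32)
    batch_size = tf.shape(lstm_representation)[0]
    indices = tf.stack([tf.range(batch_size), positions], axis=1)  # [batch, 2]
    return tf.gather_nd(lstm_representation, indices)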