Example #1
    def forward(self, x, x_single_mask, x_char, x_char_mask, x_features, x_pos,
                x_ent, x_bert, x_bert_mask, x_bert_offsets, q, q_mask, q_char,
                q_char_mask, q_bert, q_bert_mask, q_bert_offsets, context_len):
        batch_size = q.shape[0]
        x_mask = x_single_mask.expand(batch_size, -1)
        x_word_embed = self.vocab_embed(x).expand(
            batch_size, -1, -1)  # batch x x_len x vocab_dim
        ques_word_embed = self.vocab_embed(q)  # batch x q_len x vocab_dim

        x_input_list = [
            dropout(x_word_embed,
                    p=self.opt['dropout_emb'],
                    training=self.drop_emb)
        ]  # batch x x_len x vocab_dim
        ques_input_list = [
            dropout(ques_word_embed,
                    p=self.opt['dropout_emb'],
                    training=self.drop_emb)
        ]  # batch x q_len x vocab_dim

        # contextualized embedding
        x_cemb = ques_cemb = None
        if 'BERT' in self.opt:
            x_cemb = ques_cemb = None

            if 'BERT_LINEAR_COMBINE' in self.opt:
                x_bert_output = self.Bert(x_bert, x_bert_mask, x_bert_offsets,
                                          x_single_mask)
                x_cemb_mid = self.linear_sum(x_bert_output, self.alphaBERT,
                                             self.gammaBERT)
                ques_bert_output = self.Bert(q_bert, q_bert_mask,
                                             q_bert_offsets, q_mask)
                ques_cemb_mid = self.linear_sum(ques_bert_output,
                                                self.alphaBERT, self.gammaBERT)
                x_cemb_mid = x_cemb_mid.expand(batch_size, -1, -1)
            else:
                x_cemb_mid = self.Bert(x_bert, x_bert_mask, x_bert_offsets,
                                       x_single_mask)
                x_cemb_mid = x_cemb_mid.expand(batch_size, -1, -1)
                ques_cemb_mid = self.Bert(q_bert, q_bert_mask, q_bert_offsets,
                                          q_mask)

            x_input_list.append(x_cemb_mid)
            ques_input_list.append(ques_cemb_mid)

        if 'CHAR_CNN' in self.opt:
            x_char_final = self.character_cnn(x_char, x_char_mask)
            x_char_final = x_char_final.expand(batch_size, -1, -1)
            ques_char_final = self.character_cnn(q_char, q_char_mask)
            x_input_list.append(x_char_final)
            ques_input_list.append(ques_char_final)

        x_prealign = self.pre_align(x_word_embed, ques_word_embed, q_mask)
        x_input_list.append(
            x_prealign)  # batch x x_len x (vocab_dim + cdim + vocab_dim)

        x_pos_emb = self.pos_embedding(x_pos).expand(
            batch_size, -1, -1)  # batch x x_len x pos_dim
        x_ent_emb = self.ent_embedding(x_ent).expand(
            batch_size, -1, -1)  # batch x x_len x ent_dim
        x_input_list.append(x_pos_emb)
        x_input_list.append(x_ent_emb)
        x_input_list.append(
            x_features
        )  # batch x x_len x (vocab_dim + cdim + vocab_dim + pos_dim + ent_dim + feature_dim)

        x_input = torch.cat(
            x_input_list, 2
        )  # batch x x_len x (vocab_dim + cdim + vocab_dim + pos_dim + ent_dim + feature_dim)
        ques_input = torch.cat(ques_input_list,
                               2)  # batch x q_len x (vocab_dim + cdim)

        # Multi-layer RNN
        _, x_rnn_layers = self.context_rnn(
            x_input, x_mask, return_list=True, x_additional=x_cemb
        )  # layer x batch x x_len x context_rnn_output_size
        _, ques_rnn_layers = self.ques_rnn(
            ques_input, q_mask, return_list=True, x_additional=ques_cemb
        )  # layer x batch x q_len x ques_rnn_output_size

        # rnn with question only
        ques_highlvl = self.high_lvl_ques_rnn(
            torch.cat(ques_rnn_layers, 2),
            q_mask)  # batch x q_len x high_lvl_ques_rnn_output_size
        ques_rnn_layers.append(ques_highlvl)  # (layer + 1) layers

        # deep multilevel inter-attention
        if x_cemb is None:
            x_long = x_word_embed
            ques_long = ques_word_embed
        else:
            x_long = torch.cat([x_word_embed, x_cemb],
                               2)  # batch x x_len x (vocab_dim + cdim)
            ques_long = torch.cat([ques_word_embed, ques_cemb],
                                  2)  # batch x q_len x (vocab_dim + cdim)

        x_rnn_after_inter_attn, x_inter_attn = self.deep_attn(
            [x_long],
            x_rnn_layers, [ques_long],
            ques_rnn_layers,
            x_mask,
            q_mask,
            return_bef_rnn=True)
        # x_rnn_after_inter_attn: batch x x_len x deep_attn_output_size
        # x_inter_attn: batch x x_len x deep_attn_input_size

        # deep self attention
        if x_cemb is None:
            x_self_attn_input = torch.cat(
                [x_rnn_after_inter_attn, x_inter_attn, x_word_embed], 2)
        else:
            x_self_attn_input = torch.cat(
                [x_rnn_after_inter_attn, x_inter_attn, x_cemb, x_word_embed],
                2)
            # batch x x_len x (deep_attn_output_size + deep_attn_input_size + cdim + vocab_dim)

        x_self_attn_output = self.highlvl_self_att(x_self_attn_input,
                                                   x_self_attn_input,
                                                   x_mask,
                                                   x3=x_rnn_after_inter_attn,
                                                   drop_diagonal=True)
        # batch x x_len x deep_attn_output_size

        x_highlvl_output = self.high_lvl_context_rnn(
            torch.cat([x_rnn_after_inter_attn, x_self_attn_output], 2), x_mask)
        # batch x x_len x high_lvl_context_rnn.output_size
        x_final = x_highlvl_output

        # question self attention
        ques_final = self.ques_self_attn(
            ques_highlvl, ques_highlvl, q_mask, x3=None, drop_diagonal=True
        )  # batch x q_len x high_lvl_ques_rnn_output_size

        # merge questions
        q_merge_weights = self.ques_merger(ques_final, q_mask)
        ques_merged = weighted_avg(ques_final,
                                   q_merge_weights)  # batch x ques_final_size

        # predict scores
        score_s, score_e, score_no, score_yes, score_noanswer = self.get_answer(
            x_final, ques_merged, x_mask)
        return score_s, score_e, score_no, score_yes, score_noanswer
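The BERT_LINEAR_COMBINE branch above merges the per-layer BERT outputs with the learned scalars self.alphaBERT and self.gammaBERT. Below is a minimal sketch of what linear_sum is assumed to compute, an ELMo-style scalar mix; the helper name linear_sum_sketch and the exact shapes are illustrative, not the model's actual implementation.

import torch
import torch.nn.functional as F

def linear_sum_sketch(layer_outputs, alpha, gamma):
    # layer_outputs: list of [batch, seq_len, hidden] tensors, one per BERT layer
    # alpha: [num_layers] learnable scalars; gamma: learnable scalar scale (assumed)
    weights = F.softmax(alpha, dim=0)            # normalize the per-layer weights
    stacked = torch.stack(layer_outputs, dim=0)  # [num_layers, batch, seq_len, hidden]
    mixed = (weights.view(-1, 1, 1, 1) * stacked).sum(dim=0)
    return gamma * mixed                         # [batch, seq_len, hidden]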
Example #2
    def forward(self, q_list, ocr_list, od_list, return_score=False):
        if return_score:
            att_score = {}
        else:
            att_score = None
        batch_size = len(ocr_list['num_cnt'])
        od_max_num = od_list['position'].size(1)
        ocr_max_num = ocr_list['position'].size(1)
        q_input = self.get_embedding_from_list(q_list, self.q_embedding, self.opt['q_emb_initial'])
        ocr_input = self.get_embedding_from_list(ocr_list, self.ocr_embedding, self.opt['ocr_emb_initial'])
        od_input = self.get_embedding_from_list(od_list, self.ocr_embedding, self.opt['ocr_emb_initial'])

        if 'PRE_ALIGN_befor_rnn' in self.opt:
            ocr_prealign, od_prealign = self.get_prealign_emb(q_list, ocr_list, od_list, batch_size)
            ocr_input = torch.cat([ocr_input, ocr_prealign], dim=-1)
            od_input = torch.cat([od_input, od_prealign], dim=-1)
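        # multi2one is assumed to pool the per-token (fasttext/glove) vectors of
        # each OCR/OD string into a single vector per string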
        if 'fasttext' in self.opt['ocr_embedding']:
            multi2one_ocr_input = self.multi2one(ocr_input, ocr_list['fasttext_mask'])
            multi2one_od_input = self.multi2one(od_input, od_list['fasttext_mask'])
        elif 'glove' in self.opt['ocr_embedding']:
            multi2one_ocr_input = self.multi2one(ocr_input, ocr_list['glove_mask'])
            multi2one_od_input = self.multi2one(od_input, od_list['glove_mask'])

        if 'img_feature' in self.opt:
            img_fea = q_list['img_features']
            img_spa = q_list['img_spatials']
            if self.opt['img_fea_way'] == 'replace_od':
                od_input = self.img_fea2od(img_fea)
                od_mask = torch.ByteTensor(batch_size, self.img_fea_num).fill_(1).cuda()
            elif self.opt['img_fea_way'] == 'final_att':
                # img_fea = self.img_fea_linear(img_fea)
                od_input = torch.FloatTensor(batch_size, od_max_num, self.multi2one_output_size).fill_(0).cuda()
                od_mask = torch.ByteTensor(batch_size, od_max_num).fill_(0).cuda()
                # img_fea_mask = torch.ByteTensor(batch_size, self.img_fea_num).fill_(1).cuda()
        else:
            od_input = torch.FloatTensor(batch_size, od_max_num, self.multi2one_output_size).fill_(0).cuda()
            od_mask = torch.ByteTensor(batch_size, od_max_num).fill_(0).cuda()
        ocr_input = torch.FloatTensor(batch_size, ocr_max_num, self.multi2one_output_size).fill_(0).cuda()
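        # Note: ocr_input was just reset to a zero buffer; the pooled
        # multi2one_ocr_input vectors are scattered back into it in the loop below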

        if 'ES_ocr' in self.opt and self.opt['ES_using_way'] == 'post_process':
            es_ocr_len = self.opt['ES_ocr_len']
            ocr_mask = torch.ByteTensor(batch_size, ocr_max_num-self.opt['ES_ocr_len']).fill_(0).cuda()
        else:
            es_ocr_len = None
            ocr_mask = torch.ByteTensor(batch_size, ocr_max_num).fill_(0).cuda()
        od_idx = ocr_idx = 0
        mask_copy = torch.ByteTensor(batch_size).fill_(0).cuda()
        for i in range(batch_size):
            if 'img_feature_replace_od' not in self.opt:
                od_cnt = 0
                for j in od_list['len_cnt'][i]:
                    od_input[i][od_cnt] = multi2one_od_input[od_idx][j-1]
                    od_cnt += 1
                    od_idx += 1
                od_mask[i][0:od_cnt] = 1
            ocr_cnt = 0
            for j in ocr_list['len_cnt'][i]:
                ocr_input[i][ocr_cnt] = multi2one_ocr_input[ocr_idx][j-1]
                ocr_cnt += 1
                ocr_idx += 1
            if es_ocr_len is not None and ocr_cnt >= es_ocr_len and self.opt['ES_using_way'] == 'post_process':
                ocr_mask[i][0:ocr_cnt-es_ocr_len] = 1
                # o_mask_pre[i][0:ocr_cnt-101] = 0
            else:
                ocr_mask[i][0:ocr_cnt] = 1
                mask_copy[i] = ocr_cnt

        if es_ocr_len is not None and self.opt['ES_using_way'] == 'post_process':
            es_emb = ocr_input[:, :es_ocr_len]
            ocr_input = ocr_input[:, es_ocr_len:]
            ocr_list['position'] = ocr_list['position'][:, es_ocr_len:]
            es_mask = torch.ByteTensor(batch_size, es_ocr_len).fill_(1).cuda()

        if 'fasttext' in self.opt['q_embedding']:
            q_mask = q_list['fasttext_mask']
        else:
            q_mask = q_list['glove_mask']
        if 'PRE_ALIGN_after_rnn' in self.opt:
            if 'fasttext' in self.opt['q_embedding']:
                ocr_prealign, ocr_word_level_attention_score = self.pre_align(ocr_input, q_list['fasttext_emb'], q_mask)
                od_prealign, od_word_level_attention_score = self.pre_align(od_input, q_list['fasttext_emb'], q_mask)
            else:
                ocr_prealign, ocr_word_level_attention_score = self.pre_align(ocr_input, q_list['glove_emb'], q_mask)
                od_prealign, od_word_level_attention_score = self.pre_align(od_input, q_list['glove_emb'], q_mask)

        _, ocr_rnn_layers = self.context_rnn(ocr_input, ocr_mask, return_list=True, x_additional=None, LN=True) # layer x batch x x_len x context_rnn_output_size
        _, q_rnn_layers = self.ques_rnn(q_input, q_mask, return_list=True, x_additional=None, LN=True) # layer x batch x q_len x ques_rnn_output_size
        _, od_rnn_layers = self.context_rnn(od_input, od_mask, return_list=True, x_additional=None, LN=True)
        
        # rnn with question only 
        q_highlvl = self.high_lvl_ques_rnn(torch.cat(q_rnn_layers, 2), q_mask, LN=True) # batch x q_len x high_lvl_ques_rnn_output_size
        q_rnn_layers.append(q_highlvl) # (layer + 1) layers
        
        # deep multilevel inter-attention
        
        if 'GLOVE' not in self.opt and 'FastText' not in self.opt:
            ocr_long = []
            q_long = []
            od_long = []
        elif 'PRE_ALIGN_after_rnn' in self.opt:
            ocr_long = [ocr_prealign]
            if 'fasttext' in self.opt['q_embedding']:
                q_long = [q_list['fasttext_emb']]
            else:
                q_long = [q_list['glove_emb']]
            od_long = [od_prealign]
        else:
            ocr_long = [ocr_input]
            if 'fasttext' in self.opt['q_embedding']:
                q_long = [q_list['fasttext_emb']]
            else:
                q_long = [q_list['glove_emb']]
            od_long = [od_input]

        ocr_rnn_after_inter_attn, ocr_inter_attn = self.deep_attn(ocr_long, ocr_rnn_layers, q_long, q_rnn_layers, ocr_mask, q_mask, return_bef_rnn=True)
        od_rnn_after_inter_attn, od_inter_attn = self.deep_attn(od_long, od_rnn_layers, q_long, q_rnn_layers, od_mask, q_mask, return_bef_rnn=True)

        # deep self attention
        ocr_self_attn_input = torch.cat([ocr_rnn_after_inter_attn, ocr_inter_attn, ocr_input], 2)
        od_self_attn_input = torch.cat([od_rnn_after_inter_attn, od_inter_attn, od_input], 2)
        
        if 'no_Context_Self_Attention' in self.opt:
            ocr_highlvl_output = self.high_lvl_context_rnn(ocr_rnn_after_inter_attn, ocr_mask, LN=True)
            od_highlvl_output = self.high_lvl_context_rnn(od_rnn_after_inter_attn, od_mask, LN=True)
        else:
            ocr_self_attn_output = self.highlvl_self_att(ocr_self_attn_input, ocr_self_attn_input, ocr_mask, x3=ocr_rnn_after_inter_attn, drop_diagonal=False)
            od_self_attn_output = self.highlvl_self_att(od_self_attn_input, od_self_attn_input, od_mask, x3=od_rnn_after_inter_attn, drop_diagonal=False)
            ocr_highlvl_output = self.high_lvl_context_rnn(torch.cat([ocr_rnn_after_inter_attn, ocr_self_attn_output], 2), ocr_mask, LN=True)
            od_highlvl_output = self.high_lvl_context_rnn(torch.cat([od_rnn_after_inter_attn, od_self_attn_output], 2), od_mask, LN=True)

            
        if 'position_dim' in self.opt:
            ocr_position = ocr_list['position']
            od_position = od_list['position']
            if 'img_feature' in self.opt and self.opt['img_fea_way'] == 'replace_od':
                od_position = img_spa
            if self.opt['position_mod'] == 'qk+':
                x_od_ocr = self.od_ocr_attn(ocr_highlvl_output, od_highlvl_output, od_mask)
                pos_att = self.position_attn(ocr_position, od_position, od_mask, x3 = od_highlvl_output)
                x_od_ocr += pos_att
            elif self.opt['position_mod'] == 'cat':
                x_od_ocr = self.od_ocr_attn(torch.cat([ocr_highlvl_output, ocr_position],dim=2), torch.cat([od_highlvl_output, od_position],dim=2), od_mask)
        if self.opt['pos_att_merge_mod'] == 'cat':
            ocr_final = torch.cat([ocr_highlvl_output, x_od_ocr], 2)
        elif self.opt['pos_att_merge_mod'] == 'atted':
            ocr_final = x_od_ocr
        elif self.opt['pos_att_merge_mod'] == 'original':
            ocr_final = ocr_highlvl_output
        # question self attention  
        q_final = self.ques_self_attn(q_highlvl, q_highlvl, q_mask, drop_diagonal=False) # batch x q_len x high_lvl_ques_rnn_output_size

        # merge questions  
        q_merge_weights = self.ques_merger(q_final, q_mask) 
        q_merged = weighted_avg(q_final, q_merge_weights) # batch x ques_final_size

        # predict scores
        if es_ocr_len is not None and self.opt['ES_using_way'] == 'post_process':
            es_mid = self.ES_linear(es_emb)
            es_final = self.ES_ocr_att(es_mid, ocr_final, ocr_mask)
            ocr_final = torch.cat([es_final, ocr_final], dim=-2)
            ocr_mask = torch.cat([es_mask, ocr_mask], dim=-1)
        if 'img_feature' in self.opt and self.opt['img_fea_way'] == 'final_att':
            img_fea = self.image_feature_model(q_merged, img_fea)
            # q_merged = torch.cat([q_merged, img_fea], dim=-1)
            #ocr_fea = self.ocr_final_model(q_merged, ocr_final, mask=ocr_mask)
            #q_merged = torch.cat([q_merged, ocr_fea, img_fea], dim=-1)
        if 'useES' in self.opt:
            score_s = self.get_answer(ocr_final, q_merged, ocr_mask, self.opt['ES_ocr_len'], mask_flag='mask_score' in self.opt)
        else: 
            score_s = self.get_answer(ocr_final, q_merged, ocr_mask, None, mask_flag='mask_score' in self.opt)
        if 'fixed_answers' in self.opt:
            fixed_ans_logits = self.fixed_ans_classifier(q_merged)
            fixed_ans_logits = self.fixed_ocr_alpha * fixed_ans_logits
            score_s = (1 - self.fixed_ocr_alpha) * score_s
            score_s = torch.cat([fixed_ans_logits, score_s], dim=-1)
        return score_s, att_score
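Both examples collapse the question sequence into a single vector with weighted_avg. Below is a minimal sketch consistent with its call sites, assuming ques_merger returns one normalized weight per token; the helper name weighted_avg_sketch is illustrative.

import torch

def weighted_avg_sketch(x, weights):
    # x: [batch, seq_len, hidden]; weights: [batch, seq_len] with rows summing to 1
    # returns [batch, hidden], the weighted average over the sequence dimension
    return weights.unsqueeze(1).bmm(x).squeeze(1)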
Example #3
    def forward(self, x, x_single_mask, x_char, x_char_mask, x_features, x_pos,
                x_ent, x_bert, x_bert_mask, x_bert_offsets, q, q_mask, q_char,
                q_char_mask, q_bert, q_bert_mask, q_bert_offsets, context_len):
        """
        forward() takes a batch produced by BatchGen() as input and computes the final scores through the encoding, interaction, and output layers.
        :param x: [1, x_len] (word_ids)
        :param x_single_mask: [1, x_len]
        :param x_char: [1, x_len, char_len] (char_ids)
        :param x_char_mask: [1, x_len, char_len]
        :param x_features: [batch_size, x_len, feature_len] (5 if answer_span_in_context_feature, otherwise 4)
        :param x_pos: [1, x_len] (POS id)
        :param x_ent: [1, x_len] (ENT id)
        :param x_bert: [1, x_bert_token_len]
        :param x_bert_mask: [1, x_bert_token_len]
        :param x_bert_offsets: [1, x_len, 2]
        :param q: [batch, q_len] (word_ids)
        :param q_mask: [batch, q_len]
        :param q_char: [batch, q_len, char_len] (char ids)
        :param q_char_mask: [batch, q_len, char_len]
        :param q_bert: [1, q_bert_token_len]
        :param q_bert_mask: [1, q_bert_token_len]
        :param q_bert_offsets: [1, q_len, 2]
        :param context_len: number of words in context (only one per batch)
        :return:
            score_s: [batch, context_len]
            score_e: [batch, context_len]
            score_no: [batch, 1]
            score_yes: [batch, 1]
            score_noanswer: [batch, 1]
        """
        batch_size = q.shape[0]
        # Since all QA pairs in a batch share the same passage, x_single_mask has
        # only one row; expand it to batch_size rows to align with the questions
        x_mask = x_single_mask.expand(batch_size, -1)
        # Passage word embeddings, likewise expanded to batch_size rows
        x_word_embed = self.vocab_embed(x).expand(
            batch_size, -1, -1)  # [batch, x_len, vocab_dim]
        # Question word embeddings
        ques_word_embed = self.vocab_embed(q)  # [batch, q_len, vocab_dim]
        # Word history of the passage
        x_input_list = [
            dropout(x=x_word_embed,
                    p=self.opt['dropout_emb'],
                    training=self.drop_emb)
        ]  # [batch, x_len, vocab_dim]
        # Word history of the question
        ques_input_list = [
            dropout(x=ques_word_embed,
                    p=self.opt['dropout_emb'],
                    training=self.drop_emb)
        ]  # [batch, q_len, vocab_dim]
        # Contextualized embedding layer
        x_cemb = ques_cemb = None
        if 'BERT' in self.opt:
            x_cemb = ques_cemb = None

            if 'BERT_LINEAR_COMBINE' in self.opt:
                # Per-layer BERT outputs for the passage words
                x_bert_output = self.Bert(x_bert, x_bert_mask, x_bert_offsets,
                                          x_single_mask)
                # Weighted sum of the layer outputs
                x_cemb_mid = self.linear_sum(x_bert_output, self.alphaBERT,
                                             self.gammaBERT)
                # Per-layer BERT outputs for the question words
                ques_bert_output = self.Bert(q_bert, q_bert_mask,
                                             q_bert_offsets, q_mask)
                # Weighted sum of the layer outputs
                ques_cemb_mid = self.linear_sum(ques_bert_output,
                                                self.alphaBERT, self.gammaBERT)
                x_cemb_mid = x_cemb_mid.expand(batch_size, -1, -1)
            else:
                # Case without the weighted combination
                x_cemb_mid = self.Bert(x_bert, x_bert_mask, x_bert_offsets,
                                       x_single_mask)
                x_cemb_mid = x_cemb_mid.expand(batch_size, -1, -1)
                ques_cemb_mid = self.Bert(q_bert, q_bert_mask, q_bert_offsets,
                                          q_mask)

            # Append the contextualized embeddings to the word histories
            x_input_list.append(x_cemb_mid)
            ques_input_list.append(ques_cemb_mid)

        if 'CHAR_CNN' in self.opt:
            x_char_final = self.character_cnn(x_char, x_char_mask)
            x_char_final = x_char_final.expand(batch_size, -1, -1)
            ques_char_final = self.character_cnn(q_char, q_char_mask)
            x_input_list.append(x_char_final)
            ques_input_list.append(ques_char_final)

        # Word-level attention layer
        x_prealign = self.pre_align(x_word_embed, ques_word_embed, q_mask)
        x_input_list.append(
            x_prealign)  # [batch, x_len, vocab_dim + cdim + vocab_dim]
        # POS embeddings
        x_pos_emb = self.pos_embedding(x_pos).expand(
            batch_size, -1, -1)  # [batch, x_len, pos_dim]
        # Named-entity embeddings
        x_ent_emb = self.ent_embedding(x_ent).expand(
            batch_size, -1, -1)  # [batch, x_len, ent_dim]
        x_input_list.append(x_pos_emb)
        x_input_list.append(x_ent_emb)
        # Append term-frequency and exact-match features of the passage words
        x_input_list.append(
            x_features
        )  # [batch_size, x_len, vocab_dim + cdim + vocab_dim + pos_dim + ent_dim + feature_dim]
        # Concatenate the passage word-history vectors
        x_input = torch.cat(
            x_input_list, 2
        )  # [batch_size, x_len, vocab_dim + cdim + vocab_dim + pos_dim + ent_dim + feature_dim]
        # Concatenate the question word-history vectors
        ques_input = torch.cat(ques_input_list,
                               2)  # [batch_size, q_len, vocab_dim + cdim]
        # Multi-layer RNN: passage and question RNN layer outputs
        _, x_rnn_layers = self.context_rnn(
            x_input, x_mask, return_list=True, x_additional=x_cemb
        )  # [layer, batch, x_len, context_rnn_output_size]
        _, ques_rnn_layers = self.ques_rnn(
            ques_input, q_mask, return_list=True, x_additional=ques_cemb
        )  # [layer, batch, q_len, ques_rnn_output_size]
        # Question understanding layer
        ques_highlvl = self.high_lvl_ques_rnn(
            torch.cat(ques_rnn_layers, 2),
            q_mask)  # [batch, q_len, high_lvl_ques_rnn_output_size]
        ques_rnn_layers.append(ques_highlvl)  # (layer + 1) layers

        # deep multilevel inter-attention: inputs to the fully-aware inter-attention layer
        if x_cemb is None:
            x_long = x_word_embed
            ques_long = ques_word_embed
        else:
            x_long = torch.cat([x_word_embed, x_cemb],
                               2)  # [batch, x_len, vocab_dim + cdim]
            ques_long = torch.cat([ques_word_embed, ques_cemb],
                                  2)  # [batch, q_len, vocab_dim + cdim]
        # Passage words through the fully-aware inter-attention layer; x_rnn_after_inter_attn: [batch, x_len, deep_attn_output_size], x_inter_attn: [batch, x_len, deep_attn_input_size]
        x_rnn_after_inter_attn, x_inter_attn = self.deep_attn(
            [x_long],
            x_rnn_layers, [ques_long],
            ques_rnn_layers,
            x_mask,
            q_mask,
            return_bef_rnn=True)

        # deep self attention; x_self_attn_input: [batch, x_len, deep_attn_output_size + deep_attn_input_size + cdim + vocab_dim]
        if x_cemb is None:
            x_self_attn_input = torch.cat(
                [x_rnn_after_inter_attn, x_inter_attn, x_word_embed], 2)
        else:
            x_self_attn_input = torch.cat(
                [x_rnn_after_inter_attn, x_inter_attn, x_cemb, x_word_embed],
                2)
        # Passage through the fully-aware self-attention layer
        x_self_attn_output = self.highlvl_self_att(
            x_self_attn_input,
            x_self_attn_input,
            x_mask,
            x3=x_rnn_after_inter_attn,
            drop_diagonal=True)  # [batch, x_len, deep_attn_output_size]

        # Passage words through the high-level RNN layer
        x_highlvl_output = self.high_lvl_context_rnn(
            torch.cat([x_rnn_after_inter_attn, x_self_attn_output], 2), x_mask)

        # Final encoding of the passage words, x_final
        x_final = x_highlvl_output  # [batch, x_len, high_lvl_context_rnn_output_size]

        # Self-attention layer over the question words
        ques_final = self.ques_self_attn(
            ques_highlvl, ques_highlvl, q_mask, x3=None, drop_diagonal=True
        )  # [batch, q_len, high_lvl_ques_rnn_output_size]

        # merge questions: obtain a vector representation of each question
        q_merge_weights = self.ques_merger(ques_final, q_mask)
        ques_merged = weighted_avg(
            ques_final, q_merge_weights
        )  # [batch, ques_final_size]; weighted sum of ques_final with weights q_merge_weights

        # Probabilities of the answer starting and ending at each passage position, plus the three special answers yes / no / no-answer
        score_s, score_e, score_no, score_yes, score_noanswer = self.get_answer(
            x_final, ques_merged, x_mask)

        return score_s, score_e, score_no, score_yes, score_noanswer
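The pre_align word-level attention layer used in Examples #1 and #3 attends from passage words to question words over their raw embeddings. Below is a minimal sketch, assuming q_mask marks valid question tokens with 1 and plain dot-product scoring (the actual layer likely applies a learned projection first); pre_align_sketch is an illustrative name.

import torch
import torch.nn.functional as F

def pre_align_sketch(x_embed, q_embed, q_mask):
    # x_embed: [batch, x_len, dim]; q_embed: [batch, q_len, dim]
    # q_mask: [batch, q_len], 1 for valid question tokens (assumed convention)
    scores = x_embed.bmm(q_embed.transpose(1, 2))                 # [batch, x_len, q_len]
    scores = scores.masked_fill(q_mask.unsqueeze(1) == 0, -1e30)  # mask out padding
    alpha = F.softmax(scores, dim=-1)                             # attend over question words
    return alpha.bmm(q_embed)                                     # [batch, x_len, dim]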