    def __init__(self, param):
        """
        :param param: dict with keys: mode, embedding, hidden_size, dropout_p,
            encoder_dropout_p, encoder_layer_num, is_bn
        """
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']

        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder
        input_size = self.embedding.embedding_dim - 9
        self.encoder_a = encoder.Rnn(mode=self.mode,
                                     input_size=input_size,
                                     hidden_size=self.hidden_size,
                                     dropout_p=self.encoder_dropout_p,
                                     bidirectional=True,
                                     layer_num=self.encoder_layer_num,
                                     is_bn=False)

        input_size = self.embedding.embedding_dim
        self.encoder_p_q = encoder.Rnn(mode=self.mode,
                                       input_size=input_size,
                                       hidden_size=self.hidden_size,
                                       dropout_p=self.encoder_dropout_p,
                                       bidirectional=True,
                                       layer_num=self.encoder_layer_num,
                                       is_bn=True)

        # match rnn
        input_size = self.hidden_size * 2
        self.match_rnn = match_rnn.MatchRNN(mode=self.mode,
                                            input_size=input_size,
                                            hidden_size=self.hidden_size,
                                            dropout_p=self.dropout_p,
                                            gated_attention=True,
                                            is_bn=self.is_bn)

        # addition_rnn
        input_size = self.hidden_size * 2
        self.addition_rnn = encoder.Rnn(mode=self.mode,
                                        input_size=input_size,
                                        hidden_size=self.hidden_size,
                                        bidirectional=True,
                                        dropout_p=self.dropout_p,
                                        layer_num=1,
                                        is_bn=self.is_bn)

        # mean passage based on attn
        self.mean_p = pointer.AttentionPooling(input_size=self.hidden_size * 2,
                                               output_size=self.hidden_size * 2)

        # outputs
        self.choose = choose.Choose(self.hidden_size * 2, self.hidden_size * 2)
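
For orientation, here is a minimal sketch of constructing such a Model. The keys mirror what this __init__ reads; every concrete value (and the embedding matrix shape) is an illustrative assumption, not something taken from the source repo.

    import numpy as np

    # hypothetical param dict -- keys match what __init__ reads above,
    # values are placeholders for illustration only
    param = {
        'mode': 'LSTM',                            # RNN cell type passed to encoder.Rnn
        'hidden_size': 128,
        'dropout_p': 0.2,
        'encoder_dropout_p': 0.1,
        'encoder_layer_num': 1,
        'is_bn': False,
        'embedding': np.random.randn(30000, 300),  # (vocab_size, w2v_size)
    }
    model = Model(param)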
Example #2
    def __init__(self, param):
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']
        self.k = param['k']
        self.perspective_num = 10
        self.lamda = 10

        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder: p, q, a
        input_size = self.embedding.embedding_dim - 9
        self.encoder = encoder.Rnn(
            mode='LSTM',
            input_size=input_size,
            hidden_size=self.hidden_size,
            dropout_p=self.encoder_dropout_p,
            bidirectional=True,
            layer_num=self.encoder_layer_num,
            is_bn=False,
        )

        # attention: q2p
        self.q2p = MultiPerspective(self.hidden_size, self.perspective_num, self.dropout_p)

        # attention: a2p
        self.a2p = MultiPerspective(self.hidden_size, self.perspective_num, self.dropout_p)

        # attention: Mq2Ma
        self.mq2ma = MultiPerspective(self.perspective_num*4, self.perspective_num, self.dropout_p)

        # attention: Ma2Mq
        self.ma2mq = MultiPerspective(self.perspective_num*4, self.perspective_num, self.dropout_p)

        # aggregation
        self.aggregation = Aggregation(self.perspective_num, self.hidden_size, self.dropout_p)

        # memory
        self.m_rnn = encoder.Rnn(
            mode='LSTM',
            input_size=self.perspective_num*8,
            hidden_size=self.hidden_size,
            dropout_p=self.dropout_p,
            bidirectional=True,
            layer_num=1,
            is_bn=self.is_bn,
        )

        # answer score
        self.answer_score = AnswerScore(self.hidden_size, self.k, self.lamda, self.dropout_p)

        self.dropout = nn.Dropout(self.dropout_p)
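
MultiPerspective here follows BiMPM-style multi-perspective matching (Wang et al., 2017): each of the perspective_num perspectives rescales the two vectors with a learned weight vector before taking a cosine similarity. A minimal sketch of that core matching function, under the assumption that this repo's MultiPerspective follows the paper:

    import torch
    import torch.nn.functional as F

    def multi_perspective_match(v1, v2, W):
        # v1, v2: (batch, d) vectors to match; W: (l, d) learned perspective weights
        v1 = W.unsqueeze(0) * v1.unsqueeze(1)      # (batch, l, d)
        v2 = W.unsqueeze(0) * v2.unsqueeze(1)      # (batch, l, d)
        return F.cosine_similarity(v1, v2, dim=2)  # (batch, l): one score per perspective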
Example #3
    def __init__(self, param):
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']

        # embedding
        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder
        input_size = self.embedding.embedding_dim
        self.encoder = encoder.Rnn(
            mode=self.mode,
            input_size=input_size,
            hidden_size=self.hidden_size,
            dropout_p=self.encoder_dropout_p,
            bidirectional=True,
            layer_num=self.encoder_layer_num,
            is_bn=True
        )

        # attention flow layer
        self.att_c = nn.Linear(self.hidden_size * 2, 1)
        self.att_q = nn.Linear(self.hidden_size * 2, 1)
        self.att_cq = nn.Linear(self.hidden_size * 2, 1)

        # modeling layer
        self.modeling_rnn = encoder.Rnn(
            mode=self.mode,
            input_size=self.hidden_size * 8,
            hidden_size=self.hidden_size,
            dropout_p=self.dropout_p,
            bidirectional=True,
            layer_num=2,
            is_bn=self.is_bn
        )

        # outputs
        self.p1 = nn.Linear(self.hidden_size * 10, 1)
        self.p2 = nn.Linear(self.hidden_size * 10, 1)

        self.rnn = encoder.Rnn(
            mode=self.mode,
            input_size=self.hidden_size * 2,
            hidden_size=self.hidden_size,
            bidirectional=True,
            dropout_p=self.dropout_p,
            layer_num=1,
            is_bn=self.is_bn
        )

        self.dropout = nn.Dropout(self.dropout_p)
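
att_c, att_q and att_cq are the usual decomposition of BiDAF's trilinear similarity S_ij = w^T [c_i; q_j; c_i * q_j] (Seo et al., 2017). A sketch of how these three projections are typically combined in the forward pass, which this listing does not show; the tensor shapes are assumptions:

    import torch

    def attention_flow_similarity(c, q, att_c, att_q, att_cq):
        # c: (batch, n, 2h) context encodings; q: (batch, m, 2h) question encodings
        s_c = att_c(c)                        # (batch, n, 1)
        s_q = att_q(q).transpose(1, 2)        # (batch, 1, m)
        cq = c.unsqueeze(2) * q.unsqueeze(1)  # (batch, n, m, 2h); memory-heavy for long inputs
        s_cq = att_cq(cq).squeeze(-1)         # (batch, n, m)
        return s_c + s_q + s_cq               # broadcasts to (batch, n, m)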
Example #4
    def __init__(self, param):
        """
        :param param: dict with keys: mode, embedding, hidden_size, dropout_p,
            encoder_dropout_p, encoder_layer_num, is_bn
        """
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']

        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder
        input_size = self.embedding.embedding_dim
        self.encoder = encoder.Rnn(mode=self.mode,
                                   input_size=input_size,
                                   hidden_size=self.hidden_size,
                                   dropout_p=self.encoder_dropout_p,
                                   bidirectional=True,
                                   layer_num=self.encoder_layer_num,
                                   is_bn=True)

        # match rnn
        input_size = self.hidden_size * 2
        self.match_rnn = match_rnn.MatchRNN(mode=self.mode,
                                            input_size=input_size,
                                            hidden_size=self.hidden_size,
                                            dropout_p=self.dropout_p,
                                            gated_attention=True,
                                            is_bn=self.is_bn)

        # addition_rnn
        input_size = self.hidden_size * 2
        self.addition_rnn = encoder.Rnn(mode=self.mode,
                                        input_size=input_size,
                                        hidden_size=self.hidden_size,
                                        bidirectional=True,
                                        dropout_p=self.dropout_p,
                                        layer_num=1,
                                        is_bn=self.is_bn)

        # init state of pointer
        self.init_state = pointer.AttentionPooling(
            input_size=self.hidden_size * 2, output_size=self.hidden_size)

        # pointer
        self.pointer_net = pointer.BoundaryPointer(
            mode=self.mode,
            input_size=self.hidden_size * 2,
            hidden_size=self.hidden_size,
            dropout_p=self.dropout_p,
            bidirectional=True,
            is_bn=self.is_bn)
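
pointer.BoundaryPointer corresponds to the boundary model of Match-LSTM (Wang & Jiang, 2016): it emits start and end distributions over the passage. One common way to decode the best span from such distributions — a generic sketch, not code from this repo:

    import torch

    def decode_span(p_start, p_end, max_len=30):
        # p_start, p_end: (seq_len,) start/end probabilities from a boundary pointer
        scores = p_start.unsqueeze(1) * p_end.unsqueeze(0)         # (seq_len, seq_len)
        scores = torch.triu(scores) - torch.triu(scores, max_len)  # end >= start, length <= max_len
        best = torch.argmax(scores).item()
        return divmod(best, p_start.size(0))                       # (start_idx, end_idx)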
Example #5
    def __init__(self, param):
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']

        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder_p: bidirectional RNN
        input_size = self.embedding.embedding_dim
        self.encoder_p = encoder.Rnn(mode=self.mode,
                                     input_size=input_size,
                                     hidden_size=self.hidden_size,
                                     dropout_p=self.encoder_dropout_p,
                                     bidirectional=True,
                                     layer_num=self.encoder_layer_num,
                                     is_bn=True)

        # encoder_q: unidirectional RNN
        self.encoder_q = encoder.Rnn(mode=self.mode,
                                     input_size=input_size,
                                     hidden_size=self.hidden_size,
                                     dropout_p=self.encoder_dropout_p,
                                     bidirectional=False,
                                     layer_num=self.encoder_layer_num,
                                     is_bn=True)

        # encoder_a: unidirectional RNN
        input_size = self.embedding.sd_embedding.embedding_dim
        self.encoder_a = encoder.Rnn(mode=self.mode,
                                     input_size=input_size,
                                     hidden_size=self.hidden_size,
                                     dropout_p=self.encoder_dropout_p,
                                     bidirectional=False,
                                     layer_num=self.encoder_layer_num,
                                     is_bn=False)

        # similar W
        self.sim_w = nn.Linear(self.hidden_size, self.hidden_size * 2)

        # outputs
        self.choose = choose.Choose(self.hidden_size * 2, self.hidden_size)

        self.dropout = nn.Dropout(self.dropout_p)
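
sim_w projects the h-dimensional state of the unidirectional question encoder into the 2h-dimensional space of the bidirectional passage encoder, which makes a bilinear comparison between the two possible despite the dimension mismatch. A sketch of that score, with assumed shapes:

    import torch

    def passage_question_scores(p_states, q_rep, sim_w):
        # p_states: (batch, n, 2h) from the bidirectional passage encoder
        # q_rep:    (batch, h)     from the unidirectional question encoder
        q_proj = sim_w(q_rep).unsqueeze(2)             # (batch, 2h, 1)
        return torch.bmm(p_states, q_proj).squeeze(2)  # (batch, n) similarity scores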
Example #6
    def __init__(self, param):
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']

        # embedding
        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder
        input_size = self.embedding.embedding_dim
        self.encoder = encoder.Rnn(
            mode=self.mode,
            input_size=input_size,
            hidden_size=self.hidden_size,
            dropout_p=self.encoder_dropout_p,
            bidirectional=True,
            layer_num=self.encoder_layer_num,
            is_bn=True
        )

        # align
        input_size = self.hidden_size * 2
        self.align_1 = Aligner(input_size, self.dropout_p, self.mode, self.is_bn, False)
        self.align_2 = Aligner(input_size, self.dropout_p, self.mode, self.is_bn, False)
        self.align_3 = Aligner(input_size, self.dropout_p, self.mode, self.is_bn, True)

        # pointer
        self.pointer = Pointer(input_size, self.dropout_p)
Example #7
    def __init__(self, param):
        """
        :param param: dict with keys: embedding, embedding_type, embedding_is_training,
            mode, hidden_size, dropout_p, encoder_dropout_p, encoder_bidirectional,
            encoder_layer_num, is_bn
        """
        super(Model, self).__init__()

        self.w2v_size = param['embedding'].shape[1]
        self.vocab_size = param['embedding'].shape[0]
        self.embedding_type = param['embedding_type']
        self.embedding_is_training = param['embedding_is_training']
        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_bidirectional = param['encoder_bidirectional']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']

        if self.embedding_type == 'standard':
            self.embedding = embedding.Embedding(param['embedding'])
            is_bn = False
        else:
            self.embedding = embedding.ExtendEmbedding(param['embedding'])
            is_bn = True

        # encoder
        input_size = self.embedding.embedding_dim
        self.encoder = encoder.Rnn(
            mode=self.mode,
            input_size=input_size,
            hidden_size=self.hidden_size,
            dropout_p=self.encoder_dropout_p,
            bidirectional=self.encoder_bidirectional,
            layer_num=self.encoder_layer_num,
            is_bn=is_bn
        )

        # match rnn
        input_size = self.hidden_size * 2 if self.encoder_bidirectional else self.hidden_size
        self.match_rnn = match_rnn.MatchRNN(
            mode=self.mode,
            input_size=input_size,
            hidden_size=self.hidden_size,
            dropout_p=self.dropout_p,
            gated_attention=False,
            is_bn=self.is_bn
        )

        # pointer
        self.pointer_net = pointer.BoundaryPointer(
            mode=self.mode,
            input_size=self.hidden_size*2,
            hidden_size=self.hidden_size,
            dropout_p=self.dropout_p,
            bidirectional=True,
            is_bn=self.is_bn
        )
Example #8
    def __init__(self, mode, input_size, dropout_p, is_bn):
        super(EviCollection, self).__init__()

        self.rnn = encoder.Rnn(
            mode=mode,
            input_size=input_size,
            hidden_size=input_size//2,
            dropout_p=dropout_p,
            bidirectional=True,
            layer_num=1,
            is_bn=is_bn
        )
Example #9
    def __init__(self, perspective_num, hidden_size, dropout_p):
        super(Aggregation, self).__init__()

        self.perspective_num = perspective_num
        self.hidden_size = hidden_size

        self.rnn = encoder.Rnn(
            mode='LSTM',
            input_size=self.perspective_num*8,
            hidden_size=self.hidden_size,
            dropout_p=dropout_p,
            bidirectional=True,
            layer_num=1,
            is_bn=True,
        )
Example #10
    def __init__(self, input_size, dropout_p, mode, is_bn, use_rnn):
        super(Aligner, self).__init__()

        self.inter_align = InterAlign(input_size, dropout_p)
        self.self_align = SelfAlign(input_size, dropout_p)
        self.aggregation = EviCollection(mode, input_size, dropout_p, is_bn)

        self.use_rnn = use_rnn
        if use_rnn:
            self.rnn = encoder.Rnn(
                mode=mode,
                input_size=input_size*3,
                hidden_size=input_size//2,
                dropout_p=dropout_p,
                bidirectional=True,
                layer_num=1,
                is_bn=is_bn
            )
Example #11
    def __init__(self, param):
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']

        # embedding
        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder: p
        input_size = self.embedding.embedding_dim
        self.encoder = encoder.Rnn(
            mode=self.mode,
            input_size=input_size,
            hidden_size=self.hidden_size,
            dropout_p=self.encoder_dropout_p,
            bidirectional=True,
            layer_num=self.encoder_layer_num,
            is_bn=self.is_bn
        )

        self.mean_q = pointer.AttentionPooling(self.hidden_size*2, self.hidden_size)
        self.mean_a = pointer.AttentionPooling(self.hidden_size*2, self.hidden_size)

        # align
        input_size = self.hidden_size * 2
        self.align_1 = Aligner(input_size, self.dropout_p, self.mode, self.is_bn, False)
        self.align_2 = Aligner(input_size, self.dropout_p, self.mode, self.is_bn, False)
        self.align_3 = Aligner(input_size, self.dropout_p, self.mode, self.is_bn, True)

        # p_rep, choosing
        self.wp1 = nn.Linear(self.hidden_size*2, self.hidden_size)
        self.wp2 = nn.Linear(self.hidden_size*2, self.hidden_size)
        self.vp = nn.Linear(self.hidden_size, 1)
        self.bi_linear = nn.Linear(self.hidden_size*2, self.hidden_size*2)

        self.dropout = nn.Dropout(self.dropout_p)

        self.reset_param()
Example #12
    def __init__(self, param):
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']

        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder: p
        input_size = self.embedding.embedding_dim
        self.encoder_p = encoder.Rnn(mode=self.mode,
                                     input_size=input_size,
                                     hidden_size=self.hidden_size,
                                     dropout_p=self.encoder_dropout_p,
                                     bidirectional=True,
                                     layer_num=self.encoder_layer_num,
                                     is_bn=True)
        # encoder: q
        self.encoder_q = encoder.Rnn(mode=self.mode,
                                     input_size=input_size,
                                     hidden_size=self.hidden_size,
                                     dropout_p=self.encoder_dropout_p,
                                     bidirectional=True,
                                     layer_num=self.encoder_layer_num,
                                     is_bn=True)
        # encoder: a
        input_size = self.embedding.embedding_dim - 9
        self.encoder_a = encoder.Rnn(mode=self.mode,
                                     input_size=input_size,
                                     hidden_size=self.hidden_size // 2,
                                     dropout_p=self.encoder_dropout_p,
                                     bidirectional=True,
                                     layer_num=self.encoder_layer_num,
                                     is_bn=False)

        # self-att: a
        self.a_att = nn.Linear(self.hidden_size, 1, bias=False)

        # attention flow layer
        self.att_c = nn.Linear(self.hidden_size * 2, 1)
        self.att_q = nn.Linear(self.hidden_size * 2, 1)
        self.att_cq = nn.Linear(self.hidden_size * 2, 1)

        # modeling layer
        self.modeling_rnn = encoder.Rnn(mode=self.mode,
                                        input_size=self.hidden_size * 8,
                                        hidden_size=self.hidden_size,
                                        dropout_p=self.dropout_p,
                                        bidirectional=True,
                                        layer_num=2,
                                        is_bn=self.is_bn)

        # prediction
        self.wq = nn.Linear(self.hidden_size * 2, self.hidden_size, bias=False)
        self.vq = nn.Linear(self.hidden_size, 1, bias=False)
        self.wp1 = nn.Linear(self.hidden_size * 10,
                             self.hidden_size,
                             bias=False)
        self.wp2 = nn.Linear(self.hidden_size * 2,
                             self.hidden_size,
                             bias=False)
        self.vp = nn.Linear(self.hidden_size, 1, bias=False)
        self.predict = nn.Linear(self.hidden_size * 10,
                                 self.hidden_size,
                                 bias=False)

        self.dropout = nn.Dropout(self.dropout_p)

        self.reset_param()
Example #13
    def __init__(self, param):
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']
        self.k = 2

        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder_a
        input_size = self.embedding.embedding_dim - 9
        self.encoder_a = encoder.Rnn(mode=self.mode,
                                     input_size=input_size,
                                     hidden_size=self.hidden_size,
                                     dropout_p=self.encoder_dropout_p,
                                     bidirectional=True,
                                     layer_num=self.encoder_layer_num,
                                     is_bn=False)

        # encoder p, q
        self.doc_list = nn.ModuleList()
        self.query_list = nn.ModuleList()
        input_size = self.embedding.embedding_dim
        for i in range(self.k):
            di_enc = encoder.Rnn(
                mode=self.mode,
                input_size=input_size if i == 0 else self.hidden_size * 2,
                hidden_size=self.hidden_size,
                dropout_p=self.dropout_p,
                bidirectional=True,
                layer_num=1,
                is_bn=self.is_bn)

            qi_enc = encoder.Rnn(mode=self.mode,
                                 input_size=input_size,
                                 hidden_size=self.hidden_size,
                                 dropout_p=self.dropout_p,
                                 bidirectional=True,
                                 layer_num=1,
                                 is_bn=self.is_bn)
            self.doc_list.append(di_enc)
            self.query_list.append(qi_enc)

        # mean passage based on attn
        self.mean_p = pointer.AttentionPooling(input_size=self.hidden_size * 2,
                                               output_size=self.hidden_size * 2)

        # mean answer based on attn
        self.mean_a = pointer.AttentionPooling(input_size=self.hidden_size * 2,
                                               output_size=self.hidden_size * 2)

        # outputs
        self.choose = choose.Choose(self.hidden_size * 2, self.hidden_size * 2)

        self.dropout = nn.Dropout(self.dropout_p)
Example #14
    def __init__(self, param):
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']
        self.num_align_hops = param['num_align_hops']  # 2

        # embedding
        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder
        input_size = self.embedding.embedding_dim
        self.encoder = encoder.Rnn(
            mode=self.mode,
            input_size=input_size,
            hidden_size=self.hidden_size,
            dropout_p=self.encoder_dropout_p,
            bidirectional=True,
            layer_num=self.encoder_layer_num,
            is_bn=self.is_bn  # TODO: this should probably be changed
        )

        self.mean_q = pointer.AttentionPooling(self.hidden_size * 2,
                                               self.hidden_size)
        self.mean_a = pointer.AttentionPooling(self.hidden_size * 2,
                                               self.hidden_size)

        # merge q into p
        self.aligner = nn.ModuleList(
            [SeqToSeqAtten() for _ in range(self.num_align_hops)])
        self.aligner_sfu = nn.ModuleList([
            SFU(self.hidden_size * 2,
                self.hidden_size * 2 * 3,
                dropout_p=self.dropout_p) for _ in range(self.num_align_hops)
        ])
        # self align
        self.self_aligner = nn.ModuleList(
            [SelfSeqAtten() for _ in range(self.num_align_hops)])
        self.self_aligner_sfu = nn.ModuleList([
            SFU(self.hidden_size * 2,
                self.hidden_size * 2 * 3,
                dropout_p=self.dropout_p) for _ in range(self.num_align_hops)
        ])
        # aggregation
        self.choose_agg = nn.ModuleList([
            encoder.Rnn(mode=self.mode,
                        input_size=self.hidden_size * 2,
                        hidden_size=self.hidden_size,
                        bidirectional=True,
                        dropout_p=self.dropout_p,
                        layer_num=1,
                        is_bn=self.is_bn) for _ in range(self.num_align_hops)
        ])

        # p_rep, choosing
        self.wp1 = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.wp2 = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.vp = nn.Linear(self.hidden_size, 1)
        self.bi_linear = nn.Linear(self.hidden_size * 2, self.hidden_size * 2)

        self.dropout = nn.Dropout(self.dropout_p)

        self.reset_param()
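
SFU is the semantic fusion unit of the Mnemonic Reader (Hu et al., 2017): it fuses a sequence x with aligned evidence through a gated composition, which also explains the fusion width of self.hidden_size * 2 * 3 — typically the concatenation [y, x * y, x - y]. A self-contained sketch; the repo's actual SFU may differ in detail:

    import torch
    import torch.nn as nn

    class SFU(nn.Module):
        # semantic fusion unit: fuse x with aligned evidence `fusion`
        def __init__(self, input_size, fusion_size, dropout_p=0.0):
            super(SFU, self).__init__()
            self.linear_r = nn.Linear(input_size + fusion_size, input_size)
            self.linear_g = nn.Linear(input_size + fusion_size, input_size)
            self.dropout = nn.Dropout(dropout_p)

        def forward(self, x, fusion):
            rf = self.dropout(torch.cat([x, fusion], dim=-1))
            r = torch.tanh(self.linear_r(rf))     # candidate representation
            g = torch.sigmoid(self.linear_g(rf))  # fusion gate
            return g * r + (1 - g) * x            # gated residual combination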
Example #15
    def __init__(self, param):
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']

        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder: p
        input_size = self.embedding.embedding_dim
        self.encoder_p = encoder.Rnn(
            mode=self.mode,
            input_size=input_size,
            hidden_size=self.hidden_size,
            dropout_p=self.encoder_dropout_p,
            bidirectional=True,
            layer_num=self.encoder_layer_num,
            is_bn=True
        )
        # encoder: q
        self.encoder_q = encoder.Rnn(
            mode=self.mode,
            input_size=input_size,
            hidden_size=self.hidden_size,
            dropout_p=self.encoder_dropout_p,
            bidirectional=True,
            layer_num=self.encoder_layer_num,
            is_bn=True
        )
        # encoder: a
        input_size = self.embedding.embedding_dim - 9
        self.encoder_a = encoder.Rnn(
            mode=self.mode,
            input_size=input_size,
            hidden_size=self.hidden_size//2,
            dropout_p=self.encoder_dropout_p,
            bidirectional=True,
            layer_num=self.encoder_layer_num,
            is_bn=False
        )

        # self-att: a
        self.a_att = nn.Linear(self.hidden_size, 1, bias=False)

        # Concat Attention
        self.Wc1 = nn.Linear(self.hidden_size*2, self.hidden_size, bias=False)
        self.Wc2 = nn.Linear(self.hidden_size*2, self.hidden_size, bias=False)
        self.vc = nn.Linear(self.hidden_size, 1, bias=False)

        # Bilinear Attention
        self.Wb = nn.Linear(self.hidden_size*2, self.hidden_size*2, bias=False)

        # Dot Attention :
        self.Wd = nn.Linear(self.hidden_size*2, self.hidden_size, bias=False)
        self.vd = nn.Linear(self.hidden_size, 1, bias=False)

        # Minus Attention :
        self.Wm = nn.Linear(self.hidden_size*2, self.hidden_size, bias=False)
        self.vm = nn.Linear(self.hidden_size, 1, bias=False)

        self.gru_agg = encoder.Rnn(
            mode=self.mode,
            input_size=self.hidden_size*10,
            hidden_size=self.hidden_size,
            dropout_p=self.encoder_dropout_p,
            bidirectional=True,
            layer_num=self.encoder_layer_num,
            is_bn=self.is_bn
        )

        # prediction
        self.wq = nn.Linear(self.hidden_size*2, self.hidden_size, bias=False)
        self.vq = nn.Linear(self.hidden_size, 1, bias=False)
        self.wp1 = nn.Linear(self.hidden_size*2, self.hidden_size, bias=False)
        self.wp2 = nn.Linear(self.hidden_size*2, self.hidden_size, bias=False)
        self.vp = nn.Linear(self.hidden_size, 1, bias=False)
        self.predict = nn.Linear(self.hidden_size*2, self.hidden_size, bias=False)

        self.dropout = nn.Dropout(self.dropout_p)

        self.reset_param()
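
Wc1/Wc2/vc, Wb, Wd/vd and Wm/vm are the four matching functions of multiway attention (concat, bilinear, dot, minus; cf. Tan et al.'s Multiway Attention Networks), whose 2h-dim outputs, concatenated with the 2h-dim passage encoding, would account for the hidden_size * 10 input of gru_agg. A sketch of the bilinear branch, with assumed shapes:

    import torch
    import torch.nn.functional as F

    def bilinear_match(p, q, Wb):
        # p: (batch, n, 2h) passage; q: (batch, m, 2h) question; Wb: nn.Linear(2h, 2h)
        scores = torch.bmm(Wb(p), q.transpose(1, 2))  # (batch, n, m)
        alpha = F.softmax(scores, dim=2)              # attend over question positions
        return torch.bmm(alpha, q)                    # (batch, n, 2h) matched representation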
Example #16
    def __init__(self, param):
        super(Model, self).__init__()

        self.mode = param['mode']
        self.hidden_size = param['hidden_size']
        self.dropout_p = param['dropout_p']
        self.encoder_dropout_p = param['encoder_dropout_p']
        self.encoder_layer_num = param['encoder_layer_num']
        self.is_bn = param['is_bn']
        # define the hop counts used below; 'num_ptr_hops' is assumed to be a
        # param key, by analogy with 'num_align_hops' in Example #14
        num_align_hops = param['num_align_hops']
        num_ptr_hops = param['num_ptr_hops']

        # embedding
        self.embedding = embedding.ExtendEmbedding(param['embedding'])

        # encoder
        input_size = self.embedding.embedding_dim
        self.encoder = encoder.Rnn(mode=self.mode,
                                   input_size=input_size,
                                   hidden_size=self.hidden_size,
                                   dropout_p=self.encoder_dropout_p,
                                   bidirectional=True,
                                   layer_num=self.encoder_layer_num,
                                   is_bn=True)

        # align
        self.aligner = nn.ModuleList(
            [SeqToSeqAtten() for _ in range(num_align_hops)])
        self.aligner_sfu = nn.ModuleList([
            SFU(self.hidden_size * 2,
                self.hidden_size * 2 * 3,
                dropout_p=self.dropout_p) for _ in range(num_align_hops)
        ])

        # self align
        self.self_aligner = nn.ModuleList(
            [SelfSeqAtten() for _ in range(num_align_hops)])
        self.self_aligner_sfu = nn.ModuleList([
            SFU(self.hidden_size * 2,
                self.hidden_size * 2 * 3,
                dropout_p=self.dropout_p) for _ in range(num_align_hops)
        ])

        # aggregation
        self.aggregation = nn.ModuleList([
            encoder.Rnn(mode=self.mode,
                        input_size=self.hidden_size * 2,
                        hidden_size=self.hidden_size,
                        bidirectional=True,
                        dropout_p=self.dropout_p,
                        layer_num=1,
                        is_bn=self.is_bn) for _ in range(num_align_hops)
        ])

        # init zs
        self.init_state = pointer.AttentionPooling(
            input_size=self.hidden_size * 2, output_size=self.hidden_size * 2)

        # pointer
        self.ptr_net = nn.ModuleList([
            Pointer(self.hidden_size * 2, self.hidden_size, self.dropout_p)
            for _ in range(num_ptr_hops)
        ])