# Note: this snippet assumes module-level vocabularies (vocab, pos_vocab,
# ner_vocab, rel_vocab) and a project-local `layers` module providing
# StackedBRNN, SeqDotAttnMatch, MultiTurnInference, and LinearSeqAttn.
import torch.nn as nn


class MyModel(nn.Module):
    def __init__(self, args):
        super(MyModel, self).__init__()
        self.args = args
        self.embedding_dim = 300
        self.embedding = nn.Embedding(len(vocab),
                                      self.embedding_dim,
                                      padding_idx=0)
        self.embedding.weight.data.fill_(0)
        self.embedding.weight.data[:2].normal_(0, 0.1)
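        # The two lines above zero the whole table, then randomly initialize
        # only the first two rows (presumably the padding and unknown tokens);
        # the remaining rows are likely filled with pretrained vectors
        # elsewhere in the original project.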

        self.pos_embedding = nn.Embedding(len(pos_vocab),
                                          args.pos_emb_dim,
                                          padding_idx=0)
        self.pos_embedding.weight.data.normal_(0, 0.1)
        self.ner_embedding = nn.Embedding(len(ner_vocab),
                                          args.ner_emb_dim,
                                          padding_idx=0)
        self.ner_embedding.weight.data.normal_(0, 0.1)
        self.rel_embedding = nn.Embedding(len(rel_vocab),
                                          args.rel_emb_dim,
                                          padding_idx=0)
        self.rel_embedding.weight.data.normal_(0, 0.1)

        self.RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU}
        print("###########self.args.matching_order:  %s " %
              (self.args.matching_order))

        # RNN context encoder
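        # Per-token input: the 300-d word embedding, POS and NER embeddings,
        # five extra scalar features (likely hand-crafted match/frequency
        # features; the snippet does not define them), and two relation
        # embeddings.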
        rnn_input_size = (self.embedding_dim + args.pos_emb_dim +
                          args.ner_emb_dim + 5 + 2 * args.rel_emb_dim)
        self.context_rnn = layers.StackedBRNN(
            input_size=rnn_input_size,
            hidden_size=args.hidden_size,
            num_layers=args.doc_layers,
            dropout_rate=0,
            dropout_output=args.dropout_rnn_output,
            concat_layers=False,
            rnn_type=self.RNN_TYPES[args.rnn_type],
            padding=args.rnn_padding)
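        # StackedBRNN is bidirectional (per its name), so each token is
        # encoded as a 2 * args.hidden_size vector; the widths below rely on
        # that factor of two.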

        self.hidden_match = layers.SeqDotAttnMatch()
        self.mtinfer = layers.MultiTurnInference(args, self.RNN_TYPES)

        # mtinfer output size; both configurations currently use the same width.
        choice_infer_hidden_size = 2 * args.hidden_size

        self.q_self_attn = layers.LinearSeqAttn(2 * args.hidden_size)

        if args.use_multiturn_infer:
            self.c_infer_linear = nn.Linear(4 * choice_infer_hidden_size,
                                            args.hidden_size)
        else:
            self.c_infer_linear = nn.Linear(
                2 * choice_infer_hidden_size + 2 * 2 * args.hidden_size,
                args.hidden_size)

        self.logits_linear = nn.Linear(args.hidden_size, 1)
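
# Minimal usage sketch (hypothetical: these argument values are illustrative
# assumptions, not part of the original example):
#
#   import argparse
#   args = argparse.Namespace(
#       pos_emb_dim=12, ner_emb_dim=8, rel_emb_dim=10, hidden_size=96,
#       doc_layers=1, rnn_type='lstm', dropout_rnn_output=True,
#       rnn_padding=False, matching_order='pqc',
#       use_multiturn_infer=True, use_bilstm=False)
#   model = MyModel(args)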
Example 2
# Same imports and module-level assumptions as Example 1.
class MyModel(nn.Module):
    def __init__(self, args):
        super(MyModel, self).__init__()
        self.args = args
        self.embedding_dim = 300
        self.embedding = nn.Embedding(len(vocab),
                                      self.embedding_dim,
                                      padding_idx=0)
        self.embedding.weight.data.fill_(0)
        self.embedding.weight.data[:2].normal_(0, 0.1)

        self.pos_embedding = nn.Embedding(len(pos_vocab),
                                          args.pos_emb_dim,
                                          padding_idx=0)
        self.pos_embedding.weight.data.normal_(0, 0.1)
        self.ner_embedding = nn.Embedding(len(ner_vocab),
                                          args.ner_emb_dim,
                                          padding_idx=0)
        self.ner_embedding.weight.data.normal_(0, 0.1)
        self.rel_embedding = nn.Embedding(len(rel_vocab),
                                          args.rel_emb_dim,
                                          padding_idx=0)
        self.rel_embedding.weight.data.normal_(0, 0.1)

        self.RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU}
        print("###########self.args.matching_order:  %s " %
              (self.args.matching_order))

        # RNN context encoder
        rnn_input_size = (self.embedding_dim + args.pos_emb_dim +
                          args.ner_emb_dim + 5 + 2 * args.rel_emb_dim)
        self.context_rnn = layers.StackedBRNN(
            input_size=rnn_input_size,
            hidden_size=args.hidden_size,
            num_layers=args.doc_layers,
            dropout_rate=args.dropout_rnn_output,  # float dropout probability
            dropout_output=args.rnn_output_dropout,  # bool: dropout on outputs
            concat_layers=False,
            rnn_type=self.RNN_TYPES[args.rnn_type],
            padding=args.rnn_padding)

        self.Hq_BiLstm = layers.StackedBRNN(
            input_size=rnn_input_size + args.hidden_size,
            hidden_size=args.hidden_size,
            num_layers=1,
            dropout_rate=args.dropout_rnn_output,  # float dropout probability
            dropout_output=args.rnn_output_dropout,  # bool: dropout on outputs
            concat_layers=False,
            rnn_type=self.RNN_TYPES[args.rnn_type],
            padding=args.rnn_padding)
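        # Presumably re-encodes the question over its word-level features
        # concatenated with an extra hidden_size-wide vector (hence the
        # rnn_input_size + args.hidden_size input width); the snippet does not
        # show where that vector comes from.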

        self.hidden_match = layers.SeqDotAttnMatch()
        self.mtinfer = layers.MultiTurnInference(args, self.RNN_TYPES)

        if self.args.tri_input == 'NA':
            self.mfunction = self.NA_TriMatching
        elif self.args.tri_input == 'CA':
            self.mfunction = self.CA_TriMatching
        else:
            self.mfunction = self.NA_CA_TriMatching
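        # args.tri_input selects one of three tri-matching methods defined
        # elsewhere on this class; the NA/CA abbreviations are not expanded in
        # this snippet.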

        # mtinfer output size; both configurations currently use the same width.
        choice_infer_hidden_size = 2 * args.hidden_size

        self.q_self_attn = layers.LinearSeqAttn(2 * args.hidden_size)
        # Author-marked additions:
        self.linearlayer = nn.Linear(rnn_input_size, args.hidden_size)
        self.pre_y = nn.Linear(
            2 * args.hidden_size + args.pos_emb_dim + args.ner_emb_dim + 5 +
            2 * args.rel_emb_dim, 1)
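        # Judging by its input width, pre_y appears to score a token from its
        # 2 * hidden_size encoding concatenated with the non-word features
        # (POS, NER, the five scalars, and both relation embeddings); this is
        # inferred from the code, not stated in the original.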
        if args.use_multiturn_infer:
            self.c_infer_linear = nn.Linear(
                4 * choice_infer_hidden_size + 2 * 2 * args.hidden_size,
                args.hidden_size)
        else:
            infer_input_size = 2 * 2 * args.hidden_size
            if self.args.p_channel:
                infer_input_size += 2 * choice_infer_hidden_size
            if self.args.q_channel:
                infer_input_size += 2 * choice_infer_hidden_size
            if self.args.c_channel:
                infer_input_size += 2 * choice_infer_hidden_size
            self.c_infer_linear = nn.Linear(infer_input_size, args.hidden_size)
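        # Illustrative width check (numbers are assumptions, not from the
        # original): with hidden_size = 96, choice_infer_hidden_size = 192;
        # with all three channels enabled,
        # infer_input_size = 4 * 96 + 3 * (2 * 192) = 384 + 1152 = 1536.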