def __init__(self, param):
    """Build the answer-choosing model from a config dict.

    :param param: dict of hyper-parameters; keys read here:
        mode, embedding, hidden_size, dropout_p, encoder_dropout_p,
        encoder_layer_num, is_bn
    """
    super(Model, self).__init__()

    # hyper-parameters
    self.mode = param['mode']
    self.hidden_size = param['hidden_size']
    self.dropout_p = param['dropout_p']
    self.encoder_dropout_p = param['encoder_dropout_p']
    self.encoder_layer_num = param['encoder_layer_num']
    self.is_bn = param['is_bn']

    # shared embedding layer
    self.embedding = embedding.ExtendEmbedding(param['embedding'])

    # answer encoder — fed the embedding minus 9 dims.
    # NOTE(review): the `- 9` presumably strips extra hand-crafted feature
    # dims that ExtendEmbedding appends — confirm against ExtendEmbedding.
    self.encoder_a = encoder.Rnn(
        mode=self.mode,
        input_size=self.embedding.embedding_dim - 9,
        hidden_size=self.hidden_size,
        dropout_p=self.encoder_dropout_p,
        bidirectional=True,
        layer_num=self.encoder_layer_num,
        is_bn=False,
    )

    # passage/question encoder over the full embedding
    self.encoder_p_q = encoder.Rnn(
        mode=self.mode,
        input_size=self.embedding.embedding_dim,
        hidden_size=self.hidden_size,
        dropout_p=self.encoder_dropout_p,
        bidirectional=True,
        layer_num=self.encoder_layer_num,
        is_bn=True,
    )

    # gated match-rnn; input is 2*h (bidirectional encoder output)
    self.match_rnn = match_rnn.MatchRNN(
        mode=self.mode,
        input_size=self.hidden_size * 2,
        hidden_size=self.hidden_size,
        dropout_p=self.dropout_p,
        gated_attention=True,
        is_bn=self.is_bn,
    )

    # extra single-layer rnn applied after matching
    self.addition_rnn = encoder.Rnn(
        mode=self.mode,
        input_size=self.hidden_size * 2,
        hidden_size=self.hidden_size,
        bidirectional=True,
        dropout_p=self.dropout_p,
        layer_num=1,
        is_bn=self.is_bn,
    )

    # attention pooling that summarizes the passage into one vector
    self.mean_p = pointer.AttentionPooling(
        input_size=self.hidden_size * 2,
        output_size=self.hidden_size * 2,
    )

    # output layer: scores candidate answers
    self.choose = choose.Choose(self.hidden_size * 2, self.hidden_size * 2)
def __init__(self, param):
    """Build the match-rnn + boundary-pointer model from a config dict.

    :param param: dict of hyper-parameters; keys read here:
        embedding (2-d array: vocab x w2v), embedding_type,
        embedding_is_training, mode, hidden_size, dropout_p,
        encoder_dropout_p, encoder_bidirectional, encoder_layer_num, is_bn
    """
    super(Model, self).__init__()

    # embedding matrix geometry: rows = vocab, cols = word-vector size
    self.w2v_size = param['embedding'].shape[1]
    self.vocab_size = param['embedding'].shape[0]
    self.embedding_type = param['embedding_type']
    self.embedding_is_training = param['embedding_is_training']

    # model hyper-parameters
    self.mode = param['mode']
    self.hidden_size = param['hidden_size']
    self.dropout_p = param['dropout_p']
    self.encoder_dropout_p = param['encoder_dropout_p']
    self.encoder_bidirectional = param['encoder_bidirectional']
    self.encoder_layer_num = param['encoder_layer_num']
    self.is_bn = param['is_bn']

    # 'standard' embedding -> plain lookup, encoder without batch-norm;
    # otherwise the extended embedding is used and the encoder applies it
    if self.embedding_type == 'standard':
        self.embedding = embedding.Embedding(param['embedding'])
        encoder_bn = False
    else:
        self.embedding = embedding.ExtendEmbedding(param['embedding'])
        encoder_bn = True

    # context encoder
    self.encoder = encoder.Rnn(
        mode=self.mode,
        input_size=self.embedding.embedding_dim,
        hidden_size=self.hidden_size,
        dropout_p=self.encoder_dropout_p,
        bidirectional=self.encoder_bidirectional,
        layer_num=self.encoder_layer_num,
        is_bn=encoder_bn,
    )

    # match rnn consumes 2*h when the encoder is bidirectional, else h
    match_in = self.hidden_size * 2 if self.encoder_bidirectional else self.hidden_size
    self.match_rnn = match_rnn.MatchRNN(
        mode=self.mode,
        input_size=match_in,
        hidden_size=self.hidden_size,
        dropout_p=self.dropout_p,
        gated_attention=False,
        is_bn=self.is_bn,
    )

    # boundary pointer network predicts answer start/end positions
    self.pointer_net = pointer.BoundaryPointer(
        mode=self.mode,
        input_size=self.hidden_size * 2,
        hidden_size=self.hidden_size,
        dropout_p=self.dropout_p,
        bidirectional=True,
        is_bn=self.is_bn,
    )
def __init__(self, param):
    """Build the gated match-rnn model with a pointer-network decoder.

    :param param: dict of hyper-parameters; keys read here:
        mode, embedding, hidden_size, dropout_p, encoder_dropout_p,
        encoder_layer_num, is_bn
    """
    super(Model, self).__init__()

    # hyper-parameters
    self.mode = param['mode']
    self.hidden_size = param['hidden_size']
    self.dropout_p = param['dropout_p']
    self.encoder_dropout_p = param['encoder_dropout_p']
    self.encoder_layer_num = param['encoder_layer_num']
    self.is_bn = param['is_bn']

    # shared embedding layer
    self.embedding = embedding.ExtendEmbedding(param['embedding'])

    # bidirectional context encoder over the full embedding
    self.encoder = encoder.Rnn(
        mode=self.mode,
        input_size=self.embedding.embedding_dim,
        hidden_size=self.hidden_size,
        dropout_p=self.encoder_dropout_p,
        bidirectional=True,
        layer_num=self.encoder_layer_num,
        is_bn=True,
    )

    # gated match-rnn; input is 2*h (bidirectional encoder output)
    self.match_rnn = match_rnn.MatchRNN(
        mode=self.mode,
        input_size=self.hidden_size * 2,
        hidden_size=self.hidden_size,
        dropout_p=self.dropout_p,
        gated_attention=True,
        is_bn=self.is_bn,
    )

    # extra single-layer rnn applied after matching
    self.addition_rnn = encoder.Rnn(
        mode=self.mode,
        input_size=self.hidden_size * 2,
        hidden_size=self.hidden_size,
        bidirectional=True,
        dropout_p=self.dropout_p,
        layer_num=1,
        is_bn=self.is_bn,
    )

    # attention pooling producing the pointer decoder's initial state
    self.init_state = pointer.AttentionPooling(
        input_size=self.hidden_size * 2,
        output_size=self.hidden_size,
    )

    # boundary pointer network predicts answer start/end positions
    self.pointer_net = pointer.BoundaryPointer(
        mode=self.mode,
        input_size=self.hidden_size * 2,
        hidden_size=self.hidden_size,
        dropout_p=self.dropout_p,
        bidirectional=True,
        is_bn=self.is_bn,
    )