# Assumed module-level imports for the three build() variants below
# (Keras 1.x API; AttentionLSTM is this repo's custom attention layer).
from keras import backend as K
from keras.layers import (Convolution1D, Dense, Embedding, Lambda, LSTM,
                          merge, TimeDistributed)

from attention_lstm import AttentionLSTM  # custom layer; import path assumed


def build(self):
    assert self.config['question_len'] == self.config['answer_len']

    question = self.question
    answer = self.get_answer()

    # add embedding layers
    weights = self.model_params.get('initial_embed_weights', None)
    weights = weights if weights is None else [weights]
    embedding = Embedding(input_dim=self.config['n_words'],
                          output_dim=self.model_params.get('n_embed_dims', 100),
                          weights=weights)
    question_embedding = embedding(question)
    answer_embedding = embedding(answer)

    # turn off layer updating
    # embedding.params = []
    # embedding.updates = []

    # shared tanh projection applied independently at every timestep
    dense = TimeDistributed(Dense(self.model_params.get('n_hidden', 200),
                                  # activity_regularizer=regularizers.activity_l1(1e-4),
                                  # W_regularizer=regularizers.l1(1e-4),
                                  activation='tanh'))
    question_dense = dense(question_embedding)
    answer_dense = dense(answer_embedding)

    # parallel convolutions with filter widths 2, 3, 5 and 7,
    # concatenated along the filter axis
    cnns = [Convolution1D(filter_length=filter_length,
                          nb_filter=self.model_params.get('nb_filters', 1000),
                          activation=self.model_params.get('conv_activation', 'relu'),
                          # W_regularizer=regularizers.l1(1e-4),
                          # activity_regularizer=regularizers.activity_l1(1e-4),
                          border_mode='same')
            for filter_length in [2, 3, 5, 7]]
    question_cnn = merge([cnn(question_dense) for cnn in cnns], mode='concat')
    answer_cnn = merge([cnn(answer_dense) for cnn in cnns], mode='concat')

    # max-over-time pooling: (batch, time, filters) -> (batch, filters).
    # The reduction must run over axis 1 (time), not axis -1 (filters),
    # to match the declared output_shape of (batch, filters).
    maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                     output_shape=lambda s: (s[0], s[2]))
    avepool = Lambda(lambda x: K.mean(x, axis=1, keepdims=False),
                     output_shape=lambda s: (s[0], s[2]))  # unused alternative
    maxpool.supports_masking = True
    avepool.supports_masking = True
    question_pool = maxpool(question_cnn)
    answer_pool = maxpool(answer_cnn)

    return question_pool, answer_pool
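# Sanity check (plain NumPy, hypothetical shapes; not part of the model) that
# max-over-time pooling of a (batch, time, filters) tensor must reduce axis 1
# to match the declared output_shape of (batch, filters) above.
import numpy as np

_x = np.random.rand(4, 20, 4000)      # (batch, question_len, 4 * nb_filters)
_pooled = _x.max(axis=1)              # collapse the time axis, keep the filters
assert _pooled.shape == (4, 4000)     # == (_x.shape[0], _x.shape[2])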
def build(self):
    question = self.question
    answer = self.get_answer()

    # add embedding layers
    weights = self.model_params.get('initial_embed_weights', None)
    weights = weights if weights is None else [weights]
    embedding = Embedding(input_dim=self.config['n_words'],
                          output_dim=self.model_params.get('n_embed_dims', 100),
                          # W_constraint=constraints.nonneg(),
                          weights=weights,
                          mask_zero=True)
    question_embedding = embedding(question)
    answer_embedding = embedding(answer)

    # max-over-time pooling: (batch, time, embed) -> (batch, embed)
    maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                     output_shape=lambda s: (s[0], s[2]))
    maxpool.supports_masking = True  # let the Embedding mask pass through the Lambda
    question_pool = maxpool(question_embedding)
    answer_pool = maxpool(answer_embedding)

    return question_pool, answer_pool
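# A minimal, self-contained sketch of how the (question_pool, answer_pool)
# pair returned by build() is typically wired into a trainable similarity
# model (Keras 1.x functional API, as above). The vocabulary size, sequence
# lengths, and mse loss are illustrative assumptions, not the repo's actual
# training code; masking is omitted to keep the example short.
from keras import backend as K
from keras.layers import Input, Embedding, Lambda, merge
from keras.models import Model


def cosine_similarity(tensors):
    q, a = tensors
    q = K.l2_normalize(q, axis=1)
    a = K.l2_normalize(a, axis=1)
    return K.sum(q * a, axis=1, keepdims=True)


question = Input(shape=(20,), dtype='int32')  # hypothetical question_len
answer = Input(shape=(20,), dtype='int32')    # hypothetical answer_len

embedding = Embedding(input_dim=5000, output_dim=100)
maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                 output_shape=lambda s: (s[0], s[2]))

question_pool = maxpool(embedding(question))
answer_pool = maxpool(embedding(answer))

similarity = merge([question_pool, answer_pool], mode=cosine_similarity,
                   output_shape=lambda s: (s[0][0], 1))
model = Model(input=[question, answer], output=similarity)
model.compile(optimizer='adam', loss='mse')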
def build(self):
    question = self.question
    answer = self.get_answer()

    # add embedding layers (pretrained weights currently disabled)
    weights = self.model_params.get('initial_embed_weights', None)
    weights = weights if weights is None else [weights]
    embedding = Embedding(input_dim=self.config['n_words'],
                          output_dim=self.model_params.get('n_embed_dims', 256),
                          # weights=weights,
                          mask_zero=True)
    question_embedding = embedding(question)
    answer_embedding = embedding(answer)

    # turn off layer updating
    # embedding.params = []
    # embedding.updates = []

    # question rnn part: forward and backward LSTMs over the question
    f_rnn = LSTM(self.model_params.get('n_lstm_dims', 141),
                 return_sequences=True,
                 dropout_U=0.2,
                 consume_less='mem')
    b_rnn = LSTM(self.model_params.get('n_lstm_dims', 141),
                 return_sequences=True,
                 dropout_U=0.2,
                 consume_less='mem',
                 go_backwards=True)
    question_f_rnn = f_rnn(question_embedding)
    question_b_rnn = b_rnn(question_embedding)

    # max- and mean-over-time pooling: (batch, time, dims) -> (batch, dims)
    maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                     output_shape=lambda s: (s[0], s[2]))
    avepool = Lambda(lambda x: K.mean(x, axis=1, keepdims=False),
                     output_shape=lambda s: (s[0], s[2]))
    # otherwise it will raise an exception like:
    # Layer lambda_1 does not support masking,
    # but was passed an input_mask: Elemwise{neq,no_inplace}.0
    maxpool.supports_masking = True
    avepool.supports_masking = True
    question_pool = merge([maxpool(question_f_rnn), maxpool(question_b_rnn)],
                          mode='concat', concat_axis=-1)

    # answer rnn part: the custom AttentionLSTM attends over the answer,
    # conditioned on the pooled question representation
    f_rnn = AttentionLSTM(self.model_params.get('n_lstm_dims', 141),
                          question_pool,
                          return_sequences=True,
                          consume_less='mem',
                          single_attention_param=True)
    b_rnn = AttentionLSTM(self.model_params.get('n_lstm_dims', 141),
                          question_pool,
                          return_sequences=True,
                          consume_less='mem',
                          go_backwards=True,
                          single_attention_param=True)
    answer_f_rnn = f_rnn(answer_embedding)
    answer_b_rnn = b_rnn(answer_embedding)
    answer_pool = merge([maxpool(answer_f_rnn), maxpool(answer_b_rnn)],
                        mode='concat', concat_axis=-1)

    return question_pool, answer_pool
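# Self-contained sketch of the bidirectional encode -> max-over-time -> concat
# pattern used by the question branch above, with a stock LSTM standing in for
# the repo's custom AttentionLSTM (whose constructor additionally takes the
# pooled question tensor to condition the attention). Vocabulary size and
# sequence length below are illustrative assumptions.
from keras import backend as K
from keras.layers import Input, Embedding, LSTM, Lambda, merge
from keras.models import Model

tokens = Input(shape=(40,), dtype='int32')               # hypothetical length
embedded = Embedding(input_dim=5000, output_dim=256, mask_zero=True)(tokens)

f_rnn = LSTM(141, return_sequences=True)(embedded)
b_rnn = LSTM(141, return_sequences=True, go_backwards=True)(embedded)

maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                 output_shape=lambda s: (s[0], s[2]))
maxpool.supports_masking = True  # as above, the Lambda must accept the mask

encoded = merge([maxpool(f_rnn), maxpool(b_rnn)],
                mode='concat', concat_axis=-1)           # (batch, 2 * 141)
encoder = Model(input=tokens, output=encoded)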