from keras import backend as K
from keras.layers import (LSTM, Activation, ActivityRegularization,
                          Convolution1D, Dense, Dropout, Embedding, Lambda,
                          TimeDistributed, merge)
from attention_lstm import AttentionLSTM  # custom layer defined in this repo

# NOTE: this file targets the Keras 1.x functional API (merge, Convolution1D,
# border_mode, etc.). The enclosing model class definitions are omitted from
# this excerpt; each build() below belongs to a different model variant.

    def build(self):
        # shared layers are applied to both towers, so this model expects
        # question and answer sequences padded to the same length
        assert self.config['question_len'] == self.config['answer_len']

        question = self.question
        answer = self.get_answer()

        # add embedding layers
        embedding = Embedding(self.config['n_words'], self.model_params.get('n_embed_dims', 100))
        question_embedding = embedding(question)
        answer_embedding = embedding(answer)

        # freeze the embedding layer: assigning to `params`/`updates` was a
        # Keras 0.x idiom and has no effect in Keras 1.x
        embedding.trainable = False

        # dropout
        dropout = Dropout(0.25)
        question_dropout = dropout(question_embedding)
        answer_dropout = dropout(answer_embedding)

        # dense
        dense = TimeDistributed(Dense(self.model_params.get('n_hidden', 200), activation='tanh'))
        question_dense = dense(question_dropout)
        answer_dense = dense(answer_dropout)

        # regularization
        question_dense = ActivityRegularization(l2=0.0001)(question_dense)
        answer_dense = ActivityRegularization(l2=0.0001)(answer_dense)

        # dropout
        question_dropout = dropout(question_dense)
        answer_dropout = dropout(answer_dense)

        # parallel 1D convolutions with filter widths 2, 3, 5 and 7, acting
        # as multi-scale n-gram feature detectors
        cnns = [Convolution1D(filter_length=filter_length,
                              nb_filter=self.model_params.get('nb_filters', 1000),
                              activation=self.model_params.get('conv_activation', 'relu'),
                              border_mode='same') for filter_length in [2, 3, 5, 7]]
        question_cnn = merge([cnn(question_dropout) for cnn in cnns], mode='concat')
        answer_cnn = merge([cnn(answer_dropout) for cnn in cnns], mode='concat')

        # regularization
        question_cnn = ActivityRegularization(l2=0.0001)(question_cnn)
        answer_cnn = ActivityRegularization(l2=0.0001)(answer_cnn)

        # dropout
        question_dropout = dropout(question_cnn)
        answer_dropout = dropout(answer_cnn)

        # max-over-time pooling: collapse the sequence axis, keeping the
        # strongest activation of each feature
        maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False), output_shape=lambda x: (x[0], x[2]))
        question_pool = maxpool(question_dropout)
        answer_pool = maxpool(answer_dropout)

        # activation
        activation = Activation('tanh')
        question_output = activation(question_pool)
        answer_output = activation(answer_pool)

        return question_output, answer_output
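
    # A second variant: recurrent encoders instead of convolutions. A
    # bidirectional LSTM pair reads the question, and its pooled
    # representation conditions an AttentionLSTM over the answer.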
    def build(self):
        question = self.question
        answer = self.get_answer()

        # add embedding layers (mask_zero stays False: the Lambda-based max
        # pooling below does not propagate masks)
        embedding = Embedding(self.config['n_words'], self.model_params.get('n_embed_dims', 100), mask_zero=False)
        question_embedding = embedding(question)
        answer_embedding = embedding(answer)

        # freeze the embedding layer (see note in the first build())
        embedding.trainable = False

        # dropout
        dropout = Dropout(0.25)
        question_dropout = dropout(question_embedding)
        answer_dropout = dropout(answer_embedding)

        # question rnn part
        f_rnn = LSTM(self.model_params.get('n_lstm_dims', 141), return_sequences=True)
        b_rnn = LSTM(self.model_params.get('n_lstm_dims', 141), return_sequences=True, go_backwards=True)
        question_f_rnn = f_rnn(question_dropout)
        question_b_rnn = b_rnn(question_dropout)
        question_f_dropout = dropout(question_f_rnn)
        question_b_dropout = dropout(question_b_rnn)

        # maxpooling
        maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False), output_shape=lambda x: (x[0], x[2]))
        question_pool = merge([maxpool(question_f_dropout), maxpool(question_b_dropout)], mode='concat', concat_axis=-1)

        # answer rnn part
        f_rnn = AttentionLSTM(self.model_params.get('n_lstm_dims', 141), question_pool, single_attn=True, return_sequences=True)
        b_rnn = AttentionLSTM(self.model_params.get('n_lstm_dims', 141), question_pool, single_attn=True, return_sequences=True, go_backwards=True)
        answer_f_rnn = f_rnn(answer_dropout)
        answer_b_rnn = b_rnn(answer_dropout)
        answer_f_dropout = dropout(answer_f_rnn)
        answer_b_dropout = dropout(answer_b_rnn)
        answer_pool = merge([maxpool(answer_f_dropout), maxpool(answer_b_dropout)], mode='concat', concat_axis=-1)

        # activation
        activation = Activation('tanh')
        question_output = activation(question_pool)
        answer_output = activation(answer_pool)

        return question_output, answer_output
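
    # A third variant: as above, but the forward and backward sequences are
    # concatenated before pooling, and an activity regularizer is applied to
    # the recurrent features.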
    def build(self):
        question = self.question
        answer = self.get_answer()

        # add embedding layers
        embedding = Embedding(self.config['n_words'],
                              self.model_params.get('n_embed_dims', 100))
        question_embedding = embedding(question)
        answer_embedding = embedding(answer)

        # freeze the embedding layer (see note in the first build())
        embedding.trainable = False

        # dropout
        dropout = Dropout(0.25)
        question_dropout = dropout(question_embedding)
        answer_dropout = dropout(answer_embedding)

        # question rnn part
        f_rnn = LSTM(self.model_params.get('n_lstm_dims', 141),
                     return_sequences=True)
        b_rnn = LSTM(self.model_params.get('n_lstm_dims', 141),
                     return_sequences=True,
                     go_backwards=True)
        question_rnn = merge(
            [f_rnn(question_dropout),
             b_rnn(question_dropout)],
            mode='concat',
            concat_axis=-1)
        question_dropout = dropout(question_rnn)

        # regularize
        regularize = ActivityRegularization(l2=0.0001)
        question_dropout = regularize(question_dropout)

        # could add convolution layer here (as in paper)

        # maxpooling
        maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                         output_shape=lambda x: (x[0], x[2]))
        question_pool = maxpool(question_dropout)

        # answer rnn part
        f_rnn = AttentionLSTM(self.model_params.get('n_lstm_dims', 141),
                              question_pool,
                              return_sequences=True)
        b_rnn = AttentionLSTM(self.model_params.get('n_lstm_dims', 141),
                              question_pool,
                              return_sequences=True,
                              go_backwards=True)
        # f_rnn = LSTM(self.model_params.get('n_lstm_dims', 141), return_sequences=True)
        # b_rnn = LSTM(self.model_params.get('n_lstm_dims', 141), return_sequences=True, go_backwards=True)
        answer_rnn = merge([f_rnn(answer_dropout),
                            b_rnn(answer_dropout)],
                           mode='concat',
                           concat_axis=-1)
        answer_dropout = dropout(answer_rnn)
        answer_dropout = regularize(answer_dropout)
        answer_pool = maxpool(answer_dropout)

        # activation
        activation = Activation('tanh')
        question_output = activation(question_pool)
        answer_output = activation(answer_pool)

        return question_output, answer_output
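
# --- Usage sketch (an assumption, not part of the original file) ---
# The two tensors returned by build() are typically scored with a similarity
# measure and trained with a ranking loss. A minimal sketch, assuming Keras
# 1.x and that `question`/`answer` are the underlying Input tensors of a
# hypothetical `model` instance:
#
#     from keras.models import Model
#
#     question_output, answer_output = model.build()
#     similarity = merge([question_output, answer_output],
#                        mode='cos', dot_axes=1)
#     training_model = Model(input=[question, answer], output=similarity)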