def build_model(self, train_set, test_set, validation_set=None):
        super(FCAE, self).build_model(train_set, test_set, validation_set)

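        # Reconstruction objective: mean squared error between the
        # autoencoder's output and its own input.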
        y_train = get_output(self.model, self.sym_x)
        loss = aggregate(squared_error(y_train, self.sym_x), mode='mean')
        # loss += 1e-4 * lasagne.regularization.regularize_network_params(self.model, lasagne.regularization.l2)

        y_test = get_output(self.model, self.sym_x, deterministic=True)
        loss_test = aggregate(squared_error(y_test, self.sym_x), mode='mean')

        all_params = get_all_params(self.model, trainable=True)
        sym_beta1 = T.scalar('beta1')
        sym_beta2 = T.scalar('beta2')
        grads = T.grad(loss, all_params)
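        # Rescale the gradient list to a maximum total L2 norm of 5, then
        # additionally clip each gradient element-wise to [-5, 5].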
        ngrads = lasagne.updates.total_norm_constraint(grads, 5)
        cgrads = [T.clip(g, -5, 5) for g in ngrads]
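        # NOTE: assuming `rmsprop` is lasagne.updates.rmsprop, the two scalars
        # below are passed positionally as `rho` and `epsilon`; the beta1/beta2
        # names only mirror the train_args keys.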
        updates = rmsprop(cgrads, all_params, self.sym_lr, sym_beta1,
                          sym_beta2)

        inputs = [
            self.sym_index, self.sym_batchsize, self.sym_lr, sym_beta1,
            sym_beta2
        ]
        f_train = theano.function(
            inputs,
            [loss],
            updates=updates,
            givens={
                self.sym_x: self.sh_train_x[self.batch_slice],
            },
        )

        f_test = theano.function(
            [self.sym_index, self.sym_batchsize],
            [loss_test],
            givens={
                self.sym_x: self.sh_test_x[self.batch_slice],
            },
            on_unused_input='ignore',
        )

        f_ae = None
        # f_ae = theano.function(
        #     [self.sym_batchsize], [y_test],
        #     givens={
        #         self.sym_x: self.sh_valid_x,
        #     },
        #     on_unused_input='ignore',
        # )

        self.train_args['inputs']['batchsize'] = 128
        self.train_args['inputs']['learningrate'] = 1e-3
        self.train_args['inputs']['beta1'] = 0.9
        self.train_args['inputs']['beta2'] = 1e-6
        self.train_args['outputs']['loss'] = '%0.6f'

        self.test_args['inputs']['batchsize'] = 128
        self.test_args['outputs']['loss_test'] = '%0.6f'

        # self.validate_args['inputs']['batchsize'] = 128
        # self.validate_args['outputs']['loss_eval'] = '%0.6f'
        # self.validate_args['outputs']['loss_acc'] = '%0.6f'
        return f_train, f_test, f_ae, self.train_args, self.test_args, self.validate_args
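
# --- Illustrative sketch, not part of the class above ---
# The train/test functions rely on Theano's `givens` mechanism to slice
# minibatches out of a shared variable that lives on the device. A minimal,
# self-contained version of that pattern (all names here are hypothetical):
import numpy as np
import theano
import theano.tensor as T

sh_x = theano.shared(np.random.rand(1000, 50).astype(theano.config.floatX))
sym_index = T.iscalar('index')
sym_batchsize = T.iscalar('batchsize')
batch_slice = slice(sym_index * sym_batchsize,
                    (sym_index + 1) * sym_batchsize)
sym_x = T.matrix('x')
# At call time the slice substitutes for sym_x, so no per-batch host-to-device
# copy is needed.
f_mean = theano.function([sym_index, sym_batchsize], sym_x.mean(),
                         givens={sym_x: sh_x[batch_slice]})
f_mean(0, 128)  # statistic over the first 128-row minibatch
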
    def build_model(self, train_set, test_set, validation_set=None):
        super(CNN, self).build_model(train_set, test_set, validation_set)

        epsilon = 1e-8
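        # Clip the network outputs away from zero so the log inside the
        # cross-entropy stays finite (see the sketch after this method).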
        y_train = T.clip(get_output(self.model, self.sym_x), epsilon, 1)
        loss_cc = aggregate(categorical_crossentropy(y_train, self.sym_t),
                            mode='mean')
        loss_train_acc = categorical_accuracy(y_train, self.sym_t).mean()

        y = T.clip(get_output(self.model, self.sym_x, deterministic=True),
                   epsilon, 1)
        loss_eval = aggregate(categorical_crossentropy(y, self.sym_t),
                              mode='mean')
        loss_acc = categorical_accuracy(y, self.sym_t).mean()

        all_params = get_all_params(self.model, trainable=True)
        sym_beta1 = T.scalar('beta1')
        sym_beta2 = T.scalar('beta2')
        grads = T.grad(loss_cc, all_params)
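        # Element-wise gradient clipping only; unlike FCAE above, no global
        # norm constraint is applied here.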
        grads = [T.clip(g, -5, 5) for g in grads]
        updates = rmsprop(grads, all_params, self.sym_lr, sym_beta1, sym_beta2)

        inputs = [
            self.sym_index, self.sym_batchsize, self.sym_lr, sym_beta1,
            sym_beta2
        ]
        f_train = theano.function(
            inputs,
            [loss_cc, loss_train_acc],
            updates=updates,
            givens={
                self.sym_x: self.sh_train_x[self.batch_slice],
                self.sym_t: self.sh_train_t[self.batch_slice],
            },
        )

        f_test = theano.function(
            [self.sym_index, self.sym_batchsize],
            [loss_eval, loss_acc],
            givens={
                self.sym_x: self.sh_test_x[self.batch_slice],
                self.sym_t: self.sh_test_t[self.batch_slice],
            },
        )

        f_validate = None
        if validation_set is not None:
            f_validate = theano.function(
                [self.sym_index, self.sym_batchsize],
                [loss_eval, loss_acc],
                givens={
                    self.sym_x: self.sh_valid_x[self.batch_slice],
                    self.sym_t: self.sh_valid_t[self.batch_slice],
                },
            )

        self.train_args['inputs']['batchsize'] = 128
        self.train_args['inputs']['learningrate'] = 1e-3
        self.train_args['inputs']['beta1'] = 0.9
        # 0.999 is an Adam-style beta2; as rmsprop's epsilon it would dominate
        # the update denominator and disable the adaptive scaling, so use a
        # small value as in FCAE above.
        self.train_args['inputs']['beta2'] = 1e-6
        self.train_args['outputs']['loss_cc'] = '%0.6f'
        self.train_args['outputs']['loss_train_acc'] = '%0.6f'

        self.test_args['inputs']['batchsize'] = 128
        self.test_args['outputs']['loss_eval'] = '%0.6f'
        self.test_args['outputs']['loss_acc'] = '%0.6f'

        self.validate_args['inputs']['batchsize'] = 128
        # self.validate_args['outputs']['loss_eval'] = '%0.6f'
        # self.validate_args['outputs']['loss_acc'] = '%0.6f'
        return f_train, f_test, f_validate, self.train_args, self.test_args, self.validate_args
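
# --- Illustrative sketch, not part of the class above ---
# Why the network outputs are clipped before the cross-entropy: a predicted
# probability of exactly 0 for the true class makes -log(p) infinite and the
# gradients NaN. A minimal, self-contained demonstration (hypothetical names):
import numpy as np
import theano
import theano.tensor as T
from lasagne.objectives import categorical_crossentropy

p = T.matrix('p')  # predicted class probabilities
t = T.matrix('t')  # one-hot targets
ce = categorical_crossentropy(T.clip(p, 1e-8, 1), t).mean()
f_ce = theano.function([p, t], ce)

preds = np.array([[0.0, 1.0]], dtype=theano.config.floatX)
targets = np.array([[1.0, 0.0]], dtype=theano.config.floatX)
f_ce(preds, targets)  # finite (-log(1e-8) ~ 18.4) instead of inf
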
    def build_model(self,
                    train_set,
                    test_set,
                    validation_set=None,
                    weights=None):
        super(wconvRNN, self).build_model(train_set, test_set, validation_set)

        def brier_score(predicted, target, weight_vector):
            # Class-weighted Brier score: per-class squared error, weighted
            # along the class axis, then averaged over the batch. (The original
            # argument names were swapped relative to the call sites below.)
            return T.power(predicted - target, 2.0).dot(weight_vector).mean()

        epsilon = 1e-8
        y_train = T.clip(get_output(self.model, self.sym_x), epsilon, 1)
        loss_brier_train = brier_score(y_train, self.sym_t, weights)
        loss_cc = aggregate(categorical_crossentropy(y_train, self.sym_t),
                            mode='mean')
        loss_train_acc = categorical_accuracy(y_train, self.sym_t).mean()

        y_test = T.clip(get_output(self.model, self.sym_x, deterministic=True),
                        epsilon, 1)
        loss_brier_test = brier_score(y_test, self.sym_t, weights)
        loss_eval = aggregate(categorical_crossentropy(y_test, self.sym_t),
                              mode='mean')
        loss_acc = categorical_accuracy(y_test, self.sym_t).mean()

        all_params = get_all_params(self.model, trainable=True)
        sym_beta1 = T.scalar('beta1')
        sym_beta2 = T.scalar('beta2')
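        # Gradients are taken w.r.t. the weighted Brier score; the
        # cross-entropy and accuracy above are computed but not optimized.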
        grads = T.grad(loss_brier_train, all_params)
        grads = [T.clip(g, -5, 5) for g in grads]
        updates = rmsprop(grads, all_params, self.sym_lr, sym_beta1, sym_beta2)

        inputs = [
            self.sym_index, self.sym_batchsize, self.sym_lr, sym_beta1,
            sym_beta2
        ]
        f_train = theano.function(
            inputs,
            [loss_brier_train],
            updates=updates,
            givens={
                self.sym_x: self.sh_train_x[self.batch_slice],
                self.sym_t: self.sh_train_t[self.batch_slice],
            },
        )

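        # Unlike the CNN variant above, this test function evaluates the whole
        # test set in a single pass, so it takes no index/batchsize inputs.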
        f_test = theano.function(
            [],
            [loss_brier_test],
            givens={
                self.sym_x: self.sh_test_x,
                self.sym_t: self.sh_test_t,
            },
            on_unused_input='ignore',
        )

        f_validate = None
        if validation_set is not None:
            f_validate = theano.function(
                [self.sym_batchsize],
                [loss_brier_test],
                givens={
                    self.sym_x: self.sh_valid_x,
                    self.sym_t: self.sh_valid_t,
                },
                on_unused_input='ignore',
            )

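        # Deterministic forward pass for inference on arbitrary input arrays.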
        predict = theano.function([self.sym_x], [y_test])

        self.train_args['inputs']['batchsize'] = 128
        self.train_args['inputs']['learningrate'] = 1e-3
        self.train_args['inputs']['beta1'] = 0.9
        # As in the CNN above, rmsprop's epsilon should be small, not 0.999.
        self.train_args['inputs']['beta2'] = 1e-6
        self.train_args['outputs']['loss_brier_train'] = '%0.6f'
        # self.train_args['outputs']['loss_train_acc'] = '%0.6f'

        # self.test_args['inputs']['batchsize'] = 128
        self.test_args['outputs']['loss_brier_test'] = '%0.6f'
        # self.test_args['outputs']['loss_acc'] = '%0.6f'

        # self.validate_args['inputs']['batchsize'] = 128
        # self.validate_args['outputs']['loss_eval'] = '%0.6f'
        # self.validate_args['outputs']['loss_acc'] = '%0.6f'
        return f_train, f_test, f_validate, self.train_args, self.test_args, self.validate_args, predict
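
# --- Illustrative sketch, not part of the class above ---
# What the class-weighted brier_score above computes, in plain NumPy
# (hypothetical names): squared error per class, weighted along the class
# axis by weight_vector, then averaged over the batch.
import numpy as np

def brier_score_np(predicted, target, weight_vector):
    return np.power(predicted - target, 2.0).dot(weight_vector).mean()

predicted = np.array([[0.7, 0.2, 0.1]])
target = np.array([[1.0, 0.0, 0.0]])
weight_vector = np.ones(3)  # uniform class weights
brier_score_np(predicted, target, weight_vector)  # 0.09 + 0.04 + 0.01 = 0.14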