Example #1
    def set_train_model(self, train_set, cost_func, batch_size, learning_rate, l1_a=0.0, l2_a=0.0001):

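        # self.p_y (the network output) is assumed to have been computed by a
        # prior forward pass; only the regularized cost and updates are built here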
        cost = cost_func(self.p_y, self.y) \
               + self.l1 * l1_a + self.l2 * l2_a

        print('compiling train model..')

        # compute gradients of weights and biases
        updates = []
        for i in range(self.depth):
            g_w = T.grad(cost, self.weights[i])
            g_b = T.grad(cost, self.biases[i])
            updates += [(self.weights[i], self.weights[i] - learning_rate * g_w),
                        (self.biases[i], self.biases[i] - learning_rate * g_b)]

        train_set_x, train_set_y = train_set
        index = self.index
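        # compile the training function: `givens` substitutes the minibatch slice
        # selected by `index` for the symbolic inputs self.x and self.y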
        self.train_model = my_theano.function([index], cost, updates=updates, givens={
            self.x: self._get_mini_batch(train_set_x, batch_size, index),
            self.y: self._get_mini_batch(train_set_y, batch_size, index)
        })

        # check if using gpu
        tu.check_gpu(self.train_model)
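A minimal sketch of how the compiled train_model might be driven once set_train_model has run; net, n_train, and n_epochs are assumed names for the network instance, the number of training examples, and the epoch count, and batch_size is reused from the call above. None of this loop appears in the original code:

    # hypothetical driver loop; each call to train_model runs one SGD step
    n_train_batches = n_train // batch_size
    for epoch in range(n_epochs):
        for batch_index in range(n_train_batches):
            # `batch_index` selects the minibatch slice via the `givens` substitution
            batch_cost = net.train_model(batch_index)
        print('epoch %d, last minibatch cost %f' % (epoch, batch_cost))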
Example #2
    def set_train_model(self, train_set, cost_func, batch_size, learning_rate, l1_a=0.0, l2_a=0.0001):
        self.p_y = self.forward(self.x, batch_size)
        cost = cost_func(self.p_y, self.y) + self.l1 * l1_a + self.l2 * l2_a

        # set early stopping patience
        self.patience = 20
        self.lest_valid_error = np.inf

        print('compiling train model..')

        # compute gradients of weights and biases
        updates = []
        for layer in reversed(self.weighted_layers):
            g_w = T.grad(cost, layer.w)
            g_b = T.grad(cost, layer.b)
            updates += [(layer.w, layer.w - learning_rate * g_w),
                        (layer.b, layer.b - learning_rate * g_b)]

        train_set_x, train_set_y = train_set
        index = self.index
        self.train_model = my_theano.function([index], cost, updates=updates, givens={
            self.x: self._get_mini_batch(train_set_x, batch_size, index),
            self.y: self._get_mini_batch(train_set_y, batch_size, index)
        })

        # check if using gpu
        tu.check_gpu(self.train_model)
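Examples #2 and #3 initialize self.patience and self.lest_valid_error, but the loop that consumes them is not shown. Below is a hedged sketch of how patience-based early stopping is commonly wired to such attributes; validate_model, n_epochs, n_train_batches, and n_valid_batches are assumed names and are not part of the original code:

    # hypothetical early-stopping loop around the compiled train_model
    epochs_without_improvement = 0
    for epoch in range(n_epochs):
        for batch_index in range(n_train_batches):
            net.train_model(batch_index)
        # average validation error over all validation minibatches (np as in the examples)
        valid_error = np.mean([validate_model(i) for i in range(n_valid_batches)])
        if valid_error < net.lest_valid_error:
            net.lest_valid_error = valid_error
            epochs_without_improvement = 0
        else:
            epochs_without_improvement += 1
            if epochs_without_improvement > net.patience:
                break  # patience exhausted: stop training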
Example #3
    def set_train_model(self,
                        train_set,
                        cost_func,
                        batch_size,
                        learning_rate,
                        l1_a=0.0,
                        l2_a=0.0001):
        self.p_y = self.forward(self.x, batch_size)
        cost = cost_func(self.p_y, self.y) + self.l1 * l1_a + self.l2 * l2_a

        # set early stopping patience
        self.patience = 20
        self.lest_valid_error = np.inf

        print('compiling train model..')

        # compute gradients of weights and biases
        updates = []
        for layer in reversed(self.weighted_layers):
            g_w = T.grad(cost, layer.w)
            g_b = T.grad(cost, layer.b)
            updates += [(layer.w, layer.w - learning_rate * g_w),
                        (layer.b, layer.b - learning_rate * g_b)]

        train_set_x, train_set_y = train_set
        index = self.index
        self.train_model = my_theano.function(
            [index],
            cost,
            updates=updates,
            givens={
                self.x: self._get_mini_batch(train_set_x, batch_size, index),
                self.y: self._get_mini_batch(train_set_y, batch_size, index)
            })

        # check if using gpu
        tu.check_gpu(self.train_model)
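All three examples rely on a _get_mini_batch helper that is not shown here. With the dataset stored in Theano shared variables, as the `givens` usage suggests, it would return the symbolic slice of batch_size rows starting at index * batch_size. The body below is an assumption for illustration, not the original implementation:

    def _get_mini_batch(self, shared_data, batch_size, index):
        # symbolic minibatch slice of a Theano shared variable;
        # `index` is the symbolic minibatch index passed to the compiled function
        return shared_data[index * batch_size:(index + 1) * batch_size]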