    def construct_model(self, x_train, y_train):
        # get number of features
        input_dim = x_train.shape[-1]
        # get number of classes
        output_dim = len(np.unique(y_train))
        num_hidden_layers = len(self.hidden_layer_sizes)
        hidden_sizes = self.hidden_layer_sizes
        batch_size = self.batch_size
        _lambda = self._lambda
        if batch_size == 'auto':
            # use all data
            batch_size = x_train.shape[0]

        self.input = lfdnn.tensor([batch_size, input_dim], 'input')
        self.label = lfdnn.tensor([batch_size, output_dim], 'label')
        h = self.input
        # put your construction code here, feel free to modify the assignment of `w`
        # Hint: you should put all weight and bias variables into self.weight
        for i in range(num_hidden_layers):
            if i == 0:
                w = lfdnn.tensor([input_dim, hidden_sizes[i]], 'Weight' + str(i))
            else:
                w = lfdnn.tensor([hidden_sizes[i - 1], hidden_sizes[i]], 'Weight' + str(i))
            self.weight['Weight' + str(i)] = w
            b = lfdnn.tensor([1, hidden_sizes[i]], 'Bias' + str(i))
            self.weight['Bias' + str(i)] = b
            h = operator.add(operator.matmul(h, w), b)
            h = operator.sigmoid(h)
        if num_hidden_layers > 0:
            w = lfdnn.tensor([hidden_sizes[-1], output_dim], 'output_weight')
        else:
            w = lfdnn.tensor([input_dim, output_dim], 'output_weight')
        # end of your construction code

        self.weight['output_weight'] = w
        b = lfdnn.tensor([1, output_dim], 'output_bias')
        self.weight['output_bias'] = b
        h = operator.add(operator.matmul(h, w), b)
        self.output = operator.softmax(h)
        self.loss = operator.CE_with_logit(h, self.label)
        if _lambda > 0:
            for k, v in self.weight.items():
                # skip bias terms; only weight matrices are regularized
                if 'bias' in k.lower():
                    continue
                regularization_term = operator.scale(operator.mean_square_sum(v), _lambda)
                self.loss = operator.add(self.loss, regularization_term)
        self.accuracy = operator.accuracy(self.output, self.label)
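
# A minimal, self-contained sketch of evaluating a graph built the same way as
# above (4 samples, 3 features, one hidden layer of 2 units, 2 classes). It
# assumes the weight tensors take their values from the feed dictionary and
# that labels are one-hot; the tensor/operator calls mirror the construction
# code above, and the sizes here are only illustrative.
import numpy as np
from lfdnn import tensor, operator

x = tensor([4, 3], 'input')
y = tensor([4, 2], 'label')
w0 = tensor([3, 2], 'Weight0')
b0 = tensor([1, 2], 'Bias0')
w1 = tensor([2, 2], 'output_weight')
b1 = tensor([1, 2], 'output_bias')
h = operator.sigmoid(operator.add(operator.matmul(x, w0), b0))
logits = operator.add(operator.matmul(h, w1), b1)
loss = operator.CE_with_logit(logits, y)
acc = operator.accuracy(operator.softmax(logits), y)
feed = {
    'input': np.random.normal(size=[4, 3]),
    'label': np.eye(2)[[0, 1, 1, 0]],      # one-hot labels
    'Weight0': np.random.normal(size=[3, 2]),
    'Bias0': np.zeros([1, 2]),
    'output_weight': np.random.normal(size=[2, 2]),
    'output_bias': np.zeros([1, 2]),
}
print(loss.eval(feed), acc.eval(feed))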
Example #2
    def construct_model(self, x_train, y_train):
        # get number of features
        input_dim = x_train.shape[-1]
        # single output dimension (regression target)
        output_dim = 1
        batch_size = self.batch_size
        _lambda = self.alpha
        if batch_size == 'auto':
            # use all data
            batch_size = x_train.shape[0]

        self.input = lfdnn.tensor([batch_size, input_dim], 'input')
        self.label = lfdnn.tensor([batch_size, output_dim], 'label')
        w = lfdnn.tensor([input_dim, output_dim], 'output_weight')
        self.weight['output_weight'] = w
        b = lfdnn.tensor([1, output_dim], 'output_bias')
        self.weight['output_bias'] = b
        # put your code here, you can adjust the following lines
        h = self.input
        h = operator.add(operator.matmul(h, w), b)
        self.output = h
        self.loss = operator.mse(h, self.label)
        # end of your modification
        # dummy accuracy: reuse the loss so the attribute exists for regression
        self.accuracy = self.loss
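        # Note: `_lambda` (read from self.alpha above) is never applied in this
        # variant. If weight decay were wanted, the classifier's regularization
        # loop could be reused here, e.g. (sketch, same operators as above):
        #
        #     if _lambda > 0:
        #         for k, v in self.weight.items():
        #             if 'bias' in k.lower():
        #                 continue  # do not penalize bias terms
        #             self.loss = operator.add(
        #                 self.loss, operator.scale(operator.mean_square_sum(v), _lambda))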
    # requires: import numpy as np; from numpy.testing import assert_array_almost_equal
    def test_matrix_multiplication(self):
        a = tensor([2, 3], 'a')
        b = tensor([3, 1], 'b')
        feed = {
            'a': np.array([[0.4, 0.5, 1.1], [0.1, 2.3, -0.3]]),
            'b': np.array([[1.2], [-2.3], [0.2]])
        }
        # expected product, computed by hand:
        # row 1: 0.4*1.2 + 0.5*(-2.3) + 1.1*0.2 = -0.45
        # row 2: 0.1*1.2 + 2.3*(-2.3) + (-0.3)*0.2 = -5.23
        true_matrix = np.array([[-0.45], [-5.23]])
        assert_array_almost_equal(
            operator.matmul(a, b).eval(feed), true_matrix)
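
    # The models above rely on operator.add broadcasting a [1, n] bias across a
    # [batch, n] activation; a companion test in the same style could check that
    # directly (values are illustrative, not taken from the original suite).
    def test_bias_broadcast(self):
        h = tensor([2, 2], 'h')
        c = tensor([1, 2], 'c')
        feed = {
            'h': np.array([[1.0, 2.0], [3.0, 4.0]]),
            'c': np.array([[0.5, -1.0]])
        }
        # each row of h gets c added: [[1.5, 1.0], [3.5, 3.0]]
        expected = np.array([[1.5, 1.0], [3.5, 3.0]])
        assert_array_almost_equal(
            operator.add(h, c).eval(feed), expected)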
import numpy as np

from lfdnn import tensor, operator

a = tensor([3, 4], 't')
print(a.shape)
# build a relu node and evaluate it, plus its derivatives, under a feed
b = operator.relu(a)
feed = {'t': np.random.normal(size=[3, 4])}
print(b.eval(feed))
print(b.differentiate(a, feed))  # derivative of b with respect to a
print(a.back(b, feed))           # gradient propagated back to a
# a single sigmoid unit on top of a: y = sigmoid(a @ w + b)
w = tensor([4, 1], 'w')
b = tensor([1, 1], 'b')
h = operator.add(operator.matmul(a, w), b)
y = operator.sigmoid(h)
feed.update({'w': np.ones([4, 1]), 'b': np.array([[2]])})
print(y.eval(feed))
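# Continuing the example: the same derivative calls demonstrated for the relu
# node should also apply to the composed sigmoid unit (assuming differentiate
# and back keep the semantics shown above).
print(y.differentiate(w, feed))  # d y / d w under the current feed
print(w.back(y, feed))           # gradient propagated back to w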