Example #1
    def test_relu_derivative(self):
        # ReLU derivative is 0 for non-positive inputs and 1 for positive inputs
        a = tensor([1, 3], 'a')
        feed = {'a': np.array([[-1, 0, 3]])}
        assert_array_almost_equal(
            operator.relu(a).differentiate(a, feed), np.array([[0, 0, 1]]))
        # sigmoid(a) is strictly positive, so relu(sigmoid(a)) and sigmoid(a)
        # must have the same derivative with respect to a
        result_1 = operator.relu(operator.sigmoid(a)).differentiate(a, feed)
        result_2 = operator.sigmoid(a).differentiate(a, feed)
        assert_array_almost_equal(result_1, result_2)
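Because sigmoid never outputs a non-positive value, ReLU acts as the identity on it, which is why the two derivatives in the test agree. The following is a minimal finite-difference check of the same identity in plain NumPy, independent of lfdnn; the helper functions are defined locally for illustration only.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def relu(x):
    return np.maximum(x, 0.0)

x = np.array([[-1.0, 0.0, 3.0]])
eps = 1e-6
# numerical derivative of relu(sigmoid(x)) with respect to x
grad_composed = (relu(sigmoid(x + eps)) - relu(sigmoid(x - eps))) / (2 * eps)
# numerical derivative of sigmoid(x) alone
grad_sigmoid = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
# sigmoid is strictly positive, so relu is the identity on it and the two agree
assert np.allclose(grad_composed, grad_sigmoid)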
Example #2
    def construct_model(self, x_train, y_train):
        # get number of features
        input_dim = x_train.shape[-1]
        # get number of classes
        output_dim = len(np.unique(y_train))
        layer_num = len(self.hidden_layer_sizes)
        # widths of the hidden layers, e.g. [64, 32] for two hidden layers
        hidden_layer_num = self.hidden_layer_sizes
        batch_size = self.batch_size
        _lambda = self._lambda
        if batch_size == 'auto':
            # use all data
            batch_size = x_train.shape[0]

        self.input = lfdnn.tensor([batch_size, input_dim], 'input')
        self.label = lfdnn.tensor([batch_size, output_dim], 'label')
        h = self.input
        # hidden layers: h = sigmoid(h @ W_i + b_i); every weight and bias
        # is registered in self.weight so it can be updated during training
        for i in range(layer_num):
            if i == 0:
                w = lfdnn.tensor([input_dim, hidden_layer_num[i]], 'Weight' + str(i))
            else:
                w = lfdnn.tensor([hidden_layer_num[i - 1], hidden_layer_num[i]], 'Weight' + str(i))
            self.weight['Weight' + str(i)] = w
            b = lfdnn.tensor([1, hidden_layer_num[i]], 'Bias' + str(i))
            self.weight['Bias' + str(i)] = b
            h = operator.add(operator.matmul(h, w), b)
            h = operator.sigmoid(h)
        # output weight: maps the last hidden layer (or the raw input when
        # there are no hidden layers) to the class logits
        if len(hidden_layer_num) > 0:
            w = lfdnn.tensor([hidden_layer_num[-1], output_dim], 'output_weight')
        else:
            w = lfdnn.tensor([input_dim, output_dim], 'output_weight')
        self.weight['output_weight'] = w
        b = lfdnn.tensor([1, output_dim], 'output_bias')
        self.weight['output_bias'] = b
        h = operator.add(operator.matmul(h, w), b)
        # class probabilities via softmax; the cross-entropy loss is computed from the raw logits
        self.output = operator.softmax(h)
        self.loss = operator.CE_with_logit(h, self.label)
        if _lambda > 0:
            # add an L2 penalty for every weight matrix; bias terms are skipped
            # (the original check `k.find('bias') > 0` missed the 'Bias0', 'Bias1', ... keys)
            for k, v in self.weight.items():
                if 'bias' in k.lower():
                    continue
                regularization_term = operator.scale(operator.mean_square_sum(v), _lambda)
                self.loss = operator.add(self.loss, regularization_term)
        self.accuracy = operator.accuracy(self.output, self.label)
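For readers who want to see the computation the graph above encodes, here is a minimal plain-NumPy sketch of the same forward pass: sigmoid hidden layers, a softmax output, cross-entropy loss, and an optional L2 penalty on the weight matrices. It is an illustration only; the helper forward_pass and its argument names are hypothetical, and it assumes mean_square_sum averages the squared entries and that the cross-entropy is averaged over the batch.

import numpy as np

def forward_pass(x, weights, biases, out_w, out_b, labels, lam=0.0):
    # plain-NumPy mirror of the graph built in construct_model (illustrative only)
    h = x
    # hidden layers: h = sigmoid(h @ W_i + b_i)
    for W, b in zip(weights, biases):
        h = 1.0 / (1.0 + np.exp(-(h @ W + b)))
    logits = h @ out_w + out_b
    # softmax over the class dimension
    z = logits - logits.max(axis=1, keepdims=True)
    probs = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)
    # cross-entropy against one-hot labels, averaged over the batch
    loss = -np.mean(np.sum(labels * np.log(probs + 1e-12), axis=1))
    # L2 penalty on weight matrices only; bias terms are excluded
    for W in list(weights) + [out_w]:
        loss = loss + lam * np.mean(W ** 2)
    accuracy = np.mean(probs.argmax(axis=1) == labels.argmax(axis=1))
    return probs, loss, accuracy

# example call: one hidden layer of width 5, 3 classes, batch of 8
rng = np.random.default_rng(0)
x = rng.normal(size=(8, 4))
labels = np.eye(3)[rng.integers(0, 3, size=8)]
probs, loss, acc = forward_pass(
    x, [rng.normal(size=(4, 5))], [np.zeros((1, 5))],
    rng.normal(size=(5, 3)), np.zeros((1, 3)), labels, lam=0.1)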
Example #3
    def test_backward(self):
        # sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), so the derivative at 0 is 0.25
        a = tensor([1, 1], 'a')
        feed = {'a': np.array([[0]])}
        self.assertAlmostEqual(
            operator.sigmoid(a).differentiate(a, feed)[0, 0], 0.25)
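The expected value follows from sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), which at x = 0 gives 0.5 * 0.5 = 0.25. A one-off NumPy sanity check, independent of lfdnn:

import numpy as np

sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
eps = 1e-6
# central finite difference of sigmoid at 0 versus the closed-form derivative
numeric = (sigmoid(eps) - sigmoid(-eps)) / (2 * eps)
analytic = sigmoid(0.0) * (1.0 - sigmoid(0.0))   # 0.5 * 0.5 = 0.25
assert np.isclose(numeric, analytic)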
Example #4
from lfdnn import tensor, operator
import numpy as np

# a 3x4 placeholder tensor named 't'
a = tensor([3, 4], 't')
print(a.shape)

# apply ReLU and evaluate it on a random input
b = operator.relu(a)
feed = {'t': np.random.normal(size=[3, 4])}
print(b.eval(feed))
# gradient of the ReLU output with respect to its input, via differentiate and via back
print(b.differentiate(a, feed))
print(a.back(b, feed))

# a small affine layer followed by a sigmoid: y = sigmoid(a @ w + b)
w = tensor([4, 1], 'w')
b = tensor([1, 1], 'b')
h = operator.add(operator.matmul(a, w), b)
y = operator.sigmoid(h)
feed.update({'w': np.ones([4, 1]), 'b': np.array([[2]])})
print(y.eval(feed))
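To verify that the gradients produced by the graph are plausible, the last model, y = sigmoid(a @ w + b), can be checked against finite differences in plain NumPy. The sketch below reuses the same shapes and feed values; the helper names (sigmoid, f, a_val, ...) are local to the sketch and not part of lfdnn.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

a_val = np.random.normal(size=[3, 4])
w_val = np.ones([4, 1])
b_val = np.array([[2.0]])

def f(a):
    return sigmoid(a @ w_val + b_val)

# numerical gradient of the summed output with respect to one entry of a
eps = 1e-6
i, j = 1, 2
a_plus, a_minus = a_val.copy(), a_val.copy()
a_plus[i, j] += eps
a_minus[i, j] -= eps
numeric = (f(a_plus).sum() - f(a_minus).sum()) / (2 * eps)

# analytic gradient: entry [i, j] is sigmoid'(a_i . w + b) * w[j]
s = f(a_val)
analytic = (s * (1 - s)) @ w_val.T
assert np.isclose(numeric, analytic[i, j])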