Example #1
    def construct_model(self, x_train, y_train):
        # get number of features
        input_dim = x_train.shape[-1]
        # get number of classes
        output_dim = len(np.unique(y_train))
        # number of hidden layers and their widths
        hidden_layer_num = len(self.hidden_layer_sizes)
        hidden_layer_sizes = self.hidden_layer_sizes
        batch_size = self.batch_size
        _lambda = self._lambda
        if batch_size == 'auto':
            # use all data
            batch_size = x_train.shape[0]

        self.input = lfdnn.tensor([batch_size, input_dim], 'input')
        self.label = lfdnn.tensor([batch_size, output_dim], 'label')
        h = self.input
        # put your construction code here, feel free to modify the assignment of `w`
        # Hint: you should put all weight and bias variables into self.weight
        # (a possible sketch of this block is given right after this example)
        w = lfdnn.tensor([input_dim, output_dim], 'output_weight')
        # end of your construction code

        self.weight['output_weight'] = w
        b = lfdnn.tensor([1, output_dim], 'output_bias')
        self.weight['output_bias'] = b
        h = operator.add(operator.matmul(h, w), b)
        self.output = operator.softmax(h)
        self.loss = operator.CE_with_logit(h, self.label)
        if _lambda > 0:
            for k, v in self.weight.items():
                if 'bias' in k:
                    continue
                regularization_term = operator.scale(operator.mean_square_sum(v), _lambda)
                self.loss = operator.add(self.loss, regularization_term)
        self.accuracy = operator.accuracy(self.output, self.label)
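The construction block above is left as an exercise. Below is a minimal sketch of one way it might be filled in, assuming `self.hidden_layer_sizes` lists the hidden-layer widths and that relu activations are wanted between layers; the `hidden_weight_*` / `hidden_bias_*` names are illustrative, not prescribed by the template.

        # sketch only: stack fully connected relu layers, registering every
        # weight and bias in self.weight as the hint asks
        last_dim = input_dim
        for i, width in enumerate(self.hidden_layer_sizes):
            w = lfdnn.tensor([last_dim, width], 'hidden_weight_' + str(i))
            b = lfdnn.tensor([1, width], 'hidden_bias_' + str(i))
            self.weight['hidden_weight_' + str(i)] = w
            self.weight['hidden_bias_' + str(i)] = b
            h = operator.relu(operator.add(operator.matmul(h, w), b))
            last_dim = width
        # the output weight now maps the last hidden width to output_dim
        w = lfdnn.tensor([last_dim, output_dim], 'output_weight')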
Example #2
 def test_constant_tensor_derivative(self):
     a = tensor([2, 2], 'a', value=3)
     b = tensor([2, 2], 'b')
     feed = {'b': np.array([[5, 6], [7, 8]])}
     assert_array_almost_equal(
         operator.reduce_sum(operator.product(a, b)).differentiate(b, feed),
         3 * np.ones([2, 2]))
Example #3
    def construct_model(self, x_train, y_train):
        # get number of features
        input_dim = x_train.shape[-1]
        # regression has a single output dimension
        output_dim = 1
        batch_size = self.batch_size
        _lambda = self.alpha
        if batch_size == 'auto':
            # use all data
            batch_size = x_train.shape[0]

        self.input = lfdnn.tensor([batch_size, input_dim], 'input')
        self.label = lfdnn.tensor([batch_size, output_dim], 'label')
        w = lfdnn.tensor([input_dim, output_dim], 'output_weight')
        self.weight['output_weight'] = w
        b = lfdnn.tensor([1, output_dim], 'output_bias')
        self.weight['output_bias'] = b
        # put your code here, you can adjust the following lines
        h = self.input
        h = operator.add(operator.matmul(h, w), b)
        self.output = h
        self.loss = operator.mse(h, self.label)
        # end of your modification
        # dummy accuracy: reuse the loss as a placeholder metric
        self.accuracy = self.loss
Example #4
 def test_mse(self):
     a = tensor([3, 1], 'a')
     b = tensor([3, 1], 'b')
     feed = {
         'a': np.array([[1.3], [-2.2], [0.4]]),
         'b': np.array([[1.2], [-2.3], [0.2]])
     }
     true_value = mean_squared_error(feed['a'], feed['b'])
     self.assertAlmostEqual(operator.mse(a, b).eval(feed), true_value)
Example #5
 def test_cross_entropy(self):
     x = tensor([3, 3], 'x')
     y = tensor([3, 3], 'y')
     feed = {
         'x': np.array([[0.4, 0.5, 0.1], [0.4, 0.5, 0.1], [0.4, 0.5, 0.1]]),
         'y': np.array([[0, 0, 1], [0, 0, 1], [1, 0, 0]])
     }
     true_value = (2 * np.log(0.1) + np.log(0.4)) / 3
     self.assertAlmostEqual(operator.CE(x, y).eval(feed), -1.0 * true_value)
Example #6
 def test_matrix_multiplication(self):
     a = tensor([2, 3], 'a')
     b = tensor([3, 1], 'b')
     feed = {
         'a': np.array([[0.4, 0.5, 1.1], [0.1, 2.3, -0.3]]),
         'b': np.array([[1.2], [-2.3], [0.2]])
     }
     true_matrix = np.array([[-0.45], [-5.23]])
     assert_array_almost_equal(
         operator.matmul(a, b).eval(feed), true_matrix)
Example #7
 def test_backward_add(self):
     a = tensor([1, 1], 'a')
     feed = {'a': np.array([[0.1]])}
     target = operator.add(operator.product(a, a), operator.scale(a, 3))
     self.assertAlmostEqual(target.eval(feed)[0, 0], 0.31)
     self.assertAlmostEqual(
         operator.reduce_sum(target).differentiate(a, feed)[0, 0], 3.2)
Example #8
 def test_softmax(self):
     a = tensor([1, 3], 'a')
     feed = {'a': np.array([[1, 2, 3]])}
     answer_list = np.exp([1, 2, 3])
     answer_list /= np.sum(answer_list)
     assert_array_almost_equal(
         operator.softmax(a).forward(feed)[0], answer_list)
Example #9
 def test_relu_derivative(self):
     a = tensor([1, 3], 'a')
     feed = {'a': np.array([[-1, 0, 3]])}
     assert_array_almost_equal(
         operator.relu(a).differentiate(a, feed), np.array([[0, 0, 1]]))
     result_1 = operator.relu(operator.sigmoid(a)).differentiate(a, feed)
     result_2 = operator.sigmoid(a).differentiate(a, feed)
     assert_array_almost_equal(result_1, result_2)
Example #10
 def test_abs(self):
     a = tensor([1, 3], 'a')
     feed = {'a': np.array([[1, -1, 3]])}
     assert_array_almost_equal(
         operator.abs(a).forward(feed), np.array([[1, 1, 3]]))
     result_1 = operator.abs(operator.scale(a, 2)).differentiate(a, feed)
     result_2 = 2 * np.array([[1, -1, 1]])
     assert_array_almost_equal(result_1, result_2)
Example #11
 def test_backward(self):
     a = tensor([1, 1], 'a')
     feed = {'a': np.array([[0]])}
     self.assertAlmostEqual(
         operator.sigmoid(a).differentiate(a, feed)[0, 0], 0.25)
Example #12
 def test_product(self):
     a = tensor([2, 1], 'a')
     feed = {'a': np.array([[5], [6]])}
     assert_array_almost_equal(
         operator.product(a, a).eval(feed), np.array([[25], [36]]))
Example #13
 def test_forward(self):
     a = tensor([2], 'a')
     feed = {'a': np.array([5, 6])}
     self.assertAlmostEqual(operator.reduce_mean(a).forward(feed), 5.5)
Example #14
 def test_shape(self):
     a = tensor([1, 2], 'a')
     self.assertEqual(operator.reduce_mean(a).shape, [1, 1])
Example #15
 def test_relu(self):
     a = tensor([1, 3], 'a')
     feed = {'a': np.array([[1, -1, 3]])}
     assert_array_almost_equal(
         operator.relu(a).forward(feed), np.array([[1, 0, 3]]))
Example #16
import numpy as np
from lfdnn import tensor, operator

a = tensor([3, 4], 't')
print(a.shape)
relu_out = operator.relu(a)
feed = {'t': np.random.normal(size=[3, 4])}
print(relu_out.eval(feed))
print(relu_out.differentiate(a, feed))
print(a.back(relu_out, feed))

w = tensor([4, 1], 'w')
b = tensor([1, 1], 'b')
h = operator.add(operator.matmul(a, w), b)
y = operator.sigmoid(h)
feed.update({'w': np.ones([4, 1]), 'b': np.array([[2]])})
print(y.eval(feed))
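Continuing the snippet above (purely illustrative: the `label` tensor and the zero targets are assumptions, not part of the original example), a loss can be attached to the sigmoid output and differentiated with respect to `w` using the same `eval` / `differentiate` API exercised in the tests:

label = tensor([3, 1], 'label')             # hypothetical target tensor
loss = operator.mse(y, label)               # scalar mean-squared-error loss
feed.update({'label': np.zeros([3, 1])})    # dummy targets, for illustration only
print(loss.eval(feed))                      # loss value
print(loss.differentiate(w, feed))          # gradient w.r.t. w, expected shape [4, 1]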
Example #17
 def test_log_softmax(self):
     a = tensor([1, 3], 'a')
     feed = {'a': np.array([[1, 2, 3]])}
     assert_array_almost_equal(
         operator.log_softmax(a).forward(feed),
         operator.log(operator.softmax(a)).forward(feed))
Example #18
 def test_constant_tensor(self):
     a = tensor([2, 2], 'a', value=3)
     assert_array_almost_equal(a.forward({}), 3 * np.ones([2, 2]))