Example #1
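The two test methods below are evidently taken from a larger unittest class, so the scaffold sketched here (imports, the import path of NeuralTreeNode, and an extended test-case base class providing assertEqualArray and assertNotEmpty) is an assumption added only to make the snippet self-contained:

import unittest

import numpy

# Assumed import paths: NeuralTreeNode is taken here from mlstatpy, and
# ExtTestCase stands for an extended unittest.TestCase that provides the
# assertEqualArray / assertNotEmpty helpers used below.
from mlstatpy.ml.neural_tree import NeuralTreeNode
from pyquickhelper.pycode import ExtTestCase


class NeuralTreeNodeTests(ExtTestCase):  # hypothetical class name
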
    def test_optim_regression(self):
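        # Small regression dataset with a fixed seed: y is a noisy linear
        # function of the two features, with one hand-crafted row (index 1).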
        state = numpy.random.RandomState(seed=0)  # pylint: disable=E1101
        X = numpy.abs(state.randn(10, 2))
        w0 = state.randn(3)
        w1 = numpy.array([-0.5, 0.8, -0.6])
        noise = state.randn(X.shape[0]) / 10
        noise[0] = 0
        noise[1] = 0.07
        X[1, 0] = 0.7
        X[1, 1] = -0.5
        y = w1[0] + X[:, 0] * w1[1] + X[:, 1] * w1[2] + noise

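        # For each activation: sanity-check the loss, make sure the gradient
        # has the expected shape and is not constant, then fit from two
        # different starting points (the tighter numeric bounds are only
        # asserted for the 'identity' activation).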
        for act in [
                'identity', 'relu', 'leakyrelu', 'sigmoid', 'sigmoid4', 'expit'
        ]:
            with self.subTest(act=act):
                neu = NeuralTreeNode(w1[1:], bias=w1[0], activation=act)
                loss = neu.loss(X, y).sum() / X.shape[0]
                if act == 'identity':
                    self.assertGreater(loss, 0)
                    self.assertLess(loss, 0.1)
                grad = neu.gradient(X[0], y[0])
                if act == 'identity':
                    self.assertEqualArray(grad, numpy.zeros(grad.shape))
                grad = neu.gradient(X[1], y[1])
                ming, maxg = grad[:2].min(), grad[:2].max()
                if ming == maxg:
                    raise AssertionError(
                        "Gradient is wrong\nloss={}\ngrad={}".format(
                            loss, grad))
                self.assertEqual(grad.shape, w0.shape)

                neu.fit(X, y, verbose=False)
                c1 = neu.training_weights
                neu = NeuralTreeNode(w0[1:], bias=w0[0], activation=act)
                neu.fit(X, y, verbose=False, lr_schedule='constant')
                c2 = neu.training_weights
                diff = numpy.abs(c2 - c1)
                if act == 'identity':
                    self.assertLess(diff.max(), 0.16)

    def test_optim_clas(self):
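        # Two-output classification target: a noisy linear score is pushed
        # through a softmax, then (except for the last row) thresholded to
        # hard 0/1 labels and renormalized.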
        X = numpy.abs(numpy.random.randn(10, 2))
        w1 = numpy.array([[0.1, 0.8, -0.6], [-0.1, 0.4, -0.3]])
        w0 = numpy.random.randn(*w1.shape)
        noise = numpy.random.randn(*X.shape) / 10
        noise[0] = 0
        noise[1] = 0.07
        y0 = (X[:, :1] @ w1[:, 1:2].T +
              X[:, 1:] @ w1[:, 2:3].T + w1[:, 0].T + noise)
        y = numpy.exp(y0)
        y /= numpy.sum(y, axis=1, keepdims=True)
        y[:-1, 0] = (y[:-1, 0] >= 0.5).astype(numpy.float64)
        y[:-1, 1] = (y[:-1, 1] >= 0.5).astype(numpy.float64)
        y /= numpy.sum(y, axis=1, keepdims=True)

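        # For both softmax variants: compare weight shapes against a freshly
        # created node, check that the loss is finite, check the gradient
        # shape, and make sure fitting from two initializations preserves
        # the weight shape.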
        for act in ['softmax', 'softmax4']:
            with self.subTest(act=act):
                neu2 = NeuralTreeNode(2, activation=act)
                neu = NeuralTreeNode(w1[:, 1:], bias=w1[:, 0], activation=act)
                self.assertEqual(neu2.training_weights.shape,
                                 neu.training_weights.shape)
                self.assertEqual(neu2.input_weights.shape,
                                 neu.input_weights.shape)
                loss = neu.loss(X, y).sum() / X.shape[0]
                self.assertNotEmpty(loss)
                self.assertFalse(numpy.isinf(loss))
                self.assertFalse(numpy.isnan(loss))
                grad = neu.gradient(X[0], y[0])
                self.assertEqual(grad.ravel().shape, w1.ravel().shape)

                neu.fit(X, y, verbose=False)
                c1 = neu.training_weights
                neu = NeuralTreeNode(w0[:, 1:], bias=w0[:, 0], activation=act)
                neu.fit(X, y, verbose=False, lr_schedule='constant')
                c2 = neu.training_weights
                self.assertEqual(c1.shape, c2.shape)
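
With the scaffold assumed above, the example can be run as a regular script by appending a standard unittest entry point:

if __name__ == "__main__":
    unittest.main()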