def test_optim_regression(self):
    state = numpy.random.RandomState(seed=0)  # pylint: disable=E1101
    X = numpy.abs(state.randn(10, 2))
    w0 = state.randn(3)
    w1 = numpy.array([-0.5, 0.8, -0.6])
    noise = state.randn(X.shape[0]) / 10
    noise[0] = 0
    noise[1] = 0.07
    X[1, 0] = 0.7
    X[1, 1] = -0.5
    y = w1[0] + X[:, 0] * w1[1] + X[:, 1] * w1[2] + noise

    for act in ['identity', 'relu', 'leakyrelu',
                'sigmoid', 'sigmoid4', 'expit']:
        with self.subTest(act=act):
            neu = NeuralTreeNode(w1[1:], bias=w1[0], activation=act)
            loss = neu.loss(X, y).sum() / X.shape[0]
            if act == 'identity':
                self.assertGreater(loss, 0)
                self.assertLess(loss, 0.1)
            grad = neu.gradient(X[0], y[0])
            if act == 'identity':
                self.assertEqualArray(grad, numpy.zeros(grad.shape))
            grad = neu.gradient(X[1], y[1])
            ming, maxg = grad[:2].min(), grad[:2].max()
            if ming == maxg:
                raise AssertionError(
                    "Gradient is wrong\nloss={}\ngrad={}".format(
                        loss, grad))
            self.assertEqual(grad.shape, w0.shape)

            neu.fit(X, y, verbose=False)
            c1 = neu.training_weights

            neu = NeuralTreeNode(w0[1:], bias=w0[0], activation=act)
            neu.fit(X, y, verbose=False, lr_schedule='constant')
            c2 = neu.training_weights
            diff = numpy.abs(c2 - c1)
            if act == 'identity':
                self.assertLess(diff.max(), 0.16)
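# A minimal standalone sketch of the fitting pattern exercised above, kept
# out of the test run (never called). The import path below is an
# assumption; the NeuralTreeNode calls mirror the ones in the test.
def _sketch_fit_single_regression_node():
    import numpy
    from mlstatpy.ml.neural_tree import NeuralTreeNode  # assumed import
    state = numpy.random.RandomState(seed=0)
    X = numpy.abs(state.randn(50, 2))
    # Noiseless linear target: bias -0.5, weights 0.8 and -0.6.
    y = -0.5 + 0.8 * X[:, 0] - 0.6 * X[:, 1]
    neu = NeuralTreeNode(state.randn(2), bias=0.0, activation='identity')
    neu.fit(X, y, verbose=False)  # or lr_schedule='constant', as above
    # training_weights holds bias and input weights together (3 values),
    # which is why the gradient's shape matched w0.shape in the test.
    return neu.training_weights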
def test_neural_net_gradient_regression_2(self):
    X = numpy.abs(numpy.random.randn(10, 2))
    w1 = numpy.array([-0.5, 0.8, -0.6])
    noise = numpy.random.randn(X.shape[0]) / 10
    noise[0] = 0
    noise[1] = 0.07
    X[1, 0] = 0.7
    X[1, 1] = -0.5
    y = w1[0] + X[:, 0] * w1[1] + X[:, 1] * w1[2] + noise

    for act in ['relu', 'sigmoid', 'identity',
                'leakyrelu', 'sigmoid4', 'expit']:
        with self.subTest(act=act):
            neu = NeuralTreeNode(w1[1:], bias=w1[0], activation=act)
            loss1 = neu.loss(X, y)
            pred1 = neu.predict(X)
            if act == 'relu':
                self.assertEqualArray(pred1[1:2], numpy.array([0.36]))
                pred11 = neu.predict(X)
                self.assertEqualArray(pred11[1:2], numpy.array([0.36]))

            net = NeuralTreeNet(X.shape[1], empty=True)
            net.append(neu, numpy.arange(0, 2))
            ide = NeuralTreeNode(numpy.array([1], dtype=X.dtype),
                                 bias=numpy.array([0], dtype=X.dtype),
                                 activation='identity')
            net.append(ide, numpy.arange(2, 3))

            pred2 = net.predict(X)
            loss2 = net.loss(X, y)
            self.assertEqualArray(pred1, pred2[:, -1])
            self.assertEqualArray(pred2[:, -2], pred2[:, -1])
            self.assertEqualArray(pred2[:, 2], pred2[:, 3])
            self.assertEqualArray(loss1, loss2)

            for p in range(0, 5):
                grad1 = neu.gradient(X[p], y[p])
                grad2 = net.gradient(X[p], y[p])
                self.assertEqualArray(grad1, grad2[:3])
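# A minimal sketch of the composition checked above: appending an identity
# node after a unit yields a net whose last output reproduces the unit's
# predictions. Import path and example weights are assumptions; the column
# layout of net.predict (input columns first, then one column per unit) is
# inferred from the assertions in the test.
def _sketch_node_plus_identity_net():
    import numpy
    from mlstatpy.ml.neural_tree import NeuralTreeNode, NeuralTreeNet
    X = numpy.abs(numpy.random.randn(10, 2))
    neu = NeuralTreeNode(numpy.array([0.8, -0.6]), bias=-0.5,
                         activation='relu')
    net = NeuralTreeNet(X.shape[1], empty=True)
    net.append(neu, numpy.arange(0, 2))  # unit reads both input columns
    ide = NeuralTreeNode(numpy.array([1.0]), bias=numpy.array([0.0]),
                         activation='identity')
    net.append(ide, numpy.arange(2, 3))  # identity reads the unit's output
    pred = net.predict(X)
    assert numpy.allclose(pred[:, -1], neu.predict(X))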
def test_neural_net_gradient_regression(self):
    X = numpy.abs(numpy.random.randn(10, 2))
    w1 = numpy.array([-0.5, 0.8, -0.6])
    noise = numpy.random.randn(X.shape[0]) / 10
    noise[0] = 0
    noise[1] = 0.07
    X[1, 0] = 0.7
    X[1, 1] = -0.5
    y = w1[0] + X[:, 0] * w1[1] + X[:, 1] * w1[2] + noise

    for act in ['identity', 'relu', 'leakyrelu',
                'sigmoid', 'sigmoid4', 'expit']:
        with self.subTest(act=act):
            neu = NeuralTreeNode(w1[1:], bias=w1[0], activation=act)
            loss1 = neu.loss(X, y)
            grad1 = neu.gradient(X[0], y[0])

            net = NeuralTreeNet(X.shape[1], empty=True)
            net.append(neu, numpy.arange(0, 2))
            loss2 = net.loss(X, y)
            grad2 = net.gradient(X[0], y[0])
            self.assertEqualArray(loss1, loss2)
            self.assertEqualArray(grad1, grad2)
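# A minimal sketch of the equivalence checked above: wrapping a single node
# in a NeuralTreeNet changes neither loss nor gradient. Import path and
# weights are assumptions; the calls mirror the test.
def _sketch_single_node_net_equivalence():
    import numpy
    from mlstatpy.ml.neural_tree import NeuralTreeNode, NeuralTreeNet
    X = numpy.abs(numpy.random.randn(10, 2))
    y = -0.5 + 0.8 * X[:, 0] - 0.6 * X[:, 1]
    neu = NeuralTreeNode(numpy.array([0.8, -0.6]), bias=-0.5,
                         activation='sigmoid')
    net = NeuralTreeNet(X.shape[1], empty=True)
    net.append(neu, numpy.arange(0, 2))
    assert numpy.allclose(neu.loss(X, y), net.loss(X, y))
    assert numpy.allclose(neu.gradient(X[0], y[0]),
                          net.gradient(X[0], y[0]))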
def test_optim_clas(self):
    X = numpy.abs(numpy.random.randn(10, 2))
    w1 = numpy.array([[0.1, 0.8, -0.6], [-0.1, 0.4, -0.3]])
    w0 = numpy.random.randn(*w1.shape)
    noise = numpy.random.randn(*X.shape) / 10
    noise[0] = 0
    noise[1] = 0.07
    y0 = (X[:, :1] @ w1[:, 1:2].T + X[:, 1:] @ w1[:, 2:3].T +
          w1[:, 0].T + noise)
    y = numpy.exp(y0)
    y /= numpy.sum(y, axis=1, keepdims=True)
    y[:-1, 0] = (y[:-1, 0] >= 0.5).astype(numpy.float64)
    y[:-1, 1] = (y[:-1, 1] >= 0.5).astype(numpy.float64)
    y /= numpy.sum(y, axis=1, keepdims=True)

    for act in ['softmax', 'softmax4']:
        with self.subTest(act=act):
            neu2 = NeuralTreeNode(2, activation=act)
            neu = NeuralTreeNode(w1[:, 1:], bias=w1[:, 0], activation=act)
            self.assertEqual(neu2.training_weights.shape,
                             neu.training_weights.shape)
            self.assertEqual(neu2.input_weights.shape,
                             neu.input_weights.shape)
            loss = neu.loss(X, y).sum() / X.shape[0]
            self.assertNotEmpty(loss)
            self.assertFalse(numpy.isinf(loss))
            self.assertFalse(numpy.isnan(loss))
            grad = neu.gradient(X[0], y[0])
            self.assertEqual(grad.ravel().shape, w1.ravel().shape)

            neu.fit(X, y, verbose=False)
            c1 = neu.training_weights

            neu = NeuralTreeNode(w0[:, 1:], bias=w0[:, 0], activation=act)
            neu.fit(X, y, verbose=False, lr_schedule='constant')
            c2 = neu.training_weights
            self.assertEqual(c1.shape, c2.shape)
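# A minimal sketch of the two-class softmax fit above. Import path, data and
# targets are assumptions; NeuralTreeNode(2, activation='softmax') drawing
# random initial weights of the right shape is inferred from the shape
# assertions in the test.
def _sketch_fit_softmax_node():
    import numpy
    from mlstatpy.ml.neural_tree import NeuralTreeNode
    X = numpy.abs(numpy.random.randn(20, 2))
    # One-hot probability rows: class 0 when the first feature dominates.
    y = numpy.zeros((X.shape[0], 2))
    y[:, 0] = (X[:, 0] >= X[:, 1]).astype(numpy.float64)
    y[:, 1] = 1.0 - y[:, 0]
    neu = NeuralTreeNode(2, activation='softmax')  # random init, 2 inputs
    neu.fit(X, y, verbose=False)
    return neu.training_weights  # same shape as weights built from w1 above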