def test_blob_nonlinearity(self):
    """Gradient check for a GLM with a trainable BlobNonlinearity."""

    # Synthesize 1D inputs and a bimodal firing probability (two Gaussian bumps),
    # then sample binary responses from it.
    inputs = randn(1, 10000) * 4.
    firing_prob = exp(-(inputs - 2.)**2) / 2. + exp(-(inputs + 5.)**2 / 4.) / 4.
    responses = (rand(*firing_prob.shape) < firing_prob) * 1.

    model = GLM(1, BlobNonlinearity(3))
    # randomize the (single) weight away from its default
    model.weights = [[.5 + rand()]]

    # gradient with respect to the nonlinearity's parameters only
    rel_err = model._check_gradient(inputs, responses, parameters={
        'train_weights': False,
        'train_bias': False,
        'train_nonlinearity': True})
    self.assertLess(rel_err, 1e-6)

    # gradient with respect to the weights only
    rel_err = model._check_gradient(inputs, responses, parameters={
        'train_weights': True,
        'train_bias': False,
        'train_nonlinearity': False})
    self.assertLess(rel_err, 1e-6)
def test_glm_train(self):
    """Gradient checks and parameter recovery for a Bernoulli logistic GLM."""

    # Ground-truth parameters and logistic-Bernoulli data sampled from them.
    true_weights = asarray([[-1., 0., 1., 2.]]).T
    true_bias = 1.
    inputs = randn(4, 100000)
    prob = 1. / (1. + exp(-dot(true_weights.T, inputs) - true_bias))
    outputs = rand(*prob.shape) < prob

    model = GLM(4, LogisticFunction, Bernoulli)

    # --- gradient checks over different trainable-parameter subsets ---
    rel_err = model._check_gradient(inputs, outputs, 1e-5, parameters={
        'train_weights': False,
        'train_bias': True})
    self.assertLess(rel_err, 1e-8)

    rel_err = model._check_gradient(inputs, outputs, 1e-5, parameters={
        'train_weights': True,
        'train_bias': False})
    self.assertLess(rel_err, 1e-8)

    rel_err = model._check_gradient(inputs, outputs, 1e-5)
    self.assertLess(rel_err, 1e-8)

    # regularization terms must also be reflected in the gradient
    rel_err = model._check_gradient(inputs, outputs, 1e-5, parameters={
        'regularize_weights': 10.,
        'regularize_bias': 10.})
    self.assertLess(rel_err, 1e-8)

    # --- training should approximately recover the true parameters ---
    model.train(inputs, outputs, parameters={'verbosity': 0})
    self.assertLess(max(abs(model.weights - true_weights)), 0.1)
    self.assertLess(max(abs(model.bias - true_bias)), 0.1)

    # with weights frozen at the truth, only the bias should move
    model.weights = true_weights
    model.bias = -1.
    model.train(inputs, outputs, parameters={'verbosity': 0, 'train_weights': False})
    self.assertLess(max(abs(model.weights - true_weights)), 1e-12)
    self.assertLess(max(abs(model.bias - true_bias)), 0.1)

    # with bias frozen at the truth, only the weights should move
    model.weights = randn(*model.weights.shape)
    model.bias = true_bias
    model.train(inputs, outputs, parameters={'verbosity': 0, 'train_bias': False})
    self.assertLess(max(abs(model.weights - true_weights)), 0.1)
    self.assertLess(max(abs(model.bias - true_bias)), 1e-12)
def test_blob_nonlinearity(self):
    """Verify analytic gradients of a GLM using a BlobNonlinearity."""

    # Draw scalar stimuli and define a two-bump target probability curve;
    # binarize by thresholding uniform noise against that curve.
    stimuli = randn(1, 10000) * 4.
    target = exp(-(stimuli - 2.)**2) / 2. + exp(-(stimuli + 5.)**2 / 4.) / 4.
    labels = (rand(*target.shape) < target) * 1.

    glm = GLM(1, BlobNonlinearity(3))
    glm.weights = [[.5 + rand()]]  # perturb the lone weight

    # check nonlinearity gradient in isolation
    grad_err = glm._check_gradient(
        stimuli, labels,
        parameters={'train_weights': False, 'train_bias': False, 'train_nonlinearity': True})
    self.assertLess(grad_err, 1e-6)

    # check weight gradient in isolation
    grad_err = glm._check_gradient(
        stimuli, labels,
        parameters={'train_weights': True, 'train_bias': False, 'train_nonlinearity': False})
    self.assertLess(grad_err, 1e-6)
def test_glm_train(self):
    """Check gradients and training of a logistic-Bernoulli GLM."""

    # Sample data from a known logistic model.
    weights_gt = asarray([[-1., 0., 1., 2.]]).T
    bias_gt = 1.
    stimuli = randn(4, 100000)
    logistic = 1. / (1. + exp(-dot(weights_gt.T, stimuli) - bias_gt))
    labels = rand(*logistic.shape) < logistic

    glm = GLM(4, LogisticFunction, Bernoulli)

    # Gradient checks: bias only, weights only, everything, and regularized.
    self.assertLess(
        glm._check_gradient(stimuli, labels, 1e-5,
            parameters={'train_weights': False, 'train_bias': True}),
        1e-8)
    self.assertLess(
        glm._check_gradient(stimuli, labels, 1e-5,
            parameters={'train_weights': True, 'train_bias': False}),
        1e-8)
    self.assertLess(glm._check_gradient(stimuli, labels, 1e-5), 1e-8)
    self.assertLess(
        glm._check_gradient(stimuli, labels, 1e-5,
            parameters={'regularize_weights': 10., 'regularize_bias': 10.}),
        1e-8)

    # Full training recovers the ground truth to within tolerance.
    glm.train(stimuli, labels, parameters={'verbosity': 0})
    self.assertLess(max(abs(glm.weights - weights_gt)), 0.1)
    self.assertLess(max(abs(glm.bias - bias_gt)), 0.1)

    # Frozen weights stay put while the bias is learned.
    glm.weights = weights_gt
    glm.bias = -1.
    glm.train(stimuli, labels, parameters={'verbosity': 0, 'train_weights': False})
    self.assertLess(max(abs(glm.weights - weights_gt)), 1e-12)
    self.assertLess(max(abs(glm.bias - bias_gt)), 0.1)

    # Frozen bias stays put while the weights are learned.
    glm.weights = randn(*glm.weights.shape)
    glm.bias = bias_gt
    glm.train(stimuli, labels, parameters={'verbosity': 0, 'train_bias': False})
    self.assertLess(max(abs(glm.weights - weights_gt)), 0.1)
    self.assertLess(max(abs(glm.bias - bias_gt)), 1e-12)