from pickle import dump, load
from tempfile import mkstemp

from numpy import abs, cov, empty, hstack, max, mean, zeros_like
from numpy.linalg import inv
from numpy.random import rand, randint, randn

# GLM, STM, Bernoulli, LogisticFunction, and BlobNonlinearity are assumed to
# be importable from the library under test; the methods below are assumed to
# live on a unittest.TestCase subclass.

def test_glm_fisher_information(self):
    N = 1000
    T = 100

    glm = GLM(3)
    glm.weights = randn(glm.dim_in, 1)
    glm.bias = -2.

    inputs = randn(glm.dim_in, N)
    outputs = glm.sample(inputs)

    x = glm._parameters()
    I = glm._fisher_information(inputs, outputs)

    x_mle = []

    # repeated maximum likelihood estimation
    for t in range(T):
        inputs = randn(glm.dim_in, N)
        outputs = glm.sample(inputs)

        # initialize at true parameters for fast convergence
        glm_ = GLM(glm.dim_in)
        glm_.weights = glm.weights
        glm_.bias = glm.bias
        glm_.train(inputs, outputs)

        x_mle.append(glm_._parameters())

    C = cov(hstack(x_mle), ddof=1)

    # inv(I) should be sufficiently close to C
    self.assertLess(
        max(abs(inv(I) - C) / (abs(C) + .1)),
        max(abs(C) / (abs(C) + .1)) / 2.)
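# The assertion above relies on the asymptotic normality of maximum likelihood
# estimates: as N grows, cov(theta_hat) approaches inv(I), so the empirical
# covariance C of repeated MLEs should match the inverse Fisher information up
# to sampling noise. As a minimal sketch (not part of the test suite, and
# assuming a logistic nonlinearity with Bernoulli outputs, as used elsewhere
# in these tests), the Fisher information of the stacked parameters
# theta = (weights, bias) has the closed form
#
#     I = sum_n p_n * (1 - p_n) * z_n * z_n.T,   with z_n = [x_n; 1],
#
# which could serve as an independent reference value. The helper below and
# its name are illustrative only.
def fisher_information_logistic(weights, bias, inputs):
    from numpy import exp, ones, vstack

    # augment inputs with a constant row so the bias acts like an extra weight
    inputs_aug = vstack([inputs, ones(inputs.shape[1])])

    # Bernoulli means p_n under the logistic nonlinearity
    p = 1. / (1. + exp(-(weights.T.dot(inputs) + bias)))

    # weighted sum of outer products over all data points
    return (inputs_aug * (p * (1. - p))).dot(inputs_aug.T)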
def test_train(self):
    stm = STM(8, 4, 4, 10)

    parameters = stm._parameters()

    stm.train(
        randint(2, size=[stm.dim_in, 2000]),
        randint(2, size=[stm.dim_out, 2000]),
        parameters={
            'verbosity': 0,
            'max_iter': 0,
        })

    # parameters should not have changed
    self.assertLess(max(abs(stm._parameters() - parameters)), 1e-20)

    def callback(i, stm):
        callback.counter += 1
        return

    callback.counter = 0

    max_iter = 10

    stm.train(
        randint(2, size=[stm.dim_in, 10000]),
        randint(2, size=[stm.dim_out, 10000]),
        parameters={
            'verbosity': 0,
            'max_iter': max_iter,
            'threshold': 0.,
            'batch_size': 1999,
            'callback': callback,
            'cb_iter': 2,
        })

    # callback should have been called every cb_iter iterations
    self.assertEqual(callback.counter, max_iter / 2)

    # test zero-dimensional nonlinear inputs
    stm = STM(0, 5, 5)

    glm = GLM(stm.dim_in_linear, LogisticFunction, Bernoulli)
    glm.weights = randn(*glm.weights.shape)

    input = randn(stm.dim_in_linear, 10000)
    output = glm.sample(input)

    stm.train(input, output, parameters={'max_iter': 20})

    # STM should be able to learn GLM behavior
    self.assertAlmostEqual(
        glm.evaluate(input, output),
        stm.evaluate(input, output), 1)

    # test zero-dimensional inputs
    stm = STM(0, 0, 10)

    input = empty([0, 10000])
    output = rand(1, 10000) < 0.35

    stm.train(input, output)

    self.assertLess(abs(mean(stm.sample(input)) - mean(output)), 0.1)
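# A hedged usage sketch of the callback hook exercised above: the callback
# receives the iteration number and the model, so it can monitor training as
# it happens (with cb_iter=2 and max_iter=10 it fires 5 times, which is what
# the counter assertion checks). `make_tracker` and its variable names are
# illustrative, not part of the library's API.
def make_tracker(inputs, outputs):
    history = []

    def track(i, model):
        # record the model's evaluation score every cb_iter iterations
        history.append((i, model.evaluate(inputs, outputs)))

    return track, history

# usage (sketch):
#   track, history = make_tracker(inputs, outputs)
#   stm.train(inputs, outputs, parameters={'callback': track, 'cb_iter': 2})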
def test_glm_pickle(self):
    tmp_file = mkstemp()[1]

    model0 = GLM(5, BlobNonlinearity, Bernoulli)
    model0.weights = randn(*model0.weights.shape)
    model0.bias = randn()

    # store model
    with open(tmp_file, 'w') as handle:
        dump({'model': model0}, handle)

    # load model
    with open(tmp_file) as handle:
        model1 = load(handle)['model']

    # make sure parameters haven't changed
    self.assertLess(max(abs(model0.bias - model1.bias)), 1e-20)
    self.assertLess(max(abs(model0.weights - model1.weights)), 1e-20)

    x = randn(model0.dim_in, 100)
    y = model0.sample(x)

    self.assertEqual(
        model0.evaluate(x, y),
        model1.evaluate(x, y))
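# An equivalent in-memory round trip (a sketch): serializing to a byte string
# avoids the temporary file, and sidesteps the text-vs-binary file-mode
# pitfall that the 'w'/'r' modes above would hit under Python 3.
def pickle_roundtrip(model):
    from pickle import dumps, loads
    return loads(dumps(model))

# usage (sketch): model1 = pickle_roundtrip(model0)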
def test_glm_data_gradient(self):
    glm = GLM(7, LogisticFunction, Bernoulli)

    x = randn(glm.dim_in, 100)
    y = glm.sample(x)

    dx, _, ll = glm._data_gradient(x, y)

    h = 1e-7

    # compute numerical gradient
    dx_ = zeros_like(dx)
    for i in range(glm.dim_in):
        x_p = x.copy()
        x_m = x.copy()
        x_p[i] += h
        x_m[i] -= h
        dx_[i] = (
            glm.loglikelihood(x_p, y) -
            glm.loglikelihood(x_m, y)) / (2. * h)

    self.assertLess(max(abs(ll - glm.loglikelihood(x, y))), 1e-8)
    self.assertLess(max(abs(dx_ - dx)), 1e-7)
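# The loop above is a central-difference check,
#
#     dL/dx_i  ≈  (L(x + h*e_i) - L(x - h*e_i)) / (2h),
#
# whose truncation error is O(h^2); h = 1e-7 trades truncation error against
# floating-point cancellation in the subtraction. Perturbing the whole i-th
# row of x at once still yields per-sample derivatives, because the
# log-likelihood factorizes over columns (data points), so column n of the
# result only ever depends on column n of the input.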