def test_train(self):
    stm = STM(8, 4, 4, 10)

    parameters = stm._parameters()

    stm.train(
        randint(2, size=[stm.dim_in, 2000]),
        randint(2, size=[stm.dim_out, 2000]),
        parameters={
            'verbosity': 0,
            'max_iter': 0,
        })

    # parameters should not have changed
    self.assertLess(max(abs(stm._parameters() - parameters)), 1e-20)

    def callback(i, stm):
        callback.counter += 1
        return
    callback.counter = 0

    max_iter = 10

    stm.train(
        randint(2, size=[stm.dim_in, 10000]),
        randint(2, size=[stm.dim_out, 10000]),
        parameters={
            'verbosity': 0,
            'max_iter': max_iter,
            'threshold': 0.,
            'batch_size': 1999,
            'callback': callback,
            'cb_iter': 2,
        })

    self.assertEqual(callback.counter, max_iter / 2)

    # test zero-dimensional nonlinear inputs
    stm = STM(0, 5, 5)

    glm = GLM(stm.dim_in_linear, LogisticFunction, Bernoulli)
    glm.weights = randn(*glm.weights.shape)

    input = randn(stm.dim_in_linear, 10000)
    output = glm.sample(input)

    stm.train(input, output, parameters={'max_iter': 20})

    # STM should be able to learn GLM behavior
    self.assertAlmostEqual(glm.evaluate(input, output), stm.evaluate(input, output), 1)

    # test zero-dimensional inputs
    stm = STM(0, 0, 10)

    input = empty([0, 10000])
    output = rand(1, 10000) < 0.35

    stm.train(input, output)

    self.assertLess(abs(mean(stm.sample(input)) - mean(output)), 0.1)
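# The callback assertion above depends on the scheduling convention: with
# 'cb_iter' set to 2, the callback fires on every second training iteration,
# so 10 iterations produce 5 calls. A minimal sketch of that arithmetic,
# assuming only that cmt invokes the callback on iterations divisible by
# 'cb_iter' (this helper is illustrative, not part of cmt's API):
def expected_callback_count(max_iter, cb_iter):
    # count the training iterations on which the callback would fire
    return sum(1 for i in range(1, max_iter + 1) if i % cb_iter == 0)

assert expected_callback_count(10, 2) == 5  # matches the assertEqual above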
def test_gradient(self):
    stm = STM(5, 2, 10)
    stm.sharpness = 1.5

    # choose random parameters
    stm._set_parameters(randn(*stm._parameters().shape) / 100.)

    err = stm._check_gradient(
        randn(stm.dim_in, 1000),
        randint(2, size=[stm.dim_out, 1000]),
        1e-5,
        parameters={'train_sharpness': True})
    self.assertLess(err, 1e-8)

    # test with regularization turned off
    for param in ['biases', 'weights', 'features', 'pred', 'linear_predictor', 'sharpness']:
        err = stm._check_gradient(
            randn(stm.dim_in, 1000),
            randint(2, size=[stm.dim_out, 1000]),
            1e-6,
            parameters={
                'train_biases': param == 'biases',
                'train_weights': param == 'weights',
                'train_features': param == 'features',
                'train_predictors': param == 'pred',
                'train_linear_predictor': param == 'linear_predictor',
                'train_sharpness': param == 'sharpness',
            })
        self.assertLess(err, 1e-7)

    # test with regularization turned on
    for norm in ['L1', 'L2']:
        for param in ['priors', 'weights', 'features', 'pred', 'input_bias', 'output_bias']:
            err = stm._check_gradient(
                randint(2, size=[stm.dim_in, 1000]),
                randint(2, size=[stm.dim_out, 1000]),
                1e-7,
                parameters={
                    'train_prior': param == 'priors',
                    'train_weights': param == 'weights',
                    'train_features': param == 'features',
                    'train_predictors': param == 'pred',
                    'train_input_bias': param == 'input_bias',
                    'train_output_bias': param == 'output_bias',
                    'regularize_biases': {'strength': 0.6, 'norm': norm},
                    'regularize_features': {'strength': 0.6, 'norm': norm},
                    'regularize_predictors': {'strength': 0.6, 'norm': norm},
                    'regularize_weights': {'strength': 0.6, 'norm': norm},
                })
            self.assertLess(err, 1e-6)

    # gradient should never contain NaNs
    self.assertFalse(any(isnan(
        stm._parameter_gradient(
            randint(2, size=[stm.dim_in, 1000]),
            randint(2, size=[stm.dim_out, 1000]),
            stm._parameters()))))
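# '_check_gradient' returns the discrepancy between cmt's analytic gradient and
# a numerical estimate. A minimal sketch of the numerical side, assuming a
# scalar objective 'f' over a flat float parameter vector and using central
# differences (the function below is hypothetical, not cmt's implementation):
from numpy import zeros_like

def finite_difference_gradient(f, x, epsilon=1e-5):
    # perturb one parameter at a time and approximate df/dx_i
    grad = zeros_like(x)
    for i in range(x.size):
        x[i] += epsilon
        f_plus = f(x)
        x[i] -= 2. * epsilon
        f_minus = f(x)
        x[i] += epsilon  # restore the original value
        grad[i] = (f_plus - f_minus) / (2. * epsilon)
    return grad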
def test_poisson(self):
    stm = STM(5, 5, 3, 10, ExponentialFunction, Poisson)

    # choose random parameters
    stm._set_parameters(randn(*stm._parameters().shape) / 100.)

    err = stm._check_gradient(
        randn(stm.dim_in, 1000),
        randint(2, size=[stm.dim_out, 1000]),
        1e-5)
    self.assertLess(err, 1e-8)
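# The combination ExponentialFunction/Poisson means the model predicts a
# nonnegative rate and treats outputs as counts. A minimal sketch of that
# observation model in plain numpy, illustrating the distributional assumption
# rather than cmt's actual STM response:
from numpy import exp
from numpy.random import poisson

internal = randn(1, 1000)  # real-valued internal response of the model
rate = exp(internal)       # exponential nonlinearity keeps the rate positive
counts = poisson(rate)     # Poisson-distributed counts given that rate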