def test_gradient(self):
    """Numerically verify analytic gradients of the STM, with and without regularization."""

    model = STM(5, 2, 10)
    model.sharpness = 1.5

    # Start from small random parameters so the model is in a generic position.
    model._set_parameters(randn(*model._parameters().shape) / 100.)

    # Full gradient including the sharpness parameter.
    err = model._check_gradient(
        randn(model.dim_in, 1000),
        randint(2, size=[model.dim_out, 1000]),
        1e-5,
        parameters={'train_sharpness': True})
    self.assertLess(err, 1e-8)

    # Check each parameter group in isolation, regularization turned off.
    group_flags = [
        ('biases', 'train_biases'),
        ('weights', 'train_weights'),
        ('features', 'train_features'),
        ('pred', 'train_predictors'),
        ('linear_predictor', 'train_linear_predictor'),
        ('sharpness', 'train_sharpness'),
    ]
    for active, _ in group_flags:
        settings = {flag: group == active for group, flag in group_flags}
        err = model._check_gradient(
            randn(model.dim_in, 1000),
            randint(2, size=[model.dim_out, 1000]),
            1e-6,
            parameters=settings)
        self.assertLess(err, 1e-7)

    # Check each parameter group again with L1/L2 regularization enabled.
    reg_groups = [
        ('priors', 'train_prior'),
        ('weights', 'train_weights'),
        ('features', 'train_features'),
        ('pred', 'train_predictors'),
        ('input_bias', 'train_input_bias'),
        ('output_bias', 'train_output_bias'),
    ]
    for norm in ['L1', 'L2']:
        for active, _ in reg_groups:
            settings = {flag: group == active for group, flag in reg_groups}
            for target in ['biases', 'features', 'predictors', 'weights']:
                settings['regularize_' + target] = {'strength': 0.6, 'norm': norm}
            err = model._check_gradient(
                randint(2, size=[model.dim_in, 1000]),
                randint(2, size=[model.dim_out, 1000]),
                1e-7,
                parameters=settings)
            self.assertLess(err, 1e-6)

    # The analytic gradient itself must be free of NaNs.
    gradient = model._parameter_gradient(
        randint(2, size=[model.dim_in, 1000]),
        randint(2, size=[model.dim_out, 1000]),
        model._parameters())
    self.assertFalse(any(isnan(gradient)))
def test_poisson(self):
    """Gradient check for an STM with exponential nonlinearity and Poisson outputs."""

    model = STM(5, 5, 3, 10, ExponentialFunction, Poisson)

    # Small random parameters keep the model in a generic, well-behaved regime.
    model._set_parameters(randn(*model._parameters().shape) / 100.)

    err = model._check_gradient(
        randn(model.dim_in, 1000),
        randint(2, size=[model.dim_out, 1000]),
        1e-5)
    self.assertLess(err, 1e-8)