Example #1
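These snippets appear to be unit-test methods for the STM model, apparently from the cmt (Conditional Modeling Toolkit) test suite, and are not self-contained: each def test_*(self) belongs to a unittest.TestCase subclass. A minimal sketch of the imports they rely on; the cmt module paths are an assumption based on the library's documented layout, while the numpy/scipy names follow directly from their use below:

import unittest

# numpy's abs and max shadow the builtins so that the array-valued
# parameter check in test_train works as written
from numpy import abs, empty, log, max, mean, sqrt, vstack
from numpy.random import rand, randint, randn
from scipy.stats import norm

# assumed module paths for the Conditional Modeling Toolkit (cmt)
from cmt.models import STM, GLM, Bernoulli
from cmt.nonlinear import LogisticFunction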
	def test_train(self):
		stm = STM(8, 4, 4, 10)

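		# snapshot the parameters; training with max_iter = 0 below should
		# leave them unchanged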
		parameters = stm._parameters()

		stm.train(
			randint(2, size=[stm.dim_in, 2000]),
			randint(2, size=[stm.dim_out, 2000]),
			parameters={
				'verbosity': 0,
				'max_iter': 0,
				})

		# parameters should not have changed
		self.assertLess(max(abs(stm._parameters() - parameters)), 1e-20)

		def callback(i, stm):
			callback.counter += 1
			return
		callback.counter = 0

		max_iter = 10

		stm.train(
			randint(2, size=[stm.dim_in, 10000]),
			randint(2, size=[stm.dim_out, 10000]),
			parameters={
				'verbosity': 0,
				'max_iter': max_iter,
				'threshold': 0.,
				'batch_size': 1999,
				'callback': callback,
				'cb_iter': 2,
				})

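		# the callback runs every cb_iter = 2 iterations, so it should have
		# been called max_iter // 2 = 5 times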
		self.assertEqual(callback.counter, max_iter // 2)

		# test zero-dimensional nonlinear inputs
		stm = STM(0, 5, 5)

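		# a GLM over the linear inputs serves as the ground-truth model
		# the STM should be able to mimic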
		glm = GLM(stm.dim_in_linear, LogisticFunction, Bernoulli)
		glm.weights = randn(*glm.weights.shape)

		input = randn(stm.dim_in_linear, 10000)
		output = glm.sample(input)

		stm.train(input, output, parameters={'max_iter': 20})

		# STM should be able to learn GLM behavior
		self.assertAlmostEqual(glm.evaluate(input, output), stm.evaluate(input, output), 1)

		# test zero-dimensional inputs
		stm = STM(0, 0, 10)

		input = empty([0, 10000])
		output = rand(1, 10000) < 0.35

		stm.train(input, output)

		self.assertLess(abs(mean(stm.sample(input)) - mean(output)), 0.1)
Example #2
	def test_sample(self):
		q = 0.92
		N = 10000

		stm = STM(0, 0, 1, 1)
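		# set the bias to the logit of q, so samples are Bernoulli with mean q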
		stm.biases = [log(q / (1. - q))]

		x = mean(stm.sample(empty([0, N]))) - q
		p = 2. - 2. * norm.cdf(abs(x), scale=sqrt(q * (1. - q) / N))

		# p is roughly uniform under the null hypothesis, so this check
		# should fail in about 1 in 10,000 runs, but not more
		self.assertGreater(p, 0.0001)
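For reference, the check above is a two-sided z-test: under the null hypothesis, the mean of N Bernoulli(q) samples is approximately normal around q with standard deviation sqrt(q * (1 - q) / N), which makes p roughly uniform on (0, 1). A standalone sketch of the same check using numpy directly, assuming the imports listed under Example #1:

q, N = 0.92, 10000

# deviation of the empirical mean of N Bernoulli(q) draws from q
x = mean(rand(N) < q) - q

# two-sided tail probability under the normal approximation
p = 2. - 2. * norm.cdf(abs(x), scale=sqrt(q * (1. - q) / N))

assert p > 0.0001  # fails with probability of roughly 1e-4 under the null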
Example #3
	def test_basics(self):
		dim_in_nonlinear = 10
		dim_in_linear = 8
		num_components = 7
		num_features = 50
		num_samples = 100

		# create model
		stm = STM(dim_in_nonlinear, dim_in_linear, num_components, num_features)

		# generate output
		input_nonlinear = randint(2, size=[dim_in_nonlinear, num_samples])
		input_linear = randint(2, size=[dim_in_linear, num_samples])
		input = vstack([input_nonlinear, input_linear])
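		# by construction, the nonlinear input dimensions come first,
		# followed by the linear ones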

		output = stm.sample(input)
		loglik = stm.loglikelihood(input, output)

		# check hyperparameters
		self.assertEqual(stm.dim_in, dim_in_linear + dim_in_nonlinear)
		self.assertEqual(stm.dim_in_linear, dim_in_linear)
		self.assertEqual(stm.dim_in_nonlinear, dim_in_nonlinear)
		self.assertEqual(stm.num_components, num_components)
		self.assertEqual(stm.num_features, num_features)
	
		# check parameters
		self.assertEqual(stm.biases.shape[0], num_components)
		self.assertEqual(stm.biases.shape[1], 1)
		self.assertEqual(stm.weights.shape[0], num_components)
		self.assertEqual(stm.weights.shape[1], num_features)
		self.assertEqual(stm.features.shape[0], dim_in_nonlinear)
		self.assertEqual(stm.features.shape[1], num_features)
		self.assertEqual(stm.predictors.shape[0], num_components)
		self.assertEqual(stm.predictors.shape[1], dim_in_nonlinear)
		self.assertEqual(stm.linear_predictor.shape[0], dim_in_linear)
		self.assertEqual(stm.linear_predictor.shape[1], 1)

		# check dimensionality of output
		self.assertEqual(output.shape[0], 1)
		self.assertEqual(output.shape[1], num_samples)
		self.assertEqual(loglik.shape[0], 1)
		self.assertEqual(loglik.shape[1], num_samples)