Code example #1
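All examples on this page are test methods excerpted from the cmt unit tests and omit their imports. To run a snippet standalone, a header roughly like the following is needed; the cmt module paths below are an assumption based on the package layout of the cmt project and may need adjusting for your installation:

from pickle import dump, load
from tempfile import mkstemp

from numpy import abs, array, asarray, cov, dot, empty, exp, hstack, max, mean, ones, sum, tile, zeros, zeros_like
from numpy.linalg import inv
from numpy.random import permutation, rand, randint, randn

# model classes, nonlinearities, preconditioners and tools from cmt
# (assumed module paths; adjust to match your cmt build)
from cmt.models import Bernoulli, FVBN, GLM, STM
from cmt.nonlinear import BlobNonlinearity, LogisticFunction
from cmt.transforms import AffineTransform
from cmt.tools import sample_spike_train
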
	def test_train(self):
		stm = STM(8, 4, 4, 10)

		parameters = stm._parameters()

		stm.train(
			randint(2, size=[stm.dim_in, 2000]),
			randint(2, size=[stm.dim_out, 2000]),
			parameters={
				'verbosity': 0,
				'max_iter': 0,
				})

		# parameters should not have changed
		self.assertLess(max(abs(stm._parameters() - parameters)), 1e-20)

		def callback(i, stm):
			callback.counter += 1
			return
		callback.counter = 0

		max_iter = 10

		stm.train(
			randint(2, size=[stm.dim_in, 10000]),
			randint(2, size=[stm.dim_out, 10000]),
			parameters={
				'verbosity': 0,
				'max_iter': max_iter,
				'threshold': 0.,
				'batch_size': 1999,
				'callback': callback,
				'cb_iter': 2,
				})

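		# with 'cb_iter' set to 2, the callback is invoked every other iteration,
		# so after max_iter = 10 iterations it should have run 10 / 2 = 5 times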
		self.assertEqual(callback.counter, max_iter / 2)

		# test zero-dimensional nonlinear inputs
		stm = STM(0, 5, 5)

		glm = GLM(stm.dim_in_linear, LogisticFunction, Bernoulli)
		glm.weights = randn(*glm.weights.shape)

		input = randn(stm.dim_in_linear, 10000)
		output = glm.sample(input)

		stm.train(input, output, parameters={'max_iter': 20})

		# STM should be able to learn GLM behavior
		self.assertAlmostEqual(glm.evaluate(input, output), stm.evaluate(input, output), 1)

		# test zero-dimensional inputs
		stm = STM(0, 0, 10)

		input = empty([0, 10000])
		output = rand(1, 10000) < 0.35

		stm.train(input, output)

		self.assertLess(abs(mean(stm.sample(input)) - mean(output)), 0.1)
Code example #2
File: nonlinear_test.py Project: ominux/cmt
    def test_blob_nonlinearity(self):
        # generate test data
        x = randn(1, 10000) * 4.
        y = exp(-(x - 2.)**2) / 2. + exp(-(x + 5.)**2 / 4.) / 4.
        z = (rand(*y.shape) < y) * 1.

        glm = GLM(1, BlobNonlinearity(3))
        glm.weights = [[.5 + rand()]]

        err = glm._check_gradient(x,
                                  z,
                                  parameters={
                                      'train_weights': False,
                                      'train_bias': False,
                                      'train_nonlinearity': True
                                  })

        self.assertLess(err, 1e-6)

        err = glm._check_gradient(x,
                                  z,
                                  parameters={
                                      'train_weights': True,
                                      'train_bias': False,
                                      'train_nonlinearity': False
                                  })

        self.assertLess(err, 1e-6)
Code example #3
    def test_glm_basics(self):
        glm = GLM(4, LogisticFunction, Bernoulli)

        x = randn(1000)
        f = glm.nonlinearity
        y = f(x).ravel()

        for i in range(x.size):
            self.assertAlmostEqual(y[i], 1. / (1. + exp(-x[i])))

        glm.nonlinearity = f
        y = glm.nonlinearity(x).ravel()

        for i in range(x.size):
            self.assertAlmostEqual(y[i], 1. / (1. + exp(-x[i])))

        b = Bernoulli()

        glm = GLM(4, f, b)

        glm.nonlinearity = f
        y = glm.nonlinearity(x).ravel()

        for i in range(x.size):
            self.assertAlmostEqual(y[i], 1. / (1. + exp(-x[i])))

        self.assertTrue(isinstance(glm.distribution, Bernoulli))

        # test wrong order of arguments
        self.assertRaises(TypeError,
                          lambda: GLM(5, Bernoulli, LogisticFunction))
Code example #4
File: tools_test.py Project: ominux/cmt
    def test_sample_spike_train(self):
        inputs = array([[0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 1, 1, 0, 0, 1, 1],
                        [0, 1, 0, 1, 0, 1, 0, 1]])
        outputs = array([[1, 0, 0, 0, 1, 0, 0, 0]])

        glm = GLM(3, LogisticFunction, Bernoulli)
        glm.train(inputs, outputs)

        # generate a spike train without any stimulus input
        spike_train = sample_spike_train(empty([0, 100]), glm, 3)

        # test difference to expected spike train
        diff = spike_train.ravel()[:10] - [0, 0, 0, 1, 0, 0, 1, 0, 0, 1]
        self.assertLess(max(abs(diff)), 1e-8)

        # preconditioner which removes first (uninformative) dimension from input
        m = zeros([3, 1])
        A = array([[0, 1, 0], [0, 0, 1]])
        pre = AffineTransform(m, A)

        glm = GLM(2, LogisticFunction, Bernoulli)
        glm.train(pre(inputs), outputs)

        # generate a spike train with preconditioned spike history
        spike_train = sample_spike_train(empty([0, 100]), glm, 3, pre)

        # test difference to expected spike train
        diff = spike_train.ravel()[:10] - [0, 0, 0, 1, 0, 0, 1, 0, 0, 1]
        self.assertLess(max(abs(diff)), 1e-8)
Code example #5
File: fvbn_test.py Project: ominux/cmt
    def test_fvbn_train(self):
        xmask = ones([2, 2], dtype='bool')
        ymask = zeros([2, 2], dtype='bool')
        xmask[-1, -1] = False
        ymask[-1, -1] = True

        model = FVBN(2,
                     2,
                     xmask,
                     ymask,
                     model=GLM(sum(xmask), LogisticFunction, Bernoulli))

        # checkerboard data: each column is one flattened 2x2 binary patch, repeated 1000 times
        data = array([[0, 1], [1, 0]], dtype='bool').reshape(-1, 1)
        data = tile(data, (1, 1000))

        logloss = model.evaluate(data)

        model.initialize(data, parameters={'max_iter': 100})

        # training should converge in much less than 2000 iterations
        self.assertTrue(model.train(data, parameters={'max_iter': 2000}))

        # negative log-likelihood should have decreased
        self.assertLess(model.evaluate(data), logloss)
Code example #6
File: nonlinear_test.py Project: cajal/cmt
	def test_blob_nonlinearity(self):
		# generate test data
		x = randn(1, 10000) * 4.
		y = exp(-(x - 2.)**2) / 2. + exp(-(x + 5.)**2 / 4.) / 4.
		z = (rand(*y.shape) < y) * 1.

		glm = GLM(1, BlobNonlinearity(3))
		glm.weights = [[.5 + rand()]]

		err = glm._check_gradient(x, z, 
			parameters={'train_weights': False, 'train_bias': False, 'train_nonlinearity': True})

		self.assertLess(err, 1e-6)

		err = glm._check_gradient(x, z, 
			parameters={'train_weights': True, 'train_bias': False, 'train_nonlinearity': False})

		self.assertLess(err, 1e-6)
Code example #7
File: glm_test.py Project: jakirkham/cmt
	def test_glm_basics(self):
		glm = GLM(4, LogisticFunction, Bernoulli)

		x = randn(1000)
		f = glm.nonlinearity
		y = f(x).ravel()

		for i in range(x.size):
			self.assertAlmostEqual(y[i], 1. / (1. + exp(-x[i])))

		glm.nonlinearity = f
		y = glm.nonlinearity(x).ravel()

		for i in range(x.size):
			self.assertAlmostEqual(y[i], 1. / (1. + exp(-x[i])))

		b = Bernoulli()

		glm = GLM(4, f, b)

		glm.nonlinearity = f
		y = glm.nonlinearity(x).ravel()

		for i in range(x.size):
			self.assertAlmostEqual(y[i], 1. / (1. + exp(-x[i])))

		self.assertTrue(isinstance(glm.distribution, Bernoulli))

		# test wrong order of arguments
		self.assertRaises(TypeError, lambda: GLM(5, Bernoulli, LogisticFunction))
Code example #8
File: tools_test.py Project: cajal/cmt
	def test_sample_spike_train(self):
		inputs = array([
			[0, 0, 0, 0, 1, 1, 1, 1],
			[0, 0, 1, 1, 0, 0, 1, 1],
			[0, 1, 0, 1, 0, 1, 0, 1]])
		outputs = array([[1, 0, 0, 0, 1, 0, 0, 0]])

		glm = GLM(3, LogisticFunction, Bernoulli)
		glm.train(inputs, outputs)

		# generate a spike train without any stimulus input
		spike_train = sample_spike_train(empty([0, 100]), glm, 3)

		# test difference to expected spike train
		diff = spike_train.ravel()[:10] - [0, 0, 0, 1, 0, 0, 1, 0, 0, 1]
		self.assertLess(max(abs(diff)), 1e-8)

		# preconditioner which removes first (uninformative) dimension from input
		m = zeros([3, 1])
		A = array([[0, 1, 0], [0, 0, 1]])
		pre = AffineTransform(m, A)

		glm = GLM(2, LogisticFunction, Bernoulli)
		glm.train(pre(inputs), outputs)

		# generate a spike train with preconditioned spike history
		spike_train = sample_spike_train(empty([0, 100]), glm, 3, pre)

		# test difference to expected spike train
		diff = spike_train.ravel()[:10] - [0, 0, 0, 1, 0, 0, 1, 0, 0, 1]
		self.assertLess(max(abs(diff)), 1e-8)
Code example #9
File: glm_test.py Project: jakirkham/cmt
	def test_glm_pickle(self):
		tmp_file = mkstemp()[1]

		model0 = GLM(5, BlobNonlinearity, Bernoulli)
		model0.weights = randn(*model0.weights.shape)
		model0.bias = randn()

		# store model
		with open(tmp_file, 'w') as handle:
			dump({'model': model0}, handle)

		# load model
		with open(tmp_file) as handle:
			model1 = load(handle)['model']

		# make sure parameters haven't changed
		self.assertLess(max(abs(model0.bias - model1.bias)), 1e-20)
		self.assertLess(max(abs(model0.weights - model1.weights)), 1e-20)

		x = randn(model0.dim_in, 100)
		y = model0.sample(x)
		self.assertEqual(
			model0.evaluate(x, y),
			model1.evaluate(x, y))
Code example #10
File: glm_test.py Project: jakirkham/cmt
	def test_glm_data_gradient(self):
		glm = GLM(7, LogisticFunction, Bernoulli)

		x = randn(glm.dim_in, 100)
		y = glm.sample(x)

		dx, _, ll = glm._data_gradient(x, y)

		h = 1e-7

		# compute numerical gradient
		dx_ = zeros_like(dx)

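		# central differences: d/dx_i log p(y | x) is approximated by
		# (loglik(x_i + h) - loglik(x_i - h)) / (2 * h) for each input dimension i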
		for i in range(glm.dim_in):
			x_p = x.copy()
			x_m = x.copy()
			x_p[i] += h
			x_m[i] -= h
			dx_[i] = (
				glm.loglikelihood(x_p, y) -
				glm.loglikelihood(x_m, y)) / (2. * h)

		self.assertLess(max(abs(ll - glm.loglikelihood(x, y))), 1e-8)
		self.assertLess(max(abs(dx_ - dx)), 1e-7)
Code example #11
File: fvbn_test.py Project: ominux/cmt
    def test_pickle(self):
        xmask = ones([2, 2], dtype='bool')
        ymask = zeros([2, 2], dtype='bool')
        xmask[-1, -1] = False
        ymask[-1, -1] = True

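        # a random ordering of the four pixels of the 2x2 patch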
        order = [(i // 2, i % 2) for i in permutation(4)]

        model0 = FVBN(2, 2, xmask, ymask, order,
                      GLM(sum(xmask), LogisticFunction, Bernoulli))

        samples = model0.sample(1000)

        tmp_file = mkstemp()[1]

        # store model
        with open(tmp_file, 'w') as handle:
            dump({'model': model0}, handle)

        # load model
        with open(tmp_file) as handle:
            model1 = load(handle)['model']

        # make sure parameters haven't changed
        self.assertEqual(model0.rows, model1.rows)
        self.assertEqual(model0.cols, model1.cols)

        for i in range(model0.rows):
            for j in range(model0.cols):
                if model0[i, j].dim_in > 0:
                    self.assertLess(
                        max(abs(model0[i, j].weights - model1[i, j].weights)),
                        1e-8)
                    self.assertLess(
                        max(abs(model0[i, j].bias - model1[i, j].bias)), 1e-8)

        self.assertAlmostEqual(mean(model0.loglikelihood(samples)),
                               mean(model1.loglikelihood(samples)))
Code example #12
    def test_glm_pickle(self):
        tmp_file = mkstemp()[1]

        model0 = GLM(5, BlobNonlinearity, Bernoulli)
        model0.weights = randn(*model0.weights.shape)
        model0.bias = randn()

        # store model
        with open(tmp_file, 'w') as handle:
            dump({'model': model0}, handle)

        # load model
        with open(tmp_file) as handle:
            model1 = load(handle)['model']

        # make sure parameters haven't changed
        self.assertLess(max(abs(model0.bias - model1.bias)), 1e-20)
        self.assertLess(max(abs(model0.weights - model1.weights)), 1e-20)

        x = randn(model0.dim_in, 100)
        y = model0.sample(x)
        self.assertEqual(model0.evaluate(x, y), model1.evaluate(x, y))
Code example #13
    def test_glm_data_gradient(self):
        glm = GLM(7, LogisticFunction, Bernoulli)

        x = randn(glm.dim_in, 100)
        y = glm.sample(x)

        dx, _, ll = glm._data_gradient(x, y)

        h = 1e-7

        # compute numerical gradient
        dx_ = zeros_like(dx)

        for i in range(glm.dim_in):
            x_p = x.copy()
            x_m = x.copy()
            x_p[i] += h
            x_m[i] -= h
            dx_[i] = (glm.loglikelihood(x_p, y) -
                      glm.loglikelihood(x_m, y)) / (2. * h)

        self.assertLess(max(abs(ll - glm.loglikelihood(x, y))), 1e-8)
        self.assertLess(max(abs(dx_ - dx)), 1e-7)
Code example #14
    def test_glm_train(self):
        w = asarray([[-1., 0., 1., 2.]]).T
        b = 1.

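        # sample Bernoulli outputs from a logistic model with known weights w and bias b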
        x = randn(4, 100000)
        p = 1. / (1. + exp(-dot(w.T, x) - b))
        y = rand(*p.shape) < p

        glm = GLM(4, LogisticFunction, Bernoulli)

        # test gradient
        err = glm._check_gradient(x,
                                  y,
                                  1e-5,
                                  parameters={
                                      'train_weights': False,
                                      'train_bias': True
                                  })
        self.assertLess(err, 1e-8)

        err = glm._check_gradient(x,
                                  y,
                                  1e-5,
                                  parameters={
                                      'train_weights': True,
                                      'train_bias': False
                                  })
        self.assertLess(err, 1e-8)

        err = glm._check_gradient(x, y, 1e-5)
        self.assertLess(err, 1e-8)

        err = glm._check_gradient(x,
                                  y,
                                  1e-5,
                                  parameters={
                                      'regularize_weights': 10.,
                                      'regularize_bias': 10.
                                  })
        self.assertLess(err, 1e-8)

        # test training
        glm.train(x, y, parameters={'verbosity': 0})

        self.assertLess(max(abs(glm.weights - w)), 0.1)
        self.assertLess(max(abs(glm.bias - b)), 0.1)

        glm.weights = w
        glm.bias = -1.

        glm.train(x, y, parameters={'verbosity': 0, 'train_weights': False})

        self.assertLess(max(abs(glm.weights - w)), 1e-12)
        self.assertLess(max(abs(glm.bias - b)), 0.1)

        glm.weights = randn(*glm.weights.shape)
        glm.bias = b

        glm.train(x, y, parameters={'verbosity': 0, 'train_bias': False})

        self.assertLess(max(abs(glm.weights - w)), 0.1)
        self.assertLess(max(abs(glm.bias - b)), 1e-12)
Code example #15
    def test_glm_fisher_information(self):
        N = 2000
        T = 1000

        glm = GLM(4)
        glm.weights = randn(glm.dim_in, 1)
        glm.bias = -2.

        inputs = randn(glm.dim_in, N)
        outputs = glm.sample(inputs)

        x = glm._parameters()
        I = glm._fisher_information(inputs, outputs)

        x_mle = []

        # repeated maximum likelihood estimation
        for t in range(T):
            inputs = randn(glm.dim_in, N)
            outputs = glm.sample(inputs)

            # initialize at true parameters for fast convergence
            glm_ = GLM(glm.dim_in)
            glm_.weights = glm.weights
            glm_.bias = glm.bias
            glm_.train(inputs, outputs)

            x_mle.append(glm_._parameters())

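        # empirical covariance of the repeated ML estimates; by asymptotic
        # normality of the MLE this should be close to the inverse Fisher information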
        C = cov(hstack(x_mle), ddof=1)

        # inv(I) should be sufficiently close to C
        self.assertLess(max(abs(inv(I) - C) / (abs(C) + .1)),
                        max(abs(C) / (abs(C) + .1)) / 2.)
Code example #16
File: glm_test.py Project: jakirkham/cmt
	def test_glm_fisher_information(self):
		N = 1000
		T = 100

		glm = GLM(3)
		glm.weights = randn(glm.dim_in, 1)
		glm.bias = -2.

		inputs = randn(glm.dim_in, N)
		outputs = glm.sample(inputs)

		x = glm._parameters()
		I = glm._fisher_information(inputs, outputs)

		x_mle = []

		# repeated maximum likelihood estimation
		for t in range(T):
			inputs = randn(glm.dim_in, N)
			outputs = glm.sample(inputs)

			# initialize at true parameters for fast convergence
			glm_ = GLM(glm.dim_in)
			glm_.weights = glm.weights
			glm_.bias = glm.bias
			glm_.train(inputs, outputs)

			x_mle.append(glm_._parameters())

		C = cov(hstack(x_mle), ddof=1)

		# inv(I) should be sufficiently close to C
		self.assertLess(max(abs(inv(I) - C) / (abs(C) + .1)), max(abs(C) / (abs(C) + .1)) / 2.)
Code example #17
File: glm_test.py Project: jakirkham/cmt
	def test_glm_train(self):
		w = asarray([[-1., 0., 1., 2.]]).T
		b = 1.

		x = randn(4, 100000)
		p = 1. / (1. + exp(-dot(w.T, x) - b))
		y = rand(*p.shape) < p

		glm = GLM(4, LogisticFunction, Bernoulli)

		# test gradient
		err = glm._check_gradient(x, y, 1e-5, parameters={
			'train_weights': False,
			'train_bias': True})
		self.assertLess(err, 1e-8)

		err = glm._check_gradient(x, y, 1e-5, parameters={
			'train_weights': True,
			'train_bias': False})
		self.assertLess(err, 1e-8)

		err = glm._check_gradient(x, y, 1e-5)
		self.assertLess(err, 1e-8)

		err = glm._check_gradient(x, y, 1e-5, parameters={
			'regularize_weights': 10.,
			'regularize_bias': 10.})
		self.assertLess(err, 1e-8)

		# test training
		glm.train(x, y, parameters={'verbosity': 0})

		self.assertLess(max(abs(glm.weights - w)), 0.1)
		self.assertLess(max(abs(glm.bias - b)), 0.1)

		glm.weights = w
		glm.bias = -1.

		glm.train(x, y, parameters={'verbosity': 0, 'train_weights': False})

		self.assertLess(max(abs(glm.weights - w)), 1e-12)
		self.assertLess(max(abs(glm.bias - b)), 0.1)

		glm.weights = randn(*glm.weights.shape)
		glm.bias = b

		glm.train(x, y, parameters={'verbosity': 0, 'train_bias': False})

		self.assertLess(max(abs(glm.weights - w)), 0.1)
		self.assertLess(max(abs(glm.bias - b)), 1e-12)