Example #1
    # assumed imports (cmt package layout):
    #   from numpy import abs, cov, hstack, max
    #   from numpy.random import randn
    #   from numpy.linalg import inv
    #   from cmt.models import GLM
    def test_glm_fisher_information(self):
        N = 2000
        T = 1000

        glm = GLM(4)
        glm.weights = randn(glm.dim_in, 1)
        glm.bias = -2.

        inputs = randn(glm.dim_in, N)
        outputs = glm.sample(inputs)

        x = glm._parameters()  # current parameter vector (not used below)
        I = glm._fisher_information(inputs, outputs)

        x_mle = []

        # repeated maximum likelihood estimation
        for t in range(T):
            inputs = randn(glm.dim_in, N)
            outputs = glm.sample(inputs)

            # initialize at true parameters for fast convergence
            glm_ = GLM(glm.dim_in)
            glm_.weights = glm.weights
            glm_.bias = glm.bias
            glm_.train(inputs, outputs)

            x_mle.append(glm_._parameters())

        C = cov(hstack(x_mle), ddof=1)

        # inv(I) should be sufficiently close to C
        self.assertLess(max(abs(inv(I) - C) / (abs(C) + .1)),
                        max(abs(C) / (abs(C) + .1)) / 2.)
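
The assertion rests on the asymptotic normality of the maximum-likelihood estimator: for a correctly specified model, the covariance of repeated MLEs approaches the inverse Fisher information as the sample size grows (the Cramér-Rao bound holds with equality in the limit). For a Bernoulli GLM with a logistic link, the Fisher information has the closed form I = sum_n p_n (1 - p_n) x_n x_n^T, where x_n is the input augmented with a constant entry for the bias. Below is a minimal pure-NumPy sketch of that quantity; the weights-then-bias parameter ordering is an assumption, and cmt's _fisher_information may compute it differently.

from numpy import vstack, ones, exp, dot

def fisher_information_logistic(weights, bias, inputs):
    # augment the inputs with a constant row so the bias acts like a weight
    X = vstack([inputs, ones([1, inputs.shape[1]])])
    theta = vstack([weights, [[bias]]])

    # Bernoulli means and per-sample variances under the logistic link
    p = 1. / (1. + exp(-dot(theta.T, X)))
    W = (p * (1. - p)).ravel()

    # I = sum_n W_n x_n x_n^T, written compactly as X diag(W) X^T
    return dot(X * W, X.T)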
Example #2
	# same assumed imports as Example #1; this variant uses a smaller
	# model (3 inputs) and fewer trials, so it runs faster but is noisier
	def test_glm_fisher_information(self):
		N = 1000
		T = 100

		glm = GLM(3)
		glm.weights = randn(glm.dim_in, 1)
		glm.bias = -2.

		inputs = randn(glm.dim_in, N)
		outputs = glm.sample(inputs)

		x = glm._parameters()  # current parameter vector (not used below)
		I = glm._fisher_information(inputs, outputs)

		x_mle = []

		# repeated maximum likelihood estimation
		for t in range(T):
			inputs = randn(glm.dim_in, N)
			outputs = glm.sample(inputs)

			# initialize at true parameters for fast convergence
			glm_ = GLM(glm.dim_in)
			glm_.weights = glm.weights
			glm_.bias = glm.bias
			glm_.train(inputs, outputs)

			x_mle.append(glm_._parameters())

		C = cov(hstack(x_mle), ddof=1)

		# inv(I) should be sufficiently close to C
		self.assertLess(max(abs(inv(I) - C) / (abs(C) + .1)), max(abs(C) / (abs(C) + .1)) / 2.)
Example #3
	# assumed imports (cmt package layout):
	#   from numpy import abs, array, empty, max, zeros
	#   from cmt.models import GLM, Bernoulli
	#   from cmt.nonlinear import LogisticFunction
	#   from cmt.tools import sample_spike_train
	#   from cmt.transforms import AffineTransform
	def test_sample_spike_train(self):
		inputs = array([
			[0, 0, 0, 0, 1, 1, 1, 1],
			[0, 0, 1, 1, 0, 0, 1, 1],
			[0, 1, 0, 1, 0, 1, 0, 1]])
		outputs = array([[1, 0, 0, 0, 1, 0, 0, 0]])

		glm = GLM(3, LogisticFunction, Bernoulli)
		glm.train(inputs, outputs)

		# generate a spike train without any stimulus input
		spike_train = sample_spike_train(empty([0, 100]), glm, 3)

		# test difference to expected spike train
		diff = spike_train.ravel()[:10] - [0, 0, 0, 1, 0, 0, 1, 0, 0, 1]
		self.assertLess(max(abs(diff)), 1e-8)

		# preconditioner which removes first (uninformative) dimension from input
		m = zeros([3, 1])
		A = array([[0, 1, 0], [0, 0, 1]])
		pre = AffineTransform(m, A)

		glm = GLM(2, LogisticFunction, Bernoulli)
		glm.train(pre(inputs), outputs)

		# generate a spike train with preconditioned spike history
		spike_train = sample_spike_train(empty([0, 100]), glm, 3, pre)

		# test difference to expected spike train
		diff = spike_train.ravel()[:10] - [0, 0, 0, 1, 0, 0, 1, 0, 0, 1]
		self.assertLess(max(abs(diff)), 1e-8)
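
The calls above can be read as follows: the model's input at each time bin is the current stimulus column (here empty, with 0 rows and 100 bins) stacked on top of the most recent 3 output bins, optionally passed through the preconditioner first. The sketch below is a hypothetical re-implementation of that reading, not cmt's code; in particular, the ordering of the history window and the zero initialization of the first bins are assumptions.

from numpy import vstack, zeros

def sample_spike_train_sketch(stimuli, model, history, preconditioner=None):
    num_bins = stimuli.shape[1]
    spikes = zeros([1, num_bins])

    for t in range(history, num_bins):
        # current stimulus column on top of the preceding `history` bins
        inputs = vstack([stimuli[:, t:t + 1], spikes[:, t - history:t].T])

        if preconditioner is not None:
            inputs = preconditioner(inputs)

        spikes[:, t:t + 1] = model.sample(inputs)

    return spikes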
Example #4
	# assumed imports (cmt package layout):
	#   from numpy import abs, asarray, dot, exp, max
	#   from numpy.random import rand, randn
	#   from cmt.models import GLM, Bernoulli
	#   from cmt.nonlinear import LogisticFunction
	def test_glm_train(self):
		w = asarray([[-1., 0., 1., 2.]]).T
		b = 1.

		x = randn(4, 100000)
		p = 1. / (1. + exp(-dot(w.T, x) - b))
		y = rand(*p.shape) < p

		glm = GLM(4, LogisticFunction, Bernoulli)

		# test gradient
		err = glm._check_gradient(x, y, 1e-5, parameters={
			'train_weights': False,
			'train_bias': True})
		self.assertLess(err, 1e-8)

		err = glm._check_gradient(x, y, 1e-5, parameters={
			'train_weights': True,
			'train_bias': False})
		self.assertLess(err, 1e-8)

		err = glm._check_gradient(x, y, 1e-5)
		self.assertLess(err, 1e-8)

		err = glm._check_gradient(x, y, 1e-5, parameters={
			'regularize_weights': 10.,
			'regularize_bias': 10.})
		self.assertLess(err, 1e-8)

		# test training
		glm.train(x, y, parameters={'verbosity': 0})

		self.assertLess(max(abs(glm.weights - w)), 0.1)
		self.assertLess(max(abs(glm.bias - b)), 0.1)

		glm.weights = w
		glm.bias = -1.

		glm.train(x, y, parameters={'verbosity': 0, 'train_weights': False})

		self.assertLess(max(abs(glm.weights - w)), 1e-12)
		self.assertLess(max(abs(glm.bias - b)), 0.1)

		glm.weights = randn(*glm.weights.shape)
		glm.bias = b

		glm.train(x, y, parameters={'verbosity': 0, 'train_bias': False})

		self.assertLess(max(abs(glm.weights - w)), 0.1)
		self.assertLess(max(abs(glm.bias - b)), 1e-12)
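
_check_gradient is used here as a finite-difference test: it is expected to perturb the parameters, compare the numerical slope of the objective against the analytic gradient, and return the worst-case discrepancy, which should vanish up to O(epsilon^2) for a correct gradient. A generic sketch of that idea, assuming f returns the scalar objective and grad its analytic gradient (both hypothetical stand-ins, not cmt's API):

from numpy import zeros_like

def check_gradient_sketch(f, grad, theta, epsilon=1e-5):
    # central differences: (f(theta + e) - f(theta - e)) / (2 * epsilon)
    g_num = zeros_like(theta)

    for i in range(theta.size):
        e = zeros_like(theta)
        e.flat[i] = epsilon
        g_num.flat[i] = (f(theta + e) - f(theta - e)) / (2. * epsilon)

    # worst-case deviation from the analytic gradient
    return abs(grad(theta) - g_num).max()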